diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..3550a30 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake diff --git a/_build/dev/.mix/compile.lock b/_build/dev/.mix/compile.lock new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/.mix/compile.protocols b/_build/dev/.mix/compile.protocols new file mode 100644 index 0000000..133bbac Binary files /dev/null and b/_build/dev/.mix/compile.protocols differ diff --git a/_build/dev/consolidated/Elixir.Bandit.HTTP2.Frame.Serializable.beam b/_build/dev/consolidated/Elixir.Bandit.HTTP2.Frame.Serializable.beam new file mode 100644 index 0000000..a0644c6 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Bandit.HTTP2.Frame.Serializable.beam differ diff --git a/_build/dev/consolidated/Elixir.Bandit.HTTPTransport.beam b/_build/dev/consolidated/Elixir.Bandit.HTTPTransport.beam new file mode 100644 index 0000000..a2d9e8a Binary files /dev/null and b/_build/dev/consolidated/Elixir.Bandit.HTTPTransport.beam differ diff --git a/_build/dev/consolidated/Elixir.Bandit.WebSocket.Frame.Serializable.beam b/_build/dev/consolidated/Elixir.Bandit.WebSocket.Frame.Serializable.beam new file mode 100644 index 0000000..20b646c Binary files /dev/null and b/_build/dev/consolidated/Elixir.Bandit.WebSocket.Frame.Serializable.beam differ diff --git a/_build/dev/consolidated/Elixir.Bandit.WebSocket.Socket.beam b/_build/dev/consolidated/Elixir.Bandit.WebSocket.Socket.beam new file mode 100644 index 0000000..b0f0628 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Bandit.WebSocket.Socket.beam differ diff --git a/_build/dev/consolidated/Elixir.Collectable.beam b/_build/dev/consolidated/Elixir.Collectable.beam new file mode 100644 index 0000000..8f9a50d Binary files /dev/null and b/_build/dev/consolidated/Elixir.Collectable.beam differ diff --git a/_build/dev/consolidated/Elixir.DBConnection.Query.beam b/_build/dev/consolidated/Elixir.DBConnection.Query.beam new file mode 100644 index 0000000..4de5414 
Binary files /dev/null and b/_build/dev/consolidated/Elixir.DBConnection.Query.beam differ diff --git a/_build/dev/consolidated/Elixir.Ecto.Queryable.beam b/_build/dev/consolidated/Elixir.Ecto.Queryable.beam new file mode 100644 index 0000000..341ae33 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Ecto.Queryable.beam differ diff --git a/_build/dev/consolidated/Elixir.Enumerable.beam b/_build/dev/consolidated/Elixir.Enumerable.beam new file mode 100644 index 0000000..ec061b3 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Enumerable.beam differ diff --git a/_build/dev/consolidated/Elixir.IEx.Info.beam b/_build/dev/consolidated/Elixir.IEx.Info.beam new file mode 100644 index 0000000..314f1a6 Binary files /dev/null and b/_build/dev/consolidated/Elixir.IEx.Info.beam differ diff --git a/_build/dev/consolidated/Elixir.Inspect.beam b/_build/dev/consolidated/Elixir.Inspect.beam new file mode 100644 index 0000000..8df0901 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Inspect.beam differ diff --git a/_build/dev/consolidated/Elixir.JSON.Encoder.beam b/_build/dev/consolidated/Elixir.JSON.Encoder.beam new file mode 100644 index 0000000..186e1a9 Binary files /dev/null and b/_build/dev/consolidated/Elixir.JSON.Encoder.beam differ diff --git a/_build/dev/consolidated/Elixir.Jason.Encoder.beam b/_build/dev/consolidated/Elixir.Jason.Encoder.beam new file mode 100644 index 0000000..e0279b0 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Jason.Encoder.beam differ diff --git a/_build/dev/consolidated/Elixir.List.Chars.beam b/_build/dev/consolidated/Elixir.List.Chars.beam new file mode 100644 index 0000000..f711a13 Binary files /dev/null and b/_build/dev/consolidated/Elixir.List.Chars.beam differ diff --git a/_build/dev/consolidated/Elixir.Phoenix.HTML.FormData.beam b/_build/dev/consolidated/Elixir.Phoenix.HTML.FormData.beam new file mode 100644 index 0000000..0451f9a Binary files /dev/null and 
b/_build/dev/consolidated/Elixir.Phoenix.HTML.FormData.beam differ diff --git a/_build/dev/consolidated/Elixir.Phoenix.HTML.Safe.beam b/_build/dev/consolidated/Elixir.Phoenix.HTML.Safe.beam new file mode 100644 index 0000000..89756c2 Binary files /dev/null and b/_build/dev/consolidated/Elixir.Phoenix.HTML.Safe.beam differ diff --git a/_build/dev/consolidated/Elixir.Phoenix.Param.beam b/_build/dev/consolidated/Elixir.Phoenix.Param.beam new file mode 100644 index 0000000..a94b08f Binary files /dev/null and b/_build/dev/consolidated/Elixir.Phoenix.Param.beam differ diff --git a/_build/dev/consolidated/Elixir.Plug.Exception.beam b/_build/dev/consolidated/Elixir.Plug.Exception.beam new file mode 100644 index 0000000..7cc2fff Binary files /dev/null and b/_build/dev/consolidated/Elixir.Plug.Exception.beam differ diff --git a/_build/dev/consolidated/Elixir.String.Chars.beam b/_build/dev/consolidated/Elixir.String.Chars.beam new file mode 100644 index 0000000..c0aea86 Binary files /dev/null and b/_build/dev/consolidated/Elixir.String.Chars.beam differ diff --git a/_build/dev/consolidated/Elixir.Swoosh.Email.Recipient.beam b/_build/dev/consolidated/Elixir.Swoosh.Email.Recipient.beam new file mode 100644 index 0000000..cbcf6bd Binary files /dev/null and b/_build/dev/consolidated/Elixir.Swoosh.Email.Recipient.beam differ diff --git a/_build/dev/lib/bandit/.mix/compile.elixir b/_build/dev/lib/bandit/.mix/compile.elixir new file mode 100644 index 0000000..e463d3b Binary files /dev/null and b/_build/dev/lib/bandit/.mix/compile.elixir differ diff --git a/_build/dev/lib/bandit/.mix/compile.elixir_scm b/_build/dev/lib/bandit/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/bandit/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/bandit/.mix/compile.fetch b/_build/dev/lib/bandit/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Adapter.beam 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Adapter.beam new file mode 100644 index 0000000..6dfa1ad Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Adapter.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Application.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Application.beam new file mode 100644 index 0000000..e0d40a0 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Application.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Clock.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Clock.beam new file mode 100644 index 0000000..881b9e8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Clock.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Compression.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Compression.beam new file mode 100644 index 0000000..df50109 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Compression.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.DelegatingHandler.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.DelegatingHandler.beam new file mode 100644 index 0000000..69fbba7 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.DelegatingHandler.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Extractor.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Extractor.beam new file mode 100644 index 0000000..58ac99e Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Extractor.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Handler.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Handler.beam new file mode 100644 index 0000000..afe0d9c Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Handler.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Socket.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Socket.beam new file mode 100644 index 0000000..5726825 Binary files 
/dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP1.Socket.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Connection.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Connection.beam new file mode 100644 index 0000000..19a5b8a Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Connection.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.ConnectionError.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.ConnectionError.beam new file mode 100644 index 0000000..e0690a5 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.ConnectionError.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.StreamError.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.StreamError.beam new file mode 100644 index 0000000..d12c60c Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.StreamError.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.beam new file mode 100644 index 0000000..5bfdb37 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Errors.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.FlowControl.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.FlowControl.beam new file mode 100644 index 0000000..a3e69a5 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.FlowControl.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Continuation.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Continuation.beam new file mode 100644 index 0000000..0123332 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Continuation.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Data.beam 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Data.beam new file mode 100644 index 0000000..0203835 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Data.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Flags.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Flags.beam new file mode 100644 index 0000000..7a9f910 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Flags.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Goaway.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Goaway.beam new file mode 100644 index 0000000..2d13bcb Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Goaway.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Headers.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Headers.beam new file mode 100644 index 0000000..bd61941 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Headers.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Ping.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Ping.beam new file mode 100644 index 0000000..25224f9 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Ping.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Priority.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Priority.beam new file mode 100644 index 0000000..b0921bf Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Priority.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.PushPromise.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.PushPromise.beam new file mode 100644 index 0000000..9af8c73 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.PushPromise.beam differ diff --git 
a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.RstStream.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.RstStream.beam new file mode 100644 index 0000000..d337ebc Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.RstStream.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Continuation.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Continuation.beam new file mode 100644 index 0000000..2726609 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Continuation.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Data.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Data.beam new file mode 100644 index 0000000..51b3c63 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Data.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Goaway.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Goaway.beam new file mode 100644 index 0000000..760038b Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Goaway.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Headers.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Headers.beam new file mode 100644 index 0000000..5d25ce9 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Headers.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Ping.beam 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Ping.beam new file mode 100644 index 0000000..98027f2 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Ping.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Priority.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Priority.beam new file mode 100644 index 0000000..2bf63bb Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Priority.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.RstStream.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.RstStream.beam new file mode 100644 index 0000000..e2556cf Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.RstStream.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Settings.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Settings.beam new file mode 100644 index 0000000..f101043 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Settings.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.WindowUpdate.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.WindowUpdate.beam new file mode 100644 index 0000000..7a29f19 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.WindowUpdate.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.beam 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.beam new file mode 100644 index 0000000..380a5c8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Serializable.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Settings.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Settings.beam new file mode 100644 index 0000000..e9f236c Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Settings.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Unknown.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Unknown.beam new file mode 100644 index 0000000..966b3c8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.Unknown.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.WindowUpdate.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.WindowUpdate.beam new file mode 100644 index 0000000..57c4217 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.WindowUpdate.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.beam new file mode 100644 index 0000000..abff7f1 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Frame.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Handler.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Handler.beam new file mode 100644 index 0000000..b84f6f9 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Handler.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Settings.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Settings.beam new file mode 100644 index 0000000..cbd7ba9 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Settings.beam differ diff --git 
a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Stream.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Stream.beam new file mode 100644 index 0000000..393b334 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.Stream.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamCollection.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamCollection.beam new file mode 100644 index 0000000..dd88534 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamCollection.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamProcess.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamProcess.beam new file mode 100644 index 0000000..3c9728c Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTP2.StreamProcess.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPError.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPError.beam new file mode 100644 index 0000000..ddf7230 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPError.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP1.Socket.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP1.Socket.beam new file mode 100644 index 0000000..d0429d3 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP1.Socket.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP2.Stream.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP2.Stream.beam new file mode 100644 index 0000000..a5359c1 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.Bandit.HTTP2.Stream.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.beam new file mode 100644 index 0000000..6a5a25e Binary files /dev/null and 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.HTTPTransport.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Headers.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Headers.beam new file mode 100644 index 0000000..10a30a9 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Headers.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.InitialHandler.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.InitialHandler.beam new file mode 100644 index 0000000..60e2213 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.InitialHandler.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Logger.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Logger.beam new file mode 100644 index 0000000..19f41b8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Logger.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.PhoenixAdapter.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.PhoenixAdapter.beam new file mode 100644 index 0000000..cccb6e7 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.PhoenixAdapter.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Pipeline.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Pipeline.beam new file mode 100644 index 0000000..7fc07d4 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Pipeline.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.PrimitiveOps.WebSocket.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.PrimitiveOps.WebSocket.beam new file mode 100644 index 0000000..13c6584 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.PrimitiveOps.WebSocket.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.SocketHelpers.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.SocketHelpers.beam new file mode 100644 index 0000000..f88a956 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.SocketHelpers.beam differ diff --git 
a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Telemetry.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Telemetry.beam new file mode 100644 index 0000000..b60fb5f Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Telemetry.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.Trace.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Trace.beam new file mode 100644 index 0000000..7ee6f21 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.Trace.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.TransportError.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.TransportError.beam new file mode 100644 index 0000000..2e35ca8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.TransportError.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Connection.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Connection.beam new file mode 100644 index 0000000..222e1c2 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Connection.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Binary.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Binary.beam new file mode 100644 index 0000000..9d5f1af Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Binary.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.ConnectionClose.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.ConnectionClose.beam new file mode 100644 index 0000000..63931d8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.ConnectionClose.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Continuation.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Continuation.beam new file mode 100644 index 0000000..79756dd Binary files /dev/null and 
b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Continuation.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Ping.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Ping.beam new file mode 100644 index 0000000..01c5234 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Ping.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Pong.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Pong.beam new file mode 100644 index 0000000..88627b8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Pong.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Binary.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Binary.beam new file mode 100644 index 0000000..90cfaf8 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Binary.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.ConnectionClose.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.ConnectionClose.beam new file mode 100644 index 0000000..5a69649 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.ConnectionClose.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Continuation.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Continuation.beam new file mode 100644 index 0000000..9ddadeb Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Continuation.beam differ diff --git 
a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Ping.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Ping.beam new file mode 100644 index 0000000..171b16c Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Ping.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Pong.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Pong.beam new file mode 100644 index 0000000..6493fc7 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Pong.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Text.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Text.beam new file mode 100644 index 0000000..6458d15 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Text.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.beam new file mode 100644 index 0000000..1201203 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Serializable.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Text.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Text.beam new file mode 100644 index 0000000..2ee5b14 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.Text.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.beam new file mode 100644 index 0000000..e7f27ee 
Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Frame.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handler.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handler.beam new file mode 100644 index 0000000..854f830 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handler.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handshake.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handshake.beam new file mode 100644 index 0000000..b1dbcda Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Handshake.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.PerMessageDeflate.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.PerMessageDeflate.beam new file mode 100644 index 0000000..c8c7976 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.PerMessageDeflate.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.ThousandIsland.Socket.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.ThousandIsland.Socket.beam new file mode 100644 index 0000000..4970e31 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.ThousandIsland.Socket.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.beam new file mode 100644 index 0000000..671ae5b Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.Socket.beam differ diff --git a/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.UpgradeValidation.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.UpgradeValidation.beam new file mode 100644 index 0000000..5e8f323 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.WebSocket.UpgradeValidation.beam differ diff --git 
a/_build/dev/lib/bandit/ebin/Elixir.Bandit.beam b/_build/dev/lib/bandit/ebin/Elixir.Bandit.beam new file mode 100644 index 0000000..bad6c64 Binary files /dev/null and b/_build/dev/lib/bandit/ebin/Elixir.Bandit.beam differ diff --git a/_build/dev/lib/bandit/ebin/bandit.app b/_build/dev/lib/bandit/ebin/bandit.app new file mode 100644 index 0000000..d0c9417 --- /dev/null +++ b/_build/dev/lib/bandit/ebin/bandit.app @@ -0,0 +1,81 @@ +{application,bandit, + [{modules,['Elixir.Bandit','Elixir.Bandit.Adapter', + 'Elixir.Bandit.Application','Elixir.Bandit.Clock', + 'Elixir.Bandit.Compression', + 'Elixir.Bandit.DelegatingHandler', + 'Elixir.Bandit.Extractor', + 'Elixir.Bandit.HTTP1.Handler', + 'Elixir.Bandit.HTTP1.Socket', + 'Elixir.Bandit.HTTP2.Connection', + 'Elixir.Bandit.HTTP2.Errors', + 'Elixir.Bandit.HTTP2.Errors.ConnectionError', + 'Elixir.Bandit.HTTP2.Errors.StreamError', + 'Elixir.Bandit.HTTP2.FlowControl', + 'Elixir.Bandit.HTTP2.Frame', + 'Elixir.Bandit.HTTP2.Frame.Continuation', + 'Elixir.Bandit.HTTP2.Frame.Data', + 'Elixir.Bandit.HTTP2.Frame.Flags', + 'Elixir.Bandit.HTTP2.Frame.Goaway', + 'Elixir.Bandit.HTTP2.Frame.Headers', + 'Elixir.Bandit.HTTP2.Frame.Ping', + 'Elixir.Bandit.HTTP2.Frame.Priority', + 'Elixir.Bandit.HTTP2.Frame.PushPromise', + 'Elixir.Bandit.HTTP2.Frame.RstStream', + 'Elixir.Bandit.HTTP2.Frame.Serializable', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Continuation', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Data', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Goaway', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Headers', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Ping', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Priority', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.RstStream', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.Settings', + 'Elixir.Bandit.HTTP2.Frame.Serializable.Bandit.HTTP2.Frame.WindowUpdate', + 
'Elixir.Bandit.HTTP2.Frame.Settings', + 'Elixir.Bandit.HTTP2.Frame.Unknown', + 'Elixir.Bandit.HTTP2.Frame.WindowUpdate', + 'Elixir.Bandit.HTTP2.Handler', + 'Elixir.Bandit.HTTP2.Settings', + 'Elixir.Bandit.HTTP2.Stream', + 'Elixir.Bandit.HTTP2.StreamCollection', + 'Elixir.Bandit.HTTP2.StreamProcess', + 'Elixir.Bandit.HTTPError', + 'Elixir.Bandit.HTTPTransport', + 'Elixir.Bandit.HTTPTransport.Bandit.HTTP1.Socket', + 'Elixir.Bandit.HTTPTransport.Bandit.HTTP2.Stream', + 'Elixir.Bandit.Headers', + 'Elixir.Bandit.InitialHandler','Elixir.Bandit.Logger', + 'Elixir.Bandit.PhoenixAdapter', + 'Elixir.Bandit.Pipeline', + 'Elixir.Bandit.PrimitiveOps.WebSocket', + 'Elixir.Bandit.SocketHelpers', + 'Elixir.Bandit.Telemetry','Elixir.Bandit.Trace', + 'Elixir.Bandit.TransportError', + 'Elixir.Bandit.WebSocket.Connection', + 'Elixir.Bandit.WebSocket.Frame', + 'Elixir.Bandit.WebSocket.Frame.Binary', + 'Elixir.Bandit.WebSocket.Frame.ConnectionClose', + 'Elixir.Bandit.WebSocket.Frame.Continuation', + 'Elixir.Bandit.WebSocket.Frame.Ping', + 'Elixir.Bandit.WebSocket.Frame.Pong', + 'Elixir.Bandit.WebSocket.Frame.Serializable', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Binary', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.ConnectionClose', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Continuation', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Ping', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Pong', + 'Elixir.Bandit.WebSocket.Frame.Serializable.Bandit.WebSocket.Frame.Text', + 'Elixir.Bandit.WebSocket.Frame.Text', + 'Elixir.Bandit.WebSocket.Handler', + 'Elixir.Bandit.WebSocket.Handshake', + 'Elixir.Bandit.WebSocket.PerMessageDeflate', + 'Elixir.Bandit.WebSocket.Socket', + 'Elixir.Bandit.WebSocket.Socket.ThousandIsland.Socket', + 'Elixir.Bandit.WebSocket.UpgradeValidation']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,thousand_island,plug, 
+ websock,hpax,telemetry]}, + {description,"A pure-Elixir HTTP server built for Plug & WebSock apps"}, + {registered,[]}, + {vsn,"1.11.0"}, + {mod,{'Elixir.Bandit.Application',[]}}]}. diff --git a/_build/dev/lib/centralcloud_core/.mix/compile.app_cache b/_build/dev/lib/centralcloud_core/.mix/compile.app_cache new file mode 100644 index 0000000..089cd66 Binary files /dev/null and b/_build/dev/lib/centralcloud_core/.mix/compile.app_cache differ diff --git a/_build/dev/lib/centralcloud_core/.mix/compile.elixir b/_build/dev/lib/centralcloud_core/.mix/compile.elixir new file mode 100644 index 0000000..563f253 Binary files /dev/null and b/_build/dev/lib/centralcloud_core/.mix/compile.elixir differ diff --git a/_build/dev/lib/centralcloud_core/.mix/compile.elixir_scm b/_build/dev/lib/centralcloud_core/.mix/compile.elixir_scm new file mode 100644 index 0000000..94f1ebb Binary files /dev/null and b/_build/dev/lib/centralcloud_core/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/centralcloud_core/.mix/compile.lock b/_build/dev/lib/centralcloud_core/.mix/compile.lock new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.DrPortal.beam b/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.DrPortal.beam new file mode 100644 index 0000000..52e45b2 Binary files /dev/null and b/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.DrPortal.beam differ diff --git a/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.HostBill.beam b/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.HostBill.beam new file mode 100644 index 0000000..fbd0f5c Binary files /dev/null and b/_build/dev/lib/centralcloud_core/ebin/Elixir.CentralcloudCore.HostBill.beam differ diff --git a/_build/dev/lib/centralcloud_core/ebin/centralcloud_core.app b/_build/dev/lib/centralcloud_core/ebin/centralcloud_core.app new file mode 100644 index 0000000..e23a3ea --- /dev/null +++ 
b/_build/dev/lib/centralcloud_core/ebin/centralcloud_core.app @@ -0,0 +1,8 @@ +{application,centralcloud_core, + [{modules,['Elixir.CentralcloudCore.DrPortal', + 'Elixir.CentralcloudCore.HostBill']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,req,jason,jose]}, + {description,"centralcloud_core"}, + {registered,[]}, + {vsn,"0.1.0"}]}. diff --git a/_build/dev/lib/centralcloud_my/.mix/compile.app_cache b/_build/dev/lib/centralcloud_my/.mix/compile.app_cache new file mode 100644 index 0000000..eeb42c1 Binary files /dev/null and b/_build/dev/lib/centralcloud_my/.mix/compile.app_cache differ diff --git a/_build/dev/lib/centralcloud_my/.mix/compile.lock b/_build/dev/lib/centralcloud_my/.mix/compile.lock new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/centralcloud_my/ebin/centralcloud_my.app b/_build/dev/lib/centralcloud_my/ebin/centralcloud_my.app new file mode 100644 index 0000000..eac72e0 --- /dev/null +++ b/_build/dev/lib/centralcloud_my/ebin/centralcloud_my.app @@ -0,0 +1,12 @@ +{application,centralcloud_my, + [{modules,[]}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,runtime_tools, + centralcloud_core,phoenix,phoenix_live_view, + phoenix_html,phoenix_live_reload,bandit,ecto_sql, + postgrex,swoosh,finch,telemetry_metrics, + telemetry_poller,jason,dns_cluster,oidcc]}, + {description,"centralcloud_my"}, + {registered,[]}, + {vsn,"0.1.0"}, + {mod,{'Elixir.CentralcloudMy.Application',[]}}]}. 
diff --git a/_build/dev/lib/centralcloud_ops/.mix/compile.app_cache b/_build/dev/lib/centralcloud_ops/.mix/compile.app_cache new file mode 100644 index 0000000..ff2fa5a Binary files /dev/null and b/_build/dev/lib/centralcloud_ops/.mix/compile.app_cache differ diff --git a/_build/dev/lib/centralcloud_ops/.mix/compile.lock b/_build/dev/lib/centralcloud_ops/.mix/compile.lock new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/centralcloud_ops/ebin/centralcloud_ops.app b/_build/dev/lib/centralcloud_ops/ebin/centralcloud_ops.app new file mode 100644 index 0000000..9c56c24 --- /dev/null +++ b/_build/dev/lib/centralcloud_ops/ebin/centralcloud_ops.app @@ -0,0 +1,11 @@ +{application,centralcloud_ops, + [{modules,[]}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,runtime_tools, + centralcloud_core,phoenix,phoenix_live_view, + phoenix_html,phoenix_live_reload,bandit,jason, + oidcc]}, + {description,"centralcloud_ops"}, + {registered,[]}, + {vsn,"0.1.0"}, + {mod,{'Elixir.CentralcloudOps.Application',[]}}]}. 
diff --git a/_build/dev/lib/db_connection/.mix/compile.elixir b/_build/dev/lib/db_connection/.mix/compile.elixir new file mode 100644 index 0000000..2f8c80a Binary files /dev/null and b/_build/dev/lib/db_connection/.mix/compile.elixir differ diff --git a/_build/dev/lib/db_connection/.mix/compile.elixir_scm b/_build/dev/lib/db_connection/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/db_connection/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/db_connection/.mix/compile.fetch b/_build/dev/lib/db_connection/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.App.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.App.beam new file mode 100644 index 0000000..0a60c9e Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.App.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Backoff.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Backoff.beam new file mode 100644 index 0000000..a59a323 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Backoff.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Connection.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Connection.beam new file mode 100644 index 0000000..eceb4bf Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Connection.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionError.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionError.beam new file mode 100644 index 0000000..55af58b Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionError.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.Pool.beam 
b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.Pool.beam new file mode 100644 index 0000000..0a6a989 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.Pool.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.beam new file mode 100644 index 0000000..a36ef17 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.ConnectionPool.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.EncodeError.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.EncodeError.beam new file mode 100644 index 0000000..7eaa58a Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.EncodeError.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Holder.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Holder.beam new file mode 100644 index 0000000..654cf2a Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Holder.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.LogEntry.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.LogEntry.beam new file mode 100644 index 0000000..6475421 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.LogEntry.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Manager.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Manager.beam new file mode 100644 index 0000000..ff725d2 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Manager.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Proxy.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Proxy.beam new file mode 100644 index 0000000..d255472 Binary 
files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.Proxy.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.beam new file mode 100644 index 0000000..ba3abae Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Ownership.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.OwnershipError.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.OwnershipError.beam new file mode 100644 index 0000000..742de47 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.OwnershipError.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Pool.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Pool.beam new file mode 100644 index 0000000..89db3d8 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Pool.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.PrepareStream.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.PrepareStream.beam new file mode 100644 index 0000000..a36704e Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.PrepareStream.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Query.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Query.beam new file mode 100644 index 0000000..6ad4e26 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Query.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.SensitiveData.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.SensitiveData.beam new file mode 100644 index 0000000..37766a9 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.SensitiveData.beam differ diff --git 
a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Stream.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Stream.beam new file mode 100644 index 0000000..18f82b2 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Stream.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Task.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Task.beam new file mode 100644 index 0000000..a28b40e Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Task.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TelemetryListener.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TelemetryListener.beam new file mode 100644 index 0000000..b12d32d Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TelemetryListener.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TransactionError.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TransactionError.beam new file mode 100644 index 0000000..e894a17 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.TransactionError.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Util.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Util.beam new file mode 100644 index 0000000..4d14b85 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Util.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Watcher.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Watcher.beam new file mode 100644 index 0000000..4ef1588 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.Watcher.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.beam b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.beam new file mode 100644 index 0000000..b8ee2e1 Binary files 
/dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.DBConnection.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.PrepareStream.beam b/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.PrepareStream.beam new file mode 100644 index 0000000..0e228bc Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.PrepareStream.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.Stream.beam b/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.Stream.beam new file mode 100644 index 0000000..0c57dc0 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.Enumerable.DBConnection.Stream.beam differ diff --git a/_build/dev/lib/db_connection/ebin/Elixir.Inspect.DBConnection.SensitiveData.beam b/_build/dev/lib/db_connection/ebin/Elixir.Inspect.DBConnection.SensitiveData.beam new file mode 100644 index 0000000..af8b851 Binary files /dev/null and b/_build/dev/lib/db_connection/ebin/Elixir.Inspect.DBConnection.SensitiveData.beam differ diff --git a/_build/dev/lib/db_connection/ebin/db_connection.app b/_build/dev/lib/db_connection/ebin/db_connection.app new file mode 100644 index 0000000..03d0079 --- /dev/null +++ b/_build/dev/lib/db_connection/ebin/db_connection.app @@ -0,0 +1,33 @@ +{application,db_connection, + [{modules,['Elixir.DBConnection','Elixir.DBConnection.App', + 'Elixir.DBConnection.Backoff', + 'Elixir.DBConnection.Connection', + 'Elixir.DBConnection.ConnectionError', + 'Elixir.DBConnection.ConnectionPool', + 'Elixir.DBConnection.ConnectionPool.Pool', + 'Elixir.DBConnection.EncodeError', + 'Elixir.DBConnection.Holder', + 'Elixir.DBConnection.LogEntry', + 'Elixir.DBConnection.Ownership', + 'Elixir.DBConnection.Ownership.Manager', + 'Elixir.DBConnection.Ownership.Proxy', + 'Elixir.DBConnection.OwnershipError', + 'Elixir.DBConnection.Pool', + 'Elixir.DBConnection.PrepareStream', + 'Elixir.DBConnection.Query', + 
'Elixir.DBConnection.SensitiveData', + 'Elixir.DBConnection.Stream', + 'Elixir.DBConnection.Task', + 'Elixir.DBConnection.TelemetryListener', + 'Elixir.DBConnection.TransactionError', + 'Elixir.DBConnection.Util', + 'Elixir.DBConnection.Watcher', + 'Elixir.Enumerable.DBConnection.PrepareStream', + 'Elixir.Enumerable.DBConnection.Stream', + 'Elixir.Inspect.DBConnection.SensitiveData']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,telemetry]}, + {description,"Database connection behaviour for database transactions and connection pooling\n"}, + {registered,[]}, + {vsn,"2.10.1"}, + {mod,{'Elixir.DBConnection.App',[]}}]}. diff --git a/_build/dev/lib/decimal/.mix/compile.elixir b/_build/dev/lib/decimal/.mix/compile.elixir new file mode 100644 index 0000000..24a36ef Binary files /dev/null and b/_build/dev/lib/decimal/.mix/compile.elixir differ diff --git a/_build/dev/lib/decimal/.mix/compile.elixir_scm b/_build/dev/lib/decimal/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/decimal/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/decimal/.mix/compile.fetch b/_build/dev/lib/decimal/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/decimal/ebin/Elixir.Decimal.Context.beam b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Context.beam new file mode 100644 index 0000000..4374eef Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Context.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.Decimal.Error.beam b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Error.beam new file mode 100644 index 0000000..ebf6adc Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Error.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.Decimal.Macros.beam b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Macros.beam new file mode 100644 index 0000000..86bb045 Binary files /dev/null and 
b/_build/dev/lib/decimal/ebin/Elixir.Decimal.Macros.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.Decimal.beam b/_build/dev/lib/decimal/ebin/Elixir.Decimal.beam new file mode 100644 index 0000000..11bb764 Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.Decimal.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.Inspect.Decimal.beam b/_build/dev/lib/decimal/ebin/Elixir.Inspect.Decimal.beam new file mode 100644 index 0000000..76acd69 Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.Inspect.Decimal.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.JSON.Encoder.Decimal.beam b/_build/dev/lib/decimal/ebin/Elixir.JSON.Encoder.Decimal.beam new file mode 100644 index 0000000..a80736e Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.JSON.Encoder.Decimal.beam differ diff --git a/_build/dev/lib/decimal/ebin/Elixir.String.Chars.Decimal.beam b/_build/dev/lib/decimal/ebin/Elixir.String.Chars.Decimal.beam new file mode 100644 index 0000000..2f8051b Binary files /dev/null and b/_build/dev/lib/decimal/ebin/Elixir.String.Chars.Decimal.beam differ diff --git a/_build/dev/lib/decimal/ebin/decimal.app b/_build/dev/lib/decimal/ebin/decimal.app new file mode 100644 index 0000000..ae16d94 --- /dev/null +++ b/_build/dev/lib/decimal/ebin/decimal.app @@ -0,0 +1,11 @@ +{application,decimal, + [{modules,['Elixir.Decimal','Elixir.Decimal.Context', + 'Elixir.Decimal.Error','Elixir.Decimal.Macros', + 'Elixir.Inspect.Decimal', + 'Elixir.JSON.Encoder.Decimal', + 'Elixir.String.Chars.Decimal']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir]}, + {description,"Arbitrary precision decimal arithmetic."}, + {registered,[]}, + {vsn,"3.1.0"}]}. 
diff --git a/_build/dev/lib/dns_cluster/.mix/compile.elixir b/_build/dev/lib/dns_cluster/.mix/compile.elixir new file mode 100644 index 0000000..204aee8 Binary files /dev/null and b/_build/dev/lib/dns_cluster/.mix/compile.elixir differ diff --git a/_build/dev/lib/dns_cluster/.mix/compile.elixir_scm b/_build/dev/lib/dns_cluster/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/dns_cluster/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/dns_cluster/.mix/compile.fetch b/_build/dev/lib/dns_cluster/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.Resolver.beam b/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.Resolver.beam new file mode 100644 index 0000000..ca4282f Binary files /dev/null and b/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.Resolver.beam differ diff --git a/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.beam b/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.beam new file mode 100644 index 0000000..cf7aef1 Binary files /dev/null and b/_build/dev/lib/dns_cluster/ebin/Elixir.DNSCluster.beam differ diff --git a/_build/dev/lib/dns_cluster/ebin/dns_cluster.app b/_build/dev/lib/dns_cluster/ebin/dns_cluster.app new file mode 100644 index 0000000..46c0ebc --- /dev/null +++ b/_build/dev/lib/dns_cluster/ebin/dns_cluster.app @@ -0,0 +1,7 @@ +{application,dns_cluster, + [{modules,['Elixir.DNSCluster','Elixir.DNSCluster.Resolver']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger]}, + {description,"Simple DNS clustering for distributed Elixir nodes"}, + {registered,[]}, + {vsn,"0.1.3"}]}. 
diff --git a/_build/dev/lib/ecto/.mix/compile.elixir b/_build/dev/lib/ecto/.mix/compile.elixir new file mode 100644 index 0000000..3412f08 Binary files /dev/null and b/_build/dev/lib/ecto/.mix/compile.elixir differ diff --git a/_build/dev/lib/ecto/.mix/compile.elixir_scm b/_build/dev/lib/ecto/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/ecto/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/ecto/.mix/compile.fetch b/_build/dev/lib/ecto/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Queryable.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Queryable.beam new file mode 100644 index 0000000..c6e9ff7 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Queryable.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Schema.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Schema.beam new file mode 100644 index 0000000..f0c7caa Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Schema.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Storage.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Storage.beam new file mode 100644 index 0000000..5431ef4 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Storage.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Transaction.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Transaction.beam new file mode 100644 index 0000000..5f121fc Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.Transaction.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.beam new file mode 100644 index 0000000..4eb16c6 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Adapter.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Application.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Application.beam new file mode 100644 index 0000000..5bc6132 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Application.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.BelongsTo.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.BelongsTo.beam new file mode 100644 index 0000000..aa404fd Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.BelongsTo.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.Has.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.Has.beam new file mode 100644 index 0000000..92c983f Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.Has.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.HasThrough.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.HasThrough.beam new file mode 100644 index 0000000..0a4f58d Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.HasThrough.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.ManyToMany.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.ManyToMany.beam new file mode 100644 index 0000000..9ce57c1 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.ManyToMany.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.NotLoaded.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.NotLoaded.beam new file mode 100644 index 0000000..7463f34 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.NotLoaded.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.beam new file mode 100644 index 0000000..2bed54d Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Association.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.CastError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.CastError.beam new file 
mode 100644 index 0000000..24fbe3c Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.CastError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.ChangeError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ChangeError.beam new file mode 100644 index 0000000..f67aaec Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ChangeError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.Relation.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.Relation.beam new file mode 100644 index 0000000..88d190d Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.Relation.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.beam new file mode 100644 index 0000000..8b1eb3d Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Changeset.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.ConstraintError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ConstraintError.beam new file mode 100644 index 0000000..54f56b5 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ConstraintError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Embedded.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Embedded.beam new file mode 100644 index 0000000..d6705fe Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Embedded.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Enum.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Enum.beam new file mode 100644 index 0000000..24b89c8 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Enum.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidChangesetError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidChangesetError.beam new file mode 100644 index 0000000..1c60986 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidChangesetError.beam differ diff --git 
a/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidURLError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidURLError.beam new file mode 100644 index 0000000..9fc8dcd Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.InvalidURLError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.MigrationError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MigrationError.beam new file mode 100644 index 0000000..6edff30 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MigrationError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Multi.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Multi.beam new file mode 100644 index 0000000..6d9b8e5 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Multi.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultiplePrimaryKeyError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultiplePrimaryKeyError.beam new file mode 100644 index 0000000..4fbf3d6 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultiplePrimaryKeyError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultipleResultsError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultipleResultsError.beam new file mode 100644 index 0000000..c97aaa2 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.MultipleResultsError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyFieldError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyFieldError.beam new file mode 100644 index 0000000..e9fdadd Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyFieldError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyValueError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyValueError.beam new file mode 100644 index 0000000..2e2cf88 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoPrimaryKeyValueError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoResultsError.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoResultsError.beam new file mode 100644 index 0000000..787866c Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.NoResultsError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.ParameterizedType.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ParameterizedType.beam new file mode 100644 index 0000000..2f99f54 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.ParameterizedType.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.API.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.API.beam new file mode 100644 index 0000000..5c5bd7f Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.API.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.BooleanExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.BooleanExpr.beam new file mode 100644 index 0000000..91e3089 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.BooleanExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.CTE.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.CTE.beam new file mode 100644 index 0000000..aa705b8 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.CTE.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Combination.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Combination.beam new file mode 100644 index 0000000..1a3ac44 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Combination.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Distinct.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Distinct.beam new file mode 100644 index 0000000..fd48108 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Distinct.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Dynamic.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Dynamic.beam new file mode 100644 index 0000000..2e7a6c3 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Dynamic.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Filter.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Filter.beam new file mode 100644 index 0000000..6838962 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Filter.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.From.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.From.beam new file mode 100644 index 0000000..cc635b4 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.From.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.GroupBy.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.GroupBy.beam new file mode 100644 index 0000000..82c600e Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.GroupBy.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Join.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Join.beam new file mode 100644 index 0000000..033ec7e Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Join.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.LimitOffset.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.LimitOffset.beam new file mode 100644 index 0000000..12bed52 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.LimitOffset.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Lock.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Lock.beam new file mode 100644 index 0000000..8321566 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Lock.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.OrderBy.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.OrderBy.beam new file mode 100644 index 0000000..a264d12 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.OrderBy.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Preload.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Preload.beam new file mode 100644 index 0000000..7073936 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Preload.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Select.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Select.beam new file mode 100644 index 0000000..2753f21 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Select.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Update.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Update.beam new file mode 100644 index 0000000..32c0382 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Update.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Windows.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Windows.beam new file mode 100644 index 0000000..ec3a7b8 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.Windows.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.beam new file mode 100644 index 0000000..22f5696 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Builder.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.ByExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.ByExpr.beam new file mode 100644 index 0000000..5027c89 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.ByExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CastError.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CastError.beam new file mode 100644 index 0000000..e13a7b2 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CastError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CompileError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CompileError.beam new file mode 100644 index 0000000..cbcf1a1 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.CompileError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.DynamicExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.DynamicExpr.beam new file mode 100644 index 0000000..1d8d7e3 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.DynamicExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.FromExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.FromExpr.beam new file mode 100644 index 0000000..1e734bc Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.FromExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.JoinExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.JoinExpr.beam new file mode 100644 index 0000000..b313fc9 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.JoinExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.LimitExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.LimitExpr.beam new file mode 100644 index 0000000..de8d8b8 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.LimitExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Planner.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Planner.beam new file mode 100644 index 0000000..10b7b54 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Planner.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.QueryExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.QueryExpr.beam new file mode 100644 index 0000000..99a5726 Binary 
files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.QueryExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.SelectExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.SelectExpr.beam new file mode 100644 index 0000000..8d3ec18 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.SelectExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Tagged.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Tagged.beam new file mode 100644 index 0000000..fd166a5 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Tagged.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Values.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Values.beam new file mode 100644 index 0000000..c925b4e Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.Values.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WindowAPI.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WindowAPI.beam new file mode 100644 index 0000000..631ce86 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WindowAPI.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WithExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WithExpr.beam new file mode 100644 index 0000000..22dcb5b Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.WithExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.beam new file mode 100644 index 0000000..ba543f2 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Query.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.QueryError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.QueryError.beam new file mode 100644 index 0000000..c5e794b Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.QueryError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Atom.beam 
b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Atom.beam new file mode 100644 index 0000000..ae8b8ee Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Atom.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.BitString.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.BitString.beam new file mode 100644 index 0000000..27b8cbd Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.BitString.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.Query.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.Query.beam new file mode 100644 index 0000000..4534c67 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.Query.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.SubQuery.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.SubQuery.beam new file mode 100644 index 0000000..6099dc9 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Ecto.SubQuery.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Tuple.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Tuple.beam new file mode 100644 index 0000000..1c22595 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.Tuple.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.beam new file mode 100644 index 0000000..768e429 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Queryable.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Assoc.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Assoc.beam new file mode 100644 index 0000000..5d65a66 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Assoc.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Preloader.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Preloader.beam new file mode 100644 index 
0000000..138224b Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Preloader.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Queryable.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Queryable.beam new file mode 100644 index 0000000..160fada Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Queryable.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Registry.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Registry.beam new file mode 100644 index 0000000..ae7636b Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Registry.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Schema.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Schema.beam new file mode 100644 index 0000000..4708e5e Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Schema.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Supervisor.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Supervisor.beam new file mode 100644 index 0000000..0691d7f Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Supervisor.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Transaction.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Transaction.beam new file mode 100644 index 0000000..08e9e6c Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.Transaction.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.beam new file mode 100644 index 0000000..cf31561 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Repo.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Loader.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Loader.beam new file mode 100644 index 0000000..e81e508 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Loader.beam differ diff --git 
a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Metadata.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Metadata.beam new file mode 100644 index 0000000..d176045 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.Metadata.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.beam new file mode 100644 index 0000000..5307fe9 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Schema.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.StaleEntryError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.StaleEntryError.beam new file mode 100644 index 0000000..f5b2cfe Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.StaleEntryError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQuery.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQuery.beam new file mode 100644 index 0000000..ddb0721 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQuery.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQueryError.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQueryError.beam new file mode 100644 index 0000000..ce79f4c Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.SubQueryError.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.Type.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Type.beam new file mode 100644 index 0000000..e83c5e0 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.Type.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.UUID.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.UUID.beam new file mode 100644 index 0000000..ac15f26 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.UUID.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Ecto.beam b/_build/dev/lib/ecto/ebin/Elixir.Ecto.beam new file mode 100644 index 0000000..2f4db83 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Ecto.beam differ diff --git 
a/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Association.NotLoaded.beam b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Association.NotLoaded.beam new file mode 100644 index 0000000..4b83a2d Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Association.NotLoaded.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Changeset.beam b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Changeset.beam new file mode 100644 index 0000000..75e7d98 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Changeset.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.DynamicExpr.beam b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.DynamicExpr.beam new file mode 100644 index 0000000..f818a26 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.DynamicExpr.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.beam b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.beam new file mode 100644 index 0000000..a70d46a Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Query.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Schema.Metadata.beam b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Schema.Metadata.beam new file mode 100644 index 0000000..9bcd282 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Inspect.Ecto.Schema.Metadata.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Association.NotLoaded.beam b/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Association.NotLoaded.beam new file mode 100644 index 0000000..604c285 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Association.NotLoaded.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Schema.Metadata.beam b/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Schema.Metadata.beam new file mode 100644 index 0000000..af84aab Binary files /dev/null and 
b/_build/dev/lib/ecto/ebin/Elixir.JSON.Encoder.Ecto.Schema.Metadata.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Association.NotLoaded.beam b/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Association.NotLoaded.beam new file mode 100644 index 0000000..167a250 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Association.NotLoaded.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Schema.Metadata.beam b/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Schema.Metadata.beam new file mode 100644 index 0000000..bd7daa4 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Jason.Encoder.Ecto.Schema.Metadata.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Mix.Ecto.beam b/_build/dev/lib/ecto/ebin/Elixir.Mix.Ecto.beam new file mode 100644 index 0000000..ace7cfe Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Mix.Ecto.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Create.beam b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Create.beam new file mode 100644 index 0000000..b0fe031 Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Create.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Drop.beam b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Drop.beam new file mode 100644 index 0000000..74c85fc Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Drop.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Gen.Repo.beam b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Gen.Repo.beam new file mode 100644 index 0000000..4ca094b Binary files /dev/null and b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.Gen.Repo.beam differ diff --git a/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.beam b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.beam new file mode 100644 index 0000000..72fb1b7 Binary files /dev/null and 
b/_build/dev/lib/ecto/ebin/Elixir.Mix.Tasks.Ecto.beam differ diff --git a/_build/dev/lib/ecto/ebin/ecto.app b/_build/dev/lib/ecto/ebin/ecto.app new file mode 100644 index 0000000..9bfc5bd --- /dev/null +++ b/_build/dev/lib/ecto/ebin/ecto.app @@ -0,0 +1,92 @@ +{application,ecto, + [{modules,['Elixir.Ecto','Elixir.Ecto.Adapter', + 'Elixir.Ecto.Adapter.Queryable', + 'Elixir.Ecto.Adapter.Schema', + 'Elixir.Ecto.Adapter.Storage', + 'Elixir.Ecto.Adapter.Transaction', + 'Elixir.Ecto.Application','Elixir.Ecto.Association', + 'Elixir.Ecto.Association.BelongsTo', + 'Elixir.Ecto.Association.Has', + 'Elixir.Ecto.Association.HasThrough', + 'Elixir.Ecto.Association.ManyToMany', + 'Elixir.Ecto.Association.NotLoaded', + 'Elixir.Ecto.CastError','Elixir.Ecto.ChangeError', + 'Elixir.Ecto.Changeset', + 'Elixir.Ecto.Changeset.Relation', + 'Elixir.Ecto.ConstraintError','Elixir.Ecto.Embedded', + 'Elixir.Ecto.Enum', + 'Elixir.Ecto.InvalidChangesetError', + 'Elixir.Ecto.InvalidURLError', + 'Elixir.Ecto.MigrationError','Elixir.Ecto.Multi', + 'Elixir.Ecto.MultiplePrimaryKeyError', + 'Elixir.Ecto.MultipleResultsError', + 'Elixir.Ecto.NoPrimaryKeyFieldError', + 'Elixir.Ecto.NoPrimaryKeyValueError', + 'Elixir.Ecto.NoResultsError', + 'Elixir.Ecto.ParameterizedType','Elixir.Ecto.Query', + 'Elixir.Ecto.Query.API', + 'Elixir.Ecto.Query.BooleanExpr', + 'Elixir.Ecto.Query.Builder', + 'Elixir.Ecto.Query.Builder.CTE', + 'Elixir.Ecto.Query.Builder.Combination', + 'Elixir.Ecto.Query.Builder.Distinct', + 'Elixir.Ecto.Query.Builder.Dynamic', + 'Elixir.Ecto.Query.Builder.Filter', + 'Elixir.Ecto.Query.Builder.From', + 'Elixir.Ecto.Query.Builder.GroupBy', + 'Elixir.Ecto.Query.Builder.Join', + 'Elixir.Ecto.Query.Builder.LimitOffset', + 'Elixir.Ecto.Query.Builder.Lock', + 'Elixir.Ecto.Query.Builder.OrderBy', + 'Elixir.Ecto.Query.Builder.Preload', + 'Elixir.Ecto.Query.Builder.Select', + 'Elixir.Ecto.Query.Builder.Update', + 'Elixir.Ecto.Query.Builder.Windows', + 'Elixir.Ecto.Query.ByExpr', + 
'Elixir.Ecto.Query.CastError', + 'Elixir.Ecto.Query.CompileError', + 'Elixir.Ecto.Query.DynamicExpr', + 'Elixir.Ecto.Query.FromExpr', + 'Elixir.Ecto.Query.JoinExpr', + 'Elixir.Ecto.Query.LimitExpr', + 'Elixir.Ecto.Query.Planner', + 'Elixir.Ecto.Query.QueryExpr', + 'Elixir.Ecto.Query.SelectExpr', + 'Elixir.Ecto.Query.Tagged','Elixir.Ecto.Query.Values', + 'Elixir.Ecto.Query.WindowAPI', + 'Elixir.Ecto.Query.WithExpr','Elixir.Ecto.QueryError', + 'Elixir.Ecto.Queryable','Elixir.Ecto.Queryable.Atom', + 'Elixir.Ecto.Queryable.BitString', + 'Elixir.Ecto.Queryable.Ecto.Query', + 'Elixir.Ecto.Queryable.Ecto.SubQuery', + 'Elixir.Ecto.Queryable.Tuple','Elixir.Ecto.Repo', + 'Elixir.Ecto.Repo.Assoc','Elixir.Ecto.Repo.Preloader', + 'Elixir.Ecto.Repo.Queryable', + 'Elixir.Ecto.Repo.Registry','Elixir.Ecto.Repo.Schema', + 'Elixir.Ecto.Repo.Supervisor', + 'Elixir.Ecto.Repo.Transaction','Elixir.Ecto.Schema', + 'Elixir.Ecto.Schema.Loader', + 'Elixir.Ecto.Schema.Metadata', + 'Elixir.Ecto.StaleEntryError','Elixir.Ecto.SubQuery', + 'Elixir.Ecto.SubQueryError','Elixir.Ecto.Type', + 'Elixir.Ecto.UUID', + 'Elixir.Inspect.Ecto.Association.NotLoaded', + 'Elixir.Inspect.Ecto.Changeset', + 'Elixir.Inspect.Ecto.Query', + 'Elixir.Inspect.Ecto.Query.DynamicExpr', + 'Elixir.Inspect.Ecto.Schema.Metadata', + 'Elixir.JSON.Encoder.Ecto.Association.NotLoaded', + 'Elixir.JSON.Encoder.Ecto.Schema.Metadata', + 'Elixir.Jason.Encoder.Ecto.Association.NotLoaded', + 'Elixir.Jason.Encoder.Ecto.Schema.Metadata', + 'Elixir.Mix.Ecto','Elixir.Mix.Tasks.Ecto', + 'Elixir.Mix.Tasks.Ecto.Create', + 'Elixir.Mix.Tasks.Ecto.Drop', + 'Elixir.Mix.Tasks.Ecto.Gen.Repo']}, + {optional_applications,[jason]}, + {applications,[kernel,stdlib,elixir,logger,crypto,eex,telemetry, + decimal,jason]}, + {description,"A toolkit for data mapping and language integrated query for Elixir"}, + {registered,[]}, + {vsn,"3.13.6"}, + {mod,{'Elixir.Ecto.Application',[]}}]}. 
diff --git a/_build/dev/lib/ecto_sql/.mix/compile.elixir b/_build/dev/lib/ecto_sql/.mix/compile.elixir new file mode 100644 index 0000000..7d44a8d Binary files /dev/null and b/_build/dev/lib/ecto_sql/.mix/compile.elixir differ diff --git a/_build/dev/lib/ecto_sql/.mix/compile.elixir_scm b/_build/dev/lib/ecto_sql/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/ecto_sql/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/ecto_sql/.mix/compile.fetch b/_build/dev/lib/ecto_sql/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Collectable.Ecto.Adapters.SQL.Stream.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Collectable.Ecto.Adapters.SQL.Stream.beam new file mode 100644 index 0000000..b3af9a7 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Collectable.Ecto.Adapters.SQL.Stream.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Migration.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Migration.beam new file mode 100644 index 0000000..cd9fe0b Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Migration.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Structure.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Structure.beam new file mode 100644 index 0000000..4f7079a Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapter.Structure.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.MyXQL.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.MyXQL.beam new file mode 100644 index 0000000..57570d2 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.MyXQL.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.Connection.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.Connection.beam new file mode 100644 index 
0000000..cbe98b4 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.Connection.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.beam new file mode 100644 index 0000000..8e0b340 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Postgres.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Application.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Application.beam new file mode 100644 index 0000000..9d3adf2 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Application.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Connection.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Connection.beam new file mode 100644 index 0000000..48c63ee Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Connection.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.Connection.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.Connection.beam new file mode 100644 index 0000000..eab4ba1 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.Connection.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.beam new file mode 100644 index 0000000..9d1a363 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Sandbox.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Stream.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Stream.beam new file mode 100644 index 0000000..5b303be Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.Stream.beam differ diff --git 
a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.beam new file mode 100644 index 0000000..62e0f16 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.SQL.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Tds.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Tds.beam new file mode 100644 index 0000000..1d629ae Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Adapters.Tds.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Command.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Command.beam new file mode 100644 index 0000000..54b5fbe Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Command.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Constraint.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Constraint.beam new file mode 100644 index 0000000..4154bdd Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Constraint.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Index.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Index.beam new file mode 100644 index 0000000..3738f7d Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Index.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Reference.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Reference.beam new file mode 100644 index 0000000..e89f769 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Reference.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Runner.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Runner.beam new file mode 100644 index 0000000..8b3f99a Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Runner.beam differ diff --git 
a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.SchemaMigration.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.SchemaMigration.beam new file mode 100644 index 0000000..c7afe89 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.SchemaMigration.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Table.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Table.beam new file mode 100644 index 0000000..3202989 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.Table.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.beam new file mode 100644 index 0000000..5136ef3 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migration.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migrator.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migrator.beam new file mode 100644 index 0000000..b5d5267 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Ecto.Migrator.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Enumerable.Ecto.Adapters.SQL.Stream.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Enumerable.Ecto.Adapters.SQL.Stream.beam new file mode 100644 index 0000000..ef3118a Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Enumerable.Ecto.Adapters.SQL.Stream.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.EctoSQL.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.EctoSQL.beam new file mode 100644 index 0000000..41d5f8c Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.EctoSQL.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Dump.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Dump.beam new file mode 100644 index 0000000..0ef8bb7 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Dump.beam differ diff --git 
a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Gen.Migration.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Gen.Migration.beam new file mode 100644 index 0000000..95b3fa7 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Gen.Migration.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Load.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Load.beam new file mode 100644 index 0000000..f2d4f91 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Load.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrate.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrate.beam new file mode 100644 index 0000000..08ba9c3 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrate.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrations.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrations.beam new file mode 100644 index 0000000..eb2cd9d Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Migrations.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Rollback.beam b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Rollback.beam new file mode 100644 index 0000000..6472a92 Binary files /dev/null and b/_build/dev/lib/ecto_sql/ebin/Elixir.Mix.Tasks.Ecto.Rollback.beam differ diff --git a/_build/dev/lib/ecto_sql/ebin/ecto_sql.app b/_build/dev/lib/ecto_sql/ebin/ecto_sql.app new file mode 100644 index 0000000..6644cb5 --- /dev/null +++ b/_build/dev/lib/ecto_sql/ebin/ecto_sql.app @@ -0,0 +1,36 @@ +{application,ecto_sql, + [{modules,['Elixir.Collectable.Ecto.Adapters.SQL.Stream', + 'Elixir.Ecto.Adapter.Migration', + 'Elixir.Ecto.Adapter.Structure', + 'Elixir.Ecto.Adapters.MyXQL', + 'Elixir.Ecto.Adapters.Postgres', + 'Elixir.Ecto.Adapters.Postgres.Connection', + 'Elixir.Ecto.Adapters.SQL', + 
'Elixir.Ecto.Adapters.SQL.Application', + 'Elixir.Ecto.Adapters.SQL.Connection', + 'Elixir.Ecto.Adapters.SQL.Sandbox', + 'Elixir.Ecto.Adapters.SQL.Sandbox.Connection', + 'Elixir.Ecto.Adapters.SQL.Stream', + 'Elixir.Ecto.Adapters.Tds','Elixir.Ecto.Migration', + 'Elixir.Ecto.Migration.Command', + 'Elixir.Ecto.Migration.Constraint', + 'Elixir.Ecto.Migration.Index', + 'Elixir.Ecto.Migration.Reference', + 'Elixir.Ecto.Migration.Runner', + 'Elixir.Ecto.Migration.SchemaMigration', + 'Elixir.Ecto.Migration.Table','Elixir.Ecto.Migrator', + 'Elixir.Enumerable.Ecto.Adapters.SQL.Stream', + 'Elixir.Mix.EctoSQL','Elixir.Mix.Tasks.Ecto.Dump', + 'Elixir.Mix.Tasks.Ecto.Gen.Migration', + 'Elixir.Mix.Tasks.Ecto.Load', + 'Elixir.Mix.Tasks.Ecto.Migrate', + 'Elixir.Mix.Tasks.Ecto.Migrations', + 'Elixir.Mix.Tasks.Ecto.Rollback']}, + {optional_applications,[postgrex,myxql,tds]}, + {applications,[kernel,stdlib,elixir,logger,eex,ecto,telemetry, + db_connection,postgrex,myxql,tds]}, + {description,"SQL-based adapters for Ecto and database migrations"}, + {registered,[]}, + {vsn,"3.13.5"}, + {env,[{postgres_map_type,<<"jsonb">>}]}, + {mod,{'Elixir.Ecto.Adapters.SQL.Application',[]}}]}. 
diff --git a/_build/dev/lib/file_system/.mix/compile.elixir b/_build/dev/lib/file_system/.mix/compile.elixir new file mode 100644 index 0000000..aca2fc4 Binary files /dev/null and b/_build/dev/lib/file_system/.mix/compile.elixir differ diff --git a/_build/dev/lib/file_system/.mix/compile.elixir_scm b/_build/dev/lib/file_system/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/file_system/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/file_system/.mix/compile.fetch b/_build/dev/lib/file_system/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backend.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backend.beam new file mode 100644 index 0000000..2fe3fe4 Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backend.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSInotify.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSInotify.beam new file mode 100644 index 0000000..c30d29e Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSInotify.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSMac.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSMac.beam new file mode 100644 index 0000000..15278a0 Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSMac.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSPoll.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSPoll.beam new file mode 100644 index 0000000..324d84d Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSPoll.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSWindows.beam 
b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSWindows.beam new file mode 100644 index 0000000..c3b498a Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Backends.FSWindows.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Worker.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Worker.beam new file mode 100644 index 0000000..285eed4 Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.Worker.beam differ diff --git a/_build/dev/lib/file_system/ebin/Elixir.FileSystem.beam b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.beam new file mode 100644 index 0000000..890e6a2 Binary files /dev/null and b/_build/dev/lib/file_system/ebin/Elixir.FileSystem.beam differ diff --git a/_build/dev/lib/file_system/ebin/file_system.app b/_build/dev/lib/file_system/ebin/file_system.app new file mode 100644 index 0000000..b29751e --- /dev/null +++ b/_build/dev/lib/file_system/ebin/file_system.app @@ -0,0 +1,12 @@ +{application,file_system, + [{modules,['Elixir.FileSystem','Elixir.FileSystem.Backend', + 'Elixir.FileSystem.Backends.FSInotify', + 'Elixir.FileSystem.Backends.FSMac', + 'Elixir.FileSystem.Backends.FSPoll', + 'Elixir.FileSystem.Backends.FSWindows', + 'Elixir.FileSystem.Worker']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger]}, + {description,"An Elixir file system change watcher wrapper based on FS, the native file\nsystem listener.\n"}, + {registered,[]}, + {vsn,"1.1.1"}]}. 
diff --git a/_build/dev/lib/file_system/priv b/_build/dev/lib/file_system/priv new file mode 120000 index 0000000..f352993 --- /dev/null +++ b/_build/dev/lib/file_system/priv @@ -0,0 +1 @@ +../../../../deps/file_system/priv \ No newline at end of file diff --git a/_build/dev/lib/finch/.mix/compile.elixir b/_build/dev/lib/finch/.mix/compile.elixir new file mode 100644 index 0000000..dc666e1 Binary files /dev/null and b/_build/dev/lib/finch/.mix/compile.elixir differ diff --git a/_build/dev/lib/finch/.mix/compile.elixir_scm b/_build/dev/lib/finch/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/finch/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/finch/.mix/compile.fetch b/_build/dev/lib/finch/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.Error.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.Error.beam new file mode 100644 index 0000000..bd6019e Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.Error.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Conn.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Conn.beam new file mode 100644 index 0000000..627f78e Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Conn.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.State.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.State.beam new file mode 100644 index 0000000..a0e5349 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.State.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.beam new file mode 100644 index 0000000..ce2b95c Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.Pool.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.PoolMetrics.beam 
b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.PoolMetrics.beam new file mode 100644 index 0000000..56ab155 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP1.PoolMetrics.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.Pool.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.Pool.beam new file mode 100644 index 0000000..1bd1199 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.Pool.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.PoolMetrics.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.PoolMetrics.beam new file mode 100644 index 0000000..56fd750 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.PoolMetrics.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.RequestStream.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.RequestStream.beam new file mode 100644 index 0000000..e5cc3ce Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.HTTP2.RequestStream.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.Pool.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.Pool.beam new file mode 100644 index 0000000..3b86572 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.Pool.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.PoolManager.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.PoolManager.beam new file mode 100644 index 0000000..b3c8d40 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.PoolManager.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.Request.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.Request.beam new file mode 100644 index 0000000..b1b3d31 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.Request.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.Response.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.Response.beam new file mode 100644 index 0000000..669f685 Binary files /dev/null and 
b/_build/dev/lib/finch/ebin/Elixir.Finch.Response.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.SSL.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.SSL.beam new file mode 100644 index 0000000..99d17b9 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.SSL.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.Telemetry.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.Telemetry.beam new file mode 100644 index 0000000..6f72334 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.Telemetry.beam differ diff --git a/_build/dev/lib/finch/ebin/Elixir.Finch.beam b/_build/dev/lib/finch/ebin/Elixir.Finch.beam new file mode 100644 index 0000000..ae1fd81 Binary files /dev/null and b/_build/dev/lib/finch/ebin/Elixir.Finch.beam differ diff --git a/_build/dev/lib/finch/ebin/finch.app b/_build/dev/lib/finch/ebin/finch.app new file mode 100644 index 0000000..eaab183 --- /dev/null +++ b/_build/dev/lib/finch/ebin/finch.app @@ -0,0 +1,17 @@ +{application,finch, + [{modules,['Elixir.Finch','Elixir.Finch.Error', + 'Elixir.Finch.HTTP1.Conn','Elixir.Finch.HTTP1.Pool', + 'Elixir.Finch.HTTP1.Pool.State', + 'Elixir.Finch.HTTP1.PoolMetrics', + 'Elixir.Finch.HTTP2.Pool', + 'Elixir.Finch.HTTP2.PoolMetrics', + 'Elixir.Finch.HTTP2.RequestStream', + 'Elixir.Finch.Pool','Elixir.Finch.PoolManager', + 'Elixir.Finch.Request','Elixir.Finch.Response', + 'Elixir.Finch.SSL','Elixir.Finch.Telemetry']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,mint,nimble_pool, + nimble_options,telemetry,mime]}, + {description,"An HTTP client focused on performance."}, + {registered,[]}, + {vsn,"0.21.0"}]}. 
diff --git a/_build/dev/lib/hpax/.mix/compile.elixir b/_build/dev/lib/hpax/.mix/compile.elixir new file mode 100644 index 0000000..72a5f51 Binary files /dev/null and b/_build/dev/lib/hpax/.mix/compile.elixir differ diff --git a/_build/dev/lib/hpax/.mix/compile.elixir_scm b/_build/dev/lib/hpax/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/hpax/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/hpax/.mix/compile.fetch b/_build/dev/lib/hpax/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/hpax/ebin/Elixir.HPAX.Huffman.beam b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Huffman.beam new file mode 100644 index 0000000..c8aff58 Binary files /dev/null and b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Huffman.beam differ diff --git a/_build/dev/lib/hpax/ebin/Elixir.HPAX.Table.beam b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Table.beam new file mode 100644 index 0000000..9a59ef7 Binary files /dev/null and b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Table.beam differ diff --git a/_build/dev/lib/hpax/ebin/Elixir.HPAX.Types.beam b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Types.beam new file mode 100644 index 0000000..2025bb7 Binary files /dev/null and b/_build/dev/lib/hpax/ebin/Elixir.HPAX.Types.beam differ diff --git a/_build/dev/lib/hpax/ebin/Elixir.HPAX.beam b/_build/dev/lib/hpax/ebin/Elixir.HPAX.beam new file mode 100644 index 0000000..b000351 Binary files /dev/null and b/_build/dev/lib/hpax/ebin/Elixir.HPAX.beam differ diff --git a/_build/dev/lib/hpax/ebin/hpax.app b/_build/dev/lib/hpax/ebin/hpax.app new file mode 100644 index 0000000..2865e61 --- /dev/null +++ b/_build/dev/lib/hpax/ebin/hpax.app @@ -0,0 +1,8 @@ +{application,hpax, + [{modules,['Elixir.HPAX','Elixir.HPAX.Huffman', + 'Elixir.HPAX.Table','Elixir.HPAX.Types']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir]}, + {description,"Implementation of the HPACK protocol (RFC 7541) for Elixir"}, + 
{registered,[]}, + {vsn,"1.0.3"}]}. diff --git a/_build/dev/lib/idna/.mix/compile.fetch b/_build/dev/lib/idna/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/idna/ebin/idna.app b/_build/dev/lib/idna/ebin/idna.app new file mode 100644 index 0000000..3eb4464 --- /dev/null +++ b/_build/dev/lib/idna/ebin/idna.app @@ -0,0 +1,9 @@ +{application,idna, + [{description,"A pure Erlang IDNA implementation"}, + {vsn,"6.1.1"}, + {modules,[idna,idna_bidi,idna_context,idna_data,idna_mapping, + idna_table,idna_ucs,punycode]}, + {registered,[]}, + {applications,[kernel,stdlib,unicode_util_compat]}, + {licenses,["MIT"]}, + {links,[{"Github","https://github.com/benoitc/erlang-idna"}]}]}. diff --git a/_build/dev/lib/idna/ebin/idna.beam b/_build/dev/lib/idna/ebin/idna.beam new file mode 100644 index 0000000..a095647 Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_bidi.beam b/_build/dev/lib/idna/ebin/idna_bidi.beam new file mode 100644 index 0000000..a7b0443 Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna_bidi.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_context.beam b/_build/dev/lib/idna/ebin/idna_context.beam new file mode 100644 index 0000000..bf9e104 Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna_context.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_data.beam b/_build/dev/lib/idna/ebin/idna_data.beam new file mode 100644 index 0000000..79916c5 Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna_data.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_mapping.beam b/_build/dev/lib/idna/ebin/idna_mapping.beam new file mode 100644 index 0000000..98be20a Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna_mapping.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_table.beam b/_build/dev/lib/idna/ebin/idna_table.beam new file mode 100644 index 0000000..0505511 Binary files /dev/null and 
b/_build/dev/lib/idna/ebin/idna_table.beam differ diff --git a/_build/dev/lib/idna/ebin/idna_ucs.beam b/_build/dev/lib/idna/ebin/idna_ucs.beam new file mode 100644 index 0000000..7f4f86f Binary files /dev/null and b/_build/dev/lib/idna/ebin/idna_ucs.beam differ diff --git a/_build/dev/lib/idna/ebin/punycode.beam b/_build/dev/lib/idna/ebin/punycode.beam new file mode 100644 index 0000000..6e562d1 Binary files /dev/null and b/_build/dev/lib/idna/ebin/punycode.beam differ diff --git a/_build/dev/lib/idna/mix.rebar.config b/_build/dev/lib/idna/mix.rebar.config new file mode 100644 index 0000000..925f046 --- /dev/null +++ b/_build/dev/lib/idna/mix.rebar.config @@ -0,0 +1,3 @@ +{erl_opts,[]}. +{deps,[{unicode_util_compat,"~>0.7.0"}]}. +{overrides,[]}. diff --git a/_build/dev/lib/idna/src b/_build/dev/lib/idna/src new file mode 120000 index 0000000..4cb49e8 --- /dev/null +++ b/_build/dev/lib/idna/src @@ -0,0 +1 @@ +../../../../deps/idna/src \ No newline at end of file diff --git a/_build/dev/lib/jason/.mix/compile.elixir b/_build/dev/lib/jason/.mix/compile.elixir new file mode 100644 index 0000000..c2f4acc Binary files /dev/null and b/_build/dev/lib/jason/.mix/compile.elixir differ diff --git a/_build/dev/lib/jason/.mix/compile.elixir_scm b/_build/dev/lib/jason/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/jason/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/jason/.mix/compile.fetch b/_build/dev/lib/jason/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/jason/ebin/Elixir.Enumerable.Jason.OrderedObject.beam b/_build/dev/lib/jason/ebin/Elixir.Enumerable.Jason.OrderedObject.beam new file mode 100644 index 0000000..043c600 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Enumerable.Jason.OrderedObject.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Codegen.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Codegen.beam new file mode 
100644 index 0000000..ac76bfd Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Codegen.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.DecodeError.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.DecodeError.beam new file mode 100644 index 0000000..076ab56 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.DecodeError.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.Unescape.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.Unescape.beam new file mode 100644 index 0000000..461c77d Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.Unescape.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.beam new file mode 100644 index 0000000..c9872e1 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Decoder.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encode.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encode.beam new file mode 100644 index 0000000..2151efc Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encode.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.EncodeError.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.EncodeError.beam new file mode 100644 index 0000000..a4683cd Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.EncodeError.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Any.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Any.beam new file mode 100644 index 0000000..8cc8a63 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Any.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Atom.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Atom.beam new file mode 100644 index 0000000..3533ad9 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Atom.beam differ diff --git 
a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.BitString.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.BitString.beam new file mode 100644 index 0000000..eb87479 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.BitString.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Date.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Date.beam new file mode 100644 index 0000000..efee646 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Date.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.DateTime.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.DateTime.beam new file mode 100644 index 0000000..b440cef Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.DateTime.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Decimal.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Decimal.beam new file mode 100644 index 0000000..2fdc9fb Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Decimal.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Float.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Float.beam new file mode 100644 index 0000000..60c8b65 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Float.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Integer.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Integer.beam new file mode 100644 index 0000000..c9a2e8c Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Integer.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.Fragment.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.Fragment.beam new file mode 100644 index 0000000..f927119 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.Fragment.beam differ diff --git 
a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.OrderedObject.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.OrderedObject.beam new file mode 100644 index 0000000..f552826 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Jason.OrderedObject.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.List.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.List.beam new file mode 100644 index 0000000..02f8e2a Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.List.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Map.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Map.beam new file mode 100644 index 0000000..706272f Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Map.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.NaiveDateTime.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.NaiveDateTime.beam new file mode 100644 index 0000000..063a190 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.NaiveDateTime.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Time.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Time.beam new file mode 100644 index 0000000..d6518dc Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.Time.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.beam new file mode 100644 index 0000000..feb95e4 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Encoder.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Formatter.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Formatter.beam new file mode 100644 index 0000000..ab5b3e0 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Formatter.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Fragment.beam 
b/_build/dev/lib/jason/ebin/Elixir.Jason.Fragment.beam new file mode 100644 index 0000000..d0a9b87 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Fragment.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Helpers.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Helpers.beam new file mode 100644 index 0000000..aad9fe7 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Helpers.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.OrderedObject.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.OrderedObject.beam new file mode 100644 index 0000000..3ccae6a Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.OrderedObject.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.Sigil.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.Sigil.beam new file mode 100644 index 0000000..16f2db3 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.Sigil.beam differ diff --git a/_build/dev/lib/jason/ebin/Elixir.Jason.beam b/_build/dev/lib/jason/ebin/Elixir.Jason.beam new file mode 100644 index 0000000..f345db0 Binary files /dev/null and b/_build/dev/lib/jason/ebin/Elixir.Jason.beam differ diff --git a/_build/dev/lib/jason/ebin/jason.app b/_build/dev/lib/jason/ebin/jason.app new file mode 100644 index 0000000..10ad32c --- /dev/null +++ b/_build/dev/lib/jason/ebin/jason.app @@ -0,0 +1,27 @@ +{application,jason, + [{modules,['Elixir.Enumerable.Jason.OrderedObject', + 'Elixir.Jason','Elixir.Jason.Codegen', + 'Elixir.Jason.DecodeError','Elixir.Jason.Decoder', + 'Elixir.Jason.Decoder.Unescape','Elixir.Jason.Encode', + 'Elixir.Jason.EncodeError','Elixir.Jason.Encoder', + 'Elixir.Jason.Encoder.Any', + 'Elixir.Jason.Encoder.Atom', + 'Elixir.Jason.Encoder.BitString', + 'Elixir.Jason.Encoder.Date', + 'Elixir.Jason.Encoder.DateTime', + 'Elixir.Jason.Encoder.Decimal', + 'Elixir.Jason.Encoder.Float', + 'Elixir.Jason.Encoder.Integer', + 'Elixir.Jason.Encoder.Jason.Fragment', + 
'Elixir.Jason.Encoder.Jason.OrderedObject', + 'Elixir.Jason.Encoder.List', + 'Elixir.Jason.Encoder.Map', + 'Elixir.Jason.Encoder.NaiveDateTime', + 'Elixir.Jason.Encoder.Time','Elixir.Jason.Formatter', + 'Elixir.Jason.Fragment','Elixir.Jason.Helpers', + 'Elixir.Jason.OrderedObject','Elixir.Jason.Sigil']}, + {optional_applications,[decimal]}, + {applications,[kernel,stdlib,elixir,decimal]}, + {description,"A blazing fast JSON parser and generator in pure Elixir.\n"}, + {registered,[]}, + {vsn,"1.4.5"}]}. diff --git a/_build/dev/lib/jose/.mix/compile.elixir b/_build/dev/lib/jose/.mix/compile.elixir new file mode 100644 index 0000000..70d43ee Binary files /dev/null and b/_build/dev/lib/jose/.mix/compile.elixir differ diff --git a/_build/dev/lib/jose/.mix/compile.elixir_scm b/_build/dev/lib/jose/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/jose/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/jose/.mix/compile.erlang b/_build/dev/lib/jose/.mix/compile.erlang new file mode 100644 index 0000000..0c3e746 Binary files /dev/null and b/_build/dev/lib/jose/.mix/compile.erlang differ diff --git a/_build/dev/lib/jose/.mix/compile.fetch b/_build/dev/lib/jose/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/jose/ebin/Elixir.Inspect.JOSE.JWK.beam b/_build/dev/lib/jose/ebin/Elixir.Inspect.JOSE.JWK.beam new file mode 100644 index 0000000..7c7f4cc Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.Inspect.JOSE.JWK.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.JWA.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWA.beam new file mode 100644 index 0000000..ad81e75 Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWA.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.JWE.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWE.beam new file mode 100644 index 0000000..7b3238e Binary files /dev/null and 
b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWE.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.JWK.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWK.beam new file mode 100644 index 0000000..1780cdd Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWK.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.JWS.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWS.beam new file mode 100644 index 0000000..c2f4cc4 Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWS.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.JWT.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWT.beam new file mode 100644 index 0000000..772c652 Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.JOSE.JWT.beam differ diff --git a/_build/dev/lib/jose/ebin/Elixir.JOSE.beam b/_build/dev/lib/jose/ebin/Elixir.JOSE.beam new file mode 100644 index 0000000..d8b2a65 Binary files /dev/null and b/_build/dev/lib/jose/ebin/Elixir.JOSE.beam differ diff --git a/_build/dev/lib/jose/ebin/jose.app b/_build/dev/lib/jose/ebin/jose.app new file mode 100644 index 0000000..d77244c --- /dev/null +++ b/_build/dev/lib/jose/ebin/jose.app @@ -0,0 +1,58 @@ +{application,jose, + [{modules,['Elixir.Inspect.JOSE.JWK','Elixir.JOSE', + 'Elixir.JOSE.JWA','Elixir.JOSE.JWE','Elixir.JOSE.JWK', + 'Elixir.JOSE.JWS','Elixir.JOSE.JWT',jose,jose_app, + jose_base64,jose_base64url,jose_block_encryptor, + jose_chacha20_poly1305,jose_chacha20_poly1305_crypto, + jose_chacha20_poly1305_libsodium, + jose_chacha20_poly1305_unsupported,jose_crypto_compat, + jose_curve25519,jose_curve25519_crypto, + jose_curve25519_fallback,jose_curve25519_libdecaf, + jose_curve25519_libsodium,jose_curve25519_unsupported, + jose_curve448,jose_curve448_crypto, + jose_curve448_fallback,jose_curve448_libdecaf, + jose_curve448_unsupported,jose_json,jose_json_jason, + jose_json_jiffy,jose_json_jsone,jose_json_jsx, + jose_json_ojson,jose_json_otp,jose_json_poison, + jose_json_poison_compat_encoder, + 
jose_json_poison_lexical_encoder,jose_json_thoas, + jose_json_unsupported,jose_jwa,jose_jwa_aes, + jose_jwa_aes_kw,jose_jwa_base64url,jose_jwa_bench, + jose_jwa_chacha20,jose_jwa_chacha20_poly1305, + jose_jwa_concat_kdf,jose_jwa_curve25519, + jose_jwa_curve448,jose_jwa_ed25519,jose_jwa_ed448, + jose_jwa_hchacha20,jose_jwa_math,jose_jwa_pkcs1, + jose_jwa_pkcs5,jose_jwa_pkcs7,jose_jwa_poly1305, + jose_jwa_sha3,jose_jwa_unsupported,jose_jwa_x25519, + jose_jwa_x448,jose_jwa_xchacha20, + jose_jwa_xchacha20_poly1305,jose_jwe,jose_jwe_alg, + jose_jwe_alg_aes_kw,jose_jwe_alg_c20p_kw, + jose_jwe_alg_dir,jose_jwe_alg_ecdh_1pu, + jose_jwe_alg_ecdh_es,jose_jwe_alg_ecdh_ss, + jose_jwe_alg_pbes2,jose_jwe_alg_rsa, + jose_jwe_alg_xc20p_kw,jose_jwe_enc,jose_jwe_enc_aes, + jose_jwe_enc_c20p,jose_jwe_enc_xc20p,jose_jwe_zip, + jose_jwk,jose_jwk_der,jose_jwk_kty,jose_jwk_kty_ec, + jose_jwk_kty_oct,jose_jwk_kty_okp_ed25519, + jose_jwk_kty_okp_ed25519ph,jose_jwk_kty_okp_ed448, + jose_jwk_kty_okp_ed448ph,jose_jwk_kty_okp_x25519, + jose_jwk_kty_okp_x448,jose_jwk_kty_rsa,jose_jwk_oct, + jose_jwk_openssh_key,jose_jwk_pem,jose_jwk_set, + jose_jwk_use_enc,jose_jwk_use_sig,jose_jws, + jose_jws_alg,jose_jws_alg_ecdsa,jose_jws_alg_eddsa, + jose_jws_alg_hmac,jose_jws_alg_none, + jose_jws_alg_poly1305,jose_jws_alg_rsa_pkcs1_v1_5, + jose_jws_alg_rsa_pss,jose_jwt,jose_public_key, + jose_server,jose_sha3,jose_sha3_keccakf1600_driver, + jose_sha3_keccakf1600_nif,jose_sha3_libdecaf, + jose_sha3_unsupported,jose_sup, + jose_xchacha20_poly1305, + jose_xchacha20_poly1305_crypto, + jose_xchacha20_poly1305_libsodium, + jose_xchacha20_poly1305_unsupported]}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,crypto,asn1,public_key]}, + {description,"JSON Object Signing and Encryption (JOSE) for Erlang and Elixir."}, + {registered,[]}, + {vsn,"1.11.12"}, + {mod,{jose_app,[]}}]}. 
diff --git a/_build/dev/lib/jose/ebin/jose.beam b/_build/dev/lib/jose/ebin/jose.beam new file mode 100644 index 0000000..adac37a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_app.beam b/_build/dev/lib/jose/ebin/jose_app.beam new file mode 100644 index 0000000..430dd94 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_app.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_base64.beam b/_build/dev/lib/jose/ebin/jose_base64.beam new file mode 100644 index 0000000..6f79f6d Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_base64.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_base64url.beam b/_build/dev/lib/jose/ebin/jose_base64url.beam new file mode 100644 index 0000000..46bd0f0 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_base64url.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_block_encryptor.beam b/_build/dev/lib/jose/ebin/jose_block_encryptor.beam new file mode 100644 index 0000000..fc6936f Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_block_encryptor.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_chacha20_poly1305.beam b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305.beam new file mode 100644 index 0000000..f839c72 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_crypto.beam b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_crypto.beam new file mode 100644 index 0000000..0f1d720 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_crypto.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_libsodium.beam b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_libsodium.beam new file mode 100644 index 0000000..9fc17f2 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_libsodium.beam differ diff --git 
a/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_unsupported.beam b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_unsupported.beam new file mode 100644 index 0000000..b653c29 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_chacha20_poly1305_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_crypto_compat.beam b/_build/dev/lib/jose/ebin/jose_crypto_compat.beam new file mode 100644 index 0000000..7cc1bfe Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_crypto_compat.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519.beam b/_build/dev/lib/jose/ebin/jose_curve25519.beam new file mode 100644 index 0000000..f68d3fc Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519_crypto.beam b/_build/dev/lib/jose/ebin/jose_curve25519_crypto.beam new file mode 100644 index 0000000..507d3ab Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve25519_crypto.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519_fallback.beam b/_build/dev/lib/jose/ebin/jose_curve25519_fallback.beam new file mode 100644 index 0000000..01d008c Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve25519_fallback.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519_libdecaf.beam b/_build/dev/lib/jose/ebin/jose_curve25519_libdecaf.beam new file mode 100644 index 0000000..3431872 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve25519_libdecaf.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519_libsodium.beam b/_build/dev/lib/jose/ebin/jose_curve25519_libsodium.beam new file mode 100644 index 0000000..dd0e2de Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve25519_libsodium.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve25519_unsupported.beam b/_build/dev/lib/jose/ebin/jose_curve25519_unsupported.beam new file mode 100644 index 0000000..1540e37 Binary files /dev/null and 
b/_build/dev/lib/jose/ebin/jose_curve25519_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve448.beam b/_build/dev/lib/jose/ebin/jose_curve448.beam new file mode 100644 index 0000000..bea202a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve448_crypto.beam b/_build/dev/lib/jose/ebin/jose_curve448_crypto.beam new file mode 100644 index 0000000..c851086 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve448_crypto.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve448_fallback.beam b/_build/dev/lib/jose/ebin/jose_curve448_fallback.beam new file mode 100644 index 0000000..7f142c8 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve448_fallback.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve448_libdecaf.beam b/_build/dev/lib/jose/ebin/jose_curve448_libdecaf.beam new file mode 100644 index 0000000..576b7a3 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve448_libdecaf.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_curve448_unsupported.beam b/_build/dev/lib/jose/ebin/jose_curve448_unsupported.beam new file mode 100644 index 0000000..5f9f1b8 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_curve448_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json.beam b/_build/dev/lib/jose/ebin/jose_json.beam new file mode 100644 index 0000000..abb6c70 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_jason.beam b/_build/dev/lib/jose/ebin/jose_json_jason.beam new file mode 100644 index 0000000..63ce69b Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_jason.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_jiffy.beam b/_build/dev/lib/jose/ebin/jose_json_jiffy.beam new file mode 100644 index 0000000..b294d60 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_jiffy.beam differ diff 
--git a/_build/dev/lib/jose/ebin/jose_json_jsone.beam b/_build/dev/lib/jose/ebin/jose_json_jsone.beam new file mode 100644 index 0000000..bec5a54 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_jsone.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_jsx.beam b/_build/dev/lib/jose/ebin/jose_json_jsx.beam new file mode 100644 index 0000000..3bf545a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_jsx.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_ojson.beam b/_build/dev/lib/jose/ebin/jose_json_ojson.beam new file mode 100644 index 0000000..4f6dda1 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_ojson.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_otp.beam b/_build/dev/lib/jose/ebin/jose_json_otp.beam new file mode 100644 index 0000000..d9016e5 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_otp.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_poison.beam b/_build/dev/lib/jose/ebin/jose_json_poison.beam new file mode 100644 index 0000000..be6182d Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_poison.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_poison_compat_encoder.beam b/_build/dev/lib/jose/ebin/jose_json_poison_compat_encoder.beam new file mode 100644 index 0000000..1c33ed3 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_poison_compat_encoder.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_poison_lexical_encoder.beam b/_build/dev/lib/jose/ebin/jose_json_poison_lexical_encoder.beam new file mode 100644 index 0000000..7622711 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_poison_lexical_encoder.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_json_thoas.beam b/_build/dev/lib/jose/ebin/jose_json_thoas.beam new file mode 100644 index 0000000..c047a4a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_thoas.beam differ diff --git 
a/_build/dev/lib/jose/ebin/jose_json_unsupported.beam b/_build/dev/lib/jose/ebin/jose_json_unsupported.beam new file mode 100644 index 0000000..aafeba0 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_json_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa.beam b/_build/dev/lib/jose/ebin/jose_jwa.beam new file mode 100644 index 0000000..68b2883 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_aes.beam b/_build/dev/lib/jose/ebin/jose_jwa_aes.beam new file mode 100644 index 0000000..e6c8962 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_aes.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_aes_kw.beam b/_build/dev/lib/jose/ebin/jose_jwa_aes_kw.beam new file mode 100644 index 0000000..bfe9475 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_aes_kw.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_base64url.beam b/_build/dev/lib/jose/ebin/jose_jwa_base64url.beam new file mode 100644 index 0000000..26c9141 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_base64url.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_bench.beam b/_build/dev/lib/jose/ebin/jose_jwa_bench.beam new file mode 100644 index 0000000..53ec339 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_bench.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_chacha20.beam b/_build/dev/lib/jose/ebin/jose_jwa_chacha20.beam new file mode 100644 index 0000000..cac5417 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_chacha20.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_chacha20_poly1305.beam b/_build/dev/lib/jose/ebin/jose_jwa_chacha20_poly1305.beam new file mode 100644 index 0000000..75cd10b Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_chacha20_poly1305.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_concat_kdf.beam 
b/_build/dev/lib/jose/ebin/jose_jwa_concat_kdf.beam new file mode 100644 index 0000000..a95ce59 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_concat_kdf.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_curve25519.beam b/_build/dev/lib/jose/ebin/jose_jwa_curve25519.beam new file mode 100644 index 0000000..7812ae0 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_curve25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_curve448.beam b/_build/dev/lib/jose/ebin/jose_jwa_curve448.beam new file mode 100644 index 0000000..2b9e7cd Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_curve448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_ed25519.beam b/_build/dev/lib/jose/ebin/jose_jwa_ed25519.beam new file mode 100644 index 0000000..655cb08 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_ed25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_ed448.beam b/_build/dev/lib/jose/ebin/jose_jwa_ed448.beam new file mode 100644 index 0000000..24c67af Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_ed448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_hchacha20.beam b/_build/dev/lib/jose/ebin/jose_jwa_hchacha20.beam new file mode 100644 index 0000000..8b7d51d Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_hchacha20.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_math.beam b/_build/dev/lib/jose/ebin/jose_jwa_math.beam new file mode 100644 index 0000000..25b3c08 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_math.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_pkcs1.beam b/_build/dev/lib/jose/ebin/jose_jwa_pkcs1.beam new file mode 100644 index 0000000..10d9bae Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_pkcs1.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_pkcs5.beam b/_build/dev/lib/jose/ebin/jose_jwa_pkcs5.beam new file mode 100644 index 0000000..abcf670 Binary files 
/dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_pkcs5.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_pkcs7.beam b/_build/dev/lib/jose/ebin/jose_jwa_pkcs7.beam new file mode 100644 index 0000000..14a4845 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_pkcs7.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_poly1305.beam b/_build/dev/lib/jose/ebin/jose_jwa_poly1305.beam new file mode 100644 index 0000000..115afa5 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_poly1305.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_sha3.beam b/_build/dev/lib/jose/ebin/jose_jwa_sha3.beam new file mode 100644 index 0000000..ce5f95a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_sha3.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_unsupported.beam b/_build/dev/lib/jose/ebin/jose_jwa_unsupported.beam new file mode 100644 index 0000000..c3be306 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_x25519.beam b/_build/dev/lib/jose/ebin/jose_jwa_x25519.beam new file mode 100644 index 0000000..1b1de23 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_x25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_x448.beam b/_build/dev/lib/jose/ebin/jose_jwa_x448.beam new file mode 100644 index 0000000..7f9a7d0 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_x448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_xchacha20.beam b/_build/dev/lib/jose/ebin/jose_jwa_xchacha20.beam new file mode 100644 index 0000000..dd89339 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_xchacha20.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwa_xchacha20_poly1305.beam b/_build/dev/lib/jose/ebin/jose_jwa_xchacha20_poly1305.beam new file mode 100644 index 0000000..80cca9a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwa_xchacha20_poly1305.beam differ diff --git 
a/_build/dev/lib/jose/ebin/jose_jwe.beam b/_build/dev/lib/jose/ebin/jose_jwe.beam new file mode 100644 index 0000000..6290318 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg.beam new file mode 100644 index 0000000..1abb379 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_aes_kw.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_aes_kw.beam new file mode 100644 index 0000000..be194b4 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_aes_kw.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_c20p_kw.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_c20p_kw.beam new file mode 100644 index 0000000..b6580f4 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_c20p_kw.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_dir.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_dir.beam new file mode 100644 index 0000000..e673cce Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_dir.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_1pu.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_1pu.beam new file mode 100644 index 0000000..4154cd9 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_1pu.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_es.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_es.beam new file mode 100644 index 0000000..b993155 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_es.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_ss.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_ss.beam new file mode 100644 index 0000000..ea8e3c2 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_ecdh_ss.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_pbes2.beam 
b/_build/dev/lib/jose/ebin/jose_jwe_alg_pbes2.beam new file mode 100644 index 0000000..19dce1a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_pbes2.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_rsa.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_rsa.beam new file mode 100644 index 0000000..1a16562 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_rsa.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_alg_xc20p_kw.beam b/_build/dev/lib/jose/ebin/jose_jwe_alg_xc20p_kw.beam new file mode 100644 index 0000000..7f8c66f Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_alg_xc20p_kw.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_enc.beam b/_build/dev/lib/jose/ebin/jose_jwe_enc.beam new file mode 100644 index 0000000..c4e03bd Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_enc.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_enc_aes.beam b/_build/dev/lib/jose/ebin/jose_jwe_enc_aes.beam new file mode 100644 index 0000000..e96670e Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_enc_aes.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_enc_c20p.beam b/_build/dev/lib/jose/ebin/jose_jwe_enc_c20p.beam new file mode 100644 index 0000000..f017587 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_enc_c20p.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_enc_xc20p.beam b/_build/dev/lib/jose/ebin/jose_jwe_enc_xc20p.beam new file mode 100644 index 0000000..fefb27f Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_enc_xc20p.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwe_zip.beam b/_build/dev/lib/jose/ebin/jose_jwe_zip.beam new file mode 100644 index 0000000..af26ff3 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwe_zip.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk.beam b/_build/dev/lib/jose/ebin/jose_jwk.beam new file mode 100644 index 0000000..2a3c23e Binary files /dev/null and 
b/_build/dev/lib/jose/ebin/jose_jwk.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_der.beam b/_build/dev/lib/jose/ebin/jose_jwk_der.beam new file mode 100644 index 0000000..7050a53 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_der.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty.beam new file mode 100644 index 0000000..f497553 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_ec.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_ec.beam new file mode 100644 index 0000000..20dc22f Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_ec.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_oct.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_oct.beam new file mode 100644 index 0000000..7797320 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_oct.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519.beam new file mode 100644 index 0000000..3f8b476 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519ph.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519ph.beam new file mode 100644 index 0000000..6a4bff4 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed25519ph.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448.beam new file mode 100644 index 0000000..492b54a Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448ph.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448ph.beam new file mode 100644 index 0000000..125b348 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_ed448ph.beam 
differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x25519.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x25519.beam new file mode 100644 index 0000000..dadccee Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x25519.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x448.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x448.beam new file mode 100644 index 0000000..3352391 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_okp_x448.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_kty_rsa.beam b/_build/dev/lib/jose/ebin/jose_jwk_kty_rsa.beam new file mode 100644 index 0000000..8eeb1c4 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_kty_rsa.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_oct.beam b/_build/dev/lib/jose/ebin/jose_jwk_oct.beam new file mode 100644 index 0000000..65735c5 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_oct.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_openssh_key.beam b/_build/dev/lib/jose/ebin/jose_jwk_openssh_key.beam new file mode 100644 index 0000000..53c38e6 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_openssh_key.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_pem.beam b/_build/dev/lib/jose/ebin/jose_jwk_pem.beam new file mode 100644 index 0000000..5aac163 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_pem.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_set.beam b/_build/dev/lib/jose/ebin/jose_jwk_set.beam new file mode 100644 index 0000000..6a8e4ab Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_set.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_use_enc.beam b/_build/dev/lib/jose/ebin/jose_jwk_use_enc.beam new file mode 100644 index 0000000..13b6d5c Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_use_enc.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwk_use_sig.beam 
b/_build/dev/lib/jose/ebin/jose_jwk_use_sig.beam new file mode 100644 index 0000000..e11c185 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwk_use_sig.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws.beam b/_build/dev/lib/jose/ebin/jose_jws.beam new file mode 100644 index 0000000..ccf30b3 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg.beam b/_build/dev/lib/jose/ebin/jose_jws_alg.beam new file mode 100644 index 0000000..f290d98 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_ecdsa.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_ecdsa.beam new file mode 100644 index 0000000..2fee6fc Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_ecdsa.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_eddsa.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_eddsa.beam new file mode 100644 index 0000000..5559a72 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_eddsa.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_hmac.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_hmac.beam new file mode 100644 index 0000000..ffc919d Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_hmac.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_none.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_none.beam new file mode 100644 index 0000000..8130638 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_none.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_poly1305.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_poly1305.beam new file mode 100644 index 0000000..c001b89 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_poly1305.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pkcs1_v1_5.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pkcs1_v1_5.beam new file mode 100644 index 0000000..8b486d5 
Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pkcs1_v1_5.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pss.beam b/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pss.beam new file mode 100644 index 0000000..0168b15 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jws_alg_rsa_pss.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_jwt.beam b/_build/dev/lib/jose/ebin/jose_jwt.beam new file mode 100644 index 0000000..225d3f2 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_jwt.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_public_key.beam b/_build/dev/lib/jose/ebin/jose_public_key.beam new file mode 100644 index 0000000..200f9e5 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_public_key.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_server.beam b/_build/dev/lib/jose/ebin/jose_server.beam new file mode 100644 index 0000000..11114b6 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_server.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_sha3.beam b/_build/dev/lib/jose/ebin/jose_sha3.beam new file mode 100644 index 0000000..a816f11 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sha3.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_driver.beam b/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_driver.beam new file mode 100644 index 0000000..c174691 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_driver.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_nif.beam b/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_nif.beam new file mode 100644 index 0000000..c025445 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sha3_keccakf1600_nif.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_sha3_libdecaf.beam b/_build/dev/lib/jose/ebin/jose_sha3_libdecaf.beam new file mode 100644 index 0000000..0ecb9f1 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sha3_libdecaf.beam differ 
diff --git a/_build/dev/lib/jose/ebin/jose_sha3_unsupported.beam b/_build/dev/lib/jose/ebin/jose_sha3_unsupported.beam new file mode 100644 index 0000000..74d7740 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sha3_unsupported.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_sup.beam b/_build/dev/lib/jose/ebin/jose_sup.beam new file mode 100644 index 0000000..db0ad14 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_sup.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305.beam b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305.beam new file mode 100644 index 0000000..281ac48 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_crypto.beam b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_crypto.beam new file mode 100644 index 0000000..69f0de5 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_crypto.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_libsodium.beam b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_libsodium.beam new file mode 100644 index 0000000..7dcf422 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_libsodium.beam differ diff --git a/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_unsupported.beam b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_unsupported.beam new file mode 100644 index 0000000..cc60b79 Binary files /dev/null and b/_build/dev/lib/jose/ebin/jose_xchacha20_poly1305_unsupported.beam differ diff --git a/_build/dev/lib/jose/include b/_build/dev/lib/jose/include new file mode 120000 index 0000000..e566a1b --- /dev/null +++ b/_build/dev/lib/jose/include @@ -0,0 +1 @@ +../../../../deps/jose/include \ No newline at end of file diff --git a/_build/dev/lib/jose/priv b/_build/dev/lib/jose/priv new file mode 120000 index 0000000..ae050a7 --- /dev/null +++ b/_build/dev/lib/jose/priv @@ -0,0 +1 @@ 
+../../../../deps/jose/priv \ No newline at end of file diff --git a/_build/dev/lib/mime/.mix/compile.elixir b/_build/dev/lib/mime/.mix/compile.elixir new file mode 100644 index 0000000..7e5a45d Binary files /dev/null and b/_build/dev/lib/mime/.mix/compile.elixir differ diff --git a/_build/dev/lib/mime/.mix/compile.elixir_scm b/_build/dev/lib/mime/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/mime/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/mime/.mix/compile.fetch b/_build/dev/lib/mime/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/mime/ebin/Elixir.MIME.beam b/_build/dev/lib/mime/ebin/Elixir.MIME.beam new file mode 100644 index 0000000..cd8e430 Binary files /dev/null and b/_build/dev/lib/mime/ebin/Elixir.MIME.beam differ diff --git a/_build/dev/lib/mime/ebin/mime.app b/_build/dev/lib/mime/ebin/mime.app new file mode 100644 index 0000000..d840ce0 --- /dev/null +++ b/_build/dev/lib/mime/ebin/mime.app @@ -0,0 +1,11 @@ +{application,mime, + [{modules,['Elixir.MIME']}, + {compile_env,[{mime,[extensions],error}, + {mime,[suffixes],error}, + {mime,[types],error}]}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger]}, + {description,"A MIME type module for Elixir"}, + {registered,[]}, + {vsn,"2.0.7"}, + {env,[]}]}. 
diff --git a/_build/dev/lib/mint/.mix/compile.elixir b/_build/dev/lib/mint/.mix/compile.elixir new file mode 100644 index 0000000..4504d22 Binary files /dev/null and b/_build/dev/lib/mint/.mix/compile.elixir differ diff --git a/_build/dev/lib/mint/.mix/compile.elixir_scm b/_build/dev/lib/mint/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/mint/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/mint/.mix/compile.erlang b/_build/dev/lib/mint/.mix/compile.erlang new file mode 100644 index 0000000..b6c4949 Binary files /dev/null and b/_build/dev/lib/mint/.mix/compile.erlang differ diff --git a/_build/dev/lib/mint/.mix/compile.fetch b/_build/dev/lib/mint/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Conn.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Conn.beam new file mode 100644 index 0000000..e3c66c2 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Conn.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Headers.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Headers.beam new file mode 100644 index 0000000..3684669 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Headers.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.SSL.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.SSL.beam new file mode 100644 index 0000000..68c8bcd Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.SSL.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.TCP.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.TCP.beam new file mode 100644 index 0000000..f071bea Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.TCP.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.beam new file 
mode 100644 index 0000000..036dc70 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Transport.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Util.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Util.beam new file mode 100644 index 0000000..d4f9b65 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Core.Util.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP.beam new file mode 100644 index 0000000..c89df96 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Parse.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Parse.beam new file mode 100644 index 0000000..5ef39d1 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Parse.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Request.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Request.beam new file mode 100644 index 0000000..95f47a0 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Request.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Response.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Response.beam new file mode 100644 index 0000000..35f177d Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.Response.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.beam new file mode 100644 index 0000000..b628edf Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP1.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.Frame.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.Frame.beam new file mode 100644 index 0000000..e623ed6 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.Frame.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.beam 
b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.beam new file mode 100644 index 0000000..635dcdc Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTP2.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.HTTPError.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTPError.beam new file mode 100644 index 0000000..af0b15a Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.HTTPError.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Negotiate.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Negotiate.beam new file mode 100644 index 0000000..fab00b8 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Negotiate.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.TransportError.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.TransportError.beam new file mode 100644 index 0000000..6d1cb8a Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.TransportError.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.TunnelProxy.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.TunnelProxy.beam new file mode 100644 index 0000000..47e5877 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.TunnelProxy.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.Types.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.Types.beam new file mode 100644 index 0000000..8dd400f Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.Types.beam differ diff --git a/_build/dev/lib/mint/ebin/Elixir.Mint.UnsafeProxy.beam b/_build/dev/lib/mint/ebin/Elixir.Mint.UnsafeProxy.beam new file mode 100644 index 0000000..17deae7 Binary files /dev/null and b/_build/dev/lib/mint/ebin/Elixir.Mint.UnsafeProxy.beam differ diff --git a/_build/dev/lib/mint/ebin/mint.app b/_build/dev/lib/mint/ebin/mint.app new file mode 100644 index 0000000..7d375a5 --- /dev/null +++ b/_build/dev/lib/mint/ebin/mint.app @@ -0,0 +1,18 @@ +{application,mint, + [{modules,['Elixir.Mint.Core.Conn','Elixir.Mint.Core.Headers', + 
'Elixir.Mint.Core.Transport', + 'Elixir.Mint.Core.Transport.SSL', + 'Elixir.Mint.Core.Transport.TCP', + 'Elixir.Mint.Core.Util','Elixir.Mint.HTTP', + 'Elixir.Mint.HTTP1','Elixir.Mint.HTTP1.Parse', + 'Elixir.Mint.HTTP1.Request', + 'Elixir.Mint.HTTP1.Response','Elixir.Mint.HTTP2', + 'Elixir.Mint.HTTP2.Frame','Elixir.Mint.HTTPError', + 'Elixir.Mint.Negotiate','Elixir.Mint.TransportError', + 'Elixir.Mint.TunnelProxy','Elixir.Mint.Types', + 'Elixir.Mint.UnsafeProxy',mint_shims]}, + {optional_applications,[castore]}, + {applications,[kernel,stdlib,elixir,logger,ssl,castore,hpax]}, + {description,"Small and composable HTTP client."}, + {registered,[]}, + {vsn,"1.8.0"}]}. diff --git a/_build/dev/lib/mint/ebin/mint_shims.beam b/_build/dev/lib/mint/ebin/mint_shims.beam new file mode 100644 index 0000000..34e1831 Binary files /dev/null and b/_build/dev/lib/mint/ebin/mint_shims.beam differ diff --git a/_build/dev/lib/nimble_options/.mix/compile.elixir b/_build/dev/lib/nimble_options/.mix/compile.elixir new file mode 100644 index 0000000..31887cc Binary files /dev/null and b/_build/dev/lib/nimble_options/.mix/compile.elixir differ diff --git a/_build/dev/lib/nimble_options/.mix/compile.elixir_scm b/_build/dev/lib/nimble_options/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/nimble_options/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/nimble_options/.mix/compile.fetch b/_build/dev/lib/nimble_options/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.Docs.beam b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.Docs.beam new file mode 100644 index 0000000..5915de4 Binary files /dev/null and b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.Docs.beam differ diff --git a/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.ValidationError.beam 
b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.ValidationError.beam new file mode 100644 index 0000000..5124dd6 Binary files /dev/null and b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.ValidationError.beam differ diff --git a/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.beam b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.beam new file mode 100644 index 0000000..5b840fa Binary files /dev/null and b/_build/dev/lib/nimble_options/ebin/Elixir.NimbleOptions.beam differ diff --git a/_build/dev/lib/nimble_options/ebin/nimble_options.app b/_build/dev/lib/nimble_options/ebin/nimble_options.app new file mode 100644 index 0000000..86fce2b --- /dev/null +++ b/_build/dev/lib/nimble_options/ebin/nimble_options.app @@ -0,0 +1,8 @@ +{application,nimble_options, + [{modules,['Elixir.NimbleOptions','Elixir.NimbleOptions.Docs', + 'Elixir.NimbleOptions.ValidationError']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir]}, + {description,"A tiny library for validating and documenting high-level options"}, + {registered,[]}, + {vsn,"1.1.1"}]}. 
diff --git a/_build/dev/lib/nimble_pool/.mix/compile.elixir b/_build/dev/lib/nimble_pool/.mix/compile.elixir new file mode 100644 index 0000000..85436f5 Binary files /dev/null and b/_build/dev/lib/nimble_pool/.mix/compile.elixir differ diff --git a/_build/dev/lib/nimble_pool/.mix/compile.elixir_scm b/_build/dev/lib/nimble_pool/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/nimble_pool/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/nimble_pool/.mix/compile.fetch b/_build/dev/lib/nimble_pool/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.Application.beam b/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.Application.beam new file mode 100644 index 0000000..ee425ed Binary files /dev/null and b/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.Application.beam differ diff --git a/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.beam b/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.beam new file mode 100644 index 0000000..457e249 Binary files /dev/null and b/_build/dev/lib/nimble_pool/ebin/Elixir.NimblePool.beam differ diff --git a/_build/dev/lib/nimble_pool/ebin/nimble_pool.app b/_build/dev/lib/nimble_pool/ebin/nimble_pool.app new file mode 100644 index 0000000..d0bd75e --- /dev/null +++ b/_build/dev/lib/nimble_pool/ebin/nimble_pool.app @@ -0,0 +1,8 @@ +{application,nimble_pool, + [{modules,['Elixir.NimblePool','Elixir.NimblePool.Application']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger]}, + {description,"A tiny resource-pool implementation"}, + {registered,[]}, + {vsn,"1.1.0"}, + {mod,{'Elixir.NimblePool.Application',[]}}]}. 
diff --git a/_build/dev/lib/oidcc/.mix/compile.elixir b/_build/dev/lib/oidcc/.mix/compile.elixir new file mode 100644 index 0000000..612ee9b Binary files /dev/null and b/_build/dev/lib/oidcc/.mix/compile.elixir differ diff --git a/_build/dev/lib/oidcc/.mix/compile.elixir_scm b/_build/dev/lib/oidcc/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/oidcc/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/oidcc/.mix/compile.erlang b/_build/dev/lib/oidcc/.mix/compile.erlang new file mode 100644 index 0000000..174c069 Binary files /dev/null and b/_build/dev/lib/oidcc/.mix/compile.erlang differ diff --git a/_build/dev/lib/oidcc/.mix/compile.fetch b/_build/dev/lib/oidcc/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker.beam b/_build/dev/lib/oidcc/ebin/Elixir.Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker.beam new file mode 100644 index 0000000..01bcb89 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Authorization.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Authorization.beam new file mode 100644 index 0000000..0c89e62 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Authorization.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientContext.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientContext.beam new file mode 100644 index 0000000..f9a113f Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientContext.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.Response.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.Response.beam new file mode 100644 index 0000000..929b63b Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.Response.beam 
differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.beam new file mode 100644 index 0000000..623bbea Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ClientRegistration.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Logout.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Logout.beam new file mode 100644 index 0000000..2184f97 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Logout.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.Worker.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.Worker.beam new file mode 100644 index 0000000..f7717f3 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.Worker.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.beam new file mode 100644 index 0000000..572ff29 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.ProviderConfiguration.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.RecordStruct.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.RecordStruct.beam new file mode 100644 index 0000000..b3c8407 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.RecordStruct.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Access.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Access.beam new file mode 100644 index 0000000..3f1b5e6 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Access.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Id.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Id.beam new file mode 100644 index 0000000..3d6dca3 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Id.beam differ diff --git 
a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Refresh.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Refresh.beam new file mode 100644 index 0000000..5dcbd3d Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.Refresh.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.beam new file mode 100644 index 0000000..c5e2edb Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Token.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.TokenIntrospection.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.TokenIntrospection.beam new file mode 100644 index 0000000..3eeac95 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.TokenIntrospection.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Userinfo.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Userinfo.beam new file mode 100644 index 0000000..5e0fbb7 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.Userinfo.beam differ diff --git a/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.beam b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.beam new file mode 100644 index 0000000..9b810bf Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/Elixir.Oidcc.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc.app b/_build/dev/lib/oidcc/ebin/oidcc.app new file mode 100644 index 0000000..5346d29 --- /dev/null +++ b/_build/dev/lib/oidcc/ebin/oidcc.app @@ -0,0 +1,27 @@ +{application,oidcc, + [{modules,['Elixir.Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker', + 'Elixir.Oidcc','Elixir.Oidcc.Authorization', + 'Elixir.Oidcc.ClientContext', + 'Elixir.Oidcc.ClientRegistration', + 'Elixir.Oidcc.ClientRegistration.Response', + 'Elixir.Oidcc.Logout', + 'Elixir.Oidcc.ProviderConfiguration', + 'Elixir.Oidcc.ProviderConfiguration.Worker', + 'Elixir.Oidcc.RecordStruct','Elixir.Oidcc.Token', + 'Elixir.Oidcc.Token.Access','Elixir.Oidcc.Token.Id', + 'Elixir.Oidcc.Token.Refresh', + 
'Elixir.Oidcc.TokenIntrospection', + 'Elixir.Oidcc.Userinfo',oidcc,oidcc_auth_util, + oidcc_authorization,oidcc_backoff, + oidcc_client_context,oidcc_client_registration, + oidcc_decode_util,oidcc_http_util,oidcc_jwt_util, + oidcc_logout,oidcc_profile, + oidcc_provider_configuration, + oidcc_provider_configuration_worker,oidcc_scope, + oidcc_token,oidcc_token_introspection,oidcc_userinfo]}, + {optional_applications,[igniter]}, + {applications,[kernel,stdlib,elixir,inets,ssl,telemetry, + telemetry_registry,jose,igniter]}, + {description,"OpenID Connect client library for the BEAM."}, + {registered,[]}, + {vsn,"3.7.2"}]}. diff --git a/_build/dev/lib/oidcc/ebin/oidcc.beam b/_build/dev/lib/oidcc/ebin/oidcc.beam new file mode 100644 index 0000000..d4411ba Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_auth_util.beam b/_build/dev/lib/oidcc/ebin/oidcc_auth_util.beam new file mode 100644 index 0000000..9b40e57 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_auth_util.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_authorization.beam b/_build/dev/lib/oidcc/ebin/oidcc_authorization.beam new file mode 100644 index 0000000..159f0a4 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_authorization.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_backoff.beam b/_build/dev/lib/oidcc/ebin/oidcc_backoff.beam new file mode 100644 index 0000000..5c5023f Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_backoff.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_client_context.beam b/_build/dev/lib/oidcc/ebin/oidcc_client_context.beam new file mode 100644 index 0000000..9436da1 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_client_context.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_client_registration.beam b/_build/dev/lib/oidcc/ebin/oidcc_client_registration.beam new file mode 100644 index 0000000..e2eae4b Binary files /dev/null and 
b/_build/dev/lib/oidcc/ebin/oidcc_client_registration.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_decode_util.beam b/_build/dev/lib/oidcc/ebin/oidcc_decode_util.beam new file mode 100644 index 0000000..5c59755 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_decode_util.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_http_util.beam b/_build/dev/lib/oidcc/ebin/oidcc_http_util.beam new file mode 100644 index 0000000..b5f66aa Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_http_util.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_jwt_util.beam b/_build/dev/lib/oidcc/ebin/oidcc_jwt_util.beam new file mode 100644 index 0000000..3545090 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_jwt_util.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_logout.beam b/_build/dev/lib/oidcc/ebin/oidcc_logout.beam new file mode 100644 index 0000000..13da518 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_logout.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_profile.beam b/_build/dev/lib/oidcc/ebin/oidcc_profile.beam new file mode 100644 index 0000000..9efa39b Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_profile.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration.beam b/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration.beam new file mode 100644 index 0000000..56533a5 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration_worker.beam b/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration_worker.beam new file mode 100644 index 0000000..dc01751 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_provider_configuration_worker.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_scope.beam b/_build/dev/lib/oidcc/ebin/oidcc_scope.beam new file mode 100644 index 0000000..769c62c Binary files /dev/null and 
b/_build/dev/lib/oidcc/ebin/oidcc_scope.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_token.beam b/_build/dev/lib/oidcc/ebin/oidcc_token.beam new file mode 100644 index 0000000..5f25d12 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_token.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_token_introspection.beam b/_build/dev/lib/oidcc/ebin/oidcc_token_introspection.beam new file mode 100644 index 0000000..d380214 Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_token_introspection.beam differ diff --git a/_build/dev/lib/oidcc/ebin/oidcc_userinfo.beam b/_build/dev/lib/oidcc/ebin/oidcc_userinfo.beam new file mode 100644 index 0000000..1b830ae Binary files /dev/null and b/_build/dev/lib/oidcc/ebin/oidcc_userinfo.beam differ diff --git a/_build/dev/lib/oidcc/include b/_build/dev/lib/oidcc/include new file mode 120000 index 0000000..e826bac --- /dev/null +++ b/_build/dev/lib/oidcc/include @@ -0,0 +1 @@ +../../../../deps/oidcc/include \ No newline at end of file diff --git a/_build/dev/lib/phoenix/.mix/compile.elixir b/_build/dev/lib/phoenix/.mix/compile.elixir new file mode 100644 index 0000000..e60ec9c Binary files /dev/null and b/_build/dev/lib/phoenix/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix/.mix/compile.elixir_scm b/_build/dev/lib/phoenix/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix/.mix/compile.fetch b/_build/dev/lib/phoenix/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Inspect.Phoenix.Socket.Message.beam b/_build/dev/lib/phoenix/ebin/Elixir.Inspect.Phoenix.Socket.Message.beam new file mode 100644 index 0000000..74a5e94 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Inspect.Phoenix.Socket.Message.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Context.beam 
b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Context.beam new file mode 100644 index 0000000..8e17ddd Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Context.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Schema.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Schema.beam new file mode 100644 index 0000000..ab788ba Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Schema.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Scope.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Scope.beam new file mode 100644 index 0000000..4d86289 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.Scope.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.beam new file mode 100644 index 0000000..bc60179 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Phoenix.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Compile.Phoenix.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Compile.Phoenix.beam new file mode 100644 index 0000000..2ab41cb Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Compile.Phoenix.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.Clean.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.Clean.beam new file mode 100644 index 0000000..d793dc2 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.Clean.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.beam new file mode 100644 index 0000000..48cd56f Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Digest.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.HashingLibrary.beam 
b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.HashingLibrary.beam new file mode 100644 index 0000000..f674fc7 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.HashingLibrary.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Injector.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Injector.beam new file mode 100644 index 0000000..62922a7 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Injector.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Migration.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Migration.beam new file mode 100644 index 0000000..2d45b1c Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.Migration.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.beam new file mode 100644 index 0000000..946b265 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Auth.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Cert.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Cert.beam new file mode 100644 index 0000000..2070466 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Cert.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Channel.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Channel.beam new file mode 100644 index 0000000..649c91e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Channel.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Context.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Context.beam new file mode 100644 index 0000000..6760114 Binary files /dev/null and 
b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Context.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Embedded.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Embedded.beam new file mode 100644 index 0000000..354af9a Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Embedded.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Html.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Html.beam new file mode 100644 index 0000000..3cc112e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Html.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Json.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Json.beam new file mode 100644 index 0000000..dd34dc1 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Json.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Live.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Live.beam new file mode 100644 index 0000000..edcc07e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Live.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Notifier.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Notifier.beam new file mode 100644 index 0000000..800fa3a Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Notifier.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Presence.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Presence.beam new file mode 100644 index 0000000..c6cb012 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Presence.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Release.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Release.beam new file mode 100644 index 
0000000..020ceba Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Release.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Schema.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Schema.beam new file mode 100644 index 0000000..b3813ab Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Schema.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Secret.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Secret.beam new file mode 100644 index 0000000..5b766fa Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Secret.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Socket.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Socket.beam new file mode 100644 index 0000000..ed95707 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.Socket.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.beam new file mode 100644 index 0000000..9606e22 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Gen.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Routes.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Routes.beam new file mode 100644 index 0000000..abebbcb Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Routes.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Server.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Server.beam new file mode 100644 index 0000000..f916f02 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.Server.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.beam b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.beam new file mode 100644 index 0000000..b0ca6ce Binary files 
/dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Mix.Tasks.Phx.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ActionClauseError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ActionClauseError.beam new file mode 100644 index 0000000..d34be17 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ActionClauseError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.Server.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.Server.beam new file mode 100644 index 0000000..0fd8304 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.Server.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.beam new file mode 100644 index 0000000..b0059b9 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Channel.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.NoopSerializer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.NoopSerializer.beam new file mode 100644 index 0000000..99a95d2 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.NoopSerializer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.beam new file mode 100644 index 0000000..1453733 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ChannelTest.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.MixListener.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.MixListener.beam new file mode 100644 index 0000000..d752d45 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.MixListener.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Proxy.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Proxy.beam new file mode 100644 
index 0000000..8645e28 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Proxy.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Server.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Server.beam new file mode 100644 index 0000000..452f9bc Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.Server.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.beam new file mode 100644 index 0000000..7d5e73b Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.CodeReloader.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Config.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Config.beam new file mode 100644 index 0000000..a86c881 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Config.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ConnTest.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ConnTest.beam new file mode 100644 index 0000000..e673e7e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.ConnTest.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.Pipeline.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.Pipeline.beam new file mode 100644 index 0000000..19f55d0 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.Pipeline.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.beam new file mode 100644 index 0000000..812def2 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Controller.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Debug.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Debug.beam new file mode 100644 index 0000000..2f8b7b3 Binary files /dev/null and 
b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Debug.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Compressor.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Compressor.beam new file mode 100644 index 0000000..e6320d2 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Compressor.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Gzip.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Gzip.beam new file mode 100644 index 0000000..5d64f12 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.Gzip.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.beam new file mode 100644 index 0000000..4bedcd5 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Digester.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Cowboy2Adapter.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Cowboy2Adapter.beam new file mode 100644 index 0000000..e48a190 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Cowboy2Adapter.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.RenderErrors.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.RenderErrors.beam new file mode 100644 index 0000000..667479f Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.RenderErrors.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Supervisor.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Supervisor.beam new file mode 100644 index 0000000..b24bcfc Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Supervisor.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.SyncCodeReloadPlug.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.SyncCodeReloadPlug.beam new file 
mode 100644 index 0000000..28476d3 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.SyncCodeReloadPlug.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Watcher.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Watcher.beam new file mode 100644 index 0000000..af6ff94 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.Watcher.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.beam new file mode 100644 index 0000000..7cb6ad7 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Endpoint.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Flash.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Flash.beam new file mode 100644 index 0000000..ac92210 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Flash.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Logger.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Logger.beam new file mode 100644 index 0000000..57efc69 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Logger.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.MissingParamError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.MissingParamError.beam new file mode 100644 index 0000000..8d894a0 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.MissingParamError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Naming.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Naming.beam new file mode 100644 index 0000000..594c7ee Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Naming.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.NotAcceptableError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.NotAcceptableError.beam new file mode 100644 index 0000000..aaee80e Binary files /dev/null and 
b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.NotAcceptableError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Any.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Any.beam new file mode 100644 index 0000000..01a6e1a Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Any.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Atom.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Atom.beam new file mode 100644 index 0000000..a4bda04 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Atom.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.BitString.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.BitString.beam new file mode 100644 index 0000000..a42dc16 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.BitString.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Float.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Float.beam new file mode 100644 index 0000000..d04d18b Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Float.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Integer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Integer.beam new file mode 100644 index 0000000..25fb6d0 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Integer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Map.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Map.beam new file mode 100644 index 0000000..e97b359 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.Map.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.beam new file mode 100644 index 0000000..f31741f Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Param.beam differ diff --git 
a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.Tracker.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.Tracker.beam new file mode 100644 index 0000000..242aa32 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.Tracker.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.beam new file mode 100644 index 0000000..8d86c86 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Presence.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.ConsoleFormatter.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.ConsoleFormatter.beam new file mode 100644 index 0000000..91f8fa9 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.ConsoleFormatter.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Helpers.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Helpers.beam new file mode 100644 index 0000000..8773b63 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Helpers.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.MalformedURIError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.MalformedURIError.beam new file mode 100644 index 0000000..81ac78b Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.MalformedURIError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.NoRouteError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.NoRouteError.beam new file mode 100644 index 0000000..441d1eb Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.NoRouteError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Resource.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Resource.beam new file mode 100644 index 0000000..6ea76cf Binary files /dev/null and 
b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Resource.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Route.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Route.beam new file mode 100644 index 0000000..5887033 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Route.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Scope.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Scope.beam new file mode 100644 index 0000000..a80d808 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.Scope.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.beam new file mode 100644 index 0000000..881f344 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Router.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Broadcast.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Broadcast.beam new file mode 100644 index 0000000..142d42f Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Broadcast.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.InvalidMessageError.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.InvalidMessageError.beam new file mode 100644 index 0000000..0eeafd6 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.InvalidMessageError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Message.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Message.beam new file mode 100644 index 0000000..fc4f3d9 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Message.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolDrainer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolDrainer.beam new file mode 100644 index 0000000..940efb7 Binary files /dev/null 
and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolDrainer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolSupervisor.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolSupervisor.beam new file mode 100644 index 0000000..69eee90 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.PoolSupervisor.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Reply.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Reply.beam new file mode 100644 index 0000000..2ba106b Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Reply.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Serializer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Serializer.beam new file mode 100644 index 0000000..4c62f4e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Serializer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Transport.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Transport.beam new file mode 100644 index 0000000..92bb808 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.Transport.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V1.JSONSerializer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V1.JSONSerializer.beam new file mode 100644 index 0000000..11fbdd4 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V1.JSONSerializer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V2.JSONSerializer.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V2.JSONSerializer.beam new file mode 100644 index 0000000..ffa3787 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.V2.JSONSerializer.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.beam 
new file mode 100644 index 0000000..4fe4441 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Socket.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Token.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Token.beam new file mode 100644 index 0000000..ff7121e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Token.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.Server.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.Server.beam new file mode 100644 index 0000000..d1321dc Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.Server.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.beam new file mode 100644 index 0000000..e039a04 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.LongPoll.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.WebSocket.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.WebSocket.beam new file mode 100644 index 0000000..a0bc134 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.Transports.WebSocket.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.VerifiedRoutes.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.VerifiedRoutes.beam new file mode 100644 index 0000000..4da9d23 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.VerifiedRoutes.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.beam b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.beam new file mode 100644 index 0000000..07ed106 Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Phoenix.beam differ diff --git a/_build/dev/lib/phoenix/ebin/Elixir.Plug.Exception.Phoenix.ActionClauseError.beam 
b/_build/dev/lib/phoenix/ebin/Elixir.Plug.Exception.Phoenix.ActionClauseError.beam new file mode 100644 index 0000000..703ab2e Binary files /dev/null and b/_build/dev/lib/phoenix/ebin/Elixir.Plug.Exception.Phoenix.ActionClauseError.beam differ diff --git a/_build/dev/lib/phoenix/ebin/phoenix.app b/_build/dev/lib/phoenix/ebin/phoenix.app new file mode 100644 index 0000000..61b92e1 --- /dev/null +++ b/_build/dev/lib/phoenix/ebin/phoenix.app @@ -0,0 +1,102 @@ +{application,phoenix, + [{modules,['Elixir.Inspect.Phoenix.Socket.Message', + 'Elixir.Mix.Phoenix','Elixir.Mix.Phoenix.Context', + 'Elixir.Mix.Phoenix.Schema', + 'Elixir.Mix.Phoenix.Scope', + 'Elixir.Mix.Tasks.Compile.Phoenix', + 'Elixir.Mix.Tasks.Phx','Elixir.Mix.Tasks.Phx.Digest', + 'Elixir.Mix.Tasks.Phx.Digest.Clean', + 'Elixir.Mix.Tasks.Phx.Gen', + 'Elixir.Mix.Tasks.Phx.Gen.Auth', + 'Elixir.Mix.Tasks.Phx.Gen.Auth.HashingLibrary', + 'Elixir.Mix.Tasks.Phx.Gen.Auth.Injector', + 'Elixir.Mix.Tasks.Phx.Gen.Auth.Migration', + 'Elixir.Mix.Tasks.Phx.Gen.Cert', + 'Elixir.Mix.Tasks.Phx.Gen.Channel', + 'Elixir.Mix.Tasks.Phx.Gen.Context', + 'Elixir.Mix.Tasks.Phx.Gen.Embedded', + 'Elixir.Mix.Tasks.Phx.Gen.Html', + 'Elixir.Mix.Tasks.Phx.Gen.Json', + 'Elixir.Mix.Tasks.Phx.Gen.Live', + 'Elixir.Mix.Tasks.Phx.Gen.Notifier', + 'Elixir.Mix.Tasks.Phx.Gen.Presence', + 'Elixir.Mix.Tasks.Phx.Gen.Release', + 'Elixir.Mix.Tasks.Phx.Gen.Schema', + 'Elixir.Mix.Tasks.Phx.Gen.Secret', + 'Elixir.Mix.Tasks.Phx.Gen.Socket', + 'Elixir.Mix.Tasks.Phx.Routes', + 'Elixir.Mix.Tasks.Phx.Server','Elixir.Phoenix', + 'Elixir.Phoenix.ActionClauseError', + 'Elixir.Phoenix.Channel', + 'Elixir.Phoenix.Channel.Server', + 'Elixir.Phoenix.ChannelTest', + 'Elixir.Phoenix.ChannelTest.NoopSerializer', + 'Elixir.Phoenix.CodeReloader', + 'Elixir.Phoenix.CodeReloader.MixListener', + 'Elixir.Phoenix.CodeReloader.Proxy', + 'Elixir.Phoenix.CodeReloader.Server', + 'Elixir.Phoenix.Config','Elixir.Phoenix.ConnTest', + 'Elixir.Phoenix.Controller', + 
'Elixir.Phoenix.Controller.Pipeline', + 'Elixir.Phoenix.Debug','Elixir.Phoenix.Digester', + 'Elixir.Phoenix.Digester.Compressor', + 'Elixir.Phoenix.Digester.Gzip', + 'Elixir.Phoenix.Endpoint', + 'Elixir.Phoenix.Endpoint.Cowboy2Adapter', + 'Elixir.Phoenix.Endpoint.RenderErrors', + 'Elixir.Phoenix.Endpoint.Supervisor', + 'Elixir.Phoenix.Endpoint.SyncCodeReloadPlug', + 'Elixir.Phoenix.Endpoint.Watcher', + 'Elixir.Phoenix.Flash','Elixir.Phoenix.Logger', + 'Elixir.Phoenix.MissingParamError', + 'Elixir.Phoenix.Naming', + 'Elixir.Phoenix.NotAcceptableError', + 'Elixir.Phoenix.Param','Elixir.Phoenix.Param.Any', + 'Elixir.Phoenix.Param.Atom', + 'Elixir.Phoenix.Param.BitString', + 'Elixir.Phoenix.Param.Float', + 'Elixir.Phoenix.Param.Integer', + 'Elixir.Phoenix.Param.Map','Elixir.Phoenix.Presence', + 'Elixir.Phoenix.Presence.Tracker', + 'Elixir.Phoenix.Router', + 'Elixir.Phoenix.Router.ConsoleFormatter', + 'Elixir.Phoenix.Router.Helpers', + 'Elixir.Phoenix.Router.MalformedURIError', + 'Elixir.Phoenix.Router.NoRouteError', + 'Elixir.Phoenix.Router.Resource', + 'Elixir.Phoenix.Router.Route', + 'Elixir.Phoenix.Router.Scope','Elixir.Phoenix.Socket', + 'Elixir.Phoenix.Socket.Broadcast', + 'Elixir.Phoenix.Socket.InvalidMessageError', + 'Elixir.Phoenix.Socket.Message', + 'Elixir.Phoenix.Socket.PoolDrainer', + 'Elixir.Phoenix.Socket.PoolSupervisor', + 'Elixir.Phoenix.Socket.Reply', + 'Elixir.Phoenix.Socket.Serializer', + 'Elixir.Phoenix.Socket.Transport', + 'Elixir.Phoenix.Socket.V1.JSONSerializer', + 'Elixir.Phoenix.Socket.V2.JSONSerializer', + 'Elixir.Phoenix.Token', + 'Elixir.Phoenix.Transports.LongPoll', + 'Elixir.Phoenix.Transports.LongPoll.Server', + 'Elixir.Phoenix.Transports.WebSocket', + 'Elixir.Phoenix.VerifiedRoutes', + 'Elixir.Plug.Exception.Phoenix.ActionClauseError']}, + {optional_applications,[phoenix_view,plug_cowboy,bandit,jason]}, + {applications,[kernel,stdlib,elixir,logger,eex,crypto, + public_key,plug,plug_crypto,telemetry, + 
phoenix_pubsub,phoenix_template,websock_adapter, + phoenix_view,plug_cowboy,bandit,jason]}, + {description,"Peace of mind from prototype to production"}, + {registered,[]}, + {vsn,"1.8.7"}, + {mod,{'Elixir.Phoenix',[]}}, + {env,[{logger,true}, + {stacktrace_depth,nil}, + {filter_parameters,[<<"password">>,<<"token">>]}, + {serve_endpoints,false}, + {gzippable_exts,[<<".js">>,<<".map">>,<<".css">>, + <<".txt">>,<<".text">>,<<".html">>, + <<".json">>,<<".svg">>,<<".eot">>, + <<".ttf">>]}, + {static_compressors,['Elixir.Phoenix.Digester.Gzip']}]}]}. diff --git a/_build/dev/lib/phoenix/priv b/_build/dev/lib/phoenix/priv new file mode 120000 index 0000000..70b47a8 --- /dev/null +++ b/_build/dev/lib/phoenix/priv @@ -0,0 +1 @@ +../../../../deps/phoenix/priv \ No newline at end of file diff --git a/_build/dev/lib/phoenix_html/.mix/compile.elixir b/_build/dev/lib/phoenix_html/.mix/compile.elixir new file mode 100644 index 0000000..4a207fb Binary files /dev/null and b/_build/dev/lib/phoenix_html/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix_html/.mix/compile.elixir_scm b/_build/dev/lib/phoenix_html/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix_html/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix_html/.mix/compile.fetch b/_build/dev/lib/phoenix_html/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Engine.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Engine.beam new file mode 100644 index 0000000..d368d55 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Engine.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Form.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Form.beam new file mode 100644 index 0000000..c6430c4 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Form.beam 
differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.Map.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.Map.beam new file mode 100644 index 0000000..bc1b6af Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.Map.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.beam new file mode 100644 index 0000000..102955b Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormData.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormField.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormField.beam new file mode 100644 index 0000000..999bc45 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.FormField.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Atom.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Atom.beam new file mode 100644 index 0000000..b7b2e73 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Atom.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.BitString.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.BitString.beam new file mode 100644 index 0000000..a51d0a9 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.BitString.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Date.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Date.beam new file mode 100644 index 0000000..c1cce73 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Date.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.DateTime.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.DateTime.beam new file mode 
100644 index 0000000..36a7e3b Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.DateTime.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Duration.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Duration.beam new file mode 100644 index 0000000..06b1c22 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Duration.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Float.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Float.beam new file mode 100644 index 0000000..7ae95fb Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Float.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Integer.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Integer.beam new file mode 100644 index 0000000..d3ac00a Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Integer.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.List.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.List.beam new file mode 100644 index 0000000..ba607c4 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.List.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.NaiveDateTime.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.NaiveDateTime.beam new file mode 100644 index 0000000..d71388f Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.NaiveDateTime.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Time.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Time.beam new file mode 100644 index 0000000..75e340b Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Time.beam differ diff --git 
a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Tuple.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Tuple.beam new file mode 100644 index 0000000..1de7b4c Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.Tuple.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.URI.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.URI.beam new file mode 100644 index 0000000..9067604 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.URI.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.beam new file mode 100644 index 0000000..aeaa8f9 Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.Safe.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.beam b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.beam new file mode 100644 index 0000000..ad9945a Binary files /dev/null and b/_build/dev/lib/phoenix_html/ebin/Elixir.Phoenix.HTML.beam differ diff --git a/_build/dev/lib/phoenix_html/ebin/phoenix_html.app b/_build/dev/lib/phoenix_html/ebin/phoenix_html.app new file mode 100644 index 0000000..cedcb70 --- /dev/null +++ b/_build/dev/lib/phoenix_html/ebin/phoenix_html.app @@ -0,0 +1,24 @@ +{application,phoenix_html, + [{modules,['Elixir.Phoenix.HTML','Elixir.Phoenix.HTML.Engine', + 'Elixir.Phoenix.HTML.Form', + 'Elixir.Phoenix.HTML.FormData', + 'Elixir.Phoenix.HTML.FormData.Map', + 'Elixir.Phoenix.HTML.FormField', + 'Elixir.Phoenix.HTML.Safe', + 'Elixir.Phoenix.HTML.Safe.Atom', + 'Elixir.Phoenix.HTML.Safe.BitString', + 'Elixir.Phoenix.HTML.Safe.Date', + 'Elixir.Phoenix.HTML.Safe.DateTime', + 'Elixir.Phoenix.HTML.Safe.Duration', + 'Elixir.Phoenix.HTML.Safe.Float', + 'Elixir.Phoenix.HTML.Safe.Integer', + 'Elixir.Phoenix.HTML.Safe.List', + 'Elixir.Phoenix.HTML.Safe.NaiveDateTime', + 
'Elixir.Phoenix.HTML.Safe.Time', + 'Elixir.Phoenix.HTML.Safe.Tuple', + 'Elixir.Phoenix.HTML.Safe.URI']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,eex,logger]}, + {description,"Phoenix view functions for working with HTML templates"}, + {registered,[]}, + {vsn,"4.3.0"}]}. diff --git a/_build/dev/lib/phoenix_html/priv b/_build/dev/lib/phoenix_html/priv new file mode 120000 index 0000000..3955d72 --- /dev/null +++ b/_build/dev/lib/phoenix_html/priv @@ -0,0 +1 @@ +../../../../deps/phoenix_html/priv \ No newline at end of file diff --git a/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir b/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir new file mode 100644 index 0000000..5057b79 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir_scm b/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix_live_reload/.mix/compile.fetch b/_build/dev/lib/phoenix_live_reload/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Application.beam b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Application.beam new file mode 100644 index 0000000..c46d18b Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Application.beam differ diff --git a/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Channel.beam b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Channel.beam new file mode 100644 index 0000000..d1278d7 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Channel.beam differ diff --git 
a/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Socket.beam b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Socket.beam new file mode 100644 index 0000000..674f856 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.Socket.beam differ diff --git a/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.WebConsoleLogger.beam b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.WebConsoleLogger.beam new file mode 100644 index 0000000..90c8888 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.WebConsoleLogger.beam differ diff --git a/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.beam b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.beam new file mode 100644 index 0000000..95906c2 Binary files /dev/null and b/_build/dev/lib/phoenix_live_reload/ebin/Elixir.Phoenix.LiveReloader.beam differ diff --git a/_build/dev/lib/phoenix_live_reload/ebin/phoenix_live_reload.app b/_build/dev/lib/phoenix_live_reload/ebin/phoenix_live_reload.app new file mode 100644 index 0000000..12aa36e --- /dev/null +++ b/_build/dev/lib/phoenix_live_reload/ebin/phoenix_live_reload.app @@ -0,0 +1,12 @@ +{application,phoenix_live_reload, + [{modules,['Elixir.Phoenix.LiveReloader', + 'Elixir.Phoenix.LiveReloader.Application', + 'Elixir.Phoenix.LiveReloader.Channel', + 'Elixir.Phoenix.LiveReloader.Socket', + 'Elixir.Phoenix.LiveReloader.WebConsoleLogger']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,phoenix,file_system]}, + {description,"Provides live-reload functionality for Phoenix"}, + {registered,[]}, + {vsn,"1.6.2"}, + {mod,{'Elixir.Phoenix.LiveReloader.Application',[]}}]}. 
diff --git a/_build/dev/lib/phoenix_live_reload/priv b/_build/dev/lib/phoenix_live_reload/priv new file mode 120000 index 0000000..d6a5861 --- /dev/null +++ b/_build/dev/lib/phoenix_live_reload/priv @@ -0,0 +1 @@ +../../../../deps/phoenix_live_reload/priv \ No newline at end of file diff --git a/_build/dev/lib/phoenix_live_view/.mix/compile.elixir b/_build/dev/lib/phoenix_live_view/.mix/compile.elixir new file mode 100644 index 0000000..081d8ce Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix_live_view/.mix/compile.elixir_scm b/_build/dev/lib/phoenix_live_view/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix_live_view/.mix/compile.fetch b/_build/dev/lib/phoenix_live_view/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Enumerable.Phoenix.LiveView.LiveStream.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Enumerable.Phoenix.LiveView.LiveStream.beam new file mode 100644 index 0000000..c3ba7b5 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Enumerable.Phoenix.LiveView.LiveStream.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.AssignsNotInSocket.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.AssignsNotInSocket.beam new file mode 100644 index 0000000..350c678 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.AssignsNotInSocket.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.beam new file mode 100644 index 0000000..5330bb7 Binary files /dev/null and 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.Socket.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.UploadConfig.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.UploadConfig.beam new file mode 100644 index 0000000..f7dcfda Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveView.UploadConfig.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Element.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Element.beam new file mode 100644 index 0000000..288c7d9 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Element.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Upload.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Upload.beam new file mode 100644 index 0000000..f1a4bf6 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.Upload.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.View.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.View.beam new file mode 100644 index 0000000..306b855 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Inspect.Phoenix.LiveViewTest.View.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.Compile.PhoenixLiveView.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.Compile.PhoenixLiveView.beam new file mode 100644 index 0000000..96a24cf Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.Compile.PhoenixLiveView.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.PhoenixLiveView.Upgrade.beam 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.PhoenixLiveView.Upgrade.beam new file mode 100644 index 0000000..a544e4d Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Mix.Tasks.PhoenixLiveView.Upgrade.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.Declarative.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.Declarative.beam new file mode 100644 index 0000000..2bccb3c Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.Declarative.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.MacroComponent.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.MacroComponent.beam new file mode 100644 index 0000000..bddadac Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.MacroComponent.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.beam new file mode 100644 index 0000000..345c74d Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.Component.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveComponent.CID.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveComponent.CID.beam new file mode 100644 index 0000000..5599695 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveComponent.CID.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Component.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Component.beam new file mode 100644 index 0000000..1518d34 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Component.beam differ diff --git 
a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Comprehension.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Comprehension.beam new file mode 100644 index 0000000..6bd18a8 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Comprehension.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.JS.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.JS.beam new file mode 100644 index 0000000..ca9a055 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.JS.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Rendered.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Rendered.beam new file mode 100644 index 0000000..92a353d Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Rendered.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.CID.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.CID.beam new file mode 100644 index 0000000..12dc479 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.CID.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.beam new file mode 100644 index 0000000..01d4001 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveComponent.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Application.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Application.beam new file mode 100644 index 0000000..f12c922 Binary files /dev/null and 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Application.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Async.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Async.beam new file mode 100644 index 0000000..4f34933 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Async.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.AsyncResult.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.AsyncResult.beam new file mode 100644 index 0000000..288a28e Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.AsyncResult.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Channel.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Channel.beam new file mode 100644 index 0000000..b048f2b Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Channel.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedHook.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedHook.beam new file mode 100644 index 0000000..1aa8283 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedHook.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedJS.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedJS.beam new file mode 100644 index 0000000..e0055d7 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ColocatedJS.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Component.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Component.beam new file mode 100644 index 0000000..092963e Binary files /dev/null and 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Component.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Comprehension.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Comprehension.beam new file mode 100644 index 0000000..5cf82c8 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Comprehension.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Controller.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Controller.beam new file mode 100644 index 0000000..bade1bf Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Controller.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Debug.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Debug.beam new file mode 100644 index 0000000..6b07ddb Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Debug.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Diff.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Diff.beam new file mode 100644 index 0000000..de99fd0 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Diff.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Engine.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Engine.beam new file mode 100644 index 0000000..b179435 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Engine.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLAlgebra.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLAlgebra.beam new file mode 100644 index 0000000..ec595eb Binary files /dev/null and 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLAlgebra.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLEngine.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLEngine.beam new file mode 100644 index 0000000..3e4db4c Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLEngine.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLFormatter.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLFormatter.beam new file mode 100644 index 0000000..5e010d3 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.HTMLFormatter.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Helpers.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Helpers.beam new file mode 100644 index 0000000..ee4a684 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Helpers.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.JS.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.JS.beam new file mode 100644 index 0000000..4d12923 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.JS.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Lifecycle.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Lifecycle.beam new file mode 100644 index 0000000..609b155 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Lifecycle.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.LiveStream.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.LiveStream.beam new file mode 100644 index 0000000..4ad9276 Binary files /dev/null and 
b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.LiveStream.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Logger.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Logger.beam new file mode 100644 index 0000000..ff9d050 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Logger.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Plug.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Plug.beam new file mode 100644 index 0000000..1057a7d Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Plug.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ReloadError.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ReloadError.beam new file mode 100644 index 0000000..1cebd6c Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.ReloadError.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Rendered.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Rendered.beam new file mode 100644 index 0000000..e432173 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Rendered.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Renderer.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Renderer.beam new file mode 100644 index 0000000..33c2b76 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Renderer.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Route.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Route.beam new file mode 100644 index 0000000..33b2eea Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Route.beam 
differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Router.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Router.beam new file mode 100644 index 0000000..789cd87 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Router.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Session.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Session.beam new file mode 100644 index 0000000..d006256 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Session.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.AssignsNotInSocket.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.AssignsNotInSocket.beam new file mode 100644 index 0000000..3a792c4 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.AssignsNotInSocket.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.beam new file mode 100644 index 0000000..923aac9 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Socket.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Static.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Static.beam new file mode 100644 index 0000000..a1206fc Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Static.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.TagEngine.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.TagEngine.beam new file mode 100644 index 0000000..a26f1f1 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.TagEngine.beam differ diff --git 
a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.ParseError.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.ParseError.beam new file mode 100644 index 0000000..e35b401 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.ParseError.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.beam new file mode 100644 index 0000000..92a1264 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Tokenizer.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Upload.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Upload.beam new file mode 100644 index 0000000..1c6d812 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Upload.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadChannel.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadChannel.beam new file mode 100644 index 0000000..59dc510 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadChannel.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadConfig.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadConfig.beam new file mode 100644 index 0000000..ace91ca Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadConfig.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadEntry.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadEntry.beam new file mode 100644 index 0000000..8249a30 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadEntry.beam differ 
diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadTmpFileWriter.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadTmpFileWriter.beam new file mode 100644 index 0000000..d2ffd44 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadTmpFileWriter.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadWriter.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadWriter.beam new file mode 100644 index 0000000..fd3ce23 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.UploadWriter.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Utils.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Utils.beam new file mode 100644 index 0000000..b36b6b3 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.Utils.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.beam new file mode 100644 index 0000000..27e9cec Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveView.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.ClientProxy.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.ClientProxy.beam new file mode 100644 index 0000000..bc51a02 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.ClientProxy.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.DOM.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.DOM.beam new file mode 100644 index 0000000..c5a0d08 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.DOM.beam differ diff --git 
a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Diff.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Diff.beam new file mode 100644 index 0000000..d961dac Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Diff.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Element.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Element.beam new file mode 100644 index 0000000..a12063f Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Element.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.TreeDOM.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.TreeDOM.beam new file mode 100644 index 0000000..a83fea5 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.TreeDOM.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Upload.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Upload.beam new file mode 100644 index 0000000..a41d1e9 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Upload.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.UploadClient.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.UploadClient.beam new file mode 100644 index 0000000..1cb1269 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.UploadClient.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Utils.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Utils.beam new file mode 100644 index 0000000..10ff735 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.Utils.beam differ diff --git 
a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.View.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.View.beam new file mode 100644 index 0000000..969635d Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.View.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.beam new file mode 100644 index 0000000..87f07f3 Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.Phoenix.LiveViewTest.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/Elixir.String.Chars.Phoenix.LiveComponent.CID.beam b/_build/dev/lib/phoenix_live_view/ebin/Elixir.String.Chars.Phoenix.LiveComponent.CID.beam new file mode 100644 index 0000000..a9abcec Binary files /dev/null and b/_build/dev/lib/phoenix_live_view/ebin/Elixir.String.Chars.Phoenix.LiveComponent.CID.beam differ diff --git a/_build/dev/lib/phoenix_live_view/ebin/phoenix_live_view.app b/_build/dev/lib/phoenix_live_view/ebin/phoenix_live_view.app new file mode 100644 index 0000000..c704513 --- /dev/null +++ b/_build/dev/lib/phoenix_live_view/ebin/phoenix_live_view.app @@ -0,0 +1,75 @@ +{application,phoenix_live_view, + [{modules, + ['Elixir.Enumerable.Phoenix.LiveView.LiveStream', + 'Elixir.Inspect.Phoenix.LiveView.Socket', + 'Elixir.Inspect.Phoenix.LiveView.Socket.AssignsNotInSocket', + 'Elixir.Inspect.Phoenix.LiveView.UploadConfig', + 'Elixir.Inspect.Phoenix.LiveViewTest.Element', + 'Elixir.Inspect.Phoenix.LiveViewTest.Upload', + 'Elixir.Inspect.Phoenix.LiveViewTest.View', + 'Elixir.Mix.Tasks.Compile.PhoenixLiveView', + 'Elixir.Mix.Tasks.PhoenixLiveView.Upgrade', + 'Elixir.Phoenix.Component','Elixir.Phoenix.Component.Declarative', + 'Elixir.Phoenix.Component.MacroComponent', + 'Elixir.Phoenix.HTML.Safe.Phoenix.LiveComponent.CID', + 'Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Component', + 
'Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Comprehension', + 'Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.JS', + 'Elixir.Phoenix.HTML.Safe.Phoenix.LiveView.Rendered', + 'Elixir.Phoenix.LiveComponent','Elixir.Phoenix.LiveComponent.CID', + 'Elixir.Phoenix.LiveView','Elixir.Phoenix.LiveView.Application', + 'Elixir.Phoenix.LiveView.Async', + 'Elixir.Phoenix.LiveView.AsyncResult', + 'Elixir.Phoenix.LiveView.Channel', + 'Elixir.Phoenix.LiveView.ColocatedHook', + 'Elixir.Phoenix.LiveView.ColocatedJS', + 'Elixir.Phoenix.LiveView.Component', + 'Elixir.Phoenix.LiveView.Comprehension', + 'Elixir.Phoenix.LiveView.Controller', + 'Elixir.Phoenix.LiveView.Debug','Elixir.Phoenix.LiveView.Diff', + 'Elixir.Phoenix.LiveView.Engine', + 'Elixir.Phoenix.LiveView.HTMLAlgebra', + 'Elixir.Phoenix.LiveView.HTMLEngine', + 'Elixir.Phoenix.LiveView.HTMLFormatter', + 'Elixir.Phoenix.LiveView.Helpers','Elixir.Phoenix.LiveView.JS', + 'Elixir.Phoenix.LiveView.Lifecycle', + 'Elixir.Phoenix.LiveView.LiveStream', + 'Elixir.Phoenix.LiveView.Logger','Elixir.Phoenix.LiveView.Plug', + 'Elixir.Phoenix.LiveView.ReloadError', + 'Elixir.Phoenix.LiveView.Rendered', + 'Elixir.Phoenix.LiveView.Renderer','Elixir.Phoenix.LiveView.Route', + 'Elixir.Phoenix.LiveView.Router','Elixir.Phoenix.LiveView.Session', + 'Elixir.Phoenix.LiveView.Socket', + 'Elixir.Phoenix.LiveView.Socket.AssignsNotInSocket', + 'Elixir.Phoenix.LiveView.Static', + 'Elixir.Phoenix.LiveView.TagEngine', + 'Elixir.Phoenix.LiveView.Tokenizer', + 'Elixir.Phoenix.LiveView.Tokenizer.ParseError', + 'Elixir.Phoenix.LiveView.Upload', + 'Elixir.Phoenix.LiveView.UploadChannel', + 'Elixir.Phoenix.LiveView.UploadConfig', + 'Elixir.Phoenix.LiveView.UploadEntry', + 'Elixir.Phoenix.LiveView.UploadTmpFileWriter', + 'Elixir.Phoenix.LiveView.UploadWriter', + 'Elixir.Phoenix.LiveView.Utils','Elixir.Phoenix.LiveViewTest', + 'Elixir.Phoenix.LiveViewTest.ClientProxy', + 'Elixir.Phoenix.LiveViewTest.DOM', + 'Elixir.Phoenix.LiveViewTest.Diff', + 
'Elixir.Phoenix.LiveViewTest.Element', + 'Elixir.Phoenix.LiveViewTest.TreeDOM', + 'Elixir.Phoenix.LiveViewTest.Upload', + 'Elixir.Phoenix.LiveViewTest.UploadClient', + 'Elixir.Phoenix.LiveViewTest.Utils', + 'Elixir.Phoenix.LiveViewTest.View', + 'Elixir.String.Chars.Phoenix.LiveComponent.CID']}, + {compile_env, + [{phoenix_live_view,[enable_expensive_runtime_checks],error}]}, + {optional_applications,[igniter,phoenix_view,jason,lazy_html]}, + {applications, + [kernel,stdlib,elixir,logger,igniter,phoenix,plug,phoenix_template, + phoenix_html,telemetry,phoenix_view,jason,lazy_html]}, + {description, + "Rich, real-time user experiences with server-rendered HTML\n"}, + {registered,[]}, + {vsn,"1.1.30"}, + {mod,{'Elixir.Phoenix.LiveView.Application',[]}}]}. diff --git a/_build/dev/lib/phoenix_live_view/priv b/_build/dev/lib/phoenix_live_view/priv new file mode 120000 index 0000000..7a8068e --- /dev/null +++ b/_build/dev/lib/phoenix_live_view/priv @@ -0,0 +1 @@ +../../../../deps/phoenix_live_view/priv \ No newline at end of file diff --git a/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir b/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir new file mode 100644 index 0000000..1aa13a4 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir_scm b/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix_pubsub/.mix/compile.fetch b/_build/dev/lib/phoenix_pubsub/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Adapter.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Adapter.beam new file mode 100644 index 0000000..da207df Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Adapter.beam 
differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Application.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Application.beam new file mode 100644 index 0000000..4f5bbcf Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Application.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.BroadcastError.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.BroadcastError.beam new file mode 100644 index 0000000..6e51692 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.BroadcastError.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2.beam new file mode 100644 index 0000000..b3c7163 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2Worker.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2Worker.beam new file mode 100644 index 0000000..6b0c5dc Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.PG2Worker.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Supervisor.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Supervisor.beam new file mode 100644 index 0000000..9cdc3dd Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.Supervisor.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.beam new file mode 100644 index 0000000..a1100e5 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.PubSub.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Clock.beam 
b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Clock.beam new file mode 100644 index 0000000..3e88dce Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Clock.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.DeltaGeneration.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.DeltaGeneration.beam new file mode 100644 index 0000000..7b49e94 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.DeltaGeneration.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Replica.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Replica.beam new file mode 100644 index 0000000..3f36a05 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Replica.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Shard.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Shard.beam new file mode 100644 index 0000000..47c35f2 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.Shard.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.ShutdownHandler.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.ShutdownHandler.beam new file mode 100644 index 0000000..41ef84c Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.ShutdownHandler.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.State.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.State.beam new file mode 100644 index 0000000..a53ae71 Binary files /dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.State.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.beam b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.beam new file mode 100644 index 0000000..cb2df70 Binary files 
/dev/null and b/_build/dev/lib/phoenix_pubsub/ebin/Elixir.Phoenix.Tracker.beam differ diff --git a/_build/dev/lib/phoenix_pubsub/ebin/phoenix_pubsub.app b/_build/dev/lib/phoenix_pubsub/ebin/phoenix_pubsub.app new file mode 100644 index 0000000..de04bc9 --- /dev/null +++ b/_build/dev/lib/phoenix_pubsub/ebin/phoenix_pubsub.app @@ -0,0 +1,21 @@ +{application,phoenix_pubsub, + [{modules,['Elixir.Phoenix.PubSub', + 'Elixir.Phoenix.PubSub.Adapter', + 'Elixir.Phoenix.PubSub.Application', + 'Elixir.Phoenix.PubSub.BroadcastError', + 'Elixir.Phoenix.PubSub.PG2', + 'Elixir.Phoenix.PubSub.PG2Worker', + 'Elixir.Phoenix.PubSub.Supervisor', + 'Elixir.Phoenix.Tracker', + 'Elixir.Phoenix.Tracker.Clock', + 'Elixir.Phoenix.Tracker.DeltaGeneration', + 'Elixir.Phoenix.Tracker.Replica', + 'Elixir.Phoenix.Tracker.Shard', + 'Elixir.Phoenix.Tracker.ShutdownHandler', + 'Elixir.Phoenix.Tracker.State']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,crypto]}, + {description,"Distributed PubSub and Presence platform"}, + {registered,[]}, + {vsn,"2.2.0"}, + {mod,{'Elixir.Phoenix.PubSub.Application',[]}}]}. 
diff --git a/_build/dev/lib/phoenix_template/.mix/compile.elixir b/_build/dev/lib/phoenix_template/.mix/compile.elixir new file mode 100644 index 0000000..e62c700 Binary files /dev/null and b/_build/dev/lib/phoenix_template/.mix/compile.elixir differ diff --git a/_build/dev/lib/phoenix_template/.mix/compile.elixir_scm b/_build/dev/lib/phoenix_template/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/phoenix_template/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/phoenix_template/.mix/compile.fetch b/_build/dev/lib/phoenix_template/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.EExEngine.beam b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.EExEngine.beam new file mode 100644 index 0000000..05a75ac Binary files /dev/null and b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.EExEngine.beam differ diff --git a/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.Engine.beam b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.Engine.beam new file mode 100644 index 0000000..bc55832 Binary files /dev/null and b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.Engine.beam differ diff --git a/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.ExsEngine.beam b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.ExsEngine.beam new file mode 100644 index 0000000..2289594 Binary files /dev/null and b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.ExsEngine.beam differ diff --git a/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.beam b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.beam new file mode 100644 index 0000000..4e735b3 Binary files /dev/null and b/_build/dev/lib/phoenix_template/ebin/Elixir.Phoenix.Template.beam differ diff --git a/_build/dev/lib/phoenix_template/ebin/phoenix_template.app 
b/_build/dev/lib/phoenix_template/ebin/phoenix_template.app new file mode 100644 index 0000000..d10dd36 --- /dev/null +++ b/_build/dev/lib/phoenix_template/ebin/phoenix_template.app @@ -0,0 +1,10 @@ +{application,phoenix_template, + [{modules,['Elixir.Phoenix.Template', + 'Elixir.Phoenix.Template.EExEngine', + 'Elixir.Phoenix.Template.Engine', + 'Elixir.Phoenix.Template.ExsEngine']}, + {optional_applications,[phoenix_html]}, + {applications,[kernel,stdlib,elixir,eex,phoenix_html]}, + {description,"Template rendering for Phoenix"}, + {registered,[]}, + {vsn,"1.0.4"}]}. diff --git a/_build/dev/lib/plug/.mix/compile.elixir b/_build/dev/lib/plug/.mix/compile.elixir new file mode 100644 index 0000000..3c19546 Binary files /dev/null and b/_build/dev/lib/plug/.mix/compile.elixir differ diff --git a/_build/dev/lib/plug/.mix/compile.elixir_scm b/_build/dev/lib/plug/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/plug/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/plug/.mix/compile.erlang b/_build/dev/lib/plug/.mix/compile.erlang new file mode 100644 index 0000000..a3d5e27 Binary files /dev/null and b/_build/dev/lib/plug/.mix/compile.erlang differ diff --git a/_build/dev/lib/plug/.mix/compile.fetch b/_build/dev/lib/plug/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/plug/ebin/Elixir.Inspect.Plug.Conn.beam b/_build/dev/lib/plug/ebin/Elixir.Inspect.Plug.Conn.beam new file mode 100644 index 0000000..fa095b8 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Inspect.Plug.Conn.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Cowboy.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Cowboy.beam new file mode 100644 index 0000000..579c748 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Cowboy.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Test.Conn.beam 
b/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Test.Conn.beam new file mode 100644 index 0000000..60d02d3 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Adapters.Test.Conn.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Application.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Application.beam new file mode 100644 index 0000000..a398a05 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Application.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.BadRequestError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.BadRequestError.beam new file mode 100644 index 0000000..cafd8e2 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.BadRequestError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.BasicAuth.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.BasicAuth.beam new file mode 100644 index 0000000..89219c5 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.BasicAuth.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Builder.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Builder.beam new file mode 100644 index 0000000..9cb81b3 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Builder.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCSRFTokenError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCSRFTokenError.beam new file mode 100644 index 0000000..ce8b9b4 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCSRFTokenError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCrossOriginRequestError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCrossOriginRequestError.beam new file mode 100644 index 0000000..31e3baa Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.InvalidCrossOriginRequestError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.beam 
b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.beam new file mode 100644 index 0000000..71826fa Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.CSRFProtection.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Adapter.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Adapter.beam new file mode 100644 index 0000000..7aaf98c Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Adapter.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.AlreadySentError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.AlreadySentError.beam new file mode 100644 index 0000000..2bba36d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.AlreadySentError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.CookieOverflowError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.CookieOverflowError.beam new file mode 100644 index 0000000..92838c9 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.CookieOverflowError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Cookies.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Cookies.beam new file mode 100644 index 0000000..9b7c556 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Cookies.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidHeaderError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidHeaderError.beam new file mode 100644 index 0000000..4e91fe9 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidHeaderError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidQueryError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidQueryError.beam new file mode 100644 index 0000000..ea4ffbc Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.InvalidQueryError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.NotSentError.beam 
b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.NotSentError.beam new file mode 100644 index 0000000..9d14ddc Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.NotSentError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Query.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Query.beam new file mode 100644 index 0000000..787b39f Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Query.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Status.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Status.beam new file mode 100644 index 0000000..366cd3d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Status.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Unfetched.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Unfetched.beam new file mode 100644 index 0000000..6b0f486 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Unfetched.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Utils.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Utils.beam new file mode 100644 index 0000000..c363c75 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.Utils.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.WrapperError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.WrapperError.beam new file mode 100644 index 0000000..a4d447c Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.WrapperError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.beam new file mode 100644 index 0000000..93a6365 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Conn.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Debugger.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Debugger.beam new file mode 100644 index 0000000..92c6b6d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Debugger.beam differ diff 
--git a/_build/dev/lib/plug/ebin/Elixir.Plug.ErrorHandler.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.ErrorHandler.beam new file mode 100644 index 0000000..0e84c6b Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.ErrorHandler.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.Any.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.Any.beam new file mode 100644 index 0000000..ebb88ea Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.Any.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.beam new file mode 100644 index 0000000..791a74d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Exception.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.HTML.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.HTML.beam new file mode 100644 index 0000000..c70c39e Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.HTML.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Head.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Head.beam new file mode 100644 index 0000000..6bbbd5b Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Head.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Logger.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Logger.beam new file mode 100644 index 0000000..8ea438d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Logger.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.MIME.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.MIME.beam new file mode 100644 index 0000000..f91111b Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.MIME.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.MethodOverride.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.MethodOverride.beam new file mode 100644 index 0000000..b7c9b39 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.MethodOverride.beam differ diff --git 
a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.BadEncodingError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.BadEncodingError.beam new file mode 100644 index 0000000..3802017 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.BadEncodingError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.JSON.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.JSON.beam new file mode 100644 index 0000000..948e1df Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.JSON.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.MULTIPART.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.MULTIPART.beam new file mode 100644 index 0000000..a642149 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.MULTIPART.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.ParseError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.ParseError.beam new file mode 100644 index 0000000..f6265b7 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.ParseError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.RequestTooLargeError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.RequestTooLargeError.beam new file mode 100644 index 0000000..73a1914 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.RequestTooLargeError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.URLENCODED.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.URLENCODED.beam new file mode 100644 index 0000000..77578e8 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.URLENCODED.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.UnsupportedMediaTypeError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.UnsupportedMediaTypeError.beam new file mode 100644 index 0000000..0753380 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.UnsupportedMediaTypeError.beam 
differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.beam new file mode 100644 index 0000000..f9a08d7 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Parsers.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.RequestId.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.RequestId.beam new file mode 100644 index 0000000..534e08a Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.RequestId.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.RewriteOn.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.RewriteOn.beam new file mode 100644 index 0000000..c80a7c0 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.RewriteOn.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Router.InvalidSpecError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.InvalidSpecError.beam new file mode 100644 index 0000000..d14dd8a Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.InvalidSpecError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Router.MalformedURIError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.MalformedURIError.beam new file mode 100644 index 0000000..98a4c58 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.MalformedURIError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Router.Utils.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.Utils.beam new file mode 100644 index 0000000..05bc2e0 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.Utils.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Router.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.beam new file mode 100644 index 0000000..6dc0c65 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Router.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.SSL.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.SSL.beam new file mode 100644 index 0000000..172360c 
Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.SSL.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Session.COOKIE.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.COOKIE.beam new file mode 100644 index 0000000..8c580da Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.COOKIE.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Session.ETS.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.ETS.beam new file mode 100644 index 0000000..7f18c98 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.ETS.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Session.Store.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.Store.beam new file mode 100644 index 0000000..3ac2f26 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.Store.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Session.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.beam new file mode 100644 index 0000000..90410cf Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Session.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Static.InvalidPathError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Static.InvalidPathError.beam new file mode 100644 index 0000000..c4c4e1a Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Static.InvalidPathError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Static.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Static.beam new file mode 100644 index 0000000..6ebfd26 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Static.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Telemetry.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Telemetry.beam new file mode 100644 index 0000000..d511492 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Telemetry.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Test.beam 
b/_build/dev/lib/plug/ebin/Elixir.Plug.Test.beam new file mode 100644 index 0000000..7041fc6 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Test.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.TimeoutError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.TimeoutError.beam new file mode 100644 index 0000000..d590cea Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.TimeoutError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Supervisor.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Supervisor.beam new file mode 100644 index 0000000..1e9640d Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Supervisor.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Terminator.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Terminator.beam new file mode 100644 index 0000000..f1ce7b4 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.Terminator.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.beam new file mode 100644 index 0000000..156fd38 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.Upload.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.UploadError.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.UploadError.beam new file mode 100644 index 0000000..8d69e43 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.UploadError.beam differ diff --git a/_build/dev/lib/plug/ebin/Elixir.Plug.beam b/_build/dev/lib/plug/ebin/Elixir.Plug.beam new file mode 100644 index 0000000..d244e31 Binary files /dev/null and b/_build/dev/lib/plug/ebin/Elixir.Plug.beam differ diff --git a/_build/dev/lib/plug/ebin/plug.app b/_build/dev/lib/plug/ebin/plug.app new file mode 100644 index 0000000..d3bef94 --- /dev/null +++ b/_build/dev/lib/plug/ebin/plug.app @@ -0,0 +1,54 @@ +{application,plug, + [{modules,['Elixir.Inspect.Plug.Conn','Elixir.Plug', + 
'Elixir.Plug.Adapters.Cowboy', + 'Elixir.Plug.Adapters.Test.Conn', + 'Elixir.Plug.Application', + 'Elixir.Plug.BadRequestError','Elixir.Plug.BasicAuth', + 'Elixir.Plug.Builder','Elixir.Plug.CSRFProtection', + 'Elixir.Plug.CSRFProtection.InvalidCSRFTokenError', + 'Elixir.Plug.CSRFProtection.InvalidCrossOriginRequestError', + 'Elixir.Plug.Conn','Elixir.Plug.Conn.Adapter', + 'Elixir.Plug.Conn.AlreadySentError', + 'Elixir.Plug.Conn.CookieOverflowError', + 'Elixir.Plug.Conn.Cookies', + 'Elixir.Plug.Conn.InvalidHeaderError', + 'Elixir.Plug.Conn.InvalidQueryError', + 'Elixir.Plug.Conn.NotSentError', + 'Elixir.Plug.Conn.Query','Elixir.Plug.Conn.Status', + 'Elixir.Plug.Conn.Unfetched','Elixir.Plug.Conn.Utils', + 'Elixir.Plug.Conn.WrapperError', + 'Elixir.Plug.Debugger','Elixir.Plug.ErrorHandler', + 'Elixir.Plug.Exception','Elixir.Plug.Exception.Any', + 'Elixir.Plug.HTML','Elixir.Plug.Head', + 'Elixir.Plug.Logger','Elixir.Plug.MIME', + 'Elixir.Plug.MethodOverride','Elixir.Plug.Parsers', + 'Elixir.Plug.Parsers.BadEncodingError', + 'Elixir.Plug.Parsers.JSON', + 'Elixir.Plug.Parsers.MULTIPART', + 'Elixir.Plug.Parsers.ParseError', + 'Elixir.Plug.Parsers.RequestTooLargeError', + 'Elixir.Plug.Parsers.URLENCODED', + 'Elixir.Plug.Parsers.UnsupportedMediaTypeError', + 'Elixir.Plug.RequestId','Elixir.Plug.RewriteOn', + 'Elixir.Plug.Router', + 'Elixir.Plug.Router.InvalidSpecError', + 'Elixir.Plug.Router.MalformedURIError', + 'Elixir.Plug.Router.Utils','Elixir.Plug.SSL', + 'Elixir.Plug.Session','Elixir.Plug.Session.COOKIE', + 'Elixir.Plug.Session.ETS','Elixir.Plug.Session.Store', + 'Elixir.Plug.Static', + 'Elixir.Plug.Static.InvalidPathError', + 'Elixir.Plug.Telemetry','Elixir.Plug.Test', + 'Elixir.Plug.TimeoutError','Elixir.Plug.Upload', + 'Elixir.Plug.Upload.Supervisor', + 'Elixir.Plug.Upload.Terminator', + 'Elixir.Plug.UploadError',plug_multipart]}, + {compile_env,[{plug,[mimes],error},{plug,[statuses],error}]}, + {optional_applications,[]}, + 
{applications,[kernel,stdlib,elixir,logger,eex,mime,plug_crypto, + telemetry]}, + {description,"Compose web applications with functions"}, + {registered,[]}, + {vsn,"1.19.1"}, + {mod,{'Elixir.Plug.Application',[]}}, + {env,[{validate_header_keys_during_test,true}]}]}. diff --git a/_build/dev/lib/plug/ebin/plug_multipart.beam b/_build/dev/lib/plug/ebin/plug_multipart.beam new file mode 100644 index 0000000..f2a8bed Binary files /dev/null and b/_build/dev/lib/plug/ebin/plug_multipart.beam differ diff --git a/_build/dev/lib/plug_crypto/.mix/compile.elixir b/_build/dev/lib/plug_crypto/.mix/compile.elixir new file mode 100644 index 0000000..29a6f13 Binary files /dev/null and b/_build/dev/lib/plug_crypto/.mix/compile.elixir differ diff --git a/_build/dev/lib/plug_crypto/.mix/compile.elixir_scm b/_build/dev/lib/plug_crypto/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/plug_crypto/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/plug_crypto/.mix/compile.fetch b/_build/dev/lib/plug_crypto/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.Application.beam b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.Application.beam new file mode 100644 index 0000000..b43b978 Binary files /dev/null and b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.Application.beam differ diff --git a/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.KeyGenerator.beam b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.KeyGenerator.beam new file mode 100644 index 0000000..d08d9bc Binary files /dev/null and b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.KeyGenerator.beam differ diff --git a/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageEncryptor.beam b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageEncryptor.beam new file mode 100644 index 0000000..a70412d Binary files /dev/null and 
b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageEncryptor.beam differ diff --git a/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageVerifier.beam b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageVerifier.beam new file mode 100644 index 0000000..030e5cc Binary files /dev/null and b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.MessageVerifier.beam differ diff --git a/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.beam b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.beam new file mode 100644 index 0000000..d3e2da3 Binary files /dev/null and b/_build/dev/lib/plug_crypto/ebin/Elixir.Plug.Crypto.beam differ diff --git a/_build/dev/lib/plug_crypto/ebin/plug_crypto.app b/_build/dev/lib/plug_crypto/ebin/plug_crypto.app new file mode 100644 index 0000000..55578d0 --- /dev/null +++ b/_build/dev/lib/plug_crypto/ebin/plug_crypto.app @@ -0,0 +1,11 @@ +{application,plug_crypto, + [{modules,['Elixir.Plug.Crypto','Elixir.Plug.Crypto.Application', + 'Elixir.Plug.Crypto.KeyGenerator', + 'Elixir.Plug.Crypto.MessageEncryptor', + 'Elixir.Plug.Crypto.MessageVerifier']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,crypto]}, + {description,"Crypto-related functionality for the web"}, + {registered,[]}, + {vsn,"2.1.1"}, + {mod,{'Elixir.Plug.Crypto.Application',[]}}]}. 
diff --git a/_build/dev/lib/postgrex/.mix/compile.elixir b/_build/dev/lib/postgrex/.mix/compile.elixir new file mode 100644 index 0000000..69748cb Binary files /dev/null and b/_build/dev/lib/postgrex/.mix/compile.elixir differ diff --git a/_build/dev/lib/postgrex/.mix/compile.elixir_scm b/_build/dev/lib/postgrex/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/postgrex/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/postgrex/.mix/compile.fetch b/_build/dev/lib/postgrex/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Collectable.Postgrex.Stream.beam b/_build/dev/lib/postgrex/ebin/Elixir.Collectable.Postgrex.Stream.beam new file mode 100644 index 0000000..76ea8e2 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Collectable.Postgrex.Stream.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Copy.beam b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Copy.beam new file mode 100644 index 0000000..411f744 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Copy.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Parameters.beam b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Parameters.beam new file mode 100644 index 0000000..d085242 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Parameters.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Query.beam b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Query.beam new file mode 100644 index 0000000..8a44dc1 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.Query.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.TextQuery.beam 
b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.TextQuery.beam new file mode 100644 index 0000000..ce609a6 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.DBConnection.Query.Postgrex.TextQuery.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Enumerable.Postgrex.Stream.beam b/_build/dev/lib/postgrex/ebin/Elixir.Enumerable.Postgrex.Stream.beam new file mode 100644 index 0000000..fcd6525 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Enumerable.Postgrex.Stream.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Inspect.Postgrex.Stream.beam b/_build/dev/lib/postgrex/ebin/Elixir.Inspect.Postgrex.Stream.beam new file mode 100644 index 0000000..556caa2 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Inspect.Postgrex.Stream.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.App.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.App.beam new file mode 100644 index 0000000..133a337 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.App.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryExtension.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryExtension.beam new file mode 100644 index 0000000..ea4b26a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryExtension.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryUtils.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryUtils.beam new file mode 100644 index 0000000..71d7c5a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.BinaryUtils.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Box.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Box.beam new file mode 100644 index 0000000..31c9cbf Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Box.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Circle.beam 
b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Circle.beam new file mode 100644 index 0000000..2af3e0d Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Circle.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Copy.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Copy.beam new file mode 100644 index 0000000..7c6b084 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Copy.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Cursor.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Cursor.beam new file mode 100644 index 0000000..2d4ef64 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Cursor.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.DefaultTypes.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.DefaultTypes.beam new file mode 100644 index 0000000..bc47922 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.DefaultTypes.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Error.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Error.beam new file mode 100644 index 0000000..ccf3508 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Error.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ErrorCode.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ErrorCode.beam new file mode 100644 index 0000000..b4c7dd0 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ErrorCode.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extension.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extension.beam new file mode 100644 index 0000000..5feb84c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extension.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Array.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Array.beam new file mode 100644 index 
0000000..e1e6ae0 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Array.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.BitString.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.BitString.beam new file mode 100644 index 0000000..a683661 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.BitString.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Bool.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Bool.beam new file mode 100644 index 0000000..81afd7e Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Bool.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Box.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Box.beam new file mode 100644 index 0000000..e9a924c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Box.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Circle.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Circle.beam new file mode 100644 index 0000000..efab640 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Circle.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Date.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Date.beam new file mode 100644 index 0000000..56f5ae2 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Date.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float4.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float4.beam new file mode 100644 index 0000000..e52818d Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float4.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float8.beam 
b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float8.beam new file mode 100644 index 0000000..11a6c5a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Float8.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.HStore.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.HStore.beam new file mode 100644 index 0000000..b37afe7 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.HStore.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.INET.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.INET.beam new file mode 100644 index 0000000..6859bbc Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.INET.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int2.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int2.beam new file mode 100644 index 0000000..4feb53f Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int2.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int4.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int4.beam new file mode 100644 index 0000000..3eece93 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int4.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int8.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int8.beam new file mode 100644 index 0000000..a87b721 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Int8.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Interval.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Interval.beam new file mode 100644 index 0000000..cfaca7a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Interval.beam differ diff 
--git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSON.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSON.beam new file mode 100644 index 0000000..79c233a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSON.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSONB.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSONB.beam new file mode 100644 index 0000000..bac7bd4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.JSONB.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Line.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Line.beam new file mode 100644 index 0000000..68e5e38 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Line.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.LineSegment.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.LineSegment.beam new file mode 100644 index 0000000..ba1fcbf Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.LineSegment.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Lquery.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Lquery.beam new file mode 100644 index 0000000..4f0693d Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Lquery.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltree.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltree.beam new file mode 100644 index 0000000..845354e Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltree.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltxtquery.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltxtquery.beam new file mode 100644 index 0000000..98ce314 Binary files 
/dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Ltxtquery.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.MACADDR.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.MACADDR.beam new file mode 100644 index 0000000..7999600 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.MACADDR.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Multirange.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Multirange.beam new file mode 100644 index 0000000..3d9a6f6 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Multirange.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Name.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Name.beam new file mode 100644 index 0000000..98aa004 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Name.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Numeric.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Numeric.beam new file mode 100644 index 0000000..387dca9 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Numeric.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.OID.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.OID.beam new file mode 100644 index 0000000..dbfc897 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.OID.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Path.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Path.beam new file mode 100644 index 0000000..ce1c628 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Path.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Point.beam 
b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Point.beam new file mode 100644 index 0000000..3317209 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Point.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Polygon.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Polygon.beam new file mode 100644 index 0000000..26ebfa4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Polygon.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Range.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Range.beam new file mode 100644 index 0000000..8830e48 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Range.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Raw.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Raw.beam new file mode 100644 index 0000000..a4b92ff Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Raw.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Record.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Record.beam new file mode 100644 index 0000000..31b00a6 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Record.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TID.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TID.beam new file mode 100644 index 0000000..18b8f2c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TID.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TSVector.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TSVector.beam new file mode 100644 index 0000000..dda72f4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TSVector.beam differ 
diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Time.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Time.beam new file mode 100644 index 0000000..0024b86 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Time.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimeTZ.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimeTZ.beam new file mode 100644 index 0000000..ef3fd81 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimeTZ.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Timestamp.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Timestamp.beam new file mode 100644 index 0000000..ce53856 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Timestamp.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimestampTZ.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimestampTZ.beam new file mode 100644 index 0000000..824752a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.TimestampTZ.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.UUID.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.UUID.beam new file mode 100644 index 0000000..4e53647 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.UUID.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidBinary.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidBinary.beam new file mode 100644 index 0000000..8367fa2 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidBinary.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidText.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidText.beam new file mode 100644 index 
0000000..3278d55 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.VoidText.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Xid8.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Xid8.beam new file mode 100644 index 0000000..9fe2bb4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Extensions.Xid8.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.INET.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.INET.beam new file mode 100644 index 0000000..27b3075 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.INET.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Interval.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Interval.beam new file mode 100644 index 0000000..4dcb956 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Interval.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Lexeme.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Lexeme.beam new file mode 100644 index 0000000..0fcee3a Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Lexeme.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Line.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Line.beam new file mode 100644 index 0000000..65088a4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Line.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.LineSegment.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.LineSegment.beam new file mode 100644 index 0000000..7f52c9b Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.LineSegment.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.MACADDR.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.MACADDR.beam new file mode 100644 index 0000000..dd6208e Binary files /dev/null and 
b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.MACADDR.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Messages.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Messages.beam new file mode 100644 index 0000000..abea43c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Messages.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Multirange.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Multirange.beam new file mode 100644 index 0000000..b9bdc85 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Multirange.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Notifications.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Notifications.beam new file mode 100644 index 0000000..6dec550 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Notifications.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Parameters.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Parameters.beam new file mode 100644 index 0000000..52a1495 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Parameters.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Path.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Path.beam new file mode 100644 index 0000000..6a7f11d Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Path.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Point.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Point.beam new file mode 100644 index 0000000..8f07146 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Point.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Polygon.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Polygon.beam new file mode 100644 index 0000000..e8bef8c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Polygon.beam differ diff --git 
a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Protocol.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Protocol.beam new file mode 100644 index 0000000..7a66ac2 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Protocol.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Query.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Query.beam new file mode 100644 index 0000000..14c0eae Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Query.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.QueryError.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.QueryError.beam new file mode 100644 index 0000000..4da01ea Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.QueryError.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Range.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Range.beam new file mode 100644 index 0000000..6c234b4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Range.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ReplicationConnection.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ReplicationConnection.beam new file mode 100644 index 0000000..2386f5c Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.ReplicationConnection.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Result.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Result.beam new file mode 100644 index 0000000..e7fbe99 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Result.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.LockedCache.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.LockedCache.beam new file mode 100644 index 0000000..dd8c9af Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.LockedCache.beam differ diff --git 
a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.beam new file mode 100644 index 0000000..eba1935 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SCRAM.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SimpleConnection.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SimpleConnection.beam new file mode 100644 index 0000000..1f90e51 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SimpleConnection.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Stream.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Stream.beam new file mode 100644 index 0000000..a7e9407 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Stream.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SuperExtension.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SuperExtension.beam new file mode 100644 index 0000000..90bff07 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.SuperExtension.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TextQuery.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TextQuery.beam new file mode 100644 index 0000000..f194a75 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TextQuery.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeInfo.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeInfo.beam new file mode 100644 index 0000000..c96e2c3 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeInfo.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeModule.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeModule.beam new file mode 100644 index 0000000..35e9cd4 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeModule.beam differ diff --git 
a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeServer.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeServer.beam new file mode 100644 index 0000000..a0c2769 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeServer.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeSupervisor.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeSupervisor.beam new file mode 100644 index 0000000..3f75439 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.TypeSupervisor.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Types.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Types.beam new file mode 100644 index 0000000..a5de0de Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Types.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Utils.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Utils.beam new file mode 100644 index 0000000..61921b3 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.Utils.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.beam b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.beam new file mode 100644 index 0000000..486fd29 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.Postgrex.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Copy.beam b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Copy.beam new file mode 100644 index 0000000..81ff765 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Copy.beam differ diff --git a/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Query.beam b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Query.beam new file mode 100644 index 0000000..afb4f35 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.Query.beam differ diff --git 
a/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.TextQuery.beam b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.TextQuery.beam new file mode 100644 index 0000000..cf63eb8 Binary files /dev/null and b/_build/dev/lib/postgrex/ebin/Elixir.String.Chars.Postgrex.TextQuery.beam differ diff --git a/_build/dev/lib/postgrex/ebin/postgrex.app b/_build/dev/lib/postgrex/ebin/postgrex.app new file mode 100644 index 0000000..e75ed0e --- /dev/null +++ b/_build/dev/lib/postgrex/ebin/postgrex.app @@ -0,0 +1,93 @@ +{application,postgrex, + [{modules,['Elixir.Collectable.Postgrex.Stream', + 'Elixir.DBConnection.Query.Postgrex.Copy', + 'Elixir.DBConnection.Query.Postgrex.Parameters', + 'Elixir.DBConnection.Query.Postgrex.Query', + 'Elixir.DBConnection.Query.Postgrex.TextQuery', + 'Elixir.Enumerable.Postgrex.Stream', + 'Elixir.Inspect.Postgrex.Stream','Elixir.Postgrex', + 'Elixir.Postgrex.App', + 'Elixir.Postgrex.BinaryExtension', + 'Elixir.Postgrex.BinaryUtils','Elixir.Postgrex.Box', + 'Elixir.Postgrex.Circle','Elixir.Postgrex.Copy', + 'Elixir.Postgrex.Cursor', + 'Elixir.Postgrex.DefaultTypes', + 'Elixir.Postgrex.Error','Elixir.Postgrex.ErrorCode', + 'Elixir.Postgrex.Extension', + 'Elixir.Postgrex.Extensions.Array', + 'Elixir.Postgrex.Extensions.BitString', + 'Elixir.Postgrex.Extensions.Bool', + 'Elixir.Postgrex.Extensions.Box', + 'Elixir.Postgrex.Extensions.Circle', + 'Elixir.Postgrex.Extensions.Date', + 'Elixir.Postgrex.Extensions.Float4', + 'Elixir.Postgrex.Extensions.Float8', + 'Elixir.Postgrex.Extensions.HStore', + 'Elixir.Postgrex.Extensions.INET', + 'Elixir.Postgrex.Extensions.Int2', + 'Elixir.Postgrex.Extensions.Int4', + 'Elixir.Postgrex.Extensions.Int8', + 'Elixir.Postgrex.Extensions.Interval', + 'Elixir.Postgrex.Extensions.JSON', + 'Elixir.Postgrex.Extensions.JSONB', + 'Elixir.Postgrex.Extensions.Line', + 'Elixir.Postgrex.Extensions.LineSegment', + 'Elixir.Postgrex.Extensions.Lquery', + 'Elixir.Postgrex.Extensions.Ltree', + 
'Elixir.Postgrex.Extensions.Ltxtquery', + 'Elixir.Postgrex.Extensions.MACADDR', + 'Elixir.Postgrex.Extensions.Multirange', + 'Elixir.Postgrex.Extensions.Name', + 'Elixir.Postgrex.Extensions.Numeric', + 'Elixir.Postgrex.Extensions.OID', + 'Elixir.Postgrex.Extensions.Path', + 'Elixir.Postgrex.Extensions.Point', + 'Elixir.Postgrex.Extensions.Polygon', + 'Elixir.Postgrex.Extensions.Range', + 'Elixir.Postgrex.Extensions.Raw', + 'Elixir.Postgrex.Extensions.Record', + 'Elixir.Postgrex.Extensions.TID', + 'Elixir.Postgrex.Extensions.TSVector', + 'Elixir.Postgrex.Extensions.Time', + 'Elixir.Postgrex.Extensions.TimeTZ', + 'Elixir.Postgrex.Extensions.Timestamp', + 'Elixir.Postgrex.Extensions.TimestampTZ', + 'Elixir.Postgrex.Extensions.UUID', + 'Elixir.Postgrex.Extensions.VoidBinary', + 'Elixir.Postgrex.Extensions.VoidText', + 'Elixir.Postgrex.Extensions.Xid8', + 'Elixir.Postgrex.INET','Elixir.Postgrex.Interval', + 'Elixir.Postgrex.Lexeme','Elixir.Postgrex.Line', + 'Elixir.Postgrex.LineSegment', + 'Elixir.Postgrex.MACADDR','Elixir.Postgrex.Messages', + 'Elixir.Postgrex.Multirange', + 'Elixir.Postgrex.Notifications', + 'Elixir.Postgrex.Parameters','Elixir.Postgrex.Path', + 'Elixir.Postgrex.Point','Elixir.Postgrex.Polygon', + 'Elixir.Postgrex.Protocol','Elixir.Postgrex.Query', + 'Elixir.Postgrex.QueryError','Elixir.Postgrex.Range', + 'Elixir.Postgrex.ReplicationConnection', + 'Elixir.Postgrex.Result','Elixir.Postgrex.SCRAM', + 'Elixir.Postgrex.SCRAM.LockedCache', + 'Elixir.Postgrex.SimpleConnection', + 'Elixir.Postgrex.Stream', + 'Elixir.Postgrex.SuperExtension', + 'Elixir.Postgrex.TextQuery', + 'Elixir.Postgrex.TypeInfo', + 'Elixir.Postgrex.TypeModule', + 'Elixir.Postgrex.TypeServer', + 'Elixir.Postgrex.TypeSupervisor', + 'Elixir.Postgrex.Types','Elixir.Postgrex.Utils', + 'Elixir.String.Chars.Postgrex.Copy', + 'Elixir.String.Chars.Postgrex.Query', + 'Elixir.String.Chars.Postgrex.TextQuery']}, + {optional_applications,[jason,table]}, + 
{applications,[kernel,stdlib,elixir,logger,crypto,ssl,jason, + table,decimal,db_connection]}, + {description,"PostgreSQL driver for Elixir"}, + {registered,[]}, + {vsn,"0.22.1"}, + {mod,{'Elixir.Postgrex.App',[]}}, + {env,[{type_server_reap_after,180000}, + {type_server_timeout,60000}, + {json_library,'Elixir.Jason'}]}]}. diff --git a/_build/dev/lib/req/.mix/compile.elixir b/_build/dev/lib/req/.mix/compile.elixir new file mode 100644 index 0000000..24b5b97 Binary files /dev/null and b/_build/dev/lib/req/.mix/compile.elixir differ diff --git a/_build/dev/lib/req/.mix/compile.elixir_scm b/_build/dev/lib/req/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/req/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/req/.mix/compile.fetch b/_build/dev/lib/req/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/req/ebin/Elixir.Collectable.Req.Utils.CollectWithHash.beam b/_build/dev/lib/req/ebin/Elixir.Collectable.Req.Utils.CollectWithHash.beam new file mode 100644 index 0000000..809d2c9 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Collectable.Req.Utils.CollectWithHash.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Enumerable.Req.Response.Async.beam b/_build/dev/lib/req/ebin/Elixir.Enumerable.Req.Response.Async.beam new file mode 100644 index 0000000..178a122 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Enumerable.Req.Response.Async.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Request.beam b/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Request.beam new file mode 100644 index 0000000..92b0373 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Request.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Response.Async.beam b/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Response.Async.beam new file mode 100644 index 0000000..c64f661 Binary files /dev/null and 
b/_build/dev/lib/req/ebin/Elixir.Inspect.Req.Response.Async.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Application.beam b/_build/dev/lib/req/ebin/Elixir.Req.Application.beam new file mode 100644 index 0000000..a7e3718 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Application.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.ArchiveError.beam b/_build/dev/lib/req/ebin/Elixir.Req.ArchiveError.beam new file mode 100644 index 0000000..6d17c85 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.ArchiveError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.ChecksumMismatchError.beam b/_build/dev/lib/req/ebin/Elixir.Req.ChecksumMismatchError.beam new file mode 100644 index 0000000..c2c5802 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.ChecksumMismatchError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.DecompressError.beam b/_build/dev/lib/req/ebin/Elixir.Req.DecompressError.beam new file mode 100644 index 0000000..8fdeb50 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.DecompressError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Fields.beam b/_build/dev/lib/req/ebin/Elixir.Req.Fields.beam new file mode 100644 index 0000000..4b3831a Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Fields.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Finch.beam b/_build/dev/lib/req/ebin/Elixir.Req.Finch.beam new file mode 100644 index 0000000..51ed9aa Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Finch.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.HTTPError.beam b/_build/dev/lib/req/ebin/Elixir.Req.HTTPError.beam new file mode 100644 index 0000000..d6a29a3 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.HTTPError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Request.beam b/_build/dev/lib/req/ebin/Elixir.Req.Request.beam new file mode 100644 index 0000000..23ae892 Binary 
files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Request.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Response.Async.beam b/_build/dev/lib/req/ebin/Elixir.Req.Response.Async.beam new file mode 100644 index 0000000..6a95795 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Response.Async.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Response.beam b/_build/dev/lib/req/ebin/Elixir.Req.Response.beam new file mode 100644 index 0000000..8d1864f Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Response.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Steps.beam b/_build/dev/lib/req/ebin/Elixir.Req.Steps.beam new file mode 100644 index 0000000..3f624fb Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Steps.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Test.Adapter.beam b/_build/dev/lib/req/ebin/Elixir.Req.Test.Adapter.beam new file mode 100644 index 0000000..6a9ac81 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Test.Adapter.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Test.Ownership.beam b/_build/dev/lib/req/ebin/Elixir.Req.Test.Ownership.beam new file mode 100644 index 0000000..e9b7694 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Test.Ownership.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Test.OwnershipError.beam b/_build/dev/lib/req/ebin/Elixir.Req.Test.OwnershipError.beam new file mode 100644 index 0000000..bc8e7d9 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Test.OwnershipError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Test.beam b/_build/dev/lib/req/ebin/Elixir.Req.Test.beam new file mode 100644 index 0000000..752a9b6 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Test.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.TooManyRedirectsError.beam b/_build/dev/lib/req/ebin/Elixir.Req.TooManyRedirectsError.beam new file mode 100644 index 
0000000..0bb7357 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.TooManyRedirectsError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.TransportError.beam b/_build/dev/lib/req/ebin/Elixir.Req.TransportError.beam new file mode 100644 index 0000000..4e97bad Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.TransportError.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Utils.CollectWithHash.beam b/_build/dev/lib/req/ebin/Elixir.Req.Utils.CollectWithHash.beam new file mode 100644 index 0000000..7c76395 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Utils.CollectWithHash.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.Utils.beam b/_build/dev/lib/req/ebin/Elixir.Req.Utils.beam new file mode 100644 index 0000000..f159599 Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.Utils.beam differ diff --git a/_build/dev/lib/req/ebin/Elixir.Req.beam b/_build/dev/lib/req/ebin/Elixir.Req.beam new file mode 100644 index 0000000..1b02cfd Binary files /dev/null and b/_build/dev/lib/req/ebin/Elixir.Req.beam differ diff --git a/_build/dev/lib/req/ebin/req.app b/_build/dev/lib/req/ebin/req.app new file mode 100644 index 0000000..aa41284 --- /dev/null +++ b/_build/dev/lib/req/ebin/req.app @@ -0,0 +1,24 @@ +{application,req, + [{modules,['Elixir.Collectable.Req.Utils.CollectWithHash', + 'Elixir.Enumerable.Req.Response.Async', + 'Elixir.Inspect.Req.Request', + 'Elixir.Inspect.Req.Response.Async','Elixir.Req', + 'Elixir.Req.Application','Elixir.Req.ArchiveError', + 'Elixir.Req.ChecksumMismatchError', + 'Elixir.Req.DecompressError','Elixir.Req.Fields', + 'Elixir.Req.Finch','Elixir.Req.HTTPError', + 'Elixir.Req.Request','Elixir.Req.Response', + 'Elixir.Req.Response.Async','Elixir.Req.Steps', + 'Elixir.Req.Test','Elixir.Req.Test.Adapter', + 'Elixir.Req.Test.Ownership', + 'Elixir.Req.Test.OwnershipError', + 'Elixir.Req.TooManyRedirectsError', + 'Elixir.Req.TransportError','Elixir.Req.Utils', + 
'Elixir.Req.Utils.CollectWithHash']}, + {optional_applications,[nimble_csv,plug,brotli,ezstd]}, + {applications,[kernel,stdlib,elixir,logger,finch,mime,jason, + nimble_csv,plug,brotli,ezstd]}, + {description,"req"}, + {registered,[]}, + {vsn,"0.5.17"}, + {mod,{'Elixir.Req.Application',[]}}]}. diff --git a/_build/dev/lib/swoosh/.mix/compile.elixir b/_build/dev/lib/swoosh/.mix/compile.elixir new file mode 100644 index 0000000..cc1a346 Binary files /dev/null and b/_build/dev/lib/swoosh/.mix/compile.elixir differ diff --git a/_build/dev/lib/swoosh/.mix/compile.elixir_scm b/_build/dev/lib/swoosh/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/swoosh/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/swoosh/.mix/compile.fetch b/_build/dev/lib/swoosh/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Mix.Tasks.Swoosh.Mailbox.Server.beam b/_build/dev/lib/swoosh/ebin/Elixir.Mix.Tasks.Swoosh.Mailbox.Server.beam new file mode 100644 index 0000000..64dc144 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Mix.Tasks.Swoosh.Mailbox.Server.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Plug.Swoosh.MailboxPreview.beam b/_build/dev/lib/swoosh/ebin/Elixir.Plug.Swoosh.MailboxPreview.beam new file mode 100644 index 0000000..d0d26e9 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Plug.Swoosh.MailboxPreview.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapter.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapter.beam new file mode 100644 index 0000000..376da57 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapter.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AmazonSES.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AmazonSES.beam new file mode 100644 index 0000000..9a161b6 Binary files /dev/null and 
b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AmazonSES.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AzureCommunicationServices.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AzureCommunicationServices.beam new file mode 100644 index 0000000..7026088 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.AzureCommunicationServices.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Brevo.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Brevo.beam new file mode 100644 index 0000000..ffd62e4 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Brevo.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.CustomerIO.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.CustomerIO.beam new file mode 100644 index 0000000..14c318f Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.CustomerIO.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Dyn.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Dyn.beam new file mode 100644 index 0000000..3847ff3 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Dyn.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ExAwsAmazonSES.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ExAwsAmazonSES.beam new file mode 100644 index 0000000..4bfc719 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ExAwsAmazonSES.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Gmail.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Gmail.beam new file mode 100644 index 0000000..f401652 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Gmail.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Lettermint.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Lettermint.beam new file 
mode 100644 index 0000000..1094c88 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Lettermint.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Manager.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Manager.beam new file mode 100644 index 0000000..2ce4f0f Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Manager.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Memory.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Memory.beam new file mode 100644 index 0000000..87d7098 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.Storage.Memory.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.beam new file mode 100644 index 0000000..b1560ee Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Local.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Logger.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Logger.beam new file mode 100644 index 0000000..2b38093 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Logger.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Loops.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Loops.beam new file mode 100644 index 0000000..2e270b5 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Loops.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MailPace.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MailPace.beam new file mode 100644 index 0000000..4952575 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MailPace.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailersend.beam 
b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailersend.beam new file mode 100644 index 0000000..d05df6a Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailersend.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailgun.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailgun.beam new file mode 100644 index 0000000..7bcb91d Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailgun.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailjet.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailjet.beam new file mode 100644 index 0000000..a4a3045 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailjet.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailtrap.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailtrap.beam new file mode 100644 index 0000000..b8bdc15 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mailtrap.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mandrill.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mandrill.beam new file mode 100644 index 0000000..229cc69 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mandrill.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MsGraph.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MsGraph.beam new file mode 100644 index 0000000..89dd1c9 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.MsGraph.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.MultihostError.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.MultihostError.beam new file mode 100644 index 0000000..e993244 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.MultihostError.beam differ diff --git 
a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.beam new file mode 100644 index 0000000..398dfad Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Mua.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.OhMySmtp.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.OhMySmtp.beam new file mode 100644 index 0000000..26c2411 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.OhMySmtp.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.PostUp.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.PostUp.beam new file mode 100644 index 0000000..63ca153 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.PostUp.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postal.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postal.beam new file mode 100644 index 0000000..c4d0d02 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postal.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postmark.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postmark.beam new file mode 100644 index 0000000..7d4c4d9 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Postmark.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ProtonBridge.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ProtonBridge.beam new file mode 100644 index 0000000..93e1a5d Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ProtonBridge.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Resend.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Resend.beam new file mode 100644 index 0000000..17aa310 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Resend.beam differ diff --git 
a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.Helpers.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.Helpers.beam new file mode 100644 index 0000000..2c6c3bc Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.Helpers.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.beam new file mode 100644 index 0000000..3d44b1a Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP2GO.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP2GO.beam new file mode 100644 index 0000000..8287eb7 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SMTP2GO.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.Storage.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.Storage.beam new file mode 100644 index 0000000..3e30087 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.Storage.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.beam new file mode 100644 index 0000000..93beb90 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sandbox.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Scaleway.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Scaleway.beam new file mode 100644 index 0000000..8e18638 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Scaleway.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendgrid.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendgrid.beam new file mode 100644 index 0000000..52ff6c9 Binary files /dev/null and 
b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendgrid.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendinblue.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendinblue.beam new file mode 100644 index 0000000..7cc7cbc Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendinblue.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendmail.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendmail.beam new file mode 100644 index 0000000..e1ae235 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Sendmail.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SocketLabs.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SocketLabs.beam new file mode 100644 index 0000000..ec9072a Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SocketLabs.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SparkPost.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SparkPost.beam new file mode 100644 index 0000000..0834282 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.SparkPost.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Test.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Test.beam new file mode 100644 index 0000000..7e0aca9 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.Test.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.XML.Helpers.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.XML.Helpers.beam new file mode 100644 index 0000000..5a20e53 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.XML.Helpers.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ZeptoMail.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ZeptoMail.beam new file mode 100644 index 0000000..7b3dc60 
Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Adapters.ZeptoMail.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Finch.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Finch.beam new file mode 100644 index 0000000..bb708f5 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Finch.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Hackney.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Hackney.beam new file mode 100644 index 0000000..5089234 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Hackney.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Req.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Req.beam new file mode 100644 index 0000000..155b59c Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.Req.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.beam new file mode 100644 index 0000000..6e007ef Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.ApiClient.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Application.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Application.beam new file mode 100644 index 0000000..12b70dd Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Application.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Attachment.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Attachment.beam new file mode 100644 index 0000000..cb1e639 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Attachment.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.AttachmentContentError.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.AttachmentContentError.beam new file mode 100644 index 0000000..125c1b1 Binary files /dev/null and 
b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.AttachmentContentError.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.DeliveryError.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.DeliveryError.beam new file mode 100644 index 0000000..391a5fd Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.DeliveryError.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Any.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Any.beam new file mode 100644 index 0000000..00becdc Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Any.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.BitString.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.BitString.beam new file mode 100644 index 0000000..9c9ab7b Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.BitString.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Tuple.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Tuple.beam new file mode 100644 index 0000000..14c4334 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.Tuple.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.beam new file mode 100644 index 0000000..2a11172 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Recipient.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Render.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Render.beam new file mode 100644 index 0000000..10687fb Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.Render.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.beam new file mode 100644 index 0000000..fe85f2f Binary files /dev/null and 
b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Email.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Mailer.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Mailer.beam new file mode 100644 index 0000000..b46d020 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.Mailer.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.TestAssertions.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.TestAssertions.beam new file mode 100644 index 0000000..cecc666 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.TestAssertions.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.X.TestAssertions.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.X.TestAssertions.beam new file mode 100644 index 0000000..35c0233 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.X.TestAssertions.beam differ diff --git a/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.beam b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.beam new file mode 100644 index 0000000..e68a490 Binary files /dev/null and b/_build/dev/lib/swoosh/ebin/Elixir.Swoosh.beam differ diff --git a/_build/dev/lib/swoosh/ebin/swoosh.app b/_build/dev/lib/swoosh/ebin/swoosh.app new file mode 100644 index 0000000..1c5769f --- /dev/null +++ b/_build/dev/lib/swoosh/ebin/swoosh.app @@ -0,0 +1,74 @@ +{application,swoosh, + [{modules,['Elixir.Mix.Tasks.Swoosh.Mailbox.Server', + 'Elixir.Plug.Swoosh.MailboxPreview','Elixir.Swoosh', + 'Elixir.Swoosh.Adapter', + 'Elixir.Swoosh.Adapters.AmazonSES', + 'Elixir.Swoosh.Adapters.AzureCommunicationServices', + 'Elixir.Swoosh.Adapters.Brevo', + 'Elixir.Swoosh.Adapters.CustomerIO', + 'Elixir.Swoosh.Adapters.Dyn', + 'Elixir.Swoosh.Adapters.ExAwsAmazonSES', + 'Elixir.Swoosh.Adapters.Gmail', + 'Elixir.Swoosh.Adapters.Lettermint', + 'Elixir.Swoosh.Adapters.Local', + 'Elixir.Swoosh.Adapters.Local.Storage.Manager', + 'Elixir.Swoosh.Adapters.Local.Storage.Memory', + 'Elixir.Swoosh.Adapters.Logger', + 'Elixir.Swoosh.Adapters.Loops', + 
'Elixir.Swoosh.Adapters.MailPace', + 'Elixir.Swoosh.Adapters.Mailersend', + 'Elixir.Swoosh.Adapters.Mailgun', + 'Elixir.Swoosh.Adapters.Mailjet', + 'Elixir.Swoosh.Adapters.Mailtrap', + 'Elixir.Swoosh.Adapters.Mandrill', + 'Elixir.Swoosh.Adapters.MsGraph', + 'Elixir.Swoosh.Adapters.Mua', + 'Elixir.Swoosh.Adapters.Mua.MultihostError', + 'Elixir.Swoosh.Adapters.OhMySmtp', + 'Elixir.Swoosh.Adapters.PostUp', + 'Elixir.Swoosh.Adapters.Postal', + 'Elixir.Swoosh.Adapters.Postmark', + 'Elixir.Swoosh.Adapters.ProtonBridge', + 'Elixir.Swoosh.Adapters.Resend', + 'Elixir.Swoosh.Adapters.SMTP', + 'Elixir.Swoosh.Adapters.SMTP.Helpers', + 'Elixir.Swoosh.Adapters.SMTP2GO', + 'Elixir.Swoosh.Adapters.Sandbox', + 'Elixir.Swoosh.Adapters.Sandbox.Storage', + 'Elixir.Swoosh.Adapters.Scaleway', + 'Elixir.Swoosh.Adapters.Sendgrid', + 'Elixir.Swoosh.Adapters.Sendinblue', + 'Elixir.Swoosh.Adapters.Sendmail', + 'Elixir.Swoosh.Adapters.SocketLabs', + 'Elixir.Swoosh.Adapters.SparkPost', + 'Elixir.Swoosh.Adapters.Test', + 'Elixir.Swoosh.Adapters.XML.Helpers', + 'Elixir.Swoosh.Adapters.ZeptoMail', + 'Elixir.Swoosh.ApiClient', + 'Elixir.Swoosh.ApiClient.Finch', + 'Elixir.Swoosh.ApiClient.Hackney', + 'Elixir.Swoosh.ApiClient.Req', + 'Elixir.Swoosh.Application', + 'Elixir.Swoosh.Attachment', + 'Elixir.Swoosh.AttachmentContentError', + 'Elixir.Swoosh.DeliveryError','Elixir.Swoosh.Email', + 'Elixir.Swoosh.Email.Recipient', + 'Elixir.Swoosh.Email.Recipient.Any', + 'Elixir.Swoosh.Email.Recipient.BitString', + 'Elixir.Swoosh.Email.Recipient.Tuple', + 'Elixir.Swoosh.Email.Render','Elixir.Swoosh.Mailer', + 'Elixir.Swoosh.TestAssertions', + 'Elixir.Swoosh.X.TestAssertions']}, + {optional_applications,[hackney,finch,req,mail,gen_smtp,mua, + cowboy,plug,plug_cowboy,bandit, + multipart,ex_aws]}, + {applications,[kernel,stdlib,elixir,logger,xmerl,mime,jason, + telemetry,idna,hackney,finch,req,mail,gen_smtp, + mua,cowboy,plug,plug_cowboy,bandit,multipart, + ex_aws]}, + {description,"Compose, deliver and test 
your emails easily in Elixir. Supports SMTP,\nSendgrid, Mandrill, Postmark, Mailgun and many more out of the box.\nPreview your emails in the browser. Test your email sending code.\n"}, + {registered,[]}, + {vsn,"1.25.1"}, + {mod,{'Elixir.Swoosh.Application',[]}}, + {env,[{json_library,'Elixir.Jason'}, + {api_client,'Elixir.Swoosh.ApiClient.Hackney'}]}]}. diff --git a/_build/dev/lib/swoosh/priv b/_build/dev/lib/swoosh/priv new file mode 120000 index 0000000..5949415 --- /dev/null +++ b/_build/dev/lib/swoosh/priv @@ -0,0 +1 @@ +../../../../deps/swoosh/priv \ No newline at end of file diff --git a/_build/dev/lib/telemetry/.mix/compile.fetch b/_build/dev/lib/telemetry/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/telemetry/ebin/telemetry.app b/_build/dev/lib/telemetry/ebin/telemetry.app new file mode 100644 index 0000000..b09b016 --- /dev/null +++ b/_build/dev/lib/telemetry/ebin/telemetry.app @@ -0,0 +1,15 @@ +{application,telemetry, + [{description,"Dynamic dispatching library for metrics and instrumentations"}, + {vsn,"1.4.1"}, + {registered,[]}, + {mod,{telemetry_app,[]}}, + {applications,[kernel,stdlib]}, + {env,[]}, + {modules,[telemetry,telemetry_app,telemetry_ets, + telemetry_handler_table,telemetry_pt,telemetry_sup, + telemetry_test]}, + {licenses,["Apache-2.0"]}, + {links,[{"GitHub", + "https://github.com/beam-telemetry/telemetry"}]}, + {doc,"doc"}, + {include_files,["mix.exs"]}]}. 
diff --git a/_build/dev/lib/telemetry/ebin/telemetry.beam b/_build/dev/lib/telemetry/ebin/telemetry.beam new file mode 100644 index 0000000..e268779 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_app.beam b/_build/dev/lib/telemetry/ebin/telemetry_app.beam new file mode 100644 index 0000000..fac4e14 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_app.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_ets.beam b/_build/dev/lib/telemetry/ebin/telemetry_ets.beam new file mode 100644 index 0000000..beffa87 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_ets.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_handler_table.beam b/_build/dev/lib/telemetry/ebin/telemetry_handler_table.beam new file mode 100644 index 0000000..b502709 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_handler_table.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_pt.beam b/_build/dev/lib/telemetry/ebin/telemetry_pt.beam new file mode 100644 index 0000000..9427218 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_pt.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_sup.beam b/_build/dev/lib/telemetry/ebin/telemetry_sup.beam new file mode 100644 index 0000000..9075763 Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_sup.beam differ diff --git a/_build/dev/lib/telemetry/ebin/telemetry_test.beam b/_build/dev/lib/telemetry/ebin/telemetry_test.beam new file mode 100644 index 0000000..6ad933a Binary files /dev/null and b/_build/dev/lib/telemetry/ebin/telemetry_test.beam differ diff --git a/_build/dev/lib/telemetry/mix.rebar.config b/_build/dev/lib/telemetry/mix.rebar.config new file mode 100644 index 0000000..ca1ef50 --- /dev/null +++ b/_build/dev/lib/telemetry/mix.rebar.config @@ -0,0 +1,17 @@ +{erl_opts,[debug_info]}. +{deps,[]}. 
+{profiles,[{test,[{erl_opts,[nowarn_export_all]}, + {ct_opts,[{ct_hooks,[cth_surefire]}]}, + {cover_enabled,true}, + {cover_opts,[verbose]}, + {plugins,[covertool]}, + {covertool,[{coverdata_files,["ct.coverdata"]}]}]}]}. +{shell,[{apps,[telemetry]}]}. +{xref_checks,[undefined_function_calls,undefined_functions,locals_not_used, + deprecated_function_calls,deprecated_functions]}. +{hex,[{doc,#{provider => ex_doc}}]}. +{ex_doc,[{source_url,<<"https://github.com/beam-telemetry/telemetry">>}, + {extras,[<<"README.md">>,<<"CHANGELOG.md">>,<<"LICENSE">>, + <<"NOTICE">>]}, + {main,<<"readme">>}]}. +{overrides,[]}. diff --git a/_build/dev/lib/telemetry/src b/_build/dev/lib/telemetry/src new file mode 120000 index 0000000..f3de4ce --- /dev/null +++ b/_build/dev/lib/telemetry/src @@ -0,0 +1 @@ +../../../../deps/telemetry/src \ No newline at end of file diff --git a/_build/dev/lib/telemetry_metrics/.mix/compile.elixir b/_build/dev/lib/telemetry_metrics/.mix/compile.elixir new file mode 100644 index 0000000..2237477 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/.mix/compile.elixir differ diff --git a/_build/dev/lib/telemetry_metrics/.mix/compile.elixir_scm b/_build/dev/lib/telemetry_metrics/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/telemetry_metrics/.mix/compile.fetch b/_build/dev/lib/telemetry_metrics/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.ConsoleReporter.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.ConsoleReporter.beam new file mode 100644 index 0000000..8b47f05 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.ConsoleReporter.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Counter.beam 
b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Counter.beam new file mode 100644 index 0000000..1bb1e23 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Counter.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Distribution.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Distribution.beam new file mode 100644 index 0000000..27a8083 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Distribution.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.LastValue.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.LastValue.beam new file mode 100644 index 0000000..12fcea8 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.LastValue.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Sum.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Sum.beam new file mode 100644 index 0000000..2208b2d Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Sum.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Summary.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Summary.beam new file mode 100644 index 0000000..ed3f3e0 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.Summary.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.beam b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.beam new file mode 100644 index 0000000..8928e58 Binary files /dev/null and b/_build/dev/lib/telemetry_metrics/ebin/Elixir.Telemetry.Metrics.beam differ diff --git a/_build/dev/lib/telemetry_metrics/ebin/telemetry_metrics.app b/_build/dev/lib/telemetry_metrics/ebin/telemetry_metrics.app new file mode 100644 index 
0000000..a92e5cd --- /dev/null +++ b/_build/dev/lib/telemetry_metrics/ebin/telemetry_metrics.app @@ -0,0 +1,13 @@ +{application,telemetry_metrics, + [{modules,['Elixir.Telemetry.Metrics', + 'Elixir.Telemetry.Metrics.ConsoleReporter', + 'Elixir.Telemetry.Metrics.Counter', + 'Elixir.Telemetry.Metrics.Distribution', + 'Elixir.Telemetry.Metrics.LastValue', + 'Elixir.Telemetry.Metrics.Sum', + 'Elixir.Telemetry.Metrics.Summary']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,telemetry]}, + {description,"Provides a common interface for defining metrics based on Telemetry events.\n"}, + {registered,[]}, + {vsn,"1.1.0"}]}. diff --git a/_build/dev/lib/telemetry_poller/.mix/compile.fetch b/_build/dev/lib/telemetry_poller/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.app b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.app new file mode 100644 index 0000000..bea50a1 --- /dev/null +++ b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.app @@ -0,0 +1,13 @@ +{application,telemetry_poller, + [{description,"Periodically collect measurements and dispatch them as Telemetry events."}, + {vsn,"1.3.0"}, + {registered,[]}, + {mod,{telemetry_poller_app,[]}}, + {applications,[kernel,stdlib,telemetry]}, + {env,[]}, + {modules,[telemetry_poller,telemetry_poller_app, + telemetry_poller_builtin,telemetry_poller_sup]}, + {licenses,["Apache-2.0"]}, + {doc,"doc"}, + {links,[{"GitHub", + "https://github.com/beam-telemetry/telemetry_poller"}]}]}. 
diff --git a/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.beam b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.beam new file mode 100644 index 0000000..843a500 Binary files /dev/null and b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller.beam differ diff --git a/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_app.beam b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_app.beam new file mode 100644 index 0000000..23ab0e9 Binary files /dev/null and b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_app.beam differ diff --git a/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_builtin.beam b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_builtin.beam new file mode 100644 index 0000000..249be2d Binary files /dev/null and b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_builtin.beam differ diff --git a/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_sup.beam b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_sup.beam new file mode 100644 index 0000000..ba6c5ea Binary files /dev/null and b/_build/dev/lib/telemetry_poller/ebin/telemetry_poller_sup.beam differ diff --git a/_build/dev/lib/telemetry_poller/mix.rebar.config b/_build/dev/lib/telemetry_poller/mix.rebar.config new file mode 100644 index 0000000..c78cea1 --- /dev/null +++ b/_build/dev/lib/telemetry_poller/mix.rebar.config @@ -0,0 +1,16 @@ +{minimum_otp_vsn,"24.0"}. +{erl_opts,[debug_info]}. +{deps,[{telemetry,"~> 1.0"}]}. +{profiles,[{test,[{erl_opts,[nowarn_export_all]}, + {ct_opts,[{ct_hooks,[cth_surefire]}]}, + {src_dirs,["src","test/support"]}]}]}. +{shell,[{apps,[telemetry_poller]}]}. +{ex_doc,[{main,"README"}, + {extras,[<<"README.md">>,<<"CHANGELOG.md">>,<<"LICENSE">>, + <<"NOTICE">>]}, + {source_url,<<"https://github.com/beam-telemetry/telemetry_poller">>}, + {source_ref,<<"v1.3.0">>}]}. +{hex,[{doc,#{provider => ex_doc}}]}. 
+{xref_checks,[undefined_function_calls,undefined_functions,locals_not_used, + deprecated_function_calls,deprecated_functions]}. +{overrides,[]}. diff --git a/_build/dev/lib/telemetry_poller/src b/_build/dev/lib/telemetry_poller/src new file mode 120000 index 0000000..1893ac0 --- /dev/null +++ b/_build/dev/lib/telemetry_poller/src @@ -0,0 +1 @@ +../../../../deps/telemetry_poller/src \ No newline at end of file diff --git a/_build/dev/lib/telemetry_registry/.mix/compile.elixir b/_build/dev/lib/telemetry_registry/.mix/compile.elixir new file mode 100644 index 0000000..e000587 Binary files /dev/null and b/_build/dev/lib/telemetry_registry/.mix/compile.elixir differ diff --git a/_build/dev/lib/telemetry_registry/.mix/compile.elixir_scm b/_build/dev/lib/telemetry_registry/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/telemetry_registry/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/telemetry_registry/.mix/compile.erlang b/_build/dev/lib/telemetry_registry/.mix/compile.erlang new file mode 100644 index 0000000..a713b18 Binary files /dev/null and b/_build/dev/lib/telemetry_registry/.mix/compile.erlang differ diff --git a/_build/dev/lib/telemetry_registry/.mix/compile.fetch b/_build/dev/lib/telemetry_registry/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/telemetry_registry/ebin/Elixir.TelemetryRegistry.beam b/_build/dev/lib/telemetry_registry/ebin/Elixir.TelemetryRegistry.beam new file mode 100644 index 0000000..52d5741 Binary files /dev/null and b/_build/dev/lib/telemetry_registry/ebin/Elixir.TelemetryRegistry.beam differ diff --git a/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.app b/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.app new file mode 100644 index 0000000..d3bc07d --- /dev/null +++ b/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.app @@ -0,0 +1,7 @@ +{application,telemetry_registry, + 
[{modules,['Elixir.TelemetryRegistry',telemetry_registry]}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,telemetry]}, + {description,"A library for telemetry event declaration, discovery, and registration."}, + {registered,[]}, + {vsn,"0.3.2"}]}. diff --git a/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.beam b/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.beam new file mode 100644 index 0000000..b87f490 Binary files /dev/null and b/_build/dev/lib/telemetry_registry/ebin/telemetry_registry.beam differ diff --git a/_build/dev/lib/thousand_island/.mix/compile.elixir b/_build/dev/lib/thousand_island/.mix/compile.elixir new file mode 100644 index 0000000..3c3b8c9 Binary files /dev/null and b/_build/dev/lib/thousand_island/.mix/compile.elixir differ diff --git a/_build/dev/lib/thousand_island/.mix/compile.elixir_scm b/_build/dev/lib/thousand_island/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/thousand_island/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/thousand_island/.mix/compile.fetch b/_build/dev/lib/thousand_island/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Acceptor.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Acceptor.beam new file mode 100644 index 0000000..008ae35 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Acceptor.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorPoolSupervisor.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorPoolSupervisor.beam new file mode 100644 index 0000000..caed967 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorPoolSupervisor.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorSupervisor.beam 
b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorSupervisor.beam new file mode 100644 index 0000000..46e4fd8 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.AcceptorSupervisor.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Connection.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Connection.beam new file mode 100644 index 0000000..b8c4b94 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Connection.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Handler.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Handler.beam new file mode 100644 index 0000000..67226bb Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Handler.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.HandlerConfig.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.HandlerConfig.beam new file mode 100644 index 0000000..f465b20 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.HandlerConfig.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Listener.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Listener.beam new file mode 100644 index 0000000..08281d7 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Listener.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Logger.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Logger.beam new file mode 100644 index 0000000..27d825c Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Logger.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ProcessLabel.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ProcessLabel.beam new file mode 
100644 index 0000000..4ccef3a Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ProcessLabel.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Server.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Server.beam new file mode 100644 index 0000000..2b415e6 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Server.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ServerConfig.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ServerConfig.beam new file mode 100644 index 0000000..5647490 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ServerConfig.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ShutdownListener.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ShutdownListener.beam new file mode 100644 index 0000000..5d05bb2 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.ShutdownListener.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Socket.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Socket.beam new file mode 100644 index 0000000..f973747 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Socket.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Telemetry.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Telemetry.beam new file mode 100644 index 0000000..dc5555c Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Telemetry.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transport.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transport.beam new file mode 100644 index 0000000..9f585bd Binary files /dev/null and 
b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transport.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.SSL.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.SSL.beam new file mode 100644 index 0000000..2f3b684 Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.SSL.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.TCP.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.TCP.beam new file mode 100644 index 0000000..175554a Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.Transports.TCP.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.beam b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.beam new file mode 100644 index 0000000..698502a Binary files /dev/null and b/_build/dev/lib/thousand_island/ebin/Elixir.ThousandIsland.beam differ diff --git a/_build/dev/lib/thousand_island/ebin/thousand_island.app b/_build/dev/lib/thousand_island/ebin/thousand_island.app new file mode 100644 index 0000000..73d9569 --- /dev/null +++ b/_build/dev/lib/thousand_island/ebin/thousand_island.app @@ -0,0 +1,24 @@ +{application,thousand_island, + [{modules,['Elixir.ThousandIsland', + 'Elixir.ThousandIsland.Acceptor', + 'Elixir.ThousandIsland.AcceptorPoolSupervisor', + 'Elixir.ThousandIsland.AcceptorSupervisor', + 'Elixir.ThousandIsland.Connection', + 'Elixir.ThousandIsland.Handler', + 'Elixir.ThousandIsland.HandlerConfig', + 'Elixir.ThousandIsland.Listener', + 'Elixir.ThousandIsland.Logger', + 'Elixir.ThousandIsland.ProcessLabel', + 'Elixir.ThousandIsland.Server', + 'Elixir.ThousandIsland.ServerConfig', + 'Elixir.ThousandIsland.ShutdownListener', + 'Elixir.ThousandIsland.Socket', + 'Elixir.ThousandIsland.Telemetry', + 'Elixir.ThousandIsland.Transport', + 'Elixir.ThousandIsland.Transports.SSL', + 
'Elixir.ThousandIsland.Transports.TCP']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir,logger,ssl,telemetry]}, + {description,"A simple & modern pure Elixir socket server"}, + {registered,[]}, + {vsn,"1.4.3"}]}. diff --git a/_build/dev/lib/unicode_util_compat/.mix/compile.fetch b/_build/dev/lib/unicode_util_compat/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/unicode_util_compat/ebin/string_compat.beam b/_build/dev/lib/unicode_util_compat/ebin/string_compat.beam new file mode 100644 index 0000000..02b4109 Binary files /dev/null and b/_build/dev/lib/unicode_util_compat/ebin/string_compat.beam differ diff --git a/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.app b/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.app new file mode 100644 index 0000000..af1ce26 --- /dev/null +++ b/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.app @@ -0,0 +1,11 @@ +{application,unicode_util_compat, + [{description,"unicode_util compatibility library for Erlang < 20"}, + {vsn,"0.7.1"}, + {registered,[]}, + {applications,[kernel,stdlib]}, + {env,[]}, + {modules,[string_compat,unicode_util_compat]}, + {licenses,["Apache 2.0"]}, + {links,[{"Github", + "https://github.com/benoitc/unicode_util_compat"}]}, + {files,["src","rebar.config","README.md","LICENSE"]}]}. diff --git a/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.beam b/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.beam new file mode 100644 index 0000000..49ae502 Binary files /dev/null and b/_build/dev/lib/unicode_util_compat/ebin/unicode_util_compat.beam differ diff --git a/_build/dev/lib/unicode_util_compat/mix.rebar.config b/_build/dev/lib/unicode_util_compat/mix.rebar.config new file mode 100644 index 0000000..6a1f1fe --- /dev/null +++ b/_build/dev/lib/unicode_util_compat/mix.rebar.config @@ -0,0 +1,3 @@ +{erl_opts,[debug_info,{platform_define,"^[2-9]",'OTP20'}]}. +{deps,[]}. +{overrides,[]}. 
diff --git a/_build/dev/lib/unicode_util_compat/src b/_build/dev/lib/unicode_util_compat/src new file mode 120000 index 0000000..ad4fae1 --- /dev/null +++ b/_build/dev/lib/unicode_util_compat/src @@ -0,0 +1 @@ +../../../../deps/unicode_util_compat/src \ No newline at end of file diff --git a/_build/dev/lib/websock/.mix/compile.elixir b/_build/dev/lib/websock/.mix/compile.elixir new file mode 100644 index 0000000..a9adb3c Binary files /dev/null and b/_build/dev/lib/websock/.mix/compile.elixir differ diff --git a/_build/dev/lib/websock/.mix/compile.elixir_scm b/_build/dev/lib/websock/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/websock/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/websock/.mix/compile.fetch b/_build/dev/lib/websock/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/websock/ebin/Elixir.WebSock.beam b/_build/dev/lib/websock/ebin/Elixir.WebSock.beam new file mode 100644 index 0000000..b3b212d Binary files /dev/null and b/_build/dev/lib/websock/ebin/Elixir.WebSock.beam differ diff --git a/_build/dev/lib/websock/ebin/websock.app b/_build/dev/lib/websock/ebin/websock.app new file mode 100644 index 0000000..cbc7dce --- /dev/null +++ b/_build/dev/lib/websock/ebin/websock.app @@ -0,0 +1,7 @@ +{application,websock, + [{modules,['Elixir.WebSock']}, + {optional_applications,[]}, + {applications,[kernel,stdlib,elixir]}, + {description,"A specification for WebSocket connections"}, + {registered,[]}, + {vsn,"0.5.3"}]}. 
diff --git a/_build/dev/lib/websock_adapter/.mix/compile.elixir b/_build/dev/lib/websock_adapter/.mix/compile.elixir new file mode 100644 index 0000000..9cf6688 Binary files /dev/null and b/_build/dev/lib/websock_adapter/.mix/compile.elixir differ diff --git a/_build/dev/lib/websock_adapter/.mix/compile.elixir_scm b/_build/dev/lib/websock_adapter/.mix/compile.elixir_scm new file mode 100644 index 0000000..c82d886 Binary files /dev/null and b/_build/dev/lib/websock_adapter/.mix/compile.elixir_scm differ diff --git a/_build/dev/lib/websock_adapter/.mix/compile.fetch b/_build/dev/lib/websock_adapter/.mix/compile.fetch new file mode 100644 index 0000000..e69de29 diff --git a/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeError.beam b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeError.beam new file mode 100644 index 0000000..28c8270 Binary files /dev/null and b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeError.beam differ diff --git a/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeValidation.beam b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeValidation.beam new file mode 100644 index 0000000..6ec22a8 Binary files /dev/null and b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.UpgradeValidation.beam differ diff --git a/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.beam b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.beam new file mode 100644 index 0000000..e951fa3 Binary files /dev/null and b/_build/dev/lib/websock_adapter/ebin/Elixir.WebSockAdapter.beam differ diff --git a/_build/dev/lib/websock_adapter/ebin/websock_adapter.app b/_build/dev/lib/websock_adapter/ebin/websock_adapter.app new file mode 100644 index 0000000..7293e80 --- /dev/null +++ b/_build/dev/lib/websock_adapter/ebin/websock_adapter.app @@ -0,0 +1,10 @@ +{application,websock_adapter, + [{modules,['Elixir.WebSockAdapter', + 'Elixir.WebSockAdapter.UpgradeError', + 
'Elixir.WebSockAdapter.UpgradeValidation']}, + {optional_applications,[bandit,plug_cowboy]}, + {applications,[kernel,stdlib,elixir,websock,plug,bandit, + plug_cowboy]}, + {description,"A set of WebSock adapters for common web servers"}, + {registered,[]}, + {vsn,"0.5.9"}]}. diff --git a/config/config.exs b/config/config.exs index b84ec96..7d51dd3 100644 --- a/config/config.exs +++ b/config/config.exs @@ -2,29 +2,22 @@ import Config config :centralcloud_my, CentralcloudMy.Endpoint, url: [host: "my.centralcloud.com"], - http: [port: 4001], - secret_key_base: System.fetch_env!("MY_SECRET_KEY_BASE") + http: [port: 4001] config :centralcloud_ops, CentralcloudOps.Endpoint, url: [host: "ops.centralcloud.com"], - http: [port: 4000], - secret_key_base: System.fetch_env!("OPS_SECRET_KEY_BASE") + http: [port: 4000] -# HostBill Admin API +# HostBill Admin API (defaults — overridden at runtime) config :centralcloud_core, :hostbill, - base_url: System.get_env("HOSTBILL_URL", "https://portal.centralcloud.com"), - api_id: System.fetch_env!("HOSTBILL_API_ID"), - api_key: System.fetch_env!("HOSTBILL_API_KEY") + base_url: "https://portal.centralcloud.com" -# DR Portal API +# DR Portal API (defaults — overridden at runtime) config :centralcloud_core, :dr_portal, - base_url: System.get_env("DR_PORTAL_URL", "https://dr.centralcloud.com"), - api_key: System.fetch_env!("DR_PORTAL_API_KEY") + base_url: "https://dr.centralcloud.com" -# Authentik OIDC +# Authentik OIDC (defaults — overridden at runtime) config :centralcloud_core, :oidc, - issuer: System.get_env("AUTHENTIK_ISSUER", "https://sso.centralcloud.com/application/o/centralcloud/"), - client_id: System.fetch_env!("OIDC_CLIENT_ID"), - client_secret: System.fetch_env!("OIDC_CLIENT_SECRET") + issuer: "https://sso.centralcloud.com/application/o/centralcloud/" import_config "#{config_env()}.exs" diff --git a/config/runtime.exs b/config/runtime.exs new file mode 100644 index 0000000..838a1d4 --- /dev/null +++ b/config/runtime.exs @@ -0,0 
+1,24 @@ +import Config + +# Runtime secrets — all required in production, optional in dev (defaults used) +if config_env() == :prod do + config :centralcloud_my, CentralcloudMy.Endpoint, + secret_key_base: System.fetch_env!("MY_SECRET_KEY_BASE") + + config :centralcloud_ops, CentralcloudOps.Endpoint, + secret_key_base: System.fetch_env!("OPS_SECRET_KEY_BASE") +end + +config :centralcloud_core, :hostbill, + base_url: System.get_env("HOSTBILL_URL", "https://portal.centralcloud.com"), + api_id: System.get_env("HOSTBILL_API_ID"), + api_key: System.get_env("HOSTBILL_API_KEY") + +config :centralcloud_core, :dr_portal, + base_url: System.get_env("DR_PORTAL_URL", "https://dr.centralcloud.com"), + api_key: System.get_env("DR_PORTAL_API_KEY") + +config :centralcloud_core, :oidc, + issuer: System.get_env("AUTHENTIK_ISSUER", "https://sso.centralcloud.com/application/o/centralcloud/"), + client_id: System.get_env("OIDC_CLIENT_ID"), + client_secret: System.get_env("OIDC_CLIENT_SECRET") diff --git a/deps/bandit/.hex b/deps/bandit/.hex new file mode 100644 index 0000000..1f0f1c8 Binary files /dev/null and b/deps/bandit/.hex differ diff --git a/deps/bandit/CHANGELOG.md b/deps/bandit/CHANGELOG.md new file mode 100644 index 0000000..4ef3948 --- /dev/null +++ b/deps/bandit/CHANGELOG.md @@ -0,0 +1,863 @@ +# 1.11.0 (1 May 2026) + +### Fixes + +* Fix WebSocket inflate vulnerability (CVE-2026-39804, commit 8156921, thanks @PJUllrich & @maennchen!) +* Fix WebSocket continuation frame handling vulnerability (CVE-2026-42786, commit 21612c7, thanks @PJUllrich & @maennchen!) +* Fix HTTP/2 frame size parsing vulnerability (CVE-2026-42788, commit 1e8e559, thanks @PJUllrich & @maennchen!) +* Improve handling of zero/negative length & offset parameters to send_file (#580, thanks @PJUllrich & @maennchen!) + +### Enhancements + +* Define a new `max_inflate_ratio` WebSocket configuration option that defines a + maximum allowable decompression ratio to help mitigate inflate bombing. 
Defaults to 25:1 +* Define a new `max_fragmented_message_size` WebSocket configuration option + which defines the maximum allowed WebSocket frame size (inclusive of + continuation frames). Defaults to 8MB + +# Changes + +* The default value of the `max_frame_size` WebSocket option has changed from `:infinity` to 8MB +* Zero length non-fin continuation frames are now disallowed (we now skip Autobahn 6.1.2 as a result) +* Multiple content-length fields in an HTTP/1 request are now disallowed (CVE-2026-39805, commit f2ca636, thanks @PJUllrich & @maennchen!) +* We now *only* use the underlying transport when determining scheme (CVE-2026-39807, commit 45feea2, thanks @PJUllrich & @maennchen!) + +## 1.10.4 (25 Mar 2026) + +### Enhancements + +* Support `{:shutdown, :disconnected}` as a normal WebSocket result code (#576, thanks @wwitek-whatnot!) + +## 1.10.3 (22 Feb 2026) + +### Enhancements + +* Support authority form requests for CONNECT requests (#571) +* Narrow acceptance of asterisk form requests to OPTIONS requests (#571) +* Detect client disconnect on timeout in ensure_completed (#566, thanks @pepicrft!) +* Improve http2 sendfile streaming (#565, thanks @elibosley!) + +## 1.10.2 (22 Jan 2026) + +### Enhancements + +* Distinguish client disconnects from genuine body read timeouts (#564, thanks @pepicrft!) + +## 1.10.1 (5 Jan 2026) + +### Changes + +* Change default preference order for compression methods to be 'zstd (if present), gzip, deflate' (#562) + +### Fixes + +* Allow `:zstd_options` key to be set in config (#558, thanks @Fudoshiki!) +* Fix error where deflate responses weren't always completely sent (#559, thanks @josevalim!) + +## 1.10.0 (29 Dec 2025) + +### Enhancements + +* Expose `response_encodings` to allow specifying an explicit preference order to compression encodings (#555) + +## 1.9.0 (12 Dec 2025) + +### Enhancements + +* Skip body draining when Connection: close is set (#546, thanks @pepicrft!) 
+* Make deflate options for WebSockets configurable (#540, thanks @proxima!) +* Mitigate HTTP/2 rapid reset attacks (#533, thanks @NelsonVides!) +* Implement improved respect for SETTINGS_MAX_CONCURRENT_STREAMS (#524, thanks @NelsonVides!) +* Support zstd HTTP compression (#514, thanks @mattmatters!) + +## 1.8.0 (18 Aug 2025) + +### Enhancements + +* If the user has set a `content-length` header when calling `send_chunked/3`, +the response is streamed via content-length delimited framing and not chunked (#510) + +## 1.7.0 (29 May 2025) + +### Enhancements + +* Add support for new `get_sock_data/1` and `get_ssl_data/1` callbacks from Plug 1.18 (#497) +* Honour server-sent `Connection: close` headers (#495, thanks @ruslandoga!) + +### Fixes + +* Don't overwrite non-default HTTP/2 settings when receiving HTTP/2 settings (#494, thanks @ns-blee!) +* Fix handling of early-connection error handling in HTTP/2 (#486) + +## 1.6.11 (31 Mar 2025) + +### Changes + +* Ensure that HTTP/1 request headers are sent to the Plug in the order they're +sent (#482) +* Do not populate the `cookies` header with an empty string if no cookies were +sent in HTTP/2 (#483) + +## 1.6.10 (25 Mar 2025) + +### Fixes + +* Fix bug introduced when closing compressed websock connections in certain circumstances (#478) + +### Enhancements + +* Standardize & document the format of messages sent to HTTP/2 Stream processes (#481) + +## 1.6.9 (21 Mar 2025) + +### Fixes + +* Do not close compression context before calling websock close callback (#462, + thanks @thiagopromano!) 
+ +## 1.6.8 (5 Mar 2025) + +### Fixes + +* Do not send stream WINDOW_UPDATEs on the last data frame of a stream + +### Enhancements + +* Add `status` to the telemetry metadata emitted on WebSocket upgrades (#466) + +## 1.6.7 (30 Jan 2025) + +### Changes + +* Consider timeouts when reading HTTP/1 headers as a connection error and not an HTTP error +* Enhance logging for WebSocket deflation errors + +## 1.6.6 (25 Jan 2025) + +### Fixes + +* Consider closures during HTTP/1 header reading as a socket error to silence them by default via `log_client_closures` config flag +* Send `connection: close` when closing connection on error per RFC9112§9.6 + +### Enhancements + +* Add experimental opt-in trace logging to help diagnose hard to reproduce errors +* Move CI to 1.18 & improve tests (#459, #461, thanks @grzuy!) + +## 1.6.5 (15 Jan 2025) + +### Fixes + +* Fix regression introduced in 1.6.1 where we would not send headers set by the Plug during WebSocket upgrades (#458) + +### Enhancements + +* Properly normalize Erlang errors before emitting telemetry and logged crash_reason (#455, thanks @grzuy!) + +## 1.6.4 (11 Jan 2025) + +### Fixes + +* Fix error in socket setup error handling introduced in 1.6.2 (thanks @danielspofford!) + +## 1.6.3 (8 Jan 2025) + +### Fixes + +* Always close HTTP/1 connection in any case where an error comes out of the plug (#452, thanks @zookzook!) 
+* Fix dialyzer warning introduced by Thousand Island 1.3.9 + +## 1.6.2 (4 Jan 2025) + +### Enhancements + +* Send telemetry events on Plugs that throw or exit (#443) +* Improve test robustness & speed (#446) +* Read a minimal number of bytes when sniffing for protocol (#449) +* Add `plug` and `websock` to logging metadata whenever possible (#448) +* Add `plug` and `websock` to telemetry metadata whenever possible (#447) +* Silently eat Bandit.TransportError errors during HTTP/1 error fallback handling + +### Fixes + +* Bump hpax to 1.0.2, fixes https://github.com/phoenixframework/phoenix/issues/6020 (thanks @krainboltgreene!) +* Fix cases where we would desync on pipelined POST requests (#442) + +### Changes + +* Unwrap Plug.Conn.WrapperErrors raised by Plug and handle the wrapped error per policy +* Surface socket setup errors as Bandit.TransportError for consistency in logging + +## 1.6.1 (6 Dec 2024) + +### Enhancements + +* Add deflate support when sending chunked responses (#429) + +### Fixes + +* Bring in updated HPAX to fix HTTP/2 error cases seen in AWS load balancing + environments (#392) +* Improve handle of pipelined HTTP/1.1 requests (#437) +* Improve error handling when dealing with socket errors (#433) + +### Changes + +* Use `Plug.Call.inform/2` to send websocket upgrades (#428) + +## 1.6.0 (18 Nov 2024) + +### Enhancements + +* Add framework for supporting optimized native code on various hot paths (#394, + thanks @alisinabh!) +* Pass conn and exception data as logger metadata (#417 & #420, thanks @grzuy!) +* Loosen hpax dependency requirements +* Add `log_client_closures` http option, defaulting to false (#397, thanks @goncalotomas!) +* Handle plugs that throw a result (#411, thanks @grzuy!) 
+ +### Fixes + +* Improve content-length send logic per RFC9110§8.6/8.7 +* Explicitly signal keepalives in HTTP/1.0 requests + +### Changes + +* Fix typo & clarify docs +* Update security policy + +## 1.5.7 (1 Aug 2024) + +### Changes + +* Timeouts encountered while reading a request body will now result in a `408 + Request Timeout` being returned to the client by way of a `Bandit.HTTPError` + being raised. Previously, a `:more` tuple was returned (#385, thanks + @martosaur!) + +## 1.5.6 (1 Aug 2024) + +### Fixes + +* Improve handling of the end of stream condition for HTTP/2 requests that send + a body which isn't read by the Plug (#387, thanks @fekle!) + +## 1.5.5 (19 Jun 2024) + +### Changes + +* Add `domain: [:bandit]` to the metadata of all logger calls +* Bring logging of early-connect HTTP2 errors under the `log_protocol_errors` umbrella + +## 1.5.4 (14 Jun 2024) + +### Changes + +* Raise HTTP/2 send window timeouts as stream errors so that they're logged as + protocol errors (thanks @hunterboerner!) + +## 1.5.3 (7 Jun 2024) + +### Changes + +* Add `:short` and `:verbose` options to `log_protocol_errors` configuration + option. **Change default value to `:short`, which will log protocol + errors as a single summary line instead of a full stack trace** +* Raise `Bandit.HTTPError` errors when attempting to write to a closed client + connection (except for chunk/2 calls, which now return `{:error, reason}`). + Unless otherwise caught by the user, these errors will bubble out past the + configured plug and terminate the plug process. This closely mimics the + behaviour of Cowboy in this regard (#359) +* Respect the plug-provided content-length on HEAD responses (#353, thanks + @meeq!) 
+* Minor changes to how 'non-system process dictionary entries' are identified + +### Fixes + +* No longer closes on HTTP/1 requests smaller than the size of the HTTP/2 + preamble +* Close deflate contexts more eagerly for reduced memory use + +## 1.5.2 (10 May 2024) + +### Fixes + +* Don't crash on non-stringable process dictionary keys (#350, thanks + @ryanwinchester, @chrismccord!) + +## 1.5.1 (10 May 2024) + +### Enhancements + +* Process dictionary is now cleared of all non-system process dictionary entries + between keepalive requests (#349) +* Explicitly run a GC before upgrading a connection to websocket (#348) +* Improve docs around deflate options (thanks @kotsius!) + +## 1.5.0 (21 Apr 2024) + +### Enhancements + +* Bandit now respects an exception's conformance to `Plug.Exception` when + determining which status code to return to the client (if the plug did not + already send one). Previously they were always returned as 500 (for HTTP/1) + or an 'internal error' stream error (for HTTP/2) +* Bandit now only logs the stacktrace of plug-generated exceptions whose status + code (as determined by `Plug.Exception.status/1`) is contained within the new + `log_exceptions_with_status_codes` configuration option (defaulting to + `500..599`) +* As a corollary to the above, Bandit request handler processes no longer exit + abnormally in the case of plug-generated exceptions + +### Changes + +* HTTP semantic errors encountered in an HTTP/2 request are returned to the + client using their proper status code instead of as a 'protocol error' stream + error + +## 1.4.2 (2 Apr 2024) + +### Enhancements + +* Support top-level :inet and :inet6 options for Plug.Cowboy compatibility (#337) + +## 1.4.1 (27 Mar 2024) + +### Changes + +* **BREAKING CHANGE** Move `log_protocol_errors` configuration option into + shared `http_options` top-level config (and apply it to HTTP/2 errors as well) +* **BREAKING CHANGE** Remove `origin_telemetry_span_context` from WebSocket + telemetry 
events +* **BREAKING CHANGE** Remove `stream_id` from HTTP/2 telemetry events +* Add `conn` to the metadata of telemetry start events for HTTP requests +* Stop sending WebSocket upgrade failure reasons to the client (they're still + logged) + +### Fixes + +* Return HTTP semantic errors to HTTP/2 clients as protocol errors instead of + internal errors + +## 1.4.0 (26 Mar 2024) + +> [!WARNING] +> **IMPORTANT** Phoenix users MUST upgrade to WebSockAdapter `0.5.6` or newer when +> upgrading to Bandit `1.4.0` or newer as some internal module names have changed + +### Enhancements + +* Complete refactor of HTTP/2. Improved process model is MUCH easier to + understand and yields about a 10% performance boost to HTTP/2 requests (#286 / + #307) +* Substantial refactor of the HTTP/1 and HTTP/2 stacks to share a common code + path for much of their implementations, with the protocol-specific parts being + factored out to a minimal `Bandit.HTTPTransport` protocol internally, which + allows each protocol to define its own implementation for the minimal set of + things that are different between the two stacks (#297 / #329) + +### Changes + +* **BREAKING CHANGE** Move configuration options that are common between HTTP/1 + and HTTP/2 stacks into a shared `http_options` top-level config +* **BREAKING CHANGE** The HTTP/2 header size limit options have been deprecated, + and have been replaced with a single `max_header_block_size` option. The setting + defaults to 50k bytes, and refers to the size of the compressed header block + as sent on the wire (including any continuation frames) +* **BREAKING CHANGE** Remove `req_line_bytes`, `req_header_bytes`, `resp_line_bytes` and + `resp_header_bytes` from HTTP/1 request telemetry measurements +* **BREAKING CHANGE** Remove `status`, `method` and `request_target` from + telemetry metadata. 
All of this information can be obtained from the `conn` + struct attached to most telemetry events +* **BREAKING CHANGE** Re-reading a body that has already been read returns `{:ok, + "", conn}` instead of raising a `Bandit.BodyAlreadyReadError` +* **BREAKING CHANGE** Remove `Bandit.BodyAlreadyReadError` +* **BREAKING CHANGE** Remove h2c support via Upgrade header. This was deprecated + in RFC9113 and never in widespread use. We continue to support h2c via prior + knowledge, which remains the only supported mechanism for h2c in RFC9113 +* Treat trailing bytes beyond the indicated content-length on HTTP/1 requests as + an error +* Surface request body read timeouts on HTTP/1 requests as `{:more...}` tuples + and not errors +* Socket sending errors are no longer surfaced on chunk sends in HTTP/1 +* We no longer log if processes that are linked to an HTTP/2 stream process + terminate unexpectedly. This has always been unspecified behaviour so is not + considered a breaking change +* Calls of `Plug.Conn` functions for an HTTP/2 connection must now come from the + stream process; any other process will raise an error. Again, this has always + been unspecified behaviour +* We now send an empty DATA frame for explicitly zero byte bodies instead of + optimizing to a HEADERS frame with end_stream set (we still do so for cases + such as 204/304 and HEAD requests) +* We now send RST_STREAM frames if we complete a stream and the remote end is + still open. This optimizes cases where the client may still be sending a body + that we never consumed and don't care about +* We no longer explicitly close the connection when we receive a GOAWAY frame + +## 1.3.0 (8 Mar 2024) + +### Enhancements + +* Run an explicit garbage collection between every 'n' keepalive requests on the same HTTP/1.1 connection in order to keep reported (but not actual!) memory usage from growing over time. Add `gc_every_n_keepalive_requests` option to configure this (default value of + `5`). 
#322, thanks @ianko & @Nilsonn!) +* Add `log_protocol_errors` option to optionally quell console logging of 4xx errors generated by Bandit. Defaults to `true` for now; may switch to `false` in the future based on adoption (#321, thanks @Stroemgren!) + +### Changes + +* Don't send a `transfer-encoding` header for 1xx or 204 responses (#317, thanks + @mwhitworth!) + +## 1.2.3 (23 Feb 2024) + +### Changes + +* Log port number when listen fails (#312, thanks @jonatanklosko!) +* Accept mixed-case keepalive directives (#308, thanks @gregors!) + +## 1.2.2 (16 Feb 2024) + +### Changes + +* Reset Logger metadata on every request + +## 1.2.1 (12 Feb 2024) + +### Changes + +* Disable logging of unknown messages received by an idle HTTP/1 handler to + avoid noise on long polling clients. This can be changed via the + `log_unknown_messages` http_1 option (#299) + +## 1.2.0 (31 Jan 2024) + +### Enhancements + +* Automatically pull in `:otp_app` value in Bandit.PhoenixAdapter (thanks + @krns!) +* Include response body metrics for HTTP/1 chunk responses + +### Fixes + +* Fix broken HTTP/1 inform/3 return value (thanks @wojtekmach!) +* Maintain HTTP/1 read timeout after receiving unknown messages + +## 1.1.3 (12 Jan 2024) + +### Fixes + +* Do not send a fallback response if the plug has already sent one (#288 & #289, thanks @jclem!) + +### Changes + +* Packagaing improvements (#283, thanks @wojtekmach!) + +## 1.1.2 (20 Dec 2023) + +### Fixes + +* Fix support for proplist-style arguments (#277, thanks @jjcarstens!) +* Speed up WebSocket framing (#272, thanks @crertel!) +* Fix off-by-one error in HTTP2 sendfile (#269, thanks @OrangeDrangon!) +* Improve mix file packaging (#266, thanks @patrickjaberg!) 
+ +## 1.1.1 (14 Nov 2023) + +### Fixes + +* Do not advertise disabled protocols via ALPN (#263) + +## 1.1.0 (2 Nov 2023) + +### Changes + +* Messages sent to Bandit HTTP/1 handlers no longer intentionally crash the + handler process but are now logged in the same manner as messages sent to a + no-op GenServer (#259) +* Messages regarding normal termination of monitored processes are no longer + handled by the WebSocket handler, but are now passed to the configured + `c:WebSock.handle_info/2` callback (#259) + +### Enhancements + +* Add support for `Phoenix.Endpoint.server_info/1` (now in Phoenix main; #258) +* Add support for `:max_heap_size` option in WebSocket handler (introduced in + websock_adapter 0.5.5; #255, thanks @v0idpwn!) + +## 1.0.0 (18 Oct 2023) + +### Changes + +* Remove internal tracking of remote `max_concurrent_streams` setting (#248) + +## 1.0.0-pre.18 (10 Oct 2023) + +### Fixes + +* Fix startup when plug module has not yet been loaded by the BEAM + +## 1.0.0-pre.17 (9 Oct 2023) + +### Enhancements + +* Support function based plugs & improve startup analysis of plug configuration + (#236) +* Improve keepalive support when Plug does not read request bodies (#244) +* Improve logic around not sending bodies on HEAD requests (#242) + +### Changes + +* Internal refactor of WebSocket validation (#229) + + +## 1.0.0-pre.16 (18 Sep 2023) + +### Changes + +* Use protocol default port in the event that no port is provided in host header (#228) + +### Fixes + +* Improve handling of iolist response bodies (#231, thanks @travelmassive!) + +## 1.0.0-pre.15 (9 Sep 2023) + +### Fixes + +* Fix issue with setting remote IP at connection startup (#227, thanks @jimc64!) 
+ +## 1.0.0-pre.14 (28 Aug 2023) + +### Enhancements + +* Add `Bandit.PhoenixAdapter.bandit_pid/2` (#212) +* Return errors to `Plug.Conn.Adapter.chunk/2` HTTP/1 calls (#216) + +### Changes + +* `Plug.Conn` function calls must come from the process on which `Plug.call/2` was called (#217, reverts #117) + +## 1.0.0-pre.13 (15 Aug 2023) + +### Enhancements + +* Add ability to send preamble frames when closing a WebSock connection (#211) + +## 1.0.0-pre.12 (12 Aug 2023) + +## Fixes + +* Bump ThousandIsland to 1.0.0-pre.7 to fix leaking file descriptors on + `Plug.Conn.sendfile/5` calls (thanks @Hermanverschooten!) + +## 1.0.0-pre.11 (11 Aug 2023) + +## Changes + +* **BREAKING CHANGE** Move `conn` value in telemetry events from measurements to metadata + +## Enhancements + +* Add `method`, `request_target` and `status` fields to telemetry metadata on HTTP stop events +* Improve RFC compliance regarding cache-related headers on deflated responses (#207, thanks @tanguilp!) +* Bump to Thousand Island `1.0.0-pre.6` +* Doc improvements (particularly around implementation notes) +* Typespec improvements (thanks @moogle19!) + +## 1.0.0-pre.10 (28 Jun 2023) + +## Enhancements + +* Add support for `Plug.Conn.inform/3` on HTTP/1 connections (#180) +* Add support for h2c upgrades (#186, thanks @alisinabh!) +* Internal refactoring of HTTP/1 content-length encoded body reads (#184, #190, + thanks @asakura & @moogle19!) + +## Changes + +* Bump Thousand Island to 1.0.0-pre.6 (gaining support for suspend/resume API) +* Drop Elixir 1.12 as a supported target (it should continue to work, but is no + longer covered by CI) + +## Fixes + +* Fix crash when Plug used `Plug.Conn.get_peer_data/1` function on HTTP/1 + connections (#170, thanks @moogle19!) +* Fix port behaviour when connecting over unix socket (#176, thanks @asakura + & @ibarchenkov!) + +## 1.0.0-pre.9 (16 Jun 2023) + +## Changes + +* Use new ThousandIsland APIs for socket info (#167, thanks @asakura!) 
+ +## Fixes + +* Handle nil connection close reason when closing a WebSocket + +## 1.0.0-pre.8 (15 Jun 2023) + +## Fixes + +* Further improve logging on WebSocket upgrade errors (#149) + +## 1.0.0-pre.7 (14 Jun 2023) + +## Enhancements + +* Refactor HTTP/1 read routines (#158 & #166, thanks @asakura!) +* Improve logging on WebSocket upgrade errors (#149) + +## Changes + +* Override any content-length headers that may have been set by Plug (#165) +* Send content-length on HTTP/2 responses where appropriate (#165) + +## Fixes + +* Send correct content-length header when sending deflated response (#151) +* Do not attempt to deflate if Plug sends a content-encoding header (#165) +* Improve corner case handling of content-length request header (#163, thanks + @ryanwinchester!) +* Handle case where ThousandIsland returns error tuples on some helper routines + (#162) + +## 1.0.0-pre.6 (8 Jun 2023) + +### Changes + +* Always use the declaed scheme if declared in a request-line or `:scheme` + pseudo-header (#159) +* Internal tidying (thanks @asakura!) + +## 1.0.0-pre.5 (2 Jun 2023) + +### Enhancements + +* Total overhaul of typespecs throughout the library (thanks @asakura!) + +## 1.0.0-pre.4 (23 May 2023) + +### Enhancements + +* Performance / correctness improvements to header length validation (#143, + thanks @moogle19!) +* Performance improvements to host header port parsing (#145 & #147, thanks + @ryanwinchester!) +* Improve WebSocket upgrade failure error messages to aid in diagnosis (#152) + +### Changes + +* Consolidate credo config (#146, thanks @ryanwinchester!) + +### Fixes + +* Fix error in suggested version dependencies during 1.0-pre series (#142, + thanks @cvkmohan!) 
+ +## 1.0.0-pre.3 (3 May 2023) + +### Enhancements + +* Respect read timeout for HTTP/1 keepalives (#140) +* Support Websock 0.5.1, including support for optional `c:WebSock.terminate/2` + (#131) + +### Changes + +* Use Req instead of Finch in tests (#137) +* Improve a few corner cases in tests (#136) + +## 1.0.0-pre.2 (24 Apr 2023) + +### Fixes + +* Don't require transport_options to be a keyword list (#130, thanks @justinludwig!) + +## 1.0.0-pre.1 (21 Apr 2023) + +### Changes + +* Update Thousand Island dependency to 1.0-pre + +# Changelog for 0.7.x + +## 0.7.7 (11 Apr 2023) + +### Changes + +* Bandit will now raise an error at startup if no plug is specified in config + (thanks @moogle19!) + +### Fixes + +* Fix crash at startup when using `otp_app` option (thanks @moogle19!) +* Minor doc formatting fixes + +## 0.7.6 (9 Apr 2023) + +### Changes + +* **BREAKING CHANGE** Rename top-level `options` field to `thousand_island_options` +* **BREAKING CHANGE** Rename `deflate_opts` to `deflate_options` where used +* Massive overhaul of documentation to use types where possible +* Bandit now uses a term of the form `{Bandit, ref()}` for `id` in our child spec +* Bumped to Thousand Island 0.6.7. `num_connections` is now 16384 by default + +### Enhancements + +* Added top level support for the following convenience parameters: + * `port` can now be set at the top level of your configuration + * `ip` can now be set at the top level of your configuration + * `keyfile` and `certfile` can now be set at the top level of your configuration +* Transport options are now validated by `Plug.SSL.configure/1` when starting + an HTTPS server +* Rely on Thousand Island to validate options specified in `thousand_island_options`. This should avoid cases like #125 in the future. 
+ +## 0.7.5 (4 Apr 2023) + +### Changes + +* Drop explicit support for Elixir 1.11 since we no longer test it in CI (should + still work, just that it's now at-your-own-risk) +* Add logo to ex_doc and README + +### Fixes + +* Allow access to Thousand Island's underlying `shutdown_timeout` option +* Fix test errors that cropped up in OTP 26 + + +## 0.7.4 (27 Mar 2023) + +### Changes + +* Calling `Plug.Conn` adapter functions for HTTP/2 based requests are no longer + restricted to being called from the process which called `c:Plug.call/2` + +### Enhancements + +* Added `startup_log` to control whether / how Bandit logs the bound host & port + at startup (Thanks @danschultzer) +* Improved logging when the configured port is in use at startup (Thanks + @danschultzer) +* Update to Thousand Island 0.6.5 + +## 0.7.3 (20 Mar 2023) + +### Enhancements + +* Added advanced `handler_module` configuration option to `options` + +### Fixes + +* Support returning `x-gzip` as negotiated `content-encoding` (previously would + negotiate a request for `x-gzip` as `gzip`) + +## 0.7.2 (18 Mar 2023) + +### Enhancements + +* Added HTTP compression via 'Content-Encoding' negotiation, enabled by default. + Configuration is available; see [Bandit + docs](https://hexdocs.pm/bandit/Bandit.html#module-config-options) for details + +### Changes + +* Minor refactor of internal HTTP/2 plumbing. No user visible changes + +## 0.7.1 (17 Mar 2023) + +### Changes + +* Update documentation & messaging to refer to RFC911x RFCs where appropriate +* Validate top-level config options at startup +* Revise Phoenix adapter to support new config options +* Doc updates + +## 0.7.0 (17 Mar 2023) + +### Enhancements + +* Add configuration points for various parameters within the HTTP/1, HTTP/2 and + WebSocket stacks. 
See [Bandit + docs](https://hexdocs.pm/bandit/Bandit.html#module-config-options) for details + +# Changelog for 0.6.x + +## 0.6.11 (17 Mar 2023) + +### Changes + +* Modified telemetry event payloads to match the conventions espoused by + `:telemetry.span/3` +* Default shutdown timeout is now 15s (up from 5s) + +### Enhancements + +* Update to Thosuand Island 0.6.4 (from 0.6.2) + +## 0.6.10 (10 Mar 2023) + +### Enhancements + +* Support explicit setting of WebSocket close codes & reasons as added in WebSock +0.5.0 + +## 0.6.9 (20 Feb 2023) + +### Enhancements + +* Add comprehensive Telemetry support within Bandit, as documented in the + `Bandit.Telemetry` module +* Update our ThousandIsland dependnecy to pull in Thousand Island's newly + updated Telemetry support as documented in the `ThousandIsland.Telemetry` + module +* Fix parsing of host / request headers which contain IPv6 addresses (#97). + Thanks @derekkraan! + +# Changes + +* Use Plug's list of response code reason phrases (#96). Thanks @jclem! 
+* Minor doc updates + +## 0.6.8 (31 Jan 2023) + +### Changes + +* Close WebSocket connections with a code of 1000 (instead of 1001) when + shutting down the server (#89) +* Use 100 acceptor processes by default (instead of 10) +* Improvements to make WebSocket frame masking faster + +## 0.6.7 (17 Jan 2023) + +### Enhancements + +* Remove logging entirely when client connections do not contain a valid protocol +* Refactor WebSocket support for about a 20% performance lift + +### Bug Fixes + +* Add `nodelay` option to test suite to fix artificially slow WebSocket perf tests + +## 0.6.6 (11 Jan 2023) + +### Enhancements + +* Log useful message when a TLS connection is made to plaintext server (#74) + +## 0.6.5 (10 Jan 2023) + +### Enhancements + +* Update Thousand Island to 0.5.15 (quiets logging in timeout cases) +* Quiet logging in when client connections do not contain a valid protocol +* Refactor HTTP/1 for about a 20% performance lift +* Add WebSocket support to CI benchmark workflow +* Doc updates + +### Bug Fixes + +* Allow multiple instances of Bandit to be started in the same node (#75) +* Improve error handling in HTTP/1 when protocol errors are encountered (#74) diff --git a/deps/bandit/LICENSE b/deps/bandit/LICENSE new file mode 100644 index 0000000..10c0f8e --- /dev/null +++ b/deps/bandit/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Mat Trudel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/deps/bandit/README.md b/deps/bandit/README.md new file mode 100644 index 0000000..6351b72 --- /dev/null +++ b/deps/bandit/README.md @@ -0,0 +1,245 @@ +![Bandit](https://github.com/mtrudel/bandit/raw/main/assets/readme_logo.png#gh-light-mode-only) +![Bandit](https://github.com/mtrudel/bandit/raw/main/assets/readme_logo-darkmode.png#gh-dark-mode-only) + +[![Build Status](https://github.com/mtrudel/bandit/workflows/Elixir%20CI/badge.svg)](https://github.com/mtrudel/bandit/actions) +[![Docs](https://img.shields.io/badge/api-docs-green.svg?style=flat)](https://hexdocs.pm/bandit) +[![Hex.pm](https://img.shields.io/hexpm/v/bandit.svg?style=flat&color=blue)](https://hex.pm/packages/bandit) + +Bandit is an HTTP server for Plug and WebSock apps. + +Bandit is written entirely in Elixir and is built atop [Thousand +Island](https://github.com/mtrudel/thousand_island). It can serve HTTP/1.x, +HTTP/2 and WebSocket clients over both HTTP and HTTPS. It is written with +correctness, clarity & performance as fundamental goals. It is the default HTTP +server for [Phoenix](https://github.com/phoenixframework/phoenix) since release 1.7.11 of the framework. + +In [ongoing automated performance +tests](https://github.com/mtrudel/bandit/actions/workflows/manual_benchmark.yml), +Bandit's HTTP/1.x engine is up to 4x faster than Cowboy depending on the number of concurrent +requests. When comparing HTTP/2 performance, Bandit is up to 1.5x faster than Cowboy. 
This is +possible because Bandit has been built from the ground up for use with Plug applications; this +focus pays dividends in both performance and also in the approachability of the code base. + +Bandit also emphasizes correctness. Its HTTP/2 implementation scores 100% on the +[h2spec](https://github.com/summerwind/h2spec) suite in strict mode, and its +WebSocket implementation scores 100% on the +[Autobahn](https://github.com/crossbario/autobahn-testsuite) test suite, both of +which run as part of Bandit's comprehensive CI suite. Extensive unit test, +credo, dialyzer, and performance regression test coverage round out a test suite +that ensures that Bandit is and will remain a platform you can count on. + +Lastly, Bandit exists to demystify the lower layers of infrastructure code. In a world where +The New Thing is nearly always adding abstraction on top of abstraction, it's important to have +foundational work that is approachable & understandable by users above it in the stack. + +## Project Goals + +* Implement comprehensive support for HTTP/1.0 through HTTP/2 & WebSockets (and + beyond) backed by obsessive RFC literacy and automated conformance testing +* Aim for minimal internal policy and HTTP-level configuration. Delegate to Plug & WebSock as much as + possible, and only interpret requests to the extent necessary to safely manage a connection + & fulfill the requirements of safely supporting protocol correctness +* Prioritize (in order): correctness, clarity, performance. 
Seek to remove the mystery of + infrastructure code by being approachable and easy to understand +* Along with our companion library [Thousand + Island](https://github.com/mtrudel/thousand_island), become the go-to HTTP + & low-level networking stack of choice for the Elixir community by being + reliable, efficient, and approachable + +## Project Status + +* Complete support for [Phoenix](https://github.com/phoenixframework/phoenix) applications (WebSocket + support requires Phoenix 1.7+) +* Complete support of the [Plug API](https://github.com/elixir-plug/plug) +* Complete support of the [WebSock API](https://github.com/phoenixframework/websock) +* Complete server support for HTTP/1.x as defined in [RFC + 9112](https://datatracker.ietf.org/doc/html/rfc9112) & [RFC + 9110](https://datatracker.ietf.org/doc/html/rfc9110) +* Complete server support for HTTP/2 as defined in [RFC + 9113](https://datatracker.ietf.org/doc/html/rfc9113) & [RFC + 9110](https://datatracker.ietf.org/doc/html/rfc9110), comprehensively covered + by automated [h2spec](https://github.com/summerwind/h2spec) conformance testing +* Support for HTTP content encoding compression on both HTTP/1.x and HTTP/2. + gzip and deflate methods are supported per + [RFC9110§8.4.1.{2,3}](https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4.1.2) +* Complete server support for WebSockets as defined in [RFC + 6455](https://datatracker.ietf.org/doc/html/rfc6455), comprehensively covered by automated + [Autobahn](https://github.com/crossbario/autobahn-testsuite) conformance testing. 
Per-message + compression as defined in [RFC 7692](https://datatracker.ietf.org/doc/html/rfc7692) is also + supported +* Extremely scalable and performant client handling at a rate up to 4x that of Cowboy for the same + workload with as-good-or-better memory use + +Any Phoenix or Plug app should work with Bandit as a drop-in replacement for +Cowboy; exceptions to this are errors (if you find one, please [file an +issue!](https://github.com/mtrudel/bandit/issues)). + + + +## Using Bandit With Phoenix + +Bandit fully supports Phoenix. Phoenix applications which use WebSockets for +features such as Channels or LiveView require Phoenix 1.7 or later. + +Using Bandit to host your Phoenix application couldn't be simpler: + +1. Add Bandit as a dependency in your Phoenix application's `mix.exs`: + + ```elixir + {:bandit, "~> 1.8"} + ``` +2. Add the following `adapter:` line to your endpoint configuration in `config/config.exs`, as in the following example: + + ```elixir + # config/config.exs + + config :your_app, YourAppWeb.Endpoint, + adapter: Bandit.PhoenixAdapter, # <---- ADD THIS LINE + url: [host: "localhost"], + render_errors: ... + ``` +3. That's it! **You should now see messages at startup indicating that Phoenix is + using Bandit to serve your endpoint**, and everything should 'just work'. Note + that if you have set any exotic configuration options within your endpoint, + you may need to update that configuration to work with Bandit; see the + [Bandit.PhoenixAdapter](https://hexdocs.pm/bandit/Bandit.PhoenixAdapter.html) + documentation for more information. + +## Using Bandit With Plug Applications + +Using Bandit to host your own Plug is very straightforward. 
Assuming you have +a Plug module implemented already, you can host it within Bandit by adding +something similar to the following to your application's `Application.start/2` +function: + +```elixir +# lib/my_app/application.ex + +defmodule MyApp.Application do + use Application + + def start(_type, _args) do + children = [ + {Bandit, plug: MyApp.MyPlug} + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end +end +``` + +For less formal usage, you can also start Bandit using the same configuration +options via the `Bandit.start_link/1` function: + +```elixir +# Start an http server on the default port 4000, serving MyApp.MyPlug +Bandit.start_link(plug: MyPlug) +``` + +## Configuration + +A number of options are defined when starting a server. The complete list is +defined by the [`t:Bandit.options/0`](https://hexdocs.pm/bandit/Bandit.html#summary) type. + +## Setting up an HTTPS Server + +By far the most common stumbling block encountered when setting up an HTTPS +server involves configuring key and certificate data. Bandit is comparatively +easy to set up in this regard, with a working example looking similar to the +following: + +```elixir +# lib/my_app/application.ex + +defmodule MyApp.Application do + use Application + + def start(_type, _args) do + children = [ + {Bandit, + plug: MyApp.MyPlug, + scheme: :https, + certfile: "/absolute/path/to/cert.pem", + keyfile: "/absolute/path/to/key.pem"} + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end +end +``` + +## WebSocket Support + +If you're using Bandit to run a Phoenix application as suggested above, there is +nothing more for you to do; WebSocket support will 'just work'. 
+ +If you wish to interact with WebSockets at a more fundamental level, the +[WebSock](https://hexdocs.pm/websock/WebSock.html) and +[WebSockAdapter](https://hexdocs.pm/websock_adapter/WebSockAdapter.html) libraries +provide a generic abstraction for WebSockets (very similar to how Plug is +a generic abstraction on top of HTTP). Bandit fully supports all aspects of +these libraries. + +## Receiving messages in your Plug process: A word of warning + +The Plug specification is concerned only with the shape of the `c:Plug.init/1` +and `c:Plug.call/2` functions; it says nothing about the process model that +underlies the call, nor about how the Plug function should respond to any +messages it may receive. Although it is occasionally necessary to receive +messages from within your Plug call, this must be done with caution as Bandit +makes extensive use of messaging internally, especially with HTTP/2 based +requests. + +In particular, you must ensure that your code *never* receives messages that +match the patterns `{:bandit, _}` or `{:plug_conn, :sent}`. Any `receive` calls +you make should be appropriately guarded to ensure that these messages remain in +the process' mailbox for Bandit to process them when required. + + + +## Implementation Details + +Bandit primarily consists of three protocol-specific implementations, one each +for [HTTP/1][], [HTTP/2][] and [WebSockets][]. Each of these implementations is +largely distinct from one another, and is described in its own README linked +above. + +If you're just taking a casual look at Bandit or trying to understand how an +HTTP server works, the [HTTP/1][] implementation is likely the best place to +start exploring. + +[HTTP/1]: lib/bandit/http1/README.md +[HTTP/2]: lib/bandit/http2/README.md +[WebSockets]: lib/bandit/websocket/README.md + +## Contributing + +Contributions to Bandit are very much welcome! 
Before undertaking any substantial work, please +open an issue on the project to discuss ideas and planned approaches so we can ensure we keep +progress moving in the same direction. + +All contributors must agree and adhere to the project's [Code of +Conduct](https://github.com/mtrudel/bandit/blob/main/CODE_OF_CONDUCT.md). + +Security disclosures should be handled per Bandit's published [security policy](https://github.com/mtrudel/bandit/blob/main/SECURITY.md). + +## Installation + +Bandit is [available in Hex](https://hex.pm/docs/publish). The package can be installed +by adding `bandit` to your list of dependencies in `mix.exs`: + +```elixir +def deps do + [ + {:bandit, "~> 1.8"} + ] +end +``` + +Documentation can be found at [https://hexdocs.pm/bandit](https://hexdocs.pm/bandit). + +# License + +MIT diff --git a/deps/bandit/hex_metadata.config b/deps/bandit/hex_metadata.config new file mode 100644 index 0000000..3853dda --- /dev/null +++ b/deps/bandit/hex_metadata.config @@ -0,0 +1,83 @@ +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/bandit/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/mtrudel/bandit">>}]}. +{<<"name">>,<<"bandit">>}. +{<<"version">>,<<"1.11.0">>}. +{<<"description">>, + <<"A pure-Elixir HTTP server built for Plug & WebSock apps">>}. +{<<"elixir">>,<<"~> 1.13">>}. +{<<"app">>,<<"bandit">>}. +{<<"licenses">>,[<<"MIT">>]}. 
+{<<"files">>, + [<<"lib">>,<<"lib/bandit">>,<<"lib/bandit/trace.ex">>, + <<"lib/bandit/application.ex">>,<<"lib/bandit/socket_helpers.ex">>, + <<"lib/bandit/extractor.ex">>,<<"lib/bandit/telemetry.ex">>, + <<"lib/bandit/http2">>,<<"lib/bandit/http2/README.md">>, + <<"lib/bandit/http2/settings.ex">>,<<"lib/bandit/http2/frame.ex">>, + <<"lib/bandit/http2/connection.ex">>, + <<"lib/bandit/http2/stream_process.ex">>, + <<"lib/bandit/http2/flow_control.ex">>,<<"lib/bandit/http2/handler.ex">>, + <<"lib/bandit/http2/errors.ex">>, + <<"lib/bandit/http2/stream_collection.ex">>,<<"lib/bandit/http2/frame">>, + <<"lib/bandit/http2/frame/settings.ex">>, + <<"lib/bandit/http2/frame/priority.ex">>, + <<"lib/bandit/http2/frame/goaway.ex">>, + <<"lib/bandit/http2/frame/window_update.ex">>, + <<"lib/bandit/http2/frame/push_promise.ex">>, + <<"lib/bandit/http2/frame/headers.ex">>, + <<"lib/bandit/http2/frame/ping.ex">>, + <<"lib/bandit/http2/frame/unknown.ex">>, + <<"lib/bandit/http2/frame/rst_stream.ex">>, + <<"lib/bandit/http2/frame/data.ex">>, + <<"lib/bandit/http2/frame/continuation.ex">>, + <<"lib/bandit/http2/stream.ex">>,<<"lib/bandit/http1">>, + <<"lib/bandit/http1/README.md">>,<<"lib/bandit/http1/handler.ex">>, + <<"lib/bandit/http1/socket.ex">>,<<"lib/bandit/compression.ex">>, + <<"lib/bandit/delegating_handler.ex">>,<<"lib/bandit/http_error.ex">>, + <<"lib/bandit/websocket">>,<<"lib/bandit/websocket/README.md">>, + <<"lib/bandit/websocket/frame.ex">>, + <<"lib/bandit/websocket/permessage_deflate.ex">>, + <<"lib/bandit/websocket/connection.ex">>, + <<"lib/bandit/websocket/handler.ex">>,<<"lib/bandit/websocket/socket.ex">>, + <<"lib/bandit/websocket/handshake.ex">>,<<"lib/bandit/websocket/frame">>, + <<"lib/bandit/websocket/frame/binary.ex">>, + <<"lib/bandit/websocket/frame/text.ex">>, + <<"lib/bandit/websocket/frame/connection_close.ex">>, + <<"lib/bandit/websocket/frame/ping.ex">>, + <<"lib/bandit/websocket/frame/pong.ex">>, + 
<<"lib/bandit/websocket/frame/continuation.ex">>, + <<"lib/bandit/websocket/upgrade_validation.ex">>, + <<"lib/bandit/pipeline.ex">>,<<"lib/bandit/clock.ex">>, + <<"lib/bandit/transport_error.ex">>,<<"lib/bandit/logger.ex">>, + <<"lib/bandit/headers.ex">>,<<"lib/bandit/initial_handler.ex">>, + <<"lib/bandit/phoenix_adapter.ex">>,<<"lib/bandit/adapter.ex">>, + <<"lib/bandit/primitive_ops">>,<<"lib/bandit/primitive_ops/websocket.ex">>, + <<"lib/bandit/http_transport.ex">>,<<"lib/bandit.ex">>,<<"mix.exs">>, + <<"README.md">>,<<"LICENSE">>,<<"CHANGELOG.md">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"thousand_island">>}, + {<<"app">>,<<"thousand_island">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"plug">>}, + {<<"app">>,<<"plug">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.18">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"websock">>}, + {<<"app">>,<<"websock">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.5">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"hpax">>}, + {<<"app">>,<<"hpax">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. +{<<"build_tools">>,[<<"mix">>]}. diff --git a/deps/bandit/lib/bandit.ex b/deps/bandit/lib/bandit.ex new file mode 100644 index 0000000..7a49124 --- /dev/null +++ b/deps/bandit/lib/bandit.ex @@ -0,0 +1,444 @@ +defmodule Bandit do + @external_resource Path.join([__DIR__, "../README.md"]) + + @moduledoc """ + Bandit is an HTTP server for Plug and WebSock apps. 
+ + As an HTTP server, Bandit's primary goal is to act as 'glue' between client connections managed + by [Thousand Island](https://github.com/mtrudel/thousand_island) and application code defined + via the [Plug](https://github.com/elixir-plug/plug) and/or + [WebSock](https://github.com/phoenixframework/websock) APIs. As such there really isn't a whole lot of + user-visible surface area to Bandit, and as a consequence the API documentation presented here + is somewhat sparse. This is by design! Bandit is intended to 'just work' in almost all cases; + the only thought users typically have to put into Bandit comes in the choice of which options (if + any) they would like to change when starting a Bandit server. The sparseness of the Bandit API + should not be taken as an indicator of the comprehensiveness or robustness of the project. + + #{@external_resource |> File.read!() |> String.split("") |> Enum.fetch!(1)} + """ + + @typedoc """ + Possible top-level options to configure a Bandit server + + * `plug`: The Plug to use to handle connections. Can be specified as `MyPlug` or `{MyPlug, plug_opts}` + * `scheme`: One of `:http` or `:https`. If `:https` is specified, you will also need to specify + valid `certfile` and `keyfile` values (or an equivalent value within + `thousand_island_options.transport_options`). Defaults to `:http` + * `port`: The TCP port to listen on. This option is offered as a convenience and actually sets + the option of the same name within `thousand_island_options`. If a string value is passed, it + will be parsed as an integer. Defaults to 4000 if `scheme` is `:http`, and 4040 if `scheme` is + `:https` + * `ip`: The interface(s) to listen on. This option is offered as a convenience and actually sets the + option of the same name within `thousand_island_options.transport_options`. 
Can be specified as: + * `{1, 2, 3, 4}` for IPv4 addresses + * `{1, 2, 3, 4, 5, 6, 7, 8}` for IPv6 addresses + * `:loopback` for local loopback (ie: `127.0.0.1`) + * `:any` for all interfaces (ie: `0.0.0.0`) + * `{:local, "/path/to/socket"}` for a Unix domain socket. If this option is used, the `port` + option *must* be set to `0` + * `inet`: Only bind to IPv4 interfaces. This option is offered as a convenience and actually sets the + option of the same name within `thousand_island_options.transport_options`. Must be specified + as a bare atom `:inet` + * `inet6`: Only bind to IPv6 interfaces. This option is offered as a convenience and actually sets the + option of the same name within `thousand_island_options.transport_options`. Must be specified + as a bare atom `:inet6` + * `keyfile`: The path to a file containing the SSL key to use for this server. This option is + offered as a convenience and actually sets the option of the same name within + `thousand_island_options.transport_options`. If a relative path is used here, you will also + need to set the `otp_app` parameter and ensure that the named file is part of your application + build + * `certfile`: The path to a file containing the SSL certificate to use for this server. This option is + offered as a convenience and actually sets the option of the same name within + `thousand_island_options.transport_options`. If a relative path is used here, you will also + need to set the `otp_app` parameter and ensure that the named file is part of your application + build + * `otp_app`: Provided as a convenience when using relative paths for `keyfile` and `certfile` + * `cipher_suite`: Used to define a pre-selected set of ciphers, as described by + `Plug.SSL.configure/1`. Optional, can be either `:strong` or `:compatible` + * `display_plug`: The plug to use when describing the connection in logs. 
Useful for situations + such as Phoenix code reloading where you have a 'wrapper' plug but wish to refer to the + connection by the endpoint name + * `startup_log`: The log level at which Bandit should log startup info. + Defaults to `:info` log level, can be set to false to disable it + * `thousand_island_options`: A list of options to pass to Thousand Island. Bandit sets some + default values in this list based on your top-level configuration; these values will be + overridden by values appearing here. A complete list can be found at + `t:ThousandIsland.options/0` + * `http_options`: A list of options to configure the shared aspects of Bandit's HTTP stack. A + complete list can be found at `t:http_options/0` + * `http_1_options`: A list of options to configure Bandit's HTTP/1 stack. A complete list can + be found at `t:http_1_options/0` + * `http_2_options`: A list of options to configure Bandit's HTTP/2 stack. A complete list can + be found at `t:http_2_options/0` + * `websocket_options`: A list of options to configure Bandit's WebSocket stack. 
A complete list can + be found at `t:websocket_options/0` + """ + @type options :: [ + {:plug, module() | {module(), Plug.opts()}} + | {:scheme, :http | :https} + | {:port, :inet.port_number()} + | {:ip, :inet.socket_address()} + | :inet + | :inet6 + | {:keyfile, binary()} + | {:certfile, binary()} + | {:otp_app, Application.app()} + | {:cipher_suite, :strong | :compatible} + | {:display_plug, module()} + | {:startup_log, Logger.level() | false} + | {:thousand_island_options, ThousandIsland.options()} + | {:http_options, http_options()} + | {:http_1_options, http_1_options()} + | {:http_2_options, http_2_options()} + | {:websocket_options, websocket_options()} + ] + + @typedoc """ + Options to configure shared aspects of the HTTP stack in Bandit + + * `compress`: Whether or not to attempt compression of responses via content-encoding + negotiation as described in + [RFC9110§8.4](https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4). Defaults to true + * `response_encodings`: A list of compression encodings, expressed in order of preference. + Defaults to `~w(zstd gzip x-gzip deflate)`, with `zstd` only being present on platforms which + have the zstd library compiled in + * `deflate_options`: A keyword list of options to set on the deflate library. A complete list can + be found at `t:deflate_options/0`. Note that these options only affect the behaviour of the + 'deflate' content encoding; 'gzip' does not have any configurable options (this is a + limitation of the underlying `:zlib` library) + * `zstd_options`: A map of options passed verbatim to :zstd, review the options [here](https://www.erlang.org/doc/apps/stdlib/zstd.html#t:compress_parameters/0) + * `log_exceptions_with_status_codes`: Which exceptions to log. Bandit will log only those + exceptions whose status codes (as determined by `Plug.Exception.status/1`) match the specified + list or range. Defaults to `500..599` + * `log_protocol_errors`: How to log protocol errors such as malformed requests. 
`:short` will + log a single-line summary, while `:verbose` will log full stack traces. The value of `false` + will disable protocol error logging entirely. Defaults to `:short` + * `log_client_closures`: How to log cases where the client closes the connection. These happen + routinely in the real world and so the handling of them is configured separately since they + can be quite noisy. Takes the same options as `log_protocol_errors`, but defaults to `false` + """ + @type http_options :: [ + {:compress, boolean()} + | {:response_encodings, list()} + | {:deflate_options, deflate_options()} + | {:zstd_options, zstd_options()} + | {:log_exceptions_with_status_codes, list() | Range.t()} + | {:log_protocol_errors, :short | :verbose | false} + | {:log_client_closures, :short | :verbose | false} + ] + + @typedoc """ + Options to configure the HTTP/1 stack in Bandit + + * `enabled`: Whether or not to serve HTTP/1 requests. Defaults to true + * `max_request_line_length`: The maximum permitted length of the request line + (expressed as the number of bytes on the wire) in an HTTP/1.1 request. Defaults to 10_000 bytes + * `max_header_length`: The maximum permitted length of any single header (combined + key & value, expressed as the number of bytes on the wire) in an HTTP/1.1 request. Defaults to 10_000 bytes + * `max_header_count`: The maximum permitted number of headers in an HTTP/1.1 request. + Defaults to 50 headers + * `max_requests`: The maximum number of requests to serve in a single + HTTP/1.1 connection before closing the connection. Defaults to 0 (no limit) + * `clear_process_dict`: Whether to clear the process dictionary of all non-internal entries + between subsequent keepalive requests. If set, all keys not starting with `$` are removed from + the process dictionary between requests. Defaults to `true` + * `gc_every_n_keepalive_requests`: How often to run a full garbage collection pass between subsequent + keepalive requests on the same HTTP/1.1 connection. 
Defaults to 5 (garbage collect between + every 5 requests). This option is currently experimental, and may change at any time + * `log_unknown_messages`: Whether or not to log unknown messages sent to the handler process. + Defaults to `false` + """ + @type http_1_options :: [ + {:enabled, boolean()} + | {:max_request_line_length, pos_integer()} + | {:max_header_length, pos_integer()} + | {:max_header_count, pos_integer()} + | {:max_requests, pos_integer()} + | {:clear_process_dict, boolean()} + | {:gc_every_n_keepalive_requests, pos_integer()} + | {:log_unknown_messages, boolean()} + ] + + @typedoc """ + Options to configure the HTTP/2 stack in Bandit + + * `enabled`: Whether or not to serve HTTP/2 requests. Defaults to true + * `max_header_block_size`: The maximum permitted length of a field block of an HTTP/2 request + (expressed as the number of compressed bytes). Includes any concatenated block fragments from + continuation frames. Defaults to 50_000 bytes + * `max_requests`: The maximum number of requests to serve in a single + HTTP/2 connection before closing the connection. Defaults to 0 (no limit) + * `max_reset_stream_rate`: The maximum rate of stream resets (RST_STREAM frames) allowed. + Specified as a tuple of `{count, milliseconds}` where `count` is the maximum number of + RST_STREAM frames allowed within the time window of `milliseconds`. Defaults to `{500, 10_000}` + (500 resets per 10 seconds). Setting this to `nil` disables rate limiting + * `sendfile_chunk_size`: The maximum number of bytes read per sendfile chunk when streaming + HTTP/2 responses. Defaults to 1_048_576 (1 MiB) + * `default_local_settings`: Options to override the default values for local HTTP/2 + settings. 
Values provided here will override the defaults specified in RFC9113§6.5.2 + """ + @type http_2_options :: [ + {:enabled, boolean()} + | {:max_header_block_size, pos_integer()} + | {:max_requests, pos_integer()} + | {:max_reset_stream_rate, {pos_integer(), pos_integer()} | nil} + | {:sendfile_chunk_size, pos_integer()} + | {:default_local_settings, keyword()} + ] + + @typedoc """ + Options to configure the WebSocket stack in Bandit + + * `enabled`: Whether or not to serve WebSocket upgrade requests. Defaults to true + * `max_frame_size`: The maximum size of a single WebSocket frame (expressed as + a number of bytes on the wire). Use a value of 0 for no limit. Defaults to 8_000_000 + * `max_fragmented_message_size`: The maximum size of a WebSocket message delivered across + multiple continuation frames (expressed as a number of bytes on the wire). Does NOT affect the + handling of single-frame messages. Use a value of 0 for no limit. Defaults to 8_000_000 + * `validate_text_frames`: Whether or not to validate text frames as being UTF-8. Strictly + speaking this is required per RFC6455§5.6, however it can be an expensive operation and one + that may be safely skipped in some situations. Defaults to true + * `compress`: Whether or not to allow per-message deflate compression globally. Note that + upgrade requests still need to set the `compress: true` option in `connection_opts` on + a per-upgrade basis for compression to be negotiated (see 'WebSocket Support' section below + for details). Defaults to `true` + * `deflate_options`: A keyword list of options to set on the deflate library when using the + per-message deflate extension. A complete list can be found at `t:deflate_options/0`. + `window_bits` is currently ignored and left to negotiation. + * `max_inflate_ratio`: The maximum allowable ratio to allow decompression of received WebSocket + messages. Intended to prevent 'inflate bomb' attacks where a tiny deflated messages inflates to + a massive one. 
Defaults to `25` representing a 25:1 allowable inflation ratio. + """ + @type websocket_options :: [ + {:enabled, boolean()} + | {:max_frame_size, pos_integer()} + | {:max_fragmented_message_size, pos_integer()} + | {:validate_text_frames, boolean()} + | {:compress, boolean()} + | {:deflate_options, deflate_options()} + | {:max_inflate_ratio, pos_integer()} + ] + + @typedoc """ + Options to configure the deflate library used for HTTP and WebSocket compression + """ + @type deflate_options :: [ + {:level, :zlib.zlevel()} + | {:window_bits, :zlib.zwindowbits()} + | {:memory_level, :zlib.zmemlevel()} + | {:strategy, :zlib.zstrategy()} + ] + + @typedoc """ + Options to configure the zstd library used for HTTP compression + """ + @type zstd_options :: map + + @typep scheme :: :http | :https + + require Logger + + @doc false + @spec child_spec(options()) :: Supervisor.child_spec() + def child_spec(arg) do + %{ + id: {__MODULE__, make_ref()}, + start: {__MODULE__, :start_link, [arg]}, + type: :supervisor, + restart: :permanent + } + end + + @top_level_keys ~w(plug scheme port ip keyfile certfile otp_app cipher_suite display_plug startup_log thousand_island_options http_options http_1_options http_2_options websocket_options)a + @http_keys ~w(compress response_encodings deflate_options zstd_options log_exceptions_with_status_codes log_protocol_errors log_client_closures)a + @http_1_keys ~w(enabled max_request_line_length max_header_length max_header_count max_requests clear_process_dict gc_every_n_keepalive_requests log_unknown_messages)a + @http_2_keys ~w(enabled max_header_block_size max_requests max_reset_stream_rate sendfile_chunk_size default_local_settings)a + @websocket_keys ~w(enabled max_frame_size max_fragmented_message_size validate_text_frames compress deflate_options max_inflate_ratio primitive_ops_module)a + @thousand_island_keys ThousandIsland.ServerConfig.__struct__() + |> Map.from_struct() + |> Map.keys() + + @doc """ + Starts a Bandit server using the 
provided arguments. See `t:options/0` for specific options to + pass to this function. + """ + @spec start_link(options()) :: Supervisor.on_start() + def start_link(arg) do + # Special case top-level `:inet` and `:inet6` options so we can use keyword logic everywhere else + arg = arg |> special_case_inet_options() |> validate_options(@top_level_keys, "top level") + + thousand_island_options = + Keyword.get(arg, :thousand_island_options, []) + |> validate_options(@thousand_island_keys, :thousand_island_options) + + http_options = + Keyword.get(arg, :http_options, []) + |> validate_options(@http_keys, :http_options) + + http_1_options = + Keyword.get(arg, :http_1_options, []) + |> validate_options(@http_1_keys, :http_1_options) + + http_2_options = + Keyword.get(arg, :http_2_options, []) + |> validate_options(@http_2_keys, :http_2_options) + + websocket_options = + Keyword.get(arg, :websocket_options, []) + |> validate_options(@websocket_keys, :websocket_options) + + {plug_mod, _} = plug = plug(arg) + display_plug = Keyword.get(arg, :display_plug, plug_mod) + startup_log = Keyword.get(arg, :startup_log, :info) + + {http_1_enabled, http_1_options} = Keyword.pop(http_1_options, :enabled, true) + {http_2_enabled, http_2_options} = Keyword.pop(http_2_options, :enabled, true) + + handler_options = %{ + plug: plug, + handler_module: Bandit.InitialHandler, + opts: %{ + http: http_options, + http_1: http_1_options, + http_2: http_2_options, + websocket: websocket_options + }, + http_1_enabled: http_1_enabled, + http_2_enabled: http_2_enabled + } + + scheme = Keyword.get(arg, :scheme, :http) + + {transport_module, transport_options, default_port} = + case scheme do + :http -> + transport_options = + Keyword.take(arg, [:ip]) + |> then(&(Keyword.get(thousand_island_options, :transport_options, []) ++ &1)) + + {ThousandIsland.Transports.TCP, transport_options, 4000} + + :https -> + supported_protocols = + if(http_2_enabled, do: ["h2"], else: []) ++ + if http_1_enabled, do: 
["http/1.1"], else: [] + + transport_options = + Keyword.take(arg, [:ip, :keyfile, :certfile, :otp_app, :cipher_suite]) + |> Keyword.merge(alpn_preferred_protocols: supported_protocols) + |> then(&(Keyword.get(thousand_island_options, :transport_options, []) ++ &1)) + |> Plug.SSL.configure() + |> case do + {:ok, options} -> options + {:error, message} -> raise "Plug.SSL.configure/1 encountered error: #{message}" + end + |> Enum.reject(&(is_tuple(&1) and elem(&1, 0) == :otp_app)) + + {ThousandIsland.Transports.SSL, transport_options, 4040} + end + + port = Keyword.get(arg, :port, default_port) |> parse_as_number() + + thousand_island_options + |> Keyword.put_new(:port, port) + |> Keyword.put_new(:transport_module, transport_module) + |> Keyword.put(:transport_options, transport_options) + |> Keyword.put_new(:handler_module, Bandit.DelegatingHandler) + |> Keyword.put_new(:handler_options, handler_options) + |> ThousandIsland.start_link() + |> case do + {:ok, pid} -> + startup_log && + Logger.log(startup_log, info(scheme, display_plug, pid), domain: [:bandit], plug: plug) + + {:ok, pid} + + {:error, {:shutdown, {:failed_to_start_child, :listener, :eaddrinuse}}} = error -> + Logger.error([info(scheme, display_plug, nil), " failed, port #{port} already in use"], + domain: [:bandit], + plug: plug + ) + + error + + {:error, _} = error -> + error + end + end + + @spec special_case_inet_options(options()) :: options() + defp special_case_inet_options(opts) do + {inet_opts, opts} = Enum.split_with(opts, &(&1 in [:inet, :inet6])) + + if inet_opts == [] do + opts + else + Keyword.update( + opts, + :thousand_island_options, + [transport_options: inet_opts], + fn thousand_island_opts -> + Keyword.update(thousand_island_opts, :transport_options, inet_opts, &(&1 ++ inet_opts)) + end + ) + end + end + + @spec validate_options(Keyword.t(), [atom(), ...], String.t() | atom()) :: + Keyword.t() | no_return() + defp validate_options(options, valid_values, name) do + case 
Keyword.split(options, valid_values) do + {options, []} -> + options + + {_, illegal_options} -> + raise "Unsupported key(s) in #{name} config: #{inspect(Keyword.keys(illegal_options))}" + end + end + + @spec plug(options()) :: {module(), Plug.opts()} + defp plug(arg) do + arg + |> Keyword.get(:plug) + |> case do + nil -> raise "A value is required for :plug" + {plug_fn, plug_options} when is_function(plug_fn, 2) -> {plug_fn, plug_options} + plug_fn when is_function(plug_fn) -> {plug_fn, []} + {plug, plug_options} when is_atom(plug) -> validate_plug(plug, plug_options) + plug when is_atom(plug) -> validate_plug(plug, []) + other -> raise "Invalid value for plug: #{inspect(other)}" + end + end + + defp validate_plug(plug, plug_options) do + Code.ensure_loaded!(plug) + if !function_exported?(plug, :init, 1), do: raise("plug module does not define init/1") + if !function_exported?(plug, :call, 2), do: raise("plug module does not define call/2") + + {plug, plug.init(plug_options)} + end + + @spec parse_as_number(binary() | integer()) :: integer() + defp parse_as_number(value) when is_binary(value), do: String.to_integer(value) + defp parse_as_number(value) when is_integer(value), do: value + + @spec info(scheme(), module(), nil | pid()) :: String.t() + defp info(scheme, plug, pid) do + server_vsn = Application.spec(:bandit)[:vsn] + "Running #{inspect(plug)} with Bandit #{server_vsn} at #{bound_address(scheme, pid)}" + end + + @spec bound_address(scheme(), nil | pid()) :: String.t() | scheme() + defp bound_address(scheme, nil), do: scheme + + defp bound_address(scheme, pid) do + {:ok, {address, port}} = ThousandIsland.listener_info(pid) + + case address do + :local -> "#{_unix_path = port} (#{scheme}+unix)" + :undefined -> "#{inspect(port)} (#{scheme}+undefined)" + :unspec -> "unspec (#{scheme})" + address -> "#{:inet.ntoa(address)}:#{port} (#{scheme})" + end + end +end diff --git a/deps/bandit/lib/bandit/adapter.ex b/deps/bandit/lib/bandit/adapter.ex new file mode 
100644 index 0000000..7272924 --- /dev/null +++ b/deps/bandit/lib/bandit/adapter.ex @@ -0,0 +1,297 @@ +defmodule Bandit.Adapter do + @moduledoc false + # Implements the Plug-facing `Plug.Conn.Adapter` behaviour. These functions provide the primary + # mechanism for Plug applications to interact with a client, including functions to read the + # client body (if sent) and send response information back to the client. The concerns in this + # module are broadly about the semantics of HTTP in general, and less about transport-specific + # concerns, which are managed by the underlying `Bandit.HTTPTransport` implementation + + @behaviour Plug.Conn.Adapter + @already_sent {:plug_conn, :sent} + + defstruct transport: nil, + owner_pid: nil, + method: nil, + status: nil, + content_encoding: nil, + compression_context: nil, + upgrade: nil, + metrics: %{}, + opts: [] + + @typedoc "A struct for backing a Plug.Conn.Adapter" + @type t :: %__MODULE__{ + transport: Bandit.HTTPTransport.t(), + owner_pid: pid() | nil, + method: Plug.Conn.method() | nil, + status: Plug.Conn.status() | nil, + content_encoding: String.t(), + compression_context: Bandit.Compression.t() | nil, + upgrade: nil | {:websocket, opts :: keyword(), websocket_opts :: keyword()}, + metrics: %{}, + opts: %{ + required(:http) => Bandit.http_options(), + required(:websocket) => Bandit.websocket_options() + } + } + + def init(owner_pid, transport, method, headers, opts) do + content_encoding = + Bandit.Compression.negotiate_content_encoding( + Bandit.Headers.get_header(headers, "accept-encoding"), + opts.http + ) + + %__MODULE__{ + transport: transport, + owner_pid: owner_pid, + method: method, + content_encoding: content_encoding, + metrics: %{req_header_end_time: Bandit.Telemetry.monotonic_time()}, + opts: opts + } + end + + @impl Plug.Conn.Adapter + def read_req_body(%__MODULE__{} = adapter, opts) do + validate_calling_process!(adapter) + + metrics = + adapter.metrics + |> Map.put_new_lazy(:req_body_start_time, 
&Bandit.Telemetry.monotonic_time/0) + + case Bandit.HTTPTransport.read_data(adapter.transport, opts) do + {:ok, body, transport} -> + body = IO.iodata_to_binary(body) + + metrics = + metrics + |> Map.update(:req_body_bytes, byte_size(body), &(&1 + byte_size(body))) + |> Map.put(:req_body_end_time, Bandit.Telemetry.monotonic_time()) + + {:ok, body, %{adapter | transport: transport, metrics: metrics}} + + {:more, body, transport} -> + body = IO.iodata_to_binary(body) + + metrics = + metrics + |> Map.update(:req_body_bytes, byte_size(body), &(&1 + byte_size(body))) + + {:more, body, %{adapter | transport: transport, metrics: metrics}} + end + end + + ################## + # Response Sending + ################## + + @impl Plug.Conn.Adapter + def send_resp(%__MODULE__{} = adapter, status, headers, body) do + validate_calling_process!(adapter) + start_time = Bandit.Telemetry.monotonic_time() + + # Save an extra iodata_length by checking common cases + empty_body? = Bandit.SocketHelpers.iodata_empty?(body) + {headers, compression_context} = Bandit.Compression.new(adapter, status, headers, empty_body?) 
+ + {compress_chunk, compression_context} = + Bandit.Compression.compress_chunk(body, compression_context) + + {close_chunk, compression_metrics} = Bandit.Compression.close(compression_context) + + encoded_body = [compress_chunk | close_chunk] + encoded_length = IO.iodata_length(encoded_body) + headers = Bandit.Headers.add_content_length(headers, encoded_length, status, adapter.method) + + metrics = + adapter.metrics + |> Map.put(:resp_start_time, start_time) + |> Map.merge(compression_metrics) + + adapter = + %{adapter | metrics: metrics} + |> send_headers(status, headers, :raw) + |> send_data(encoded_body, true) + + send(adapter.owner_pid, @already_sent) + {:ok, nil, adapter} + end + + @impl Plug.Conn.Adapter + def send_file( + %__MODULE__{} = adapter, + status, + headers, + path, + offset, + length + ) do + if offset < 0, do: raise("Offset cannot be negative") + if is_number(length) && length <= 0, do: raise("Length cannot be zero or negative") + + validate_calling_process!(adapter) + start_time = Bandit.Telemetry.monotonic_time() + {:ok, fileinfo} = :file.read_file_info(path, [:raw, time: :universal]) + %File.Stat{type: :regular, size: size} = File.Stat.from_record(fileinfo) + length = if length == :all, do: size - offset, else: length + + if offset + length <= size do + headers = Bandit.Headers.add_content_length(headers, length, status, adapter.method) + adapter = send_headers(adapter, status, headers, :raw) + + {socket, bytes_actually_written} = + if send_resp_body?(adapter), + do: {Bandit.HTTPTransport.sendfile(adapter.transport, path, offset, length), length}, + else: {adapter.transport, 0} + + metrics = + adapter.metrics + |> Map.put(:resp_body_bytes, bytes_actually_written) + |> Map.put(:resp_start_time, start_time) + |> Map.put(:resp_end_time, Bandit.Telemetry.monotonic_time()) + + send(adapter.owner_pid, @already_sent) + {:ok, nil, %{adapter | transport: socket, metrics: metrics}} + else + raise "Cannot read #{length} bytes starting at #{offset} as 
#{path} is only #{size} octets in length" + end + end + + @impl Plug.Conn.Adapter + def send_chunked(%__MODULE__{} = adapter, status, headers) do + validate_calling_process!(adapter) + start_time = Bandit.Telemetry.monotonic_time() + metrics = Map.put(adapter.metrics, :resp_start_time, start_time) + + {headers, compression_context} = Bandit.Compression.new(adapter, status, headers, false, true) + adapter = %{adapter | metrics: metrics, compression_context: compression_context} + send(adapter.owner_pid, @already_sent) + {:ok, nil, send_headers(adapter, status, headers, :chunk_encoded)} + end + + @impl Plug.Conn.Adapter + def chunk(%__MODULE__{} = adapter, chunk) do + # Sending an empty chunk implicitly ends the response. This is a bit of an undefined corner of + # the Plug.Conn.Adapter behaviour (see https://github.com/elixir-plug/plug/pull/535 for + # details) and ending the response here carves closest to the underlying HTTP/1.1 behaviour + # (RFC9112§7.1). Since there is no notion of chunked encoding is in HTTP/2 anyway (RFC9113§8.1) + # this entire section of the API is a bit slanty regardless. + + validate_calling_process!(adapter) + + # chunk/2 is unique among Plug.Conn.Adapter's sending callbacks in that it can return an error + # tuple instead of just raising or dying on error. 
Rescue here to implement this + try do + if Bandit.SocketHelpers.iodata_empty?(chunk) do + {encoded_chunk, compression_metrics} = + Bandit.Compression.close(adapter.compression_context) + + adapter = %{adapter | metrics: Map.merge(adapter.metrics, compression_metrics)} + + adapter = + if encoded_chunk != [] do + send_data(adapter, encoded_chunk, false) + else + adapter + end + + {:ok, nil, send_data(adapter, "", true)} + else + {encoded_chunk, compression_context} = + Bandit.Compression.compress_chunk(chunk, adapter.compression_context) + + adapter = %{adapter | compression_context: compression_context} + {:ok, nil, send_data(adapter, encoded_chunk, false)} + end + rescue + error in Bandit.TransportError -> {:error, error.error} + error -> {:error, Exception.message(error)} + end + end + + @impl Plug.Conn.Adapter + def inform(%__MODULE__{} = adapter, status, headers) do + validate_calling_process!(adapter) + # It's a bit weird to be casing on the underlying version here, but whether or not to send + # an informational response is actually defined in RFC9110§15.2 so we consider it as an aspect + # of semantics that belongs here and not in the underlying transport + if get_http_protocol(adapter) == :"HTTP/1.0" do + {:error, :not_supported} + else + # inform/3 is unique in that headers comes in as a keyword list + headers = Enum.map(headers, fn {header, value} -> {to_string(header), value} end) + {:ok, send_headers(adapter, status, headers, :inform)} + end + end + + defp send_headers(adapter, status, headers, body_disposition) do + headers = + if is_nil(Bandit.Headers.get_header(headers, "date")) do + [Bandit.Clock.date_header() | headers] + else + headers + end + + adapter = %{adapter | status: status} + + body_disposition = if send_resp_body?(adapter), do: body_disposition, else: :no_body + + socket = + Bandit.HTTPTransport.send_headers(adapter.transport, status, headers, body_disposition) + + %{adapter | transport: socket} + end + + defp send_data(adapter, data, 
end_request) do + socket = + if send_resp_body?(adapter), + do: Bandit.HTTPTransport.send_data(adapter.transport, data, end_request), + else: adapter.transport + + data_size = IO.iodata_length(data) + metrics = Map.update(adapter.metrics, :resp_body_bytes, data_size, &(&1 + data_size)) + + metrics = + if end_request, + do: Map.put(metrics, :resp_end_time, Bandit.Telemetry.monotonic_time()), + else: metrics + + %{adapter | transport: socket, metrics: metrics} + end + + defp send_resp_body?(%{method: "HEAD"}), do: false + defp send_resp_body?(%{status: 204}), do: false + defp send_resp_body?(%{status: 304}), do: false + defp send_resp_body?(_adapter), do: true + + @impl Plug.Conn.Adapter + def upgrade(%__MODULE__{} = adapter, protocol, opts) do + if Keyword.get(adapter.opts.websocket, :enabled, true) && + Bandit.HTTPTransport.supported_upgrade?(adapter.transport, protocol), + do: {:ok, %{adapter | upgrade: {protocol, opts, adapter.opts.websocket}}}, + else: {:error, :not_supported} + end + + @impl Plug.Conn.Adapter + def push(_adapter, _path, _headers), do: {:error, :not_supported} + + @impl Plug.Conn.Adapter + def get_peer_data(%__MODULE__{} = adapter), + do: Bandit.HTTPTransport.peer_data(adapter.transport) + + @impl Plug.Conn.Adapter + def get_sock_data(%__MODULE__{} = adapter), + do: Bandit.HTTPTransport.sock_data(adapter.transport) + + @impl Plug.Conn.Adapter + def get_ssl_data(%__MODULE__{} = adapter), + do: Bandit.HTTPTransport.ssl_data(adapter.transport) + + @impl Plug.Conn.Adapter + def get_http_protocol(%__MODULE__{} = adapter), + do: Bandit.HTTPTransport.version(adapter.transport) + + defp validate_calling_process!(%{owner_pid: owner}) when owner == self(), do: :ok + defp validate_calling_process!(_), do: raise("Adapter functions must be called by stream owner") +end diff --git a/deps/bandit/lib/bandit/application.ex b/deps/bandit/lib/bandit/application.ex new file mode 100644 index 0000000..4786f34 --- /dev/null +++ b/deps/bandit/lib/bandit/application.ex 
@@ -0,0 +1,14 @@ +defmodule Bandit.Application do + @moduledoc false + + use Application + + @impl Application + @spec start(Application.start_type(), start_args :: term) :: + {:ok, pid} + | {:error, {:already_started, pid} | {:shutdown, term} | term} + def start(_type, _args) do + children = [Bandit.Clock] + Supervisor.start_link(children, strategy: :one_for_one) + end +end diff --git a/deps/bandit/lib/bandit/clock.ex b/deps/bandit/lib/bandit/clock.ex new file mode 100644 index 0000000..64dcb4b --- /dev/null +++ b/deps/bandit/lib/bandit/clock.ex @@ -0,0 +1,56 @@ +defmodule Bandit.Clock do + @moduledoc false + # Task which updates an ETS table with the current pre-formatted HTTP header + # timestamp once a second. This saves the individual request processes from + # having to construct this themselves, since it is a surprisingly expensive + # operation + + use Task, restart: :permanent + + require Logger + + @doc """ + Returns the current timestamp according to RFC9110§5.6.7. + + If the timestamp doesn't exist in the ETS table or the table doesn't exist + the timestamp is newly created for every request + """ + @spec date_header() :: {header :: binary(), date :: binary()} + def date_header do + date = + try do + :ets.lookup_element(__MODULE__, :date_header, 2) + rescue + ArgumentError -> + Logger.warning("Header timestamp couldn't be fetched from ETS cache", domain: [:bandit]) + get_date_header() + end + + {"date", date} + end + + @spec start_link(any()) :: {:ok, pid()} + def start_link(_opts) do + Task.start_link(__MODULE__, :init, []) + end + + @spec init :: no_return() + def init do + __MODULE__ = :ets.new(__MODULE__, [:set, :protected, :named_table, {:read_concurrency, true}]) + + run() + end + + @spec run() :: no_return() + defp run do + _ = update_header() + Process.sleep(1_000) + run() + end + + @spec get_date_header() :: String.t() + defp get_date_header, do: Calendar.strftime(DateTime.utc_now(), "%a, %d %b %Y %X GMT") + + @spec update_header() :: true + 
defp update_header, do: :ets.insert(__MODULE__, {:date_header, get_date_header()}) +end diff --git a/deps/bandit/lib/bandit/compression.ex b/deps/bandit/lib/bandit/compression.ex new file mode 100644 index 0000000..24c03ce --- /dev/null +++ b/deps/bandit/lib/bandit/compression.ex @@ -0,0 +1,168 @@ +defmodule Bandit.Compression do + @moduledoc false + + defstruct method: nil, bytes_in: 0, lib_context: nil + + @typedoc "A struct containing the context for response compression" + @type t :: %__MODULE__{ + method: :deflate | :gzip | :identity | :zstd, + bytes_in: non_neg_integer(), + lib_context: term() + } + + @accepted_encodings ~w(gzip x-gzip deflate) + + if Code.ensure_loaded?(:zstd) do + @accepted_encodings ~w(zstd) ++ @accepted_encodings + end + + @spec negotiate_content_encoding(nil | binary(), keyword()) :: String.t() | nil + def negotiate_content_encoding(nil, _), do: nil + + def negotiate_content_encoding(accept_encoding, http_opts) do + if Keyword.get(http_opts, :compress, true) do + client_accept_encoding = Plug.Conn.Utils.list(accept_encoding) + + Keyword.get(http_opts, :response_encodings, @accepted_encodings) + |> Enum.find(&(&1 in client_accept_encoding)) + else + nil + end + end + + def new(adapter, status, headers, empty_body?, streamable \\ false) do + response_content_encoding_header = Bandit.Headers.get_header(headers, "content-encoding") + + headers = maybe_add_vary_header(adapter, status, headers) + + if status not in [204, 304] && not is_nil(adapter.content_encoding) && + is_nil(response_content_encoding_header) && + !response_has_strong_etag(headers) && !response_indicates_no_transform(headers) && + !empty_body? 
do + case start_stream(adapter.content_encoding, adapter.opts.http, streamable) do + {:ok, context} -> {[{"content-encoding", adapter.content_encoding} | headers], context} + {:error, :unsupported_encoding} -> {headers, %__MODULE__{method: :identity}} + end + else + {headers, %__MODULE__{method: :identity}} + end + end + + defp maybe_add_vary_header(adapter, status, headers) do + if status != 204 && Keyword.get(adapter.opts.http, :compress, true), + do: [{"vary", "accept-encoding"} | headers], + else: headers + end + + defp response_has_strong_etag(headers) do + case Bandit.Headers.get_header(headers, "etag") do + nil -> false + "\W" <> _rest -> false + _strong_etag -> true + end + end + + defp response_indicates_no_transform(headers) do + case Bandit.Headers.get_header(headers, "cache-control") do + nil -> false + header -> "no-transform" in Plug.Conn.Utils.list(header) + end + end + + defp start_stream("deflate", http_opts, _streamable) do + opts = Keyword.get(http_opts, :deflate_options, []) + deflate_context = :zlib.open() + + :zlib.deflateInit( + deflate_context, + Keyword.get(opts, :level, :default), + :deflated, + Keyword.get(opts, :window_bits, 15), + Keyword.get(opts, :mem_level, 8), + Keyword.get(opts, :strategy, :default) + ) + + {:ok, %__MODULE__{method: :deflate, lib_context: deflate_context}} + end + + defp start_stream("x-gzip", _opts, false), do: {:ok, %__MODULE__{method: :gzip}} + defp start_stream("gzip", _opts, false), do: {:ok, %__MODULE__{method: :gzip}} + + if Code.ensure_loaded?(:zstd) do + defp start_stream("zstd", http_opts, false) do + opts = Keyword.get(http_opts, :zstd_options, %{}) + {:ok, zstd_context} = :zstd.context(:compress, opts) + + {:ok, %__MODULE__{method: :zstd, lib_context: zstd_context}} + end + end + + defp start_stream(_encoding, _opts, _streamable), do: {:error, :unsupported_encoding} + + def compress_chunk(chunk, %__MODULE__{method: :deflate} = context) do + result = :zlib.deflate(context.lib_context, chunk, :sync) + + 
context = + context + |> Map.update!(:bytes_in, &(&1 + IO.iodata_length(chunk))) + + {result, context} + end + + if Code.ensure_loaded?(:zstd) do + def compress_chunk(chunk, %__MODULE__{method: :zstd} = context) do + result = :zstd.compress(chunk, context.lib_context) + + context = + context + |> Map.update!(:bytes_in, &(&1 + IO.iodata_length(chunk))) + + {result, context} + end + end + + def compress_chunk(chunk, %__MODULE__{method: :gzip, lib_context: nil} = context) do + result = :zlib.gzip(chunk) + + context = + context + |> Map.update!(:bytes_in, &(&1 + IO.iodata_length(chunk))) + |> Map.put(:lib_context, :done) + + {result, context} + end + + def compress_chunk(chunk, %__MODULE__{method: :identity} = context) do + {chunk, context} + end + + def close(%__MODULE__{} = context) do + chunk = close_context(context) + + if context.method == :identity do + {chunk, %{}} + else + {chunk, + %{ + resp_compression_method: to_string(context.method), + resp_uncompressed_body_bytes: context.bytes_in + }} + end + end + + defp close_context(%__MODULE__{method: :deflate, lib_context: lib_context}) do + last = :zlib.deflate(lib_context, [], :finish) + :ok = :zlib.deflateEnd(lib_context) + :zlib.close(lib_context) + last + end + + if Code.ensure_loaded?(:zstd) do + defp close_context(%__MODULE__{method: :zstd, lib_context: lib_context}) do + :zstd.close(lib_context) + [] + end + end + + defp close_context(_context), do: [] +end diff --git a/deps/bandit/lib/bandit/delegating_handler.ex b/deps/bandit/lib/bandit/delegating_handler.ex new file mode 100644 index 0000000..9951b21 --- /dev/null +++ b/deps/bandit/lib/bandit/delegating_handler.ex @@ -0,0 +1,78 @@ +defmodule Bandit.DelegatingHandler do + @moduledoc false + # Delegates all implementation of the ThousandIsland.Handler behaviour + # to an implementation specified in state. Allows for clean separation + # between protocol implementations & friction free protocol selection & + # upgrades. 
+ + use ThousandIsland.Handler + + @impl ThousandIsland.Handler + def handle_connection(socket, %{handler_module: handler_module} = state) do + handler_module.handle_connection(socket, state) + |> handle_bandit_continuation(socket) + end + + @impl ThousandIsland.Handler + def handle_data(data, socket, %{handler_module: handler_module} = state) do + handler_module.handle_data(data, socket, state) + |> handle_bandit_continuation(socket) + end + + @impl ThousandIsland.Handler + def handle_shutdown(socket, %{handler_module: handler_module} = state) do + handler_module.handle_shutdown(socket, state) + end + + @impl ThousandIsland.Handler + def handle_close(socket, %{handler_module: handler_module} = state) do + handler_module.handle_close(socket, state) + end + + @impl ThousandIsland.Handler + def handle_timeout(socket, %{handler_module: handler_module} = state) do + handler_module.handle_timeout(socket, state) + end + + @impl ThousandIsland.Handler + def handle_error(error, socket, %{handler_module: handler_module} = state) do + handler_module.handle_error(error, socket, state) + end + + @impl GenServer + def handle_call(msg, from, {_socket, %{handler_module: handler_module}} = state) do + handler_module.handle_call(msg, from, state) + end + + @impl GenServer + def handle_cast(msg, {_socket, %{handler_module: handler_module}} = state) do + handler_module.handle_cast(msg, state) + end + + @impl GenServer + def handle_info(msg, {_socket, %{handler_module: handler_module}} = state) do + handler_module.handle_info(msg, state) + end + + defp handle_bandit_continuation(continuation, socket) do + case continuation do + {:switch, next_handler, state} -> + handle_connection(socket, %{state | handler_module: next_handler}) + + {:switch, next_handler, data, state} -> + case handle_connection(socket, %{state | handler_module: next_handler}) do + {:continue, state} -> + handle_data(data, socket, state) + + {:continue, state, _timeout} -> + handle_data(data, socket, state) + + other 
-> + other + end + + other -> + other + end + end +end diff --git a/deps/bandit/lib/bandit/extractor.ex b/deps/bandit/lib/bandit/extractor.ex new file mode 100644 index 0000000..3c5d642 --- /dev/null +++ b/deps/bandit/lib/bandit/extractor.ex @@ -0,0 +1,113 @@ +defmodule Bandit.Extractor do + @moduledoc false + # A state machine for efficiently extracting full frames from received packets + + @type deserialize_result :: any() + + @callback header_and_payload_length(binary(), max_frame_size :: integer()) :: + {:ok, {header_length :: integer(), payload_length :: integer()}} + | {:error, term()} + | :more + + @callback deserialize(binary(), primitive_ops_module :: module()) :: deserialize_result() + + @type t :: %__MODULE__{ + header: binary(), + payload: iodata(), + payload_length: non_neg_integer(), + required_length: non_neg_integer(), + mode: :header_parsing | :payload_parsing, + max_frame_size: non_neg_integer(), + frame_parser: atom(), + primitive_ops_module: module() + } + + defstruct header: <<>>, + payload: [], + payload_length: 0, + required_length: 0, + mode: :header_parsing, + max_frame_size: 0, + frame_parser: nil, + primitive_ops_module: nil + + @spec new(module(), module(), Keyword.t()) :: t() + def new(frame_parser, primitive_ops_module, opts) do + max_frame_size = Keyword.get(opts, :max_frame_size, 8_000_000) + + %__MODULE__{ + max_frame_size: max_frame_size, + frame_parser: frame_parser, + primitive_ops_module: primitive_ops_module + } + end + + @spec push_data(t(), binary()) :: t() + def push_data(%__MODULE__{} = state, data) do + case state do + %{mode: :header_parsing} -> + %{state | header: state.header <> data} + + %{mode: :payload_parsing, payload: payload, payload_length: length} -> + %{state | payload: [payload, data], payload_length: length + byte_size(data)} + end + end + + @spec pop_frame(t()) :: {t(), :more | deserialize_result()} + def pop_frame(state) + + def pop_frame(%__MODULE__{mode: :header_parsing} = state) do + case 
state.frame_parser.header_and_payload_length(state.header, state.max_frame_size) do + {:ok, {header_length, required_length}} -> + state + |> transition_to_payload_parsing(header_length, required_length) + |> pop_frame() + + {:error, message} -> + {state, {:error, message}} + + :more -> + {state, :more} + end + end + + def pop_frame( + %__MODULE__{ + mode: :payload_parsing, + payload_length: payload_length, + required_length: required_length + } = state + ) do + if payload_length >= required_length do + <> = + IO.iodata_to_binary(state.payload) + + frame = state.frame_parser.deserialize(state.header <> payload, state.primitive_ops_module) + state = transition_to_header_parsing(state, rest) + + {state, frame} + else + {state, :more} + end + end + + defp transition_to_payload_parsing(state, header_length, required_length) do + payload_length = byte_size(state.header) - header_length + + state + |> Map.put(:header, binary_part(state.header, 0, header_length)) + |> Map.put(:payload, binary_part(state.header, header_length, payload_length)) + |> Map.put(:payload_length, payload_length) + |> Map.put(:required_length, required_length) + |> Map.put(:mode, :payload_parsing) + end + + defp transition_to_header_parsing(state, rest) do + state + |> Map.put(:header, rest) + |> Map.put(:payload, []) + |> Map.put(:payload_length, 0) + |> Map.put(:required_length, 0) + |> Map.put(:mode, :header_parsing) + end +end diff --git a/deps/bandit/lib/bandit/headers.ex b/deps/bandit/lib/bandit/headers.ex new file mode 100644 index 0000000..f46d8fd --- /dev/null +++ b/deps/bandit/lib/bandit/headers.ex @@ -0,0 +1,125 @@ +defmodule Bandit.Headers do + @moduledoc false + # Conveniences for dealing with headers. 
+ + @spec is_port_number(integer()) :: Macro.t() + defguardp is_port_number(port) when Bitwise.band(port, 0xFFFF) === port + + @spec get_header(Plug.Conn.headers(), header :: binary()) :: binary() | nil + def get_header(headers, header) do + case List.keyfind(headers, header, 0) do + {_, value} -> value + nil -> nil + end + end + + # Covers IPv6 addresses, like `[::1]:4000` as defined in RFC3986. + @spec parse_hostlike_header!(host_header :: binary()) :: + {Plug.Conn.host(), nil | Plug.Conn.port_number()} + def parse_hostlike_header!("[" <> _ = host_header) do + host_header + |> :binary.split("]:") + |> case do + [host, port] -> + case parse_integer(port) do + {port, ""} when is_port_number(port) -> {host <> "]", port} + _ -> raise Bandit.HTTPError, "Header contains invalid port" + end + + [host] -> + {host, nil} + end + end + + def parse_hostlike_header!(host_header) do + host_header + |> :binary.split(":") + |> case do + [host, port] -> + case parse_integer(port) do + {port, ""} when is_port_number(port) -> {host, port} + _ -> raise Bandit.HTTPError, "Header contains invalid port" + end + + [host] -> + {host, nil} + end + end + + @spec get_content_length(Plug.Conn.headers()) :: + {:ok, nil | non_neg_integer()} | {:error, String.t()} + def get_content_length(headers) do + # We need to special case this because we don't accept multiple content-length headers + case Enum.filter(headers, &(elem(&1, 0) == "content-length")) do + [] -> {:ok, nil} + [{"content-length", value}] -> parse_content_length(value) + _ -> {:error, "invalid content-length header (RFC9112§6.3)"} + end + end + + @spec parse_content_length(binary()) :: {:ok, non_neg_integer()} | {:error, String.t()} + defp parse_content_length(value) do + case parse_integer(value) do + {length, ""} -> + {:ok, length} + + {length, _rest} -> + if value |> Plug.Conn.Utils.list() |> Enum.all?(&(&1 == to_string(length))), + do: {:ok, length}, + else: {:error, "invalid content-length header (RFC9112§6.3.5)"} + + :error 
-> + {:error, "invalid content-length header (RFC9112§6.3.5)"} + end + end + + # Parses non-negative integers from strings. Return the valid portion of an + # integer and the remaining string as a tuple like `{123, ""}` or `:error`. + @spec parse_integer(String.t()) :: {non_neg_integer(), rest :: String.t()} | :error + defp parse_integer(<>) when digit >= ?0 and digit <= ?9 do + parse_integer(rest, digit - ?0) + end + + defp parse_integer(_), do: :error + + @spec parse_integer(String.t(), non_neg_integer()) :: {non_neg_integer(), String.t()} + defp parse_integer(<>, total) when digit >= ?0 and digit <= ?9 do + parse_integer(rest, total * 10 + digit - ?0) + end + + defp parse_integer(rest, total), do: {total, rest} + + @spec add_content_length( + headers :: Plug.Conn.headers(), + length :: non_neg_integer(), + status :: Plug.Conn.int_status(), + method :: Plug.Conn.method() + ) :: + Plug.Conn.headers() + + # Per RFC9110§8.6, we use the following logic: + # + # * If the response is 1xx or 204, content-length is NEVER sent + # * If the response is 304 or the method is HEAD AND the body length is zero, respect any + # content-length header the plug may have set on the assumption that it knows what it would + # have sent + # * For all other responses, use the length of the provided response body as the content-length, + # overwriting any content-length the plug may have set + def add_content_length(headers, _length, status, _method) + when status in 100..199 or status == 204 do + drop_content_length(headers) + end + + def add_content_length(headers, 0, status, method) when status == 304 or method == "HEAD" do + headers + end + + def add_content_length(headers, length, _status, _method) do + [{"content-length", to_string(length)} | drop_content_length(headers)] + end + + @spec drop_content_length(Plug.Conn.headers()) :: Plug.Conn.headers() + defp drop_content_length(headers) do + Enum.reject(headers, &(elem(&1, 0) == "content-length")) + end +end diff --git 
a/deps/bandit/lib/bandit/http1/README.md b/deps/bandit/lib/bandit/http1/README.md new file mode 100644 index 0000000..2fc7197 --- /dev/null +++ b/deps/bandit/lib/bandit/http1/README.md @@ -0,0 +1,30 @@ +# HTTP/1 Handler + +Included in this folder is a complete `ThousandIsland.Handler` based implementation of HTTP/1.x as +defined in [RFC 9112](https://datatracker.ietf.org/doc/rfc9112). + +## Process model + +Within a Bandit server, an HTTP/1 connection is modeled as a single process. +This process is tied to the lifecycle of the underlying TCP connection; in the +case of an HTTP client which makes use of HTTP's keep-alive feature to make +multiple requests on the same connection, all of these requests will be serviced +by this same process. + +The execution model to handle a given request is quite straightforward: the +underlying [Thousand Island](https://github.com/mtrudel/thousand_island) library +will call `Bandit.HTTP1.Handler.handle_data/3`, which will then construct a +`Bandit.HTTP1.Socket` struct that conforms to the `Bandit.HTTPTransport` +protocol. It will then call `Bandit.Pipeline.run/3`, which will go through the +process of reading the request (by calling functions on the +`Bandit.HTTPTransport` protocol), and constructing a `Plug.Conn` structure to +represent the request and subsequently pass it to the configured `Plug` module. + +# Testing + +All of this is exhaustively tested. Tests are located in `request_test.exs`, and +are broadly either concerned with testing network-facing aspects of the +implementation (ie: how well Bandit satisfies the relevant RFCs) or the Plug-facing +aspects of the implementation. + +Unfortunately, there is no HTTP/1 equivalent to the external h2spec test suite. 
diff --git a/deps/bandit/lib/bandit/http1/handler.ex b/deps/bandit/lib/bandit/http1/handler.ex new file mode 100644 index 0000000..459ee59 --- /dev/null +++ b/deps/bandit/lib/bandit/http1/handler.ex @@ -0,0 +1,96 @@ +defmodule Bandit.HTTP1.Handler do + @moduledoc false + # An HTTP 1.0 & 1.1 Thousand Island Handler + + use ThousandIsland.Handler + + @impl ThousandIsland.Handler + def handle_data(data, socket, state) do + transport = %Bandit.HTTP1.Socket{socket: socket, buffer: data, opts: state.opts} + connection_span = ThousandIsland.Socket.telemetry_span(socket) + conn_data = Bandit.SocketHelpers.conn_data(socket) + + case Bandit.Pipeline.run(transport, state.plug, connection_span, conn_data, state.opts) do + {:ok, transport} -> maybe_keepalive(transport, state) + {:error, _reason} -> {:close, state} + {:upgrade, _transport, :websocket, opts} -> do_websocket_upgrade(opts, state) + end + end + + defp maybe_keepalive(transport, state) do + requests_processed = Map.get(state, :requests_processed, 0) + 1 + request_limit = Keyword.get(state.opts.http_1, :max_requests, 0) + under_limit = request_limit == 0 || requests_processed < request_limit + + if under_limit && transport.keepalive do + if Keyword.get(state.opts.http_1, :clear_process_dict, true), do: clear_process_dict() + gc_every_n_requests = Keyword.get(state.opts.http_1, :gc_every_n_keepalive_requests, 5) + if rem(requests_processed, gc_every_n_requests) == 0, do: :erlang.garbage_collect() + + state = Map.put(state, :requests_processed, requests_processed) + + # We have bytes that we've read but haven't yet processed, tail call handle_data to start + # reading the next request + if Bandit.SocketHelpers.iodata_empty?(transport.buffer) do + {:continue, state} + else + handle_data(transport.buffer, transport.socket, state) + end + else + {:close, state} + end + end + + defp clear_process_dict do + Process.get_keys() + |> Enum.each( + &if &1 not in ~w[$ancestors $initial_call $process_label]a, do: Process.delete(&1) 
+ ) + end + + defp do_websocket_upgrade(upgrade_opts, state) do + :erlang.garbage_collect() + {:switch, Bandit.WebSocket.Handler, Map.put(state, :upgrade_opts, upgrade_opts)} + end + + def handle_info({:plug_conn, :sent}, {socket, state}), + do: {:noreply, {socket, state}, socket.read_timeout} + + def handle_info({:EXIT, _pid, :normal}, {socket, state}), + do: {:noreply, {socket, state}, socket.read_timeout} + + def handle_info(msg, {socket, state}) do + if Keyword.get(state.opts.http_1, :log_unknown_messages, false), do: log_no_handle_info(msg) + {:noreply, {socket, state}, socket.read_timeout} + end + + def handle_info(msg, state) do + log_no_handle_info(msg) + {:noreply, state} + end + + defp log_no_handle_info(msg) do + # Copied verbatim from lib/elixir/lib/gen_server.ex + proc = + case Process.info(self(), :registered_name) do + {_, []} -> self() + {_, name} -> name + end + + :logger.error( + %{ + label: {GenServer, :no_handle_info}, + report: %{ + module: __MODULE__, + message: msg, + name: proc + } + }, + %{ + domain: [:otp, :elixir], + error_logger: %{tag: :error_msg}, + report_cb: &GenServer.format_report/1 + } + ) + end +end diff --git a/deps/bandit/lib/bandit/http1/socket.ex b/deps/bandit/lib/bandit/http1/socket.ex new file mode 100644 index 0000000..84ce945 --- /dev/null +++ b/deps/bandit/lib/bandit/http1/socket.ex @@ -0,0 +1,503 @@ +defmodule Bandit.HTTP1.Socket do + @moduledoc false + # This module implements the lower level parts of HTTP/1 (roughly, the aspects of the protocol + # described in RFC 9112 as opposed to RFC 9110). It is similar in spirit to + # `Bandit.HTTP2.Stream` for HTTP/2, and indeed both implement the `Bandit.HTTPTransport` + # behaviour. An instance of this struct is maintained as the state of a `Bandit.HTTP1.Handler` + # process, and it moves an HTTP/1 request through its lifecycle by calling functions defined on + # this module. This state is also tracked within the `Bandit.Adapter` instance that backs + # Bandit's Plug API. 
+ + defstruct socket: nil, + buffer: <<>>, + read_state: :unread, + write_state: :unsent, + unread_content_length: nil, + body_encoding: nil, + version: :"HTTP/1.0", + send_buffer: nil, + request_connection_header: nil, + keepalive: nil, + opts: %{} + + @typedoc "An HTTP/1 read state" + @type read_state :: :unread | :headers_read | :read + + @typedoc "An HTTP/1 write state" + @type write_state :: :unsent | :writing | :chunking | :chunk_streaming | :sent + + @typedoc "The information necessary to communicate to/from a socket" + @type t :: %__MODULE__{ + socket: ThousandIsland.Socket.t(), + buffer: iodata(), + read_state: read_state(), + write_state: write_state(), + unread_content_length: non_neg_integer() | :chunked | nil, + body_encoding: nil | binary(), + version: nil | :"HTTP/1.1" | :"HTTP/1.0", + send_buffer: iolist(), + request_connection_header: binary(), + keepalive: boolean(), + opts: %{ + required(:http_1) => Bandit.http_1_options() + } + } + + defimpl Bandit.HTTPTransport do + def peer_data(%@for{} = socket), do: Bandit.SocketHelpers.peer_data(socket.socket) + + def sock_data(%@for{} = socket), do: Bandit.SocketHelpers.sock_data(socket.socket) + + def ssl_data(%@for{} = socket), do: Bandit.SocketHelpers.ssl_data(socket.socket) + + def version(%@for{} = socket), do: socket.version + + def read_headers(%@for{read_state: :unread} = socket) do + {method, request_target, socket} = do_read_request_line!(socket) + {headers, socket} = do_read_headers!(socket) + content_length = get_content_length!(headers) + body_encoding = Bandit.Headers.get_header(headers, "transfer-encoding") + request_connection_header = safe_downcase(Bandit.Headers.get_header(headers, "connection")) + socket = %{socket | request_connection_header: request_connection_header} + + case {content_length, body_encoding} do + {nil, nil} -> + # No body, so just go straight to 'read' + {:ok, method, request_target, headers, %{socket | read_state: :read}} + + {content_length, nil} -> + socket = 
%{socket | read_state: :headers_read, unread_content_length: content_length} + {:ok, method, request_target, headers, socket} + + {nil, body_encoding} -> + socket = %{socket | read_state: :headers_read, body_encoding: body_encoding} + {:ok, method, request_target, headers, socket} + + {_content_length, _body_encoding} -> + request_error!( + "Request cannot contain both 'content-length' and 'transfer-encoding' (RFC9112§6.3.3)" + ) + end + end + + defp do_read_request_line!(socket, request_target \\ nil) do + packet_size = Keyword.get(socket.opts.http_1, :max_request_line_length, 10_000) + + case :erlang.decode_packet(:http_bin, socket.buffer, packet_size: packet_size) do + {:more, _len} -> + chunk = read_available_for_header!(socket.socket) + do_read_request_line!(%{socket | buffer: socket.buffer <> chunk}, request_target) + + {:ok, {:http_request, method, request_target, version}, rest} -> + version = get_version!(version) + # decode_packet is inconsistent about atom/string method returns + method = to_string(method) + request_target = resolve_request_target!(request_target, method) + socket = %{socket | buffer: rest, version: version} + {method, request_target, socket} + + {:ok, {:http_error, reason}, _rest} -> + request_error!("Request line HTTP error: #{inspect(reason)}") + + {:error, :invalid} -> + request_error!("Request URI is too long", :request_uri_too_long) + + {:error, reason} -> + request_error!("Request line unknown error: #{inspect(reason)}") + end + end + + defp get_version!({1, 1}), do: :"HTTP/1.1" + defp get_version!({1, 0}), do: :"HTTP/1.0" + defp get_version!(other), do: request_error!("Invalid HTTP version: #{inspect(other)}") + + # Unwrap different request_targets returned by :erlang.decode_packet/3 + defp resolve_request_target!({:abs_path, path}, _), do: {nil, nil, nil, path} + + defp resolve_request_target!({:absoluteURI, scheme, host, :undefined, path}, _), + do: {to_string(scheme), host, nil, path} + + defp 
resolve_request_target!({:absoluteURI, scheme, host, port, path}, _), + do: {to_string(scheme), host, port, path} + + defp resolve_request_target!(:*, "OPTIONS"), do: {nil, nil, nil, :*} + + defp resolve_request_target!({:scheme, scheme, port}, "CONNECT"), + do: {nil, scheme, port, nil} + + defp resolve_request_target!(_request_target, _method), + do: request_error!("Unsupported request target (RFC9112§3.2)") + + defp do_read_headers!(socket, headers \\ []) do + packet_size = Keyword.get(socket.opts.http_1, :max_header_length, 10_000) + + case :erlang.decode_packet(:httph_bin, socket.buffer, packet_size: packet_size) do + {:more, _len} -> + chunk = read_available_for_header!(socket.socket) + socket = %{socket | buffer: socket.buffer <> chunk} + do_read_headers!(socket, headers) + + {:ok, {:http_header, _, header, _, value}, rest} -> + socket = %{socket | buffer: rest} + headers = [{header |> to_string() |> String.downcase(:ascii), value} | headers] + + if length(headers) <= Keyword.get(socket.opts.http_1, :max_header_count, 50) do + do_read_headers!(socket, headers) + else + request_error!("Too many headers", :request_header_fields_too_large) + end + + {:ok, :http_eoh, rest} -> + socket = %{socket | read_state: :headers_read, buffer: rest} + {Enum.reverse(headers), socket} + + {:ok, {:http_error, reason}, _rest} -> + request_error!("Header read HTTP error: #{inspect(reason)}") + + {:error, :invalid} -> + request_error!("Header too long", :request_header_fields_too_large) + + {:error, reason} -> + request_error!("Header read unknown error: #{inspect(reason)}") + end + end + + defp get_content_length!(headers) do + case Bandit.Headers.get_content_length(headers) do + {:ok, content_length} -> content_length + {:error, reason} -> request_error!("Content length unknown error: #{inspect(reason)}") + end + end + + def read_data( + %@for{read_state: :headers_read, unread_content_length: unread_content_length} = socket, + opts + ) + when is_number(unread_content_length) do 
+ {to_return, buffer, remaining_unread_content_length} = + do_read_content_length_data!(socket.socket, socket.buffer, unread_content_length, opts) + + socket = %{socket | buffer: buffer, unread_content_length: remaining_unread_content_length} + + if remaining_unread_content_length == 0 do + {:ok, to_return, %{socket | read_state: :read}} + else + {:more, to_return, socket} + end + end + + def read_data(%@for{read_state: :headers_read, body_encoding: "chunked"} = socket, opts) do + read_size = Keyword.get(opts, :read_length, 1_000_000) + read_timeout = Keyword.get(opts, :read_timeout) + + {body, buffer} = + do_read_chunked_data!(socket.socket, socket.buffer, <<>>, read_size, read_timeout) + + body = IO.iodata_to_binary(body) + + {:ok, body, %{socket | read_state: :read, buffer: buffer}} + end + + def read_data(%@for{read_state: :headers_read, body_encoding: body_encoding}, _opts) + when not is_nil(body_encoding) do + request_error!("Unsupported transfer-encoding") + end + + def read_data(%@for{} = socket, _opts), do: {:ok, <<>>, socket} + + @dialyzer {:no_improper_lists, do_read_content_length_data!: 4} + defp do_read_content_length_data!(socket, buffer, unread_content_length, opts) do + max_to_return = min(unread_content_length, Keyword.get(opts, :length, 8_000_000)) + + cond do + max_to_return == 0 -> + # We have already satisfied our content length + {<<>>, buffer, unread_content_length} + + byte_size(buffer) >= max_to_return -> + # We can satisfy the read request entirely from our buffer + <> = buffer + {to_return, rest, unread_content_length - max_to_return} + + byte_size(buffer) < max_to_return -> + # We need to read off the wire + read_size = Keyword.get(opts, :read_length, 1_000_000) + read_timeout = Keyword.get(opts, :read_timeout) + + to_return = + read!(socket, max_to_return - byte_size(buffer), [buffer], read_size, read_timeout) + |> IO.iodata_to_binary() + + # We may have read more than we need to return + if byte_size(to_return) >= max_to_return do + 
<> = to_return + {to_return, rest, unread_content_length - max_to_return} + else + {to_return, <<>>, unread_content_length - byte_size(to_return)} + end + end + end + + @dialyzer {:no_improper_lists, do_read_chunked_data!: 5} + defp do_read_chunked_data!(socket, buffer, body, read_size, read_timeout) do + case :binary.split(buffer, "\r\n") do + ["0", "\r\n" <> rest] -> + # We should be reading (and ignoring) trailers here + {IO.iodata_to_binary(body), rest} + + [chunk_size, rest] -> + chunk_size = String.to_integer(chunk_size, 16) + + case rest do + <> -> + do_read_chunked_data!(socket, rest, [body, next_chunk], read_size, read_timeout) + + _ -> + to_read = chunk_size - byte_size(rest) + + if to_read > 0 do + iolist = read!(socket, to_read, [], read_size, read_timeout) + buffer = IO.iodata_to_binary([buffer | iolist]) + do_read_chunked_data!(socket, buffer, body, read_size, read_timeout) + else + chunk = read_available!(socket, read_timeout) + buffer = buffer <> chunk + do_read_chunked_data!(socket, buffer, body, read_size, read_timeout) + end + end + + _ -> + chunk = read_available!(socket, read_timeout) + buffer = buffer <> chunk + do_read_chunked_data!(socket, buffer, body, read_size, read_timeout) + end + end + + ################## + # Internal Reading + ################## + + @compile {:inline, read_available_for_header!: 1} + @spec read_available_for_header!(ThousandIsland.Socket.t()) :: binary() + defp read_available_for_header!(socket) do + case ThousandIsland.Socket.recv(socket, 0) do + {:ok, chunk} -> chunk + {:error, reason} -> socket_error!(reason) + end + end + + @compile {:inline, read_available!: 2} + @spec read_available!(ThousandIsland.Socket.t(), timeout()) :: binary() + defp read_available!(socket, read_timeout) do + case ThousandIsland.Socket.recv(socket, 0, read_timeout) do + {:ok, chunk} -> chunk + {:error, :timeout} -> <<>> + {:error, reason} -> socket_error!(reason) + end + end + + @dialyzer {:no_improper_lists, read!: 5} + @spec read!( + 
ThousandIsland.Socket.t(), + non_neg_integer(), + iolist(), + non_neg_integer(), + timeout() + ) :: + iolist() + defp read!(socket, to_read, already_read, read_size, read_timeout) do + case ThousandIsland.Socket.recv(socket, min(to_read, read_size), read_timeout) do + {:ok, chunk} -> + remaining_bytes = to_read - byte_size(chunk) + + if remaining_bytes > 0 do + read!(socket, remaining_bytes, [already_read | chunk], read_size, read_timeout) + else + [already_read | chunk] + end + + {:error, :timeout} -> + handle_timeout_with_disconnect_check!(socket) + + {:error, reason} -> + socket_error!(reason) + end + end + + # After a timeout, check if the peer is still connected. If not, this is + # likely a client disconnect that manifested as a timeout. + # We raise TransportError for disconnects and HTTPError for genuine timeouts. + # Use a non-blocking recv (timeout: 0) to detect closed connections. + @spec handle_timeout_with_disconnect_check!(ThousandIsland.Socket.t()) :: no_return() + defp handle_timeout_with_disconnect_check!(socket) do + case ThousandIsland.Socket.recv(socket, 0, 0) do + {:error, :timeout} -> + # Socket is still open but no data - genuine timeout + request_error!("Body read timeout", :request_timeout) + + {:error, reason} -> + # Socket error (e.g., :closed) - client disconnected + socket_error!(reason) + + {:ok, _data} -> + # Unexpected: data arrived just after timeout. Treat as timeout + # since we already committed to the timeout path. 
+ request_error!("Body read timeout", :request_timeout) + end + end + + def send_headers(%@for{write_state: :unsent} = socket, status, headers, body_disposition) do + resp_line = "#{socket.version} #{status} #{Plug.Conn.Status.reason_phrase(status)}\r\n" + + {headers, socket} = handle_keepalive(status, headers, socket) + + has_content_length = Bandit.Headers.get_header(headers, "content-length") != nil + + case body_disposition do + :raw -> + # This is an optimization for the common case of sending a non-encoded body (or file), + # and coalesces the header and body send calls into a single ThousandIsland.Socket.send/2 + # call. This makes a _substantial_ difference in practice + %{socket | write_state: :writing, send_buffer: [resp_line | encode_headers(headers)]} + + :chunk_encoded when not has_content_length -> + headers = [{"transfer-encoding", "chunked"} | headers] + send!(socket.socket, [resp_line | encode_headers(headers)]) + %{socket | write_state: :chunking} + + :chunk_encoded when has_content_length -> + send!(socket.socket, [resp_line | encode_headers(headers)]) + %{socket | write_state: :chunk_streaming} + + :no_body -> + send!(socket.socket, [resp_line | encode_headers(headers)]) + %{socket | write_state: :sent} + + :inform -> + send!(socket.socket, [resp_line | encode_headers(headers)]) + %{socket | write_state: :unsent} + end + end + + defp handle_keepalive(status, headers, socket) do + response_connection_header = safe_downcase(Bandit.Headers.get_header(headers, "connection")) + + # Per RFC9112§9.3 + cond do + status in 100..199 -> + {headers, socket} + + socket.request_connection_header == "close" || response_connection_header == "close" -> + {headers, %{socket | keepalive: false}} + + socket.version == :"HTTP/1.1" -> + {headers, %{socket | keepalive: true}} + + socket.version == :"HTTP/1.0" && socket.request_connection_header == "keep-alive" -> + {[{"connection", "keep-alive"} | headers], %{socket | keepalive: true}} + + true -> + {[{"connection", 
"close"} | headers], %{socket | keepalive: false}} + end + end + + defp safe_downcase(str) when is_binary(str), do: String.downcase(str, :ascii) + defp safe_downcase(str), do: str + + defp encode_headers(headers) do + headers + |> Enum.map(fn {k, v} -> [k, ": ", v, "\r\n"] end) + |> then(&[&1 | ["\r\n"]]) + end + + def send_data(%@for{write_state: :writing} = socket, data, end_request) do + send!(socket.socket, [socket.send_buffer | data]) + write_state = if end_request, do: :sent, else: :writing + %{socket | write_state: write_state, send_buffer: []} + end + + def send_data(%@for{write_state: :chunking} = socket, data, end_request) do + byte_size = data |> IO.iodata_length() + send!(socket.socket, [Integer.to_string(byte_size, 16), "\r\n", data, "\r\n"]) + write_state = if end_request, do: :sent, else: :chunking + %{socket | write_state: write_state} + end + + def send_data(%@for{write_state: :chunk_streaming} = socket, data, end_request) do + send!(socket.socket, data) + write_state = if end_request, do: :sent, else: :chunk_streaming + %{socket | write_state: write_state} + end + + def sendfile(%@for{write_state: :writing} = socket, path, offset, length) do + send!(socket.socket, socket.send_buffer) + + case ThousandIsland.Socket.sendfile(socket.socket, path, offset, length) do + {:ok, _bytes_written} -> %{socket | write_state: :sent} + {:error, reason} -> socket_error!(reason) + end + end + + @spec send!(ThousandIsland.Socket.t(), iolist()) :: :ok | no_return() + defp send!(socket, payload) do + case ThousandIsland.Socket.send(socket, payload) do + :ok -> + :ok + + {:error, reason} -> + # Prevent error handlers from possibly trying to send again + send(self(), {:plug_conn, :sent}) + socket_error!(reason) + end + end + + def ensure_completed(%@for{read_state: :read} = socket), do: socket + def ensure_completed(%@for{keepalive: false} = socket), do: socket + + def ensure_completed(%@for{} = socket) do + case read_data(socket, []) do + {:ok, _data, socket} -> 
socket + {:more, _data, _socket} -> request_error!("Unable to read remaining data in request body") + end + rescue + e in [Bandit.HTTPError] -> + # If we got a timeout during ensure_completed (draining the body), + # check if the client actually disconnected. + if e.plug_status == :request_timeout do + handle_timeout_with_disconnect_check!(socket.socket) + else + reraise e, __STACKTRACE__ + end + end + + def supported_upgrade?(%@for{} = _socket, protocol), do: protocol == :websocket + + def send_on_error(%@for{}, %Bandit.TransportError{}), do: :ok + + def send_on_error(%@for{} = socket, error) do + receive do + {:plug_conn, :sent} -> %{socket | write_state: :sent} + after + 0 -> + status = error |> Plug.Exception.status() |> Plug.Conn.Status.code() + + try do + send_headers(socket, status, [{"connection", "close"}], :no_body) + rescue + _e in [Bandit.TransportError, Bandit.HTTPError] -> :ok + end + end + end + + @spec request_error!(term()) :: no_return() + @spec request_error!(term(), Plug.Conn.status()) :: no_return() + defp request_error!(reason, plug_status \\ :bad_request) do + raise Bandit.HTTPError, message: to_string(reason), plug_status: plug_status + end + + @spec socket_error!(term()) :: no_return() + defp socket_error!(reason) do + raise Bandit.TransportError, message: "Unrecoverable error: #{reason}", error: reason + end + end +end diff --git a/deps/bandit/lib/bandit/http2/README.md b/deps/bandit/lib/bandit/http2/README.md new file mode 100644 index 0000000..d9aac52 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/README.md @@ -0,0 +1,108 @@ +# HTTP/2 Handler + +Included in this folder is a complete `ThousandIsland.Handler` based implementation of HTTP/2 as +defined in [RFC 9110](https://datatracker.ietf.org/doc/rfc9110) & [RFC +9113](https://datatracker.ietf.org/doc/rfc9113) + +## Process model + +Within a Bandit server, an HTTP/2 connection is modeled as a set of processes: + +* 1 process per connection, a `Bandit.HTTP2.Handler` module implementing 
the + `ThousandIsland.Handler` behaviour, and; +* 1 process per stream (i.e.: per HTTP request) within the connection, implemented as + a `Bandit.HTTP2.StreamProcess` process + +Each of these processes model the majority of their state via a +`Bandit.HTTP2.Connection` & `Bandit.HTTP2.Stream` struct, respectively. + +The lifetimes of these processes correspond to their role; a connection process lives for as long +as a client is connected, and a stream process lives only as long as is required to process +a single stream request within a connection. + +Connection processes are the 'root' of each connection's process group, and are supervised by +Thousand Island in the same manner that `ThousandIsland.Handler` processes are usually supervised +(see the [project README](https://github.com/mtrudel/thousand_island) for details). + +Stream processes are not supervised by design. The connection process starts new +stream processes as required, via a standard `start_link` +call, and manages the termination of the resultant linked stream processes by +handling `{:EXIT,...}` messages as described in the Elixir documentation. Each +stream process stays alive long enough to fully model an HTTP/2 stream, +beginning its life in the `:init` state and ending it in the `:closed` state (or +else by a stream or connection error being raised). This approach is aligned +with the realities of the HTTP/2 model, insofar as if a connection process +terminates there is no reason to keep its constituent stream processes around, +and if a stream process dies the connection should be able to handle this +without itself terminating. It also means that our process model is very +lightweight - there is no extra supervision overhead present because no such +supervision is required for the system to function in the desired way. + +## Reading client data + +The overall structure of the implementation is managed by the `Bandit.HTTP2.Handler` module, and +looks like the following: + +1. 
Bytes are asynchronously received from ThousandIsland via the + `Bandit.HTTP2.Handler.handle_data/3` function +2. Frames are parsed from these bytes by calling the `Bandit.HTTP2.Frame.deserialize/2` + function. If successful, the parsed frame(s) are returned. We retain any unparsed bytes in + a buffer in order to attempt parsing them upon receipt of subsequent data from the client +3. Parsed frames are passed into the `Bandit.HTTP2.Connection` module along with a struct of + same module. Frames are processed via the `Bandit.HTTP2.Connection.handle_frame/3` function. + Connection-level frames are handled within the `Bandit.HTTP2.Connection` + struct, and stream-level frames are passed along to the corresponding stream + process, which is wholly responsible for managing all aspects of a stream's + state (which is tracked via the `Bandit.HTTP2.Stream` struct). The one + exception to this is the handling of frames sent to streams which have + already been closed (and whose corresponding processes have thus terminated). + Any such frames are discarded without effect. +4. This process is repeated every time we receive data from the client until the + `Bandit.HTTP2.Connection` module indicates that the connection should be closed, either + normally or due to error. Note that frame deserialization may end up returning a connection + error if the parsed frames fail specific criteria (generally, the frame parsing modules are + responsible for identifying errors as described in [section + 6](https://datatracker.ietf.org/doc/html/rfc9113#section-6) of RFC 9113). In these cases, the + failure is passed through to the connection module for processing in order to coordinate an + orderly shutdown or client notification as appropriate + +## Processing requests + +The state of a particular stream are contained within a `Bandit.HTTP2.Stream` +struct, maintained within a `Bandit.HTTP2.StreamProcess` process. 
As part of the +stream's lifecycle, the server's configured Plug is called, with an instance of +the `Bandit.Adapter` struct being used to interface with the Plug. There +is a separation of concerns between the aspect of HTTP semantics managed by +`Bandit.Adapter` (roughly, those concerns laid out in +[RFC9110](https://datatracker.ietf.org/doc/html/rfc9110)) and the more +transport-specific HTTP/2 concerns managed by `Bandit.HTTP2.Stream` (roughly the +concerns specified in [RFC9113](https://datatracker.ietf.org/doc/html/rfc9113)). + +# Testing + +All of this is exhaustively tested. Tests are broken up primarily into `protocol_test.exs`, which +is concerned with aspects of the implementation relating to protocol conformance and +client-facing concerns, while `plug_test.exs` is concerned with aspects of the implementation +having to do with the Plug API and application-facing concerns. There are also more +unit-style tests covering frame serialization and deserialization. + +In addition, the `h2spec` conformance suite is run via a `System` wrapper & executes the entirety +of the suite (in strict mode) against a running Bandit server. + +## Limitations and Assumptions + +Some limitations and assumptions of this implementation: + +* This handler assumes that the HTTP/2 connection preface has already been consumed from the + client. The `Bandit.InitialHandler` module uses this preface to discriminate between various + HTTP versions when determining which handler to use +* Priority frames are parsed and validated, but do not induce any action on the part of the + server. There is no priority assigned to respective streams in terms of processing; all streams + are run in parallel as soon as they arrive +* While flow control is completely implemented here, the specific values used for upload flow + control (that is, the end that we control) are fixed. 
Specifically, we attempt to maintain + fairly large windows in order to not restrict client uploads (we 'slow-start' window changes + upon receipt of first byte, mostly to retain parity between connection and stream window + management since connection windows cannot be changed via settings). The majority of flow + control logic has been encapsulated in the `Bandit.HTTP2.FlowControl` module should future + refinement be required diff --git a/deps/bandit/lib/bandit/http2/connection.ex b/deps/bandit/lib/bandit/http2/connection.ex new file mode 100644 index 0000000..03804e0 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/connection.ex @@ -0,0 +1,473 @@ +defmodule Bandit.HTTP2.Connection do + @moduledoc false + # Represents the state of an HTTP/2 connection, in a process-free manner. An instance of this + # struct is maintained as the state of a `Bandit.HTTP2.Handler` process, and it moves an HTTP/2 + # connection through its lifecycle by calling functions defined on this module + + require Logger + + defstruct local_settings: %Bandit.HTTP2.Settings{}, + remote_settings: %Bandit.HTTP2.Settings{}, + fragment_frame: nil, + send_hpack_state: HPAX.new(4096), + recv_hpack_state: HPAX.new(4096), + send_window_size: 65_535, + recv_window_size: 65_535, + streams: %Bandit.HTTP2.StreamCollection{}, + pending_sends: [], + conn_data: nil, + telemetry_span: nil, + plug: nil, + opts: %{}, + reset_stream_timestamps: [] + + @typedoc "Encapsulates the state of an HTTP/2 connection" + @type t :: %__MODULE__{ + local_settings: Bandit.HTTP2.Settings.t(), + remote_settings: Bandit.HTTP2.Settings.t(), + fragment_frame: Bandit.HTTP2.Frame.Headers.t() | nil, + send_hpack_state: term(), + recv_hpack_state: term(), + send_window_size: non_neg_integer(), + recv_window_size: non_neg_integer(), + streams: Bandit.HTTP2.StreamCollection.t(), + pending_sends: [{Bandit.HTTP2.Stream.stream_id(), iodata(), boolean(), fun()}], + conn_data: Bandit.Pipeline.conn_data(), + telemetry_span: 
ThousandIsland.Telemetry.t(), + plug: Bandit.Pipeline.plug_def(), + opts: %{ + required(:http) => Bandit.http_options(), + required(:http_2) => Bandit.http_2_options() + }, + reset_stream_timestamps: [integer()] + } + + @spec init(ThousandIsland.Socket.t(), Bandit.Pipeline.plug_def(), map()) :: t() + def init(socket, plug, opts) do + connection = %__MODULE__{ + local_settings: + struct!(Bandit.HTTP2.Settings, Keyword.get(opts.http_2, :default_local_settings, [])), + conn_data: Bandit.SocketHelpers.conn_data(socket), + telemetry_span: ThousandIsland.Socket.telemetry_span(socket), + plug: plug, + opts: opts + } + + # Send SETTINGS frame per RFC9113§3.4 + %Bandit.HTTP2.Frame.Settings{ack: false, settings: Map.from_struct(connection.local_settings)} + |> send_frame(socket, connection) + + connection + end + + # + # Receiving while expecting CONTINUATION frames is a special case (RFC9113§6.10); handle it first + # + + @spec handle_frame(Bandit.HTTP2.Frame.frame(), ThousandIsland.Socket.t(), t()) :: t() + def handle_frame( + %Bandit.HTTP2.Frame.Continuation{end_headers: true, stream_id: stream_id} = frame, + socket, + %__MODULE__{fragment_frame: %Bandit.HTTP2.Frame.Headers{stream_id: stream_id}} = + connection + ) do + header_block = connection.fragment_frame.fragment <> frame.fragment + header_frame = %{connection.fragment_frame | end_headers: true, fragment: header_block} + handle_frame(header_frame, socket, %{connection | fragment_frame: nil}) + end + + def handle_frame( + %Bandit.HTTP2.Frame.Continuation{end_headers: false, stream_id: stream_id} = frame, + _socket, + %__MODULE__{fragment_frame: %Bandit.HTTP2.Frame.Headers{stream_id: stream_id}} = + connection + ) do + fragment = connection.fragment_frame.fragment <> frame.fragment + check_oversize_fragment!(fragment, connection) + fragment_frame = %{connection.fragment_frame | fragment: fragment} + %{connection | fragment_frame: fragment_frame} + end + + def handle_frame(_frame, _socket, %__MODULE__{fragment_frame: 
%Bandit.HTTP2.Frame.Headers{}}) do + connection_error!("Expected CONTINUATION frame (RFC9113§6.10)") + end + + # + # Connection-level receiving + # + + def handle_frame(%Bandit.HTTP2.Frame.Settings{ack: true}, _socket, connection), do: connection + + def handle_frame(%Bandit.HTTP2.Frame.Settings{ack: false} = frame, socket, connection) do + %Bandit.HTTP2.Frame.Settings{ack: true} |> send_frame(socket, connection) + + # Merge whatever new settings were sent with our existing remote settings + remote_settings = struct(connection.remote_settings, frame.settings) + + send_hpack_state = HPAX.resize(connection.send_hpack_state, remote_settings.header_table_size) + delta = remote_settings.initial_window_size - connection.remote_settings.initial_window_size + + Bandit.HTTP2.StreamCollection.get_pids(connection.streams) + |> Enum.each(&Bandit.HTTP2.Stream.deliver_send_window_update(&1, delta)) + + do_pending_sends(socket, %{ + connection + | remote_settings: remote_settings, + send_hpack_state: send_hpack_state + }) + end + + def handle_frame(%Bandit.HTTP2.Frame.Ping{ack: true}, _socket, connection), do: connection + + def handle_frame(%Bandit.HTTP2.Frame.Ping{ack: false} = frame, socket, connection) do + %Bandit.HTTP2.Frame.Ping{ack: true, payload: frame.payload} |> send_frame(socket, connection) + connection + end + + def handle_frame(%Bandit.HTTP2.Frame.Goaway{}, _socket, connection), do: connection + + def handle_frame(%Bandit.HTTP2.Frame.WindowUpdate{stream_id: 0} = frame, socket, connection) do + case Bandit.HTTP2.FlowControl.update_send_window( + connection.send_window_size, + frame.size_increment + ) do + {:ok, new_window} -> do_pending_sends(socket, %{connection | send_window_size: new_window}) + {:error, error} -> connection_error!(error, Bandit.HTTP2.Errors.flow_control_error()) + end + end + + # + # Stream-level receiving + # + + def handle_frame(%Bandit.HTTP2.Frame.WindowUpdate{} = frame, _socket, connection) do + streams = + with_stream(connection, 
frame.stream_id, fn stream -> + Bandit.HTTP2.Stream.deliver_send_window_update(stream, frame.size_increment) + end) + + %{connection | streams: streams} + end + + def handle_frame(%Bandit.HTTP2.Frame.Headers{end_headers: true} = frame, _socket, connection) do + check_oversize_fragment!(frame.fragment, connection) + + case HPAX.decode(frame.fragment, connection.recv_hpack_state) do + {:ok, headers, recv_hpack_state} -> + streams = + with_stream(connection, frame.stream_id, fn stream -> + Bandit.HTTP2.Stream.deliver_headers(stream, headers, frame.end_stream) + end) + + %{connection | recv_hpack_state: recv_hpack_state, streams: streams} + + _ -> + connection_error!("Header decode error", Bandit.HTTP2.Errors.compression_error()) + end + end + + def handle_frame(%Bandit.HTTP2.Frame.Headers{end_headers: false} = frame, _socket, connection) do + check_oversize_fragment!(frame.fragment, connection) + %{connection | fragment_frame: frame} + end + + def handle_frame(%Bandit.HTTP2.Frame.Continuation{}, _socket, _connection) do + connection_error!("Received unexpected CONTINUATION frame (RFC9113§6.10)") + end + + def handle_frame(%Bandit.HTTP2.Frame.Data{} = frame, socket, connection) do + streams = + with_stream(connection, frame.stream_id, fn stream -> + Bandit.HTTP2.Stream.deliver_data(stream, frame.data, frame.end_stream) + end) + + {recv_window_size, window_increment} = + Bandit.HTTP2.FlowControl.compute_recv_window( + connection.recv_window_size, + byte_size(frame.data) + ) + + if window_increment > 0 do + %Bandit.HTTP2.Frame.WindowUpdate{stream_id: 0, size_increment: window_increment} + |> send_frame(socket, connection) + end + + %{connection | recv_window_size: recv_window_size, streams: streams} + end + + def handle_frame(%Bandit.HTTP2.Frame.Priority{}, _socket, connection), do: connection + + def handle_frame(%Bandit.HTTP2.Frame.RstStream{} = frame, _socket, connection) do + streams = + with_stream(connection, frame.stream_id, fn stream -> + 
Bandit.HTTP2.Stream.deliver_rst_stream(stream, frame.error_code) + end) + + %{connection | streams: streams} + |> check_reset_stream_rate_limit!() + end + + # Catch-all handler for unknown frame types + + def handle_frame(%Bandit.HTTP2.Frame.Unknown{} = frame, _socket, connection) do + Logger.warning("Unknown frame (#{inspect(Map.from_struct(frame))})", + domain: [:bandit], + plug: connection.plug + ) + + connection + end + + defp with_stream(connection, stream_id, fun) do + case Bandit.HTTP2.StreamCollection.get_pid(connection.streams, stream_id) do + pid when is_pid(pid) or pid == :closed -> + fun.(pid) + connection.streams + + :new -> + new_stream!(connection, stream_id) + + sendfile_chunk_size = + Keyword.get(connection.opts.http_2, :sendfile_chunk_size, 1_048_576) + + stream = + Bandit.HTTP2.Stream.init( + self(), + stream_id, + connection.remote_settings.initial_window_size, + sendfile_chunk_size + ) + + case Bandit.HTTP2.StreamProcess.start_link( + stream, + connection.plug, + connection.telemetry_span, + connection.conn_data, + connection.opts + ) do + {:ok, pid} -> + streams = Bandit.HTTP2.StreamCollection.insert(connection.streams, stream_id, pid) + with_stream(%{connection | streams: streams}, stream_id, fun) + + _ -> + raise "Unable to start stream process" + end + + :invalid -> + connection_error!("Received invalid stream identifier") + end + end + + defp new_stream!(connection, stream_id) do + max_requests = Keyword.get(connection.opts.http_2, :max_requests, 0) + + if max_requests != 0 and + max_requests <= Bandit.HTTP2.StreamCollection.stream_count(connection.streams) do + connection_error!("Connection count exceeded", Bandit.HTTP2.Errors.refused_stream()) + end + + if connection.local_settings.max_concurrent_streams <= + Bandit.HTTP2.StreamCollection.open_stream_count(connection.streams) do + stream_error!( + "Concurrent stream count exceeded", + stream_id, + Bandit.HTTP2.Errors.refused_stream() + ) + end + end + + defp 
check_oversize_fragment!(fragment, connection) do + if byte_size(fragment) > Keyword.get(connection.opts.http_2, :max_header_block_size, 50_000), + do: connection_error!("Received overlong headers") + end + + @spec check_reset_stream_rate_limit!(t()) :: t() + defp check_reset_stream_rate_limit!(connection) do + case Keyword.get(connection.opts.http_2, :max_reset_stream_rate, {500, 10_000}) do + nil -> + connection + + {intensity, period} -> + now = :erlang.monotonic_time(:millisecond) + threshold = now - period + resets = connection.reset_stream_timestamps + recent_timestamps = can_reset(intensity - 1, threshold, resets, [], intensity, period) + %{connection | reset_stream_timestamps: [now | recent_timestamps]} + end + end + + defp can_reset(_, _, [], acc, _, _), + do: :lists.reverse(acc) + + defp can_reset(_, threshold, [restart | _], acc, _, _) when restart < threshold, + do: :lists.reverse(acc) + + defp can_reset(0, _, [_ | _], _acc, intensity, period), + do: + connection_error!( + "Stream resets rate exceeded #{intensity} resets in #{period}ms", + Bandit.HTTP2.Errors.enhance_your_calm() + ) + + defp can_reset(n, threshold, [restart | restarts], acc, intensity, period), + do: can_reset(n - 1, threshold, restarts, [restart | acc], intensity, period) + + # Shared logic to send any pending frames upon adjustment of our send window + defp do_pending_sends(socket, connection) do + connection.pending_sends + |> Enum.reverse() + |> Enum.reduce(connection, fn pending_send, connection -> + connection = connection |> Map.update!(:pending_sends, &List.delete(&1, pending_send)) + {stream_id, rest, end_stream, on_unblock} = pending_send + send_data(stream_id, rest, end_stream, on_unblock, socket, connection) + end) + end + + # + # Sending logic + # + # All callers of functions below will be from stream processes + # + + # + # Stream-level sending + # + + @spec send_headers( + Bandit.HTTP2.Stream.stream_id(), + Plug.Conn.headers(), + boolean(), + ThousandIsland.Socket.t(), + 
t() + ) :: t() + def send_headers(stream_id, headers, end_stream, socket, connection) do + with enc_headers <- Enum.map(headers, fn {key, value} -> {:store, key, value} end), + {block, send_hpack_state} <- HPAX.encode(enc_headers, connection.send_hpack_state) do + %Bandit.HTTP2.Frame.Headers{ + stream_id: stream_id, + end_stream: end_stream, + fragment: block + } + |> send_frame(socket, connection) + + %{connection | send_hpack_state: send_hpack_state} + end + end + + @spec send_data( + Bandit.HTTP2.Stream.stream_id(), + iodata(), + boolean(), + fun(), + ThousandIsland.Socket.t(), + t() + ) :: t() + def send_data(stream_id, data, end_stream, on_unblock, socket, connection) do + with connection_window_size <- connection.send_window_size, + max_bytes_to_send <- max(connection_window_size, 0), + {data_to_send, bytes_to_send, rest} <- split_data(data, max_bytes_to_send), + connection <- %{connection | send_window_size: connection_window_size - bytes_to_send}, + end_stream_to_send <- end_stream && byte_size(rest) == 0 do + if end_stream_to_send || bytes_to_send > 0 do + %Bandit.HTTP2.Frame.Data{ + stream_id: stream_id, + end_stream: end_stream_to_send, + data: data_to_send + } + |> send_frame(socket, connection) + end + + if byte_size(rest) == 0 do + on_unblock.() + connection + else + pending_sends = [{stream_id, rest, end_stream, on_unblock} | connection.pending_sends] + %{connection | pending_sends: pending_sends} + end + end + end + + defp split_data(data, desired_length) do + data_length = IO.iodata_length(data) + + if data_length <= desired_length do + {data, data_length, <<>>} + else + <> = IO.iodata_to_binary(data) + {to_send, desired_length, rest} + end + end + + @spec send_recv_window_update( + Bandit.HTTP2.Stream.stream_id(), + non_neg_integer(), + ThousandIsland.Socket.t(), + t() + ) :: term() + def send_recv_window_update(stream_id, size_increment, socket, connection) do + %Bandit.HTTP2.Frame.WindowUpdate{stream_id: stream_id, size_increment: 
size_increment} + |> send_frame(socket, connection) + end + + @spec send_rst_stream( + Bandit.HTTP2.Stream.stream_id(), + Bandit.HTTP2.Errors.error_code(), + ThousandIsland.Socket.t(), + t() + ) :: term() + def send_rst_stream(stream_id, error_code, socket, connection) do + %Bandit.HTTP2.Frame.RstStream{stream_id: stream_id, error_code: error_code} + |> send_frame(socket, connection) + end + + @spec stream_terminated(pid(), t()) :: t() + def stream_terminated(pid, connection) do + %{connection | streams: Bandit.HTTP2.StreamCollection.delete(connection.streams, pid)} + end + + # + # Helper functions + # + + @spec close_connection(Bandit.HTTP2.Errors.error_code(), term(), ThousandIsland.Socket.t(), t()) :: + {:close, t()} | {:error, term(), t()} + def close_connection(error_code, reason, socket, connection) do + last_stream_id = Bandit.HTTP2.StreamCollection.last_stream_id(connection.streams) + + %Bandit.HTTP2.Frame.Goaway{last_stream_id: last_stream_id, error_code: error_code} + |> send_frame(socket, connection) + + if error_code == Bandit.HTTP2.Errors.no_error(), + do: {:close, connection}, + else: {:error, reason, connection} + end + + @spec connection_error!(term()) :: no_return() + @spec connection_error!(term(), Bandit.HTTP2.Errors.error_code()) :: no_return() + defp connection_error!(message, error_code \\ Bandit.HTTP2.Errors.protocol_error()) do + raise Bandit.HTTP2.Errors.ConnectionError, message: message, error_code: error_code + end + + @spec stream_error!( + String.t(), + Bandit.HTTP2.Stream.stream_id(), + Bandit.HTTP2.Errors.error_code() + ) :: + no_return() + defp stream_error!(message, stream_id, error_code) do + raise Bandit.HTTP2.Errors.StreamError, + message: message, + error_code: error_code, + stream_id: stream_id + end + + defp send_frame(frame, socket, connection) do + _ = + ThousandIsland.Socket.send( + socket, + Bandit.HTTP2.Frame.serialize(frame, connection.remote_settings.max_frame_size) + ) + + :ok + end +end diff --git 
a/deps/bandit/lib/bandit/http2/errors.ex b/deps/bandit/lib/bandit/http2/errors.ex new file mode 100644 index 0000000..8d0bd90 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/errors.ex @@ -0,0 +1,59 @@ +defmodule Bandit.HTTP2.Errors do + @moduledoc false + # Errors as defined in RFC9113§7 + + @typedoc "An error code as defined for GOAWAY and RST_STREAM errors" + @type error_code() :: + (no_error :: 0x0) + | (protocol_error :: 0x1) + | (internal_error :: 0x2) + | (flow_control_error :: 0x3) + | (settings_timeout :: 0x4) + | (stream_closed :: 0x5) + | (frame_size_error :: 0x6) + | (refused_stream :: 0x7) + | (cancel :: 0x8) + | (compression_error :: 0x9) + | (connect_error :: 0xA) + | (enhance_your_calm :: 0xB) + | (inadequate_security :: 0xC) + | (http_1_1_requires :: 0xD) + + error_codes = %{ + no_error: 0x0, + protocol_error: 0x1, + internal_error: 0x2, + flow_control_error: 0x3, + settings_timeout: 0x4, + stream_closed: 0x5, + frame_size_error: 0x6, + refused_stream: 0x7, + cancel: 0x8, + compression_error: 0x9, + connect_error: 0xA, + enhance_your_calm: 0xB, + inadequate_security: 0xC, + http_1_1_requires: 0xD + } + + @spec to_reason(integer()) :: atom() + + for {name, value} <- error_codes do + @spec unquote(name)() :: unquote(Macro.var(name, Elixir)) :: unquote(value) + def unquote(name)(), do: unquote(value) + + def to_reason(unquote(value)), do: unquote(name) + end + + def to_reason(_), do: :unknown + + # Represents a stream error as defined in RFC9113§5.4.2 + defmodule StreamError do + defexception [:message, :error_code, :stream_id] + end + + # Represents a stream error as defined in RFC9113§5.4.3 + defmodule ConnectionError do + defexception [:message, :error_code] + end +end diff --git a/deps/bandit/lib/bandit/http2/flow_control.ex b/deps/bandit/lib/bandit/http2/flow_control.ex new file mode 100644 index 0000000..db67a38 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/flow_control.ex @@ -0,0 +1,43 @@ +defmodule Bandit.HTTP2.FlowControl do + @moduledoc 
false + # Helpers for working with flow control window calculations + + import Bitwise + + @max_window_increment (1 <<< 31) - 1 + @max_window_size (1 <<< 31) - 1 + @min_window_size 1 <<< 30 + + @spec compute_recv_window(non_neg_integer(), non_neg_integer()) :: + {non_neg_integer(), non_neg_integer()} + def compute_recv_window(recv_window_size, data_size) do + # This is what our window size will be after receiving data_size bytes + recv_window_size = recv_window_size - data_size + + if recv_window_size > @min_window_size do + # We have room to go before we need to update our window + {recv_window_size, 0} + else + # We want our new window to be as large as possible, but are limited by both the maximum size + # of the window (2^31-1) and the maximum size of the increment we can send to the client, both + # per RFC9113§6.9. Be careful about handling cases where we have a negative window due to + # misbehaving clients or network races + new_recv_window_size = min(recv_window_size + @max_window_increment, @max_window_size) + + # Finally, determine what increment to send to the client + increment = new_recv_window_size - recv_window_size + + {new_recv_window_size, increment} + end + end + + @spec update_send_window(non_neg_integer(), non_neg_integer()) :: + {:ok, non_neg_integer()} | {:error, String.t()} + def update_send_window(current_send_window, increment) do + if current_send_window + increment > @max_window_size do + {:error, "Invalid WINDOW_UPDATE increment RFC9113§6.9.1"} + else + {:ok, current_send_window + increment} + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame.ex b/deps/bandit/lib/bandit/http2/frame.ex new file mode 100644 index 0000000..74da60f --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame.ex @@ -0,0 +1,105 @@ +defmodule Bandit.HTTP2.Frame do + @moduledoc false + + @typedoc "Indicates a frame type" + @type frame_type :: non_neg_integer() + + @typedoc "The flags passed along with a frame" + @type flags :: byte() + + @typedoc "A 
valid HTTP/2 frame" + @type frame :: + Bandit.HTTP2.Frame.Data.t() + | Bandit.HTTP2.Frame.Headers.t() + | Bandit.HTTP2.Frame.Priority.t() + | Bandit.HTTP2.Frame.RstStream.t() + | Bandit.HTTP2.Frame.Settings.t() + | Bandit.HTTP2.Frame.Ping.t() + | Bandit.HTTP2.Frame.Goaway.t() + | Bandit.HTTP2.Frame.WindowUpdate.t() + | Bandit.HTTP2.Frame.Continuation.t() + | Bandit.HTTP2.Frame.Unknown.t() + + @spec deserialize(binary(), non_neg_integer()) :: + {{:ok, frame()}, iodata()} + | {{:more, iodata()}, <<>>} + | {{:error, Bandit.HTTP2.Errors.error_code(), binary()}, iodata()} + | nil + # This is a little more aggressive than necessary. RFC9113§4.2 says we only need + # to treat frame size violations as connection level errors if the frame in + # question would affect the connection as a whole, so we could be more surgical + # here and send stream level errors in some cases. However, we are well within + # our rights to consider such errors as connection errors + def deserialize( + <>, + max_frame_size + ) + when length > max_frame_size do + {{:error, Bandit.HTTP2.Errors.frame_size_error(), "Payload size too large (RFC9113§4.2)"}, + rest} + end + + def deserialize( + <>, + max_frame_size + ) + when length <= max_frame_size do + type + |> case do + 0x0 -> Bandit.HTTP2.Frame.Data.deserialize(flags, stream_id, payload) + 0x1 -> Bandit.HTTP2.Frame.Headers.deserialize(flags, stream_id, payload) + 0x2 -> Bandit.HTTP2.Frame.Priority.deserialize(flags, stream_id, payload) + 0x3 -> Bandit.HTTP2.Frame.RstStream.deserialize(flags, stream_id, payload) + 0x4 -> Bandit.HTTP2.Frame.Settings.deserialize(flags, stream_id, payload) + 0x5 -> Bandit.HTTP2.Frame.PushPromise.deserialize(flags, stream_id, payload) + 0x6 -> Bandit.HTTP2.Frame.Ping.deserialize(flags, stream_id, payload) + 0x7 -> Bandit.HTTP2.Frame.Goaway.deserialize(flags, stream_id, payload) + 0x8 -> Bandit.HTTP2.Frame.WindowUpdate.deserialize(flags, stream_id, payload) + 0x9 -> Bandit.HTTP2.Frame.Continuation.deserialize(flags, 
stream_id, payload) + unknown -> Bandit.HTTP2.Frame.Unknown.deserialize(unknown, flags, stream_id, payload) + end + |> then(&{&1, rest}) + end + + # nil is used to indicate for Stream.unfold/2 that the frame deserialization is finished + def deserialize(<<>>, _max_frame_size) do + nil + end + + def deserialize(msg, _max_frame_size) do + {{:more, msg}, <<>>} + end + + defmodule Flags do + @moduledoc false + + import Bitwise + + defguard set?(flags, bit) when band(flags, bsl(1, bit)) != 0 + defguard clear?(flags, bit) when band(flags, bsl(1, bit)) == 0 + + @spec set([0..255]) :: 0..255 + def set([]), do: 0x0 + def set([bit | rest]), do: bor(bsl(1, bit), set(rest)) + end + + defprotocol Serializable do + @moduledoc false + + @spec serialize(any(), non_neg_integer()) :: [ + {Bandit.HTTP2.Frame.frame_type(), Bandit.HTTP2.Frame.flags(), + Bandit.HTTP2.Stream.stream_id(), iodata()} + ] + def serialize(frame, max_frame_size) + end + + @spec serialize(frame(), non_neg_integer()) :: iolist() + def serialize(frame, max_frame_size) do + frame + |> Serializable.serialize(max_frame_size) + |> Enum.map(fn {type, flags, stream_id, payload} -> + [<>, payload] + end) + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/continuation.ex b/deps/bandit/lib/bandit/http2/frame/continuation.ex new file mode 100644 index 0000000..65fb59a --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/continuation.ex @@ -0,0 +1,57 @@ +defmodule Bandit.HTTP2.Frame.Continuation do + @moduledoc false + + import Bandit.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_headers: false, + fragment: nil + + @typedoc "An HTTP/2 CONTINUATION frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + end_headers: boolean(), + fragment: iodata() + } + + @end_headers_bit 2 + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + 
{:error, Bandit.HTTP2.Errors.protocol_error(), + "CONTINUATION frame with zero stream_id (RFC9113§6.10)"} + end + + def deserialize(flags, stream_id, <>) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_headers: set?(flags, @end_headers_bit), + fragment: fragment + }} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + @end_headers_bit 2 + + def serialize(%Bandit.HTTP2.Frame.Continuation{} = frame, max_frame_size) do + fragment_length = IO.iodata_length(frame.fragment) + + if fragment_length <= max_frame_size do + [{0x9, set([@end_headers_bit]), frame.stream_id, frame.fragment}] + else + <> = + IO.iodata_to_binary(frame.fragment) + + [ + {0x9, 0x00, frame.stream_id, this_frame} + | Bandit.HTTP2.Frame.Serializable.serialize( + %Bandit.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + max_frame_size + ) + ] + end + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/data.ex b/deps/bandit/lib/bandit/http2/frame/data.ex new file mode 100644 index 0000000..01b7b63 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/data.ex @@ -0,0 +1,78 @@ +defmodule Bandit.HTTP2.Frame.Data do + @moduledoc false + + import Bandit.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_stream: false, + data: nil + + @typedoc "An HTTP/2 DATA frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + end_stream: boolean(), + data: iodata() + } + + @end_stream_bit 0 + @padding_bit 3 + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, Bandit.HTTP2.Errors.protocol_error(), "DATA frame with zero stream_id (RFC9113§6.1)"} + end + + def deserialize(flags, stream_id, <>) + when set?(flags, @padding_bit) and byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + data: binary_part(rest, 0, 
byte_size(rest) - padding_length) + }} + end + + def deserialize(flags, stream_id, <>) when clear?(flags, @padding_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + data: data + }} + end + + def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) + when set?(flags, @padding_bit) do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "DATA frame with invalid padding length (RFC9113§6.1)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + @end_stream_bit 0 + + def serialize(%Bandit.HTTP2.Frame.Data{} = frame, max_frame_size) do + data_length = IO.iodata_length(frame.data) + + if data_length <= max_frame_size do + flags = if frame.end_stream, do: [@end_stream_bit], else: [] + [{0x0, set(flags), frame.stream_id, frame.data}] + else + <> = + IO.iodata_to_binary(frame.data) + + [ + {0x0, 0x00, frame.stream_id, this_frame} + | Bandit.HTTP2.Frame.Serializable.serialize( + %Bandit.HTTP2.Frame.Data{ + stream_id: frame.stream_id, + end_stream: frame.end_stream, + data: rest + }, + max_frame_size + ) + ] + end + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/goaway.ex b/deps/bandit/lib/bandit/http2/frame/goaway.ex new file mode 100644 index 0000000..6825653 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/goaway.ex @@ -0,0 +1,42 @@ +defmodule Bandit.HTTP2.Frame.Goaway do + @moduledoc false + + defstruct last_stream_id: 0, error_code: 0, debug_data: <<>> + + @typedoc "An HTTP/2 GOAWAY frame" + @type t :: %__MODULE__{ + last_stream_id: Bandit.HTTP2.Stream.stream_id(), + error_code: Bandit.HTTP2.Errors.error_code(), + debug_data: iodata() + } + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize( + _flags, + 0, + <<_reserved::1, last_stream_id::31, error_code::32, debug_data::binary>> + ) do + {:ok, + %__MODULE__{last_stream_id: last_stream_id, error_code: 
error_code, debug_data: debug_data}} + end + + def deserialize(_flags, stream_id, _payload) when stream_id != 0 do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "Invalid stream ID in GOAWAY frame (RFC9113§6.8)"} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), + "GOAWAY frame with invalid payload size (RFC9113§6.8)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + def serialize(%Bandit.HTTP2.Frame.Goaway{} = frame, _max_frame_size) do + [ + {0x7, 0x0, 0, + <<0x0::1, frame.last_stream_id::31, frame.error_code::32, frame.debug_data::binary>>} + ] + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/headers.ex b/deps/bandit/lib/bandit/http2/frame/headers.ex new file mode 100644 index 0000000..0ed43b7 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/headers.ex @@ -0,0 +1,140 @@ +defmodule Bandit.HTTP2.Frame.Headers do + @moduledoc false + + import Bandit.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_stream: false, + end_headers: false, + exclusive_dependency: false, + stream_dependency: nil, + weight: nil, + fragment: nil + + @typedoc "An HTTP/2 HEADERS frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + end_stream: boolean(), + end_headers: boolean(), + exclusive_dependency: boolean(), + stream_dependency: Bandit.HTTP2.Stream.stream_id() | nil, + weight: non_neg_integer() | nil, + fragment: iodata() + } + + @end_stream_bit 0 + @end_headers_bit 2 + @padding_bit 3 + @priority_bit 5 + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "HEADERS frame with zero stream_id (RFC9113§6.2)"} + end + + # Padding and priority + def deserialize( + flags, + stream_id, + <> + ) + when set?(flags, @padding_bit) and set?(flags, @priority_bit) and + 
byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + exclusive_dependency: exclusive_dependency == 0x01, + stream_dependency: stream_dependency, + weight: weight, + fragment: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + # Padding but not priority + def deserialize(flags, stream_id, <>) + when set?(flags, @padding_bit) and clear?(flags, @priority_bit) and + byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + fragment: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + # Any other case where padding is set + def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) + when set?(flags, @padding_bit) do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "HEADERS frame with invalid padding length (RFC9113§6.2)"} + end + + def deserialize( + flags, + stream_id, + <> + ) + when set?(flags, @priority_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + exclusive_dependency: exclusive_dependency == 0x01, + stream_dependency: stream_dependency, + weight: weight, + fragment: fragment + }} + end + + def deserialize(flags, stream_id, <>) + when clear?(flags, @priority_bit) and clear?(flags, @padding_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + fragment: fragment + }} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + @end_stream_bit 0 + @end_headers_bit 2 + + def serialize( + %Bandit.HTTP2.Frame.Headers{ + exclusive_dependency: false, + stream_dependency: nil, + weight: nil + } = + frame, + max_frame_size + ) do + flags = if frame.end_stream, do: [@end_stream_bit], else: [] + + 
fragment_length = IO.iodata_length(frame.fragment) + + if fragment_length <= max_frame_size do + [{0x1, set([@end_headers_bit | flags]), frame.stream_id, frame.fragment}] + else + <> = + IO.iodata_to_binary(frame.fragment) + + [ + {0x1, set(flags), frame.stream_id, this_frame} + | Bandit.HTTP2.Frame.Serializable.serialize( + %Bandit.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + max_frame_size + ) + ] + end + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/ping.ex b/deps/bandit/lib/bandit/http2/frame/ping.ex new file mode 100644 index 0000000..83eedc4 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/ping.ex @@ -0,0 +1,45 @@ +defmodule Bandit.HTTP2.Frame.Ping do + @moduledoc false + + import Bandit.HTTP2.Frame.Flags + + defstruct ack: false, payload: nil + + @typedoc "An HTTP/2 PING frame" + @type t :: %__MODULE__{ + ack: boolean(), + payload: iodata() + } + + @ack_bit 0 + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(flags, 0, <>) when set?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: true, payload: payload}} + end + + def deserialize(flags, 0, <>) when clear?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: false, payload: payload}} + end + + def deserialize(_flags, stream_id, _payload) when stream_id != 0 do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "Invalid stream ID in PING frame (RFC9113§6.7)"} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), + "PING frame with invalid payload size (RFC9113§6.7)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + @ack_bit 0 + + def serialize(%Bandit.HTTP2.Frame.Ping{ack: true} = frame, _max_frame_size), + do: [{0x6, set([@ack_bit]), 0, frame.payload}] + + def serialize(%Bandit.HTTP2.Frame.Ping{ack: false} = frame, _max_frame_size), + do: [{0x6, 0x0, 0, frame.payload}] + end +end 
diff --git a/deps/bandit/lib/bandit/http2/frame/priority.ex b/deps/bandit/lib/bandit/http2/frame/priority.ex new file mode 100644 index 0000000..7b3f776 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/priority.ex @@ -0,0 +1,35 @@ +defmodule Bandit.HTTP2.Frame.Priority do + @moduledoc false + + defstruct stream_id: nil, dependent_stream_id: nil, weight: nil + + @typedoc "An HTTP/2 PRIORITY frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + dependent_stream_id: Bandit.HTTP2.Stream.stream_id(), + weight: non_neg_integer() + } + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "PRIORITY frame with zero stream_id (RFC9113§6.3)"} + end + + def deserialize(_flags, stream_id, <<_reserved::1, dependent_stream_id::31, weight::8>>) do + {:ok, + %__MODULE__{stream_id: stream_id, dependent_stream_id: dependent_stream_id, weight: weight}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), + "Invalid payload size in PRIORITY frame (RFC9113§6.3)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + def serialize(%Bandit.HTTP2.Frame.Priority{} = frame, _max_frame_size) do + [{0x2, 0x0, frame.stream_id, <<0::1, frame.dependent_stream_id::31, frame.weight::8>>}] + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/push_promise.ex b/deps/bandit/lib/bandit/http2/frame/push_promise.ex new file mode 100644 index 0000000..f1d46c4 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/push_promise.ex @@ -0,0 +1,9 @@ +defmodule Bandit.HTTP2.Frame.PushPromise do + @moduledoc false + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, _stream, _payload) do + 
{:error, Bandit.HTTP2.Errors.protocol_error(), "PUSH_PROMISE frame received (RFC9113§8.4)"} + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/rst_stream.ex b/deps/bandit/lib/bandit/http2/frame/rst_stream.ex new file mode 100644 index 0000000..8bfa799 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/rst_stream.ex @@ -0,0 +1,33 @@ +defmodule Bandit.HTTP2.Frame.RstStream do + @moduledoc false + + defstruct stream_id: nil, error_code: nil + + @typedoc "An HTTP/2 RST_STREAM frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + error_code: Bandit.HTTP2.Errors.error_code() + } + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, Bandit.HTTP2.Errors.protocol_error(), + "RST_STREAM frame with zero stream_id (RFC9113§6.4)"} + end + + def deserialize(_flags, stream_id, <>) do + {:ok, %__MODULE__{stream_id: stream_id, error_code: error_code}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), + "Invalid payload size in RST_STREAM frame (RFC9113§6.4)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + def serialize(%Bandit.HTTP2.Frame.RstStream{} = frame, _max_frame_size) do + [{0x3, 0x0, frame.stream_id, <>}] + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/settings.ex b/deps/bandit/lib/bandit/http2/frame/settings.ex new file mode 100644 index 0000000..f3a0844 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/settings.ex @@ -0,0 +1,117 @@ +defmodule Bandit.HTTP2.Frame.Settings do + @moduledoc false + + import Bandit.HTTP2.Frame.Flags + import Bitwise + + @max_window_size (1 <<< 31) - 1 + @min_frame_size 1 <<< 14 + @max_frame_size (1 <<< 24) - 1 + + defstruct ack: false, settings: nil + + @typedoc "An HTTP/2 SETTINGS frame" + @type t :: %__MODULE__{ack: true, settings: nil} | %__MODULE__{ack: 
false, settings: map()} + + @ack_bit 0 + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(flags, 0, payload) when clear?(flags, @ack_bit) do + payload + |> Stream.unfold(fn + <<>> -> nil + <> -> {{:ok, {setting, value}}, rest} + <> -> {{:error, rest}, <<>>} + end) + |> Enum.reduce_while({:ok, %{}}, fn + {:ok, {0x01, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :header_table_size, value)}} + + {:ok, {0x02, val}}, {:ok, acc} when val in [0x00, 0x01] -> + {:cont, {:ok, acc}} + + {:ok, {0x02, _value}}, {:ok, _acc} -> + {:halt, + {:error, Bandit.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} + + {:ok, {0x03, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_concurrent_streams, value)}} + + {:ok, {0x04, value}}, {:ok, _acc} when value > @max_window_size -> + {:halt, + {:error, Bandit.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} + + {:ok, {0x04, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :initial_window_size, value)}} + + {:ok, {0x05, value}}, {:ok, _acc} when value < @min_frame_size -> + {:halt, + {:error, Bandit.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + + {:ok, {0x05, value}}, {:ok, _acc} when value > @max_frame_size -> + {:halt, + {:error, Bandit.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + + {:ok, {0x05, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_frame_size, value)}} + + {:ok, {0x06, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_header_list_size, value)}} + + {:ok, {_setting, _value}}, {:ok, acc} -> + {:cont, {:ok, acc}} + + {:error, _rest}, _acc -> + {:halt, + {:error, Bandit.HTTP2.Errors.frame_size_error(), "Invalid SETTINGS size (RFC9113§6.5)"}} + end) + |> case do + {:ok, settings} -> {:ok, %__MODULE__{ack: false, settings: settings}} + {:error, error_code, reason} -> 
{:error, error_code, reason} + end + end + + def deserialize(flags, 0, <<>>) when set?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: true}} + end + + def deserialize(flags, 0, _payload) when set?(flags, @ack_bit) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), + "SETTINGS ack frame with non-empty payload (RFC9113§6.5)"} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.protocol_error(), "Invalid SETTINGS frame (RFC9113§6.5)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + @ack_bit 0 + + def serialize(%Bandit.HTTP2.Frame.Settings{ack: true}, _max_frame_size), + do: [{0x4, set([@ack_bit]), 0, <<>>}] + + def serialize(%Bandit.HTTP2.Frame.Settings{ack: false} = frame, _max_frame_size) do + # Encode default settings values as empty binaries so that we do not send + # them. This means we can't restore settings back to default values if we + # change them, but since we don't ever change our settings this is fine + payload = + frame.settings + |> Enum.uniq_by(fn {setting, _} -> setting end) + |> Enum.map(fn + {:header_table_size, 4_096} -> <<>> + {:header_table_size, value} -> <<0x01::16, value::32>> + {:max_concurrent_streams, :infinity} -> <<>> + {:max_concurrent_streams, value} -> <<0x03::16, value::32>> + {:initial_window_size, 65_535} -> <<>> + {:initial_window_size, value} -> <<0x04::16, value::32>> + {:max_frame_size, 16_384} -> <<>> + {:max_frame_size, value} -> <<0x05::16, value::32>> + {:max_header_list_size, :infinity} -> <<>> + {:max_header_list_size, value} -> <<0x06::16, value::32>> + end) + + [{0x4, 0x0, 0, payload}] + end + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/unknown.ex b/deps/bandit/lib/bandit/http2/frame/unknown.ex new file mode 100644 index 0000000..652a976 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/unknown.ex @@ -0,0 +1,27 @@ +defmodule Bandit.HTTP2.Frame.Unknown do + @moduledoc false + + defstruct type: nil, + flags: nil, + stream_id: nil, + payload: nil + + @typedoc 
"An HTTP/2 frame of unknown type" + @type t :: %__MODULE__{ + type: Bandit.HTTP2.Frame.frame_type(), + flags: Bandit.HTTP2.Frame.flags(), + stream_id: Bandit.HTTP2.Stream.stream_id(), + payload: iodata() + } + + # Note this is arity 4 + @spec deserialize( + Bandit.HTTP2.Frame.frame_type(), + Bandit.HTTP2.Frame.flags(), + Bandit.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} + def deserialize(type, flags, stream_id, payload) do + {:ok, %__MODULE__{type: type, flags: flags, stream_id: stream_id, payload: payload}} + end +end diff --git a/deps/bandit/lib/bandit/http2/frame/window_update.ex b/deps/bandit/lib/bandit/http2/frame/window_update.ex new file mode 100644 index 0000000..85fa879 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/frame/window_update.ex @@ -0,0 +1,33 @@ +defmodule Bandit.HTTP2.Frame.WindowUpdate do + @moduledoc false + + defstruct stream_id: nil, + size_increment: nil + + @typedoc "An HTTP/2 WINDOW_UPDATE frame" + @type t :: %__MODULE__{ + stream_id: Bandit.HTTP2.Stream.stream_id(), + size_increment: non_neg_integer() + } + + @spec deserialize(Bandit.HTTP2.Frame.flags(), Bandit.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, Bandit.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, _stream_id, <<_reserved::1, 0::31>>) do + {:error, Bandit.HTTP2.Errors.flow_control_error(), + "Invalid WINDOW_UPDATE size increment (RFC9113§6.9)"} + end + + def deserialize(_flags, stream_id, <<_reserved::1, size_increment::31>>) do + {:ok, %__MODULE__{stream_id: stream_id, size_increment: size_increment}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, Bandit.HTTP2.Errors.frame_size_error(), "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} + end + + defimpl Bandit.HTTP2.Frame.Serializable do + def serialize(%Bandit.HTTP2.Frame.WindowUpdate{} = frame, _max_frame_size) do + [{0x8, 0, frame.stream_id, <<0::1, frame.size_increment::31>>}] + end + end +end diff --git a/deps/bandit/lib/bandit/http2/handler.ex 
b/deps/bandit/lib/bandit/http2/handler.ex new file mode 100644 index 0000000..c737eee --- /dev/null +++ b/deps/bandit/lib/bandit/http2/handler.ex @@ -0,0 +1,186 @@ +defmodule Bandit.HTTP2.Handler do + @moduledoc false + # An HTTP/2 handler, this module comprises the primary interface between Thousand Island and an + # HTTP connection. It is responsible for: + # + # * All socket-level sending and receiving from the client + # * Coordinating the parsing of frames & attendant error handling + # * Tracking connection state as represented by a `Bandit.HTTP2.Connection` struct + + use ThousandIsland.Handler + + @impl ThousandIsland.Handler + def handle_connection(socket, state) do + connection = Bandit.HTTP2.Connection.init(socket, state.plug, state.opts) + {:continue, Map.merge(state, %{buffer: <<>>, connection: connection})} + rescue + error -> rescue_connection_error(error, __STACKTRACE__, socket, state) + end + + @impl ThousandIsland.Handler + def handle_data(data, socket, state) do + (state.buffer <> data) + |> Stream.unfold( + &Bandit.HTTP2.Frame.deserialize(&1, state.connection.local_settings.max_frame_size) + ) + |> Enum.reduce_while(state, fn + {:ok, frame}, state -> + connection = Bandit.HTTP2.Connection.handle_frame(frame, socket, state.connection) + {:cont, %{state | connection: connection, buffer: <<>>}} + + {:more, rest}, state -> + {:halt, %{state | buffer: rest}} + + {:error, error_code, message}, _state -> + # We encountered an error while deserializing the frame. 
Let the connection figure out + # how to respond to it + raise Bandit.HTTP2.Errors.ConnectionError, message: message, error_code: error_code + end) + |> then(&{:continue, &1}) + rescue + error in Bandit.HTTP2.Errors.StreamError -> rescue_stream_error(error, socket, state) + error -> rescue_connection_error(error, __STACKTRACE__, socket, state) + end + + @impl ThousandIsland.Handler + def handle_shutdown(socket, state) do + Bandit.HTTP2.Connection.close_connection( + Bandit.HTTP2.Errors.no_error(), + "Server shutdown", + socket, + state.connection + ) + end + + @impl ThousandIsland.Handler + def handle_timeout(socket, state) do + Bandit.HTTP2.Connection.close_connection( + Bandit.HTTP2.Errors.no_error(), + "Client timeout", + socket, + state.connection + ) + end + + def handle_call({:peer_data, _stream_id}, _from, {socket, state}) do + {:reply, Bandit.SocketHelpers.peer_data(socket), {socket, state}, socket.read_timeout} + end + + def handle_call({:sock_data, _stream_id}, _from, {socket, state}) do + {:reply, Bandit.SocketHelpers.sock_data(socket), {socket, state}, socket.read_timeout} + end + + def handle_call({:ssl_data, _stream_id}, _from, {socket, state}) do + {:reply, Bandit.SocketHelpers.ssl_data(socket), {socket, state}, socket.read_timeout} + end + + def handle_call({{:send_data, data, end_stream}, stream_id}, from, {socket, state}) do + # In 'normal' cases where there is sufficient space in the send windows for this message to be + # sent, Connection will call `unblock` synchronously in the `Connection.send_data` call below. + # In cases where there is not enough space in the connection window, Connection will call + # `unblock` at some point in the future once space opens up in the window. This + # keeps this code simple in that we can blindly send noreply here and let Connection handle + # the separate cases. This ensures that we have backpressure all the way back to the + # stream's handler process in the event of window overruns. 
+ # + # Note that the above only applies to the connection-level send window; stream-level windows + # are managed internally by the stream and are not considered here at all. If the stream has + # managed to send this message, it is because there was enough room in the stream's send + # window to do so. + unblock = fn -> GenServer.reply(from, :ok) end + + connection = + Bandit.HTTP2.Connection.send_data( + stream_id, + data, + end_stream, + unblock, + socket, + state.connection + ) + + {:noreply, {socket, %{state | connection: connection}}, socket.read_timeout} + rescue + error -> rescue_error_handle_info(error, __STACKTRACE__, socket, state) + end + + def handle_info({{:send_headers, headers, end_stream}, stream_id}, {socket, state}) do + connection = + Bandit.HTTP2.Connection.send_headers( + stream_id, + headers, + end_stream, + socket, + state.connection + ) + + {:noreply, {socket, %{state | connection: connection}}, socket.read_timeout} + rescue + error -> rescue_error_handle_info(error, __STACKTRACE__, socket, state) + end + + def handle_info({{:send_recv_window_update, size_increment}, stream_id}, {socket, state}) do + Bandit.HTTP2.Connection.send_recv_window_update( + stream_id, + size_increment, + socket, + state.connection + ) + + {:noreply, {socket, state}, socket.read_timeout} + rescue + error -> rescue_error_handle_info(error, __STACKTRACE__, socket, state) + end + + def handle_info({{:send_rst_stream, error_code}, stream_id}, {socket, state}) do + Bandit.HTTP2.Connection.send_rst_stream(stream_id, error_code, socket, state.connection) + {:noreply, {socket, state}, socket.read_timeout} + rescue + error -> rescue_error_handle_info(error, __STACKTRACE__, socket, state) + end + + def handle_info({{:close_connection, error_code, msg}, _stream_id}, {socket, state}) do + _ = Bandit.HTTP2.Connection.close_connection(error_code, msg, socket, state.connection) + {:stop, :normal, {socket, state}} + end + + def handle_info({:EXIT, pid, _reason}, {socket, state}) 
do + connection = Bandit.HTTP2.Connection.stream_terminated(pid, state.connection) + {:noreply, {socket, %{state | connection: connection}}, socket.read_timeout} + end + + defp rescue_stream_error(error, socket, state) do + Bandit.HTTP2.Connection.send_rst_stream( + error.stream_id, + error.error_code, + socket, + state.connection + ) + + {:continue, state} + end + + defp rescue_connection_error(error, stacktrace, socket, state) do + do_rescue_error(error, stacktrace, socket, state) + {:close, state} + end + + defp rescue_error_handle_info(error, stacktrace, socket, state) do + do_rescue_error(error, stacktrace, socket, state) + {:stop, :normal} + end + + defp do_rescue_error(error, stacktrace, socket, state) do + _ = + if state[:connection] do + Bandit.HTTP2.Connection.close_connection( + error.error_code, + error.message, + socket, + state[:connection] + ) + end + + Bandit.Logger.maybe_log_protocol_error(error, stacktrace, state.opts, plug: state.plug) + end +end diff --git a/deps/bandit/lib/bandit/http2/settings.ex b/deps/bandit/lib/bandit/http2/settings.ex new file mode 100644 index 0000000..48b922d --- /dev/null +++ b/deps/bandit/lib/bandit/http2/settings.ex @@ -0,0 +1,20 @@ +defmodule Bandit.HTTP2.Settings do + @moduledoc """ + Settings as defined in RFC9113§6.5.2 + """ + + defstruct header_table_size: 4_096, + max_concurrent_streams: :infinity, + initial_window_size: 65_535, + max_frame_size: 16_384, + max_header_list_size: :infinity + + @typedoc "A collection of settings as defined in RFC9113§6.5" + @type t :: %__MODULE__{ + header_table_size: non_neg_integer(), + max_concurrent_streams: non_neg_integer() | :infinity, + initial_window_size: non_neg_integer(), + max_frame_size: non_neg_integer(), + max_header_list_size: non_neg_integer() | :infinity + } +end diff --git a/deps/bandit/lib/bandit/http2/stream.ex b/deps/bandit/lib/bandit/http2/stream.ex new file mode 100644 index 0000000..60a182c --- /dev/null +++ b/deps/bandit/lib/bandit/http2/stream.ex @@ -0,0 
+1,634 @@ +defmodule Bandit.HTTP2.Stream do + @moduledoc false + # This module implements an HTTP/2 stream as described in RFC 9113, without concern for the higher-level + # HTTP semantics described in RFC 9110. It is similar in spirit to `Bandit.HTTP1.Socket` for + # HTTP/1, and indeed both implement the `Bandit.HTTPTransport` behaviour. An instance of this + # struct is maintained as the state of a `Bandit.HTTP2.StreamProcess` process, and it moves an + # HTTP/2 stream through its lifecycle by calling functions defined on this module. This state is + # also tracked within the `Bandit.Adapter` instance that backs Bandit's Plug API. + # + # A note about naming: + # + # This module has several intended callers, and due to its nature as a coordinator, needs to be + # careful about how it uses terms like 'read', 'send', 'receive', etc. To that end, there are + # some conventions in place: + # + # * Functions on this module which are intended to be called internally by the containing + # `Bandit.HTTP2.Connection` to pass information received from the client (such as headers or + # request data) to this stream. These functions are named `deliver_*`, and are intended to be + # called by the connection process. As such, they take a `stream_handle()` argument, which + # corresponds either to a pid (in the case of an active stream), or the value `:closed` (in the + # case of a stream which has already completed processing) + # + # * Functions on this module which are intended to be called by the higher-level implementation + # that is processing this stream are implemented via the `Bandit.HTTPTransport` protocol + # + # * In order for this stream to receive information from the containing connection process, we + # use carefully crafted `receive` calls (we do this in a manner that is safe to do within a + # GenServer). 
This work is handled internally by a number of functions named `do_recv_*`, which + # generally present a blocking interface in order to align with the expectations of the + # `Plug.Conn.Adapter` behaviour. + # + # This module also uses exceptions by convention rather than error tuples since many + # of these functions are called within `Plug.Conn.Adapter` calls, which makes it + # difficult to properly unwind many error conditions back to a place where we can properly shut + # down the stream by sending a RstStream frame to the client and terminating our process. The + # pattern here is to raise exceptions, and have the `Bandit.HTTP2.StreamProcess`'s `terminate/2` + # callback take care of calling back into us via the `reset_stream/2` and `close_connection/2` + # functions here, with the luxury of a nicely unwound stack and a process that is guaranteed to + # be terminated as soon as these functions are called + + require Logger + + defstruct connection_pid: nil, + stream_id: nil, + state: :idle, + recv_window_size: 65_535, + send_window_size: nil, + sendfile_chunk_size: nil, + bytes_remaining: nil, + read_timeout: 15_000 + + @typedoc "An HTTP/2 stream identifier" + @type stream_id :: non_neg_integer() + + @typedoc "A handle to a stream, suitable for passing to the `deliver_*` functions on this module" + @type stream_handle :: pid() | :closed + + @typedoc "An HTTP/2 stream state" + @type state :: :idle | :open | :local_closed | :remote_closed | :closed + + @typedoc "The information necessary to communicate to/from a stream" + @type t :: %__MODULE__{ + connection_pid: pid(), + stream_id: non_neg_integer(), + state: state(), + recv_window_size: non_neg_integer(), + send_window_size: non_neg_integer(), + sendfile_chunk_size: pos_integer(), + bytes_remaining: non_neg_integer() | nil, + read_timeout: timeout() + } + + def init(connection_pid, stream_id, initial_send_window_size, sendfile_chunk_size) do + %__MODULE__{ + connection_pid: connection_pid, + stream_id: 
stream_id, + send_window_size: initial_send_window_size, + sendfile_chunk_size: sendfile_chunk_size + } + end + + # Collection API - Delivery + # + # These functions are intended to be called by the connection process which contains this + # stream. All of these start with `deliver_` + + @spec deliver_headers(stream_handle(), Plug.Conn.headers(), boolean()) :: term() + def deliver_headers(:closed, _headers, _end_stream), do: :ok + + def deliver_headers(pid, headers, end_stream), + do: send(pid, {:bandit, {:headers, headers, end_stream}}) + + @spec deliver_data(stream_handle(), iodata(), boolean()) :: term() + def deliver_data(:closed, _data, _end_stream), do: :ok + def deliver_data(pid, data, end_stream), do: send(pid, {:bandit, {:data, data, end_stream}}) + + @spec deliver_send_window_update(stream_handle(), non_neg_integer()) :: term() + def deliver_send_window_update(:closed, _delta), do: :ok + + def deliver_send_window_update(pid, delta), + do: send(pid, {:bandit, {:send_window_update, delta}}) + + @spec deliver_rst_stream(stream_handle(), Bandit.HTTP2.Errors.error_code()) :: term() + def deliver_rst_stream(:closed, _error_code), do: :ok + def deliver_rst_stream(pid, error_code), do: send(pid, {:bandit, {:rst_stream, error_code}}) + + defimpl Bandit.HTTPTransport do + def peer_data(%@for{} = stream), do: call(stream, :peer_data, :infinity) + + def sock_data(%@for{} = stream), do: call(stream, :sock_data, :infinity) + + def ssl_data(%@for{} = stream), do: call(stream, :ssl_data, :infinity) + + def version(%@for{}), do: :"HTTP/2" + + def read_headers(%@for{state: :idle} = stream) do + case do_recv(stream, stream.read_timeout) do + {:headers, headers, stream} -> + method = Bandit.Headers.get_header(headers, ":method") + request_target = build_request_target!(headers, stream) + {pseudo_headers, headers} = split_headers!(headers, stream) + pseudo_headers_all_request!(pseudo_headers, stream) + exactly_one_instance_of!(pseudo_headers, ":scheme", stream) + 
exactly_one_instance_of!(pseudo_headers, ":method", stream) + exactly_one_instance_of!(pseudo_headers, ":path", stream) + headers_all_lowercase!(headers, stream) + no_connection_headers!(headers, stream) + valid_te_header!(headers, stream) + content_length = get_content_length!(headers, stream) + headers = combine_cookie_crumbs(headers) + stream = %{stream | bytes_remaining: content_length} + {:ok, method, request_target, headers, stream} + + :timeout -> + stream_error!("Timed out waiting for HEADER", stream) + + %@for{} = stream -> + read_headers(stream) + end + end + + defp build_request_target!(headers, stream) do + scheme = Bandit.Headers.get_header(headers, ":scheme") + {host, port} = get_host_and_port!(headers) + path = get_path!(headers, stream) + {scheme, host, port, path} + end + + defp get_host_and_port!(headers) do + case Bandit.Headers.get_header(headers, ":authority") do + authority when not is_nil(authority) -> Bandit.Headers.parse_hostlike_header!(authority) + nil -> {nil, nil} + end + end + + # RFC9113§8.3.1 - path should be non-empty and absolute + defp get_path!(headers, stream) do + headers + |> Bandit.Headers.get_header(":path") + |> case do + nil -> stream_error!("Received empty :path", stream) + "*" -> :* + "/" <> _ = path -> split_path!(path, stream) + _ -> stream_error!("Path does not start with /", stream) + end + end + + # RFC9113§8.3.1 - path should match the path-absolute production from RFC3986 + defp split_path!(path, stream) do + if path |> String.split("/") |> Enum.all?(&(&1 not in [".", ".."])), + do: path, + else: stream_error!("Path contains dot segment", stream) + end + + # RFC9113§8.3 - pseudo headers must appear first + defp split_headers!(headers, stream) do + {pseudo_headers, headers} = + Enum.split_while(headers, fn {key, _value} -> String.starts_with?(key, ":") end) + + if Enum.any?(headers, fn {key, _value} -> String.starts_with?(key, ":") end), + do: stream_error!("Received pseudo headers after regular one", stream), + 
else: {pseudo_headers, headers} + end + + # RFC9113§8.3.1 - only request pseudo headers may appear + defp pseudo_headers_all_request!(headers, stream) do + if Enum.any?(headers, fn {key, _value} -> + key not in ~w[:method :scheme :authority :path] + end), + do: stream_error!("Received invalid pseudo header", stream) + end + + # RFC9113§8.3.1 - method, scheme, path pseudo headers must appear exactly once + defp exactly_one_instance_of!(headers, header, stream) do + if Enum.count(headers, fn {key, _value} -> key == header end) != 1, + do: stream_error!("Expected 1 #{header} headers", stream) + end + + # RFC9113§8.2 - all headers name fields must be lowercsae + defp headers_all_lowercase!(headers, stream) do + if !Enum.all?(headers, fn {key, _value} -> lowercase?(key) end), + do: stream_error!("Received uppercase header", stream) + end + + defp lowercase?(<>) when char >= ?A and char <= ?Z, do: false + defp lowercase?(<<_char, rest::bits>>), do: lowercase?(rest) + defp lowercase?(<<>>), do: true + + # RFC9113§8.2.2 - no hop-by-hop headers + # Note that we do not filter out the TE header here, since it is allowed in + # specific cases by RFC9113§8.2.2. 
We check those cases in a separate filter + defp no_connection_headers!(headers, stream) do + connection_headers = + ~w[connection keep-alive proxy-authenticate proxy-authorization trailers transfer-encoding upgrade] + + if Enum.any?(headers, fn {key, _value} -> key in connection_headers end), + do: stream_error!("Received connection-specific header", stream) + end + + # RFC9113§8.2.2 - TE header may be present if it contains exactly 'trailers' + defp valid_te_header!(headers, stream) do + if Bandit.Headers.get_header(headers, "te") not in [nil, "trailers"], + do: stream_error!("Received invalid TE header", stream) + end + + defp get_content_length!(headers, stream) do + case Bandit.Headers.get_content_length(headers) do + {:ok, content_length} -> content_length + {:error, reason} -> stream_error!(reason, stream) + end + end + + # RFC9113§8.2.3 - cookie headers may be split during transmission + defp combine_cookie_crumbs(headers) do + {crumbs, other_headers} = + headers |> Enum.split_with(fn {header, _} -> header == "cookie" end) + + case Enum.map_join(crumbs, "; ", fn {"cookie", crumb} -> crumb end) do + "" -> other_headers + combined_cookie -> [{"cookie", combined_cookie} | other_headers] + end + end + + def read_data(%@for{} = stream, opts) do + max_bytes = Keyword.get(opts, :length, 8_000_000) + timeout = Keyword.get(opts, :read_timeout, 15_000) + do_read_data(stream, max_bytes, timeout, []) + end + + defp do_read_data(%@for{state: state} = stream, max_bytes, timeout, acc) + when state in [:open, :local_closed] do + case do_recv(stream, timeout) do + {:headers, trailers, stream} -> + no_pseudo_headers!(trailers, stream) + Logger.warning("Ignoring trailers #{inspect(trailers)}", domain: [:bandit]) + do_read_data(stream, max_bytes, timeout, acc) + + {:data, data, stream} -> + acc = [data | acc] + max_bytes = max_bytes - byte_size(data) + + if max_bytes >= 0 do + do_read_data(stream, max_bytes, timeout, acc) + else + {:more, Enum.reverse(acc), stream} + end + + 
:timeout -> + {:more, Enum.reverse(acc), stream} + + %@for{} = stream -> + do_read_data(stream, max_bytes, timeout, acc) + end + end + + defp do_read_data(%@for{state: :remote_closed} = stream, _max_bytes, _timeout, acc) do + {:ok, Enum.reverse(acc), stream} + end + + defp no_pseudo_headers!(headers, stream) do + if Enum.any?(headers, fn {key, _value} -> String.starts_with?(key, ":") end), + do: stream_error!("Received trailers with pseudo headers", stream) + end + + defp do_recv(%@for{state: :idle} = stream, timeout) do + receive do + {:bandit, {:headers, headers, end_stream}} -> + {:headers, headers, stream |> do_recv_headers() |> do_recv_end_stream(end_stream)} + + {:bandit, {:data, _data, _end_stream}} -> + connection_error!("Received DATA in idle state") + + {:bandit, {:send_window_update, _delta}} -> + connection_error!("Received WINDOW_UPDATE in idle state") + + {:bandit, {:rst_stream, _error_code}} -> + connection_error!("Received RST_STREAM in idle state") + after + timeout -> :timeout + end + end + + defp do_recv(%@for{state: state} = stream, timeout) + when state in [:open, :local_closed] do + receive do + {:bandit, {:headers, headers, end_stream}} -> + {:headers, headers, stream |> do_recv_headers() |> do_recv_end_stream(end_stream)} + + {:bandit, {:data, data, end_stream}} -> + {:data, data, + stream |> do_recv_data(data, end_stream) |> do_recv_end_stream(end_stream)} + + {:bandit, {:send_window_update, delta}} -> + do_recv_send_window_update(stream, delta) + + {:bandit, {:rst_stream, error_code}} -> + do_recv_rst_stream!(stream, error_code) + after + timeout -> :timeout + end + end + + defp do_recv(%@for{state: :remote_closed} = stream, timeout) do + receive do + {:bandit, {:headers, _headers, _end_stream}} -> + do_stream_closed_error!("Received HEADERS in remote_closed state", stream) + + {:bandit, {:data, _data, _end_stream}} -> + do_stream_closed_error!("Received DATA in remote_closed state", stream) + + {:bandit, {:send_window_update, delta}} -> + 
do_recv_send_window_update(stream, delta) + + {:bandit, {:rst_stream, error_code}} -> + do_recv_rst_stream!(stream, error_code) + after + timeout -> :timeout + end + end + + defp do_recv(%@for{state: :closed} = stream, timeout) do + receive do + {:bandit, {:headers, _headers, _end_stream}} -> stream + {:bandit, {:data, _data, _end_stream}} -> stream + {:bandit, {:send_window_update, _delta}} -> stream + {:bandit, {:rst_stream, _error_code}} -> stream + after + timeout -> :timeout + end + end + + defp do_recv_headers(%@for{state: :idle} = stream), do: %{stream | state: :open} + defp do_recv_headers(stream), do: stream + + defp do_recv_data(stream, data, end_stream) do + {new_window, increment} = + Bandit.HTTP2.FlowControl.compute_recv_window(stream.recv_window_size, byte_size(data)) + + if increment > 0 && !end_stream, do: do_send(stream, {:send_recv_window_update, increment}) + + bytes_remaining = + case stream.bytes_remaining do + nil -> nil + bytes_remaining -> bytes_remaining - byte_size(data) + end + + %{stream | recv_window_size: new_window, bytes_remaining: bytes_remaining} + end + + defp do_recv_end_stream(stream, false), do: stream + + defp do_recv_end_stream(stream, true) do + next_state = + case stream.state do + :open -> :remote_closed + :local_closed -> :closed + end + + if stream.bytes_remaining not in [nil, 0], + do: stream_error!("Received END_STREAM with byte still pending", stream) + + %{stream | state: next_state} + end + + defp do_recv_send_window_update(stream, delta) do + case Bandit.HTTP2.FlowControl.update_send_window(stream.send_window_size, delta) do + {:ok, new_window} -> + %{stream | send_window_size: new_window} + + {:error, reason} -> + stream_error!(reason, stream, Bandit.HTTP2.Errors.flow_control_error()) + end + end + + @spec do_recv_rst_stream!(term(), term()) :: no_return() + defp do_recv_rst_stream!(_stream, error_code) do + case Bandit.HTTP2.Errors.to_reason(error_code) do + reason when reason in [:no_error, :cancel] -> + 
raise(Bandit.TransportError, message: "Client reset stream normally", error: :closed) + + reason -> + raise(Bandit.TransportError, + message: "Received RST_STREAM from client: #{reason} (#{error_code})", + error: reason + ) + end + end + + @spec do_stream_closed_error!(String.t(), Bandit.HTTP2.Stream.t()) :: no_return() + defp do_stream_closed_error!(msg, stream), + do: stream_error!(msg, stream, Bandit.HTTP2.Errors.stream_closed()) + + # Stream API - Sending + + def send_headers(%@for{state: state} = stream, status, headers, body_disposition) + when state in [:open, :remote_closed] do + # We need to map body_disposition into the state model of HTTP/2. This turns out to be really + # easy, since HTTP/2 only has one way to send data. The only bit we need from the disposition + # is whether there will be any data forthcoming (ie: whether or not to end the stream). That + # will possibly walk us to a different state per RFC9113§5.1, as determined by the tail call + # to set_state_on_send_end_stream/2 + end_stream = body_disposition == :no_body + headers = [{":status", to_string(status)} | split_cookies(headers)] + do_send(stream, {:send_headers, headers, end_stream}) + set_state_on_send_end_stream(stream, end_stream) + end + + # RFC9113§8.2.3 - cookie headers may be split during transmission + defp split_cookies(headers) do + headers + |> Enum.flat_map(fn + {"cookie", cookie} -> + cookie |> String.split("; ") |> Enum.map(fn crumb -> {"cookie", crumb} end) + + {header, value} -> + [{header, value}] + end) + end + + def send_data(%@for{state: state} = stream, data, end_stream) + when state in [:open, :remote_closed] do + stream = + receive do + {:bandit, {:send_window_update, delta}} -> do_recv_send_window_update(stream, delta) + {:bandit, {:rst_stream, error_code}} -> do_recv_rst_stream!(stream, error_code) + after + 0 -> stream + end + + max_bytes_to_send = max(stream.send_window_size, 0) + {data_to_send, bytes_to_send, rest} = split_data(data, max_bytes_to_send) + + 
stream = + if end_stream || bytes_to_send > 0 do + end_stream_to_send = end_stream && byte_size(rest) == 0 + call(stream, {:send_data, data_to_send, end_stream_to_send}, :infinity) + %{stream | send_window_size: stream.send_window_size - bytes_to_send} + else + stream + end + + if byte_size(rest) == 0 do + set_state_on_send_end_stream(stream, end_stream) + else + receive do + {:bandit, {:send_window_update, delta}} -> + stream + |> do_recv_send_window_update(delta) + |> send_data(rest, end_stream) + after + stream.read_timeout -> + stream_error!( + "Timeout waiting for space in the send_window", + stream, + Bandit.HTTP2.Errors.flow_control_error() + ) + end + end + end + + def sendfile(%@for{} = stream, path, offset, length) do + case :file.open(path, [:raw, :binary]) do + {:ok, fd} -> + try do + if length == 0 do + send_data(stream, "", true) + else + sendfile_loop(stream, fd, offset, length, 0) + end + after + :file.close(fd) + end + + {:error, reason} -> + raise "Error opening file for sendfile: #{inspect(reason)}" + end + end + + defp sendfile_loop(stream, _fd, _offset, length, sent) when sent >= length do + stream + end + + defp sendfile_loop(stream, fd, offset, length, sent) do + read_size = min(length - sent, sendfile_chunk_size(stream)) + + case :file.pread(fd, offset + sent, read_size) do + {:ok, data} -> + now_sent = byte_size(data) + end_stream = sent + now_sent >= length + stream = send_data(stream, data, end_stream) + + if end_stream do + stream + else + sendfile_loop(stream, fd, offset, length, sent + now_sent) + end + + :eof -> + raise "Error reading file for sendfile: :eof" + + {:error, reason} -> + raise "Error reading file for sendfile: #{inspect(reason)}" + end + end + + defp sendfile_chunk_size(%@for{sendfile_chunk_size: sendfile_chunk_size}) do + max(sendfile_chunk_size, 1) + end + + defp split_data(data, desired_length) do + data_length = IO.iodata_length(data) + + if data_length <= desired_length do + {data, data_length, <<>>} + else + <> = 
IO.iodata_to_binary(data) + {to_send, desired_length, rest} + end + end + + defp set_state_on_send_end_stream(stream, false), do: stream + + defp set_state_on_send_end_stream(%@for{state: :open} = stream, true), + do: %{stream | state: :local_closed} + + defp set_state_on_send_end_stream(%@for{state: :remote_closed} = stream, true), + do: %{stream | state: :closed} + + # Closing off the stream upon completion or error + + def ensure_completed(%@for{state: :closed} = stream), do: stream + + def ensure_completed(%@for{state: :local_closed} = stream) do + receive do + {:bandit, {:headers, _headers, true}} -> + do_recv_end_stream(stream, true) + + {:bandit, {:data, data, true}} -> + do_recv_data(stream, data, true) |> do_recv_end_stream(true) + after + # RFC9113§8.1 - hint the client to stop sending data + 0 -> do_send(stream, {:send_rst_stream, Bandit.HTTP2.Errors.no_error()}) + end + end + + def ensure_completed(%@for{state: state} = stream) do + stream_error!( + "Terminating stream in #{state} state", + stream, + Bandit.HTTP2.Errors.internal_error() + ) + end + + def supported_upgrade?(%@for{} = _stream, _protocol), do: false + + def send_on_error(%@for{} = stream, %Bandit.HTTP2.Errors.StreamError{} = error) do + do_send(stream, {:send_rst_stream, error.error_code}) + %{stream | state: :closed} + end + + def send_on_error(%@for{} = stream, %Bandit.HTTP2.Errors.ConnectionError{} = error) do + do_send(stream, {:close_connection, error.error_code, error.message}) + stream + end + + def send_on_error(%@for{state: state} = stream, error) when state in [:idle, :open] do + stream = maybe_send_error(%{stream | state: :open}, error) + %{stream | state: :local_closed} + end + + def send_on_error(%@for{state: :remote_closed} = stream, error) do + stream = maybe_send_error(%{stream | state: :open}, error) + %{stream | state: :closed} + end + + def send_on_error(%@for{} = stream, _error), do: stream + + defp maybe_send_error(stream, error) do + receive do + {:plug_conn, :sent} 
-> stream + after + 0 -> + status = error |> Plug.Exception.status() |> Plug.Conn.Status.code() + send_headers(stream, status, [], :no_body) + end + end + + # Helpers + + defp do_send(stream, msg), do: send(stream.connection_pid, {msg, stream.stream_id}) + + defp call(stream, msg, timeout), + do: GenServer.call(stream.connection_pid, {msg, stream.stream_id}, timeout) + + @spec stream_error!(String.t(), Bandit.HTTP2.Stream.t()) :: no_return() + @spec stream_error!( + String.t(), + Bandit.HTTP2.Stream.t(), + Bandit.HTTP2.Errors.error_code() + ) :: no_return() + defp stream_error!(message, stream, error_code \\ Bandit.HTTP2.Errors.protocol_error()), + do: + raise(Bandit.HTTP2.Errors.StreamError, + message: message, + error_code: error_code, + stream_id: stream.stream_id + ) + + @spec connection_error!(term()) :: no_return() + @spec connection_error!(term(), Bandit.HTTP2.Errors.error_code()) :: no_return() + defp connection_error!(message, error_code \\ Bandit.HTTP2.Errors.protocol_error()), + do: raise(Bandit.HTTP2.Errors.ConnectionError, message: message, error_code: error_code) + end +end diff --git a/deps/bandit/lib/bandit/http2/stream_collection.ex b/deps/bandit/lib/bandit/http2/stream_collection.ex new file mode 100644 index 0000000..f0c3369 --- /dev/null +++ b/deps/bandit/lib/bandit/http2/stream_collection.ex @@ -0,0 +1,78 @@ +defmodule Bandit.HTTP2.StreamCollection do + @moduledoc false + # Represents a collection of stream IDs and what process IDs are running them. An instance of + # this struct is contained within each `Bandit.HTTP2.Connection` struct and is responsible for + # encapsulating the data about the streams which are currently active within the connection. 
+ # + # This collection has a number of useful properties: + # + # * Process IDs are accessible by stream id + # * Process IDs are deletable by themselves (ie: deletion is via PID) + # * The collection is able to determine if a stream not currently contained in this collection + # represents a previously seen stream (in which case it is considered to be in a 'closed' + # state), or if it is a stream ID of a stream that has yet to be created + + require Integer + + defstruct last_stream_id: 0, + stream_count: 0, + id_to_pid: %{}, + pid_to_id: %{} + + @typedoc "A map from stream id to pid" + @type t :: %__MODULE__{ + last_stream_id: Bandit.HTTP2.Stream.stream_id(), + stream_count: non_neg_integer(), + id_to_pid: %{Bandit.HTTP2.Stream.stream_id() => pid()}, + pid_to_id: %{pid() => Bandit.HTTP2.Stream.stream_id()} + } + + @spec get_pids(t()) :: [pid()] + def get_pids(collection), do: Map.values(collection.id_to_pid) + + @spec get_pid(t(), Bandit.HTTP2.Stream.stream_id()) :: pid() | :new | :closed | :invalid + def get_pid(_collection, stream_id) when Integer.is_even(stream_id), do: :invalid + def get_pid(collection, stream_id) when stream_id > collection.last_stream_id, do: :new + + def get_pid(collection, stream_id) do + case Map.get(collection.id_to_pid, stream_id) do + pid when is_pid(pid) -> pid + nil -> :closed + end + end + + @spec insert(t(), Bandit.HTTP2.Stream.stream_id(), pid()) :: t() + def insert(collection, stream_id, pid) do + %__MODULE__{ + last_stream_id: stream_id, + stream_count: collection.stream_count + 1, + id_to_pid: Map.put(collection.id_to_pid, stream_id, pid), + pid_to_id: Map.put(collection.pid_to_id, pid, stream_id) + } + end + + # Dialyzer insists on the atom() here even though it doesn't make sense + @spec delete(t(), pid()) :: t() | atom() + def delete(collection, pid) do + case Map.pop(collection.pid_to_id, pid) do + {nil, _} -> + collection + + {stream_id, new_pid_to_id} -> + %{ + collection + | id_to_pid: Map.delete(collection.id_to_pid, 
stream_id), + pid_to_id: new_pid_to_id + } + end + end + + @spec stream_count(t()) :: non_neg_integer() + def stream_count(collection), do: collection.stream_count + + @spec open_stream_count(t()) :: non_neg_integer() + def open_stream_count(collection), do: collection.pid_to_id |> map_size() + + @spec last_stream_id(t()) :: Bandit.HTTP2.Stream.stream_id() + def last_stream_id(collection), do: collection.last_stream_id +end diff --git a/deps/bandit/lib/bandit/http2/stream_process.ex b/deps/bandit/lib/bandit/http2/stream_process.ex new file mode 100644 index 0000000..21c8cfd --- /dev/null +++ b/deps/bandit/lib/bandit/http2/stream_process.ex @@ -0,0 +1,31 @@ +defmodule Bandit.HTTP2.StreamProcess do + @moduledoc false + # This process runs the lifecycle of an HTTP/2 stream, which is modeled by a + # `Bandit.HTTP2.Stream` struct that this process maintains in its state + # + # As part of this lifecycle, the execution of a Plug to handle this stream's request + # takes place here; the entirety of the Plug lifecycle takes place in a single + # `c:handle_continue/2` call. 
+ + use GenServer, restart: :temporary + + @spec start_link( + Bandit.HTTP2.Stream.t(), + Bandit.Pipeline.plug_def(), + Bandit.Telemetry.t(), + Bandit.Pipeline.conn_data(), + keyword() + ) :: GenServer.on_start() + def start_link(stream, plug, connection_span, conn_data, opts) do + GenServer.start_link(__MODULE__, {stream, plug, connection_span, conn_data, opts}) + end + + @impl GenServer + def init(state), do: {:ok, state, {:continue, :start_stream}} + + @impl GenServer + def handle_continue(:start_stream, {stream, plug, connection_span, conn_data, opts} = state) do + _ = Bandit.Pipeline.run(stream, plug, connection_span, conn_data, opts) + {:stop, :normal, state} + end +end diff --git a/deps/bandit/lib/bandit/http_error.ex b/deps/bandit/lib/bandit/http_error.ex new file mode 100644 index 0000000..d83a86b --- /dev/null +++ b/deps/bandit/lib/bandit/http_error.ex @@ -0,0 +1,6 @@ +defmodule Bandit.HTTPError do + # Represents an error suitable for return as an HTTP status. Note that these may be surfaced + # from anywhere that such a message is well defined, including within HTTP/1 transport concerns + # and also within shared HTTP semantics (ie: within Bandit.Adapter or Bandit.Pipeline) + defexception message: nil, plug_status: :bad_request +end diff --git a/deps/bandit/lib/bandit/http_transport.ex b/deps/bandit/lib/bandit/http_transport.ex new file mode 100644 index 0000000..b1a05bd --- /dev/null +++ b/deps/bandit/lib/bandit/http_transport.ex @@ -0,0 +1,47 @@ +defprotocol Bandit.HTTPTransport do + @moduledoc false + # A protocol implemented by the lower level transports (HTTP/1 and HTTP/2) to encapsulate the + # low-level mechanics needed to complete an HTTP request/response cycle. 
Implementations of this + # protocol should be broadly concerned with the protocol-specific aspects of a connection, and + # can rely on higher-level code taking care of shared HTTP semantics + + @typedoc "How the response body is to be delivered" + @type body_disposition :: :raw | :chunk_encoded | :no_body | :inform + + @spec peer_data(t()) :: Plug.Conn.Adapter.peer_data() + def peer_data(transport) + + @spec sock_data(t()) :: Plug.Conn.Adapter.sock_data() + def sock_data(transport) + + @spec ssl_data(t()) :: Plug.Conn.Adapter.ssl_data() + def ssl_data(transport) + + @spec version(t()) :: Plug.Conn.Adapter.http_protocol() + def version(transport) + + @spec read_headers(t()) :: + {:ok, Plug.Conn.method(), Bandit.Pipeline.request_target(), Plug.Conn.headers(), t()} + def read_headers(transport) + + @spec read_data(t(), opts :: keyword()) :: {:ok, iodata(), t()} | {:more, iodata(), t()} + def read_data(transport, opts) + + @spec send_headers(t(), Plug.Conn.status(), Plug.Conn.headers(), body_disposition()) :: t() + def send_headers(transport, status, heeaders, disposition) + + @spec send_data(t(), data :: iodata(), end_request :: boolean()) :: t() + def send_data(transport, data, end_request) + + @spec sendfile(t(), Path.t(), offset :: integer(), length :: integer() | :all) :: t() + def sendfile(transport, path, offset, length) + + @spec ensure_completed(t()) :: t() + def ensure_completed(transport) + + @spec supported_upgrade?(t(), atom()) :: boolean() + def supported_upgrade?(transport, protocol) + + @spec send_on_error(t(), struct()) :: t() + def send_on_error(transport, error) +end diff --git a/deps/bandit/lib/bandit/initial_handler.ex b/deps/bandit/lib/bandit/initial_handler.ex new file mode 100644 index 0000000..7e5ab27 --- /dev/null +++ b/deps/bandit/lib/bandit/initial_handler.ex @@ -0,0 +1,86 @@ +defmodule Bandit.InitialHandler do + @moduledoc false + # The initial protocol implementation used for all connections. 
Switches to a + # specific protocol implementation based on configuration, ALPN negotiation, and + # line heuristics. + + use ThousandIsland.Handler + + require Logger + + @type on_switch_handler :: + {:switch, bandit_http_handler(), data :: term(), state :: term()} + | {:switch, bandit_http_handler(), state :: term()} + + @type bandit_http_handler :: Bandit.HTTP1.Handler | Bandit.HTTP2.Handler + + # Attempts to guess the protocol in use, returning the applicable next handler and any + # data consumed in the course of guessing which must be processed by the actual protocol handler + @impl ThousandIsland.Handler + @spec handle_connection(ThousandIsland.Socket.t(), state :: term()) :: + ThousandIsland.Handler.handler_result() | on_switch_handler() + def handle_connection(socket, state) do + case {state.http_1_enabled, state.http_2_enabled, alpn_protocol(socket), sniff_wire(socket)} do + {_, _, _, :likely_tls} -> + Logger.warning("Connection that looks like TLS received on a clear channel", + domain: [:bandit], + plug: state.plug + ) + + {:close, state} + + {_, true, Bandit.HTTP2.Handler, Bandit.HTTP2.Handler} -> + {:switch, Bandit.HTTP2.Handler, state} + + {true, _, Bandit.HTTP1.Handler, {:no_match, data}} -> + {:switch, Bandit.HTTP1.Handler, data, state} + + {_, true, :no_match, Bandit.HTTP2.Handler} -> + {:switch, Bandit.HTTP2.Handler, state} + + {true, _, :no_match, {:no_match, data}} -> + {:switch, Bandit.HTTP1.Handler, data, state} + + _other -> + {:close, state} + end + end + + # Returns the protocol as negotiated via ALPN, if applicable + @spec alpn_protocol(ThousandIsland.Socket.t()) :: + Bandit.HTTP2.Handler | Bandit.HTTP1.Handler | :no_match + defp alpn_protocol(socket) do + case ThousandIsland.Socket.negotiated_protocol(socket) do + {:ok, "h2"} -> Bandit.HTTP2.Handler + {:ok, "http/1.1"} -> Bandit.HTTP1.Handler + _ -> :no_match + end + end + + # Returns the protocol as suggested by received data, if possible. 
+ # We do this in two phases so that we don't hang on *really* short HTTP/1 + # requests that are less than 24 bytes + @spec sniff_wire(ThousandIsland.Socket.t()) :: + Bandit.HTTP2.Handler + | :likely_tls + | {:no_match, binary()} + | {:error, :closed | :timeout | :inet.posix()} + defp sniff_wire(socket) do + case ThousandIsland.Socket.recv(socket, 3) do + {:ok, "PRI" = buffer} -> sniff_wire_for_http2(socket, buffer) + {:ok, <<22::8, 3::8, minor::8>>} when minor in [1, 3] -> :likely_tls + {:ok, data} -> {:no_match, data} + {:error, :timeout} -> {:no_match, <<>>} + {:error, error} -> {:error, error} + end + end + + defp sniff_wire_for_http2(socket, buffer) do + case ThousandIsland.Socket.recv(socket, 21) do + {:ok, " * HTTP/2.0\r\n\r\nSM\r\n\r\n"} -> Bandit.HTTP2.Handler + {:ok, data} -> {:no_match, buffer <> data} + {:error, :timeout} -> {:no_match, buffer} + {:error, error} -> {:error, error} + end + end +end diff --git a/deps/bandit/lib/bandit/logger.ex b/deps/bandit/lib/bandit/logger.ex new file mode 100644 index 0000000..351b84f --- /dev/null +++ b/deps/bandit/lib/bandit/logger.ex @@ -0,0 +1,46 @@ +defmodule Bandit.Logger do + @moduledoc false + + require Logger + + def maybe_log_protocol_error(error, stacktrace, opts, metadata) do + logging_verbosity = + case error do + %Bandit.TransportError{error: :closed} -> + Keyword.get(opts.http, :log_client_closures, false) + + _error -> + Keyword.get(opts.http, :log_protocol_errors, :short) + end + + case logging_verbosity do + :short -> + logger_metadata = logger_metadata_for(:error, error, stacktrace, metadata) + Logger.error(Exception.format_banner(:error, error, stacktrace), logger_metadata) + + :verbose -> + logger_metadata = logger_metadata_for(:error, error, stacktrace, metadata) + Logger.error(Exception.format(:error, error, stacktrace), logger_metadata) + + false -> + :ok + end + end + + def logger_metadata_for(kind, reason, stacktrace, metadata) do + crash_reason = crash_reason(kind, reason, stacktrace) + + 
case reason do + %Bandit.HTTP2.Errors.StreamError{stream_id: stream_id} when is_integer(stream_id) -> + [stream_id: stream_id, domain: [:bandit], crash_reason: crash_reason] + |> Keyword.merge(metadata) + + _ -> + [domain: [:bandit], crash_reason: crash_reason] + |> Keyword.merge(metadata) + end + end + + defp crash_reason(:throw, reason, stacktrace), do: {{:nocatch, reason}, stacktrace} + defp crash_reason(_, reason, stacktrace), do: {reason, stacktrace} +end diff --git a/deps/bandit/lib/bandit/phoenix_adapter.ex b/deps/bandit/lib/bandit/phoenix_adapter.ex new file mode 100644 index 0000000..3ed8584 --- /dev/null +++ b/deps/bandit/lib/bandit/phoenix_adapter.ex @@ -0,0 +1,116 @@ +defmodule Bandit.PhoenixAdapter do + @moduledoc """ + A Bandit adapter for Phoenix. + + This adapter provides out-of-the-box support for all aspects of Phoenix 1.7 and later. Earlier + versions of Phoenix will work with this adapter, but without support for WebSockets. + + To use this adapter, your project will need to include Bandit as a dependency: + + ```elixir + {:bandit, "~> 1.0"} + ``` + + Once Bandit is included as a dependency of your Phoenix project, add the following `adapter:` + line to your endpoint configuration in `config/config.exs`, as in the following example: + + ``` + # config/config.exs + + config :your_app, YourAppWeb.Endpoint, + adapter: Bandit.PhoenixAdapter, # <---- ADD THIS LINE + url: [host: "localhost"], + render_errors: ... + ``` + + That's it! **After restarting Phoenix you should see the startup message indicate that it is being + served by Bandit**, and everything should 'just work'. Note that if you have set any exotic + configuration options within your endpoint, you may need to update that configuration to work + with Bandit; see below for details. + + ## Endpoint configuration + + This adapter supports the standard Phoenix structure for endpoint configuration. 
Top-level keys for + `:http` and `:https` are supported, and configuration values within each of those are interpreted + as raw Bandit configuration as specified by `t:Bandit.options/0`. Bandit's configuration supports + all values used in a standard out-of-the-box Phoenix application, so if you haven't made any + substantial changes to your endpoint configuration things should 'just work' for you. + + In the event that you *have* made advanced changes to your endpoint configuration, you may need + to update this config to work with Bandit. Consult Bandit's documentation at + `t:Bandit.options/0` for details. + + It can be difficult to know exactly *where* to put the options that you may need to set from the + ones available at `t:Bandit.options/0`. The general idea is that anything inside the `http:` or + `https:` keyword lists in your configuration are passed directly to `Bandit.start_link/1`, so an + example may look like so: + + ```elixir + # config/{dev,prod,etc}.exs + + config :your_app, YourAppWeb.Endpoint, + http: [ + ip: {127, 0, 0, 1}, + port: 4000, + thousand_island_options: [num_acceptors: 123], + http_options: [log_protocol_errors: false], + http_1_options: [max_requests: 1], + websocket_options: [compress: false] + ], + ``` + + Note that, unlike the `adapter: Bandit.PhoenixAdapter` configuration change outlined previously, + configuration of specific `http:` and `https:` values is done on a per-environment basis in + Phoenix, so these changes will typically be in your `config/dev.exs`, `config/prod.exs` and + similar files. 
+ """ + + @doc """ + Returns the Bandit server process for the provided scheme within the given Phoenix Endpoint + """ + @spec bandit_pid(module()) :: + {:ok, Supervisor.child() | :restarting | :undefined} | {:error, :no_server_found} + def bandit_pid(endpoint, scheme \\ :http) do + endpoint + |> Supervisor.which_children() + |> Enum.find(fn {id, _, _, _} -> id == {endpoint, scheme} end) + |> case do + {_, pid, _, _} -> {:ok, pid} + nil -> {:error, :no_server_found} + end + end + + @doc """ + Returns the bound address and port of the Bandit server process for the provided + scheme within the given Phoenix Endpoint + """ + def server_info(endpoint, scheme) do + case bandit_pid(endpoint, scheme) do + {:ok, pid} -> ThousandIsland.listener_info(pid) + {:error, reason} -> {:error, reason} + end + end + + @doc false + def child_specs(endpoint, config) do + otp_app = Keyword.fetch!(config, :otp_app) + + plug = resolve_plug(config[:code_reloader], endpoint) + + for scheme <- [:http, :https], opts = config[scheme] do + ([plug: plug, display_plug: endpoint, scheme: scheme, otp_app: otp_app] ++ opts) + |> Bandit.child_spec() + |> Supervisor.child_spec(id: {endpoint, scheme}) + end + end + + defp resolve_plug(code_reload?, endpoint) do + if code_reload? 
&& + Code.ensure_loaded?(Phoenix.Endpoint.SyncCodeReloadPlug) && + function_exported?(Phoenix.Endpoint.SyncCodeReloadPlug, :call, 2) do + {Phoenix.Endpoint.SyncCodeReloadPlug, {endpoint, []}} + else + endpoint + end + end +end diff --git a/deps/bandit/lib/bandit/pipeline.ex b/deps/bandit/lib/bandit/pipeline.ex new file mode 100644 index 0000000..06aaa47 --- /dev/null +++ b/deps/bandit/lib/bandit/pipeline.ex @@ -0,0 +1,243 @@ +defmodule Bandit.Pipeline do + @moduledoc false + # Provides a common pipeline for HTTP/1.1 and h2 adapters, factoring together shared + # functionality relating to `Plug.Conn` management + + @type plug_def :: {function() | module(), Plug.opts()} + @type conn_data :: {boolean(), :inet.ip_address()} + @type request_target :: + {scheme(), nil | Plug.Conn.host(), nil | Plug.Conn.port_number(), path()} + @type scheme :: String.t() | nil + @type path :: String.t() | :* + + require Logger + + @spec run( + Bandit.HTTPTransport.t(), + plug_def(), + ThousandIsland.Telemetry.t() | Bandit.Telemetry.t(), + conn_data(), + map() + ) :: + {:ok, Bandit.HTTPTransport.t()} + | {:upgrade, Bandit.HTTPTransport.t(), :websocket, tuple()} + | {:error, term()} + def run(transport, plug, connection_span, conn_data, opts) do + measurements = %{monotonic_time: Bandit.Telemetry.monotonic_time()} + + metadata = %{ + connection_telemetry_span_context: connection_span.telemetry_span_context, + plug: plug + } + + try do + {:ok, method, request_target, headers, transport} = + Bandit.HTTPTransport.read_headers(transport) + + conn = build_conn!(transport, method, request_target, headers, conn_data, opts) + span = Bandit.Telemetry.start_span(:request, measurements, Map.put(metadata, :conn, conn)) + + try do + conn + |> call_plug!(plug) + |> maybe_upgrade!() + |> case do + {:no_upgrade, conn} -> + %Plug.Conn{adapter: {_mod, adapter}} = conn = commit_response!(conn) + Bandit.Telemetry.stop_span(span, adapter.metrics, %{conn: conn}) + {:ok, adapter.transport} + + {:upgrade, 
%Plug.Conn{adapter: {_mod, adapter}} = conn, protocol, opts} -> + conn = Plug.Conn.put_status(conn, 101) + Bandit.Telemetry.stop_span(span, adapter.metrics, %{conn: conn}) + {:upgrade, adapter.transport, protocol, opts} + end + catch + kind, value -> + handle_error(kind, value, __STACKTRACE__, transport, span, opts, plug: plug, conn: conn) + end + rescue + exception -> + span = Bandit.Telemetry.start_span(:request, measurements, metadata) + handle_error(:error, exception, __STACKTRACE__, transport, span, opts, plug: plug) + end + end + + @spec build_conn!( + Bandit.HTTPTransport.t(), + Plug.Conn.method(), + request_target(), + Plug.Conn.headers(), + conn_data(), + map() + ) :: Plug.Conn.t() + defp build_conn!(transport, method, request_target, headers, {secure?, peer_address}, opts) do + adapter = Bandit.Adapter.init(self(), transport, method, headers, opts) + scheme = determine_scheme(secure?) + version = Bandit.HTTPTransport.version(transport) + {host, port} = determine_host_and_port!(scheme, version, request_target, headers) + {path, query} = determine_path_and_query(request_target) + uri = %URI{scheme: scheme, host: host, port: port, path: path, query: query} + Plug.Conn.Adapter.conn({Bandit.Adapter, adapter}, method, uri, peer_address, headers) + end + + @spec determine_scheme(boolean()) :: String.t() + defp determine_scheme(true), do: "https" + defp determine_scheme(false), do: "http" + + @spec determine_host_and_port!(binary(), atom(), request_target(), Plug.Conn.headers()) :: + {Plug.Conn.host(), Plug.Conn.port_number()} + defp determine_host_and_port!(scheme, version, {_, nil, nil, _}, headers) do + case {Bandit.Headers.get_header(headers, "host"), version} do + {nil, :"HTTP/1.0"} -> + {"", URI.default_port(scheme)} + + {nil, _} -> + request_error!("Unable to obtain host and port: No host header") + + {host_header, _} -> + {host, port} = Bandit.Headers.parse_hostlike_header!(host_header) + {host, port || URI.default_port(scheme)} + end + end + + defp 
determine_host_and_port!(scheme, _version, {_, host, port, _}, _headers), + do: {to_string(host), port || URI.default_port(scheme)} + + @spec determine_path_and_query(request_target()) :: {String.t(), nil | String.t()} + defp determine_path_and_query({_, _, _, :*}), do: {"*", nil} + defp determine_path_and_query({_, _, _, path}), do: split_path(path) + + @spec split_path(String.t()) :: {String.t(), nil | String.t()} + defp split_path(path) do + path + |> to_string() + |> :binary.split("#") + |> hd() + |> :binary.split("?") + |> case do + [path, query] -> {path, query} + [path] -> {path, nil} + end + end + + @spec call_plug!(Plug.Conn.t(), plug_def()) :: Plug.Conn.t() | no_return() + defp call_plug!(%Plug.Conn{} = conn, {plug, plug_opts}) when is_atom(plug) do + case plug.call(conn, plug_opts) do + %Plug.Conn{} = conn -> conn + other -> raise("Expected #{plug}.call/2 to return %Plug.Conn{} but got: #{inspect(other)}") + end + end + + defp call_plug!(%Plug.Conn{} = conn, {plug_fn, plug_opts}) when is_function(plug_fn) do + case plug_fn.(conn, plug_opts) do + %Plug.Conn{} = conn -> conn + other -> raise("Expected Plug function to return %Plug.Conn{} but got: #{inspect(other)}") + end + end + + @spec maybe_upgrade!(Plug.Conn.t()) :: + {:no_upgrade, Plug.Conn.t()} | {:upgrade, Plug.Conn.t(), :websocket, tuple()} + defp maybe_upgrade!( + %Plug.Conn{ + state: :upgraded, + adapter: + {_, + %{upgrade: {:websocket, {websock, websock_opts, connection_opts}, websocket_opts}}} + } = conn + ) do + # We can safely unset the state, since we match on :upgraded above + case Bandit.WebSocket.Handshake.handshake( + %{conn | state: :unset}, + connection_opts, + websocket_opts + ) do + {:ok, conn, connection_opts} -> + {:upgrade, conn, :websocket, {websock, websock_opts, connection_opts}} + + {:error, reason} -> + request_error!(reason) + end + end + + defp maybe_upgrade!(conn), do: {:no_upgrade, conn} + + @spec commit_response!(Plug.Conn.t()) :: Plug.Conn.t() | no_return() + defp 
commit_response!(conn) do + case conn do + %Plug.Conn{state: :unset} -> + raise(Plug.Conn.NotSentError) + + %Plug.Conn{state: :set} -> + Plug.Conn.send_resp(conn) + + %Plug.Conn{state: :chunked, adapter: {mod, adapter}} -> + adapter = + case mod.chunk(adapter, "") do + {:ok, _, adapter} -> adapter + _ -> adapter + end + + %{conn | adapter: {mod, adapter}} + + %Plug.Conn{} -> + conn + end + |> then(fn %Plug.Conn{adapter: {mod, adapter}} = conn -> + transport = Bandit.HTTPTransport.ensure_completed(adapter.transport) + %{conn | adapter: {mod, %{adapter | transport: transport}}} + end) + end + + @spec request_error!(term()) :: no_return() + @spec request_error!(term(), Plug.Conn.status()) :: no_return() + defp request_error!(reason, plug_status \\ :bad_request) do + raise Bandit.HTTPError, message: reason, plug_status: plug_status + end + + @spec handle_error( + :error | :throw | :exit, + Exception.t() | term(), + Exception.stacktrace(), + Bandit.HTTPTransport.t(), + Bandit.Telemetry.t(), + map(), + keyword() + ) :: {:ok, Bandit.HTTPTransport.t()} | {:error, term()} + defp handle_error(:error, %Plug.Conn.WrapperError{} = error, _, transport, span, opts, metadata) do + # Unwrap the inner error and handle it + handle_error(error.kind, error.reason, error.stack, transport, span, opts, metadata) + end + + defp handle_error(:error, %type{} = error, stacktrace, transport, span, opts, metadata) + when type in [ + Bandit.HTTPError, + Bandit.TransportError, + Bandit.HTTP2.Errors.StreamError, + Bandit.HTTP2.Errors.ConnectionError + ] do + Bandit.Telemetry.stop_span(span, %{}, Enum.into(metadata, %{error: error.message})) + + Bandit.Logger.maybe_log_protocol_error(error, stacktrace, opts, metadata) + + # We want to do this at the end of the function, since the HTTP2 stack may kill this process + # in the course of handling a ConnectionError + Bandit.HTTPTransport.send_on_error(transport, error) + {:error, error} + end + + defp handle_error(kind, reason, stacktrace, transport, 
span, opts, metadata) do + reason = Exception.normalize(kind, reason, stacktrace) + + Bandit.Telemetry.span_exception(span, kind, reason, stacktrace) + status = reason |> Plug.Exception.status() |> Plug.Conn.Status.code() + + if status in Keyword.get(opts.http, :log_exceptions_with_status_codes, 500..599) do + logger_metadata = Bandit.Logger.logger_metadata_for(kind, reason, stacktrace, metadata) + Logger.error(Exception.format(kind, reason, stacktrace), logger_metadata) + end + + Bandit.HTTPTransport.send_on_error(transport, reason) + {:error, reason} + end +end diff --git a/deps/bandit/lib/bandit/primitive_ops/websocket.ex b/deps/bandit/lib/bandit/primitive_ops/websocket.ex new file mode 100644 index 0000000..d9af167 --- /dev/null +++ b/deps/bandit/lib/bandit/primitive_ops/websocket.ex @@ -0,0 +1,34 @@ +defmodule Bandit.PrimitiveOps.WebSocket do + @moduledoc """ + WebSocket primitive operations behaviour and default implementation + """ + + @doc """ + WebSocket masking according to [RFC6455§5.3](https://www.rfc-editor.org/rfc/rfc6455#section-5.3) + """ + @callback ws_mask(payload :: binary(), mask :: integer()) :: binary() + + @behaviour __MODULE__ + + # Note that masking is an involution, so we don't need a separate unmask function + @impl true + def ws_mask(payload, mask) + when is_binary(payload) and is_integer(mask) and mask >= 0x00000000 and mask <= 0xFFFFFFFF do + ws_mask(<<>>, payload, mask) + end + + defp ws_mask(acc, <>, mask) do + ws_mask(<>)>>, rest, mask) + end + + for size <- [24, 16, 8] do + defp ws_mask(acc, <>, mask) do + <> = <> + <>)>> + end + end + + defp ws_mask(acc, <<>>, _mask) do + acc + end +end diff --git a/deps/bandit/lib/bandit/socket_helpers.ex b/deps/bandit/lib/bandit/socket_helpers.ex new file mode 100644 index 0000000..4d3fb5a --- /dev/null +++ b/deps/bandit/lib/bandit/socket_helpers.ex @@ -0,0 +1,74 @@ +defmodule Bandit.SocketHelpers do + @moduledoc false + + def iodata_empty?(""), do: true + def iodata_empty?([]), do: true + def 
iodata_empty?([head | tail]), do: iodata_empty?(head) and iodata_empty?(tail) + def iodata_empty?(_), do: false + + @spec conn_data(ThousandIsland.Socket.t()) :: Bandit.Pipeline.conn_data() + def conn_data(socket) do + secure? = ThousandIsland.Socket.secure?(socket) + + {peer_address, _port} = + case ThousandIsland.Socket.peername(socket) do + {:ok, peername} -> map_address(peername) + {:error, reason} -> transport_error!("Unable to obtain conn_data", reason) + end + + {secure?, peer_address} + end + + @spec peer_data(ThousandIsland.Socket.t()) :: Plug.Conn.Adapter.peer_data() + def peer_data(socket) do + with {:ok, peername} <- ThousandIsland.Socket.peername(socket), + {address, port} <- map_address(peername), + {:ok, ssl_cert} <- peercert(socket) do + %{address: address, port: port, ssl_cert: ssl_cert} + else + {:error, reason} -> transport_error!("Unable to obtain peer_data", reason) + end + end + + @spec sock_data(ThousandIsland.Socket.t()) :: Plug.Conn.Adapter.sock_data() + def sock_data(socket) do + with {:ok, sockname} <- ThousandIsland.Socket.sockname(socket), + {address, port} <- map_address(sockname) do + %{address: address, port: port} + else + {:error, reason} -> transport_error!("Unable to obtain sock_data", reason) + end + end + + @spec ssl_data(ThousandIsland.Socket.t()) :: Plug.Conn.Adapter.ssl_data() + def ssl_data(socket) do + case ThousandIsland.Socket.connection_information(socket) do + {:ok, connection_information} -> connection_information + {:error, :not_secure} -> nil + {:error, reason} -> transport_error!("Unable to obtain ssl_data", reason) + end + end + + defp map_address(address) do + case address do + {:local, path} -> {{:local, path}, 0} + {:unspec, <<>>} -> {:unspec, 0} + {:undefined, term} -> {{:undefined, term}, 0} + {ip, port} -> {ip, port} + end + end + + defp peercert(socket) do + case ThousandIsland.Socket.peercert(socket) do + {:ok, cert} -> {:ok, cert} + {:error, :no_peercert} -> {:ok, nil} + {:error, :not_secure} -> {:ok, 
nil} + {:error, reason} -> {:error, reason} + end + end + + @spec transport_error!(term(), term()) :: no_return() + defp transport_error!(message, error) do + raise Bandit.TransportError, message: message, error: error + end +end diff --git a/deps/bandit/lib/bandit/telemetry.ex b/deps/bandit/lib/bandit/telemetry.ex new file mode 100644 index 0000000..c940189 --- /dev/null +++ b/deps/bandit/lib/bandit/telemetry.ex @@ -0,0 +1,250 @@ +defmodule Bandit.Telemetry do + @moduledoc """ + The following telemetry spans are emitted by bandit + + ## `[:bandit, :request, *]` + + Represents Bandit handling a specific client HTTP request + + This span is started by the following event: + + * `[:bandit, :request, :start]` + + Represents the start of the span + + This event contains the following measurements: + + * `monotonic_time`: The time of this event, in `:native` units + + This event contains the following metadata: + + * `telemetry_span_context`: A unique identifier for this span + * `connection_telemetry_span_context`: The span context of the Thousand Island `:connection` + span which contains this request + * `conn`: The `Plug.Conn` representing this connection. Not present in cases where `error` + is also set and the nature of error is such that Bandit was unable to successfully build + the conn + * `plug`: The Plug which is being used to serve this request. Specified as `{plug_module, plug_opts}` + + This span is ended by the following event: + + * `[:bandit, :request, :stop]` + + Represents the end of the span + + This event contains the following measurements: + + * `monotonic_time`: The time of this event, in `:native` units + * `duration`: The span duration, in `:native` units + * `req_header_end_time`: The time that header reading completed, in `:native` units + * `req_body_start_time`: The time that request body reading started, in `:native` units. 
+ * `req_body_end_time`: The time that request body reading completed, in `:native` units + * `req_body_bytes`: The length of the request body, in octets + * `resp_start_time`: The time that the response started, in `:native` units + * `resp_end_time`: The time that the response completed, in `:native` units + * `resp_body_bytes`: The length of the response body, in octets. If the response is + compressed, this is the size of the compressed payload as sent on the wire + * `resp_uncompressed_body_bytes`: The length of the original, uncompressed body. Only + included for responses which are compressed + * `resp_compression_method`: The method of compression, as sent in the `Content-Encoding` + header of the response. Only included for responses which are compressed + + This event contains the following metadata: + + * `telemetry_span_context`: A unique identifier for this span + * `connection_telemetry_span_context`: The span context of the Thousand Island `:connection` + span which contains this request + * `conn`: The `Plug.Conn` representing this connection. Not present in cases where `error` + is also set and the nature of error is such that Bandit was unable to successfully build + the conn + * `plug`: The Plug which is being used to serve this request. Specified as `{plug_module, plug_opts}` + * `error`: The error that caused the span to end, if it ended in error + + The following events may be emitted within this span: + + * `[:bandit, :request, :exception]` + + The request for this span ended unexpectedly + + This event contains the following measurements: + + * `monotonic_time`: The time of this event, in `:native` units + + This event contains the following metadata: + + * `telemetry_span_context`: A unique identifier for this span + * `connection_telemetry_span_context`: The span context of the Thousand Island `:connection` + span which contains this request + * `conn`: The `Plug.Conn` representing this connection. 
Not present in cases where `error` + is also set and the nature of error is such that Bandit was unable to successfully build + the conn + * `plug`: The Plug which is being used to serve this request. Specified as `{plug_module, plug_opts}` + * `kind`: The kind of unexpected condition, typically `:exit` + * `exception`: The exception which caused this unexpected termination. May be an exception + or an arbitrary value when the event was an uncaught throw or an exit + * `stacktrace`: The stacktrace of the location which caused this unexpected termination + + ## `[:bandit, :websocket, *]` + + Represents Bandit handling a WebSocket connection + + This span is started by the following event: + + * `[:bandit, :websocket, :start]` + + Represents the start of the span + + This event contains the following measurements: + + * `monotonic_time`: The time of this event, in `:native` units + * `compress`: Details about the compression configuration for this connection + + This event contains the following metadata: + + * `telemetry_span_context`: A unique identifier for this span + * `connection_telemetry_span_context`: The span context of the Thousand Island `:connection` + span which contains this request + * `websock`: The WebSock which is being used to serve this request. 
Specified as `websock_module` + + This span is ended by the following event: + + * `[:bandit, :websocket, :stop]` + + Represents the end of the span + + This event contains the following measurements: + + * `monotonic_time`: The time of this event, in `:native` units + * `duration`: The span duration, in `:native` units + * `recv_text_frame_count`: The number of text frames received + * `recv_text_frame_bytes`: The total number of bytes received in the payload of text frames + * `recv_binary_frame_count`: The number of binary frames received + * `recv_binary_frame_bytes`: The total number of bytes received in the payload of binary frames + * `recv_ping_frame_count`: The number of ping frames received + * `recv_ping_frame_bytes`: The total number of bytes received in the payload of ping frames + * `recv_pong_frame_count`: The number of pong frames received + * `recv_pong_frame_bytes`: The total number of bytes received in the payload of pong frames + * `recv_connection_close_frame_count`: The number of connection close frames received + * `recv_connection_close_frame_bytes`: The total number of bytes received in the payload of connection close frames + * `recv_continuation_frame_count`: The number of continuation frames received + * `recv_continuation_frame_bytes`: The total number of bytes received in the payload of continuation frames + * `send_text_frame_count`: The number of text frames sent + * `send_text_frame_bytes`: The total number of bytes sent in the payload of text frames + * `send_binary_frame_count`: The number of binary frames sent + * `send_binary_frame_bytes`: The total number of bytes sent in the payload of binary frames + * `send_ping_frame_count`: The number of ping frames sent + * `send_ping_frame_bytes`: The total number of bytes sent in the payload of ping frames + * `send_pong_frame_count`: The number of pong frames sent + * `send_pong_frame_bytes`: The total number of bytes sent in the payload of pong frames + * 
`send_connection_close_frame_count`: The number of connection close frames sent + * `send_connection_close_frame_bytes`: The total number of bytes sent in the payload of connection close frames + * `send_continuation_frame_count`: The number of continuation frames sent + * `send_continuation_frame_bytes`: The total number of bytes sent in the payload of continuation frames + + This event contains the following metadata: + + * `telemetry_span_context`: A unique identifier for this span + * `origin_telemetry_span_context`: The span context of the Bandit `:request` span from which + this connection originated + * `connection_telemetry_span_context`: The span context of the Thousand Island `:connection` + span which contains this request + * `websock`: The WebSock which is being used to serve this request. Specified as `websock_module` + * `error`: The error that caused the span to end, if it ended in error + """ + + defstruct span_name: nil, telemetry_span_context: nil, start_time: nil, start_metadata: nil + + @typep span_name :: atom() + @opaque t :: %__MODULE__{ + span_name: span_name(), + telemetry_span_context: reference(), + start_time: integer(), + start_metadata: :telemetry.event_metadata() + } + + @app_name :bandit + + @doc false + @spec start_span(span_name(), :telemetry.event_measurements(), :telemetry.event_metadata()) :: + t() + def start_span(span_name, measurements \\ %{}, metadata \\ %{}) do + measurements = Map.put_new_lazy(measurements, :monotonic_time, &monotonic_time/0) + telemetry_span_context = make_ref() + metadata = Map.put(metadata, :telemetry_span_context, telemetry_span_context) + event([span_name, :start], measurements, metadata) + + %__MODULE__{ + span_name: span_name, + telemetry_span_context: telemetry_span_context, + start_time: measurements[:monotonic_time], + start_metadata: metadata + } + end + + @doc false + @spec stop_span(t(), :telemetry.event_measurements(), :telemetry.event_metadata()) :: :ok + def stop_span(span, measurements \\ 
%{}, metadata \\ %{}) do + measurements = Map.put_new_lazy(measurements, :monotonic_time, &monotonic_time/0) + + measurements = + Map.put(measurements, :duration, measurements[:monotonic_time] - span.start_time) + + metadata = Map.merge(span.start_metadata, metadata) + + untimed_span_event(span, :stop, measurements, metadata) + end + + @spec span_exception(t(), Exception.kind(), Exception.t() | term(), Exception.stacktrace()) :: + :ok + def span_exception(span, kind, reason, stacktrace) do + # Using :exit for backwards-compatibility with Bandit =< 1.5.7 + kind = if kind == :error, do: :exit, else: kind + + metadata = + Map.merge(span.start_metadata, %{ + kind: kind, + exception: reason, + stacktrace: stacktrace + }) + + span_event(span, :exception, %{}, metadata) + end + + @doc false + @spec span_event(t(), span_name(), :telemetry.event_measurements(), :telemetry.event_metadata()) :: + :ok + def span_event(span, name, measurements \\ %{}, metadata \\ %{}) do + measurements = Map.put_new_lazy(measurements, :monotonic_time, &monotonic_time/0) + untimed_span_event(span, name, measurements, metadata) + end + + @doc false + @spec untimed_span_event( + t(), + span_name(), + :telemetry.event_measurements(), + :telemetry.event_metadata() + ) :: :ok + def untimed_span_event(span, name, measurements \\ %{}, metadata \\ %{}) do + metadata = Map.put(metadata, :telemetry_span_context, span.telemetry_span_context) + event([span.span_name, name], measurements, metadata) + end + + @spec monotonic_time :: integer() + defdelegate monotonic_time, to: System + + @spec event( + :telemetry.event_name(), + :telemetry.event_measurements(), + :telemetry.event_metadata() + ) :: :ok + defp event(suffix, measurements, metadata) do + :telemetry.execute([@app_name | suffix], measurements, metadata) + end + + @doc false + @spec telemetry_span_context(t()) :: reference() + def telemetry_span_context(span) do + span.telemetry_span_context + end +end diff --git a/deps/bandit/lib/bandit/trace.ex 
b/deps/bandit/lib/bandit/trace.ex new file mode 100644 index 0000000..61f165e --- /dev/null +++ b/deps/bandit/lib/bandit/trace.ex @@ -0,0 +1,150 @@ +defmodule Bandit.Trace do + @moduledoc """ + **THIS MODULE IS EXPERIMENTAL AND SUBJECT TO CHANGE** + + Helper functions to provide visibility into runtime errors within a running Bandit instance + + Can be used within an IEx session attached to a running Bandit instance, as follows: + + ``` + iex> Bandit.Trace.start_tracing() + ... # Wait for traces to show up whenever exceptions are raised + iex> Bandit.Trace.stop_tracing() + ``` + + It can also be started within your application by adding `Bandit.Trace` to your process tree. + + `Bandit.Trace` will emit a trace on every exception that Bandit sees (both those emitted from + within your Plug as well as internal ones due to protocol violations and the like). These traces + consist of a complete dump of all telemetry events that occur in the offending request's parent + connection. + + Tracing imposes a modest but non-zero load; it *should* be safe to run in most production + environments, but it is not intended to run on an ongoing basis. + + By default, `Bandit.Trace` maintains a FIFO log of the last 10000 telemetry events that Bandit + has emitted. Events which correlate to the parent connection which have been evicted from this + queue will not be included in this output. + + **WARNING** The emitted logs contains a *complete* copy of your request's Plug data, as well as *all* data + sent and received on all requests which are contained in the output. It is therefore of the utmost + importance that you carefully redact the output before sharing it publicly. 
+ """ + + defstruct queue: nil, size: 0, max_size: 10_000, trace_on_exception: true + + use GenServer + + @events [ + [:bandit, :request, :start], + [:bandit, :request, :stop], + [:bandit, :request, :exception], + [:bandit, :websocket, :start], + [:bandit, :websocket, :stop], + [:thousand_island, :connection, :start], + [:thousand_island, :connection, :stop], + [:thousand_island, :connection, :ready], + [:thousand_island, :connection, :async_recv], + [:thousand_island, :connection, :recv], + [:thousand_island, :connection, :recv_error], + [:thousand_island, :connection, :send], + [:thousand_island, :connection, :send_error], + [:thousand_island, :connection, :sendfile], + [:thousand_island, :connection, :sendfile_error], + [:thousand_island, :connection, :socket_shutdown] + ] + + @doc """ + Start tracing of all Bandit requests + + See module documentation for intended usage. Accepts the following options: + + * `max_size`: The size of the telemetry event queue to maintain. By default, `Bandit.Trace` maintains a + queue of the last 10000 telemetry events + * `trace_on_exception`: Whether or not to emit traces when an error is raised within + Bandit. 
Defaults to `true` + """ + def start_tracing(opts \\ []), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__) + + @doc """ + Stop any active trace session + """ + def stop_tracing, do: GenServer.stop(__MODULE__) + + def handle_event(event, measurements, metadata, pid), + do: GenServer.cast(pid, {:event, {event, measurements, metadata, :os.perf_counter()}}) + + @doc """ + Return the complete queue of telemetry events that `Bandit.Trace` is currently tracking + """ + def get_events, do: GenServer.call(__MODULE__, :get_events) + + @impl GenServer + def init(opts) do + _ = :telemetry.attach_many(self(), @events, &__MODULE__.handle_event/4, self()) + {:ok, struct!(%__MODULE__{queue: :queue.new()}, opts)} + end + + @impl GenServer + def terminate(_, _), do: :telemetry.detach(self()) + + @impl GenServer + def handle_cast({:event, event}, state) do + state + |> maybe_pop() + |> push(event) + |> tap(&maybe_trace(&1, event)) + |> then(&{:noreply, &1}) + end + + defp maybe_pop(%{size: size, max_size: max_size} = state) when size >= max_size, + do: maybe_pop(%{state | queue: :queue.drop(state.queue), size: size - 1}) + + defp maybe_pop(state), do: state + + defp push(state, event), + do: %{state | queue: :queue.in(event, state.queue), size: state.size + 1} + + defp maybe_trace( + %{trace_on_exception: true} = state, + {[:bandit, :request, :exception], _, metadata, _} + ) do + connection_span_context = Map.get(metadata, :connection_telemetry_span_context) + + IO.puts("======================================") + IO.puts("Starting telemetry trace for exception") + IO.puts("======================================") + + :queue.to_list(state.queue) + |> Enum.filter(fn {_, _, metadata, _} -> + Map.get(metadata, :telemetry_span_context) == connection_span_context || + Map.get(metadata, :connection_telemetry_span_context) == connection_span_context + end) + |> format_list() + |> inspect(limit: :infinity, pretty: true, printable_limit: :infinity) + |> IO.puts() + + 
IO.puts("=======================================") + IO.puts("Completed telemetry trace for exception") + IO.puts("=======================================") + + :ok + end + + defp maybe_trace(_state, _event), do: :ok + + @impl GenServer + def handle_call(:get_events, _from, state), + do: {:reply, :queue.to_list(state.queue) |> format_list(), state} + + defp format_list([]), do: :ok + + defp format_list([{_, _, _, start_time} | _] = events), + do: Enum.map(events, &format_tuple(&1, start_time)) + + defp format_tuple({event, measurements, metadata, time}, start_time) do + time = :erlang.convert_time_unit(time - start_time, :perf_counter, :microsecond) + %{telemetry_span_context: span_id} = metadata + {time, span_id, event, measurements, metadata} + end +end diff --git a/deps/bandit/lib/bandit/transport_error.ex b/deps/bandit/lib/bandit/transport_error.ex new file mode 100644 index 0000000..86e0101 --- /dev/null +++ b/deps/bandit/lib/bandit/transport_error.ex @@ -0,0 +1,6 @@ +defmodule Bandit.TransportError do + # Represents an error coming from the underlying transport which cannot be signalled back to the + # client by conventional means within the request. Examples include TCP socket closures and + # errors in the case of HTTP/1, and stream resets in HTTP/2 + defexception message: nil, error: nil +end diff --git a/deps/bandit/lib/bandit/websocket/README.md b/deps/bandit/lib/bandit/websocket/README.md new file mode 100644 index 0000000..ba76d6f --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/README.md @@ -0,0 +1,58 @@ +# WebSocket Handler + +Included in this folder is a complete `ThousandIsland.Handler` based implementation of WebSockets +as defined in [RFC 6455](https://datatracker.ietf.org/doc/rfc6455). + +## Upgrade mechanism + +A good overview of this process is contained in this [ElixirConf EU +talk](https://www.youtube.com/watch?v=usKLrYl4zlY). 
+ +Upgrading an HTTP connection to a WebSocket connection is coordinated by code +contained within several libraries, including Bandit, +[WebSockAdapter](https://github.com/phoenixframework/websock_adapter), and +[Plug](https://github.com/elixir-plug/plug). + +The HTTP request containing the upgrade request is first passed to the user's +application as a standard Plug call. After inspecting the request and deeming it +a suitable upgrade candidate (via whatever policy the application dictates), the +user indicates a desire to upgrade the connection to a WebSocket by calling +`WebSockAdapter.upgrade/4`, which checks that the request is a valid WebSocket +upgrade request, and then calls `Plug.Conn.upgrade_adapter/3` to signal to +Bandit that the connection should be upgraded at the conclusion of the request. +At the conclusion of the `Plug.call/2` callback, `Bandit.Pipeline` will then +attempt to upgrade the underlying connection. As part of this upgrade process, +`Bandit.DelegatingHandler` will switch the Handler for the connection to be +`Bandit.WebSocket.Handler`. This will cause any future communication after the +upgrade process to be handled directly by Bandit's WebSocket stack. + +## Process model + +Within a Bandit server, a WebSocket connection is modeled as a single process. +This process is directly tied to the lifecycle of the underlying WebSocket +connection; when upgrading from HTTP/1, the existing HTTP/1 handler process +'magically' becomes a WebSocket process by changing which Handler the +`Bandit.DelegatingHandler` delegates to. + +The execution model to handle a given request is quite straightforward: at +upgrade time, the `Bandit.DelegatingHandler` will call `handle_connection/2` to +allow the WebSocket handler to initialize any startup state. Connection state is +modeled by the `Bandit.WebSocket.Connection` struct and module. 
+ +All data subsequently received by the underlying [Thousand +Island](https://github.com/mtrudel/thousand_island) library will result in +a call to `Bandit.WebSocket.Handler.handle_data/3`, which will then attempt to +parse the data into one or more WebSocket frames. Once a frame has been +constructed, it is them passed through to the configured `WebSock` handler by +way of the underlying `Bandit.WebSocket.Connection`. + +# Testing + +All of this is exhaustively tested. Tests are broken up primarily into `protocol_test.exs`, which +is concerned with aspects of the implementation relating to protocol conformance and +client-facing concerns, while `sock_test.exs` is concerned with aspects of the implementation +having to do with the WebSock API and application-facing concerns. There are also more +unit-style tests covering frame serialization and deserialization. + +In addition, the `autobahn` conformance suite is run via a `System` wrapper & executes the entirety +of the suite against a running Bandit server. 
diff --git a/deps/bandit/lib/bandit/websocket/connection.ex b/deps/bandit/lib/bandit/websocket/connection.ex new file mode 100644 index 0000000..02cf008 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/connection.ex @@ -0,0 +1,338 @@ +defmodule Bandit.WebSocket.Connection do + @moduledoc false + # Implementation of a WebSocket lifecycle, implemented using a Socket protocol for communication + + alias Bandit.WebSocket.{Frame, PerMessageDeflate, Socket} + + defstruct websock: nil, + websock_state: nil, + state: :open, + compress: nil, + opts: [], + fragment_frame: nil, + span: nil, + metrics: %{} + + @typedoc "Connection state" + @type state :: :open | :closing | :closed + + @typedoc "Encapsulates the state of a WebSocket connection" + @type t :: %__MODULE__{ + websock: WebSock.impl(), + websock_state: WebSock.state(), + state: state(), + compress: PerMessageDeflate.t() | nil, + opts: keyword(), + fragment_frame: Frame.Text.t() | Frame.Binary.t() | nil, + span: Bandit.Telemetry.t(), + metrics: map() + } + + def init(websock, websock_state, connection_opts, socket) do + compress = Keyword.get(connection_opts, :compress) + + connection_telemetry_span_context = + ThousandIsland.Socket.telemetry_span(socket).telemetry_span_context + + span = + Bandit.Telemetry.start_span(:websocket, %{compress: compress}, %{ + connection_telemetry_span_context: connection_telemetry_span_context, + websock: websock + }) + + instance = %__MODULE__{ + websock: websock, + websock_state: websock_state, + compress: compress, + opts: connection_opts, + span: span + } + + websock.init(websock_state) |> handle_continutation(socket, instance) + end + + def handle_frame(frame, socket, %{fragment_frame: nil} = connection) do + connection = do_recv_metrics(frame, connection) + + case frame do + %Frame.Continuation{} -> + do_error(1002, "Received unexpected continuation frame (RFC6455§5.4)", socket, connection) + + %Frame.Text{fin: true, compressed: true} = frame -> + do_inflate(frame, socket, 
connection) + + %Frame.Text{fin: true} = frame -> + if !Keyword.get(connection.opts, :validate_text_frames, true) || String.valid?(frame.data) do + connection.websock.handle_in({frame.data, opcode: :text}, connection.websock_state) + |> handle_continutation(socket, connection) + else + do_error(1007, "Received non UTF-8 text frame (RFC6455§8.1)", socket, connection) + end + + %Frame.Text{fin: false} = frame -> + {:continue, %{connection | fragment_frame: frame}} + + %Frame.Binary{fin: true, compressed: true} = frame -> + do_inflate(frame, socket, connection) + + %Frame.Binary{fin: true} = frame -> + connection.websock.handle_in({frame.data, opcode: :binary}, connection.websock_state) + |> handle_continutation(socket, connection) + + %Frame.Binary{fin: false} = frame -> + {:continue, %{connection | fragment_frame: frame}} + + frame -> + handle_control_frame(frame, socket, connection) + end + end + + def handle_frame(frame, socket, %{fragment_frame: fragment_frame} = connection) + when not is_nil(fragment_frame) do + connection = do_recv_metrics(frame, connection) + + case frame do + %Frame.Continuation{fin: true} = frame -> + data = IO.iodata_to_binary([connection.fragment_frame.data | frame.data]) + + if oversize_message?(data, connection.opts) do + do_error(1009, "Received oversize fragmented message", socket, connection) + else + frame = %{connection.fragment_frame | fin: true, data: data} + handle_frame(frame, socket, %{connection | fragment_frame: nil}) + end + + %Frame.Continuation{fin: false} = frame -> + if IO.iodata_length(frame.data) == 0 do + do_error(1008, "Received zero byte non-fin continuation frame", socket, connection) + else + data = [connection.fragment_frame.data | frame.data] + + if oversize_message?(data, connection.opts) do + do_error(1009, "Received oversize fragmented message", socket, connection) + else + {:continue, %{connection | fragment_frame: %{connection.fragment_frame | data: data}}} + end + end + + %Frame.Text{} -> + do_error(1002, 
"Received unexpected text frame (RFC6455§5.4)", socket, connection) + + %Frame.Binary{} -> + do_error(1002, "Received unexpected binary frame (RFC6455§5.4)", socket, connection) + + frame -> + handle_control_frame(frame, socket, connection) + end + end + + defp oversize_message?(data, opts) do + case Keyword.get(opts, :max_fragmented_message_size, 8_000_000) do + 0 -> false + max_fragmented_message_size -> IO.iodata_length(data) > max_fragmented_message_size + end + end + + defp handle_control_frame(frame, socket, connection) do + case frame do + %Frame.ConnectionClose{} = frame -> + # This is a bit of a subtle case, see RFC6455§7.4.1-2 + reply_code = + case frame.code do + code when code in 1000..1003 or code in 1007..1011 or code > 2999 -> 1000 + _code -> 1002 + end + + {:continue, connection} = do_stop(reply_code, :remote, socket, connection) + {:close, %{connection | state: :closed, compress: nil}} + + %Frame.Ping{} = frame -> + connection = + Socket.send_frame(socket, {:pong, frame.data}, false) + |> do_send_metrics(connection) + + if function_exported?(connection.websock, :handle_control, 2) do + connection.websock.handle_control({frame.data, opcode: :ping}, connection.websock_state) + |> handle_continutation(socket, connection) + else + {:continue, connection} + end + + %Frame.Pong{} = frame -> + if function_exported?(connection.websock, :handle_control, 2) do + connection.websock.handle_control({frame.data, opcode: :pong}, connection.websock_state) + |> handle_continutation(socket, connection) + else + {:continue, connection} + end + end + end + + defp do_recv_metrics(frame, connection) do + metrics = + Bandit.WebSocket.Frame.recv_metrics(frame) + |> Enum.reduce(connection.metrics, fn {key, value}, metrics -> + Map.update(metrics, key, value, &(&1 + value)) + end) + + %{connection | metrics: metrics} + end + + defp do_send_metrics(metrics, connection) do + metrics = + metrics + |> Enum.reduce(connection.metrics, fn {key, value}, metrics -> + 
Map.update(metrics, key, value, &(&1 + value)) + end) + + %{connection | metrics: metrics} + end + + def handle_close(socket, connection), do: do_error(1006, :closed, socket, connection) + + # Some uncertainty if this should be 1000 or 1001 @ https://github.com/mtrudel/bandit/issues/89 + def handle_shutdown(socket, connection), do: do_stop(1000, :shutdown, socket, connection) + + def handle_error({:deserializing, :max_frame_size_exceeded = reason}, socket, connection), + do: do_error(1009, reason, socket, connection) + + def handle_error({:deserializing, reason}, socket, connection), + do: do_error(1002, reason, socket, connection) + + def handle_error(reason, socket, connection), do: do_error(1011, reason, socket, connection) + + def handle_timeout(socket, connection), do: do_error(1002, :timeout, socket, connection) + + def handle_info(msg, socket, connection) do + connection.websock.handle_info(msg, connection.websock_state) + |> handle_continutation(socket, connection) + end + + defp handle_continutation(continutation, socket, connection) do + case continutation do + {:ok, websock_state} -> + {:continue, %{connection | websock_state: websock_state}} + + {:reply, _status, msg, websock_state} -> + do_deflate(msg, socket, %{connection | websock_state: websock_state}) + + {:push, msg, websock_state} -> + do_deflate(msg, socket, %{connection | websock_state: websock_state}) + + {:stop, :normal, websock_state} -> + do_stop(1000, :normal, socket, %{connection | websock_state: websock_state}) + + {:stop, :normal, code, websock_state} -> + do_stop(code, :normal, socket, %{connection | websock_state: websock_state}) + + {:stop, :normal, code, msg, websock_state} -> + case do_deflate(msg, socket, %{connection | websock_state: websock_state}) do + {:continue, connection} -> do_stop(code, :normal, socket, connection) + other -> other + end + + {:stop, {:shutdown, :disconnected}, websock_state} -> + do_stop(1000, :normal, socket, %{connection | websock_state: websock_state}) 
+ + {:stop, {:shutdown, :restart}, websock_state} -> + do_stop(1012, :normal, socket, %{connection | websock_state: websock_state}) + + {:stop, reason, websock_state} -> + do_error(1011, reason, socket, %{connection | websock_state: websock_state}) + + {:stop, reason, code, websock_state} -> + do_error(code, reason, socket, %{connection | websock_state: websock_state}) + + {:stop, reason, code, msg, websock_state} -> + case do_deflate(msg, socket, %{connection | websock_state: websock_state}) do + {:continue, connection} -> do_error(code, reason, socket, connection) + other -> other + end + end + end + + defp do_stop(code, reason, socket, connection) do + if connection.state == :open do + if function_exported?(connection.websock, :terminate, 2) do + connection.websock.terminate(reason, connection.websock_state) + end + + _ = Socket.close(socket, code) + if connection.compress, do: PerMessageDeflate.close(connection.compress) + Bandit.Telemetry.stop_span(connection.span, connection.metrics) + end + + {:continue, %{connection | state: :closing, compress: nil}} + end + + defp do_error(code, reason, socket, connection) do + if connection.state == :open do + if function_exported?(connection.websock, :terminate, 2) do + connection.websock.terminate(maybe_wrap_reason(reason), connection.websock_state) + end + + _ = Socket.close(socket, code) + if connection.compress, do: PerMessageDeflate.close(connection.compress) + Bandit.Telemetry.stop_span(connection.span, connection.metrics, %{error: reason}) + end + + {:error, reason, %{connection | state: :closed, compress: nil}} + end + + defp maybe_wrap_reason(:timeout), do: :timeout + defp maybe_wrap_reason(reason), do: {:error, reason} + + defp do_deflate(msgs, socket, connection) when is_list(msgs) do + Enum.reduce(msgs, {:continue, connection}, fn + msg, {:continue, connection} -> do_deflate(msg, socket, connection) + _msg, other -> other + end) + end + + defp do_deflate({opcode, data} = msg, socket, connection) when opcode 
in [:text, :binary] do + case PerMessageDeflate.deflate(data, connection.compress) do + {:ok, data, compress} -> + connection = + Socket.send_frame(socket, {opcode, data}, true) + |> do_send_metrics(connection) + + {:continue, %{connection | compress: compress}} + + {:error, :no_compress} -> + connection = + Socket.send_frame(socket, msg, false) + |> do_send_metrics(connection) + + {:continue, connection} + + {:error, reason} -> + do_error(1007, "Deflation error: #{inspect(reason)}", socket, connection) + end + end + + defp do_deflate({opcode, _data} = msg, socket, connection) when opcode in [:ping, :pong] do + connection = + Socket.send_frame(socket, msg, false) + |> do_send_metrics(connection) + + {:continue, connection} + end + + defp do_inflate(frame, socket, connection) do + case PerMessageDeflate.inflate(frame.data, connection.compress) do + {:ok, data, compress} -> + frame = %{frame | data: data, compressed: false} + connection = %{connection | compress: compress} + handle_frame(frame, socket, connection) + + {:error, :no_compress} -> + do_error(1002, "Received unexpected compressed frame (RFC6455§5.2)", socket, connection) + + {:error, :too_much_inflation} -> + do_error(1009, "Received compressed frame inflating too much", socket, connection) + + {:error, _reason} -> + do_error(1007, "Inflation error", socket, connection) + end + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame.ex b/deps/bandit/lib/bandit/websocket/frame.ex new file mode 100644 index 0000000..1d5b42c --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame.ex @@ -0,0 +1,205 @@ +defmodule Bandit.WebSocket.Frame do + @moduledoc false + + alias Bandit.WebSocket.Frame + + @behaviour Bandit.Extractor + + @typedoc "Indicates an opcode" + @type opcode :: + (binary :: 0x2) + | (connection_close :: 0x8) + | (continuation :: 0x0) + | (ping :: 0x9) + | (pong :: 0xA) + | (text :: 0x1) + + @typedoc "A valid WebSocket frame" + @type frame :: + Frame.Continuation.t() + | Frame.Text.t() + | 
Frame.Binary.t() + | Frame.ConnectionClose.t() + | Frame.Ping.t() + | Frame.Pong.t() + + @impl Bandit.Extractor + @spec header_and_payload_length(binary(), non_neg_integer()) :: + {:ok, {header_length :: integer(), payload_length :: integer()}} + | {:error, :max_frame_size_exceeded | :client_frame_without_mask} + | :more + def header_and_payload_length( + <<_fin::1, _compressed::1, _rsv::2, _opcode::4, 1::1, 127::7, length::64, _mask::32, + _rest::binary>>, + max_frame_size + ) do + validate_max_frame_size(14, length, max_frame_size) + end + + def header_and_payload_length( + <<_fin::1, _compressed::1, _rsv::2, _opcode::4, 1::1, 126::7, length::16, _mask::32, + _rest::binary>>, + max_frame_size + ) do + validate_max_frame_size(8, length, max_frame_size) + end + + def header_and_payload_length( + <<_fin::1, _compressed::1, _rsv::2, _opcode::4, 1::1, length::7, _mask::32, + _rest::binary>>, + max_frame_size + ) + when length <= 125 do + validate_max_frame_size(6, length, max_frame_size) + end + + def header_and_payload_length( + <<_fin::1, _compressed::1, _rsv::2, _opcode::4, 0::1, _rest::binary>>, + _max_frame_size + ) do + {:error, :client_frame_without_mask} + end + + def header_and_payload_length(_msg, _max_frame_size) do + :more + end + + defp validate_max_frame_size(header_length, payload_length, max_frame_size) do + if max_frame_size != 0 and header_length + payload_length > max_frame_size do + {:error, :max_frame_size_exceeded} + else + {:ok, {header_length, payload_length}} + end + end + + @impl Bandit.Extractor + @spec deserialize(binary(), module()) :: {:ok, frame()} | {:error, term()} + def deserialize( + <>, + primitive_ops_module + ) do + to_frame(fin, compressed, rsv, opcode, mask, payload, primitive_ops_module) + end + + def deserialize( + <>, + primitive_ops_module + ) do + to_frame(fin, compressed, rsv, opcode, mask, payload, primitive_ops_module) + end + + def deserialize( + <>, + primitive_ops_module + ) do + to_frame(fin, compressed, rsv, opcode, 
mask, payload, primitive_ops_module) + end + + def deserialize(_msg, _primitive_ops_module) do + {:error, :deserialization_failed} + end + + def recv_metrics(%frame_type{} = frame) do + case frame_type do + Frame.Continuation -> + [ + recv_continuation_frame_count: 1, + recv_continuation_frame_bytes: IO.iodata_length(frame.data) + ] + + Frame.Text -> + [recv_text_frame_count: 1, recv_text_frame_bytes: IO.iodata_length(frame.data)] + + Frame.Binary -> + [recv_binary_frame_count: 1, recv_binary_frame_bytes: IO.iodata_length(frame.data)] + + Frame.ConnectionClose -> + [ + recv_connection_close_frame_count: 1, + recv_connection_close_frame_bytes: IO.iodata_length(frame.reason) + ] + + Frame.Ping -> + [recv_ping_frame_count: 1, recv_ping_frame_bytes: IO.iodata_length(frame.data)] + + Frame.Pong -> + [recv_pong_frame_count: 1, recv_pong_frame_bytes: IO.iodata_length(frame.data)] + end + end + + def send_metrics(%frame_type{} = frame) do + case frame_type do + Frame.Continuation -> + [ + send_continuation_frame_count: 1, + send_continuation_frame_bytes: IO.iodata_length(frame.data) + ] + + Frame.Text -> + [send_text_frame_count: 1, send_text_frame_bytes: IO.iodata_length(frame.data)] + + Frame.Binary -> + [send_binary_frame_count: 1, send_binary_frame_bytes: IO.iodata_length(frame.data)] + + Frame.ConnectionClose -> + [ + send_connection_close_frame_count: 1, + send_connection_close_frame_bytes: IO.iodata_length(frame.reason) + ] + + Frame.Ping -> + [send_ping_frame_count: 1, send_ping_frame_bytes: IO.iodata_length(frame.data)] + + Frame.Pong -> + [send_pong_frame_count: 1, send_pong_frame_bytes: IO.iodata_length(frame.data)] + end + end + + defp to_frame(_fin, _compressed, rsv, _opcode, _mask, _payload, _primitive_ops_module) + when rsv != 0x0 do + {:error, "Received unsupported RSV flags #{rsv}"} + end + + defp to_frame(fin, compressed, 0x0, opcode, mask, payload, primitive_ops_module) do + fin = fin == 0x1 + compressed = compressed == 0x1 + unmasked_payload = 
primitive_ops_module.ws_mask(payload, mask) + + opcode + |> case do + 0x0 -> Frame.Continuation.deserialize(fin, compressed, unmasked_payload) + 0x1 -> Frame.Text.deserialize(fin, compressed, unmasked_payload) + 0x2 -> Frame.Binary.deserialize(fin, compressed, unmasked_payload) + 0x8 -> Frame.ConnectionClose.deserialize(fin, compressed, unmasked_payload) + 0x9 -> Frame.Ping.deserialize(fin, compressed, unmasked_payload) + 0xA -> Frame.Pong.deserialize(fin, compressed, unmasked_payload) + unknown -> {:error, "unknown opcode #{unknown}"} + end + end + + defprotocol Serializable do + @moduledoc false + + @spec serialize(any()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(frame) + end + + @spec serialize(frame()) :: iolist() + def serialize(frame) do + frame + |> Serializable.serialize() + |> Enum.map(fn {opcode, fin, compressed, payload} -> + fin = if fin, do: 0x1, else: 0x0 + compressed = if compressed, do: 0x1, else: 0x0 + mask_and_length = payload |> IO.iodata_length() |> mask_and_length() + [<>, mask_and_length, payload] + end) + end + + defp mask_and_length(length) when length <= 125, do: <<0::1, length::7>> + defp mask_and_length(length) when length <= 65_535, do: <<0::1, 126::7, length::16>> + defp mask_and_length(length), do: <<0::1, 127::7, length::64>> +end diff --git a/deps/bandit/lib/bandit/websocket/frame/binary.ex b/deps/bandit/lib/bandit/websocket/frame/binary.ex new file mode 100644 index 0000000..3041826 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/binary.ex @@ -0,0 +1,20 @@ +defmodule Bandit.WebSocket.Frame.Binary do + @moduledoc false + + defstruct fin: nil, compressed: false, data: <<>> + + @typedoc "A WebSocket binary frame" + @type t :: %__MODULE__{fin: boolean(), compressed: boolean(), data: iodata()} + + @spec deserialize(boolean(), boolean(), iodata()) :: {:ok, t()} + def deserialize(fin, compressed, payload) do + {:ok, %__MODULE__{fin: fin, compressed: compressed, data: payload}} + end + + defimpl 
Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{} = frame), do: [{0x2, frame.fin, frame.compressed, frame.data}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame/connection_close.ex b/deps/bandit/lib/bandit/websocket/frame/connection_close.ex new file mode 100644 index 0000000..b073df1 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/connection_close.ex @@ -0,0 +1,49 @@ +defmodule Bandit.WebSocket.Frame.ConnectionClose do + @moduledoc false + + defstruct code: nil, reason: <<>> + + @typedoc "A WebSocket status code, or none at all" + @type status_code :: non_neg_integer() | nil + + @typedoc "A WebSocket connection close frame" + @type t :: %__MODULE__{code: status_code(), reason: binary()} + + @spec deserialize(boolean(), boolean(), iodata()) :: {:ok, t()} | {:error, term()} + def deserialize(true, false, <<>>) do + {:ok, %__MODULE__{}} + end + + def deserialize(true, false, <>) do + {:ok, %__MODULE__{code: code}} + end + + def deserialize(true, false, <>) when byte_size(reason) <= 123 do + if String.valid?(reason) do + {:ok, %__MODULE__{code: code, reason: reason}} + else + {:error, "Received non UTF-8 connection close frame (RFC6455§5.5.1)"} + end + end + + def deserialize(true, false, _payload) do + {:error, "Invalid connection close payload (RFC6455§5.5)"} + end + + def deserialize(false, false, _payload) do + {:error, "Cannot have a fragmented connection close frame (RFC6455§5.5)"} + end + + def deserialize(true, true, _payload) do + {:error, "Cannot have a compressed connection close frame (RFC7692§6.1)"} + end + + defimpl Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{code: nil}), do: [{0x8, true, false, <<>>}] + def serialize(%@for{reason: nil} = frame), do: [{0x8, true, false, 
<>}] + def serialize(%@for{} = frame), do: [{0x8, true, false, [<>, frame.reason]}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame/continuation.ex b/deps/bandit/lib/bandit/websocket/frame/continuation.ex new file mode 100644 index 0000000..e629851 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/continuation.ex @@ -0,0 +1,24 @@ +defmodule Bandit.WebSocket.Frame.Continuation do + @moduledoc false + + defstruct fin: nil, data: <<>> + + @typedoc "A WebSocket continuation frame" + @type t :: %__MODULE__{fin: boolean(), data: iodata()} + + @spec deserialize(boolean(), boolean(), iodata()) :: {:ok, t()} | {:error, term()} + def deserialize(fin, false, payload) do + {:ok, %__MODULE__{fin: fin, data: payload}} + end + + def deserialize(_fin, true, _payload) do + {:error, "Cannot have a compressed continuation frame (RFC7692§6.1)"} + end + + defimpl Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{} = frame), do: [{0x0, frame.fin, false, frame.data}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame/ping.ex b/deps/bandit/lib/bandit/websocket/frame/ping.ex new file mode 100644 index 0000000..491ee3d --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/ping.ex @@ -0,0 +1,32 @@ +defmodule Bandit.WebSocket.Frame.Ping do + @moduledoc false + + defstruct data: <<>> + + @typedoc "A WebSocket ping frame" + @type t :: %__MODULE__{data: iodata()} + + @spec deserialize(boolean(), boolean(), iodata()) :: {:ok, t()} | {:error, term()} + def deserialize(true, false, <>) when byte_size(data) <= 125 do + {:ok, %__MODULE__{data: data}} + end + + def deserialize(true, false, _payload) do + {:error, "Invalid ping payload (RFC6455§5.5.2)"} + end + + def deserialize(false, false, _payload) do + {:error, "Cannot have a fragmented ping frame (RFC6455§5.5.2)"} + end + + def deserialize(true, true, _payload) do + {:error, "Cannot 
have a compressed ping frame (RFC7692§6.1)"} + end + + defimpl Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{} = frame), do: [{0x9, true, false, frame.data}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame/pong.ex b/deps/bandit/lib/bandit/websocket/frame/pong.ex new file mode 100644 index 0000000..88eb043 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/pong.ex @@ -0,0 +1,32 @@ +defmodule Bandit.WebSocket.Frame.Pong do + @moduledoc false + + defstruct data: <<>> + + @typedoc "A WebSocket pong frame" + @type t :: %__MODULE__{data: iodata()} + + @spec deserialize(boolean(), boolean(), iodata()) :: {:ok, t()} | {:error, term()} + def deserialize(true, false, <>) when byte_size(data) <= 125 do + {:ok, %__MODULE__{data: data}} + end + + def deserialize(true, false, _payload) do + {:error, "Invalid pong payload (RFC6455§5.5.3)"} + end + + def deserialize(false, false, _payload) do + {:error, "Cannot have a fragmented pong frame (RFC6455§5.5.3)"} + end + + def deserialize(true, true, _payload) do + {:error, "Cannot have a compressed pong frame (RFC7692§6.1)"} + end + + defimpl Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{} = frame), do: [{0xA, true, false, frame.data}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/frame/text.ex b/deps/bandit/lib/bandit/websocket/frame/text.ex new file mode 100644 index 0000000..f2f4fc4 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/frame/text.ex @@ -0,0 +1,20 @@ +defmodule Bandit.WebSocket.Frame.Text do + @moduledoc false + + defstruct fin: nil, compressed: false, data: <<>> + + @typedoc "A WebSocket text frame" + @type t :: %__MODULE__{fin: boolean(), compressed: boolean(), data: iodata()} + + @spec deserialize(boolean(), boolean(), 
iodata()) :: {:ok, t()} + def deserialize(fin, compressed, payload) do + {:ok, %__MODULE__{fin: fin, compressed: compressed, data: payload}} + end + + defimpl Bandit.WebSocket.Frame.Serializable do + alias Bandit.WebSocket.Frame + + @spec serialize(@for.t()) :: [{Frame.opcode(), boolean(), boolean(), iodata()}] + def serialize(%@for{} = frame), do: [{0x1, frame.fin, frame.compressed, frame.data}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/handler.ex b/deps/bandit/lib/bandit/websocket/handler.ex new file mode 100644 index 0000000..88e39c4 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/handler.ex @@ -0,0 +1,97 @@ +defmodule Bandit.WebSocket.Handler do + @moduledoc false + # A WebSocket handler conforming to RFC6455, structured as a ThousandIsland.Handler + + use ThousandIsland.Handler + + alias Bandit.Extractor + alias Bandit.WebSocket.{Connection, Frame} + + @impl ThousandIsland.Handler + def handle_connection(socket, state) do + {websock, websock_opts, connection_opts} = state.upgrade_opts + + connection_opts + |> Keyword.take([:fullsweep_after, :max_heap_size]) + |> Enum.each(fn {key, value} -> :erlang.process_flag(key, value) end) + + connection_opts = Keyword.merge(state.opts.websocket, connection_opts) + + primitive_ops_module = + Keyword.get(state.opts.websocket, :primitive_ops_module, Bandit.PrimitiveOps.WebSocket) + + state = + state + |> Map.take([:handler_module]) + |> Map.put(:extractor, Extractor.new(Frame, primitive_ops_module, connection_opts)) + + case Connection.init(websock, websock_opts, connection_opts, socket) do + {:continue, connection} -> + case Keyword.get(connection_opts, :timeout) do + nil -> {:continue, Map.put(state, :connection, connection)} + timeout -> {:continue, Map.put(state, :connection, connection), {:persistent, timeout}} + end + + {:error, reason, connection} -> + {:error, reason, Map.put(state, :connection, connection)} + end + end + + @impl ThousandIsland.Handler + def handle_data(data, socket, state) do + 
state.extractor + |> Extractor.push_data(data) + |> pop_frame(socket, state) + end + + defp pop_frame(extractor, socket, state) do + case Extractor.pop_frame(extractor) do + {extractor, {:ok, frame}} -> + case Connection.handle_frame(frame, socket, state.connection) do + {:continue, connection} -> + pop_frame(extractor, socket, %{state | extractor: extractor, connection: connection}) + + {:close, connection} -> + {:close, %{state | extractor: extractor, connection: connection}} + + {:error, reason, connection} -> + {:error, reason, %{state | extractor: extractor, connection: connection}} + end + + {extractor, {:error, reason}} -> + {:error, {:deserializing, reason}, %{state | extractor: extractor}} + + {extractor, :more} -> + {:continue, %{state | extractor: extractor}} + end + end + + @impl ThousandIsland.Handler + def handle_close(socket, %{connection: connection}), + do: Connection.handle_close(socket, connection) + + def handle_close(_socket, _state), do: :ok + + @impl ThousandIsland.Handler + def handle_shutdown(socket, state), do: Connection.handle_shutdown(socket, state.connection) + + @impl ThousandIsland.Handler + def handle_error(reason, socket, state), + do: Connection.handle_error(reason, socket, state.connection) + + @impl ThousandIsland.Handler + def handle_timeout(socket, state), do: Connection.handle_timeout(socket, state.connection) + + def handle_info({:plug_conn, :sent}, {socket, state}), + do: {:noreply, {socket, state}, socket.read_timeout} + + def handle_info(msg, {socket, state}) do + case Connection.handle_info(msg, socket, state.connection) do + {:continue, connection_state} -> + {:noreply, {socket, %{state | connection: connection_state}}, socket.read_timeout} + + {:error, reason, connection_state} -> + {:stop, reason, {socket, %{state | connection: connection_state}}} + end + end +end diff --git a/deps/bandit/lib/bandit/websocket/handshake.ex b/deps/bandit/lib/bandit/websocket/handshake.ex new file mode 100644 index 0000000..a736162 --- 
/dev/null +++ b/deps/bandit/lib/bandit/websocket/handshake.ex @@ -0,0 +1,105 @@ +defmodule Bandit.WebSocket.Handshake do + @moduledoc false + # Functions to support WebSocket handshaking as described in RFC6455§4.2 & RFC7692 + + import Plug.Conn + + @type extensions :: [{String.t(), [{String.t(), String.t() | true}]}] + + @spec handshake(Plug.Conn.t(), keyword(), keyword()) :: + {:ok, Plug.Conn.t(), Keyword.t()} | {:error, String.t()} + def handshake(%Plug.Conn{} = conn, opts, websocket_opts) do + with :ok <- Bandit.WebSocket.UpgradeValidation.validate_upgrade(conn) do + do_handshake(conn, opts, websocket_opts) + end + end + + @spec do_handshake(Plug.Conn.t(), keyword(), keyword()) :: {:ok, Plug.Conn.t(), keyword()} + defp do_handshake(conn, opts, websocket_opts) do + requested_extensions = requested_extensions(conn) + + {negotiated_params, returned_data} = + if Keyword.get(opts, :compress) && Keyword.get(websocket_opts, :compress, true) do + Bandit.WebSocket.PerMessageDeflate.negotiate(requested_extensions, websocket_opts) + else + {nil, []} + end + + conn = send_handshake(conn, returned_data) + {:ok, conn, Keyword.put(opts, :compress, negotiated_params)} + end + + @spec requested_extensions(Plug.Conn.t()) :: extensions() + defp requested_extensions(%Plug.Conn{} = conn) do + conn + |> get_req_header("sec-websocket-extensions") + |> Enum.flat_map(&Plug.Conn.Utils.list/1) + |> Enum.map(fn extension -> + [name | params] = + extension + |> String.split(";", trim: true) + |> Enum.map(&String.trim/1) + + params = split_params(params) + + {name, params} + end) + end + + @spec split_params([String.t()]) :: [{String.t(), String.t() | true}] + defp split_params(params) do + params + |> Enum.map(fn param -> + param + |> String.split("=", trim: true) + |> Enum.map(&String.trim/1) + |> case do + [key, value] -> {key, value} + [key] -> {key, true} + end + end) + end + + @spec send_handshake(Plug.Conn.t(), extensions()) :: Plug.Conn.t() + defp send_handshake(%Plug.Conn{} = conn, 
extensions) do + # Taken from RFC6455§4.2.2/5. Note that we can take for granted the existence of the + # sec-websocket-key header in the request, since we check for it in the handshake? call above + [client_key] = get_req_header(conn, "sec-websocket-key") + concatenated_key = client_key <> "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + hashed_key = :crypto.hash(:sha, concatenated_key) + server_key = Base.encode64(hashed_key) + + headers = + [ + {:upgrade, "websocket"}, + {:connection, "Upgrade"}, + {:"sec-websocket-accept", server_key} + ] ++ + websocket_extension_header(extensions) ++ + conn.resp_headers + + inform(conn, 101, headers) + end + + @spec websocket_extension_header(extensions()) :: keyword() + defp websocket_extension_header([]), do: [] + + defp websocket_extension_header(extensions) do + extensions = + extensions + |> Enum.map_join(",", fn {extension, params} -> + params = + params + |> Enum.flat_map(fn + {_param, false} -> [] + {param, true} -> [to_string(param)] + {param, value} -> [to_string(param) <> "=" <> to_string(value)] + end) + + [to_string(extension) | params] + |> Enum.join(";") + end) + + [{:"sec-websocket-extensions", extensions}] + end +end diff --git a/deps/bandit/lib/bandit/websocket/permessage_deflate.ex b/deps/bandit/lib/bandit/websocket/permessage_deflate.ex new file mode 100644 index 0000000..e567771 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/permessage_deflate.ex @@ -0,0 +1,187 @@ +defmodule Bandit.WebSocket.PerMessageDeflate do + @moduledoc false + # Support for per-message deflate extension, per RFC7692§7 + + @typedoc "Encapsulates the state of a WebSocket permessage-deflate context" + @type t :: %__MODULE__{ + server_no_context_takeover: boolean(), + client_no_context_takeover: boolean(), + server_max_window_bits: 8..15, + client_max_window_bits: 8..15, + inflate_context: :zlib.zstream(), + deflate_context: :zlib.zstream(), + max_inflate_ratio: integer() + } + + defstruct server_no_context_takeover: false, + 
client_no_context_takeover: false, + server_max_window_bits: 15, + client_max_window_bits: 15, + inflate_context: nil, + deflate_context: nil, + max_inflate_ratio: nil + + @valid_params ~w[server_no_context_takeover client_no_context_takeover server_max_window_bits client_max_window_bits max_inflate_ratio] + + def negotiate(requested_extensions, opts) do + :proplists.get_all_values("permessage-deflate", requested_extensions) + |> Enum.find_value(&do_negotiate/1) + |> case do + nil -> {nil, []} + params -> {init(params, opts), "permessage-deflate": params} + end + end + + defp do_negotiate(params) do + with params <- normalize_params(params), + true <- validate_params(params) do + resolve_params(params) + else + _ -> nil + end + end + + defp normalize_params(params) do + params + |> Enum.map(fn + {"server_max_window_bits", true} -> {"server_max_window_bits", true} + {"server_max_window_bits", value} -> {"server_max_window_bits", parse(value)} + {"client_max_window_bits", true} -> {"client_max_window_bits", 15} + {"client_max_window_bits", value} -> {"client_max_window_bits", parse(value)} + value -> value + end) + end + + defp parse(value) do + case Integer.parse(value) do + {int_value, ""} -> int_value + :error -> value + end + end + + defp validate_params(params) do + no_invalid_params = params |> :proplists.split(@valid_params) |> elem(1) == [] + no_repeat_params = params |> :proplists.get_keys() |> length() == length(params) + + no_invalid_values = + :proplists.get_value("server_no_context_takeover", params) in [:undefined, true] && + :proplists.get_value("client_no_context_takeover", params) in [:undefined, true] && + :proplists.get_value("server_max_window_bits", params, 15) in 8..15 && + :proplists.get_value("client_max_window_bits", params, 15) in 8..15 + + no_invalid_params && no_repeat_params && no_invalid_values + end + + # This is where we finally determine which parameters to accept. 
Note that we don't convert to + # atoms until this stage to avoid potential atom exhaustion + defp resolve_params(params) do + @valid_params + |> Enum.flat_map(fn param_name -> + case :proplists.get_value(param_name, params) do + :undefined -> [] + param -> [{String.to_existing_atom(param_name), param}] + end + end) + end + + defp init(params, opts) do + instance = struct(__MODULE__, params) + inflate_context = :zlib.open() + :ok = :zlib.inflateInit(inflate_context, fix_bits(-instance.client_max_window_bits)) + + deflate_context = :zlib.open() + deflate_opts = Keyword.get(opts, :deflate_options, []) + + :ok = + :zlib.deflateInit( + deflate_context, + Keyword.get(deflate_opts, :level, :default), + :deflated, + fix_bits(-instance.server_max_window_bits), + Keyword.get(deflate_opts, :mem_level, 8), + Keyword.get(deflate_opts, :strategy, :default) + ) + + max_inflate_ratio = Keyword.get(opts, :max_inflate_ratio, 25) + + %{ + instance + | inflate_context: inflate_context, + deflate_context: deflate_context, + max_inflate_ratio: max_inflate_ratio + } + end + + # https://www.erlang.org/doc/man/zlib.html#deflateInit-6 + defp fix_bits(-8), do: -9 + defp fix_bits(other), do: other + + # Note that we pass back the context to the caller even though it is unmodified locally + + def inflate(data, %__MODULE__{} = context) do + safe_inflate( + context.inflate_context, + :zlib.safeInflate(context.inflate_context, <>), + [], + byte_size(data) * context.max_inflate_ratio + ) + |> case do + {:ok, inflated_iodata, inflate_context} -> + if context.client_no_context_takeover, do: :zlib.inflateReset(context.inflate_context) + {:ok, IO.iodata_to_binary(inflated_iodata), %{context | inflate_context: inflate_context}} + + {:error, reason} -> + {:error, reason} + end + rescue + e -> {:error, "Error encountered #{inspect(e)}"} + end + + def inflate(_data, nil), do: {:error, :no_compress} + + defp safe_inflate(inflate_context, {:continue, deflated}, buffer, bytes_remaining) + when 
bytes_remaining > 0 do + safe_inflate( + inflate_context, + :zlib.safeInflate(inflate_context, <<>>), + [buffer | deflated], + bytes_remaining - IO.iodata_length(deflated) + ) + end + + defp safe_inflate(_inflate_context, {:continue, _deflated}, _buffer, bytes_remaining) + when bytes_remaining <= 0 do + {:error, :too_much_inflation} + end + + defp safe_inflate(inflate_context, {:finished, deflated}, buffer, _bytes_remaining) do + {:ok, [buffer | deflated], inflate_context} + end + + def deflate(data, %__MODULE__{} = context) do + deflated_data = + context.deflate_context + |> :zlib.deflate(data, :sync) + |> IO.iodata_to_binary() + + deflated_size = byte_size(deflated_data) - 4 + + deflated_data = + case deflated_data do + <> -> deflated_data + deflated -> deflated + end + + if context.server_no_context_takeover, do: :zlib.deflateReset(context.deflate_context) + {:ok, deflated_data, context} + rescue + e -> {:error, "Error encountered #{inspect(e)}"} + end + + def deflate(_data, nil), do: {:error, :no_compress} + + def close(%__MODULE__{} = context) do + :zlib.close(context.inflate_context) + :zlib.close(context.deflate_context) + end +end diff --git a/deps/bandit/lib/bandit/websocket/socket.ex b/deps/bandit/lib/bandit/websocket/socket.ex new file mode 100644 index 0000000..bec73e4 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/socket.ex @@ -0,0 +1,75 @@ +defprotocol Bandit.WebSocket.Socket do + @moduledoc false + # + # A protocol defining the low-level functionality of a WebSocket + # + + @type t :: term() + @type frame_type :: :text | :binary | :ping | :pong + @type send_frame_stats :: [ + send_binary_frame_bytes: non_neg_integer(), + send_binary_frame_count: non_neg_integer(), + send_ping_frame_bytes: non_neg_integer(), + send_ping_frame_count: non_neg_integer(), + send_pong_frame_bytes: non_neg_integer(), + send_pong_frame_count: non_neg_integer(), + send_text_frame_bytes: non_neg_integer(), + send_text_frame_count: non_neg_integer() + ] + + @spec 
send_frame(t(), {frame_type :: frame_type(), data :: iodata()}, boolean()) :: + send_frame_stats() + def send_frame(socket, data_and_frame_type, compressed) + + @spec close(t(), code :: WebSock.close_detail()) :: :ok | {:error, :inet.posix()} + def close(socket, code) +end + +defimpl Bandit.WebSocket.Socket, for: ThousandIsland.Socket do + @moduledoc false + # + # An implementation of Bandit.WebSocket.Socket for use with ThousandIsland.Socket instances + # + + alias Bandit.WebSocket.Frame + + @spec send_frame(@for.t(), {frame_type :: @protocol.frame_type(), data :: iodata()}, boolean()) :: + @protocol.send_frame_stats() + def send_frame(socket, {:text, data}, compressed) do + _ = do_send_frame(socket, %Frame.Text{fin: true, data: data, compressed: compressed}) + [send_text_frame_count: 1, send_text_frame_bytes: IO.iodata_length(data)] + end + + def send_frame(socket, {:binary, data}, compressed) do + _ = do_send_frame(socket, %Frame.Binary{fin: true, data: data, compressed: compressed}) + [send_binary_frame_count: 1, send_binary_frame_bytes: IO.iodata_length(data)] + end + + def send_frame(socket, {:ping, data}, false) do + _ = do_send_frame(socket, %Frame.Ping{data: data}) + [send_ping_frame_count: 1, send_ping_frame_bytes: IO.iodata_length(data)] + end + + def send_frame(socket, {:pong, data}, false) do + _ = do_send_frame(socket, %Frame.Pong{data: data}) + [send_pong_frame_count: 1, send_pong_frame_bytes: IO.iodata_length(data)] + end + + @spec close(@for.t(), non_neg_integer() | {non_neg_integer(), binary()}) :: + :ok | {:error, :inet.posix()} + def close(socket, {code, detail}) when is_integer(code) do + _ = do_send_frame(socket, %Frame.ConnectionClose{code: code, reason: detail}) + @for.shutdown(socket, :write) + end + + def close(socket, code) when is_integer(code) do + _ = do_send_frame(socket, %Frame.ConnectionClose{code: code}) + @for.shutdown(socket, :write) + end + + @spec do_send_frame(@for.t(), Frame.frame()) :: + :ok | {:error, :closed | :timeout | 
:inet.posix()} + defp do_send_frame(socket, frame) do + @for.send(socket, Frame.serialize(frame)) + end +end diff --git a/deps/bandit/lib/bandit/websocket/upgrade_validation.ex b/deps/bandit/lib/bandit/websocket/upgrade_validation.ex new file mode 100644 index 0000000..0b67352 --- /dev/null +++ b/deps/bandit/lib/bandit/websocket/upgrade_validation.ex @@ -0,0 +1,65 @@ +defmodule Bandit.WebSocket.UpgradeValidation do + @moduledoc false + # Provides validation of WebSocket upgrade requests as described in RFC6455§4.2 + + # Validates that the request satisfies the requirements to issue a WebSocket upgrade response. + # Validations are performed based on the clauses laid out in RFC6455§4.2 + # + # This function does not actually perform an upgrade or change the connection in any way + # + # Returns `:ok` if the connection satisfies the requirements for a WebSocket upgrade, and + # `{:error, reason}` if not + # + @spec validate_upgrade(Plug.Conn.t()) :: :ok | {:error, String.t()} + def validate_upgrade(conn) do + case Plug.Conn.get_http_protocol(conn) do + :"HTTP/1.1" -> validate_upgrade_http1(conn) + other -> {:error, "HTTP version #{other} unsupported"} + end + end + + # Validate the conn per RFC6455§4.2.1 + defp validate_upgrade_http1(conn) do + with :ok <- assert_method(conn, "GET"), + :ok <- assert_header_nonempty(conn, "host"), + :ok <- assert_header_contains(conn, "connection", "upgrade"), + :ok <- assert_header_contains(conn, "upgrade", "websocket"), + :ok <- assert_header_nonempty(conn, "sec-websocket-key"), + :ok <- assert_header_equals(conn, "sec-websocket-version", "13") do + :ok + end + end + + defp assert_method(conn, verb) do + case conn.method do + ^verb -> :ok + other -> {:error, "HTTP method #{other} unsupported"} + end + end + + defp assert_header_nonempty(conn, header) do + case Plug.Conn.get_req_header(conn, header) do + [] -> {:error, "'#{header}' header is absent"} + _ -> :ok + end + end + + defp assert_header_equals(conn, header, expected) do + 
case Plug.Conn.get_req_header(conn, header) |> Enum.map(&String.downcase(&1, :ascii)) do + [^expected] -> :ok + value -> {:error, "'#{header}' header must equal '#{expected}', got #{inspect(value)}"} + end + end + + defp assert_header_contains(conn, header, needle) do + haystack = Plug.Conn.get_req_header(conn, header) + + haystack + |> Enum.flat_map(&Plug.Conn.Utils.list/1) + |> Enum.any?(&(String.downcase(&1, :ascii) == needle)) + |> case do + true -> :ok + false -> {:error, "'#{header}' header must contain '#{needle}', got #{inspect(haystack)}"} + end + end +end diff --git a/deps/bandit/mix.exs b/deps/bandit/mix.exs new file mode 100644 index 0000000..21ef5eb --- /dev/null +++ b/deps/bandit/mix.exs @@ -0,0 +1,95 @@ +defmodule Bandit.MixProject do + use Mix.Project + + def project do + [ + app: :bandit, + version: "1.11.0", + elixir: "~> 1.13", + start_permanent: Mix.env() == :prod, + deps: deps(), + elixirc_paths: elixirc_path(Mix.env()), + dialyzer: dialyzer(), + name: "Bandit", + description: "A pure-Elixir HTTP server built for Plug & WebSock apps", + source_url: "https://github.com/mtrudel/bandit", + package: [ + maintainers: ["Mat Trudel"], + licenses: ["MIT"], + links: %{ + "GitHub" => "https://github.com/mtrudel/bandit", + "Changelog" => "https://hexdocs.pm/bandit/changelog.html" + }, + files: ["lib", "mix.exs", "README*", "LICENSE*", "CHANGELOG*"] + ], + docs: docs() + ] + end + + def application do + [extra_applications: [:logger], mod: {Bandit.Application, []}] + end + + defp deps do + [ + {:thousand_island, "~> 1.0"}, + {:plug, "~> 1.18"}, + {:websock, "~> 0.5"}, + {:hpax, "~> 1.0"}, + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:req, "~> 0.3", only: [:dev, :test]}, + {:machete, ">= 0.0.0", only: [:dev, :test]}, + {:ex_doc, "~> 0.24", only: [:dev, :test], runtime: false}, + {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false}, + {:credo, "~> 1.0", only: [:dev, :test], runtime: false}, + {:mix_test_watch, "~> 1.0", only: :dev, runtime: false} + ] + end 
+ + defp elixirc_path(:test), do: ["lib/", "test/support"] + defp elixirc_path(_), do: ["lib/"] + + defp dialyzer do + [ + plt_core_path: "priv/plts", + plt_file: {:no_warn, "priv/plts/dialyzer.plt"}, + plt_add_deps: :apps_direct, + plt_add_apps: [:ssl, :public_key], + flags: [ + "-Werror_handling", + "-Wextra_return", + "-Wmissing_return", + "-Wunknown", + "-Wunmatched_returns", + "-Wunderspecs" + ] + ] + end + + defp docs do + [ + extras: [ + "CHANGELOG.md": [title: "Changelog"], + "README.md": [title: "README"], + "lib/bandit/http1/README.md": [ + filename: "HTTP1_README.md", + title: "HTTP/1 Implementation Notes" + ], + "lib/bandit/http2/README.md": [ + filename: "HTTP2_README.md", + title: "HTTP/2 Implementation Notes" + ], + "lib/bandit/websocket/README.md": [ + filename: "WebSocket_README.md", + title: "WebSocket Implementation Notes" + ] + ], + groups_for_extras: [ + "Implementation Notes": Path.wildcard("lib/bandit/*/README.md") + ], + skip_undefined_reference_warnings_on: Path.wildcard("**/*.md"), + main: "Bandit", + logo: "assets/ex_doc_logo.png" + ] + end +end diff --git a/deps/db_connection/.formatter.exs b/deps/db_connection/.formatter.exs new file mode 100644 index 0000000..ab786a0 --- /dev/null +++ b/deps/db_connection/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test,examples,integration_test}/**/*.{ex,exs}"] +] diff --git a/deps/db_connection/.hex b/deps/db_connection/.hex new file mode 100644 index 0000000..0d60bbc Binary files /dev/null and b/deps/db_connection/.hex differ diff --git a/deps/db_connection/CHANGELOG.md b/deps/db_connection/CHANGELOG.md new file mode 100644 index 0000000..44c63aa --- /dev/null +++ b/deps/db_connection/CHANGELOG.md @@ -0,0 +1,125 @@ +# Changelog + +## v2.10.1 (2026-05-09) + +* Ensure `:max_lifetime` works on connections that have been idle +* Always distribute `:disconnect_all` over the given limit, even for idle connections + +## v2.10.0 (2026-04-24) + +* 
Add `:max_lifetime` to connection +* Fix watcher blocking during slow pool termination +* Fix owner unallow using wrong ref when switching owners +* Add label (repo name) to ownership errors +* Wrap options into a sensitive data struct when crossing process boundaries + +## v2.9.0 (2026-01-10) + +* Enhancements + * Whenever possible, have error messages that include a PID also include ancestors and one of process label, name, or initial call + * Allow clients to retry when specified + * Remove sensitive options before calling `after_connect` + +## v2.8.1 (2025-06-24) + +* Enhancements + * Automatically set and read process labels in error reports + +## v2.8.0 (2025-06-24) + +* Enhancements + * Allow `unallow_existing` as an opt to `ownership_allow/4` + * Improve ETS performance by enabling descentralized counters + * Increase default queue interval to 2000ms + +## v2.7.0 (2024-07-02) + +* Enhancements + * Add API for retrieving pool metrics + * Include a built-in listener that emits telemetry events + +* Bug fixes + * Discard EXIT messages from trapped exits + +## v2.6.0 (2023-10-15) + +* Enhancements + * Call `disconnect` on terminate + * Allow `handle_begin` callbacks to return query for logging purposes + * Add `:connection_listeners_tag` + * Add `DBConnection.available_connection_options/0` + * Add `DBConnection.available_start_options/0` + +## v2.5.0 (2023-04-10) + +* Internal changes + * No longer depend on `connection` + +## v2.4.3 (2022-11-22) + +* Bug fixes + * Fix bug where `disconnect_all/2` interval would be disabled above 4294ms + * Add `:idle_limit` to limit the amount of disconnections on a ping + +## v2.4.2 (2022-03-03) + +* Enhancements + * Add `DBConnection.connection_module/1` + +## v2.4.1 (2021-10-14) + +* Enhancements + * Add `DBConnection.disconnect_all/2` + +## v2.4.0 (2021-04-02) + +* Enhancements + * Add telemetry events for connection errors + * Use `:rand` default algorithm + * Allow decentralized lookups on DBConnection.Ownership + +## 
v2.3.1 (2020-11-25) + +* Enhancements + * Add `:connection_listeners` to `DBConnection.start_link/2` + * Allow connection `~> 1.0` + +## v2.3.0 (2020-10-14) + +This release requires Elixir v1.7+. + +* Bug fixes + * Fix deprecation warnings related to the use of `System.stacktrace()` + +## v2.2.2 (2020-04-22) + +* Bug fixes + * Make sure all idle connections in the pool are pinged on each idle interval + +## v2.2.1 (2020-02-04) + +* Enhancements + * Remove warnings + +## v2.2.0 (2019-12-11) + +* Enhancements + * Add `:idle_time` to `DBConnection.LogEntry` + * Ping all stale connections on idle interval + * Add `crash_reason` to relevant Logger error reports + * Ping all stale connections on idle interval. One possible downside of this approach is that we may shut down all connections at once and if there is a request around this time, the response time will be higher. However, this is likely better than the current approach, where we ping only the first one, which means we can have a pool of stale connections. The current behaviour is the same as in v1.0 + +## v2.1.1 (2019-07-17) + +* Enhancements + * Reduce severity in client exits to info + * Improve error message on redirect checkout + +* Bug fixes + * Make sure ownership timeout is respected on automatic checkouts + +## v2.1.0 (2019-06-07) + +* Enhancements + * Require Elixir v1.6+ + * Include client stacktrace on check out timeouts diff --git a/deps/db_connection/README.md b/deps/db_connection/README.md new file mode 100644 index 0000000..0cd937d --- /dev/null +++ b/deps/db_connection/README.md @@ -0,0 +1,100 @@ +# DBConnection + +Database connection behaviour and database connection pool designed for +handling transaction, prepare/execute, cursors and client process +describe/encode/decode. + +Examples of using the `DBConnection` behaviour are available in +`./examples/db_agent/` and `./examples/tcp_connection/`. 
+ +There is also [a series of articles on building database adapters](http://blog.plataformatec.com.br/2018/11/building-a-new-mysql-adapter-for-ecto-part-i-hello-world/). It includes articles covering both DBConnection and Ecto integrations. + +## Contributing + +Run unit tests with: + + $ mix test + +To run the integration tests (for each available pool): + + $ mix test.pools + +To run all tests: + + $ mix test.all + +## Design + +This library is made of four main modules: + + * `DBConnection` - this is the code running on the client + and the specification of the DBConnection API + + * `DBConnection.Connection` - this is the process that + establishes the database connection + + * `DBConnection.ConnectionPool` - this is the connection + pool. A client asks the connection pool for a connection. + There is also an ownership pool, used mostly during tests, + which we won't discuss here. + + * `DBConnection.Holder` - the holder is responsible for + keeping the connection and checkout state. It is modelled + by using an ETS table. + +Once a connection is created, it creates a holder and +assigns the connection pool as the heir. Then the holder +is promptly given away to the pool. The connection itself +is mostly a dummy. It is there to handle connections and pings. +The state itself (such as the socket) is all in the holder. + +Once there is a checkout, the pool gives the holder to the +client process and stores all relevant information in the +holder table itself. If the client terminates without +checking in, then the holder is given back to the pool via +the heir mechanism. The pool will then discard the connection. + +One important design detail in DBConnection is that it avoids +copying data. Other database libraries would send a request +to the connection process, perform the query in the connection +process, and then send it back to the client. This means a lot of +data copying in Elixir. DBConnection keeps the socket in the +holder and works on it directly. 
+ +DBConnection also takes all of the care necessary to handle +failures, and it shuts down the connection and the socket +whenever the client does not check in the connection to avoid +recycling sockets/connections in a corrupted state (such as a socket +that is stuck inside a transaction). + +### Deadlines + +When a checkout happens, a deadline is started by the client +to send a message to the pool after a time interval. If the +deadline is reached and the connection is still checked out, +the holder is deleted and the connection is terminated. If the +client tries to use a terminated connection, an error will +be raised (see `Holder.handle/4`). + +### Pool + +The queuing algorithm used by the pool is [CoDel](https://queue.acm.org/appendices/codel.html) +which allows us to plan for overloads and reject requests +without clogging the pool once checkouts do not read a certain +target. + +## License + +Copyright 2015 James Fish + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/db_connection/hex_metadata.config b/deps/db_connection/hex_metadata.config new file mode 100644 index 0000000..55902c8 --- /dev/null +++ b/deps/db_connection/hex_metadata.config @@ -0,0 +1,33 @@ +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/elixir-ecto/db_connection">>}]}. +{<<"name">>,<<"db_connection">>}. +{<<"version">>,<<"2.10.1">>}. +{<<"description">>, + <<"Database connection behaviour for database transactions and connection pooling">>}. +{<<"elixir">>,<<"~> 1.11">>}. 
+{<<"files">>, + [<<"lib">>,<<"lib/db_connection">>,<<"lib/db_connection/util.ex">>, + <<"lib/db_connection/backoff.ex">>,<<"lib/db_connection/log_entry.ex">>, + <<"lib/db_connection/ownership.ex">>, + <<"lib/db_connection/connection_pool">>, + <<"lib/db_connection/connection_pool/pool.ex">>, + <<"lib/db_connection/task.ex">>,<<"lib/db_connection/connection_error.ex">>, + <<"lib/db_connection/query.ex">>,<<"lib/db_connection/holder.ex">>, + <<"lib/db_connection/ownership">>, + <<"lib/db_connection/ownership/proxy.ex">>, + <<"lib/db_connection/ownership/manager.ex">>, + <<"lib/db_connection/watcher.ex">>,<<"lib/db_connection/pool.ex">>, + <<"lib/db_connection/connection_pool.ex">>, + <<"lib/db_connection/connection.ex">>, + <<"lib/db_connection/telemetry_listener.ex">>, + <<"lib/db_connection/app.ex">>,<<"lib/db_connection.ex">>, + <<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"CHANGELOG.md">>]}. +{<<"app">>,<<"db_connection">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/db_connection/lib/db_connection.ex b/deps/db_connection/lib/db_connection.ex new file mode 100644 index 0000000..b971601 --- /dev/null +++ b/deps/db_connection/lib/db_connection.ex @@ -0,0 +1,2010 @@ +defmodule DBConnection.SensitiveData do + @moduledoc false + @derive {Inspect, only: []} + defstruct [:data] +end + +defmodule DBConnection.Stream do + defstruct [:conn, :query, :params, :opts] + + @type t :: %__MODULE__{conn: DBConnection.conn(), query: any, params: any, opts: Keyword.t()} +end + +defimpl Enumerable, for: DBConnection.Stream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun), do: DBConnection.reduce(stream, acc, fun) +end + +defmodule DBConnection.PrepareStream do + defstruct [:conn, :query, :params, :opts] + + @type t :: %__MODULE__{conn: DBConnection.conn(), query: any, params: any, opts: Keyword.t()} +end + +defimpl Enumerable, for: DBConnection.PrepareStream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun), do: DBConnection.reduce(stream, acc, fun) +end + +defmodule DBConnection do + @moduledoc """ + A behaviour module for implementing efficient database connection + client processes, pools and transactions. + + `DBConnection` handles callbacks differently to most behaviours. Some + callbacks will be called in the calling process, with the state + copied to and from the calling process. This is useful when the data + for a request is large and means that a calling process can interact + with a socket directly. + + A side effect of this is that query handling can be written in a + simple blocking fashion, while the connection process itself will + remain responsive to OTP messages and can enqueue and cancel queued + requests. 
+ + If a request or series of requests takes too long to handle in the + client process a timeout will trigger and the socket can be cleanly + disconnected by the connection process. + + If a calling process waits too long to start its request it will + timeout and its request will be cancelled. This prevents requests + building up when the database can not keep up. + + If no requests are received for an idle interval, the pool will + ping all stale connections which can then ping the database to keep + the connection alive. + + Should the connection be lost, attempts will be made to reconnect with + (configurable) exponential random backoff to reconnect. All state is + lost when a connection disconnects but the process is reused. + + The `DBConnection.Query` protocol provide utility functions so that + queries can be encoded and decoded without blocking the connection or pool. + + ## Connection pools + + DBConnection connections support using different pools via the `:pool` option + passed to `start_link/2`. The default pool is `DBConnection.ConnectionPool`. + Another supported pool that is commonly used for tests is `DBConnection.Ownership`. + + For now, using *custom* pools is not supported since the API for pools is not + public. + + ## Errors + + Most functions in this module raise a `DBConnection.ConnectionError` exception + when failing to check out a connection from the pool. 
+ """ + require Logger + + alias DBConnection.Holder + alias DBConnection.Util + + require Holder + + defstruct [:pool_ref, :conn_ref, :conn_mode] + + defmodule EncodeError do + defexception [:message] + end + + defmodule TransactionError do + defexception [:status, :message] + + def exception(:idle), + do: %__MODULE__{status: :idle, message: "transaction is not started"} + + def exception(:transaction), + do: %__MODULE__{status: :transaction, message: "transaction is already started"} + + def exception(:error), + do: %__MODULE__{status: :error, message: "transaction is aborted"} + end + + @typedoc """ + Run or transaction connection reference. + """ + @type t :: %__MODULE__{pool_ref: any, conn_ref: reference} + @type conn :: GenServer.server() | t + @type query :: DBConnection.Query.t() + @type params :: any + @type result :: any + @type cursor :: any + @type status :: :idle | :transaction | :error + + @type start_option :: + {:after_connect, (t -> any) | {module, atom, [any]} | nil} + | {:after_connect_timeout, timeout} + | {:connection_listeners, [Process.dest()] | nil | {[Process.dest()], any}} + | {:backoff_max, non_neg_integer} + | {:backoff_min, non_neg_integer} + | {:backoff_type, :stop | :exp | :rand | :rand_exp} + | {:checkout_retries, non_neg_integer} + | {:configure, (keyword -> keyword) | {module, atom, [any]} | nil} + | {:idle_interval, non_neg_integer} + | {:idle_limit, non_neg_integer} + | {:max_lifetime, Range.t()} + | {:max_restarts, non_neg_integer} + | {:max_seconds, pos_integer} + | {:name, GenServer.name()} + | {:pool, module} + | {:pool_size, pos_integer} + | {:queue_interval, non_neg_integer} + | {:queue_target, non_neg_integer} + | {:show_sensitive_data_on_connection_error, boolean} + + @typedoc """ + An option you can pass to DBConnection functions (*deprecated*). + + > #### Deprecated {: .warning} + > + > This option is deprecated since v2.6.0. Use `t:connection_option/0` instead. 
+ + """ + @type option :: connection_option + + @typedoc """ + An option you can pass to DBConnection functions. + """ + @typedoc since: "2.6.0" + @type connection_option :: + {:log, (DBConnection.LogEntry.t() -> any) | {module, atom, [any]} | nil} + | {:queue, boolean} + | {:timeout, timeout} + | {:deadline, integer | nil} + + @doc """ + Connect to the database. Return `{:ok, state}` on success or + `{:error, exception}` on failure. + + If an error is returned it will be logged and another + connection attempt will be made after a backoff interval. + + This callback is called in the connection process. + """ + @callback connect(opts :: Keyword.t()) :: + {:ok, state :: any} | {:error, Exception.t()} + + @doc """ + Checkouts the state from the connection process. Return `{:ok, state}` + to allow the checkout or `{:disconnect, exception, state}` to disconnect. + + This callback is called immediately after the connection is established + and the state is never effectively checked in again. That's because + DBConnection keeps the connection state in an ETS table that is moved + between the different clients checking out connections. There is no + `checkin` callback. The state is only handed back to the connection + process during pings and (re)connects. + + This callback is called in the connection process. + """ + @callback checkout(state :: any) :: + {:ok, new_state :: any} | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Called when the connection has been idle for a period of time. Return + `{:ok, state}` to continue or `{:disconnect, exception, state}` to + disconnect. + + This callback is called if no callbacks have been called after the + idle timeout and a client process is not using the state. The idle + timeout can be configured by the `:idle_interval` and `:idle_limit` + options. This function can be called whether the connection is checked + in or checked out. + + This callback is called in the connection process. 
+ """ + @callback ping(state :: any) :: + {:ok, new_state :: any} | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle the beginning of a transaction. + + Return `{:ok, result, state}`/`{:ok, query, result, state}` to continue, + `{status, state}` to notify caller that the transaction can not begin due + to the transaction status `status`, or `{:disconnect | :disconnect_and_retry, exception, state}` + to error and disconnect (and optionally retry). If `{:ok, query, result, state}` + is returned, the query will be used to log the begin command. Otherwise, + it will be logged as `begin`. + + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + + This callback is called in the client process. + """ + @callback handle_begin(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {:ok, query, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect | :disconnect_and_retry, Exception.t(), new_state :: any} + + @doc """ + Handle committing a transaction. Return `{:ok, result, state}` on successfully + committing transaction, `{status, state}` to notify caller that the + transaction can not commit due to the transaction status `status`, + or `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + + This callback is called in the client process. + """ + @callback handle_commit(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle rolling back a transaction. 
Return `{:ok, result, state}` on successfully + rolling back transaction, `{status, state}` to notify caller that the + transaction can not rollback due to the transaction status `status` or + `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database' transaction status without side effect. + + This callback is called in the client and connection process. + """ + @callback handle_rollback(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle getting the transaction status. Return `{:idle, state}` if outside a + transaction, `{:transaction, state}` if inside a transaction, + `{:error, state}` if inside an aborted transaction, or + `{:disconnect | :disconnect_and_retry, exception, state}` to error and disconnect + (and optionally retry). + + If the callback returns a `:disconnect` tuples then `status/2` will return + `:error`. + """ + @callback handle_status(opts :: Keyword.t(), state :: any) :: + {status, new_state :: any} + | {:disconnect | :disconnect_and_retry, Exception.t(), new_state :: any} + + @doc """ + Prepare a query with the database. Return `{:ok, query, state}` where + `query` is a query to pass to `execute/4` or `close/3`, + `{:error, exception, state}` to return an error and continue or + `{:disconnect | :disconnect_and_retry, exception, state}` to error and disconnect + (and optionally retry). + + This callback is intended for cases where the state of a connection is + needed to prepare a query and/or the query can be saved in the + database to call later. + + This callback is called in the client process. 
+ """ + @callback handle_prepare(query, opts :: Keyword.t(), state :: any) :: + {:ok, query, new_state :: any} + | {:error | :disconnect | :disconnect_and_retry, Exception.t(), new_state :: any} + + @doc """ + Execute a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, result, state}` to return altered query `query` and result + `result` and continue, `{:error, exception, state}` to return an error and + continue or `{:disconnect | :disconnect_and_retry, exception, state}` to + error and disconnect (and optionally retry). + + This callback is called in the client process. + """ + @callback handle_execute(query, params, opts :: Keyword.t(), state :: any) :: + {:ok, query, result, new_state :: any} + | {:error | :disconnect | :disconnect_and_retry, Exception.t(), new_state :: any} + + @doc """ + Close a query prepared by `c:handle_prepare/3` with the database. Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect | :disconnect_and_retry, exception, state}` to + error and disconnect (and optionally retry). + + This callback is called in the client process. + """ + @callback handle_close(query, opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {:error | :disconnect | :disconnect_and_retry, Exception.t(), new_state :: any} + + @doc """ + Declare a cursor using a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, cursor, state}` to return altered query `query` and cursor + `cursor` for a stream and continue, `{:error, exception, state}` to return an + error and continue or `{:disconnect, exception, state}` to error and disconnect. + + This callback is called in the client process. 
+ """ + @callback handle_declare(query, params, opts :: Keyword.t(), state :: any) :: + {:ok, query, cursor, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Fetch the next result from a cursor declared by `c:handle_declare/4`. Return + `{:cont, result, state}` to return the result `result` and continue using + cursor, `{:halt, result, state}` to return the result `result` and close the + cursor, `{:error, exception, state}` to return an error and close the + cursor, `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @callback handle_fetch(query, cursor, opts :: Keyword.t(), state :: any) :: + {:cont | :halt, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Deallocate a cursor declared by `c:handle_declare/4` with the database. Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @callback handle_deallocate(query, cursor, opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Disconnect from the database. Return `:ok`. + + This callback is called from the connection process. The first argument is + either the exception from a `:disconnect` 3-tuple returned by a previous + callback or an exception generated by the connection process. + + If the state is controlled by a client and it exits or times out while + processing a request, the last known state will be sent and the exception + will be a `DBConnection.ConnectionError`. + + When the connection is stopped, this callback will be invoked from `terminate`. 
+ The last known state will be sent and the exception will be a `DBConnection.ConnectionError` + containing the reason for the exit. To have the same happen on unexpected + shutdowns, you may trap exits from the `connect` callback. + """ + @callback disconnect(err :: Exception.t(), state :: any) :: :ok + + @connection_module_key :connection_module + @checkout_retries 3 + + @doc """ + Use `DBConnection` to set the behaviour. + """ + defmacro __using__(_) do + quote location: :keep do + @behaviour DBConnection + end + end + + @doc """ + Starts and links to a database connection process. + + By default the `DBConnection` starts a pool with a single connection. + The size of the pool can be increased with `:pool_size`. A separate + pool can be given with the `:pool` option. + + ### Options + + * `:after_connect` - A function to run on connect using `run/3`, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.t/0` prepended + to `args` or `nil` (default: `nil`) + + * `:after_connect_timeout` - The maximum time allowed to perform + function specified by `:after_connect` option (default: `15_000`) + + * `:backoff_min` - The minimum backoff interval (default: `1_000`) + + * `:backoff_max` - The maximum backoff interval (default: `30_000`) + + * `:backoff_type` - The backoff strategy, `:stop` for no backoff and + to stop, `:exp` for exponential, `:rand` for random and `:rand_exp` for + random exponential (default: `:rand_exp`) + + * `:checkout_retries` - The number of times to checkout a new connection + whenever the operation fails because the database disconnected. Note + not all operations can be retried and each adapter specifies which + operations are safe to retry + + * `:configure` - A function to run before every connect attempt to + dynamically configure the options, either a 1-arity fun, + `{module, function, args}` or `nil`. This function is called *in the + connection process*. 
For more details, see + [Connection Configuration Callback](#start_link/2-connection-configuration-callback) + + * `:connection_listeners` - A list of process destinations to send + notification messages whenever a connection is connected or disconnected. + See "Connection listeners" below + + * `:idle_interval` - Controls the frequency we check for idle connections + in the pool. We then notify each idle connection to ping the database. + In practice, the ping happens within `idle_interval <= ping < 2 * idle_interval`. + Defaults to 1000ms. + + * `:idle_limit` - The number of connections to ping on each `:idle_interval`. + Defaults to the pool size (all connections). + + * `:max_restarts` and `:max_seconds` - Configures the `:max_restarts` and + `:max_seconds` for the connection pool supervisor (see the `Supervisor` docs). + Typically speaking the connection process doesn't terminate, except due to + faults in DBConnection. However, if backoff has been disabled, then they + also terminate whenever a connection is disconnected (for instance, due to + client or server errors) + + * `:max_lifetime` - The number of ms the connection is allowed to live. + It is a range so you can jitter/spread disconnections over some time period. + For example, to have a max lifetime between 8 and 9 minutes, you can set it + to `480_000..540_000`. Because the timer is started *after* the connection + to the database is established, the connection may live for slightly longer. + If the connection is idle, the worst case wait is of + `540_000 + 2 * idle_interval`. If the connection is in use, it may last as + long as the connection is checked out over the max period. Default is `nil`. + Enabling this option requires a backoff to be set, so connections can properly + reconnect. + + * `:name` - A name to register the started process (see the `:name` option + in `GenServer.start_link/3`) + + * `:pool` - Chooses the pool to be started (default: `DBConnection.ConnectionPool`). 
+ See ["Connection pools"](#module-connection-pools). + + * `:pool_size` - Chooses the size of the pool. Must be greater or equal to 1. (default: `1`) + + * `:queue_target` and `:queue_interval` - See "Queue config" below + + * `:show_sensitive_data_on_connection_error` - By default, `DBConnection` + hides all information during connection errors to avoid leaking credentials + or other sensitive information. You can set this option if you wish to + see complete errors and stacktraces during connection errors + + ### Example + + {:ok, conn} = DBConnection.start_link(mod, [idle_interval: 5_000]) + + ## Queue config + + Handling requests is done through a queue. When DBConnection is + started, there are two relevant options to control the queue: + + * `:queue_target` in milliseconds, defaults to 50ms + * `:queue_interval` in milliseconds, defaults to 2000ms + + Our goal is to wait at most `:queue_target` for a connection. + If all connections checked out during a `:queue_interval` takes + more than `:queue_target`, then we double the `:queue_target`. + If checking out connections take longer than the new target, + then we start dropping messages. + + For example, by default our target is 50ms. If all connections + checkouts take longer than 50ms for a whole second, we double + the target to 100ms and we start dropping messages if the + time to checkout goes above the new limit. + + This allows us to better plan for overloads as we can refuse + requests before they are sent to the database, which would + otherwise increase the burden on the database, making the + overload worse. + + ## Connection listeners + + The `:connection_listeners` option allows one or more processes to be notified + whenever a connection is connected or disconnected. A listener may be a remote + or local PID, a locally registered name, or a tuple in the form of + `{registered_name, node}` for a registered name at another node. 
+ + Each listener process may receive the following messages where `pid` + identifies the connection process: + + * `{:connected, pid}` + * `{:disconnected, pid}` + + If the value of `:connection_listeners` is a tuple like `{listeners, term}`, then + the messages are these instead: + + * `{:connected, pid, term}` + * `{:disconnected, pid, term}` + + Note the disconnected messages are not guaranteed to be delivered if the + `pid` for connection crashes. So it is recommended to monitor the connected + `pid` if you want to track all disconnections. + + Here is an example of a `:connection_listener` implementation: + + defmodule DBConnectionListener do + use GenServer + + def start_link(opts) do + GenServer.start_link(__MODULE__, [], opts) + end + + def get_notifications(pid) do + GenServer.call(pid, :read_state) + end + + @impl true + def init(stack) when is_list(stack) do + {:ok, stack} + end + + @impl true + def handle_call(:read_state, _from, state) do + {:reply, state, state} + end + + @impl true + def handle_info({:connected, _pid} = msg, state) do + {:noreply, [msg | state]} + end + + @impl true + def handle_info({_other_states, _pid} = msg, state) do + {:noreply, [msg | state]} + end + end + + You can then start it, pass the PID in the `connection_listeners` + option on `DBConnection.start_link/2` and, when needed, can query the notifications: + + {:ok, pid} = DBConnectionListener.start_link([]) + {:ok, _conn} = DBConnection.start_link(SomeModule, [connection_listeners: [pid]]) + notifications = DBConnectionListener.get_notifications(pid) + + ### Tagging messages + + If you pass `{listeners, tag}` as an option, you can specify an arbitrary `tag` term that will + be sent alongside all `:connected`/`:disconnected` messages. This is useful if you + want to track information about the pool a connection belongs to or any other information. + + This feature is available since v2.6.0. 
Before this version `:connection_listeners` only + accepted a list of listener processes. + + ## Connection Configuration Callback + + The `:configure` function will be called before each individual connection to the + database is made. It receives all of the options provided to `start_link/2` as well + as an additional generated value named `:pool_index`. The returned value will be + passed as the options into the appropriate `:connect` callback. This provides a way + for the user to dynamically configure the connection options. + + `:pool_index` is an integer in `1..pool_size` that represents the current connection's + place in the enumeration of all of the pool's connections. It can be used, for example, + to configure a unique database per connection when asynchronous tests cannot be performed + on a single database. + + The allowed callbacks are: + + * A 1-arity function that receives the options from `start_link/2` as well as + `:pool_index` + * `{module, function, args}` where the options from `start_link/2` as well as + `:pool_index` are prepended to `args` before the function is called + * `nil` if you do not want to modify the existing options + + ## Telemetry + + A `[:db_connection, :connection_error]` event is published whenever a + connection checkout receives a `%DBConnection.ConnectionError{}`. + This event is emitted from the process that attempts to checkout the + connection. + + Measurements: + + * `:count` - A fixed-value measurement which always measures 1. + + Metadata + + * `:error` - The `DBConnection.ConnectionError` struct which triggered the event. + + * `:opts` - All options given to the pool operation + + You may also consume `[:db_connection, :connected]` and `[:db_connection, :disconnected]` + events by spawning a `DBConnection.TelemetryListener` process that subscribes to the pool + and emits them in a robust manner. 
+ """ + @spec start_link(module, [start_option()] | Keyword.t()) :: GenServer.on_start() + def start_link(conn_mod, opts) do + case child_spec(conn_mod, opts) do + {_, {m, f, args}, _, _, _, _} -> apply(m, f, args) + %{start: {m, f, args}} -> apply(m, f, args) + end + end + + @doc """ + Creates a supervisor child specification for a pool of connections. + + See `start_link/2` for options. + """ + @spec child_spec(module, [start_option()] | Keyword.t()) :: :supervisor.child_spec() + def child_spec(conn_mod, opts) do + pool = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + pool.child_spec({conn_mod, opts}) + end + + @doc """ + Returns the names of all possible options that you can pass to `start_link/2`. + + This is mostly useful for library authors that base their library on top of + `DBConnection`, since they can use the return value of this function to perform + validation on options only passing down these options to DBConnection. + + See also `t:start_option/0`. + """ + @doc since: "2.6.0" + @spec available_start_options() :: [atom, ...] + def available_start_options do + [ + :after_connect, + :after_connect_timeout, + :connection_listeners, + :backoff_max, + :backoff_min, + :backoff_type, + :checkout_retries, + :configure, + :idle_interval, + :idle_limit, + :max_restarts, + :max_seconds, + :name, + :pool, + :pool_size, + :queue_interval, + :queue_target, + :show_sensitive_data_on_connection_error + ] + end + + @doc """ + Returns the names of all possible options that you can pass to most functions + in this module. + + This is mostly useful for library authors that base their library on top of + `DBConnection`, since they can use the return value of this function to perform + validation on options only passing down these options to DBConnection. + + See also `t:connection_option/0`. + """ + @doc since: "2.6.0" + @spec available_connection_options() :: [atom, ...] 
+ def available_connection_options do + [:log, :queue, :timeout, :deadline] + end + + @doc """ + Forces all connections in the pool to disconnect within the given interval + in milliseconds. + + Once this function is called, the pool will disconnect all of its connections + as they are checked in or as they are pinged. Checked in and idle connections + will be randomly disconnected within the given time interval. + + If the connection has a backoff configured (which is the case by default), + disconnecting means an attempt at a new connection will be done immediately + after, without starting a new process for each connection. However, if backoff + has been disabled, the connection process will terminate. In such cases, + disconnecting all connections may cause the pool supervisor to restart + depending on the max_restarts/max_seconds configuration of the pool, + so you will want to set those carefully. + """ + @spec disconnect_all(conn, non_neg_integer, [connection_option()] | Keyword.t()) :: :ok + def disconnect_all(conn, interval, opts \\ []) when interval >= 0 do + pool = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + pool.disconnect_all(conn, interval, opts) + end + + @doc """ + Prepare a query with a database connection for later execution. + + It returns `{:ok, query}` on success or `{:error, exception}` if there was + an error. + + The returned `query` can then be passed to `execute/4` and/or `close/3` + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. 
+ See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_prepare/3`. + + ### Example + + DBConnection.transaction(pool, fn conn -> + query = %Query{statement: "SELECT * FROM table"} + query = DBConnection.prepare!(conn, query) + try do + DBConnection.execute!(conn, query, []) + after + DBConnection.close(conn, query) + end + end) + + """ + @spec prepare(conn, query, [connection_option()] | Keyword.t()) :: + {:ok, query} | {:error, Exception.t()} + def prepare(conn, query, opts \\ []) do + meter = meter(opts) + + result = + with {:ok, query, meter} <- parse(query, meter, opts) do + run(conn, &run_prepare(&1, query, &2, &3), meter, opts) + end + + log(result, :prepare, query, nil) + end + + @doc """ + Prepare a query with a database connection and return the prepared + query. An exception is raised on error. + + See `prepare/3`. + """ + @spec prepare!(conn, query, [connection_option()] | Keyword.t()) :: query + def prepare!(conn, query, opts \\ []) do + case prepare(conn, query, opts) do + {:ok, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Prepare a query and execute it with a database connection and return both the + prepared query and the result, `{:ok, query, result}` on success or + `{:error, exception}` if there was an error. + + The returned `query` can be passed to `execute/4` and `close/3`. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). 
See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + ### Example + + query = %Query{statement: "SELECT id FROM table WHERE id=$1"} + {:ok, query, result} = DBConnection.prepare_execute(conn, query, [1]) + {:ok, result2} = DBConnection.execute(conn, query, [2]) + :ok = DBConnection.close(conn, query) + """ + @spec prepare_execute(conn, query, params, [connection_option()] | Keyword.t()) :: + {:ok, query, result} + | {:error, Exception.t()} + def prepare_execute(conn, query, params, opts \\ []) do + result = + with {:ok, query, meter} <- parse(query, meter(opts), opts) do + parsed_prepare_execute(conn, query, params, meter, opts) + end + + log(result, :prepare_execute, query, params) + end + + defp parsed_prepare_execute(conn, query, params, meter, opts) do + with {:ok, query, result, meter} <- + run(conn, &run_prepare_execute(&1, query, params, &2, &3), meter, opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {:ok, query, result, meter} + end + end + + @doc """ + Prepare a query and execute it with a database connection and return both the + prepared query and result. An exception is raised on error. + + See `prepare_execute/4`. 
+ """ + @spec prepare_execute!(conn, query, [connection_option()] | Keyword.t()) :: {query, result} + def prepare_execute!(conn, query, params, opts \\ []) do + case prepare_execute(conn, query, params, opts) do + {:ok, query, result} -> {query, result} + {:error, err} -> raise err + end + end + + @doc """ + Execute a prepared query with a database connection and return + `{:ok, query, result}` on success or `{:error, exception}` if there was an error. + + If the query is not prepared on the connection an attempt may be made to + prepare it and then execute again. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `handle_execute/4`. + + See `prepare/3`. 
+ """ + @spec execute(conn, query, params, [connection_option()] | Keyword.t()) :: + {:ok, query, result} | {:error, Exception.t()} + def execute(conn, query, params, opts \\ []) do + result = + case maybe_encode(query, params, meter(opts), opts) do + {:prepare, meter} -> + parsed_prepare_execute(conn, query, params, meter, opts) + + {:ok, params, meter} -> + with {:ok, query, result, meter} <- + run(conn, &run_execute(&1, query, params, &2, &3), meter, opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {:ok, query, result, meter} + end + + {_, _, _, _} = error -> + error + end + + log(result, :execute, query, params) + end + + @doc """ + Execute a prepared query with a database connection and return the + result. Raises an exception on error. + + See `execute/4` + """ + @spec execute!(conn, query, params, [connection_option()] | Keyword.t()) :: result + def execute!(conn, query, params, opts \\ []) do + case execute(conn, query, params, opts) do + {:ok, _query, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Close a prepared query on a database connection and return `{:ok, result}` on + success or `{:error, exception}` on error. + + This function should be used to free resources held by the connection + process and/or the database server. + + ## Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. 
+ See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_close/3`. + + See `prepare/3`. + """ + @spec close(conn, query, [connection_option()] | Keyword.t()) :: + {:ok, result} | {:error, Exception.t()} + def close(conn, query, opts \\ []) do + conn + |> run(&run_close(&1, query, &2, &3), meter(opts), opts) + |> log(:close, query, nil) + end + + @doc """ + Close a prepared query on a database connection and return the result. Raises + an exception on error. + + See `close/3`. + """ + @spec close!(conn, query, [connection_option()] | Keyword.t()) :: result + def close!(conn, query, opts \\ []) do + case close(conn, query, opts) do + {:ok, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Acquire a lock on a connection and run a series of requests on it. + + The return value of this function is the return value of `fun`. + + To use the locked connection call the request with the connection + reference passed as the single argument to the `fun`. If the + connection disconnects all future calls using that connection + reference will fail. + + `run/3` and `transaction/3` can be nested multiple times but a + `transaction/3` call inside another `transaction/3` will be treated + the same as `run/3`. + + > #### Checkout failures {: .warning} + > + > If we cannot check out a connection from the pool, this function raises a + > `DBConnection.ConnectionError` exception. + > If you want to handle these cases, you should rescue + > `DBConnection.ConnectionError` exceptions when using `run/3`. 
+ + ## Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + + The pool may support other options. + + ## Example + + {:ok, res} = DBConnection.run(conn, fn conn -> + DBConnection.execute!(conn, query, []) + end) + + """ + @spec run(conn, (t -> result), [connection_option()] | Keyword.t()) :: result when result: var + def run(conn, fun, opts \\ []) + + def run(%DBConnection{} = conn, fun, _) do + fun.(conn) + end + + def run(pool, fun, opts) do + case checkout(pool, &run_status/3, nil, opts) do + {:ok, conn, old_status, _} -> + try do + result = fun.(conn) + + case run_status(conn, nil, opts) do + {:ok, new_status, _meter} -> + {result, new_status} + + {:retry, err, _meter} -> + disconnect(conn, err) + {result, :error} + end + catch + kind, error -> + checkin(conn) + :erlang.raise(kind, error, __STACKTRACE__) + else + {result, new_status} when new_status == :error or old_status == new_status -> + checkin(conn) + result + + {_result, new_status} -> + err = + DBConnection.ConnectionError.exception( + "connection was checked out with status #{inspect(old_status)} " <> + "but it was checked in with status #{inspect(new_status)}" + ) + + disconnect(conn, err) + raise err + end + + {:error, err, _} -> + raise err + + {kind, reason, stack, _} -> + :erlang.raise(kind, reason, stack) + end + end + + @doc """ + Acquire a lock on a connection and run a series of requests inside a + transaction. The result of the transaction fun is return inside an `:ok` + tuple: `{:ok, result}`. 
+ + To use the locked connection call the request with the connection + reference passed as the single argument to the `fun`. If the + connection disconnects all future calls using that connection + reference will fail. + + `run/3` and `transaction/3` can be nested multiple times. If a transaction is + rolled back or a nested transaction `fun` raises the transaction is marked as + failed. All calls except `run/3`, `transaction/3`, `rollback/2`, `close/3` and + `close!/3` will raise an exception inside a failed transaction until the outer + transaction call returns. All `transaction/3` calls will return + `{:error, :rollback}` if the transaction failed or connection closed and + `rollback/2` is not called for that `transaction/3`. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about begin, commit and rollback + calls made as part of the transaction, either a 1-arity fun, + `{module, function, args}` with `t:DBConnection.LogEntry.t/0` prepended to + `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_begin/2`, `c:handle_commit/2` and + `c:handle_rollback/2`. 
+ + ### Example + + {:ok, res} = DBConnection.transaction(conn, fn conn -> + DBConnection.execute!(conn, query, []) + end) + """ + @spec transaction(conn, (t -> result), [connection_option()] | Keyword.t()) :: + {:ok, result} | {:error, reason :: any} + when result: var + def transaction(conn, fun, opts \\ []) + + def transaction(%DBConnection{conn_mode: :transaction} = conn, fun, _opts) do + %DBConnection{conn_ref: conn_ref} = conn + + try do + result = fun.(conn) + conclude(conn, result) + catch + :throw, {__MODULE__, ^conn_ref, reason} -> + fail(conn) + {:error, reason} + + kind, reason -> + stack = __STACKTRACE__ + fail(conn) + :erlang.raise(kind, reason, stack) + else + result -> + {:ok, result} + end + end + + def transaction(%DBConnection{} = conn, fun, opts) do + case begin(conn, &run/4, opts) do + {:ok, _} -> + run_transaction(conn, fun, &run/4, opts) + + {:error, error} -> + rollback_or_raise(error) + end + end + + def transaction(pool, fun, opts) do + case begin(pool, &checkout/4, opts) do + {:ok, conn, _} -> + run_transaction(conn, fun, &checkin/4, opts) + + {:error, error} -> + rollback_or_raise(error) + end + end + + defp rollback_or_raise(%DBConnection.TransactionError{}), do: {:error, :rollback} + defp rollback_or_raise(other), do: raise(other) + + @doc """ + Rollback a database transaction and release lock on connection. + + When inside of a `transaction/3` call does a non-local return, using a + `throw/1` to cause the transaction to enter a failed state and the + `transaction/3` call returns `{:error, reason}`. If `transaction/3` calls are + nested the connection is marked as failed until the outermost transaction call + does the database rollback. 
+ + ### Example + + {:error, :oops} = DBConnection.transaction(pool, fun(conn) -> + DBConnection.rollback(conn, :oops) + end) + """ + @spec rollback(t, reason :: any) :: no_return + def rollback(conn, reason) + + def rollback(%DBConnection{conn_mode: :transaction} = conn, reason) do + %DBConnection{conn_ref: conn_ref} = conn + throw({__MODULE__, conn_ref, reason}) + end + + def rollback(%DBConnection{} = _conn, _reason) do + raise "not inside transaction" + end + + @doc """ + Return the transaction status of a connection. + + The callback implementation should return the transaction status according to + the database, and not make assumptions based on client-side state. + + This function will raise a `DBConnection.ConnectionError` when called inside a + deprecated `transaction/3`. + + ### Options + + See module documentation. The pool and connection module may support other + options. All options are passed to `c:handle_status/2`. + + ### Example + + # outside of the transaction, the status is `:idle` + DBConnection.status(conn) #=> :idle + + DBConnection.transaction(conn, fn conn -> + DBConnection.status(conn) #=> :transaction + + # run a query that will cause the transaction to rollback, e.g. + # uniqueness constraint violation + DBConnection.execute(conn, bad_query, []) + + DBConnection.status(conn) #=> :error + end) + + DBConnection.status(conn) #=> :idle + """ + @spec status(conn, opts :: Keyword.t()) :: status + def status(conn, opts \\ []) do + case run(conn, &run_status/3, nil, opts) do + {:ok, status, _meter} -> status + {:error, _err, _meter} -> :error + end + end + + @doc """ + Create a stream that will prepare a query, execute it and stream results + using a cursor. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). 
See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_prepare/3`, `c:handle_close/3`, `c:handle_declare/4`, + and `c:handle_deallocate/4`. + + ### Example + + {:ok, results} = DBConnection.transaction(conn, fn conn -> + query = %Query{statement: "SELECT id FROM table"} + stream = DBConnection.prepare_stream(conn, query, []) + Enum.to_list(stream) + end) + """ + @spec prepare_stream(t, query, params, [connection_option()] | Keyword.t()) :: + DBConnection.PrepareStream.t() + def prepare_stream(%DBConnection{} = conn, query, params, opts \\ []) do + %DBConnection.PrepareStream{conn: conn, query: query, params: params, opts: opts} + end + + @doc """ + Create a stream that will execute a prepared query and stream results using a + cursor. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. 
+ See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_declare/4` and `c:handle_deallocate/4`. + + ### Example + + DBConnection.transaction(pool, fn conn -> + query = %Query{statement: "SELECT id FROM table"} + query = DBConnection.prepare!(conn, query) + try do + stream = DBConnection.stream(conn, query, []) + Enum.to_list(stream) + after + # Make sure query is closed! + DBConnection.close(conn, query) + end + end) + """ + @spec stream(t, query, params, [connection_option()] | Keyword.t()) :: DBConnection.Stream.t() + def stream(%DBConnection{} = conn, query, params, opts \\ []) do + %DBConnection.Stream{conn: conn, query: query, params: params, opts: opts} + end + + @doc """ + Reduces a previously built stream or prepared stream. 
+ """ + def reduce(%DBConnection.PrepareStream{} = stream, acc, fun) do + %DBConnection.PrepareStream{conn: conn, query: query, params: params, opts: opts} = stream + + declare = fn conn, opts -> + {query, cursor} = prepare_declare!(conn, query, params, opts) + {:cont, query, cursor} + end + + enum = resource(conn, declare, &stream_fetch/3, &stream_deallocate/3, opts) + enum.(acc, fun) + end + + def reduce(%DBConnection.Stream{} = stream, acc, fun) do + %DBConnection.Stream{conn: conn, query: query, params: params, opts: opts} = stream + + declare = fn conn, opts -> + case declare(conn, query, params, opts) do + {:ok, query, cursor} -> + {:cont, query, cursor} + + {:ok, cursor} -> + {:cont, query, cursor} + + {:error, err} -> + raise err + end + end + + enum = resource(conn, declare, &stream_fetch/3, &stream_deallocate/3, opts) + enum.(acc, fun) + end + + @doc false + def register_as_pool(conn_module) do + Process.put(@connection_module_key, conn_module) + end + + @doc """ + Returns the connection module used by the given connection pool. + + When given a process that is not a connection pool, returns an `:error`. 
+ """ + @spec connection_module(conn) :: {:ok, module} | :error + def connection_module(conn) do + with pid when is_pid(pid) <- pool_pid(conn), + {:dictionary, dictionary} <- Process.info(pid, :dictionary), + {@connection_module_key, module} <- List.keyfind(dictionary, @connection_module_key, 0), + do: {:ok, module}, + else: (_ -> :error) + end + + @doc """ + Returns connection metrics as a list in the shape of: + + [%{ + source: {:pool | :proxy, pid()}, + ready_conn_count: non_neg_integer(), + checkout_queue_length: non_neg_integer() + }] + + """ + @spec get_connection_metrics(conn, Keyword.t()) :: [DBConnection.Pool.connection_metrics()] + + def get_connection_metrics(conn, opts \\ []) do + pool = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + pool.get_connection_metrics(conn) + end + + defp pool_pid(%DBConnection{pool_ref: Holder.pool_ref(pool: pid)}), do: pid + defp pool_pid(conn), do: GenServer.whereis(conn) + + ## Helpers + + defp checkout(pool, meter, opts) do + checkout = System.monotonic_time() + pool_mod = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + + caller = Keyword.get(opts, :caller, self()) + callers = [caller | Process.get(:"$callers") || []] + + try do + pool_mod.checkout(pool, callers, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, past_event(meter, :checkout, checkout)} + else + {:ok, pool_ref, _conn_mod, checkin, _conn_state} -> + conn = %DBConnection{pool_ref: pool_ref, conn_ref: make_ref()} + meter = meter |> past_event(:checkin, checkin) |> past_event(:checkout, checkout) + {:ok, conn, meter} + + {:error, err} -> + {:error, err, past_event(meter, :checkout, checkout)} + end + end + + defp checkin(%DBConnection{pool_ref: pool_ref}) do + Holder.checkin(pool_ref) + end + + defp disconnect(%DBConnection{pool_ref: pool_ref}, err) do + _ = Holder.disconnect(pool_ref, err) + :ok + end + + defp stop(%DBConnection{pool_ref: pool_ref}, kind, reason, stack) do + msg = "client 
#{Util.inspect_pid(self())} stopped: " <> Exception.format(kind, reason, stack) + exception = DBConnection.ConnectionError.exception(msg) + _ = Holder.stop(pool_ref, exception) + :ok + end + + defp retry_or_handle_common_result(return, conn, meter) do + case return do + {:disconnect_and_retry, err, _conn_state} -> + disconnect(conn, err) + {:retry, err, meter} + + _ -> + handle_common_result(return, conn, meter) + end + end + + defp handle_common_result(return, conn, meter) do + case return do + {:ok, result, _conn_state} -> + {:ok, result, meter} + + {:error, err, _conn_state} -> + {:error, err, meter} + + {:disconnect, err, _conn_state} -> + disconnect(conn, err) + {:error, err, meter} + + {:catch, kind, reason, stack} -> + stop(conn, kind, reason, stack) + {kind, reason, stack, meter} + + other -> + bad_return!(other, conn, meter) + end + end + + @compile {:inline, bad_return!: 3} + + defp bad_return!(other, conn, meter) do + try do + raise DBConnection.ConnectionError, "bad return value: #{inspect(other)}" + catch + :error, reason -> + stack = __STACKTRACE__ + stop(conn, :error, reason, stack) + {:error, reason, stack, meter} + end + end + + defp parse(query, meter, opts) do + try do + DBConnection.Query.parse(query, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + query -> + {:ok, query, meter} + end + end + + defp describe(conn, query, meter, opts) do + try do + DBConnection.Query.describe(query, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + raised_close(conn, query, meter, opts, kind, reason, stack) + else + query -> + {:ok, query, meter} + end + end + + defp encode(conn, query, params, meter, opts) do + try do + DBConnection.Query.encode(query, params, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + raised_close(conn, query, meter, opts, kind, reason, stack) + else + params -> + {:ok, params, meter} + end + end + + defp maybe_encode(query, params, meter, opts) do + try do + 
DBConnection.Query.encode(query, params, opts) + rescue + DBConnection.EncodeError -> {:prepare, meter} + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + params -> + {:ok, params, meter} + end + end + + defp decode(query, result, meter, opts) do + meter = event(meter, :decode) + + try do + DBConnection.Query.decode(query, result, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + result -> + {:ok, result, meter} + end + end + + defp prepare_declare(conn, query, params, opts) do + result = + with {:ok, query, meter} <- parse(query, meter(opts), opts) do + run(conn, &run_prepare_declare(&1, query, params, &2, &3), meter, opts) + end + + log(result, :prepare_declare, query, params) + end + + defp prepare_declare!(conn, query, params, opts) do + case prepare_declare(conn, query, params, opts) do + {:ok, query, cursor} -> + {query, cursor} + + {:error, err} -> + raise err + end + end + + defp declare(conn, query, params, opts) do + result = + case maybe_encode(query, params, meter(opts), opts) do + {:prepare, meter} -> + run(conn, &run_prepare_declare(&1, query, params, &2, &3), meter, opts) + + {:ok, params, meter} -> + run(conn, &run_declare(&1, query, params, &2, &3), meter, opts) + + {_, _, _, _} = error -> + error + end + + log(result, :declare, query, params) + end + + defp run_prepare(conn, query, meter, opts) do + with {:ok, query, meter} <- prepare(conn, query, meter, opts) do + describe(conn, query, meter, opts) + end + end + + defp prepare(%DBConnection{pool_ref: pool_ref} = conn, query, meter, opts) do + pool_ref + |> Holder.handle(:handle_prepare, [query], opts) + |> retry_or_handle_common_result(conn, event(meter, :prepare)) + end + + defp run_prepare_execute(conn, query, params, meter, opts) do + with {:ok, query, meter} <- run_prepare(conn, query, meter, opts), + {:ok, params, meter} <- encode(conn, query, params, meter, opts) do + run_execute(conn, query, params, 
meter, opts) + end + end + + defp run_execute(conn, query, params, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :execute) + + case Holder.handle(pool_ref, :handle_execute, [query, params], opts) do + {:ok, query, result, _conn_state} -> + {:ok, query, result, meter} + + {:ok, _, _} = other -> + bad_return!(other, conn, meter) + + other -> + retry_or_handle_common_result(other, conn, meter) + end + end + + defp raised_close(conn, query, meter, opts, kind, reason, stack) do + with {:ok, _, meter} <- run_close(conn, query, meter, opts) do + {kind, reason, stack, meter} + end + end + + defp run_close(conn, query, meter, opts) do + meter = event(meter, :close) + + cleanup(conn, :handle_close, [query], opts) + |> retry_or_handle_common_result(conn, meter) + end + + defp cleanup(conn, fun, args, opts) do + %DBConnection{pool_ref: pool_ref} = conn + Holder.cleanup(pool_ref, fun, args, opts) + end + + # run/4 and checkout/4 are the two entry points to get a connection. + # run returns only the result, checkout also returns the connection. 
+ + defp run(%DBConnection{} = conn, fun, meter, opts) do + with {:retry, err, meter} <- fun.(conn, meter, opts) do + {:error, err, meter} + end + end + + defp run(pool, fun, meter, opts) do + retries = Keyword.get(opts, :checkout_retries, @checkout_retries) + run_with_retries(retries, pool, fun, meter, opts) + end + + defp run_with_retries(retries, pool, fun, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + result = + try do + fun.(conn, meter, opts) + after + checkin(conn) + end + + case result do + {:retry, _err, meter} when retries > 0 -> + run_with_retries(retries - 1, pool, fun, meter, opts) + + {:retry, err, meter} -> + {:error, err, meter} + + other -> + other + end + end + end + + defp checkout(%DBConnection{} = conn, fun, meter, opts) do + case fun.(conn, meter, opts) do + {:ok, result, meter} -> + {:ok, conn, result, meter} + + {:retry, err, meter} -> + {:error, err, meter} + + other -> + other + end + end + + defp checkout(pool, fun, meter, opts) do + retries = Keyword.get(opts, :checkout_retries, @checkout_retries) + checkout_with_retries(retries, pool, fun, meter, opts) + end + + defp checkout_with_retries(retries, pool, fun, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + case fun.(conn, meter, opts) do + {:ok, result, meter} -> + {:ok, conn, result, meter} + + {:retry, err, meter} -> + checkin(conn) + + if retries > 0 do + checkout_with_retries(retries - 1, pool, fun, meter, opts) + else + {:error, err, meter} + end + + error -> + checkin(conn) + error + end + end + end + + defp checkin(%DBConnection{} = conn, fun, meter, opts) do + return = fun.(conn, meter, opts) + checkin(conn) + return + end + + defp meter(opts) do + case Keyword.get(opts, :log) do + nil -> nil + log -> {log, []} + end + end + + defp event(nil, _), + do: nil + + defp event({log, events}, event), + do: {log, [{event, System.monotonic_time()} | events]} + + defp past_event(nil, _, _), + do: nil + + defp 
past_event(log_events, _, nil), + do: log_events + + defp past_event({log, events}, event, time), + do: {log, [{event, time} | events]} + + defp log({:ok, res, meter}, call, query, params), + do: log(meter, call, query, params, {:ok, res}) + + defp log({:ok, res1, res2, meter}, call, query, params), + do: log(meter, call, query, params, {:ok, res1, res2}) + + defp log({ok, res, meter}, call, query, cursor) when ok in [:cont, :halt], + do: log(meter, call, query, cursor, {ok, res}) + + defp log({:error, err, meter}, call, query, params), + do: log(meter, call, query, params, {:error, err}) + + defp log({kind, reason, stack, meter}, call, query, params), + do: log(meter, call, query, params, {kind, reason, stack}) + + defp log(nil, _, _, _, result), + do: log_result(result) + + defp log({log, times}, call, query, params, result) do + entry = DBConnection.LogEntry.new(call, query, params, times, entry_result(result)) + + try do + log(log, entry) + catch + kind, reason -> + stack = __STACKTRACE__ + log_raised(entry, kind, reason, stack) + end + + log_result(result) + end + + defp entry_result({kind, reason, stack}) + when kind in [:error, :exit, :throw] do + msg = "an exception was raised: " <> Exception.format(kind, reason, stack) + {:error, %DBConnection.ConnectionError{message: msg}} + end + + defp entry_result({ok, res}) when ok in [:cont, :halt], + do: {:ok, res} + + defp entry_result(other), do: other + + defp log({mod, fun, args}, entry), do: apply(mod, fun, [entry | args]) + defp log(fun, entry), do: fun.(entry) + + defp log_result({kind, reason, stack}) when kind in [:error, :exit, :throw] do + :erlang.raise(kind, reason, stack) + end + + defp log_result(other), do: other + + defp log_raised(entry, kind, reason, stack) do + reason = Exception.normalize(kind, reason, stack) + + Logger.error( + fn -> + "an exception was raised logging #{inspect(entry)}: " <> + Exception.format(kind, reason, stack) + end, + crash_reason: {crash_reason(kind, reason), stack} + ) + 
catch + _, _ -> + :ok + end + + defp crash_reason(:throw, value), do: {:nocatch, value} + defp crash_reason(_, value), do: value + + defp run_transaction(conn, fun, run, opts) do + %DBConnection{conn_ref: conn_ref} = conn + + try do + result = fun.(%{conn | conn_mode: :transaction}) + conclude(conn, result) + catch + :throw, {__MODULE__, ^conn_ref, reason} -> + reset(conn) + + case rollback(conn, run, opts) do + {:ok, _} -> + {:error, reason} + + {:error, %DBConnection.TransactionError{}} -> + {:error, reason} + + {:error, %DBConnection.ConnectionError{}} -> + {:error, reason} + + {:error, err} -> + raise err + end + + kind, reason -> + stack = __STACKTRACE__ + reset(conn) + _ = rollback(conn, run, opts) + :erlang.raise(kind, reason, stack) + else + result -> + case commit(conn, run, opts) do + {:ok, _} -> + {:ok, result} + + {:error, %DBConnection.TransactionError{}} -> + {:error, :rollback} + + {:error, err} -> + raise err + end + after + reset(conn) + end + end + + defp fail(%DBConnection{pool_ref: pool_ref}) do + case Holder.status?(pool_ref, :ok) do + true -> Holder.put_status(pool_ref, :aborted) + false -> :ok + end + end + + defp conclude(%DBConnection{pool_ref: pool_ref, conn_ref: conn_ref}, result) do + case Holder.status?(pool_ref, :ok) do + true -> result + false -> throw({__MODULE__, conn_ref, :rollback}) + end + end + + defp reset(%DBConnection{pool_ref: pool_ref}) do + case Holder.status?(pool_ref, :aborted) do + true -> Holder.put_status(pool_ref, :ok) + false -> :ok + end + end + + defp begin(conn, run, opts) do + case run.(conn, &run_begin/3, meter(opts), opts) do + {:ok, conn, {query, result}, meter} -> + query = String.Chars.to_string(query) + log({:ok, conn, result, meter}, :begin, query, nil) + + {:ok, {query, result}, meter} -> + query = String.Chars.to_string(query) + log({:ok, result, meter}, :begin, query, nil) + + other -> + log(other, :begin, :begin, nil) + end + end + + defp run_begin(conn, meter, opts) do + %DBConnection{pool_ref: 
pool_ref} = conn + meter = event(meter, :begin) + + case Holder.handle(pool_ref, :handle_begin, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + status_disconnect(conn, status, meter) + + {:ok, query, result, _conn_status} -> + {:ok, {query, result}, meter} + + other -> + retry_or_handle_common_result(other, conn, meter) + end + end + + defp rollback(conn, run, opts) do + conn + |> run.(&run_rollback/3, meter(opts), opts) + |> log(:rollback, :rollback, nil) + end + + defp run_rollback(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :rollback) + + case Holder.handle(pool_ref, :handle_rollback, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + status_disconnect(conn, status, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp commit(conn, run, opts) do + case run.(conn, &run_commit/3, meter(opts), opts) do + {:rollback, {:ok, result, meter}} -> + log(meter, :commit, :rollback, nil, {:ok, result}) + err = DBConnection.TransactionError.exception(:error) + {:error, err} + + {:rollback, other} -> + log(other, :commit, :rollback, nil) + + other -> + log(other, :commit, :commit, nil) + end + end + + defp run_commit(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :commit) + + case Holder.handle(pool_ref, :handle_commit, [], opts) do + {:error, _conn_state} -> + {:rollback, run_rollback(conn, meter, opts)} + + {status, _conn_state} when status in [:idle, :transaction] -> + status_disconnect(conn, status, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp status_disconnect(conn, status, meter) do + err = DBConnection.TransactionError.exception(status) + disconnect(conn, err) + {:error, err, meter} + end + + defp run_status(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + + # status queries are not logged, which means we need to deal + # with 
catch and disconnections explicitly + case Holder.handle(pool_ref, :handle_status, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + {:ok, status, meter} + + other -> + case retry_or_handle_common_result(other, conn, meter) do + {:error, _, meter} -> {:ok, :error, meter} + {kind, reason, stack, _meter} -> :erlang.raise(kind, reason, stack) + _ -> other + end + end + end + + defp run_prepare_declare(conn, query, params, meter, opts) do + with {:ok, query, meter} <- prepare(conn, query, meter, opts), + {:ok, query, meter} <- describe(conn, query, meter, opts), + {:ok, params, meter} <- encode(conn, query, params, meter, opts), + {:ok, query, cursor, meter} <- run_declare(conn, query, params, meter, opts) do + {:ok, query, cursor, meter} + end + end + + defp run_declare(conn, query, params, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :declare) + + case Holder.handle(pool_ref, :handle_declare, [query, params], opts) do + {:ok, query, result, _conn_state} -> + {:ok, query, result, meter} + + {:ok, _, _} = other -> + bad_return!(other, conn, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp stream_fetch(%DBConnection{} = conn, {:cont, query, cursor}, opts) do + with {ok, result, meter} when ok in [:cont, :halt] <- + fetch(conn, [query, cursor], meter(opts), opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {ok, result, meter} + end + |> log(:fetch, query, cursor) + |> case do + {ok, result} when ok in [:cont, :halt] -> + {[result], {ok, query, cursor}} + + {:error, err} -> + raise err + end + end + + defp stream_fetch(_, {:halt, _, _} = state, _) do + {:halt, state} + end + + defp fetch(conn, args, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :fetch) + + case Holder.handle(pool_ref, :handle_fetch, args, opts) do + {:cont, result, _conn_state} -> + {:cont, result, meter} + + {:halt, result, _conn_state} -> 
+ {:halt, result, meter} + + other -> + handle_common_result(other, conn, meter) + end + end + + defp stream_deallocate(conn, {_status, query, cursor}, opts) do + meter = event(meter(opts), :deallocate) + + conn + |> cleanup(:handle_deallocate, [query, cursor], opts) + |> handle_common_result(conn, meter) + |> log(:deallocate, query, cursor) + end + + defp resource(%DBConnection{} = conn, start, next, stop, opts) do + start = fn -> start.(conn, opts) end + next = fn state -> next.(conn, state, opts) end + stop = fn state -> stop.(conn, state, opts) end + Stream.resource(start, next, stop) + end +end diff --git a/deps/db_connection/lib/db_connection/app.ex b/deps/db_connection/lib/db_connection/app.ex new file mode 100644 index 0000000..5bf6466 --- /dev/null +++ b/deps/db_connection/lib/db_connection/app.ex @@ -0,0 +1,23 @@ +defmodule DBConnection.App do + @moduledoc false + use Application + + @impl true + def start(_type, _args) do + children = [ + {Task.Supervisor, name: DBConnection.Task}, + dynamic_supervisor(DBConnection.Ownership.Supervisor), + dynamic_supervisor(DBConnection.ConnectionPool.Supervisor), + DBConnection.Watcher + ] + + Supervisor.start_link(children, strategy: :one_for_all, name: __MODULE__) + end + + defp dynamic_supervisor(name) do + Supervisor.child_spec( + {DynamicSupervisor, name: name, strategy: :one_for_one}, + id: name + ) + end +end diff --git a/deps/db_connection/lib/db_connection/backoff.ex b/deps/db_connection/lib/db_connection/backoff.ex new file mode 100644 index 0000000..c98f322 --- /dev/null +++ b/deps/db_connection/lib/db_connection/backoff.ex @@ -0,0 +1,111 @@ +defmodule DBConnection.Backoff do + # This module provides a functional abstraction over backoffs with different types. It exposes + # a struct and a couple of functions to work with it. 
+ @moduledoc false + @compile :nowarn_deprecated_function + + alias DBConnection.Backoff + + @default_type :rand_exp + @min 1_000 + @max 30_000 + + @type t :: %__MODULE__{ + type: :stop | :rand | :exp | :rand_exp, + min: non_neg_integer(), + max: non_neg_integer(), + state: term() + } + + defstruct [:type, :min, :max, :state] + + @spec new(keyword) :: t | nil + def new(opts) when is_list(opts) do + case Keyword.get(opts, :backoff_type, @default_type) do + :stop -> + nil + + type -> + {min, max} = min_max(opts) + new(type, min, max) + end + end + + @spec backoff(t) :: {non_neg_integer, t} + def backoff(backoff) + + def backoff(%Backoff{type: :rand, min: min, max: max} = s) do + {rand(min, max), s} + end + + def backoff(%Backoff{type: :exp, min: min, state: nil} = s) do + {min, %{s | state: min}} + end + + def backoff(%Backoff{type: :exp, max: max, state: prev} = s) do + next = min(Bitwise.<<<(prev, 1), max) + {next, %{s | state: next}} + end + + def backoff(%Backoff{type: :rand_exp, max: max, state: state} = s) do + {prev, lower} = state + next_min = min(prev, lower) + next_max = min(prev * 3, max) + next = rand(next_min, next_max) + {next, %{s | state: {next, lower}}} + end + + @spec reset(t) :: t + def reset(backoff) + + def reset(%Backoff{type: :rand} = s), do: s + def reset(%Backoff{type: :exp} = s), do: %{s | state: nil} + + def reset(%Backoff{type: :rand_exp, min: min, state: {_, lower}} = s) do + %{s | state: {min, lower}} + end + + ## Internal + + defp min_max(opts) do + case {opts[:backoff_min], opts[:backoff_max]} do + {nil, nil} -> {@min, @max} + {nil, max} -> {min(@min, max), max} + {min, nil} -> {min, max(min, @max)} + {min, max} -> {min, max} + end + end + + defp new(_, min, _) when not (is_integer(min) and min >= 0) do + raise ArgumentError, "minimum #{inspect(min)} not 0 or a positive integer" + end + + defp new(_, _, max) when not (is_integer(max) and max >= 0) do + raise ArgumentError, "maximum #{inspect(max)} not 0 or a positive integer" + end + + 
defp new(_, min, max) when min > max do + raise ArgumentError, "minimum #{min} is greater than maximum #{max}" + end + + defp new(:rand, min, max) do + %Backoff{type: :rand, min: min, max: max, state: nil} + end + + defp new(:exp, min, max) do + %Backoff{type: :exp, min: min, max: max, state: nil} + end + + defp new(:rand_exp, min, max) do + lower = max(min, div(max, 3)) + %Backoff{type: :rand_exp, min: min, max: max, state: {min, lower}} + end + + defp new(type, _, _) do + raise ArgumentError, "unknown type #{inspect(type)}" + end + + defp rand(min, max) do + :rand.uniform(max - min + 1) + min - 1 + end +end diff --git a/deps/db_connection/lib/db_connection/connection.ex b/deps/db_connection/lib/db_connection/connection.ex new file mode 100644 index 0000000..5b6abf7 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection.ex @@ -0,0 +1,546 @@ +defmodule DBConnection.Connection do + @moduledoc false + + @behaviour :gen_statem + + require Logger + alias DBConnection.Backoff + alias DBConnection.Holder + alias DBConnection.Util + + @timeout 15_000 + + @doc false + def start_link(mod, opts, pool, tag) do + start_opts = Keyword.take(opts, [:debug, :spawn_opt]) + sensitive_options = %DBConnection.SensitiveData{data: opts} + :gen_statem.start_link(__MODULE__, {mod, sensitive_options, pool, tag}, start_opts) + end + + @doc false + def child_spec(mod, opts, pool, tag, child_opts) do + Supervisor.child_spec( + %{id: __MODULE__, start: {__MODULE__, :start_link, [mod, opts, pool, tag]}}, + child_opts + ) + end + + @doc false + def disconnect({pid, ref}, err, state) do + :gen_statem.cast(pid, {:disconnect, ref, err, state}) + end + + @doc false + def stop({pid, ref}, err, state) do + :gen_statem.cast(pid, {:stop, ref, err, state}) + end + + @doc false + def ping({pid, ref}, state) do + :gen_statem.cast(pid, {:ping, ref, state}) + end + + ## gen_statem API + + @doc false + @impl :gen_statem + def callback_mode, do: :handle_event_function + + @doc false + @impl 
:gen_statem + def init({mod, %DBConnection.SensitiveData{data: opts}, pool, tag}) do + pool_index = Keyword.get(opts, :pool_index) + label = if pool_index, do: "db_conn_#{pool_index}", else: "db_conn" + Util.set_label(label) + + s = %{ + mod: mod, + opts: opts, + state: nil, + client: :closed, + pool: pool, + tag: tag, + timer: nil, + connected_at: nil, + backoff: Backoff.new(opts), + connection_listeners: Keyword.get(opts, :connection_listeners, []), + after_connect: Keyword.get(opts, :after_connect), + after_connect_timeout: Keyword.get(opts, :after_connect_timeout, @timeout) + } + + {:ok, :no_state, s, {:next_event, :internal, {:connect, :init}}} + end + + @impl :gen_statem + def handle_event(type, info, state, s) + + def handle_event(:internal, {:connect, _info}, :no_state, s) do + %{mod: mod, opts: opts, backoff: backoff, after_connect: after_connect} = s + + try do + apply(mod, :connect, [connect_opts(opts)]) + rescue + e -> + {e, stack} = maybe_sanitize_exception(e, __STACKTRACE__, opts) + reraise e, stack + else + {:ok, state} when after_connect != nil -> + ref = make_ref() + connected_at = System.monotonic_time() + :gen_statem.cast(self(), {:after_connect, ref}) + {:keep_state, %{s | state: state, client: {ref, :connect}, connected_at: connected_at}} + + {:ok, state} -> + backoff = backoff && Backoff.reset(backoff) + ref = make_ref() + connected_at = System.monotonic_time() + :gen_statem.cast(self(), {:connected, ref}) + + {:keep_state, + %{ + s + | state: state, + client: {ref, :connect}, + backoff: backoff, + connected_at: connected_at + }} + + {:error, err} when is_nil(backoff) -> + Logger.error( + fn -> + [ + inspect(mod), + " (", + Util.inspect_pid(self()), + ") failed to connect: " | Exception.format_banner(:error, err, []) + ] + end, + crash_reason: {err, []} + ) + + raise err + + {:error, err} -> + Logger.error( + fn -> + [ + inspect(mod), + ?\s, + ?(, + Util.inspect_pid(self()), + ") failed to connect: " + | Exception.format_banner(:error, err, 
[]) + ] + end, + crash_reason: {err, []} + ) + + {timeout, backoff} = Backoff.backoff(backoff) + {:keep_state, %{s | backoff: backoff}, {{:timeout, :backoff}, timeout, nil}} + end + end + + def handle_event(:internal, {:disconnect, {log, err}}, :no_state, %{mod: mod} = s) do + if log == :log do + severity = + case err do + %DBConnection.ConnectionError{severity: severity} -> severity + _ -> :error + end + + Logger.log(severity, fn -> + [ + inspect(mod), + ?\s, + ?(, + Util.inspect_pid(self()), + ") disconnected: " | Exception.format_banner(:error, err, []) + ] + end) + + :ok + end + + %{state: state, client: client, timer: timer, backoff: backoff} = s + demonitor(client) + cancel_timer(timer) + :ok = apply(mod, :disconnect, [err, state]) + s = %{s | state: nil, client: :closed, timer: nil, connected_at: nil} + + notify_connection_listeners(:disconnected, s) + + case client do + _ when backoff == nil -> + {:stop, {:shutdown, err}, s} + + {_, :after_connect} -> + {timeout, backoff} = Backoff.backoff(backoff) + {:keep_state, %{s | backoff: backoff}, {{:timeout, :backoff}, timeout, nil}} + + _ -> + {:keep_state, s, {:next_event, :internal, {:connect, :disconnect}}} + end + end + + def handle_event({:timeout, :backoff}, _content, :no_state, s) do + {:keep_state, s, {:next_event, :internal, {:connect, :backoff}}} + end + + def handle_event(:cast, {:ping, ref, state}, :no_state, %{client: {ref, :pool}, mod: mod} = s) do + case apply(mod, :ping, [state]) do + {:ok, state} -> + pool_update(state, s) + + {:disconnect, err, state} -> + {:keep_state, %{s | state: state}, {:next_event, :internal, {:disconnect, {:log, err}}}} + end + end + + def handle_event(:cast, {:disconnect, ref, err, state}, :no_state, %{client: {ref, _}} = s) do + {:keep_state, %{s | state: state}, {:next_event, :internal, {:disconnect, {:log, err}}}} + end + + def handle_event(:cast, {:stop, ref, err, state}, :no_state, %{client: {ref, _}} = s) do + {_, stack} = :erlang.process_info(self(), 
:current_stacktrace) + + case err do + ok when ok in [:normal, :shutdown] -> + :ok + + {:shutdown, _term} -> + :ok + + _ -> + reason = + case err do + %{__exception__: true} -> Exception.format_banner(:error, err, stack) + _other -> "** #{inspect(err)}" + end + + format = + ~c"** State machine ~p terminating~n" ++ + ~c"** Reason for termination ==~n" ++ + ~c"~s~n" + + :error_logger.format(format, [self(), reason]) + end + + {:stop, {err, stack}, %{s | state: state}} + end + + def handle_event(:cast, {tag, _, _, _}, :no_state, s) when tag in [:disconnect, :stop] do + handle_timeout(s) + end + + def handle_event(:cast, {:after_connect, ref}, :no_state, %{client: {ref, :connect}} = s) do + %{ + mod: mod, + state: state, + after_connect: after_connect, + after_connect_timeout: timeout, + opts: opts + } = s + + notify_connection_listeners(:connected, s) + + case apply(mod, :checkout, [state]) do + {:ok, state} -> + opts = [timeout: timeout] ++ opts + {pid, ref} = DBConnection.Task.run_child(mod, state, after_connect, opts) + timer = start_timer(pid, timeout) + s = %{s | client: {ref, :after_connect}, timer: timer, state: state} + {:keep_state, s} + + {:disconnect, err, state} -> + {:keep_state, %{s | state: state}, {:next_event, :internal, {:disconnect, {:log, err}}}} + end + end + + def handle_event(:cast, {:after_connect, _}, :no_state, _s) do + :keep_state_and_data + end + + def handle_event(:cast, {:connected, ref}, :no_state, %{client: {ref, :connect}} = s) do + %{mod: mod, state: state} = s + + notify_connection_listeners(:connected, s) + + case apply(mod, :checkout, [state]) do + {:ok, state} -> + pool_update(state, s) + + {:disconnect, err, state} -> + {:keep_state, %{s | state: state}, {:next_event, :internal, {:disconnect, {:log, err}}}} + end + end + + def handle_event(:cast, {:connected, _}, :no_state, _s) do + :keep_state_and_data + end + + def handle_event( + :info, + {:DOWN, ref, _, pid, reason}, + :no_state, + %{client: {ref, :after_connect}} = s + ) do 
+ message = + "client #{Util.inspect_pid(pid)} exited: " <> Exception.format_exit(reason) + + err = DBConnection.ConnectionError.exception(message) + + {:keep_state, %{s | client: {nil, :after_connect}}, + {:next_event, :internal, {:disconnect, {down_log(reason), err}}}} + end + + def handle_event(:info, {:DOWN, mon, _, pid, reason}, :no_state, %{client: {ref, mon}} = s) do + message = + "client #{Util.inspect_pid(pid)} exited: " <> Exception.format_exit(reason) + + err = DBConnection.ConnectionError.exception(message) + + {:keep_state, %{s | client: {ref, nil}}, + {:next_event, :internal, {:disconnect, {down_log(reason), err}}}} + end + + def handle_event( + :info, + {:timeout, timer, {__MODULE__, pid, timeout}}, + :no_state, + %{timer: timer} = s + ) + when is_reference(timer) do + message = + "client #{Util.inspect_pid(pid)} timed out because it checked out " <> + "the connection for longer than #{timeout}ms" + + exc = + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + message <> + "\n\n#{Util.inspect_pid(pid)} was at location:\n\n" <> + Exception.format_stacktrace(stacktrace) + + _ -> + message + end + |> DBConnection.ConnectionError.exception() + + {:keep_state, %{s | timer: nil}, {:next_event, :internal, {:disconnect, {:log, exc}}}} + end + + def handle_event( + :info, + {:"ETS-TRANSFER", holder, _pid, {msg, ref, extra}}, + :no_state, + %{client: {ref, :after_connect}, timer: timer} = s + ) do + {_, state} = Holder.delete(holder) + cancel_timer(timer) + s = %{s | timer: nil} + + case msg do + :checkin -> handle_checkin(state, s) + :disconnect -> handle_event(:cast, {:disconnect, ref, extra, state}, :no_state, s) + :stop -> handle_event(:cast, {:stop, ref, extra, state}, :no_state, s) + end + end + + # We discard EXIT messages which may arrive if the process is trapping exits + def handle_event(:info, {:EXIT, _, _}, :no_state, s) do + handle_timeout(s) + end + + def handle_event(:info, msg, :no_state, %{mod: mod} = s) do 
+ Logger.info(fn -> + [inspect(mod), ?\s, ?(, Util.inspect_pid(self()), ") missed message: " | inspect(msg)] + end) + + handle_timeout(s) + end + + @doc false + @impl :gen_statem + # If client is :closed then the connection was previously disconnected + # and cleanup is not required. + def terminate(_, _, %{client: :closed}), do: :ok + + def terminate(reason, _, s) do + %{mod: mod, state: state} = s + msg = "connection exited: " <> Exception.format_exit(reason) + + msg + |> DBConnection.ConnectionError.exception() + |> mod.disconnect(state) + end + + @doc false + @impl :gen_statem + def format_status(info, [_, :no_state, %{client: :closed, mod: mod}]) do + case info do + :normal -> [{:data, [{~c"Module", mod}]}] + :terminate -> mod + end + end + + def format_status(info, [pdict, :no_state, %{mod: mod, state: state}]) do + case function_exported?(mod, :format_status, 2) do + true when info == :normal -> + normal_status(mod, pdict, state) + + false when info == :normal -> + normal_status_default(mod, state) + + true when info == :terminate -> + {mod, terminate_status(mod, pdict, state)} + + false when info == :terminate -> + {mod, state} + end + end + + ## Helpers + + defp maybe_sanitize_exception(e, stack, opts) do + if Keyword.get(opts, :show_sensitive_data_on_connection_error, false) do + {e, stack} + else + message = + "connect raised #{inspect(e.__struct__)} exception#{sanitized_message(e)}. " <> + "The exception details are hidden, as they may contain sensitive data such as " <> + "database credentials. 
You may set :show_sensitive_data_on_connection_error " <> + "to true when starting your connection if you wish to see all of the details" + + {RuntimeError.exception(message), cleanup_stacktrace(stack)} + end + end + + defp sanitized_message(%KeyError{} = e), do: ": #{Exception.message(%{e | term: nil})}" + defp sanitized_message(_), do: "" + + defp connect_opts(opts) do + case Keyword.get(opts, :configure) do + {mod, fun, args} -> + apply(mod, fun, [opts | args]) + + fun when is_function(fun, 1) -> + fun.(opts) + + nil -> + opts + end + end + + defp down_log(:normal), do: :nolog + defp down_log(:shutdown), do: :nolog + defp down_log({:shutdown, _}), do: :nolog + defp down_log(_), do: :log + + defp handle_timeout(s), do: {:keep_state, s} + + defp demonitor({_, mon}) when is_reference(mon) do + Process.demonitor(mon, [:flush]) + end + + defp demonitor({mon, :after_connect}) when is_reference(mon) do + Process.demonitor(mon, [:flush]) + end + + defp demonitor({_, _}), do: true + defp demonitor(nil), do: true + + defp start_timer(_, :infinity), do: nil + + defp start_timer(pid, timeout) do + :erlang.start_timer(timeout, self(), {__MODULE__, pid, timeout}) + end + + defp cancel_timer(nil), do: :ok + + defp cancel_timer(timer) do + case :erlang.cancel_timer(timer) do + false -> flush_timer(timer) + _ -> :ok + end + end + + defp flush_timer(timer) do + receive do + {:timeout, ^timer, {__MODULE__, _, _}} -> + :ok + after + 0 -> + raise ArgumentError, "timer #{inspect(timer)} does not exist" + end + end + + defp handle_checkin(state, s) do + %{backoff: backoff, client: client} = s + backoff = backoff && Backoff.reset(backoff) + demonitor(client) + pool_update(state, %{s | client: nil, backoff: backoff}) + end + + defp pool_update(state, %{pool: pool, tag: tag, mod: mod, connected_at: connected_at} = s) do + case Holder.update(pool, tag, mod, state, connected_at) do + {:ok, ref} -> + {:keep_state, %{s | client: {ref, :pool}, state: state}, :hibernate} + + :error -> + 
{:stop, {:shutdown, :no_more_pool}, s} + end + end + + defp normal_status(mod, pdict, state) do + try do + mod.format_status(:normal, [pdict, state]) + catch + _, _ -> + normal_status_default(mod, state) + else + status -> + status + end + end + + defp normal_status_default(mod, state) do + [{:data, [{~c"Module", mod}, {~c"State", state}]}] + end + + defp terminate_status(mod, pdict, state) do + try do + mod.format_status(:terminate, [pdict, state]) + catch + _, _ -> + state + else + status -> + status + end + end + + defp cleanup_stacktrace(stack) do + case stack do + [{_, _, arity, _} | _rest] = stacktrace when is_integer(arity) -> + stacktrace + + [{mod, fun, args, info} | rest] when is_list(args) -> + [{mod, fun, length(args), info} | rest] + end + end + + defp notify_connection_listeners(action, %{} = state) do + %{connection_listeners: connection_listeners} = state + + {listeners, message} = + case connection_listeners do + listeners when is_list(listeners) -> + {listeners, {action, self()}} + + {listeners, tag} when is_list(listeners) -> + {listeners, {action, self(), tag}} + end + + Enum.each(listeners, &send(&1, message)) + end +end diff --git a/deps/db_connection/lib/db_connection/connection_error.ex b/deps/db_connection/lib/db_connection/connection_error.ex new file mode 100644 index 0000000..0c98ec8 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection_error.ex @@ -0,0 +1,24 @@ +defmodule DBConnection.ConnectionError do + @moduledoc """ + A generic connection error exception. + + The raised exception might include the reason which would be useful + to programmatically determine what was causing the error. 
+ """ + + @typedoc since: "2.7.0" + @type t() :: %__MODULE__{ + message: String.t(), + reason: :error | :queue_timeout, + severity: Logger.level() + } + + defexception [:message, severity: :error, reason: :error] + + @doc false + def exception(message, reason) when is_binary(message) and reason in [:error, :queue_timeout] do + message + |> exception() + |> Map.replace!(:reason, reason) + end +end diff --git a/deps/db_connection/lib/db_connection/connection_pool.ex b/deps/db_connection/lib/db_connection/connection_pool.ex new file mode 100644 index 0000000..e9b9a20 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection_pool.ex @@ -0,0 +1,402 @@ +defmodule DBConnection.ConnectionPool do + @moduledoc """ + The default connection pool. + + The queueing algorithm is based on [CoDel](https://queue.acm.org/appendices/codel.html). + + You're not supposed to call any functions on this pool directly, but only pass this + as the value of the `:pool` option in functions such as `DBConnection.start_link/2`. 
+ """ + + use GenServer + alias DBConnection.Holder + alias DBConnection.Util + + @behaviour DBConnection.Pool + + @queue_target 50 + @queue_interval 2000 + @idle_interval 1000 + @time_unit 1000 + + @doc false + def start_link({mod, opts}) do + GenServer.start_link(__MODULE__, {mod, opts}, start_opts(opts)) + end + + @doc false + @impl DBConnection.Pool + def checkout(pool, callers, opts) do + Holder.checkout(pool, callers, opts) + end + + @doc false + @impl DBConnection.Pool + def disconnect_all(pool, interval, _opts) do + GenServer.call(pool, {:disconnect_all, interval}, :infinity) + end + + @doc false + @impl DBConnection.Pool + def get_connection_metrics(pool) do + GenServer.call(pool, :get_connection_metrics, :infinity) + end + + ## GenServer api + + @impl GenServer + def init({mod, opts}) do + DBConnection.register_as_pool(mod) + + queue = :ets.new(__MODULE__.Queue, [:protected, :ordered_set, decentralized_counters: true]) + + max_lifetime = + case Keyword.fetch(opts, :max_lifetime) do + {:ok, %Range{first: first, last: last, step: 1}} when first >= 0 and last >= first -> + {System.convert_time_unit(first, :millisecond, :native), last - first} + + {:ok, invalid} -> + raise ArgumentError, + "invalid value for :max_lifetime, expected a non-negative step-1 range, got: #{inspect(invalid)}" + + :error -> + nil + end + + ts = {nil, max_lifetime} + {:ok, _} = DBConnection.ConnectionPool.Pool.start_supervised(queue, mod, opts) + target = Keyword.get(opts, :queue_target, @queue_target) + interval = Keyword.get(opts, :queue_interval, @queue_interval) + idle_interval = Keyword.get(opts, :idle_interval, @idle_interval) + idle_limit = Keyword.get_lazy(opts, :idle_limit, fn -> Keyword.get(opts, :pool_size, 1) end) + now_in_native = System.monotonic_time() + now_in_ms = System.convert_time_unit(now_in_native, :native, @time_unit) + + codel = %{ + target: target, + interval: interval, + delay: 0, + slow: false, + next: now_in_ms, + poll: nil, + idle_interval: idle_interval, 
+ idle_limit: idle_limit, + idle: nil + } + + codel = start_idle(now_in_native, start_poll(now_in_ms, now_in_ms, codel)) + {:ok, {:busy, queue, codel, ts}} + end + + @impl GenServer + def handle_call(:get_connection_metrics, _from, {status, queue, _, _} = state) do + {ready_conn_count, checkout_queue_length} = + case status do + :busy -> + {0, :ets.select_count(queue, [{{{:_, :_, :_}}, [], [true]}])} + + :ready -> + {:ets.select_count(queue, [{{{:_, :_}}, [], [true]}]), 0} + end + + metrics = %{ + source: {:pool, self()}, + ready_conn_count: ready_conn_count, + checkout_queue_length: checkout_queue_length + } + + {:reply, [metrics], state} + end + + def handle_call({:disconnect_all, interval}, _from, {type, queue, codel, ts}) do + {_, max_lifetime} = ts + ts = {{System.monotonic_time(), interval}, max_lifetime} + {:reply, :ok, {type, queue, codel, ts}} + end + + @impl GenServer + def handle_info( + {:db_connection, from, {:checkout, _caller, now, queue?}}, + {:busy, queue, _, _} = busy + ) do + case queue? 
do + true -> + :ets.insert(queue, {{now, System.unique_integer(), from}}) + {:noreply, busy} + + false -> + message = "connection not available and queuing is disabled" + err = DBConnection.ConnectionError.exception(message) + Holder.reply_error(from, err) + {:noreply, busy} + end + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}} = checkout, + {:ready, queue, _codel, _ts} = ready + ) do + case :ets.first(queue) do + {queued_in_native, holder} = key -> + Holder.handle_checkout(holder, from, queue, queued_in_native) and :ets.delete(queue, key) + {:noreply, ready} + + :"$end_of_table" -> + handle_info(checkout, put_elem(ready, 0, :busy)) + end + end + + def handle_info({:"ETS-TRANSFER", holder, pid, queue}, {_, queue, _, _} = data) do + message = "client #{Util.inspect_pid(pid)} exited" + err = DBConnection.ConnectionError.exception(message: message, severity: :info) + Holder.handle_disconnect(holder, err) + {:noreply, data} + end + + def handle_info({:"ETS-TRANSFER", holder, _, {msg, queue, extra}}, {_, queue, _, ts} = data) do + case msg do + :checkin -> + owner = self() + + case :ets.info(holder, :owner) do + ^owner -> + {interval, max_lifetime} = ts + + if Holder.maybe_disconnect(holder, interval, max_lifetime) do + {:noreply, data} + else + handle_checkin(holder, extra, data) + end + + :undefined -> + {:noreply, data} + end + + :disconnect -> + Holder.handle_disconnect(holder, extra) + {:noreply, data} + + :stop -> + Holder.handle_stop(holder, extra) + {:noreply, data} + end + end + + def handle_info({:timeout, deadline, {queue, holder, pid, len}}, {_, queue, _, _} = data) do + # Check that timeout refers to current holder (and not previous) + if Holder.handle_deadline(holder, deadline) do + message = + "client #{Util.inspect_pid(pid)} timed out because " <> + "it queued and checked out the connection for longer than #{len}ms" + + exc = + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + 
message <> + "\n\n#{Util.inspect_pid(pid)} was at location:\n\n" <> + Exception.format_stacktrace(stacktrace) + + _ -> + message + end + |> DBConnection.ConnectionError.exception() + + Holder.handle_disconnect(holder, exc) + end + + {:noreply, data} + end + + def handle_info({:timeout, poll, {time, last_sent}}, {_, _, %{poll: poll}, _} = data) do + {status, queue, codel, ts} = data + + # If no queue progress since last poll check queue + case :ets.first(queue) do + {sent, _, _} when sent <= last_sent and status == :busy -> + delay = time - sent + timeout(delay, time, queue, start_poll(time, sent, codel), ts) + + {sent, _, _} -> + {:noreply, {status, queue, start_poll(time, sent, codel), ts}} + + _ -> + {:noreply, {status, queue, start_poll(time, time, codel), ts}} + end + end + + def handle_info({:timeout, idle, past_in_native}, {_, _, %{idle: idle}, _} = data) do + {status, queue, %{idle_limit: limit} = codel, ts} = data + drop_idle(past_in_native, limit, status, queue, codel, ts) + end + + defp drop_idle(past_in_native, limit, status, queue, codel, ts) do + with true <- status == :ready and limit > 0, + {queued_in_native, holder} = key when queued_in_native <= past_in_native <- + :ets.first(queue) do + :ets.delete(queue, key) + {interval, max_lifetime} = ts + Holder.maybe_disconnect(holder, interval, max_lifetime) or Holder.handle_ping(holder) + drop_idle(past_in_native, limit - 1, status, queue, codel, ts) + else + _ -> + {:noreply, {status, queue, start_idle(System.monotonic_time(), codel), ts}} + end + end + + defp timeout(delay, time, queue, codel, ts) do + case codel do + %{delay: min_delay, next: next, target: target, interval: interval} + when time >= next and min_delay > target -> + codel = %{codel | slow: true, delay: delay, next: time + interval} + drop_slow(time, target * 2, queue) + {:noreply, {:busy, queue, codel, ts}} + + %{next: next, interval: interval} when time >= next -> + codel = %{codel | slow: false, delay: delay, next: time + interval} + 
{:noreply, {:busy, queue, codel, ts}} + + _ -> + {:noreply, {:busy, queue, codel, ts}} + end + end + + defp drop_slow(time, timeout, queue) do + min_sent = time - timeout + match = {{:"$1", :_, :"$2"}} + guards = [{:<, :"$1", min_sent}] + select_slow = [{match, guards, [{{:"$1", :"$2"}}]}] + + for {sent, from} <- :ets.select(queue, select_slow) do + drop(time - sent, from) + end + + :ets.select_delete(queue, [{match, guards, [true]}]) + end + + defp handle_checkin(holder, now_in_native, {:ready, queue, _, _} = data) do + :ets.insert(queue, {{now_in_native, holder}}) + {:noreply, data} + end + + defp handle_checkin(holder, now_in_native, {:busy, queue, codel, ts}) do + now_in_ms = System.convert_time_unit(now_in_native, :native, @time_unit) + + case dequeue(now_in_ms, holder, queue, codel, ts) do + {:busy, _, _, _} = busy -> + {:noreply, busy} + + {:ready, _, _, _} = ready -> + :ets.insert(queue, {{now_in_native, holder}}) + {:noreply, ready} + end + end + + defp dequeue(time, holder, queue, codel, ts) do + case codel do + %{next: next, delay: delay, target: target} when time >= next -> + dequeue_first(time, delay > target, holder, queue, codel, ts) + + %{slow: false} -> + dequeue_fast(time, holder, queue, codel, ts) + + %{slow: true, target: target} -> + dequeue_slow(time, target * 2, holder, queue, codel, ts) + end + end + + defp dequeue_first(time, slow?, holder, queue, codel, ts) do + %{interval: interval} = codel + next = time + interval + + case :ets.first(queue) do + {sent, _, from} = key -> + :ets.delete(queue, key) + delay = time - sent + codel = %{codel | next: next, delay: delay, slow: slow?} + go(delay, from, time, holder, queue, codel, ts) + + :"$end_of_table" -> + codel = %{codel | next: next, delay: 0, slow: slow?} + {:ready, queue, codel, ts} + end + end + + defp dequeue_fast(time, holder, queue, codel, ts) do + case :ets.first(queue) do + {sent, _, from} = key -> + :ets.delete(queue, key) + go(time - sent, from, time, holder, queue, codel, ts) + + 
:"$end_of_table" -> + {:ready, queue, %{codel | delay: 0}, ts} + end + end + + defp dequeue_slow(time, timeout, holder, queue, codel, ts) do + case :ets.first(queue) do + {sent, _, from} = key when time - sent > timeout -> + :ets.delete(queue, key) + drop(time - sent, from) + dequeue_slow(time, timeout, holder, queue, codel, ts) + + {sent, _, from} = key -> + :ets.delete(queue, key) + go(time - sent, from, time, holder, queue, codel, ts) + + :"$end_of_table" -> + {:ready, queue, %{codel | delay: 0}, ts} + end + end + + defp go(delay, from, time, holder, queue, %{delay: min} = codel, ts) do + case Holder.handle_checkout(holder, from, queue, 0) do + true when delay < min -> + {:busy, queue, %{codel | delay: delay}, ts} + + true -> + {:busy, queue, codel, ts} + + false -> + dequeue(time, holder, queue, codel, ts) + end + end + + defp drop(delay, from) do + message = """ + [#{ancestor()}] connection not available and request was dropped from queue after #{delay}ms. \ + This means requests are coming in and your connection pool cannot serve them fast enough. \ + You can address this by: + + 1. Ensuring your database is available and that you can connect to it + 2. Tracking down slow queries and making sure they are running fast enough + 3. Increasing the pool_size (although this increases resource consumption) + 4. 
Allowing requests to wait longer by increasing :queue_target and :queue_interval + + See DBConnection.start_link/2 for more information + """ + + err = DBConnection.ConnectionError.exception(message, :queue_timeout) + + Holder.reply_error(from, err) + end + + defp ancestor do + Process.get(:"$ancestors", []) |> Enum.find(&is_atom/1) + end + + defp start_opts(opts) do + Keyword.take(opts, [:name, :spawn_opt]) + end + + defp start_poll(now, last_sent, %{interval: interval} = codel) do + timeout = now + interval + poll = :erlang.start_timer(timeout, self(), {timeout, last_sent}, abs: true) + %{codel | poll: poll} + end + + defp start_idle(now_in_native, %{idle_interval: interval} = codel) do + timeout = System.convert_time_unit(now_in_native, :native, :millisecond) + interval + idle = :erlang.start_timer(timeout, self(), now_in_native, abs: true) + %{codel | idle: idle} + end +end diff --git a/deps/db_connection/lib/db_connection/connection_pool/pool.ex b/deps/db_connection/lib/db_connection/connection_pool/pool.ex new file mode 100644 index 0000000..554d206 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection_pool/pool.ex @@ -0,0 +1,33 @@ +defmodule DBConnection.ConnectionPool.Pool do + @moduledoc false + use Supervisor, restart: :temporary + + def start_supervised(tag, mod, opts) do + DBConnection.Watcher.watch( + DBConnection.ConnectionPool.Supervisor, + {DBConnection.ConnectionPool.Pool, {self(), tag, mod, opts}} + ) + end + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg) + end + + @impl true + def init({owner, tag, mod, opts}) do + size = Keyword.get(opts, :pool_size, 1) + + if size < 1, do: raise(ArgumentError, "pool size must be greater or equal to 1, got #{size}") + + children = for id <- 1..size, do: conn(owner, tag, id, mod, opts) + sup_opts = [strategy: :one_for_one] ++ Keyword.take(opts, [:max_restarts, :max_seconds]) + Supervisor.init(children, sup_opts) + end + + ## Helpers + + defp conn(owner, tag, id, mod, opts) do + 
child_opts = [id: {mod, owner, id}] ++ Keyword.take(opts, [:shutdown]) + DBConnection.Connection.child_spec(mod, [pool_index: id] ++ opts, owner, tag, child_opts) + end +end diff --git a/deps/db_connection/lib/db_connection/holder.ex b/deps/db_connection/lib/db_connection/holder.ex new file mode 100644 index 0000000..3b9916d --- /dev/null +++ b/deps/db_connection/lib/db_connection/holder.ex @@ -0,0 +1,474 @@ +defmodule DBConnection.Holder do + @moduledoc false + require Record + + alias DBConnection.Util + + @queue true + @timeout 15000 + @time_unit 1000 + + Record.defrecord(:conn, [ + :connection, + :module, + :state, + :lock, + :connected_at, + deadline: nil, + status: :ok + ]) + + Record.defrecord(:pool_ref, [:pool, :reference, :deadline, :holder, :lock]) + + @type t :: :ets.tid() + @type checkin_time :: non_neg_integer() | nil + + ## Holder API + + @spec new(pid, reference, module, term) :: t + @spec new(pid, reference, module, term, integer) :: t + def new(pool, ref, mod, state, connected_at \\ System.monotonic_time()) do + # Insert before setting heir so that pool can't receive empty table + holder = :ets.new(__MODULE__, [:public, :ordered_set, decentralized_counters: true]) + + conn = conn(connection: self(), module: mod, state: state, connected_at: connected_at) + true = :ets.insert_new(holder, conn) + + :ets.setopts(holder, {:heir, pool, ref}) + holder + end + + @spec update(pid, reference, module, term) :: {:ok, t} | :error + @spec update(pid, reference, module, term, integer) :: {:ok, t} | :error + def update(pool, ref, mod, state, connected_at \\ System.monotonic_time()) do + holder = new(pool, ref, mod, state, connected_at) + + try do + :ets.give_away(holder, pool, {:checkin, ref, System.monotonic_time()}) + {:ok, holder} + rescue + ArgumentError -> :error + end + end + + @spec delete(t) :: {module, term} + def delete(holder) do + [conn(module: module, state: state)] = :ets.lookup(holder, :conn) + :ets.delete(holder) + {module, state} + end + + ## Pool 
API (invoked by caller) + + @callback checkout(pool :: GenServer.server(), [pid], opts :: Keyword.t()) :: + {:ok, pool_ref :: any, module, checkin_time, state :: any} + | {:error, Exception.t()} + def checkout(pool, callers, opts) do + queue? = Keyword.get(opts, :queue, @queue) + now = System.monotonic_time(@time_unit) + timeout = abs_timeout(now, opts) + + case checkout(pool, callers, queue?, now, timeout) do + {:ok, _, _, _, _} = ok -> + ok + + {:error, %DBConnection.ConnectionError{} = connection_error} = error -> + :telemetry.execute( + [:db_connection, :connection_error], + %{count: 1}, + %{ + error: connection_error, + opts: opts + } + ) + + error + + {:error, _} = error -> + error + + {:redirect, caller, proxy} -> + case checkout(proxy, [caller], opts) do + {:ok, _, _, _, _} = ok -> + ok + + {:error, %DBConnection.ConnectionError{message: message} = exception} -> + {:error, + %{ + exception + | message: + "could not checkout the connection owned by #{Util.inspect_pid(caller)}. " <> + "When using the sandbox, connections are shared, so this may imply " <> + "another process is using a connection. Reason: #{message}" + }} + + {:error, _} = error -> + error + end + + {:exit, reason} -> + exit({reason, {__MODULE__, :checkout, [pool, opts]}}) + end + end + + @spec checkin(pool_ref :: any) :: :ok + def checkin(pool_ref) do + # Note we may call checkin after a disconnect/stop. For this reason, we choose + # to not change the status on checkin but strictly speaking nobody can access + # the holder after disconnect/stop unless they store a copy of %DBConnection{}. + # Note status can't be :aborted as aborted is always reverted at the end of a + # transaction. 
+ done(pool_ref, [{conn(:lock) + 1, nil}], :checkin, System.monotonic_time()) + end + + @spec disconnect(pool_ref :: any, err :: Exception.t()) :: :ok + def disconnect(pool_ref, err) do + done(pool_ref, [{conn(:status) + 1, :error}], :disconnect, err) + end + + @spec stop(pool_ref :: any, err :: Exception.t()) :: :ok + def stop(pool_ref, err) do + done(pool_ref, [{conn(:status) + 1, :error}], :stop, err) + end + + @spec handle(pool_ref :: any, fun :: atom, args :: [term], Keyword.t()) :: tuple + def handle(pool_ref, fun, args, opts) do + handle_or_cleanup(:handle, pool_ref, fun, args, opts) + end + + @spec cleanup(pool_ref :: any, fun :: atom, args :: [term], Keyword.t()) :: tuple + def cleanup(pool_ref, fun, args, opts) do + handle_or_cleanup(:cleanup, pool_ref, fun, args, opts) + end + + defp handle_or_cleanup(type, pool_ref, fun, args, opts) do + pool_ref(holder: holder, lock: lock) = pool_ref + + try do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + msg = "connection is closed because of an error, disconnect or timeout" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + else + [conn(lock: conn_lock)] when conn_lock != lock -> + raise "an outdated connection has been given to DBConnection on #{fun}/#{length(args) + 2}" + + [conn(status: :error)] -> + msg = "connection is closed because of an error, disconnect or timeout" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + + [conn(status: :aborted)] when type != :cleanup -> + msg = "transaction rolling back" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + + [conn(module: module, state: state)] -> + holder_apply(holder, module, fun, args ++ [opts, state]) + end + end + + ## Pool state helpers API (invoked by callers) + + @spec put_state(pool_ref :: any, term) :: :ok + def put_state(pool_ref(holder: sink_holder), state) do + :ets.update_element(sink_holder, :conn, [{conn(:state) + 1, state}]) + :ok + end + + 
@spec status?(pool_ref :: any, :ok | :aborted) :: boolean() + def status?(pool_ref(holder: holder), status) do + try do + :ets.lookup_element(holder, :conn, conn(:status) + 1) == status + rescue + ArgumentError -> false + end + end + + @spec put_status(pool_ref :: any, :ok | :aborted) :: boolean() + def put_status(pool_ref(holder: holder), status) do + try do + :ets.update_element(holder, :conn, [{conn(:status) + 1, status}]) + rescue + ArgumentError -> false + end + end + + ## Pool callbacks (invoked by pools) + + @spec reply_redirect({pid, reference}, pid | :shared | :auto, GenServer.server()) :: :ok + def reply_redirect(from, caller, redirect) do + GenServer.reply(from, {:redirect, caller, redirect}) + :ok + end + + @spec reply_error({pid, reference}, Exception.t()) :: :ok + def reply_error(from, exception) do + GenServer.reply(from, {:error, exception}) + :ok + end + + @spec handle_checkout(t, {pid, reference}, reference, checkin_time) :: boolean + def handle_checkout(holder, {pid, mref}, ref, checkin_time) do + :ets.give_away(holder, pid, {mref, ref, checkin_time}) + rescue + ArgumentError -> + if Process.alive?(pid) or :ets.info(holder, :owner) != self() do + raise ArgumentError, no_holder(holder, pid) + else + false + end + end + + @spec handle_deadline(t, reference) :: boolean + def handle_deadline(holder, deadline) do + :ets.lookup_element(holder, :conn, conn(:deadline) + 1) + rescue + ArgumentError -> false + else + ^deadline -> true + _ -> false + end + + @spec handle_ping(t) :: true + def handle_ping(holder) do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + raise ArgumentError, no_holder(holder, nil) + else + [conn(connection: conn, state: state)] -> + DBConnection.Connection.ping({conn, holder}, state) + :ets.delete(holder) + true + end + + @spec handle_disconnect(t, Exception.t()) :: boolean + def handle_disconnect(holder, err) do + handle_done(holder, &DBConnection.Connection.disconnect/3, err) + end + + @spec handle_stop(t, term) :: 
boolean + def handle_stop(holder, err) do + handle_done(holder, &DBConnection.Connection.stop/3, err) + end + + @spec maybe_disconnect(t, {integer, non_neg_integer} | nil, {integer, non_neg_integer} | nil) :: + boolean() + def maybe_disconnect(_holder, nil, nil), do: false + + def maybe_disconnect(holder, interval, lifetime) do + ts = :ets.lookup_element(holder, :conn, conn(:connected_at) + 1) + disconnect_all_reason(holder, ts, interval) || max_lifetime_reason(holder, ts, lifetime) + rescue + _ -> false + else + nil -> + false + + reason -> + opts = [message: reason, severity: :debug] + handle_disconnect(holder, DBConnection.ConnectionError.exception(opts)) + end + + defp max_lifetime_reason(_holder, _ts, nil), do: nil + + defp max_lifetime_reason(holder, ts, {min_lifetime, interval_ms}) do + elapsed = System.monotonic_time() - ts + + # First check if passed start then check if also the interval + if elapsed > min_lifetime and elapsed > hash_holder(holder, interval_ms) + min_lifetime do + "max_lifetime exceeded" + end + end + + defp disconnect_all_reason(_holder, _ts, nil), do: nil + + defp disconnect_all_reason(holder, ts, {disconnect_start, interval_ms}) do + if disconnect_start > ts and + System.monotonic_time() > hash_holder(holder, interval_ms) + disconnect_start do + "disconnect_all requested" + end + end + + ## Private + + defp checkout(pool, callers, queue?, start, timeout) do + case GenServer.whereis(pool) do + pid when node(pid) == node() -> + checkout_call(pid, callers, queue?, start, timeout) + + pid when node(pid) != node() -> + {:exit, {:badnode, node(pid)}} + + {_name, node} -> + {:exit, {:badnode, node}} + + nil -> + {:exit, :noproc} + end + end + + defp checkout_call(pid, callers, queue?, start, timeout) do + lock = Process.monitor(pid) + send(pid, {:db_connection, {self(), lock}, {:checkout, callers, start, queue?}}) + + receive do + {:"ETS-TRANSFER", holder, pool, {^lock, ref, checkin_time}} -> + Process.demonitor(lock, [:flush]) + {deadline, 
ops} = start_deadline(timeout, pool, ref, holder, start) + :ets.update_element(holder, :conn, [{conn(:lock) + 1, lock} | ops]) + + pool_ref = + pool_ref(pool: pool, reference: ref, deadline: deadline, holder: holder, lock: lock) + + checkout_result(holder, pool_ref, checkin_time) + + {^lock, reply} -> + Process.demonitor(lock, [:flush]) + reply + + {:DOWN, ^lock, _, _, reason} -> + {:exit, reason} + end + end + + defp checkout_result(holder, pool_ref, checkin_time) do + try do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + # Deadline could hit and be handled pool before using connection + msg = "connection not available because deadline reached while in queue" + {:error, DBConnection.ConnectionError.exception(msg)} + else + [conn(module: mod, state: state)] -> + {:ok, pool_ref, mod, checkin_time, state} + end + end + + defp no_holder(holder, maybe_pid) do + reason = + case :ets.info(holder, :owner) do + :undefined -> "does not exist" + ^maybe_pid -> "is being given to its current owner" + owner when owner != self() -> "does not belong to the giving process" + _ -> "could not be given away" + end + + call_reason = + if maybe_pid do + "Error happened when attempting to transfer to #{Util.inspect_pid(maybe_pid)} " <> + "(alive: #{Process.alive?(maybe_pid)})" + else + "Error happened when looking up connection" + end + + """ + #{inspect(__MODULE__)} #{inspect(holder)} #{reason}, pool inconsistent. + #{call_reason}. 
+ + SELF: #{Util.inspect_pid(self())} + ETS INFO: #{inspect(:ets.info(holder))} + + Please report at https://github.com/elixir-ecto/db_connection/issues" + """ + end + + defp holder_apply(holder, module, fun, args) do + try do + apply(module, fun, args) + catch + kind, reason -> + {:catch, kind, reason, __STACKTRACE__} + else + result when is_tuple(result) -> + state = :erlang.element(:erlang.tuple_size(result), result) + + try do + :ets.update_element(holder, :conn, {conn(:state) + 1, state}) + result + rescue + ArgumentError -> + augment_disconnect(result) + end + + # If it is not a tuple, we just return it as is so we raise bad return. + result -> + result + end + end + + defp augment_disconnect({:disconnect, %DBConnection.ConnectionError{} = err, state}) do + %{message: message} = err + + message = + message <> + " (the connection was closed by the pool, " <> + "possibly due to a timeout or because the pool has been terminated)" + + {:disconnect, %{err | message: message}, state} + end + + defp augment_disconnect(result), do: result + + defp done(pool_ref, ops, tag, info) do + pool_ref(pool: pool, reference: ref, deadline: deadline, holder: holder) = pool_ref + cancel_deadline(deadline) + + try do + :ets.update_element(holder, :conn, [{conn(:deadline) + 1, nil} | ops]) + :ets.give_away(holder, pool, {tag, ref, info}) + rescue + ArgumentError -> :ok + else + true -> :ok + end + end + + defp handle_done(holder, stop, err) do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + false + else + [conn(connection: pid, deadline: deadline, state: state)] -> + cancel_deadline(deadline) + :ets.delete(holder) + stop.({pid, holder}, err, state) + true + end + + defp abs_timeout(now, opts) do + case Keyword.get(opts, :timeout, @timeout) do + :infinity -> Keyword.get(opts, :deadline) + timeout -> min(now + timeout, Keyword.get(opts, :deadline)) + end + end + + defp start_deadline(nil, _, _, _, _) do + {nil, []} + end + + defp start_deadline(timeout, pid, ref, holder, 
start) do + deadline = + :erlang.start_timer(timeout, pid, {ref, holder, self(), timeout - start}, abs: true) + + {deadline, [{conn(:deadline) + 1, deadline}]} + end + + defp cancel_deadline(nil) do + :ok + end + + defp cancel_deadline(deadline) do + :erlang.cancel_timer(deadline, async: true, info: false) + end + + defp hash_holder(_holder, 0), do: 0 + + defp hash_holder(holder, interval_ms) do + pid = :ets.lookup_element(holder, :conn, conn(:connection) + 1) + hash = :erlang.phash2(pid, interval_ms) + System.convert_time_unit(hash, :millisecond, :native) + end +end diff --git a/deps/db_connection/lib/db_connection/log_entry.ex b/deps/db_connection/lib/db_connection/log_entry.ex new file mode 100644 index 0000000..f765984 --- /dev/null +++ b/deps/db_connection/lib/db_connection/log_entry.ex @@ -0,0 +1,83 @@ +defmodule DBConnection.LogEntry do + @moduledoc """ + Struct containing log entry information. + + See `t:t/0` for information on the fields. + """ + + defstruct [ + :call, + :query, + :params, + :result, + :pool_time, + :connection_time, + :decode_time, + :idle_time + ] + + @typedoc """ + Log entry information. + + * `:call` - The `DBConnection` function called + * `:query` - The query used by the function + * `:params` - The params passed to the function (if any) + * `:result` - The result of the call + * `:pool_time` - The length of time awaiting a connection from the pool (if + the connection was not already checked out) + * `:connection_time` - The length of time using the connection (if a + connection was used) + * `:decode_time` - The length of time decoding the result (if decoded the + result using `DBConnection.Query.decode/3`) + * `:idle_time` - The amount of time the connection was idle before use + + All times are in the native time units of the VM, see + `System.monotonic_time/0`. 
+ """ + @type t :: %__MODULE__{ + call: atom, + query: any, + params: any, + result: {:ok, any} | {:ok, any, any} | {:error, Exception.t()}, + pool_time: non_neg_integer | nil, + connection_time: non_neg_integer | nil, + idle_time: non_neg_integer | nil, + decode_time: non_neg_integer | nil + } + + @doc false + def new(call, query, params, times, result) do + entry = %__MODULE__{call: call, query: query, params: params, result: result} + parse_times(times, entry) + end + + ## Helpers + + defp parse_times([], entry), do: entry + + defp parse_times(times, entry) do + stop = :erlang.monotonic_time() + {_, entry} = Enum.reduce(times, {stop, entry}, &parse_time/2) + entry + end + + defp parse_time({:decode, start}, {stop, entry}) do + {start, %{entry | decode_time: stop - start}} + end + + defp parse_time({:checkout, start}, {stop, entry}) do + {start, %{entry | pool_time: stop - start}} + end + + defp parse_time({:checkin, start}, {stop, entry}) do + # The checkin time was most likely before checkout but it is + # not guaranteed as they are tracked by different processes. + # There should be no further measurements after checkin. + {stop, %{entry | idle_time: max(stop - start, 0)}} + end + + defp parse_time({_, start}, {stop, entry}) do + %{connection_time: connection_time} = entry + {start, %{entry | connection_time: (connection_time || 0) + (stop - start)}} + end +end diff --git a/deps/db_connection/lib/db_connection/ownership.ex b/deps/db_connection/lib/db_connection/ownership.ex new file mode 100644 index 0000000..77fe394 --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership.ex @@ -0,0 +1,155 @@ +defmodule DBConnection.OwnershipError do + @moduledoc """ + An exception for when errors with ownership occur. 
+ """ + + defexception [:message] + + def exception(message), do: %DBConnection.OwnershipError{message: message} +end + +defmodule DBConnection.Ownership do + @moduledoc """ + A DBConnection pool that requires explicit checkout and checkin + as a mechanism to coordinate between processes. + + ## Options + + * `:ownership_mode` - When mode is `:manual`, all connections must + be explicitly checked out before by using `ownership_checkout/2`. + Otherwise, mode is `:auto` and connections are checked out + implicitly. `{:shared, owner}` mode is also supported so + processes are allowed on demand. On all cases, checkins are + explicit via `ownership_checkin/2`. Defaults to `:auto`. + * `:ownership_timeout` - The maximum time (in milliseconds) that a process + is allowed to own a connection or `:infinity`, default `120_000`. + This timeout exists mostly for sanity checking purposes and can be increased + at will, since DBConnection automatically checks in connections whenever + there is a mode change. + * `:ownership_log` - The `Logger.level` to log ownership changes, or `nil` + not to log, default `nil`. + + There are also two experimental options, `:post_checkout` and `:pre_checkin` + which allows a developer to configure what happens when a connection is + checked out and checked in. Those options are meant to be used during tests, + and have the following behaviour: + + * `:post_checkout` - it must be an anonymous function that receives the + connection module, the connection state and it must return either + `{:ok, connection_module, connection_state}` or + `{:disconnect, err, connection_module, connection_state}`. This allows + the developer to change the connection module on post checkout. However, + in case of disconnects, the return `connection_module` must be the same + as the `connection_module` given. Defaults to simply returning the given + connection module and state. 
+ + * `:pre_checkin` - it must be an anonymous function that receives the + checkin reason (`:checkin`, `{:disconnect, err}` or `{:stop, err}`), + the connection module and the connection state returned by `post_checkout`. + It must return either `{:ok, connection_module, connection_state}` or + `{:disconnect, err, connection_module, connection_state}` where the connection + module is the module given to `:post_checkout` Defaults to simply returning + the given connection module and state. + + ## Callers lookup + + When checking out, the ownership pool first looks if there is a connection + assigned to the current process and then checks if there is a connection + assigned to any of the processes listed under the `$callers` process + dictionary entry. The `$callers` entry is set by default for tasks from + Elixir v1.8. + + You can also pass the `:caller` option on checkout with a pid and that + pid will be looked up first, instead of `self()`, and then we fall back + to `$callers`. + """ + + alias DBConnection.Ownership.Manager + alias DBConnection.Holder + + @behaviour DBConnection.Pool + + @doc false + defdelegate child_spec(args), to: Manager + + @doc false + @impl DBConnection.Pool + defdelegate disconnect_all(pool, interval, opts), to: Manager + + @doc false + @impl DBConnection.Pool + def checkout(pool, callers, opts) do + case Manager.proxy_for(callers, opts) do + {caller, pool} -> Holder.checkout(pool, [caller], opts) + nil -> Holder.checkout(pool, callers, opts) + end + end + + @doc false + @impl DBConnection.Pool + defdelegate get_connection_metrics(pool), to: Manager + + @doc """ + Explicitly checks a connection out from the ownership manager. + + It may return `:ok` if the connection is checked out. + `{:already, :owner | :allowed}` if the caller process already + has a connection, or raise if there was an error. 
+ """ + @spec ownership_checkout(GenServer.server(), Keyword.t()) :: + :ok | {:already, :owner | :allowed} + def ownership_checkout(manager, opts) do + with {:ok, pid} <- Manager.checkout(manager, opts) do + case Holder.checkout(pid, [self()], opts) do + {:ok, pool_ref, _module, _idle_time, _state} -> + Holder.checkin(pool_ref) + + {:error, err} -> + raise err + end + end + end + + @doc """ + Changes the ownership mode. + + `mode` may be `:auto`, `:manual` or `{:shared, owner}`. + + The operation will always succeed when setting the mode to + `:auto` or `:manual`. It may fail with reason `:not_owner` + or `:not_found` when setting `{:shared, pid}` and the + given pid does not own any connection. May return + `:already_shared` if another process set the ownership + mode to `{:shared, _}` and is still alive. + """ + @spec ownership_mode(GenServer.server(), :auto | :manual | {:shared, pid}, Keyword.t()) :: + :ok | :already_shared | :not_owner | :not_found + defdelegate ownership_mode(manager, mode, opts), to: Manager, as: :mode + + @doc """ + Checks a connection back in. + + A connection can only be checked back in by its owner. + """ + @spec ownership_checkin(GenServer.server(), Keyword.t()) :: + :ok | :not_owner | :not_found + defdelegate ownership_checkin(manager, opts), to: Manager, as: :checkin + + @doc """ + Allows the process given by `allow` to use the connection checked out + by `owner_or_allowed`. + + It may return `:ok` if the connection is checked out. + `{:already, :owner | :allowed}` if the `allow` process already + has a connection. `owner_or_allowed` may either be the owner or any + other allowed process. Returns `:not_found` if the given process + does not have any connection checked out. + + Setting the `unallow_existing` option to `true` will remove the process given by `allow` from + any existing allowance it may have (this is necessary because a given process can only be + allowed on a single connection at a time). 
+ """ + @spec ownership_allow(GenServer.server(), owner_or_allowed :: pid, allow :: pid, Keyword.t()) :: + :ok | {:already, :owner | :allowed} | :not_found + defdelegate ownership_allow(manager, owner, allow, opts), to: Manager, as: :allow +end diff --git a/deps/db_connection/lib/db_connection/ownership/manager.ex b/deps/db_connection/lib/db_connection/ownership/manager.ex new file mode 100644 index 0000000..a6f3103 --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership/manager.ex @@ -0,0 +1,454 @@ +defmodule DBConnection.Ownership.Manager do + @moduledoc false + use GenServer + require Logger + alias DBConnection.Ownership.Proxy + alias DBConnection.Util + + @timeout 5_000 + + @callback start_link({module, opts :: Keyword.t()}) :: + GenServer.on_start() + def start_link({module, opts}) do + {owner_opts, pool_opts} = Keyword.split(opts, [:name]) + GenServer.start_link(__MODULE__, {module, owner_opts, pool_opts}, owner_opts) + end + + @callback disconnect_all(GenServer.server(), non_neg_integer, Keyword.t()) :: :ok + def disconnect_all(pool, interval, opts) do + inner_pool = GenServer.call(pool, :pool, :infinity) + DBConnection.ConnectionPool.disconnect_all(inner_pool, interval, opts) + end + + @spec proxy_for(callers :: [pid], Keyword.t()) :: {caller :: pid, proxy :: pid} | nil + def proxy_for(callers, opts) do + case Keyword.fetch(opts, :name) do + {:ok, name} -> + Enum.find_value(callers, &List.first(:ets.lookup(name, &1))) + + :error -> + nil + end + end + + @spec checkout(GenServer.server(), Keyword.t()) :: + {:ok, pid} | {:already, :owner | :allowed} + def checkout(manager, opts) do + GenServer.call(manager, {:checkout, opts}, :infinity) + end + + @spec checkin(GenServer.server(), Keyword.t()) :: + :ok | :not_owner | :not_found + def checkin(manager, opts) do + timeout = Keyword.get(opts, :timeout, @timeout) + GenServer.call(manager, :checkin, timeout) + end + + @spec mode(GenServer.server(), :auto | :manual | {:shared, pid}, Keyword.t()) :: + :ok 
| :already_shared | :not_owner | :not_found + def mode(manager, mode, opts) + when mode in [:auto, :manual] + when elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do + timeout = Keyword.get(opts, :timeout, @timeout) + GenServer.call(manager, {:mode, mode}, timeout) + end + + @spec allow(GenServer.server(), parent :: pid, allow :: pid, Keyword.t()) :: + :ok | {:already, :owner | :allowed} | :not_found + def allow(manager, parent, allow, opts) do + timeout = Keyword.get(opts, :timeout, @timeout) + passed_opts = Keyword.take(opts, [:unallow_existing]) + GenServer.call(manager, {:allow, parent, allow, passed_opts}, timeout) + end + + @spec get_connection_metrics(GenServer.server()) :: + {:ok, [DBConnection.Pool.connection_metrics()]} | :error + def get_connection_metrics(manager) do + GenServer.call(manager, :get_connection_metrics, :infinity) + end + + ## Callbacks + + @impl true + def init({module, owner_opts, pool_opts}) do + DBConnection.register_as_pool(module) + + ets = + case Keyword.fetch(owner_opts, :name) do + {:ok, name} when is_atom(name) -> + :ets.new(name, [ + :set, + :named_table, + :protected, + read_concurrency: true, + decentralized_counters: true + ]) + + _ -> + nil + end + + # We can only start the connection pool directly because + # neither the pool's GenServer nor the manager trap exits. + # Otherwise we would need a supervisor plus a watcher process. 
+ pool_opts = Keyword.delete(pool_opts, :pool) + {:ok, pool} = DBConnection.start_link(module, pool_opts) + + log = Keyword.get(pool_opts, :ownership_log, nil) + mode = Keyword.get(pool_opts, :ownership_mode, :auto) + checkout_opts = Keyword.take(pool_opts, [:ownership_timeout, :queue_target, :queue_interval]) + + if label = pool_opts[:label] do + Util.set_label({__MODULE__, label}) + end + + {:ok, + %{ + pool: pool, + checkouts: %{}, + owners: %{}, + checkout_opts: checkout_opts, + mode: mode, + mode_ref: nil, + ets: ets, + log: log + }} + end + + @impl true + def handle_call(:get_connection_metrics, _from, %{pool: pool, owners: owners, log: log} = state) do + pool_metrics = DBConnection.ConnectionPool.get_connection_metrics(pool) + + proxy_metrics = + owners + |> Enum.map(fn {_, {proxy, _, _}} -> + try do + GenServer.call(proxy, :get_connection_metrics) + catch + :exit, reason -> + if log do + Logger.log( + log, + "Caught :exit while calling :get_connection_metrics due to #{inspect(reason)}" + ) + end + + nil + end + end) + |> Enum.reject(&is_nil/1) + + {:reply, pool_metrics ++ proxy_metrics, state} + end + + def handle_call(:pool, _from, %{pool: pool} = state) do + {:reply, pool, state} + end + + def handle_call({:mode, {:shared, shared}}, {caller, _}, %{mode: {:shared, current}} = state) do + cond do + shared == current -> + {:reply, :ok, state} + + Process.alive?(current) -> + {:reply, :already_shared, state} + + true -> + share_and_reply(state, shared, caller) + end + end + + def handle_call({:mode, {:shared, shared}}, {caller, _}, state) do + share_and_reply(state, shared, caller) + end + + def handle_call({:mode, mode}, _from, %{mode: mode} = state) do + {:reply, :ok, state} + end + + def handle_call({:mode, mode}, {caller, _}, state) do + state = proxy_checkin_all_except(state, [], caller) + {:reply, :ok, %{state | mode: mode, mode_ref: nil}} + end + + def handle_call(:checkin, {caller, _}, state) do + {reply, state} = proxy_checkin(state, caller, caller) 
+ {:reply, reply, state} + end + + def handle_call({:allow, caller, allow, opts}, _from, %{checkouts: checkouts} = state) do + unallow_existing = Keyword.get(opts, :unallow_existing, false) + kind = already_checked_out(checkouts, allow) + + if !unallow_existing && kind do + {:reply, {:already, kind}, state} + else + case Map.get(checkouts, caller, :not_found) do + {:owner, ref, proxy} -> + state = + if unallow_existing, do: owner_unallow(state, caller, allow, ref, proxy), else: state + + {:reply, :ok, owner_allow(state, caller, allow, ref, proxy)} + + {:allowed, ref, proxy} -> + state = + if unallow_existing, do: owner_unallow(state, caller, allow, ref, proxy), else: state + + {:reply, :ok, owner_allow(state, caller, allow, ref, proxy)} + + :not_found -> + {:reply, :not_found, state} + end + end + end + + def handle_call({:checkout, opts}, {caller, _}, %{checkouts: checkouts} = state) do + if kind = already_checked_out(checkouts, caller) do + {:reply, {:already, kind}, state} + else + {proxy, state} = proxy_checkout(state, caller, opts) + {:reply, {:ok, proxy}, state} + end + end + + @impl true + def handle_info({:db_connection, from, {:checkout, callers, _now, queue?}}, state) do + %{checkouts: checkouts, mode: mode, checkout_opts: checkout_opts} = state + caller = find_caller(callers, checkouts, mode) + + case Map.get(checkouts, caller, :not_found) do + {status, _ref, proxy} when status in [:owner, :allowed] -> + DBConnection.Holder.reply_redirect(from, caller, proxy) + {:noreply, state} + + :not_found when mode == :auto -> + {proxy, state} = proxy_checkout(state, caller, [queue: queue?] 
++ checkout_opts) + DBConnection.Holder.reply_redirect(from, caller, proxy) + {:noreply, state} + + :not_found when mode == :manual -> + not_found(from, mode) + {:noreply, state} + + :not_found -> + {:shared, shared} = mode + {:owner, _ref, proxy} = Map.fetch!(checkouts, shared) + DBConnection.Holder.reply_redirect(from, shared, proxy) + {:noreply, state} + end + end + + def handle_info({:DOWN, ref, _, _, _}, state) do + {:noreply, state |> owner_down(ref) |> unshare(ref)} + end + + def handle_info(_msg, state) do + {:noreply, state} + end + + defp already_checked_out(checkouts, pid) do + case Map.get(checkouts, pid, :not_found) do + {:owner, _, _} -> :owner + {:allowed, _, _} -> :allowed + :not_found -> nil + end + end + + defp proxy_checkout(state, caller, opts) do + %{pool: pool, checkouts: checkouts, owners: owners, ets: ets, log: log, mode: mode} = state + + {:ok, proxy} = + DynamicSupervisor.start_child( + DBConnection.Ownership.Supervisor, + {DBConnection.Ownership.Proxy, {caller, pool, opts}} + ) + + if log do + Logger.log(log, fn -> + [ + Util.inspect_pid(caller), + " checked out connection in ", + inspect(mode), + " mode using proxy ", + Util.inspect_pid(proxy) + ] + end) + end + + ref = Process.monitor(proxy) + checkouts = Map.put(checkouts, caller, {:owner, ref, proxy}) + owners = Map.put(owners, ref, {proxy, caller, []}) + ets && :ets.insert(ets, {caller, proxy}) + {proxy, %{state | checkouts: checkouts, owners: owners}} + end + + defp proxy_checkin(state, maybe_owner, caller) do + case get_and_update_in(state.checkouts, &Map.pop(&1, maybe_owner, :not_found)) do + {{:owner, ref, proxy}, state} -> + Proxy.stop(proxy, caller) + {:ok, state |> owner_down(ref) |> unshare(ref)} + + {{:allowed, _, _}, _} -> + {:not_owner, state} + + {:not_found, _} -> + {:not_found, state} + end + end + + defp proxy_checkin_all_except(state, except, caller) do + Enum.reduce(state.checkouts, state, fn {pid, _}, state -> + if pid in except do + state + else + {_, state} = 
proxy_checkin(state, pid, caller) + state + end + end) + end + + defp owner_allow(%{ets: ets, log: log} = state, caller, allow, ref, proxy) do + if log do + Logger.log(log, fn -> + [ + Util.inspect_pid(allow), + " was allowed by ", + Util.inspect_pid(caller), + " on proxy ", + Util.inspect_pid(proxy) + ] + end) + end + + state = put_in(state.checkouts[allow], {:allowed, ref, proxy}) + + state = + update_in(state.owners[ref], fn {proxy, caller, allowed} -> + {proxy, caller, [allow | List.delete(allowed, allow)]} + end) + + ets && :ets.insert(ets, {allow, proxy}) + state + end + + defp owner_unallow(%{ets: ets, log: log} = state, caller, unallow, _ref, _proxy) do + case Map.get(state.checkouts, unallow, :not_found) do + {_status, old_ref, old_proxy} -> + if log do + Logger.log(log, fn -> + [ + Util.inspect_pid(unallow), + " was unallowed by ", + Util.inspect_pid(caller), + " on proxy ", + Util.inspect_pid(old_proxy) + ] + end) + end + + state = update_in(state.checkouts, &Map.delete(&1, unallow)) + + state = + update_in(state.owners[old_ref], fn {proxy, caller, allowed} -> + {proxy, caller, List.delete(allowed, unallow)} + end) + + ets && :ets.delete(ets, unallow) + state + + :not_found -> + state + end + end + + defp owner_down(%{ets: ets, log: log} = state, ref) do + case get_and_update_in(state.owners, &Map.pop(&1, ref)) do + {{proxy, caller, allowed}, state} -> + Process.demonitor(ref, [:flush]) + entries = [caller | allowed] + + if log do + Logger.log(log, fn -> + [ + Enum.map_join(entries, ", ", &Util.inspect_pid/1), + " lost connection from proxy ", + Util.inspect_pid(proxy) + ] + end) + end + + ets && Enum.each(entries, &:ets.delete(ets, &1)) + update_in(state.checkouts, &Map.drop(&1, entries)) + + {nil, state} -> + state + end + end + + defp share_and_reply(%{checkouts: checkouts} = state, shared, caller) do + case Map.get(checkouts, shared, :not_found) do + {:owner, ref, _} -> + state = proxy_checkin_all_except(state, [shared], caller) + {:reply, :ok, 
%{state | mode: {:shared, shared}, mode_ref: ref}} + + {:allowed, _, _} -> + {:reply, :not_owner, state} + + :not_found -> + {:reply, :not_found, state} + end + end + + defp unshare(%{mode_ref: ref} = state, ref) do + %{state | mode: :manual, mode_ref: nil} + end + + defp unshare(state, _ref) do + state + end + + defp find_caller(callers, checkouts, :manual) do + Enum.find(callers, &Map.has_key?(checkouts, &1)) || hd(callers) + end + + defp find_caller([caller | _], _checkouts, _mode) do + caller + end + + defp not_found({pid, _} = from, mode) do + label = Util.pool_label(self()) + label_info = if label, do: "(#{inspect(label)}) ", else: "" + + msg = """ + cannot find ownership process for #{Util.inspect_pid(pid)} + #{label_info}using mode #{inspect(mode)}. + (Note that a connection's mode reverts to :manual if its owner + terminates.) + + When using ownership, you must manage connections in one + of the four ways: + + * By explicitly checking out a connection + * By explicitly allowing a spawned process + * By running the pool in shared mode + * By using :caller option with allowed process + + The first two options require every new process to explicitly + check a connection out or be allowed by calling checkout or + allow respectively. + + The third option requires a {:shared, pid} mode to be set. + If using shared mode in tests, make sure your tests are not + async. + + The fourth option requires [caller: pid] to be used when + checking out a connection from the pool. The caller process + should already be allowed on a connection. + + If you are reading this error, it means you have not done one + of the steps above or that the owner process has crashed. 
+ """ + + DBConnection.Holder.reply_error(from, DBConnection.OwnershipError.exception(msg)) + end +end diff --git a/deps/db_connection/lib/db_connection/ownership/proxy.ex b/deps/db_connection/lib/db_connection/ownership/proxy.ex new file mode 100644 index 0000000..41ac7f3 --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership/proxy.ex @@ -0,0 +1,334 @@ +defmodule DBConnection.Ownership.Proxy do + @moduledoc false + + alias DBConnection.Holder + alias DBConnection.Util + use GenServer, restart: :temporary + + @time_unit 1000 + @ownership_timeout 120_000 + @queue_target 50 + @queue_interval 1000 + + def start_link({caller, pool, pool_opts}) do + GenServer.start_link(__MODULE__, {caller, pool, pool_opts}, []) + end + + def stop(proxy, caller) do + GenServer.cast(proxy, {:stop, caller}) + end + + # Callbacks + + @impl true + def init({caller, pool, pool_opts}) do + pool_opts = + pool_opts + |> Keyword.put(:timeout, :infinity) + |> Keyword.delete(:deadline) + + owner_ref = Process.monitor(caller) + ownership_timeout = Keyword.get(pool_opts, :ownership_timeout, @ownership_timeout) + timeout = Keyword.get(pool_opts, :queue_target, @queue_target) * 2 + interval = Keyword.get(pool_opts, :queue_interval, @queue_interval) + + pre_checkin = Keyword.get(pool_opts, :pre_checkin, fn _, mod, state -> {:ok, mod, state} end) + post_checkout = Keyword.get(pool_opts, :post_checkout, &{:ok, &1, &2}) + + state = %{ + client: nil, + timer: nil, + holder: nil, + timeout: timeout, + interval: interval, + poll: nil, + owner: {caller, owner_ref}, + pool: pool, + pool_ref: nil, + pool_opts: pool_opts, + queue: :queue.new(), + mod: nil, + pre_checkin: pre_checkin, + post_checkout: post_checkout, + ownership_timer: start_timer(caller, ownership_timeout) + } + + now = System.monotonic_time(@time_unit) + {:ok, start_poll(now, state)} + end + + @impl true + def handle_info({:DOWN, ref, _, pid, _reason}, %{owner: {_, ref}} = state) do + shutdown("owner #{Util.inspect_pid(pid)} exited", 
state) + end + + def handle_info({:timeout, deadline, {_ref, holder, pid, len}}, %{holder: holder} = state) do + if Holder.handle_deadline(holder, deadline) do + message = + "client #{Util.inspect_pid(pid)} timed out because " <> + "it queued and checked out the connection for longer than #{len}ms" + + shutdown(message, state) + else + {:noreply, state} + end + end + + def handle_info( + {:timeout, timer, {__MODULE__, pid, timeout}}, + %{ownership_timer: timer} = state + ) do + message = + "owner #{Util.inspect_pid(pid)} timed out because " <> + "it owned the connection for longer than #{timeout}ms (set via the :ownership_timeout option)" + + # We don't invoke shutdown because this is always a disconnect, even if there is no client. + # On the other hand, those timeouts are unlikely to trigger, as it defaults to 2 mins. + pool_disconnect(DBConnection.ConnectionError.exception(message), false, state) + end + + def handle_info({:timeout, poll, time}, %{poll: poll} = state) do + state = timeout(time, state) + {:noreply, start_poll(time, state)} + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}}, + %{holder: nil} = state + ) do + %{pool: pool, pool_opts: pool_opts, owner: {_, owner_ref}, post_checkout: post_checkout} = + state + + case Holder.checkout(pool, [self()], pool_opts) do + {:ok, pool_ref, original_mod, _idle_time, conn_state} -> + case post_checkout.(original_mod, conn_state) do + {:ok, conn_mod, conn_state} -> + holder = Holder.new(self(), owner_ref, conn_mod, conn_state) + state = %{state | pool_ref: pool_ref, holder: holder, mod: original_mod} + checkout(from, state) + + {:disconnect, err, ^original_mod, _conn_state} -> + Holder.disconnect(pool_ref, err) + Holder.reply_error(from, err) + {:stop, {:shutdown, err}, state} + end + + {:error, err} -> + Holder.reply_error(from, err) + {:stop, {:shutdown, err}, state} + end + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}}, + %{client: 
nil} = state + ) do + checkout(from, state) + end + + def handle_info({:db_connection, from, {:checkout, _caller, now, queue?}}, state) do + if queue? do + %{queue: queue} = state + queue = :queue.in({now, from}, queue) + {:noreply, %{state | queue: queue}} + else + message = "connection not available and queuing is disabled" + err = DBConnection.ConnectionError.exception(message) + Holder.reply_error(from, err) + {:noreply, state} + end + end + + def handle_info( + {:"ETS-TRANSFER", holder, _, {msg, ref, extra}}, + %{holder: holder, client: {_, ref, _}} = state + ) do + case msg do + :checkin -> checkin(state) + :disconnect -> pool_disconnect(extra, true, state) + :stop -> pool_stop(extra, state) + end + end + + def handle_info({:"ETS-TRANSFER", holder, pid, ref}, %{holder: holder, owner: {_, ref}} = state) do + shutdown("client #{Util.inspect_pid(pid)} exited", state) + end + + @impl true + def handle_cast({:stop, caller}, %{owner: {owner, _}} = state) do + message = + "#{Util.inspect_pid(caller)} checked in the connection owned by #{Util.inspect_pid(owner)}" + + message = + case pruned_stacktrace(caller) do + [] -> + message + + current_stack -> + message <> + "\n\n#{Util.inspect_pid(caller)} triggered the checkin at location:\n\n" <> + Exception.format_stacktrace(current_stack) + end + + shutdown(message, state) + end + + @impl true + def handle_call( + :get_connection_metrics, + _, + %{queue: queue, holder: holder, client: client} = state + ) do + connection_metrics = %{ + source: {:proxy, self()}, + ready_conn_count: + if is_nil(holder) or not is_nil(client) do + 0 + else + 1 + end, + checkout_queue_length: :queue.len(queue) + } + + {:reply, connection_metrics, state} + end + + defp checkout({pid, ref} = from, %{holder: holder} = state) do + if Holder.handle_checkout(holder, from, ref, nil) do + {:noreply, %{state | client: {pid, ref, pruned_stacktrace(pid)}}} + else + next(state) + end + end + + defp checkin(state) do + next(%{state | client: nil}) + end + + 
defp next(%{queue: queue} = state) do + case :queue.out(queue) do + {{:value, {_, from}}, queue} -> + checkout(from, %{state | queue: queue}) + + {:empty, queue} -> + {:noreply, %{state | queue: queue}} + end + end + + defp start_timer(_, :infinity), do: nil + + defp start_timer(pid, timeout) do + :erlang.start_timer(timeout, self(), {__MODULE__, pid, timeout}) + end + + # If shutting down but it has no client, checkin + defp shutdown(reason, %{client: nil} = state) do + pool_checkin(reason, state) + end + + # If shutting down but it has a client, disconnect + defp shutdown(reason, %{client: {client, _, checkout_stack}} = state) do + reason = + case pruned_stacktrace(client) do + [] -> + reason + + current_stack -> + reason <> + """ + \n\nClient #{Util.inspect_pid(client)} is still using a connection from owner at location: + + #{Exception.format_stacktrace(current_stack)} + The connection itself was checked out by #{Util.inspect_pid(client)} at location: + + #{Exception.format_stacktrace(checkout_stack)} + """ + end + + err = DBConnection.ConnectionError.exception(reason) + pool_disconnect(err, false, state) + end + + ## Helpers + + defp pool_checkin(reason, state) do + checkin = fn pool_ref, _ -> Holder.checkin(pool_ref) end + pool_done(reason, state, :checkin, false, checkin, &Holder.disconnect/2) + end + + defp pool_disconnect(err, keep_alive?, state) do + disconnect = &Holder.disconnect/2 + pool_done(err, state, {:disconnect, err}, keep_alive?, disconnect, disconnect) + end + + defp pool_stop(err, state) do + stop = &Holder.stop/2 + pool_done(err, state, {:stop, err}, false, stop, stop) + end + + defp pool_done(err, state, op, keep_alive?, done, stop_or_disconnect) do + %{holder: holder, pool_ref: pool_ref, pre_checkin: pre_checkin, mod: original_mod} = state + + if holder do + {conn_mod, conn_state} = Holder.delete(holder) + + case pre_checkin.(op, conn_mod, conn_state) do + {:ok, ^original_mod, conn_state} -> + Holder.put_state(pool_ref, conn_state) + 
done.(pool_ref, err) + + if keep_alive? do + {:noreply, %{state | holder: nil}} + else + {:stop, {:shutdown, err}, state} + end + + {:disconnect, err, ^original_mod, conn_state} -> + Holder.put_state(pool_ref, conn_state) + stop_or_disconnect.(pool_ref, err) + {:stop, {:shutdown, err}, state} + end + else + {:stop, {:shutdown, err}, state} + end + end + + defp start_poll(now, %{interval: interval} = state) do + timeout = now + interval + poll = :erlang.start_timer(timeout, self(), timeout, abs: true) + %{state | poll: poll} + end + + defp timeout(time, %{queue: queue, timeout: timeout} = state) do + case :queue.out(queue) do + {{:value, {sent, from}}, queue} when sent + timeout < time -> + drop(time - sent, from) + timeout(time, %{state | queue: queue}) + + {_, _} -> + state + end + end + + defp drop(delay, from) do + message = + "connection not available and request was dropped from queue after #{delay}ms. " <> + "You can configure how long requests wait in the queue using :queue_target and " <> + ":queue_interval. 
See DBConnection.start_link/2 for more information" + + err = DBConnection.ConnectionError.exception(message, :queue_timeout) + Holder.reply_error(from, err) + end + + @prune_modules [:gen, GenServer, DBConnection, DBConnection.Holder, DBConnection.Ownership] + + defp pruned_stacktrace(pid) do + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + Enum.drop_while(stacktrace, &match?({mod, _, _, _} when mod in @prune_modules, &1)) + + _ -> + [] + end + end +end diff --git a/deps/db_connection/lib/db_connection/pool.ex b/deps/db_connection/lib/db_connection/pool.ex new file mode 100644 index 0000000..3f2242c --- /dev/null +++ b/deps/db_connection/lib/db_connection/pool.ex @@ -0,0 +1,19 @@ +defmodule DBConnection.Pool do + @moduledoc false + + @type pool :: GenServer.server() + @type connection_metrics :: %{ + source: {:pool | :proxy, pid()}, + ready_conn_count: non_neg_integer(), + checkout_queue_length: non_neg_integer() + } + + @callback disconnect_all(pool, interval :: term, options :: keyword) :: :ok + + @callback checkout(pool, callers :: [pid], options :: keyword) :: + {:ok, pool_ref :: term, module, checkin_time :: non_neg_integer() | nil, + state :: term} + | {:error, Exception.t()} + + @callback get_connection_metrics(pool :: pool()) :: [connection_metrics()] +end diff --git a/deps/db_connection/lib/db_connection/query.ex b/deps/db_connection/lib/db_connection/query.ex new file mode 100644 index 0000000..734cee7 --- /dev/null +++ b/deps/db_connection/lib/db_connection/query.ex @@ -0,0 +1,57 @@ +defprotocol DBConnection.Query do + @moduledoc """ + The `DBConnection.Query` protocol is responsible for preparing and + encoding queries. + + All `DBConnection.Query` functions are executed in the caller process which + means it's safe to, for example, raise exceptions or do blocking calls as + they won't affect the connection process. + """ + + @doc """ + Parse a query. 
+ + This function is called to parse a query term before it is prepared using a + connection callback module. + + See `DBConnection.prepare/3`. + """ + @spec parse(any, Keyword.t()) :: any + def parse(query, opts) + + @doc """ + Describe a query. + + This function is called to describe a query after it is prepared using a + connection callback module. + + See `DBConnection.prepare/3`. + """ + @spec describe(any, Keyword.t()) :: any + def describe(query, opts) + + @doc """ + Encode parameters using a query. + + This function is called to encode a query before it is executed using a + connection callback module. + + If this function raises `DBConnection.EncodeError`, then the query is + prepared once again. + + See `DBConnection.execute/3`. + """ + @spec encode(any, any, Keyword.t()) :: any + def encode(query, params, opts) + + @doc """ + Decode a result using a query. + + This function is called to decode a result after it is returned by a + connection callback module. + + See `DBConnection.execute/3`. 
+ """ + @spec decode(any, any, Keyword.t()) :: any + def decode(query, result, opts) +end diff --git a/deps/db_connection/lib/db_connection/task.ex b/deps/db_connection/lib/db_connection/task.ex new file mode 100644 index 0000000..6d6671f --- /dev/null +++ b/deps/db_connection/lib/db_connection/task.ex @@ -0,0 +1,47 @@ +defmodule DBConnection.Task do + @moduledoc false + @name __MODULE__ + + require DBConnection.Holder + + def run_child(mod, state, fun, opts) do + arg = [fun, self(), %DBConnection.SensitiveData{data: opts}] + {:ok, pid} = Task.Supervisor.start_child(@name, __MODULE__, :init, arg) + ref = Process.monitor(pid) + _ = DBConnection.Holder.update(pid, ref, mod, state) + {pid, ref} + end + + def init(fun, parent, %DBConnection.SensitiveData{data: opts}) do + try do + Process.link(parent) + catch + :error, :noproc -> + exit({:shutdown, :noproc}) + end + + receive do + {:"ETS-TRANSFER", holder, ^parent, {:checkin, ref, _extra}} -> + Process.unlink(parent) + pool_ref = DBConnection.Holder.pool_ref(pool: parent, reference: ref, holder: holder) + checkout = {:via, __MODULE__, pool_ref} + _ = DBConnection.run(checkout, make_fun(fun), [pool: __MODULE__] ++ opts) + exit(:normal) + end + end + + def checkout({:via, __MODULE__, pool_ref}, _callers, _opts) do + {:ok, pool_ref, _mod = :unused, _idle_time = nil, _state = :unused} + end + + defp make_fun(fun) when is_function(fun, 1) do + fun + end + + defp make_fun(mfargs) do + fn conn -> + {mod, fun, args} = mfargs + apply(mod, fun, [conn | args]) + end + end +end diff --git a/deps/db_connection/lib/db_connection/telemetry_listener.ex b/deps/db_connection/lib/db_connection/telemetry_listener.ex new file mode 100644 index 0000000..021920c --- /dev/null +++ b/deps/db_connection/lib/db_connection/telemetry_listener.ex @@ -0,0 +1,116 @@ +defmodule DBConnection.TelemetryListener do + @moduledoc """ + A connection listener that emits telemetry events for connection and disconnection + + It monitors connection processes and 
ensures that disconnection events are + always emitted. + + ## Usage + + Start the listener, and pass it under the `:connection_listeners` option when + starting DBConnection: + + {:ok, pid} = DBConnection.TelemetryListener.start_link() + {:ok, _conn} = DBConnection.start_link(SomeModule, connection_listeners: [pid]) + + # Using a tag, which will be sent in telemetry metadata + {:ok, _conn} = DBConnection.start_link(SomeModule, connection_listeners: {[pid], :my_tag}) + + # Or, with a Supervisor: + Supervisor.start_link([ + {DBConnection.TelemetryListener, name: MyListener}, + DBConnection.child_spec(SomeModule, connection_listeners: {[MyListener], :my_tag}) + ]) + + When using with Ecto, you can pass the `connection_listeners` option to Ecto, and we + recommend passing the repository as the tag. In your supervision tree: + + Supervisor.start_link([ + {DBConnection.TelemetryListener, name: MyApp.DBListener}, + {MyApp.Repo, connection_listeners: {[MyApp.DBListener], MyApp.Repo}) + ]) + + ## Telemetry events + + ### Connected + + `[:db_connection, :connected]` - Executed after a connection is established. + + #### Measurements + + * `:count` - Always 1 + + #### Metadata + + * `:pid` - The connection pid + * `:tag` - The connection pool tag + + ### Disconnected + + `[:db_connection, :disconnected]` - Executed after a disconnect. 
+ + #### Measurements + + * `:count` - Always 1 + + #### Metadata + + * `:pid` - The connection pid + * `:tag` - The connection pool tag + """ + + use GenServer + + @doc "Starts a telemetry listener" + @spec start_link(GenServer.options()) :: {:ok, pid()} + def start_link(opts \\ []) do + GenServer.start_link(__MODULE__, nil, opts) + end + + @impl GenServer + def init(nil) do + {:ok, %{monitoring: %{}}} + end + + @impl GenServer + def handle_info({:connected, pid, tag}, state) do + handle_connected(pid, tag, state) + end + + def handle_info({:connected, pid}, state) do + handle_connected(pid, nil, state) + end + + def handle_info({:disconnected, pid, _}, state) do + handle_disconnected(pid, state) + end + + def handle_info({:disconnected, pid}, state) do + handle_disconnected(pid, state) + end + + def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do + handle_disconnected(pid, state) + end + + defp handle_connected(pid, tag, state) do + :telemetry.execute([:db_connection, :connected], %{count: 1}, %{tag: tag, pid: pid}) + ref = Process.monitor(pid) + + {:noreply, put_in(state.monitoring[pid], {ref, tag})} + end + + defp handle_disconnected(pid, state) do + case state.monitoring[pid] do + # Already handled. We may receive two messages: one from monitor and one + # from listener. For this reason, we need to handle both. + nil -> + {:noreply, state} + + {ref, tag} -> + Process.demonitor(ref, [:flush]) + :telemetry.execute([:db_connection, :disconnected], %{count: 1}, %{tag: tag, pid: pid}) + {:noreply, %{state | monitoring: Map.delete(state.monitoring, pid)}} + end + end +end diff --git a/deps/db_connection/lib/db_connection/util.ex b/deps/db_connection/lib/db_connection/util.ex new file mode 100644 index 0000000..e866ec8 --- /dev/null +++ b/deps/db_connection/lib/db_connection/util.ex @@ -0,0 +1,77 @@ +defmodule DBConnection.Util do + @moduledoc false + + @doc """ + Inspect a pid, including the process label if possible. 
+ """ + def inspect_pid(pid) when is_pid(pid) do + with :undefined <- get_label(pid), + :undefined <- get_name(pid), + :undefined <- get_initial_call(pid) do + inspect(pid) + else + label_or_name_or_call -> "#{inspect(pid)} (#{inspect(label_or_name_or_call)})" + end + end + + def inspect_pid(other), do: inspect(other) + + defp get_name(pid) do + try do + Process.info(pid, :registered_name) + rescue + _ -> :undefined + else + {:registered_name, name} when is_atom(name) -> name + _ -> :undefined + end + end + + @doc """ + Set a process label if `Process.set_label/1` is available. + """ + def set_label(label) do + if function_exported?(Process, :set_label, 1) do + apply(Process, :set_label, [label]) + else + :ok + end + end + + @doc """ + Get the pool label from a pid's process label. + + Returns the label if found, or `nil` otherwise. + Process labels set as `{module, label}` tuples have the label extracted. + """ + def pool_label(pid) when is_pid(pid) do + case get_label(pid) do + {module, label} when is_atom(module) and module != nil and label != nil -> label + _ -> nil + end + end + + def pool_label(_other), do: nil + + # Get a process label if `:proc_lib.get_label/1` is available. + defp get_label(pid) do + if function_exported?(:proc_lib, :get_label, 1) do + # Avoid a compiler warning if the function isn't + # defined in your version of Erlang/OTP + apply(:proc_lib, :get_label, [pid]) + else + # mimic return value of + # `:proc_lib.get_label/1` when none is set. + # Don't resort to using `Process.info(pid, :dictionary)`, + # as this is not efficient. 
+ :undefined + end + end + + defp get_initial_call(pid) do + case Process.info(pid, :initial_call) do + {:initial_call, {mod, _, _}} -> mod + _ -> :undefined + end + end +end diff --git a/deps/db_connection/lib/db_connection/watcher.ex b/deps/db_connection/lib/db_connection/watcher.ex new file mode 100644 index 0000000..e2ee324 --- /dev/null +++ b/deps/db_connection/lib/db_connection/watcher.ex @@ -0,0 +1,77 @@ +defmodule DBConnection.Watcher do + @moduledoc false + @name __MODULE__ + + use GenServer + + def start_link(_) do + GenServer.start_link(__MODULE__, :ok, name: @name) + end + + def watch(supervisor, args) do + GenServer.call(@name, {:watch, supervisor, args}, :infinity) + end + + @impl true + def init(:ok) do + Process.flag(:trap_exit, true) + {:ok, {%{}, %{}}} + end + + @impl true + def handle_call({:watch, supervisor, args}, {caller_pid, _ref}, {caller_refs, started_refs}) do + case DynamicSupervisor.start_child(supervisor, args) do + {:ok, started_pid} -> + Process.link(caller_pid) + caller_ref = Process.monitor(caller_pid) + started_ref = Process.monitor(started_pid) + caller_refs = Map.put(caller_refs, caller_ref, {supervisor, started_pid, started_ref}) + started_refs = Map.put(started_refs, started_ref, {caller_pid, caller_ref}) + {:reply, {:ok, started_pid}, {caller_refs, started_refs}} + + other -> + {:reply, other, {caller_refs, started_refs}} + end + end + + @impl true + def handle_info({:DOWN, ref, _, _, _}, {caller_refs, started_refs}) do + case caller_refs do + %{^ref => {_supervisor, started_pid, started_ref}} -> + try do + :sys.terminate(started_pid, :shutdown, :infinity) + catch + :exit, {:noproc, {:sys, :terminate, _}} -> :ok + :exit, {:shutdown, {:sys, :terminate, _}} -> :ok + end + + caller_refs = Map.delete(caller_refs, ref) + started_refs = Map.put(started_refs, started_ref, {nil, nil}) + {:noreply, {caller_refs, started_refs}} + + %{} -> + case started_refs do + %{^ref => {nil, nil}} -> + {:noreply, {caller_refs, 
Map.delete(started_refs, ref)}} + + %{^ref => {caller_pid, caller_ref}} -> + Process.demonitor(caller_ref, [:flush]) + Process.exit(caller_pid, :kill) + {:noreply, {Map.delete(caller_refs, caller_ref), Map.delete(started_refs, ref)}} + end + end + end + + def handle_info({:EXIT, _, _}, state) do + {:noreply, state} + end + + @impl true + def terminate(_, {_, started_refs}) do + for {_, {caller_pid, _}} when caller_pid != nil <- started_refs do + Process.exit(caller_pid, :kill) + end + + :ok + end +end diff --git a/deps/db_connection/mix.exs b/deps/db_connection/mix.exs new file mode 100644 index 0000000..6d16247 --- /dev/null +++ b/deps/db_connection/mix.exs @@ -0,0 +1,85 @@ +defmodule DBConnection.Mixfile do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/db_connection" + @pools [:connection_pool, :ownership] + @version "2.10.1" + + def project do + [ + app: :db_connection, + version: @version, + elixir: "~> 1.11", + deps: deps(), + docs: docs(), + description: description(), + package: package(), + build_per_environment: false, + consolidate_protocols: false, + test_paths: test_paths(Mix.env()), + test_ignore_filters: [~r/test_support\.exs$/], + aliases: ["test.all": ["test", "test.pools"], "test.pools": &test_pools/1], + preferred_cli_env: ["test.all": :test] + ] + end + + def application do + [ + extra_applications: [:logger], + mod: {DBConnection.App, []} + ] + end + + defp deps do + [ + {:ex_doc, ">= 0.0.0", only: :dev, runtime: false}, + {:telemetry, "~> 0.4 or ~> 1.0"} + ] + end + + defp docs do + [ + source_url: @source_url, + source_ref: "v#{@version}", + main: DBConnection, + extras: ["CHANGELOG.md"] + ] + end + + defp description do + """ + Database connection behaviour for database transactions and connection pooling + """ + end + + defp package do + %{ + licenses: ["Apache-2.0"], + maintainers: ["James Fish", "José Valim"], + links: %{"GitHub" => @source_url} + } + end + + defp test_paths(pool) when pool in @pools, do: 
["integration_test/#{pool}"] + defp test_paths(_), do: ["test"] + + defp test_pools(args) do + for env <- @pools, do: env_run(env, args) + end + + defp env_run(env, args) do + args = if IO.ANSI.enabled?(), do: ["--color" | args], else: ["--no-color" | args] + + IO.puts("==> Running tests for MIX_ENV=#{env} mix test") + + {_, res} = + System.cmd("mix", ["test" | args], + into: IO.binstream(:stdio, :line), + env: [{"MIX_ENV", to_string(env)}] + ) + + if res > 0 do + System.at_exit(fn _ -> exit({:shutdown, 1}) end) + end + end +end diff --git a/deps/decimal/.formatter.exs b/deps/decimal/.formatter.exs new file mode 100644 index 0000000..cc79f69 --- /dev/null +++ b/deps/decimal/.formatter.exs @@ -0,0 +1,3 @@ +[ + inputs: ["{bench,mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/decimal/.hex b/deps/decimal/.hex new file mode 100644 index 0000000..070f7d6 Binary files /dev/null and b/deps/decimal/.hex differ diff --git a/deps/decimal/CHANGELOG.md b/deps/decimal/CHANGELOG.md new file mode 100644 index 0000000..aaa1a67 --- /dev/null +++ b/deps/decimal/CHANGELOG.md @@ -0,0 +1,224 @@ +# CHANGELOG + +## v3.1.0 (2026-05-08) + +### Enhancements + +* `Decimal.new/2` now accepts an optional `opts` keyword list and + forwards it to `Decimal.parse/2`, allowing callers to override + `:max_digits` and `:max_exponent` when constructing a decimal from + a string. + +### Bug fixes + +* Fix infinite loop in `Decimal.to_integer/1` when the coefficient is + zero and the exponent is negative (e.g. `Decimal.new("0.0")`). Such + values now correctly convert to the integer `0`. + +## v3.0.0 (2026-05-07) + +### Note on the new defaults + +The new decimal128 defaults are more than sufficient for currency and +other real-world numeric use cases. With `precision: 34` and a scale of +2 (two digits after the decimal point for cents), values from `0.00` up +to roughly `99_999_999_999_999_999_999_999_999_999_999.99` (~10³², 100 +nonillion) round-trip without rounding. 
Most upgrades from 2.x require +no code changes. + +### Security + +* Make the v2.4.0 mitigations for CVE-2026-32686 the default. The + default `Decimal.Context` and the public parse, cast, and to_string + functions now follow IEEE 754 decimal128 limits, rejecting inputs + such as `1e1000000000` without materializing them. + +### Breaking changes + +* `Decimal.Context` defaults change from precision `28` and unbounded + `emax`/`emin` to decimal128 values: `precision: 34`, `emax: 6_144`, + `emin: -6_143`. Operation results whose adjusted exponent leaves that + band signal overflow or underflow. +* `Decimal.parse/1` and `Decimal.cast/1` reject inputs whose digit count + exceeds `34` (decimal128 precision) or whose absolute exponent exceeds + `6_144` (decimal128 emax). Use `parse/2` / `cast/2` with + `max_digits: :infinity` and `max_exponent: :infinity` to restore + unbounded behavior. +* `Decimal.parse/2` and `Decimal.cast/2` default `:max_digits` to `34` + and `:max_exponent` to `6_144` when not specified. +* `Decimal.to_string/2` and `Decimal.to_string/3` raise `ArgumentError` + when the rendered output would exceed `6_178` digit characters + (precision + emax — the worst-case `:normal` width of any in-range + decimal128 value). `Inspect`, `String.Chars`, and `JSON.Encoder` + protocol implementations pass `max_digits: :infinity` so debug output + always succeeds. + +## v2.4.0 (2026-05-07) + +### Security + +* Mitigate exponent amplification (CVE-2026-32686). + Compact inputs such as `1e1000000` could force multi-second expansions + during arithmetic, parsing, normalization, comparison, or formatting. + `Decimal.add/2` and `Decimal.sub/2` now scale operands to `precision + 2` + digits with a sticky bit instead of materializing the full coefficient. 
+ +### Enhancements + +* Add `:max_digits` and `:max_exponent` options to `Decimal.parse/2` and + `Decimal.cast/2` to reject pathological inputs without expansion +* Add `:max_digits` option to `Decimal.to_string/3` to cap formatted output + before materialization +* Add `:emax` and `:emin` fields to `Decimal.Context` for IBM General Decimal + Arithmetic-style overflow and underflow signaling +* Optimize hot paths for large decimals: `coef_length`, `normalize`, + `to_integer`, `integer?`, parsing, and large-coefficient string formatting + +## v2.3.0 (2024-12-13) + +* Implement the upcoming [`JSON.Encoder`](https://hexdocs.pm/elixir/main/JSON.Encoder.html) + protocol + +## v2.2.0 (2024-11-13) + +* Add `Decimal.gte?/2` and `Decimal.lte?/2` +* Add `Decimal.compare/3` and `Decimal.eq?/3` with threshold as parameter + +## v2.1.1 (2023-04-26) + +Decimal v2.1 requires Elixir v1.8+. + +### Bug fixes + +* Fix `Decimal.compare/2` when comparing against `0` + +## v2.1.0 (2023-04-26) + +Decimal v2.1 requires Elixir v1.8+. + +### Enhancements + +* Improve error message from `Decimal.to_integer/1` during precision loss +* `Inspect` protocol implementation returns strings in the `Decimal.new(...)` format +* Add `Decimal.scale/1` +* Optimize `Decimal.compare/2` for numbers with large exponents + +### Bug fixes + +* Fix `Decimal.integer?/1` spec +* Fix `Decimal.integer?/1` check on 0 with >1 significant digits + +## v2.0.0 (2020-09-08) + +Decimal v2.0 requires Elixir v1.2+. 
+ +### Enhancements + +* Add `Decimal.integer?/1` + +### Breaking changes + +* Change `Decimal.compare/2` to return `:lt | :eq | :gt` +* Change `Decimal.cast/1` to return `{:ok, t} | :error` +* Change `Decimal.parse/1` to return `{t, binary} | :error` +* Remove `:message` and `:result` fields from `Decimal.Error` +* Remove sNaN +* Rename qNaN to NaN +* Remove deprecated support for floats in `Decimal.new/1` +* Remove deprecated `Decimal.minus/1` +* Remove deprecated `Decimal.plus/1` +* Remove deprecated `Decimal.reduce/1` +* Remove deprecated `Decimal.with_context/2`, `Decimal.get_context/1`, `Decimal.set_context/1`, + and `Decimal.update_context/1` +* Remove deprecated `Decimal.decimal?/1` + +### Deprecations + +* Deprecate `Decimal.cmp/2` + +## v1.9.0 (2020-09-08) + +### Enhancements + +* Add `Decimal.negate/1` +* Add `Decimal.apply_context/1` +* Add `Decimal.normalize/1` +* Add `Decimal.Context.with/2`, `Decimal.Context.get/1`, `Decimal.Context.set/2`, + and `Decimal.Context.update/1` +* Add `Decimal.is_decimal/1` + +### Deprecations + +* Deprecate `Decimal.minus/1` in favour of the new `Decimal.negate/1` +* Deprecate `Decimal.plus/1` in favour of the new `Decimal.apply_context/1` +* Deprecate `Decimal.reduce/1` in favour of the new `Decimal.normalize/1` +* Deprecate `Decimal.with_context/2`, `Decimal.get_context/1`, `Decimal.set_context/2`, + and `Decimal.update_context/1` in favour of new functions on the `Decimal.Context` module +* Deprecate `Decimal.decimal?/1` in favour of the new `Decimal.is_decimal/1` + +## v1.8.1 (2019-12-20) + +### Bug fixes + +* Fix Decimal.compare/2 with string arguments +* Set :signal on error + +## v1.8.0 (2019-06-24) + +### Enhancements + +* Add `Decimal.cast/1` +* Add `Decimal.eq?/2`, `Decimal.gt?/2`, and `Decimal.lt?/2` +* Add guards to `Decimal.new/3` to prevent invalid Decimal numbers + +## v1.7.0 (2019-02-16) + +### Enhancements + +* Add `Decimal.sqrt/1` + +## v1.6.0 (2018-11-22) + +### Enhancements + +* Support for canonical 
XSD representation on `Decimal.to_string/2` + +### Bugfixes + +* Fix exponent off-by-one when converting from decimal to float +* Fix negative?/1 and positive?/1 specs + +### Deprecations + +* Deprecate passing float to `Decimal.new/1` in favor of `Decimal.from_float/1` + +## v1.5.0 (2018-03-24) + +### Enhancements + +* Add `Decimal.positive?/1` and `Decimal.negative?/1` +* Accept integers and strings in arithmetic functions, e.g.: `Decimal.add(1, "2.0")` +* Add `Decimal.from_float/1` + +### Soft deprecations (no warnings emitted) + +* Soft deprecate passing float to `new/1` in favor of `from_float/1` + +## v1.4.1 (2017-10-12) + +### Bugfixes + +* Include the given value as part of the error reason +* Fix `:half_even` `:lists.last` bug (empty signif) +* Fix error message for round +* Fix `:half_down` rounding error when remainder is greater than 5 +* Fix `Decimal.new/1` float conversion with bigger precision than 4 +* Fix precision default value + +## v1.4.0 (2017-06-25) + +### Bugfixes + +* Fix `Decimal.to_integer/1` for large coefficients +* Fix rounding of ~0 values +* Fix errors when comparing and adding two infinities diff --git a/deps/decimal/LICENSE.txt b/deps/decimal/LICENSE.txt new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/deps/decimal/LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/deps/decimal/README.md b/deps/decimal/README.md new file mode 100644 index 0000000..911b010 --- /dev/null +++ b/deps/decimal/README.md @@ -0,0 +1,261 @@ +# Decimal + +Arbitrary precision decimal arithmetic. + +## Concept + +Decimal represents values internally using three integers: a sign, a coefficient, and an exponent. +In this way, numbers of any size and with any number of decimal places can be represented exactly. 
+ +```elixir +Decimal.new(_sign = 1, _coefficient = 42, _exponent = 0) #=> Decimal.new("42") +Decimal.new(-1, 42, 0) #=> Decimal.new("-42") +Decimal.new(1, 42, -1) #=> Decimal.new("4.2") +Decimal.new(1, 42, -20) #=> Decimal.new("4.2E-19") +Decimal.new(1, 42, 20) #=> Decimal.new("4.2E+21") +Decimal.new(1, 123456789987654321, -9) #=> Decimal.new("123456789.987654321") +``` + +For calculations, the amount of desired precision - that is, the number of +decimal digits in the coefficient - can be specified. + +## Handling untrusted input + +Decimal can represent compact values with very large exponents, such as +`1e1000000`. These values are valid decimals, but some operations can require +memory or CPU proportional to the expanded size of the number. This matters when +decimals are parsed from user input, JSON payloads, form fields, database +fields, or other external data. + +Use bounded parsing for untrusted input: + +```elixir +Decimal.parse(input, max_digits: 100, max_exponent: 1000) +Decimal.cast(input, max_digits: 100, max_exponent: 1000) +``` + +Use bounded output when rendering decimals in formats that may expand the +exponent: + +```elixir +Decimal.to_string(decimal, :normal, max_digits: 1000) +Decimal.to_string(decimal, :xsd, max_digits: 1000) +``` + +The default scientific string format is compact for large positive exponents, +but `:normal` and `:xsd` output can materialize many zeroes. APIs that convert +to an integer or otherwise need the expanded value may also be expensive for +large exponents. `Decimal.Context` supports finite `emax` and `emin` values to +limit operation results, but context limits do not validate already-created +decimals and should not replace parse/cast limits for untrusted input. + +## Usage + +Add Decimal as a dependency in your `mix.exs` file: + +```elixir +def deps do + [{:decimal, "~> 2.0"}] +end +``` + +Next, run `mix deps.get` in your shell to fetch and compile `Decimal`. 
Start an +interactive Elixir shell with `iex -S mix`: + +```elixir +iex> alias Decimal, as: D +iex> D.add(6, 7) +Decimal.new("13") +iex> D.div(1, 3) +Decimal.new("0.3333333333333333333333333333") +iex> D.new("0.33") +Decimal.new("0.33") +``` + +## Examples + +### Using the context + +The context specifies the maximum precision of the result of calculations and +the rounding algorithm if the result has a higher precision than the specified +maximum. It also holds the list of trap enablers and the current set +flags. + +The context is stored in the process dictionary. You don't have to pass the +context around explicitly and the flags will be updated automatically. + +The context is accessed with `Decimal.Context.get/0` and set with +`Decimal.Context.set/1`. It can be set temporarily with +`Decimal.Context.with/2`. + +```elixir +iex> D.Context.get() +%Decimal.Context{ + precision: 9, + rounding: :half_up, + emax: :infinity, + emin: :infinity, + flags: [:rounded, :inexact], + traps: [:invalid_operation, :division_by_zero] +} + +iex> D.Context.with(%D.Context{precision: 2}, fn -> IO.inspect D.Context.get() end) +%Decimal.Context{ + precision: 2, + rounding: :half_up, + emax: :infinity, + emin: :infinity, + flags: [], + traps: [:invalid_operation, :division_by_zero] +} +%Decimal.Context{ + precision: 2, + rounding: :half_up, + emax: :infinity, + emin: :infinity, + flags: [], + traps: [:invalid_operation, :division_by_zero] +} + +iex> D.Context.set(%D.Context{D.Context.get() | traps: []}) +:ok + +iex> D.Context.get() +%Decimal.Context{ + precision: 9, + rounding: :half_up, + emax: :infinity, + emin: :infinity, + flags: [:rounded, :inexact], + traps: [] +} +``` + +### Precision and rounding + +Use `:precision` option to limit the amount of decimal digits in the +coefficient of any calculation result: + +```elixir +iex> D.Context.set(%D.Context{D.Context.get() | precision: 9}) +:ok + +iex> D.div(100, 3) +Decimal.new("33.3333333") + +iex> 
D.Context.set(%D.Context{D.Context.get() | precision: 2}) +:ok + +iex> D.div(100, 3) +Decimal.new("33") +``` + +The `:rounding` option specifies the algorithm and precision of the rounding +operation: + +```elixir +iex> D.Context.set(%D.Context{D.Context.get() | rounding: :half_up}) +:ok + +iex> D.div(31, 2) +Decimal.new("16") + +iex> D.Context.set(%D.Context{D.Context.get() | rounding: :floor}) +:ok + +iex> D.div(31, 2) +Decimal.new("15") +``` + +### Comparisons + +Using comparison operators (`<`, `==`, `>`) with two or more decimal numbers may +not produce an accurate result. Instead, use comparison functions. + +```elixir +iex> D.compare(-1, 0) +:lt +iex> D.compare(0, -1) +:gt +iex> D.compare(0, 0) +:eq + +iex> D.equal?(-1, 0) +false +iex> D.equal?(0, "0.0") +true +``` + +### Flags and trap enablers + +When an exceptional condition is signalled, its flag is set in the current +context. `Decimal.Error` will be raised if the trap enabler is set. + +```elixir +iex> D.Context.set(%D.Context{D.Context.get() | rounding: :floor, precision: 2}) +:ok + +iex> D.Context.get().traps +[:invalid_operation, :division_by_zero] + +iex> D.Context.get().flags +[] + +iex> D.div(31, 2) +Decimal.new("15") + +iex> D.Context.get().flags +[:inexact, :rounded] +``` + +`:inexact` and `:rounded` flags were signalled above because the result of the +operation was inexact given the context's precision and had to be rounded to +fit the precision. `Decimal.Error` was not raised because the signals' trap +enablers weren't set. 
+ +```elixir +iex> D.Context.set(%D.Context{D.Context.get() | traps: D.Context.get().traps ++ [:inexact]}) +:ok + +iex> D.div(31, 2) +** (Decimal.Error) +``` + +The default trap enablers, such as `:division_by_zero`, can be unset: + +```elixir +iex> D.Context.get().traps +[:invalid_operation, :division_by_zero] + +iex> D.div(42, 0) +** (Decimal.Error) + +iex> D.Context.set(%D.Context{D.Context.get() | traps: [], flags: []}) +:ok + +iex> D.div(42, 0) +Decimal.new("Infinity") + +iex> D.Context.get().flags +[:division_by_zero] +``` + +### Mitigating rounding errors + +TODO + +## License + + Copyright 2013 Eric Meadows-Jönsson + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deps/decimal/hex_metadata.config b/deps/decimal/hex_metadata.config new file mode 100644 index 0000000..c3dc1e3 --- /dev/null +++ b/deps/decimal/hex_metadata.config @@ -0,0 +1,14 @@ +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/ericmj/decimal">>}]}. +{<<"name">>,<<"decimal">>}. +{<<"version">>,<<"3.1.0">>}. +{<<"description">>,<<"Arbitrary precision decimal arithmetic.">>}. +{<<"elixir">>,<<"~> 1.12">>}. +{<<"files">>, + [<<"lib">>,<<"lib/decimal">>,<<"lib/decimal/error.ex">>, + <<"lib/decimal/context.ex">>,<<"lib/decimal/macros.ex">>, + <<"lib/decimal.ex">>,<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>, + <<"LICENSE.txt">>,<<"CHANGELOG.md">>]}. +{<<"app">>,<<"decimal">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>,[]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/decimal/lib/decimal.ex b/deps/decimal/lib/decimal.ex new file mode 100644 index 0000000..79d95ee --- /dev/null +++ b/deps/decimal/lib/decimal.ex @@ -0,0 +1,2849 @@ +defmodule Decimal do + @moduledoc """ + Decimal arithmetic on arbitrary precision floating-point numbers. + + A number is represented by a signed coefficient and exponent such that: `sign + * coefficient * 10 ^ exponent`. All numbers are represented and calculated + exactly, but the result of an operation may be rounded depending on the + context the operation is performed with, see: `Decimal.Context`. Trailing + zeros in the coefficient are never truncated to preserve the number of + significant digits unless explicitly done so. + + There are also special values such as NaN (not a number) and ±Infinity. + -0 and +0 are two distinct values. + Some operation results are not defined and will return NaN. + This kind of NaN is quiet, any operation returning a number will return + NaN when given a quiet NaN (the NaN value will flow through all operations). + + Exceptional conditions are grouped into signals, each signal has a flag and a + trap enabler in the context. Whenever a signal is triggered it's flag is set + in the context and will be set until explicitly cleared. If the signal is trap + enabled `Decimal.Error` will be raised. + + ## Specifications + + * [IBM's General Decimal Arithmetic Specification](http://speleotrove.com/decimal/decarith.html) + * [IEEE standard 854-1987](http://web.archive.org/web/20150908012941/http://754r.ucbtest.org/standards/854.pdf) + + This library follows the above specifications for reference of arithmetic + operation implementations, but the public APIs may differ to provide a + more idiomatic Elixir interface. + + The specification models the sign of the number as 1, for a negative number, + and 0 for a positive number. 
Internally this implementation models the sign as + 1 or -1 such that the complete number will be `sign * coefficient * + 10 ^ exponent` and will refer to the sign in documentation as either *positive* + or *negative*. + + The default `Decimal.Context` follows IEEE 754 decimal128: `precision` is + 34, `emax` is 6 144, and `emin` is -6 143. Operation results whose adjusted + exponent leaves that band signal overflow or underflow. Clamped is still + not signalled. + + ## Large exponents and untrusted input + + Decimal can represent compact values with very large exponents, such as + `1e1000000`. These values are valid decimals, but some APIs may need memory + or CPU proportional to the expanded size of the number. + + `parse/1`, `parse/2`, `cast/1`, `cast/2`, `to_string/2`, and `to_string/3` + apply IEEE 754 decimal128 limits by default: `:max_digits` of 34, + `:max_exponent` of 6 144, and a `:max_digits` for output of 6 178 + (precision + emax — large enough to render any in-range decimal128 in any + format). These defaults reject the pathological inputs described in + CVE-2026-32686 without materializing them. Pass options on the explicit + arities to override; pass `:infinity` to disable a limit entirely. + + ## Protocol Implementations + + `Decimal` implements the following protocols: + + ### `Inspect` + + iex> inspect(Decimal.new("1.00")) + "Decimal.new(\\"1.00\\")" + + ### `String.Chars` + + iex> to_string(Decimal.new("1.00")) + "1.00" + + ### `JSON.Encoder` + + _(If running Elixir 1.18+.)_ + + By default, decimals are encoded as strings to preserve precision: + + iex> JSON.encode!(Decimal.new("1.00")) + "\\"1.00\\"" + + To change that, pass a custom encoder to `JSON.encode!/2`. 
The following encodes + decimals as floats: + + iex> encoder = fn + ...> %Decimal{} = decimal, _encoder -> + ...> if Decimal.inf?(decimal) or Decimal.nan?(decimal) do + ...> raise ArgumentError, "\#{inspect(decimal)} cannot be encoded to JSON" + ...> end + ...> + ...> Decimal.to_string(decimal) + ...> + ...> other, encoder -> + ...> JSON.protocol_encode(other, encoder) + ...> end + ...> + iex> JSON.encode!(%{x: Decimal.new("1.00")}, encoder) + "{\\"x\\":1.00}" + + """ + + import Bitwise + import Kernel, except: [abs: 1, div: 2, max: 2, min: 2, rem: 2, round: 1] + import Decimal.Macros + alias Decimal.Context + alias Decimal.Error + + @power_of_2_to_52 4_503_599_627_370_496 + + @typedoc """ + The coefficient of the power of `10`. Non-negative because the sign is stored separately in `sign`. + + * `non_neg_integer` - when the `t` represents a number, instead of one of the special values below. + * `:NaN` - Not a Number. + * `:inf` - Infinity. + + """ + @type coefficient :: non_neg_integer | :NaN | :inf + + @typedoc """ + The exponent to which `10` is raised. + """ + @type exponent :: integer + + @typedoc """ + + * `1` for positive + * `-1` for negative + + """ + @type sign :: 1 | -1 + + @type signal :: + :invalid_operation + | :division_by_zero + | :rounded + | :inexact + | :overflow + | :underflow + + @type compare_result :: + :lt | :gt | :eq + + @typedoc """ + Rounding algorithm. + + See `Decimal.Context` for more information. + """ + @type rounding :: + :down + | :half_up + | :half_even + | :ceiling + | :floor + | :half_down + | :up + + @type parse_option :: + {:max_digits, non_neg_integer | :infinity} + | {:max_exponent, non_neg_integer | :infinity} + + @type to_string_option :: + {:max_digits, non_neg_integer | :infinity} + + # IEEE 754 decimal128 defaults: precision = 34, emax = 6_144, emin = -6_143. + # The to_string default is precision + emax (34 + 6_144), which is the + # worst-case `:normal` digit-character count for any in-range decimal128 + # value. 
+ @default_max_digits 34 + @default_max_exponent 6_144 + @default_to_string_max_digits 6_178 + + # Below 10^2000 the BIF `:erlang.integer_to_binary/1` is fast enough; for + # larger integers `integer_to_decimal_iodata/3` recursively splits on a + # power of 10 (down to chunks of `@decimal_conversion_leaf_digits` digits) + # to avoid the quadratic cost of the BIF on very large bignums. + @decimal_conversion_direct_limit :erlang.binary_to_integer("1" <> String.duplicate("0", 2_000)) + @decimal_conversion_leaf_digits 1_024 + + # Rational approximation of log10(2) used by `integer_decimal_digit_count/1` + # to estimate decimal digit count from bit length: + # + # log10(2) ≈ 0.30102999566398119521... + # @log10_2_num = round(log10(2) * 2^48) = 84_732_411_018_728 + # @log10_2_den = 2^48 = 281_474_976_710_656 + # + # 2^48 keeps both constants below 2^47/2^48 so `(bits - 1) * @log10_2_num` + # stays a cheap small-bignum multiply, while the approximation is exact + # enough that `digits = div((bits - 1) * num, den) + 1` is off by at most + # one for any bit length we care about; the caller then nudges by ±1. + @log10_2_num 84_732_411_018_728 + @log10_2_den 281_474_976_710_656 + @normalize_chunk 16 + @normalize_chunk_pow 10_000_000_000_000_000 + + @typedoc """ + This implementation models the `sign` as `1` or `-1` such that the complete number will be: `sign * coef * 10 ^ exp`. + + * `coef` - the coefficient of the power of `10`. + * `exp` - the exponent of the power of `10`. + * `sign` - `1` for positive, `-1` for negative. 
+ + """ + @type t :: %__MODULE__{ + sign: sign, + coef: coefficient, + exp: exponent + } + + @type decimal :: t | integer | String.t() + + defstruct sign: 1, coef: 0, exp: 0 + + defmacrop error(flags, reason, result, context \\ nil) do + quote bind_quoted: binding() do + case handle_error(flags, reason, result, context) do + {:ok, result} -> result + {:error, error} -> raise Error, error + end + end + end + + @doc """ + Returns `true` if number is NaN, otherwise `false`. + + ## Examples + + iex> Decimal.nan?(Decimal.new("NaN")) + true + + iex> Decimal.nan?(Decimal.new(42)) + false + + """ + @spec nan?(t) :: boolean + def nan?(%Decimal{coef: :NaN}), do: true + def nan?(%Decimal{}), do: false + + @doc """ + Returns `true` if number is ±Infinity, otherwise `false`. + + ## Examples + + iex> Decimal.inf?(Decimal.new("+Infinity")) + true + + iex> Decimal.inf?(Decimal.new("-Infinity")) + true + + iex> Decimal.inf?(Decimal.new("1.5")) + false + + """ + @spec inf?(t) :: boolean + def inf?(%Decimal{coef: :inf}), do: true + def inf?(%Decimal{}), do: false + + @doc """ + Returns `true` if argument is a decimal number, otherwise `false`. + + ## Examples + + iex> Decimal.is_decimal(Decimal.new(42)) + true + + iex> Decimal.is_decimal(42) + false + + Allowed in guard tests on OTP 21+. 
+ """ + doc_since("1.9.0") + defmacro is_decimal(term) + + if function_exported?(:erlang, :is_map_key, 2) do + defmacro is_decimal(term) do + case __CALLER__.context do + nil -> + quote do + case unquote(term) do + %Decimal{} -> true + _ -> false + end + end + + :match -> + raise ArgumentError, + "invalid expression in match, is_decimal is not allowed in patterns " <> + "such as function clauses, case clauses or on the left side of the = operator" + + :guard -> + quote do + is_map(unquote(term)) and :erlang.is_map_key(:__struct__, unquote(term)) and + :erlang.map_get(:__struct__, unquote(term)) == Decimal + end + end + end + else + # TODO: remove when we require Elixir v1.10 + defmacro is_decimal(term) do + quote do + case unquote(term) do + %Decimal{} -> true + _ -> false + end + end + end + end + + @doc """ + The absolute value of given number. Sets the number's sign to positive. + + ## Examples + + iex> Decimal.abs(Decimal.new("1")) + Decimal.new("1") + + iex> Decimal.abs(Decimal.new("-1")) + Decimal.new("1") + + iex> Decimal.abs(Decimal.new("NaN")) + Decimal.new("NaN") + + """ + @spec abs(t) :: t + def abs(%Decimal{coef: :NaN} = num), do: %{num | sign: 1} + def abs(%Decimal{} = num), do: context(%{num | sign: 1}) + + @doc """ + Adds two numbers together. + + ## Exceptional conditions + + * If one number is -Infinity and the other +Infinity, `:invalid_operation` will + be signalled. 
+ + ## Examples + + iex> Decimal.add(1, "1.1") + Decimal.new("2.1") + + iex> Decimal.add(1, "Inf") + Decimal.new("Infinity") + + """ + @spec add(decimal, decimal) :: t + def add(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def add(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def add(%Decimal{coef: :inf, sign: sign} = num1, %Decimal{coef: :inf, sign: sign} = num2) do + if num1.exp > num2.exp do + num1 + else + num2 + end + end + + def add(%Decimal{coef: :inf}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "adding +Infinity and -Infinity", %Decimal{coef: :NaN}) + + def add(%Decimal{coef: :inf} = num1, %Decimal{}), do: num1 + + def add(%Decimal{}, %Decimal{coef: :inf} = num2), do: num2 + + def add(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + + cond do + coef1 == 0 and coef2 == 0 -> + sign = add_sign(sign1, sign2, 0) + context(%Decimal{sign: sign, coef: 0, exp: Kernel.min(exp1, exp2)}) + + coef1 == 0 -> + add_zero(num1, num2) + + coef2 == 0 -> + add_zero(num2, num1) + + add_bounded?(num1, num2) -> + add_bounded(num1, num2) + + true -> + {coef1, coef2} = add_align(coef1, exp1, coef2, exp2) + coef = sign1 * coef1 + sign2 * coef2 + exp = Kernel.min(exp1, exp2) + sign = add_sign(sign1, sign2, coef) + context(%Decimal{sign: sign, coef: Kernel.abs(coef), exp: exp}) + end + end + + def add(num1, num2), do: add(decimal(num1), decimal(num2)) + + @doc """ + Subtracts second number from the first. Equivalent to `Decimal.add/2` when the + second number's sign is negated. + + ## Exceptional conditions + + * If one number is -Infinity and the other +Infinity `:invalid_operation` will + be signalled. 
  ## Examples

      iex> Decimal.sub(1, "0.1")
      Decimal.new("0.9")

      iex> Decimal.sub(1, "Inf")
      Decimal.new("-Infinity")

  """
  @spec sub(decimal, decimal) :: t
  # Subtraction is addition with the second operand's sign flipped.
  def sub(%Decimal{} = num1, %Decimal{sign: sign} = num2) do
    add(num1, %{num2 | sign: -sign})
  end

  def sub(num1, num2) do
    sub(decimal(num1), decimal(num2))
  end

  @doc """
  Compares two numbers numerically using a threshold. If the first number added
  to the threshold is greater than the second number, and the first number
  subtracted by the threshold is smaller than the second number, then the two
  numbers are considered equal.

  ## Examples

      iex> Decimal.compare("1.1", 1, "0.2")
      :eq

      iex> Decimal.compare("1.2", 1, "0.1")
      :gt

      iex> Decimal.compare("1.0", "1.2", "0.1")
      :lt
  """
  @spec compare(decimal :: decimal(), decimal :: decimal(), threshold :: decimal()) ::
          compare_result()

  # A negative threshold would make the equality window empty; reject it early.
  def compare(_, _, %Decimal{sign: -1}), do: raise(Error, reason: "threshold cannot be negative")

  def compare(%Decimal{} = n1, %Decimal{} = n2, %Decimal{} = threshold) do
    # n2 is considered equal to n1 when it lies in [n1 - threshold, n1 + threshold].
    add_threshold = n1 |> Decimal.add(threshold)
    sub_threshold = n1 |> Decimal.sub(threshold)
    case1 = compare(add_threshold, n2)
    case2 = compare(sub_threshold, n2)

    cond do
      (case1 == :gt or case1 == :eq) and (case2 == :lt or case2 == :eq) -> :eq
      case1 == :gt -> :gt
      case2 == :lt -> :lt
    end
  end

  def compare(n1, n2, threshold), do: compare(decimal(n1), decimal(n2), decimal(threshold))

  @doc """
  Compares two numbers numerically. If the first number is greater than the second
  `:gt` is returned, if less than `:lt` is returned, if both numbers are equal
  `:eq` is returned.

  Neither number can be a NaN.

  ## Examples

      iex> Decimal.compare("1.0", 1)
      :eq

      iex> Decimal.compare("Inf", -1)
      :gt

  """
  @spec compare(decimal, decimal) :: compare_result()
  # Infinities compare by sign alone.
  def compare(%Decimal{coef: :inf, sign: sign}, %Decimal{coef: :inf, sign: sign}),
    do: :eq

  def compare(%Decimal{coef: :inf, sign: sign1}, %Decimal{coef: :inf, sign: sign2})
      when sign1 < sign2,
      do: :lt

  def compare(%Decimal{coef: :inf, sign: sign1}, %Decimal{coef: :inf, sign: sign2})
      when sign1 > sign2,
      do: :gt

  def compare(%Decimal{coef: :inf, sign: 1}, _num2), do: :gt
  def compare(%Decimal{coef: :inf, sign: -1}, _num2), do: :lt

  def compare(_num1, %Decimal{coef: :inf, sign: 1}), do: :lt
  def compare(_num1, %Decimal{coef: :inf, sign: -1}), do: :gt

  # NaN has no ordering; signal :invalid_operation instead of guessing.
  def compare(%Decimal{coef: :NaN} = num1, _num2),
    do: error(:invalid_operation, "operation on NaN", num1)

  def compare(_num1, %Decimal{coef: :NaN} = num2),
    do: error(:invalid_operation, "operation on NaN", num2)

  # Zeros are equal regardless of sign or exponent.
  def compare(%Decimal{coef: 0}, %Decimal{coef: 0}), do: :eq

  def compare(%Decimal{sign: 1}, %Decimal{coef: 0}), do: :gt
  def compare(%Decimal{coef: 0}, %Decimal{sign: 1}), do: :lt
  def compare(%Decimal{sign: -1}, %Decimal{coef: 0}), do: :lt
  def compare(%Decimal{coef: 0}, %Decimal{sign: -1}), do: :gt

  def compare(%Decimal{sign: 1}, %Decimal{sign: -1}), do: :gt
  def compare(%Decimal{sign: -1}, %Decimal{sign: 1}), do: :lt

  # Both finite, non-zero, same sign: compare adjusted (scientific-notation)
  # exponents first; only on a tie pad the coefficients to a common scale and
  # compare them as integers.
  def compare(%Decimal{} = num1, %Decimal{} = num2) do
    adjusted_exp1 = adjust_exp(num1)
    adjusted_exp2 = adjust_exp(num2)

    sign =
      cond do
        adjusted_exp1 == adjusted_exp2 ->
          padded_num1 = pad_num(num1, num1.exp - num2.exp)
          padded_num2 = pad_num(num2, num2.exp - num1.exp)

          cond do
            padded_num1 == padded_num2 -> 0
            padded_num1 < padded_num2 -> -num1.sign
            true -> num1.sign
          end

        adjusted_exp1 < adjusted_exp2 ->
          -num1.sign

        true ->
          num1.sign
      end

    case sign do
      0 -> :eq
      1 -> :gt
      -1 -> :lt
    end
  end

  def compare(num1, num2) do
    compare(decimal(num1), decimal(num2))
  end

  # Exponent of the number written in scientific notation (d.ddd... * 10^adjusted).
  defp adjust_exp(%Decimal{coef: coef, exp: exp}) do
    coef_adjustment = coef_length(coef)
    exp + coef_adjustment - 1
  end

  # Number of base-10 digits in a non-negative coefficient. The guard chain
  # handles values up to 10^18 cheaply; larger values fall through to the
  # generic digit counter.
  defp coef_length(0), do: 1
  defp coef_length(coef) when coef < 10, do: 1
  defp coef_length(coef) when coef < 100, do: 2
  defp coef_length(coef) when coef < 1_000, do: 3
  defp coef_length(coef) when coef < 10_000, do: 4
  defp coef_length(coef) when coef < 100_000, do: 5
  defp coef_length(coef) when coef < 1_000_000, do: 6
  defp coef_length(coef) when coef < 10_000_000, do: 7
  defp coef_length(coef) when coef < 100_000_000, do: 8
  defp coef_length(coef) when coef < 1_000_000_000, do: 9
  defp coef_length(coef) when coef < 10_000_000_000, do: 10
  defp coef_length(coef) when coef < 100_000_000_000, do: 11
  defp coef_length(coef) when coef < 1_000_000_000_000, do: 12
  defp coef_length(coef) when coef < 10_000_000_000_000, do: 13
  defp coef_length(coef) when coef < 100_000_000_000_000, do: 14
  defp coef_length(coef) when coef < 1_000_000_000_000_000, do: 15
  defp coef_length(coef) when coef < 10_000_000_000_000_000, do: 16
  defp coef_length(coef) when coef < 100_000_000_000_000_000, do: 17
  defp coef_length(coef) when coef < 1_000_000_000_000_000_000, do: 18
  defp coef_length(coef), do: integer_decimal_digit_count(coef)

  # Scales a coefficient up by `n` digits (plus one uniform guard digit) so two
  # coefficients with different exponents can be compared as plain integers.
  defp pad_num(%Decimal{coef: coef}, n) do
    coef * pow10(Kernel.max(n, 0) + 1)
  end

  @deprecated "Use compare/2 instead"
  @spec cmp(decimal, decimal) :: :lt | :eq | :gt
  def cmp(num1, num2) do
    compare(num1, num2)
  end

  @doc """
  Compares two numbers numerically and returns `true` if they are equal,
  otherwise `false`. If one of the operands is a quiet NaN this operation
  will always return `false`.

  ## Examples

      iex> Decimal.equal?("1.0", 1)
      true

      iex> Decimal.equal?(1, -1)
      false

  """
  @spec equal?(decimal, decimal) :: boolean
  # Alias of eq?/2, kept for API compatibility.
  def equal?(num1, num2) do
    eq?(num1, num2)
  end

  @doc """
  Compares two numbers numerically and returns `true` if they are equal,
  otherwise `false`. If one of the operands is a quiet NaN this operation
  will always return `false`.

  ## Examples

      iex> Decimal.eq?("1.0", 1)
      true

      iex> Decimal.eq?(1, -1)
      false

  """
  doc_since("1.8.0")
  @spec eq?(decimal, decimal) :: boolean
  # NaN is never equal to anything, including itself.
  def eq?(%Decimal{coef: :NaN}, _num2), do: false
  def eq?(_num1, %Decimal{coef: :NaN}), do: false
  def eq?(num1, num2), do: compare(num1, num2) == :eq

  @doc """
  It compares the equality of two numbers. If the second number is within
  the range of first - threshold and first + threshold, it returns true;
  otherwise, it returns false.

  ## Examples

      iex> Decimal.eq?("1.0", 1, "0")
      true

      iex> Decimal.eq?("1.2", 1, "0.1")
      false

      iex> Decimal.eq?("1.2", 1, "0.2")
      true

      iex> Decimal.eq?(1, -1, "0.0")
      false

  """
  doc_since("2.2.0")
  @spec eq?(decimal :: decimal(), decimal :: decimal(), threshold :: decimal()) :: boolean()
  # NOTE(review): unlike eq?/2, NaN operands are not short-circuited here and
  # flow into compare/3 — confirm whether this asymmetry is intended.
  def eq?(num1, num2, threshold), do: compare(num1, num2, threshold) == :eq

  @doc """
  Compares two numbers numerically and returns `true` if the first argument
  is greater than the second, otherwise `false`. If one the operands is a
  quiet NaN this operation will always return `false`.

  ## Examples

      iex> Decimal.gt?("1.3", "1.2")
      true

      iex> Decimal.gt?("1.2", "1.3")
      false

  """
  doc_since("1.8.0")
  @spec gt?(decimal, decimal) :: boolean
  def gt?(%Decimal{coef: :NaN}, _num2), do: false
  def gt?(_num1, %Decimal{coef: :NaN}), do: false
  def gt?(num1, num2), do: compare(num1, num2) == :gt

  @doc """
  Compares two numbers numerically and returns `true` if the first number is
  less than the second number, otherwise `false`. If one of the operands is a
  quiet NaN this operation will always return `false`.

  ## Examples

      iex> Decimal.lt?("1.1", "1.2")
      true

      iex> Decimal.lt?("1.4", "1.2")
      false

  """
  doc_since("1.8.0")
  @spec lt?(decimal, decimal) :: boolean
  def lt?(%Decimal{coef: :NaN}, _num2), do: false
  def lt?(_num1, %Decimal{coef: :NaN}), do: false
  def lt?(num1, num2), do: compare(num1, num2) == :lt

  @doc """
  Compares two numbers numerically and returns `true` if
  the first argument is greater than or equal the second,
  otherwise `false`.

  If one the operands is a quiet NaN this operation
  will always return `false`.

  ## Examples

      iex> Decimal.gte?("1.3", "1.3")
      true

      iex> Decimal.gte?("1.3", "1.2")
      true

      iex> Decimal.gte?("1.2", "1.3")
      false

  """
  doc_since("2.2.0")
  @spec gte?(decimal, decimal) :: boolean

  def gte?(%Decimal{coef: :NaN}, _num2), do: false
  def gte?(_num1, %Decimal{coef: :NaN}), do: false

  def gte?(num1, num2) do
    case compare(num1, num2) do
      :gt -> true
      :eq -> true
      _ -> false
    end
  end

  @doc """
  Compares two numbers numerically and returns `true` if
  the first number is less than or equal the second number,
  otherwise `false`.

  If one of the operands is a quiet NaN this operation
  will always return `false`.

  ## Examples

      iex> Decimal.lte?("1.1", "1.1")
      true

      iex> Decimal.lte?("1.1", "1.2")
      true

      iex> Decimal.lte?("1.4", "1.2")
      false

  """
  doc_since("2.2.0")
  @spec lte?(decimal, decimal) :: boolean

  def lte?(%Decimal{coef: :NaN}, _num2), do: false
  def lte?(_num1, %Decimal{coef: :NaN}), do: false

  def lte?(num1, num2) do
    case compare(num1, num2) do
      :lt -> true
      :eq -> true
      _ -> false
    end
  end

  @doc """
  Divides two numbers.

  ## Exceptional conditions

    * If both numbers are ±Infinity `:invalid_operation` is signalled.
    * If both numbers are ±0 `:invalid_operation` is signalled.
    * If second number (denominator) is ±0 `:division_by_zero` is signalled.

  ## Examples

      iex> Decimal.div(3, 4)
      Decimal.new("0.75")

      iex> Decimal.div("Inf", -1)
      Decimal.new("-Infinity")

  """
  @spec div(decimal, decimal) :: t
  # NaN propagates through division.
  def div(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1

  def div(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2

  def div(%Decimal{coef: :inf}, %Decimal{coef: :inf}),
    do: error(:invalid_operation, "±Infinity / ±Infinity", %Decimal{coef: :NaN})

  # Infinity divided by a finite number stays infinite; only the sign changes.
  def div(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    %{num1 | sign: sign}
  end

  # A finite number divided by infinity collapses to zero.
  def div(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    # TODO: Subnormal
    # exponent?
    %Decimal{sign: sign, coef: 0, exp: exp1 - exp2}
  end

  def div(%Decimal{coef: 0}, %Decimal{coef: 0}),
    do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN})

  def div(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do
    sign = if sign1 == sign2, do: 1, else: -1
    error(:division_by_zero, nil, %Decimal{sign: sign, coef: :inf})
  end

  def div(%Decimal{} = num1, %Decimal{} = num2) do
    %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1
    %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2
    sign = if sign1 == sign2, do: 1, else: -1

    if coef1 == 0 do
      context(%Decimal{sign: sign, coef: 0, exp: exp1 - exp2}, [])
    else
      # Long division on the integer coefficients, bounded by the current
      # context precision (prec10 = 10^precision); signals from div_calc are
      # handed to context/2 together with the result.
      prec10 = pow10(Context.get().precision)
      {coef1, coef2, adjust} = div_adjust(coef1, coef2, 0)
      {coef, adjust, _rem, signals} = div_calc(coef1, coef2, 0, adjust, prec10)

      context(%Decimal{sign: sign, coef: coef, exp: exp1 - exp2 - adjust}, signals)
    end
  end

  def div(num1, num2) do
    div(decimal(num1), decimal(num2))
  end

  @doc """
  Divides two numbers and returns the integer part.

  ## Exceptional conditions

    * If both numbers are ±Infinity `:invalid_operation` is signalled.
    * If both numbers are ±0 `:invalid_operation` is signalled.
    * If second number (denominator) is ±0 `:division_by_zero` is signalled.

  ## Examples

      iex> Decimal.div_int(5, 2)
      Decimal.new("2")

      iex> Decimal.div_int("Inf", -1)
      Decimal.new("-Infinity")

  """
  @spec div_int(decimal, decimal) :: t
  # NaN propagates.
  def div_int(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1

  def div_int(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2

  def div_int(%Decimal{coef: :inf}, %Decimal{coef: :inf}),
    do: error(:invalid_operation, "±Infinity / ±Infinity", %Decimal{coef: :NaN})

  def div_int(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    %{num1 | sign: sign}
  end

  def div_int(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    # TODO: Subnormal
    # exponent?
    %Decimal{sign: sign, coef: 0, exp: exp1 - exp2}
  end

  def div_int(%Decimal{coef: 0}, %Decimal{coef: 0}),
    do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN})

  def div_int(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do
    div_sign = if sign1 == sign2, do: 1, else: -1
    error(:division_by_zero, nil, %Decimal{sign: div_sign, coef: :inf})
  end

  def div_int(%Decimal{} = num1, %Decimal{} = num2) do
    %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1
    %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2
    div_sign = if sign1 == sign2, do: 1, else: -1

    cond do
      # |num1| < |num2|: the integer quotient is zero.
      compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt ->
        %Decimal{sign: div_sign, coef: 0, exp: exp1 - exp2}

      coef1 == 0 ->
        context(%{num1 | sign: div_sign})

      true ->
        case integer_division(div_sign, coef1, exp1, coef2, exp2) do
          {:ok, result} ->
            result

          {:error, error, reason, num} ->
            error(error, reason, num)
        end
    end
  end

  def div_int(num1, num2) do
    div_int(decimal(num1), decimal(num2))
  end

  @doc """
  Remainder of integer division of two
  numbers. The result will have the sign of
  the first number.

  ## Exceptional conditions

    * If both numbers are ±Infinity `:invalid_operation` is signalled.
    * If both numbers are ±0 `:invalid_operation` is signalled.
    * If second number (denominator) is ±0 `:division_by_zero` is signalled.

  ## Examples

      iex> Decimal.rem(5, 2)
      Decimal.new("1")

  """
  @spec rem(decimal, decimal) :: t
  # NaN propagates.
  def rem(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1

  def rem(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2

  def rem(%Decimal{coef: :inf}, %Decimal{coef: :inf}),
    do: error(:invalid_operation, "±Infinity / ±Infinity", %Decimal{coef: :NaN})

  def rem(%Decimal{sign: sign1, coef: :inf}, %Decimal{}), do: %Decimal{sign: sign1, coef: 0}

  def rem(%Decimal{sign: sign1}, %Decimal{coef: :inf} = num2) do
    # TODO: Subnormal
    # exponent?
    %{num2 | sign: sign1}
  end

  def rem(%Decimal{coef: 0}, %Decimal{coef: 0}),
    do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN})

  def rem(%Decimal{sign: sign1}, %Decimal{coef: 0}),
    do: error(:division_by_zero, nil, %Decimal{sign: sign1, coef: 0})

  def rem(%Decimal{} = num1, %Decimal{} = num2) do
    %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1
    %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2

    cond do
      # |num1| < |num2|: num1 itself is already the remainder.
      compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt ->
        context(%{num1 | sign: sign1})

      coef1 == 0 ->
        context(%{num2 | sign: sign1})

      true ->
        div_sign = if sign1 == sign2, do: 1, else: -1

        # rem(num1, num2) = num1 - num2 * div_int(num1, num2)
        case integer_division(div_sign, coef1, exp1, coef2, exp2) do
          {:ok, result} ->
            sub(num1, mult(num2, result))

          {:error, error, reason, num} ->
            error(error, reason, num)
        end
    end
  end

  def rem(num1, num2) do
    rem(decimal(num1), decimal(num2))
  end

  @doc """
  Integer division of two numbers and the remainder. Should be used when both
  `div_int/2` and `rem/2` is needed. Equivalent to: `{Decimal.div_int(x, y),
  Decimal.rem(x, y)}`.

  ## Exceptional conditions

    * If both numbers are ±Infinity `:invalid_operation` is signalled.
    * If both numbers are ±0 `:invalid_operation` is signalled.
    * If second number (denominator) is ±0 `:division_by_zero` is signalled.

  ## Examples

      iex> Decimal.div_rem(5, 2)
      {Decimal.new(2), Decimal.new(1)}

  """
  @spec div_rem(decimal, decimal) :: {t, t}
  # NaN propagates to both results.
  def div_rem(%Decimal{coef: :NaN} = num1, %Decimal{}), do: {num1, num1}

  def div_rem(%Decimal{}, %Decimal{coef: :NaN} = num2), do: {num2, num2}

  def div_rem(%Decimal{coef: :inf}, %Decimal{coef: :inf}) do
    numbers = {%Decimal{coef: :NaN}, %Decimal{coef: :NaN}}
    error(:invalid_operation, "±Infinity / ±Infinity", numbers)
  end

  # Infinity / finite: infinite quotient, zero remainder with num1's sign.
  def div_rem(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    {%{num1 | sign: sign}, %Decimal{sign: sign1, coef: 0}}
  end

  # Finite / infinity: zero quotient, remainder is num1-signed infinity.
  def div_rem(%Decimal{} = num1, %Decimal{coef: :inf} = num2) do
    %Decimal{sign: sign1, exp: exp1} = num1
    %Decimal{sign: sign2, exp: exp2} = num2

    sign = if sign1 == sign2, do: 1, else: -1
    # TODO: Subnormal
    # exponent?
    {%Decimal{sign: sign, coef: 0, exp: exp1 - exp2}, %{num2 | sign: sign1}}
  end

  def div_rem(%Decimal{coef: 0}, %Decimal{coef: 0}) do
    error = error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN})
    {error, error}
  end

  def div_rem(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do
    div_sign = if sign1 == sign2, do: 1, else: -1
    div_error = error(:division_by_zero, nil, %Decimal{sign: div_sign, coef: :inf})
    rem_error = error(:division_by_zero, nil, %Decimal{sign: sign1, coef: 0})
    {div_error, rem_error}
  end

  def div_rem(%Decimal{} = num1, %Decimal{} = num2) do
    %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1
    %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2
    div_sign = if sign1 == sign2, do: 1, else: -1

    cond do
      # |num1| < |num2|: quotient is zero, remainder is num1.
      compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt ->
        {%Decimal{sign: div_sign, coef: 0, exp: exp1 - exp2}, %{num1 | sign: sign1}}

      coef1 == 0 ->
        {context(%{num1 | sign: div_sign}), context(%{num2 | sign: sign1})}

      true ->
        case integer_division(div_sign, coef1, exp1, coef2, exp2) do
          {:ok, result} ->
            {result, sub(num1, mult(num2, result))}

          {:error, error, reason, num} ->
            error(error, reason, {num, num})
        end
    end
  end

  def div_rem(num1, num2) do
    div_rem(decimal(num1), decimal(num2))
  end

  @doc """
  Compares two values numerically and returns the maximum. Unlike most other
  functions in `Decimal` if a number is NaN the result will be the other number.
  Only if both numbers are NaN will NaN be returned.

  ## Examples

      iex> Decimal.max(1, "2.0")
      Decimal.new("2.0")

      iex> Decimal.max(1, "NaN")
      Decimal.new("1")

      iex> Decimal.max("NaN", "NaN")
      Decimal.new("NaN")

  """
  @spec max(decimal, decimal) :: t
  # NaN loses to any number.
  def max(%Decimal{coef: :NaN}, %Decimal{} = num2), do: num2

  def max(%Decimal{} = num1, %Decimal{coef: :NaN}), do: num1

  def max(%Decimal{sign: sign1, exp: exp1} = num1, %Decimal{sign: sign2, exp: exp2} = num2) do
    case compare(num1, num2) do
      :lt ->
        num2

      :gt ->
        num1

      :eq ->
        # Numerically equal representations: tie-break on sign, then exponent.
        cond do
          sign1 != sign2 ->
            if sign1 == 1, do: num1, else: num2

          sign1 == 1 ->
            if exp1 > exp2, do: num1, else: num2

          sign1 == -1 ->
            if exp1 < exp2, do: num1, else: num2
        end
    end
    |> context()
  end

  def max(num1, num2) do
    max(decimal(num1), decimal(num2))
  end

  @doc """
  Compares two values numerically and returns the minimum. Unlike most other
  functions in `Decimal` if a number is NaN the result will be the other number.
  Only if both numbers are NaN will NaN be returned.

  ## Examples

      iex> Decimal.min(1, "2.0")
      Decimal.new("1")

      iex> Decimal.min(1, "NaN")
      Decimal.new("1")

      iex> Decimal.min("NaN", "NaN")
      Decimal.new("NaN")

  """
  @spec min(decimal, decimal) :: t
  # NaN loses to any number.
  def min(%Decimal{coef: :NaN}, %Decimal{} = num2), do: num2

  def min(%Decimal{} = num1, %Decimal{coef: :NaN}), do: num1

  def min(%Decimal{sign: sign1, exp: exp1} = num1, %Decimal{sign: sign2, exp: exp2} = num2) do
    case compare(num1, num2) do
      :lt ->
        num1

      :gt ->
        num2

      :eq ->
        # Tie-break mirrors max/2 with the preferences reversed.
        cond do
          sign1 != sign2 ->
            if sign1 == -1, do: num1, else: num2

          sign1 == 1 ->
            if exp1 < exp2, do: num1, else: num2

          sign1 == -1 ->
            if exp1 > exp2, do: num1, else: num2
        end
    end
    |> context()
  end

  def min(num1, num2) do
    min(decimal(num1), decimal(num2))
  end

  @doc """
  Negates the given number.

  ## Examples

      iex> Decimal.negate(1)
      Decimal.new("-1")

      iex> Decimal.negate("-Inf")
      Decimal.new("Infinity")

  """
  doc_since("1.9.0")
  @spec negate(decimal) :: t
  def negate(%Decimal{coef: :NaN} = num), do: num
  def negate(%Decimal{sign: sign} = num), do: context(%{num | sign: -sign})
  def negate(num), do: negate(decimal(num))

  @doc """
  Applies the context to the given number rounding it to specified precision.
  """
  doc_since("1.9.0")
  @spec apply_context(t) :: t
  def apply_context(%Decimal{} = num), do: context(num)

  @doc """
  Returns `true` if given number is positive, otherwise `false`.

  ## Examples

      iex> Decimal.positive?(Decimal.new("42"))
      true

      iex> Decimal.positive?(Decimal.new("-42"))
      false

      iex> Decimal.positive?(Decimal.new("0"))
      false

      iex> Decimal.positive?(Decimal.new("NaN"))
      false

  """
  doc_since("1.5.0")
  @spec positive?(t) :: boolean
  # Zero and NaN count as neither positive nor negative.
  def positive?(%Decimal{coef: :NaN}), do: false
  def positive?(%Decimal{coef: 0}), do: false
  def positive?(%Decimal{sign: -1}), do: false
  def positive?(%Decimal{sign: 1}), do: true

  @doc """
  Returns `true` if given number is negative, otherwise `false`.

  ## Examples

      iex> Decimal.negative?(Decimal.new("-42"))
      true

      iex> Decimal.negative?(Decimal.new("42"))
      false

      iex> Decimal.negative?(Decimal.new("0"))
      false

      iex> Decimal.negative?(Decimal.new("NaN"))
      false

  """
  doc_since("1.5.0")
  @spec negative?(t) :: boolean
  def negative?(%Decimal{coef: :NaN}), do: false
  def negative?(%Decimal{coef: 0}), do: false
  def negative?(%Decimal{sign: 1}), do: false
  def negative?(%Decimal{sign: -1}), do: true

  @doc """
  Multiplies two numbers.

  ## Exceptional conditions

    * If one number is ±0 and the other is ±Infinity `:invalid_operation` is
      signalled.

  ## Examples

      iex> Decimal.mult("0.5", 3)
      Decimal.new("1.5")

      iex> Decimal.mult("Inf", -1)
      Decimal.new("-Infinity")

  """
  @spec mult(decimal, decimal) :: t
  # NaN propagates.
  def mult(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1

  def mult(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2

  # 0 * Infinity has no meaningful value.
  def mult(%Decimal{coef: 0}, %Decimal{coef: :inf}),
    do: error(:invalid_operation, "0 * ±Infinity", %Decimal{coef: :NaN})

  def mult(%Decimal{coef: :inf}, %Decimal{coef: 0}),
    do: error(:invalid_operation, "0 * ±Infinity", %Decimal{coef: :NaN})

  def mult(%Decimal{sign: sign1, coef: :inf, exp: exp1}, %Decimal{sign: sign2, exp: exp2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    # exponent?
    %Decimal{sign: sign, coef: :inf, exp: exp1 + exp2}
  end

  def mult(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do
    sign = if sign1 == sign2, do: 1, else: -1
    # exponent?
    %Decimal{sign: sign, coef: :inf, exp: exp1 + exp2}
  end

  # Finite case: multiply coefficients, add exponents, round via the context.
  def mult(%Decimal{} = num1, %Decimal{} = num2) do
    %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1
    %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2
    sign = if sign1 == sign2, do: 1, else: -1
    %Decimal{sign: sign, coef: coef1 * coef2, exp: exp1 + exp2} |> context()
  end

  def mult(num1, num2) do
    mult(decimal(num1), decimal(num2))
  end

  @doc """
  Normalizes the given decimal: removes trailing zeros from coefficient while
  keeping the number numerically equivalent by increasing the exponent.

  ## Examples

      iex> Decimal.normalize(Decimal.new("1.00"))
      Decimal.new("1")

      iex> Decimal.normalize(Decimal.new("1.01"))
      Decimal.new("1.01")

  """
  doc_since("1.9.0")
  @spec normalize(t) :: t
  def normalize(%Decimal{coef: :NaN} = num), do: num

  def normalize(%Decimal{coef: :inf} = num) do
    # exponent?
    %{num | exp: 0}
  end

  def normalize(%Decimal{sign: sign, coef: coef, exp: exp}) do
    if coef == 0 do
      # Canonical zero: exponent collapses to 0.
      %Decimal{sign: sign, coef: 0, exp: 0}
    else
      %{do_normalize(coef, exp) | sign: sign} |> context
    end
  end

  @doc """
  Rounds the given number to specified decimal places with the given strategy
  (default is to round to nearest one). If places is negative, at least that
  many digits to the left of the decimal point will be zero.

  See `Decimal.Context` for more information about rounding algorithms.

  ## Examples

      iex> Decimal.round("1.234")
      Decimal.new("1")

      iex> Decimal.round("1.234", 1)
      Decimal.new("1.2")

  """
  @spec round(decimal, integer, rounding) :: t
  def round(num, places \\ 0, mode \\ :half_up)

  # NaN and infinity are returned unchanged.
  def round(%Decimal{coef: :NaN} = num, _, _), do: num

  def round(%Decimal{coef: :inf} = num, _, _), do: num

  def round(%Decimal{} = num, n, mode) do
    # Normalize first so trailing zeros do not influence the digit list.
    %Decimal{sign: sign, coef: coef, exp: exp} = normalize(num)
    digits = :erlang.integer_to_list(coef)
    target_exp = -n
    value = do_round(sign, digits, exp, target_exp, mode)
    context(value, [])
  end

  def round(num, n, mode) do
    round(decimal(num), n, mode)
  end

  @doc """
  Finds the square root.

  ## Examples

      iex> Decimal.sqrt("100")
      Decimal.new("10")

  """
  doc_since("1.7.0")
  @spec sqrt(decimal) :: t
  def sqrt(%Decimal{coef: :NaN} = num),
    do: error(:invalid_operation, "operation on NaN", num)

  # sqrt(±0): keep the zero, halve the exponent.
  def sqrt(%Decimal{coef: 0, exp: exp} = num),
    do: %{num | exp: exp >>> 1}

  def sqrt(%Decimal{sign: -1} = num),
    do: error(:invalid_operation, "less than zero", num)

  def sqrt(%Decimal{sign: 1, coef: :inf} = num),
    do: num

  def sqrt(%Decimal{sign: 1, coef: coef, exp: exp}) do
    precision = Context.get().precision + 1
    digits = :erlang.integer_to_list(coef)
    num_digits = length(digits)

    # Since the root is calculated from integer operations only, it must be
    # large enough to contain the desired precision.
    # Calculate the amount of
    # `shift` required (powers of 10).
    case exp &&& 1 do
      0 ->
        # To get the desired `shift`, subtract the precision of `coef`'s square
        # root from the desired precision.
        #
        # If `coef` is 10_000, the root is 100 (3 digits of precision).
        # If `coef` is 100, the root is 10 (2 digits of precision).
        shift = precision - ((num_digits + 1) >>> 1)
        sqrt(coef, shift, exp)

      _ ->
        # If `exp` is odd, multiply `coef` by 10 and reduce shift by 1/2. `exp`
        # must be even so the root's exponent is an integer.
        shift = precision - ((num_digits >>> 1) + 1)
        sqrt(coef * 10, shift, exp)
    end
  end

  def sqrt(num) do
    sqrt(decimal(num))
  end

  # Scales the coefficient by 10^(2*shift) so the integer square root carries
  # enough digits, tracking whether the scaling was exact.
  defp sqrt(coef, shift, exp) do
    if shift >= 0 do
      # shift `coef` up by `shift * 2` digits
      sqrt(coef * pow10(shift <<< 1), shift, exp, true)
    else
      # shift `coef` down by `shift * 2` digits
      operand = pow10(-shift <<< 1)
      sqrt(Kernel.div(coef, operand), shift, exp, Kernel.rem(coef, operand) === 0)
    end
  end

  defp sqrt(shifted_coef, shift, exp, exact) do
    # the preferred exponent is `exp / 2` as per IEEE 754
    exp = exp >>> 1
    # guess a root 10x higher than desired precision
    guess = pow10(Context.get().precision + 1)
    root = sqrt_loop(shifted_coef, guess)

    if exact and root * root === shifted_coef do
      # if the root is exact, use preferred `exp` and shift `coef` to match
      coef =
        if shift >= 0,
          do: Kernel.div(root, pow10(shift)),
          else: root * pow10(-shift)

      context(%Decimal{sign: 1, coef: coef, exp: exp})
    else
      # otherwise the calculated root is inexact (but still meets precision),
      # so use the root as `coef` and get the final exponent by shifting `exp`
      context(%Decimal{sign: 1, coef: root, exp: exp - shift})
    end
  end

  # Babylonian method: iterate guess <- (guess + coef/guess) / 2 until the
  # guess stops decreasing.
  defp sqrt_loop(coef, guess) do
    quotient = Kernel.div(coef, guess)

    if guess <= quotient do
      guess
    else
      sqrt_loop(coef, (guess + quotient) >>> 1)
    end
  end

  @doc """
  Creates a new decimal number from an
  integer or a string representation.

  A decimal number will always be created exactly as specified with all digits
  kept - it will not be rounded with the context.

  ## Backus–Naur form

      sign           ::=  "+" | "-"
      digit          ::=  "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"
      indicator      ::=  "e" | "E"
      digits         ::=  digit [digit]...
      decimal-part   ::=  digits "." [digits] | ["."] digits
      exponent-part  ::=  indicator [sign] digits
      infinity       ::=  "Infinity" | "Inf"
      nan            ::=  "NaN" [digits]
      numeric-value  ::=  decimal-part [exponent-part] | infinity
      numeric-string ::=  [sign] numeric-value | [sign] nan

  ## Floats

  See also `from_float/1`.

  ## Examples

      iex> Decimal.new(1)
      Decimal.new("1")

      iex> Decimal.new("3.14")
      Decimal.new("3.14")

      iex> Decimal.new("1.79769313486231581e308")
      Decimal.new("1.79769313486231581e308")

      iex> Decimal.new("2.22507385850720139e-308")
      Decimal.new("2.22507385850720139e-308")

      iex> Decimal.new("1.01234567890123457890123457890123456789", max_digits: 39)
      Decimal.new("1.01234567890123457890123457890123456789", max_digits: 39)
  """
  @spec new(decimal) :: t
  # Already a well-formed struct: validate its invariants and pass it through.
  def new(%Decimal{sign: sign, coef: coef, exp: exp} = num)
      when sign in [1, -1] and ((is_integer(coef) and coef >= 0) or coef in [:NaN, :inf]) and
             is_integer(exp),
      do: num

  def new(int) when is_integer(int),
    do: %Decimal{sign: if(int < 0, do: -1, else: 1), coef: Kernel.abs(int)}

  # Strings must parse completely; any trailing garbage raises.
  def new(binary, opts \\ []) when is_binary(binary) and is_list(opts) do
    case parse(binary, opts) do
      {decimal, ""} -> decimal
      _ -> raise Error, reason: "number parsing syntax: #{inspect(binary)}"
    end
  end

  @doc """
  Creates a new decimal number from the sign, coefficient and exponent such that
  the number will be: `sign * coefficient * 10 ^ exponent`.

  A decimal number will always be created exactly as specified with all digits
  kept - it will not be rounded with the context.

  ## Examples

      iex> Decimal.new(1, 42, 0)
      Decimal.new("42")

  """
  @spec new(sign :: 1 | -1, coef :: non_neg_integer | :NaN | :inf, exp :: integer) :: t
  def new(sign, coef, exp)
      when sign in [1, -1] and ((is_integer(coef) and coef >= 0) or coef in [:NaN, :inf]) and
             is_integer(exp),
      do: %Decimal{sign: sign, coef: coef, exp: exp}

  @doc """
  Creates a new decimal number from a floating point number.

  Floating point numbers use a fixed number of binary digits to represent
  a decimal number which has inherent inaccuracy as some decimal numbers cannot
  be represented exactly in limited precision binary.

  Floating point numbers will be converted to decimal numbers with
  `:io_lib_format.fwrite_g/1`. Since this conversion is not exact and
  because of inherent inaccuracy mentioned above, we may run into counter-intuitive results:

      iex> Enum.reduce([0.1, 0.1, 0.1], &+/2)
      0.30000000000000004

      iex> Enum.reduce([Decimal.new("0.1"), Decimal.new("0.1"), Decimal.new("0.1")], &Decimal.add/2)
      Decimal.new("0.3")

  For this reason, it's recommended to build decimals with `new/1`, which is always precise, instead.

  ## Examples

      iex> Decimal.from_float(3.14)
      Decimal.new("3.14")

  """
  doc_since("1.5.0")
  @spec from_float(float) :: t
  # Format the float with :io_lib_format.fwrite_g/1, normalize its exponent
  # notation, then parse the resulting string exactly.
  def from_float(float) when is_float(float) do
    float
    |> :io_lib_format.fwrite_g()
    |> fix_float_exp()
    |> IO.iodata_to_binary()
    |> new()
  end

  @doc """
  Creates a new decimal number from an integer, string, float, or existing decimal number.

  Because conversion from a floating point number is not exact, it's recommended
  to instead use `new/1` or `from_float/1` when the argument's type is certain.
  See `from_float/1`.

  ## Examples

      iex> {:ok, decimal} = Decimal.cast(3)
      iex> decimal
      Decimal.new("3")

      iex> Decimal.cast("bad")
      :error

  """
  @spec cast(term) :: {:ok, t} | :error
  def cast(term), do: cast_with_limits(term, default_parse_limits())

  @doc """
  Creates a new decimal number from an integer, string, float, or existing decimal
  number with parsing limits.

  Options are the same as `parse/2`.
  """
  doc_since("2.4.0")
  @spec cast(term, [parse_option]) :: {:ok, t} | :error
  def cast(term, opts) when is_list(opts) do
    cast_with_limits(term, parse_limits!(opts))
  end

  # Shared implementation for cast/1 and cast/2: convert `term` by type, then
  # check the result against the digit/exponent limits.
  defp cast_with_limits(term, limits) do
    cond do
      is_integer(term) ->
        decimal = Decimal.new(term)
        if decimal_within_limits?(decimal, limits), do: {:ok, decimal}, else: :error

      match?(%Decimal{}, term) ->
        if decimal_within_limits?(term, limits), do: {:ok, term}, else: :error

      is_float(term) ->
        decimal = from_float(term)
        if decimal_within_limits?(decimal, limits), do: {:ok, decimal}, else: :error

      is_binary(term) ->
        # Strings must consume the whole input to be accepted.
        case parse_with_limits(term, limits) do
          {decimal, ""} -> {:ok, decimal}
          _ -> :error
        end

      true ->
        :error
    end
  end

  @doc """
  Parses a binary into a decimal.

  If successful, returns a tuple in the form of `{decimal, remainder_of_binary}`,
  otherwise `:error`.

  Inputs whose digit count or exponent magnitude exceed the default limits
  (`#{@default_max_digits}` digits, `#{@default_max_exponent}` absolute
  exponent) return `:error`. Use `parse/2` to override the limits.

  ## Examples

      iex> Decimal.parse("3.14")
      {%Decimal{coef: 314, exp: -2, sign: 1}, ""}

      iex> Decimal.parse("3.14.15")
      {%Decimal{coef: 314, exp: -2, sign: 1}, ".15"}

      iex> Decimal.parse("-1.1e3")
      {%Decimal{coef: 11, exp: 2, sign: -1}, ""}

      iex> Decimal.parse("bad")
      :error

  """
  @spec parse(binary()) :: {t(), binary()} | :error
  def parse(binary) when is_binary(binary) do
    parse_with_limits(binary, default_parse_limits())
  end

  @doc """
  Parses a binary into a decimal with explicit limits.

  The following options are supported:

    * `:max_digits` - maximum number of decimal digits consumed from the input,
      including leading and trailing zeros. Defaults to `#{@default_max_digits}`.
      Pass `:infinity` to disable.
    * `:max_exponent` - maximum absolute value of the parsed decimal exponent,
      after fractional digits are accounted for. Defaults to
      `#{@default_max_exponent}`. Pass `:infinity` to disable.

  Returns `:error` when a parsed number exceeds the configured limits.
  """
  doc_since("2.4.0")
  @spec parse(binary(), [parse_option]) :: {t(), binary()} | :error
  def parse(binary, opts) when is_binary(binary) and is_list(opts) do
    parse_with_limits(binary, parse_limits!(opts))
  end

  # Strips an optional leading sign, then parses the unsigned remainder.
  defp parse_with_limits(binary, limits) do
    case binary do
      "+" <> rest ->
        parse_unsign(rest, limits)

      "-" <> rest ->
        case parse_unsign(rest, limits) do
          {%Decimal{} = num, rest} -> {%{num | sign: -1}, rest}
          :error -> :error
        end

      binary ->
        parse_unsign(binary, limits)
    end
  end

  @doc """
  Converts given number to its string representation.

  Output is bounded to `#{@default_to_string_max_digits}` digit characters by
  default; pass options via `to_string/3` to override. `:scientific` is compact
  for large positive exponents and rarely hits the limit; `:normal` and `:xsd`
  expand proportional to the exponent and will raise `ArgumentError` when the
  limit would be exceeded.
+ + ## Options + + * `:scientific` - number converted to scientific notation. + * `:normal` - number converted without a exponent. + * `:xsd` - number converted to the [canonical XSD representation](https://www.w3.org/TR/xmlschema-2/#decimal). + * `:raw` - number converted to its raw, internal format. + + ## Examples + + iex> Decimal.to_string(Decimal.new("1.00")) + "1.00" + + iex> Decimal.to_string(Decimal.new("123e1"), :scientific) + "1.23E+3" + + iex> Decimal.to_string(Decimal.new("42.42"), :normal) + "42.42" + + iex> Decimal.to_string(Decimal.new("1.00"), :xsd) + "1.0" + + iex> Decimal.to_string(Decimal.new("4321.768"), :raw) + "4321768E-3" + + """ + @spec to_string(t, :scientific | :normal | :xsd | :raw) :: String.t() + def to_string(num, type \\ :scientific) + + def to_string(%Decimal{} = num, type) + when type in [:scientific, :normal, :xsd, :raw] do + check_to_string_max_digits!(num, type, @default_to_string_max_digits) + do_to_string(num, type) + end + + defp do_to_string(%Decimal{sign: sign, coef: :NaN}, _type) do + if sign == 1, do: "NaN", else: "-NaN" + end + + defp do_to_string(%Decimal{sign: sign, coef: :inf}, _type) do + if sign == 1, do: "Infinity", else: "-Infinity" + end + + defp do_to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :normal) do + digits = integer_to_decimal_binary(coef) + length = byte_size(digits) + + iodata = + if exp >= 0 do + [digits, zeroes(exp)] + else + diff = length + exp + + if diff > 0 do + [binary_part(digits, 0, diff), ?., binary_part(digits, diff, length - diff)] + else + ["0.", zeroes(-diff), digits] + end + end + + iodata = if sign == -1, do: [?-, iodata], else: iodata + IO.iodata_to_binary(iodata) + end + + defp do_to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :scientific) do + digits = integer_to_decimal_binary(coef) + length = byte_size(digits) + adjusted = exp + length - 1 + + iodata = + cond do + exp == 0 -> + digits + + exp < 0 and adjusted >= -6 -> + abs_exp = Kernel.abs(exp) + diff = -length + 
abs_exp + 1 + + if diff > 0 do + ["0.", zeroes(diff - 1), digits] + else + split = length + exp + [binary_part(digits, 0, split), ?., binary_part(digits, split, length - split)] + end + + true -> + mantissa = + if length > 1 do + [binary_part(digits, 0, 1), ?., binary_part(digits, 1, length - 1)] + else + digits + end + + exp_sign = if exp >= 0, do: ?+, else: [] + [mantissa, ?E, exp_sign, :erlang.integer_to_binary(adjusted)] + end + + iodata = if sign == -1, do: [?-, iodata], else: iodata + IO.iodata_to_binary(iodata) + end + + defp do_to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :raw) do + str = integer_to_decimal_binary(coef) + str = if sign == -1, do: [?- | str], else: str + str = if exp != 0, do: [str, "E", :erlang.integer_to_binary(exp)], else: str + + IO.iodata_to_binary(str) + end + + defp do_to_string(%Decimal{} = decimal, :xsd) do + decimal |> canonical_xsd() |> do_to_string(:normal) + end + + defp zeroes(0), do: "" + defp zeroes(count), do: :binary.copy("0", count) + + defp integer_to_decimal_binary(int) when int < @decimal_conversion_direct_limit do + :erlang.integer_to_binary(int) + end + + defp integer_to_decimal_binary(int) do + digits = integer_decimal_digit_count(int) + int |> integer_to_decimal_iodata(digits, false) |> IO.iodata_to_binary() + end + + defp integer_to_decimal_iodata(int, digits, pad?) + when digits <= @decimal_conversion_leaf_digits do + binary = :erlang.integer_to_binary(int) + + if pad? do + [zeroes(digits - byte_size(binary)), binary] + else + binary + end + end + + defp integer_to_decimal_iodata(int, digits, pad?) 
do + low_digits = Kernel.div(digits, 2) + high_digits = digits - low_digits + base = decimal_power10(low_digits) + high = Kernel.div(int, base) + low = Kernel.rem(int, base) + + [ + integer_to_decimal_iodata(high, high_digits, pad?), + integer_to_decimal_iodata(low, low_digits, true) + ] + end + + defp integer_decimal_digit_count(int) do + bits = int |> :binary.encode_unsigned() |> bit_length() + digits = Kernel.div((bits - 1) * @log10_2_num, @log10_2_den) + 1 + integer_decimal_digit_count(int, digits) + end + + defp integer_decimal_digit_count(int, digits) do + cond do + int >= decimal_power10(digits) -> + integer_decimal_digit_count(int, digits + 1) + + digits > 1 and int < decimal_power10(digits - 1) -> + integer_decimal_digit_count(int, digits - 1) + + true -> + digits + end + end + + defp decimal_power10(digits), do: :erlang.binary_to_integer("1" <> zeroes(digits)) + + defp bit_length(<>) do + byte_size(rest) * 8 + byte_bit_length(byte) + end + + defp byte_bit_length(byte) when byte >= 128, do: 8 + defp byte_bit_length(byte) when byte >= 64, do: 7 + defp byte_bit_length(byte) when byte >= 32, do: 6 + defp byte_bit_length(byte) when byte >= 16, do: 5 + defp byte_bit_length(byte) when byte >= 8, do: 4 + defp byte_bit_length(byte) when byte >= 4, do: 3 + defp byte_bit_length(byte) when byte >= 2, do: 2 + defp byte_bit_length(_byte), do: 1 + + @doc """ + Converts given number to its string representation with explicit limits. + + The following options are supported: + + * `:max_digits` - maximum number of digit characters in the output. Sign, + decimal point, and exponent markers are not counted. Defaults to + `#{@default_to_string_max_digits}`. Pass `:infinity` to disable. + + Raises `ArgumentError` when the configured limit would be exceeded. 
+ """ + doc_since("2.4.0") + @spec to_string(t, :scientific | :normal | :xsd | :raw, [to_string_option]) :: String.t() + def to_string(%Decimal{} = num, type, opts) + when is_list(opts) and type in [:scientific, :normal, :xsd, :raw] do + max_digits = + limit!(:max_digits, Keyword.get(opts, :max_digits, @default_to_string_max_digits)) + + check_to_string_max_digits!(num, type, max_digits) + do_to_string(num, type) + end + + defp canonical_xsd(%Decimal{coef: 0} = decimal), do: %{decimal | exp: -1} + + defp canonical_xsd(%Decimal{coef: coef, exp: exp} = decimal) + when exp < 0 and Kernel.rem(coef, 10) != 0 do + decimal + end + + defp canonical_xsd(%Decimal{coef: coef, exp: exp} = decimal) do + %Decimal{coef: coef, exp: exp} = do_normalize(coef, exp) + + if exp >= 0 do + %{decimal | coef: coef * decimal_power10(exp + 1), exp: -1} + else + %{decimal | coef: coef, exp: exp} + end + end + + defp check_to_string_max_digits!(_num, _type, :infinity), do: :ok + + defp check_to_string_max_digits!(num, type, max_digits) do + digits = to_string_digit_count(num, type) + + if digits > max_digits do + raise ArgumentError, + "#{inspect(type)} representation requires #{digits} digits, " <> + "but the configured maximum is #{max_digits}" + end + end + + defp to_string_digit_count(%Decimal{coef: coef}, _type) when coef in [:NaN, :inf], do: 0 + + defp to_string_digit_count(%Decimal{coef: coef, exp: exp}, :normal), + do: normal_digit_count(coef, exp) + + defp to_string_digit_count(%Decimal{coef: coef, exp: exp}, :xsd), + do: xsd_digit_count(coef, exp) + + defp to_string_digit_count(%Decimal{coef: coef, exp: exp}, :raw) do + digits = coef_length(coef) + if exp == 0, do: digits, else: digits + integer_digit_count(exp) + end + + defp to_string_digit_count(%Decimal{coef: coef, exp: exp}, :scientific) do + digits = coef_length(coef) + adjusted = exp + digits - 1 + + cond do + exp == 0 -> digits + exp < 0 and adjusted >= -6 -> normal_digit_count(coef, exp) + true -> digits + 
integer_digit_count(adjusted) + end + end + + defp normal_digit_count(coef, exp) do + digits = coef_length(coef) + + if exp >= 0 do + digits + exp + else + diff = digits + exp + + if diff > 0 do + digits + else + 1 - diff + digits + end + end + end + + defp xsd_digit_count(0, _exp), do: 2 + + defp xsd_digit_count(coef, exp) do + %Decimal{coef: coef, exp: exp} = do_normalize(coef, exp) + + if exp >= 0 do + coef_length(coef) + exp + 1 + else + normal_digit_count(coef, exp) + end + end + + defp integer_digit_count(int), do: int |> Kernel.abs() |> coef_length() + + @doc """ + Returns the decimal represented as an integer. + + Raises when loss of precision will occur. + + ## Examples + + iex> Decimal.to_integer(Decimal.new("42")) + 42 + + iex> Decimal.to_integer(Decimal.new("1.00")) + 1 + + iex> Decimal.to_integer(Decimal.new("1.10")) + ** (ArgumentError) cannot convert Decimal.new("1.1") without losing precision. Use Decimal.round/3 first. + + """ + @spec to_integer(t) :: integer + def to_integer(%Decimal{sign: sign, coef: coef, exp: 0}) + when is_integer(coef), + do: sign * coef + + def to_integer(%Decimal{sign: sign, coef: coef, exp: exp}) + when is_integer(coef) and exp > 0, + do: sign * coef * pow10(exp) + + def to_integer(%Decimal{sign: sign, coef: coef, exp: exp}) + when is_integer(coef) and exp < 0 do + {coef, exp} = strip_trailing_zeros(coef, exp) + + if exp >= 0 do + sign * coef * pow10(exp) + else + normalized = %Decimal{sign: sign, coef: coef, exp: exp} + + raise ArgumentError, + "cannot convert #{inspect(normalized)} without losing precision. Use Decimal.round/3 first." + end + end + + @doc """ + Returns the decimal converted to a float. + + The returned float may have lower precision than the decimal. + + Raises if the decimal cannot be converted to a float. 
+ + ## Examples + + iex> Decimal.to_float(Decimal.new("1.5")) + 1.5 + + iex> Decimal.to_float(Decimal.new("-1.79769313486231581e308")) + ** (Decimal.Error) : negative number smaller than DBL_MAX: Decimal.new("-1.79769313486231581E+308") + + iex> Decimal.to_float(Decimal.new("-1.79769313486231581e308")) + ** (Decimal.Error) : negative number smaller than DBL_MAX: Decimal.new("-1.79769313486231581E+308") + + iex> Decimal.to_float(Decimal.new("2.22507385850720139e-308")) + ** (Decimal.Error) : number smaller than DBL_MIN: Decimal.new("2.22507385850720139E-308") + + iex> Decimal.to_float(Decimal.new("-2.22507385850720139e-308")) + ** (Decimal.Error): negative number bigger than DBL_MIN: Decimal.new(\"-2.22507385850720139E-308\") + + iex> Decimal.to_float(Decimal.new("inf")) + ** (ArgumentError) Decimal.new("Infinity") cannot be converted to float + + """ + @spec to_float(t) :: float + def to_float(%Decimal{coef: coef} = decimal) when is_integer(coef) do + %Decimal{sign: sign, coef: coef, exp: exp} = check_dbl_min_max(decimal) + # Convert back to float without loss + # http://www.exploringbinary.com/correct-decimal-to-floating-point-using-big-integers/ + {num, den} = ratio(coef, exp) + + boundary = den <<< 52 + + cond do + num == 0 -> + 0.0 + + num >= boundary -> + {den, exp} = scale_down(num, boundary, 52) + decimal_to_float(sign, num, den, exp) + + true -> + {num, exp} = scale_up(num, boundary, 52) + decimal_to_float(sign, num, den, exp) + end + end + + def to_float(%Decimal{} = decimal) do + raise ArgumentError, "#{inspect(decimal)} cannot be converted to float" + end + + @doc """ + Returns the scale of the decimal. + + A decimal's scale is the number of digits after the decimal point. This + includes trailing zeros; see `normalize/1` to remove them. 
+ + ## Examples + + iex> Decimal.scale(Decimal.new("42")) + 0 + + iex> Decimal.scale(Decimal.new(1, 2, 26)) + 0 + + iex> Decimal.scale(Decimal.new("99.12345")) + 5 + + iex> Decimal.scale(Decimal.new("1.50")) + 2 + """ + @spec scale(t) :: non_neg_integer() + def scale(%Decimal{exp: exp}), do: Kernel.max(0, -exp) + + defp scale_up(num, den, exp) when num >= den, do: {num, exp} + defp scale_up(num, den, exp), do: scale_up(num <<< 1, den, exp - 1) + + defp scale_down(num, den, exp) do + new_den = den <<< 1 + + if num < new_den do + {den >>> 52, exp} + else + scale_down(num, new_den, exp + 1) + end + end + + defp decimal_to_float(sign, num, den, exp) do + quo = Kernel.div(num, den) + rem = num - quo * den + + tmp = + case den >>> 1 do + den when rem > den -> quo + 1 + den when rem < den -> quo + _ when (quo &&& 1) === 1 -> quo + 1 + _ -> quo + end + + sign = if sign == -1, do: 1, else: 0 + tmp = tmp - @power_of_2_to_52 + exp = if tmp < @power_of_2_to_52, do: exp, else: exp + 1 + <> = <> + tmp + end + + @doc """ + Returns `true` when the given `decimal` has no significant digits after the decimal point. 
+ + ## Examples + + iex> Decimal.integer?("1.00") + true + + iex> Decimal.integer?("1.10") + false + + """ + doc_since("2.0.0") + @spec integer?(decimal()) :: boolean + def integer?(%Decimal{coef: :NaN}), do: false + def integer?(%Decimal{coef: :inf}), do: false + def integer?(%Decimal{coef: 0}), do: true + def integer?(%Decimal{exp: exp}) when exp >= 0, do: true + def integer?(%Decimal{coef: coef, exp: exp}), do: trailing_zeros_at_least?(coef, -exp) + def integer?(num), do: integer?(decimal(num)) + + defp trailing_zeros_at_least?(_coef, 0), do: true + + defp trailing_zeros_at_least?(coef, n) when n >= @normalize_chunk do + case Kernel.rem(coef, @normalize_chunk_pow) do + 0 -> + trailing_zeros_at_least?(Kernel.div(coef, @normalize_chunk_pow), n - @normalize_chunk) + + _ -> + false + end + end + + defp trailing_zeros_at_least?(coef, n) do + Kernel.rem(coef, pow10(n)) == 0 + end + + ## ARITHMETIC ## + + defp add_align(coef1, exp1, coef2, exp2) when exp1 == exp2, do: {coef1, coef2} + + defp add_align(coef1, exp1, coef2, exp2) when exp1 > exp2, + do: {coef1 * pow10(exp1 - exp2), coef2} + + defp add_align(coef1, exp1, coef2, exp2) when exp1 < exp2, + do: {coef1, coef2 * pow10(exp2 - exp1)} + + defp add_zero(%Decimal{coef: 0, exp: zero_exp}, %Decimal{} = num) do + %Decimal{sign: sign, coef: coef, exp: exp} = num + + cond do + zero_exp >= exp -> + context(num) + + exp - zero_exp > Context.get().precision + 2 -> + add_bounded_zero(num) + + true -> + context(%Decimal{sign: sign, coef: coef * pow10(exp - zero_exp), exp: zero_exp}) + end + end + + defp add_bounded_zero(%Decimal{} = num) do + work_digits = Context.get().precision + 2 + base_exp = Kernel.min(num.exp, adjust_exp(num) - work_digits + 1) + {coef, false} = add_scale_to_base(num.coef, num.exp, base_exp) + context(%Decimal{sign: num.sign, coef: coef, exp: base_exp}) + end + + defp add_bounded?(%Decimal{} = num1, %Decimal{} = num2) do + precision = Context.get().precision + Kernel.abs(adjust_exp(num1) - 
adjust_exp(num2)) > precision + 2 + end + + # Bounded addition for operands whose exponent gap exceeds `precision + 2`. + # Aligning at the smaller exponent would materialize coefficients with + # `gap` extra digits, which is unbounded for hostile input. + # + # Instead, scale both operands to a shared `base_exp` chosen `precision + 2` + # digits below the larger operand's adjusted exponent. Digits below + # `base_exp` are dropped, and any non-zero digits dropped from the smaller + # operand are remembered as a sticky bit. `precision/4` then sees the same + # guard, round, and sticky information it would have seen from the + # full-precision sum, so rounding (including half-even tie-breaking and + # subtractive cancellation toward zero in `add_sticky/3`) matches the + # unbounded result. + defp add_bounded(%Decimal{} = num1, %Decimal{} = num2) do + {high, low} = add_bounded_order(num1, num2) + + work_digits = Context.get().precision + 2 + base_exp = Kernel.min(high.exp, adjust_exp(high) - work_digits + 1) + + {high_coef, false} = add_scale_to_base(high.coef, high.exp, base_exp) + {low_coef, low_sticky?} = add_scale_to_base(low.coef, low.exp, base_exp) + + sum = high.sign * high_coef + low.sign * low_coef + {sum, sticky?} = add_sticky(sum, low.sign, low_sticky?) + sign = add_sign(num1.sign, num2.sign, sum) + + context(%Decimal{sign: sign, coef: Kernel.abs(sum), exp: base_exp}, [], sticky?) 
+ end + + defp add_bounded_order(%Decimal{coef: 0} = num1, %Decimal{} = num2), do: {num2, num1} + defp add_bounded_order(%Decimal{} = num1, %Decimal{coef: 0} = num2), do: {num1, num2} + + defp add_bounded_order(%Decimal{} = num1, %Decimal{} = num2) do + if adjust_exp(num1) >= adjust_exp(num2) do + {num1, num2} + else + {num2, num1} + end + end + + defp add_scale_to_base(0, _exp, _base_exp), do: {0, false} + + defp add_scale_to_base(coef, exp, base_exp) when exp >= base_exp do + {coef * pow10(exp - base_exp), false} + end + + defp add_scale_to_base(coef, exp, base_exp) do + drop = base_exp - exp + + if drop >= coef_length(coef) do + {0, true} + else + divisor = pow10(drop) + {Kernel.div(coef, divisor), Kernel.rem(coef, divisor) != 0} + end + end + + defp add_sticky(sum, _tail_sign, false), do: {sum, false} + + defp add_sticky(sum, tail_sign, true) do + sum_sign = integer_sign(sum) + + cond do + sum_sign == 0 -> {tail_sign, true} + sum_sign == tail_sign -> {sum, true} + true -> {sum - sum_sign, true} + end + end + + defp integer_sign(int) when int > 0, do: 1 + defp integer_sign(int) when int < 0, do: -1 + defp integer_sign(_int), do: 0 + + defp add_sign(sign1, sign2, coef) do + cond do + coef > 0 -> 1 + coef < 0 -> -1 + sign1 == -1 and sign2 == -1 -> -1 + sign1 != sign2 and Context.get().rounding == :floor -> -1 + true -> 1 + end + end + + defp div_adjust(coef1, coef2, adjust) when coef1 < coef2, + do: div_adjust(coef1 * 10, coef2, adjust + 1) + + defp div_adjust(coef1, coef2, adjust) when coef1 >= coef2 * 10, + do: div_adjust(coef1, coef2 * 10, adjust - 1) + + defp div_adjust(coef1, coef2, adjust), do: {coef1, coef2, adjust} + + defp div_calc(coef1, coef2, coef, adjust, prec10) do + cond do + coef1 >= coef2 -> + div_calc(coef1 - coef2, coef2, coef + 1, adjust, prec10) + + coef1 == 0 and adjust >= 0 -> + {coef, adjust, coef1, []} + + coef >= prec10 -> + signals = [:rounded] + signals = if base10?(coef1), do: signals, else: [:inexact | signals] + {coef, adjust, coef1, 
signals} + + true -> + div_calc(coef1 * 10, coef2, coef * 10, adjust + 1, prec10) + end + end + + defp div_int_calc(coef1, coef2, coef, adjust, precision) do + cond do + coef1 >= coef2 -> + div_int_calc(coef1 - coef2, coef2, coef + 1, adjust, precision) + + adjust != precision -> + div_int_calc(coef1 * 10, coef2, coef * 10, adjust + 1, precision) + + true -> + {coef, coef1} + end + end + + defp integer_division(div_sign, coef1, exp1, coef2, exp2) do + precision = exp1 - exp2 + {coef1, coef2, adjust} = div_adjust(coef1, coef2, 0) + + {coef, _rem} = div_int_calc(coef1, coef2, 0, adjust, precision) + + prec10 = pow10(Context.get().precision) + + if coef > prec10 do + { + :error, + :invalid_operation, + "integer division impossible, quotient too large", + %Decimal{coef: :NaN} + } + else + {:ok, %Decimal{sign: div_sign, coef: coef, exp: 0}} + end + end + + defp do_normalize(coef, exp) when coef >= @normalize_chunk_pow do + case Kernel.rem(coef, @normalize_chunk_pow) do + 0 -> + do_normalize(Kernel.div(coef, @normalize_chunk_pow), exp + @normalize_chunk) + + _ -> + do_normalize_one(coef, exp) + end + end + + defp do_normalize(coef, exp), do: do_normalize_one(coef, exp) + + defp do_normalize_one(0, _exp), do: %Decimal{coef: 0, exp: 0} + + defp do_normalize_one(coef, exp) when Kernel.rem(coef, 10) == 0 do + do_normalize_one(Kernel.div(coef, 10), exp + 1) + end + + defp do_normalize_one(coef, exp), do: %Decimal{coef: coef, exp: exp} + + defp strip_trailing_zeros(coef, exp) when coef >= @normalize_chunk_pow do + case Kernel.rem(coef, @normalize_chunk_pow) do + 0 -> + strip_trailing_zeros(Kernel.div(coef, @normalize_chunk_pow), exp + @normalize_chunk) + + _ -> + strip_trailing_zeros_one(coef, exp) + end + end + + defp strip_trailing_zeros(coef, exp), do: strip_trailing_zeros_one(coef, exp) + + defp strip_trailing_zeros_one(0, _exp), do: {0, 0} + + defp strip_trailing_zeros_one(coef, exp) when Kernel.rem(coef, 10) == 0 do + strip_trailing_zeros_one(Kernel.div(coef, 10), exp + 
1) + end + + defp strip_trailing_zeros_one(coef, exp), do: {coef, exp} + + defp ratio(coef, exp) when exp >= 0, do: {coef * pow10(exp), 1} + defp ratio(coef, exp) when exp < 0, do: {coef, pow10(-exp)} + + pow10_max = + Enum.reduce(0..104, 1, fn int, acc -> + defp pow10(unquote(int)), do: unquote(acc) + defp base10?(unquote(acc)), do: true + acc * 10 + end) + + defp pow10(num) when num > 104, do: pow10(104) * pow10(num - 104) + + defp base10?(num) when num >= unquote(pow10_max) do + if Kernel.rem(num, unquote(pow10_max)) == 0 do + base10?(Kernel.div(num, unquote(pow10_max))) + else + false + end + end + + defp base10?(_num), do: false + + ## ROUNDING ## + + defp do_round(sign, digits, exp, target_exp, rounding) do + num_digits = length(digits) + precision = num_digits - (target_exp - exp) + + cond do + exp == target_exp -> + %Decimal{sign: sign, coef: digits_to_integer(digits), exp: exp} + + exp < target_exp and precision < 0 -> + zeros = :lists.duplicate(target_exp - exp, ?0) + digits = zeros ++ digits + {signif, remain} = :lists.split(1, digits) + + signif = + if increment?(rounding, sign, signif, remain), + do: digits_increment(signif), + else: signif + + coef = digits_to_integer(signif) + %Decimal{sign: sign, coef: coef, exp: target_exp} + + exp < target_exp and precision >= 0 -> + {signif, remain} = :lists.split(precision, digits) + + signif = + if increment?(rounding, sign, signif, remain), + do: digits_increment(signif), + else: signif + + coef = digits_to_integer(signif) + %Decimal{sign: sign, coef: coef, exp: target_exp} + + exp > target_exp -> + digits = digits ++ Enum.map(1..(exp - target_exp), fn _ -> ?0 end) + coef = digits_to_integer(digits) + %Decimal{sign: sign, coef: coef, exp: target_exp} + end + end + + defp digits_to_integer([]), do: 0 + defp digits_to_integer(digits), do: :erlang.list_to_integer(digits) + + defp precision(%Decimal{coef: :NaN} = num, _precision, _rounding, _sticky?) 
do + {num, []} + end + + defp precision(%Decimal{coef: :inf} = num, _precision, _rounding, _sticky?) do + {num, []} + end + + defp precision(%Decimal{sign: sign, coef: coef, exp: exp} = num, precision, rounding, sticky?) do + digits = :erlang.integer_to_list(coef) + num_digits = length(digits) + + cond do + num_digits > precision -> + do_precision(sign, digits, num_digits, exp, precision, rounding, sticky?) + + sticky? -> + do_precision(sign, digits, num_digits, exp, num_digits, rounding, sticky?) + + true -> + {num, []} + end + end + + defp do_precision(sign, digits, num_digits, exp, precision, rounding, sticky?) do + precision = Kernel.min(num_digits, precision) + {signif, remain} = :lists.split(precision, digits) + + signif = + if increment?(rounding, sign, signif, remain, sticky?), + do: digits_increment(signif), + else: signif + + signals = if any_nonzero?(remain, sticky?), do: [:inexact, :rounded], else: [:rounded] + + exp = exp + (num_digits - precision) + coef = digits_to_integer(signif) + dec = %Decimal{sign: sign, coef: coef, exp: exp} + {dec, signals} + end + + defp increment?(rounding, sign, signif, remain), + do: increment?(rounding, sign, signif, remain, false) + + defp increment?(_, _, _, [], false), do: false + + defp increment?(:down, _, _, _, _), do: false + + defp increment?(:up, _, _, _, _), do: true + + defp increment?(:ceiling, sign, _, remain, sticky?), + do: sign == 1 and any_nonzero?(remain, sticky?) + + defp increment?(:floor, sign, _, remain, sticky?), + do: sign == -1 and any_nonzero?(remain, sticky?) + + defp increment?(:half_up, _, _, [], _sticky?), do: false + + defp increment?(:half_up, _, _, [digit | _], _sticky?), do: digit >= ?5 + + defp increment?(:half_even, _, _, [], _sticky?), do: false + + defp increment?(:half_even, _, [], [?5 | rest], sticky?), do: any_nonzero?(rest, sticky?) + + defp increment?(:half_even, _, signif, [?5 | rest], sticky?), + do: any_nonzero?(rest, sticky?) 
or Kernel.rem(:lists.last(signif), 2) == 1 + + defp increment?(:half_even, _, _, [digit | _], _sticky?), do: digit > ?5 + + defp increment?(:half_down, _, _, [], _sticky?), do: false + + defp increment?(:half_down, _, _, [digit | rest], sticky?), + do: digit > ?5 or (digit == ?5 and any_nonzero?(rest, sticky?)) + + defp any_nonzero(digits), do: :lists.any(fn digit -> digit != ?0 end, digits) + + defp any_nonzero?(digits, sticky?), do: sticky? or any_nonzero(digits) + + defp digits_increment(digits), do: digits_increment(:lists.reverse(digits), []) + + defp digits_increment([?9 | rest], acc), do: digits_increment(rest, [?0 | acc]) + + defp digits_increment([head | rest], acc), do: :lists.reverse(rest, [head + 1 | acc]) + + defp digits_increment([], acc), do: [?1 | acc] + + ## CONTEXT ## + + defp context(num, signals \\ []), do: context(num, signals, false) + + defp context(num, signals, sticky?) do + context = Context.get() + {result, prec_signals} = precision(num, context.precision, context.rounding, sticky?) 
+ {result, exp_signals} = exponent_limits(result, context) + signals = signals |> put_uniq(prec_signals) |> put_uniq(exp_signals) + error(signals, nil, result, context) + end + + defp exponent_limits(%Decimal{coef: coef} = num, _context) when coef in [:NaN, :inf, 0], + do: {num, []} + + defp exponent_limits(%Decimal{} = num, %Context{} = context) do + adjusted_exp = adjust_exp(num) + + cond do + above_emax?(adjusted_exp, context.emax) -> + {overflow_result(num, context), [:overflow, :inexact, :rounded]} + + below_emin?(adjusted_exp, context.emin) -> + {%{num | coef: 0, exp: 0}, [:underflow, :inexact, :rounded]} + + true -> + {num, []} + end + end + + defp above_emax?(_adjusted_exp, :infinity), do: false + defp above_emax?(adjusted_exp, emax), do: adjusted_exp > emax + + defp below_emin?(_adjusted_exp, :infinity), do: false + defp below_emin?(adjusted_exp, emin), do: adjusted_exp < emin + + defp overflow_result(%Decimal{sign: sign}, %Context{rounding: rounding} = context) do + if overflow_to_infinity?(rounding, sign) do + %Decimal{sign: sign, coef: :inf} + else + %Decimal{ + sign: sign, + coef: pow10(context.precision) - 1, + exp: context.emax - context.precision + 1 + } + end + end + + defp overflow_to_infinity?(:down, _sign), do: false + defp overflow_to_infinity?(:floor, sign), do: sign == -1 + defp overflow_to_infinity?(:ceiling, sign), do: sign == 1 + defp overflow_to_infinity?(_rounding, _sign), do: true + + defp put_uniq(list, elems) when is_list(elems) do + Enum.reduce(elems, list, &put_uniq(&2, &1)) + end + + defp put_uniq(list, elem) do + if elem in list, do: list, else: [elem | list] + end + + ## PARSING ## + + defp parse_limits!(opts) do + Enum.reduce( + opts, + %{max_digits: @default_max_digits, max_exponent: @default_max_exponent}, + fn + {:max_digits, value}, acc -> + %{acc | max_digits: limit!(:max_digits, value)} + + {:max_exponent, value}, acc -> + %{acc | max_exponent: limit!(:max_exponent, value)} + + {key, _value}, _acc -> + raise ArgumentError, 
"unknown option #{inspect(key)}" + end + ) + end + + defp default_parse_limits do + %{max_digits: @default_max_digits, max_exponent: @default_max_exponent} + end + + defp limit!(_key, :infinity), do: :infinity + + defp limit!(_key, value) when is_integer(value) and value >= 0, do: value + + defp limit!(key, value) do + raise ArgumentError, + "#{inspect(key)} must be a non-negative integer or :infinity, got: #{inspect(value)}" + end + + defp parse_digits_count(<>, acc, count) when digit in ?0..?9 do + parse_digits_count(rest, [digit | acc], count + 1) + end + + defp parse_digits_count(rest, acc, count), do: {acc, count, rest} + + defp digits_acc_to_integer([], _size), do: 0 + defp digits_acc_to_integer(acc, _size), do: :erlang.list_to_integer(:lists.reverse(acc)) + + defp parse_exp(<>) + when e in [?e, ?E] and sign in [?+, ?-] and digit in ?0..?9 do + {digits, rest} = parse_digits(rest) + {[sign, digit | digits], rest} + end + + defp parse_exp(<>) when e in [?e, ?E] and digit in ?0..?9 do + {digits, rest} = parse_digits(rest) + {[digit | digits], rest} + end + + defp parse_exp(bin) do + {[], bin} + end + + defp parse_unsign(<>, _limits) + when first in [?i, ?I] do + if String.downcase(remainder) == "nfinity" do + {%Decimal{coef: :inf}, rest} + else + :error + end + end + + defp parse_unsign(<>, _limits) + when first in [?i, ?I] do + if String.downcase(remainder) == "nf" do + {%Decimal{coef: :inf}, rest} + else + :error + end + end + + defp parse_unsign(<>, _limits) + when first in [?n, ?N] do + if String.downcase(remainder) == "an" do + {%Decimal{coef: :NaN}, rest} + else + :error + end + end + + defp parse_unsign(bin, limits) do + {int_rev, int_size, after_int} = parse_digits_count(bin, [], 0) + + {coef_rev, total_size, after_float} = + case after_int do + <> -> parse_digits_count(after_dot, int_rev, int_size) + _ -> {int_rev, int_size, after_int} + end + + cond do + total_size == 0 -> + :error + + exceeds_limit?(total_size, limits.max_digits) -> + :error + + true 
-> + {exp, rest} = parse_exp(after_float) + exp_chars = if exp == [], do: ~c"0", else: exp + float_size = total_size - int_size + + case bounded_exponent(exp_chars, float_size, limits.max_exponent) do + {:ok, exp_int} -> + coef = digits_acc_to_integer(coef_rev, total_size) + {%Decimal{coef: coef, exp: exp_int}, rest} + + :error -> + :error + end + end + end + + defp decimal_within_limits?(%Decimal{coef: coef, exp: exp}, limits) do + not exceeds_limit?(decimal_digit_count(coef), limits.max_digits) and + within_exponent_limit?(exp, limits.max_exponent) + end + + defp decimal_digit_count(coef) when coef in [:NaN, :inf], do: 0 + defp decimal_digit_count(coef), do: coef_length(coef) + + defp exceeds_limit?(_value, :infinity), do: false + defp exceeds_limit?(value, limit), do: value > limit + + defp within_exponent_limit?(_exp, :infinity), do: true + defp within_exponent_limit?(exp, max_exponent), do: Kernel.abs(exp) <= max_exponent + + defp bounded_exponent(chars, float_digits, :infinity) do + {:ok, List.to_integer(chars) - float_digits} + end + + defp bounded_exponent(chars, float_digits, max_exponent) do + with {:ok, exp} <- bounded_integer(chars, max_exponent + float_digits) do + exp = exp - float_digits + if within_exponent_limit?(exp, max_exponent), do: {:ok, exp}, else: :error + end + end + + defp bounded_integer([?- | digits], bound) do + with {:ok, int} <- bounded_non_neg_integer(digits, bound), do: {:ok, -int} + end + + defp bounded_integer([?+ | digits], bound), do: bounded_non_neg_integer(digits, bound) + defp bounded_integer(digits, bound), do: bounded_non_neg_integer(digits, bound) + + defp bounded_non_neg_integer(digits, bound) do + digits = trim_leading_zeroes(digits) + bound_digits = integer_to_charlist(bound) + digits_length = length(digits) + bound_length = length(bound_digits) + + cond do + digits == [] -> + {:ok, 0} + + digits_length > bound_length -> + :error + + digits_length == bound_length and digits_gt?(digits, bound_digits) -> + :error + + true 
-> + {:ok, List.to_integer(digits)} + end + end + + defp trim_leading_zeroes([?0 | rest]), do: trim_leading_zeroes(rest) + defp trim_leading_zeroes(digits), do: digits + + defp digits_gt?([digit | rest1], [digit | rest2]), do: digits_gt?(rest1, rest2) + defp digits_gt?([digit1 | _], [digit2 | _]), do: digit1 > digit2 + defp digits_gt?([], []), do: false + + defp parse_digits(bin), do: parse_digits(bin, []) + + defp parse_digits(<>, acc) when digit in ?0..?9 do + parse_digits(rest, [digit | acc]) + end + + defp parse_digits(rest, acc) do + {:lists.reverse(acc), rest} + end + + # Util + + defp decimal(%Decimal{} = num), do: num + defp decimal(num) when is_integer(num), do: new(num) + defp decimal(num) when is_binary(num), do: new(num) + + defp decimal(other) when is_float(other) do + raise ArgumentError, + "implicit conversion of #{inspect(other)} to Decimal is not allowed. Use Decimal.from_float/1" + end + + defp handle_error(signals, reason, result, context) do + context = context || Context.get() + signals = List.wrap(signals) + + flags = Enum.reduce(signals, context.flags, &put_uniq(&2, &1)) + Context.set(%{context | flags: flags}) + error_signal = Enum.find(signals, &(&1 in context.traps)) + + if error_signal do + error = [signal: error_signal, reason: reason] + {:error, error} + else + {:ok, result} + end + end + + defp fix_float_exp(digits) do + fix_float_exp(digits, []) + end + + defp fix_float_exp([?e | rest], [?0 | [?. 
| result]]) do + fix_float_exp(rest, [?e | result]) + end + + defp fix_float_exp([digit | rest], result) do + fix_float_exp(rest, [digit | result]) + end + + defp fix_float_exp([], result), do: :lists.reverse(result) + + defp check_dbl_min_max(%Decimal{coef: :inf} = infinity), do: infinity + + defp check_dbl_min_max(%Decimal{sign: 1} = num) do + cond do + Decimal.gt?(num, dbl_max(1)) -> + raise Error, reason: "number bigger than DBL_MAX: #{inspect(num)}" + + Decimal.gt?(num, zero(1)) and Decimal.lt?(num, dbl_min(1)) -> + raise Error, reason: "number smaller than DBL_MIN: #{inspect(num)}" + + true -> + num + end + end + + defp check_dbl_min_max(num) do + cond do + Decimal.lt?(num, dbl_max(-1)) -> + raise Error, reason: "negative number smaller than DBL_MAX: #{inspect(num)}" + + Decimal.lt?(num, zero(-1)) and Decimal.gt?(num, dbl_min(-1)) -> + raise Error, reason: "negative number bigger than DBL_MIN: #{inspect(num)}" + + true -> + num + end + end + + defp dbl_min(sign), do: %Decimal{sign: sign, coef: 22_250_738_585_072_014, exp: -324} + defp zero(sign), do: %Decimal{sign: sign, coef: 0, exp: 0} + defp dbl_max(sign), do: %Decimal{sign: sign, coef: 17_976_931_348_623_158, exp: 292} + + if Version.compare(System.version(), "1.3.0") == :lt do + defp integer_to_charlist(string), do: Integer.to_char_list(string) + else + defp integer_to_charlist(string), do: Integer.to_charlist(string) + end +end + +defimpl Inspect, for: Decimal do + def inspect(dec, _opts) do + "Decimal.new(\"" <> Decimal.to_string(dec, :scientific, max_digits: :infinity) <> "\")" + end +end + +defimpl String.Chars, for: Decimal do + def to_string(dec) do + Decimal.to_string(dec, :scientific, max_digits: :infinity) + end +end + +# TODO: remove when we require Elixir 1.18 +if Code.ensure_loaded?(JSON.Encoder) and function_exported?(JSON.Encoder, :encode, 2) do + defimpl JSON.Encoder, for: Decimal do + def encode(decimal, _encoder) do + [?", Decimal.to_string(decimal, :scientific, max_digits: :infinity), 
?"] + end + end +end diff --git a/deps/decimal/lib/decimal/context.ex b/deps/decimal/lib/decimal/context.ex new file mode 100644 index 0000000..135ea71 --- /dev/null +++ b/deps/decimal/lib/decimal/context.ex @@ -0,0 +1,140 @@ +defmodule Decimal.Context do + import Decimal.Macros + alias Decimal.Context + + @moduledoc """ + The context is kept in the process dictionary. It can be accessed with + `get/0` and `set/1`. + + The default context follows IEEE 754 decimal128: precision is 34, `emax` is + 6 144, and `emin` is -6 143. The rounding algorithm is `:half_up` and the + set trap enablers are `:invalid_operation` and `:division_by_zero`. + + `emax` and `emin` limit operation results. They do not validate values that + have already been created, so applications that parse untrusted input should + still rely on the default `Decimal.parse/2` and `Decimal.cast/2` limits or + pass explicit `:max_digits` and `:max_exponent` options. + + ## Fields + + * `precision` - maximum number of decimal digits in the coefficient. If an + operation result has more digits it will be rounded to `precision` + digits with the rounding algorithm in `rounding`. + * `rounding` - the rounding algorithm used when the coefficient's number of + exceeds `precision`. Strategies explained below. + * `emax` - maximum adjusted exponent. If the adjusted exponent of a result + is larger than `emax`, overflow is signalled. `:infinity` disables this + limit. + * `emin` - minimum adjusted exponent. If the adjusted exponent of a result + is smaller than `emin`, underflow is signalled. `:infinity` disables this + limit. + * `flags` - a list of signals that for which the flag is sent. When an + exceptional condition is signalled its flag is set. The flags are sticky + and will be set until explicitly cleared. + * `traps` - a list of set trap enablers for signals. When a signal's trap + enabler is set the condition causes `Decimal.Error` to be raised. 
+ + ## Rounding algorithms + + * `:down` - round toward zero (truncate). Discarded digits are ignored, + result is unchanged. + * `:half_up` - if the discarded digits is greater than or equal to half of + the value of a one in the next left position then the coefficient will be + incremented by one (rounded up). Otherwise (the discarded digits are less + than half) the discarded digits will be ignored. + * `:half_even` - also known as "round to nearest" or "banker's rounding". If + the discarded digits is greater than half of the value of a one in the + next left position then the coefficient will be incremented by one + (rounded up). If they represent less than half discarded digits will be + ignored. Otherwise (exactly half), the coefficient is not altered if it's + even, or incremented by one (rounded up) if it's odd (to make an even + number). + * `:ceiling` - round toward +Infinity. If all of the discarded digits are + zero or the sign is negative the result is unchanged. Otherwise, the + coefficient will be incremented by one (rounded up). + * `:floor` - round toward -Infinity. If all of the discarded digits are zero + or the sign is positive the result is unchanged. Otherwise, the sign is + negative and coefficient will be incremented by one. + * `:half_down` - if the discarded digits is greater than half of the value + of a one in the next left position then the coefficient will be + incremented by one (rounded up). Otherwise (the discarded digits are half + or less) the discarded digits are ignored. + * `:up` - round away from zero. If all discarded digits are zero the + coefficient is not changed, otherwise it is incremented by one (rounded + up). 
+ + This table shows the results of rounding operations for all the rounding + algorithms: + + Rounding algorithm | 5.5 | 2.5 | 1.6 | 1.1 | 1.0 | -1.0 | -1.1 | -1.6 | -2.5 | -5.5 + :----------------- | :-- | :-- | :-- | :-- | :-- | :--- | :--- | :--- | :--- | :--- + `:up` | 6 | 3 | 2 | 2 | 1 | -1 | -2 | -2 | -3 | -6 + `:down` | 5 | 2 | 1 | 1 | 1 | -1 | -1 | -1 | -2 | -5 + `:ceiling` | 6 | 3 | 2 | 2 | 1 | -1 | -1 | -1 | -2 | -5 + `:floor` | 5 | 2 | 1 | 1 | 1 | -1 | -2 | -2 | -3 | -6 + `:half_up` | 6 | 3 | 2 | 1 | 1 | -1 | -1 | -2 | -3 | -6 + `:half_down` | 5 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -5 + `:half_even` | 6 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -6 + + """ + @type t :: %__MODULE__{ + precision: pos_integer, + rounding: Decimal.rounding(), + emax: integer | :infinity, + emin: integer | :infinity, + flags: [Decimal.signal()], + traps: [Decimal.signal()] + } + + defstruct precision: 34, + rounding: :half_up, + emax: 6_144, + emin: -6_143, + flags: [], + traps: [:invalid_operation, :division_by_zero] + + @context_key :"$decimal_context" + + @doc """ + Runs function with given context. + """ + doc_since("1.9.0") + @spec with(t(), (-> x)) :: x when x: var + def with(%Context{} = context, fun) when is_function(fun, 0) do + old = Process.put(@context_key, context) + + try do + fun.() + after + set(old || %Context{}) + end + end + + @doc """ + Gets the process' context. + """ + doc_since("1.9.0") + @spec get() :: t() + def get() do + Process.get(@context_key, %Context{}) + end + + @doc """ + Set the process' context. + """ + doc_since("1.9.0") + @spec set(t()) :: :ok + def set(%Context{} = context) do + Process.put(@context_key, context) + :ok + end + + @doc """ + Update the process' context. 
+ """ + doc_since("1.9.0") + @spec update((t() -> t())) :: :ok + def update(fun) when is_function(fun, 1) do + get() |> fun.() |> set() + end +end diff --git a/deps/decimal/lib/decimal/error.ex b/deps/decimal/lib/decimal/error.ex new file mode 100644 index 0000000..1f3b08f --- /dev/null +++ b/deps/decimal/lib/decimal/error.ex @@ -0,0 +1,24 @@ +defmodule Decimal.Error do + @moduledoc """ + The exception that all decimal operations may raise. + + ## Fields + + * `signal` - the signalled error, additional signalled errors will be found + in the context. + * `reason` - the reason for the error. + + Rescuing the error to access the result or the other fields of the error is + discouraged and should only be done for exceptional conditions. It is more + pragmatic to set the appropriate traps on the context and check the flags + after the operation if the result needs to be inspected. + """ + + defexception [:signal, :reason] + + @impl true + def message(%{signal: signal, reason: reason}) do + reason = reason && ": " <> reason + "#{signal}#{reason}" + end +end diff --git a/deps/decimal/lib/decimal/macros.ex b/deps/decimal/lib/decimal/macros.ex new file mode 100644 index 0000000..f8fca78 --- /dev/null +++ b/deps/decimal/lib/decimal/macros.ex @@ -0,0 +1,11 @@ +defmodule Decimal.Macros do + @moduledoc false + + defmacro doc_since(version) do + if Version.match?(System.version(), ">= 1.7.0") do + quote do + @doc since: unquote(version) + end + end + end +end diff --git a/deps/decimal/mix.exs b/deps/decimal/mix.exs new file mode 100644 index 0000000..f7753a6 --- /dev/null +++ b/deps/decimal/mix.exs @@ -0,0 +1,43 @@ +defmodule Decimal.Mixfile do + use Mix.Project + + @version "3.1.0" + @source_url "https://github.com/ericmj/decimal" + + def project() do + [ + app: :decimal, + version: @version, + elixir: "~> 1.12", + deps: deps(), + name: "Decimal", + source_url: @source_url, + docs: [source_ref: "v#{@version}", main: "readme", extras: ["README.md"]], + description: 
description(), + package: package() + ] + end + + def application() do + [] + end + + defp deps() do + [ + {:ex_doc, ">= 0.0.0", only: :dev}, + {:stream_data, "~> 1.2.0", only: :test} + ] + end + + defp description() do + "Arbitrary precision decimal arithmetic." + end + + defp package() do + [ + maintainers: ["Eric Meadows-Jönsson"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url} + ] + end +end diff --git a/deps/dns_cluster/.formatter.exs b/deps/dns_cluster/.formatter.exs new file mode 100644 index 0000000..d2cda26 --- /dev/null +++ b/deps/dns_cluster/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/dns_cluster/.hex b/deps/dns_cluster/.hex new file mode 100644 index 0000000..241439c Binary files /dev/null and b/deps/dns_cluster/.hex differ diff --git a/deps/dns_cluster/CHANGELOG.md b/deps/dns_cluster/CHANGELOG.md new file mode 100644 index 0000000..98939b6 --- /dev/null +++ b/deps/dns_cluster/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog + +## 0.1.3 (2024-02-02) + * Support OTP 24 + +## 0.1.2 (2024-01-08) + * Use `:inet_res.getbyname/2` to resolve the given hostname to support search list for host-name lookup, such as in k8s and similar setups + +## 0.1.1 (2023-09-27) + * Fix bug where an empty clauses would raise an argument error + +## 0.1.0 (2023-07-11) + * Initial release \ No newline at end of file diff --git a/deps/dns_cluster/LICENSE.md b/deps/dns_cluster/LICENSE.md new file mode 100644 index 0000000..eb6f417 --- /dev/null +++ b/deps/dns_cluster/LICENSE.md @@ -0,0 +1,22 @@ +# MIT License + +Copyright (c) 2023 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the 
Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/dns_cluster/README.md b/deps/dns_cluster/README.md new file mode 100644 index 0000000..8ceae21 --- /dev/null +++ b/deps/dns_cluster/README.md @@ -0,0 +1,45 @@ +# DNSCluster + +Simple DNS clustering for distributed Elixir nodes. + +## Installation + +The package can be installed by adding `dns_cluster` to your list of dependencies in `mix.exs`: + +```elixir +def deps do + [ + {:dns_cluster, "~> 0.1.1"} + ] +end +``` + +Next, you can configure and start the cluster by adding it to your supervision +tree in your `application.ex`: + +```elixir +children = [ + {Phoenix.PubSub, ...}, + {DNSCluster, query: Application.get_env(:my_app, :dns_cluster_query) || :ignore}, + MyAppWeb.Endpoint +] +``` + +If you are deploying with Elixir releases, the release must be set to support longnames and +the node must be named. These can be set in your `rel/env.sh.eex` file: + +```sh +#!/bin/sh +export RELEASE_DISTRIBUTION=name +export RELEASE_NODE="myapp@fully-qualified-host-or-ip" +``` + +By default, nodes from the same release will have the same cookie. If you want different +applications or releases to connect to each other, then you must set the `RELEASE_COOKIE`, +either in your deployment platform or inside `rel/env.sh.eex`: + +```sh +#!/bin/sh +... 
+export RELEASE_COOKIE="my-app-cookie" +``` diff --git a/deps/dns_cluster/hex_metadata.config b/deps/dns_cluster/hex_metadata.config new file mode 100644 index 0000000..81431e8 --- /dev/null +++ b/deps/dns_cluster/hex_metadata.config @@ -0,0 +1,13 @@ +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/phoenixframework/dns_cluster">>}]}. +{<<"name">>,<<"dns_cluster">>}. +{<<"version">>,<<"0.1.3">>}. +{<<"description">>,<<"Simple DNS clustering for distributed Elixir nodes">>}. +{<<"elixir">>,<<"~> 1.11">>}. +{<<"app">>,<<"dns_cluster">>}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"files">>, + [<<"lib">>,<<"lib/dns_cluster.ex">>,<<"CHANGELOG.md">>,<<"LICENSE.md">>, + <<"mix.exs">>,<<"README.md">>,<<".formatter.exs">>]}. +{<<"requirements">>,[]}. +{<<"build_tools">>,[<<"mix">>]}. diff --git a/deps/dns_cluster/lib/dns_cluster.ex b/deps/dns_cluster/lib/dns_cluster.ex new file mode 100644 index 0000000..83f279d --- /dev/null +++ b/deps/dns_cluster/lib/dns_cluster.ex @@ -0,0 +1,202 @@ +defmodule DNSCluster do + @moduledoc """ + Simple DNS based cluster discovery. + + A DNS query is made every `:interval` milliseconds to discover new ips. + Nodes will only be joined if their node basename matches the basename of the + current node. For example if `node()` is `myapp-123@fdaa:1:36c9:a7b:198:c4b1:73c6:1`, + a `Node.connect/1` attempt will be made against every IP returned by the DNS query, + but will only be successful if there is a node running on the remote host with the same + basename, for example `myapp-123@fdaa:1:36c9:a7b:198:c4b1:73c6:2`. Nodes running on + remote hosts, but with different basenames will fail to connect and will be ignored. + + ## Examples + + To start in your supervision tree, add the child: + + children = [ + ..., + {DNSCluster, query: "myapp.internal"} + ] + + See the `start_link/1` docs for all available options. + + If you require more advanced clustering options and strategies, see the + [libcluster](https://hexdocs.pm/libcluster) library. 
+ """ + use GenServer + require Logger + + defmodule Resolver do + @moduledoc false + + require Record + Record.defrecord(:hostent, Record.extract(:hostent, from_lib: "kernel/include/inet.hrl")) + + def basename(node_name) when is_atom(node_name) do + [basename, _] = String.split(to_string(node_name), "@") + basename + end + + def connect_node(node_name) when is_atom(node_name), do: Node.connect(node_name) + + def list_nodes, do: Node.list(:visible) + + def lookup(query, type) when is_binary(query) and type in [:a, :aaaa] do + case :inet_res.getbyname(~c"#{query}", type) do + {:ok, hostent(h_addr_list: addr_list)} -> addr_list + {:error, _} -> [] + end + end + end + + @doc ~S""" + Starts DNS based cluster discovery. + + ## Options + + * `:name` - the name of the cluster. Defaults to `DNSCluster`. + * `:query` - the required DNS query for node discovery, for example: `"myapp.internal"`. + The value `:ignore` can be used to ignore starting the DNSCluster. + * `:interval` - the millisec interval between DNS queries. Defaults to `5000`. + * `:connect_timeout` - the millisec timeout to allow discovered nodes to connect. + Defaults to `10_000`. 
+ + ## Examples + + iex> DNSCluster.start_link(query: "myapp.internal") + {:ok, pid} + + iex> DNSCluster.start_link(query: :ignore) + :ignore + """ + def start_link(opts) do + GenServer.start_link(__MODULE__, opts, name: Keyword.get(opts, :name, __MODULE__)) + end + + @impl true + def init(opts) do + case Keyword.fetch(opts, :query) do + {:ok, :ignore} -> + :ignore + + {:ok, query} when is_binary(query) -> + warn_on_invalid_dist() + resolver = Keyword.get(opts, :resolver, Resolver) + + state = %{ + interval: Keyword.get(opts, :interval, 5_000), + basename: resolver.basename(node()), + query: query, + log: Keyword.get(opts, :log, false), + poll_timer: nil, + connect_timeout: Keyword.get(opts, :connect_timeout, 10_000), + resolver: resolver + } + + {:ok, state, {:continue, :discover_ips}} + + {:ok, other} -> + raise ArgumentError, "expected :query to be a string, got: #{inspect(other)}" + + :error -> + raise ArgumentError, "missing required :query option in #{inspect(opts)}" + end + end + + @impl true + def handle_continue(:discover_ips, state) do + {:noreply, do_discovery(state)} + end + + @impl true + def handle_info(:discover_ips, state) do + {:noreply, do_discovery(state)} + end + + defp do_discovery(state) do + state + |> connect_new_nodes() + |> schedule_next_poll() + end + + defp connect_new_nodes(%{resolver: resolver, connect_timeout: timeout} = state) do + node_names = for name <- resolver.list_nodes(), into: MapSet.new(), do: to_string(name) + + ips = discover_ips(state) + + _results = + ips + |> Enum.map(fn ip -> "#{state.basename}@#{ip}" end) + |> Enum.filter(fn node_name -> !Enum.member?(node_names, node_name) end) + |> Task.async_stream( + fn new_name -> + if resolver.connect_node(:"#{new_name}") do + log(state, "#{node()} connected to #{new_name}") + end + end, + max_concurrency: max(1, length(ips)), + timeout: timeout + ) + |> Enum.to_list() + + state + end + + defp log(state, msg) do + if level = state.log, do: Logger.log(level, msg) + end + + defp 
schedule_next_poll(state) do + %{state | poll_timer: Process.send_after(self(), :discover_ips, state.interval)} + end + + defp discover_ips(%{resolver: resolver, query: query}) do + [:a, :aaaa] + |> Enum.flat_map(&resolver.lookup(query, &1)) + |> Enum.uniq() + |> Enum.map(&to_string(:inet.ntoa(&1))) + end + + defp warn_on_invalid_dist do + release? = is_binary(System.get_env("RELEASE_NAME")) + net_state = if function_exported?(:net_kernel, :get_state, 0), do: :net_kernel.get_state() + + cond do + !net_state -> + :ok + + net_state.started == :no and release? -> + Logger.warning(""" + node not running in distributed mode. Ensure the following exports are set in your rel/env.sh.eex file: + + #!/bin/sh + + export RELEASE_DISTRIBUTION=name + export RELEASE_NODE="myapp@fully-qualified-host-or-ip" + """) + + net_state.started == :no or + (!release? and net_state.started != :no and net_state[:name_domain] != :longnames) -> + Logger.warning(""" + node not running in distributed mode. When running outside of a release, you must start net_kernel manually with + longnames. + https://www.erlang.org/doc/man/net_kernel.html#start-2 + """) + + net_state[:name_domain] != :longnames and release? -> + Logger.warning(""" + node not running with longnames which are required for DNS discovery. 
+ Ensure the following exports are set in your rel/env.sh.eex file: + + #!/bin/sh + + export RELEASE_DISTRIBUTION=name + export RELEASE_NODE="myapp@fully-qualified-host-or-ip" + """) + + true -> + :ok + end + end +end diff --git a/deps/dns_cluster/mix.exs b/deps/dns_cluster/mix.exs new file mode 100644 index 0000000..12cd9ee --- /dev/null +++ b/deps/dns_cluster/mix.exs @@ -0,0 +1,39 @@ +defmodule DNSCluster.MixProject do + use Mix.Project + + @version "0.1.3" + @scm_url "https://github.com/phoenixframework/dns_cluster" + + def project do + [ + app: :dns_cluster, + package: package(), + version: @version, + elixir: "~> 1.11", + start_permanent: Mix.env() == :prod, + deps: deps(), + source_url: @scm_url, + homepage_url: @scm_url, + description: "Simple DNS clustering for distributed Elixir nodes" + ] + end + + defp package do + [ + maintainers: ["Chris McCord"], + licenses: ["MIT"], + links: %{"GitHub" => @scm_url}, + files: ~w(lib CHANGELOG.md LICENSE.md mix.exs README.md .formatter.exs) + ] + end + + def application do + [ + extra_applications: [:logger] + ] + end + + defp deps do + [{:ex_doc, ">= 0.0.0", only: :docs}] + end +end diff --git a/deps/ecto/.formatter.exs b/deps/ecto/.formatter.exs new file mode 100644 index 0000000..00b386f --- /dev/null +++ b/deps/ecto/.formatter.exs @@ -0,0 +1,31 @@ +locals_without_parens = [ + # Query + from: 2, + + # Schema + field: 1, + field: 2, + field: 3, + timestamps: 1, + belongs_to: 2, + belongs_to: 3, + has_one: 2, + has_one: 3, + has_many: 2, + has_many: 3, + many_to_many: 2, + many_to_many: 3, + embeds_one: 2, + embeds_one: 3, + embeds_one: 4, + embeds_many: 2, + embeds_many: 3, + embeds_many: 4 +] + +[ + locals_without_parens: locals_without_parens, + export: [ + locals_without_parens: locals_without_parens + ] +] diff --git a/deps/ecto/.hex b/deps/ecto/.hex new file mode 100644 index 0000000..1b3e9cc Binary files /dev/null and b/deps/ecto/.hex differ diff --git a/deps/ecto/CHANGELOG.md b/deps/ecto/CHANGELOG.md new file 
mode 100644 index 0000000..497cca2 --- /dev/null +++ b/deps/ecto/CHANGELOG.md @@ -0,0 +1,1124 @@ +# Changelog for v3.x + +## v3.13.6 (2026-05-05) + +### Enhancements + + * [mix.exs] Relax decimal requirement + +## v3.13.5 (2025-11-09) + +### Enhancements + + * [Ecto.Query] Support selecting a subset of a subquery as a struct + +## v3.13.4 (2025-10-24) + +### Bug fixes + + * [Ecto.Changeset] Ensure empty binaries are trimmed + * [Ecto.Repo] Ensure rollback applies to dynamic repos + * [Ecto.Type] Properly format `:in` composite types + +## v3.13.3 (2025-09-19) + +### Enhancements + + * [Ecto.Query] Accept a list of things to exclude in `exclude` + +### Bug fixes + + * [Ecto.Query] Allow 2-arity functions as preload function in query + * [Ecto.Query] Remove soft deprecated literal warning + * [Ecto.Schema] Do not consider space and newlines as empty for binary types + +## v3.13.2 (2025-06-24) + +### Bug fixes + + * [Ecto.Query] Fix regression which made queries with multiple joins expensive to compile + * [Ecto.Repo] Fix detection of missing primary key on associations with only nil entries + * [Ecto.Query] Fix macro expansion in `over` clause's `order_by` + +## v3.13.1 (2025-06-19) + +### Bug fixes + + * [Ecto.Repo] Do not automatically apply HOT updates on upsert with `replace`. It is the user responsibility to make sure they do not overlap + +## v3.13.0 (2025-06-18) + +Requires Elixir v1.14+. 
+ +### Enhancements + + * [Ecto] Support Elixir's built-in JSON + * [Ecto.Enum] Add `Ecto.Enum.cast_value/3` + * [Ecto.Query] Allow schema to be used for `values` list types + * [Ecto.Query] Allow strings in `field/2` + * [Ecto.Query] Add `identifier/1` in queries + * [Ecto.Query] Add `constant/1` in queries + * [Ecto.Query] Allow `exclude/2` to remove windows + * [Ecto.Query] Allow source fields in `json_extract_path` + * [Ecto.Repo] Add `Ecto.Repo.prepare_transaction/2` user callback + * [Ecto.Repo] Add `Ecto.Repo.all_by/3` + * [Ecto.Repo] Add `Ecto.Repo.transact/2` + * [Ecto.Repo] Allow HOT updates on upsert queries in Postgres by removing duplicate fields during `replace_all_except` + * [Ecto.Schema] Support `@schema_redact: :all_except_primary_keys` module attribute + +### Bug fixes + + * [Ecto.Query] Allow select merging maps with all nil values + * [Ecto.Query] `map/2` in queries now always returns a map on joins, even on left joins, for consistency with `from` sources + * [Ecto.Schema] Fix an issue where Ecto could warn an association did not exist, when it did + +### Soft deprecations (no warnings emitted) + + * [Ecto.Repo] `Ecto.Repo.transaction/2` is soft-deprecated in favor of `Ecto.Repo.transact/1` + * [Ecto.Query.API] `literal/1` is deprecated in favor of `identifier/1` + +## v3.12.6 (2025-06-11) + +Fix deprecations on Elixir v1.19. 
+ +## v3.12.5 (2024-11-28) + +### Bug fixes + + * [Ecto.Query] Raise when empty list is given to `values/2` + * [Ecto.Query] Fix inspecting `dynamic/2` with interpolated named bindings + * [Ecto.Query] Plan sources before creating plan_subquery closure + * [Ecto.Repo] Remove read-only changes from returned record during insert/update + * [Ecto.Repo] Cascade `:allow_stale` options to assocs + +## v3.12.4 (2024-10-07) + +### Enhancements + + * [Ecto.Repo] Document new `:pool_count` option + +### Bug fixes + + * [Ecto.Repo] Make `Ecto.Repo.reload` respect `source` + +## v3.12.3 (2024-09-06) + +### Bug fixes + + * [Ecto.Changeset] Allow associations to be cast/put inside of embedded schema changesets + +## v3.12.2 (2024-08-25) + +### Bug fixes + + * [Ecto.Query] Allow `:prefix` to be set to any term + * [Ecto.Repo] Avoid overwriting ssl opts from url if already set in config + +## v3.12.1 (2024-08-13) + +### Enhancements + + * [Ecto.Type] Add `Ecto.Type.parameterized?/2` + +### Bug fixes + + * [Ecto.Enum] Fix dialyzer specification + * [Ecto.Query] Remove incorrect subquery parameter check + +## v3.12.0 (2024-08-12) + +### Enhancements + + * [Ecto.Changeset] Allow `{message, opts}` to be given as message for several validation APIs + * [Ecto.Query] Introduce `is_named_binding` guard + * [Ecto.Query] Subqueries are now supported in `distinct`, `group_by`, `order_by` and `window` expressions + * [Ecto.Query] Allow `select_merge` to be used in more `insert_all` and subquery operations by merging distinct fields + * [Ecto.Query] Allow literal maps inside `dynamic/2` + * [Ecto.Query] Support macro expansion at the root level of `order_by` + * [Ecto.Query] Support preloading subquery sources in `from` and `join` + * [Ecto.Query] Allow map updates with dynamic values in `select` + * [Ecto.Query] Allow any data structure that implements the Enumerable protocol on the right side of `in` + * [Ecto.Repo] Support 2-arity preload functions that receive ids and the association 
metadata + * [Ecto.Repo] Allow HOT updates on upsert queries in Postgres by removing duplicate fields during `replace_all` + * [Ecto.Repo] `insert_all` supports queries with only source + * [Ecto.Repo] `insert_all` supports queries with the update syntax + * [Ecto.Repo] Support `:allow_stale` on Repo struct/changeset operations + * [Ecto.Schema] Allow schema fields to be read-only via `:writable` option + * [Ecto.Schema] Add `:defaults_to_struct` option to `embeds_one` + * [Ecto.Schema] Support `:duration` type which maps to Elixir v1.17 duration + * [Ecto.Type] Bubble up custom cast errors of the inner type for `{:map, type}` and `{:array, type}` + * [Ecto.Type] Add `Ecto.Type.cast!/2` + +### Bug fixes + + * [Ecto.Query] Ignore query prefix in CTE sources + * [Ecto.Query] Fix a bug of `preload` when a through association is used in a join and has a nested separate query preload. Now the association chain is no longer preloaded and we simply preload directly onto the loaded through association. + * [Ecto.Query] Fix inspection when select has `map/struct` modifiers + * [Ecto.Query] Disable query cache for `values` lists + * [Ecto.Repo] Convert fields to their sources in `insert_all` + * [Ecto.Repo] Raise if empty list is given to `{:replace, fields}` + * [Ecto.Repo] Validate `:prefix` is a string/binary, warn otherwise + * [Ecto.Repo] Remove compile dependency on `:preload_order` MFA in `has_many` + +### Adapter changes + + * `distinct`, `group_by`, `order_by` and `window` expressions use the new `Ecto.Query.ByExpr` + struct rather than the old `Ecto.Query.QueryExpr` struct + +### Potential incompatibilities + + * [Ecto.Changeset] Associations inside embeds have always been read-only. We now raise if you try to cast them inside a changeset (this was reverted in v3.12.3) + * [Ecto.ParameterizedType] Parameterized types are now represented internally as `{:parameterized, {mod, state}}`. 
While this representation is private, projects may have been relying on it, and therefore they need to adapt accordingly. Use `Ecto.ParameterizedType.init/2` to instantiate parameterized types. + * [Ecto.Query] Drop `:array_join` join type. It was added for Clickhouse support but it is no longer used + * [Ecto.Query] Validate `:prefix` is a string/binary (this was reverted in v3.12.2) + +## v3.11.2 (2024-03-07) + +### Bug fixes + + * [Ecto.Query] Fix compatibility with upcoming Elixir v1.17 + * [Ecto.Repo] Do not hide failures when preloading if the parent process is trapping exits + +## v3.11.1 (2023-12-07) + +### Enhancements + + * [Ecto.Query] Allow module attributes to be given to `in` operator + +### Bug fixes + + * [Ecto.Query] Fix interpolating strings and atoms as map keys + * [Ecto.Query] Plan subqueries in `having` + * [Ecto.Query] Fix late binding with composite types + +## v3.11.0 (2023-11-14) + +### Enhancements + + * [Ecto.Association] Allow `preload_order` to take MFAs for `many_to_many` associations. This allows ordering by the join table + * [Ecto.Query] Add `:operation` option to `with_cte/3`. 
This allows CTEs to perform updates and deletes + * [Ecto.Query] Support `splice(^...)` in `fragment` + * [Ecto.Query] Add `prepend_order_by/3` + * [Ecto.Query] Allow `selected_as/1` and `selected_as/2` to take interpolated names + * [Ecto.Query] Allow map update syntax to work with `nil` values in `select` + * [Ecto.Query] Allow hints to inject SQL using `unsafe_fragment` + * [Ecto.Query] Support `values/2` lists + * [Ecto.Repo] Add `:on_preload_spawn` option to `preload/3` + * [Ecto.Schema] Support `:load_in_query` option for embeds + * [Ecto.Schema] Support `:returning` option for delete + +### Bug fixes + + * [Ecto.Association] Ensure parent prefix is passed to `on_delete` queries + * [Ecto.Changeset] Ensure duplicate primary keys are always detected for embeds + * [Ecto.Embedded] Raise `ArgumentError` when specifying an autogenerated `:id` primary key + * [Ecto.Query] Ensure subquery selects generate unique cache keys + * [Ecto.Query] Raise on literal non-base binary/uuids in query + * [Ecto.Repo] Reset `belongs_to` association if foreign key update results in a mismatch + +### Adapter changes + + * Adapters now receive `nil` for encoding/decoding + * Adapters now receive `type` instead of `{:maybe, type}` as the first argument to `loaders/2` + +### Deprecations + + * [Ecto.Query] Keyword hints are no longer supported. 
Please use `unsafe_fragment` inside of hints instead + +## v3.10.3 (2023-07-07) + +### Enhancements + + * [Ecto.Query] Allow dynamic `field/2` in `type/2` + +### Bug fixes + + * [Ecto.Changesets] Limit the largest integer to less than 32 digits + * [Ecto.Type] Limit the largest integer to less than 32 digits + +## v3.10.2 (2023-06-07) + +### Enhancements + + * [Ecto.Changeset] Support a three-arity function with position on `cast_assoc` and `cast_embed` + * [Ecto.Changeset] Add support for maps in `validate_length/3` + * [Ecto.Changeset] Add `:nulls_distinct` option to `unsafe_validate_unique` + * [Ecto.Query] Support `array_join` type for ClickHouse adapter + * [Ecto.Query.API] Support parameterized and custom map types in json path validation + +### Bug fixes + + * [Ecto.Repo] Respect parent prefix in `Repo.aggregate` + * [Ecto.Query.API] Fix late binding in `json_extract_path` + +### Deprecations + + * Deprecate MFAs on `:with` + +## v3.10.1 (2023-04-12) + +### Bug fixes + + * [Ecto.Changeset] Consider `sort_param` even if the relation param was not given + * [Ecto.Query] Correct typespec to avoid Dialyzer warnings + +## v3.10.0 (2023-04-10) + +This release contains many improvements to Ecto.Changeset, functions like `Ecto.Changeset.changed?/2` and `field_missing?/2` will help make your code more expressive. Improvements to association and embed handling will also make it easier to manage more complex forms, especially those embedded within Phoenix.LiveView applications. + +On the changeset front, note this release unifies the handling of empty values between `cast/4` and `validate_required/3`. 
**If you were setting `:empty_values` in the past and you want to preserve this new behaviour throughout, you may want to update your code** from this: + + Ecto.Changeset.cast(changeset, params, [:field1, :field2], empty_values: ["", []]) + +to: + + empty_values = [[]] ++ Ecto.Changeset.empty_values() + Ecto.Changeset.cast(changeset, params, [:field1, :field2], empty_values: empty_values) + +Queries have also been improved to support LIMIT WITH TIES as well as materialized CTEs. + +### Enhancements + + * [Ecto.Changeset] Add `get_assoc`/`get_embed` + * [Ecto.Changeset] Add `field_missing?/2` + * [Ecto.Changeset] Add `changed?/2` and `changed?/3` with predicates support + * [Ecto.Changeset] Allow `Regex` to be used in constraint names for exact matches + * [Ecto.Changeset] Allow `:empty_values` option in `cast/4` to include a function which must return true if the value is empty + * [Ecto.Changeset] `cast/4` will by default consider strings made only of whitespace characters to be empty + * [Ecto.Changeset] Add support for `:sort_param` and `:drop_param` on `cast_assoc` and `cast_embed` + * [Ecto.Query] Support materialized option in CTEs + * [Ecto.Query] Support dynamic field inside `json_extract_path` + * [Ecto.Query] Support interpolated values for from/join prefixes + * [Ecto.Query] Support ties in limit expressions through `with_ties/3` + * [Ecto.Schema] Add `:autogenerate_fields` to the schema reflection API + * [Ecto.ParameterizedType] Add optional callback `format/1` + +### Bug fixes + + * [Ecto.Changeset] Make unsafe validate unique exclude primary key only for loaded schemas + * [Ecto.Changeset] Raise when change provided to `validate_format/4` is not a string + * [Ecto.Query] Fix bug in `json_extract_path` where maps were not allowed to be nested inside of embeds + * [Ecto.Schema] Allow inline embeds to overwrite conflicting aliases + +## v3.9.6 (2023-07-07) + +### Enhancements + + * [Ecto.Query] Allow dynamic `field/2` in `type/2` + +### Bug fixes + + * 
[Ecto.Changesets] Limit the largest integer to less than 32 digits + * [Ecto.Type] Limit the largest integer to less than 32 digits + +## v3.9.5 (2023-03-22) + +### Bug fixes + + * [Ecto.Query] Rename `@opaque dynamic` type to `@opaque dynamic_expr` to avoid conflicts with Erlang/OTP 26 + +## v3.9.4 (2022-12-21) + +### Bug fixes + + * [Ecto.Query] Fix regression with interpolated preloads introduced in v3.9.3 + +## v3.9.3 (2022-12-20) + +### Enhancements + + * [Ecto] Add `reset_fields/2` + * [Ecto.Multi] Add `exists?/4` function + * [Ecto.Repo] Keep url scheme in the repo configuration + * [Ecto.Query] Add support for cross lateral joins + * [Ecto.Query] Allow preloads to use `dynamic/2` + * [Ecto.Query.API] Allow the entire path to be interpolated in `json_extract_path/2` + +## v3.9.2 (2022-11-18) + +### Enhancements + + * [Ecto.Query] Allow `selected_as` inside CTE + * [Ecto.Query] Allow `selected_as` to be used in subquery + +### Bug fixes + + * [Ecto.Repo] Fix preloading through associations on `nil` + * [Ecto.Query] Fix select merging a `selected_as` field into a source + +## v3.9.1 (2022-10-06) + +### Enhancements + + * [Ecto.Query] Allow `selected_as` at the root of `dynamic/2` + * [Ecto.Query] Allow `selected_as` to be used with `type/2` + * [Ecto.Query] Allow `selected_as` to be used with `select_merge` + +### Bug fixes + + * [Ecto.Changeset] Reenable support for embedded schemas in `unsafe_validate_unique/4` + * [Ecto.Query] Ensure `join_where` conditions preload correctly in `many_to_many` or with queries with one or many joins + +## v3.9.0 (2022-09-27) + +### Enhancements + + * [Ecto.Changeset] Add `:force_changes` option to `cast/4` + * [Ecto.Enum] Allow enum fields to be embed either as their values or their dumped versions + * [Ecto.Query] Support `^%{field: dynamic(...)}` in `select` and `select_merge` + * [Ecto.Query] Support `%{field: subquery(...)}` in `select` and `select_merge` + * [Ecto.Query] Support select aliases through `selected_as/1` and 
`selected_as/2` + * [Ecto.Query] Allow `parent_as/1` in `type/2` + * [Ecto.Query] Add `with_named_binding/3` + * [Ecto.Query] Allow fragment sources in keyword queries + * [Ecto.Repo] Support `idle_interval` query parameter in connection URL + * [Ecto.Repo] Log human-readable UUIDs by using pre-dumped query parameters + * [Ecto.Schema] Support preloading associations in embedded schemas + +### Bug fixes + + * [Ecto.Changeset] Raise when schemaless changeset or embedded schema is used in `unsafe_validate_unique/4` + * [Ecto.Query] Respect virtual field type in subqueries + * [Ecto.Query] Don't select struct fields overridden with `nil` + * [Ecto.Query] Fix `select_merge` not tracking `load_in_query: false` field + * [Ecto.Query] Fix field source when used in `json_extract_path` + * [Ecto.Query] Properly build CTEs at compile time + * [Ecto.Query] Properly order subqueries in `dynamic` + * [Ecto.Repo] Fix `insert_all` query parameter count when using value queries alongside `placeholder` + * [Ecto.Repo] Raise if combination query is used in a `many` preload + * [Ecto.Schema] Ignore associations that aren't loaded on insert + +## v3.8.4 (2022-06-04) + +### Enhancements + + * [Ecto.Multi] Add `one/2` and `all/2` functions + * [Ecto.Query] Support `literal(...)` in `fragment` + +### Bug fixes + + * [Ecto.Schema] Make sure fields are inspected in the correct order in Elixir v1.14+ + +## v3.8.3 (2022-05-11) + +### Bug fixes + + * [Ecto.Query] Allow source aliases to be used in `type/2` + * [Ecto.Schema] Avoid "undefined behaviour/struct" warnings and errors during compilation + +## v3.8.2 (2022-05-05) + +### Bug fixes + + * [Ecto.Adapter] Do not require adapter metadata to be raw maps + * [Ecto.Association] Respect `join_where` in many to many `on_replace` deletes + * [Ecto.Changeset] Check if list is in `empty_values` before nested validations + +## v3.8.1 (2022-04-27) + +### Bug fixes + + * [Ecto.Query] Fix regression where a join's on parameter on `update_all` was out of order + 
+## v3.8.0 (2022-04-26) + +Ecto v3.8 requires Elixir v1.10+. + +### Enhancements + + * [Ecto] Add new Embedded chapter to Introductory guides + * [Ecto.Changeset] Allow custom `:error_key` in unique_constraint + * [Ecto.Changeset] Add `:match` option to all constraint functions + * [Ecto.Query] Support dynamic aliases + * [Ecto.Query] Allow using `type/2` with virtual fields + * [Ecto.Query] Suggest alternatives to inexistent fields in queries + * [Ecto.Query] Support passing queries using subqueries to `insert_all` + * [Ecto.Repo] Allow `stacktrace: true` so stacktraces are included in telemetry events and logs + * [Ecto.Schema] Validate options given to schema fields + +### Bug fixes + + * [Ecto.Changeset] Address regression on `validate_subset` no longer working with custom array types + * [Ecto.Changeset] **Potentially breaking change**: Detect `empty_values` inside lists when casting. This may cause issues if you were relying on the casting of empty values (by default, only `""`). + * [Ecto.Query] Handle atom list sigils in `select` + * [Ecto.Query] Improve tracking of `select_merge` inside subqueries + * [Ecto.Repo] Properly handle literals in queries given to `insert_all` + * [Ecto.Repo] Don't surface persisted data as changes on embed updates + * [Ecto.Repo] **Potentially breaking change**: Raise if an association doesn't have a primary key and is preloaded in a join query. Previously, this would silently produce the wrong result in certain circumstances. 
+ * [Ecto.Schema] Preserve parent prefix on join tables + +## v3.7.2 (2022-03-13) + +### Enhancements + + * [Ecto.Schema] Add option to skip validations for default values + * [Ecto.Query] Allow coalesce in `type/2` + * [Ecto.Query] Support parameterized types in type/2 + * [Ecto.Query] Allow arbitrary parentheses in query expressions + +## v3.7.1 (2021-08-27) + +### Enhancements + + * [Ecto.Embedded] Make `Ecto.Embedded` public and describe struct fields + +### Bug fixes + + * [Ecto.Repo] Make sure parent changeset is included in changes for `insert`/`update`/`delete` when there are errors processing the parent itself + +## v3.7.0 (2021-08-19) + +### Enhancements + + * [Ecto.Changeset] Add `Ecto.Changeset.traverse_validations/2` + * [Ecto.Enum] Add `Ecto.Enum.mappings/2` and `Ecto.Enum.dump_values/2` + * [Ecto.Query] Add support for dynamic `as(^as)` and `parent_as(^as)` + * [Ecto.Repo] Add stale changeset to `Ecto.StaleEntryError` fields + * [Ecto.Schema] Add support for `@schema_context` to set context metadata on schema definition + +### Bug fixes + + * [Ecto.Changeset] Fix changeset inspection not redacting when embedded + * [Ecto.Changeset] Use semantic comparison on `validate_inclusion`, `validate_exclusion`, and `validate_subset` + * [Ecto.Enum] Raise on duplicate values in `Ecto.Enum` + * [Ecto.Query] Make sure `hints` are included in the query cache + * [Ecto.Repo] Support placeholders in `insert_all` without schemas + * [Ecto.Repo] Wrap in a subquery when query given to `Repo.aggregate` has combination + * [Ecto.Repo] Fix CTE subqueries not finding parent bindings + * [Ecto.Repo] Return changeset with assocs if any of the assocs are invalid + +## v3.6.2 (2021-05-28) + +### Enhancements + + * [Ecto.Query] Support macros in `with_cte` + * [Ecto.Repo] Add `Ecto.Repo.all_running/0` to list all running repos + +### Bug fixes + + * [Ecto.Query] Do not omit nil fields in a subquery select + * [Ecto.Query] Allow `parent_as` to look for an alias all the way up 
across subqueries + * [Ecto.Query] Raise if a nil value is given to a query from a nested map parameter + * [Ecto.Query] Fix `insert_all` when using both `:on_conflict` and `:placeholders` + * [mix ecto.load] Do not pass `--force` to underlying compile task + +## v3.6.1 (2021-04-12) + +### Enhancements + + * [Ecto.Changeset] Allow the `:query` option in `unsafe_validate_unique` + +### Bug fixes + + * [Ecto.Changeset] Add the relation id in `apply_changes` if the relation key exists (instead of hardcoding it to `id`) + +## v3.6.0 (2021-04-03) + +### Enhancements + + * [Ecto.Changeset] Support `:repo_opts` in `unsafe_validate_unique` + * [Ecto.Changeset] Add a validation error if trying to cast a cardinality one embed/assoc with anything other than a map or keyword list + * [Ecto.Enum] Allow enums to map to custom values + * [Ecto.Multi] Add `Ecto.Multi.put/3` for directly storing values + * [Ecto.Query] **Potentially breaking change**: optimize `many_to_many` queries so it no longer loads intermediary tables in more occasions. 
This may cause issues if you are using `Ecto.assoc/2` to load `many_to_many` associations and then trying to access intermediate bindings (which is discouraged but it was possible) + * [Ecto.Repo] Allow `insert_all` to be called with a query instead of rows + * [Ecto.Repo] Add `:placeholders` support to `insert_all` to avoid sending the same value multiple times + * [Ecto.Schema] Support `:preload_order` on `has_many` and `many_to_many` associations + * [Ecto.UUID] Add bang UUID conversion methods + * [Ecto.Query] The `:hints` option now accepts dynamic values when supplied as tuples + * [Ecto.Query] Support `select: map(source, fields)` where `source` is a fragment + * [Ecto.Query] Allow referring to the parent query in a join's subquery select via `parent_as` + * [mix ecto] Support file and line interpolation on `ECTO_EDITOR` + +### Bug fixes + + * [Ecto.Changeset] Change `apply_changes/1` to add the relation to the `struct.relation_id` if relation struct is persisted + * [Ecto.Query] Remove unnecessary INNER JOIN in many to many association query + * [Ecto.Query] Allow parametric types to be interpolated in queries + * [Ecto.Schema] Raise `ArgumentError` when default has invalid type + +## v3.5.8 (2021-02-21) + +### Enhancements + + * [Ecto.Query] Support map/2 on fragments and subqueries + +## v3.5.7 (2021-02-07) + +### Bug fixes + + * [Ecto.Query] Fixes param ordering issue on dynamic queries with subqueries + +## v3.5.6 (2021-01-20) + +### Enhancements + + * [Ecto.Schema] Support `on_replace: :delete_if_exists` on associations + +### Bug fixes + + * [Ecto.Query] Allow unary minus operator in query expressions + * [Ecto.Schema] Allow nil values on typed maps + +## v3.5.5 (2020-11-12) + +### Enhancements + + * [Ecto.Query] Add support for subqueries operators: `all`, `any`, and `exists` + +### Bug fixes + + * [Ecto.Changeset] Use association source on `put_assoc` with maps/keywords + * [Ecto.Enum] Add `cast` clause for nil values on `Ecto.Enum` + * 
[Ecto.Schema] Allow nested type `:any` for non-virtual fields + +## v3.5.4 (2020-10-28) + +### Enhancements + + * [mix ecto.drop] Provide `--force-drop` for databases that may support it + * [guides] Add new "Multi tenancy with foreign keys" guide + +### Bug fixes + + * [Ecto.Changeset] Make keys optional in specs + * [Ecto.Enum] Make sure `values/2` works for virtual fields + * [Ecto.Query] Fix missing type on CTE queries that select a single field + +## v3.5.3 (2020-10-21) + +### Bug fixes + + * [Ecto.Query] Do not reset parameter counter for nested CTEs + * [Ecto.Type] Fix regression where array type with nils could no longer be cast/load/dump + * [Ecto.Type] Fix CaseClauseError when casting a decimal with a binary remainder + +## v3.5.2 (2020-10-12) + +### Enhancements + + * [Ecto.Repo] Add Repo.reload/2 and Repo.reload!/2 + +### Bug fixes + + * [Ecto.Changeset] Fix "__schema__/1 is undefined or private" error while inspecting a schemaless changeset + * [Ecto.Repo] Invoke `c:Ecto.Repo.default_options/1` per entry-point operation + +## v3.5.1 (2020-10-08) + +### Enhancements + + * [Ecto.Changeset] Warn if there are duplicate IDs in the parent schema for `cast_assoc/3`/`cast_embed/3` + * [Ecto.Schema] Allow `belongs_to` to accept options for parameterized types + +### Bug fixes + + * [Ecto.Query] Keep field types when using a subquery with source + +## v3.5.0 (2020-10-03) + +v3.5 requires Elixir v1.8+. + +### Bug fixes + + * [Ecto.Changeset] Ensure `:empty_values` in `cast/4` does not automatically propagate to following cast calls. If you want a given set of `:empty_values` to apply to all `cast/4` calls, change the value stored in `changeset.empty_values` instead + * [Ecto.Changeset] **Potentially breaking change**: Do not force repository updates to happen when using `optimistic_lock`. The lock field will only be incremented if the record has other changes. If no changes, nothing happens. 
+ * [Ecto.Changeset] Do not automatically share empty values across `cast/3` calls + * [Ecto.Query] Consider query prefix in cte/combination query cache + * [Ecto.Query] Allow the entry to be marked as nil when using left join with subqueries + * [Ecto.Query] Support subqueries inside dynamic expressions + * [Ecto.Repo] Fix preloading when using dynamic repos and the sandbox in automatic mode + * [Ecto.Repo] Do not duplicate collections when associations are preloaded for repeated elements + +### Enhancements + + * [Ecto.Enum] Add `Ecto.Enum` as a custom parameterized type + * [Ecto.Query] Allow `:prefix` in `from` to be set to nil + * [Ecto.Query] Do not restrict subqueries in `where` to map/struct types + * [Ecto.Query] Allow atoms in query without interpolation in order to support Ecto.Enum + * [Ecto.Schema] Do not validate uniqueness if there is a prior error on the field + * [Ecto.Schema] Allow `redact: true` in `field` + * [Ecto.Schema] Support parameterized types via `Ecto.ParameterizedType` + * [Ecto.Schema] Rewrite embeds and assocs as parameterized types. This means `__schema__(:type, assoc_or_embed)` now returns a parameterized type. 
To check if something is an association, use `__schema__(:assocs)` or `__schema__(:embeds)` instead + +## v3.4.6 (2020-08-07) + +### Enhancements + + * [Ecto.Query] Allow `count/0` on `type/2` + * [Ecto.Multi] Support anonymous functions in multiple functions + +### Bug fixes + + * [Ecto.Query] Consider booleans as literals in unions, subqueries, ctes, etc + * [Ecto.Schema] Generate IDs for nested embeds + +## v3.4.5 (2020-06-14) + +### Enhancements + + * [Ecto.Changeset] Allow custom error key in `unsafe_validate_unique` + * [Ecto.Changeset] Improve performance when casting large params maps + +### Bug fixes + + * [Ecto.Changeset] Improve error message for invalid `cast_assoc` + * [Ecto.Query] Fix inspecting query with fragment CTE + * [Ecto.Query] Fix inspecting dynamics with aliased bindings + * [Ecto.Query] Improve error message when selecting a single atom + * [Ecto.Repo] Reduce data-copying when preloading multiple associations + * [Ecto.Schema] Do not define a compile-time dependency for schema in `:join_through` + +## v3.4.4 (2020-05-11) + +### Enhancements + + * [Ecto.Schema] Add `join_where` support to `many_to_many` + +## v3.4.3 (2020-04-27) + +### Enhancements + + * [Ecto.Query] Support `as/1` and `parent_as/1` for lazy named bindings and to allow parent references from subqueries + * [Ecto.Query] Support `x in subquery(query)` + +### Bug fixes + + * [Ecto.Query] Do not raise for missing assocs if :force is given to preload + * [Ecto.Repo] Return error from `Repo.delete` on invalid changeset from `prepare_changeset` + +## v3.4.2 (2020-04-10) + +### Enhancements + + * [Ecto.Changeset] Support multiple fields in `unique_constraint/3` + +## v3.4.1 (2020-04-08) + +### Enhancements + + * [Ecto] Add `Ecto.embedded_load/3` and `Ecto.embedded_dump/2` + * [Ecto.Query] Improve error message on invalid JSON expressions + * [Ecto.Repo] Emit `[:ecto, :repo, :init]` telemetry event upon Repo init + +### Bug fixes + + * [Ecto.Query] Do not support JSON selectors on 
`type/2` + +### Deprecations + + * [Ecto.Repo] Deprecate `conflict_target: {:constraint, _}`. It is a discouraged approach and `{:unsafe_fragment, _}` is still available if someone definitely needs it + +## v3.4.0 (2020-03-24) + +v3.4 requires Elixir v1.7+. + +### Enhancements + + * [Ecto.Query] Allow dynamic queries in CTE and improve error message + * [Ecto.Query] Add `Ecto.Query.API.json_extract_path/2` and JSON path support to query syntax. For example, `posts.metadata["tags"][0]["name"]` will return the name of the first tag stored in the `:map` metadata field + * [Ecto.Repo] Add new `default_options/1` callback to repository + * [Ecto.Repo] Support passing `:telemetry_options` to repository operations + +### Bug fixes + + * [Ecto.Changeset] Properly add validation annotation to `validate_acceptance` + * [Ecto.Query] Raise if there is loaded non-empty association data without related key when preloading. This typically means not all fields have been loaded in a query + * [Ecto.Schema] Show meaningful error in case `schema` is invoked twice in an `Ecto.Schema` + +## v3.3.4 (2020-02-27) + +### Bug fixes + + * [mix ecto] Do not rely on map ordering when parsing repos + * [mix ecto.gen.repo] Improve error message when a repo is not given + +## v3.3.3 (2020-02-14) + +### Enhancements + + * [Ecto.Query] Support fragments in `lock` + * [Ecto.Query] Handle `nil` in `select_merge` with similar semantics to SQL databases (i.e. 
it simply returns `nil` itself) + +## v3.3.2 (2020-01-28) + +### Enhancements + + * [Ecto.Changeset] Only bump optimistic lock in case of success + * [Ecto.Query] Allow macros in Ecto window expressions + * [Ecto.Schema] Support `:join_defaults` on `many_to_many` associations + * [Ecto.Schema] Allow MFargs to be given to association `:defaults` + * [Ecto.Type] Add `Ecto.Type.embedded_load` and `Ecto.Type.embedded_dump` + +### Bug fixes + + * [Ecto.Repo] Ignore empty hostname when parsing database url (Elixir v1.10 support) + * [Ecto.Repo] Rewrite combinations on Repo.exists? queries + * [Ecto.Schema] Respect child `@schema_prefix` in `cast_assoc` + * [mix ecto.gen.repo] Use `config_path` when writing new config in `mix ecto.gen.repo` + +## v3.3.1 (2019-12-27) + +### Enhancements + + * [Ecto.Query.WindowAPI] Support `filter/2` + +### Bug fixes + + * [Ecto.Query.API] Fix `coalesce/2` usage with mixed types + +## v3.3.0 (2019-12-11) + +### Enhancements + + * [Ecto.Adapter] Add `storage_status/1` callback to `Ecto.Adapters.Storage` behaviour + * [Ecto.Changeset] Add `Ecto.Changeset.apply_action!/2` + * [Ecto.Changeset] Remove actions restriction in `Ecto.Changeset.apply_action/2` + * [Ecto.Repo] Introduce `c:Ecto.Repo.aggregate/2` + * [Ecto.Repo] Support `{:replace_all_except, fields}` in `:on_conflict` + +### Bug fixes + + * [Ecto.Query] Make sure the `:prefix` option in `:from`/`:join` also cascades to subqueries + * [Ecto.Query] Make sure the `:prefix` option in `:join` also cascades to queries + * [Ecto.Query] Use database returned values for literals. Previous Ecto versions knew literals from queries should not be discarded for combinations but, even if they were not discarded, we would ignore the values returned by the database + * [Ecto.Repo] Do not wrap schema operations in a transaction if already inside a transaction. 
We have also removed the **private** option called `:skip_transaction` + +### Deprecations + + * [Ecto.Repo] `:replace_all_except_primary_keys` is deprecated in favor of `{:replace_all_except, fields}` in `:on_conflict` + +## v3.2.5 (2019-11-03) + +### Bug fixes + + * [Ecto.Query] Fix a bug where executing some queries would leak the `{:maybe, ...}` type + +## v3.2.4 (2019-11-02) + +### Bug fixes + + * [Ecto.Query] Improve error message on invalid join binding + * [Ecto.Query] Make sure the `:prefix` option in `:join` also applies to through associations + * [Ecto.Query] Invoke custom type when loading aggregations from the database (but fallback to database value if it can't be cast) + * [mix ecto.gen.repo] Support Elixir v1.9 style configs + +## v3.2.3 (2019-10-17) + +### Bug fixes + + * [Ecto.Changeset] Do not convert enums given to `validate_inclusion` to a list + +### Enhancements + + * [Ecto.Changeset] Improve error message on non-atom keys to change/put_change + * [Ecto.Changeset] Allow :with to be given as a `{module, function, args}` tuple on `cast_association/cast_embed` + * [Ecto.Changeset] Add `fetch_change!/2` and `fetch_field!/2` + +## v3.2.2 (2019-10-01) + +### Bug fixes + + * [Ecto.Query] Fix keyword arguments given to `:on` when a bind is not given to join + * [Ecto.Repo] Make sure a preload given to an already preloaded has_many :through is loaded + +## v3.2.1 (2019-09-17) + +### Enhancements + + * [Ecto.Changeset] Add rollover logic for default incrementer in `optimistic_lock` + * [Ecto.Query] Also expand macros when used inside `type/2` + +### Bug fixes + + * [Ecto.Query] Ensure queries with non-cacheable queries in CTEs/combinations are also not-cacheable + +## v3.2.0 (2019-09-07) + +v3.2 requires Elixir v1.6+. 
+ +### Enhancements + + * [Ecto.Query] Add common table expressions support `with_cte/3` and `recursive_ctes/2` + * [Ecto.Query] Allow `dynamic/3` to be used in `order_by`, `distinct`, `group_by`, as well as in `partition_by`, `order_by`, and `frame` inside `windows` + * [Ecto.Query] Allow filters in `type/2` expressions + * [Ecto.Repo] Merge options given to the repository into the changeset `repo_opts` and assign it back to make it available down the chain + * [Ecto.Repo] Add `prepare_query/3` callback that is invoked before query operations + * [Ecto.Repo] Support `:returning` option in `Ecto.Repo.update/2` + * [Ecto.Repo] Support passing a one arity function to `Ecto.Repo.transaction/2`, where the argument is the current repo + * [Ecto.Type] Add a new `embed_as/1` callback to `Ecto.Type` that allows adapters to control embedding behaviour + * [Ecto.Type] Add `use Ecto.Type` for convenience that implements the new required callbacks + +### Bug fixes + + * [Ecto.Association] Ensure we delete an association before inserting when replacing on `has_one` + * [Ecto.Query] Do not allow interpolated `nil` in literal keyword list when building query + * [Ecto.Query] Do not remove literals from combinations, otherwise UNION/INTERSECTION queries may not match the number of values in `select` + * [Ecto.Query] Do not attempt to merge at compile-time non-keyword lists given to `select_merge` + * [Ecto.Repo] Do not override `:through` associations on preload unless forcing + * [Ecto.Repo] Make sure prefix option cascades to combinations and recursive queries + * [Ecto.Schema] Use OS time without drift when generating timestamps + * [Ecto.Type] Allow any datetime in `datetime_add` + +## v3.1.7 (2019-06-27) + +### Bug fixes + + * [Ecto.Changeset] Make sure `put_assoc` with empty changeset propagates on insert + +## v3.1.6 (2019-06-19) + +### Enhancements + + * [Ecto.Repo] Add `:read_only` repositories + * [Ecto.Schema] Also validate options given to `:through` associations + 
+### Bug fixes + + * [Ecto.Changeset] Do not mark `put_assoc` from `[]` to `[]` or from `nil` to `nil` as change + * [Ecto.Query] Remove named binding when excluding joins + * [mix ecto.gen.repo] Use `:config_path` instead of hardcoding to `config/config.exs` + +## v3.1.5 (2019-06-06) + +### Enhancements + + * [Ecto.Repo] Allow `:default_dynamic_repo` option on `use Ecto.Repo` + * [Ecto.Schema] Support `{:fragment, ...}` in the `:where` option for associations + +### Bug fixes + + * [Ecto.Query] Fix handling of literals in combinators (union, except, intersection) + +## v3.1.4 (2019-05-07) + +### Bug fixes + + * [Ecto.Changeset] Convert validation enums to lists before adding them as validation metadata + * [Ecto.Schema] Properly propagate prefix to join_through source in many_to_many associations + +## v3.1.3 (2019-04-30) + +### Enhancements + + * [Ecto.Changeset] Expose the enum that was validated against in errors from enum-based validations + +## v3.1.2 (2019-04-24) + +### Enhancements + + * [Ecto.Query] Add support for `type+over` + * [Ecto.Schema] Allow schema fields to be excluded from queries + +### Bug fixes + + * [Ecto.Changeset] Do not list a field as changed if it is updated to its original value + * [Ecto.Query] Keep literal numbers and bitstring in subqueries and unions + * [Ecto.Query] Improve error message for invalid `type/2` expression + * [Ecto.Query] Properly count interpolations in `select_merge/2` + +## v3.1.1 (2019-04-04) + +### Bug fixes + + * [Ecto] Do not require Jason (i.e. it should continue to be an optional dependency) + * [Ecto.Repo] Make sure `many_to_many` and `Ecto.Multi` work with dynamic repos + +## v3.1.0 (2019-04-02) + +v3.1 requires Elixir v1.5+. 
+ +### Enhancements + + * [Ecto.Changeset] Add `not_equal_to` option for `validate_number` + * [Ecto.Query] Improve error message for missing `fragment` arguments + * [Ecto.Query] Improve error message on missing struct key for structs built in `select` + * [Ecto.Query] Allow dynamic named bindings + * [Ecto.Repo] Add dynamic repository support with `Ecto.Repo.put_dynamic_repo/1` and `Ecto.Repo.get_dynamic_repo/0` (experimental) + * [Ecto.Type] Cast naive_datetime/utc_datetime strings without seconds + +### Bug fixes + + * [Ecto.Changeset] Do not run `unsafe_validate_unique` query unless relevant fields were changed + * [Ecto.Changeset] Raise if an unknown field is given on `Ecto.Changeset.change/2` + * [Ecto.Changeset] Expose the type that was validated in errors generated by `validate_length/3` + * [Ecto.Query] Add support for `field/2` as first element of `type/2` and alias as second element of `type/2` + * [Ecto.Query] Do not attempt to assert types of named bindings that are not known at compile time + * [Ecto.Query] Properly cast boolean expressions in select + * [Mix.Ecto] Load applications during repo lookup so their app environment is available + +### Deprecations + + * [Ecto.LogEntry] Fully deprecate previously soft deprecated API + +## v3.0.7 (2019-02-06) + +### Bug fixes + + * [Ecto.Query] `reverse_order` reverses by primary key if no order is given + +## v3.0.6 (2018-12-31) + +### Enhancements + + * [Ecto.Query] Add `reverse_order/1` + +### Bug fixes + + * [Ecto.Multi] Raise better error message on accidental rollback inside `Ecto.Multi` + * [Ecto.Query] Properly merge deeply nested preloaded joins + * [Ecto.Query] Raise better error message on missing select on schemaless queries + * [Ecto.Schema] Fix parameter ordering in assoc `:where` + +## v3.0.5 (2018-12-08) + +### Backwards incompatible changes + + * [Ecto.Schema] The `:where` option added in Ecto 3.0.0 had a major flaw and it has been reworked in this version. 
This means a tuple of three elements can no longer be passed to `:where`, instead a keyword list must be given. Check the "Filtering associations" section in `has_many/3` docs for more information + +### Bug fixes + + * [Ecto.Query] Do not raise on lists of tuples that are not keywords. Instead, let custom Ecto.Type handle them + * [Ecto.Query] Allow `prefix: nil` to be given to subqueries + * [Ecto.Query] Use different cache keys for unions/intersections/excepts + * [Ecto.Repo] Fix support for upserts with `:replace` without a schema + * [Ecto.Type] Do not lose precision when casting `utc_datetime_usec` with a time zone different than Etc/UTC + +## v3.0.4 (2018-11-29) + +### Enhancements + + * [Decimal] Bump decimal dependency + * [Ecto.Repo] Remove unused `:pool_timeout` + +## v3.0.3 (2018-11-20) + +### Enhancements + + * [Ecto.Changeset] Add `count: :bytes` option in `validate_length/3` + * [Ecto.Query] Support passing `Ecto.Query` in `Ecto.Repo.insert_all` + +### Bug fixes + + * [Ecto.Type] Respect adapter types when loading/dumping arrays and maps + * [Ecto.Query] Ensure no bindings in order_by when using combinations in `Ecto.Query` + * [Ecto.Repo] Ensure adapter is compiled (instead of only loaded) before invoking it + * [Ecto.Repo] Support new style child spec from adapters + +## v3.0.2 (2018-11-17) + +### Bug fixes + + * [Ecto.LogEntry] Bring old Ecto.LogEntry APIs back for compatibility + * [Ecto.Repo] Consider non-joined fields when merging preloaded assocs only at root + * [Ecto.Repo] Take field sources into account in :replace_all_fields upsert option + * [Ecto.Type] Convert `:utc_datetime` to `DateTime` when sending it to adapters + +## v3.0.1 (2018-11-03) + +### Bug fixes + + * [Ecto.Query] Ensure parameter order is preserved when using more than 32 parameters + * [Ecto.Query] Consider query prefix when planning association joins + * [Ecto.Repo] Consider non-joined fields as unique parameters when merging preloaded query assocs + +## v3.0.0 
(2018-10-29) + +Note this version includes changes from `ecto` and `ecto_sql` but in future releases all `ecto_sql` entries will be listed in their own CHANGELOG. + +### Enhancements + + * [Ecto.Adapters.MySQL] Add ability to specify cli_protocol for `ecto.create` and `ecto.drop` commands + * [Ecto.Adapters.PostgreSQL] Add ability to specify maintenance database name for PostgreSQL adapter for `ecto.create` and `ecto.drop` commands + * [Ecto.Changeset] Store constraint name in error metadata for constraints + * [Ecto.Changeset] Add `validations/1` and `constraints/1` instead of allowing direct access on the struct fields + * [Ecto.Changeset] Add `:force_update` option when casting relations, to force an update even if there are no changes + * [Ecto.Migration] Migrations now lock the migrations table in order to avoid concurrent migrations in a cluster. The type of lock can be configured via the `:migration_lock` repository configuration and defaults to "FOR UPDATE" or disabled if set to nil + * [Ecto.Migration] Add `:migration_default_prefix` repository configuration + * [Ecto.Migration] Add reversible version of `remove/2` subcommand + * [Ecto.Migration] Add support for non-empty arrays as defaults in migrations + * [Ecto.Migration] Add support for logging notices/alerts/warnings when running migrations (only supported by Postgres currently) + * [Ecto.Migrator] Warn when migrating and there is a higher version already migrated in the database + * [Ecto.Multi] Add support for anonymous functions in `insert/4`, `update/4`, `insert_or_update/4`, and `delete/4` + * [Ecto.Query] Support tuples in `where` and `having`, allowing queries such as `where: {p.foo, p.bar} > {^foo, ^bar}` + * [Ecto.Query] Support arithmetic operators in queries as a thin layer around the DB functionality + * [Ecto.Query] Allow joins in queries to be named via `:as` and allow named bindings + * [Ecto.Query] Support excluding specific join types in `exclude/2` + * [Ecto.Query] Allow virtual 
field update in subqueries + * [Ecto.Query] Support `coalesce/2` in queries, such as `select: coalesce(p.title, p.old_title)` + * [Ecto.Query] Support `filter/2` in queries, such as `select: filter(count(p.id), p.public == true)` + * [Ecto.Query] The `:prefix` and `:hints` options are now supported on both `from` and `join` expressions + * [Ecto.Query] Support `:asc_nulls_last`, `:asc_nulls_first`, `:desc_nulls_last`, and `:desc_nulls_first` in `order_by` + * [Ecto.Query] Allow variables (sources) to be given in queries, for example, useful for invoking functions, such as `fragment("some_function(?)", p)` + * [Ecto.Query] Add support for `union`, `union_all`, `intersection`, `intersection_all`, `except` and `except_all` + * [Ecto.Query] Add support for `windows` and `over` + * [Ecto.Query] Raise when comparing a string with a charlist during planning + * [Ecto.Repo] Only start transactions if an association or embed has changed, this reduces the overhead during repository operations + * [Ecto.Repo] Support `:replace_all_except_primary_key` as `:on_conflict` strategy + * [Ecto.Repo] Support `{:replace, fields}` as `:on_conflict` strategy + * [Ecto.Repo] Support `:unsafe_fragment` as `:conflict_target` + * [Ecto.Repo] Support `select` in queries given to `update_all` and `delete_all` + * [Ecto.Repo] Add `Repo.exists?/2` + * [Ecto.Repo] Add `Repo.checkout/2` - useful when performing multiple operations in short-time to interval, allowing the pool to be bypassed + * [Ecto.Repo] Add `:stale_error_field` to `Repo.insert/update/delete` that converts `Ecto.StaleEntryError` into a changeset error. 
The message can also be set with `:stale_error_message` + * [Ecto.Repo] Preloading now only sorts results by the relationship key instead of sorting by the whole struct + * [Ecto.Schema] Allow `:where` option to be given to `has_many`/`has_one`/`belongs_to`/`many_to_many` + +### Bug fixes + + * [Ecto.Inspect] Do not fail when inspecting query expressions which have a number of bindings more than bindings available + * [Ecto.Migration] Keep double underscores on autogenerated index names to be consistent with changesets + * [Ecto.Query] Fix `Ecto.Query.API.map/2` for single nil column with join + * [Ecto.Migration] Ensure `create_if_not_exists` is properly reversible + * [Ecto.Repo] Allow many_to_many associations to be preloaded via a function (before the behaviour was erratic) + * [Ecto.Schema] Make autogen ID loading work with custom type + * [Ecto.Schema] Make `updated_at` have the same value as `inserted_at` + * [Ecto.Schema] Ensure all fields are replaced with `on_conflict: :replace_all/:replace_all_except_primary_key` and not only the fields sent as changes + * [Ecto.Type] Return `:error` when casting NaN or infinite decimals + * [mix ecto.migrate] Properly run migrations after ECTO_EDITOR changes + * [mix ecto.migrations] List migrated versions even if the migration file is deleted + * [mix ecto.load] The task now fails on SQL errors on Postgres + +### Deprecations + +Although Ecto 3.0 is a major bump version, the functionality below emits deprecation warnings to ease the migration process. The functionality below will be removed in future Ecto 3.1+ releases. + + * [Ecto.Changeset] Passing a list of binaries to `cast/3` is deprecated, please pass a list of atoms instead + * [Ecto.Multi] `Ecto.Multi.run/3` now receives the repo in which the transaction is executing as the first argument to functions, and the changes so far as the second argument + * [Ecto.Query] `join/5` now expects `on: expr` as last argument instead of simply `expr`. 
This was done in order to properly support the `:as`, `:hints` and `:prefix` options + * [Ecto.Repo] The `:returning` option for `update_all` and `delete_all` has been deprecated as those statements now support `select` clauses + * [Ecto.Repo] Passing `:adapter` via config is deprecated in favor of passing it on `use Ecto.Repo` + * [Ecto.Repo] The `:loggers` configuration is deprecated in favor of "Telemetry Events" + +### Backwards incompatible changes + + * [Ecto.DateTime] `Ecto.Date`, `Ecto.Time` and `Ecto.DateTime` were previously deprecated and have now been removed + * [Ecto.DataType] `Ecto.DataType` protocol has been removed + * [Ecto.Migration] Automatically inferred index names may differ in Ecto v3.0 for indexes on complex column names + * [Ecto.Multi] `Ecto.Multi.run/5` now receives the repo in which the transaction is executing as the first argument to functions, and the changes so far as the second argument + * [Ecto.Query] A `join` no longer wraps `fragment` in parentheses. In some cases, such as common table expressions, you will have to explicitly wrap the fragment in parens. + * [Ecto.Repo] The `on_conflict: :replace_all` option now will also send fields with default values to the database. If you prefer the old behaviour that only sends the changes in the changeset, you can set it to `on_conflict: {:replace, Map.keys(changeset.changes)}` (this change is also listed as a bug fix) + * [Ecto.Repo] The repository operations are no longer called from association callbacks - this behaviour was not guaranteed in previous versions but we are listing as backwards incompatible changes to help with users relying on this behaviour + * [Ecto.Repo] `:pool_timeout` is no longer supported in favor of a new queue system described in `DBConnection.start_link/2` under "Queue config". 
For most users, configuring `:timeout` is enough, as it now includes both queue and query time + * [Ecto.Schema] `:time`, `:naive_datetime` and `:utc_datetime` no longer keep microseconds information. If you want to keep microseconds, use `:time_usec`, `:naive_datetime_usec`, `:utc_datetime_usec` + * [Ecto.Schema] The `@schema_prefix` option now only affects the `from`/`join` of where the schema is used and no longer the whole query + * [Ecto.Schema.Metadata] The `source` key no longer returns a tuple of the schema_prefix and the table/collection name. It now returns just the table/collection string. You can now access the schema_prefix via the `prefix` key. + * [Mix.Ecto] `Mix.Ecto.ensure_started/2` has been removed. However, in Ecto 2.2 the `Mix.Ecto` module was not considered part of the public API and should not have been used but we are listing this for guidance. + +### Adapter changes + + * [Ecto.Adapter] Split `Ecto.Adapter` into `Ecto.Adapter.Queryable` and `Ecto.Adapter.Schema` to provide more granular repository APIs + * [Ecto.Adapter] The `:sources` field in `query_meta` now contains three elements tuples with `{source, schema, prefix}` in order to support `from`/`join` prefixes (#2572) + * [Ecto.Adapter] The database types `time`, `utc_datetime` and `naive_datetime` should translate to types with seconds precision while the database types `time_usec`, `utc_datetime_usec` and `naive_datetime_usec` should have microseconds precision (#2291) + * [Ecto.Adapter] The `on_conflict` argument for `insert` and `insert_all` no longer receives a `{:replace_all, list(), atom()}` tuple. 
Instead, it receives a `{fields :: [atom()], list(), atom()}` where `fields` is a list of atoms of the fields to be replaced (#2181) + * [Ecto.Adapter] `insert`/`update`/`delete` now receive both `:source` and `:prefix` fields instead of a single `:source` field with both `source` and `prefix` in it (#2490) + * [Ecto.Adapter.Migration] A new `lock_for_migration/4` callback has been added. It is implemented by default by `Ecto.Adapters.SQL` (#2215) + * [Ecto.Adapter.Migration] The `execute_ddl` should now return `{:ok, []}` to make space for returning notices/hints/warnings in the future (adapters leveraging `Ecto.Adapters.SQL` do not have to perform any change) + * [Ecto.Query] The `from` field in `Ecto.Query` now returns a `Ecto.Query.FromExpr` with the `:source` field, unifying the behaviour in `from` and `join` expressions (#2497) + * [Ecto.Query] Tuple expressions are now supported in queries. For example, `where: {p.foo, p.bar} > {p.bar, p.baz}` should translate to `WHERE (p.foo, p.bar) > (p.bar, p.baz)` in SQL databases. 
Adapters should be changed to handle `{:{}, meta, exprs}` in the query AST (#2344) + * [Ecto.Query] Adapters should support the following arithmetic operators in queries `+`, `-`, `*` and `/` (#2400) + * [Ecto.Query] Adapters should support `filter/2` in queries, as in `select: filter(count(p.id), p.public == true)` (#2487) + +## Previous versions + + * See the CHANGELOG.md [in the v2.2 branch](https://github.com/elixir-ecto/ecto/blob/v2.2/CHANGELOG.md) diff --git a/deps/ecto/README.md b/deps/ecto/README.md new file mode 100644 index 0000000..b990ec2 --- /dev/null +++ b/deps/ecto/README.md @@ -0,0 +1,214 @@ +# Ecto + +--- + +[![Build Status](https://github.com/elixir-ecto/ecto/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto/actions) [![Hex.pm](https://img.shields.io/hexpm/v/ecto.svg)](https://hex.pm/packages/ecto) [![Documentation](https://img.shields.io/badge/documentation-gray)](https://hexdocs.pm/ecto/) + +## Installation + +Add `:ecto` to the list of dependencies in `mix.exs`: + +```elixir +def deps do + [ + {:ecto, "~> 3.10"} + ] +end +``` + +## About + +Ecto is a toolkit for data mapping and language integrated query for Elixir.
Here is an example: + +```elixir +# In your config/config.exs file +config :my_app, ecto_repos: [Sample.Repo] + +config :my_app, Sample.Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost", + port: "5432" + +# In your application code +defmodule Sample.Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres +end + +defmodule Sample.Weather do + use Ecto.Schema + + schema "weather" do + field :city # Defaults to type :string + field :temp_lo, :integer + field :temp_hi, :integer + field :prcp, :float, default: 0.0 + end +end + +defmodule Sample.App do + import Ecto.Query + alias Sample.{Weather, Repo} + + def keyword_query do + query = + from w in Weather, + where: w.prcp > 0 or is_nil(w.prcp), + select: w + + Repo.all(query) + end + + def pipe_query do + Weather + |> where(city: "Kraków") + |> order_by(:temp_lo) + |> limit(10) + |> Repo.all + end +end +``` + +Ecto is commonly used to interact with databases, such as PostgreSQL and MySQL via [Ecto.Adapters.SQL](https://hexdocs.pm/ecto_sql) ([source code](https://github.com/elixir-ecto/ecto_sql)). Ecto is also commonly used to map data from any source into Elixir structs, whether they are backed by a database or not. + +See the [getting started guide](https://hexdocs.pm/ecto/getting-started.html) and the [online documentation](https://hexdocs.pm/ecto) for more information. Other resources available are: + + * [Programming Ecto](https://pragprog.com/titles/wmecto/programming-ecto/), by Darin Wilson and Eric Meadows-Jönsson, which guides you from fundamentals up to advanced concepts + + * [The Little Ecto Cookbook](https://dashbit.co/ebooks/the-little-ecto-cookbook), a free ebook by Dashbit, which is a curation of the existing Ecto guides with some extra contents + +## Usage + +You need to add both Ecto and the database adapter as a dependency to your `mix.exs` file. 
The supported databases and their adapters are: + +| Database | Ecto Adapter | Dependencies | +| :--------- | :----------------------- | :----------------------------------------------- | +| PostgreSQL | Ecto.Adapters.Postgres | [ecto_sql][ecto_sql] + [postgrex][postgrex] | +| MySQL | Ecto.Adapters.MyXQL | [ecto_sql][ecto_sql] + [myxql][myxql] | +| MSSQL | Ecto.Adapters.Tds | [ecto_sql][ecto_sql] + [tds][tds] | +| SQLite3 | Ecto.Adapters.SQLite3 | [ecto_sqlite3][ecto_sqlite3] | +| ClickHouse | Ecto.Adapters.ClickHouse | [ecto_ch][ecto_ch] | +| ETS | Etso | [etso][etso] | + +[ecto_sql]: https://github.com/elixir-ecto/ecto_sql +[postgrex]: https://github.com/elixir-ecto/postgrex +[myxql]: https://github.com/elixir-ecto/myxql +[tds]: https://github.com/livehelpnow/tds +[ecto_sqlite3]: https://github.com/elixir-sqlite/ecto_sqlite3 +[etso]: https://github.com/evadne/etso +[ecto_ch]: https://github.com/plausible/ecto_ch + +For example, if you want to use PostgreSQL, add to your `mix.exs` file: + +```elixir +defp deps do + [ + {:ecto_sql, "~> 3.0"}, + {:postgrex, ">= 0.0.0"} + ] +end +``` + +Then run `mix deps.get` in your shell to fetch the dependencies. If you want to use another database, just choose the proper dependency from the table above. + +Finally, in the repository definition, you will need to specify the `adapter:` respective to the chosen dependency. For PostgreSQL it is: + +```elixir +defmodule MyApp.Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres, + ... +``` + +### IPv6 support + +If your database's host resolves to an IPv6 address, you should +add `socket_options: [:inet6]` to the configuration block as shown below: + +```elixir +import Mix.Config + +config :my_app, MyApp.Repo, + hostname: "db12.dc0.comp.any", + socket_options: [:inet6], + ...
+``` + +## Supported Versions + +| Branch | Support | +| ----------------- | ------------------------ | +| v3.12 | Bug fixes | +| v3.11 | Security patches only | +| v3.10 | Security patches only | +| v3.9 | Security patches only | +| v3.8 | Security patches only | +| v3.7 and earlier | Unsupported | + +With version 3.0, Ecto API has become stable. Our main focus is on providing +bug fixes and incremental changes. + +## Important links + + * [Documentation](https://hexdocs.pm/ecto) + * [Mailing list](https://groups.google.com/forum/#!forum/elixir-ecto) + * [Examples](https://github.com/elixir-ecto/ecto/tree/master/examples) + +## Running tests + +Clone the repo and fetch its dependencies: + + $ git clone https://github.com/elixir-ecto/ecto.git + $ cd ecto + $ mix deps.get + $ mix test + +Note that `mix test` does not run the tests in the `integration_test` folder. To run integration tests, you can clone `ecto_sql` in a sibling directory and then run its integration tests with the `ECTO_PATH` environment variable pointing to your Ecto checkout: + + $ cd .. + $ git clone https://github.com/elixir-ecto/ecto_sql.git + $ cd ecto_sql + $ mix deps.get + $ ECTO_PATH=../ecto mix test.all + +### Running containerized tests + +It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly): + + $ earthly -P +all + +You can also use this to interactively debug any failing integration tests using: + + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-21.3.8.21-alpine-3.13.1 +integration-test + +Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: + + PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test + MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test + sqlcmd -U sa -P 'some!Password' + +## Logo + +"Ecto" and the Ecto logo are Copyright (c) 2020 Dashbit. 
+ +The Ecto logo was designed by [Dane Wesolko](https://www.danewesolko.com). + +## License + +Copyright (c) 2013 Plataformatec \ +Copyright (c) 2020 Dashbit + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/ecto/hex_metadata.config b/deps/ecto/hex_metadata.config new file mode 100644 index 0000000..2a3448b --- /dev/null +++ b/deps/ecto/hex_metadata.config @@ -0,0 +1,73 @@ +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/ecto/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/elixir-ecto/ecto">>}]}. +{<<"name">>,<<"ecto">>}. +{<<"version">>,<<"3.13.6">>}. +{<<"description">>, + <<"A toolkit for data mapping and language integrated query for Elixir">>}. +{<<"elixir">>,<<"~> 1.14">>}. 
+{<<"files">>, + [<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"CHANGELOG.md">>, + <<"lib">>,<<"lib/ecto">>,<<"lib/ecto/adapter.ex">>,<<"lib/ecto/multi.ex">>, + <<"lib/ecto/queryable.ex">>,<<"lib/ecto/type.ex">>, + <<"lib/ecto/changeset.ex">>,<<"lib/ecto/parameterized_type.ex">>, + <<"lib/ecto/changeset">>,<<"lib/ecto/changeset/relation.ex">>, + <<"lib/ecto/adapter">>,<<"lib/ecto/adapter/transaction.ex">>, + <<"lib/ecto/adapter/queryable.ex">>,<<"lib/ecto/adapter/schema.ex">>, + <<"lib/ecto/adapter/storage.ex">>,<<"lib/ecto/query.ex">>, + <<"lib/ecto/enum.ex">>,<<"lib/ecto/schema">>, + <<"lib/ecto/schema/metadata.ex">>,<<"lib/ecto/schema/loader.ex">>, + <<"lib/ecto/uuid.ex">>,<<"lib/ecto/json.ex">>,<<"lib/ecto/embedded.ex">>, + <<"lib/ecto/schema.ex">>,<<"lib/ecto/association.ex">>, + <<"lib/ecto/application.ex">>,<<"lib/ecto/query">>, + <<"lib/ecto/query/planner.ex">>,<<"lib/ecto/query/window_api.ex">>, + <<"lib/ecto/query/api.ex">>,<<"lib/ecto/query/inspect.ex">>, + <<"lib/ecto/query/builder.ex">>,<<"lib/ecto/query/builder">>, + <<"lib/ecto/query/builder/preload.ex">>, + <<"lib/ecto/query/builder/group_by.ex">>, + <<"lib/ecto/query/builder/join.ex">>,<<"lib/ecto/query/builder/update.ex">>, + <<"lib/ecto/query/builder/distinct.ex">>, + <<"lib/ecto/query/builder/limit_offset.ex">>, + <<"lib/ecto/query/builder/select.ex">>, + <<"lib/ecto/query/builder/windows.ex">>, + <<"lib/ecto/query/builder/from.ex">>,<<"lib/ecto/query/builder/cte.ex">>, + <<"lib/ecto/query/builder/combination.ex">>, + <<"lib/ecto/query/builder/dynamic.ex">>, + <<"lib/ecto/query/builder/order_by.ex">>, + <<"lib/ecto/query/builder/lock.ex">>,<<"lib/ecto/query/builder/filter.ex">>, + <<"lib/ecto/repo.ex">>,<<"lib/ecto/repo">>, + <<"lib/ecto/repo/transaction.ex">>,<<"lib/ecto/repo/supervisor.ex">>, + <<"lib/ecto/repo/registry.ex">>,<<"lib/ecto/repo/queryable.ex">>, + <<"lib/ecto/repo/preloader.ex">>,<<"lib/ecto/repo/schema.ex">>, + 
<<"lib/ecto/repo/assoc.ex">>,<<"lib/ecto/exceptions.ex">>,<<"lib/mix">>, + <<"lib/mix/tasks">>,<<"lib/mix/tasks/ecto.create.ex">>, + <<"lib/mix/tasks/ecto.ex">>,<<"lib/mix/tasks/ecto.gen.repo.ex">>, + <<"lib/mix/tasks/ecto.drop.ex">>,<<"lib/mix/ecto.ex">>,<<"lib/ecto.ex">>, + <<"integration_test/cases">>,<<"integration_test/cases/type.exs">>, + <<"integration_test/cases/interval.exs">>, + <<"integration_test/cases/preload.exs">>, + <<"integration_test/cases/assoc.exs">>, + <<"integration_test/cases/joins.exs">>, + <<"integration_test/cases/windows.exs">>, + <<"integration_test/cases/repo.exs">>,<<"integration_test/support">>, + <<"integration_test/support/schemas.exs">>, + <<"integration_test/support/types.exs">>]}. +{<<"app">>,<<"ecto">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"decimal">>}, + {<<"app">>,<<"decimal">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 2.0 or ~> 3.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"jason">>}, + {<<"app">>,<<"jason">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/ecto/integration_test/cases/assoc.exs b/deps/ecto/integration_test/cases/assoc.exs new file mode 100644 index 0000000..fe2eeb1 --- /dev/null +++ b/deps/ecto/integration_test/cases/assoc.exs @@ -0,0 +1,865 @@ +defmodule Ecto.Integration.AssocTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Custom + alias Ecto.Integration.Post + alias Ecto.Integration.User + alias Ecto.Integration.PostUser + alias Ecto.Integration.Comment + alias Ecto.Integration.Permalink + + test "has_many assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + [c1, c2] = TestRepo.all Ecto.assoc(p1, :comments) + assert c1.id == cid1 + assert c2.id == cid2 + + [c1, c2, c3] = TestRepo.all Ecto.assoc([p1, p2], :comments) + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + end + + test "has_one assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: lid1} = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + %Permalink{} = TestRepo.insert!(%Permalink{url: "2"}) + %Permalink{id: lid3} = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + [l1, l3] = TestRepo.all Ecto.assoc([p1, p2], :permalink) + assert l1.id == lid1 + assert l3.id == lid3 + end + + test "belongs_to assoc" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + l1 = TestRepo.insert!(%Permalink{url: "1", post_id: pid1}) + l2 = TestRepo.insert!(%Permalink{url: "2"}) + l3 = TestRepo.insert!(%Permalink{url: "3", post_id: pid2}) + + assert [p1, p2] = TestRepo.all 
Ecto.assoc([l1, l2, l3], :post) + assert p1.id == pid1 + assert p2.id == pid2 + end + + test "has_many through assoc" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "zzz"}) + u2 = TestRepo.insert!(%User{name: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = Ecto.assoc([p1, p2], :comments_authors) |> order_by([a], a.name) + assert [^u2, ^u1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([p1, p2], [:comments, :author]) |> order_by([a], a.name) + assert [^u2, ^u1] = TestRepo.all(query) + end + + @tag :on_replace_nilify + test "has_many through-through assoc leading" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{}) + u2 = TestRepo.insert!(%User{}) + + pl1 = TestRepo.insert!(%Permalink{user_id: u1.id, url: "zzz"}) + pl2 = TestRepo.insert!(%Permalink{user_id: u2.id, url: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = Ecto.assoc([p1, p2], :comments_authors_permalinks) |> order_by([p], p.url) + assert [^pl2, ^pl1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([p1, p2], [:comments, :author, :permalink]) |> order_by([p], p.url) + assert [^pl2, ^pl1] = TestRepo.all(query) + end + + test "has_many through-through assoc trailing" do + p1 = TestRepo.insert!(%Post{}) + u1 = TestRepo.insert!(%User{}) + pl1 = TestRepo.insert!(%Permalink{user_id: u1.id, post_id: p1.id}) + + %Comment{} = 
TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + + query = Ecto.assoc([pl1], :post_comments_authors) + assert [^u1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([pl1], [:post, :comments, :author]) + assert [^u1] = TestRepo.all(query) + end + + test "has_many through has_many, many_to_many and has_many" do + user1 = %User{id: uid1} = TestRepo.insert!(%User{name: "Gabriel"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "Isadora"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "Joey Mush"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: uid1}) + p2 = TestRepo.insert!(%Post{title: "p2", author_id: uid2}) + p3 = TestRepo.insert!(%Post{title: "p3", author_id: uid2}) + TestRepo.insert!(%Post{title: "p4", author_id: uid3}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3]] + + [pid1, pid2, pid3] = + Ecto.assoc(user1, :related_2nd_order_posts) + |> TestRepo.all() + |> Enum.map(fn %Post{id: id} -> id end) + |> Enum.sort() + + assert p1.id == pid1 + assert p2.id == pid2 + assert p3.id == pid3 + end + + test "has_many through has_many, belongs_to and a nested has through" do + user1 = TestRepo.insert!(%User{name: "Gabriel"}) + user2 = TestRepo.insert!(%User{name: "Isadora"}) + user3 = TestRepo.insert!(%User{name: "Joey"}) + + post1 = TestRepo.insert!(%Post{title: "p1"}) + post2 = TestRepo.insert!(%Post{title: "p2"}) + + TestRepo.insert!(%Comment{author_id: user1.id, text: "c1", post_id: post1.id}) + TestRepo.insert!(%Comment{author_id: user2.id, text: "c2", post_id: post1.id}) + TestRepo.insert!(%Comment{author_id: user3.id, text: "c3", post_id: post2.id}) + + [u1_id, u2_id] = + Ecto.assoc(user1, :co_commenters) + |> TestRepo.all() + |> Enum.map(fn %User{id: id} -> id end) + |> Enum.sort() + + assert u1_id == user1.id + assert u2_id == user2.id + end + + test "has_many through two many_to_many associations" do + user1 = %User{id: uid1} = 
TestRepo.insert!(%User{name: "Gabriel"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "Isadora"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "Joey Mush"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: uid1}) + TestRepo.insert!(%Post{title: "p2", author_id: uid2}) + p3 = TestRepo.insert!(%Post{title: "p3", author_id: uid2}) + p4 = TestRepo.insert!(%Post{title: "p4", author_id: uid3}) + + TestRepo.insert_all "posts_users", [[post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid2], + [post_id: p1.id, user_id: uid3]] + + TestRepo.insert!(%PostUser{post_id: p1.id, user_id: uid2}) + TestRepo.insert!(%PostUser{post_id: p3.id, user_id: uid1}) + TestRepo.insert!(%PostUser{post_id: p3.id, user_id: uid2}) + TestRepo.insert!(%PostUser{post_id: p4.id, user_id: uid3}) + + [u1, u2] = + Ecto.assoc(user1, :users_through_schema_posts) + |> TestRepo.all() + |> Enum.map(fn %User{id: id} -> id end) + |> Enum.sort() + + assert uid1 == u1 + assert uid2 == u2 + end + + test "has_many through with where" do + post1 = TestRepo.insert!(%Post{title: "p1"}) + post2 = TestRepo.insert!(%Post{title: "p2"}) + post3 = TestRepo.insert!(%Post{title: "p3"}) + + author = TestRepo.insert!(%User{name: "john"}) + + TestRepo.insert!(%Comment{text: "1", lock_version: 1, post_id: post1.id, author_id: author.id}) + TestRepo.insert!(%Comment{text: "2", lock_version: 2, post_id: post2.id, author_id: author.id}) + TestRepo.insert!(%Comment{text: "3", lock_version: 2, post_id: post3.id, author_id: author.id}) + + [p2, p3] = Ecto.assoc(author, :v2_comments_posts) |> TestRepo.all() |> Enum.sort_by(&(&1.id)) + assert p2.id == post2.id + assert p3.id == post3.id + end + + test "many_to_many assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "john"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", 
[[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid2]] + + [u1, u2] = TestRepo.all Ecto.assoc([p1], :users) + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = TestRepo.all Ecto.assoc([p2], :users) + assert u2.id == uid2 + [] = TestRepo.all Ecto.assoc([p3], :users) + + [u1, u2, u2] = TestRepo.all Ecto.assoc([p1, p2, p3], :users) + assert u1.id == uid1 + assert u2.id == uid2 + end + + ## Changesets + + test "has_one changeset assoc (on_replace: :delete)" do + # Insert new + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "1"}) + post = TestRepo.insert!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "1" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "1" + + # Replace with new + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "2"}) + post = TestRepo.update!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "2" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "2" + + # Replacing with existing + existing = TestRepo.insert!(%Permalink{url: "3"}) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, existing) + post = TestRepo.update!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "3" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "3" + + # Replacing with nil (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, nil) + post = TestRepo.update!(changeset) + refute post.permalink + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + refute post.permalink + 
+ assert [0] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + test "has_one changeset assoc (on_replace: :delete_if_exists)" do + permalink = TestRepo.insert!(%Permalink{url: "1"}) + post = TestRepo.insert!(%Post{title: "1", permalink: permalink, force_permalink: permalink}) + TestRepo.delete!(permalink) + + assert_raise Ecto.StaleEntryError, fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:permalink, nil) + |> TestRepo.update!() + end + + post = + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:force_permalink, nil) + |> TestRepo.update!() + + assert post.force_permalink == nil + end + + @tag :on_replace_nilify + test "has_one changeset assoc (on_replace: :nilify)" do + # Insert new + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "1"}) + user = TestRepo.insert!(changeset) + assert user.permalink.id + assert user.permalink.user_id == user.id + assert user.permalink.url == "1" + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + assert user.permalink.url == "1" + + # Replace with new + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "2"}) + user = TestRepo.update!(changeset) + assert user.permalink.id + assert user.permalink.user_id == user.id + assert user.permalink.url == "2" + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + assert user.permalink.url == "2" + + # Replacing with nil (on_replace: :nilify) + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, nil) + user = TestRepo.update!(changeset) + refute user.permalink + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + refute user.permalink + + assert [2] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + @tag :on_replace_update + test "has_one changeset assoc (on_replace: :update)" do + # Insert new + changeset = + 
%Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_permalink, %Permalink{url: "1"}) + post = TestRepo.insert!(changeset) + assert post.update_permalink.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "1" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "1" + + perma = post.update_permalink + + # Put on update + changeset = + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, %{url: "2"}) + post = TestRepo.update!(changeset) + assert post.update_permalink.id == perma.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "2" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "2" + + # Cast on update + changeset = + post + |> Ecto.Changeset.cast(%{update_permalink: %{url: "3"}}, []) + |> Ecto.Changeset.cast_assoc(:update_permalink) + post = TestRepo.update!(changeset) + assert post.update_permalink.id == perma.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "3" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "3" + + # Replace with new struct + assert_raise RuntimeError, ~r"you are only allowed\sto update the existing entry", fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, %Permalink{url: "4"}) + end + + # Replace with existing struct + assert_raise RuntimeError, ~r"you are only allowed\sto update the existing entry", fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, TestRepo.insert!(%Permalink{url: "5"})) + end + + # Replacing with nil (on_replace: :update) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_permalink, nil) + post = TestRepo.update!(changeset) + refute 
post.update_permalink + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + refute post.update_permalink + + assert [2] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + test "has_many changeset assoc (on_replace: :delete)" do + c1 = TestRepo.insert! %Comment{text: "1"} + c2 = %Comment{text: "2"} + + # Inserting + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [c2]) + post = TestRepo.insert!(changeset) + [c2] = post.comments + assert c2.id + assert c2.post_id == post.id + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + [c2] = post.comments + assert c2.text == "2" + + # Updating + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [Ecto.Changeset.change(c1, text: "11"), + Ecto.Changeset.change(c2, text: "22")]) + post = TestRepo.update!(changeset) + [c1, _c2] = post.comments |> Enum.sort_by(&(&1.id)) + assert c1.id + assert c1.post_id == post.id + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + [c1, c2] = post.comments |> Enum.sort_by(&(&1.id)) + assert c1.text == "11" + assert c2.text == "22" + + # Replacing (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + post = TestRepo.update!(changeset) + assert post.comments == [] + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + assert post.comments == [] + + assert [0] == TestRepo.all(from(c in Comment, select: count(c.id))) + end + + test "has_many changeset assoc (on_replace: :delete_if_exists)" do + comment = TestRepo.insert!(%Comment{text: "1"}) + post = TestRepo.insert!(%Post{title: "1", comments: [comment], force_comments: [comment]}) + + TestRepo.delete!(comment) + + assert_raise Ecto.StaleEntryError, fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, []) + |> TestRepo.update!() + end + + post = + post + |> Ecto.Changeset.change() + |> 
Ecto.Changeset.put_assoc(:force_comments, []) + |> TestRepo.update!() + + assert post.force_comments == [] + end + + test "has_many changeset assoc (on_replace: :nilify)" do + c1 = TestRepo.insert! %Comment{text: "1"} + c2 = %Comment{text: "2"} + + # Inserting + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [c1, c2]) + user = TestRepo.insert!(changeset) + [c1, c2] = user.comments + assert c1.id + assert c1.author_id == user.id + assert c2.id + assert c2.author_id == user.id + user = TestRepo.get!(from(User, preload: [:comments]), user.id) + [c1, c2] = user.comments + assert c1.text == "1" + assert c2.text == "2" + + # Replacing (on_replace: :nilify) + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + user = TestRepo.update!(changeset) + assert user.comments == [] + user = TestRepo.get!(from(User, preload: [:comments]), user.id) + assert user.comments == [] + + assert [2] == TestRepo.all(from(c in Comment, select: count(c.id))) + end + + test "many_to_many changeset assoc" do + u1 = TestRepo.insert! 
%User{name: "1"} + u2 = %User{name: "2"} + + # Inserting + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, [u2]) + post = TestRepo.insert!(changeset) + [u2] = post.users + assert u2.id + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + [u2] = post.users + assert u2.name == "2" + + assert [1] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + + # Updating + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, [Ecto.Changeset.change(u1, name: "11"), + Ecto.Changeset.change(u2, name: "22")]) + post = TestRepo.update!(changeset) + [u1, _u2] = post.users |> Enum.sort_by(&(&1.id)) + assert u1.id + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + [u1, u2] = post.users |> Enum.sort_by(&(&1.id)) + assert u1.name == "11" + assert u2.name == "22" + + assert [2] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + + # Replacing (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, []) + post = TestRepo.update!(changeset) + assert post.users == [] + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + assert post.users == [] + + assert [0] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + assert [2] == TestRepo.all(from(c in User, select: count(c.id))) + end + + test "many_to_many changeset assoc with schema" do + p1 = TestRepo.insert! 
%Post{title: "1"} + p2 = %Post{title: "2"} + + # Inserting + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:schema_posts, [p2]) + user = TestRepo.insert!(changeset) + [p2] = user.schema_posts + assert p2.id + user = TestRepo.get!(from(User, preload: [:schema_posts]), user.id) + [p2] = user.schema_posts + assert p2.title == "2" + + [up2] = TestRepo.all(PostUser) |> Enum.sort_by(&(&1.id)) + assert up2.post_id == p2.id + assert up2.user_id == user.id + assert up2.inserted_at + assert up2.updated_at + + # Updating + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:schema_posts, [Ecto.Changeset.change(p1, title: "11"), + Ecto.Changeset.change(p2, title: "22")]) + user = TestRepo.update!(changeset) + [p1, _p2] = user.schema_posts |> Enum.sort_by(&(&1.id)) + assert p1.id + user = TestRepo.get!(from(User, preload: [:schema_posts]), user.id) + [p1, p2] = user.schema_posts |> Enum.sort_by(&(&1.id)) + assert p1.title == "11" + assert p2.title == "22" + + [_up2, up1] = TestRepo.all(PostUser) |> Enum.sort_by(&(&1.id)) + assert up1.post_id == p1.id + assert up1.user_id == user.id + assert up1.inserted_at + assert up1.updated_at + end + + test "many_to_many changeset assoc with self-referential binary_id" do + assoc_custom = TestRepo.insert!(%Custom{uuid: Ecto.UUID.generate()}) + custom = TestRepo.insert!(%Custom{customs: [assoc_custom]}) + + custom = Custom |> TestRepo.get!(custom.bid) |> TestRepo.preload(:customs) + assert [_] = custom.customs + + custom = + custom + |> Ecto.Changeset.change(%{}) + |> Ecto.Changeset.put_assoc(:customs, []) + |> TestRepo.update! 
+ assert [] = custom.customs + + custom = Custom |> TestRepo.get!(custom.bid) |> TestRepo.preload(:customs) + assert [] = custom.customs + end + + @tag :unique_constraint + test "has_many changeset assoc with constraints" do + author = TestRepo.insert!(%User{name: "john doe"}) + p1 = TestRepo.insert!(%Post{title: "hello", author_id: author.id}) + TestRepo.insert!(%Post{title: "world", author_id: author.id}) + + # Asserts that `unique_constraint` for `uuid` exists + assert_raise Ecto.ConstraintError, fn -> + TestRepo.insert!(%Post{title: "another", author_id: author.id, uuid: p1.uuid}) + end + + author = TestRepo.preload author, [:posts] + posts_params = Enum.map author.posts, fn %Post{uuid: u} -> + %{uuid: u, title: "fresh"} + end + + # This will only work if we delete before performing inserts + changeset = + author + |> Ecto.Changeset.cast(%{"posts" => posts_params}, ~w()) + |> Ecto.Changeset.cast_assoc(:posts) + author = TestRepo.update! changeset + assert Enum.map(author.posts, &(&1.title)) == ["fresh", "fresh"] + end + + test "belongs_to changeset assoc" do + # Insert new + changeset = + %Permalink{url: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, %Post{title: "1"}) + perma = TestRepo.insert!(changeset) + post = perma.post + assert perma.post_id + assert perma.post_id == post.id + assert perma.post.title == "1" + + # Replace with new + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, %Post{title: "2"}) + perma = TestRepo.update!(changeset) + assert perma.post.id != post.id + post = perma.post + assert perma.post_id + assert perma.post_id == post.id + assert perma.post.title == "2" + + # Replace with existing + existing = TestRepo.insert!(%Post{title: "3"}) + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, existing) + perma = TestRepo.update!(changeset) + post = perma.post + assert perma.post_id == post.id + assert perma.post_id == existing.id + assert 
perma.post.title == "3" + + # Replace with nil + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, nil) + perma = TestRepo.update!(changeset) + assert perma.post == nil + assert perma.post_id == nil + end + + test "belongs_to changeset assoc (on_replace: :update)" do + # Insert new + changeset = + %Permalink{url: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_post, %Post{title: "1"}) + perma = TestRepo.insert!(changeset) + post = perma.update_post + assert perma.post_id + assert perma.post_id == post.id + assert perma.update_post.title == "1" + + # Casting on update + changeset = + perma + |> Ecto.Changeset.cast(%{update_post: %{title: "2"}}, []) + |> Ecto.Changeset.cast_assoc(:update_post) + perma = TestRepo.update!(changeset) + assert perma.update_post.id == post.id + post = perma.update_post + assert perma.post_id + assert perma.post_id == post.id + assert perma.update_post.title == "2" + + # Replace with nil + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_post, nil) + perma = TestRepo.update!(changeset) + assert perma.update_post == nil + assert perma.post_id == nil + end + + test "inserting struct with associations" do + tree = %Permalink{ + url: "root", + post: %Post{ + title: "belongs_to", + comments: [ + %Comment{text: "child 1"}, + %Comment{text: "child 2"}, + ] + } + } + + tree = TestRepo.insert!(tree) + assert tree.id + assert tree.post.id + assert length(tree.post.comments) == 2 + assert Enum.all?(tree.post.comments, & &1.id) + + tree = TestRepo.get!(from(Permalink, preload: [post: :comments]), tree.id) + assert tree.id + assert tree.post.id + assert length(tree.post.comments) == 2 + assert Enum.all?(tree.post.comments, & &1.id) + end + + test "inserting struct with empty associations" do + permalink = TestRepo.insert!(%Permalink{url: "root", post: nil}) + assert permalink.post == nil + + post = TestRepo.insert!(%Post{title: "empty", comments: []}) + assert 
post.comments == [] + end + + test "inserting changeset with empty cast associations" do + changeset = + %Permalink{} + |> Ecto.Changeset.cast(%{url: "root", post: nil}, [:url]) + |> Ecto.Changeset.cast_assoc(:post) + permalink = TestRepo.insert!(changeset) + assert permalink.post == nil + + changeset = + %Post{} + |> Ecto.Changeset.cast(%{title: "root", comments: []}, [:title]) + |> Ecto.Changeset.cast_assoc(:comments) + post = TestRepo.insert!(changeset) + assert post.comments == [] + end + + test "inserting changeset with empty put associations" do + changeset = + %Permalink{} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:post, nil) + permalink = TestRepo.insert!(changeset) + assert permalink.post == nil + + changeset = + %Post{} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, []) + post = TestRepo.insert!(changeset) + assert post.comments == [] + end + + test "updating changeset with empty cast associations" do + post = TestRepo.insert!(%Post{}) + c1 = TestRepo.insert!(%Comment{post_id: post.id}) + c2 = TestRepo.insert!(%Comment{post_id: post.id}) + + assert TestRepo.all(Comment) == [c1, c2] + + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + |> TestRepo.update!() + + assert TestRepo.all(Comment) == [] + end + + ## Dependent + + test "has_many assoc on delete deletes all" do + post = TestRepo.insert!(%Post{}) + TestRepo.insert!(%Comment{post_id: post.id}) + TestRepo.insert!(%Comment{post_id: post.id}) + TestRepo.delete!(post) + + assert TestRepo.all(Comment) == [] + refute Process.get(Comment) + end + + test "has_many assoc on delete nilifies all" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Comment{author_id: user.id}) + TestRepo.insert!(%Comment{author_id: user.id}) + TestRepo.delete!(user) + + author_ids = Comment |> TestRepo.all() |> Enum.map(fn(comment) -> comment.author_id end) + + assert author_ids == [nil, 
nil] + refute Process.get(Comment) + end + + test "has_many assoc on delete does nothing" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Post{author_id: user.id}) + + TestRepo.delete!(user) + assert Enum.count(TestRepo.all(Post)) == 1 + end + + test "many_to_many assoc on delete deletes all" do + p1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + p2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + u1 = TestRepo.insert!(%User{name: "john"}) + u2 = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u1.id], + [post_id: p2.id, user_id: u2.id]] + TestRepo.delete!(p1) + + [pid2] = TestRepo.all from(p in Post, select: p.id) + assert pid2 == p2.id + + [[pid2, uid2]] = TestRepo.all from(j in "posts_users", select: [j.post_id, j.user_id]) + assert pid2 == p2.id + assert uid2 == u2.id + + [uid1, uid2] = TestRepo.all from(u in User, select: u.id) + assert uid1 == u1.id + assert uid2 == u2.id + end +end diff --git a/deps/ecto/integration_test/cases/interval.exs b/deps/ecto/integration_test/cases/interval.exs new file mode 100644 index 0000000..0954bc9 --- /dev/null +++ b/deps/ecto/integration_test/cases/interval.exs @@ -0,0 +1,419 @@ +defmodule Ecto.Integration.IntervalTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.{Post, User, Usec} + alias Ecto.Integration.TestRepo + import Ecto.Query + + @posted ~D[2014-01-01] + @inserted_at ~N[2014-01-01 02:00:00] + + setup do + TestRepo.insert!(%Post{posted: @posted, inserted_at: @inserted_at}) + :ok + end + + test "date_add with year" do + dec = Decimal.new(1) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1.0, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1, "year")) + 
assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1.0, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year")) + end + + test "date_add with month" do + dec = Decimal.new(3) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "month")) + end + + test "date_add with week" do + dec = Decimal.new(3) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "week")) + end + + test "date_add with day" do + dec = Decimal.new(5) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5.0, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5.0, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "day")) + end + + test "date_add with hour" do + dec = Decimal.new(48) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48, 
"hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48.0, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48.0, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "hour")) + end + + test "date_add with dynamic" do + posted = @posted + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^1, ^"year")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"month")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"week")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(^posted, ^5, ^"day")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(^posted, ^48, ^"hour")) + end + + test "date_add with negative interval" do + dec = Decimal.new(-1) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1.0, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1.0, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year")) + end + + test "datetime_add with year" do + dec = Decimal.new(1) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1.0, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in 
Post, select: datetime_add(p.inserted_at, ^1.0, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + test "datetime_add with month" do + dec = Decimal.new(3) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "month")) + end + + test "datetime_add with week" do + dec = Decimal.new(3) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "week")) + end + + test "datetime_add with day" do + dec = Decimal.new(5) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5.0, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^5, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: 
datetime_add(p.inserted_at, ^5.0, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "day")) + end + + test "datetime_add with hour" do + dec = Decimal.new(60) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60.0, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60.0, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "hour")) + end + + test "datetime_add with minute" do + dec = Decimal.new(90) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90.0, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "minute")) + end + + test "datetime_add with second" do + dec = Decimal.new(90) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: 
datetime_add(p.inserted_at, ^90.0, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "second")) + end + + @tag :uses_msec + test "datetime_add with millisecond" do + dec = Decimal.new(1500) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "millisecond")) + end + + @tag :microsecond_precision + @tag :uses_usec + test "datetime_add with microsecond" do + dec = Decimal.new(1500) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "microsecond")) + end + + test "datetime_add with dynamic" do + inserted_at = @inserted_at + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^1, ^"year")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"month")) + assert [~N[2014-01-22 
02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"week")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^5, ^"day")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^60, ^"hour")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"minute")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"second")) + end + + test "datetime_add with dynamic in filters" do + inserted_at = @inserted_at + field = :inserted_at + assert [_] = + TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, ^-1, "year")) + assert [_] = + TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, -3, "month")) + assert [_] = + TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, ^-3, ^"week")) + assert [_] = + TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, -5, ^"day")) + end + + test "datetime_add with negative interval" do + dec = Decimal.new(-1) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1.0, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1.0, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + test "from_now" do + current = DateTime.utc_now().year + dec = Decimal.new(5) + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5, "year")) + assert year > current + 
assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5.0, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5.0, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^dec, "year")) + assert year > current + end + + test "ago" do + current = DateTime.utc_now().year + dec = Decimal.new(5) + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5.0, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5.0, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^dec, "year")) + assert year < current + end + + test "datetime_add with utc_datetime" do + {:ok, datetime} = DateTime.from_naive(@inserted_at, "Etc/UTC") + TestRepo.insert!(%User{inserted_at: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2015-01-01 02:00:00], "Etc/UTC") + dec = Decimal.new(1) + + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(type(^datetime, :utc_datetime), 0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1.0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1.0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + @tag :microsecond_precision 
+ test "datetime_add with naive_datetime_usec" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]}) + datetime = ~N[2014-01-01 02:00:00.001501] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500, "microsecond")) + end + + @tag :microsecond_precision + @tag :decimal_precision + test "datetime_add with naive_datetime_usec and decimal increment" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]}) + dec = Decimal.new(1500) + datetime = ~N[2014-01-01 02:00:00.001501] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "microsecond")) + end + + @tag :microsecond_precision + test "datetime_add with utc_datetime_usec" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC") + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500, "microsecond")) 
+ end + + @tag :microsecond_precision + @tag :decimal_precision + test "datetime_add uses utc_datetime_usec with decimal increment" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC") + dec = Decimal.new(1500) + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, "microsecond")) + end + + test "datetime_add with utc_datetime_usec in milliseconds" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.151000], "Etc/UTC") + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150, "millisecond")) + end + + @tag :decimal_precision + test "datetime_add uses utc_datetime_usec with decimal increment in milliseconds" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.151000], "Etc/UTC") + dec = Decimal.new(150) + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, 
"millisecond")) + end + + test "datetime_add with naive_datetime_usec in milliseconds" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]}) + datetime = ~N[2014-01-01 02:00:00.151000] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150, "millisecond")) + end + + @tag :decimal_precision + test "datetime_add with naive_datetime_usec and decimal increment in milliseconds" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]}) + dec = Decimal.new(150) + datetime = ~N[2014-01-01 02:00:00.151000] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "millisecond")) + end +end diff --git a/deps/ecto/integration_test/cases/joins.exs b/deps/ecto/integration_test/cases/joins.exs new file mode 100644 index 0000000..50ee4d7 --- /dev/null +++ b/deps/ecto/integration_test/cases/joins.exs @@ -0,0 +1,672 @@ +defmodule Ecto.Integration.JoinsTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + alias Ecto.Integration.Permalink + alias Ecto.Integration.User + alias Ecto.Integration.PostUserCompositePk + + @tag :update_with_join + test "update all with joins" do + user = TestRepo.insert!(%User{name: "Tester"}) + post = TestRepo.insert!(%Post{title: "foo"}) + comment = 
TestRepo.insert!(%Comment{text: "hey", author_id: user.id, post_id: post.id}) + + another_post = TestRepo.insert!(%Post{title: "bar"}) + another_comment = TestRepo.insert!(%Comment{text: "another", author_id: user.id, post_id: another_post.id}) + + query = from(c in Comment, join: u in User, on: u.id == c.author_id, + where: c.post_id in ^[post.id]) + + assert {1, nil} = TestRepo.update_all(query, set: [text: "hoo"]) + assert %Comment{text: "hoo"} = TestRepo.get(Comment, comment.id) + assert %Comment{text: "another"} = TestRepo.get(Comment, another_comment.id) + end + + @tag :delete_with_join + test "delete all with joins" do + user = TestRepo.insert!(%User{name: "Tester"}) + post = TestRepo.insert!(%Post{title: "foo"}) + TestRepo.insert!(%Comment{text: "hey", author_id: user.id, post_id: post.id}) + TestRepo.insert!(%Comment{text: "foo", author_id: user.id, post_id: post.id}) + TestRepo.insert!(%Comment{text: "bar", author_id: user.id}) + + query = from(c in Comment, join: u in User, on: u.id == c.author_id, + where: is_nil(c.post_id)) + assert {1, nil} = TestRepo.delete_all(query) + assert [%Comment{}, %Comment{}] = TestRepo.all(Comment) + + query = from(c in Comment, join: u in assoc(c, :author), + join: p in assoc(c, :post), + where: p.id in ^[post.id]) + assert {2, nil} = TestRepo.delete_all(query) + assert [] = TestRepo.all(Comment) + end + + test "joins" do + _p = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, join: c in assoc(p, :permalink), order_by: p.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + + query = from(p in Post, join: c in assoc(p, :permalink), on: c.id == ^c1.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + end + + test "joins with queries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) 
+ + # Joined query without parameter + permalink = from c in Permalink, where: c.url == "1" + + query = from(p in Post, join: c in ^permalink, on: c.post_id == p.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + + # Joined query with parameter + permalink = from c in Permalink, where: c.url == "1" + + query = from(p in Post, join: c in ^permalink, on: c.id == ^c1.id, order_by: p.title, select: {p, c}) + assert [{^p1, ^c1}, {^p2, ^c1}] = TestRepo.all(query) + end + + test "named joins" do + _p = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = + from(p in Post, join: c in assoc(p, :permalink), as: :permalink, order_by: p.id) + |> select([p, permalink: c], {p, c}) + + assert [{^p2, ^c1}] = TestRepo.all(query) + end + + test "joins with dynamic in :on" do + p = TestRepo.insert!(%Post{title: "1"}) + c = TestRepo.insert!(%Permalink{url: "1", post_id: p.id}) + + join_on = dynamic([p, ..., c], c.id == ^c.id) + + query = + from(p in Post, join: c in Permalink, on: ^join_on) + |> select([p, c], {p, c}) + + assert [{^p, ^c}] = TestRepo.all(query) + + join_on = dynamic([p, permalink: c], c.id == ^c.id) + + query = + from(p in Post, join: c in Permalink, as: :permalink, on: ^join_on) + |> select([p, c], {p, c}) + + assert [{^p, ^c}] = TestRepo.all(query) + end + + @tag :cross_join + test "cross joins with missing entries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, cross_join: c in Permalink, order_by: p.id, select: {p, c}) + assert [{^p1, ^c1}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :left_join + test "left joins with missing entries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, 
left_join: c in assoc(p, :permalink), order_by: p.id, select: {p, c}) + assert [{^p1, nil}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :left_join + test "left join with missing entries from subquery" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, left_join: c in subquery(Permalink), on: p.id == c.post_id, order_by: p.id, select: {p, c}) + assert [{^p1, nil}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :right_join + test "right joins with missing entries" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: plid1} = TestRepo.insert!(%Permalink{url: "1", post_id: pid2}) + + TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + TestRepo.insert!(%Comment{text: "2", post_id: pid2}) + TestRepo.insert!(%Comment{text: "3", post_id: nil}) + + query = from(p in Post, right_join: c in assoc(p, :comments), + preload: :permalink, order_by: c.id) + assert [p1, p2, p3] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + assert is_nil(p3.id) + + assert p1.permalink == nil + assert p2.permalink.id == plid1 + end + + ## Associations joins + + test "has_many association join" do + post = TestRepo.insert!(%Post{title: "1"}) + c1 = TestRepo.insert!(%Comment{text: "hey", post_id: post.id}) + c2 = TestRepo.insert!(%Comment{text: "heya", post_id: post.id}) + + query = from(p in Post, join: c in assoc(p, :comments), select: {p, c}, order_by: p.id) + [{^post, ^c1}, {^post, ^c2}] = TestRepo.all(query) + end + + test "has_one association join" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{}) + p1 = TestRepo.insert!(%Permalink{url: "hey", user_id: user.id, post_id: post1.id}) + p2 = TestRepo.insert!(%Permalink{url: "heya", user_id: user.id, post_id: post2.id}) + + query = 
from(p in User, join: c in assoc(p, :permalink), select: {p, c}, order_by: c.id) + [{^user, ^p1}, {^user, ^p2}] = TestRepo.all(query) + end + + test "belongs_to association join" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{}) + p1 = TestRepo.insert!(%Permalink{url: "hey", user_id: user.id, post_id: post1.id}) + p2 = TestRepo.insert!(%Permalink{url: "heya", user_id: user.id, post_id: post2.id}) + + query = from(p in Permalink, join: c in assoc(p, :user), select: {p, c}, order_by: p.id) + [{^p1, ^user}, {^p2, ^user}] = TestRepo.all(query) + end + + test "has_many through association join" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "zzz"}) + u2 = TestRepo.insert!(%User{name: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = from p in Post, join: a in assoc(p, :comments_authors), select: {p, a}, order_by: [p.id, a.name] + assert [{^p1, ^u2}, {^p1, ^u1}, {^p1, ^u1}, {^p2, ^u2}] = TestRepo.all(query) + end + + test "has_many through nested association joins" do + u1 = TestRepo.insert!(%User{name: "Alice"}) + u2 = TestRepo.insert!(%User{name: "John"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: u1.id}) + p2 = TestRepo.insert!(%Post{title: "p2", author_id: u1.id}) + + TestRepo.insert!(%Comment{text: "c1", author_id: u1.id, post_id: p1.id}) + TestRepo.insert!(%Comment{text: "c2", author_id: u2.id, post_id: p1.id}) + TestRepo.insert!(%Comment{text: "c3", author_id: u2.id, post_id: p2.id}) + TestRepo.insert!(%Comment{text: "c4", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "c5", author_id: u1.id, post_id: p2.id}) + + assert %{ + comments: [ + 
%{text: "c1"}, + %{text: "c5"} + ], + posts: [ + %{title: "p1"} = p1, + %{title: "p2"} = p2 + ] + } = + from(u in User) + |> join(:left, [u], p in assoc(u, :posts)) + |> join(:left, [u], c in assoc(u, :comments)) + |> join(:left, [_, p], c in assoc(p, :comments)) + |> preload( + [user, posts, comments, post_comments], + comments: comments, + posts: {posts, comments: {post_comments, :author}} + ) + |> TestRepo.get(u1.id) + + assert [ + %{text: "c1", author: %{name: "Alice"}}, + %{text: "c2", author: %{name: "John"}} + ] = Enum.sort_by(p1.comments, & &1.text) + + assert [ + %{text: "c3", author: %{name: "John"}}, + %{text: "c4", author: nil}, + %{text: "c5", author: %{name: "Alice"}} + ] = Enum.sort_by(p2.comments, & &1.text) + end + + test "many_to_many association join" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + u1 = TestRepo.insert!(%User{name: "john"}) + u2 = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u2.id], + [post_id: p2.id, user_id: u2.id]] + + query = from(p in Post, join: u in assoc(p, :users), select: {p, u}, order_by: p.id) + [{^p1, ^u1}, {^p1, ^u2}, {^p2, ^u2}] = TestRepo.all(query) + end + + ## Association preload + + test "has_many assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + # Without on + query = from(p in Post, join: c in assoc(p, :comments), preload: [comments: c]) + [p1, p2] = TestRepo.all(query) + assert p1.comments == [c1, c2] + assert p2.comments == [c3] + + # With on + query = from(p in Post, left_join: c in assoc(p, :comments), + on: p.title == c.text, preload: [comments: c]) + [p1, p2] = TestRepo.all(query) + 
assert p1.comments == [c1] + assert p2.comments == [] + end + + test "has_one assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + _pl = TestRepo.insert!(%Permalink{url: "2"}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + query = from(p in Post, join: pl in assoc(p, :permalink), preload: [permalink: pl]) + assert [post1, post3] = TestRepo.all(query) + + assert post1.permalink == pl1 + assert post3.permalink == pl3 + end + + test "belongs_to assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + TestRepo.insert!(%Permalink{url: "2"}) + TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + query = from(pl in Permalink, left_join: p in assoc(pl, :post), preload: [post: p], order_by: pl.id) + assert [pl1, pl2, pl3] = TestRepo.all(query) + + assert pl1.post == p1 + refute pl2.post + assert pl3.post == p2 + end + + test "many_to_many assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + u1 = TestRepo.insert!(%User{name: "1"}) + u2 = TestRepo.insert!(%User{name: "2"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u2.id], + [post_id: p2.id, user_id: u2.id]] + + # Without on + query = from(p in Post, left_join: u in assoc(p, :users), preload: [users: u], order_by: p.id) + [p1, p2, p3] = TestRepo.all(query) + assert Enum.sort_by(p1.users, & &1.name) == [u1, u2] + assert p2.users == [u2] + assert p3.users == [] + + # With on + query = from(p in Post, left_join: u in assoc(p, :users), on: p.title == u.name, + preload: [users: u], order_by: p.id) + [p1, p2, p3] = TestRepo.all(query) + assert p1.users == [u1] + assert p2.users == [u2] + assert p3.users == [] + end + + test 
"has_many through assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + u1 = TestRepo.insert!(%User{name: "1"}) + u2 = TestRepo.insert!(%User{name: "2"}) + + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + # Without on + query = from(p in Post, left_join: ca in assoc(p, :comments_authors), + preload: [comments_authors: ca]) + [p1, p2] = TestRepo.all(query) + assert Enum.sort_by(p1.comments_authors, & &1.id) == [u1, u2] + assert p2.comments_authors == [u2] + + # With on + query = from(p in Post, left_join: ca in assoc(p, :comments_authors), + on: ca.name == p.title, preload: [comments_authors: ca]) + [p1, p2] = TestRepo.all(query) + assert p1.comments_authors == [u1] + assert p2.comments_authors == [u2] + end + + test "has_many through-through assoc selector" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{} = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{} = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{}) + %User{id: uid2} = TestRepo.insert!(%User{}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + query = from(p in Permalink, left_join: ca in assoc(p, :post_comments_authors), + preload: [post_comments_authors: ca], order_by: ca.id) + + [l1, l2] = TestRepo.all(query) + [u1, u2] = l1.post_comments_authors + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + + # Insert some 
intermediary joins to check indexes won't be shuffled + query = from(p in Permalink, + left_join: assoc(p, :post), + left_join: ca in assoc(p, :post_comments_authors), + left_join: assoc(p, :post), + left_join: assoc(p, :post), + preload: [post_comments_authors: ca], order_by: ca.id) + + [l1, l2] = TestRepo.all(query) + [u1, u2] = l1.post_comments_authors + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + end + + ## Nested + + test "nested assoc" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: uid2}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid2, author_id: uid2}) + + # use multiple associations to force parallel preloader + query = from p in Post, + left_join: c in assoc(p, :comments), + left_join: u in assoc(c, :author), + order_by: [p.id, c.id, u.id], + preload: [:permalink, comments: {c, author: {u, [:comments, :custom]}}], + select: {0, [p], 1, 2} + + posts = TestRepo.all(query) + assert [p1, p2] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) + assert p1.id == pid1 + assert p2.id == pid2 + + assert [c1, c2] = p1.comments + assert [c3] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author.id == uid2 + assert c3.author.id == uid2 + end + + test "nested assoc with missing entries" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %Post{id: pid3} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = 
TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: nil}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid3, author_id: uid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + left_join: u in assoc(c, :author), + order_by: [p.id, c.id, u.id], + preload: [comments: {c, author: u}] + + assert [p1, p2, p3] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + assert p3.id == pid3 + + assert [c1, c2] = p1.comments + assert [] = p2.comments + assert [c3] = p3.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author == nil + assert c3.author.id == uid2 + end + + test "nested assoc with child preload" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: uid2}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid2, author_id: uid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + order_by: [p.id, c.id], + preload: [comments: {c, :author}], + select: p + + assert [p1, p2] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + + assert [c1, c2] = p1.comments + assert [c3] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author.id == uid2 + assert c3.author.id == uid2 + end + + test "nested assoc with sibling preload" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = 
TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: plid1} = TestRepo.insert!(%Permalink{url: "1", post_id: pid2}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid2}) + %Comment{id: _} = TestRepo.insert!(%Comment{text: "3", post_id: pid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + where: c.text in ~w(1 2), + preload: [:permalink, comments: c], + select: {0, [p], 1, 2} + + posts = TestRepo.all(query) + assert [p1, p2] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) + assert p1.id == pid1 + assert p2.id == pid2 + + assert p2.permalink.id == plid1 + + assert [c1] = p1.comments + assert [c2] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + end + + test "mixing regular join and assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + _pl = TestRepo.insert!(%Permalink{url: "2"}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + # Without on + query = from(p in Post, join: pl in assoc(p, :permalink), + join: c in assoc(p, :comments), + preload: [permalink: pl], + select: {p, c}) + [{p1, ^c1}, {p1, ^c2}, {p2, ^c3}] = TestRepo.all(query) + assert p1.permalink == pl1 + assert p2.permalink == pl3 + end + + test "association with composite pk join" do + post = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{name: "1"}) + TestRepo.insert!(%PostUserCompositePk{post_id: post.id, user_id: user.id}) + + query = from(p in Post, join: a in assoc(p, :post_user_composite_pk), + preload: [post_user_composite_pk: a], select: p) + assert [post] = TestRepo.all(query) + assert post.post_user_composite_pk + end + + test 
"joining a through association with a nested preloads" do + post = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{name: "1"}) + TestRepo.insert!(%Comment{text: "1", post_id: post.id}) + TestRepo.insert!(%Permalink{post_id: post.id, user_id: user.id}) + + query = + from c in Comment, + join: pp in assoc(c, :post_permalink), + join: u in assoc(pp, :user), + preload: [post_permalink: {pp, [:post, user: u]}] + + [comment] = TestRepo.all(query) + + assert not Ecto.assoc_loaded?(comment.post) + assert %Permalink{user: %User{}, post: %Post{}} = comment.post_permalink + end + + test "joining multiple through associations with a nested preloads" do + post = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{name: "1"}) + TestRepo.insert!(%Comment{text: "1", post_id: post.id, author_id: user.id}) + TestRepo.insert!(%Permalink{post_id: post.id, user_id: user.id}) + + query = + from c in Comment, + join: pp in assoc(c, :post_permalink), + join: ap in assoc(c, :author_permalink), + join: u1 in assoc(pp, :user), + join: u2 in assoc(ap, :user), + preload: [post_permalink: {pp, [:post, user: u1]}, author_permalink: {ap, [:post, user: u2]}] + + [comment] = TestRepo.all(query) + + assert not Ecto.assoc_loaded?(comment.post) + assert not Ecto.assoc_loaded?(comment.author) + assert %Permalink{user: %User{}, post: %Post{}} = comment.post_permalink + assert %Permalink{user: %User{}, post: %Post{}} = comment.author_permalink + end + + test "joining nested through associations with a nested preloads" do + user = TestRepo.insert!(%User{name: "1"}) + post = TestRepo.insert!(%Post{title: "1", author_id: user.id}) + TestRepo.insert!(%Comment{text: "1", post_id: post.id}) + TestRepo.insert!(%Permalink{post_id: post.id, user_id: user.id}) + + query = + from c in Comment, + join: pp in assoc(c, :post_permalink), + join: up in assoc(pp, :user_posts), + preload: [post_permalink: {pp, [:post, user_posts: {up, :comments}]}] + + [comment] = TestRepo.all(query) + 
+ assert not Ecto.assoc_loaded?(comment.post) + assert %Permalink{post: %Post{}, user_posts: [%Post{}]} = comment.post_permalink + assert not Ecto.assoc_loaded?(comment.post_permalink.user) + end + + test "joining and preloading through a subquery" do + %{id: p_id} = TestRepo.insert!(%Post{}) + %{id: c1_id} = TestRepo.insert!(%Comment{post_id: p_id}) + %{id: c2_id} = TestRepo.insert!(%Comment{post_id: p_id}) + + q = + from p1 in Post, + left_join: u in User, + on: p1.author_id == u.id, + inner_join: c in subquery(from c in Comment), + on: p1.id == c.post_id, + join: p2 in Post, + on: c.post_id == p2.id, + preload: [author: u, force_comments: {c, post: p2}] + + assert [%Post{id: ^p_id, force_comments: comments}] = TestRepo.all(q) + [comment1, comment2] = Enum.sort_by(comments, & &1.id) + assert %Comment{id: ^c1_id, post: %Post{id: ^p_id}} = comment1 + assert %Comment{id: ^c2_id, post: %Post{id: ^p_id}} = comment2 + end +end diff --git a/deps/ecto/integration_test/cases/preload.exs b/deps/ecto/integration_test/cases/preload.exs new file mode 100644 index 0000000..cb7627d --- /dev/null +++ b/deps/ecto/integration_test/cases/preload.exs @@ -0,0 +1,866 @@ +defmodule Ecto.Integration.PreloadTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + alias Ecto.Integration.Item + alias Ecto.Integration.Permalink + alias Ecto.Integration.User + alias Ecto.Integration.Custom + alias Ecto.Integration.Order + + test "preload with parameter from select_merge" do + p1 = TestRepo.insert!(%Post{title: "p1"}) + TestRepo.insert!(%Comment{text: "c1", post: p1}) + + comments = + from(c in Comment, select: struct(c, [:text])) + |> select_merge([c], %{post_id: c.post_id}) + |> preload(:post) + |> TestRepo.all() + + assert [%{text: "c1", post: %{title: "p1"}}] = comments + end + + test "preload has_many" do + p1 = 
TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + [p3, p1, p2] = TestRepo.preload([p3, p1, p2], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id() + assert [] = p3.comments + end + + test "preload has_many multiple times" do + p1 = TestRepo.insert!(%Post{title: "1"}) + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + + [p1, p1] = TestRepo.preload([p1, p1], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + + [p1, p1] = TestRepo.preload([p1, p1], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + end + + test "preload has_one" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + %Permalink{id: pid1} = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + %Permalink{} = TestRepo.insert!(%Permalink{url: "2", post_id: nil}) + %Permalink{id: pid3} = TestRepo.insert!(%Permalink{url: "3", post_id: p3.id}) + + assert %Ecto.Association.NotLoaded{} = p1.permalink + assert %Ecto.Association.NotLoaded{} = p2.permalink + + [p3, p1, p2] = TestRepo.preload([p3, p1, p2], :permalink) + assert %Permalink{id: ^pid1} = p1.permalink + refute p2.permalink + assert %Permalink{id: ^pid3} = p3.permalink 
+ end + + test "preload belongs_to" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + %Post{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: pid1}) + pl2 = TestRepo.insert!(%Permalink{url: "2", post_id: nil}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: pid3}) + assert %Ecto.Association.NotLoaded{} = pl1.post + + [pl3, pl1, pl2] = TestRepo.preload([pl3, pl1, pl2], :post) + assert %Post{id: ^pid1} = pl1.post + refute pl2.post + assert %Post{id: ^pid3} = pl3.post + end + + test "preload multiple belongs_to" do + %User{id: uid} = TestRepo.insert!(%User{name: "foo"}) + %Post{id: pid} = TestRepo.insert!(%Post{title: "1"}) + %Comment{id: cid} = TestRepo.insert!(%Comment{post_id: pid, author_id: uid}) + + comment = TestRepo.get!(Comment, cid) + comment = TestRepo.preload(comment, [:author, :post]) + assert comment.author.id == uid + assert comment.post.id == pid + end + + test "preload belongs_to with shared parent" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: pid1}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: pid2}) + + [c3, c1, c2] = TestRepo.preload([c3, c1, c2], :post) + assert %Post{id: ^pid1} = c1.post + assert %Post{id: ^pid1} = c2.post + assert %Post{id: ^pid2} = c3.post + end + + test "preload many_to_many" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same name to expose bugs in preload sorting + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid4} = TestRepo.insert!(%User{name: "3"}) + + TestRepo.insert_all "posts_users", 
[[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3], + [post_id: p2.id, user_id: uid4], + [post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid4]] + + assert %Ecto.Association.NotLoaded{} = p1.users + + [p1, p2, p3] = TestRepo.preload([p1, p2, p3], :users) + assert [%User{id: ^uid1}, %User{id: ^uid2}] = p1.users |> sort_by_id + assert [%User{id: ^uid3}, %User{id: ^uid4}] = p2.users |> sort_by_id + assert [%User{id: ^uid1}, %User{id: ^uid4}] = p3.users |> sort_by_id + end + + test "preload has_many through" do + %Post{id: pid1} = p1 = TestRepo.insert!(%Post{}) + %Post{id: pid2} = p2 = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + [p1, p2] = TestRepo.preload([p1, p2], :comments_authors) + + # Through was preloaded + [u1, u2] = p1.comments_authors |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = p2.comments_authors + assert u2.id == uid2 + + # But we also preloaded everything along the way + assert [c1, c2, c3] = p1.comments |> sort_by_id + assert c1.author.id == uid1 + assert c2.author.id == uid1 + assert c3.author.id == uid2 + + assert [c4] = p2.comments + assert c4.author.id == uid2 + end + + test "preload has_one through" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{id: lid1} = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{id: lid2} = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %Comment{} = c1 = TestRepo.insert!(%Comment{post_id: pid1}) + %Comment{} = c2 = TestRepo.insert!(%Comment{post_id: 
pid1}) + %Comment{} = c3 = TestRepo.insert!(%Comment{post_id: pid2}) + + [c1, c2, c3] = TestRepo.preload([c1, c2, c3], :post_permalink) + + # Through was preloaded + assert c1.post.id == pid1 + assert c1.post.permalink.id == lid1 + assert c1.post_permalink.id == lid1 + + assert c2.post.id == pid1 + assert c2.post.permalink.id == lid1 + assert c2.post_permalink.id == lid1 + + assert c3.post.id == pid2 + assert c3.post.permalink.id == lid2 + assert c3.post_permalink.id == lid2 + end + + test "preload through with nil association" do + %Comment{} = c = TestRepo.insert!(%Comment{post_id: nil}) + + c = TestRepo.preload(c, [:post, :post_permalink]) + assert c.post == nil + assert c.post_permalink == nil + + c = TestRepo.preload(c, [:post, :post_permalink]) + assert c.post == nil + assert c.post_permalink == nil + end + + test "preload through with nil struct" do + %Comment{} = c = TestRepo.insert!(%Comment{}) + [%Comment{}, nil] = TestRepo.preload([c, nil], [:post, :post_permalink]) + end + + test "preload has_many through-through" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{} = l1 = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{} = l2 = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + # With assoc query + [l1, l2] = TestRepo.preload([l1, l2], :post_comments_authors) + + # Through was preloaded + [u1, u2] = l1.post_comments_authors |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + + # But we also 
preloaded everything along the way + assert l1.post.id == pid1 + assert l1.post.comments != [] + + assert l2.post.id == pid2 + assert l2.post.comments != [] + end + + test "preload has_many through many_to_many" do + %Post{} = p1 = TestRepo.insert!(%Post{}) + %Post{} = p2 = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid2]] + + %Comment{id: cid1} = TestRepo.insert!(%Comment{author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{author_id: uid1}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{author_id: uid2}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{author_id: uid2}) + + [p1, p2] = TestRepo.preload([p1, p2], :users_comments) + + # Through was preloaded + [c1, c2, c3, c4] = p1.users_comments |> sort_by_id + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + assert c4.id == cid4 + + [c3, c4] = p2.users_comments |> sort_by_id + assert c3.id == cid3 + assert c4.id == cid4 + + # But we also preloaded everything along the way + assert [u1, u2] = p1.users |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + assert [u2] = p2.users + assert u2.id == uid2 + end + + ## Empties + + test "preload empty" do + assert TestRepo.preload([], :anything_goes) == [] + end + + test "preload has_many with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :comments) + + assert p.title == "1" + assert p.comments == [] + end + + test "preload has_one with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :permalink) + + assert p.title == "1" + assert p.permalink == nil + end + + test "preload belongs_to with no associated entry" do + c = TestRepo.insert!(%Comment{text: "1"}) + c = TestRepo.preload(c, :post) + + assert c.text 
== "1" + assert c.post == nil + end + + test "preload many_to_many with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :users) + + assert p.title == "1" + assert p.users == [] + end + + ## With queries + + test "preload with 1-arity function" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: fn _ -> TestRepo.all(Comment) end) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = pe1.comments + assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = pe2.comments + assert [] = pe3.comments + end + + test "preload with 2-arity function" do + p = TestRepo.insert!(%Post{title: "1"}) + c1 = TestRepo.insert!(%Comment{post_id: p.id}) + c2 = TestRepo.insert!(%Comment{post_id: p.id}) + + # making a simple preloader so that it works across all adapters + preloader = fn parent_ids, assoc -> + %{related_key: related_key, queryable: queryable} = assoc + + from(q in queryable, where: field(q, ^related_key) in ^parent_ids, order_by: q.id) + |> TestRepo.all() + end + + assert p = TestRepo.preload(p, comments: preloader) + assert [^c1, ^c2] = p.comments + end + + test "preload many_to_many with function" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same name to expose bugs in preload sorting + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid2} = 
TestRepo.insert!(%User{name: "2"}) + %User{id: uid4} = TestRepo.insert!(%User{name: "3"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3], + [post_id: p2.id, user_id: uid4], + [post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid4]] + + wrong_preloader = fn post_ids -> + TestRepo.all( + from u in User, + join: pu in "posts_users", + on: true, + where: pu.post_id in ^post_ids and pu.user_id == u.id, + order_by: u.id, + select: map(u, [:id]) + ) + end + + assert_raise RuntimeError, ~r/invalid custom preload for `users` on `Ecto.Integration.Post`/, fn -> + TestRepo.preload([p1, p2, p3], users: wrong_preloader) + end + + right_preloader = fn post_ids -> + TestRepo.all( + from u in User, + join: pu in "posts_users", + on: true, + where: pu.post_id in ^post_ids and pu.user_id == u.id, + order_by: u.id, + select: {pu.post_id, map(u, [:id])} + ) + end + + [p1, p2, p3] = TestRepo.preload([p1, p2, p3], users: right_preloader) + assert p1.users == [%{id: uid1}, %{id: uid2}] + assert p2.users == [%{id: uid3}, %{id: uid4}] + assert p3.users == [%{id: uid1}, %{id: uid4}] + end + + test "preload with query" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + # With empty query + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, where: false)) + assert [] = pe1.comments + assert [] = pe2.comments + assert [] = pe3.comments + + # With custom 
select + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, select: c.id, order_by: c.id)) + assert [^cid1, ^cid2] = pe1.comments + assert [^cid3, ^cid4] = pe2.comments + assert [] = pe3.comments + + # With custom ordered query + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, order_by: [desc: c.text])) + assert [%Comment{id: ^cid2}, %Comment{id: ^cid1}] = pe1.comments + assert [%Comment{id: ^cid4}, %Comment{id: ^cid3}] = pe2.comments + assert [] = pe3.comments + + # With custom ordered query with preload + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: {from(c in Comment, order_by: [desc: c.text]), :post}) + assert [%Comment{id: ^cid2} = c2, %Comment{id: ^cid1} = c1] = pe1.comments + assert [%Comment{id: ^cid4} = c4, %Comment{id: ^cid3} = c3] = pe2.comments + assert [] = pe3.comments + + assert c1.post.title == "1" + assert c2.post.title == "1" + assert c3.post.title == "2" + assert c4.post.title == "2" + end + + test "preload through with query" do + %Post{id: pid1} = p1 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "foo"}) + u2 = TestRepo.insert!(%User{name: "bar"}) + u3 = TestRepo.insert!(%User{name: "baz"}) + u4 = TestRepo.insert!(%User{name: "norf"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u3.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u4.id}) + + np1 = TestRepo.preload(p1, comments_authors: from(u in User, where: u.name == "foo")) + assert np1.comments_authors == [u1] + + assert_raise ArgumentError, ~r/Ecto expected a map\/struct with the key `id` but got: \d+/, fn -> + TestRepo.preload(p1, comments_authors: from(u in User, order_by: u.name, select: u.id)) + end + + # 
The subpreload order does not matter because the result is dictated by comments + np1 = TestRepo.preload(p1, comments_authors: from(u in User, order_by: u.name, select: %{id: u.id})) + assert np1.comments_authors == + [%{id: u1.id}, %{id: u2.id}, %{id: u3.id}, %{id: u4.id}] + end + + test "preload into a subquery source" do + %{id: p_id} = TestRepo.insert!(%Post{}) + %{id: c_id} = TestRepo.insert!(%Comment{post_id: p_id}) + + q = + from c in subquery(from c in Comment), + join: p in Post, + on: c.post_id == p.id, + preload: [post: p] + + assert [%Comment{id: ^c_id, post: %Post{id: ^p_id}}] = TestRepo.all(q) + end + + ## With take + + test "preload with take" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + posts = TestRepo.all(from Post, preload: [:comments], select: [:id, comments: [:id, :post_id]]) + [p1, p2, p3] = sort_by_id(posts) + assert p1.title == nil + assert p2.title == nil + assert p3.title == nil + + assert [%{id: ^cid1, text: nil}, %{id: ^cid2, text: nil}] = sort_by_id(p1.comments) + assert [%{id: ^cid3, text: nil}, %{id: ^cid4, text: nil}] = sort_by_id(p2.comments) + assert [] = sort_by_id(p3.comments) + end + + test "take with join nil maps (many association)" do + p = TestRepo.insert!(%Post{}) + + # many + query = + from p in Post, + left_join: c in Comment, + on: p.id == c.post_id, + select: map(p, [:id, comments: [:id, :post_id]]), + preload: [comments: c] + + assert TestRepo.one(query) == %{id: p.id, comments: []} + + query = + from p in Post, + left_join: c in Comment, + on: p.id == c.post_id, + 
select: map(p, [:id, comments: [:id, :post_id]]), + preload: [:comments] + + assert TestRepo.one(query) == %{id: p.id, comments: []} + end + + test "take with join nil maps (one association)" do + p = TestRepo.insert!(%Post{}) + + query = + from p in Post, + left_join: u in User, + on: p.author_id == u.id, + select: map(p, [:id, author: [:id, :name]]), + preload: [author: u] + + assert TestRepo.one(query) == %{id: p.id, author: nil} + + query = + from p in Post, + left_join: u in User, + on: p.author_id == u.id, + select: map(p, [:id, author: [:id, :name]]), + preload: [:author] + + assert TestRepo.one(query) == %{id: p.id, author: nil} + end + + test "preload through with take" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + + [p1] = TestRepo.all from Post, preload: [:comments_authors], select: [:id, comments_authors: :id] + [%{id: ^uid1, name: nil}, %{id: ^uid2, name: nil}] = p1.comments_authors |> sort_by_id + end + + ## Nested + + test "preload many assocs" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + assert [p2, p1] = TestRepo.preload([p2, p1], [:comments, :users]) + assert p1.comments == [] + assert p2.comments == [] + assert p1.users == [] + assert p2.users == [] + end + + test "preload nested" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "4", post_id: p2.id}) + + assert [p2, p1] = TestRepo.preload([p2, p1], [comments: :post]) + 
assert [c1, c2] = p1.comments + assert [c3, c4] = p2.comments + assert p1.id == c1.post.id + assert p1.id == c2.post.id + assert p2.id == c3.post.id + assert p2.id == c4.post.id + end + + test "preload nested via custom query" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "4", post_id: p2.id}) + + query = from(c in Comment, preload: :post, order_by: [desc: c.text]) + assert [p2, p1] = TestRepo.preload([p2, p1], comments: query) + assert [c2, c1] = p1.comments + assert [c4, c3] = p2.comments + assert p1.id == c1.post.id + assert p1.id == c2.post.id + assert p2.id == c3.post.id + assert p2.id == c4.post.id + end + + test "custom preload_order" do + post = TestRepo.insert!(%Post{users: [%User{name: "bar"}, %User{name: "foo"}], title: "1"}) + + TestRepo.insert!(%Comment{text: "2", post_id: post.id}) + TestRepo.insert!(%Comment{text: "1", post_id: post.id}) + + post = TestRepo.preload(post, [:ordered_comments, :ordered_users]) + + # asc + assert [%{text: "1"}, %{text: "2"}] = post.ordered_comments + + # desc + assert [%{name: "foo"}, %{name: "bar"}] = post.ordered_users + end + + test "custom preload_order with mfa" do + post1 = TestRepo.insert!(%Post{users: [%User{name: "bar"}, %User{name: "foo"}], title: "1"}) + post2 = TestRepo.insert!(%Post{users: [%User{name: "baz"}, %User{name: "foz"}], title: "2"}) + + [post1, post2] = TestRepo.preload([post1, post2], [:ordered_users_by_join_table], log: :error) + + assert [%{name: "foo"}, %{name: "bar"}] = post1.ordered_users_by_join_table + assert [%{name: "foz"}, %{name: "baz"}] = post2.ordered_users_by_join_table + end + + ## Others + + @tag :invalid_prefix + test "preload custom prefix from schema" do + p = TestRepo.insert!(%Post{title: "1"}) + p = Ecto.put_meta(p, prefix: 
"this_surely_does_not_exist") + # This preload should fail because it points to a prefix that does not exist + assert catch_error(TestRepo.preload(p, [:comments])) + end + + @tag :invalid_prefix + test "preload custom prefix from options" do + p = TestRepo.insert!(%Post{title: "1"}) + # This preload should fail because it points to a prefix that does not exist + assert catch_error(TestRepo.preload(p, [:comments], prefix: "this_surely_does_not_exist")) + end + + test "preload with binary_id" do + c = TestRepo.insert!(%Custom{}) + u = TestRepo.insert!(%User{custom_id: c.bid}) + + u = TestRepo.preload(u, :custom) + assert u.custom.bid == c.bid + end + + test "preload raises with association set but without id" do + c1 = TestRepo.insert!(%Comment{text: "1"}) + u1 = TestRepo.insert!(%User{name: "name"}) + updated = %{c1 | author: u1, author_id: nil} + + assert ExUnit.CaptureLog.capture_log(fn -> + assert TestRepo.preload(updated, [:author]).author == u1 + end) =~ ~r/its association key `author_id` is nil/ + + assert TestRepo.preload(updated, [:author], force: true).author == nil + end + + test "preload skips already loaded for cardinality one" do + %Post{id: pid} = TestRepo.insert!(%Post{title: "1"}) + + c1 = %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid}) + c2 = %Comment{id: _cid} = TestRepo.insert!(%Comment{text: "2", post_id: nil}) + + [c1, c2] = TestRepo.preload([c1, c2], :post) + assert %Post{id: ^pid} = c1.post + assert c2.post == nil + + [c1, c2] = TestRepo.preload([c1, c2], post: :comments) + assert [%Comment{id: ^cid1}] = c1.post.comments + + TestRepo.update_all Post, set: [title: "0"] + TestRepo.update_all Comment, set: [post_id: pid] + + # Preloading once again shouldn't change the result + [c1, c2] = TestRepo.preload([c1, c2], :post) + assert %Post{id: ^pid, title: "1", comments: [_|_]} = c1.post + assert c2.post == nil + + [c1, c2] = TestRepo.preload([c1, %{c2 | post_id: pid}], :post, force: true) + assert %Post{id: ^pid, title: 
"0", comments: %Ecto.Association.NotLoaded{}} = c1.post + assert %Post{id: ^pid, title: "0", comments: %Ecto.Association.NotLoaded{}} = c2.post + end + + test "preload skips already loaded for cardinality many" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + + [p1, p2] = TestRepo.preload([p1, p2], :comments) + assert [%Comment{id: ^cid1}] = p1.comments + assert [%Comment{id: ^cid2}] = p2.comments + + [p1, p2] = TestRepo.preload([p1, p2], comments: :post) + assert hd(p1.comments).post.id == p1.id + assert hd(p2.comments).post.id == p2.id + + TestRepo.update_all Comment, set: [text: "0"] + + # Preloading once again shouldn't change the result + [p1, p2] = TestRepo.preload([p1, p2], :comments) + assert [%Comment{id: ^cid1, text: "1", post: %Post{}}] = p1.comments + assert [%Comment{id: ^cid2, text: "2", post: %Post{}}] = p2.comments + + [p1, p2] = TestRepo.preload([p1, p2], :comments, force: true) + assert [%Comment{id: ^cid1, text: "0", post: %Ecto.Association.NotLoaded{}}] = p1.comments + assert [%Comment{id: ^cid2, text: "0", post: %Ecto.Association.NotLoaded{}}] = p2.comments + end + + test "preload keyword query" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + TestRepo.insert!(%Post{title: "3"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "4", post_id: p2.id}) + + # Regular query + query = from(p in Post, preload: [:comments], select: p) + + assert [p1, p2, p3] = TestRepo.all(query) |> sort_by_id + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id + assert 
[%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id + assert [] = p3.comments + + # Query with interpolated preload query + query = from(p in Post, preload: [comments: ^from(c in Comment, where: false)], select: p) + + assert [p1, p2, p3] = TestRepo.all(query) + assert [] = p1.comments + assert [] = p2.comments + assert [] = p3.comments + + # Now let's use an interpolated preload too + comments = [:comments] + query = from(p in Post, preload: ^comments, select: {0, [p], 1, 2}) + + posts = TestRepo.all(query) + [p1, p2, p3] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) |> sort_by_id + + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id + assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id + assert [] = p3.comments + end + + + test "preload belongs_to in embedded_schema" do + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + item = %Item{user_id: uid1} + + # Starts as not loaded + assert %Ecto.Association.NotLoaded{} = item.user + + # Now we preload it + item = TestRepo.preload(item, :user) + assert %User{id: ^uid1} = item.user + end + + describe "preload associations from nested embeds" do + setup do + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "3"}) + item1 = %Item{id: 1, user_id: uid1} + item2 = %Item{id: 2, user_id: uid2} + item3 = %Item{id: 3, user_id: uid3} + order1 = %Order{items: [item1, item3, item2], item: item1} + order2 = %Order{items: [], item: nil} + order3 = %Order{items: nil, item: nil} + order4 = %Order{items: [item1, item2], item: item2} + + [orders: [order1, order2, order3, order4]] + end + + test "cannot preload embed without its associations", context do + assert_raise ArgumentError, ~r/cannot preload embedded field/, fn -> + TestRepo.preload(context.orders, :item) + end + end + + test "embeds_one", context do + [nil | preloaded_orders] = [nil | 
context.orders] |> TestRepo.preload(item: :user) + + expected_item_user = + Enum.map(context.orders, fn + %{item: nil} -> {nil, nil} + %{item: item} -> {item.id, item.user_id} + end) + + actual_item_user = + Enum.map(preloaded_orders, fn + %{item: nil} -> {nil, nil} + %{item: item} -> {item.id, item.user.id} + end) + + assert expected_item_user == actual_item_user + end + + test "embeds_many", context do + [nil | preloaded_orders] = [nil | context.orders] |> TestRepo.preload(items: :user) + + expected_items_user = + Enum.map(context.orders, fn + %{items: nil} -> {nil, nil} + %{items: items} -> Enum.map(items, & {&1.id, &1.user_id}) + end) + + actual_items_user = + Enum.map(preloaded_orders, fn + %{items: nil} -> {nil, nil} + %{items: items} -> Enum.map(items, & {&1.id, &1.user.id}) + end) + + assert expected_items_user == actual_items_user + end + end + + defp sort_by_id(values) do + Enum.sort_by(values, &(&1.id)) + end +end diff --git a/deps/ecto/integration_test/cases/repo.exs b/deps/ecto/integration_test/cases/repo.exs new file mode 100644 index 0000000..4d3c733 --- /dev/null +++ b/deps/ecto/integration_test/cases/repo.exs @@ -0,0 +1,2397 @@ +defmodule Ecto.Integration.RepoTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Post + alias Ecto.Integration.Order + alias Ecto.Integration.User + alias Ecto.Integration.Comment + alias Ecto.Integration.Permalink + alias Ecto.Integration.Custom + alias Ecto.Integration.Barebone + alias Ecto.Integration.CompositePk + alias Ecto.Integration.PostUserCompositePk + + test "returns already started for started repos" do + assert {:error, {:already_started, _}} = TestRepo.start_link() + end + + test "supports unnamed repos" do + assert {:ok, pid} = TestRepo.start_link(name: nil) + assert Ecto.Repo.Queryable.all(pid, Post, Ecto.Repo.Supervisor.tuplet(pid, [])) == [] + end + + test "all empty" 
do + assert TestRepo.all(Post) == [] + assert TestRepo.all(from p in Post) == [] + end + + test "all with in" do + TestRepo.insert!(%Post{title: "hello"}) + + # Works without the query cache. + assert_raise Ecto.Query.CastError, fn -> + TestRepo.all(from p in Post, where: p.title in ^nil) + end + + assert [] = TestRepo.all from p in Post, where: p.title in [] + assert [] = TestRepo.all from p in Post, where: p.title in ["1", "2", "3"] + assert [] = TestRepo.all from p in Post, where: p.title in ^[] + + assert [_] = TestRepo.all from p in Post, where: p.title not in [] + assert [_] = TestRepo.all from p in Post, where: p.title in ["1", "hello", "3"] + assert [_] = TestRepo.all from p in Post, where: p.title in ["1", ^"hello", "3"] + assert [_] = TestRepo.all from p in Post, where: p.title in ^["1", "hello", "3"] + + # Still doesn't work after the query cache. + assert_raise Ecto.Query.CastError, fn -> + TestRepo.all(from p in Post, where: p.title in ^nil) + end + end + + test "all using named from" do + TestRepo.insert!(%Post{title: "hello"}) + + query = + from(p in Post, as: :post) + |> where([post: p], p.title == "hello") + + assert [_] = TestRepo.all query + end + + test "all without schema" do + %Post{} = TestRepo.insert!(%Post{title: "title1"}) + %Post{} = TestRepo.insert!(%Post{title: "title2"}) + + assert ["title1", "title2"] = + TestRepo.all(from(p in "posts", order_by: p.title, select: p.title)) + + assert [_] = + TestRepo.all(from(p in "posts", where: p.title == "title1", select: p.id)) + end + + test "all shares metadata" do + TestRepo.insert!(%Post{title: "title1"}) + TestRepo.insert!(%Post{title: "title2"}) + + [post1, post2] = TestRepo.all(Post) + assert :erts_debug.same(post1.__meta__, post2.__meta__) + + [new_post1, new_post2] = TestRepo.all(Post) + assert :erts_debug.same(post1.__meta__, new_post1.__meta__) + assert :erts_debug.same(post2.__meta__, new_post2.__meta__) + end + + @tag :invalid_prefix + test "all with invalid prefix" do + assert 
catch_error(TestRepo.all("posts", prefix: "oops")) + end + + test "all_by" do + post1 = TestRepo.insert!(%Post{title: "a"}) + post2 = TestRepo.insert!(%Post{title: "a"}) + post3 = TestRepo.insert!(%Post{title: "b"}) + + assert TestRepo.all_by(Post, title: "a") |> Enum.sort() == [post1, post2] + assert TestRepo.all_by(Post, title: "b") |> Enum.sort() == [post3] + end + + test "insert, update and delete" do + post = %Post{title: "insert, update, delete", visits: 1} + meta = post.__meta__ + + assert %Post{} = inserted = TestRepo.insert!(post) + assert %Post{} = updated = TestRepo.update!(Ecto.Changeset.change(inserted, visits: 2)) + + deleted_meta = put_in meta.state, :deleted + assert %Post{__meta__: ^deleted_meta} = TestRepo.delete!(updated) + + loaded_meta = put_in meta.state, :loaded + assert %Post{__meta__: ^loaded_meta} = TestRepo.insert!(post) + + post = TestRepo.one(Post) + assert post.__meta__.state == :loaded + assert post.inserted_at + end + + test "insert, update and delete with field source" do + permalink = %Permalink{url: "url"} + assert %Permalink{url: "url"} = inserted = + TestRepo.insert!(permalink) + assert %Permalink{url: "new"} = updated = + TestRepo.update!(Ecto.Changeset.change(inserted, url: "new")) + assert %Permalink{url: "new"} = + TestRepo.delete!(updated) + end + + @tag :composite_pk + test "insert, update and delete with composite pk" do + c1 = TestRepo.insert!(%CompositePk{a: 1, b: 2, name: "first"}) + c2 = TestRepo.insert!(%CompositePk{a: 1, b: 3, name: "second"}) + + assert CompositePk |> first |> TestRepo.one == c1 + assert CompositePk |> last |> TestRepo.one == c2 + + changeset = Ecto.Changeset.cast(c1, %{name: "first change"}, ~w(name)a) + c1 = TestRepo.update!(changeset) + assert TestRepo.get_by!(CompositePk, %{a: 1, b: 2}) == c1 + + TestRepo.delete!(c2) + assert TestRepo.all(CompositePk) == [c1] + + assert_raise ArgumentError, ~r"to have exactly one primary key", fn -> + TestRepo.get(CompositePk, []) + end + + assert_raise 
ArgumentError, ~r"to have exactly one primary key", fn -> + TestRepo.get!(CompositePk, [1, 2]) + end + end + + @tag :composite_pk + test "insert, update and delete with associated composite pk" do + user = TestRepo.insert!(%User{}) + post = TestRepo.insert!(%Post{title: "post title"}) + + user_post = TestRepo.insert!(%PostUserCompositePk{user_id: user.id, post_id: post.id}) + assert TestRepo.get_by!(PostUserCompositePk, [user_id: user.id, post_id: post.id]) == user_post + TestRepo.delete!(user_post) + assert TestRepo.all(PostUserCompositePk) == [] + end + + @tag :invalid_prefix + test "insert, update and delete with invalid prefix" do + post = TestRepo.insert!(%Post{}) + changeset = Ecto.Changeset.change(post, title: "foo") + assert catch_error(TestRepo.insert(%Post{}, prefix: "oops")) + assert catch_error(TestRepo.update(changeset, prefix: "oops")) + assert catch_error(TestRepo.delete(changeset, prefix: "oops")) + + # Check we can still insert the post after the invalid prefix attempt + assert %Post{id: _} = TestRepo.insert!(%Post{}) + end + + test "insert and update with changeset" do + # On insert we merge the fields and changes + changeset = Ecto.Changeset.cast(%Post{visits: 13, title: "wrong"}, + %{"title" => "hello", "temp" => "unknown"}, ~w(title temp)a) + + post = TestRepo.insert!(changeset) + assert %Post{visits: 13, title: "hello", temp: "unknown"} = post + assert %Post{visits: 13, title: "hello", temp: "temp"} = TestRepo.get!(Post, post.id) + + # On update we merge only fields, direct schema changes are discarded + changeset = Ecto.Changeset.cast(%{post | visits: 17}, + %{"title" => "world", "temp" => "unknown"}, ~w(title temp)a) + + assert %Post{visits: 17, title: "world", temp: "unknown"} = TestRepo.update!(changeset) + assert %Post{visits: 13, title: "world", temp: "temp"} = TestRepo.get!(Post, post.id) + end + + test "insert and update with empty changeset" do + # On insert we merge the fields and changes + changeset = 
Ecto.Changeset.cast(%Permalink{}, %{}, ~w()) + assert %Permalink{} = permalink = TestRepo.insert!(changeset) + + # Assert we can update the same value twice, + # without changes, without triggering stale errors. + changeset = Ecto.Changeset.cast(permalink, %{}, ~w()) + assert TestRepo.update!(changeset) == permalink + assert TestRepo.update!(changeset) == permalink + end + + @tag :no_primary_key + test "insert with no primary key" do + assert %Barebone{num: nil} = TestRepo.insert!(%Barebone{}) + assert %Barebone{num: 13} = TestRepo.insert!(%Barebone{num: 13}) + end + + @tag :read_after_writes + test "insert and update with changeset read after writes" do + defmodule RAW do + use Ecto.Schema + + schema "comments" do + field :text, :string + field :lock_version, :integer, read_after_writes: true + end + end + + changeset = Ecto.Changeset.cast(struct(RAW, %{}), %{}, ~w()) + + # If the field is nil, we will not send it + # and read the value back from the database. + assert %{id: cid, lock_version: 1} = raw = TestRepo.insert!(changeset) + + # Set the counter to 11, so we can read it soon + TestRepo.update_all from(u in RAW, where: u.id == ^cid), set: [lock_version: 11] + + # We will read back on update too + changeset = Ecto.Changeset.cast(raw, %{"text" => "0"}, ~w(text)a) + assert %{id: ^cid, lock_version: 11, text: "0"} = TestRepo.update!(changeset) + end + + test "insert autogenerates for custom type" do + post = TestRepo.insert!(%Post{uuid: nil}) + assert byte_size(post.uuid) == 36 + assert TestRepo.get_by(Post, uuid: post.uuid) == post + end + + @tag :id_type + test "insert autogenerates for custom id type" do + defmodule ID do + use Ecto.Schema + + @primary_key {:id, CustomPermalink, autogenerate: true} + schema "posts" do + end + end + + id = TestRepo.insert!(struct(ID, id: nil)) + assert id.id + assert TestRepo.get_by(ID, id: "#{id.id}-hello") == id + end + + @tag :id_type + @tag :assigns_id_type + test "insert with user-assigned primary key" do + assert 
%Post{id: 1} = TestRepo.insert!(%Post{id: 1}) + end + + @tag :id_type + @tag :assigns_id_type + test "insert and update with user-assigned primary key in changeset" do + changeset = Ecto.Changeset.cast(%Post{id: 11}, %{"id" => "13"}, ~w(id)a) + assert %Post{id: 13} = post = TestRepo.insert!(changeset) + + changeset = Ecto.Changeset.cast(post, %{"id" => "15"}, ~w(id)a) + assert %Post{id: 15} = TestRepo.update!(changeset) + end + + test "insert and fetch a schema with utc timestamps" do + datetime = DateTime.from_unix!(System.os_time(:second), :second) + TestRepo.insert!(%User{inserted_at: datetime}) + assert [%{inserted_at: ^datetime}] = TestRepo.all(User) + end + + test "optimistic locking in update/delete operations" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 2] + base_comment = TestRepo.insert!(%Comment{}) + + changeset_ok = + base_comment + |> cast(%{"text" => "foo.bar"}, ~w(text)a) + |> optimistic_lock(:lock_version) + TestRepo.update!(changeset_ok) + + changeset_stale = + base_comment + |> cast(%{"text" => "foo.bat"}, ~w(text)a) + |> optimistic_lock(:lock_version) + + assert_raise Ecto.StaleEntryError, fn -> TestRepo.update!(changeset_stale) end + assert_raise Ecto.StaleEntryError, fn -> TestRepo.delete!(changeset_stale) end + end + + test "optimistic locking in update operation with nil field" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 3] + + base_comment = + %Comment{} + |> cast(%{lock_version: nil}, [:lock_version]) + |> TestRepo.insert!() + + incrementer = + fn + nil -> 1 + old_value -> old_value + 1 + end + + changeset_ok = + base_comment + |> cast(%{"text" => "foo.bar"}, ~w(text)a) + |> optimistic_lock(:lock_version, incrementer) + + updated = TestRepo.update!(changeset_ok) + assert updated.text == "foo.bar" + assert updated.lock_version == 1 + end + + test "optimistic locking in delete operation with nil field" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 3] + + base_comment = + %Comment{} + |> 
cast(%{lock_version: nil}, [:lock_version]) + |> TestRepo.insert!() + + incrementer = + fn + nil -> 1 + old_value -> old_value + 1 + end + + changeset_ok = optimistic_lock(base_comment, :lock_version, incrementer) + TestRepo.delete!(changeset_ok) + + refute TestRepo.get(Comment, base_comment.id) + end + + @tag :unique_constraint + test "unique constraint" do + changeset = Ecto.Changeset.change(%Post{}, uuid: Ecto.UUID.generate()) + {:ok, _} = TestRepo.insert(changeset) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "\"posts_uuid_index\" (unique_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." + assert exception.message =~ "call `unique_constraint/3`" + + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.unique_constraint(:uuid, name: :posts_email_changeset) + |> TestRepo.insert() + end + + assert exception.message =~ "\"posts_email_changeset\" (unique_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.unique_constraint(:uuid) + |> TestRepo.insert() + assert changeset.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "posts_uuid_index"]}] + assert changeset.data.__meta__.state == :built + end + + @tag :unique_constraint + test "unique constraint from association" do + uuid = Ecto.UUID.generate() + post = & %Post{} |> Ecto.Changeset.change(uuid: &1) |> Ecto.Changeset.unique_constraint(:uuid) + + {:error, changeset} = + TestRepo.insert %User{ + comments: [%Comment{}], + permalink: %Permalink{}, + posts: [post.(uuid), post.(uuid), post.(Ecto.UUID.generate())] + } + + [_, p2, _] = changeset.changes.posts + assert p2.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "posts_uuid_index"]}] + end + + 
@tag :id_type + @tag :unique_constraint + test "unique constraint with binary_id" do + changeset = Ecto.Changeset.change(%Custom{}, uuid: Ecto.UUID.generate()) + {:ok, _} = TestRepo.insert(changeset) + + {:error, changeset} = + changeset + |> Ecto.Changeset.unique_constraint(:uuid) + |> TestRepo.insert() + assert changeset.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "customs_uuid_index"]}] + assert changeset.data.__meta__.state == :built + end + + test "unique pseudo-constraint violation error message with join table at the repository" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:unique_users) + + user = + TestRepo.insert!(%User{name: "some user"}) + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:unique_users, [user, user]) + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{unique_users: [%{}, %{id: ["has already been taken"]}]} + refute changeset.valid? + end + + @tag :join + @tag :unique_constraint + test "unique constraint violation error message with join table in single changeset" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:constraint_users) + + user = + TestRepo.insert!(%User{name: "some user"}) + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user, user]) + |> Ecto.Changeset.unique_constraint(:user, + name: :posts_users_composite_pk_post_id_user_id_index, + message: "has already been assigned") + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{constraint_users: [%{}, %{user: ["has already been assigned"]}]} + + refute changeset.valid? 
+ end + + @tag :join + @tag :unique_constraint + test "unique constraint violation error message with join table and separate changesets" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:constraint_users) + + user = TestRepo.insert!(%User{name: "some user"}) + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user]) + |> TestRepo.update + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user]) + |> Ecto.Changeset.unique_constraint(:user, + name: :posts_users_composite_pk_post_id_user_id_index, + message: "has already been assigned") + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{constraint_users: [%{user: ["has already been assigned"]}]} + + refute changeset.valid? + end + + @tag :foreign_key_constraint + test "foreign key constraint" do + changeset = Ecto.Changeset.change(%Comment{post_id: 0}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "\"comments_post_id_fkey\" (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." 
+ assert exception.message =~ "call `foreign_key_constraint/3`" + + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.foreign_key_constraint(:post_id, name: :comments_post_id_other) + |> TestRepo.insert() + end + + assert exception.message =~ "\"comments_post_id_other\" (foreign_key_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.foreign_key_constraint(:post_id) + |> TestRepo.insert() + assert changeset.errors == [post_id: {"does not exist", [constraint: :foreign, constraint_name: "comments_post_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "assoc constraint" do + changeset = Ecto.Changeset.change(%Comment{post_id: 0}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "\"comments_post_id_fkey\" (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." 
+ + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.assoc_constraint(:post, name: :comments_post_id_other) + |> TestRepo.insert() + end + + assert exception.message =~ "\"comments_post_id_other\" (foreign_key_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.assoc_constraint(:post) + |> TestRepo.insert() + assert changeset.errors == [post: {"does not exist", [constraint: :assoc, constraint_name: "comments_post_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "no assoc constraint error" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to delete struct/, fn -> + TestRepo.delete!(user) + end + + assert exception.message =~ "\"permalinks_user_id_fkey\" (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." 
+ end + + @tag :foreign_key_constraint + test "no assoc constraint with changeset mismatch" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + message = ~r/constraint error when attempting to delete struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + user + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:permalink, name: :permalinks_user_id_pther) + |> TestRepo.delete() + end + + assert exception.message =~ "\"permalinks_user_id_pther\" (foreign_key_constraint)" + end + + @tag :foreign_key_constraint + test "no assoc constraint with changeset match" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + {:error, changeset} = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:permalink) + |> TestRepo.delete() + assert changeset.errors == [permalink: {"is still associated with this entry", [constraint: :no_assoc, constraint_name: "permalinks_user_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "insert and update with embeds during failing child foreign key" do + changeset = + Order + |> struct(%{}) + |> order_changeset(%{item: %{price: 10}, permalink: %{post_id: 0}}) + + {:error, changeset} = TestRepo.insert(changeset) + assert %Ecto.Changeset{} = changeset.changes.item + + order = + Order + |> struct(%{}) + |> order_changeset(%{}) + |> TestRepo.insert!() + |> TestRepo.preload([:permalink]) + + changeset = order_changeset(order, %{item: %{price: 10}, permalink: %{post_id: 0}}) + assert %Ecto.Changeset{} = changeset.changes.item + + {:error, changeset} = TestRepo.update(changeset) + assert %Ecto.Changeset{} = changeset.changes.item + end + + def order_changeset(order, params) do + order + |> Ecto.Changeset.cast(params, [:permalink_id]) + |> Ecto.Changeset.cast_embed(:item, with: &item_changeset/2) + |> Ecto.Changeset.cast_assoc(:permalink, with: &permalink_changeset/2) + end + + def item_changeset(item, params) do + 
item + |> Ecto.Changeset.cast(params, [:price]) + end + + def permalink_changeset(comment, params) do + comment + |> Ecto.Changeset.cast(params, [:post_id]) + |> Ecto.Changeset.assoc_constraint(:post) + end + + test "unsafe_validate_unique/4" do + {:ok, inserted_post} = TestRepo.insert(%Post{title: "Greetings", visits: 13}) + new_post_changeset = Post.changeset(%Post{}, %{title: "Greetings", visits: 17}) + + changeset = Ecto.Changeset.unsafe_validate_unique(new_post_changeset, [:title], TestRepo) + assert changeset.errors[:title] == + {"has already been taken", validation: :unsafe_unique, fields: [:title]} + + changeset = Ecto.Changeset.unsafe_validate_unique(new_post_changeset, [:title, :text], TestRepo) + assert changeset.errors[:title] == nil + + update_changeset = Post.changeset(inserted_post, %{visits: 17}) + changeset = Ecto.Changeset.unsafe_validate_unique(update_changeset, [:title], TestRepo) + assert changeset.errors[:title] == nil # cannot conflict with itself + end + + test "unsafe_validate_unique/4 with composite keys" do + {:ok, inserted_post} = TestRepo.insert(%CompositePk{a: 123, b: 456, name: "UniqueName"}) + + different_pk = CompositePk.changeset(%CompositePk{}, %{name: "UniqueName", a: 789, b: 321}) + changeset = Ecto.Changeset.unsafe_validate_unique(different_pk, [:name], TestRepo) + assert changeset.errors[:name] == + {"has already been taken", validation: :unsafe_unique, fields: [:name]} + + partial_pk = CompositePk.changeset(%CompositePk{}, %{name: "UniqueName", a: 789, b: 456}) + changeset = Ecto.Changeset.unsafe_validate_unique(partial_pk, [:name], TestRepo) + assert changeset.errors[:name] == + {"has already been taken", validation: :unsafe_unique, fields: [:name]} + + update_changeset = CompositePk.changeset(inserted_post, %{name: "NewName"}) + changeset = Ecto.Changeset.unsafe_validate_unique(update_changeset, [:name], TestRepo) + assert changeset.valid? 
+ assert changeset.errors[:name] == nil # cannot conflict with itself + end + + test "get(!)" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + + assert post1 == TestRepo.get(Post, post1.id) + assert post2 == TestRepo.get(Post, to_string post2.id) # With casting + + assert post1 == TestRepo.get!(Post, post1.id) + assert post2 == TestRepo.get!(Post, to_string post2.id) # With casting + + TestRepo.delete!(post1) + + assert TestRepo.get(Post, post1.id) == nil + assert_raise Ecto.NoResultsError, fn -> + TestRepo.get!(Post, post1.id) + end + end + + test "get(!) with custom source" do + custom = Ecto.put_meta(%Custom{}, source: "posts") + custom = TestRepo.insert!(custom) + bid = custom.bid + assert %Custom{bid: ^bid, __meta__: %{source: "posts"}} = + TestRepo.get(from(c in {"posts", Custom}), bid) + end + + test "get(!) with binary_id" do + custom = TestRepo.insert!(%Custom{}) + bid = custom.bid + assert %Custom{bid: ^bid} = TestRepo.get(Custom, bid) + end + + test "get_by(!)" do + post1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.get_by(Post, id: post1.id) + assert post1 == TestRepo.get_by(Post, title: post1.title) + assert post1 == TestRepo.get_by(Post, id: post1.id, title: post1.title) + assert post2 == TestRepo.get_by(Post, id: to_string(post2.id)) # With casting + assert nil == TestRepo.get_by(Post, title: "hey") + assert nil == TestRepo.get_by(Post, id: post2.id, visits: 3) + + assert post1 == TestRepo.get_by!(Post, id: post1.id) + assert post1 == TestRepo.get_by!(Post, title: post1.title) + assert post1 == TestRepo.get_by!(Post, id: post1.id, visits: 1) + assert post2 == TestRepo.get_by!(Post, id: to_string(post2.id)) # With casting + + assert post1 == TestRepo.get_by!(Post, %{id: post1.id}) + + assert_raise Ecto.NoResultsError, fn -> + TestRepo.get_by!(Post, id: post2.id, title: "hey") + end + end + + test "reload" do + post1 
= TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.reload(post1) + assert [post1, post2] == TestRepo.reload([post1, post2]) + assert [post1, post2, nil] == TestRepo.reload([post1, post2, %Post{id: 0}]) + assert nil == TestRepo.reload(%Post{id: 0}) + + # keeps order as received in the params + assert [post2, post1] == TestRepo.reload([post2, post1]) + + TestRepo.update_all(Post, inc: [visits: 1]) + + assert [%{visits: 2}, %{visits: 3}] = TestRepo.reload([post1, post2]) + end + + test "reload ignores preloads" do + post = TestRepo.insert!(%Post{title: "1", visits: 1}) |> TestRepo.preload(:comments) + + assert %{comments: %Ecto.Association.NotLoaded{}} = TestRepo.reload(post) + end + + test "reload!" do + post1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.reload!(post1) + assert [post1, post2] == TestRepo.reload!([post1, post2]) + + assert_raise RuntimeError, ~r"could not reload", fn -> + TestRepo.reload!([post1, post2, %Post{id: -1}]) + end + + assert_raise Ecto.NoResultsError, fn -> + TestRepo.reload!(%Post{id: -1}) + end + + assert [post2, post1] == TestRepo.reload([post2, post1]) + + TestRepo.update_all(Post, inc: [visits: 1]) + + assert [%{visits: 2}, %{visits: 3}] = TestRepo.reload!([post1, post2]) + end + + test "first, last and one(!)" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + + assert post1 == Post |> first |> TestRepo.one + assert post2 == Post |> last |> TestRepo.one + + query = from p in Post, order_by: p.title + assert post1 == query |> first |> TestRepo.one + assert post2 == query |> last |> TestRepo.one + + query = from p in Post, order_by: [desc: p.title], limit: 10 + assert post2 == query |> first |> TestRepo.one + assert post1 == query |> last |> TestRepo.one + + query = from p in Post, where: is_nil(p.id) + refute query |> 
first |> TestRepo.one + refute query |> last |> TestRepo.one + assert_raise Ecto.NoResultsError, fn -> query |> first |> TestRepo.one! end + assert_raise Ecto.NoResultsError, fn -> query |> last |> TestRepo.one! end + end + + test "exists?" do + TestRepo.insert!(%Post{title: "1", visits: 2}) + TestRepo.insert!(%Post{title: "2", visits: 1}) + + query = from p in Post, where: not is_nil(p.title), limit: 2 + assert query |> TestRepo.exists? == true + + query = from p in Post, where: p.title == "1", select: p.title + assert query |> TestRepo.exists? == true + + query = from p in Post, where: is_nil(p.id) + assert query |> TestRepo.exists? == false + + query = from p in Post, where: is_nil(p.id) + assert query |> TestRepo.exists? == false + + query = from(p in Post, select: {p.visits, avg(p.visits)}, group_by: p.visits, having: avg(p.visits) > 1) + assert query |> TestRepo.exists? == true + end + + test "aggregate" do + assert TestRepo.aggregate(Post, :max, :visits) == nil + + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + # Barebones + assert TestRepo.aggregate(Post, :max, :visits) == 14 + assert TestRepo.aggregate(Post, :min, :visits) == 10 + assert TestRepo.aggregate(Post, :count, :visits) == 4 + assert "50" = to_string(TestRepo.aggregate(Post, :sum, :visits)) + + # With order_by + query = from Post, order_by: [asc: :visits] + assert TestRepo.aggregate(query, :max, :visits) == 14 + + # With order_by and limit + query = from Post, order_by: [asc: :visits], limit: 2 + assert TestRepo.aggregate(query, :max, :visits) == 12 + end + + @tag :decimal_precision + test "aggregate avg" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + assert "12.5" <> _ = to_string(TestRepo.aggregate(Post, :avg, :visits)) + end + + @tag :inline_order_by + test "aggregate with 
distinct" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + query = from Post, order_by: [asc: :visits], distinct: true + assert TestRepo.aggregate(query, :count, :visits) == 3 + end + + @tag :insert_cell_wise_defaults + test "insert all" do + assert {2, nil} = TestRepo.insert_all("comments", [[text: "1"], %{text: "2", lock_version: 2}]) + assert {2, nil} = TestRepo.insert_all({"comments", Comment}, [[text: "3"], %{text: "4", lock_version: 2}]) + assert [%Comment{text: "1", lock_version: 1}, + %Comment{text: "2", lock_version: 2}, + %Comment{text: "3", lock_version: 1}, + %Comment{text: "4", lock_version: 2}] = TestRepo.all(Comment) + + assert {2, nil} = TestRepo.insert_all(Post, [[], []]) + assert [%Post{}, %Post{}] = TestRepo.all(Post) + + assert {0, nil} = TestRepo.insert_all("posts", []) + assert {0, nil} = TestRepo.insert_all({"posts", Post}, []) + end + + @tag :insert_select + test "insert all with query for single fields" do + comment = TestRepo.insert!(%Comment{text: "1", lock_version: 1}) + + text_query = from(c in Comment, select: c.text, where: [id: ^comment.id, lock_version: 1]) + + lock_version_query = from(c in Comment, select: c.lock_version, where: [id: ^comment.id]) + + rows = [ + [text: "2", lock_version: lock_version_query], + [lock_version: lock_version_query, text: "3"], + [text: text_query], + [text: text_query, lock_version: lock_version_query], + [lock_version: 6, text: "6"] + ] + assert {5, nil} = TestRepo.insert_all(Comment, rows, []) + + inserted_rows = Comment + |> where([c], c.id != ^comment.id) + |> TestRepo.all() + + assert [%Comment{text: "2", lock_version: 1}, + %Comment{text: "3", lock_version: 1}, + %Comment{text: "1"}, + %Comment{text: "1", lock_version: 1}, + %Comment{text: "6", lock_version: 6}] = inserted_rows + end + + describe "insert_all with source query" do + @tag :upsert_all + @tag :with_conflict_target + @tag 
:concat + test "insert_all with query and conflict target" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source, conflict_target: [:id], on_conflict: :replace_all) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + + assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + + @tag :returning + @tag :concat + test "insert_all with query and returning" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, returns} = TestRepo.insert_all(Post, source, returning: [:id, :title]) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + assert [%Post{id: ^expected_id, title: ^expected_title}] = returns + end + + @tag :upsert_all + @tag :without_conflict_target + @tag :concat + test "insert_all with query and on_conflict" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source, on_conflict: :replace_all) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + + assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + + @tag :concat + test "insert_all with query" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source) + + expected_id = id + 1 + expected_title = "A generic title suffix 
#{id}" + + assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + + test "insert_all with query and source field" do + %{id: post_id} = TestRepo.insert!(%Post{}) + TestRepo.insert!(%Permalink{url: "url", title: "title"}) + + source = from p in Permalink, select: %{url: p.title, post_id: ^post_id} + assert {1, _} = TestRepo.insert_all(Permalink, source) + end + end + + @tag :invalid_prefix + @tag :insert_cell_wise_defaults + test "insert all with invalid prefix" do + assert catch_error(TestRepo.insert_all(Post, [[], []], prefix: "oops")) + end + + @tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning with schema" do + assert {0, []} = TestRepo.insert_all(Comment, [], returning: true) + assert {0, nil} = TestRepo.insert_all(Comment, [], returning: false) + + {2, [c1, c2]} = TestRepo.insert_all(Comment, [[text: "1"], [text: "2"]], returning: [:id, :text]) + assert %Comment{text: "1", __meta__: %{state: :loaded}} = c1 + assert %Comment{text: "2", __meta__: %{state: :loaded}} = c2 + + {2, [c1, c2]} = TestRepo.insert_all(Comment, [[text: "3"], [text: "4"]], returning: true) + assert %Comment{text: "3", __meta__: %{state: :loaded}} = c1 + assert %Comment{text: "4", __meta__: %{state: :loaded}} = c2 + end + + @tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning with schema with field source" do + assert {0, []} = TestRepo.insert_all(Permalink, [], returning: true) + assert {0, nil} = TestRepo.insert_all(Permalink, [], returning: false) + + {2, [c1, c2]} = TestRepo.insert_all(Permalink, [[url: "1"], [url: "2"]], returning: [:id, :url]) + assert %Permalink{url: "1", __meta__: %{state: :loaded}} = c1 + assert %Permalink{url: "2", __meta__: %{state: :loaded}} = c2 + + {2, [c1, c2]} = TestRepo.insert_all(Permalink, [[url: "3"], [url: "4"]], returning: true) + assert %Permalink{url: "3", __meta__: %{state: :loaded}} = c1 + assert %Permalink{url: "4", __meta__: %{state: :loaded}} = c2 + end + + 
@tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning without schema" do + {2, [c1, c2]} = TestRepo.insert_all("comments", [[text: "1"], [text: "2"]], returning: [:id, :text]) + assert %{id: _, text: "1"} = c1 + assert %{id: _, text: "2"} = c2 + + assert_raise ArgumentError, fn -> + TestRepo.insert_all("comments", [[text: "1"], [text: "2"]], returning: true) + end + end + + @tag :insert_cell_wise_defaults + test "insert all with dumping" do + uuid = Ecto.UUID.generate() + assert {1, nil} = TestRepo.insert_all(Post, [%{uuid: uuid}]) + assert [%Post{uuid: ^uuid, title: nil}] = TestRepo.all(Post) + end + + @tag :insert_cell_wise_defaults + test "insert all autogenerates for binary_id type" do + custom = TestRepo.insert!(%Custom{bid: nil}) + assert custom.bid + assert TestRepo.get(Custom, custom.bid) + assert TestRepo.delete!(custom) + refute TestRepo.get(Custom, custom.bid) + + uuid = Ecto.UUID.generate() + assert {2, nil} = TestRepo.insert_all(Custom, [%{uuid: uuid}, %{bid: custom.bid}]) + assert [%Custom{bid: bid2, uuid: nil}, + %Custom{bid: bid1, uuid: ^uuid}] = Enum.sort_by(TestRepo.all(Custom), & &1.uuid) + assert bid1 && bid2 + assert custom.bid != bid1 + assert custom.bid == bid2 + end + + describe "placeholders" do + @describetag :placeholders + + test "Repo.insert_all fills in placeholders" do + placeholders = %{foo: 100, bar: "test"} + bar_ph = {:placeholder, :bar} + foo_ph = {:placeholder, :foo} + + entries = [ + %{intensity: 1.0, title: bar_ph, posted: ~D[2020-12-21], visits: foo_ph}, + %{intensity: 2.0, title: bar_ph, posted: ~D[2000-12-21], visits: foo_ph} + ] |> Enum.map(&Map.put(&1, :uuid, Ecto.UUID.generate)) + + TestRepo.insert_all(Post, entries, placeholders: placeholders) + + query = from(p in Post, select: {p.intensity, p.title, p.visits}) + assert [{1.0, "test", 100}, {2.0, "test", 100}] == TestRepo.all(query) + end + + test "Repo.insert_all accepts non-atom placeholder keys" do + placeholders = %{10 => "integer 
key", {:foo, :bar} => "tuple key"} + entries = [%{text: {:placeholder, 10}}, %{text: {:placeholder, {:foo, :bar}}}] + TestRepo.insert_all(Comment, entries, placeholders: placeholders) + + query = from(c in Comment, select: c.text) + assert ["integer key", "tuple key"] == TestRepo.all(query) + end + + test "Repo.insert_all fills in placeholders with keyword list entries" do + TestRepo.insert_all(Barebone, [[num: {:placeholder, :foo}]], placeholders: %{foo: 100}) + + query = from(b in Barebone, select: b.num) + assert [100] == TestRepo.all(query) + end + + @tag :upsert_all + @tag :with_conflict_target + test "Repo.insert_all upserts and fills in placeholders with conditioned on_conflict query" do + do_not_update_title = "don't touch me" + + posted_value = + from p in Post, where: p.public == ^true and p.id > ^0, select: p.posted, limit: 1 + + on_conflict = + from p in Post, update: [set: [title: "updated"]], where: p.title != ^do_not_update_title + + placeholders = %{visits: 1, title: "title"} + + post1 = [ + visits: {:placeholder, :visits}, + title: {:placeholder, :title}, + uuid: Ecto.UUID.generate(), + posted: posted_value + ] + + post2 = [ + title: do_not_update_title, + uuid: Ecto.UUID.generate(), + posted: posted_value + ] + + assert TestRepo.insert_all(Post, [post1, post2], + placeholders: placeholders, + on_conflict: on_conflict, + conflict_target: [:uuid] + ) == + {2, nil} + + # only update first post + assert TestRepo.insert_all(Post, [post1, post2], + placeholders: placeholders, + on_conflict: on_conflict, + conflict_target: [:uuid] + ) == + {1, nil} + + assert TestRepo.aggregate(where(Post, title: "updated"), :count) == 1 + end + end + + test "update all" do + assert post1 = TestRepo.insert!(%Post{title: "1"}) + assert post2 = TestRepo.insert!(%Post{title: "2"}) + assert post3 = TestRepo.insert!(%Post{title: "3"}) + + assert {3, nil} = TestRepo.update_all(Post, set: [title: "x"]) + + assert %Post{title: "x"} = TestRepo.reload(post1) + assert %Post{title: 
"x"} = TestRepo.reload(post2) + assert %Post{title: "x"} = TestRepo.reload(post3) + + assert {3, nil} = TestRepo.update_all("posts", [set: [title: nil]]) + + assert %Post{title: nil} = TestRepo.reload(post1) + assert %Post{title: nil} = TestRepo.reload(post2) + assert %Post{title: nil} = TestRepo.reload(post3) + end + + @tag :invalid_prefix + test "update all with invalid prefix" do + assert catch_error(TestRepo.update_all(Post, [set: [title: "x"]], prefix: "oops")) + end + + @tag :returning + test "update all with returning with schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.update_all(select(Post, [p], p), [set: [title: "x"]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: "x"} = p1 + assert %Post{id: ^id2, title: "x"} = p2 + assert %Post{id: ^id3, title: "x"} = p3 + + assert {3, posts} = TestRepo.update_all(select(Post, [:id, :visits]), [set: [visits: 11]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: nil, visits: 11} = p1 + assert %Post{id: ^id2, title: nil, visits: 11} = p2 + assert %Post{id: ^id3, title: nil, visits: 11} = p3 + end + + @tag :returning + test "update all with returning without schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.update_all(select("posts", [:id, :title]), [set: [title: "x"]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert p1 == %{id: id1, title: "x"} + assert p2 == %{id: id2, title: "x"} + assert p3 == %{id: id3, title: "x"} + end + + test "update all with filter" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert 
%Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "1" or p.title == "2", + update: [set: [visits: ^17]]) + assert {2, nil} = TestRepo.update_all(query, set: [title: "x"]) + + assert %Post{title: "x", visits: 17} = TestRepo.get(Post, id1) + assert %Post{title: "x", visits: 17} = TestRepo.get(Post, id2) + assert %Post{title: "3", visits: nil} = TestRepo.get(Post, id3) + end + + test "update all no entries" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "4") + assert {0, nil} = TestRepo.update_all(query, set: [title: "x"]) + + assert %Post{title: "1"} = TestRepo.get(Post, id1) + assert %Post{title: "2"} = TestRepo.get(Post, id2) + assert %Post{title: "3"} = TestRepo.get(Post, id3) + end + + test "update all increment syntax" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1", visits: 0}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2", visits: 1}) + + # Positive + query = from p in Post, where: not is_nil(p.id), update: [inc: [visits: 2]] + assert {2, nil} = TestRepo.update_all(query, []) + + assert %Post{visits: 2} = TestRepo.get(Post, id1) + assert %Post{visits: 3} = TestRepo.get(Post, id2) + + # Negative + query = from p in Post, where: not is_nil(p.id), update: [inc: [visits: -1]] + assert {2, nil} = TestRepo.update_all(query, []) + + assert %Post{visits: 1} = TestRepo.get(Post, id1) + assert %Post{visits: 2} = TestRepo.get(Post, id2) + end + + @tag :id_type + test "update all with casting and dumping on id type field" do + assert %Post{id: id1} = TestRepo.insert!(%Post{}) + assert {1, nil} = TestRepo.update_all(Post, set: [counter: to_string(id1)]) + assert %Post{counter: ^id1} = TestRepo.get(Post, id1) + end + + test "update all with casting and dumping" do + visits = 13 + datetime = ~N[2014-01-16 
20:26:51] + assert %Post{id: id} = TestRepo.insert!(%Post{}) + + assert {1, nil} = TestRepo.update_all(Post, set: [visits: visits, inserted_at: datetime]) + assert %Post{visits: 13, inserted_at: ^datetime} = TestRepo.get(Post, id) + end + + test "delete all" do + assert %Post{} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, nil} = TestRepo.delete_all(Post) + assert [] = TestRepo.all(Post) + end + + @tag :invalid_prefix + test "delete all with invalid prefix" do + assert catch_error(TestRepo.delete_all(Post, prefix: "oops")) + end + + @tag :returning + test "delete all with returning with schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.delete_all(select(Post, [p], p)) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: "1"} = p1 + assert %Post{id: ^id2, title: "2"} = p2 + assert %Post{id: ^id3, title: "3"} = p3 + end + + @tag :returning + test "delete all with returning without schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.delete_all(select("posts", [:id, :title])) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert p1 == %{id: id1, title: "1"} + assert p2 == %{id: id2, title: "2"} + assert p3 == %{id: id3, title: "3"} + end + + test "delete all with filter" do + assert %Post{} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "1" or p.title == "2") + assert {2, nil} = TestRepo.delete_all(query) + assert [%Post{}] = 
TestRepo.all(Post) + end + + test "delete all no entries" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "4") + assert {0, nil} = TestRepo.delete_all(query) + assert %Post{title: "1"} = TestRepo.get(Post, id1) + assert %Post{title: "2"} = TestRepo.get(Post, id2) + assert %Post{title: "3"} = TestRepo.get(Post, id3) + end + + test "virtual field" do + assert %Post{id: id} = TestRepo.insert!(%Post{title: "1"}) + assert TestRepo.get(Post, id).temp == "temp" + end + + ## Query syntax + + defmodule Foo do + defstruct [:title] + end + + describe "query select" do + test "expressions" do + %Post{} = TestRepo.insert!(%Post{title: "1", visits: 13}) + + assert [{"1", 13}] == + TestRepo.all(from p in Post, select: {p.title, p.visits}) + + assert [["1", 13]] == + TestRepo.all(from p in Post, select: [p.title, p.visits]) + + assert [%{:title => "1", 3 => 13, "visits" => 13}] == + TestRepo.all(from p in Post, select: %{ + :title => p.title, + "visits" => p.visits, + 3 => p.visits + }) + + assert [%{:title => "1", "1" => 13, "visits" => 13}] == + TestRepo.all(from p in Post, select: %{ + :title => p.title, + p.title => p.visits, + "visits" => p.visits + }) + + assert [%Foo{title: "1"}] == + TestRepo.all(from p in Post, select: %Foo{title: p.title}) + end + + test "map update" do + %Post{} = TestRepo.insert!(%Post{title: "1", visits: 13}) + + assert [%Post{:title => "new title", visits: 13}] = + TestRepo.all(from p in Post, select: %{p | title: "new title"}) + + assert [%Post{title: "new title", visits: 13}] = + TestRepo.all(from p in Post, select: %Post{p | title: "new title"}) + + assert [%Post{:title => "1", visits: -1}] = + TestRepo.all(from p in Post, select: %{p | visits: ^"-1"}) + + assert [%Post{title: "1", visits: -1}] = + TestRepo.all(from p in Post, select: %Post{p | visits: ^"-1"}) + + 
assert_raise KeyError, fn -> + TestRepo.all(from p in Post, select: %{p | unknown: "new title"}) + end + + assert_raise BadMapError, fn -> + TestRepo.all(from p in Post, select: %{p.title | title: "new title"}) + end + + assert_raise ArgumentError, ~r/expected a struct named/, fn -> + TestRepo.all(from p in Post, select: %Foo{p | title: p.title}) + end + end + + test "map update on association" do + p = TestRepo.insert!(%Post{}) + TestRepo.insert!(%Comment{post_id: p.id, text: "comment text"}) + TestRepo.insert!(%Comment{}) + + query = + from(c in Comment, left_join: p in Post, on: c.post_id == p.id, select: %{p | temp: c.text}) + + assert [%Post{:temp => "comment text"}, nil] = TestRepo.all(query) + end + + test "take with structs" do + %{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + [p1, p2, p3] = Post |> select([p], struct(p, [:title])) |> order_by([:title]) |> TestRepo.all + refute p1.id + assert p1.title == "1" + assert match?(%Post{}, p1) + refute p2.id + assert p2.title == "2" + assert match?(%Post{}, p2) + refute p3.id + assert p3.title == "3" + assert match?(%Post{}, p3) + + [p1, p2, p3] = Post |> select([:id]) |> order_by([:id]) |> TestRepo.all + assert %Post{id: ^pid1} = p1 + assert %Post{id: ^pid2} = p2 + assert %Post{id: ^pid3} = p3 + end + + test "take with maps" do + %{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + [p1, p2, p3] = "posts" |> select([p], map(p, [:title])) |> order_by([:title]) |> TestRepo.all + assert p1 == %{title: "1"} + assert p2 == %{title: "2"} + assert p3 == %{title: "3"} + + [p1, p2, p3] = "posts" |> select([:id]) |> order_by([:id]) |> TestRepo.all + assert p1 == %{id: pid1} + assert p2 == %{id: pid2} + assert p3 == %{id: pid3} + end + + test "take with join nil maps" do + TestRepo.insert!(%Post{}) + + assert 
{%{title: nil}, %{title: nil}} == + from(p1 in Post) + |> join(:left, [p1], p2 in Post, on: p1.id == p2.id) + |> select([p1, p2], {map(p1, [:title]), map(p2, [:title])}) + |> TestRepo.one() + end + + test "take with join nil source" do + TestRepo.insert!(%Post{}) + + assert {%{title: nil}, nil} == + from(p1 in Post) + |> join(:left, [p1], p2 in Post, on: p2.id == -1) + |> select([p1, p2], {map(p1, [:title]), p2}) + |> TestRepo.one() + end + + test "take with preload assocs" do + %{id: pid} = TestRepo.insert!(%Post{title: "post"}) + TestRepo.insert!(%Comment{post_id: pid, text: "comment"}) + fields = [:id, :title, comments: [:text, :post_id]] + + [p] = Post |> preload(:comments) |> select([p], ^fields) |> TestRepo.all + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + [p] = Post |> preload(:comments) |> select([p], struct(p, ^fields)) |> TestRepo.all + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + [p] = Post |> preload(:comments) |> select([p], map(p, ^fields)) |> TestRepo.all + assert p == %{id: pid, title: "post", comments: [%{text: "comment", post_id: pid}]} + end + + test "take with nil preload assoc" do + %{id: cid} = TestRepo.insert!(%Comment{text: "comment"}) + fields = [:id, :text, post: [:title]] + + [c] = Comment |> preload(:post) |> select([c], ^fields) |> TestRepo.all + assert %Comment{id: ^cid, text: "comment", post: nil} = c + + [c] = Comment |> preload(:post) |> select([c], struct(c, ^fields)) |> TestRepo.all + assert %Comment{id: ^cid, text: "comment", post: nil} = c + + [c] = Comment |> preload(:post) |> select([c], map(c, ^fields)) |> TestRepo.all + assert c == %{id: cid, text: "comment", post: nil} + end + + test "take with join assocs" do + %{id: pid} = TestRepo.insert!(%Post{title: "post"}) + %{id: cid} = TestRepo.insert!(%Comment{post_id: pid, text: "comment"}) + fields = [:id, :title, comments: [:text, :post_id, :id]] + query = from p in Post, where: p.id == ^pid, 
join: c in assoc(p, :comments), preload: [comments: c] + + p = TestRepo.one(from q in query, select: ^fields) + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + p = TestRepo.one(from q in query, select: struct(q, ^fields)) + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + p = TestRepo.one(from q in query, select: map(q, ^fields)) + assert p == %{id: pid, title: "post", comments: [%{text: "comment", post_id: pid, id: cid}]} + end + + test "take with single nil column" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: nil}) + assert %{counter: nil} = + TestRepo.one(from p in Post, where: p.title == "1", select: [:counter]) + end + + test "take with join assocs and single nil column" do + %{id: post_id} = TestRepo.insert!(%Post{title: "1"}, counter: nil) + TestRepo.insert!(%Comment{post_id: post_id, text: "comment"}) + assert %{counter: nil} == + TestRepo.one(from p in Post, join: c in assoc(p, :comments), where: p.title == "1", select: map(p, [:counter])) + end + + test "field source" do + TestRepo.insert!(%Permalink{url: "url"}) + assert ["url"] = Permalink |> select([p], p.url) |> TestRepo.all() + assert [1] = Permalink |> select([p], count(p.url)) |> TestRepo.all() + end + + test "merge" do + date = Date.utc_today() + %Post{id: post_id} = TestRepo.insert!(%Post{title: "1", counter: nil, posted: date, public: false}) + + # Merge on source + assert [%Post{title: "2"}] = + Post |> select([p], merge(p, %{title: "2"})) |> TestRepo.all() + assert [%Post{title: "2"}] = + Post |> select([p], p) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on struct + assert [%Post{title: "2"}] = + Post |> select([p], merge(%Post{title: p.title}, %{title: "2"})) |> TestRepo.all() + assert [%Post{title: "2"}] = + Post |> select([p], %Post{title: p.title}) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on map + assert [%{title: "2"}] = + Post |> select([p], 
merge(%{title: p.title}, %{title: "2"})) |> TestRepo.all() + assert [%{title: "2"}] = + Post |> select([p], %{title: p.title}) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on outer join with map + %Permalink{} = TestRepo.insert!(%Permalink{post_id: post_id, url: "Q", title: "Z"}) + + # left join record is present + assert [%{url: "Q", title: "1", posted: _date}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id) + |> select([l, p], merge(l, map(p, ^~w(title posted)a))) + |> TestRepo.all() + + assert [%{url: "Q", title: "1", posted: _date}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id) + |> select_merge([_l, p], map(p, ^~w(title posted)a)) + |> TestRepo.all() + + # left join record is not present, we consider it the same as being present with nils + assert [%{url: "Q", title: nil, posted: nil}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id and p.public == true) + |> select([l, p], merge(l, map(p, ^~w(title posted)a))) + |> TestRepo.all() + + assert [%{url: "Q", title: nil, posted: nil}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id and p.public == true) + |> select_merge([_l, p], map(p, ^~w(title posted)a)) + |> TestRepo.all() + end + + test "merge with update on self" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: 1}) + + assert [%Post{title: "1", counter: 2}] = + Post |> select([p], merge(p, %{p | counter: 2})) |> TestRepo.all() + assert [%Post{title: "1", counter: 2}] = + Post |> select([p], p) |> select_merge([p], %{p | counter: 2}) |> TestRepo.all() + end + + test "merge within subquery" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: 1}) + + subquery = + Post + |> select_merge([p], %{p | counter: 2}) + |> subquery() + + assert [%Post{title: "1", counter: 2}] = TestRepo.all(subquery) + end + + @tag :selected_as_with_group_by + test "selected_as/2 with group_by" do + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3}) + 
TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2}) + TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil}) + + query = + from p in Post, + select: %{ + posted: selected_as(p.posted, :date), + min_visits: p.visits |> coalesce(0) |> min() + }, + group_by: selected_as(:date), + order_by: p.posted + + assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] = + TestRepo.all(query) + end + + @tag :selected_as_with_order_by + test "selected_as/2 with order_by" do + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3}) + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2}) + TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil}) + + base_query = + from p in Post, + select: %{ + posted: p.posted, + min_visits: p.visits |> coalesce(0) |> min() |> selected_as(:min_visits) + }, + group_by: p.posted + + # ascending order + results = base_query |> order_by(selected_as(:min_visits)) |> TestRepo.all() + + assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] = + results + + # descending order + results = base_query |> order_by([desc: selected_as(:min_visits)]) |> TestRepo.all() + + assert [%{posted: ~D[2020-12-21], min_visits: 2}, %{posted: ~D[2020-12-20], min_visits: 0}] = + results + end + + @tag :selected_as_with_order_by + test "selected_as/2 respects custom types" do + TestRepo.insert!(%Post{title: "title1", visits: 1}) + TestRepo.insert!(%Post{title: "title2"}) + uuid = Ecto.UUID.generate() + + query = + from p in Post, + select: %{ + uuid: type(^uuid, Ecto.UUID) |> selected_as(:uuid), + visits: p.visits |> coalesce(0) |> selected_as(:visits) + }, + order_by: [selected_as(:uuid), selected_as(:visits)] + + assert [%{uuid: ^uuid, visits: 0}, %{uuid: ^uuid, visits: 1}] = TestRepo.all(query) + end + + @tag :selected_as_with_order_by_expression + test "selected_as/2 with order_by expression" do + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3, intensity: 2.0}) + 
TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil, intensity: 10.0}) + + results = + from(p in Post, + select: %{ + posted: p.posted, + visits: p.visits |> coalesce(0) |> selected_as(:num_visits), + intensity: selected_as(p.intensity, :strength) + }, + order_by: [desc: (selected_as(:num_visits) + selected_as(:strength))] + ) + |> TestRepo.all() + + assert [%{posted: ~D[2020-12-20], visits: 0}, %{posted: ~D[2020-12-21], visits: 3}] = + results + end + + @tag :selected_as_with_having + test "selected_as/2 with having" do + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 3}) + TestRepo.insert!(%Post{posted: ~D[2020-12-21], visits: 2}) + TestRepo.insert!(%Post{posted: ~D[2020-12-20], visits: nil}) + + results = + from(p in Post, + select: %{ + posted: p.posted, + min_visits: p.visits |> coalesce(0) |> min() |> selected_as(:min_visits) + }, + group_by: p.posted, + having: selected_as(:min_visits) > 0, + or_having: not(selected_as(:min_visits) > 0), + order_by: p.posted + ) + |> TestRepo.all() + + assert [%{posted: ~D[2020-12-20], min_visits: 0}, %{posted: ~D[2020-12-21], min_visits: 2}] = results + end + end + + @tag :distinct_count + test "query count distinct" do + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + + assert [3] == Post |> select([p], count(p.title)) |> TestRepo.all + assert [2] == Post |> select([p], count(p.title, :distinct)) |> TestRepo.all + end + + test "query where interpolation" do + post1 = TestRepo.insert!(%Post{title: "hello"}) + post2 = TestRepo.insert!(%Post{title: "goodbye"}) + + assert [post1, post2] == Post |> where([], []) |> TestRepo.all |> Enum.sort_by(& &1.id) + assert [post1] == Post |> where([], [title: "hello"]) |> TestRepo.all + assert [post1] == Post |> where([], [title: "hello", id: ^post1.id]) |> TestRepo.all + + params0 = [] + params1 = [title: "hello"] + params2 = [title: "hello", id: post1.id] + assert [post1, post2] == (from Post, where: 
^params0) |> TestRepo.all |> Enum.sort_by(& &1.id) + assert [post1] == (from Post, where: ^params1) |> TestRepo.all + assert [post1] == (from Post, where: ^params2) |> TestRepo.all + + post3 = TestRepo.insert!(%Post{title: "goodbye", uuid: nil}) + params3 = [title: "goodbye", uuid: post3.uuid] + assert [post3] == (from Post, where: ^params3) |> TestRepo.all + end + + describe "upsert via insert" do + @describetag :upsert + + test "on conflict raise" do + {:ok, inserted} = TestRepo.insert(%Post{title: "first"}, on_conflict: :raise) + assert catch_error(TestRepo.insert(%Post{id: inserted.id, title: "second"}, on_conflict: :raise)) + end + + test "on conflict ignore" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing) + assert inserted.id + assert inserted.__meta__.state == :loaded + + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing) + assert not_inserted.id == nil + assert not_inserted.__meta__.state == :loaded + end + + @tag :with_conflict_target + test "on conflict and associations" do + on_conflict = [set: [title: "second"]] + post = %Post{uuid: Ecto.UUID.generate(), + title: "first", comments: [%Comment{}]} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + end + + @tag :with_conflict_target + test "on conflict with inc" do + uuid = "6fa459ea-ee8a-3ca4-894e-db77e160355e" + post = %Post{title: "first", uuid: uuid} + {:ok, _} = TestRepo.insert(post) + post = %{title: "upsert", uuid: uuid} + TestRepo.insert_all(Post, [post], on_conflict: [inc: [visits: 1]], conflict_target: :uuid) + end + + @tag :with_conflict_target + test "on conflict ignore and conflict target" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + assert 
catch_error(TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:id])) + + # Error on conflict target + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert not_inserted.id == nil + end + + @tag :without_conflict_target + test "on conflict keyword list" do + on_conflict = [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict) + assert inserted.id + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict keyword list and conflict target" do + on_conflict = [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + assert catch_error(TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:id])) + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :returning + @tag :with_conflict_target + test "on conflict keyword list and conflict target and returning" do + {:ok, c1} = TestRepo.insert(%Post{}) + {:ok, c2} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: [:id, :uuid]) + {:ok, c3} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: true) + {:ok, c4} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: false) + + assert c2.uuid == c1.uuid + assert c3.uuid == c1.uuid + assert c4.uuid != c1.uuid + end 
+ + @tag :returning + @tag :with_conflict_target + test "on conflict keyword list and conflict target and returning and field source" do + TestRepo.insert!(%Permalink{url: "old"}) + {:ok, c1} = TestRepo.insert(%Permalink{url: "old"}, + on_conflict: [set: [url: "new1"]], + conflict_target: [:url], + returning: [:url]) + + TestRepo.insert!(%Permalink{url: "old"}) + {:ok, c2} = TestRepo.insert(%Permalink{url: "old"}, + on_conflict: [set: [url: "new2"]], + conflict_target: [:url], + returning: true) + + assert c1.url == "new1" + assert c2.url == "new2" + end + + @tag :returning + @tag :with_conflict_target + test "on conflict ignore and returning" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert inserted.id + + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid], returning: true) + assert not_inserted.id == nil + end + + @tag :without_conflict_target + test "on conflict query" do + on_conflict = from Post, update: [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict) + assert inserted.id + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict query and conflict target" do + on_conflict = from Post, update: [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + assert catch_error(TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:id])) + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + 
assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict query having condition" do + post = %Post{title: "first", counter: 1, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post) + + on_conflict = from Post, where: [counter: 2], update: [set: [title: "second"]] + + insert_options = [ + on_conflict: on_conflict, + conflict_target: [:uuid], + stale_error_field: :counter + ] + + assert {:error, changeset} = TestRepo.insert(post, insert_options) + assert changeset.errors == [counter: {"is stale", [stale: true]}] + + assert TestRepo.get!(Post, inserted.id).title == "first" + end + + @tag :without_conflict_target + test "on conflict replace_all" do + post = %Post{title: "first", visits: 13, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :replace_all) + assert inserted.id + + post = %Post{title: "updated", visits: 17, uuid: post.uuid} + post = TestRepo.insert!(post, on_conflict: :replace_all) + assert post.id != inserted.id + assert post.title == "updated" + assert post.visits == 17 + + assert TestRepo.all(from p in Post, select: {p.id, p.title, p.visits}) == + [{post.id, "updated", 17}] + assert TestRepo.all(from p in Post, select: count(p.id)) == [1] + end + + @tag :with_conflict_target + test "on conflict replace_all and conflict target" do + post = %Post{title: "first", visits: 13, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :replace_all, conflict_target: :uuid) + assert inserted.id + + post = %Post{title: "updated", visits: 17, uuid: post.uuid} + post = TestRepo.insert!(post, on_conflict: :replace_all, conflict_target: :uuid) + assert post.id != inserted.id + assert post.title == "updated" + assert post.visits == 17 + + assert TestRepo.all(from p in Post, select: {p.id, p.title, p.visits}) == + [{post.id, "updated", 17}] + assert TestRepo.all(from p 
in Post, select: count(p.id)) == [1] + end + end + + describe "upsert via insert_all" do + @describetag :upsert_all + + test "on conflict raise" do + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: :raise) + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :raise)) + end + + test "on conflict ignore" do + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == {1, nil} + + # PG returns 0, MySQL returns 1 + {entries, nil} = TestRepo.insert_all(Post, [post], on_conflict: :nothing) + assert entries == 0 or entries == 1 + + assert length(TestRepo.all(Post)) == 1 + end + + @tag :with_conflict_target + test "on conflict ignore and conflict target" do + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:uuid]) == + {0, nil} + end + + @tag :with_conflict_target + test "on conflict keyword list and conflict target" do + on_conflict = [set: [title: "second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :with_conflict_target + @tag :returning + test "on conflict keyword list and conflict target and returning and 
source field" do + on_conflict = [set: [url: "new"]] + permalink = [url: "old"] + + assert {1, [%Permalink{url: "old"}]} = + TestRepo.insert_all(Permalink, [permalink], + on_conflict: on_conflict, conflict_target: [:url], returning: [:url]) + + assert {1, [%Permalink{url: "new"}]} = + TestRepo.insert_all(Permalink, [permalink], + on_conflict: on_conflict, conflict_target: [:url], returning: [:url]) + end + + @tag :with_conflict_target + test "on conflict query and conflict target" do + on_conflict = from p in Post, where: p.id > ^0, update: [set: [title: "second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :insert_select + @tag :with_conflict_target + test "on conflict query and insert select and conflict target" do + on_conflict = from p in Post, where: p.id > ^0, update: [set: [title: "second"]] + visits_value = from p in Post, where: p.public == ^true and p.id > ^0, select: p.visits, limit: 1 + post = [title: "first", uuid: Ecto.UUID.generate(), visits: visits_value] + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :returning + @tag :with_conflict_target + test "on 
conflict query and conflict target and returning" do + on_conflict = from Post, update: [set: [title: "second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, [%{id: id}]} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:uuid], returning: [:id]) + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:id], returning: [:id])) + + # Error on conflict target + {1, [%Post{id: ^id, title: "second"}]} = + TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:uuid], returning: [:id, :title]) + end + + @tag :with_conflict_target + test "source (without an Ecto schema) on conflict query and conflict target" do + on_conflict = [set: [title: "second"]] + {:ok, uuid} = Ecto.UUID.dump(Ecto.UUID.generate()) + post = [title: "first", uuid: uuid] + assert TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :without_conflict_target + test "on conflict replace_all" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: :replace_all) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: :replace_all) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are also replaced + changes = [%{id: post_first.id + 2, title: "first_updated", 
+ visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: :replace_all) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id + 2) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id + 2) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace_all and conflict_target" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: :replace_all, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: :replace_all, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are also replaced + changes = [%{id: post_second.id + 1, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: :replace_all, conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_second.id + 1) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id + 2) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :without_conflict_target + test "on conflict replace_all_except" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = 
%Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: {:replace_all_except, [:id]}) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace_all_except, [:id]}) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are not replaced + changes = [%{id: post_first.id + 2, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace_all_except, [:id]}) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace_all_except and conflict_target" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: {:replace_all_except, [:id]}, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace_all_except, [:id]}, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are not replaced + changes = [%{id: post_first.id + 2, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace_all_except, [:id]}, 
conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace and conflict_target" do + post_first = %Post{title: "first", visits: 10, public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", visits: 20, public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note `public` field is not changed + changes = [%{id: post_first.id, title: "first_updated", visits: 11, public: false, uuid: post_first.uuid}, + %{id: post_second.id, title: "second_updated", visits: 21, public: true, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 11 + assert updated_first.public == true + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 21 + assert updated_second.public == false + end + end + + describe "values list" do + @describetag :values_list + + test "all" do + uuid_module = uuid_module(TestRepo.__adapter__()) + uuid = uuid_module.generate() + + # Without 
select + values = [%{bid: uuid, visits: 1}, %{bid: uuid, visits: 2}] + types = %{bid: uuid_module, visits: :integer} + query = from v in values(values, types) + assert TestRepo.all(query) == values + + # With select + query = select(query, [v], {v, v.bid}) + assert TestRepo.all(query) == Enum.map(values, &{&1, &1.bid}) + end + + test "all with schema types" do + uuid_module = uuid_module(TestRepo.__adapter__()) + uuid = uuid_module.generate() + + raw_values = [%{bid: uuid, visits: "1"}, %{bid: uuid, visits: "2"}] + casted_values = [%{bid: uuid, visits: 1}, %{bid: uuid, visits: 2}] + types = Post + query = from v in values(raw_values, types) + assert TestRepo.all(query) == casted_values + end + + test "all with join" do + uuid_module = uuid_module(TestRepo.__adapter__()) + uuid = uuid_module.generate() + + values1 = [%{bid: uuid, visits: 1}, %{bid: uuid, visits: 2}] + values2 = [%{bid: uuid, visits: 1}] + types = %{bid: uuid_module, visits: :integer} + + query = + from v1 in values(values1, types), + join: v2 in values(values2, types), + on: v1.visits == v2.visits + + assert TestRepo.all(query) == [%{bid: uuid, visits: 1}] + end + + test "delete_all" do + uuid_module = uuid_module(TestRepo.__adapter__()) + uuid = uuid_module.generate() + + _p1 = TestRepo.insert!(%Post{bid: uuid, visits: 1}) + p2 = TestRepo.insert!(%Post{bid: uuid, visits: 5}) + + values = [%{bid: uuid, visits: 1}, %{bid: nil, visits: 1}, %{bid: uuid, visits: 3}] + types = %{bid: uuid_module, visits: :integer} + + query = + from p in Post, + join: v in values(values, types), + on: p.visits == v.visits + + assert {1, _} = TestRepo.delete_all(query) + assert TestRepo.all(Post) == [p2] + end + + test "update_all" do + uuid_module = uuid_module(TestRepo.__adapter__()) + uuid = uuid_module.generate() + + TestRepo.insert!(%Post{bid: uuid, visits: 1}) + + values = [%{bid: uuid, visits: 10}, %{bid: nil, visits: 2}] + types = %{bid: uuid_module, visits: :integer} + + query = + from p in Post, + join: v in 
values(values, types), + on: p.bid == v.bid, + update: [set: [visits: v.visits]] + + assert {1, _} = TestRepo.update_all(query, []) + assert [%{visits: 10}] = TestRepo.all(Post) + end + + defp uuid_module(Ecto.Adapters.Tds), do: Tds.Ecto.UUID + defp uuid_module(_), do: Ecto.UUID + end + + describe "transact/2 with function" do + test "return ok" do + assert {:ok, [post1, post2]} = + TestRepo.transact(fn -> + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + {:ok, [post1, post2]} + end) + + assert TestRepo.all(Post) |> Enum.sort() == [post1, post2] + end + + test "return error" do + assert {:error, :oops} = + TestRepo.transact(fn -> + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + {:error, :oops} + end) + + assert TestRepo.all(Post) == [] + end + + test "rollback" do + assert {:error, :oops} = + TestRepo.transact(fn -> + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + TestRepo.rollback(:oops) + raise "unreachable" + end) + + assert TestRepo.all(Post) == [] + end + + test "raise error" do + assert_raise RuntimeError, "oops", fn -> + TestRepo.transact(fn -> + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + raise "oops" + end) + end + + assert TestRepo.all(Post) == [] + end + end + + describe "transact/2 with multi" do + test "ok" do + multi = Ecto.Multi.new() + |> Ecto.Multi.insert(:post1, %Post{title: "1"}) + |> Ecto.Multi.insert(:post2, %Post{title: "2"}) + + assert {:ok, %{post1: post1, post2: post2}} = + TestRepo.transact(multi) + + assert TestRepo.all(Post) |> Enum.sort() == [post1, post2] + end + + test "error" do + changeset = + Ecto.Changeset.change(%Post{}) + |> Ecto.Changeset.add_error(:title, "invalid") + + multi = + Ecto.Multi.new() + |> Ecto.Multi.insert(:post1, %Post{title: "1"}) + |> Ecto.Multi.insert(:post2, fn _ -> changeset end) + + assert {:error, :post2, changeset, %{post1: %Post{title: "1"}}} = + TestRepo.transact(multi) + 
+ refute changeset.valid? + end + end + + describe "transaction/2 (soft-deprecated)" do + test "ok" do + assert {:ok, {:ok, [post1, post2]}} = + TestRepo.transaction(fn -> + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + {:ok, [post1, post2]} + end) + + assert TestRepo.all(Post) |> Enum.sort() == [post1, post2] + end + end +end diff --git a/deps/ecto/integration_test/cases/type.exs b/deps/ecto/integration_test/cases/type.exs new file mode 100644 index 0000000..47d08b0 --- /dev/null +++ b/deps/ecto/integration_test/cases/type.exs @@ -0,0 +1,636 @@ +defmodule Ecto.Integration.TypeTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.{Bitstring, Comment, Custom, Item, ItemColor, Order, Post, User, Tag, Usec} + alias Ecto.Integration.TestRepo + import Ecto.Query + + @parameterized_type Ecto.ParameterizedType.init(Ecto.Enum, values: [:a, :b]) + + test "primitive types" do + integer = 1 + float = 0.1 + blob = <<0, 1>> + uuid = "00010203-0405-4607-8809-0a0b0c0d0e0f" + datetime = ~N[2014-01-16 20:26:51] + + TestRepo.insert!(%Post{blob: blob, public: true, visits: integer, uuid: uuid, + counter: integer, inserted_at: datetime, intensity: float}) + + # nil + assert [nil] = TestRepo.all(from Post, select: nil) + + # ID + assert [1] = TestRepo.all(from p in Post, where: p.counter == ^integer, select: p.counter) + + # Integers + assert [1] = TestRepo.all(from p in Post, where: p.visits == ^integer, select: p.visits) + assert [1] = TestRepo.all(from p in Post, where: p.visits == 1, select: p.visits) + assert [3] = TestRepo.all(from p in Post, select: p.visits + 2) + + # Floats + assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == ^float, select: p.intensity) + assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == 0.1, select: p.intensity) + assert [1500.0] = TestRepo.all(from p in Post, select: 1500.0) + assert [0.5] = 
TestRepo.all(from p in Post, select: p.intensity * 5) + + # Booleans + assert [true] = TestRepo.all(from p in Post, where: p.public == ^true, select: p.public) + assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: p.public) + + # Binaries + assert [^blob] = TestRepo.all(from p in Post, where: p.blob == <<0, 1>>, select: p.blob) + assert [^blob] = TestRepo.all(from p in Post, where: p.blob == ^blob, select: p.blob) + + # UUID + assert [^uuid] = TestRepo.all(from p in Post, where: p.uuid == ^uuid, select: p.uuid) + + # NaiveDatetime + assert [^datetime] = TestRepo.all(from p in Post, where: p.inserted_at == ^datetime, select: p.inserted_at) + + # Datetime + datetime = DateTime.from_unix!(System.os_time(:second), :second) + TestRepo.insert!(%User{inserted_at: datetime}) + assert [^datetime] = TestRepo.all(from u in User, where: u.inserted_at == ^datetime, select: u.inserted_at) + + # usec + naive_datetime = ~N[2014-01-16 20:26:51.000000] + datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.000000], "Etc/UTC") + TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime}) + assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec) + assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec) + + naive_datetime = ~N[2014-01-16 20:26:51.123000] + datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.123000], "Etc/UTC") + TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime}) + assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec) + assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec) + end + + @tag :bitstring_type + test "bitstring type" do + bitstring = <<2::3>> + + 
TestRepo.insert!(%Bitstring{bs: bitstring, bs_with_size: <<5::10>>}) + + # Bitstrings + assert [^bitstring] = TestRepo.all(from p in Bitstring, where: p.bs == ^bitstring, select: p.bs) + assert [^bitstring] = TestRepo.all(from p in Bitstring, where: p.bs == <<2::3>>, select: p.bs) + + assert [<<42::6>>] = TestRepo.all(from p in Bitstring, limit: 1, select: p.bs_with_default) + end + + if Code.ensure_loaded?(Duration) do + @tag :duration_type + test "duration type" do + duration = %Duration{year: 1, month: 1, second: 1, microsecond: {100, 6}} + + struct = %Ecto.Integration.Duration{ + dur: duration, + dur_with_fields: duration, + dur_with_precision: duration, + dur_with_fields_and_precision: duration + } + + TestRepo.insert!(struct) + + persisted_duration = + from(d in Ecto.Integration.Duration, where: d.dur == ^duration) + |> TestRepo.one() + + assert persisted_duration.dur == duration + + # `:field` option set to MONTH so it ignores all units lower than `:month` + assert persisted_duration.dur_with_fields == %Duration{ + year: 1, + month: 1, + microsecond: {0, 6} + } + + assert persisted_duration.dur_with_precision == %Duration{ + year: 1, + month: 1, + second: 1, + microsecond: {100, 4} + } + + # `:field` option is set to HOUR TO SECOND so it ignores all units lower than `:second` + assert persisted_duration.dur_with_fields_and_precision == %Duration{ + year: 1, + month: 1, + second: 1, + microsecond: {0, 1} + } + + # `:default set in migration` + assert persisted_duration.dur_with_default == %Duration{month: 10, microsecond: {0, 6}} + end + end + + @tag :select_not + test "primitive types boolean negate" do + TestRepo.insert!(%Post{public: true}) + assert [false] = TestRepo.all(from p in Post, where: p.public == true, select: not p.public) + assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: not not p.public) + end + + test "aggregate types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + 
query = from p in Post, select: max(p.inserted_at) + assert [^datetime] = TestRepo.all(query) + end + + # We don't specifically assert on the tuple content because + # some databases would return integer, others decimal. + # The important is that the type has been invoked for wrapping. + test "aggregate custom types" do + TestRepo.insert!(%Post{wrapped_visits: {:int, 10}}) + query = from p in Post, select: sum(p.wrapped_visits) + assert [{:int, _}] = TestRepo.all(query) + end + + @tag :aggregate_filters + test "aggregate filter types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + query = from p in Post, select: filter(max(p.inserted_at), p.public == ^true) + assert [^datetime] = TestRepo.all(query) + end + + test "coalesce text type when default" do + TestRepo.insert!(%Post{blob: nil}) + blob = <<0, 1>> + query = from p in Post, select: coalesce(p.blob, ^blob) + assert [^blob] = TestRepo.all(query) + end + + test "coalesce text type when value" do + blob = <<0, 2>> + default_blob = <<0, 1>> + TestRepo.insert!(%Post{blob: blob}) + query = from p in Post, select: coalesce(p.blob, ^default_blob) + assert [^blob] = TestRepo.all(query) + end + + test "tagged types" do + %{id: post_id} = TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Comment{text: "#{post_id}", post_id: post_id}) + + # Numbers + assert [1] = TestRepo.all(from Post, select: type(^"1", :integer)) + assert [1.0] = TestRepo.all(from Post, select: type(^1.0, :float)) + assert [1] = TestRepo.all(from p in Post, select: type(^"1", p.visits)) + assert [1.0] = TestRepo.all(from p in Post, select: type(^"1", p.intensity)) + + # Custom wrappers + assert [1] = TestRepo.all(from Post, select: type(^"1", CustomPermalink)) + + # Custom types + uuid = Ecto.UUID.generate() + assert [^uuid] = TestRepo.all(from Post, select: type(^uuid, Ecto.UUID)) + + # Parameterized types + assert [:a] = TestRepo.all(from Post, select: type(^"a", ^@parameterized_type)) + + # Math 
operations + assert [4] = TestRepo.all(from Post, select: type(2 + ^"2", :integer)) + assert [4.0] = TestRepo.all(from Post, select: type(2.0 + ^"2", :float)) + assert [4] = TestRepo.all(from p in Post, select: type(2 + ^"2", p.visits)) + assert [4.0] = TestRepo.all(from p in Post, select: type(2.0 + ^"2", p.intensity)) + + # Comparison expression + assert [12] = TestRepo.all(from p in Post, select: type(coalesce(p.visits, 0), :integer)) + assert [1.0] = TestRepo.all(from p in Post, select: type(coalesce(p.intensity, 1.0), :float)) + + # parent_as/1 + child = from c in Comment, where: type(parent_as(:posts).id, :string) == c.text, select: c.post_id + query = from p in Post, as: :posts, where: p.id in subquery(child), select: p.id + assert [post_id] == TestRepo.all(query) + end + + test "binary id type" do + assert %Custom{} = custom = TestRepo.insert!(%Custom{}) + bid = custom.bid + assert [^bid] = TestRepo.all(from c in Custom, select: c.bid) + assert [^bid] = TestRepo.all(from c in Custom, select: type(^bid, :binary_id)) + end + + @tag :like_match_blob + test "text type as blob" do + assert %Post{} = post = TestRepo.insert!(%Post{blob: <<0, 1, 2>>}) + id = post.id + assert post.blob == <<0, 1, 2>> + assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^<<0, 1, 2>>), select: p.id) + end + + @tag :like_match_blob + @tag :text_type_as_string + test "text type as string" do + assert %Post{} = post = TestRepo.insert!(%Post{blob: "hello"}) + id = post.id + assert post.blob == "hello" + assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^"hello"), select: p.id) + end + + @tag :array_type + test "array type" do + ints = [1, 2, 3] + tag = TestRepo.insert!(%Tag{ints: ints}) + + assert TestRepo.all(from t in Tag, where: t.ints == ^[], select: t.ints) == [] + assert TestRepo.all(from t in Tag, where: t.ints == ^[1, 2, 3], select: t.ints) == [ints] + + # Both sides interpolation + assert TestRepo.all(from t in Tag, where: ^"b" in ^["a", "b", "c"], 
select: t.ints) == [ints] + assert TestRepo.all(from t in Tag, where: ^"b" in [^"a", ^"b", ^"c"], select: t.ints) == [ints] + + # Querying + assert TestRepo.all(from t in Tag, where: t.ints == [1, 2, 3], select: t.ints) == [ints] + assert TestRepo.all(from t in Tag, where: 0 in t.ints, select: t.ints) == [] + assert TestRepo.all(from t in Tag, where: 1 in t.ints, select: t.ints) == [ints] + + # Update + tag = TestRepo.update!(Ecto.Changeset.change tag, ints: nil) + assert TestRepo.get!(Tag, tag.id).ints == nil + + tag = TestRepo.update!(Ecto.Changeset.change tag, ints: [3, 2, 1]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1] + + # Update all + {1, _} = TestRepo.update_all(Tag, push: [ints: 0]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1, 0] + + {1, _} = TestRepo.update_all(Tag, pull: [ints: 2]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 1, 0] + + {1, _} = TestRepo.update_all(Tag, set: [ints: nil]) + assert TestRepo.get!(Tag, tag.id).ints == nil + end + + @tag :array_type + test "array type with custom types" do + uuids = ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"] + TestRepo.insert!(%Tag{uuids: ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"]}) + + assert TestRepo.all(from t in Tag, where: t.uuids == ^[], select: t.uuids) == [] + assert TestRepo.all(from t in Tag, where: t.uuids == ^["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"], + select: t.uuids) == [uuids] + + {1, _} = TestRepo.update_all(Tag, set: [uuids: nil]) + assert TestRepo.all(from t in Tag, select: t.uuids) == [nil] + end + + @tag :array_type + test "array type with nil in array" do + tag = TestRepo.insert!(%Tag{ints: [1, nil, 3]}) + assert tag.ints == [1, nil, 3] + end + + @tag :map_type + test "untyped map" do + post1 = TestRepo.insert!(%Post{meta: %{"foo" => "bar", "baz" => "bat"}}) + post2 = TestRepo.insert!(%Post{meta: %{foo: "bar", baz: "bat"}}) + + assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.meta) == + [%{"foo" => "bar", "baz" => "bat"}] + assert TestRepo.all(from 
p in Post, where: p.id == ^post2.id, select: p.meta) == + [%{"foo" => "bar", "baz" => "bat"}] + end + + @tag :map_type + test "typed string map" do + post1 = TestRepo.insert!(%Post{links: %{"foo" => "http://foo.com", "bar" => "http://bar.com"}}) + post2 = TestRepo.insert!(%Post{links: %{foo: "http://foo.com", bar: "http://bar.com"}}) + + assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.links) == + [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}] + assert TestRepo.all(from p in Post, where: p.id == ^post2.id, select: p.links) == + [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}] + end + + @tag :map_type + test "typed float map" do + post = TestRepo.insert!(%Post{intensities: %{"foo" => 1.0, "bar" => 416500.0}}) + + # Note we are using === since we want to check integer vs float + assert TestRepo.all(from p in Post, where: p.id == ^post.id, select: p.intensities) === + [%{"foo" => 1.0, "bar" => 416500.0}] + end + + @tag :map_type + test "map type on update" do + post = TestRepo.insert!(%Post{meta: %{"world" => "hello"}}) + assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"} + + post = TestRepo.update!(Ecto.Changeset.change post, meta: %{hello: "world"}) + assert TestRepo.get!(Post, post.id).meta == %{"hello" => "world"} + + query = from(p in Post, where: p.id == ^post.id) + TestRepo.update_all(query, set: [meta: %{world: "hello"}]) + assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"} + end + + @tag :map_type + test "embeds one" do + item = %Item{price: 123, valid_at: ~D[2014-01-16]} + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + |> TestRepo.insert!() + + dbitem = TestRepo.get!(Order, order.id).item + assert item.reference == dbitem.reference + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + [dbitem] = TestRepo.all(from o in Order, select: o.item) + assert item.reference == dbitem.reference + assert 
item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + {1, _} = TestRepo.update_all(Order, set: [item: %{dbitem | price: 456}]) + assert TestRepo.get!(Order, order.id).item.price == 456 + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with primitive values" do + order = %Order{metadata: + %{ + :id => 123, + :time => ~T[09:00:00], + "code" => "good", + "'single quoted'" => "bar", + "\"double quoted\"" => "baz", + "enabled" => true, + "extra" => [%{"enabled" => false}] + } + } + + order = TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: json_extract_path(o.metadata, ^["id"])) == 123 + assert TestRepo.one(from o in Order, select: o.metadata["bad"]) == nil + assert TestRepo.one(from o in Order, select: o.metadata["bad"]["bad"]) == nil + + field = "id" + assert TestRepo.one(from o in Order, select: o.metadata[^field]) == 123 + assert TestRepo.one(from o in Order, select: o.metadata["time"]) == "09:00:00" + assert TestRepo.one(from o in Order, select: o.metadata["'single quoted'"]) == "bar" + assert TestRepo.one(from o in Order, select: o.metadata["';"]) == nil + assert TestRepo.one(from o in Order, select: o.metadata["\"double quoted\""]) == "baz" + assert TestRepo.one(from o in Order, select: o.metadata["enabled"]) == true + assert TestRepo.one(from o in Order, select: o.metadata["extra"][0]["enabled"]) == false + + # where + assert TestRepo.one(from o in Order, where: o.metadata["id"] == 123, select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.metadata["id"] == 456, select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.metadata["code"] == "good", select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.metadata["code"] == "bad", select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.metadata["enabled"] == true, select: o.id) == order.id + assert TestRepo.one(from o in Order, where: 
o.metadata["extra"][0]["enabled"] == false, select: o.id) == order.id + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with arrays and objects" do + order = %Order{metadata: %{tags: [%{name: "red"}, %{name: "green"}]}} + order = TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.metadata["tags"][0]["name"]) == "red" + assert TestRepo.one(from o in Order, select: o.metadata["tags"][99]["name"]) == nil + + index = 1 + assert TestRepo.one(from o in Order, select: o.metadata["tags"][^index]["name"]) == "green" + + # where + assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "red", select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "blue", select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.metadata["tags"][99]["name"] == "red", select: o.id) == nil + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with embeds" do + order = %Order{items: [%{valid_at: ~D[2020-01-01]}]} + TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.items[0]["valid_at"]) == "2020-01-01" + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with custom field source" do + order = TestRepo.insert!(%Order{metadata: %{tags: [%{name: "red"}, %{name: "green"}]}}) + + assert TestRepo.one(from o in Order, where: o.metadata["tags"][0]["name"] == "red", select: o.id) == order.id + end + + @tag :map_type + @tag :json_extract_path_with_field + @tag :json_extract_path + test "json_extract_path with fields in path" do + order = %Order{id: 1, label: "tags", metadata: %{tags: [%{name: "red"}, %{name: "green"}]}} + order = TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.metadata[o.label][1]["name"]) == "green" + assert TestRepo.one(from o in Order, select: o.metadata["tags"][o.id]["name"]) == "green" + + assert TestRepo.one(from o in Order, select: 
o.metadata["tags"][field(o, ^:id)]["name"]) == + "green" + + squery = from o in Order, select: o.metadata["tags"][parent_as(:o).id]["name"] + assert TestRepo.one(from o in Order, as: :o, where: subquery(squery) == ^"green") + + squery = from o in Order, select: o.metadata["tags"][field(parent_as(:o), ^:id)]["name"] + assert TestRepo.one(from o in Order, as: :o, where: subquery(squery) == ^"green") + + assert TestRepo.one( + from(o in Order, + where: o.metadata["tags"][o.id]["name"] == "green", + select: o.id) + ) == order.id + + assert TestRepo.one( + from(o in Order, + where: o.metadata["tags"][field(o, ^:id)]["name"] == "green", + select: o.id) + ) == order.id + + squery = from o in Order, where: o.metadata["tags"][parent_as(:o).id]["name"] == "green" + assert TestRepo.one(from o in Order, as: :o, where: exists(subquery(squery))) + end + + @tag :map_type + @tag :map_type_schemaless + test "embeds one with custom type" do + item = %Item{price: 123, reference: "PREFIX-EXAMPLE"} + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + |> TestRepo.insert!() + + dbitem = TestRepo.get!(Order, order.id).item + assert dbitem.reference == "PREFIX-EXAMPLE" + assert [%{"reference" => "EXAMPLE"}] = TestRepo.all(from o in "orders", select: o.item) + end + + @tag :map_type + test "empty embeds one" do + order = TestRepo.insert!(%Order{}) + assert order.item == nil + assert TestRepo.get!(Order, order.id).item == nil + end + + @tag :map_type + @tag :array_type + test "embeds many" do + item = %Item{price: 123, valid_at: ~D[2014-01-16]} + tag = + %Tag{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:items, [item]) + tag = TestRepo.insert!(tag) + + [dbitem] = TestRepo.get!(Tag, tag.id).items + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + [[dbitem]] = TestRepo.all(from t in Tag, select: t.items) + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert 
dbitem.id + + {1, _} = TestRepo.update_all(Tag, set: [items: [%{dbitem | price: 456}]]) + assert (TestRepo.get!(Tag, tag.id).items |> hd).price == 456 + end + + @tag :map_type + @tag :array_type + test "empty embeds many" do + tag = TestRepo.insert!(%Tag{}) + assert tag.items == [] + assert TestRepo.get!(Tag, tag.id).items == [] + end + + @tag :map_type + @tag :array_type + test "nested embeds" do + red = %ItemColor{name: "red"} + blue = %ItemColor{name: "blue"} + item = %Item{ + primary_color: red, + secondary_colors: [blue] + } + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + order = TestRepo.insert!(order) + + dbitem = TestRepo.get!(Order, order.id).item + assert dbitem.primary_color.name == "red" + assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"] + assert dbitem.id + assert dbitem.primary_color.id + + [dbitem] = TestRepo.all(from o in Order, select: o.item) + assert dbitem.primary_color.name == "red" + assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"] + assert dbitem.id + assert dbitem.primary_color.id + end + + @tag :decimal_type + test "decimal type" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + + [cost] = TestRepo.all(from p in Post, where: p.cost == ^decimal, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == ^1.0, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == ^1, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == 1.0, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == 1, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, select: p.cost * 2) + assert Decimal.equal?(Decimal.new("2.0"), cost) + [cost] = TestRepo.all(from p in Post, select: p.cost - p.cost) + assert 
Decimal.equal?(Decimal.new("0.0"), cost) + end + + @tag :decimal_type + @tag :decimal_precision + test "decimal typed aggregations" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + + assert [1] = TestRepo.all(from p in Post, select: type(sum(p.cost), :integer)) + assert [1.0] = TestRepo.all(from p in Post, select: type(sum(p.cost), :float)) + [cost] = TestRepo.all(from p in Post, select: type(sum(p.cost), :decimal)) + assert Decimal.equal?(decimal, cost) + end + + @tag :decimal_type + test "on coalesce with mixed types" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + [cost] = TestRepo.all(from p in Post, select: coalesce(p.cost, 0)) + assert Decimal.equal?(decimal, cost) + end + + @tag :union_with_literals + test "unions with literals" do + TestRepo.insert!(%Post{}) + TestRepo.insert!(%Post{}) + + query1 = from(p in Post, select: %{n: 1}) + query2 = from(p in Post, select: %{n: 2}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: 1}, %{n: 1}, %{n: 2}, %{n: 2}] + + query1 = from(p in Post, select: %{n: 1.0}) + query2 = from(p in Post, select: %{n: 2.0}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: 1.0}, %{n: 1.0}, %{n: 2.0}, %{n: 2.0}] + + query1 = from(p in Post, select: %{n: "foo"}) + query2 = from(p in Post, select: %{n: "bar"}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: "foo"}, %{n: "foo"}, %{n: "bar"}, %{n: "bar"}] + end + + test "schemaless types" do + TestRepo.insert!(%Post{visits: 123}) + assert [123] = TestRepo.all(from p in "posts", select: type(p.visits, :integer)) + end + + test "schemaless calendar types" do + datetime = ~N[2014-01-16 20:26:51] + assert {1, _} = + TestRepo.insert_all("posts", [[inserted_at: datetime]]) + assert {1, _} = + TestRepo.update_all("posts", set: [inserted_at: datetime]) + assert [_] = + TestRepo.all(from p in "posts", where: p.inserted_at >= ^datetime, select: p.inserted_at) + assert [_] = + TestRepo.all(from p in 
"posts", where: p.inserted_at in [^datetime], select: p.inserted_at) + assert [_] = + TestRepo.all(from p in "posts", where: p.inserted_at in ^[datetime], select: p.inserted_at) + end +end diff --git a/deps/ecto/integration_test/cases/windows.exs b/deps/ecto/integration_test/cases/windows.exs new file mode 100644 index 0000000..a52eda2 --- /dev/null +++ b/deps/ecto/integration_test/cases/windows.exs @@ -0,0 +1,53 @@ +defmodule Ecto.Integration.WindowsTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.{Comment, User, Post} + + test "over" do + u1 = TestRepo.insert!(%User{name: "Tester"}) + u2 = TestRepo.insert!(%User{name: "Developer"}) + c1 = TestRepo.insert!(%Comment{text: "1", author_id: u1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", author_id: u1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", author_id: u1.id}) + c4 = TestRepo.insert!(%Comment{text: "4", author_id: u2.id}) + + # Over nothing + query = from(c in Comment, select: [c, count(c.id) |> over()]) + assert [[^c1, 4], [^c2, 4], [^c3, 4], [^c4, 4]] = TestRepo.all(query) + + # Over partition + query = from(c in Comment, select: [c, count(c.id) |> over(partition_by: c.author_id)]) + assert [[^c1, 3], [^c2, 3], [^c3, 3], [^c4, 1]] = TestRepo.all(query) + + # Over window + query = from(c in Comment, windows: [w: [partition_by: c.author_id]], select: [c, count(c.id) |> over(:w)]) + assert [[^c1, 3], [^c2, 3], [^c3, 3], [^c4, 1]] = TestRepo.all(query) + end + + test "frame" do + posts = Enum.map(0..6, &%{counter: &1, visits: round(:math.pow(2, &1))}) + TestRepo.insert_all(Post, posts) + + n = 1 + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS BETWEEN ? PRECEDING AND ? 
FOLLOWING", ^n, ^n)]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 3], [1, 7], [2, 14], [3, 28], [4, 56], [5, 112], [6, 96]] = TestRepo.all(query) + + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING")]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 126], [1, 124], [2, 120], [3, 112], [4, 96], [5, 64], [6, nil]] = TestRepo.all(query) + + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS CURRENT ROW")]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 1], [1, 2], [2, 4], [3, 8], [4, 16], [5, 32], [6, 64]] = TestRepo.all(query) + end +end diff --git a/deps/ecto/integration_test/support/schemas.exs b/deps/ecto/integration_test/support/schemas.exs new file mode 100644 index 0000000..e5b6559 --- /dev/null +++ b/deps/ecto/integration_test/support/schemas.exs @@ -0,0 +1,424 @@ +Code.require_file("types.exs", __DIR__) + +defmodule Ecto.Integration.Schema do + defmacro __using__(_) do + quote do + use Ecto.Schema + + type = + Application.compile_env(:ecto, :primary_key_type) || + raise ":primary_key_type not set in :ecto application" + + @primary_key {:id, type, autogenerate: true} + @foreign_key_type type + end + end +end + +defmodule Ecto.Integration.Post do + @moduledoc """ + This module is used to test: + + * Overall functionality + * Overall types + * Non-null timestamps + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + import Ecto.Changeset + import Ecto.Query, only: [dynamic: 2] + + schema "posts" do + field :counter, :id # Same as integer + field :title, :string + field :blob, :binary + field :temp, :string, default: "temp", virtual: true + field :public, :boolean, default: true + field :cost, :decimal + field :visits, :integer + field :wrapped_visits, WrappedInteger + field :intensity, :float + field :bid, :binary_id + field :uuid, 
Ecto.Integration.TestRepo.uuid(), autogenerate: true + field :meta, :map + field :links, {:map, :string} + field :intensities, {:map, :float} + field :posted, :date + has_many :comments, Ecto.Integration.Comment, on_delete: :delete_all, on_replace: :delete + has_many :force_comments, Ecto.Integration.Comment, on_replace: :delete_if_exists + has_many :ordered_comments, Ecto.Integration.Comment, preload_order: [:text] + # The post<->permalink relationship should be marked as uniq + has_one :permalink, Ecto.Integration.Permalink, on_delete: :delete_all, on_replace: :delete + has_one :force_permalink, Ecto.Integration.Permalink, on_replace: :delete_if_exists + has_one :update_permalink, Ecto.Integration.Permalink, foreign_key: :post_id, on_delete: :delete_all, on_replace: :update + has_many :comments_authors, through: [:comments, :author] + belongs_to :author, Ecto.Integration.User + many_to_many :users, Ecto.Integration.User, + join_through: "posts_users", on_delete: :delete_all, on_replace: :delete + many_to_many :ordered_users, Ecto.Integration.User, join_through: "posts_users", preload_order: [desc: :name] + many_to_many :ordered_users_by_join_table, Ecto.Integration.User, + join_through: "posts_users", preload_order: {__MODULE__, :preload_order, []} + many_to_many :unique_users, Ecto.Integration.User, + join_through: "posts_users", unique: true + many_to_many :constraint_users, Ecto.Integration.User, + join_through: Ecto.Integration.PostUserCompositePk + has_many :users_comments, through: [:users, :comments] + has_many :comments_authors_permalinks, through: [:comments_authors, :permalink] + has_one :post_user_composite_pk, Ecto.Integration.PostUserCompositePk + timestamps() + end + + def preload_order() do + [desc: dynamic([assoc, join], join.user_id)] + end + + def changeset(schema, params) do + cast(schema, params, ~w(counter title blob temp public cost visits + intensity bid uuid meta posted)a) + end +end + +defmodule Ecto.Integration.Comment do + @moduledoc 
""" + This module is used to test: + + * Optimistic lock + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "comments" do + field :text, :string + field :lock_version, :integer, default: 1 + belongs_to :post, Ecto.Integration.Post + belongs_to :author, Ecto.Integration.User + has_one :post_permalink, through: [:post, :permalink] + has_one :author_permalink, through: [:author, :permalink] + end + + def changeset(schema, params) do + Ecto.Changeset.cast(schema, params, [:text]) + end +end + +defmodule Ecto.Integration.Permalink do + @moduledoc """ + This module is used to test: + + * Field sources + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "permalinks" do + field :url, :string, source: :uniform_resource_locator + field :title, :string + field :posted, :date, virtual: true + belongs_to :post, Ecto.Integration.Post, on_replace: :nilify + belongs_to :update_post, Ecto.Integration.Post, on_replace: :update, foreign_key: :post_id, define_field: false + belongs_to :user, Ecto.Integration.User + has_many :post_comments_authors, through: [:post, :comments_authors] + has_many :user_posts, through: [:user, :posts] + end + + def changeset(schema, params) do + Ecto.Changeset.cast(schema, params, [:url, :title]) + end +end + +defmodule Ecto.Integration.PostUser do + @moduledoc """ + This module is used to test: + + * Many to many associations join_through with schema + + """ + use Ecto.Integration.Schema + + schema "posts_users_pk" do + belongs_to :user, Ecto.Integration.User + belongs_to :post, Ecto.Integration.Post + timestamps() + end +end + +defmodule Ecto.Integration.User do + @moduledoc """ + This module is used to test: + + * UTC Timestamps + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "users" do + field :name, :string + has_many :comments, Ecto.Integration.Comment, foreign_key: :author_id, on_delete: :nilify_all, on_replace: :nilify + 
has_one :permalink, Ecto.Integration.Permalink, on_replace: :nilify + has_many :posts, Ecto.Integration.Post, foreign_key: :author_id, on_delete: :nothing, on_replace: :delete + belongs_to :custom, Ecto.Integration.Custom, references: :bid, type: :binary_id + many_to_many :schema_posts, Ecto.Integration.Post, join_through: Ecto.Integration.PostUser + many_to_many :unique_posts, Ecto.Integration.Post, join_through: Ecto.Integration.PostUserCompositePk + + has_many :related_2nd_order_posts, through: [:posts, :users, :posts] + has_many :users_through_schema_posts, through: [:schema_posts, :users] + + has_many :v2_comments, Ecto.Integration.Comment, foreign_key: :author_id, where: [lock_version: 2] + has_many :v2_comments_posts, through: [:v2_comments, :post] + has_many :co_commenters, through: [:comments, :post, :comments_authors] + + timestamps(type: :utc_datetime) + end +end + +defmodule Ecto.Integration.Custom do + @moduledoc """ + This module is used to test: + + * binary_id primary key + * Tying another schemas to an existing schema + + Due to the second item, it must be a subset of posts. 
+ """ + use Ecto.Integration.Schema + + @primary_key {:bid, :binary_id, autogenerate: true} + schema "customs" do + field :uuid, Ecto.Integration.TestRepo.uuid() + many_to_many :customs, Ecto.Integration.Custom, + join_through: "customs_customs", join_keys: [custom_id1: :bid, custom_id2: :bid], + on_delete: :delete_all, on_replace: :delete + end +end + +defmodule Ecto.Integration.Barebone do + @moduledoc """ + This module is used to test: + + * A schema without primary keys + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "barebones" do + field :num, :integer + end +end + +defmodule Ecto.Integration.Tag do + @moduledoc """ + This module is used to test: + + * The array type + * Embedding many schemas (uses array) + + """ + use Ecto.Integration.Schema + + schema "tags" do + field :ints, {:array, :integer} + field :uuids, {:array, Ecto.Integration.TestRepo.uuid()} + embeds_many :items, Ecto.Integration.Item + end +end + +defmodule Ecto.Integration.Item do + @moduledoc """ + This module is used to test: + + * Embedding + * Preloading associations in embedded schemas + + """ + use Ecto.Schema + + embedded_schema do + field :reference, PrefixedString + field :price, :integer + field :valid_at, :date + + embeds_one :primary_color, Ecto.Integration.ItemColor + embeds_many :secondary_colors, Ecto.Integration.ItemColor + + belongs_to :user, Ecto.Integration.User + end +end + +defmodule Ecto.Integration.ItemColor do + @moduledoc """ + This module is used to test: + + * Nested embeds + + """ + use Ecto.Schema + + embedded_schema do + field :name, :string + end +end + +defmodule Ecto.Integration.Order do + @moduledoc """ + This module is used to test: + + * Text columns + * Embedding one schema + * Preloading items inside embeds_many + * Preloading items inside embeds_one + * Field source with json_extract_path + + """ + use Ecto.Integration.Schema + + schema "orders" do + field :label, :string + field :metadata, :map, source: :meta + embeds_one :item, 
Ecto.Integration.Item + embeds_many :items, Ecto.Integration.Item + belongs_to :permalink, Ecto.Integration.Permalink + end +end + +defmodule Ecto.Integration.CompositePk do + @moduledoc """ + This module is used to test: + + * Composite primary keys + + """ + use Ecto.Integration.Schema + import Ecto.Changeset + + @primary_key false + schema "composite_pk" do + field :a, :integer, primary_key: true + field :b, :integer, primary_key: true + field :name, :string + end + def changeset(schema, params) do + cast(schema, params, ~w(a b name)a) + end +end + +defmodule Ecto.Integration.CorruptedPk do + @moduledoc """ + This module is used to test: + + * Primary keys that is not unique on a DB side + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "corrupted_pk" do + field :a, :string, primary_key: true + end +end + +defmodule Ecto.Integration.PostUserCompositePk do + @moduledoc """ + This module is used to test: + + * Composite primary keys for 2 belongs_to fields + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "posts_users_composite_pk" do + belongs_to :user, Ecto.Integration.User, primary_key: true + belongs_to :post, Ecto.Integration.Post, primary_key: true + timestamps() + end +end + +defmodule Ecto.Integration.Usec do + @moduledoc """ + This module is used to test: + + * usec datetime types + + """ + use Ecto.Integration.Schema + + schema "usecs" do + field :naive_datetime_usec, :naive_datetime_usec + field :utc_datetime_usec, :utc_datetime_usec + end +end + +defmodule Ecto.Integration.Logging do + @moduledoc """ + This module is used to test: + + * Logging the casted version of parameters without array types + + """ + use Ecto.Integration.Schema + + @primary_key {:bid, :binary_id, autogenerate: true} + schema "loggings" do + field :int, :integer + field :uuid, Ecto.Integration.TestRepo.uuid() + timestamps() + end +end + +defmodule Ecto.Integration.ArrayLogging do + @moduledoc """ + This module is used to test: + + * Logging 
the casted version of parameters with array types + + """ + use Ecto.Integration.Schema + + schema "array_loggings" do + field :uuids, {:array, Ecto.Integration.TestRepo.uuid()} + timestamps() + end +end + +defmodule Ecto.Integration.Bitstring do + @moduledoc """ + This module is used to test: + + * Bitstring type + + """ + use Ecto.Integration.Schema + + schema "bitstrings" do + field :bs, :bitstring + field :bs_with_default, :bitstring + field :bs_with_size, :bitstring + end +end + +if Code.ensure_loaded?(Duration) do + defmodule Ecto.Integration.Duration do + @moduledoc """ + This module is used to test: + * Duration type + """ + use Ecto.Integration.Schema + + schema "durations" do + field :dur, :duration + field :dur_with_fields, :duration + field :dur_with_precision, :duration + field :dur_with_fields_and_precision, :duration + field :dur_with_default, :duration + end + end +end diff --git a/deps/ecto/integration_test/support/types.exs b/deps/ecto/integration_test/support/types.exs new file mode 100644 index 0000000..196012f --- /dev/null +++ b/deps/ecto/integration_test/support/types.exs @@ -0,0 +1,53 @@ +defmodule CustomPermalink do + def type, do: :id + + def cast(string) when is_binary(string) do + case Integer.parse(string) do + {int, _} -> {:ok, int} + :error -> :error + end + end + + def cast(integer) when is_integer(integer), do: {:ok, integer} + def cast(_), do: :error + + def load(integer) when is_integer(integer), do: {:ok, integer} + def dump(integer) when is_integer(integer), do: {:ok, integer} +end + +defmodule PrefixedString do + use Ecto.Type + def type(), do: :string + def cast(string), do: {:ok, string} + def load(string), do: {:ok, "PREFIX-" <> string} + def dump("PREFIX-" <> string), do: {:ok, string} + def dump(_string), do: :error + def embed_as(_), do: :dump +end + +defmodule WrappedInteger do + use Ecto.Type + def type(), do: :integer + def cast(integer), do: {:ok, {:int, integer}} + def load(integer), do: {:ok, {:int, integer}} + def 
dump({:int, integer}), do: {:ok, integer} +end + +defmodule ParameterizedPrefixedString do + use Ecto.ParameterizedType + def init(opts), do: Enum.into(opts, %{}) + def type(_), do: :string + + def cast(data, %{prefix: prefix}) do + if String.starts_with?(data, [prefix <> "-"]) do + {:ok, data} + else + {:ok, prefix <> "-" <> data} + end + end + + def load(string, _, %{prefix: prefix}), do: {:ok, prefix <> "-" <> string} + def dump(nil, _, _), do: {:ok, nil} + def dump(data, _, %{prefix: _prefix}), do: {:ok, data |> String.split("-") |> List.last()} + def embed_as(_, _), do: :dump +end diff --git a/deps/ecto/lib/ecto.ex b/deps/ecto/lib/ecto.ex new file mode 100644 index 0000000..c3e7bb5 --- /dev/null +++ b/deps/ecto/lib/ecto.ex @@ -0,0 +1,754 @@ +defmodule Ecto do + @moduledoc ~S""" + Ecto is split into 4 main components: + + * `Ecto.Repo` - repositories are wrappers around the data store. + Via the repository, we can create, update, destroy and query + existing entries. A repository needs an adapter and credentials + to communicate to the database + + * `Ecto.Schema` - schemas are used to map external data into Elixir + structs. We often use them to map database tables to Elixir data but + they have many other use cases + + * `Ecto.Query` - written in Elixir syntax, queries are used to retrieve + information from a given repository. Ecto queries are secure and composable + + * `Ecto.Changeset` - changesets provide a way to track and validate changes + before they are applied to the data + + In summary: + + * `Ecto.Repo` - **where** the data is + * `Ecto.Schema` - **what** the data is + * `Ecto.Query` - **how to read** the data + * `Ecto.Changeset` - **how to change** the data + + Besides the four components above, most developers use Ecto to interact + with SQL databases, such as PostgreSQL and MySQL via the + [`ecto_sql`](https://hexdocs.pm/ecto_sql) project. 
`ecto_sql` provides many + conveniences for working with SQL databases as well as the ability to version + how your database changes through time via + [database migrations](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.html#module-migrations). + + If you want to quickly check a sample application using Ecto, please check + the [getting started guide](https://hexdocs.pm/ecto/getting-started.html) and + the accompanying sample application. [Ecto's README](https://github.com/elixir-ecto/ecto) + also links to other resources. + + In the following sections, we will provide an overview of those components and + how they interact with each other. Feel free to access their respective module + documentation for more specific examples, options and configuration. + + ## Repositories + + `Ecto.Repo` is a wrapper around the database. We can define a + repository as follows: + + defmodule Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres + end + + Where the configuration for the Repo must be in your application + environment, usually defined in your `config/config.exs`: + + config :my_app, Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost", + # OR use a URL to connect instead + url: "postgres://postgres:postgres@localhost/ecto_simple" + + Each repository in Ecto defines a `start_link/0` function that needs to be invoked + before using the repository. In general, this function is not called directly, + but is used as part of your application supervision tree. + + If your application was generated with a supervisor (by passing `--sup` to `mix new`) + you will have a `lib/my_app/application.ex` file containing the application start + callback that defines and starts your supervisor. 
You just need to edit the `start/2` + function to start the repo as a supervisor on your application's supervisor: + + def start(_type, _args) do + children = [ + MyApp.Repo, + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end + + ## Schema + + Schemas allow developers to define the shape of their data. + Let's see an example: + + defmodule Weather do + use Ecto.Schema + + # weather is the DB table + schema "weather" do + field :city, :string + field :temp_lo, :integer + field :temp_hi, :integer + field :prcp, :float, default: 0.0 + end + end + + By defining a schema, Ecto automatically defines a struct with + the schema fields: + + iex> weather = %Weather{temp_lo: 30} + iex> weather.temp_lo + 30 + + The schema also allows us to interact with a repository: + + iex> weather = %Weather{temp_lo: 0, temp_hi: 23} + iex> Repo.insert!(weather) + %Weather{...} + + After persisting `weather` to the database, it will return a new copy of + `%Weather{}` with the primary key (the `id`) set. We can use this value + to read a struct back from the repository: + + # Get the struct back + iex> weather = Repo.get Weather, 1 + %Weather{id: 1, ...} + + # Delete it + iex> Repo.delete!(weather) + %Weather{...} + + > NOTE: by using `Ecto.Schema`, an `:id` field with type `:id` (:id means :integer) is + > generated by default, which is the primary key of the schema. If you want + > to use a different primary key, you can declare custom `@primary_key` + > before the `schema/2` call. Consult the `Ecto.Schema` documentation + > for more information. + + Notice how the storage (repository) and the data are decoupled. This provides + two main benefits: + + * By having structs as data, we guarantee they are light-weight, + serializable structures. 
In many languages, the data is often represented + by large, complex objects, with entwined state transactions, which makes + serialization, maintenance and understanding hard; + + * You do not need to define schemas in order to interact with repositories, + operations like `all`, `insert_all` and so on allow developers to directly + access and modify the data, keeping the database at your fingertips when + necessary; + + ## Changesets + + Although in the example above we have directly inserted and deleted the + struct in the repository, operations on top of schemas are done through + changesets so Ecto can efficiently track changes. + + Changesets allow developers to filter, cast, and validate changes before + we apply them to the data. Imagine the given schema: + + defmodule User do + use Ecto.Schema + + import Ecto.Changeset + + schema "users" do + field :name + field :email + field :age, :integer + end + + def changeset(user, params \\ %{}) do + user + |> cast(params, [:name, :email, :age]) + |> validate_required([:name, :email]) + |> validate_format(:email, ~r/@/) + |> validate_inclusion(:age, 18..100) + end + end + + The `changeset/2` function first invokes `Ecto.Changeset.cast/4` with + the struct, the parameters and a list of allowed fields; this returns a changeset. + The parameters is a map with binary keys and values that will be cast based + on the type defined by the schema. + + Any parameter that was not explicitly listed in the fields list will be ignored. + + After casting, the changeset is given to many `Ecto.Changeset.validate_*` + functions that validate only the **changed fields**. In other words: + if a field was not given as a parameter, it won't be validated at all. + For example, if the params map contain only the "name" and "email" keys, + the "age" validation won't run. 
+ + Once a changeset is built, it can be given to functions like `insert` and + `update` in the repository that will return an `:ok` or `:error` tuple: + + case Repo.update(changeset) do + {:ok, user} -> + # user updated + {:error, changeset} -> + # an error occurred + end + + The benefit of having explicit changesets is that we can easily provide + different changesets for different use cases. For example, one + could easily provide specific changesets for registering and updating + users: + + def registration_changeset(user, params) do + # Changeset on create + end + + def update_changeset(user, params) do + # Changeset on update + end + + Changesets are also capable of transforming database constraints, + like unique indexes and foreign key checks, into errors. Allowing + developers to keep their database consistent while still providing + proper feedback to end users. Check `Ecto.Changeset.unique_constraint/3` + for some examples as well as the other `_constraint` functions. + + ## Query + + Last but not least, Ecto allows you to write queries in Elixir and send + them to the repository, which translates them to the underlying database. + Let's see an example: + + import Ecto.Query, only: [from: 2] + + query = from u in User, + where: u.age > 18 or is_nil(u.email), + select: u + + # Returns %User{} structs matching the query + Repo.all(query) + + In the example above we relied on our schema but queries can also be + made directly against a table by giving the table name as a string. In + such cases, the data to be fetched must be explicitly outlined: + + query = from u in "users", + where: u.age > 18 or is_nil(u.email), + select: %{name: u.name, age: u.age} + + # Returns maps as defined in select + Repo.all(query) + + Queries are defined and extended with the [`from`](`Ecto.Query.from/2`) macro. 
The supported + keywords are: + + * `:distinct` + * `:where` + * `:order_by` + * `:offset` + * `:limit` + * `:lock` + * `:group_by` + * `:having` + * `:join` + * `:select` + * `:preload` + + Examples and detailed documentation for each of those are available + in the `Ecto.Query` module. Functions supported in queries are listed + in `Ecto.Query.API`. + + When writing a query, you are inside Ecto's query syntax. In order to + access params values or invoke Elixir functions, you need to use the `^` + operator, which is overloaded by Ecto: + + def min_age(min) do + from u in User, where: u.age > ^min + end + + Besides [`Repo.all/1`](`c:Ecto.Repo.all/2`) which returns all entries, repositories + also provide [`Repo.one/1`](`c:Ecto.Repo.one/2`) which returns one entry or `nil`, + [`Repo.one!/1`](`c:Ecto.Repo.one!/2`)) which returns one entry or raises, + [`Repo.get/2`](`c:Ecto.Repo.get/3`) which fetches entries for a particular ID and more. + + Finally, if you need an escape hatch, Ecto provides fragments + (see `Ecto.Query.API.fragment/1`) to inject SQL (and non-SQL) + fragments into queries. Also, most adapters provide direct + APIs for queries, like `Ecto.Adapters.SQL.query/4`, allowing + developers to completely bypass Ecto queries. + + ## Other topics + + ### Associations + + Ecto supports defining associations on schemas: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, Comment + end + end + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + field :title, :string + belongs_to :post, Post + end + end + + When an association is defined, Ecto also defines a field in the schema + with the association name. 
By default, associations are not loaded into + this field: + + iex> post = Repo.get(Post, 42) + iex> post.comments + #Ecto.Association.NotLoaded<...> + + However, developers can use the preload functionality in queries to + automatically pre-populate the field: + + Repo.all from p in Post, preload: [:comments] + + Preloading can also be done with a pre-defined join value: + + Repo.all from p in Post, + join: c in assoc(p, :comments), + where: c.votes > p.votes, + preload: [comments: c] + + Finally, for the simple cases, preloading can also be done after + a collection was fetched: + + posts = Repo.all(Post) |> Repo.preload(:comments) + + The `Ecto` module also provides conveniences for working + with associations. For example, `Ecto.assoc/3` returns a query + with all associated data to a given struct: + + import Ecto + + # Get all comments for the given post + Repo.all assoc(post, :comments) + + # Or build a query on top of the associated comments + query = from c in assoc(post, :comments), where: not is_nil(c.title) + Repo.all(query) + + Another function in `Ecto` is `build_assoc/3`, which allows + someone to build an associated struct with the proper fields: + + Repo.transact(fn -> + post = Repo.insert!(%Post{title: "Hello", body: "world"}) + + # Build a comment from post + comment = Ecto.build_assoc(post, :comments, body: "Excellent!") + + Repo.insert!(comment) + end) + + In the example above, `Ecto.build_assoc/3` is equivalent to: + + %Comment{post_id: post.id, body: "Excellent!"} + + You can find more information about defining associations and each + respective association module in `Ecto.Schema` docs. + + > NOTE: Ecto does not lazy load associations. While lazily loading + > associations may sound convenient at first, in the long run it + > becomes a source of confusion and performance issues. + + ### Embeds + + Ecto also supports embeds. While associations keep parent and child + entries in different tables, embeds stores the child along side the + parent. 
+ + Databases like MongoDB have native support for embeds. Databases + like PostgreSQL uses a mixture of JSONB (`embeds_one/3`) and ARRAY + columns to provide this functionality. + + Check `Ecto.Schema.embeds_one/3` and `Ecto.Schema.embeds_many/3` + for more information. + + ### Mix tasks and generators + + Ecto provides many tasks to help your workflow as well as code generators. + You can find all available tasks by typing `mix help` inside a project + with Ecto listed as a dependency. + + Ecto generators will automatically open the generated files if you have + `ECTO_EDITOR` set in your environment variable. + + #### Repo resolution + + Ecto requires developers to specify the key `:ecto_repos` in their + application configuration before using tasks like `ecto.create` and + `ecto.migrate`. For example: + + config :my_app, :ecto_repos, [MyApp.Repo] + + config :my_app, MyApp.Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost" + + """ + + @doc """ + Returns the schema primary keys as a keyword list. + """ + @spec primary_key(Ecto.Schema.t()) :: Keyword.t() + def primary_key(%{__struct__: schema} = struct) do + Enum.map(schema.__schema__(:primary_key), fn field -> + {field, Map.fetch!(struct, field)} + end) + end + + @doc """ + Returns the schema primary keys as a keyword list. + + Raises `Ecto.NoPrimaryKeyFieldError` if the schema has no + primary key field. + """ + @spec primary_key!(Ecto.Schema.t()) :: Keyword.t() + def primary_key!(%{__struct__: schema} = struct) do + case primary_key(struct) do + [] -> raise Ecto.NoPrimaryKeyFieldError, schema: schema + pk -> pk + end + end + + @doc """ + Builds a struct from the given `assoc` in `struct`. 
+ + ## Examples + + If the relationship is a `has_one` or `has_many` and + the primary key is set in the parent struct, the key will + automatically be set in the built association: + + iex> post = Repo.get(Post, 13) + %Post{id: 13} + iex> build_assoc(post, :comments) + %Comment{id: nil, post_id: 13} + + Note though it doesn't happen with `belongs_to` cases, as the + key is often the primary key and such is usually generated + dynamically: + + iex> comment = Repo.get(Comment, 13) + %Comment{id: 13, post_id: 25} + iex> build_assoc(comment, :post) + %Post{id: nil} + + You can also pass the attributes, which can be a map or + a keyword list, to set the struct's fields except the + association key. + + iex> build_assoc(post, :comments, text: "cool") + %Comment{id: nil, post_id: 13, text: "cool"} + + iex> build_assoc(post, :comments, %{text: "cool"}) + %Comment{id: nil, post_id: 13, text: "cool"} + + iex> build_assoc(post, :comments, post_id: 1) + %Comment{id: nil, post_id: 13} + + The given attributes are expected to be structured data. + If you want to build an association with external data, + such as a request parameters, you can use `Ecto.Changeset.cast/3` + after `build_assoc/3`: + + parent + |> Ecto.build_assoc(:child) + |> Ecto.Changeset.cast(params, [:field1, :field2]) + + """ + def build_assoc(%{__struct__: schema} = struct, assoc, attributes \\ %{}) do + assoc = Ecto.Association.association_from_schema!(schema, assoc) + assoc.__struct__.build(assoc, struct, drop_meta(attributes)) + end + + defp drop_meta(%{} = attrs), do: Map.drop(attrs, [:__struct__, :__meta__]) + defp drop_meta([_ | _] = attrs), do: Keyword.drop(attrs, [:__struct__, :__meta__]) + + @doc """ + Builds a query for the association in the given struct or structs. 
+ + ## Examples + + In the example below, we get all comments associated to the given + post: + + post = Repo.get Post, 1 + Repo.all Ecto.assoc(post, :comments) + + `assoc/3` can also receive a list of posts, as long as the posts are + not empty: + + posts = Repo.all from p in Post, where: is_nil(p.published_at) + Repo.all Ecto.assoc(posts, :comments) + + This function can also be used to dynamically load through associations + by giving it a list. For example, to get all authors for all comments for + the given posts, do: + + posts = Repo.all from p in Post, where: is_nil(p.published_at) + Repo.all Ecto.assoc(posts, [:comments, :author]) + + ## Options + + * `:prefix` - the prefix to fetch assocs from. By default, queries + will use the same prefix as the first struct in the given collection. + This option allows the prefix to be changed. + + """ + def assoc(struct_or_structs, assocs, opts \\ []) do + [assoc | assocs] = List.wrap(assocs) + + structs = + case struct_or_structs do + nil -> raise ArgumentError, "cannot retrieve association #{inspect(assoc)} for nil" + [] -> raise ArgumentError, "cannot retrieve association #{inspect(assoc)} for empty list" + struct_or_structs -> List.wrap(struct_or_structs) + end + + sample = hd(structs) + prefix = assoc_prefix(sample, opts) + schema = sample.__struct__ + refl = %{owner_key: owner_key} = Ecto.Association.association_from_schema!(schema, assoc) + + values = + Enum.uniq( + for( + struct <- structs, + assert_struct!(schema, struct), + key = Map.fetch!(struct, owner_key), + do: key + ) + ) + + case assocs do + [] -> + %module{} = refl + %{module.assoc_query(refl, nil, values) | prefix: prefix} + + assocs -> + %{ + Ecto.Association.filter_through_chain(schema, [assoc | assocs], values) + | prefix: prefix + } + end + end + + defp assoc_prefix(sample, opts) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + prefix + + :error -> + case sample do + %{__meta__: %{prefix: prefix}} -> prefix + # Must be an embedded 
schema + _ -> nil + end + end + end + + @doc """ + Checks if an association is loaded. + + ## Examples + + iex> post = Repo.get(Post, 1) + iex> Ecto.assoc_loaded?(post.comments) + false + iex> post = post |> Repo.preload(:comments) + iex> Ecto.assoc_loaded?(post.comments) + true + + """ + def assoc_loaded?(%Ecto.Association.NotLoaded{}), do: false + def assoc_loaded?(list) when is_list(list), do: true + def assoc_loaded?(%_{}), do: true + def assoc_loaded?(nil), do: true + + @doc """ + Resets fields in a struct to their default values. + + ## Examples + + iex> post = post |> Repo.preload(:author) + %Post{title: "hello world", author: %Author{}} + iex> Ecto.reset_fields(post, [:title, :author]) + %Post{ + title: "default title", + author: #Ecto.Association.NotLoaded + } + + """ + @spec reset_fields(Ecto.Schema.t(), list()) :: Ecto.Schema.t() + def reset_fields(struct, []), do: struct + + def reset_fields(%{__struct__: schema} = struct, fields) do + default_struct = schema.__struct__() + default_fields = Map.take(default_struct, fields) + Map.merge(struct, default_fields) + end + + @doc """ + Gets the metadata from the given struct. + + For example, to check whether it has been persisted: + + iex> Ecto.get_meta(changeset.data, :state) + :built + + See `Ecto.Schema.Metadata`. + """ + def get_meta(struct, :context), + do: struct.__meta__.context + + def get_meta(struct, :state), + do: struct.__meta__.state + + def get_meta(struct, :source), + do: struct.__meta__.source + + def get_meta(struct, :prefix), + do: struct.__meta__.prefix + + @doc """ + Returns a new struct with updated metadata. + + It is possible to set: + + * `:source` - changes the struct query source + * `:prefix` - changes the struct query prefix + * `:context` - changes the struct meta context + * `:state` - changes the struct state + + See `Ecto.Schema.Metadata`. 
+ """ + @spec put_meta(Ecto.Schema.schema(), meta) :: Ecto.Schema.schema() + when meta: [ + source: Ecto.Schema.source(), + prefix: Ecto.Schema.prefix(), + context: Ecto.Schema.Metadata.context(), + state: Ecto.Schema.Metadata.state() + ] + def put_meta(%{__meta__: meta} = struct, opts) do + case put_or_noop_meta(opts, meta, false) do + :noop -> struct + meta -> %{struct | __meta__: meta} + end + end + + defp put_or_noop_meta([{key, value} | t], meta, updated?) do + case meta do + %{^key => ^value} -> put_or_noop_meta(t, meta, updated?) + _ -> put_or_noop_meta(t, put_meta(meta, key, value), true) + end + end + + defp put_or_noop_meta([], meta, true), do: meta + defp put_or_noop_meta([], _meta, false), do: :noop + + defp put_meta(meta, :state, state) do + if state in [:built, :loaded, :deleted] do + %{meta | state: state} + else + raise ArgumentError, "invalid state #{inspect(state)}" + end + end + + defp put_meta(meta, :source, source) do + %{meta | source: source} + end + + defp put_meta(meta, :prefix, prefix) do + %{meta | prefix: prefix} + end + + defp put_meta(meta, :context, context) do + %{meta | context: context} + end + + defp put_meta(_meta, key, _value) do + raise ArgumentError, "unknown meta key #{inspect(key)}" + end + + defp assert_struct!(module, %{__struct__: struct}) do + if struct != module do + raise ArgumentError, + "expected a homogeneous list containing the same struct, " <> + "got: #{inspect(module)} and #{inspect(struct)}" + else + true + end + end + + @doc """ + Loads previously dumped `data` in the given `format` into a schema. + + The first argument can be an embedded schema module, or a map (of types) and + determines the return value: a struct or a map, respectively. + + The second argument `data` specifies fields and values that are to be loaded. + It can be a map, a keyword list, or a `{fields, values}` tuple. Fields can be + atoms or strings. + + The third argument `format` is the format the data has been dumped as. 
For + example, databases may dump embedded to `:json`, this function allows such + dumped data to be put back into the schemas. If custom types are used, + Ecto will invoke the `c:Ecto.Type.embed_as/1` callback to decide if the data + should be loaded using `cast` or `load`. + + Fields that are not present in the schema (or `types` map) are ignored. + If any of the values has invalid type, an error is raised. + + Note that if you want to load data into a non-embedded schema that was + directly persisted into a given repository, then use `c:Ecto.Repo.load/2`. + + ## Examples + + iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT users.settings FROM users", []) + iex> Enum.map(result.rows, fn [settings] -> Ecto.embedded_load(Setting, Jason.decode!(settings), :json) end) + [%Setting{...}, ...] + """ + @spec embedded_load( + module_or_map :: module | map(), + data :: map(), + format :: atom() + ) :: Ecto.Schema.t() | map() + def embedded_load(schema_or_types, data, format) do + Ecto.Schema.Loader.unsafe_load( + schema_or_types, + data, + &Ecto.Type.embedded_load(&1, &2, format) + ) + end + + @doc """ + Dumps the given struct defined by an embedded schema. + + This converts the given embedded schema to a map to be serialized + with the given format. For example: + + iex> Ecto.embedded_dump(%Post{}, :json) + %{title: "hello"} + + """ + @spec embedded_dump(Ecto.Schema.t(), format :: atom()) :: map() + def embedded_dump(%schema{} = data, format) do + Ecto.Schema.Loader.safe_dump( + data, + schema.__schema__(:dump), + &Ecto.Type.embedded_dump(&1, &2, format) + ) + end +end diff --git a/deps/ecto/lib/ecto/adapter.ex b/deps/ecto/lib/ecto/adapter.ex new file mode 100644 index 0000000..a846495 --- /dev/null +++ b/deps/ecto/lib/ecto/adapter.ex @@ -0,0 +1,142 @@ +defmodule Ecto.Adapter do + @moduledoc """ + Specifies the minimal API required from adapters. + """ + + @type t :: module + + @typedoc """ + The metadata returned by the adapter `c:init/1`. 
+ + It must be a map and Ecto itself will always inject + two keys into the meta: + + * the `:cache` key, which as ETS table that can be used as a cache (if available) + * the `:pid` key, which is the PID returned by the child spec returned in `c:init/1` + + """ + @type adapter_meta :: %{optional(:stacktrace) => boolean(), optional(any()) => any()} + + @doc """ + The callback invoked in case the adapter needs to inject code. + """ + @macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t() + + @doc """ + Ensure all applications necessary to run the adapter are started. + """ + @callback ensure_all_started( + config :: Keyword.t(), + type :: :permanent | :transient | :temporary + ) :: + {:ok, [atom]} | {:error, atom} + + @doc """ + Initializes the adapter supervision tree by returning the children and adapter metadata. + """ + @callback init(config :: Keyword.t()) :: {:ok, :supervisor.child_spec(), adapter_meta} + + @doc """ + Checks out a connection for the duration of the given function. + + In case the adapter provides a pool, this guarantees all of the code + inside the given `fun` runs against the same connection, which + might improve performance by for instance allowing multiple related + calls to the datastore to share cache information: + + Repo.checkout(fn -> + for _ <- 1..100 do + Repo.insert!(%Post{}) + end + end) + + If the adapter does not provide a pool, just calling the passed function + and returning its result are enough. + + If the adapter provides a pool, it is supposed to "check out" one of the + pool connections for the duration of the function call. Which connection + is checked out is not passed to the calling function, so it should be done + using a stateful method like using the current process' dictionary, process + tracking, or some kind of other lookup method. Make sure that this stored + connection is then used in the other callbacks implementations, such as + `Ecto.Adapter.Queryable` and `Ecto.Adapter.Schema`. 
+ """ + @callback checkout(adapter_meta, config :: Keyword.t(), (-> result)) :: result when result: var + + @doc """ + Returns true if a connection has been checked out. + """ + @callback checked_out?(adapter_meta) :: boolean + + @doc """ + Returns the loaders for a given type. + + It receives the primitive type and the Ecto type (which may be + primitive as well). It returns a list of loaders with the given + type usually at the end. + + This allows developers to properly translate values coming from + the adapters into Ecto ones. For example, if the database does not + support booleans but instead returns 0 and 1 for them, you could + add: + + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(_primitive, type), do: [type] + + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + + All adapters are required to implement a clause for `:binary_id` types, + since they are adapter specific. If your adapter does not provide binary + ids, you may simply use `Ecto.UUID`: + + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_primitive, type), do: [type] + + """ + @callback loaders(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) :: + [(term -> {:ok, term} | :error) | Ecto.Type.t()] + + @doc """ + Returns the dumpers for a given type. + + It receives the primitive type and the Ecto type (which may be + primitive as well). It returns a list of dumpers with the given + type usually at the beginning. + + This allows developers to properly translate values coming from + the Ecto into adapter ones. For example, if the database does not + support booleans but instead returns 0 and 1 for them, you could + add: + + def dumpers(:boolean, type), do: [type, &bool_encode/1] + def dumpers(_primitive, type), do: [type] + + defp bool_encode(false), do: {:ok, 0} + defp bool_encode(true), do: {:ok, 1} + + All adapters are required to implement a clause for :binary_id types, + since they are adapter specific. 
If your adapter does not provide + binary ids, you may simply use `Ecto.UUID`: + + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_primitive, type), do: [type] + + """ + @callback dumpers(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) :: + [(term -> {:ok, term} | :error) | Ecto.Type.t()] + + @doc """ + Returns the adapter metadata from its `c:init/1` callback. + + It expects a process name of a repository. The name is either + an atom or a PID. For a given repository, you often want to + call this function based on the repository dynamic repo: + + Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + + """ + def lookup_meta(repo_name_or_pid) do + Ecto.Repo.Registry.lookup(repo_name_or_pid) + end +end diff --git a/deps/ecto/lib/ecto/adapter/queryable.ex b/deps/ecto/lib/ecto/adapter/queryable.ex new file mode 100644 index 0000000..9d0a62d --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/queryable.ex @@ -0,0 +1,127 @@ +defmodule Ecto.Adapter.Queryable do + @moduledoc """ + Specifies the query API required from adapters. + + If your adapter is only able to respond to one or a couple of the query functions, + add custom implementations of those functions directly to the Repo + by using `c:Ecto.Adapter.__before_compile__/1` instead. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @typedoc "Ecto.Query metadata fields (stored in cache)" + @type query_meta :: %{sources: tuple, preloads: term, select: map} + + @typedoc """ + Cache query metadata that is passed to `c:execute/5`. + + The cache can be in 3 states, documented below. + + If `{:nocache, prepared}` is given, it means the query was + not and cannot be cached. The `prepared` value is the value + returned by `c:prepare/2`. + + If `{:cache, cache_function, prepared}` is given, it means + the query can be cached and it must be cached by calling + the `cache_function` function with the cache entry of your + choice. 
Once `cache_function` is called, the next time the + same query is given to `c:execute/5`, it will receive the + `:cached` tuple. + + If `{:cached, update_function, reset_function, cached}` is + given, it means the query has been cached. You may call + `update_function/1` if you want to update the cached result. + Or you may call `reset_function/1`, with a new prepared query, + to force the query to be cached again. If `reset_function/1` + is called, the next time the same query is given to + `c:execute/5`, it will receive the `:cache` tuple. + """ + @type query_cache :: {:nocache, prepared} + | {:cache, cache_function :: (cached -> :ok), prepared} + | {:cached, update_function :: (cached -> :ok), reset_function :: (prepared -> :ok), cached} + + @type prepared :: term + @type cached :: term + @type options :: Keyword.t() + @type selected :: term + + @doc """ + Commands invoked to prepare a query. + + It is used on `c:Ecto.Repo.all/2`, `c:Ecto.Repo.update_all/3`, + and `c:Ecto.Repo.delete_all/2`. It returns a tuple, indicating if + this query can be cached or not, and the `prepared` query. + The `prepared` query is any term that will be passed to the + adapter's `c:execute/5`. + """ + @callback prepare(atom :: :all | :update_all | :delete_all, query :: Ecto.Query.t()) :: + {:cache, prepared} | {:nocache, prepared} + + @doc """ + Executes a previously prepared query. + + The `query_meta` field is a map containing some of the fields + found in the `Ecto.Query` struct, after they have been normalized. + For example, the values `selected` by the query, which then have + to be returned, can be found in `query_meta`. + + The `query_cache` and its state is documented in `t:query_cache/0`. + + The `params` is the list of query parameters. For example, for + a query such as `from Post, where: [id: ^123]`, `params` will be + `[123]`. + + Finally, `options` is a keyword list of options given to the + `Repo` operation that triggered the adapter call. 
Any option is + allowed, as this is a mechanism to allow users of Ecto to customize + how the adapter behaves per operation. + + It must return a tuple containing the number of entries and + the result set as a list of lists. The entries in the actual + list will depend on what has been selected by the query. The + result set may also be `nil`, if no value is being selected. + """ + @callback execute(adapter_meta, query_meta, query_cache, params :: list(), options) :: + {non_neg_integer, [[selected]] | nil} + + @doc """ + Streams a previously prepared query. + + See `c:execute/5` for a description of arguments. + + It returns a stream of values. + """ + @callback stream(adapter_meta, query_meta, query_cache, params :: list(), options) :: + Enumerable.t + + @doc """ + Plans and prepares a query for the given repo, leveraging its query cache. + + This operation uses the query cache if one is available. + """ + def prepare_query(operation, repo_name_or_pid, queryable) do + %{adapter: adapter, cache: cache} = Ecto.Repo.Registry.lookup(repo_name_or_pid) + + {_meta, prepared, _cast_params, dump_params} = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(operation == :all) + |> Ecto.Query.Planner.query(operation, cache, adapter, 0) + + {prepared, dump_params} + end + + @doc """ + Plans a query using the given adapter. + + This does not expect the repository and therefore does not leverage the cache. 
+ """ + def plan_query(operation, adapter, queryable) do + query = Ecto.Queryable.to_query(queryable) + {query, params, _key} = Ecto.Query.Planner.plan(query, operation, adapter) + {cast_params, dump_params} = Enum.unzip(params) + {query, _} = Ecto.Query.Planner.normalize(query, operation, adapter, 0) + {query, cast_params, dump_params} + end +end diff --git a/deps/ecto/lib/ecto/adapter/schema.ex b/deps/ecto/lib/ecto/adapter/schema.ex new file mode 100644 index 0000000..57c7bca --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/schema.ex @@ -0,0 +1,92 @@ +defmodule Ecto.Adapter.Schema do + @moduledoc """ + Specifies the schema API required from adapters. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @typedoc "Ecto.Schema metadata fields" + @type schema_meta :: %{ + autogenerate_id: {schema_field :: atom, source_field :: atom, Ecto.Type.t()}, + context: term, + prefix: binary | nil, + schema: atom, + source: binary + } + + @type fields :: Keyword.t() + @type filters :: Keyword.t() + @type constraints :: Keyword.t() + @type returning :: [atom] + @type placeholders :: [term] + @type options :: Keyword.t() + + @type on_conflict :: + {:raise, list(), []} + | {:nothing, list(), [atom]} + | {[atom], list(), [atom]} + | {Ecto.Query.t(), list(), [atom]} + + @doc """ + Called to autogenerate a value for id/embed_id/binary_id. + + Returns the autogenerated value, or nil if it must be + autogenerated inside the storage or raise if not supported. + """ + @callback autogenerate(field_type :: :id | :binary_id | :embed_id) :: term | nil + + @doc """ + Inserts multiple entries into the data store. + + In case an `Ecto.Query` given as any of the field values by the user, + it will be sent to the adapter as a tuple with in the shape of + `{query, params}`. 
+ """ + @callback insert_all( + adapter_meta, + schema_meta, + header :: [atom], + [[{atom, term | {Ecto.Query.t(), list()}}]], + on_conflict, + returning, + placeholders, + options + ) :: {non_neg_integer, [[term]] | nil} + + @doc """ + Inserts a single new struct in the data store. + + ## Autogenerate + + The primary key will be automatically included in `returning` if the + field has type `:id` or `:binary_id` and no value was set by the + developer or none was autogenerated by the adapter. + """ + @callback insert(adapter_meta, schema_meta, fields, on_conflict, returning, options) :: + {:ok, fields} | {:invalid, constraints} + + @doc """ + Updates a single struct with the given filters. + + While `filters` can be any record column, it is expected that + at least the primary key (or any other key that uniquely + identifies an existing record) be given as a filter. Therefore, + in case there is no record matching the given filters, + `{:error, :stale}` is returned. + """ + @callback update(adapter_meta, schema_meta, fields, filters, returning, options) :: + {:ok, fields} | {:invalid, constraints} | {:error, :stale} + + @doc """ + Deletes a single struct with the given filters. + + While `filters` can be any record column, it is expected that + at least the primary key (or any other key that uniquely + identifies an existing record) be given as a filter. Therefore, + in case there is no record matching the given filters, + `{:error, :stale}` is returned. + """ + @callback delete(adapter_meta, schema_meta, filters, returning, options) :: + {:ok, fields} | {:invalid, constraints} | {:error, :stale} +end diff --git a/deps/ecto/lib/ecto/adapter/storage.ex b/deps/ecto/lib/ecto/adapter/storage.ex new file mode 100644 index 0000000..7590da2 --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/storage.ex @@ -0,0 +1,53 @@ +defmodule Ecto.Adapter.Storage do + @moduledoc """ + Specifies the adapter storage API. + """ + + @doc """ + Creates the storage given by options. 
+ + Returns `:ok` if it was created successfully. + + Returns `{:error, :already_up}` if the storage has already been created or + `{:error, term}` in case anything else goes wrong. + + ## Examples + + storage_up(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_up(options :: Keyword.t) :: :ok | {:error, :already_up} | {:error, term} + + @doc """ + Drops the storage given by options. + + Returns `:ok` if it was dropped successfully. + + Returns `{:error, :already_down}` if the storage has already been dropped or + `{:error, term}` in case anything else goes wrong. + + ## Examples + + storage_down(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_down(options :: Keyword.t) :: :ok | {:error, :already_down} | {:error, term} + + @doc """ + Returns the status of a storage given by options. + + Can return `:up`, `:down` or `{:error, term}` in case anything goes wrong. + + ## Examples + + storage_status(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_status(options :: Keyword.t()) :: :up | :down | {:error, term()} +end diff --git a/deps/ecto/lib/ecto/adapter/transaction.ex b/deps/ecto/lib/ecto/adapter/transaction.ex new file mode 100644 index 0000000..8624b55 --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/transaction.ex @@ -0,0 +1,31 @@ +defmodule Ecto.Adapter.Transaction do + @moduledoc """ + Specifies the adapter transactions API. + """ + + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @doc """ + Runs the given function inside a transaction. + + Returns `{:ok, value}` if the transaction was successful where `value` + is the value returned by the function or `{:error, value}` if the transaction + was rolled back where `value` is the value given to `rollback/1`. 
+ """ + @callback transaction(adapter_meta, options :: Keyword.t(), function :: fun) :: + {:ok, any} | {:error, any} + + @doc """ + Returns true if the given process is inside a transaction. + """ + @callback in_transaction?(adapter_meta) :: boolean + + @doc """ + Rolls back the current transaction. + + The transaction will return the value given as `{:error, value}`. + + See `c:Ecto.Repo.rollback/1`. + """ + @callback rollback(adapter_meta, value :: any) :: no_return +end diff --git a/deps/ecto/lib/ecto/application.ex b/deps/ecto/lib/ecto/application.ex new file mode 100644 index 0000000..8da32e7 --- /dev/null +++ b/deps/ecto/lib/ecto/application.ex @@ -0,0 +1,13 @@ +defmodule Ecto.Application do + @moduledoc false + use Application + + def start(_type, _args) do + children = [ + Ecto.Repo.Registry + ] + + opts = [strategy: :one_for_one, name: Ecto.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/deps/ecto/lib/ecto/association.ex b/deps/ecto/lib/ecto/association.ex new file mode 100644 index 0000000..d5f7a29 --- /dev/null +++ b/deps/ecto/lib/ecto/association.ex @@ -0,0 +1,1689 @@ +import Ecto.Query, only: [from: 1, from: 2, join: 4, join: 5, distinct: 3, where: 3] + +defmodule Ecto.Association.NotLoaded do + @moduledoc """ + Struct returned by associations when they are not loaded. 
+ + The fields are: + + * `__field__` - the association field in `owner` + * `__owner__` - the schema that owns the association + * `__cardinality__` - the cardinality of the association + """ + + @type t :: %__MODULE__{ + __field__: atom(), + __owner__: any(), + __cardinality__: atom() + } + + defstruct [:__field__, :__owner__, :__cardinality__] + + defimpl Inspect do + def inspect(not_loaded, _opts) do + msg = "association #{inspect(not_loaded.__field__)} is not loaded" + ~s(#Ecto.Association.NotLoaded<#{msg}>) + end + end +end + +defmodule Ecto.Association do + @moduledoc false + + @type t :: %{ + required(:__struct__) => atom, + required(:on_cast) => nil | fun, + required(:cardinality) => :one | :many, + required(:relationship) => :parent | :child, + required(:owner) => atom, + required(:owner_key) => atom, + required(:field) => atom, + required(:unique) => boolean, + optional(atom) => any + } + + alias Ecto.Query.Builder.OrderBy + + @doc """ + Builds the association struct. + + The struct must be defined in the module that implements the + callback and it must contain at least the following keys: + + * `:cardinality` - tells if the association is one to one + or one/many to many + + * `:field` - tells the field in the owner struct where the + association should be stored + + * `:owner` - the owner module of the association + + * `:owner_key` - the key in the owner with the association value + + * `:relationship` - if the relationship to the specified schema is + of a `:child` or a `:parent` + + """ + @callback struct(module, field :: atom, opts :: Keyword.t()) :: t + + @doc """ + Invoked after the schema is compiled to validate associations. + + Useful for checking if associated modules exist without running + into deadlocks. + """ + @callback after_verify_validation(t) :: :ok | {:error, String.t()} + + @doc """ + Builds a struct for the given association. + + The struct to build from is given as argument in case default values + should be set in the struct. 
+ + Invoked by `Ecto.build_assoc/3`. + """ + @callback build(t, owner :: Ecto.Schema.t(), %{atom => term} | [Keyword.t()]) :: Ecto.Schema.t() + + @doc """ + Returns an association join query. + + This callback receives the association struct and it must return + a query that retrieves all associated entries using joins up to + the owner association. + + For example, a `has_many :comments` inside a `Post` module would + return: + + from c in Comment, join: p in Post, on: c.post_id == p.id + + Note all the logic must be expressed inside joins, as fields like + `where` and `order_by` won't be used by the caller. + + This callback is invoked when `join: assoc(p, :comments)` is used + inside queries. + """ + @callback joins_query(t) :: Ecto.Query.t() + + @doc """ + Returns the association query on top of the given query. + + If the query is `nil`, the association target must be used. + + This callback receives the association struct and it must return + a query that retrieves all associated entries with the given + values for the owner key. + + This callback is used by `Ecto.assoc/2` and when preloading. + """ + @callback assoc_query(t, Ecto.Query.t() | nil, values :: [term]) :: Ecto.Query.t() + + @doc """ + Returns information used by the preloader. + """ + @callback preload_info(t) :: + {:assoc, t, {integer, atom} | {integer, atom, Ecto.Type.t()}} + | {:through, t, [atom]} + + @doc """ + Performs the repository change on the association. + + Receives the parent changeset, the current changesets + and the repository action options. Must return the + persisted struct (or nil) or the changeset error. + """ + @callback on_repo_change( + t, + parent :: Ecto.Changeset.t(), + changeset :: Ecto.Changeset.t(), + Ecto.Adapter.t(), + Keyword.t() + ) :: + {:ok, Ecto.Schema.t() | nil} | {:error, Ecto.Changeset.t()} + + @doc """ + Retrieves the association from the given schema. 
+ """ + def association_from_schema!(schema, assoc) do + schema.__schema__(:association, assoc) || + raise ArgumentError, "schema #{inspect(schema)} does not have association #{inspect(assoc)}" + end + + @doc """ + Returns the association key for the given module with the given suffix. + + ## Examples + + iex> Ecto.Association.association_key(Hello.World, :id) + :world_id + + iex> Ecto.Association.association_key(Hello.HTTP, :id) + :http_id + + iex> Ecto.Association.association_key(Hello.HTTPServer, :id) + :http_server_id + + """ + def association_key(module, suffix) do + prefix = module |> Module.split() |> List.last() |> Macro.underscore() + :"#{prefix}_#{suffix}" + end + + @doc """ + Build an association query through the given associations from the specified owner table + and through the given associations. Finally filter by the provided values of the owner_key of + the first relationship in the chain. Used in Ecto.assoc/2. + """ + def filter_through_chain(owner, through, values) do + chain_through(owner, through, nil, values) + |> distinct([x], true) + end + + @doc """ + Join the target table given a list of associations to go through starting from the owner table. + """ + def join_through_chain(owner, through, query) do + chain_through(owner, through, query, nil) + end + + # This function is used by both join_through_chain/3 and filter_through_chain/3 since the algorithm for both + # is nearly identical barring a few differences. + defp chain_through(owner, through, join_to, values) do + # Flatten the chain of throughs. If any of the associations is a HasThrough this allows us to expand it so we have + # a list of atomic associations to join through. + {_, through} = flatten_through_chain(owner, through, []) + + # If we're joining then we're going forward from the owner table to the destination table. + # Otherwise we're going backward from the destination table then filtering by values. 
+ chain_direction = if(join_to != nil, do: :forward, else: :backward) + + # This stage produces a list of joins represented as a keyword list with the following structure: + # [ + # [schema: (The Schema), in_key: (The key used to join into the table), out_key: (The key used to join with the next), where: (The condition KW list)] + # ] + relation_list = resolve_through_tables(owner, through, chain_direction) + + # Filter out the joins which are redundant + filtered_list = + Enum.with_index(relation_list) + |> Enum.filter(fn + # We always keep the first table in the chain since it's our source table for the query + {_, 0} -> + true + + {rel, _} -> + # If the condition is not empty we need to join to the table. Otherwise if the in_key and out_key is the same + # then this join is redundant since we can just join to the next table in the chain. + rel.in_key != rel.out_key or rel.where != [] + end) + |> Enum.map(&elem(&1, 0)) + + # If we're preloading we don't need the last table since it is the owner table. + filtered_list = if(join_to == nil, do: Enum.drop(filtered_list, -1), else: filtered_list) + + [source | joins] = filtered_list + + source_schema = source.schema + query = join_to || from(s in source_schema) + + counter = Ecto.Query.Builder.count_binds(query) - 1 + + # We need to create the query by joining all the tables, and also we need the out_key of the final table to use + # for the final WHERE clause with values. 
+ {_, query, _, dest_out_key} = + Enum.reduce(joins, {source, query, counter, source.out_key}, fn + curr_rel, {prev_rel, query, counter, _} -> + related_queryable = curr_rel.schema + + next = + join(query, :inner, [{src, counter}], dest in ^related_queryable, + on: field(src, ^prev_rel.out_key) == field(dest, ^curr_rel.in_key) + ) + |> combine_joins_query(curr_rel.where, counter + 1) + + {curr_rel, next, counter + 1, curr_rel.out_key} + end) + + final_bind = Ecto.Query.Builder.count_binds(query) - 1 + + values = List.wrap(values) + + query = + case {join_to, values} do + {nil, [single_value]} -> + where(query, [{dest, final_bind}], field(dest, ^dest_out_key) == ^single_value) + + {nil, values} -> + where(query, [{dest, final_bind}], field(dest, ^dest_out_key) in ^values) + + {_, _} -> + query + end + + combine_assoc_query(query, source.where || []) + end + + defp flatten_through_chain(owner, [], acc), do: {owner, acc} + + defp flatten_through_chain(owner, [assoc | tl], acc) do + refl = association_from_schema!(owner, assoc) + + case refl do + %{through: nested_throughs} -> + {owner, acc} = flatten_through_chain(owner, nested_throughs, acc) + flatten_through_chain(owner, tl, acc) + + _ -> + flatten_through_chain(refl.related, tl, acc ++ [assoc]) + end + end + + defp resolve_through_tables(owner, through, :backward) do + # This step generates a list of maps with the following keys: + # [ + # %{schema: ..., out_key: ..., in_key: ..., where: ...} + # ] + # This is a list of all tables that we will need to join to follow the chain of throughs and which key is used + # to join in and out of the table, along with the where condition for that table. The final table of the chain will + # be "owner", and the first table of the chain will be the final destination table of all the throughs. 
+ initial_owner_map = %{schema: owner, out_key: nil, in_key: nil, where: nil} + + Enum.reduce(through, {owner, [initial_owner_map]}, fn assoc, {owner, table_list} -> + refl = association_from_schema!(owner, assoc) + [owner_map | table_list] = table_list + + table_list = + case refl do + %{ + join_through: join_through, + join_keys: join_keys, + join_where: join_where, + where: where + } -> + [{owner_join_key, owner_key}, {related_join_key, related_key}] = join_keys + + owner_map = %{owner_map | in_key: owner_key} + + join_map = %{ + schema: join_through, + out_key: owner_join_key, + in_key: related_join_key, + where: join_where + } + + related_map = %{schema: refl.related, out_key: related_key, in_key: nil, where: where} + + [related_map, join_map, owner_map | table_list] + + _ -> + owner_map = %{owner_map | in_key: refl.owner_key} + + related_map = %{ + schema: refl.related, + out_key: refl.related_key, + in_key: nil, + where: refl.where + } + + [related_map, owner_map | table_list] + end + + {refl.related, table_list} + end) + |> elem(1) + end + + defp resolve_through_tables(owner, through, :forward) do + # In the forward case (joining) we need to reverse the list and swap the in_key for the out_key + # since we've changed directions. + resolve_through_tables(owner, through, :backward) + |> Enum.reverse() + |> Enum.map(fn %{out_key: out_key, in_key: in_key} = join -> + %{join | out_key: in_key, in_key: out_key} + end) + end + + @doc """ + Add the default assoc query where clauses to a join. + + This handles only `where` and converts it to a `join`, + as that is the only information propagate in join queries. 
+ """ + def combine_joins_query(query, [], _binding), do: query + + def combine_joins_query(%{joins: joins} = query, [_ | _] = conditions, binding) do + {joins, [join_expr]} = Enum.split(joins, -1) + %{on: %{params: params, expr: expr} = join_on} = join_expr + {expr, params} = expand_where(conditions, expr, Enum.reverse(params), length(params), binding) + %{query | joins: joins ++ [%{join_expr | on: %{join_on | expr: expr, params: params}}]} + end + + @doc """ + Add the default assoc query where clauses a provided query. + """ + def combine_assoc_query(query, []), do: query + + def combine_assoc_query(%{wheres: []} = query, conditions) do + {expr, params} = expand_where(conditions, true, [], 0, 0) + + bool_expr = %Ecto.Query.BooleanExpr{ + op: :and, + expr: expr, + params: params, + line: __ENV__.line, + file: __ENV__.file + } + + %{query | wheres: [bool_expr]} + end + + def combine_assoc_query(%{wheres: wheres} = query, conditions) do + {wheres, [where_expr]} = Enum.split(wheres, -1) + %{params: params, expr: expr} = where_expr + {expr, params} = expand_where(conditions, expr, Enum.reverse(params), length(params), 0) + %{query | wheres: wheres ++ [%{where_expr | expr: expr, params: params}]} + end + + defp expand_where(conditions, expr, params, counter, binding) do + conjoin_exprs = fn + true, r -> r + l, r -> {:and, [], [l, r]} + end + + {expr, params, _counter} = + Enum.reduce(conditions, {expr, params, counter}, fn + {key, nil}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:is_nil, [], [to_field(binding, key)]}) + {expr, params, counter} + + {key, {:not, nil}}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:not, [], [{:is_nil, [], [to_field(binding, key)]}]}) + {expr, params, counter} + + {key, {:fragment, frag}}, {expr, params, counter} when is_binary(frag) -> + pieces = Ecto.Query.Builder.fragment_pieces(frag, [to_field(binding, key)]) + expr = conjoin_exprs.(expr, {:fragment, [], pieces}) + {expr, params, counter} + + {key, {:in, 
value}}, {expr, params, counter} when is_list(value) -> + expr = conjoin_exprs.(expr, {:in, [], [to_field(binding, key), {:^, [], [counter]}]}) + {expr, [{value, {:in, {binding, key}}} | params], counter + 1} + + {key, value}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:==, [], [to_field(binding, key), {:^, [], [counter]}]}) + {expr, [{value, {binding, key}} | params], counter + 1} + end) + + {expr, Enum.reverse(params)} + end + + defp to_field(binding, field), + do: {{:., [], [{:&, [], [binding]}, field]}, [], []} + + @doc """ + Build a join query with the given `through` associations starting at `counter`. + """ + def joins_query(query, through, counter) do + Enum.reduce(through, {query, counter}, fn current, {acc, counter} -> + query = join(acc, :inner, [{x, counter}], assoc(x, ^current)) + {query, counter + 1} + end) + |> elem(0) + end + + @doc """ + Retrieves related module from queryable. + + ## Examples + + iex> Ecto.Association.related_from_query({"custom_source", Schema}, :comments_v1) + Schema + + iex> Ecto.Association.related_from_query(Schema, :comments_v1) + Schema + + iex> Ecto.Association.related_from_query("wrong", :comments_v1) + ** (ArgumentError) association :comments_v1 queryable must be a schema or a {source, schema}. got: "wrong" + """ + def related_from_query(atom, _name) when is_atom(atom), do: atom + + def related_from_query({source, schema}, _name) when is_binary(source) and is_atom(schema), + do: schema + + def related_from_query(queryable, name) do + raise ArgumentError, + "association #{inspect(name)} queryable must be a schema or " <> + "a {source, schema}. got: #{inspect(queryable)}" + end + + @doc """ + Applies default values into the struct. 
+ """ + def apply_defaults(struct, defaults, _owner) when is_list(defaults) do + struct(struct, defaults) + end + + def apply_defaults(struct, {mod, fun, args}, owner) do + apply(mod, fun, [struct.__struct__(), owner | args]) + end + + @doc """ + Validates `defaults` for association named `name`. + """ + def validate_defaults!(_module, _name, {mod, fun, args} = defaults) + when is_atom(mod) and is_atom(fun) and is_list(args), + do: defaults + + def validate_defaults!(module, _name, fun) when is_atom(fun), + do: {module, fun, []} + + def validate_defaults!(_module, _name, defaults) when is_list(defaults), + do: defaults + + def validate_defaults!(_module, name, defaults) do + raise ArgumentError, + "expected defaults for #{inspect(name)} to be a keyword list " <> + "or a {module, fun, args} tuple, got: `#{inspect(defaults)}`" + end + + @doc """ + Validates `preload_order` for association named `name`. + """ + def validate_preload_order!(_name, {mod, fun, args} = preload_order) + when is_atom(mod) and is_atom(fun) and is_list(args), + do: preload_order + + def validate_preload_order!(name, preload_order) when is_list(preload_order) do + Enum.map(preload_order, fn + field when is_atom(field) -> + field + + {direction, field} when is_atom(direction) and is_atom(field) -> + unless OrderBy.valid_direction?(direction) do + raise ArgumentError, + "expected `:preload_order` for #{inspect(name)} to be a keyword list or a list of atoms/fields, " <> + "got: `#{inspect(preload_order)}`, " <> + "`#{inspect(direction)}` is not a valid direction" + end + + {direction, field} + + item -> + raise ArgumentError, + "expected `:preload_order` for #{inspect(name)} to be a keyword list or a list of atoms/fields, " <> + "got: `#{inspect(preload_order)}`, " <> + "`#{inspect(item)}` is not valid" + end) + end + + def validate_preload_order!(name, preload_order) do + raise ArgumentError, + "expected `:preload_order` for #{inspect(name)} to be a keyword list, a list of atoms/fields " <> + "or 
a {Mod, fun, args} tuple, got: `#{inspect(preload_order)}`" + end + + @doc """ + Merges source from query into to the given schema. + + In case the query does not have a source, returns + the schema unchanged. + """ + def merge_source(schema, query) + + def merge_source(%{__meta__: %{source: source}} = struct, {source, _}) do + struct + end + + def merge_source(struct, {source, _}) do + Ecto.put_meta(struct, source: source) + end + + def merge_source(struct, _query) do + struct + end + + @doc """ + Updates the prefix of a changeset based on the metadata. + """ + def update_parent_prefix( + %{data: %{__meta__: %{prefix: prefix}}} = changeset, + %{__meta__: %{prefix: prefix}} + ), + do: changeset + + def update_parent_prefix( + %{data: %{__meta__: %{prefix: nil}}} = changeset, + %{__meta__: %{prefix: prefix}} + ), + do: update_in(changeset.data, &Ecto.put_meta(&1, prefix: prefix)) + + def update_parent_prefix(changeset, _), + do: changeset + + @doc """ + Performs the repository action in the related changeset, + returning `{:ok, data}` or `{:error, changes}`. + """ + def on_repo_change(%{data: struct}, [], _adapter, _opts) do + {:ok, struct} + end + + def on_repo_change(changeset, assocs, adapter, opts) do + %{data: struct, changes: changes, action: action} = changeset + + {struct, changes, _halt, valid?} = + Enum.reduce(assocs, {struct, changes, false, true}, fn {refl, value}, acc -> + on_repo_change(refl, value, changeset, action, adapter, opts, acc) + end) + + case valid? 
do + true -> {:ok, struct} + false -> {:error, changes} + end + end + + defp on_repo_change( + %{cardinality: :one, field: field} = meta, + nil, + parent_changeset, + _repo_action, + adapter, + opts, + {parent, changes, halt, valid?} + ) do + if not halt, do: maybe_replace_one!(meta, nil, parent, parent_changeset, adapter, opts) + {Map.put(parent, field, nil), Map.put(changes, field, nil), halt, valid?} + end + + defp on_repo_change( + %{cardinality: :one, field: field, __struct__: mod} = meta, + %{action: action, data: current} = changeset, + parent_changeset, + repo_action, + adapter, + opts, + {parent, changes, halt, valid?} + ) do + check_action!(meta, action, repo_action) + if not halt, do: maybe_replace_one!(meta, current, parent, parent_changeset, adapter, opts) + + case on_repo_change_unless_halted(halt, mod, meta, parent_changeset, changeset, adapter, opts) do + {:ok, struct} -> + {Map.put(parent, field, struct), Map.put(changes, field, changeset), halt, valid?} + + {:error, error_changeset} -> + {parent, Map.put(changes, field, error_changeset), + halted?(halt, changeset, error_changeset), false} + end + end + + defp on_repo_change( + %{cardinality: :many, field: field, __struct__: mod} = meta, + changesets, + parent_changeset, + repo_action, + adapter, + opts, + {parent, changes, halt, all_valid?} + ) do + {changesets, structs, halt, valid?} = + Enum.reduce(changesets, {[], [], halt, true}, fn + %{action: action} = changeset, {changesets, structs, halt, valid?} -> + check_action!(meta, action, repo_action) + + case on_repo_change_unless_halted( + halt, + mod, + meta, + parent_changeset, + changeset, + adapter, + opts + ) do + {:ok, nil} -> + {[changeset | changesets], structs, halt, valid?} + + {:ok, struct} -> + {[changeset | changesets], [struct | structs], halt, valid?} + + {:error, error_changeset} -> + {[error_changeset | changesets], structs, halted?(halt, changeset, error_changeset), + false} + end + end) + + if valid? 
do + {Map.put(parent, field, Enum.reverse(structs)), + Map.put(changes, field, Enum.reverse(changesets)), halt, all_valid?} + else + {parent, Map.put(changes, field, Enum.reverse(changesets)), halt, false} + end + end + + defp check_action!(%{related: schema}, :delete, :insert) do + raise ArgumentError, + "got action :delete in changeset for associated #{inspect(schema)} while inserting" + end + + defp check_action!(_, _, _), do: :ok + + defp halted?(true, _, _), do: true + defp halted?(_, %{valid?: true}, %{valid?: false}), do: true + defp halted?(_, _, _), do: false + + defp on_repo_change_unless_halted(true, _mod, _meta, _parent, changeset, _adapter, _opts) do + {:error, changeset} + end + + defp on_repo_change_unless_halted(false, mod, meta, parent, changeset, adapter, opts) do + mod.on_repo_change(meta, parent, changeset, adapter, opts) + end + + defp maybe_replace_one!( + %{field: field, __struct__: mod} = meta, + current, + parent, + parent_changeset, + adapter, + opts + ) do + previous = Map.get(parent, field) + + if replaceable?(previous) and primary_key!(previous) != primary_key!(current) do + changeset = %{Ecto.Changeset.change(previous) | action: :replace} + + case mod.on_repo_change(meta, parent_changeset, changeset, adapter, opts) do + {:ok, _} -> + :ok + + {:error, changeset} -> + raise Ecto.InvalidChangesetError, action: changeset.action, changeset: changeset + end + end + end + + defp maybe_replace_one!(_, _, _, _, _, _), do: :ok + + defp replaceable?(nil), do: false + defp replaceable?(%Ecto.Association.NotLoaded{}), do: false + defp replaceable?(%{__meta__: %{state: :built}}), do: false + defp replaceable?(_), do: true + + defp primary_key!(nil), do: [] + defp primary_key!(struct), do: Ecto.primary_key!(struct) +end + +defmodule Ecto.Association.Has do + @moduledoc """ + The association struct for `has_one` and `has_many` associations. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is associated + * `owner_key` - The key on the `owner` schema used for the association + * `related_key` - The key on the `related` schema used for the association + * `queryable` - The real query to use for querying association + * `on_delete` - The action taken on associations when schema is deleted + * `on_replace` - The action taken on associations when schema is replaced + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default is `:child` + * `preload_order` - Default `order_by` of the association, used only by preload + """ + + @behaviour Ecto.Association + @on_delete_opts [:nothing, :nilify_all, :delete_all] + @on_replace_opts [:raise, :mark_as_invalid, :delete, :delete_if_exists, :nilify] + @has_one_on_replace_opts @on_replace_opts ++ [:update] + defstruct [ + :cardinality, + :field, + :owner, + :related, + :owner_key, + :related_key, + :on_cast, + :queryable, + :on_delete, + :on_replace, + where: [], + unique: true, + defaults: [], + relationship: :child, + ordered: false, + preload_order: [] + ] + + @impl true + def after_verify_validation(%{queryable: queryable, related_key: related_key}) do + cond do + not is_atom(queryable) -> + :ok + + not Code.ensure_loaded?(queryable) -> + {:error, "associated schema #{inspect(queryable)} does not exist"} + + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect(queryable)} is not an Ecto schema"} + + is_nil(queryable.__schema__(:type, related_key)) -> + {:error, "associated schema #{inspect(queryable)} does not have field `#{related_key}`"} + + true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + queryable = Keyword.fetch!(opts, :queryable) + 
cardinality = Keyword.fetch!(opts, :cardinality) + related = Ecto.Association.related_from_query(queryable, name) + + ref = + module + |> Module.get_attribute(:primary_key) + |> get_ref(opts[:references], name) + + unless Module.get_attribute(module, :ecto_fields)[ref] do + raise ArgumentError, + "schema does not have the field #{inspect(ref)} used by " <> + "association #{inspect(name)}, please set the :references option accordingly" + end + + if opts[:through] do + raise ArgumentError, + "invalid association #{inspect(name)}. When using the :through " <> + "option, the schema should not be passed as second argument" + end + + on_delete = Keyword.get(opts, :on_delete, :nothing) + + unless on_delete in @on_delete_opts do + raise ArgumentError, + "invalid :on_delete option for #{inspect(name)}. " <> + "The only valid options are: " <> + Enum.map_join(@on_delete_opts, ", ", &"`#{inspect(&1)}`") + end + + on_replace = Keyword.get(opts, :on_replace, :raise) + on_replace_opts = if cardinality == :one, do: @has_one_on_replace_opts, else: @on_replace_opts + + unless on_replace in on_replace_opts do + raise ArgumentError, + "invalid `:on_replace` option for #{inspect(name)}. 
" <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect(&1)}`") + end + + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + preload_order = Ecto.Association.validate_preload_order!(name, opts[:preload_order] || []) + where = opts[:where] || [] + + unless is_list(where) do + raise ArgumentError, + "expected `:where` for #{inspect(name)} to be a keyword list, got: `#{inspect(where)}`" + end + + %__MODULE__{ + field: name, + cardinality: cardinality, + owner: module, + related: related, + owner_key: ref, + related_key: opts[:foreign_key] || Ecto.Association.association_key(module, ref), + queryable: queryable, + on_delete: on_delete, + on_replace: on_replace, + defaults: defaults, + where: where, + preload_order: preload_order + } + end + + defp get_ref(primary_key, nil, name) when primary_key in [nil, false] do + raise ArgumentError, + "need to set :references option for " <> + "association #{inspect(name)} when schema has no primary key" + end + + defp get_ref(primary_key, nil, _name), do: elem(primary_key, 0) + defp get_ref(_primary_key, references, _name), do: references + + @impl true + def build(%{owner_key: owner_key, related_key: related_key} = refl, owner, attributes) do + data = refl |> build(owner) |> struct(attributes) + %{data | related_key => Map.get(owner, owner_key)} + end + + @impl true + def joins_query( + %{related_key: related_key, owner: owner, owner_key: owner_key, queryable: queryable} = + assoc + ) do + from(o in owner, join: q in ^queryable, on: field(q, ^related_key) == field(o, ^owner_key)) + |> Ecto.Association.combine_joins_query(assoc.where, 1) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, [value]) do + from(x in (query || queryable), where: field(x, ^related_key) == ^value) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: 
queryable} = assoc, query, values) do + from(x in (query || queryable), where: field(x, ^related_key) in ^values) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def preload_info(%{related_key: related_key} = refl) do + {:assoc, refl, {0, related_key}} + end + + @impl true + def on_repo_change( + %{on_replace: :delete_if_exists} = refl, + parent_changeset, + %{action: :replace} = changeset, + adapter, + opts + ) do + try do + on_repo_change(%{refl | on_replace: :delete}, parent_changeset, changeset, adapter, opts) + rescue + Ecto.StaleEntryError -> {:ok, nil} + end + end + + def on_repo_change( + %{on_replace: on_replace} = refl, + %{data: parent} = parent_changeset, + %{action: :replace} = changeset, + adapter, + opts + ) do + changeset = + case on_replace do + :nilify -> %{changeset | action: :update} + :update -> %{changeset | action: :update} + :delete -> %{changeset | action: :delete} + end + + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case on_repo_change(refl, %{parent_changeset | data: nil}, changeset, adapter, opts) do + {:ok, _} -> {:ok, nil} + {:error, changeset} -> {:error, changeset} + end + end + + def on_repo_change(assoc, parent_changeset, changeset, _adapter, opts) do + %{data: parent, repo: repo} = parent_changeset + %{action: action, changes: changes} = changeset + + {key, value} = parent_key(assoc, parent) + changeset = update_parent_key(changeset, action, key, value) + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case apply(repo, action, [changeset, opts]) do + {:ok, _} = ok -> + if action == :delete, do: {:ok, nil}, else: ok + + {:error, changeset} -> + original = Map.get(changes, key) + {:error, put_in(changeset.changes[key], original)} + end + end + + defp update_parent_key(changeset, :delete, _key, _value), + do: changeset + + defp update_parent_key(changeset, _action, key, value), + do: Ecto.Changeset.put_change(changeset, key, value) + + defp 
parent_key(%{related_key: related_key}, nil) do + {related_key, nil} + end + + defp parent_key(%{owner_key: owner_key, related_key: related_key}, owner) do + {related_key, Map.get(owner, owner_key)} + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end + + ## On delete callbacks + + @doc false + def delete_all(refl, parent, repo_name, opts) do + if query = on_delete_query(refl, parent) do + Ecto.Repo.Queryable.delete_all(repo_name, query, opts) + end + end + + @doc false + def nilify_all(%{related_key: related_key} = refl, parent, repo_name, opts) do + if query = on_delete_query(refl, parent) do + Ecto.Repo.Queryable.update_all(repo_name, query, [set: [{related_key, nil}]], opts) + end + end + + defp on_delete_query( + %{owner_key: owner_key, related_key: related_key, queryable: queryable}, + parent + ) do + if value = Map.get(parent, owner_key) do + query = from x in queryable, where: field(x, ^related_key) == ^value + + parent + |> Ecto.get_meta(:prefix) + |> case do + nil -> query + prefix -> Ecto.Query.put_query_prefix(query, prefix) + end + end + end +end + +defmodule Ecto.Association.HasThrough do + @moduledoc """ + The association struct for `has_one` and `has_many` through associations. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `owner_key` - The key on the `owner` schema used for the association + * `through` - The through associations + * `relationship` - The relationship to the specified schema, default `:child` + """ + + @behaviour Ecto.Association + defstruct [ + :cardinality, + :field, + :owner, + :owner_key, + :through, + :on_cast, + relationship: :child, + unique: true, + ordered: false + ] + + @impl true + def after_verify_validation(_) do + :ok + end + + @impl true + def struct(module, name, opts) do + through = Keyword.fetch!(opts, :through) + + refl = + case through do + [h, _ | _] -> + Module.get_attribute(module, :ecto_assocs)[h] + + _ -> + raise ArgumentError, + ":through expects a list with at least two entries: " <> + "the association in the current module and one step through, got: #{inspect(through)}" + end + + unless refl do + raise ArgumentError, + "schema does not have the association #{inspect(hd(through))} " <> + "used by association #{inspect(name)}, please ensure the association exists and " <> + "is defined before the :through one" + end + + %__MODULE__{ + field: name, + cardinality: Keyword.fetch!(opts, :cardinality), + through: through, + owner: module, + owner_key: refl.owner_key + } + end + + @impl true + def build(%{field: name}, %{__struct__: owner}, _attributes) do + raise ArgumentError, + "cannot build through association `#{inspect(name)}` for #{inspect(owner)}. " <> + "Instead build the intermediate steps explicitly." + end + + @impl true + def preload_info(%{through: through} = refl) do + {:through, refl, through} + end + + @impl true + def on_repo_change(%{field: name}, _, _, _, _) do + raise ArgumentError, + "cannot insert/update/delete through associations `#{inspect(name)}` via the repository. " <> + "Instead build the intermediate steps explicitly." 
+ end + + @impl true + def joins_query(%{owner: owner, through: through}) do + Ecto.Association.join_through_chain(owner, through, from(x in owner)) + end + + @impl true + def assoc_query(%{owner: owner, through: through}, _, values) do + Ecto.Association.filter_through_chain(owner, through, values) + end +end + +defmodule Ecto.Association.BelongsTo do + @moduledoc """ + The association struct for a `belongs_to` association. + + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `owner_key` - The key on the `owner` schema used for the association + * `related` - The schema that is associated + * `related_key` - The key on the `related` schema used for the association + * `queryable` - The real query to use for querying association + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default `:parent` + * `on_replace` - The action taken on associations when schema is replaced + """ + + @behaviour Ecto.Association + @on_replace_opts [:raise, :mark_as_invalid, :delete, :delete_if_exists, :nilify, :update] + defstruct [ + :field, + :owner, + :related, + :owner_key, + :related_key, + :queryable, + :on_cast, + :on_replace, + where: [], + defaults: [], + cardinality: :one, + relationship: :parent, + unique: true, + ordered: false + ] + + @impl true + def after_verify_validation(%{queryable: queryable, related_key: related_key}) do + cond do + not is_atom(queryable) -> + :ok + + not Code.ensure_loaded?(queryable) -> + {:error, "associated schema #{inspect(queryable)} does not exist"} + + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect(queryable)} is not an Ecto schema"} + + is_nil(queryable.__schema__(:type, related_key)) -> + {:error, "associated schema #{inspect(queryable)} does not have field `#{related_key}`"} + 
+ true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + ref = if ref = opts[:references], do: ref, else: :id + queryable = Keyword.fetch!(opts, :queryable) + related = Ecto.Association.related_from_query(queryable, name) + on_replace = Keyword.get(opts, :on_replace, :raise) + + unless on_replace in @on_replace_opts do + raise ArgumentError, + "invalid `:on_replace` option for #{inspect(name)}. " <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect(&1)}`") + end + + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + where = opts[:where] || [] + + unless is_list(where) do + raise ArgumentError, + "expected `:where` for #{inspect(name)} to be a keyword list, got: `#{inspect(where)}`" + end + + %__MODULE__{ + field: name, + owner: module, + related: related, + owner_key: Keyword.fetch!(opts, :foreign_key), + related_key: ref, + queryable: queryable, + on_replace: on_replace, + defaults: defaults, + where: where + } + end + + @impl true + def build(refl, owner, attributes) do + refl + |> build(owner) + |> struct(attributes) + end + + @impl true + def joins_query( + %{related_key: related_key, owner: owner, owner_key: owner_key, queryable: queryable} = + assoc + ) do + from(o in owner, join: q in ^queryable, on: field(q, ^related_key) == field(o, ^owner_key)) + |> Ecto.Association.combine_joins_query(assoc.where, 1) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, [value]) do + from(x in (query || queryable), where: field(x, ^related_key) == ^value) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, values) do + from(x in (query || queryable), where: field(x, ^related_key) in ^values) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def preload_info(%{related_key: related_key} = 
refl) do + {:assoc, refl, {0, related_key}} + end + + @impl true + def on_repo_change(%{on_replace: :nilify}, _, %{action: :replace}, _adapter, _opts) do + {:ok, nil} + end + + def on_repo_change( + %{on_replace: :delete_if_exists} = refl, + parent_changeset, + %{action: :replace} = changeset, + adapter, + opts + ) do + try do + on_repo_change(%{refl | on_replace: :delete}, parent_changeset, changeset, adapter, opts) + rescue + Ecto.StaleEntryError -> {:ok, nil} + end + end + + def on_repo_change( + %{on_replace: on_replace} = refl, + parent_changeset, + %{action: :replace} = changeset, + adapter, + opts + ) do + changeset = + case on_replace do + :delete -> %{changeset | action: :delete} + :update -> %{changeset | action: :update} + end + + on_repo_change(refl, parent_changeset, changeset, adapter, opts) + end + + def on_repo_change( + _refl, + %{data: parent, repo: repo}, + %{action: action} = changeset, + _adapter, + opts + ) do + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case apply(repo, action, [changeset, opts]) do + {:ok, _} = ok -> + if action == :delete, do: {:ok, nil}, else: ok + + {:error, changeset} -> + {:error, changeset} + end + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end +end + +defmodule Ecto.Association.ManyToMany do + @moduledoc """ + The association struct for `many_to_many` associations. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is associated + * `owner_key` - The key on the `owner` schema used for the association + * `queryable` - The real query to use for querying association + * `on_delete` - The action taken on associations when schema is deleted + * `on_replace` - The action taken on associations when schema is replaced + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default `:child` + * `join_keys` - The keyword list with many to many join keys + * `join_through` - Atom (representing a schema) or a string (representing a table) + for many to many associations + * `join_defaults` - A list of defaults for join associations + * `preload_order` - Default `order_by` of the association, used only by preload + """ + + @behaviour Ecto.Association + @on_delete_opts [:nothing, :delete_all] + @on_replace_opts [:raise, :mark_as_invalid, :delete] + + defstruct [ + :field, + :owner, + :related, + :owner_key, + :queryable, + :on_delete, + :on_replace, + :join_keys, + :join_through, + :on_cast, + where: [], + join_where: [], + defaults: [], + join_defaults: [], + relationship: :child, + cardinality: :many, + unique: false, + ordered: false, + preload_order: [] + ] + + @impl true + def after_verify_validation(%{queryable: queryable, join_through: join_through}) do + cond do + not is_atom(queryable) -> + :ok + + not Code.ensure_loaded?(queryable) -> + {:error, "associated schema #{inspect(queryable)} does not exist"} + + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect(queryable)} is not an Ecto schema"} + + not is_atom(join_through) -> + :ok + + not Code.ensure_loaded?(join_through) -> + {:error, ":join_through schema #{inspect(join_through)} does not 
exist"} + + not function_exported?(join_through, :__schema__, 2) -> + {:error, ":join_through module #{inspect(join_through)} is not an Ecto schema"} + + true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + queryable = Keyword.fetch!(opts, :queryable) + related = Ecto.Association.related_from_query(queryable, name) + + join_keys = opts[:join_keys] + join_through = opts[:join_through] + validate_join_through(name, join_through) + + {owner_key, join_keys} = + case join_keys do + [{join_owner_key, owner_key}, {join_related_key, related_key}] + when is_atom(join_owner_key) and is_atom(owner_key) and + is_atom(join_related_key) and is_atom(related_key) -> + {owner_key, join_keys} + + nil -> + {:id, default_join_keys(module, related)} + + _ -> + raise ArgumentError, + "many_to_many #{inspect(name)} expect :join_keys to be a keyword list " <> + "with two entries, the first being how the join table should reach " <> + "the current schema and the second how the join table should reach " <> + "the associated schema. For example: #{inspect(default_join_keys(module, related))}" + end + + unless Module.get_attribute(module, :ecto_fields)[owner_key] do + raise ArgumentError, + "schema does not have the field #{inspect(owner_key)} used by " <> + "association #{inspect(name)}, please set the :join_keys option accordingly" + end + + on_delete = Keyword.get(opts, :on_delete, :nothing) + on_replace = Keyword.get(opts, :on_replace, :raise) + + unless on_delete in @on_delete_opts do + raise ArgumentError, + "invalid :on_delete option for #{inspect(name)}. " <> + "The only valid options are: " <> + Enum.map_join(@on_delete_opts, ", ", &"`#{inspect(&1)}`") + end + + unless on_replace in @on_replace_opts do + raise ArgumentError, + "invalid `:on_replace` option for #{inspect(name)}. 
" <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect(&1)}`") + end + + where = opts[:where] || [] + join_where = opts[:join_where] || [] + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + join_defaults = Ecto.Association.validate_defaults!(module, name, opts[:join_defaults] || []) + preload_order = Ecto.Association.validate_preload_order!(name, opts[:preload_order] || []) + + unless is_list(where) do + raise ArgumentError, + "expected `:where` for #{inspect(name)} to be a keyword list, got: `#{inspect(where)}`" + end + + unless is_list(join_where) do + raise ArgumentError, + "expected `:join_where` for #{inspect(name)} to be a keyword list, got: `#{inspect(join_where)}`" + end + + if opts[:join_defaults] && is_binary(join_through) do + raise ArgumentError, ":join_defaults has no effect for a :join_through without a schema" + end + + %__MODULE__{ + field: name, + cardinality: Keyword.fetch!(opts, :cardinality), + owner: module, + related: related, + owner_key: owner_key, + join_keys: join_keys, + join_where: join_where, + join_through: join_through, + join_defaults: join_defaults, + queryable: queryable, + on_delete: on_delete, + on_replace: on_replace, + unique: Keyword.get(opts, :unique, false), + defaults: defaults, + where: where, + preload_order: preload_order + } + end + + defp default_join_keys(module, related) do + [ + {Ecto.Association.association_key(module, :id), :id}, + {Ecto.Association.association_key(related, :id), :id} + ] + end + + @impl true + def joins_query( + %{owner: owner, queryable: queryable, join_through: join_through, join_keys: join_keys} = + assoc + ) do + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + from(o in owner, + join: j in ^join_through, + on: field(j, ^join_owner_key) == field(o, ^owner_key), + join: q in ^queryable, + on: field(j, ^join_related_key) == field(q, ^related_key) + ) + |> 
Ecto.Association.combine_joins_query(assoc.where, 2) + |> Ecto.Association.combine_joins_query(assoc.join_where, 1) + end + + def assoc_query(%{queryable: queryable} = refl, values) do + assoc_query(refl, queryable, values) + end + + @impl true + def assoc_query(assoc, query, values) do + %{queryable: queryable, join_through: join_through, join_keys: join_keys, owner: owner} = + assoc + + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + owner_key_type = owner.__schema__(:type, owner_key) + + # We only need to join in the "join table". Preload and Ecto.assoc expressions can then filter + # by &1.join_owner_key in ^... to filter down to the associated entries in the related table. + query = + from(q in (query || queryable), + join: j in ^join_through, + on: field(q, ^related_key) == field(j, ^join_related_key), + where: field(j, ^join_owner_key) in type(^values, {:in, ^owner_key_type}) + ) + |> Ecto.Association.combine_assoc_query(assoc.where) + + Ecto.Association.combine_joins_query(query, assoc.join_where, length(query.joins)) + end + + @impl true + def build(refl, owner, attributes) do + refl + |> build(owner) + |> struct(attributes) + end + + @impl true + def preload_info(%{join_keys: [{join_owner_key, owner_key}, {_, _}], owner: owner} = refl) do + owner_key_type = owner.__schema__(:type, owner_key) + + # When preloading use the last bound table (which is the join table) and the join_owner_key + # to filter out related entities to the owner structs we're preloading with. 
+ {:assoc, refl, {-1, join_owner_key, owner_key_type}} + end + + @impl true + def on_repo_change( + %{on_replace: :delete} = refl, + parent_changeset, + %{action: :replace} = changeset, + adapter, + opts + ) do + on_repo_change(refl, parent_changeset, %{changeset | action: :delete}, adapter, opts) + end + + def on_repo_change( + %{join_keys: join_keys, join_through: join_through, join_where: join_where}, + %{repo: repo, data: owner}, + %{action: :delete, data: related}, + adapter, + opts + ) do + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + owner_value = dump!(:delete, join_through, owner, owner_key, adapter) + related_value = dump!(:delete, join_through, related, related_key, adapter) + + query = + join_through + |> where([j], field(j, ^join_owner_key) == ^owner_value) + |> where([j], field(j, ^join_related_key) == ^related_value) + |> Ecto.Association.combine_assoc_query(join_where) + + query = %{query | prefix: owner.__meta__.prefix} + repo.delete_all(query, opts) + {:ok, nil} + end + + def on_repo_change( + %{field: field, join_through: join_through, join_keys: join_keys} = refl, + %{repo: repo, data: owner} = parent_changeset, + %{action: action} = changeset, + adapter, + opts + ) do + changeset = Ecto.Association.update_parent_prefix(changeset, owner) + + case apply(repo, action, [changeset, opts]) do + {:ok, related} -> + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + if insert_join?(parent_changeset, changeset, field, related_key) do + owner_value = dump!(:insert, join_through, owner, owner_key, adapter) + related_value = dump!(:insert, join_through, related, related_key, adapter) + data = %{join_owner_key => owner_value, join_related_key => related_value} + + case insert_join(join_through, refl, parent_changeset, data, opts) do + {:error, join_changeset} -> + {:error, + %{ + changeset + | errors: join_changeset.errors ++ changeset.errors, + valid?: join_changeset.valid? and changeset.valid? 
+ }} + + _ -> + {:ok, related} + end + else + {:ok, related} + end + + {:error, changeset} -> + {:error, changeset} + end + end + + defp validate_join_through(name, nil) do + raise ArgumentError, + "many_to_many #{inspect(name)} associations require the :join_through option to be given" + end + + defp validate_join_through(_, join_through) + when is_atom(join_through) or is_binary(join_through) do + :ok + end + + defp validate_join_through(name, _join_through) do + raise ArgumentError, + "many_to_many #{inspect(name)} associations require the :join_through option to be " <> + "an atom (representing a schema) or a string (representing a table)" + end + + defp insert_join?(%{action: :insert}, _, _field, _related_key), do: true + defp insert_join?(_, %{action: :insert}, _field, _related_key), do: true + + defp insert_join?(%{data: owner}, %{data: related}, field, related_key) do + current_key = Map.fetch!(related, related_key) + + not Enum.any?(Map.fetch!(owner, field), fn child -> + Map.get(child, related_key) == current_key + end) + end + + defp insert_join(join_through, _refl, %{repo: repo, data: owner}, data, opts) + when is_binary(join_through) do + opts = Keyword.put_new(opts, :prefix, owner.__meta__.prefix) + repo.insert_all(join_through, [data], opts) + end + + defp insert_join(join_through, refl, parent_changeset, data, opts) when is_atom(join_through) do + %{repo: repo, constraints: constraints, data: owner} = parent_changeset + + changeset = + join_through + |> Ecto.Association.apply_defaults(refl.join_defaults, owner) + |> Map.merge(data) + |> Ecto.Changeset.change() + |> Map.put(:constraints, constraints) + |> put_new_prefix(owner.__meta__.prefix) + + repo.insert(changeset, opts) + end + + defp put_new_prefix(%{data: %{__meta__: %{prefix: prefix}}} = changeset, prefix), + do: changeset + + defp put_new_prefix(%{data: %{__meta__: %{prefix: nil}}} = changeset, prefix), + do: update_in(changeset.data, &Ecto.put_meta(&1, prefix: prefix)) + + defp 
put_new_prefix(changeset, _), + do: changeset + + defp field!(op, struct, field) do + Map.get(struct, field) || + raise "could not #{op} join entry because `#{field}` is nil in #{inspect(struct)}" + end + + defp dump!(action, join_through, struct, field, adapter) when is_binary(join_through) do + value = field!(action, struct, field) + type = struct.__struct__.__schema__(:type, field) + + case Ecto.Type.adapter_dump(adapter, type, value) do + {:ok, value} -> + value + + :error -> + raise Ecto.ChangeError, + "value `#{inspect(value)}` for `#{inspect(struct.__struct__)}.#{field}` " <> + "in `#{action}` does not match type #{Ecto.Type.format(type)}" + end + end + + defp dump!(action, join_through, struct, field, _) when is_atom(join_through) do + field!(action, struct, field) + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end + + ## On delete callbacks + + @doc false + def delete_all(refl, parent, repo_name, opts) do + %{join_through: join_through, join_keys: join_keys, owner: owner} = refl + [{join_owner_key, owner_key}, {_, _}] = join_keys + + if value = Map.get(parent, owner_key) do + owner_type = owner.__schema__(:type, owner_key) + + query = + from j in join_through, where: field(j, ^join_owner_key) == type(^value, ^owner_type) + + Ecto.Repo.Queryable.delete_all(repo_name, query, opts) + end + end +end diff --git a/deps/ecto/lib/ecto/changeset.ex b/deps/ecto/lib/ecto/changeset.ex new file mode 100644 index 0000000..b727ef9 --- /dev/null +++ b/deps/ecto/lib/ecto/changeset.ex @@ -0,0 +1,4395 @@ +defmodule Ecto.Changeset do + @moduledoc ~S""" + Changesets allow filtering, type casting, validation, and + constraints when manipulating structs, usually in preparation + for inserting and updating entries into a database. 
+ + Let's break down what those features mean. Imagine the common + scenario where you want to receive data from a user submitted + web form to create or update entries in the database. Once you + receive this data on the server, changesets will help you perform + the following actions: + + * **filtering** - because you are receiving external data from + a third-party, you must explicitly list which data you accept. + For example, you most likely don't want to allow a user to set + its own "is_admin" field to true + + * **type casting** - a web form sends most of its data as strings. + When the user types the number "100", Ecto will receive it as + the string "100", which must then be converted to 100. + Changesets are responsible for converting these values to the + types defined in your `Ecto.Schema`, supporting even complex types + such as datetimes + + * **validations** - the data the user submits may not be correct. + For example, the user may type an invalid email address, with + a trailing dot. Or say the date for a future meeting would + happen in the last year. You must validate the data and give + feedback to the user + + * **constraints** - some validations can only happen with the + help of the database. For example, in order to know if a user + email is already taken or not, you must query the database. + Constraints help you do that in a way that respects data + integrity + + Although we have used a web form as an example, changesets can be used + for APIs and many other scenarios. Changesets may also be used to work + with data even if it won't be written to a database. We will cover + these scenarios in the documentation below. There is also an introductory + example of working with changesets and how it relates to schemas and + repositories [in the `Ecto` module](`Ecto#module-changesets`). + + In a nutshell, there are two main functions for creating a changeset. 
+ The `cast/4` function is used to receive external parameters from a + form, API or command line, and convert them to the types defined in + your `Ecto.Schema`. `change/2` is used to modify data directly from + your application, assuming the data given is valid and matches the + existing types. The remaining functions in this module, such as + validations, constraints, association handling, are about manipulating + changesets. + + ## External vs internal data + + Changesets allow working with two kinds of data: + + * external to the application - for example user input from + a form that needs to be type-converted and properly validated. This + use case is primarily covered by the `cast/4` function. + + * internal to the application - for example programmatically generated, + or coming from other subsystems. This use case is primarily covered + by the `change/2` and `put_change/3` functions. + + When working with external data, the data is typically provided + as maps with string keys (also known as parameters). On the other hand, + when working with internal data, you typically have maps of atom keys + or structs. This duality allows you to track the nature of your data: + if you have structs or maps with atom keys, it means the data has been + parsed/validated. + + If you have external data or you have maps that may have either + string or atom keys, consider using `cast/4` to create a changeset. + The changeset will parse and validate these parameters and provide APIs + to safely manipulate and change the data accordingly. + + ## Validations and constraints + + Ecto changesets provide both validations and constraints which + are ultimately turned into errors in case something goes wrong. + + The difference between them is that most validations can be + executed without a need to interact with the database and, therefore, + are always executed before attempting to insert or update the entry + in the database. 
Validations run immediately when a validation function + is called on the data that is contained in the changeset at that time. + + Some validations may happen against the database but + they are inherently unsafe. Those validations start with a `unsafe_` + prefix, such as `unsafe_validate_unique/4`. + + On the other hand, constraints rely on the database and are always safe. + As a consequence, validations are always checked before constraints. + Constraints won't even be checked in case validations failed. + + Let's see an example: + + defmodule User do + use Ecto.Schema + import Ecto.Changeset + + schema "users" do + field :name + field :email + field :age, :integer + end + + def changeset(user, params \\ %{}) do + user + |> cast(params, [:name, :email, :age]) + |> validate_required([:name, :email]) + |> validate_format(:email, ~r/@/) + |> validate_inclusion(:age, 18..100) + |> unique_constraint(:email) + end + end + + In the `changeset/2` function above, we define three validations. + They check that `name` and `email` fields are present in the + changeset, the e-mail is of the specified format, and the age is + between 18 and 100 - as well as a unique constraint in the email + field. + + Let's suppose the e-mail is given but the age is invalid. The + changeset would have the following errors: + + changeset = User.changeset(%User{}, %{age: 0, email: "mary@example.com"}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [age: {"is invalid", []}, name: {"can't be blank", []}] + + In this case, we haven't checked the unique constraint in the + e-mail field because the data did not validate. 
Let's fix the + age and the name, and assume that the e-mail already exists in the + database: + + changeset = User.changeset(%User{}, %{age: 42, name: "Mary", email: "mary@example.com"}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [email: {"has already been taken", []}] + + Validations and constraints define an explicit boundary when the check + happens. By moving constraints to the database, we also provide a safe, + correct and data-race free means of checking the user input. + + ### Deferred constraints + + Some databases support deferred constraints, i.e., constraints which are + checked at the end of the transaction rather than at the end of each statement. + + Changesets do not support this type of constraints. When working with deferred + constraints, a violation while invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2` won't + return `{:error, changeset}`, but rather raise an error at the end of the + transaction. + + ## Empty values + + Many times, the data given on cast needs to be further pruned, specially + regarding empty values. For example, if you are gathering data to be + cast from the command line or through an HTML form or any other text-based + format, it is likely those means cannot express nil values. For + those reasons, changesets include the concept of empty values. + + When applying changes using `cast/4`, an empty value will be automatically + converted to the field's default value. If the field is an array type, any + empty value inside the array will be removed. When a plain map is used in + the data portion of a schemaless changeset, every field's default value is + considered to be `nil`. For example: + + iex> data = %{name: "Bob"} + iex> types = %{name: :string} + iex> params = %{name: ""} + iex> changeset = Ecto.Changeset.cast({data, types}, params, Map.keys(types)) + iex> changeset.changes + %{name: nil} + + Empty values are stored as a list in the changeset's `:empty_values` field. 
+ The list contains elements of type `t:empty_value/0`. Those are either values, + which will be considered empty if they + match, or a function that must return a boolean if the value is empty or + not. By default, Ecto uses `Ecto.Changeset.empty_values/0` which will mark + a field as empty if it is a string made only of whitespace characters. + You can also pass the `:empty_values` option to `cast/4` in case you want + to change how a particular `cast/4` work. + + ## Associations, embeds, and on replace + + Using changesets you can work with associations as well as with + [embedded](embedded-schemas.html) structs. There are two primary APIs: + + * `cast_assoc/3` and `cast_embed/3` - those functions are used when + working with external data. In particular, they allow you to change + associations and embeds alongside the parent struct, all at once. + + * `put_assoc/4` and `put_embed/4` - it allows you to replace the + association or embed as a whole. This can be used to move associated + data from one entry to another, to completely remove or replace + existing entries. + + These functions are opinionated on how it works with associations. + If you need different behaviour or explicit control over the associated + data, you can skip this functionality and use `Ecto.Multi` to encode how + several database operations will happen on several schemas and changesets + at once. + + You can learn more about working with associations in our documentation, + including cheatsheets and practical examples. Check out: + + * The docs for `cast_assoc/3` and `put_assoc/3` + * The [associations cheatsheet](associations.html) + * The [Constraints and Upserts guide](constraints-and-upserts.html) + * The [Polymorphic associations with many to many guide](polymorphic-associations-with-many-to-many.html) + + ### The `:on_replace` option + + When using any of those APIs, you may run into situations where Ecto sees + data is being replaced. 
For example, imagine a Post has many Comments where + the comments have IDs 1, 2 and 3. If you call `cast_assoc/3` passing only + the IDs 1 and 2, Ecto will consider 3 is being "replaced" and it will raise + by default. Such behaviour can be changed when defining the relation by + setting `:on_replace` option when defining your association/embed according + to the values below: + + * `:raise` (default) - do not allow removing association or embedded + data via parent changesets + * `:mark_as_invalid` - if attempting to remove the association or + embedded data via parent changeset - an error will be added to the parent + changeset, and it will be marked as invalid + * `:nilify` - sets owner reference column to `nil` (available only for + associations). Use this on a `belongs_to` column to allow the association + to be cleared out so that it can be set to a new value. Will set `action` + on associated changesets to `:replace` + * `:update` - updates the association, available only for `has_one`, `belongs_to` + and `embeds_one`. This option will update all the fields given to the changeset + including the id for the association + * `:delete` - removes the association or related data from the database. + This option has to be used carefully (see below). Will set `action` on associated + changesets to `:replace` + * `:delete_if_exists` - like `:delete` except that it ignores any stale entry + error. For instance, if you set `on_replace: :delete` but the replaced + resource was already deleted by a separate request, it will raise a + `Ecto.StaleEntryError`. `:delete_if_exists` makes it so it will only delete + if the entry still exists + + The `:delete` and `:delete_if_exists` options must be used carefully as they allow + users to delete any associated data by simply setting it to `nil` or an empty list. 
+ If you need deletion, it is often preferred to add a separate boolean virtual field + in the schema and manually mark the changeset for deletion if the `:delete` field is + set in the params, as in the example below. Note that we don't call `cast/4` in this + case because we don't want to prevent deletion if a change is invalid (changes are + irrelevant if the entity needs to be deleted). + + defmodule Comment do + use Ecto.Schema + import Ecto.Changeset + + schema "comments" do + field :body, :string + field :delete, :boolean, virtual: true + end + + def changeset(comment, %{"delete" => "true"}) do + %{Ecto.Changeset.change(comment, delete: true) | action: :delete} + end + + def changeset(comment, params) do + cast(comment, params, [:body]) + end + end + + ## Schemaless changesets + + In the changeset examples so far, we have always used changesets to validate + and cast data contained in a struct defined by an Ecto schema, such as the `%User{}` + struct defined by the `User` module. + + However, changesets can also be used with "regular" structs too by passing a tuple + with the data and its types: + + user = %User{} + types = %{name: :string, email: :string, age: :integer} + params = %{name: "Callum", email: "callum@example.com", age: 27} + changeset = + {user, types} + |> Ecto.Changeset.cast(params, Map.keys(types)) + |> Ecto.Changeset.validate_required(...) + |> Ecto.Changeset.validate_length(...) + + where the user struct refers to the definition in the following module: + + defmodule User do + defstruct [:name, :email, :age] + end + + Changesets can also be used with data in a plain map, by following the same API: + + data = %{} + types = %{name: :string, email: :string, age: :integer} + params = %{name: "Callum", email: "callum@example.com", age: 27} + changeset = + {data, types} + |> Ecto.Changeset.cast(params, Map.keys(types)) + |> Ecto.Changeset.validate_required(...) + |> Ecto.Changeset.validate_length(...) 
+ + Besides the basic types which are mentioned above, such as `:boolean` and `:string`, + parameterized types can also be used in schemaless changesets. They implement + the `Ecto.ParameterizedType` behaviour and we can create the necessary type info by + calling the `init/2` function. + + For example, to use `Ecto.Enum` in a schemaless changeset: + + types = %{ + name: :string, + role: Ecto.ParameterizedType.init(Ecto.Enum, values: [:reader, :editor, :admin]) + } + + data = %{} + params = %{name: "Callum", role: "reader"} + + changeset = + {data, types} + |> Ecto.Changeset.cast(params, Map.keys(types)) + |> Ecto.Changeset.validate_required(...) + |> Ecto.Changeset.validate_length(...) + + Schemaless changesets make Ecto extremely useful to cast, validate and prune data even + if it is not meant to be persisted to the database. + + ### Changeset actions + + Changesets have an action field which is usually set by `Ecto.Repo` + whenever one of the operations such as `insert` or `update` is called: + + changeset = User.changeset(%User{}, %{age: 42, email: "mary@example.com"}) + {:error, changeset} = Repo.insert(changeset) + changeset.action + #=> :insert + + This means that when working with changesets that are not meant to be + persisted to the database, such as schemaless changesets, you may need + to explicitly set the action to one specific value. Frameworks such as + Phoenix [use the action value to define how HTML forms should + act](https://hexdocs.pm/phoenix_live_view/Phoenix.Component.html#form/1-a-note-on-errors). + + Instead of setting the action manually, you may use `apply_action/2` that + emulates operations such as `c:Ecto.Repo.insert`. `apply_action/2` will return + `{:ok, changes}` if the changeset is valid or `{:error, changeset}`, with + the given `action` set in the changeset in case of errors. 
+ + ## The Ecto.Changeset struct + + The public fields are: + + * `valid?` - Stores if the changeset is valid + * `data` - The changeset source data, for example, a struct + * `params` - The parameters as given on changeset creation + * `changes` - The `changes` from parameters that were approved in casting + * `errors` - All errors from validations + * `required` - All required fields as a list of atoms + * `action` - The action to be performed with the changeset + * `types` - Cache of the data's field types + * `empty_values` - A list of values to be considered empty + * `repo` - The repository applying the changeset (only set after a Repo function is called) + * `repo_opts` - A keyword list of options given to the underlying repository operation + + The following fields are private and must not be accessed directly. + + * `validations` + * `constraints` + * `filters` + * `prepare` + + ### Redacting fields in inspect + + To hide a field's value from the inspect protocol of `Ecto.Changeset`, mark + the field as `redact: true` in the schema, and it will display with the + value `**redacted**`. 
+ """ + + require Logger + require Ecto.Query + alias __MODULE__ + alias Ecto.Changeset.Relation + alias Ecto.Schema.Metadata + + @empty_values [&Ecto.Type.empty_trimmed?/2] + + # If a new field is added here, def merge must be adapted + defstruct valid?: false, + data: nil, + params: nil, + changes: %{}, + errors: [], + validations: [], + required: [], + prepare: [], + constraints: [], + filters: %{}, + action: nil, + types: %{}, + empty_values: @empty_values, + repo: nil, + repo_opts: [] + + @type t(data_type) :: %Changeset{ + valid?: boolean(), + repo: atom | nil, + repo_opts: Keyword.t(), + data: data_type, + params: %{optional(String.t()) => term} | nil, + changes: %{optional(atom) => term}, + required: [atom], + prepare: [(t -> t)], + errors: [{atom, error}], + constraints: [constraint], + validations: [validation], + filters: %{optional(atom) => term}, + action: action, + types: types + } + + @type t :: t(Ecto.Schema.t() | map | nil) + @type error :: {String.t(), Keyword.t()} + @type action :: nil | :insert | :update | :delete | :replace | :ignore | atom + @type constraint :: %{ + type: :check | :exclusion | :foreign_key | :unique, + constraint: String.t() | Regex.t(), + match: :exact | :suffix | :prefix, + field: atom, + error_message: String.t(), + error_type: atom + } + @type data :: map() + @type types :: %{atom => Ecto.Type.t() | {:assoc, term()} | {:embed, term()}} + @type traverse_result :: %{atom => [term] | traverse_result} + @type validation :: {atom, term} + + @typedoc """ + A possible value that you can pass to the `:empty_values` option. + + See `empty_values/0` and the [*Empty values* section](#module-empty-values) in + the module documentation for more information. 
+ """ + @typedoc since: "3.11.0" + @type empty_value :: (term() -> boolean()) | binary() | list() | map() | tuple() + + @number_validators %{ + less_than: {&/2, "must be greater than %{number}"}, + less_than_or_equal_to: {&<=/2, "must be less than or equal to %{number}"}, + greater_than_or_equal_to: {&>=/2, "must be greater than or equal to %{number}"}, + equal_to: {&==/2, "must be equal to %{number}"}, + not_equal_to: {&!=/2, "must be not equal to %{number}"} + } + + @relations [:embed, :assoc] + @match_types [:exact, :suffix, :prefix] + + @doc """ + Wraps the given data in a changeset or adds changes to a changeset. + + `changes` is a map or keyword where the key is an atom representing a + field, association or embed and the value is a term. Note the `value` is + directly stored in the changeset with no validation whatsoever. For this + reason, this function is meant for working with data internal to the + application. + + When changing embeds and associations, see `put_assoc/4` for a complete + reference on the accepted values. + + This function is useful for: + + * wrapping a struct inside a changeset + * directly changing a struct without performing castings nor validations + * directly bulk-adding changes to a changeset + + Changed attributes will only be added if the change does not have the + same value as the field in the data. + + When a changeset is passed as the first argument, the changes passed as the + second argument are merged over the changes already in the changeset if they + differ from the values in the struct. + + When a `{data, types}` is passed as the first argument, a changeset is + created with the given data and types and marked as valid. + + See `cast/4` if you'd prefer to cast and validate external parameters. + + ## Examples + + iex> changeset = change(%Post{}) + %Ecto.Changeset{...} + iex> changeset.valid? 
+ true + iex> changeset.changes + %{} + + iex> changeset = change(%Post{author: "bar"}, title: "title") + iex> changeset.changes + %{title: "title"} + + iex> changeset = change(%Post{title: "title"}, title: "title") + iex> changeset.changes + %{} + + iex> changeset = change(changeset, %{title: "new title", body: "body"}) + iex> changeset.changes.title + "new title" + iex> changeset.changes.body + "body" + + """ + @spec change(Ecto.Schema.t() | t | {data, types}, %{atom => term} | Keyword.t()) :: t + def change(data, changes \\ %{}) + + def change({data, types}, changes) when is_map(data) do + change(%Changeset{data: data, types: Enum.into(types, %{}), valid?: true}, changes) + end + + def change(%Changeset{changes: changes, types: types} = changeset, new_changes) + when is_map(new_changes) or is_list(new_changes) do + {changes, errors, valid?} = + get_changed(changeset.data, types, changes, new_changes, changeset.errors, changeset.valid?) + + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + def change(%{__struct__: struct} = data, changes) when is_map(changes) or is_list(changes) do + types = struct.__changeset__() + {changes, errors, valid?} = get_changed(data, types, %{}, changes, [], true) + %Changeset{valid?: valid?, data: data, changes: changes, errors: errors, types: types} + end + + defp get_changed(data, types, old_changes, new_changes, errors, valid?) do + Enum.reduce(new_changes, {old_changes, errors, valid?}, fn + {key, value}, {changes, errors, valid?} -> + put_change(data, changes, errors, valid?, key, value, Map.get(types, key)) + + _, _ -> + raise ArgumentError, + "invalid changes being applied to changeset. " <> + "Expected a keyword list or a map, got: #{inspect(new_changes)}" + end) + end + + @doc """ + Returns true if a field was changed in a changeset. + + This function can check associations and embeds, but doesn't support the `:to` + and `:from` options for such fields. 
+ + ## Options + + * `:to` - Check if the field was changed to a specific value + * `:from` - Check if the field was changed from a specific value + + ## Examples + + iex> post = %Post{title: "Foo", body: "Old"} + iex> changeset = change(post, %{title: "New title", body: "Old"}) + + iex> changed?(changeset, :body) + false + + iex> changed?(changeset, :title) + true + + iex> changed?(changeset, :title, to: "NEW TITLE") + false + """ + @spec changed?(t, atom, Keyword.t()) :: boolean + def changed?(%Changeset{} = changeset, field, opts \\ []) when is_atom(field) do + case Map.fetch(changeset.types, field) do + {:ok, type} -> + case fetch_change(changeset, field) do + {:ok, new_value} -> + case type do + {tag, relation} when tag in @relations -> + if opts != [] do + raise ArgumentError, "invalid options for #{tag} field" + end + + relation_changed?(relation.cardinality, new_value) + + _ -> + Enum.all?(opts, fn + {:from, from} -> + Ecto.Type.equal?(type, Map.get(changeset.data, field), from) + + {:to, to} -> + Ecto.Type.equal?(type, new_value, to) + + other -> + raise ArgumentError, "unknown option #{inspect(other)}" + end) + end + + :error -> + false + end + + :error -> + raise ArgumentError, "field #{inspect(field)} doesn't exist" + end + end + + defp relation_changed?(:one, changeset) do + changeset.action != :update or changeset.changes != %{} + end + + defp relation_changed?(:many, changesets) do + Enum.any?(changesets, &relation_changed?(:one, &1)) + end + + @doc """ + Returns the default empty values used by `Ecto.Changeset`. + + By default, Ecto marks a field as empty if it is a string made + only of whitespace characters. If you want to provide your + additional empty values on top of the default, such as an empty + list, you can write: + + @empty_values [[]] ++ Ecto.Changeset.empty_values() + + Then, you can pass `empty_values: @empty_values` on `cast/3`. + + See also the [*Empty values* section](#module-empty-values) for more + information. 
+ """ + @doc since: "3.10.0" + @spec empty_values() :: [empty_value()] + def empty_values do + @empty_values + end + + @doc """ + Applies the given `params` as changes on the `data` according to + the set of `permitted` keys. Returns a changeset. + + `data` may be either a changeset, a schema struct or a `{data, types}` + tuple. The second argument is a map of `params` that are cast according + to the type information from `data`. `params` is a map with string keys + or a map with atom keys, containing potentially invalid data. Mixed keys + are not allowed. + + During casting, all `permitted` parameters whose values match the specified + type information will have their key name converted to an atom and stored + together with the value as a change in the `:changes` field of the changeset. + If the cast value matches the current value for the field, it will not be + included in `:changes` unless the `force_changes: true` option is + provided. All parameters that are not explicitly permitted are ignored. + + If casting of all fields is successful, the changeset is returned as valid. + + Note that `cast/4` validates the types in the `params`, but not in the given + `data`. + + ## Options + + * `:empty_values` - a list containing elements of type `t:empty_value/0`. Those are + either values, which will be considered empty if they match, or a function that must + return a boolean if the value is empty or not. 1-arity functions will receive the value + being casted and 2-arity functions will receive the value being casted and its field type. + Empty values are always replaced by the default value of the respective field. + If the field is an array type, any empty value inside of the array will be removed. + To set this option while keeping the current default, use `empty_values/0` and add + your additional empty values + + * `:force_changes` - a boolean indicating whether to include values that don't alter + the current data in `:changes`. 
See `force_change/3` for more information, Defaults + to `false` + + * `:message` - a function of arity 2 that is used to create the error message when + casting fails. It is called for every field that cannot be casted and receives the + field name as the first argument and the error metadata as the second argument. It + must return a string or `nil`. If a string is returned it will be used as the error + message. If `nil` is returned the default error message will be used. The field type + is given under the `:type` key in the metadata + + ## Examples + + iex> changeset = cast(post, params, [:title]) + iex> if changeset.valid? do + ...> Repo.update!(changeset) + ...> end + + Passing a changeset as the first argument: + + iex> changeset = cast(post, %{title: "Hello"}, [:title]) + iex> new_changeset = cast(changeset, %{title: "Foo", body: "World"}, [:body]) + iex> new_changeset.params + %{"title" => "Hello", "body" => "World"} + + Or creating a changeset from a simple map with types: + + iex> data = %{title: "hello"} + iex> types = %{title: :string} + iex> changeset = cast({data, types}, %{title: "world"}, [:title]) + iex> apply_changes(changeset) + %{title: "world"} + + You can use empty values (and even cast multiple times) to change + what is considered an empty value: + + # Using default + iex> params = %{title: "", topics: []} + iex> changeset = cast(%Post{}, params, [:title, :topics]) + iex> changeset.changes + %{topics: []} + + # Changing default + iex> params = %{title: "", topics: []} + iex> changeset = cast(%Post{}, params, [:title, :topics], empty_values: [[], nil]) + iex> changeset.changes + %{title: ""} + + # Augmenting default + iex> params = %{title: "", topics: []} + iex> changeset = + ...> cast(%Post{}, params, [:title, :topics], empty_values: [[], nil] ++ Ecto.Changeset.empty_values()) + iex> changeset.changes + %{} + + You can define a custom error message function. 
+ + # Using field name + iex> params = %{title: 1, body: 2} + iex> custom_errors = [title: "must be a string"] + iex> msg_func = fn field, _meta -> custom_errors[field] end + iex> changeset = cast(post, params, [:title, :body], message: msg_func) + iex> changeset.errors + [ + title: {"must be a string", [type: :string, validation: :cast]}, + body: {"is invalid", [type: :string, validation: :cast]} + ] + + # Using field type + iex> params = %{title: 1, body: 2} + iex> custom_errors = [string: "must be a string"] + iex> msg_func = fn _field, meta -> + ...> type = meta[:type] + ...> custom_errors[type] + ...> end + iex> changeset = cast(post, params, [:title, :body], message: msg_func) + iex> changeset.errors + [ + title: {"must be a string", [type: :string, validation: :cast]}, + body: {"must be a string", [type: :string, validation: :cast]} + ] + + ## Composing casts + + `cast/4` also accepts a changeset as its first argument. In such cases, all + the effects caused by the call to `cast/4` (additional errors and changes) + are simply added to the ones already present in the argument changeset. + Parameters are merged (**not deep-merged**) and the ones passed to `cast/4` + take precedence over the ones already in the changeset. 
+ """ + @spec cast( + Ecto.Schema.t() | t | {data, types}, + %{binary => term} | %{atom => term} | :invalid, + [atom], + Keyword.t() + ) :: t + def cast(data, params, permitted, opts \\ []) + + def cast(_data, %{__struct__: _} = params, _permitted, _opts) do + raise Ecto.CastError, + type: :map, + value: params, + message: "expected params to be a :map, got: `#{inspect(params)}`" + end + + def cast({data, types}, params, permitted, opts) when is_map(data) do + cast(data, types, %{}, params, permitted, opts) + end + + def cast(%Changeset{} = changeset, params, permitted, opts) do + %{changes: changes, data: data, types: types, empty_values: empty_values} = changeset + + opts = + cond do + opts[:empty_values] -> + opts + + empty_values != empty_values() -> + # TODO: Remove changeset.empty_values field in Ecto v3.14 + IO.warn( + "Changing the empty_values field of Ecto.Changeset is deprecated, " <> + "please pass the :empty_values option on cast instead" + ) + + [empty_values: empty_values] ++ opts + + true -> + [empty_values: empty_values] ++ opts + end + + new_changeset = cast(data, types, changes, params, permitted, opts) + cast_merge(changeset, new_changeset) + end + + def cast(%{__struct__: module} = data, params, permitted, opts) do + cast(data, module.__changeset__(), %{}, params, permitted, opts) + end + + defp cast(%{} = data, %{} = types, %{} = changes, :invalid, permitted, _opts) + when is_list(permitted) do + _ = Enum.each(permitted, &cast_key/1) + %Changeset{params: nil, data: data, valid?: false, errors: [], changes: changes, types: types} + end + + defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) + when is_list(permitted) do + empty_values = Keyword.get(opts, :empty_values, @empty_values) + force? 
= Keyword.get(opts, :force_changes, false) + params = convert_params(params) + msg_func = Keyword.get(opts, :message, fn _, _ -> nil end) + + unless is_function(msg_func, 2) do + raise ArgumentError, + "expected `:message` to be a function of arity 2, received: #{inspect(msg_func)}" + end + + defaults = + case data do + %{__struct__: struct} -> struct.__struct__() + %{} -> %{} + end + + {changes, errors, valid?} = + Enum.reduce( + permitted, + {changes, [], true}, + &process_param(&1, params, types, data, empty_values, defaults, force?, msg_func, &2) + ) + + %Changeset{ + params: params, + data: data, + valid?: valid?, + errors: Enum.reverse(errors), + changes: changes, + types: types + } + end + + defp cast(%{}, %{}, %{}, params, permitted, _opts) when is_list(permitted) do + raise Ecto.CastError, + type: :map, + value: params, + message: "expected params to be a :map, got: `#{inspect(params)}`" + end + + defp process_param( + key, + params, + types, + data, + empty_values, + defaults, + force?, + msg_func, + {changes, errors, valid?} + ) do + {key, param_key} = cast_key(key) + type = cast_type!(types, key) + + current = + case changes do + %{^key => value} -> value + _ -> Map.get(data, key) + end + + case cast_field(key, param_key, type, params, current, empty_values, defaults, force?, valid?) 
do + {:ok, value, valid?} -> + {Map.put(changes, key, value), errors, valid?} + + :missing -> + {changes, errors, valid?} + + {:invalid, custom_errors} -> + {default_message, metadata} = + custom_errors + |> Keyword.put_new(:validation, :cast) + |> Keyword.put(:type, type) + |> Keyword.pop(:message, "is invalid") + + message = + case msg_func.(key, metadata) do + nil -> default_message + user_message -> user_message + end + + {changes, [{key, {message, metadata}} | errors], false} + end + end + + defp cast_type!(types, key) do + case types do + %{^key => {tag, _}} when tag in @relations -> + raise "casting #{tag}s with cast/4 for #{inspect(key)} field is not supported, use cast_#{tag}/3 instead" + + %{^key => type} -> + type + + _ -> + known_fields = types |> Map.keys() |> Enum.map_join(", ", &inspect/1) + + raise ArgumentError, + "unknown field `#{inspect(key)}` given to cast. Either the field does not exist or it is a " <> + ":through association (which are read-only). The known fields are: #{known_fields}" + end + end + + defp cast_key(key) when is_atom(key), + do: {key, Atom.to_string(key)} + + defp cast_key(key) do + raise ArgumentError, "cast/3 expects a list of atom keys, got key: `#{inspect(key)}`" + end + + defp cast_field(key, param_key, type, params, current, empty_values, defaults, force?, valid?) do + case params do + %{^param_key => value} -> + value = filter_empty_values(type, value, empty_values, defaults, key) + + case Ecto.Type.cast(type, value) do + {:ok, value} -> + if not force? 
and Ecto.Type.equal?(type, current, value) do + :missing + else + {:ok, value, valid?} + end + + :error -> + {:invalid, []} + + {:error, custom_errors} when is_list(custom_errors) -> + {:invalid, custom_errors} + end + + _ -> + :missing + end + end + + defp filter_empty_values(type, value, empty_values, defaults, key) do + case filter_empty_values(type, value, empty_values) do + :empty -> Map.get(defaults, key) + {:ok, value} -> value + end + end + + defp filter_empty_values({:array, type}, value, empty_values) when is_list(value) do + value = + for elem <- value, + {:ok, elem} <- [filter_empty_values(type, elem, empty_values)], + do: elem + + if value in empty_values do + :empty + else + {:ok, value} + end + end + + defp filter_empty_values(type, value, empty_values) do + filter_empty_value(empty_values, value, type) + end + + defp filter_empty_value([head | tail], value, type) when is_function(head, 1) do + case head.(value) do + true -> :empty + false -> filter_empty_value(tail, value, type) + end + end + + defp filter_empty_value([head | tail], value, type) when is_function(head, 2) do + case head.(value, type) do + true -> :empty + false -> filter_empty_value(tail, value, type) + end + end + + defp filter_empty_value([value | _tail], value, _type), + do: :empty + + defp filter_empty_value([_head | tail], value, type), + do: filter_empty_value(tail, value, type) + + defp filter_empty_value([], value, _type), + do: {:ok, value} + + # We only look at the first element because traversing the whole map + # can be expensive and it was showing up during profiling. This means + # we won't always raise, but the check only exists for user convenience + # anyway, and it is not a guarantee. 
+ defp convert_params(params) do + case :maps.next(:maps.iterator(params)) do + {key, _, _} when is_atom(key) -> + for {key, value} <- params, into: %{} do + if is_atom(key) do + {Atom.to_string(key), value} + else + raise Ecto.CastError, + type: :map, + value: params, + message: + "expected params to be a map with atoms or string keys, " <> + "got a map with mixed keys: #{inspect(params)}" + end + end + + _ -> + params + end + end + + ## Casting related + + @doc """ + Casts the given association with the changeset parameters. + + This function should be used when working with the entire association at + once (and not a single element of a many-style association) and receiving + data external to the application. + + `cast_assoc/3` matches the records extracted from the database + and compares it with the parameters received from an external source. + Therefore, it is expected that the data in the changeset has explicitly + preloaded the association being cast and that all of the IDs exist and + are unique. + + For example, imagine a user has many addresses relationship where + post data is sent as follows + + %{"name" => "john doe", "addresses" => [ + %{"street" => "somewhere", "country" => "brazil", "id" => 1}, + %{"street" => "elsewhere", "country" => "poland"}, + ]} + + and then + + User + |> Repo.get!(id) + |> Repo.preload(:addresses) # Only required when updating data + |> Ecto.Changeset.cast(params, []) + |> Ecto.Changeset.cast_assoc(:addresses, with: &MyApp.Address.changeset/2) + + The parameters for the given association will be retrieved + from `changeset.params`. Those parameters are expected to be + a map with attributes, similar to the ones passed to `cast/4`. + Once parameters are retrieved, `cast_assoc/3` will match those + parameters with the associations already in the changeset data. 
+ + Once `cast_assoc/3` is called, Ecto will compare each parameter + with the user's already preloaded addresses and act as follows: + + * If the parameter does not contain an ID, the parameter data + will be passed to `MyApp.Address.changeset/2` with a new struct + and become an insert operation. We only consider the ID as not + given if there is no "id" key or if its value is strictly `nil` + + * If the parameter contains an ID and there is no associated child + with such ID, the parameter data will be passed to + `MyApp.Address.changeset/2` with a new struct and become an insert + operation + + * If the parameter contains an ID and there is an associated child + with such ID, the parameter data will be passed to + `MyApp.Address.changeset/2` with the existing struct and become an + update operation + + * If there is an associated child with an ID and its ID is not given + as parameter, the `:on_replace` callback for that association will + be invoked (see the ["On replace" section](#module-the-on_replace-option) + on the module documentation) + + If two or more addresses have the same IDs, Ecto will consider that an + error and add an error to the changeset saying that there are duplicate + entries. + + Every time the `MyApp.Address.changeset/2` function is invoked, it must + return a changeset. This changeset will always be included under `changes` + of the parent changeset, even if there are no changes. This is done for + reflection purposes, allowing developers to introspect validations and + other metadata from the association. Once the parent changeset is given + to an `Ecto.Repo` function, all entries will be inserted/updated/deleted + within the same transaction. + + As you see above, this function is opinionated on how it works. 
If you + need different behaviour or if you need explicit control over the associated + data, you can either use `put_assoc/4` or use `Ecto.Multi` to encode how + several database operations will happen on several schemas and changesets + at once. + + ## Custom actions + + Developers are allowed to explicitly set the `:action` field of a + changeset to instruct Ecto how to act in certain situations. Let's suppose + that, if one of the associations has only empty fields, you want to ignore + the entry altogether instead of showing an error. The changeset function could + be written like this: + + def changeset(struct, params) do + struct + |> cast(params, [:title, :body]) + |> validate_required([:title, :body]) + |> case do + %{valid?: false, changes: changes} = changeset when changes == %{} -> + # If the changeset is invalid and has no changes, it is + # because all required fields are missing, so we ignore it. + %{changeset | action: :ignore} + changeset -> + changeset + end + end + + You can also set it to delete if you want data to be deleted based on the + received parameters (such as a checkbox or any other indicator). + + ## Partial changes for many-style associations + + By preloading an association using a custom query you can confine the behavior + of `cast_assoc/3`. This opens up the possibility to work on a subset of the data, + instead of all associations in the database. + + Taking the initial example of users having addresses, imagine those addresses + are set up to belong to a country. If you want to allow users to bulk edit all + addresses that belong to a single country, you can do so by changing the preload + query: + + query = from MyApp.Address, where: [country: ^edit_country] + + User + |> Repo.get!(id) + |> Repo.preload(addresses: query) + |> Ecto.Changeset.cast(params, []) + |> Ecto.Changeset.cast_assoc(:addresses) + + This will allow you to cast and update only the association for the given country. 
+ The important point for partial changes is that any addresses, which were not + preloaded won't be changed. + + ## Sorting and deleting from -many collections + + In earlier examples, we passed a -many style association as a list: + + %{"name" => "john doe", "addresses" => [ + %{"street" => "somewhere", "country" => "brazil", "id" => 1}, + %{"street" => "elsewhere", "country" => "poland"}, + ]} + + However, it is also common to pass the addresses as a map, where each + key is an integer representing its position: + + %{"name" => "john doe", "addresses" => %{ + 0 => %{"street" => "somewhere", "country" => "brazil", "id" => 1}, + 1 => %{"street" => "elsewhere", "country" => "poland"} + }} + + Using indexes becomes specially useful with two supporting options: + `:sort_param` and `:drop_param`. These options tell the indexes should + be reordered or deleted from the data. For example, if you did: + + cast_embed(changeset, :addresses, + sort_param: :addresses_sort, + drop_param: :addresses_drop) + + You can now submit this: + + %{"name" => "john doe", "addresses" => %{...}, "addresses_drop" => [0]} + + And now the entry with index 0 will be dropped from the params before casting. + Note this requires setting the relevant `:on_replace` option on your + associations/embeds definition. + + Similar, for sorting, you could do: + + %{"name" => "john doe", "addresses" => %{...}, "addresses_sort" => [1, 0]} + + And that will internally sort the elements so 1 comes before 0. Note that + any index not present in `"addresses_sort"` will come _before_ any of the + sorted indexes. If an index is not found, an empty entry is added in its + place. + + For embeds, this guarantees the embeds will be rewritten in the given order. + However, for associations, this is not enough. You will have to add a + `field :position, :integer` to the schema and add a with function of arity 3 + to add the position to your children changeset. 
For example, you could implement: + + defp child_changeset(child, _changes, position) do + child + |> change(position: position) + end + + And by passing it to `:with`, it will be called with the final position of the + item: + + changeset + |> cast_assoc(:children, sort_param: ..., with: &child_changeset/3) + + These parameters can be powerful in certain UIs as it allows you to decouple + the sorting and replacement of the data from its representation. + + ## More resources + + You can learn more about working with associations in our documentation, + including cheatsheets and practical examples. Check out: + + * The docs for `put_assoc/3` + * The [associations cheatsheet](associations.html) + * The [Constraints and Upserts guide](constraints-and-upserts.html) + * The [Polymorphic associations with many to many guide](polymorphic-associations-with-many-to-many.html) + + ## Options + + * `:required` - if the association is a required field. For associations of cardinality + one, a non-nil value satisfies this validation. For associations with many entries, + a non-empty list is satisfactory. + + * `:required_message` - the message on failure, defaults to "can't be blank" + + * `:invalid_message` - the message on failure, defaults to "is invalid" + + * `:force_update_on_change` - force the parent record to be updated in the + repository if there is a change, defaults to `true` + + * `:with` - the function to build the changeset from params. Defaults to the + `changeset/2` function of the associated module. It can be an anonymous + function that expects two arguments: the associated struct to be cast and its + parameters. It must return a changeset. For associations with cardinality `:many`, + functions with arity 3 are accepted, and the third argument will be the position + of the associated element in the list, or `nil`, if the association is being replaced. 
+ + * `:drop_param` - the parameter name which keeps a list of indexes to drop + from the relation parameters + + * `:sort_param` - the parameter name which keeps a list of indexes to sort + from the relation parameters. Unknown indexes are considered to be new + entries. Non-listed indexes will come before any sorted ones. See + `cast_assoc/3` for more information + + """ + @spec cast_assoc(t, atom, Keyword.t()) :: t + def cast_assoc(changeset, name, opts \\ []) when is_atom(name) do + cast_relation(:assoc, changeset, name, opts) + end + + @doc """ + Casts the given embed with the changeset parameters. + + The parameters for the given embed will be retrieved + from `changeset.params`. Those parameters are expected to be + a map with attributes, similar to the ones passed to `cast/4`. + Once parameters are retrieved, `cast_embed/3` will match those + parameters with the embeds already in the changeset record. + See `cast_assoc/3` for an example of working with casts and + associations which would also apply for embeds. + + The changeset must have been previously `cast` using + `cast/4` before this function is invoked. + + ## Options + + * `:required` - if the embed is a required field. For embeds of cardinality + one, a non-nil value satisfies this validation. For embeds with many entries, + a non-empty list is satisfactory. + + * `:required_message` - the message on failure, defaults to "can't be blank" + + * `:invalid_message` - the message on failure, defaults to "is invalid" + + * `:force_update_on_change` - force the parent record to be updated in the + repository if there is a change, defaults to `true` + + * `:with` - the function to build the changeset from params. Defaults to the + `changeset/2` function of the associated module. It must be an anonymous + function that expects two arguments: the embedded struct to be cast and its + parameters. It must return a changeset. 
For embeds with cardinality `:many`, + functions with arity 3 are accepted, and the third argument will be the position + of the associated element in the list, or `nil`, if the embed is being replaced. + + * `:drop_param` - the parameter name which keeps a list of indexes to drop + from the relation parameters + + * `:sort_param` - the parameter name which keeps a list of indexes to sort + from the relation parameters. Unknown indexes are considered to be new + entries. Non-listed indexes will come before any sorted ones. See + `cast_assoc/3` for more information + + """ + @spec cast_embed(t, atom, Keyword.t()) :: t + def cast_embed(changeset, name, opts \\ []) when is_atom(name) do + cast_relation(:embed, changeset, name, opts) + end + + defp cast_relation(type, %Changeset{data: data, types: types}, _name, _opts) + when data == nil or types == nil do + raise ArgumentError, + "cast_#{type}/3 expects the changeset to be cast. " <> + "Please call cast/4 before calling cast_#{type}/3" + end + + defp cast_relation(type, %Changeset{} = changeset, key, opts) do + {key, param_key} = cast_key(key) + %{data: data, types: types, params: params, changes: changes} = changeset + %{related: related} = relation = relation!(:cast, type, key, Map.get(types, key)) + params = params || %{} + + {changeset, required?} = + if opts[:required] do + {update_in(changeset.required, &[key | &1]), true} + else + {changeset, false} + end + + on_cast = Keyword.get_lazy(opts, :with, fn -> on_cast_default(type, related) end) + sort = opts_key_from_params(:sort_param, opts, params) + drop = opts_key_from_params(:drop_param, opts, params) + + changeset = + if is_map_key(params, param_key) or is_list(sort) or is_list(drop) do + value = Map.get(params, param_key) + original = Map.get(data, key) + current = Relation.load!(data, original) + value = cast_params(relation, value, sort, drop) + + case Relation.cast(relation, data, value, current, on_cast) do + {:ok, change, relation_valid?} when change != 
original -> + valid? = changeset.valid? and relation_valid? + changes = Map.put(changes, key, change) + changeset = %{force_update(changeset, opts) | changes: changes, valid?: valid?} + missing_relation(changeset, key, current, required?, relation, opts) + + {:error, {message, meta}} -> + meta = [validation: type] ++ meta + error = {key, message(opts, :invalid_message, message, meta)} + %{changeset | errors: [error | changeset.errors], valid?: false} + + # ignore or ok with change == original + _ -> + missing_relation(changeset, key, current, required?, relation, opts) + end + else + missing_relation(changeset, key, Map.get(data, key), required?, relation, opts) + end + + update_in(changeset.types[key], fn {type, relation} -> + {type, %{relation | on_cast: on_cast}} + end) + end + + defp cast_params(%{cardinality: :many} = relation, nil, sort, drop) + when is_list(sort) or is_list(drop) do + cast_params(relation, %{}, sort, drop) + end + + defp cast_params(%{cardinality: :many}, value, sort, drop) when is_map(value) do + drop = if is_list(drop), do: drop, else: [] + + {sorted, pending} = + if is_list(sort) do + Enum.map_reduce(sort -- drop, value, &Map.pop(&2, &1, %{})) + else + {[], value} + end + + sorted ++ + (pending + |> Map.drop(drop) + |> Enum.map(&key_as_int/1) + |> Enum.sort() + |> Enum.map(&elem(&1, 1))) + end + + defp cast_params(%{cardinality: :one}, value, sort, drop) do + if sort do + raise ArgumentError, ":sort_param not supported for belongs_to/has_one" + end + + if drop do + raise ArgumentError, ":drop_param not supported for belongs_to/has_one" + end + + value + end + + defp cast_params(_relation, value, _sort, _drop) do + value + end + + defp opts_key_from_params(opt, opts, params) do + if key = opts[opt] do + Map.get(params, Atom.to_string(key), nil) + end + end + + # We check for the byte size to avoid creating unnecessary large integers + # which would never map to a database key (u64 is 20 digits only). 
+ defp key_as_int({key, val}) when is_binary(key) and byte_size(key) < 32 do + case Integer.parse(key) do + {key, ""} -> {key, val} + _ -> {key, val} + end + end + + defp key_as_int(key_val), do: key_val + + defp on_cast_default(type, module) do + fn struct, params -> + try do + module.changeset(struct, params) + rescue + e in UndefinedFunctionError -> + case __STACKTRACE__ do + [{^module, :changeset, args_or_arity, _}] + when args_or_arity == 2 + when length(args_or_arity) == 2 -> + raise ArgumentError, """ + the module #{inspect(module)} does not define a changeset/2 function, + which is used by cast_#{type}/3. You need to either: + + 1. implement the #{type}.changeset/2 function + 2. pass the :with option to cast_#{type}/3 with an anonymous + function of arity 2 (or possibly arity 3, if using has_many or + embeds_many) + + When using an inline embed, the :with option must be given + """ + + stacktrace -> + reraise e, stacktrace + end + end + end + end + + defp missing_relation(changeset, name, current, required?, relation, opts) do + %{changes: changes, errors: errors} = changeset + current_changes = Map.get(changes, name, current) + + if required? and Relation.empty?(relation, current_changes) do + errors = [ + {name, message(opts, :required_message, "can't be blank", validation: :required)} + | errors + ] + + %{changeset | errors: errors, valid?: false} + else + changeset + end + end + + defp relation!(_op, type, _name, {type, relation}), + do: relation + + defp relation!(op, :assoc, name, nil) do + raise ArgumentError, + "cannot #{op} assoc `#{name}`, assoc `#{name}` not found. Make sure it is spelled correctly and that the association type is not read-only" + end + + defp relation!(op, type, name, nil) do + raise ArgumentError, + "cannot #{op} #{type} `#{name}`, #{type} `#{name}` not found. 
Make sure that it exists and is spelled correctly" + end + + defp relation!(op, type, name, {other, _}) when other in @relations do + raise ArgumentError, + "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{other}`" + end + + defp relation!(op, type, name, schema_type) do + raise ArgumentError, + "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{inspect(schema_type)}`" + end + + defp force_update(changeset, opts) do + if Keyword.get(opts, :force_update_on_change, true) do + put_in(changeset.repo_opts[:force], true) + else + changeset + end + end + + ## Working with changesets + + @doc """ + Merges two changesets. + + This function merges two changesets provided they have been applied to the + same data (their `:data` field is equal); if the data differs, an + `ArgumentError` exception is raised. If one of the changesets has a `:repo` + field which is not `nil`, then the value of that field is used as the `:repo` + field of the resulting changeset; if both changesets have a non-`nil` and + different `:repo` field, an `ArgumentError` exception is raised. + + The other fields are merged with the following criteria: + + * `params` - params are merged (not deep-merged) giving precedence to the + params of `changeset2` in case of a conflict. If both changesets have their + `:params` fields set to `nil`, the resulting changeset will have its params + set to `nil` too. + * `changes` - changes are merged giving precedence to the `changeset2` + changes. + * `errors` and `validations` - they are simply concatenated. + * `required` - required fields are merged; all the fields that appear + in the required list of both changesets are moved to the required + list of the resulting changeset. 
+ + ## Examples + + iex> changeset1 = cast(%Post{}, %{title: "Title"}, [:title]) + iex> changeset2 = cast(%Post{}, %{title: "New title", body: "Body"}, [:title, :body]) + iex> changeset = merge(changeset1, changeset2) + iex> changeset.changes + %{body: "Body", title: "New title"} + + iex> changeset1 = cast(%Post{body: "Body"}, %{title: "Title"}, [:title]) + iex> changeset2 = cast(%Post{}, %{title: "New title"}, [:title]) + iex> merge(changeset1, changeset2) + ** (ArgumentError) different :data when merging changesets + + """ + @spec merge(t, t) :: t + def merge(changeset1, changeset2) + + def merge(%Changeset{data: data} = cs1, %Changeset{data: data} = cs2) do + new_repo = merge_identical(cs1.repo, cs2.repo, "repos") + new_repo_opts = Keyword.merge(cs1.repo_opts, cs2.repo_opts) + new_action = merge_identical(cs1.action, cs2.action, "actions") + new_filters = Map.merge(cs1.filters, cs2.filters) + new_validations = cs1.validations ++ cs2.validations + new_constraints = cs1.constraints ++ cs2.constraints + + cast_merge( + %{ + cs1 + | repo: new_repo, + repo_opts: new_repo_opts, + filters: new_filters, + action: new_action, + validations: new_validations, + constraints: new_constraints + }, + cs2 + ) + end + + def merge(%Changeset{}, %Changeset{}) do + raise ArgumentError, message: "different :data when merging changesets" + end + + defp cast_merge(cs1, cs2) do + new_params = (cs1.params || cs2.params) && Map.merge(cs1.params || %{}, cs2.params || %{}) + new_types = Map.merge(cs1.types, cs2.types) + new_changes = Map.merge(cs1.changes, cs2.changes) + new_errors = Enum.uniq(cs1.errors ++ cs2.errors) + new_required = Enum.uniq(cs1.required ++ cs2.required) + new_valid? = cs1.valid? and cs2.valid? 
+ + %{ + cs1 + | params: new_params, + valid?: new_valid?, + errors: new_errors, + types: new_types, + changes: new_changes, + required: new_required + } + end + + defp merge_identical(object, nil, _thing), do: object + defp merge_identical(nil, object, _thing), do: object + defp merge_identical(object, object, _thing), do: object + + defp merge_identical(lhs, rhs, thing) do + raise ArgumentError, + "different #{thing} (`#{inspect(lhs)}` and " <> + "`#{inspect(rhs)}`) when merging changesets" + end + + @doc """ + Fetches the given field from changes or from the data. + + While `fetch_change/2` only looks at the current `changes` + to retrieve a value, this function looks at the changes and + then falls back on the data, finally returning `:error` if + no value is available. + + For relations, these functions will return the changeset + original data with changes applied. To retrieve raw changesets, + please use `fetch_change/2`. + + ## Examples + + iex> post = %Post{title: "Foo", body: "Bar baz bong"} + iex> changeset = change(post, %{title: "New title"}) + iex> fetch_field(changeset, :title) + {:changes, "New title"} + iex> fetch_field(changeset, :body) + {:data, "Bar baz bong"} + iex> fetch_field(changeset, :not_a_field) + :error + + """ + @spec fetch_field(t, atom) :: {:changes, term} | {:data, term} | :error + def fetch_field(%Changeset{changes: changes, data: data, types: types}, key) + when is_atom(key) do + case Map.fetch(changes, key) do + {:ok, value} -> + {:changes, change_as_field(types, key, value)} + + :error -> + case Map.fetch(data, key) do + {:ok, value} -> {:data, data_as_field(data, types, key, value)} + :error -> :error + end + end + end + + @doc """ + Same as `fetch_field/2` but returns the value or raises if the given key was not found. 
+ + ## Examples + + iex> post = %Post{title: "Foo", body: "Bar baz bong"} + iex> changeset = change(post, %{title: "New title"}) + iex> fetch_field!(changeset, :title) + "New title" + iex> fetch_field!(changeset, :other) + ** (KeyError) key :other not found in: %Post{...} + """ + @spec fetch_field!(t, atom) :: term + def fetch_field!(changeset, key) do + case fetch_field(changeset, key) do + {_, value} -> + value + + :error -> + raise KeyError, key: key, term: changeset.data + end + end + + @doc """ + Gets a field from changes or from the data. + + While `get_change/3` only looks at the current `changes` + to retrieve a value, this function looks at the changes and + then falls back on the data, finally returning `default` if + no value is available. + + For associations and embeds, this function always returns + nil, a struct, or a list of structs. In case of changes, + the changeset data will have all data applies. This guarantees + a consistent result regardless if changes have been applied + or not. Use `get_change/2` or `get_assoc/3`/`get_embed/3` + if you want to retrieve the relations as changesets or + if you want more fine-grained control. + + iex> post = %Post{title: "A title", body: "My body is a cage"} + iex> changeset = change(post, %{title: "A new title"}) + iex> get_field(changeset, :title) + "A new title" + iex> get_field(changeset, :not_a_field, "Told you, not a field!") + "Told you, not a field!" 
+ + """ + @spec get_field(t, atom, term) :: term + def get_field(%Changeset{changes: changes, data: data, types: types}, key, default \\ nil) do + case changes do + %{^key => value} -> + change_as_field(types, key, value) + + %{} -> + case data do + %{^key => value} -> data_as_field(data, types, key, value) + %{} -> default + end + end + end + + defp change_as_field(types, key, value) do + case types do + %{^key => {tag, relation}} when tag in @relations -> + Relation.apply_changes(relation, value) + + %{} -> + value + end + end + + defp data_as_field(data, types, key, value) do + case types do + %{^key => {tag, _relation}} when tag in @relations -> + Relation.load!(data, value) + + %{} -> + value + end + end + + @doc """ + Gets the association entry or entries from changes or from the data. + + Returned data is normalized to changesets by default. Pass the `:struct` + flag to retrieve the data as structs with changes applied, similar to `get_field/2`. + + ## Examples + + iex> %Author{posts: [%Post{id: 1, title: "hello"}]} + ...> |> change() + ...> |> get_assoc(:posts) + [%Ecto.Changeset{data: %Post{id: 1, title: "hello"}, changes: %{}}] + + iex> %Author{posts: [%Post{id: 1, title: "hello"}]} + ...> |> cast(%{posts: [%{id: 1, title: "world"}]}, []) + ...> |> cast_assoc(:posts) + ...> |> get_assoc(:posts, :changeset) + [%Ecto.Changeset{data: %Post{id: 1, title: "hello"}, changes: %{title: "world"}}] + + iex> %Author{posts: [%Post{id: 1, title: "hello"}]} + ...> |> cast(%{posts: [%{id: 1, title: "world"}]}, []) + ...> |> cast_assoc(:posts) + ...> |> get_assoc(:posts, :struct) + [%Post{id: 1, title: "world"}] + + """ + @spec get_assoc(t, atom, :changeset | :struct) :: + [t | Ecto.Schema.t()] | t | Ecto.Schema.t() | nil + def get_assoc(changeset, name, as \\ :changeset) + + def get_assoc(%Changeset{} = changeset, name, :struct) do + get_field(changeset, name) + end + + def get_assoc(%Changeset{} = changeset, name, :changeset) do + get_relation(:assoc, changeset, name) 
+ end + + @doc """ + Gets the embedded entry or entries from changes or from the data. + + Returned data is normalized to changesets by default. Pass the `:struct` + flag to retrieve the data as structs with changes applied, similar to `get_field/2`. + + ## Examples + + iex> %Post{comments: [%Comment{id: 1, body: "hello"}]} + ...> |> change() + ...> |> get_embed(:comments) + [%Ecto.Changeset{data: %Comment{id: 1, body: "hello"}, changes: %{}}] + + iex> %Post{comments: [%Comment{id: 1, body: "hello"}]} + ...> |> cast(%{comments: [%{id: 1, body: "world"}]}, []) + ...> |> cast_embed(:comments) + ...> |> get_embed(:comments, :changeset) + [%Ecto.Changeset{data: %Comment{id: 1, body: "hello"}, changes: %{body: "world"}}] + + iex> %Post{comments: [%Comment{id: 1, body: "hello"}]} + ...> |> cast(%{comments: [%{id: 1, body: "world"}]}, []) + ...> |> cast_embed(:comments) + ...> |> get_embed(:comments, :struct) + [%Comment{id: 1, body: "world"}] + + """ + def get_embed(changeset, name, as \\ :changeset) + + def get_embed(%Changeset{} = changeset, name, :struct) do + get_field(changeset, name) + end + + def get_embed(%Changeset{} = changeset, name, :changeset) do + get_relation(:embed, changeset, name) + end + + defp get_relation(tag, %{changes: changes, data: data, types: types}, name) do + _ = relation!(:get, tag, name, Map.get(types, name)) + + existing = + case changes do + %{^name => value} -> value + %{} -> Relation.load!(data, Map.fetch!(data, name)) + end + + case existing do + nil -> nil + list when is_list(list) -> Enum.map(list, &change/1) + item -> change(item) + end + end + + @doc """ + Fetches a change from the given changeset. + + This function only looks at the `:changes` field of the given `changeset` and + returns `{:ok, value}` if the change is present or `:error` if it's not. 
+ + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> fetch_change(changeset, :title) + {:ok, "bar"} + iex> fetch_change(changeset, :body) + :error + + """ + @spec fetch_change(t, atom) :: {:ok, term} | :error + def fetch_change(%Changeset{changes: changes} = _changeset, key) when is_atom(key) do + Map.fetch(changes, key) + end + + @doc """ + Same as `fetch_change/2` but returns the value or raises if the given key was not found. + + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> fetch_change!(changeset, :title) + "bar" + iex> fetch_change!(changeset, :body) + ** (KeyError) key :body not found in: %{title: "bar"} + """ + @spec fetch_change!(t, atom) :: term + def fetch_change!(changeset, key) do + case fetch_change(changeset, key) do + {:ok, value} -> + value + + :error -> + raise KeyError, key: key, term: changeset.changes + end + end + + @doc """ + Gets a change or returns a default value. + + For associations and embeds, this function always returns + nil, a changeset, or a list of changesets. + + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> get_change(changeset, :title) + "bar" + iex> get_change(changeset, :body) + nil + + """ + @spec get_change(t, atom, term) :: term + def get_change(%Changeset{changes: changes} = _changeset, key, default \\ nil) + when is_atom(key) do + Map.get(changes, key, default) + end + + @doc """ + Updates a change. + + The given `function` is invoked with the change value only if there + is a change for `key`. Once the function is invoked, it behaves as + `put_change/3`. + + Note that the value of the change can still be `nil` (unless the field + was marked as required on `validate_required/3`). 
+ + ## Examples + + iex> changeset = change(%Post{}, %{impressions: 1}) + iex> changeset = update_change(changeset, :impressions, &(&1 + 1)) + iex> changeset.changes.impressions + 2 + + """ + @spec update_change(t, atom, (term -> term)) :: t + def update_change(%Changeset{changes: changes} = changeset, key, function) when is_atom(key) do + case Map.fetch(changes, key) do + {:ok, value} -> + put_change(changeset, key, function.(value)) + + :error -> + changeset + end + end + + @doc """ + Puts a change on the given `key` with `value`. + + `key` is an atom that represents any field, embed or + association in the changeset. Note the `value` is directly + stored in the changeset with no validation whatsoever. + For this reason, this function is meant for working with + data internal to the application. + + If the change is already present, it is overridden with + the new value. If the change has the same value as in the + changeset data, no changes are added (and any existing + changes are removed). + + When changing embeds and associations, see `put_assoc/4` + for a complete reference on the accepted values. 
+ + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = put_change(changeset, :title, "bar") + iex> changeset.changes + %{title: "bar"} + + iex> changeset = change(%Post{title: "foo"}) + iex> changeset = put_change(changeset, :title, "foo") + iex> changeset.changes + %{} + + """ + @spec put_change(t, atom, term) :: t + def put_change(%Changeset{data: data, types: types} = changeset, key, value) do + type = Map.get(types, key) + + {changes, errors, valid?} = + put_change(data, changeset.changes, changeset.errors, changeset.valid?, key, value, type) + + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + defp put_change(data, changes, errors, valid?, key, value, {tag, relation}) + when tag in @relations do + original = Map.get(data, key) + current = Relation.load!(data, original) + + case Relation.change(relation, value, current) do + {:ok, change, relation_valid?} when change != original -> + {Map.put(changes, key, change), errors, valid? and relation_valid?} + + {:error, error} -> + {changes, [{key, error} | errors], false} + + # ignore or ok with change == original + _ -> + {Map.delete(changes, key), errors, valid?} + end + end + + defp put_change(data, _changes, _errors, _valid?, key, _value, nil) when is_atom(key) do + raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(data)}" + end + + defp put_change(_data, _changes, _errors, _valid?, key, _value, nil) when not is_atom(key) do + raise ArgumentError, + "field names given to change/put_change must be atoms, got: `#{inspect(key)}`" + end + + defp put_change(data, changes, errors, valid?, key, value, type) do + if not Ecto.Type.equal?(type, Map.get(data, key), value) do + {Map.put(changes, key, value), errors, valid?} + else + {Map.delete(changes, key), errors, valid?} + end + end + + @doc """ + Puts the given association entry or entries as a change in the changeset. + + This function is used to work with associations as a whole. 
For example, + if a Post has many Comments, it allows you to add, remove or change all + comments at once, automatically computing inserts/updates/deletes by + comparing the data that you gave with the one already in the database. + If your goal is to manage individual resources, such as adding a new + comment to a post, or update post linked to a comment, then it is not + necessary to use this function. We will explore this later in the + ["Example: Adding a comment to a post" section](#put_assoc/4-example-adding-a-comment-to-a-post). + + This function requires the associated data to have been preloaded, except + when the parent changeset has been newly built and not yet persisted. + Missing data will invoke the `:on_replace` behaviour defined on the + association. + + For associations with cardinality one, `nil` can be used to remove the existing + entry. For associations with many entries, an empty list may be given instead. + + If the association has no changes, it will be skipped. If the association is + invalid, the changeset will be marked as invalid. If the given value is not any + of values below, it will raise. + + The associated data may be given in different formats: + + * a map or a keyword list representing changes to be applied to the + associated data. A map or keyword list can be given to update the + associated data as long as they have matching primary keys. + For example, `put_assoc(changeset, :comments, [%{id: 1, title: "changed"}])` + will locate the comment with `:id` of 1 and update its title. + If no comment with such id exists, one is created on the fly. + Since only a single comment was given, any other associated comment + will be replaced. On all cases, it is expected the keys to be atoms. + Opposite to `cast_assoc` and `embed_assoc`, the given map (or struct) + is not validated in any way and will be inserted as is. 
+ This API is mostly used in scripts and tests, to make it straight- + forward to create schemas with associations at once, such as: + + Ecto.Changeset.change( + %Post{}, + title: "foo", + comments: [ + %{body: "first"}, + %{body: "second"} + ] + ) + + * changesets - when changesets are given, they are treated as the canonical + data and the associated data currently stored in the association is either + updated or replaced. For example, if you call + `put_assoc(post_changeset, :comments, [list_of_comments_changesets])`, + all comments with matching IDs will be updated according to the changesets. + New comments or comments not associated to any post will be correctly + associated. Currently associated comments that do not have a matching ID + in the list of changesets will act according to the `:on_replace` association + configuration (you can chose to raise, ignore the operation, update or delete + them). If there are changes in any of the changesets, they will be + persisted too. + + * structs - when structs are given, they are treated as the canonical data + and the associated data currently stored in the association is replaced. + For example, if you call + `put_assoc(post_changeset, :comments, [list_of_comments_structs])`, + all comments with matching IDs will be replaced by the new structs. + New comments or comments not associated to any post will be correctly + associated. Currently associated comments that do not have a matching ID + in the list of changesets will act according to the `:on_replace` + association configuration (you can chose to raise, ignore the operation, + update or delete them). Different to passing changesets, structs are not + change tracked in any fashion. In other words, if you change a comment + struct and give it to `put_assoc/4`, the updates in the struct won't be + persisted. You must use changesets, keyword lists, or maps instead. 
`put_assoc/4` with structs + only takes care of guaranteeing that the comments and the parent data + are associated. This is extremely useful when associating existing data, + as we will see in the ["Example: Adding tags to a post" section](#put_assoc/4-example-adding-tags-to-a-post). + + Once the parent changeset is given to an `Ecto.Repo` function, all entries + will be inserted/updated/deleted within the same transaction. + + If you need different behaviour or explicit control over how this function + behaves, you can drop it altogether and use `Ecto.Multi` to encode how several + database operations will happen on several schemas and changesets at once. + + ## Example: Adding a comment to a post + + Imagine a relationship where Post has many comments and you want to add a + new comment to an existing post. While it is possible to use `put_assoc/4` + for this, it would be unnecessarily complex. Let's see an example. + + First, let's fetch the post with all existing comments: + + post = Post |> Repo.get!(1) |> Repo.preload(:comments) + + The following approach is **wrong**: + + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "bad example!"}]) + |> Repo.update!() + + The reason why the example above is wrong is because `put_assoc/4` always + works with the **full data**. So the example above will effectively **erase + all previous comments** and only keep the comment you are currently adding. + Instead, you could try: + + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "so-so example!"} | post.comments]) + |> Repo.update!() + + In this example, we prepend the new comment to the list of existing comments. + Ecto will diff the list of comments currently in `post` with the list of comments + given, and correctly insert the new comment to the database. 
Note, however, + Ecto is doing a lot of work just to figure out something we knew since the + beginning, which is that there is only one new comment. + + In cases like above, when you want to work only on a single entry, it is + much easier to simply work on the association directly. For example, we + could instead set the `post` association in the comment: + + %Comment{body: "better example"} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:post, post) + |> Repo.insert!() + + Alternatively, we can make sure that when we create a comment, it is already + associated to the post: + + Ecto.build_assoc(post, :comments) + |> Ecto.Changeset.change(body: "great example!") + |> Repo.insert!() + + Or we can simply set the post_id in the comment itself: + + %Comment{body: "better example", post_id: post.id} + |> Repo.insert!() + + In other words, when you find yourself wanting to work only with a subset + of the data, then using `put_assoc/4` is most likely unnecessary. Instead, + you want to work on the other side of the association. + + Let's see an example where using `put_assoc/4` is a good fit. + + ## Example: Adding tags to a post + + Imagine you are receiving a set of tags you want to associate to a post. + Let's imagine that those tags exist upfront and are all persisted to the + database. Imagine we get the data in this format: + + params = %{"title" => "new post", "tags" => ["learner"]} + + Now, since the tags already exist, we will bring all of them from the + database and put them directly in the post: + + tags = Repo.all(from t in Tag, where: t.name in ^params["tags"]) + + post + |> Repo.preload(:tags) + |> Ecto.Changeset.cast(params, [:title]) # No need to allow :tags as we put them directly + |> Ecto.Changeset.put_assoc(:tags, tags) # Explicitly set the tags + + Since in this case we always require the user to pass all tags + directly, using `put_assoc/4` is a great fit. 
It will automatically + remove any tag not given and properly associate all of the given + tags with the post. + + Furthermore, since the tag information is given as structs read directly + from the database, Ecto will treat the data as correct and only do the + minimum necessary to guarantee that posts and tags are associated, + without trying to update or diff any of the fields in the tag struct. + + Although it accepts an `opts` argument, there are no options currently + supported by `put_assoc/4`. + + ## More resources + + You can learn more about working with associations in our documentation, + including cheatsheets and practical examples. Check out: + + * The docs for `cast_assoc/3` + * The [associations cheatsheet](associations.html) + * The [Constraints and Upserts guide](constraints-and-upserts.html) + * The [Polymorphic associations with many to many guide](polymorphic-associations-with-many-to-many.html) + + """ + @spec put_assoc(t, atom, term, Keyword.t()) :: t + def put_assoc(%Changeset{} = changeset, name, value, opts \\ []) do + put_relation(:assoc, changeset, name, value, opts) + end + + @doc """ + Puts the given embed entry or entries as a change in the changeset. + + This function is used to work with embeds as a whole. For embeds with + cardinality one, `nil` can be used to remove the existing entry. For + embeds with many entries, an empty list may be given instead. + + If the embed has no changes, it will be skipped. If the embed is + invalid, the changeset will be marked as invalid. + + The list of supported values and their behaviour is described in + `put_assoc/4`. If the given value is not any of values listed there, + it will raise. + + Although this function accepts an `opts` argument, there are no options + currently supported by `put_embed/4`. 
+ """ + @spec put_embed(t, atom, term, Keyword.t()) :: t + def put_embed(%Changeset{} = changeset, name, value, opts \\ []) do + put_relation(:embed, changeset, name, value, opts) + end + + defp put_relation(tag, changeset, name, value, _opts) do + %{data: data, types: types, changes: changes, errors: errors, valid?: valid?} = changeset + relation = relation!(:put, tag, name, Map.get(types, name)) + + {changes, errors, valid?} = + put_change(data, changes, errors, valid?, name, value, {tag, relation}) + + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + @doc """ + Forces a change on the given `key` with `value`. + + If the change is already present, it is overridden with + the new value. If the value is later modified via + `put_change/3` and `update_change/3`, reverting back to + its original value, the change will be reverted unless + `force_change/3` is called once again. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> changeset = force_change(changeset, :title, "bar") + iex> changeset.changes + %{title: "bar"} + + iex> changeset = force_change(changeset, :author, "bar") + iex> changeset.changes + %{title: "bar", author: "bar"} + + """ + @spec force_change(t, atom, term) :: t + def force_change(%Changeset{types: types} = changeset, key, value) do + case Map.get(types, key) do + {tag, _} when tag in @relations -> + raise "changing #{tag}s with force_change/3 is not supported, " <> + "please use put_#{tag}/4 instead" + + nil -> + raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(changeset.data)}" + + _ -> + put_in(changeset.changes[key], value) + end + end + + @doc """ + Deletes a change with the given key. 
+ + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = delete_change(changeset, :title) + iex> get_change(changeset, :title) + nil + + """ + @spec delete_change(t, atom) :: t + def delete_change(%Changeset{} = changeset, key) when is_atom(key) do + update_in(changeset.changes, &Map.delete(&1, key)) + end + + @doc """ + Applies the changeset changes to the changeset data. + + This operation will return the underlying data with changes + regardless if the changeset is valid or not. See `apply_action/2` + for a similar function that ensures the changeset is valid. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> apply_changes(changeset) + %Post{author: "bar", title: "foo"} + + """ + @spec apply_changes(t) :: Ecto.Schema.t() | data + def apply_changes(%Changeset{changes: changes, data: data}) when changes == %{} do + data + end + + def apply_changes(%Changeset{changes: changes, data: data, types: types}) do + Enum.reduce(changes, data, fn {key, value}, acc -> + case Map.fetch(types, key) do + {:ok, {tag, relation}} when tag in @relations -> + apply_relation_changes(acc, key, relation, value) + + {:ok, _} -> + Map.put(acc, key, value) + + :error -> + acc + end + end) + end + + @doc """ + Applies the changeset action only if the changes are valid. + + If the changes are valid, all changes are applied to the changeset data. + If the changes are invalid, no changes are applied, and an error tuple + is returned with the changeset containing the action that was attempted + to be applied. + + The action may be any atom. 
+ + ## Examples + + iex> {:ok, data} = apply_action(changeset, :update) + + iex> {:ok, data} = apply_action(changeset, :my_action) + + iex> {:error, changeset} = apply_action(changeset, :update) + %Ecto.Changeset{action: :update} + """ + @spec apply_action(t, action) :: {:ok, Ecto.Schema.t() | data} | {:error, t} + def apply_action(%Changeset{} = changeset, action) when is_atom(action) do + if changeset.valid? do + {:ok, apply_changes(changeset)} + else + {:error, %{changeset | action: action}} + end + end + + def apply_action(%Changeset{}, action) do + raise ArgumentError, "expected action to be an atom, got: #{inspect(action)}" + end + + @doc """ + Applies the changeset action if the changes are valid or raises an error. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> apply_action!(changeset, :update) + %Post{author: "bar", title: "foo"} + + iex> changeset = change(%Post{author: "bar"}, %{title: :bad}) + iex> apply_action!(changeset, :update) + ** (Ecto.InvalidChangesetError) could not perform update because changeset is invalid. + + See `apply_action/2` for more information. + """ + @spec apply_action!(t, action) :: Ecto.Schema.t() | data + def apply_action!(%Changeset{} = changeset, action) do + case apply_action(changeset, action) do + {:ok, data} -> + data + + {:error, changeset} -> + raise Ecto.InvalidChangesetError, action: action, changeset: changeset + end + end + + ## Validations + + @doc ~S""" + Returns a keyword list of the validations for this changeset. + + The keys in the list are the names of fields, and the values are a + validation associated with the field. A field may occur multiple + times in the list. 
+ + ## Example + + %Post{} + |> change() + |> validate_format(:title, ~r/^\w+:\s/, message: "must start with a topic") + |> validate_length(:title, max: 100) + |> validations() + #=> [ + title: {:length, [ max: 100 ]}, + title: {:format, ~r/^\w+:\s/} + ] + + The following validations may be included in the result. The list is + not necessarily exhaustive. For example, custom validations written + by the developer will also appear in our return value. + + This first group contains validations that hold a keyword list of validators. + This list may also include a `:message` key. + + * `{:length, [option]}` + + * `min: n` + * `max: n` + * `is: n` + * `count: :graphemes | :codepoints` + + * `{:number, [option]}` + + * `equal_to: n` + * `greater_than: n` + * `greater_than_or_equal_to: n` + * `less_than: n` + * `less_than_or_equal_to: n` + + The other validators simply take a value: + + * `{:exclusion, Enum.t}` + * `{:format, ~r/pattern/}` + * `{:inclusion, Enum.t}` + * `{:subset, Enum.t}` + + Note that calling `validate_required/3` does not store the validation under the + `changeset.validations` key (and so won't be included in the result of this + function). The required fields are stored under the `changeset.required` key. + """ + @spec validations(t) :: [{atom, term}] + def validations(%Changeset{validations: validations}) do + validations + end + + @doc """ + Adds an error to the changeset. + + An additional keyword list `keys` can be passed to provide additional + contextual information for the error. This is useful when using + `traverse_errors/2` and when translating errors with `Gettext` + + ## Examples + + iex> changeset = change(%Post{}, %{title: ""}) + iex> changeset = add_error(changeset, :title, "empty") + iex> changeset.errors + [title: {"empty", []}] + iex> changeset.valid? 
+ false + + iex> changeset = change(%Post{}, %{title: ""}) + iex> changeset = add_error(changeset, :title, "empty", additional: "info") + iex> changeset.errors + [title: {"empty", [additional: "info"]}] + iex> changeset.valid? + false + + iex> changeset = change(%Post{}, %{tags: ["ecto", "elixir", "x"]}) + iex> changeset = add_error(changeset, :tags, "tag '%{val}' is too short", val: "x") + iex> changeset.errors + [tags: {"tag '%{val}' is too short", [val: "x"]}] + iex> changeset.valid? + false + """ + @spec add_error(t, atom, String.t(), Keyword.t()) :: t + def add_error(%Changeset{errors: errors} = changeset, key, message, keys \\ []) + when is_binary(message) do + %{changeset | errors: [{key, {message, keys}} | errors], valid?: false} + end + + @doc """ + Validates the given `field` change. + + It invokes the `validator` function to perform the validation + only if a change for the given `field` exists and the change + value is not `nil`. The function must return a list of errors + (with an empty list meaning no errors). + + In case there's at least one error, the list of errors will be appended to the + `:errors` field of the changeset and the `:valid?` flag will be set to + `false`. + + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = validate_change changeset, :title, fn :title, title -> + ...> # Value must not be "foo"! 
+ ...> if title == "foo" do + ...> [title: "cannot be foo"] + ...> else + ...> [] + ...> end + ...> end + iex> changeset.errors + [title: {"cannot be foo", []}] + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = validate_change changeset, :title, fn :title, title -> + ...> if title == "foo" do + ...> [title: {"cannot be foo", additional: "info"}] + ...> else + ...> [] + ...> end + ...> end + iex> changeset.errors + [title: {"cannot be foo", [additional: "info"]}] + + """ + @spec validate_change( + t, + atom, + (atom, term -> [{atom, String.t()} | {atom, error}]) + ) :: t + def validate_change(%Changeset{} = changeset, field, validator) when is_atom(field) do + %{changes: changes, types: types, errors: errors} = changeset + ensure_field_exists!(changeset, types, field) + + value = Map.get(changes, field) + new = if is_nil(value), do: [], else: validator.(field, value) + + new = + Enum.map(new, fn + {key, val} when is_atom(key) and is_binary(val) -> + {key, {val, []}} + + {key, {val, opts}} when is_atom(key) and is_binary(val) and is_list(opts) -> + {key, {val, opts}} + end) + + case new do + [] -> changeset + [_ | _] -> %{changeset | errors: new ++ errors, valid?: false} + end + end + + @doc """ + Stores the validation `metadata` and validates the given `field` change. + + Similar to `validate_change/3` but stores the validation metadata + into the changeset validators. The validator metadata is often used + as a reflection mechanism, to automatically generate code based on + the available validations. 
+ + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = validate_change changeset, :title, :useless_validator, fn + ...> _, _ -> [] + ...> end + iex> changeset.validations + [title: :useless_validator] + + """ + @spec validate_change( + t, + atom, + term, + (atom, term -> [{atom, String.t()} | {atom, error}]) + ) :: t + def validate_change( + %Changeset{validations: validations} = changeset, + field, + metadata, + validator + ) do + changeset = %{changeset | validations: [{field, metadata} | validations]} + validate_change(changeset, field, validator) + end + + @doc """ + Validates that one or more fields are present in the changeset. + + You can pass a single field name or a list of field names that + are required. + + If the value of a field is `nil` or a string made only of whitespace, + the changeset is marked as invalid, the field is removed from the + changeset's changes, and an error is added. An error won't be added if + the field already has an error. + + If a field is given to `validate_required/3` but it has not been passed + as parameter during `cast/3` (i.e. it has not been changed), then + `validate_required/3` will check for its current value in the data. + If the data contains a non-empty value for the field, then no error is + added. This allows developers to use `validate_required/3` to perform + partial updates. For example, on `insert` all fields would be required, + because their default values on the data are all `nil`, but on `update`, + if you don't want to change a field that has been previously set, + you are not required to pass it as a parameter, since `validate_required/3` + won't add an error for missing changes as long as the value in the + data given to the `changeset` is not empty. + + Do not use this function to validate associations that are required, + instead pass the `:required` option to `cast_assoc/3` or `cast_embed/3`. 
+ + Opposite to other validations, calling this function does not store + the validation under the `changeset.validations` key. Instead, it + stores all required fields under `changeset.required`. + + ## Options + + * `:message` - the message on failure, defaults to "can't be blank". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + + ## Examples + + validate_required(changeset, :title) + validate_required(changeset, [:title, :body]) + + """ + @spec validate_required(t, list | atom, Keyword.t()) :: t + def validate_required(%Changeset{} = changeset, fields, opts \\ []) when not is_nil(fields) do + %{required: required, errors: errors, changes: changes} = changeset + fields = List.wrap(fields) + + fields_with_errors = + for field <- fields, + field_missing?(changeset, field), + is_nil(errors[field]), + do: field + + case fields_with_errors do + [] -> + %{changeset | required: fields ++ required} + + _ -> + new_errors = + Enum.map( + fields_with_errors, + &{&1, message(opts, "can't be blank", validation: :required)} + ) + + changes = Map.drop(changes, fields_with_errors) + + %{ + changeset + | changes: changes, + required: fields ++ required, + errors: new_errors ++ errors, + valid?: false + } + end + end + + @doc """ + Determines whether a field is missing in a changeset. + + The field passed into this function will have its presence evaluated + according to the same rules as `validate_required/3`. + + This is useful when performing complex validations that are not possible with + `validate_required/3`. For example, evaluating whether at least one field + from a list is present or evaluating that exactly one field from a list is + present. 
+ + ## Examples + + iex> changeset = cast(%Post{}, %{color: "Red"}, [:color]) + iex> missing_fields = Enum.filter([:title, :body], &field_missing?(changeset, &1)) + iex> changeset = + ...> case missing_fields do + ...> [_, _] -> add_error(changeset, :title, "at least one of `:title` or `:body` must be present") + ...> _ -> changeset + ...> end + ...> changeset.errors + [title: {"at least one of `:title` or `:body` must be present", []}] + + """ + @spec field_missing?(t(), atom()) :: boolean() + def field_missing?(%Changeset{} = changeset, field) when not is_nil(field) do + ensure_field_not_many!(changeset.types, field) && missing?(changeset, field) && + ensure_field_exists!(changeset, changeset.types, field) + end + + @doc """ + Validates that no existing record with a different primary key + has the same values for these fields. + + This function exists to provide quick feedback to users of your + application. It should not be relied on for any data guarantee as it + has race conditions and is inherently unsafe. For example, if this + check happens twice in the same time interval (because the user + submitted a form twice), both checks may pass and you may end-up with + duplicate entries in the database. Therefore, a `unique_constraint/3` + should also be used to ensure your data won't get corrupted. + + However, because constraints are only checked if all validations + succeed, this function can be used as an early check to provide + early feedback to users, since most conflicting data will have been + inserted prior to the current validation phase. + + When applying this validation to a schemas loaded from the database + this check will exclude rows having the same primary key as set on + the changeset, as those are supposed to be overwritten anyways. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "has already been taken". 
Can also be a `{msg, opts}` tuple, + to provide additional options when using `traverse_errors/2`. + + * `:error_key` - the key to which changeset error will be added when + check fails, defaults to the first field name of the given list of + fields. + + * `:prefix` - the prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). See `Ecto.Repo` documentation + for more information. + + * `:nulls_distinct` - a boolean controlling whether different null values + are considered distinct (not equal). If `false`, `nil` values will have + their uniqueness checked. Otherwise, the check will not be performed. This + is only meaningful when paired with a unique index that treats nulls as equal, + such as Postgres 15's `NULLS NOT DISTINCT` option. Defaults to `true` + + * `:repo_opts` - the options to pass to the `Ecto.Repo` call. + + * `:query` - the base query to use for the check. Defaults to the schema of + the changeset. If the primary key is set, a clause will be added to exclude + the changeset row itself from the check. 
+ + ## Examples + + unsafe_validate_unique(changeset, :city_name, repo) + unsafe_validate_unique(changeset, [:city_name, :state_name], repo) + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, message: "city must be unique within state") + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, prefix: "public") + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, query: from(c in City, where: is_nil(c.deleted_at))) + + """ + @spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t(), Keyword.t()) :: t + def unsafe_validate_unique(%Changeset{} = changeset, fields, repo, opts \\ []) + when is_list(opts) do + {repo_opts, opts} = Keyword.pop(opts, :repo_opts, []) + %{data: data, validations: validations} = changeset + + unless is_struct(data) and function_exported?(data.__struct__, :__schema__, 1) do + raise ArgumentError, + "unsafe_validate_unique/4 does not work with schemaless changesets, got #{inspect(data)}" + end + + schema = + case {changeset.data, opts[:query]} do + # regular schema + {%schema{__meta__: %Metadata{}}, _} -> + schema + + # embedded schema with base query + {%schema{}, base_query} when base_query != nil -> + schema + + # embedded schema without base query + {data, _} -> + raise ArgumentError, + "unsafe_validate_unique/4 does not work with embedded schemas unless " <> + "the `:query` option is specified, got: #{inspect(data)}" + end + + fields = List.wrap(fields) + + changeset = %{ + changeset + | validations: [{hd(fields), {:unsafe_unique, fields: fields}} | validations] + } + + nulls_distinct = Keyword.get(opts, :nulls_distinct, true) + where_clause = unsafe_unique_filter(fields, changeset, nulls_distinct) + + # No need to query if there is a prior error for the fields + any_prior_errors_for_fields? = Enum.any?(changeset.errors, &(elem(&1, 0) in fields)) + + # No need to query if we haven't changed any of the fields in question + unrelated_changes? 
= Enum.all?(fields, &(not Map.has_key?(changeset.changes, &1))) + + # If one or more fields are `nil` and `nulls_distinct` is not false, we can't query for uniqueness + distinct_nils? = + if nulls_distinct == false do + false + else + Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil())) + end + + if unrelated_changes? or distinct_nils? or any_prior_errors_for_fields? do + changeset + else + query = + Keyword.get(opts, :query, schema) + |> maybe_exclude_itself(changeset) + |> Ecto.Query.where(^where_clause) + + query = + if prefix = opts[:prefix] do + Ecto.Query.put_query_prefix(query, prefix) + else + query + end + + if repo.exists?(query, repo_opts) do + error_key = Keyword.get(opts, :error_key, hd(fields)) + + {error_message, keys} = + message(opts, "has already been taken", validation: :unsafe_unique, fields: fields) + + add_error(changeset, error_key, error_message, keys) + else + changeset + end + end + end + + defp unsafe_unique_filter(fields, changeset, false) do + Enum.reduce(fields, Ecto.Query.dynamic(true), fn field, dynamic -> + case get_field(changeset, field) do + nil -> Ecto.Query.dynamic([q], ^dynamic and is_nil(field(q, ^field))) + value -> Ecto.Query.dynamic([q], ^dynamic and field(q, ^field) == ^value) + end + end) + end + + defp unsafe_unique_filter(fields, changeset, _nulls_distinct) do + for field <- fields do + {field, get_field(changeset, field)} + end + end + + defp maybe_exclude_itself(base_query, %{data: %schema{__meta__: %Metadata{}}} = changeset) do + primary_keys_to_exclude = + case Ecto.get_meta(changeset.data, :state) do + :loaded -> + :primary_key + |> schema.__schema__() + |> Enum.map(&{&1, get_field(changeset, &1)}) + + _ -> + [] + end + + case primary_keys_to_exclude do + [{_pk_field, nil} | _remaining_pks] -> + base_query + + [{pk_field, value} | remaining_pks] -> + # generate a clean query (one that does not start with 'TRUE OR ...') + first_expr = Ecto.Query.dynamic([q], field(q, ^pk_field) == ^value) + + 
Enum.reduce_while(remaining_pks, first_expr, fn + {_pk_field, nil}, _expr -> + {:halt, nil} + + {pk_field, value}, expr -> + {:cont, Ecto.Query.dynamic([q], ^expr and field(q, ^pk_field) == ^value)} + end) + |> case do + nil -> + base_query + + matches_pk -> + Ecto.Query.where(base_query, ^Ecto.Query.dynamic(not (^matches_pk))) + end + + [] -> + base_query + end + end + + defp maybe_exclude_itself(base_query, _changeset), do: base_query + + defp ensure_field_exists!(changeset = %Changeset{}, types, field) do + unless Map.has_key?(types, field) do + raise ArgumentError, "unknown field #{inspect(field)} in #{inspect(changeset.data)}" + end + + true + end + + defp ensure_field_not_many!(types, field) do + case types do + %{^field => {:assoc, %Ecto.Association.Has{cardinality: :many}}} -> + IO.warn( + "attempting to determine the presence of has_many association #{inspect(field)} " <> + "with validate_required/3 or field_missing?/2 which has no effect. You can pass the " <> + ":required option to Ecto.Changeset.cast_assoc/3 to achieve this." + ) + + %{^field => {:embed, %Ecto.Embedded{cardinality: :many}}} -> + IO.warn( + "attempting to determine the presence of embed_many field #{inspect(field)} " <> + "with validate_required/3 or field_missing?/2 which has no effect. You can pass the " <> + ":required option to Ecto.Changeset.cast_embed/3 to achieve this." + ) + + _ -> + true + end + end + + defp missing?(changeset, field) when is_atom(field) do + case get_field(changeset, field) do + %{__struct__: Ecto.Association.NotLoaded} -> + raise ArgumentError, + "attempting to determine the presence of association `#{field}` " <> + "that was not loaded. Please preload your associations " <> + "before calling validate_required/3 or field_missing?/2. 
" <> + "You may also consider passing the :required option to Ecto.Changeset.cast_assoc/3" + + value when is_binary(value) -> + value == "" + + nil -> + true + + _ -> + false + end + end + + defp missing?(_changeset, field) do + raise ArgumentError, + "validate_required/3 and field_missing?/2 expect field names to be atoms, got: `#{inspect(field)}`" + end + + @doc """ + Validates a change has the given format. + + The format has to be expressed as a regular expression. + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:message` - the message on failure, defaults to "has invalid format". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + + ## Examples + + validate_format(changeset, :email, ~r/@/) + + """ + @spec validate_format(t, atom, Regex.t(), Keyword.t()) :: t + def validate_format(changeset, field, format, opts \\ []) do + validate_change(changeset, field, {:format, format}, fn _, value -> + unless is_binary(value) do + raise ArgumentError, + "validate_format/4 expects changes to be strings, received: #{inspect(value)} for field `#{field}`" + end + + if value =~ format, + do: [], + else: [{field, message(opts, "has invalid format", validation: :format)}] + end) + end + + @doc """ + Validates a change is included in the given enumerable. + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:message` - the message on failure, defaults to "is invalid". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. 
+ + ## Examples + + validate_inclusion(changeset, :cardinal_direction, ["north", "east", "south", "west"]) + validate_inclusion(changeset, :age, 0..99) + + """ + @spec validate_inclusion(t, atom, Enum.t(), Keyword.t()) :: t + def validate_inclusion(changeset, field, data, opts \\ []) do + validate_change(changeset, field, {:inclusion, data}, fn _, value -> + type = Map.fetch!(changeset.types, field) + + if Ecto.Type.include?(type, value, data), + do: [], + else: [{field, message(opts, "is invalid", validation: :inclusion, enum: data)}] + end) + end + + @doc ~S""" + Validates a change, of type enum, is a subset of the given enumerable. + + This validates if a list of values belongs to the given enumerable. + If you need to validate if a single value is inside the given enumerable, + you should use `validate_inclusion/4` instead. + + Type of the field must be array. + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:message` - the message on failure, defaults to "has an invalid entry". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. 
+ + ## Examples + + validate_subset(changeset, :pets, ["cat", "dog", "parrot"]) + validate_subset(changeset, :lottery_numbers, 0..99) + + """ + @spec validate_subset(t, atom, Enum.t(), Keyword.t()) :: t + def validate_subset(changeset, field, data, opts \\ []) do + validate_change(changeset, field, {:subset, data}, fn _, value -> + element_type = + case Map.fetch!(changeset.types, field) do + {:array, element_type} -> + element_type + + type -> + # backwards compatibility: custom types use underlying type + {:array, element_type} = Ecto.Type.type(type) + element_type + end + + case Enum.any?(value, fn element -> not Ecto.Type.include?(element_type, element, data) end) do + true -> + [{field, message(opts, "has an invalid entry", validation: :subset, enum: data)}] + + false -> + [] + end + end) + end + + @doc """ + Validates a change is not included in the given enumerable. + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:message` - the message on failure, defaults to "is reserved". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + + ## Examples + + validate_exclusion(changeset, :name, ~w(admin superadmin)) + + """ + @spec validate_exclusion(t, atom, Enum.t(), Keyword.t()) :: t + def validate_exclusion(changeset, field, data, opts \\ []) do + validate_change(changeset, field, {:exclusion, data}, fn _, value -> + type = Map.fetch!(changeset.types, field) + + if Ecto.Type.include?(type, value, data), + do: [{field, message(opts, "is reserved", validation: :exclusion, enum: data)}], + else: [] + end) + end + + @doc """ + Validates a change is a string or list of the given length. + + Note that the length of a string is counted in graphemes by default. If using + this validation to match a character limit of a database backend, + it's likely that the limit ignores graphemes and limits the number + of unicode characters. 
Then consider using the `:count` option to + limit the number of codepoints (`:codepoints`), or limit the number of bytes (`:bytes`). + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:is` - the length must be exactly this value + * `:min` - the length must be greater than or equal to this value + * `:max` - the length must be less than or equal to this value + * `:count` - what length to count for string, `:graphemes` (default), `:codepoints` or `:bytes` + * `:message` - the message on failure, depending on the validation, is one of: + * for strings: + * "should be %{count} character(s)" + * "should be at least %{count} character(s)" + * "should be at most %{count} character(s)" + * for binary: + * "should be %{count} byte(s)" + * "should be at least %{count} byte(s)" + * "should be at most %{count} byte(s)" + * for lists and maps: + * "should have %{count} item(s)" + * "should have at least %{count} item(s)" + * "should have at most %{count} item(s)" + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. 
+ + ## Examples + + validate_length(changeset, :title, min: 3) + validate_length(changeset, :title, max: 100) + validate_length(changeset, :title, min: 3, max: 100) + validate_length(changeset, :code, is: 9) + validate_length(changeset, :topics, is: 2) + validate_length(changeset, :icon, count: :bytes, max: 1024 * 16) + + """ + @spec validate_length(t, atom, Keyword.t()) :: t + def validate_length(changeset, field, opts) when is_list(opts) do + validate_change(changeset, field, {:length, opts}, fn + _, value -> + count_type = opts[:count] || :graphemes + + {type, length} = + case {value, count_type} do + {value, :codepoints} when is_binary(value) -> + {:string, codepoints_length(value, 0)} + + {value, :graphemes} when is_binary(value) -> + {:string, String.length(value)} + + {value, :bytes} when is_binary(value) -> + {:binary, byte_size(value)} + + {value, _} when is_list(value) -> + {:list, list_length(changeset, field, value)} + + {value, _} when is_map(value) -> + {:map, map_size(value)} + end + + error = + ((is = opts[:is]) && wrong_length(type, length, is, opts)) || + ((min = opts[:min]) && too_short(type, length, min, opts)) || + ((max = opts[:max]) && too_long(type, length, max, opts)) + + if error, do: [{field, error}], else: [] + end) + end + + defp codepoints_length(<<_::utf8, rest::binary>>, acc), do: codepoints_length(rest, acc + 1) + defp codepoints_length(<<_, rest::binary>>, acc), do: codepoints_length(rest, acc + 1) + defp codepoints_length(<<>>, acc), do: acc + + defp list_length(%{types: types}, field, value) do + case Map.fetch(types, field) do + {:ok, {tag, _association}} when tag in [:embed, :assoc] -> + length(Relation.filter_empty(value)) + + _ -> + length(value) + end + end + + defp wrong_length(_type, value, value, _opts), do: nil + + defp wrong_length(:string, _length, value, opts), + do: + message(opts, "should be %{count} character(s)", + count: value, + validation: :length, + kind: :is, + type: :string + ) + + defp wrong_length(:binary, 
_length, value, opts), + do: + message(opts, "should be %{count} byte(s)", + count: value, + validation: :length, + kind: :is, + type: :binary + ) + + defp wrong_length(:list, _length, value, opts), + do: + message(opts, "should have %{count} item(s)", + count: value, + validation: :length, + kind: :is, + type: :list + ) + + defp wrong_length(:map, _length, value, opts), + do: + message(opts, "should have %{count} item(s)", + count: value, + validation: :length, + kind: :is, + type: :map + ) + + defp too_short(_type, length, value, _opts) when length >= value, do: nil + + defp too_short(:string, _length, value, opts) do + message(opts, "should be at least %{count} character(s)", + count: value, + validation: :length, + kind: :min, + type: :string + ) + end + + defp too_short(:binary, _length, value, opts) do + message(opts, "should be at least %{count} byte(s)", + count: value, + validation: :length, + kind: :min, + type: :binary + ) + end + + defp too_short(:list, _length, value, opts) do + message(opts, "should have at least %{count} item(s)", + count: value, + validation: :length, + kind: :min, + type: :list + ) + end + + defp too_short(:map, _length, value, opts) do + message(opts, "should have at least %{count} item(s)", + count: value, + validation: :length, + kind: :min, + type: :map + ) + end + + defp too_long(_type, length, value, _opts) when length <= value, do: nil + + defp too_long(:string, _length, value, opts) do + message(opts, "should be at most %{count} character(s)", + count: value, + validation: :length, + kind: :max, + type: :string + ) + end + + defp too_long(:binary, _length, value, opts) do + message(opts, "should be at most %{count} byte(s)", + count: value, + validation: :length, + kind: :max, + type: :binary + ) + end + + defp too_long(:list, _length, value, opts) do + message(opts, "should have at most %{count} item(s)", + count: value, + validation: :length, + kind: :max, + type: :list + ) + end + + defp too_long(:map, _length, value, 
opts) do + message(opts, "should have at most %{count} item(s)", + count: value, + validation: :length, + kind: :max, + type: :map + ) + end + + @doc """ + Validates the properties of a number. + + The validation only runs if a change for the given `field` exists and the + change value is not `nil`. + + ## Options + + * `:less_than` + * `:greater_than` + * `:less_than_or_equal_to` + * `:greater_than_or_equal_to` + * `:equal_to` + * `:not_equal_to` + * `:message` - the message on failure, defaults to one of: + * "must be less than %{number}" + * "must be greater than %{number}" + * "must be less than or equal to %{number}" + * "must be greater than or equal to %{number}" + * "must be equal to %{number}" + * "must be not equal to %{number}" + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + + ## Examples + + validate_number(changeset, :count, less_than: 3) + validate_number(changeset, :pi, greater_than: 3, less_than: 4) + validate_number(changeset, :the_answer_to_life_the_universe_and_everything, equal_to: 42) + + """ + @spec validate_number(t, atom, Keyword.t()) :: t + def validate_number(changeset, field, opts) do + validate_change(changeset, field, {:number, opts}, fn + field, value -> + unless valid_number?(value) do + raise ArgumentError, + "expected field `#{field}` to be a decimal, integer, or float, got: #{inspect(value)}" + end + + opts + |> Keyword.drop([:message]) + |> Enum.find_value([], fn {spec_key, target_value} -> + case Map.fetch(@number_validators, spec_key) do + {:ok, {spec_function, default_message}} -> + unless valid_number?(target_value) do + raise ArgumentError, + "expected option `#{spec_key}` to be a decimal, integer, or float, got: #{inspect(target_value)}" + end + + compare_numbers( + field, + value, + default_message, + spec_key, + spec_function, + target_value, + opts + ) + + :error -> + supported_options = + @number_validators |> Map.keys() |> Enum.map_join("\n", &" * #{inspect(&1)}") + 
+ raise ArgumentError, """ + unknown option #{inspect(spec_key)} given to validate_number/3 + + The supported options are: + + #{supported_options} + """ + end + end) + end) + end + + defp valid_number?(%Decimal{}), do: true + defp valid_number?(other), do: is_number(other) + + defp compare_numbers( + field, + %Decimal{} = value, + default_message, + spec_key, + _spec_function, + %Decimal{} = target_value, + opts + ) do + result = Decimal.compare(value, target_value) + + case decimal_compare(result, spec_key) do + true -> + nil + + false -> + [ + {field, + message(opts, default_message, + validation: :number, + kind: spec_key, + number: target_value + )} + ] + end + end + + defp compare_numbers( + field, + value, + default_message, + spec_key, + spec_function, + %Decimal{} = target_value, + opts + ) do + compare_numbers( + field, + decimal_new(value), + default_message, + spec_key, + spec_function, + target_value, + opts + ) + end + + defp compare_numbers( + field, + %Decimal{} = value, + default_message, + spec_key, + spec_function, + target_value, + opts + ) do + compare_numbers( + field, + value, + default_message, + spec_key, + spec_function, + decimal_new(target_value), + opts + ) + end + + defp compare_numbers(field, value, default_message, spec_key, spec_function, target_value, opts) do + case apply(spec_function, [value, target_value]) do + true -> + nil + + false -> + [ + {field, + message(opts, default_message, + validation: :number, + kind: spec_key, + number: target_value + )} + ] + end + end + + defp decimal_new(term) when is_float(term), do: Decimal.from_float(term) + defp decimal_new(term), do: Decimal.new(term) + + defp decimal_compare(:lt, spec), do: spec in [:less_than, :less_than_or_equal_to, :not_equal_to] + + defp decimal_compare(:gt, spec), + do: spec in [:greater_than, :greater_than_or_equal_to, :not_equal_to] + + defp decimal_compare(:eq, spec), + do: spec in [:equal_to, :less_than_or_equal_to, :greater_than_or_equal_to] + + @doc """ + 
Validates that the given parameter matches its confirmation. + + By calling `validate_confirmation(changeset, :email)`, this + validation will check if both "email" and "email_confirmation" + in the parameter map match. Note this validation only looks + at the parameters themselves, never the fields in the schema. + As such, the "email_confirmation" field does not need to be + added as a virtual field in your schema. + + Note that if the confirmation field is missing, this does not + add a validation error. This is done on purpose as you do not + trigger confirmation validation in places where a confirmation + is not required (for example, in APIs). You can force the + confirmation parameter to be required in the options (see below). + + ## Options + + * `:message` - the message on failure, defaults to "does not match confirmation". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + * `:required` - boolean, sets whether existence of confirmation parameter + is required for addition of error. 
Defaults to false + + ## Examples + + validate_confirmation(changeset, :email) + validate_confirmation(changeset, :password, message: "does not match password") + + cast(data, params, [:password]) + |> validate_confirmation(:password, message: "does not match password") + + """ + @spec validate_confirmation(t, atom, Keyword.t()) :: t + def validate_confirmation(changeset, field, opts \\ []) + + def validate_confirmation(%{params: params} = changeset, field, opts) when is_map(params) do + param = Atom.to_string(field) + error_param = "#{param}_confirmation" + error_field = String.to_atom(error_param) + value = Map.get(params, param) + + errors = + case params do + %{^error_param => ^value} -> + [] + + %{^error_param => _} -> + [ + {error_field, message(opts, "does not match confirmation", validation: :confirmation)} + ] + + %{} -> + confirmation_missing(opts, error_field) + end + + %{ + changeset + | validations: [{field, {:confirmation, opts}} | changeset.validations], + errors: errors ++ changeset.errors, + valid?: changeset.valid? and errors == [] + } + end + + def validate_confirmation(%{params: nil} = changeset, _, _) do + changeset + end + + defp confirmation_missing(opts, error_field) do + required = Keyword.get(opts, :required, false) + + if required, + do: [{error_field, message(opts, "can't be blank", validation: :required)}], + else: [] + end + + defp message(opts, key \\ :message, default, message_opts) do + case Keyword.get(opts, key, default) do + {message, extra_opts} when is_binary(message) and is_list(extra_opts) -> + {message, Keyword.merge(message_opts, extra_opts)} + + message when is_binary(message) -> + {message, message_opts} + end + end + + defp constraint_message(opts, key \\ :message, default) do + Keyword.get(opts, key, default) + end + + @doc """ + Validates the given parameter is true. + + Note this validation only checks the parameter itself is true, never + the field in the schema. 
That's because acceptance parameters do not need + to be persisted, as by definition they would always be stored as `true`. + + ## Options + + * `:message` - the message on failure, defaults to "must be accepted". + Can also be a `{msg, opts}` tuple, to provide additional options + when using `traverse_errors/2`. + + ## Examples + + validate_acceptance(changeset, :terms_of_service) + validate_acceptance(changeset, :rules, message: "please accept rules") + + """ + @spec validate_acceptance(t, atom, Keyword.t()) :: t + def validate_acceptance(changeset, field, opts \\ []) + + def validate_acceptance(%{params: params} = changeset, field, opts) do + errors = validate_acceptance_errors(params, field, opts) + + %{ + changeset + | validations: [{field, {:acceptance, opts}} | changeset.validations], + errors: errors ++ changeset.errors, + valid?: changeset.valid? and errors == [] + } + end + + defp validate_acceptance_errors(nil, _field, _opts), do: [] + + defp validate_acceptance_errors(params, field, opts) do + param = Atom.to_string(field) + value = Map.get(params, param) + + case Ecto.Type.cast(:boolean, value) do + {:ok, true} -> [] + _ -> [{field, message(opts, "must be accepted", validation: :acceptance)}] + end + end + + ## Optimistic lock + + @doc ~S""" + Applies optimistic locking to the changeset. + + [Optimistic + locking](https://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or + *optimistic concurrency control*) is a technique that allows concurrent edits + on a single record. While pessimistic locking works by locking a resource for + an entire transaction, optimistic locking only checks if the resource changed + before updating it. + + This is done by regularly fetching the record from the database, then checking + whether another user has made changes to the record *only when updating the + record*. 
This behaviour is ideal in situations where the chances of concurrent + updates to the same record are low; if they're not, pessimistic locking or + other concurrency patterns may be more suited. + + ## Usage + + Optimistic locking works by keeping a "version" counter for each record; this + counter gets incremented each time a modification is made to a record. Hence, + in order to use optimistic locking, a field must exist in your schema for + versioning purpose. Such field is usually an integer but other types are + supported. + + ## Examples + + Assuming we have a `Post` schema (stored in the `posts` table), the first step + is to add a version column to the `posts` table: + + alter table(:posts) do + add :lock_version, :integer, default: 1 + end + + The column name is arbitrary and doesn't need to be `:lock_version`. Now add + a field to the schema too: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + field :title, :string + field :lock_version, :integer, default: 1 + end + + def changeset(:update, struct, params \\ %{}) do + struct + |> Ecto.Changeset.cast(params, [:title]) + |> Ecto.Changeset.optimistic_lock(:lock_version) + end + end + + Now let's take optimistic locking for a spin: + + iex> post = Repo.insert!(%Post{title: "foo"}) + %Post{id: 1, title: "foo", lock_version: 1} + iex> valid_change = Post.changeset(:update, post, %{title: "bar"}) + iex> stale_change = Post.changeset(:update, post, %{title: "baz"}) + iex> Repo.update!(valid_change) + %Post{id: 1, title: "bar", lock_version: 2} + iex> Repo.update!(stale_change) + ** (Ecto.StaleEntryError) attempted to update a stale entry: + + %Post{id: 1, title: "baz", lock_version: 1} + + When a conflict happens (a record which has been previously fetched is + being updated, but that same record has been modified since it was + fetched), an `Ecto.StaleEntryError` exception is raised. + + Optimistic locking also works with delete operations. 
Just call the + `optimistic_lock/3` function with the data before delete: + + iex> changeset = Ecto.Changeset.optimistic_lock(post, :lock_version) + iex> Repo.delete(changeset) + + `optimistic_lock/3` by default assumes the field + being used as a lock is an integer. If you want to use another type, + you need to pass the third argument customizing how the next value + is generated: + + iex> Ecto.Changeset.optimistic_lock(post, :lock_uuid, fn _ -> Ecto.UUID.generate end) + + """ + @spec optimistic_lock(Ecto.Schema.t() | t, atom, (term -> term)) :: t + def optimistic_lock(data_or_changeset, field, incrementer \\ &increment_with_rollover/1) do + changeset = change(data_or_changeset, %{}) + current = get_field(changeset, field) + + # Apply these changes only inside the repo because we + # don't want to permanently track the lock change. + changeset = + prepare_changes(changeset, fn changeset -> + put_in(changeset.changes[field], incrementer.(current)) + end) + + if is_nil(current) do + Logger.warning(""" + the current value of `#{field}` is `nil` and will not be used as a filter for optimistic locking. \ + To ensure `#{field}` is never `nil`, consider setting a default value. + """) + + changeset + else + put_in(changeset.filters[field], current) + end + end + + # increment_with_rollover expect to be used with lock_version set as :integer in db schema + # 2_147_483_647 is upper limit for signed integer for both PostgreSQL and MySQL + defp increment_with_rollover(val) when val >= 2_147_483_647 do + 1 + end + + defp increment_with_rollover(val) when is_integer(val) do + val + 1 + end + + @doc """ + Provides a function executed by the repository on insert/update/delete. + + If the changeset given to the repository is valid, the function given to + `prepare_changes/2` will be called with the changeset and must return a + changeset, allowing developers to do final adjustments to the changeset or + to issue data consistency commands. 
The repository itself can be accessed + inside the function under the `repo` field in the changeset. If the + changeset given to the repository is invalid, the function will not be + invoked. + + The given function is guaranteed to run inside the same transaction + as the changeset operation for databases that do support transactions. + + ## Example + + A common use case is updating a counter cache, in this case updating a post's + comment count when a comment is created: + + def create_comment(comment, params) do + comment + |> cast(params, [:body, :post_id]) + |> prepare_changes(fn changeset -> + if post_id = get_change(changeset, :post_id) do + query = from Post, where: [id: ^post_id] + changeset.repo.update_all(query, inc: [comment_count: 1]) + end + changeset + end) + end + + We retrieve the repo from the comment changeset itself and use + update_all to update the counter cache in one query. Finally, the original + changeset must be returned. + """ + @spec prepare_changes(t, (t -> t)) :: t + def prepare_changes(%Changeset{prepare: prepare} = changeset, function) + when is_function(function, 1) do + %{changeset | prepare: [function | prepare]} + end + + ## Constraints + + @doc """ + Returns all constraints in a changeset. + + A constraint is a map with the following fields: + + * `:type` - the type of the constraint that will be checked in the database, + such as `:check`, `:unique`, etc + * `:constraint` - the database constraint name as a string or `Regex`. The constraint at + the database level will be checked against this according to `:match` type + * `:match` - the type of match Ecto will perform on a violated constraint + against the `:constraint` value. 
It is `:exact`, `:suffix` or `:prefix` + * `:field` - the field a violated constraint will apply the error to + * `:error_message` - the error message in case of violated constraints + * `:error_type` - the type of error that identifies the error message + + """ + @spec constraints(t) :: [constraint] + def constraints(%Changeset{constraints: constraints}) do + constraints + end + + @doc """ + Checks for a check constraint in the given field. + + The check constraint works by relying on the database to check + if the check constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + In order to use the check constraint, the first step is + to define the check constraint in a migration: + + create constraint("users", :age_must_be_positive, check: "age > 0") + + Now that a constraint exists, when modifying users, we could + annotate the changeset with a check constraint so Ecto knows + how to convert it into an error message: + + cast(user, params, [:age]) + |> check_constraint(:age, name: :age_must_be_positive) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, + if the age is not positive, the underlying operation will fail + but Ecto will convert the database exception into a changeset error + and return an `{:error, changeset}` tuple. Note that the error will + occur only after hitting the database, so it will not be visible + until all other validations pass. If the constraint fails inside a + transaction, the transaction will be marked as aborted. + + ## Options + + * `:message` - the message in case the constraint check fails. + Defaults to "is invalid" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. 
May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + @spec check_constraint(t, atom, Keyword.t()) :: t + def check_constraint(changeset, field, opts \\ []) do + name = opts[:name] || raise ArgumentError, "must supply the name of the constraint" + message = constraint_message(opts, "is invalid") + match_type = Keyword.get(opts, :match, :exact) + + add_constraint(changeset, :check, name, match_type, field, message, :check) + end + + @doc """ + Checks for a unique constraint in the given field or list of fields. + + The unique constraint works by relying on the database to check + if the unique constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + In order to use the uniqueness constraint, the first step is + to define the unique index in a migration: + + create unique_index(:users, [:email]) + + Now that a constraint exists, when modifying users, we could + annotate the changeset with a unique constraint so Ecto knows + how to convert it into an error message: + + cast(user, params, [:email]) + |> unique_constraint(:email) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, + if the email already exists, the underlying operation will fail but + Ecto will convert the database exception into a changeset error and + return an `{:error, changeset}` tuple. Note that the error will occur + only after hitting the database, so it will not be visible until all + other validations pass. If the constraint fails inside a transaction, + the transaction will be marked as aborted. 
+ + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "has already been taken" + + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. May be required explicitly for complex cases + + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + * `:error_key` - the key to which changeset error will be added when + check fails, defaults to the first field name of the given list of + fields. + + ## Complex constraints + + Because the constraint logic is in the database, we can leverage + all the database functionality when defining them. For example, + let's suppose the e-mails are scoped by company id: + + # In migration + create unique_index(:users, [:email, :company_id]) + + # In the changeset function + cast(user, params, [:email]) + |> unique_constraint([:email, :company_id]) + + The first field name, `:email` in this case, will be used as the error + key to the changeset errors keyword list. 
For example, the above + `unique_constraint/3` would generate something like: + + Repo.insert!(%User{email: "john@elixir.org", company_id: 1}) + changeset = User.changeset(%User{}, %{email: "john@elixir.org", company_id: 1}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [email: {"has already been taken", []}] + + In complex cases, instead of relying on name inference, it may be best + to set the constraint name explicitly: + + # In the migration + create unique_index(:users, [:email, :company_id], name: :users_email_company_id_index) + + # In the changeset function + cast(user, params, [:email]) + |> unique_constraint(:email, name: :users_email_company_id_index) + + ### Partitioning + + If your table is partitioned, then your unique index might look different + per partition, e.g. Postgres adds p to the middle of your key, like: + + users_p0_email_key + users_p1_email_key + ... + users_p99_email_key + + In this case you can use the name and suffix options together to match on + these dynamic indexes, like: + + cast(user, params, [:email]) + |> unique_constraint(:email, name: :email_key, match: :suffix) + + There are cases where the index has a number added both for table name and + index name, generating an index name such as: + + user_p0_email_idx2 + user_p1_email_idx3 + ... + user_p99_email_idx101 + + In that case, a `Regex` can be used to match: + + cast(user, params, [:email]) + |> unique_constraint(:email, name: ~r/user_p\d+_email_idx\d+/) + + ## Case sensitivity + + Unfortunately, different databases provide different guarantees + when it comes to case-sensitiveness. For example, in MySQL, comparisons + are case-insensitive by default. In Postgres, users can define case + insensitive column by using the `:citext` type/extension. In your migration: + + execute "CREATE EXTENSION IF NOT EXISTS citext" + create table(:users) do + ... + add :email, :citext + ... 
+ end + + If for some reason your database does not support case insensitive columns, + you can explicitly downcase values before inserting/updating them: + + cast(data, params, [:email]) + |> update_change(:email, &String.downcase/1) + |> unique_constraint(:email) + + """ + @spec unique_constraint(t, atom | [atom, ...], Keyword.t()) :: t + def unique_constraint(changeset, field_or_fields, opts \\ []) + + def unique_constraint(changeset, field, opts) when is_atom(field) do + unique_constraint(changeset, [field], opts) + end + + def unique_constraint(changeset, [first_field | _] = fields, opts) do + name = opts[:name] || unique_index_name(changeset, fields) + message = constraint_message(opts, "has already been taken") + match_type = Keyword.get(opts, :match, :exact) + error_key = Keyword.get(opts, :error_key, first_field) + + add_constraint(changeset, :unique, name, match_type, error_key, message, :unique) + end + + defp unique_index_name(changeset, fields) do + field_names = Enum.map(fields, &get_field_source(changeset, &1)) + Enum.join([get_source(changeset)] ++ field_names ++ ["index"], "_") + end + + @doc """ + Checks for foreign key constraint in the given field. + + The foreign key constraint works by relying on the database to + check if the associated data exists or not. This is useful to + guarantee that a child will only be created if the parent exists + in the database too. + + In order to use the foreign key constraint the first step is + to define the foreign key in a migration. This is often done + with references. For example, imagine you are creating a + comments table that belongs to posts. One would have: + + create table(:comments) do + add :post_id, references(:posts) + end + + By default, Ecto will generate a foreign key constraint with + name "comments_post_id_fkey" (the name is configurable). 
+ + Now that a constraint exists, when creating comments, we could + annotate the changeset with foreign key constraint so Ecto knows + how to convert it into an error message: + + cast(comment, params, [:post_id]) + |> foreign_key_constraint(:post_id) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, + if the associated post does not exist, the underlying operation will + fail but Ecto will convert the database exception into a changeset + error and return an `{:error, changeset}` tuple. Note that the error + will occur only after hitting the database, so it will not be visible + until all other validations pass. If the constraint fails inside a + transaction, the transaction will be marked as aborted. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "does not exist" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + @spec foreign_key_constraint(t, atom, Keyword.t()) :: t + def foreign_key_constraint(changeset, field, opts \\ []) do + name = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_fkey" + match_type = Keyword.get(opts, :match, :exact) + message = constraint_message(opts, "does not exist") + + add_constraint(changeset, :foreign_key, name, match_type, field, message, :foreign) + end + + @doc """ + Checks the associated field exists. 
+ + This is similar to `foreign_key_constraint/3` except that the + field is inferred from the association definition. This is useful + to guarantee that a child will only be created if the parent exists + in the database too. Therefore, it only applies to `belongs_to` + associations. + + As the name says, a constraint is required in the database for + this function to work. Such constraint is often added as a + reference to the child table: + + create table(:comments) do + add :post_id, references(:posts) + end + + Now, when inserting a comment, it is possible to forbid any + comment to be added if the associated post does not exist: + + comment + |> Ecto.Changeset.cast(params, [:post_id]) + |> Ecto.Changeset.assoc_constraint(:post) + |> Repo.insert + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "does not exist" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. 
+ """ + @spec assoc_constraint(t, atom, Keyword.t()) :: t + def assoc_constraint(changeset, assoc, opts \\ []) do + name = + opts[:name] || + case get_assoc_type(changeset, assoc) do + %Ecto.Association.BelongsTo{owner_key: owner_key} -> + "#{get_source(changeset)}_#{owner_key}_fkey" + + other -> + raise ArgumentError, + "assoc_constraint can only be added to belongs to associations, got: #{inspect(other)}" + end + + match_type = Keyword.get(opts, :match, :exact) + message = constraint_message(opts, "does not exist") + + add_constraint(changeset, :foreign_key, name, match_type, assoc, message, :assoc) + end + + @doc """ + Checks the associated field does not exist. + + This is similar to `foreign_key_constraint/3` except that the + field is inferred from the association definition. This is useful + to guarantee that parent can only be deleted (or have its primary + key changed) if no child exists in the database. Therefore, it only + applies to `has_*` associations. + + As the name says, a constraint is required in the database for + this function to work. Such constraint is often added as a + reference to the child table: + + create table(:comments) do + add :post_id, references(:posts) + end + + Now, when deleting the post, it is possible to forbid any post to + be deleted if they still have comments attached to it: + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:comments) + |> Repo.delete + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "is still associated with this entry" (for `has_one`) + and "are still associated with this entry" (for `has_many`) + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. 
May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + @spec no_assoc_constraint(t, atom, Keyword.t()) :: t + def no_assoc_constraint(changeset, assoc, opts \\ []) do + {name, message} = + case get_assoc_type(changeset, assoc) do + %Ecto.Association.Has{ + cardinality: cardinality, + related_key: related_key, + related: related + } -> + {opts[:name] || "#{related.__schema__(:source)}_#{related_key}_fkey", + constraint_message(opts, no_assoc_message(cardinality))} + + other -> + raise ArgumentError, + "no_assoc_constraint can only be added to has one/many associations, got: #{inspect(other)}" + end + + match_type = Keyword.get(opts, :match, :exact) + + add_constraint(changeset, :foreign_key, name, match_type, assoc, message, :no_assoc) + end + + @doc """ + Checks for an exclusion constraint in the given field. + + The exclusion constraint works by relying on the database to check + if the exclusion constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "violates an exclusion constraint" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. If this option is given, + the `field` argument only indicates the field the error will be + added to. May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. 
`:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + def exclusion_constraint(changeset, field, opts \\ []) do + name = + opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_exclusion" + + message = constraint_message(opts, "violates an exclusion constraint") + match_type = Keyword.get(opts, :match, :exact) + + add_constraint(changeset, :exclusion, name, match_type, field, message, :exclusion) + end + + defp no_assoc_message(:one), do: "is still associated with this entry" + defp no_assoc_message(:many), do: "are still associated with this entry" + + defp add_constraint( + %Changeset{constraints: constraints} = changeset, + type, + constraint, + match, + field, + error_message, + error_type + ) + when is_atom(field) and is_binary(error_message) do + constraint = %{ + constraint: normalize_constraint(constraint, match), + error_message: error_message, + error_type: error_type, + field: field, + match: match, + type: type + } + + %{changeset | constraints: [constraint | constraints]} + end + + defp normalize_constraint(constraint, match) when is_atom(constraint) do + normalize_constraint(Atom.to_string(constraint), match) + end + + defp normalize_constraint(constraint, match) when is_binary(constraint) do + unless match in @match_types do + raise ArgumentError, + "invalid match type: #{inspect(match)}. 
Allowed match types: #{inspect(@match_types)}" + end + + constraint + end + + defp normalize_constraint(%Regex{} = constraint, match) do + if match != :exact do + raise ArgumentError, + "a Regex constraint only allows match type of :exact, got: #{inspect(match)}" + end + + constraint + end + + defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source), + do: source + + defp get_source(%{data: data}) do + raise ArgumentError, + "cannot add constraint to changeset because it does not have a source, got: #{inspect(data)}" + end + + defp get_source(item) do + raise ArgumentError, + "cannot add constraint because a changeset was not supplied, got: #{inspect(item)}" + end + + defp get_assoc_type(%{types: types}, assoc) do + case Map.fetch(types, assoc) do + {:ok, {:assoc, association}} -> association + _ -> raise_invalid_assoc(types, assoc) + end + end + + defp raise_invalid_assoc(types, assoc) do + associations = for {_key, {:assoc, %{field: field}}} <- types, do: field + one_of = if match?([_], associations), do: "", else: "one of " + + raise ArgumentError, + "cannot add constraint to changeset because association `#{assoc}` does not exist. " <> + "Did you mean #{one_of}`#{Enum.join(associations, "`, `")}`?" + end + + defp get_field_source(%{data: %{__struct__: schema}}, field) when is_atom(schema), + do: schema.__schema__(:field_source, field) || field + + defp get_field_source(%{}, field), + do: field + + @doc ~S""" + Traverses changeset errors and applies the given function to error messages. + + This function is particularly useful when associations and embeds + are cast in the changeset as it will traverse all associations and + embeds and place all errors in a series of nested maps. + + A changeset is supplied along with a function to apply to each + error message as the changeset is traversed. 
The error message + function receives an error tuple `{msg, opts}`, for example: + + {"should be at least %{count} characters", [count: 3, validation: :length, min: 3]} + + ## Examples + + iex> traverse_errors(changeset, fn {msg, opts} -> + ...> Regex.replace(~r"%{(\w+)}", msg, fn _, key -> + ...> opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + ...> end) + ...> end) + %{title: ["should be at least 3 characters"]} + + Optionally function can accept three arguments: `changeset`, `field` + and error tuple `{msg, opts}`. It is useful whenever you want to extract + validations rules from `changeset.validations` to build detailed error + description. + """ + @spec traverse_errors(t, (error -> String.t()) | (Changeset.t(), atom, error -> String.t())) :: + traverse_result + def traverse_errors( + %Changeset{errors: errors, changes: changes, types: types} = changeset, + msg_func + ) + when is_function(msg_func, 1) or is_function(msg_func, 3) do + errors + |> Enum.reverse() + |> merge_keyword_keys(msg_func, changeset) + |> merge_related_keys(changes, types, msg_func, &traverse_errors/2) + end + + defp merge_keyword_keys(keyword_list, msg_func, _) when is_function(msg_func, 1) do + Enum.reduce(keyword_list, %{}, fn {key, val}, acc -> + val = msg_func.(val) + Map.update(acc, key, [val], &[val | &1]) + end) + end + + defp merge_keyword_keys(keyword_list, msg_func, changeset) when is_function(msg_func, 3) do + Enum.reduce(keyword_list, %{}, fn {key, val}, acc -> + val = msg_func.(changeset, key, val) + Map.update(acc, key, [val], &[val | &1]) + end) + end + + defp merge_related_keys(_, _, nil, _, _) do + raise ArgumentError, "changeset does not have types information" + end + + defp merge_related_keys(map, changes, types, msg_func, traverse_function) do + Enum.reduce(types, map, fn + {field, {tag, %{cardinality: :many}}}, acc when tag in @relations -> + if changesets = Map.get(changes, field) do + {child, all_empty?} = + Enum.map_reduce(changesets, true, 
fn changeset, all_empty? -> + child = traverse_function.(changeset, msg_func) + {child, all_empty? and child == %{}} + end) + + case all_empty? do + true -> acc + false -> Map.put(acc, field, child) + end + else + acc + end + + {field, {tag, %{cardinality: :one}}}, acc when tag in @relations -> + if changeset = Map.get(changes, field) do + case traverse_function.(changeset, msg_func) do + child when child == %{} -> acc + child -> Map.put(acc, field, child) + end + else + acc + end + + {_, _}, acc -> + acc + end) + end + + defp apply_relation_changes(acc, key, relation, value) do + relation_changed = Relation.apply_changes(relation, value) + + acc = Map.put(acc, key, relation_changed) + + with %Ecto.Association.BelongsTo{related_key: related_key} <- relation, + %{^related_key => id} <- relation_changed do + Map.put(acc, relation.owner_key, id) + else + _ -> acc + end + end + + @doc ~S""" + Traverses changeset validations and applies the given function to validations. + + This behaves the same as `traverse_errors/2`, but operates on changeset + validations instead of errors. 
+ + ## Examples + + iex> traverse_validations(changeset, &(&1)) + %{title: [format: ~r/pattern/, length: [min: 1, max: 20]]} + + iex> traverse_validations(changeset, fn + ...> {:length, opts} -> {:length, "#{Keyword.get(opts, :min, 0)}-#{Keyword.get(opts, :max, 32)}"} + ...> {:format, %Regex{source: source}} -> {:format, "/#{source}/"} + ...> {other, opts} -> {other, inspect(opts)} + ...> end) + %{title: [format: "/pattern/", length: "1-20"]} + """ + @spec traverse_validations( + t, + (validation -> String.t()) | (Changeset.t(), atom, validation -> String.t()) + ) :: traverse_result + def traverse_validations( + %Changeset{validations: validations, changes: changes, types: types} = changeset, + msg_func + ) + when is_function(msg_func, 1) or is_function(msg_func, 3) do + validations + |> Enum.reverse() + |> merge_keyword_keys(msg_func, changeset) + |> merge_related_keys(changes, types, msg_func, &traverse_validations/2) + end +end + +defimpl Inspect, for: Ecto.Changeset do + import Inspect.Algebra + + def inspect(%Ecto.Changeset{data: data} = changeset, opts) do + # The trailing element is skipped later on + list = + for attr <- [:action, :changes, :errors, :data, :valid?, :action] do + {attr, Map.get(changeset, attr)} + end + + redacted_fields = + case data do + %type{} -> + if function_exported?(type, :__schema__, 1) do + type.__schema__(:redact_fields) + else + [] + end + + _ -> + [] + end + + container_doc("#Ecto.Changeset<", list, ">", %{limit: 5}, fn + {:action, action}, _opts -> + concat("action: ", to_doc(action, opts)) + + {:changes, changes}, _opts -> + concat("changes: ", changes |> filter(redacted_fields) |> to_doc(opts)) + + {:data, data}, _opts -> + concat("data: ", to_struct(data, opts)) + + {:errors, errors}, _opts -> + concat("errors: ", to_doc(errors, opts)) + + {:valid?, valid?}, _opts -> + concat("valid?: ", to_doc(valid?, opts)) + end) + end + + defp to_struct(%{__struct__: struct}, _opts), do: "#" <> Kernel.inspect(struct) <> "<>" + defp 
to_struct(other, opts), do: to_doc(other, opts) + + defp filter(changes, redacted_fields) do + Enum.reduce(redacted_fields, changes, fn redacted_field, changes -> + if Map.has_key?(changes, redacted_field) do + Map.put(changes, redacted_field, "**redacted**") + else + changes + end + end) + end +end diff --git a/deps/ecto/lib/ecto/changeset/relation.ex b/deps/ecto/lib/ecto/changeset/relation.ex new file mode 100644 index 0000000..2a4db33 --- /dev/null +++ b/deps/ecto/lib/ecto/changeset/relation.ex @@ -0,0 +1,586 @@ +defmodule Ecto.Changeset.Relation do + @moduledoc false + + require Logger + alias Ecto.Changeset + alias Ecto.Association.NotLoaded + + @type t :: %{required(:__struct__) => atom(), + required(:cardinality) => :one | :many, + required(:on_replace) => :raise | :mark_as_invalid | atom, + required(:relationship) => :parent | :child, + required(:ordered) => boolean, + required(:owner) => atom, + required(:related) => atom, + required(:field) => atom, + optional(atom()) => any()} + + @doc """ + Builds the related data. + """ + @callback build(t, owner :: Ecto.Schema.t) :: Ecto.Schema.t + + @doc """ + Returns empty container for relation. + """ + def empty(%{cardinality: cardinality}), do: cardinality_to_empty(cardinality) + + defp cardinality_to_empty(:one), do: nil + defp cardinality_to_empty(:many), do: [] + + @doc """ + Checks if the container can be considered empty. 
+ """ + def empty?(%{cardinality: _}, %NotLoaded{}), do: true + def empty?(%{cardinality: :many}, []), do: true + def empty?(%{cardinality: :many}, changes), do: filter_empty(changes) == [] + def empty?(%{cardinality: :one}, nil), do: true + def empty?(%{}, _), do: false + + @doc """ + Filter empty changes + """ + def filter_empty(changes) do + Enum.filter(changes, fn + %Changeset{action: action} when action in [:replace, :delete] -> false + _ -> true + end) + end + + @doc """ + Applies related changeset changes + """ + def apply_changes(%{cardinality: :one}, nil) do + nil + end + + def apply_changes(%{cardinality: :one}, changeset) do + apply_changes(changeset) + end + + def apply_changes(%{cardinality: :many}, changesets) do + for changeset <- changesets, + struct = apply_changes(changeset), + do: struct + end + + defp apply_changes(%Changeset{action: :delete}), do: nil + defp apply_changes(%Changeset{action: :replace}), do: nil + defp apply_changes(changeset), do: Changeset.apply_changes(changeset) + + @doc """ + Loads the relation with the given struct. + + Loading will fail if the association is not loaded but the struct is. + """ + def load!(%{__meta__: %{state: :built}}, %NotLoaded{__cardinality__: cardinality}) do + cardinality_to_empty(cardinality) + end + + def load!(struct, %NotLoaded{__field__: field}) do + raise "attempting to cast or change association `#{field}` " <> + "from `#{inspect struct.__struct__}` that was not loaded. Please preload your " <> + "associations before manipulating them through changesets" + end + + def load!(_struct, loaded), do: loaded + + @doc """ + Casts related according to the `on_cast` function. 
+ """ + def cast(%{cardinality: :one} = relation, _owner, nil, current, _on_cast) do + case current && on_replace(relation, current) do + :error -> {:error, {"is invalid", [type: expected_type(relation)]}} + _ -> {:ok, nil, true} + end + end + + def cast(%{cardinality: :one} = relation, owner, params, current, on_cast) when is_list(params) do + if Keyword.keyword?(params) do + cast(relation, owner, Map.new(params), current, on_cast) + else + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + def cast(%{related: mod} = relation, owner, params, current, on_cast) do + pks = mod.__schema__(:primary_key) + fun = &do_cast(relation, owner, &1, &2, &3, &4, on_cast) + data_pk = data_pk(pks) + param_pk = param_pk(mod, pks) + + with :error <- cast_or_change(relation, params, current, data_pk, param_pk, fun) do + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + defp do_cast(meta, owner, params, struct, allowed_actions, idx, {module, fun, args}) + when is_atom(module) and is_atom(fun) and is_list(args) do + IO.warn "passing a MFA to :with in cast_assoc/cast_embed is deprecated, please pass an anonymous function instead" + + on_cast = fn changeset, attrs -> + apply(module, fun, [changeset, attrs | args]) + end + + do_cast(meta, owner, params, struct, allowed_actions, idx, on_cast) + end + + defp do_cast(relation, owner, params, nil = _struct, allowed_actions, idx, on_cast) do + {:ok, + relation + |> apply_on_cast(on_cast, relation.__struct__.build(relation, owner), params, idx) + |> put_new_action(:insert) + |> check_action!(allowed_actions)} + end + + defp do_cast(relation, _owner, nil = _params, current, _allowed_actions, _idx, _on_cast) do + on_replace(relation, current) + end + + defp do_cast(relation, _owner, params, struct, allowed_actions, idx, on_cast) do + {:ok, + relation + |> apply_on_cast(on_cast, struct, params, idx) + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + defp 
apply_on_cast(%{cardinality: :many}, on_cast, struct, params, idx) when is_function(on_cast, 3) do + on_cast.(struct, params, idx) + end + + defp apply_on_cast(%{cardinality: :one, field: field}, on_cast, _struct, _params, _idx) when is_function(on_cast, 3) do + raise ArgumentError, "invalid :with function for relation #{inspect(field)} " <> + "of cardinality one. Expected a function of arity 2" + end + + defp apply_on_cast(_relation, on_cast, struct, params, _idx) when is_function(on_cast, 2) do + on_cast.(struct, params) + end + + @doc """ + Wraps related structs in changesets. + """ + def change(%{cardinality: :one} = relation, nil, current) do + case current && on_replace(relation, current) do + :error -> {:error, {"is invalid", [type: expected_type(relation)]}} + _ -> {:ok, nil, true} + end + end + + def change(%{related: mod} = relation, value, current) do + get_pks = data_pk(mod.__schema__(:primary_key)) + with :error <- cast_or_change(relation, value, current, get_pks, get_pks, + &do_change(relation, &1, &2, &3, &4)) do + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + # This may be an insert or an update, get all fields. 
+ defp do_change(relation, %{__struct__: _} = changeset_or_struct, nil, _allowed_actions, _idx) do + changeset = Changeset.change(changeset_or_struct) + {:ok, + changeset + |> assert_changeset_struct!(relation) + |> put_new_action(action_from_changeset(changeset, nil))} + end + + defp do_change(relation, nil, current, _allowed_actions, _idx) do + on_replace(relation, current) + end + + defp do_change(relation, %Changeset{} = changeset, _current, allowed_actions, _idx) do + {:ok, + changeset + |> assert_changeset_struct!(relation) + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + defp do_change(_relation, %{__struct__: _} = struct, _current, allowed_actions, _idx) do + {:ok, + struct + |> Ecto.Changeset.change + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + defp do_change(relation, changes, current, allowed_actions, idx) + when is_list(changes) or is_map(changes) do + changeset = Ecto.Changeset.change(current || relation.__struct__.build(relation, nil), changes) + changeset = put_new_action(changeset, action_from_changeset(changeset, current)) + do_change(relation, changeset, current, allowed_actions, idx) + end + + defp action_from_changeset(%{data: %{__meta__: %{state: state}}}, _current) do + case state do + :built -> :insert + :loaded -> :update + :deleted -> :delete + end + end + + defp action_from_changeset(_, nil) do + :insert + end + + defp action_from_changeset(_, _current) do + :update + end + + defp assert_changeset_struct!(%{data: %{__struct__: mod}} = changeset, %{related: mod}) do + changeset + end + defp assert_changeset_struct!(%{data: data}, %{related: mod}) do + raise ArgumentError, "expected changeset data to be a #{mod} struct, got: #{inspect data}" + end + + @doc """ + Handles the changeset or struct when being replaced. 
+ """ + def on_replace(%{on_replace: :mark_as_invalid}, _changeset_or_struct) do + :error + end + + def on_replace(%{on_replace: :raise, field: name, owner: owner}, _) do + raise """ + you are attempting to change relation #{inspect name} of + #{inspect owner} but the `:on_replace` option of this relation + is set to `:raise`. + + By default it is not possible to replace or delete embeds and + associations during `cast`. Therefore Ecto requires the parameters + given to `cast` to have IDs matching the data currently associated + to #{inspect owner}. Failing to do so results in this error message. + + If you want to replace data or automatically delete any data + not sent to `cast`, please set the appropriate `:on_replace` + option when defining the relation. The docs for `Ecto.Changeset` + covers the supported options in the "Associations, embeds and on + replace" section. + + However, if you don't want to allow data to be replaced or + deleted, only updated, make sure that: + + * If you are attempting to update an existing entry, you + are including the entry primary key (ID) in the data. + + * If you have a relationship with many children, all children + must be given on update. + + """ + end + + def on_replace(_relation, changeset_or_struct) do + {:ok, Changeset.change(changeset_or_struct) |> put_new_action(:replace)} + end + + defp raise_if_updating_with_struct!(%{field: name, owner: owner}, %{__struct__: _} = new) do + raise """ + you have set that the relation #{inspect name} of #{inspect owner} + has `:on_replace` set to `:update` but you are giving it a struct/ + changeset to put_assoc/put_change. + + Since you have set `:on_replace` to `:update`, you are only allowed + to update the existing entry by giving updated fields as a map or + keyword list or set it to nil. + + If you indeed want to replace the existing #{inspect name}, you have + to change the foreign key field directly. 
+ + Got: + + #{inspect(new, pretty: true)} + """ + end + + defp raise_if_updating_with_struct!(_, _) do + true + end + + defp cast_or_change(%{cardinality: :one} = relation, value, current, current_pks_fun, new_pks_fun, fun) + when is_map(value) or is_list(value) or is_nil(value) do + single_change(relation, value, current_pks_fun, new_pks_fun, fun, current) + end + + defp cast_or_change(%{cardinality: :many}, [], [], _current_pks, _new_pks, _fun) do + {:ok, [], true} + end + + defp cast_or_change(%{cardinality: :many} = relation, value, current, current_pks_fun, new_pks_fun, fun) + when is_list(value) do + {current_pks, current_map} = process_current(current, current_pks_fun, relation) + %{unique: unique, ordered: ordered, related: mod} = relation + change_pks_fun = change_pk(mod.__schema__(:primary_key)) + ordered = if ordered, do: current_pks, else: [] + map_changes(value, new_pks_fun, change_pks_fun, fun, current_map, [], true, true, unique && %{}, 0, ordered) + end + + defp cast_or_change(_, _, _, _, _, _), do: :error + + # single change + + defp single_change(_relation, nil, _current_pks_fun, _new_pks_fun, fun, current) do + single_change(nil, current, fun, [:update, :delete], false) + end + + defp single_change(_relation, new, _current_pks_fun, _new_pks_fun, fun, nil) do + single_change(new, nil, fun, [:insert], false) + end + + defp single_change(%{on_replace: on_replace} = relation, new, current_pks_fun, new_pks_fun, fun, current) do + pk_values = new_pks_fun.(new) + + if (pk_values == current_pks_fun.(current) and pk_values != []) or + (on_replace == :update and raise_if_updating_with_struct!(relation, new)) do + single_change(new, current, fun, allowed_actions(pk_values), true) + else + case on_replace(relation, current) do + {:ok, _changeset} -> single_change(new, nil, fun, [:insert], false) + :error -> :error + end + end + end + + defp single_change(new, current, fun, allowed_actions, skippable?) 
do + case fun.(new, current, allowed_actions, nil) do + {:ok, %{action: :ignore}} -> + :ignore + {:ok, changeset} -> + if skippable? and skip?(changeset) do + :ignore + else + {:ok, changeset, changeset.valid?} + end + :error -> + :error + end + end + + # map changes + + defp map_changes([changes | rest], new_pks, change_pks, fun, current, acc, valid?, skip?, unique, idx, ordered) + when is_map(changes) or is_list(changes) do + pk_values = new_pks.(changes) + {struct, current, allowed_actions} = pop_current(current, pk_values) + + case fun.(changes, struct, allowed_actions, idx) do + {:ok, %{action: :ignore}} -> + ordered = pop_ordered(pk_values, ordered) + map_changes(rest, new_pks, change_pks, fun, current, acc, valid?, skip?, unique, idx + 1, ordered) + {:ok, changeset} -> + pk_values = change_pks.(changeset) + changeset = maybe_add_error_on_pk(changeset, pk_values, unique) + acc = [changeset | acc] + valid? = valid? and changeset.valid? + skip? = (struct != nil) and skip? and skip?(changeset) + unique = unique && Map.put(unique, pk_values, true) + ordered = pop_ordered(pk_values, ordered) + map_changes(rest, new_pks, change_pks, fun, current, acc, valid?, skip?, unique, idx + 1, ordered) + :error -> + :error + end + end + + defp map_changes([], _new_pks, _change_pks, fun, current, acc, valid?, skip?, _unique, _idx, ordered) do + current_structs = Enum.map(current, &elem(&1, 1)) + skip? = skip? and ordered == [] + reduce_delete_changesets(current_structs, fun, Enum.reverse(acc), valid?, skip?) 
+ end + + defp map_changes(_params, _new_pks, _change_pks, _fun, _current, _acc, _valid?, _skip?, _unique, _idx, _ordered) do + :error + end + + defp pop_ordered(pk_values, [pk_values | tail]), do: tail + defp pop_ordered(_pk_values, tail), do: tail + + defp maybe_add_error_on_pk(%{data: %{__struct__: schema}} = changeset, pk_values, unique) do + if is_map(unique) and not missing_pks?(pk_values) and Map.has_key?(unique, pk_values) do + Enum.reduce(schema.__schema__(:primary_key), changeset, fn pk, acc -> + Changeset.add_error(acc, pk, "has already been taken") + end) + else + changeset + end + end + + defp missing_pks?(pk_values) do + pk_values == [] or Enum.any?(pk_values, &is_nil/1) + end + + defp allowed_actions(pk_values) do + if Enum.all?(pk_values, &is_nil/1) do + [:insert, :update, :delete] + else + [:update, :delete] + end + end + + defp reduce_delete_changesets([struct | rest], fun, acc, valid?, _skip?) do + case fun.(nil, struct, [:update, :delete], nil) do + {:ok, changeset} -> + valid? = valid? and changeset.valid? + reduce_delete_changesets(rest, fun, [changeset | acc], valid?, false) + + :error -> + :error + end + end + + defp reduce_delete_changesets([], _fun, _acc, _valid?, true), do: :ignore + defp reduce_delete_changesets([], _fun, acc, valid?, false), do: {:ok, acc, valid?} + + # helpers + + defp check_action!(changeset, allowed_actions) do + action = changeset.action + + cond do + action in allowed_actions -> + changeset + + action == :ignore -> + changeset + + action == :insert -> + raise "cannot insert related #{inspect changeset.data} " <> + "because it is already associated with the given struct" + + action == :replace -> + raise "cannot replace related #{inspect changeset.data}. " <> + "This typically happens when you are calling put_assoc/put_embed " <> + "with the results of a previous put_assoc/put_embed/cast_assoc/cast_embed " <> + "operation, which is not supported. 
You must call such operations only once " <> + "per embed/assoc, in order for Ecto to track changes efficiently" + + true -> + raise "cannot #{action} related #{inspect changeset.data} because " <> + "it already exists and it is not currently associated with the " <> + "given struct. Ecto forbids casting existing records through " <> + "the association field for security reasons. Instead, set " <> + "the foreign key value accordingly" + end + end + + defp process_current(nil, _get_pks, _relation), + do: {[], %{}} + + defp process_current(current, get_pks, relation) do + {pks, {map, counter}} = + Enum.map_reduce(current, {%{}, 0}, fn struct, {acc, counter} -> + pks = get_pks.(struct) + key = if pks == [], do: map_size(acc), else: pks + {pks, {Map.put(acc, key, struct), counter + 1}} + end) + + if map_size(map) != counter do + Logger.warning """ + found duplicate primary keys for association/embed `#{inspect(relation.field)}` \ + in `#{inspect(relation.owner)}`. In case of duplicate IDs, only the last entry \ + with the same ID will be kept. 
Make sure that all entries in `#{inspect(relation.field)}` \ + have an ID and the IDs are unique between them + """ + end + + {pks, map} + end + + defp pop_current(current, pk_values) do + case Map.pop(current, pk_values) do + {nil, current} -> {nil, current, [:insert]} + {struct, current} -> {struct, current, allowed_actions(pk_values)} + end + end + + defp data_pk(pks) do + fn + %Changeset{data: data} -> Enum.map(pks, &Map.get(data, &1)) + map when is_map(map) -> Enum.map(pks, &Map.get(map, &1)) + list when is_list(list) -> Enum.map(pks, &Keyword.get(list, &1)) + end + end + + defp param_pk(mod, pks) do + pks = Enum.map(pks, &{&1, Atom.to_string(&1), mod.__schema__(:type, &1)}) + fn params -> + Enum.map pks, fn {atom_key, string_key, type} -> + original = Map.get(params, string_key) || Map.get(params, atom_key) + case Ecto.Type.cast(type, original) do + {:ok, value} -> value + _ -> original + end + end + end + end + + defp change_pk(pks) do + fn %Changeset{} = cs -> + Enum.map(pks, fn pk -> + case cs.changes do + %{^pk => pk_value} -> pk_value + _ -> Map.get(cs.data, pk) + end + end) + end + end + + defp put_new_action(%{action: action} = changeset, new_action) when is_nil(action), + do: Map.put(changeset, :action, new_action) + defp put_new_action(changeset, _new_action), + do: changeset + + defp skip?(%{valid?: true, changes: empty, action: :update}) when empty == %{}, + do: true + defp skip?(_changeset), + do: false + + defp expected_type(%{cardinality: :one}), do: :map + defp expected_type(%{cardinality: :many}), do: {:array, :map} + + ## Surface changes on insert + + def surface_changes(%{changes: changes, types: types} = changeset, struct, fields) do + {changes, errors} = + Enum.reduce fields, {changes, []}, fn field, {changes, errors} -> + case {struct, changes, types} do + # User has explicitly changed it + {_, %{^field => _}, _} -> + {changes, errors} + + # Handle associations specially + {_, _, %{^field => {tag, embed_or_assoc}}} when tag in [:assoc, 
:embed] -> + # This is partly reimplementing the logic behind put_relation + # in Ecto.Changeset but we need to do it in a way where we have + # control over the current value. + value = not_loaded_to_empty(Map.get(struct, field)) + empty = empty(embed_or_assoc) + case change(embed_or_assoc, value, empty) do + {:ok, change, _} when change != empty -> + {Map.put(changes, field, change), errors} + {:error, error} -> + {changes, [{field, error}]} + _ -> # :ignore or ok with change == empty + {changes, errors} + end + + # Struct has a non nil value + {%{^field => value}, _, %{^field => _}} when value != nil -> + {Map.put(changes, field, value), errors} + + {_, _, _} -> + {changes, errors} + end + end + + case errors do + [] -> %{changeset | changes: changes} + _ -> %{changeset | errors: errors ++ changeset.errors, valid?: false, changes: changes} + end + end + + defp not_loaded_to_empty(%NotLoaded{__cardinality__: cardinality}), + do: cardinality_to_empty(cardinality) + + defp not_loaded_to_empty(loaded), do: loaded +end diff --git a/deps/ecto/lib/ecto/embedded.ex b/deps/ecto/lib/ecto/embedded.ex new file mode 100644 index 0000000..e8c5fb2 --- /dev/null +++ b/deps/ecto/lib/ecto/embedded.ex @@ -0,0 +1,301 @@ +defmodule Ecto.Embedded do + @moduledoc """ + The embedding struct for `embeds_one` and `embeds_many`. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is embedded + * `on_cast` - Function name to call by default when casting embeds + * `on_replace` - The action taken on associations when schema is replaced + + """ + alias __MODULE__ + alias Ecto.Changeset + alias Ecto.Changeset.Relation + + use Ecto.ParameterizedType + + @behaviour Relation + @on_replace_opts [:raise, :mark_as_invalid, :delete] + @embeds_one_on_replace_opts @on_replace_opts ++ [:update] + + defstruct [ + :cardinality, + :field, + :owner, + :related, + :on_cast, + on_replace: :raise, + unique: true, + ordered: true + ] + + ## Parameterized API + + # We treat even embed_many as maps, as that's often the + # most efficient format to encode them in the database. + @impl Ecto.ParameterizedType + def type(_), do: {:map, :any} + + @impl Ecto.ParameterizedType + def init(opts) do + opts = Keyword.put_new(opts, :on_replace, :raise) + cardinality = Keyword.fetch!(opts, :cardinality) + + on_replace_opts = + if cardinality == :one, do: @embeds_one_on_replace_opts, else: @on_replace_opts + + unless opts[:on_replace] in on_replace_opts do + raise ArgumentError, + "invalid `:on_replace` option for #{inspect(Keyword.fetch!(opts, :field))}. 
" <> + "The only valid options are: " <> + Enum.map_join(on_replace_opts, ", ", &"`#{inspect(&1)}`") + end + + struct(%Embedded{}, opts) + end + + @impl Ecto.ParameterizedType + def load(nil, _fun, %{cardinality: :one}), do: {:ok, nil} + + def load(value, fun, %{cardinality: :one, related: schema, field: field}) when is_map(value) do + {:ok, load_field(field, schema, value, fun)} + end + + def load(nil, _fun, %{cardinality: :many}), do: {:ok, []} + + def load(value, fun, %{cardinality: :many, related: schema, field: field}) + when is_list(value) do + {:ok, Enum.map(value, &load_field(field, schema, &1, fun))} + end + + def load(_value, _fun, _embed) do + :error + end + + defp load_field(_field, schema, value, loader) when is_map(value) do + Ecto.Schema.Loader.unsafe_load(schema, value, loader) + end + + defp load_field(field, _schema, value, _fun) do + raise ArgumentError, "cannot load embed `#{field}`, expected a map but got: #{inspect(value)}" + end + + @impl Ecto.ParameterizedType + def dump(nil, _, _), do: {:ok, nil} + + def dump(value, fun, %{cardinality: :one, related: schema, field: field}) when is_map(value) do + {:ok, dump_field(field, schema, value, schema.__schema__(:dump), fun, _one_embed? = true)} + end + + def dump(value, fun, %{cardinality: :many, related: schema, field: field}) + when is_list(value) do + types = schema.__schema__(:dump) + {:ok, Enum.map(value, &dump_field(field, schema, &1, types, fun, _one_embed? = false))} + end + + def dump(_value, _fun, _embed) do + :error + end + + defp dump_field(_field, schema, %{__struct__: schema} = struct, types, dumper, _one_embed?) do + Ecto.Schema.Loader.safe_dump(struct, types, dumper) + end + + defp dump_field(field, schema, value, _types, _dumper, one_embed?) 
do + one_or_many = + if one_embed?, + do: "a struct #{inspect(schema)} value", + else: "a list of #{inspect(schema)} struct values" + + raise ArgumentError, + "cannot dump embed `#{field}`, expected #{one_or_many} but got: #{inspect(value)}" + end + + @impl Ecto.ParameterizedType + def cast(nil, %{cardinality: :one}), do: {:ok, nil} + + def cast(%{__struct__: schema} = struct, %{cardinality: :one, related: schema}) do + {:ok, struct} + end + + def cast(nil, %{cardinality: :many}), do: {:ok, []} + + def cast(value, %{cardinality: :many, related: schema}) when is_list(value) do + if Enum.all?(value, &Kernel.match?(%{__struct__: ^schema}, &1)) do + {:ok, value} + else + :error + end + end + + def cast(_value, _embed) do + :error + end + + @impl Ecto.ParameterizedType + def embed_as(_, _), do: :dump + + ## End of parameterized API + + # Callback invoked by repository to prepare embeds. + # + # It replaces the changesets for embeds inside changes + # by actual structs so it can be dumped by adapters and + # loaded into the schema struct afterwards. 
+ @doc false + def prepare(changeset, embeds, adapter, repo_action) do + %{changes: changes, types: types, repo: repo} = changeset + prepare(Map.take(changes, embeds), types, adapter, repo, repo_action) + end + + defp prepare(embeds, _types, _adapter, _repo, _repo_action) when embeds == %{} do + embeds + end + + defp prepare(embeds, types, adapter, repo, repo_action) do + Enum.reduce(embeds, embeds, fn {name, changeset_or_changesets}, acc -> + {:embed, embed} = Map.get(types, name) + Map.put(acc, name, prepare_each(embed, changeset_or_changesets, adapter, repo, repo_action)) + end) + end + + defp prepare_each(%{cardinality: :one}, nil, _adapter, _repo, _repo_action) do + nil + end + + defp prepare_each(%{cardinality: :one} = embed, changeset, adapter, repo, repo_action) do + action = normalize_action(changeset.action, repo_action, embed) + changeset = run_prepare(changeset, repo) + to_struct(changeset, action, embed, adapter) + end + + defp prepare_each(%{cardinality: :many} = embed, changesets, adapter, repo, repo_action) do + for changeset <- changesets, + action = normalize_action(changeset.action, repo_action, embed), + changeset = run_prepare(changeset, repo), + prepared = to_struct(changeset, action, embed, adapter), + do: prepared + end + + defp to_struct(%Changeset{valid?: false}, _action, %{related: schema}, _adapter) do + raise ArgumentError, + "changeset for embedded #{inspect(schema)} is invalid, " <> + "but the parent changeset was not marked as invalid" + end + + defp to_struct(%Changeset{data: %{__struct__: actual}}, _action, %{related: expected}, _adapter) + when actual != expected do + raise ArgumentError, + "expected changeset for embedded schema `#{inspect(expected)}`, " <> + "got: #{inspect(actual)}" + end + + defp to_struct(%Changeset{changes: changes, data: schema}, :update, _embed, _adapter) + when changes == %{} do + schema + end + + defp to_struct(%Changeset{}, :delete, _embed, _adapter) do + nil + end + + defp to_struct(%Changeset{data: 
data} = changeset, action, %{related: schema}, adapter) do + %{data: struct, changes: changes} = + changeset = + maybe_surface_changes(changeset, data, schema, action) + + embeds = prepare(changeset, schema.__schema__(:embeds), adapter, action) + + changes + |> Map.merge(embeds) + |> autogenerate_id(struct, action, schema, adapter) + |> autogenerate(action, schema) + |> apply_embeds(struct) + end + + defp maybe_surface_changes(changeset, data, schema, :insert) do + Relation.surface_changes(changeset, data, schema.__schema__(:fields)) + end + + defp maybe_surface_changes(changeset, _data, _schema, _action) do + changeset + end + + defp run_prepare(changeset, repo) do + changeset = %{changeset | repo: repo} + + Enum.reduce(Enum.reverse(changeset.prepare), changeset, fn fun, acc -> + case fun.(acc) do + %Ecto.Changeset{} = acc -> + acc + + other -> + raise "expected function #{inspect(fun)} given to Ecto.Changeset.prepare_changes/2 " <> + "to return an Ecto.Changeset, got: `#{inspect(other)}`" + end + end) + end + + defp apply_embeds(changes, struct) do + struct(struct, changes) + end + + defp normalize_action(:replace, _, %{on_replace: :delete}), do: :delete + defp normalize_action(:update, :insert, _), do: :insert + defp normalize_action(action, _, _), do: action + + defp autogenerate_id(changes, _struct, :insert, schema, adapter) do + case schema.__schema__(:autogenerate_id) do + {key, _source, :binary_id} -> + Map.put_new_lazy(changes, key, fn -> adapter.autogenerate(:embed_id) end) + + {_key, _source, :id} -> + raise ArgumentError, + "embedded schema `#{inspect(schema)}` cannot autogenerate `:id` primary keys, " <> + "those are typically used for auto-incrementing constraints. " <> + "Maybe you meant to use `:binary_id` instead?" 
+ + nil -> + changes + end + end + + defp autogenerate_id(changes, struct, :update, _schema, _adapter) do + for {_, nil} <- Ecto.primary_key(struct) do + raise Ecto.NoPrimaryKeyValueError, struct: struct + end + + changes + end + + defp autogenerate(changes, action, schema) do + autogen_fields = action |> action_to_auto() |> schema.__schema__() + + Enum.reduce(autogen_fields, changes, fn {fields, {mod, fun, args}}, acc -> + case Enum.reject(fields, &Map.has_key?(changes, &1)) do + [] -> + acc + + fields -> + generated = apply(mod, fun, args) + Enum.reduce(fields, acc, &Map.put(&2, &1, generated)) + end + end) + end + + defp action_to_auto(:insert), do: :autogenerate + defp action_to_auto(:update), do: :autoupdate + + @impl Relation + def build(%Embedded{related: related}, _owner) do + related.__struct__() + end + + def preload_info(_embed) do + :embed + end +end diff --git a/deps/ecto/lib/ecto/enum.ex b/deps/ecto/lib/ecto/enum.ex new file mode 100644 index 0000000..63f1a2c --- /dev/null +++ b/deps/ecto/lib/ecto/enum.ex @@ -0,0 +1,393 @@ +defmodule Ecto.Enum do + @moduledoc """ + A custom type that maps atoms to strings or integers. + + `Ecto.Enum` must be used whenever you want to keep atom values in a field. + Since atoms cannot be persisted to the database, `Ecto.Enum` converts them + to strings or integers when writing to the database and converts them back + to atoms when loading data. It can be used in your schemas as follows: + + # Stored as strings + field :status, Ecto.Enum, values: [:foo, :bar, :baz] + + or + + # Stored as integers + field :status, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + + Therefore, the type to be used in your migrations for enum fields depends + on the choice above. For the cases above, one would do, respectively: + + add :status, :string + + or + + add :status, :integer + + Some databases also support enum types, which you could use in combination + with the above. 
+ + Composite types, such as `:array`, are also supported which allow selecting + multiple values per record: + + field :roles, {:array, Ecto.Enum}, values: [:author, :editor, :admin] + + Overall, `:values` must be a list of atoms or a keyword list. Values will be + cast to atoms safely and only if the atom exists in the list (otherwise an + error will be raised). Attempting to load any string/integer not represented + by an atom in the list will be invalid. + + The helper function `mappings/2` returns the mappings for a given schema and + field, which can be used in places like form drop-downs. See `mappings/2` for + examples. + + If you want the values only, you can use `values/2`, and if you want + the "dump-able" values only, you can use `dump_values/2`. + + ## Embeds + + `Ecto.Enum` allows to customize how fields are dumped within embeds through the + `:embed_as` option. Two alternatives are supported: `:values`, which will save + the enum keys (and not their respective mapping), and `:dumped`, which will save + the dumped value. The default is `:values`. For example, assuming the following + schema: + + defmodule EnumSchema do + use Ecto.Schema + + schema "my_schema" do + embeds_one :embed, Embed do + field :embed_as_values, Ecto.Enum, values: [foo: 1, bar: 2], embed_as: :values + field :embed_as_dump, Ecto.Enum, values: [foo: 1, bar: 2], embed_as: :dumped + end + end + end + + The `:embed_as_values` field value will save `:foo` or `:bar`, while the + `:embed_as_dump` field value will save `1` or `2`. 
+ """ + + use Ecto.ParameterizedType + + @impl true + def type(params), do: params.type + + @impl true + def init(opts) do + values = opts[:values] + + {type, mappings} = + cond do + is_list(values) and Enum.all?(values, &is_atom/1) -> + validate_unique!(values) + {:string, Enum.map(values, fn atom -> {atom, to_string(atom)} end)} + + type = Keyword.keyword?(values) and infer_type(Keyword.values(values)) -> + validate_unique!(Keyword.keys(values)) + validate_unique!(Keyword.values(values)) + {type, values} + + true -> + raise ArgumentError, """ + Ecto.Enum types must have a values option specified as a list of atoms or a + keyword list with a mapping from atoms to either integer or string values. + + For example: + + field :my_field, Ecto.Enum, values: [:foo, :bar] + + or + + field :my_field, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + """ + end + + on_load = Map.new(mappings, fn {key, val} -> {val, key} end) + on_dump = Map.new(mappings) + on_cast = Map.new(mappings, fn {key, _} -> {Atom.to_string(key), key} end) + + embed_as = + case Keyword.get(opts, :embed_as, :values) do + :values -> + :self + + :dumped -> + :dump + + other -> + raise ArgumentError, """ + the `:embed_as` option for `Ecto.Enum` accepts either `:values` or `:dumped`, + received: `#{inspect(other)}` + """ + end + + %{ + on_load: on_load, + on_dump: on_dump, + on_cast: on_cast, + mappings: mappings, + embed_as: embed_as, + type: type + } + end + + defp validate_unique!(values) do + if length(Enum.uniq(values)) != length(values) do + raise ArgumentError, """ + Ecto.Enum type values must be unique. 
+ + For example: + + field :my_field, Ecto.Enum, values: [:foo, :bar, :foo] + + is invalid, while + + field :my_field, Ecto.Enum, values: [:foo, :bar, :baz] + + is valid + """ + end + end + + defp infer_type(values) do + cond do + Enum.all?(values, &is_integer/1) -> :integer + Enum.all?(values, &is_binary/1) -> :string + true -> nil + end + end + + @impl true + def cast(nil, _params), do: {:ok, nil} + + def cast(data, params) do + case params do + %{on_load: %{^data => as_atom}} -> + {:ok, as_atom} + + %{on_dump: %{^data => _}} -> + {:ok, data} + + %{on_cast: %{^data => as_atom}} -> + {:ok, as_atom} + + params -> + {:error, validation: :inclusion, enum: Map.keys(params.on_cast)} + end + end + + @impl true + def load(nil, _, _), do: {:ok, nil} + + def load(data, _loader, %{on_load: on_load}) do + case on_load do + %{^data => as_atom} -> {:ok, as_atom} + _ -> :error + end + end + + @impl true + def dump(nil, _, _), do: {:ok, nil} + + def dump(data, _dumper, %{on_dump: on_dump}) do + case on_dump do + %{^data => as_string} -> {:ok, as_string} + _ -> :error + end + end + + @impl true + def equal?(a, b, _params), do: a == b + + @impl true + def embed_as(_, %{embed_as: embed_as}), do: embed_as + + @impl true + def format(%{mappings: mappings}) do + "#Ecto.Enum" + end + + @doc """ + Returns the possible values for a given schema or types map and field. + + These values are the atoms that represent the different possible values + of the field. 
+ + ## Examples + + Assuming this schema: + + defmodule MySchema do + use Ecto.Schema + + schema "my_schema" do + field :my_string_enum, Ecto.Enum, values: [:foo, :bar, :baz] + field :my_integer_enum, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + end + end + + Then: + + Ecto.Enum.values(MySchema, :my_string_enum) + #=> [:foo, :bar, :baz] + + Ecto.Enum.values(MySchema, :my_integer_enum) + #=> [:foo, :bar, :baz] + + """ + @spec values(module | map, atom) :: [atom()] + def values(schema_or_types, field) do + schema_or_types + |> mappings(field) + |> Keyword.keys() + end + + @doc """ + Returns the possible dump values for a given schema or types map and field + + "Dump values" are the values that can be dumped in the database. For enums stored + as strings, these are the strings that will be dumped in the database. For enums + stored as integers, these are the integers that will be dumped in the database. + + ## Examples + + Assuming this schema: + + defmodule MySchema do + use Ecto.Schema + + schema "my_schema" do + field :my_string_enum, Ecto.Enum, values: [:foo, :bar, :baz] + field :my_integer_enum, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + end + end + + Then: + + Ecto.Enum.dump_values(MySchema, :my_string_enum) + #=> ["foo", "bar", "baz"] + + Ecto.Enum.dump_values(MySchema, :my_integer_enum) + #=> [1, 2, 5] + + `schema_or_types` can also be a types map. See `mappings/2` for more information. + """ + @spec dump_values(module | map, atom) :: [String.t()] | [integer()] + def dump_values(schema_or_types, field) do + schema_or_types + |> mappings(field) + |> Keyword.values() + end + + @doc """ + Casts a value from the given `schema` and `field`. 
+ + ## Examples + + Assuming this schema: + + defmodule MySchema do + use Ecto.Schema + + schema "my_schema" do + field :my_string_enum, Ecto.Enum, values: [:foo, :bar, :baz] + field :my_integer_enum, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + end + end + + Then: + + Ecto.Enum.cast_value(MySchema, :my_string_enum, "foo") + #=> {:ok, :foo} + + Ecto.Enum.cast_value(MySchema, :my_string_enum, :foo) + #=> {:ok, :foo} + + Ecto.Enum.cast_value(MySchema, :my_string_enum, "qux") + #=> :error + + Ecto.Enum.cast_value(MySchema, :my_integer_enum, 1) + #=> {:ok, :foo} + + Ecto.Enum.cast_value(MySchema, :my_integer_enum, :foo) + #=> {:ok, :foo} + + Ecto.Enum.cast_value(MySchema, :my_integer_enum, 6) + #=> :error + + `schema_or_types` can also be a types map. See `mappings/2` for more information. + """ + @spec cast_value(module | map, atom, binary | atom | integer) :: {:ok, atom} | :error + def cast_value(schema_or_types, field, value) do + params = get_params(schema_or_types, field) + case cast(value, params) do + {:ok, casted_value} -> {:ok, casted_value} + {:error, _reason} -> :error + end + end + + @doc """ + Returns the mappings between values and dumped values. 
+ + ## Examples + + Assuming this schema: + + defmodule MySchema do + use Ecto.Schema + + schema "my_schema" do + field :my_string_enum, Ecto.Enum, values: [:foo, :bar, :baz] + field :my_integer_enum, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + end + end + + Here are some examples of using `mappings/2` with it: + + Ecto.Enum.mappings(MySchema, :my_string_enum) + #=> [foo: "foo", bar: "bar", baz: "baz"] + + Ecto.Enum.mappings(MySchema, :my_integer_enum) + #=> [foo: 1, bar: 2, baz: 5] + + Examples of calling `mappings/2` with a types map: + + schemaless_types = %{ + my_enum: Ecto.ParameterizedType.init(Ecto.Enum, values: [:foo, :bar, :baz]), + my_integer_enum: Ecto.ParameterizedType.init(Ecto.Enum, values: [foo: 1, bar: 2, baz: 5]) + } + + Ecto.Enum.mappings(schemaless_types, :my_enum) + #=> [foo: "foo", bar: "bar", baz: "baz"] + Ecto.Enum.mappings(schemaless_types, :my_integer_enum) + #=> [foo: 1, bar: 2, baz: 5] + + """ + @spec mappings(module | map, atom) :: keyword(String.t() | integer()) + def mappings(schema_or_types, field) do + get_params(schema_or_types, field) + |> Map.fetch!(:mappings) + end + + defp get_params(schema_or_types, field) + + defp get_params(schema, field) when is_atom(schema) do + try do + schema.__changeset__() + rescue + _ in UndefinedFunctionError -> + raise ArgumentError, "#{inspect(schema)} is not an Ecto schema or types map" + else + %{} = types -> get_params(types, field) + end + end + + defp get_params(types, field) when is_map(types) do + case types do + %{^field => {:parameterized, {Ecto.Enum, params}}} -> params + %{^field => {_, {:parameterized, {Ecto.Enum, params}}}} -> params + %{^field => _} -> raise ArgumentError, "#{field} is not an Ecto.Enum field" + %{} -> raise ArgumentError, "#{field} does not exist" + end + end +end diff --git a/deps/ecto/lib/ecto/exceptions.ex b/deps/ecto/lib/ecto/exceptions.ex new file mode 100644 index 0000000..22b0963 --- /dev/null +++ b/deps/ecto/lib/ecto/exceptions.ex @@ -0,0 +1,321 @@ +defmodule 
Ecto.Query.CompileError do + @moduledoc """ + Raised at compilation time when the query cannot be compiled. + """ + defexception [:message] +end + +defmodule Ecto.Query.CastError do + @moduledoc """ + Raised at runtime when a value cannot be cast. + """ + defexception [:type, :value, :message] + + def exception(opts) do + value = Keyword.fetch!(opts, :value) + type = Keyword.fetch!(opts, :type) + msg = Keyword.fetch!(opts, :message) + %__MODULE__{value: value, type: type, message: msg} + end +end + +defmodule Ecto.QueryError do + @moduledoc """ + Raised at runtime when the query is invalid. + """ + defexception [:message] + + def exception(opts) do + message = Keyword.fetch!(opts, :message) + query = Keyword.fetch!(opts, :query) + hint = Keyword.get(opts, :hint) + + message = """ + #{message} in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + file = opts[:file] + line = opts[:line] + + message = + if file && line do + relative = Path.relative_to_cwd(file) + Exception.format_file_line(relative, line) <> " " <> message + else + message + end + + message = + if hint do + message <> "\n" <> hint <> "\n" + else + message + end + + %__MODULE__{message: message} + end +end + +defmodule Ecto.SubQueryError do + @moduledoc """ + Raised at runtime when a subquery is invalid. + """ + defexception [:message, :exception] + + def exception(opts) do + exception = Keyword.fetch!(opts, :exception) + query = Keyword.fetch!(opts, :query) + + message = """ + the following exception happened when compiling a subquery. + + #{Exception.format(:error, exception, []) |> String.replace("\n", "\n ")} + + The subquery originated from the following query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: message, exception: exception} + end +end + +defmodule Ecto.InvalidChangesetError do + @moduledoc """ + Raised when we cannot perform an action because the + changeset is invalid. 
+ """ + defexception [:action, :changeset] + + def message(%{action: action, changeset: changeset}) do + changes = extract_changes(changeset) + errors = Ecto.Changeset.traverse_errors(changeset, & &1) + + """ + could not perform #{action} because changeset is invalid. + + Errors + + #{pretty(errors)} + + Applied changes + + #{pretty(changes)} + + Params + + #{pretty(changeset.params)} + + Changeset + + #{pretty(changeset)} + """ + end + + defp pretty(term) do + inspect(term, pretty: true) + |> String.split("\n") + |> Enum.map_join("\n", &(" " <> &1)) + end + + defp extract_changes(%Ecto.Changeset{changes: changes}) do + Enum.reduce(changes, %{}, fn {key, value}, acc -> + case value do + %Ecto.Changeset{action: :delete} -> acc + _ -> Map.put(acc, key, extract_changes(value)) + end + end) + end + + defp extract_changes([%Ecto.Changeset{action: :delete} | tail]), + do: extract_changes(tail) + + defp extract_changes([%Ecto.Changeset{} = changeset | tail]), + do: [extract_changes(changeset) | extract_changes(tail)] + + defp extract_changes(other), + do: other +end + +defmodule Ecto.CastError do + @moduledoc """ + Raised when a changeset can't cast a value. + """ + defexception [:message, :type, :value] + + def exception(opts) do + type = Keyword.fetch!(opts, :type) + value = Keyword.fetch!(opts, :value) + msg = opts[:message] || "cannot cast #{inspect(value)} to #{Ecto.Type.format(type)}" + %__MODULE__{message: msg, type: type, value: value} + end +end + +defmodule Ecto.InvalidURLError do + defexception [:message, :url] + + def exception(opts) do + url = Keyword.fetch!(opts, :url) + msg = Keyword.fetch!(opts, :message) + msg = "invalid URL #{url}, #{msg}. 
The parsed URL is: #{inspect(URI.parse(url))}" + %__MODULE__{message: msg, url: url} + end +end + +defmodule Ecto.NoPrimaryKeyFieldError do + @moduledoc """ + Raised at runtime when an operation that requires a primary key is invoked + with a schema that does not define a primary key by using `@primary_key false` + """ + defexception [:message, :schema] + + def exception(opts) do + schema = Keyword.fetch!(opts, :schema) + message = "schema `#{inspect(schema)}` has no primary key" + %__MODULE__{message: message, schema: schema} + end +end + +defmodule Ecto.NoPrimaryKeyValueError do + @moduledoc """ + Raised at runtime when an operation that requires a primary key is invoked + with a schema missing value for its primary key + """ + defexception [:message, :struct] + + def exception(opts) do + struct = Keyword.fetch!(opts, :struct) + message = "struct `#{inspect(struct)}` is missing primary key value" + %__MODULE__{message: message, struct: struct} + end +end + +defmodule Ecto.ChangeError do + defexception [:message] +end + +defmodule Ecto.NoResultsError do + defexception [:message] + + def exception(opts) do + query = Keyword.fetch!(opts, :queryable) |> Ecto.Queryable.to_query() + + msg = """ + expected at least one result but got none in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MultipleResultsError do + defexception [:message] + + def exception(opts) do + query = Keyword.fetch!(opts, :queryable) |> Ecto.Queryable.to_query() + count = Keyword.fetch!(opts, :count) + + msg = """ + expected at most one result but got #{count} in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MultiplePrimaryKeyError do + defexception [:message] + + def exception(opts) do + operation = Keyword.fetch!(opts, :operation) + source = Keyword.fetch!(opts, :source) + params = Keyword.fetch!(opts, :params) + count = Keyword.fetch!(opts, :count) + + msg = 
""" + expected #{operation} on #{source} to return at most one entry but got #{count} entries. + + This typically means the field(s) set as primary_key in your schema/source + are not enough to uniquely identify entries in the repository. + + Those are the parameters sent to the repository: + + #{inspect(params)} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MigrationError do + defexception [:message] +end + +defmodule Ecto.StaleEntryError do + defexception [:message, :changeset] + + def exception(opts) do + action = Keyword.fetch!(opts, :action) + changeset = Keyword.fetch!(opts, :changeset) + + msg = """ + attempted to #{action} a stale struct: + + #{inspect(changeset.data)} + + This typically happens when the struct no longer exists in the database \ + or a database trigger/rule has forbidden the action. If stale entries are \ + expected, you may use `:stale_error_field` to convert this into a changeset \ + error, or set `:allow_stale` to true if you would like stale operations to \ + be considered a success (such as a stale deletion) + """ + + %__MODULE__{message: msg, changeset: changeset} + end +end + +defmodule Ecto.ConstraintError do + defexception [:type, :constraint, :message] + + def exception(opts) do + type = Keyword.fetch!(opts, :type) + constraint = Keyword.fetch!(opts, :constraint) + changeset = Keyword.fetch!(opts, :changeset) + action = Keyword.fetch!(opts, :action) + + constraints = + case changeset.constraints do + [] -> + "The changeset has not defined any constraint." 
+ + constraints -> + "The changeset defined the following constraints:\n\n" <> + Enum.map_join( + constraints, + "\n", + &" * #{inspect(&1.constraint)} (#{&1.type}_constraint)" + ) + end + + msg = """ + constraint error when attempting to #{action} struct: + + * #{inspect(constraint)} (#{type}_constraint) + + If you would like to stop this constraint violation from raising an + exception and instead add it as an error to your changeset, please + call `#{type}_constraint/3` on your changeset with the constraint + `:name` as an option. + + #{constraints} + """ + + %__MODULE__{message: msg, type: type, constraint: constraint} + end +end diff --git a/deps/ecto/lib/ecto/json.ex b/deps/ecto/lib/ecto/json.ex new file mode 100644 index 0000000..ac2a5b4 --- /dev/null +++ b/deps/ecto/lib/ecto/json.ex @@ -0,0 +1,52 @@ +for encoder <- [Jason.Encoder, JSON.Encoder] do + module = Macro.inspect_atom(:literal, encoder) + + if Code.ensure_loaded?(encoder) do + defimpl encoder, for: Ecto.Association.NotLoaded do + def encode(%{__owner__: owner, __field__: field}, _) do + raise """ + cannot encode association #{inspect(field)} from #{inspect(owner)} to \ + JSON because the association was not loaded. + + You can either preload the association: + + Repo.preload(#{inspect(owner)}, #{inspect(field)}) + + Or choose to not encode the association when converting the struct \ + to JSON by explicitly listing the JSON fields in your schema: + + defmodule #{inspect(owner)} do + # ... + + @derive {#{unquote(module)}, only: [:name, :title, ...]} + schema ... do + + You can also use the :except option instead of :only if you would \ + prefer to skip some fields. + """ + end + end + + defimpl encoder, for: Ecto.Schema.Metadata do + def encode(%{schema: schema}, _) do + raise """ + cannot encode metadata from the :__meta__ field for #{inspect(schema)} \ + to JSON. This metadata is used internally by Ecto and should never be \ + exposed externally. 
+ + You can either map the schemas to remove the :__meta__ field before \ + encoding or explicitly list the JSON fields in your schema: + + defmodule #{inspect(schema)} do + # ... + + @derive {#{unquote(module)}, only: [:name, :title, ...]} + schema ... do + + You can also use the :except option instead of :only if you would \ + prefer to skip some fields. + """ + end + end + end +end diff --git a/deps/ecto/lib/ecto/multi.ex b/deps/ecto/lib/ecto/multi.ex new file mode 100644 index 0000000..4d92f64 --- /dev/null +++ b/deps/ecto/lib/ecto/multi.ex @@ -0,0 +1,998 @@ +defmodule Ecto.Multi do + @moduledoc """ + `Ecto.Multi` is a data structure for grouping multiple Repo operations. + + `Ecto.Multi` makes it possible to pack operations that should be + performed in a single database transaction and provides a way to introspect + the queued operations without actually performing them. Each operation + is given a name that is unique and will identify its result in case of + either success or failure. + + If a Multi is valid (i.e. all the changesets in it are valid), + all operations will be executed in the order they were added. + + The `Ecto.Multi` structure should be considered opaque. You can use + `%Ecto.Multi{}` to pattern match the type, but accessing fields or + directly modifying them is not advised. + + `Ecto.Multi.to_list/1` returns a canonical representation of the + structure that can be used for introspection. + + > #### When to use Ecto.Multi? {: .info} + > + > `Ecto.Multi` is particularly useful when the set of operations to perform + > is dynamic. For most other use cases, using regular control flow within + > [`Repo.transact(fun)`](`c:Ecto.Repo.transact/2`) and returning + > `{:ok, result}` or `{:error, reason}` is more straightforward. + + ## Changesets + + If a Multi contains operations that accept changesets (like `insert/4`, + `update/4` or `delete/4`), they will be checked before starting the + transaction. 
If any changeset has errors, the transaction will not be + started and the error will immediately be returned. + + Note: `insert/4`, `update/4`, `insert_or_update/4` and `delete/4` + variants that accept a function do not perform these checks since + the functions are executed after the transaction has started. + + ## Run + + `Multi` allows you to run arbitrary functions as part of your transaction + via `run/3` and `run/5`. This is especially useful when an operation + depends on the value of a previous operation. For this reason, the + function given as a callback to `run/3` and `run/5` will receive the repo + as the first argument, and all changes performed by the Multi so far as a + map as the second argument. + + The function given to `run` must return `{:ok, value}` or `{:error, value}` + as its result. Returning an error will abort any further operations + and make the Multi fail. + + ## Example + + Let's look at an example definition and usage: resetting a password. We need + to update the account with proper information, log the request and remove + all current sessions: + + defmodule PasswordManager do + alias Ecto.Multi + + def reset(account, params) do + Multi.new() + |> Multi.update(:account, Account.password_reset_changeset(account, params)) + |> Multi.insert(:log, Log.password_reset_changeset(account, params)) + |> Multi.delete_all(:sessions, Ecto.assoc(account, :sessions)) + end + end + + We can later execute it in the integration layer using Repo: + + Repo.transact(PasswordManager.reset(account, params)) + + By pattern matching on the result we can differentiate various conditions: + + case result do + {:ok, %{account: account, log: log, sessions: sessions}} -> + # The Multi was successful. We can access results , which are as + # we would get from running the corresponding Repo functions, under + # keys we used for naming the operations. + {:error, failed_operation, failed_value, changes_so_far} -> + # One of the operations failed. 
We can access the operation's failure + # value (such as a changeset for operations on changesets) to prepare a + # proper response. We also get access to the results of any operations + # that succeeded before the indicated operation failed. (However, + # successful operations were rolled back.) + end + + We can also easily unit test our transaction without actually running it. + Since changesets can use in-memory data, we can use an account that is + constructed in memory as well, without persisting it to the database: + + test "dry run password reset" do + account = %Account{password: "letmein"} + multi = PasswordManager.reset(account, params) + + assert [ + {:account, {:update, account_changeset, []}}, + {:log, {:insert, log_changeset, []}}, + {:sessions, {:delete_all, query, []}} + ] = Ecto.Multi.to_list(multi) + + # We can introspect changesets and query to see if everything + # is as expected, for example: + assert account_changeset.valid? + assert log_changeset.valid? + assert inspect(query) == "#Ecto.Query" + end + + The name of each operation does not have to be an atom. This can be particularly + useful when you wish to update a collection of changesets at once, and track their + errors individually: + + accounts = [%Account{id: 1}, %Account{id: 2}] + + Enum.reduce(accounts, Multi.new(), fn account, multi -> + Multi.update( + multi, + {:account, account.id}, + Account.password_reset_changeset(account, params) + ) + end) + """ + + alias __MODULE__ + alias Ecto.Changeset + + defstruct operations: [], names: MapSet.new() + + @typedoc """ + Map of changes made so far during the current transaction. For any Multi + which returns `{:ok, result}`, its `t:name/0` is added as a key and its + result as the value. 
+ """ + @type changes :: map + @type run :: (Ecto.Repo.t(), changes -> {:ok | :error, any}) + @type fun(result) :: (changes -> result) + @type merge :: (changes -> t) | {module, atom, [any]} + @typep schema_or_source :: binary | {binary, module} | module + @typep operation :: + {:changeset, Changeset.t(), Keyword.t()} + | {:run, run} + | {:put, any} + | {:inspect, Keyword.t()} + | {:merge, merge} + | {:update_all, Ecto.Query.t(), Keyword.t()} + | {:delete_all, Ecto.Query.t(), Keyword.t()} + | {:insert_all, schema_or_source, [map | Keyword.t()], Keyword.t()} + @typep operations :: [{name, operation}] + + @typep names :: MapSet.t() + + @typedoc """ + Name of an operation in the Multi. Can be any term, as long as it is unique + within the list of operations; for example, `:insert_post` or `{:delete_post, + 5}`. + """ + @type name :: any + + @typedoc """ + Result of a failed transaction using a Multi. + """ + @type failure :: + {:error, failed_operation :: Ecto.Multi.name(), failed_value :: any(), + changes_so_far :: %{Ecto.Multi.name() => any}} + + @type t :: %__MODULE__{operations: operations, names: names} + + @doc """ + Returns an empty `Ecto.Multi` struct. + + ## Example + + iex> Ecto.Multi.new() |> Ecto.Multi.to_list() + [] + + """ + @spec new :: t + def new do + %Multi{} + end + + @doc """ + Appends the second Multi to the first. + + All names must be unique within both structures. + + ## Example + + iex> lhs = Ecto.Multi.new() |> Ecto.Multi.run(:left, fn _, changes -> {:ok, changes} end) + iex> rhs = Ecto.Multi.new() |> Ecto.Multi.run(:right, fn _, changes -> {:error, changes} end) + iex> Ecto.Multi.append(lhs, rhs) |> Ecto.Multi.to_list |> Keyword.keys + [:left, :right] + + """ + @spec append(t, t) :: t + def append(lhs, rhs) do + merge_structs(lhs, rhs, &(&2 ++ &1)) + end + + @doc """ + Prepends the second Multi to the first. + + All names must be unique within both structures. 
+ + ## Example + + iex> lhs = Ecto.Multi.new() |> Ecto.Multi.run(:left, fn _, changes -> {:ok, changes} end) + iex> rhs = Ecto.Multi.new() |> Ecto.Multi.run(:right, fn _, changes -> {:error, changes} end) + iex> Ecto.Multi.prepend(lhs, rhs) |> Ecto.Multi.to_list |> Keyword.keys + [:right, :left] + + """ + @spec prepend(t, t) :: t + def prepend(lhs, rhs) do + merge_structs(lhs, rhs, &(&1 ++ &2)) + end + + defp merge_structs(%Multi{} = lhs, %Multi{} = rhs, joiner) do + %{names: lhs_names, operations: lhs_ops} = lhs + %{names: rhs_names, operations: rhs_ops} = rhs + + case MapSet.intersection(lhs_names, rhs_names) |> MapSet.to_list() do + [] -> + %Multi{names: MapSet.union(lhs_names, rhs_names), operations: joiner.(lhs_ops, rhs_ops)} + + common -> + raise ArgumentError, """ + error when merging the following Ecto.Multi structs: + + #{Kernel.inspect(lhs)} + + #{Kernel.inspect(rhs)} + + both declared operations: #{Kernel.inspect(common)} + """ + end + end + + @doc """ + Merges a Multi returned dynamically by an anonymous function. + + This function is useful when the Multi to be merged requires information + from the original Multi. The second argument is an anonymous function + that receives the Multi changes so far. The anonymous function must return + another Multi. + + If you would prefer to simply merge two Multis together, see `append/2` or + `prepend/2`. + + Duplicated operations are not allowed. + + ## Example + + multi = + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + + multi + |> Ecto.Multi.merge(fn %{post: post} -> + Ecto.Multi.new() + |> Ecto.Multi.insert(:comment, Ecto.build_assoc(post, :comments)) + end) + |> MyApp.Repo.transact() + """ + @spec merge(t, (changes -> t)) :: t + def merge(%Multi{} = multi, merge) when is_function(merge, 1) do + Map.update!(multi, :operations, &[{:merge, {:merge, merge}} | &1]) + end + + @doc """ + Merges a Multi returned dynamically by calling `module` and `function` with `args`. 
+ + Similar to `merge/2` but allows passing of module name, function and + arguments. The function should return an `Ecto.Multi`, and receives changes so far + as the first argument (prepended to those passed in the call to the function). + + Duplicated operations are not allowed. + """ + @spec merge(t, module, function, args) :: t when function: atom, args: [any] + def merge(%Multi{} = multi, mod, fun, args) + when is_atom(mod) and is_atom(fun) and is_list(args) do + Map.update!(multi, :operations, &[{:merge, {:merge, {mod, fun, args}}} | &1]) + end + + @doc """ + Adds an insert operation to the Multi. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.insert/2`. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.insert(:insert, %Post{title: "first"}) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + |> Ecto.Multi.insert(:comment, fn %{post: post} -> + Ecto.build_assoc(post, :comments) + end) + |> MyApp.Repo.transact() + + """ + @spec insert( + t, + name, + Changeset.t() | Ecto.Schema.t() | (changes -> Changeset.t() | Ecto.Schema.t()), + Keyword.t() + ) :: t + def insert(multi, name, changeset_or_struct_or_fun, opts \\ []) + + def insert(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :insert, name, changeset, opts) + end + + def insert(multi, name, %_{} = struct, opts) do + insert(multi, name, Changeset.change(struct), opts) + end + + def insert(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:insert, fun}, opts)) + end + + @doc """ + Adds an update operation to the Multi. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.update/2`. 
+ + ## Example + + post = MyApp.Repo.get!(Post, 1) + changeset = Ecto.Changeset.change(post, title: "New title") + Ecto.Multi.new() + |> Ecto.Multi.update(:update, changeset) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + |> Ecto.Multi.update(:fun, fn %{post: post} -> + Ecto.Changeset.change(post, title: "New title") + end) + |> MyApp.Repo.transact() + + """ + @spec update(t, name, Changeset.t() | (changes -> Changeset.t()), Keyword.t()) :: t + def update(multi, name, changeset_or_fun, opts \\ []) + + def update(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :update, name, changeset, opts) + end + + def update(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:update, fun}, opts)) + end + + @doc """ + Inserts or updates a changeset depending on whether or not the changeset was persisted. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.insert_or_update/2`. 
+ + ## Example + + changeset = Post.changeset(%Post{}, %{title: "New title"}) + Ecto.Multi.new() + |> Ecto.Multi.insert_or_update(:insert_or_update, changeset) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + {:ok, repo.get(Post, 1) || %Post{}} + end) + |> Ecto.Multi.insert_or_update(:update, fn %{post: post} -> + Ecto.Changeset.change(post, title: "New title") + end) + |> MyApp.Repo.transact() + + """ + @spec insert_or_update(t, name, Changeset.t() | (changes -> Changeset.t()), Keyword.t()) :: t + def insert_or_update(multi, name, changeset_or_fun, opts \\ []) + + def insert_or_update( + multi, + name, + %Changeset{data: %{__meta__: %{state: :loaded}}} = changeset, + opts + ) do + add_changeset(multi, :update, name, changeset, opts) + end + + def insert_or_update(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :insert, name, changeset, opts) + end + + def insert_or_update(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:insert_or_update, fun}, opts)) + end + + @doc """ + Adds a delete operation to the Multi. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.delete/2`. 
+ + ## Example + + post = MyApp.Repo.get!(Post, 1) + Ecto.Multi.new() + |> Ecto.Multi.delete(:delete, post) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.delete(:delete, fn %{post: post} -> + # Others validations + post + end) + |> MyApp.Repo.transact() + + """ + @spec delete( + t, + name, + Changeset.t() | Ecto.Schema.t() | (changes -> Changeset.t() | Ecto.Schema.t()), + Keyword.t() + ) :: t + def delete(multi, name, changeset_or_struct_fun, opts \\ []) + + def delete(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :delete, name, changeset, opts) + end + + def delete(multi, name, %_{} = struct, opts) do + delete(multi, name, Changeset.change(struct), opts) + end + + def delete(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:delete, fun}, opts)) + end + + @doc """ + Runs a query expecting one result and stores the result in the Multi. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.one/2`. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.one(:post, Post) + |> Ecto.Multi.one(:author, fn %{post: post} -> + from(a in Author, where: a.id == ^post.author_id) + end) + |> MyApp.Repo.transact() + """ + @spec one( + t, + name, + queryable :: Ecto.Queryable.t() | (changes -> Ecto.Queryable.t()), + opts :: Keyword.t() + ) :: t + def one(multi, name, queryable_or_fun, opts \\ []) + + def one(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:one, fun}, opts)) + end + + def one(multi, name, queryable, opts) do + run(multi, name, operation_fun({:one, fn _ -> queryable end}, opts)) + end + + @doc """ + Runs a query and stores all results in the Multi. + + The `name` must be unique within the Multi. 
+ + The remaining arguments and options are the same as in `c:Ecto.Repo.all/2`. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.all(:all, Post) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.all(:all, fn _changes -> Post end) + |> MyApp.Repo.transact() + """ + @spec all( + t, + name, + queryable :: Ecto.Queryable.t() | (changes -> Ecto.Queryable.t()), + opts :: Keyword.t() + ) :: t + def all(multi, name, queryable_or_fun, opts \\ []) + + def all(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:all, fun}, opts)) + end + + def all(multi, name, queryable, opts) do + run(multi, name, operation_fun({:all, fn _ -> queryable end}, opts)) + end + + @doc """ + Checks if an entry matching the given query exists and stores a boolean in the Multi. + + The `name` must be unique within the Multi. + + The remaining arguments and options are the same as in `c:Ecto.Repo.exists?/2`. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.exists?(:post, Post) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.exists?(:post, fn _changes -> Post end) + |> MyApp.Repo.transact() + """ + @spec exists?( + t, + name, + queryable :: Ecto.Queryable.t() | (changes -> Ecto.Queryable.t()), + opts :: Keyword.t() + ) :: t + def exists?(multi, name, queryable_or_fun, opts \\ []) + + def exists?(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:exists?, fun}, opts)) + end + + def exists?(multi, name, queryable, opts) do + run(multi, name, operation_fun({:exists?, fn _ -> queryable end}, opts)) + end + + defp add_changeset(multi, action, name, changeset, opts) when is_list(opts) do + add_operation(multi, name, {:changeset, put_action(changeset, action), opts}) + end + + defp put_action(%{action: nil} = changeset, action) do + %{changeset | action: action} + end + + defp put_action(%{action: action} = changeset, action) do + changeset + end + + defp put_action(%{action: original}, action) do + 
raise ArgumentError, + "you provided a changeset with an action already set " <> + "to #{Kernel.inspect(original)} when trying to #{action} it" + end + + @doc """ + Causes the Multi to fail with the given value. + + Running the Multi in a transaction will execute + no previous steps and return the value of the first + error added. + """ + @spec error(t, name, error :: term) :: t + def error(multi, name, value) do + add_operation(multi, name, {:error, value}) + end + + @doc """ + Adds a function to run as part of the Multi. + + The function should return either `{:ok, value}` or `{:error, value}`, + and receives the repo as the first argument and the changes so far + as the second argument. + + ## Example + + Ecto.Multi.run(multi, :write, fn _repo, %{image: image} -> + with :ok <- File.write(image.name, image.contents) do + {:ok, nil} + end + end) + """ + @spec run(t, name, run) :: t + def run(multi, name, run) when is_function(run, 2) do + add_operation(multi, name, {:run, run}) + end + + @doc """ + Adds a function to run as part of the Multi. + + Similar to `run/3`, but allows passing of module name, function and arguments. + The function should return either `{:ok, value}` or `{:error, value}`, and + receives the repo as the first argument and the changes so far as the + second argument (prepended to those passed in the call to the function). + """ + @spec run(t, name, module, function, args) :: t when function: atom, args: [any] + def run(multi, name, mod, fun, args) + when is_atom(mod) and is_atom(fun) and is_list(args) do + add_operation(multi, name, {:run, {mod, fun, args}}) + end + + @doc """ + Adds an `insert_all` operation to the Multi. + + Accepts the same arguments and options as `c:Ecto.Repo.insert_all/3`. 
+ + ## Example + + posts = [%{title: "My first post"}, %{title: "My second post"}] + Ecto.Multi.new() + |> Ecto.Multi.insert_all(:insert_all, Post, posts) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.insert_all(:insert_all, Comment, fn %{post: post} -> + # Others validations + + entries + |> Enum.map(fn comment -> + Map.put(comment, :post_id, post.id) + end) + end) + |> MyApp.Repo.transact() + + """ + @spec insert_all( + t, + name, + schema_or_source, + entries_or_query_or_fun :: + [map | Keyword.t()] | (changes -> [map | Keyword.t()]) | Ecto.Query.t(), + Keyword.t() + ) :: t + def insert_all(multi, name, schema_or_source, entries_or_query_or_fun, opts \\ []) + + def insert_all(multi, name, schema_or_source, entries_fun, opts) + when is_function(entries_fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:insert_all, schema_or_source, entries_fun}, opts)) + end + + def insert_all(multi, name, schema_or_source, entries_or_query, opts) when is_list(opts) do + add_operation(multi, name, {:insert_all, schema_or_source, entries_or_query, opts}) + end + + @doc """ + Adds an `update_all` operation to the Multi. + + Accepts the same arguments and options as `c:Ecto.Repo.update_all/3`. 
+ + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.update_all(:update_all, Post, set: [title: "New title"]) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.update_all(:update_all, fn %{post: post} -> + # Others validations + from(c in Comment, where: c.post_id == ^post.id, update: [set: [title: "New title"]]) + end, []) + |> MyApp.Repo.transact() + + """ + @spec update_all( + t, + name, + Ecto.Queryable.t() | (changes -> Ecto.Queryable.t()), + Keyword.t(), + Keyword.t() + ) :: t + def update_all(multi, name, queryable_or_fun, updates, opts \\ []) + + def update_all(multi, name, queryable_fun, updates, opts) + when is_function(queryable_fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:update_all, queryable_fun, updates}, opts)) + end + + def update_all(multi, name, queryable, updates, opts) when is_list(opts) do + query = Ecto.Queryable.to_query(queryable) + add_operation(multi, name, {:update_all, query, updates, opts}) + end + + @doc """ + Adds a `delete_all` operation to the Multi. + + Accepts the same arguments and options as `c:Ecto.Repo.delete_all/2`. 
+ + ## Example + + queryable = from(p in Post, where: p.id < 5) + Ecto.Multi.new() + |> Ecto.Multi.delete_all(:delete_all, queryable) + |> MyApp.Repo.transact() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.delete_all(:delete_all, fn %{post: post} -> + # Others validations + from(c in Comment, where: c.post_id == ^post.id) + end) + |> MyApp.Repo.transact() + + """ + @spec delete_all(t, name, Ecto.Queryable.t() | (changes -> Ecto.Queryable.t()), Keyword.t()) :: + t + def delete_all(multi, name, queryable_or_fun, opts \\ []) + + def delete_all(multi, name, fun, opts) when is_function(fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:delete_all, fun}, opts)) + end + + def delete_all(multi, name, queryable, opts) when is_list(opts) do + query = Ecto.Queryable.to_query(queryable) + add_operation(multi, name, {:delete_all, query, opts}) + end + + defp add_operation(%Multi{} = multi, name, operation) do + %{operations: operations, names: names} = multi + + if MapSet.member?(names, name) do + raise "#{Kernel.inspect(name)} is already a member of the Ecto.Multi: \n#{Kernel.inspect(multi)}" + else + %{multi | operations: [{name, operation} | operations], names: MapSet.put(names, name)} + end + end + + @doc """ + Returns the list of operations stored in the Multi. + + Always use this function when you need to access the operations you + have defined in `Ecto.Multi`. Inspecting the `Ecto.Multi` struct internals + directly is discouraged. + """ + @spec to_list(t) :: [{name, term}] + def to_list(%Multi{operations: operations}) do + operations + |> Enum.reverse() + |> Enum.map(&format_operation/1) + end + + defp format_operation({name, {:changeset, changeset, opts}}), + do: {name, {changeset.action, changeset, opts}} + + defp format_operation(other), + do: other + + @doc """ + Adds a value to the changes so far under the given name. 
+ + The given `value` is added to the Multi before the transaction starts. + If you would like to run arbitrary functions as part of your transaction, + see `run/3` or `run/5`. + + ## Example + + Imagine there is an existing company schema that you retrieved from + the database. You can insert it as a change in the Multi using `put/3`: + + Ecto.Multi.new() + |> Ecto.Multi.put(:company, company) + |> Ecto.Multi.insert(:user, fn changes -> User.changeset(changes.company) end) + |> Ecto.Multi.insert(:person, fn changes -> Person.changeset(changes.user, changes.company) end) + |> MyApp.Repo.transact() + + In the example above, there isn't a significant benefit in putting + the `company` in the Multi because you could also access the + `company` variable directly inside the anonymous function. + + However, the benefit of `put/3` is seen when composing `Ecto.Multi`s. + If the insert operations above were defined in another module, + you could use `put(:company, company)` to inject changes that + will be accessed by other functions down the chain, removing + the need to pass both `multi` and `company` values around. + """ + @spec put(t, name, any) :: t + def put(multi, name, value) do + add_operation(multi, name, {:put, value}) + end + + @doc """ + Inspects results from a Multi. + + By default, the name is shown as a label to the inspect. Custom labels are + supported through the `IO.inspect/2` `label` option. + + ## Options + + All options for IO.inspect/2 are supported, as well as: + + * `:only` - A field or a list of fields to inspect, will print the entire + map by default. 
+ + ## Examples + + Ecto.Multi.new() + |> Ecto.Multi.insert(:person_a, changeset) + |> Ecto.Multi.insert(:person_b, changeset) + |> Ecto.Multi.inspect() + |> MyApp.Repo.transact() + + Prints: + %{person_a: %Person{...}, person_b: %Person{...}} + + We can use the `:only` option to limit which fields will be printed: + + Ecto.Multi.new() + |> Ecto.Multi.insert(:person_a, changeset) + |> Ecto.Multi.insert(:person_b, changeset) + |> Ecto.Multi.inspect(only: :person_a) + |> MyApp.Repo.transact() + + Prints: + %{person_a: %Person{...}} + + """ + @spec inspect(t, Keyword.t()) :: t + def inspect(multi, opts \\ []) do + Map.update!(multi, :operations, &[{:inspect, {:inspect, opts}} | &1]) + end + + @doc false + @spec __apply__(t, Ecto.Repo.t(), fun, (term -> no_return)) :: {:ok, term} | {:error, term} + def __apply__(%Multi{} = multi, repo, wrap, return) do + operations = Enum.reverse(multi.operations) + + with {:ok, operations} <- check_operations_valid(operations) do + apply_operations(operations, multi.names, repo, wrap, return) + end + end + + defp check_operations_valid(operations) do + Enum.find_value(operations, &invalid_operation/1) || {:ok, operations} + end + + defp invalid_operation({name, {:changeset, %{valid?: false} = changeset, _}}), + do: {:error, {name, changeset, %{}}} + + defp invalid_operation({name, {:error, value}}), + do: {:error, {name, value, %{}}} + + defp invalid_operation(_operation), + do: nil + + defp apply_operations([], _names, _repo, _wrap, _return), do: {:ok, %{}} + + defp apply_operations(operations, names, repo, wrap, return) do + wrap.(fn -> + operations + |> Enum.reduce({%{}, names}, &apply_operation(&1, repo, wrap, return, &2)) + |> elem(0) + end) + end + + defp apply_operation({_, {:merge, merge}}, repo, wrap, return, {acc, names}) do + case __apply__(apply_merge_fun(merge, acc), repo, wrap, return) do + {:ok, value} -> + merge_results(acc, value, names) + + {:error, {name, value, nested_acc}} -> + {acc, _names} = merge_results(acc, 
nested_acc, names) + return.({name, value, acc}) + end + end + + defp apply_operation({_name, {:inspect, opts}}, _repo, _wrap_, _return, {acc, names}) do + if opts[:only] do + acc |> Map.take(List.wrap(opts[:only])) |> IO.inspect(opts) + else + IO.inspect(acc, opts) + end + + {acc, names} + end + + defp apply_operation({name, operation}, repo, wrap, return, {acc, names}) do + case apply_operation(operation, acc, {wrap, return}, repo) do + {:ok, value} -> + {Map.put(acc, name, value), names} + + {:error, value} -> + return.({name, value, acc}) + + other -> + raise "expected Ecto.Multi callback named `#{Kernel.inspect(name)}` to return either {:ok, value} or {:error, value}, got: #{Kernel.inspect(other)}" + end + end + + defp apply_operation({:changeset, changeset, opts}, _acc, _apply_args, repo), + do: apply(repo, changeset.action, [changeset, opts]) + + defp apply_operation({:run, run}, acc, _apply_args, repo), + do: apply_run_fun(run, repo, acc) + + defp apply_operation({:error, value}, _acc, _apply_args, _repo), + do: {:error, value} + + defp apply_operation({:insert_all, source, entries, opts}, _acc, _apply_args, repo), + do: {:ok, repo.insert_all(source, entries, opts)} + + defp apply_operation({:update_all, query, updates, opts}, _acc, _apply_args, repo), + do: {:ok, repo.update_all(query, updates, opts)} + + defp apply_operation({:delete_all, query, opts}, _acc, _apply_args, repo), + do: {:ok, repo.delete_all(query, opts)} + + defp apply_operation({:put, value}, _acc, _apply_args, _repo), + do: {:ok, value} + + defp apply_merge_fun({mod, fun, args}, acc), do: apply(mod, fun, [acc | args]) + defp apply_merge_fun(fun, acc), do: apply(fun, [acc]) + + defp apply_run_fun({mod, fun, args}, repo, acc), do: apply(mod, fun, [repo, acc | args]) + defp apply_run_fun(fun, repo, acc), do: apply(fun, [repo, acc]) + + defp merge_results(changes, new_changes, names) do + new_names = new_changes |> Map.keys() |> MapSet.new() + + case MapSet.intersection(names, new_names) |> 
MapSet.to_list() do + [] -> + {Map.merge(changes, new_changes), MapSet.union(names, new_names)} + + common -> + raise "cannot merge Multi; the following operations were found in " <> + "both Ecto.Multi: #{Kernel.inspect(common)}" + end + end + + defp operation_fun({:update_all, queryable_fun, updates}, opts) do + fn repo, changes -> + {:ok, repo.update_all(queryable_fun.(changes), updates, opts)} + end + end + + defp operation_fun({:insert_all, schema_or_source, entries_fun}, opts) do + fn repo, changes -> + {:ok, repo.insert_all(schema_or_source, entries_fun.(changes), opts)} + end + end + + defp operation_fun({:delete_all, fun}, opts) do + fn repo, changes -> + {:ok, repo.delete_all(fun.(changes), opts)} + end + end + + defp operation_fun({:one, fun}, opts) do + fn repo, changes -> + {:ok, repo.one(fun.(changes), opts)} + end + end + + defp operation_fun({:all, fun}, opts) do + fn repo, changes -> + {:ok, repo.all(fun.(changes), opts)} + end + end + + defp operation_fun({:exists?, fun}, opts) do + fn repo, changes -> + {:ok, repo.exists?(fun.(changes), opts)} + end + end + + defp operation_fun({operation, fun}, opts) do + fn repo, changes -> + apply(repo, operation, [fun.(changes), opts]) + end + end +end diff --git a/deps/ecto/lib/ecto/parameterized_type.ex b/deps/ecto/lib/ecto/parameterized_type.ex new file mode 100644 index 0000000..187b7a9 --- /dev/null +++ b/deps/ecto/lib/ecto/parameterized_type.ex @@ -0,0 +1,220 @@ +defmodule Ecto.ParameterizedType do + @moduledoc """ + Parameterized types are Ecto types that can be customized per field. + + Parameterized types allow a set of options to be specified in the schema + which are initialized on compilation and passed to the callback functions + as the last argument. + + For example, `field :foo, :string` behaves the same for every field. + On the other hand, `field :foo, Ecto.Enum, values: [:foo, :bar, :baz]` + will likely have a different set of values per field. 
+ + Note that options are specified as a keyword, but it is idiomatic to + convert them to maps inside `c:init/1` for easier pattern matching in + other callbacks. + + Parameterized types are a superset of regular types. In other words, + with parameterized types you can do everything a regular type does, + and more. For example, parameterized types can handle `nil` values + in both `load` and `dump` callbacks, they can customize `cast` behavior + per query and per changeset, and also control how values are embedded. + + However, parameterized types are also more complex. Therefore, if + everything you need to achieve can be done with basic types, they + should be preferred to parameterized ones. + + ## Examples + + To create a parameterized type, create a module as shown below: + + defmodule MyApp.MyType do + use Ecto.ParameterizedType + + def type(_params), do: :string + + def init(opts) do + validate_opts(opts) + Enum.into(opts, %{}) + end + + def cast(data, params) do + ... + {:ok, cast_data} + end + + def load(data, _loader, params) do + ... + {:ok, loaded_data} + end + + def dump(data, dumper, params) do + ... + {:ok, dumped_data} + end + + def equal?(a, b, _params) do + a == b + end + end + + To use this type in a schema field, specify the type and parameters like this: + + schema "foo" do + field :bar, MyApp.MyType, opt1: :baz, opt2: :boo + end + + To use this type in a schema field with a composite type, specify the type in a tuple + and opts afterwards. + + schema "foo" do + field :bars, {:array, MyApp.MyType}, opt1: :baz, opt2: :boo + end + + To use this type in places where you need it to be initialized (for example, + schemaless changesets), you can use `init/2`. + + > #### `use Ecto.ParameterizedType` {: .info} + > + > When you `use Ecto.ParameterizedType`, it will set + > `@behaviour Ecto.ParameterizedType` and define default, overridable + > implementations for `c:embed_as/2` and `c:equal?/3`. 
+ """ + + @typedoc """ + The keyword options passed from the Schema's field macro into `c:init/1` + """ + @type opts :: keyword() + + @typedoc """ + The parameters for the ParameterizedType + + This is the value passed back from `c:init/1` and subsequently passed + as the last argument to all callbacks. Idiomatically it is a map. + """ + @type params :: term() + + @doc """ + Callback to convert the options specified in the field macro into parameters + to be used in other callbacks. + + This function is called at compile time, and should raise if invalid values are + specified. It is idiomatic that the parameters returned from this are a map. + `field` and `schema` will be injected into the options automatically. + + For example, this schema specification + + schema "my_table" do + field :my_field, MyParameterizedType, opt1: :foo, opt2: nil + end + + will result in the call: + + MyParameterizedType.init([schema: "my_table", field: :my_field, opt1: :foo, opt2: nil]) + + """ + @callback init(opts :: opts()) :: params() + + @doc """ + Casts the given input to the ParameterizedType with the given parameters. + + If the parameterized type is also a composite type, + the inner type can be cast by calling `Ecto.Type.cast/2` + directly. + + For more information on casting, see `c:Ecto.Type.cast/1`. + """ + @callback cast(data :: term, params()) :: + {:ok, term} | :error | {:error, keyword()} + + @doc """ + Loads the given term into a ParameterizedType. + + It receives a `loader` function in case the parameterized + type is also a composite type. In order to load the inner + type, the `loader` must be called with the inner type and + the inner value as argument. + + For more information on loading, see `c:Ecto.Type.load/1`. + Note that this callback *will* be called when loading a `nil` + value, unlike `c:Ecto.Type.load/1`. 
+ """ + @callback load(value :: any(), loader :: function(), params()) :: {:ok, value :: any()} | :error + + @doc """ + Dumps the given term into an Ecto native type. + + It receives a `dumper` function in case the parameterized + type is also a composite type. In order to dump the inner + type, the `dumper` must be called with the inner type and + the inner value as argument. + + For more information on dumping, see `c:Ecto.Type.dump/1`. + Note that this callback *will* be called when dumping a `nil` + value, unlike `c:Ecto.Type.dump/1`. + """ + @callback dump(value :: any(), dumper :: function(), params()) :: {:ok, value :: any()} | :error + + @doc """ + Returns the underlying schema type for the ParameterizedType. + + For more information on schema types, see `c:Ecto.Type.type/0` + """ + @callback type(params()) :: Ecto.Type.t() + + @doc """ + Checks if two terms are semantically equal. + """ + @callback equal?(value1 :: any(), value2 :: any(), params()) :: boolean() + + @doc """ + Dictates how the type should be treated inside embeds. + + For more information on embedding, see `c:Ecto.Type.embed_as/1` + """ + @callback embed_as(format :: atom(), params()) :: :self | :dump + + @doc """ + Generates a loaded version of the data. + + This callback is invoked when a parameterized type is given + to `field` with the `:autogenerate` flag. + """ + @callback autogenerate(params()) :: term() + + @doc """ + Formats output when a ParameterizedType is printed in exceptions and + other logs. + + Note this callback is not used when constructing `Ecto.Changeset` validation + errors. See the `:message` option of most `Ecto.Changeset` validation + functions for how to customize error messaging on a per `Ecto.Changeset` basis. + """ + @callback format(params()) :: String.t() + + @optional_callbacks autogenerate: 1, format: 1 + + @doc """ + Inits a parameterized type given by `type` with `opts`. + + Useful when manually initializing a type for schemaless changesets. 
+ """ + def init(type, opts) do + {:parameterized, {type, type.init(opts)}} + end + + @doc false + defmacro __using__(_) do + quote location: :keep do + @behaviour Ecto.ParameterizedType + + @doc false + def embed_as(_, _), do: :self + + @doc false + def equal?(term1, term2, _params), do: term1 == term2 + + defoverridable embed_as: 2, equal?: 3 + end + end +end diff --git a/deps/ecto/lib/ecto/query.ex b/deps/ecto/lib/ecto/query.ex new file mode 100644 index 0000000..c3c145c --- /dev/null +++ b/deps/ecto/lib/ecto/query.ex @@ -0,0 +1,3103 @@ +defmodule Ecto.SubQuery do + @moduledoc """ + A struct representing subqueries. + + Users of Ecto must consider this struct as opaque + and not access its fields. Authors of adapters may + read its contents, but never modify them. + + See `Ecto.Query.subquery/2` for more information. + """ + defstruct [:query, :params, :select, :cache] + + @type t :: %__MODULE__{} +end + +defmodule Ecto.Query do + @moduledoc ~S""" + Provides the Query DSL. + + Queries are used to retrieve and manipulate data from a repository + (see `Ecto.Repo`). Ecto queries come in two flavors: keyword-based + and macro-based. Most examples will use the keyword-based syntax, + the macro one will be explored in later sections. + + Let's see a sample query: + + # Imports only from/2 of Ecto.Query + import Ecto.Query, only: [from: 2] + + # Create a query + query = from u in "users", + where: u.age > 18, + select: u.name + + # Send the query to the repository + Repo.all(query) + + In the example above, we are directly querying the "users" table + from the database. Queries do not reach out to the data store until + they are passed as arguments to a function from `Ecto.Repo`. + + ## Query expressions + + Ecto allows a limited set of expressions inside queries. 
In the + query below, for example, we use `u.age` to access a field, the + `>` comparison operator and the literal `0`: + + query = from u in "users", where: u.age > 0, select: u.name + + You can find the full list of operations in `Ecto.Query.API`. + Besides the operations listed there, the following literals are + supported in queries: + + * Integers: `1`, `2`, `3` + * Floats: `1.0`, `2.0`, `3.0` + * Booleans: `true`, `false` + * Binaries: `<<1, 2, 3>>` + * Strings: `"foo bar"`, `~s(this is a string)` + * Atoms (other than booleans and `nil`): `:foo`, `:bar` + * Arrays: `[1, 2, 3]`, `~w(interpolate words)` + + All other types and dynamic values must be passed as a parameter using + interpolation as explained below. + + ## Interpolation and casting + + External values and Elixir expressions can be injected into a query + expression with `^`: + + def with_minimum(age, height_ft) do + from u in "users", + where: u.age > ^age and u.height > ^(height_ft * 3.28), + select: u.name + end + + with_minimum(18, 5.0) + + When interpolating values, you may want to explicitly tell Ecto + what is the expected type of the value being interpolated: + + age = "18" + Repo.all(from u in "users", + where: u.age > type(^age, :integer), + select: u.name) + + In the example above, Ecto will cast the age to type integer. When + a value cannot be cast, `Ecto.Query.CastError` is raised. + + To avoid the repetition of always specifying the types, you may define + an `Ecto.Schema`. 
In such cases, Ecto will analyze your queries and + automatically cast the interpolated "age" when compared to the `u.age` + field, as long as the age field is defined with type `:integer` in + your schema: + + age = "18" + Repo.all(from u in User, where: u.age > ^age, select: u.name) + + Another advantage of using schemas is that we no longer need to specify + the select option in queries, as by default Ecto will retrieve all + fields specified in the schema: + + age = "18" + Repo.all(from u in User, where: u.age > ^age) + + For this reason, we will use schemas on the remaining examples but + remember Ecto does not require them in order to write queries. + + ## `nil` comparison + + `nil` comparison in filters, such as where and having, is forbidden + and it will raise an error: + + # Raises if age is nil + from u in User, where: u.age == ^age + + This is done as a security measure to avoid attacks that attempt + to traverse entries with nil columns. To check that value is `nil`, + use `is_nil/1` instead: + + from u in User, where: is_nil(u.age) + + ## Composition + + Ecto queries are composable. For example, the query above can + actually be defined in two parts: + + # Create a query + query = from u in User, where: u.age > 18 + + # Extend the query + query = from u in query, select: u.name + + Composing queries uses the same syntax as creating a query. + The difference is that, instead of passing a schema like `User` + on the right-hand side of `in`, we passed the query itself. + + Any value can be used on the right-hand side of `in` as long as it implements + the `Ecto.Queryable` protocol. For now, we know the protocol is + implemented for both atoms (like `User`) and strings (like "users"). + + In any case, regardless if a schema has been given or not, Ecto + queries are always composable thanks to its binding system. + + ### Positional bindings + + On the left-hand side of `in` we specify the query bindings. This is + done inside `from` and `join` clauses. 
In the query below `u` is a + binding and `u.age` is a field access using this binding. + + query = from u in User, where: u.age > 18 + + Bindings are not exposed from the query. When composing queries, you + must specify bindings again for each refinement query. For example, + to further narrow down the above query, we again need to tell Ecto what + bindings to expect: + + query = from u in query, select: u.city + + Bindings in Ecto are positional, and the names do not have to be + consistent between input and refinement queries. For example, the + query above could also be written as: + + query = from q in query, select: q.city + + It would make no difference to Ecto. This is important because + it allows developers to compose queries without caring about + the bindings used in the initial query. + + When using joins, the bindings should be matched in the order they + are specified: + + # Create a query + query = from p in Post, + join: c in Comment, on: c.post_id == p.id + + # Extend the query + query = from [p, c] in query, + select: {p.title, c.body} + + You are not required to specify all bindings when composing. + For example, if we would like to order the results above by + post insertion date, we could further extend it as: + + query = from q in query, order_by: q.inserted_at + + The example above will work if the input query has 1 or 10 + bindings. As long as the number of bindings is less than the + number of `from`s + `join`s, Ecto will match only what you have + specified. The first binding always matches the source given + in `from`. + + Similarly, if you are interested only in the last binding + (or the last bindings) in a query, you can use `...` to + specify "all bindings before" and match on the last one. + + For instance, imagine you wrote: + + posts_with_comments = + from p in query, join: c in Comment, on: c.post_id == p.id + + And now we want to make sure to return both the post title + and the comment body. 
Although we may not know how many + bindings there are in the query, we are sure posts is the + first binding and comments are the last one, so we can write: + + from [p, ..., c] in posts_with_comments, select: {p.title, c.body} + + In other words, `...` will include all the bindings between the + first and the last, which may be one, many or no bindings at all. + + ### Named bindings + + Another option for flexibly building queries with joins are named + bindings. Coming back to the previous example, we can use the + `as: :comment` option to bind the comments join to a concrete name: + + posts_with_comments = + from p in Post, + join: c in Comment, as: :comment, on: c.post_id == p.id + + Now we can refer to it using the following form of a bindings list: + + from [p, comment: c] in posts_with_comments, select: {p.title, c.body} + + This approach lets us not worry about keeping track of the position + of the bindings when composing the query. The `:as` option can be + given both on joins and on `from`: + + from p in Post, as: :post + + Only atoms are accepted for binding names. Named binding references + must always be placed at the end of the bindings list: + + [positional_binding_1, positional_binding_2, named_1: binding, named_2: binding] + + Named bindings can also be used for late binding with the `as/1` + construct, allowing you to refer to a binding that has not been + defined yet: + + from c in Comment, where: as(:posts).id == c.post_id + + This is especially useful when working with subqueries, where you + may need to refer to a parent binding with `parent_as`, which is + not known when writing the subquery: + + child_query = from c in Comment, where: parent_as(:posts).id == c.post_id + from p in Post, as: :posts, inner_lateral_join: c in subquery(child_query) + + You can also match on a specific binding when building queries. 
For + example, let's suppose you want to create a generic sort function + that will order by a given `field` with a given `as` in `query`: + + # Knowing the name of the binding + def sort(query, as, field) do + from [{^as, x}] in query, order_by: field(x, ^field) + end + + ### Bindingless operations + + Although bindings are extremely useful when working with joins, + they are not necessary when the query has only the `from` clause. + For such cases, Ecto supports a way for building queries + without specifying the binding: + + from Post, + where: [category: "fresh and new"], + order_by: [desc: :published_at], + select: [:id, :title, :body] + + The query above will select all posts with category "fresh and new", + order by the most recently published, and return Post structs with + only the id, title and body fields set. It is equivalent to: + + from p in Post, + where: p.category == "fresh and new", + order_by: [desc: p.published_at], + select: struct(p, [:id, :title, :body]) + + One advantage of bindingless queries is that they are data-driven + and therefore useful for dynamically building queries. For example, + the query above could also be written as: + + where = [category: "fresh and new"] + order_by = [desc: :published_at] + select = [:id, :title, :body] + from Post, where: ^where, order_by: ^order_by, select: ^select + + This feature is very useful when queries need to be built based + on some user input, like web search forms, CLIs and so on. + + ## Fragments + + If you need an escape hatch, Ecto provides fragments + (see `Ecto.Query.API.fragment/1`) to inject SQL (and non-SQL) + fragments into queries. 
+ + For example, to get all posts while running the "lower(?)" + function in the database where `p.title` is interpolated + in place of `?`, one can write: + + from p in Post, + where: is_nil(p.published_at) and + fragment("lower(?)", p.title) == ^title + + Also, most adapters provide direct APIs for queries, like + `Ecto.Adapters.SQL.query/4`, allowing developers to + completely bypass Ecto queries. + + ## Macro API + + In all examples so far we have used the **keywords query syntax** to + create a query: + + import Ecto.Query + from u in "users", where: u.age > 18, select: u.name + + Due to the prevalence of the pipe operator in Elixir, Ecto also supports + a pipe-based syntax: + + "users" + |> where([u], u.age > 18) + |> select([u], u.name) + + The keyword-based and pipe-based examples are equivalent. The downside + of using macros is that the binding must be specified for every operation. + However, since keyword-based and pipe-based examples are equivalent, the + bindingless syntax also works for macros. Please note that the following + example is not completely equivalent to the previous example, + as it does not return the name but rather the `User` struct: + + "users" + |> where([u], u.age > 18) + |> select([:name]) + + Such a syntax allows developers to write queries using bindings only in more + complex query expressions. + + This module documents each of those macros, providing examples in + both the keywords query and pipe expression formats. + + ## Query prefix + + It is possible to set a prefix for the queries. For Postgres users, + this will specify the schema where the table is located, while for + MySQL users this will specify the database where the table is + located. When no prefix is set, Postgres queries are assumed to be + in the public schema, while MySQL queries are assumed to be in the + database set in the config for the repo. + + The query prefix may be set either for the whole query or on each + individual `from` and `join` expression. 
If a `prefix` is not given + to a `from` or a `join`, the prefix of the schema given to the `from` + or `join` is used. The query prefix is used only if none of the above + are declared. + + Let's see some examples. To set the query prefix globally, the simplest + mechanism is to pass an option to the repository operation: + + results = Repo.all(query, prefix: "accounts") + + You may also set the prefix for the whole query by setting the prefix field: + + results = + query # May be User or an Ecto.Query itself + |> Ecto.Query.put_query_prefix("accounts") + |> Repo.all() + + Setting the prefix in the query changes the default prefix of all `from` + and `join` expressions. You can override the query prefix by either setting + the `@schema_prefix` in your schema definitions or by passing the prefix + option: + + from u in User, + prefix: "accounts", + join: p in assoc(u, :posts), + prefix: "public" + + Overall, here is the prefix lookup precedence: + + 1. The `:prefix` option given to `from`/`join` has the highest precedence + 2. Then it falls back to the `@schema_prefix` attribute declared in the schema + given to `from`/`join` + 3. Then it falls back to the query prefix. The query prefix may be + set either on the query with `put_query_prefix/2` or by passing + the `:prefix` option when calling the `Repo` module (where the + former wins if both methods are used) + + The prefixes set in the query will be preserved when loading data. + """ + + @doc """ + The `Ecto.Query` struct. + + Users of Ecto must consider this struct as opaque + and not access its field directly. Authors of adapters + may read its contents, but never modify them. 
+ """ + defstruct prefix: nil, + sources: nil, + from: nil, + joins: [], + aliases: %{}, + wheres: [], + select: nil, + order_bys: [], + limit: nil, + offset: nil, + group_bys: [], + combinations: [], + updates: [], + havings: [], + preloads: [], + assocs: [], + distinct: nil, + lock: nil, + windows: [], + with_ctes: nil + + defmodule FromExpr do + @moduledoc false + defstruct [:source, :file, :line, :as, :prefix, params: [], hints: []] + end + + defmodule DynamicExpr do + @moduledoc false + defstruct [:fun, :binding, :file, :line] + end + + defmodule QueryExpr do + @moduledoc false + defstruct [:expr, :file, :line, params: []] + end + + defmodule ByExpr do + @moduledoc false + defstruct [:expr, :file, :line, params: [], subqueries: []] + end + + defmodule BooleanExpr do + @moduledoc false + defstruct [:op, :expr, :file, :line, params: [], subqueries: []] + end + + defmodule SelectExpr do + @moduledoc false + defstruct [:expr, :file, :line, :fields, params: [], take: %{}, subqueries: [], aliases: %{}] + end + + defmodule JoinExpr do + @moduledoc false + defstruct [ + :qual, + :source, + :on, + :file, + :line, + :assoc, + :as, + :ix, + :prefix, + params: [], + hints: [] + ] + end + + defmodule WithExpr do + @moduledoc false + defstruct recursive: false, queries: [] + end + + defmodule LimitExpr do + @moduledoc false + defstruct [:expr, :file, :line, with_ties: false, params: []] + end + + defmodule Tagged do + @moduledoc false + # * value is the tagged value + # * tag is the directly tagged value, like Ecto.UUID + # * type is the underlying tag type, like :string + defstruct [:tag, :type, :value] + end + + defmodule Values do + @moduledoc false + defstruct [:types, :num_rows, :params] + + def new([], _types) do + raise ArgumentError, "must provide a non-empty list to values/2" + end + + def new(values_list, types) do + fields = fields(values_list) + types = types!(fields, types) + params = params!(values_list, types) + %__MODULE__{types: types, params: params, 
num_rows: length(values_list)} + end + + defp fields(values_list) do + fields = + Enum.reduce(values_list, MapSet.new(), fn values, fields -> + Enum.reduce(values, fields, fn {field, _}, fields -> + MapSet.put(fields, field) + end) + end) + + MapSet.to_list(fields) + end + + defp types!(fields, types) when is_map(types) do + Enum.map(fields, fn field -> + case types do + %{^field => type} -> + {field, type} + + _ -> + raise ArgumentError, + "values/2 must declare the type for every field. " <> + "The type was not given for field `#{field}`" + end + end) + end + + defp types!(fields, schema) when is_atom(schema) do + Enum.map(fields, fn field -> + if type = schema.__schema__(:type, field) do + {field, type} + else + raise ArgumentError, + "values/2 must declare the type for every field. " <> + "The type was not given for field `#{field}`" + end + end) + end + + defp params!(values_list, types) do + Enum.reduce(values_list, [], fn values, params -> + Enum.reduce(types, params, fn {field, type}, params -> + case values do + %{^field => value} -> + [{value, type} | params] + + _ -> + raise ArgumentError, + "each member of a values list must have the same fields. " <> + "Missing field `#{field}` in #{inspect(values)}" + end + end) + end) + end + end + + @type t :: %__MODULE__{} + @type dynamic_expr :: %DynamicExpr{} + + alias Ecto.Query.Builder + + @doc """ + Builds a dynamic query expression. + + Dynamic query expressions allow developers to compose query + expressions bit by bit, so that they can be interpolated into + parts of a query or another dynamic expression later on. 
+ + ## Examples + + Imagine you have a set of conditions you want to build your query on: + + conditions = false + + conditions = + if params["is_public"] do + dynamic([p], p.is_public or ^conditions) + else + conditions + end + + conditions = + if params["allow_reviewers"] do + dynamic([p, a], a.reviewer == true or ^conditions) + else + conditions + end + + from query, where: ^conditions + + In the example above, we were able to build the query expressions + bit by bit, using different bindings, and later interpolate it all + at once into the actual query. + + A dynamic expression can always be interpolated inside another dynamic + expression and into the constructs described below. + + ## `where`, `having` and a `join`'s `on` + + The [`dynamic`](`dynamic/2`) macro can be interpolated at the root of a `where`, + `having` or a `join`'s `on`. + + For example, assuming the `conditions` variable defined in the + previous section, the following is forbidden because it is not + at the root of a `where`: + + from q in query, where: q.some_condition and ^conditions + + Fortunately that's easily solved by simply rewriting it to: + + conditions = dynamic([q], q.some_condition and ^conditions) + from query, where: ^conditions + + > #### Dynamic boundaries {: .warning} + > + > Type casting does not cross dynamic boundaries. When you write + > a dynamic expression, such as `dynamic([p], p.visits > ^param)`, + > Ecto will automatically cast `^param` to the type of `p.visits`. + > + > However, if `p.visits` is in itself dynamic, as in the example + > below, then Ecto won't be able to propagate its type to `^param`: + > + > field = dynamic([p], p.visits) + > dynamic(^field > ^param) + > + + ## `order_by` + + Dynamics can be interpolated inside keyword lists at the root of + `order_by`. 
For example, you can write: + + order_by = [ + asc: :some_field, + desc: dynamic([p], fragment("?->>?", p.another_field, "json_key")) + ] + + from query, order_by: ^order_by + + Dynamics are also supported in `order_by/2` clauses inside `windows/2`. + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. For example, this won't work: + + from query, order_by: [asc: ^dynamic(...)] + + But this will: + + from query, order_by: ^[asc: dynamic(...)] + + ## `group_by` + + Dynamics can be interpolated inside keyword lists at the root of + `group_by`. For example, you can write: + + group_by = [ + :some_field, + dynamic([p], fragment("?->>?", p.another_field, "json_key")) + ] + + from query, group_by: ^group_by + + Dynamics are also supported in `partition_by/2` clauses inside `windows/2`. + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. For example, this won't work: + + from query, group_by: [:some_field, ^dynamic(...)] + + But this will: + + from query, group_by: ^[:some_field, dynamic(...)] + + ## `select` and `select_merge` + + Dynamics can be inside maps interpolated at the root of a + `select` or `select_merge`. For example, you can write: + + fields = %{ + period: dynamic([p], p.month), + metric: dynamic([p], p.distance) + } + + from query, select: ^fields + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. 
For
+  example, this won't work:
+
+      from query, select: %{field: ^dynamic(...)}
+
+  But this will:
+
+      from query, select: ^%{field: dynamic(...)}
+
+  Maps with dynamics can also be merged into existing `select` structures,
+  enabling a variety of possibilities for partially dynamic selects:
+
+      metric = dynamic([p], p.distance)
+
+      from query, select: [:period, :metric], select_merge: ^%{metric: metric}
+
+  Aliasing fields with `selected_as/2` and referencing them with `selected_as/1`
+  is also allowed:
+
+      fields = %{
+        period: dynamic([p], selected_as(p.month, :month)),
+        metric: dynamic([p], p.distance)
+      }
+
+      order = dynamic(selected_as(:month))
+
+      from query, select: ^fields, order_by: ^order
+
+  ## `update`
+
+  A `dynamic` is also supported inside updates, for example:
+
+      updates = [
+        set: [average: dynamic([p], p.sum / p.count)]
+      ]
+
+      from query, update: ^updates
+
+  ## `preload`
+
+  Dynamics can be used with `preload` in order to dynamically
+  specify the binding for a joined association. For example, you can
+  write:
+
+      preloads = [
+        :non_joined_assoc,
+        joined_assoc: dynamic([joined: j], j)
+      ]
+
+      from x in query,
+        join: assoc(x, :joined_assoc),
+        as: :joined,
+        preload: ^preloads
+
+  While the example above uses a named binding (`:joined`),
+  positional bindings may also be used:
+
+      preloads = [
+        :non_joined_assoc,
+        joined_assoc: dynamic([_, j], j)
+      ]
+
+      from x in query,
+        join: assoc(x, :joined_assoc),
+        preload: ^preloads
+
+  As with `where` and friends, it is not possible to pass dynamics
+  outside of an interpolated root. For example, this won't work:
+
+      from query, preload: [comments: ^dynamic(...)]
+
+  But this will:
+
+      from query, preload: ^[comments: dynamic(...)]
+
+  Dynamic expressions used in `preload` must evaluate to a single
+  binding.
For instance, this won't work: + + preloads = dynamic([comments: c, likes: l], [comments: {c, likes: l}]) + + But this will: + + dynamic_comments = dynamic([comments: c], c) + dynamic_likes = dynamic([likes: l], l) + + preloads = [ + comments: {dynamic_comments, likes: dynamic_likes} + ] + """ + defmacro dynamic(binding \\ [], expr) do + Builder.Dynamic.build(binding, expr, __CALLER__) + end + + @doc """ + Defines windows which can be used with `Ecto.Query.WindowAPI`. + + Receives a keyword list where keys are names of the windows + and values are a keyword list with window expressions. + + ## Examples + + # Compare each employee's salary with the average salary in his or her department + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), :department)}, + windows: [department: [partition_by: e.depname]] + + In the example above, we get the average salary per department. + `:department` is the window name, partitioned by `e.depname` + and `avg/1` is the window function. For more information + on windows functions, see `Ecto.Query.WindowAPI`. + + ## Window expressions + + The following keys are allowed when specifying a window. + + ### :partition_by + + A list of fields to partition the window by, for example: + + windows: [department: [partition_by: e.depname]] + + A list of atoms can also be interpolated for dynamic partitioning: + + fields = [:depname, :year] + windows: [dynamic_window: [partition_by: ^fields]] + + ### :order_by + + A list of fields to order the window by, for example: + + windows: [ordered_names: [order_by: e.name]] + + It works exactly as the keyword query version of `order_by/3`. + + ### :frame + + A fragment which defines the frame for window functions. 
+ + ## Examples + + # Compare each employee's salary for each month with his average salary for previous 3 months + from p in Payroll, + select: {p.empno, p.date, p.salary, over(avg(p.salary), :prev_months)}, + windows: [prev_months: [partition_by: p.empno, order_by: p.date, frame: fragment("ROWS 3 PRECEDING EXCLUDE CURRENT ROW")]] + + """ + defmacro windows(query, binding \\ [], expr) do + Builder.Windows.build(query, binding, expr, __CALLER__) + end + + @doc """ + Converts a query into a subquery. + + If a subquery is given, returns the subquery itself. + If any other value is given, it is converted to a query via + `Ecto.Queryable` and wrapped in the `Ecto.SubQuery` struct. + + `subquery` is supported in: + + * `from`, + * `join`, + * `where`, in the form `p.x in subquery(q)`, + * `select` and `select_merge`, in the form of `%{field: subquery(...)}`. + + ## Examples + + # Get the average salary of the top 10 highest salaries + query = from Employee, order_by: [desc: :salary], limit: 10 + from e in subquery(query), select: avg(e.salary) + + A prefix can be specified for a subquery, similar to standard repo operations: + + query = from Employee, order_by: [desc: :salary], limit: 10 + from e in subquery(query, prefix: "my_prefix"), select: avg(e.salary) + + + Subquery can also be used in a `join` expression. 
+ + UPDATE posts + SET sync_started_at = $1 + WHERE id IN ( + SELECT id FROM posts + WHERE synced = false AND (sync_started_at IS NULL OR sync_started_at < $1) + LIMIT $2 + ) + + We can write it as a join expression: + + subset = from(p in Post, + where: p.synced == false and + (is_nil(p.sync_started_at) or p.sync_started_at < ^min_sync_started_at), + limit: ^batch_size + ) + + Repo.update_all( + from(p in Post, join: s in subquery(subset), on: s.id == p.id), + set: [sync_started_at: NaiveDateTime.utc_now()] + ) + + Or as a `where` condition: + + subset_ids = from(p in subset, select: p.id) + Repo.update_all( + from(p in Post, where: p.id in subquery(subset_ids)), + set: [sync_started_at: NaiveDateTime.utc_now()] + ) + + If you need to refer to a parent binding which is not known when writing the subquery, + you can use `parent_as` as shown in the examples under ["Named bindings"](#module-named-bindings) + in this module doc. + + You can also use subquery directly in `select` and `select_merge`: + + comments_count = from(c in Comment, where: c.post_id == parent_as(:post).id, select: count()) + from(p in Post, as: :post, select: %{id: p.id, comments: subquery(comments_count)}) + """ + def subquery(query, opts \\ []) do + subquery = wrap_in_subquery(query) + + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + put_in(subquery.query.prefix, prefix) + + :error -> + subquery + end + end + + defp wrap_in_subquery(%Ecto.SubQuery{} = subquery), do: subquery + defp wrap_in_subquery(%Ecto.Query{} = query), do: %Ecto.SubQuery{query: query} + defp wrap_in_subquery(queryable), do: %Ecto.SubQuery{query: Ecto.Queryable.to_query(queryable)} + + @joins [ + :join, + :inner_join, + :cross_join, + :cross_lateral_join, + :left_join, + :right_join, + :full_join, + :inner_lateral_join, + :left_lateral_join + ] + + @doc """ + Puts the given prefix in a query. 
+ """ + def put_query_prefix(%Ecto.Query{} = query, prefix) do + %{query | prefix: prefix} + end + + def put_query_prefix(other, prefix) do + query = %Ecto.Query{} = Ecto.Queryable.to_query(other) + put_query_prefix(query, prefix) + end + + @doc """ + Resets a previously set field or fields on a query. + + It can reset many fields except the query source (`from`). When excluding + a `:join`, it will remove *all* types of joins. If you prefer to remove a + single type of join, please see paragraph below. + + ## Examples + + Ecto.Query.exclude(query, :join) + Ecto.Query.exclude(query, :where) + Ecto.Query.exclude(query, :order_by) + Ecto.Query.exclude(query, :group_by) + Ecto.Query.exclude(query, :having) + Ecto.Query.exclude(query, :distinct) + Ecto.Query.exclude(query, :select) + Ecto.Query.exclude(query, :combinations) + Ecto.Query.exclude(query, :with_ctes) + Ecto.Query.exclude(query, :limit) + Ecto.Query.exclude(query, :offset) + Ecto.Query.exclude(query, :lock) + Ecto.Query.exclude(query, :preload) + Ecto.Query.exclude(query, :update) + Ecto.Query.exclude(query, :windows) + + You can remove multiple things at once by passing a list + + Ecto.Query.exclude(query, [:join, :where]) + Ecto.Query.exclude(query, [:limit, :offset]) + + You can remove specific joins such as `left_join` and `inner_join`: + + Ecto.Query.exclude(query, :inner_join) + Ecto.Query.exclude(query, :cross_join) + Ecto.Query.exclude(query, :cross_lateral_join) + Ecto.Query.exclude(query, :left_join) + Ecto.Query.exclude(query, :right_join) + Ecto.Query.exclude(query, :full_join) + Ecto.Query.exclude(query, :inner_lateral_join) + Ecto.Query.exclude(query, :left_lateral_join) + + However, keep in mind that if a join is removed and its bindings + were referenced elsewhere, the bindings won't be removed, leading + to a query that won't compile. 
+ + You can remove specific windows by name: + + Ecto.Query.exclude(query, {:windows, [name1, name2]}) + + If a window was referenced elsewhere, for example in `select` or `order_by`, + it won't be removed. You must recreate the expressions manually. + """ + def exclude(%Ecto.Query{} = query, field), do: maybe_exclude_list(query, field) + def exclude(query, field), do: maybe_exclude_list(Ecto.Queryable.to_query(query), field) + + defp maybe_exclude_list(query, list) when is_list(list) do + Enum.reduce(list, query, &do_exclude(&2, &1)) + end + + defp maybe_exclude_list(query, field) do + do_exclude(query, field) + end + + defp do_exclude(%Ecto.Query{} = query, :join) do + %{query | joins: [], aliases: Map.take(query.aliases, [query.from.as])} + end + + defp do_exclude(%Ecto.Query{} = query, join_keyword) when join_keyword in @joins do + qual = join_qual(join_keyword) + {excluded, remaining} = Enum.split_with(query.joins, &(&1.qual == qual)) + aliases = Map.drop(query.aliases, Enum.map(excluded, & &1.as)) + %{query | joins: remaining, aliases: aliases} + end + + defp do_exclude(%Ecto.Query{} = query, {:windows, window_names}) when is_list(window_names) do + remaining = Enum.filter(query.windows, fn {name, _} -> name not in window_names end) + %{query | windows: remaining} + end + + defp do_exclude(%Ecto.Query{} = query, :where), do: %{query | wheres: []} + defp do_exclude(%Ecto.Query{} = query, :order_by), do: %{query | order_bys: []} + defp do_exclude(%Ecto.Query{} = query, :group_by), do: %{query | group_bys: []} + defp do_exclude(%Ecto.Query{} = query, :combinations), do: %{query | combinations: []} + defp do_exclude(%Ecto.Query{} = query, :with_ctes), do: %{query | with_ctes: nil} + defp do_exclude(%Ecto.Query{} = query, :having), do: %{query | havings: []} + defp do_exclude(%Ecto.Query{} = query, :distinct), do: %{query | distinct: nil} + defp do_exclude(%Ecto.Query{} = query, :select), do: %{query | select: nil} + defp do_exclude(%Ecto.Query{} = query, :limit), 
do: %{query | limit: nil} + defp do_exclude(%Ecto.Query{} = query, :offset), do: %{query | offset: nil} + defp do_exclude(%Ecto.Query{} = query, :lock), do: %{query | lock: nil} + defp do_exclude(%Ecto.Query{} = query, :preload), do: %{query | preloads: [], assocs: []} + defp do_exclude(%Ecto.Query{} = query, :update), do: %{query | updates: []} + defp do_exclude(%Ecto.Query{} = query, :windows), do: %{query | windows: []} + + @doc """ + Creates a query. + + It can either be a keyword query or a query expression. + + If it is a keyword query the first argument must be + either an `in` expression, a value that implements + the `Ecto.Queryable` protocol, or an `Ecto.Query.API.fragment/1`. If the query needs a + reference to the data source in any other part of the + expression, then an `in` must be used to create a reference + variable. The second argument should be a keyword query + where the keys are expression types and the values are + expressions. + + If it is a query expression the first argument must be + a value that implements the `Ecto.Queryable` protocol + and the second argument the expression. + + ## Hints + + The `hints` keyword can be used to specify query hints: + + from p in Post, + hints: ["USE INDEX FOO"], + where: p.title == "title" + + It can also be used as a general mechanism for adding statements that + come after the `from` clause. For example, it can be used to enable + table sampling: + + from p in Post, + hints: "TABLESAMPLE SYSTEM(1)" + + `from` hints must be a (list of) compile-time strings or unsafe fragments. An unsafe + fragment can be used to specify dynamic hints: + + sample = "SYSTEM_ROWS(1)" + + from p in Post, + hints: ["TABLESAMPLE", unsafe_fragment(^sample)] + + > #### Unsafe Fragments {: .warning} + > + > The output of `unsafe_fragment/1` will be injected directly into the + > resulting SQL statement without being escaped. For this reason, input + > from uncontrolled sources, such as user input, should **never** be used. 
+ > Otherwise, it could lead to harmful SQL injection attacks. + + ## Keywords examples + + # `in` expression + from(c in City, select: c) + + # Ecto.Queryable + from(City, limit: 1) + + # Fragment with user-defined function and predefined columns + from(f in fragment("my_table_valued_function(arg)"), select: f.x) + + # Fragment with built-in function and undefined columns + from(f in fragment("select generate_series(?::integer, ?::integer) as x", ^0, ^10), select: f.x) + + ## Expressions examples + + # Schema + City |> select([c], c) + + # Source + "cities" |> select([c], c) + + # Source with schema + {"cities", Source} |> select([c], c) + + # Ecto.Query + from(c in City) |> select([c], c) + + ## Examples + + def paginate(query, page, size) do + from query, + limit: ^size, + offset: ^((page-1) * size) + end + + The example above does not use `in` because `limit` and `offset` + do not require a reference to the data source. However, extending + the query with a where expression would require the use of `in`: + + def published(query) do + from p in query, where: not(is_nil(p.published_at)) + end + + Notice we have created a `p` variable to reference the query's + original data source. This assumes that the original query + only had one source. When the given query has more than one source, + positional or named bindings may be used to access the additional sources. + + def published_multi(query) do + from [p,o] in query, + where: not(is_nil(p.published_at)) and not(is_nil(o.published_at)) + end + + Note that the variables `p` and `o` can be named whatever you like + as they have no importance in the query sent to the database. 
+ """ + defmacro from(expr, kw \\ []) do + unless Keyword.keyword?(kw) do + raise ArgumentError, "second argument to `from` must be a compile time keyword list" + end + + {kw, as, prefix, hints} = collect_as_and_prefix_and_hints(kw, nil, nil, nil) + + {quoted, binds, count_bind} = + Builder.From.build(expr, __CALLER__, as, prefix, List.wrap(hints)) + + from(kw, __CALLER__, count_bind, quoted, to_query_binds(binds)) + end + + @from_join_opts [:as, :prefix, :hints] + @no_binds [:union, :union_all, :except, :except_all, :intersect, :intersect_all] + @binds [:lock, :where, :or_where, :select, :distinct, :order_by, :group_by, :windows] ++ + [:having, :or_having, :limit, :offset, :preload, :update, :select_merge, :with_ctes] + + defp from([{type, expr} | t], env, count_bind, quoted, binds) when type in @binds do + # If all bindings are integer indexes keep AST Macro expandable to %Query{}, + # otherwise ensure that quoted code is evaluated before macro call + quoted = + if Enum.all?(binds, fn {_, value} -> is_integer(value) end) do + quote do + Ecto.Query.unquote(type)(unquote(quoted), unquote(binds), unquote(expr)) + end + else + quote do + query = unquote(quoted) + Ecto.Query.unquote(type)(query, unquote(binds), unquote(expr)) + end + end + + {t, quoted} = maybe_with_ties(type, t, quoted, binds) + + from(t, env, count_bind, quoted, binds) + end + + defp from([{type, expr} | t], env, count_bind, quoted, binds) when type in @no_binds do + quoted = + quote do + Ecto.Query.unquote(type)(unquote(quoted), unquote(expr)) + end + + from(t, env, count_bind, quoted, binds) + end + + defp from([{join, expr} | t], env, count_bind, quoted, binds) when join in @joins do + qual = join_qual(join) + {t, on, as, prefix, hints} = collect_on(t, nil, nil, nil, nil) + + {quoted, binds, count_bind} = + Builder.Join.build(quoted, qual, binds, expr, count_bind, on, as, prefix, hints, env) + + from(t, env, count_bind, quoted, to_query_binds(binds)) + end + + defp from([{:with_ties, _value} | 
_], _env, _count_bind, _quoted, _binds) do + Builder.error!("`with_ties` keyword must immediately follow a limit") + end + + defp from([{:on, _value} | _], _env, _count_bind, _quoted, _binds) do + Builder.error!("`on` keyword must immediately follow a join") + end + + defp from([{key, _value} | _], _env, _count_bind, _quoted, _binds) + when key in @from_join_opts do + Builder.error!("`#{key}` keyword must immediately follow a from/join") + end + + defp from([{key, _value} | _], _env, _count_bind, _quoted, _binds) do + Builder.error!("unsupported #{inspect(key)} in keyword query expression") + end + + defp from([], _env, _count_bind, quoted, _binds) do + quoted + end + + defp maybe_with_ties(:limit, t, quoted, binds) do + {t, with_ties} = collect_with_ties(t, nil) + + quoted = + if with_ties != nil do + quote do + Ecto.Query.with_ties(unquote(quoted), unquote(binds), unquote(with_ties)) + end + else + quoted + end + + {t, quoted} + end + + defp maybe_with_ties(_type, t, quoted, _binds), do: {t, quoted} + + defp to_query_binds(binds) do + for {k, v} <- binds, do: {{k, [], nil}, v} + end + + defp join_qual(:join), do: :inner + defp join_qual(:full_join), do: :full + defp join_qual(:left_join), do: :left + defp join_qual(:right_join), do: :right + defp join_qual(:inner_join), do: :inner + defp join_qual(:cross_join), do: :cross + defp join_qual(:cross_lateral_join), do: :cross_lateral + defp join_qual(:left_lateral_join), do: :left_lateral + defp join_qual(:inner_lateral_join), do: :inner_lateral + + defp collect_with_ties([{:with_ties, with_ties} | t], nil), + do: collect_with_ties(t, with_ties) + + defp collect_with_ties([{:with_ties, _} | _], _), + do: Builder.error!("`with_ties` keyword was given more than once to the same limit") + + defp collect_with_ties(t, with_ties), + do: {t, with_ties} + + defp collect_on([{key, _} | _] = t, on, as, prefix, hints) when key in @from_join_opts do + {t, as, prefix, hints} = collect_as_and_prefix_and_hints(t, as, prefix, hints) 
+ collect_on(t, on, as, prefix, hints) + end + + defp collect_on([{:on, on} | t], nil, as, prefix, hints), + do: collect_on(t, on, as, prefix, hints) + + defp collect_on([{:on, expr} | t], on, as, prefix, hints), + do: collect_on(t, {:and, [], [on, expr]}, as, prefix, hints) + + defp collect_on(t, on, as, prefix, hints), + do: {t, on, as, prefix, hints} + + defp collect_as_and_prefix_and_hints([{:as, as} | t], nil, prefix, hints), + do: collect_as_and_prefix_and_hints(t, as, prefix, hints) + + defp collect_as_and_prefix_and_hints([{:as, _} | _], _, _, _), + do: Builder.error!("`as` keyword was given more than once to the same from/join") + + defp collect_as_and_prefix_and_hints([{:prefix, prefix} | t], as, nil, hints), + do: collect_as_and_prefix_and_hints(t, as, {:ok, prefix}, hints) + + defp collect_as_and_prefix_and_hints([{:prefix, _} | _], _, _, _), + do: Builder.error!("`prefix` keyword was given more than once to the same from/join") + + defp collect_as_and_prefix_and_hints([{:hints, hints} | t], as, prefix, nil), + do: collect_as_and_prefix_and_hints(t, as, prefix, hints) + + defp collect_as_and_prefix_and_hints([{:hints, _} | _], _, _, _), + do: Builder.error!("`hints` keyword was given more than once to the same from/join") + + defp collect_as_and_prefix_and_hints(t, as, prefix, hints), + do: {t, as, prefix, hints} + + @doc """ + A join query expression. + + Receives a source that is to be joined to the query and a condition for + the join. The join condition can be any expression that evaluates + to a boolean value. The qualifier must be one of `:inner`, `:left`, + `:right`, `:cross`, `:cross_lateral`, `:full`, `:inner_lateral` or `:left_lateral`. + + For a keyword query the `:join` keyword can be changed to `:inner_join`, + `:left_join`, `:right_join`, `:cross_join`, `:cross_lateral_join`, `:full_join`, `:inner_lateral_join` + or `:left_lateral_join`. `:join` is equivalent to `:inner_join`. 
+ + Currently it is possible to join on: + + * an `Ecto.Schema`, such as `p in Post` + * an interpolated Ecto query with zero or more `where` clauses, + such as `c in ^(from "posts", where: [public: true])` + * an association, such as `c in assoc(post, :comments)` + * a subquery, such as `c in subquery(another_query)` + * a query fragment, such as `c in fragment("SOME COMPLEX QUERY")`, + see "Joining with fragments" below. + + ## Options + + Each join accepts the following options: + + * `:on` - a query expression or keyword list to filter the join, defaults to `true` + * `:as` - a named binding for the join + * `:prefix` - the prefix to be used for the join when issuing a database query + * `:hints` - a string or a list of strings to be used as database hints + + In the keyword query syntax, those options must be given immediately + after the join. In the expression syntax, the options are given as + the fifth argument. + + > #### Unspecified join condition {: .warning} + > + > Leaving the `:on` option unspecified while performing a join + > that is not a cross join will trigger a warning. This is to + > help users avoid performing expensive cross joins when they don't + > mean to. If the behaviour is desired, you may remove the warning by + > changing to a cross join or explicitly setting `on: true`. If + > the behaviour is not desired, you should specify the appropriate + > join condition. + + ## Keywords examples + + from c in Comment, + join: p in Post, + on: p.id == c.post_id, + select: {p.title, c.text} + + from p in Post, + left_join: c in assoc(p, :comments), + select: {p, c} + + Keywords can also be given or interpolated as part of `on`: + + from c in Comment, + join: p in Post, + on: [id: c.post_id], + select: {p.title, c.text} + + Any key in `on` will apply to the currently joined expression. + + It is also possible to interpolate an Ecto query on the right-hand side + of `in`. 
For example, the query above can also be written as:

      posts = Post

      from c in Comment,
        join: p in ^posts,
        on: [id: c.post_id],
        select: {p.title, c.text}

  The above is especially useful to dynamically join on existing
  queries, for example, to dynamically choose a source, or by
  choosing between public posts or posts that have been recently
  published:

      posts =
        if params["drafts"] do
          from p in Post, where: [drafts: true]
        else
          from p in Post, where: [public: true]
        end

      from c in Comment,
        join: p in ^posts, on: [id: c.post_id],
        select: {p.title, c.text}

  Only simple queries with `where` expressions can be interpolated
  in a join.

  ## Expressions examples

      Comment
      |> join(:inner, [c], p in Post, on: c.post_id == p.id)
      |> select([c, p], {p.title, c.text})

      Post
      |> join(:left, [p], c in assoc(p, :comments))
      |> select([p, c], {p, c})

      Post
      |> join(:left, [p], c in Comment, on: c.post_id == p.id and c.is_visible == true)
      |> select([p, c], {p, c})

  ## Joining with fragments

  When you need to join on a complex query, Ecto supports fragments in joins:

      Comment
      |> join(:inner, [c], p in fragment("SOME COMPLEX QUERY", c.id, ^some_param))

  Note that the `join` does not automatically wrap the fragment in
  parentheses, since some expressions require parens and others
  require no parens. Therefore, in cases such as common table
  expressions, you will have to explicitly wrap the fragment content
  in parens.

  ## Lateral Joins

  Lateral joins require a subquery that refers to previous bindings.
This can be achieved using + `parent_as` within the `subquery` function: + + Game + |> from(as: :game) + |> join( + :inner_lateral, + [], + subquery( + GamesSold + |> where([gs], gs.game_id == parent_as(:game).id) + |> order_by([gs], gs.sold_on) + |> limit(2) + ), + on: true + ) + |> select([g, gs], {g.name, gs.sold_on}) + + ## Hints + + `join` also supports table hints, as found in databases such as + [MySQL](https://dev.mysql.com/doc/refman/8.0/en/index-hints.html), + [MSSQL](https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-2017) and + [Clickhouse](https://clickhouse.tech/docs/en/sql-reference/statements/select/sample/). + + For example, a developer using MySQL may write: + + from p in Post, + join: c in Comment, + hints: ["USE INDEX FOO", "USE INDEX BAR"], + where: p.id == c.post_id, + select: c + + Keep in mind you want to use hints rarely, so don't forget to read the database + disclaimers about such functionality. + + Join hints must be static compile-time strings when they are specified as (list of) strings. 
+ """ + @join_opts [:on | @from_join_opts] + + defmacro join(query, qual, binding \\ [], expr, opts \\ []) + + defmacro join(query, qual, binding, expr, opts) + when is_list(binding) and is_list(opts) do + {t, on, as, prefix, hints} = collect_on(opts, nil, nil, nil, nil) + + with [{key, _} | _] <- t do + raise ArgumentError, + "invalid option `#{key}` passed to Ecto.Query.join/5, " <> + "valid options are: #{inspect(@join_opts)}" + end + + query + |> Builder.Join.build(qual, binding, expr, nil, on, as, prefix, hints, __CALLER__) + |> elem(0) + end + + defmacro join(_query, _qual, binding, _expr, opts) when is_list(opts) do + raise ArgumentError, + "invalid binding passed to Ecto.Query.join/5, should be " <> + "list of variables, got: #{Macro.to_string(binding)}" + end + + defmacro join(_query, _qual, _binding, _expr, opts) do + raise ArgumentError, + "invalid opts passed to Ecto.Query.join/5, should be " <> + "list, got: #{Macro.to_string(opts)}" + end + + @doc ~S''' + A common table expression (CTE) also known as WITH expression. + + `name` must be a compile-time literal string that is being used + as the table name to join the CTE in the main query or in the + recursive CTE. + + **IMPORTANT!** Beware of using CTEs. In raw SQL, CTEs can be + used as a mechanism to organize queries, but said mechanism + has no purpose in Ecto since Ecto queries are composable by + definition. In other words, if you need to break a large query + into parts, use all of the functionality in Elixir and in this + module to structure your code. Furthermore, breaking a query + into CTEs can negatively impact performance, as the database + may not optimize efficiently across CTEs. The main use case + for CTEs in Ecto is to provide recursive definitions, which + we outline in the following section. Non-recursive CTEs can + often be written as joins or subqueries, which provide better + performance. 
+ + ## Options + + * `:as` - the CTE query itself or a fragment + * `:materialized` - a boolean indicating whether the CTE should + be materialized. If blank, the database's default behaviour + will be used (only supported by Postgrex, for the built-in adapters) + * `:operation` - one of `:all`, `:update_all`, or `:delete_all` + indicating the operation type of the CTE query. If blank, it defaults to `:all`, + making the CTE query a SELECT query. (only supported by Postgres built-in adapter) + + ## Recursive CTEs + + Use `recursive_ctes/2` to enable recursive mode for CTEs. + + In the CTE query itself use the same table name to leverage + recursion that has been passed to the `name` argument. Make sure + to write a stop condition to avoid an infinite recursion loop. + Generally speaking, you should only use CTEs in Ecto for + writing recursive queries. + + ## Expression examples + + Products and their category names for breadcrumbs: + + category_tree_initial_query = + Category + |> where([c], is_nil(c.parent_id)) + + category_tree_recursion_query = + Category + |> join(:inner, [c], ct in "category_tree", on: c.parent_id == ct.id) + + category_tree_query = + category_tree_initial_query + |> union_all(^category_tree_recursion_query) + + Product + |> recursive_ctes(true) + |> with_cte("category_tree", as: ^category_tree_query) + |> join(:left, [p], c in "category_tree", on: c.id == p.category_id) + |> group_by([p], p.id) + |> select([p, c], %{p | category_names: fragment("ARRAY_AGG(?)", c.name)}) + + It's also possible to pass a raw SQL fragment: + + @raw_sql_category_tree """ + SELECT * FROM categories WHERE c.parent_id IS NULL + UNION ALL + SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id + """ + + Product + |> recursive_ctes(true) + |> with_cte("category_tree", as: fragment(@raw_sql_category_tree)) + |> join(:inner, [p], c in "category_tree", on: c.id == p.category_id) + + You can also query over the CTE table itself. 
In such cases, you can pass an
  `m:Ecto.Queryable#module-tuple` module tuple with the CTE table name as the first element
  and an Ecto schema as the second element. This will cast the result rows to Ecto
  structs, as long as the Ecto schema maps over the same fields in the CTE table:

      {"category_tree", Category}
      |> recursive_ctes(true)
      |> with_cte("category_tree", as: ^category_tree_query)
      |> join(:left, [c], p in assoc(c, :products))
      |> group_by([c], c.id)
      |> select([c, p], %{c | products_count: count(p.id)})

  Keep in mind that this will override the source table name to `"category_tree"` in the
  resulting structs, which will also inherit all other properties from the `Category` schema,
  including a `@schema_prefix` if any is set.

  In such cases, you can disable those properties by setting them as options:

      from(cte in {"category_tree", Category}, prefix: nil)
      |> recursive_ctes(true)
      |> with_cte("category_tree", as: ^category_tree_query)

  or join the CTE's result to the original schema:

      Category
      |> recursive_ctes(true)
      |> with_cte("category_tree", as: ^category_tree_query)
      |> join(:inner, [c], tree in "category_tree", on: c.id == tree.id)

  While this requires an additional join, it will allow you to use the structs in further
  data-modifying operations throughout your application without the need to manually reset
  the source table name.

  For the Postgres built-in adapter, it is possible to define data-modifying CTE queries:

      update_categories_query =
        Category
        |> where([c], is_nil(c.parent_id))
        |> update([c], set: [name: "Root category"])
        |> select([c], c)

      {"update_categories", Category}
      |> with_cte("update_categories", as: ^update_categories_query, operation: :update_all)
      |> select([c], c)

  Note: In order to retrieve the updated rows from a CTE query, the parent query
  must select rows from the CTE table instead of the table referenced by the CTE query.
  For example, `"update_categories"` will return updated rows for `"category"` table, but
  selecting from `"category"` table directly will return unaffected rows.
  For more details see Postgres documentation on data-modifying CTEs and how these work
  with snapshots.

  Keyword syntax is not supported for this feature.

  ## Limitation: CTEs on schemas with source fields

  Ecto allows developers to say that a table in their Ecto schema
  maps to a different column in their database:

      field :group_id, :integer, source: :iGroupId

  At the moment, using a schema with source fields in CTE may emit
  invalid queries. If you are running into such scenarios, your best
  option is to use a fragment as your CTE.
  '''
  defmacro with_cte(query, name, opts) do
    # :as is mandatory (the CTE body); :materialized and :operation are
    # optional pass-throughs to the builder.
    with_query = opts[:as]
    operation = opts[:operation]

    if !with_query do
      Builder.error!("`as` option must be specified")
    end

    Builder.CTE.build(query, name, with_query, opts[:materialized], operation, __CALLER__)
  end

  @doc """
  Enables or disables recursive mode for CTEs.

  According to the SQL standard it affects all CTEs in the query, not individual ones.

  See `with_cte/3` for an example of how to build a query with a recursive CTE.
  """
  def recursive_ctes(%__MODULE__{with_ctes: with_expr} = query, value) when is_boolean(value) do
    # Lazily create the WITH expression so the flag can be set before (or
    # after) any CTE is attached.
    with_expr = with_expr || %WithExpr{}
    with_expr = %{with_expr | recursive: value}
    %{query | with_ctes: with_expr}
  end

  def recursive_ctes(queryable, value) do
    recursive_ctes(Ecto.Queryable.to_query(queryable), value)
  end

  @doc """
  A select query expression.

  Selects which fields will be selected from the schema and any transformations
  that should be performed on the fields. Any expression that is accepted in a
  query can be a select field.

  Select also allows each expression to be wrapped in lists, tuples or maps as
  shown in the examples below. A full schema can also be selected.
+ + There can only be one select expression in a query, if the select expression + is omitted, the query will by default select the full schema. If `select` is + given more than once, an error is raised. Use `exclude/2` if you would like + to remove a previous select for overriding or see `select_merge/3` for a + limited version of `select` that is composable and can be called multiple + times. + + `select` also accepts a list of atoms where each atom refers to a field in + the source to be selected. + + ## Keywords examples + + from(c in City, select: c) # returns the schema as a struct + from(c in City, select: {c.name, c.population}) + from(c in City, select: [c.name, c.county]) + from(c in City, select: %{n: c.name, answer: 42}) + from(c in City, select: %{c | alternative_name: c.name}) + from(c in City, select: %Data{name: c.name}) + + It is also possible to select a struct and limit the returned + fields at the same time: + + from(City, select: [:name]) + + The syntax above is equivalent to: + + from(city in City, select: struct(city, [:name])) + + You can also write: + + from(city in City, select: map(city, [:name])) + + If you want a map with only the selected fields to be returned. + + To select a struct but omit only given fields, you can + override them with `nil` or another default value: + + from(city in City, select: %{city | geojson: nil, text: ""}) + + For more information, read the docs for `Ecto.Query.API.struct/2` + and `Ecto.Query.API.map/2`. 
+ + ## Expressions examples + + City |> select([c], c) + City |> select([c], {c.name, c.country}) + City |> select([c], %{"name" => c.name}) + City |> select([:name]) + City |> select([c], struct(c, [:name])) + City |> select([c], map(c, [:name])) + City |> select([c], %{c | geojson: nil, text: ""}) + + ## Dynamic parts + + Dynamics can be part of a `select` as values in a map that must be interpolated + at the root level: + + period = if monthly?, do: dynamic([p], p.month), else: dynamic([p], p.date) + metric = if distance?, do: dynamic([p], p.distance), else: dynamic([p], p.time) + + from(c in City, select: ^%{period: period, metric: metric}) + """ + defmacro select(query, binding \\ [], expr) do + Builder.Select.build(:select, query, binding, expr, __CALLER__) + end + + @doc """ + Mergeable select query expression. + + This macro is similar to `select/3` except it may be specified + multiple times as long as every entry is a map. This is useful + for merging and composing selects. For example: + + query = from p in Post, select: %{} + + query = + if include_title? do + from p in query, select_merge: %{title: p.title} + else + query + end + + query = + if include_visits? do + from p in query, select_merge: %{visits: p.visits} + else + query + end + + In the example above, the query is built little by little by merging + into a final map. If both conditions above are true, the final query + would be equivalent to: + + from p in Post, select: %{title: p.title, visits: p.visits} + + If `:select_merge` is called and there is no value selected previously, + it will default to the source, `p` in the example above. + + The argument given to `:select_merge` must always be a map. The value + being merged on must be a struct or a map. If it is a struct, the fields + merged later on must be part of the struct, otherwise an error is raised. 
+ + If the argument to `:select_merge` is a constructed struct + (`Ecto.Query.API.struct/2`) or map (`Ecto.Query.API.map/2`) where the source + to struct or map may be a `nil` value (as in an outer join), the source will + be returned unmodified. + + query = + Post + |> join(:left, [p], t in Post.Translation, + on: t.post_id == p.id and t.locale == ^"en" + ) + |> select_merge([_p, t], map(t, ^~w(title summary)a)) + + If there is no English translation for the post, the untranslated post + `title` will be returned and `summary` will be `nil`. If there is, both + `title` and `summary` will be the value from `Post.Translation`. + + `select_merge` cannot be used to set fields in associations, as + associations are always loaded later, overriding any previous value. + + Dynamics can be part of a `select_merge` as values in a map that must be + interpolated at the root level. The rules for merging detailed above apply. + This allows merging dynamic values into previously selected maps and structs. + """ + defmacro select_merge(query, binding \\ [], expr) do + Builder.Select.build(:merge, query, binding, expr, __CALLER__) + end + + @doc """ + A distinct query expression. + + When true, only keeps distinct values from the resulting + select expression. + + If supported by your database, you can also pass query expressions + to distinct and it will generate a query with DISTINCT ON. In such + cases, `distinct` accepts exactly the same expressions as `order_by` + and any `distinct` expression will be automatically prepended to the + `order_by` expressions in case there is any `order_by` expression. + + ## Keywords examples + + # Returns the list of different categories in the Post schema + from(p in Post, distinct: true, select: p.category) + + # If your database supports DISTINCT ON(), + # you can pass expressions to distinct too + from(p in Post, + distinct: p.category, + order_by: [p.date]) + + # The DISTINCT ON() also supports ordering similar to ORDER BY. 
+ from(p in Post, + distinct: [desc: p.category], + order_by: [p.date]) + + # Using atoms + from(p in Post, distinct: :category, order_by: :date) + + ## Expressions example + + Post + |> distinct(true) + |> order_by([p], [p.category, p.author]) + + """ + defmacro distinct(query, binding \\ [], expr) do + Builder.Distinct.build(query, binding, expr, __CALLER__) + end + + @doc """ + An AND where query expression. + + `where` expressions are used to filter the result set. If there is more + than one where expression, they are combined with an `and` operator. All + where expressions have to evaluate to a boolean value. + + `where` also accepts a keyword list where the field given as key is going to + be compared with the given value. The fields will always refer to the source + given in `from`. + + ## Keywords example + + from(c in City, where: c.country == "Sweden") + from(c in City, where: [country: "Sweden"]) + + It is also possible to interpolate the whole keyword list, allowing you to + dynamically filter the source: + + filters = [country: "Sweden"] + from(c in City, where: ^filters) + + ## Expressions examples + + City |> where([c], c.country == "Sweden") + City |> where(country: "Sweden") + + """ + defmacro where(query, binding \\ [], expr) do + Builder.Filter.build(:where, :and, query, binding, expr, __CALLER__) + end + + @doc """ + An OR where query expression. + + Behaves exactly the same as `where` except it combines with any previous + expression by using an `OR`. All expressions have to evaluate to a boolean + value. + + `or_where` also accepts a keyword list where each key is a field to be + compared with the given value. Each key-value pair will be combined + using `AND`, exactly as in `where`. 
+ + ## Keywords example + + from(c in City, where: [country: "Sweden"], or_where: [country: "Brazil"]) + + If interpolating keyword lists, the keyword list entries are combined + using ANDs and joined to any existing expression with an OR: + + filters = [country: "USA", name: "New York"] + from(c in City, where: [country: "Sweden"], or_where: ^filters) + + is equivalent to: + + from c in City, where: (c.country == "Sweden") or + (c.country == "USA" and c.name == "New York") + + The behaviour above is by design to keep the changes between `where` + and `or_where` minimal. Plus, if you have a keyword list and you + would like each pair to be combined using `or`, it can be easily done + with `Enum.reduce/3`: + + filters = [country: "USA", is_tax_exempt: true] + Enum.reduce(filters, City, fn {key, value}, query -> + from q in query, or_where: field(q, ^key) == ^value + end) + + which will be equivalent to: + + from c in City, or_where: (c.country == "USA"), or_where: c.is_tax_exempt == true + + ## Expressions example + + City |> where([c], c.country == "Sweden") |> or_where([c], c.country == "Brazil") + + """ + defmacro or_where(query, binding \\ [], expr) do + Builder.Filter.build(:where, :or, query, binding, expr, __CALLER__) + end + + @doc """ + An order by query expression. + + Orders the fields based on one or more fields. It accepts a single field + or a list of fields. The default direction is ascending (`:asc`) and can be + customized in a keyword list as one of the following: + + * `:asc` + * `:asc_nulls_last` + * `:asc_nulls_first` + * `:desc` + * `:desc_nulls_last` + * `:desc_nulls_first` + + The `*_nulls_first` and `*_nulls_last` variants are not supported by all + databases. While all databases default to ascending order, the choice of + "nulls first" or "nulls last" is specific to each database implementation. + + `order_by` may be invoked or listed in a query many times. New expressions + are appended to the existing ones. 
+ + `order_by` also accepts a list of atoms where each atom refers to a field in + source or a keyword list where the direction is given as key and the field + to order as value. + + ## Keywords examples + + from(c in City, order_by: c.name, order_by: c.population) + from(c in City, order_by: [c.name, c.population]) + from(c in City, order_by: [asc: c.name, desc: c.population]) + + from(c in City, order_by: [:name, :population]) + from(c in City, order_by: [asc: :name, desc_nulls_first: :population]) + + A keyword list can also be interpolated: + + values = [asc: :name, desc_nulls_first: :population] + from(c in City, order_by: ^values) + + A fragment can also be used: + + from c in City, order_by: [ + # A deterministic shuffled order + fragment("? % ? DESC", c.id, ^modulus), + desc: c.id, + ] + + It's also possible to order by an aliased or calculated column: + + from(c in City, + select: %{ + name: c.name, + total_population: + fragment( + "COALESCE(?, ?) + ? AS total_population", + c.animal_population, + 0, + c.human_population + ) + }, + order_by: [ + # based on `AS total_population` in the previous fragment + {:desc, fragment("total_population")} + ] + ) + + ## Expressions examples + + City |> order_by([c], asc: c.name, desc: c.population) + City |> order_by(asc: :name) # Sorts by the cities name + City |> order_by(^order_by_param) # Keyword list + + """ + defmacro order_by(query, binding \\ [], expr) do + Builder.OrderBy.build(query, binding, expr, :append, __CALLER__) + end + + @doc """ + An order by query expression that is prepended to existing ones. + + Accepts the same input as `order_by/3` except the expression will + come before any previously defined order by expression. This only + works with the macro-based query syntax and not the keyword-based + query syntax. 
+ + For example, the following will generate a query that orders by `human_population` + and then `name`: + + City |> order_by([c], c.name) |> prepend_order_by([c], c.human_population) + + The corresponding keyword-based syntax will raise an error: + + from c in City, order_by: c.name, prepend_order_by: c.human_population + + """ + defmacro prepend_order_by(query, binding \\ [], expr) do + Builder.OrderBy.build(query, binding, expr, :prepend, __CALLER__) + end + + @doc """ + A union query expression. + + Combines result sets of multiple queries. The `select` of each query + must be exactly the same, with the same types in the same order. + + Union expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to combine multiple result sets without removing duplicate rows + consider using `union_all/2`. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. `order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. + - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. 
+ + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + union(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. + + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, union: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + union_query = from c in Customer, select: c.city, union: ^supplier_query + from s in subquery(union_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> union(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + union(customer_query, ^supplier_query) + + """ + defmacro union(query, other_query) do + Builder.Combination.build(:union, query, other_query, __CALLER__) + end + + @doc """ + A union all query expression. + + Combines result sets of multiple queries. The `select` of each query + must be exactly the same, with the same types in the same order. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. 
`order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. + - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. + + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + union_all(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. 
+ + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, union_all: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + union_all_query = from c in Customer, select: c.city, union_all: ^supplier_query + from s in subquery(union_all_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> union_all(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + union_all(customer_query, ^supplier_query) + """ + defmacro union_all(query, other_query) do + Builder.Combination.build(:union_all, query, other_query, __CALLER__) + end + + @doc """ + An except (set difference) query expression. + + Takes the difference of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Except expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to take the difference of multiple result sets without + removing duplicate rows consider using `except_all/2`. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. `order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. 
+ - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. + + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + except(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. + + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, except: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + except_query = from c in Customer, select: c.city, except: ^supplier_query + from s in subquery(except_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> except(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + except(customer_query, ^supplier_query) + """ + defmacro except(query, other_query) do + Builder.Combination.build(:except, query, other_query, __CALLER__) + end + + @doc """ + An except (set difference) query expression. + + Takes the difference of the result sets of multiple queries. 
The + `select` of each query must be exactly the same, with the same + types in the same order. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. `order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. + - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. + + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + except_all(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. 
+ + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, except_all: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + except_all_query = from c in Customer, select: c.city, except_all: ^supplier_query + from s in subquery(except_all_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> except_all(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + except_all(customer_query, ^supplier_query) + """ + defmacro except_all(query, other_query) do + Builder.Combination.build(:except_all, query, other_query, __CALLER__) + end + + @doc """ + An intersect query expression. + + Takes the overlap of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Intersect expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to take the intersection of multiple result sets without + removing duplicate rows consider using `intersect_all/2`. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. `order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. 
+ - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. + + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + intersect(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. + + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, intersect: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + intersect_query = from c in Customer, select: c.city, intersect: ^supplier_query + from s in subquery(intersect_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> intersect(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + intersect(customer_query, ^supplier_query) + """ + defmacro intersect(query, other_query) do + Builder.Combination.build(:intersect, query, other_query, __CALLER__) + end + + @doc """ + An intersect query expression. + + Takes the overlap of the result sets of multiple queries. 
The + `select` of each query must be exactly the same, with the same + types in the same order. + + ## Combination behaviour + + There are several behaviours of combination queries that must be taken + into account, otherwise you may unexpectedly return the wrong query result. + + ### Order by, limit and offset + + The `order_by`, `limit` and `offset` expressions of the parent query apply + to the result of the entire combination. `order_by` must be specified in one + of the following ways, since the combination of two or more queries is not + automatically aliased: + + - Use `Ecto.Query.API.fragment/1` to pass an `order_by` statement + that directly access the combination fields. + - Wrap the combination in a subquery and refer to the binding of the subquery. + + ### Column selection ordering + + The columns of each of the queries in the combination must be specified in + the exact same order. Otherwise, you may see the values of one column appearing + in another. This holds for all types of select expressions, including maps. + + For example, the following query will interchange the values of the supplier's + name and city because that is the order the fields are specified in the customer + query. + + supplier_query = from s in Supplier, select: %{city: s.city, name: s.name} + customer_query = from c in Customer, select: %{name: c.name, city: c.city} + intersect_all(supplier_query, ^customer_query) + + ### Selecting literal atoms + + When selecting a literal atom, its value must be the same across all queries. + Otherwise, the value from the parent query will be applied to all other queries. + This also holds true for selecting maps with atom keys. 
+ + ## Keywords examples + + # Unordered result + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, intersect_all: ^supplier_query + + # Ordered result + supplier_query = from s in Supplier, select: s.city + intersect_all_query = from c in Customer, select: c.city, intersect_all: ^supplier_query + from s in subquery(intersect_all_query), order_by: s.city + + ## Expressions examples + + # Unordered result + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> intersect_all(^supplier_query) + + # Ordered result + customer_query = Customer |> select([c], c.city) |> order_by(fragment("city")) + supplier_query = Supplier |> select([s], s.city) + intersect_all(customer_query, ^supplier_query) + """ + defmacro intersect_all(query, other_query) do + Builder.Combination.build(:intersect_all, query, other_query, __CALLER__) + end + + @doc """ + A limit query expression. + + Limits the number of rows returned from the result. Can be any expression but + has to evaluate to an integer value and it can't include any field. + + If `limit` is given twice, it overrides the previous value. + + ## Keywords example + + from(u in User, where: u.id == ^current_user, limit: 1) + + ## Expressions example + + User |> where([u], u.id == ^current_user) |> limit(1) + + """ + defmacro limit(query, binding \\ [], expr) do + Builder.LimitOffset.build(:limit, query, binding, expr, __CALLER__) + end + + @doc """ + Enables or disables ties for limit expressions. + + If there are multiple records tied for the last position in an ordered + limit result, setting this value to `true` will return all of the tied + records, even if the final result exceeds the specified limit. + + Must be a boolean or evaluate to a boolean at runtime. Can only be applied + to queries with a `limit` expression or an error is raised. If `limit` + is redefined then `with_ties` must be reapplied. 
+ + Not all databases support this option and the ones that do might list it + under the `FETCH` command. Databases may require a corresponding `order_by` + statement to evaluate ties. + + ## Keywords example + + from(p in Post, where: p.author_id == ^current_user, order_by: [desc: p.visits], limit: 10, with_ties: true) + + ## Expressions example + + Post |> where([p], p.author_id == ^current_user) |> order_by([p], desc: p.visits) |> limit(10) |> with_ties(true) + + """ + defmacro with_ties(query, binding \\ [], expr) do + Builder.LimitOffset.build(:with_ties, query, binding, expr, __CALLER__) + end + + @doc """ + An offset query expression. + + Offsets the number of rows selected from the result. Can be any expression + but it must evaluate to an integer value and it can't include any field. + + If `offset` is given twice, it overrides the previous value. + + ## Keywords example + + # Get all posts on page 4 + from(p in Post, limit: 10, offset: 30) + + ## Expressions example + + Post |> limit(10) |> offset(30) + + """ + defmacro offset(query, binding \\ [], expr) do + Builder.LimitOffset.build(:offset, query, binding, expr, __CALLER__) + end + + @doc ~S""" + A lock query expression. + + Provides support for row-level pessimistic locking using + `SELECT ... FOR UPDATE` or other, database-specific, locking clauses. + `expr` can be any expression but has to evaluate to a boolean value or to a + string and it can't include any fields. + + If `lock` is used more than once, the last one used takes precedence. + + Ecto also supports [optimistic + locking](https://en.wikipedia.org/wiki/Optimistic_concurrency_control) but not + through queries. For more information on optimistic locking, have a look at + the `Ecto.Changeset.optimistic_lock/3` function. 
+ + ## Keywords example + + from(u in User, where: u.id == ^current_user, lock: "FOR SHARE NOWAIT") + + ## Expressions example + + User |> where([u], u.id == ^current_user) |> lock("FOR SHARE NOWAIT") + + """ + defmacro lock(query, binding \\ [], expr) do + Builder.Lock.build(query, binding, expr, __CALLER__) + end + + @doc ~S""" + An update query expression. + + Updates are used to update the filtered entries. In order for + updates to be applied, `c:Ecto.Repo.update_all/3` must be invoked. + + ## Keywords example + + from(u in User, update: [set: [name: "new name"]]) + + ## Expressions examples + + User |> update([u], set: [name: "new name"]) + User |> update(set: [name: "new name"]) + + ## Interpolation + + new_name = "new name" + from(u in User, update: [set: [name: ^new_name]]) + + new_name = "new name" + from(u in User, update: [set: [name: fragment("upper(?)", ^new_name)]]) + + ## Operators + + The update expression in Ecto supports the following operators: + + * `set` - sets the given field in the table to the given value + + from(u in User, update: [set: [name: "new name"]]) + + * `inc` - increments (or decrements if the value is negative) the given field in the table by the given value + + from(u in User, update: [inc: [accesses: 1]]) + + * `push` - pushes (appends) the given value to the end of the array field + + from(u in User, update: [push: [tags: "cool"]]) + + * `pull` - pulls (removes) the given value from the array field + + from(u in User, update: [pull: [tags: "not cool"]]) + + ## Composable + + Remember that all query expressions are composable, so you can use `update` + multiple times in the same query to merge the update expressions: + + new_name = "new name" + User + |> update([u], set: [name: fragment("upper(?)", ^new_name)]) + |> update([u], set: [age: 42]) + + This can be useful to compose updates from different functions + or when mixing interpolation, such as `set: ^updates`, with regular + query expressions, such as `set: [age: u.age + 
1]`. + """ + defmacro update(query, binding \\ [], expr) do + Builder.Update.build(query, binding, expr, __CALLER__) + end + + @doc """ + A group by query expression. + + Groups together rows from the schema that have the same values in the given + fields. Using `group_by` "groups" the query giving it different semantics + in the `select` expression. If a query is grouped, only fields that were + referenced in the `group_by` can be used in the `select` or if the field + is given as an argument to an aggregate function. + + `group_by` also accepts a list of atoms where each atom refers to + a field in source. For more complicated queries you can access fields + directly instead of atoms. + + ## Keywords examples + + # Returns the number of posts in each category + from(p in Post, + group_by: p.category, + select: {p.category, count(p.id)}) + + # Using atoms + from(p in Post, group_by: :category, select: {p.category, count(p.id)}) + + # Using direct fields access + from(p in Post, + join: c in assoc(p, :category), + group_by: [p.id, c.name]) + + ## Expressions example + + Post |> group_by([p], p.category) |> select([p], count(p.id)) + + """ + defmacro group_by(query, binding \\ [], expr) do + Builder.GroupBy.build(query, binding, expr, __CALLER__) + end + + @doc """ + An AND having query expression. + + Like `where`, `having` filters rows from the schema, but after the grouping is + performed giving it the same semantics as `select` for a grouped query + (see `group_by/3`). `having` groups the query even if the query has no + `group_by` expression. 
+ + ## Keywords example + + # Returns the number of posts in each category where the + # average number of comments is above ten + from(p in Post, + group_by: p.category, + having: avg(p.num_comments) > 10, + select: {p.category, count(p.id)}) + + ## Expressions example + + Post + |> group_by([p], p.category) + |> having([p], avg(p.num_comments) > 10) + |> select([p], count(p.id)) + """ + defmacro having(query, binding \\ [], expr) do + Builder.Filter.build(:having, :and, query, binding, expr, __CALLER__) + end + + @doc """ + An OR having query expression. + + Like `having` but combines with the previous expression by using + `OR`. `or_having` behaves for `having` the same way `or_where` + behaves for `where`. + + ## Keywords example + + # Augment a previous group_by with a having condition. + from(p in query, or_having: avg(p.num_comments) > 10) + + ## Expressions example + + # Augment a previous group_by with a having condition. + Post |> or_having([p], avg(p.num_comments) > 10) + + """ + defmacro or_having(query, binding \\ [], expr) do + Builder.Filter.build(:having, :or, query, binding, expr, __CALLER__) + end + + @doc """ + Preloads the associations into the result set. + + Imagine you have a schema `Post` with a `has_many :comments` + association and you execute the following query: + + Repo.all from p in Post, preload: [:comments] + + The example above will fetch all posts from the database and then do + a separate query returning all comments associated with the given posts. + The comments are then processed and associated to each returned `post` + under the `comments` field. + + Often times, you may want posts and comments to be selected and + filtered in the same query. 
For such cases, you can explicitly tell + an existing join to be preloaded into the result set: + + Repo.all from p in Post, + join: c in assoc(p, :comments), + where: c.published_at > p.updated_at, + preload: [comments: c] + + In the example above, instead of issuing a separate query to fetch + comments, Ecto will fetch posts and comments in a single query and + then do a separate pass associating each comment to its parent post. + Therefore, instead of returning `number_of_posts * number_of_comments` + results, like a `join` would, it returns only posts with the `comments` + fields properly filled in. + + Nested associations can also be preloaded in both formats: + + Repo.all from p in Post, + preload: [:author, comments: :likes] + + Repo.all from p in Post, + join: c in assoc(p, :comments), + join: l in assoc(c, :likes), + where: l.inserted_at > c.updated_at, + preload: [:author, comments: {c, likes: l}] + + ## Choosing between preloading with joins vs. separate queries + + Deciding between preloading associations via joins, a single large + query, (`preload: [comments: c]`) or separate smaller queries + (`preload: [:comments]`) depends on the specific use case. + Here are some factors to guide your decision: + + * **Joins reduce database round trips:** By fetching data in a single + query, joins can minimize database round trips, potentially reducing + overall latency. + * **Potential for data duplication:** Joins may lead to duplicated + data in the result set, which requires more processing by Ecto + and consumes more bandwidth when transmitting the results. + * **Parallelism with separate queries:** When using separate queries + outside of a transaction, Ecto can parallelize the preload queries, + which can speed up the overall operation. + + In general, a good default is to only use joins in preloads if you're + already joining the associations in the main query. 
For example, + in the last query in the section above, comments and likes are already + joined, so they are included in the preload. + However, the author is not joined in the main query, so it is preloaded + via a separate query. + + ## Preload queries + + Preload also allows queries to be given, allowing you to filter or + customize how the preloads are fetched: + + comments_query = from c in Comment, order_by: c.published_at + Repo.all from p in Post, preload: [comments: ^comments_query] + + The example above will issue two queries, one for loading posts and + then another for loading the comments associated with the posts. + Comments will be ordered by `published_at`. + + When specifying a preload query, you can still nest preloads. + For instance, you could preload an author's published posts and + their comments as follows: + + posts_query = from p in Post, where: p.state == :published + Repo.all from a in Author, preload: [posts: ^{posts_query, [:comments]}] + + If you prefer, you can also add additional preloads directly in the + `posts_query`: + + posts_query = + from p in Post, where: p.state == :published, preload: :related_posts + + Note: keep in mind operations like limit and offset in the preload + query will affect the whole result set and not each association. For + example, the query below: + + comments_query = from c in Comment, order_by: c.popularity, limit: 5 + Repo.all from p in Post, preload: [comments: ^comments_query] + + won't bring the top of comments per post. Rather, it will only bring + the 5 top comments across all posts. 
Instead, you must use a window: + + ranking_query = + from c in Comment, + select: %{id: c.id, row_number: over(row_number(), :posts_partition)}, + windows: [posts_partition: [partition_by: :post_id, order_by: :popularity]] + + comments_query = + from c in Comment, + join: r in subquery(ranking_query), + on: c.id == r.id and r.row_number <= 5 + + Repo.all from p in Post, preload: [comments: ^comments_query] + + For `:through` associations, such as a post may have many comments_authors, + written as `has_many :comments_authors, through: [:comments, :author]` + the query given to preload customizes the relationship between comments and + authors, even if preloaded through posts. Another way to put it, in case of + `:through` associations, the query given to preload customizes the last join + of the association chain. This means `order_by` clauses on `:through` + associations affect only the direct relationship between `comments` and + `authors`, not between posts and comments. + + ## Preload functions + + Preload also allows functions to be given. If the function has an arity of 1, + it receives only the IDs of the parent association. If it has an arity of 2, it + receives the IDS of the parent association as the first argument and the association + metadata as the second argument. Both functions must return the associated data. + Ecto then will map this data and sort it by the relationship key: + + comment_preloader = fn post_ids -> fetch_comments_by_post_ids(post_ids) end + Repo.all from p in Post, preload: [comments: ^comment_preloader] + + This is useful when the whole dataset was already loaded or must be + explicitly fetched from elsewhere. The IDs received by the preloading + function and the result returned depends on the association type: + + * For `has_many` and `belongs_to` - the function receives the IDs of + the parent association and it must return a list of maps or structs + with the associated entries. 
The associated map/struct must contain + the "foreign_key" field. For example, if a post has many comments, + when preloading the comments with a custom function, the function + will receive a list of "post_ids" as the argument and it must return + maps or structs representing the comments. The maps/structs must + include the `:post_id` field + + * For `has_many :through` - it behaves similarly to a regular `has_many` + but note that the IDs received are of the last association. Imagine, + for example, a post has many comments and each comment has an author. + Therefore, a post may have many comments_authors, written as + `has_many :comments_authors, through: [:comments, :author]`. When + preloading authors with a custom function via `:comments_authors`, + the function will receive the IDs of the authors as the last step + + * For `many_to_many` - the function receives the IDs of the parent + association and it must return a tuple with the parent id as the first + element and the association map or struct as the second. For example, + if a post has many tags, when preloading the tags with a custom + function, the function will receive a list of "post_ids" as the argument + and it must return a tuple in the format of `{post_id, tag}` + + The 2-arity version of the function is especially useful if you would like to + build a general preloader that works across all associations. 
For example, if + you would like to build a preloader for lateral joins that finds the newest + associations you may do the following: + + lateral_preloader = fn ids, assoc -> newest_records(ids, assoc, 5) end + + def newest_records(parent_ids, assoc, n) do + %{related_key: related_key, queryable: queryable} = assoc + + squery = + from q in queryable, + where: field(q, ^related_key) == parent_as(:parent_ids).id, + order_by: {:desc, :created_at}, + limit: ^n + + query = + from f in fragment("SELECT id from UNNEST(?::int[]) AS id", ^parent_ids), as: :parent_ids, + inner_lateral_join: s in subquery(squery), on: true, + select: s + + Repo.all(query) + end + + For the list of available metadata, see the module documentation of the association types. + For example, see `Ecto.Association.BelongsTo`. + + ## Dynamic preloads + + Preloads can also be specified dynamically using the [`dynamic`](`dynamic/2`) macro: + + preloads = [comments: dynamic([comments: c], c)] + + Repo.all from p in Post, + join: c in assoc(p, :comments), + as: :comments, + where: c.published_at > p.updated_at, + preload: ^preloads + + See `dynamic/2` for more information. + + ## Keywords example + + # Returns all posts, their associated comments, and the associated + # likes for those comments. + from(p in Post, + preload: [comments: :likes], + select: p + ) + + ## Expressions examples + + Post |> preload(:comments) |> select([p], p) + + Post + |> join(:left, [p], c in assoc(p, :comments)) + |> preload([p, c], [:user, comments: c]) + |> select([p], p) + + """ + defmacro preload(query, bindings \\ [], expr) do + Builder.Preload.build(query, bindings, expr, __CALLER__) + end + + @doc """ + Restricts the query to return the first result ordered by primary key. + + The query will be automatically ordered by the primary key + unless `order_by` is given or `order_by` is set in the query. + Limit is always set to 1. 
+ + ## Examples + + Post |> first |> Repo.one + query |> first(:inserted_at) |> Repo.one + """ + def first(queryable, order_by \\ nil) + + def first(%Ecto.Query{} = query, nil) do + query = %{query | limit: limit()} + + case query do + %{order_bys: []} -> + %{query | order_bys: [order_by_pk(query, :asc)]} + + %{} -> + query + end + end + + def first(queryable, nil), do: first(Ecto.Queryable.to_query(queryable), nil) + def first(queryable, key), do: first(order_by(queryable, ^key), nil) + + @doc """ + Restricts the query to return the last result ordered by primary key. + + The query ordering will be automatically reversed, with ASC + columns becoming DESC columns (and vice-versa) and limit is set + to 1. If there is no ordering, the query will be automatically + ordered decreasingly by primary key. + + ## Examples + + Post |> last |> Repo.one + query |> last(:inserted_at) |> Repo.one + """ + def last(queryable, order_by \\ nil) + def last(queryable, nil), do: %{reverse_order(queryable) | limit: limit()} + def last(queryable, key), do: last(order_by(queryable, ^key), nil) + + defp limit do + %LimitExpr{expr: 1, params: [], file: __ENV__.file, line: __ENV__.line} + end + + defp field(ix, field) when is_integer(ix) and is_atom(field) do + {{:., [], [{:&, [], [ix]}, field]}, [], []} + end + + defp order_by_pk(query, dir) do + schema = assert_schema!(query) + pks = schema.__schema__(:primary_key) + expr = for pk <- pks, do: {dir, field(0, pk)} + %ByExpr{expr: expr, file: __ENV__.file, line: __ENV__.line} + end + + defp assert_schema!(%{from: %Ecto.Query.FromExpr{source: {_source, schema}}}) + when schema != nil, + do: schema + + defp assert_schema!(query) do + raise Ecto.QueryError, query: query, message: "expected a from expression with a schema" + end + + @doc """ + Returns `true` if the query has a binding with the given name, otherwise `false`. + + For more information on named bindings see ["Named bindings"](#module-named-bindings) + in this module doc. 
+ """ + def has_named_binding?(%Ecto.Query{aliases: aliases}, key) do + Map.has_key?(aliases, key) + end + + def has_named_binding?(queryable, _key) + when is_atom(queryable) or is_binary(queryable) or is_tuple(queryable) do + false + end + + def has_named_binding?(queryable, key) do + has_named_binding?(Ecto.Queryable.to_query(queryable), key) + end + + @doc """ + Applies a callback function to a query if it doesn't contain the given named binding. + Otherwise, returns the original query. + + The callback function must accept a queryable and return an `Ecto.Query` struct + that contains the provided named binding, otherwise an error is raised. It can also + accept second argument which is the atom representing the name of a binding. + + For example, one might use this function as a convenience to conditionally add a new + named join to a query: + + if has_named_binding?(query, :comments) do + query + else + join(query, :left, [p], c in assoc(p, :comments), as: :comments) + end + + With this function it can be simplified to: + + with_named_binding(query, :comments, fn query, binding -> + join(query, :left, [p], a in assoc(p, ^binding), as: ^binding) + end) + + For more information on named bindings see ["Named bindings"](#module-named-bindings) + in this module doc or `has_named_binding?/2`. 
+ """ + def with_named_binding(%Ecto.Query{} = query, key, fun) do + if has_named_binding?(query, key) do + query + else + query + |> apply_binding_callback(fun, key) + |> raise_on_invalid_callback_return(key) + end + end + + def with_named_binding(queryable, key, fun) do + queryable + |> Ecto.Queryable.to_query() + |> with_named_binding(key, fun) + end + + defp apply_binding_callback(query, fun, _key) when is_function(fun, 1), do: query |> fun.() + defp apply_binding_callback(query, fun, key) when is_function(fun, 2), do: query |> fun.(key) + + defp apply_binding_callback(_query, fun, _key) do + raise ArgumentError, + "callback function for with_named_binding/3 should accept one or two arguments, got: #{inspect(fun)}" + end + + defp raise_on_invalid_callback_return(%Ecto.Query{} = query, key) do + if has_named_binding?(query, key) do + query + else + raise RuntimeError, + "callback function for with_named_binding/3 should create a named binding for key #{inspect(key)}" + end + end + + defp raise_on_invalid_callback_return(other, _key) do + raise RuntimeError, + "callback function for with_named_binding/3 should return an Ecto.Query struct, got: #{inspect(other)}" + end + + @doc """ + The same as `has_named_binding?/2` but allowed in guards. + """ + @doc guard: true + defguard is_named_binding(query, name) + when is_struct(query, Ecto.Query) and is_map_key(query.aliases, name) + + @doc """ + Reverses the ordering of the query. + + ASC columns become DESC columns (and vice-versa). If the query + has no `order_by`s, it orders by the inverse of the primary key. 
+ + ## Examples + + query |> reverse_order() |> Repo.one() + Post |> order_by(asc: :id) |> reverse_order() == Post |> order_by(desc: :id) + """ + def reverse_order(%Ecto.Query{} = query) do + update_in(query.order_bys, fn + [] -> [order_by_pk(query, :desc)] + order_bys -> Enum.map(order_bys, &reverse_order_by/1) + end) + end + + def reverse_order(queryable) do + reverse_order(Ecto.Queryable.to_query(queryable)) + end + + defp reverse_order_by(%{expr: expr} = order_by) do + %{ + order_by + | expr: + Enum.map(expr, fn + {:desc, ast} -> {:asc, ast} + {:desc_nulls_last, ast} -> {:asc_nulls_first, ast} + {:desc_nulls_first, ast} -> {:asc_nulls_last, ast} + {:asc, ast} -> {:desc, ast} + {:asc_nulls_last, ast} -> {:desc_nulls_first, ast} + {:asc_nulls_first, ast} -> {:desc_nulls_last, ast} + end) + } + end +end diff --git a/deps/ecto/lib/ecto/query/api.ex b/deps/ecto/lib/ecto/query/api.ex new file mode 100644 index 0000000..3dfb349 --- /dev/null +++ b/deps/ecto/lib/ecto/query/api.ex @@ -0,0 +1,974 @@ +defmodule Ecto.Query.API do + @moduledoc """ + Lists all functions allowed in the query API. + + * Comparison operators: `==`, `!=`, `<=`, `>=`, `<`, `>` + * Arithmetic operators: `+`, `-`, `*`, `/` + * Boolean operators: `and`, `or`, `not` + * Inclusion operator: `in/2` + * Subquery operators: `any`, `all` and `exists` + * Search functions: `like/2` and `ilike/2` + * Null check functions: `is_nil/1` + * Aggregates: `count/0`, `count/1`, `avg/1`, `sum/1`, `min/1`, `max/1` + * Date/time intervals: `datetime_add/3`, `date_add/3`, `from_now/2`, `ago/2` + * Inside select: `struct/2`, `map/2`, `merge/2`, `selected_as/2` and literals (map, tuples, lists, etc) + * General: `fragment/1`, `field/2`, `type/2`, `as/1`, `parent_as/1`, `selected_as/1` + + Note the functions in this module exist for documentation + purposes and one should never need to invoke them directly. + Furthermore, it is possible to define your own macros and + use them in Ecto queries (see docs for `fragment/1`). 
+ + ## Intervals + + Ecto supports following values for `interval` option: `"year"`, `"month"`, + `"week"`, `"day"`, `"hour"`, `"minute"`, `"second"`, `"millisecond"`, and + `"microsecond"`. + + `Date`/`Time` functions like `datetime_add/3`, `date_add/3`, `from_now/2`, + `ago/2` take `interval` as an argument. + + ## Window API + + Ecto also supports many of the windows functions found + in SQL databases. See `Ecto.Query.WindowAPI` for more + information. + + ## About the arithmetic operators + + The Ecto implementation of these operators provide only + a thin layer above the adapters. So if your adapter allows you + to use them in a certain way (like adding a date and an + interval in PostgreSQL), it should work just fine in Ecto + queries. + """ + + @dialyzer :no_return + + @doc """ + Binary `==` operation. + """ + def left == right, do: doc!([left, right]) + + @doc """ + Binary `!=` operation. + """ + def left != right, do: doc!([left, right]) + + @doc """ + Binary `<=` operation. + """ + def left <= right, do: doc!([left, right]) + + @doc """ + Binary `>=` operation. + """ + def left >= right, do: doc!([left, right]) + + @doc """ + Binary `<` operation. + """ + def left < right, do: doc!([left, right]) + + @doc """ + Binary `>` operation. + """ + def left > right, do: doc!([left, right]) + + @doc """ + Binary `+` operation. + """ + def left + right, do: doc!([left, right]) + + @doc """ + Binary `-` operation. + """ + def left - right, do: doc!([left, right]) + + @doc """ + Binary `*` operation. + """ + def left * right, do: doc!([left, right]) + + @doc """ + Binary `/` operation. + """ + def left / right, do: doc!([left, right]) + + @doc """ + Binary `and` operation. + """ + def left and right, do: doc!([left, right]) + + @doc """ + Binary `or` operation. + """ + def left or right, do: doc!([left, right]) + + @doc """ + Unary `not` operation. + + It is used to negate values in `:where`. 
It is also used to match + the assert the opposite of `in/2`, `is_nil/1`, and `exists/1`. + For example: + + from p in Post, where: p.id not in [1, 2, 3] + + from p in Post, where: not is_nil(p.title) + + # Retrieve all the posts that doesn't have comments. + from p in Post, + as: :post, + where: + not exists( + from( + c in Comment, + where: parent_as(:post).id == c.post_id + ) + ) + + """ + def not value, do: doc!([value]) + + @doc """ + Checks if the left-value is included in the right one. + + from p in Post, where: p.id in [1, 2, 3] + + The right side may either be a literal list, an interpolated list, + any struct that implements the `Enumerable` protocol, or even a + column in the database with array type: + + from p in Post, where: "elixir" in p.tags + + Additionally, the right side may also be a subquery, which should return + a single column: + + from c in Comment, where: c.post_id in subquery( + from(p in Post, where: p.created_at > ^since, select: p.id) + ) + """ + def left in right, do: doc!([left, right]) + + @doc """ + Evaluates to true if the provided subquery returns 1 or more rows. + + from p in Post, + as: :post, + where: + exists( + from( + c in Comment, + where: parent_as(:post).id == c.post_id and c.replies_count > 5, + select: 1 + ) + ) + + This is best used in conjunction with `parent_as/1` to correlate the subquery + with the parent query to test some condition on related rows in a different table. + In the above example the query returns posts which have at least one comment that + has more than 5 replies. + """ + def exists(subquery), do: doc!([subquery]) + + @doc """ + Tests whether one or more values returned from the provided subquery match in a comparison operation. 
+ + from p in Product, where: p.id == any( + from(li in LineItem, select: [li.product_id], where: li.created_at > ^since and li.qty >= 10) + ) + + A product matches in the above example if a line item was created since the provided date where the customer purchased + at least 10 units. + + Both `any` and `all` must be given a subquery as an argument, and they must be used on the right hand side of a comparison. + Both can be used with every comparison operator: `==`, `!=`, `>`, `>=`, `<`, `<=`. + """ + def any(subquery), do: doc!([subquery]) + + @doc """ + Evaluates whether all values returned from the provided subquery match in a comparison operation. + + from p in Post, where: p.visits >= all( + from(p in Post, select: avg(p.visits), group_by: [p.category_id]) + ) + + For a post to match in the above example it must be visited at least as much as the average post in all categories. + + from p in Post, where: p.visits == all( + from(p in Post, select: max(p.visits)) + ) + + The above example matches all the posts which are tied for being the most visited. + + Both `any` and `all` must be given a subquery as an argument, and they must be used on the right hand side of a comparison. + Both can be used with every comparison operator: `==`, `!=`, `>`, `>=`, `<`, `<=`. + """ + def all(subquery), do: doc!([subquery]) + + @doc """ + Searches for `search` in `string`. + + from p in Post, where: like(p.body, "Chapter%") + + Translates to the underlying SQL LIKE query, therefore + its behaviour is dependent on the database. In particular, + PostgreSQL will do a case-sensitive operation, while the + majority of other databases will be case-insensitive. For + performing a case-insensitive `like` in PostgreSQL, see `ilike/2`. + + You should be very careful when allowing user sent data to be used + as part of LIKE query, since they allow to perform + [LIKE-injections](https://githubengineering.com/like-injection/). 
+ """ + def like(string, search), do: doc!([string, search]) + + @doc """ + Searches for `search` in `string` in a case insensitive fashion. + + from p in Post, where: ilike(p.body, "Chapter%") + + Translates to the underlying SQL ILIKE query. This operation is + only available on PostgreSQL. + """ + def ilike(string, search), do: doc!([string, search]) + + @doc """ + Checks if the given value is nil. + + from p in Post, where: is_nil(p.published_at) + + To check if a given value is not nil use: + + from p in Post, where: not is_nil(p.published_at) + """ + def is_nil(value), do: doc!([value]) + + @doc """ + Counts the entries in the table. + + from p in Post, select: count() + """ + def count, do: doc!([]) + + @doc """ + Counts the given entry. + + from p in Post, select: count(p.id) + """ + def count(value), do: doc!([value]) + + @doc """ + Counts the distinct values in given entry. + + from p in Post, select: count(p.id, :distinct) + """ + def count(value, :distinct), do: doc!([value, :distinct]) + + @doc """ + Takes the first value which is not null, or null if they both are. + + In SQL, COALESCE takes any number of arguments, but in ecto + it only takes two, so it must be chained to achieve the same + effect. + + from p in Payment, select: p.value |> coalesce(p.backup_value) |> coalesce(0) + """ + def coalesce(value, expr), do: doc!([value, expr]) + + @doc """ + Applies the given expression as a FILTER clause against an + aggregate. This is currently only supported by Postgres. + + from p in Payment, select: filter(avg(p.value), p.value > 0 and p.value < 100) + + from p in Payment, select: avg(p.value) |> filter(p.value < 0) + """ + def filter(value, filter), do: doc!([value, filter]) + + @doc """ + Calculates the average for the given entry. + + from p in Payment, select: avg(p.value) + """ + def avg(value), do: doc!([value]) + + @doc """ + Calculates the sum for the given entry. 
+ + from p in Payment, select: sum(p.value) + """ + def sum(value), do: doc!([value]) + + @doc """ + Calculates the minimum for the given entry. + + from p in Payment, select: min(p.value) + """ + def min(value), do: doc!([value]) + + @doc """ + Calculates the maximum for the given entry. + + from p in Payment, select: max(p.value) + """ + def max(value), do: doc!([value]) + + @doc """ + Adds a given interval to a datetime. + + The first argument is a `datetime`, the second one is the count + for the interval, which may be either positive or negative and + the interval value: + + # Get all items published since the last month + from p in Post, where: p.published_at > + datetime_add(^NaiveDateTime.utc_now(), -1, "month") + + In the example above, we used `datetime_add/3` to subtract one month + from the current datetime and compared it with the `p.published_at`. + If you want to perform operations on date, `date_add/3` could be used. + + See [Intervals](#module-intervals) for supported `interval` values. + """ + def datetime_add(datetime, count, interval), do: doc!([datetime, count, interval]) + + @doc """ + Adds a given interval to a date. + + See `datetime_add/3` for more information. + + See [Intervals](#module-intervals) for supported `interval` values. + """ + def date_add(date, count, interval), do: doc!([date, count, interval]) + + @doc """ + Adds the given interval to the current time in UTC. + + The current time in UTC is retrieved from Elixir and + not from the database. + + See [Intervals](#module-intervals) for supported `interval` values. + + ## Examples + + from a in Account, where: a.expires_at < from_now(3, "month") + + """ + def from_now(count, interval), do: doc!([count, interval]) + + @doc """ + Subtracts the given interval from the current time in UTC. + + The current time in UTC is retrieved from Elixir and + not from the database. + + See [Intervals](#module-intervals) for supported `interval` values. 
+ + ## Examples + + from p in Post, where: p.published_at > ago(3, "month") + """ + def ago(count, interval), do: doc!([count, interval]) + + @doc """ + Send fragments directly to the database. + + It is not possible to represent all possible database queries using + Ecto's query syntax. When such is required, it is possible to use + fragments to send any expression to the database: + + def unpublished_by_title(title) do + from p in Post, + where: is_nil(p.published_at) and + fragment("lower(?)", p.title) == ^title + end + + Every occurrence of the `?` character will be interpreted as a place + for parameters, which must be given as additional arguments to + `fragment`. If the literal character `?` is required as part of the + fragment, it can be escaped with `\\\\?` (one escape for strings, + another for fragment). + + In the example above, we are using the lower procedure in the + database to downcase the title column. + + It is very important to keep in mind that Ecto is unable to do any + type casting when fragments are used. Therefore it may be necessary + to explicitly cast parameters via `type/2`: + + fragment("lower(?)", p.title) == type(^title, :string) + + ## Identifiers and Constants + + Sometimes you need to interpolate an identifier or a constant value into a fragment, + instead of a query parameter. The latter can happen if your database does not allow + parameterizing certain clauses. For example: + + collation = "es_ES" + fragment("? COLLATE ?", ^name, ^collation) + + limit = "10" + "posts" |> select([p], p.title) |> limit(fragment("?", ^limit)) + + The first example above won't work because `collation` needs to be quoted as an identifier. + The second example won't work on databases that do not allow passing query parameters + as part of `limit`. + + You can address this by telling Ecto to treat these values differently than a query parameter: + + fragment("? 
COLLATE ?", ^name, identifier(^collation)) + "posts" |> select([p], p.title) |> limit(fragment("?", ^constant(limit)) + + Ecto will make these values directly part of the query, handling quoting and escaping where necessary. + + > #### Query caching {: .warning} + > + > Because identifiers and constants are made part of the query, each different + > value will generate a separate query, with its own cache. + + ## Splicing + + Sometimes you may need to interpolate a variable number of arguments + into the same fragment. For example, when overriding Ecto's default + `where` behaviour for Postgres: + + from p in Post, where: fragment("? in (?, ?)", p.id, val1, val2) + + The example above will only work if you know the number of arguments + upfront. If it can vary, the above will not work. + + You can address this by telling Ecto to splice a list argument into + the fragment: + + from p in Post, where: fragment("? in (?)", p.id, splice(^val_list)) + + This will let Ecto know it should expand the values of the list into + separate fragment arguments. For example: + + from p in Post, where: fragment("? in (?)", p.id, splice(^[1, 2, 3])) + + would be expanded into + + from p in Post, where: fragment("? in (?,?,?)", p.id, ^1, ^2, ^3) + + ## Defining custom functions using macros and fragment + + You can add a custom Ecto query function using macros. For example + to expose SQL's coalesce function you can define this macro: + + defmodule CustomFunctions do + defmacro coalesce(left, right) do + quote do + fragment("coalesce(?, ?)", unquote(left), unquote(right)) + end + end + end + + To have coalesce/2 available, just import the module that defines it. + + import CustomFunctions + + The only downside is that it will show up as a fragment when + inspecting the Elixir query. Other than that, it should be + equivalent to a built-in Ecto query function. 
+ + ## Keyword fragments + + In order to support databases that do not have string-based + queries, like MongoDB, fragments also allow keywords to be given: + + from p in Post, + where: fragment(title: ["$eq": ^some_value]) + + """ + def fragment(fragments), do: doc!([fragments]) + + @doc """ + Allows a dynamic identifier to be injected into a fragment: + + collation = "es_ES" + select("posts", [p], fragment("? COLLATE ?", p.title, identifier(^collation))) + + The example above will inject the value of `collation` directly + into the query instead of treating it as a query parameter. It will + generate a query such as `SELECT p0.title COLLATE "es_ES" FROM "posts" AS p0` + as opposed to `SELECT p0.title COLLATE $1 FROM "posts" AS p0`. + + Note that each different value of `collation` will emit a different query, + which will be independently prepared and cached. + """ + def identifier(binary), do: doc!([binary]) + + @doc """ + Allows a dynamic string or number to be injected into a fragment: + + limit = 10 + "posts" |> select([p], p.title) |> limit(fragment("?", constant(^limit))) + + The example above will inject the value of `limit` directly + into the query instead of treating it as a query parameter. It will + generate a query such as `SELECT p0.title FROM "posts" AS p0 LIMIT 1` + as opposed to `SELECT p0.title FROM "posts" AS p0 LIMIT $1`. + + Note that each different value of `limit` will emit a different query, + which will be independently prepared and cached. + """ + def constant(value), do: doc!([value]) + + @doc """ + Allows a list argument to be spliced into a fragment. + + from p in Post, where: fragment("? in (?)", p.id, splice(^[1, 2, 3])) + + The example above will be transformed at runtime into the following: + + from p in Post, where: fragment("? in (?,?,?)", p.id, ^1, ^2, ^3) + + You may only splice runtime values. 
For example, this would not work because + query bindings are compile-time constructs: + + from p in Post, where: fragment("concat(?)", splice(^[p.count, " ", "count"])) + """ + def splice(list), do: doc!([list]) + + @doc """ + Creates a values list/constant table. + + A values list can be used as a source in a query, both in `Ecto.Query.from/2` + and `Ecto.Query.join/5`. + + The first argument is a list of maps representing the values of the constant table. + An error is raised if the list is empty or if every map does not have exactly the + same fields. + + The second argument is either a map of types or an Ecto schema containing all the + fields in the first argument. + + Each field must be given a type or an error is raised. Any type that can be specified in + a schema may be used. + + Queries using a values list are not cacheable by Ecto. + + ## Select with map types example + + values = [%{id: 1, text: "abc"}, %{id: 2, text: "xyz"}] + types = %{id: :integer, text: :string} + + query = + from v1 in values(values, types), + join: v2 in values(values, types), + on: v1.id == v2.id + + Repo.all(query) + + ## Select with schema types example + + values = [%{id: 1, text: "abc"}, %{id: 2, text: "xyz"}] + types = ValuesSchema + + query = + from v1 in values(values, types), + join: v2 in values(values, types), + on: v1.id == v2.id + + Repo.all(query) + + ## Delete example + values = [%{id: 1, text: "abc"}, %{id: 2, text: "xyz"}] + types = %{id: :integer, text: :string} + + query = + from p in Post, + join: v in values(values, types), + on: p.id == v.id, + where: p.counter == ^0 + + Repo.delete_all(query) + + ## Update example + values = [%{id: 1, text: "abc"}, %{id: 2, text: "xyz"}] + types = %{id: :integer, text: :string} + + query = + from p in Post, + join: v in values(values, types), + on: p.id == v.id, + update: [set: [text: v.text]] + + Repo.update_all(query, []) + """ + def values(values, types), do: doc!([values, types]) + + @doc """ + Allows a field to be 
dynamically accessed. + + The source name can be a binding (`p` in `from p in Post`) or a named binding + using `as/1` or `parent_as/1`. The named binding maybe a literal atom or an + interpolation. + + The field name can be given as either an atom or a string. In a schemaless + query, the two types of names behave the same. However, when referencing + a field from a schema the behaviours are different. + + Using an atom to reference a schema field will inherit all the properties from + the schema. For example, the field name will be changed to the value of `:source` + before generating the final query and its type behaviour will be dictated by the + one specified in the schema. + + Using a string to reference a schema field is equivalent to bypassing all of the + above and accessing the field directly from the source (i.e. the underlying table). + This means the name will not be changed to the value of `:source` and the type + behaviour will be dictated by the underlying driver (e.g. Postgrex or MyXQL). + + Take the following schema and query: + + defmodule Car do + use Ecto.Schema + + schema "cars" do + field :doors, source: :num_doors + field :tires, source: :num_tires + end + end + + def at_least_four(doors_or_tires) do + from c in Car, + where: field(c, ^doors_or_tires) >= 4 + end + + def at_least_four(query, doors_or_tires) do + from q in query, + where: field(as(:car), ^doors_or_tires) >= 4 + end + + def at_least_four(query, binding, doors_or_tires) do + from q in query, + where: field(as(^binding), ^doors_or_tires) >= 4 + end + + In the example above, `at_least_four(:doors)` and `at_least_four("num_doors")` + would be valid ways to return the set of cars having at least 4 doors. + + String names can be particularly useful when your application is dynamically + generating many schemaless queries at runtime and you want to avoid creating + a large number of atoms. 
+ """ + def field(source, field), do: doc!([source, field]) + + @doc """ + Used in `select` to specify which struct fields should be returned. + + For example, if you don't need all fields to be returned + as part of a struct, you can filter it to include only certain + fields by using `struct/2`: + + from p in Post, + select: struct(p, [:title, :body]) + + `struct/2` can also be used to dynamically select fields: + + fields = [:title, :body] + from p in Post, select: struct(p, ^fields) + + As a convenience, `select` allows developers to take fields + without an explicit call to `struct/2`: + + from p in Post, select: [:title, :body] + + Or even dynamically: + + fields = [:title, :body] + from p in Post, select: ^fields + + For preloads, the selected fields may be specified from the parent: + + from(city in City, preload: :country, + select: struct(city, [:country_id, :name, country: [:id, :population]])) + + If the same source is selected multiple times with a `struct`, + the fields are merged in order to avoid fetching multiple copies + from the database. In other words, the expression below: + + from(city in City, preload: :country, + select: {struct(city, [:country_id]), struct(city, [:name])}) + + is expanded to: + + from(city in City, preload: :country, + select: {struct(city, [:country_id, :name]), struct(city, [:country_id, :name])}) + + **IMPORTANT**: When filtering fields for associations, you + MUST include the foreign keys used in the relationship, + otherwise Ecto will be unable to find associated records. + """ + def struct(source, fields), do: doc!([source, fields]) + + @doc """ + Used in `select` to specify which fields should be returned as a map. 
+ + For example, if you don't need all fields to be returned or + neither need a struct, you can use `map/2` to achieve both: + + from p in Post, + select: map(p, [:title, :body]) + + `map/2` can also be used to dynamically select fields: + + fields = [:title, :body] + from p in Post, select: map(p, ^fields) + + If the same source is selected multiple times with a `map`, + the fields are merged in order to avoid fetching multiple copies + from the database. In other words, the expression below: + + from(city in City, preload: :country, + select: {map(city, [:country_id]), map(city, [:name])}) + + is expanded to: + + from(city in City, preload: :country, + select: {map(city, [:country_id, :name]), map(city, [:country_id, :name])}) + + For preloads, the selected fields may be specified from the parent: + + from(city in City, preload: :country, + select: map(city, [:country_id, :name, country: [:id, :population]])) + + It's also possible to select a struct from one source but only a subset of + fields from one of its associations: + + from(city in City, preload: :country, + select: %{city | country: map(country: [:id, :population])}) + + **IMPORTANT**: When filtering fields for associations, you + MUST include the foreign keys used in the relationship, + otherwise Ecto will be unable to find associated records. + """ + def map(source, fields), do: doc!([source, fields]) + + @doc """ + Merges the map on the right over the map on the left. + + If the map on the left side is a struct, Ecto will check + all of the field on the right previously exist on the left + before merging. + + from(city in City, select: merge(city, %{virtual_field: "some_value"})) + + This function is primarily used by `Ecto.Query.select_merge/3` + to merge different select clauses. + """ + def merge(left_map, right_map), do: doc!([left_map, right_map]) + + @doc """ + Returns value from the `json_field` pointed to by `path`. 
+ + from(post in Post, select: json_extract_path(post.meta, ["author", "name"])) + + The path can be dynamic: + + path = ["author", "name"] + from(post in Post, select: json_extract_path(post.meta, ^path)) + + And the field can also be dynamic in combination with it: + + path = ["author", "name"] + from(post in Post, select: json_extract_path(field(post, :meta), ^path)) + + The query can be also rewritten as: + + from(post in Post, select: post.meta["author"]["name"]) + + Path elements can be integers to access values in JSON arrays: + + from(post in Post, select: post.meta["tags"][0]["name"]) + + Some adapters allow path elements to be references to query source fields + + from(post in Post, select: post.meta[p.title]) + from(p in Post, join: u in User, on: p.user_id == u.id, select: p.meta[u.name]) + + Any element of the path can be dynamic: + + field = "name" + from(post in Post, select: post.meta["author"][^field]) + + source_field = :source_column + from(post in Post, select: post.meta["author"][field(p, ^source_field)]) + + ## Warning: indexes on PostgreSQL + + PostgreSQL supports indexing on jsonb columns via GIN indexes. + Whenever comparing the value of a jsonb field against a string + or integer, Ecto will use the containment operator @> which + is optimized. You can even use the more efficient `jsonb_path_ops` + GIN index variant. For more information, consult PostgreSQL's docs + on [JSON indexing](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING). + + ## Warning: return types + + The underlying data in the JSON column is returned without any + additional decoding. This means "null" JSON values are not the + same as SQL's "null". 
+  For example, the `Repo.all` operation below
+  returns an empty list because `post.meta["author"]` returns JSON's
+  null and therefore `is_nil` does not succeed:
+
+      Repo.insert!(%Post{meta: %{author: nil}})
+      Repo.all(from(post in Post, where: is_nil(post.meta["author"])))
+
+  Similarly, other types, such as datetimes, are returned as strings.
+  This means conditions like `post.meta["published_at"] > from_now(-1, "day")`
+  may return incorrect results or fail as the underlying database
+  tries to compare incompatible types. You can, however, use `type/2`
+  to force the types on the database level.
+  """
+  def json_extract_path(json_field, path), do: doc!([json_field, path])
+
+  @doc """
+  Casts the given value to the given type at the database level.
+
+  Most of the time, Ecto is able to properly cast interpolated
+  values due to its type checking mechanism. In some situations
+  though, you may want to tell Ecto that a parameter has some
+  particular type:
+
+      type(^title, :string)
+
+  It is also possible to say the type must match that of a column:
+
+      type(^title, p.title)
+
+  Or a parameterized type, which must be previously initialized
+  with `Ecto.ParameterizedType.init/2`:
+
+      @my_enum Ecto.ParameterizedType.init(Ecto.Enum, values: [:foo, :bar, :baz])
+      type(^title, ^@my_enum)
+
+  Ecto will ensure `^title` is cast to the given type and enforce such
+  type at the database level. If the value is returned in a `select`,
+  Ecto will also enforce the proper type throughout.
+ + When performing arithmetic operations, `type/2` can be used to cast + all the parameters in the operation to the same type: + + from p in Post, + select: type(p.visits + ^a_float + ^a_integer, :decimal) + + Inside `select`, `type/2` can also be used to cast fragments: + + type(fragment("NOW"), :naive_datetime) + + Or to type fields from schemaless queries: + + from p in "posts", select: type(p.cost, :decimal) + + Or to type aggregation results: + + from p in Post, select: type(avg(p.cost), :integer) + from p in Post, select: type(filter(avg(p.cost), p.cost > 0), :integer) + + Or to type comparison expression results: + + from p in Post, select: type(coalesce(p.cost, 0), :integer) + + Or to type fields from a parent query using `parent_as/1`: + + child = from c in Comment, where: type(parent_as(:posts).id, :string) == c.text + from Post, as: :posts, inner_lateral_join: c in subquery(child), select: c.text + + ## `type` vs `fragment` + + `type/2` is all about Ecto types. Therefore, you can perform `type(expr, :string)` + but not `type(expr, :text)`, because `:text` is not an actual Ecto type. If you want + to perform casting exclusively at the database level, you can use fragment. For example, + in PostgreSQL, you might do `fragment("?::text", p.column)`. + """ + def type(interpolated_value, type), do: doc!([interpolated_value, type]) + + @doc """ + Refer to a named atom binding. + + See [Named Bindings](Ecto.Query.html#module-named-bindings) for more information. + """ + def as(binding), do: doc!([binding]) + + @doc """ + Refer to a named atom binding in the parent query. + + This is available only inside subqueries. + + See [Named Bindings](Ecto.Query.html#module-named-bindings) for more information. + """ + def parent_as(binding), do: doc!([binding]) + + @doc """ + Refer to an alias of a selected value. + + This can be used to refer to aliases created using `selected_as/2`. If + the alias hasn't been created using `selected_as/2`, an error will be raised. 
+
+  Each database has its own rules governing which clauses can reference these aliases.
+  If an error is raised mentioning an unknown column, most likely the alias is being
+  referenced somewhere that is not allowed. Consult the documentation for the database
+  to ensure the alias is being referenced correctly.
+  """
+  def selected_as(name), do: doc!([name])
+
+  @doc """
+  Creates an alias for the given selected value.
+
+  When working with calculated values, an alias can be used to simplify
+  the query. Otherwise, the entire expression would need to be copied when
+  referencing it outside of select statements.
+
+  This comes in handy when, for instance, you would like to use the calculated
+  value in `Ecto.Query.group_by/3` or `Ecto.Query.order_by/3`:
+
+      from p in Post,
+        select: %{
+          posted: selected_as(p.posted, :date),
+          sum_visits: p.visits |> coalesce(0) |> sum() |> selected_as(:sum_visits)
+        },
+        group_by: selected_as(:date),
+        order_by: selected_as(:sum_visits)
+
+  The name of the alias must be an atom and it can only be used in the outermost
+  select expression, otherwise an error is raised. Please note that the alias name
+  does not have to match the key when `select` returns a map, struct or keyword list.
+
+  Using this in conjunction with `selected_as/1` is recommended to ensure only defined aliases
+  are referenced.
+ + ## Subqueries and CTEs + + Subqueries and CTEs automatically alias the selected fields, for example, one can write: + + # Subquery + s = from p in Post, select: %{visits: coalesce(p.visits, 0)} + from(s in subquery(s), select: s.visits) + + # CTE + cte_query = from p in Post, select: %{visits: coalesce(p.visits, 0)} + Post |> with_cte("cte", as: ^cte_query) |> join(:inner, [p], c in "cte") |> select([p, c], c.visits) + + However, one can also use `selected_as` to override the default naming: + + # Subquery + s = from p in Post, select: %{visits: coalesce(p.visits, 0) |> selected_as(:num_visits)} + from(s in subquery(s), select: s.num_visits) + + # CTE + cte_query = from p in Post, select: %{visits: coalesce(p.visits, 0) |> selected_as(:num_visits)} + Post |> with_cte("cte", as: ^cte_query) |> join(:inner, [p], c in "cte") |> select([p, c], c.num_visits) + + The name given to `selected_as/2` can also be referenced in `selected_as/1`, + as in regular queries. + """ + def selected_as(selected_value, name), do: doc!([selected_value, name]) + + defp doc!(_) do + raise "the functions in Ecto.Query.API should not be invoked directly, " <> + "they serve for documentation purposes only" + end +end diff --git a/deps/ecto/lib/ecto/query/builder.ex b/deps/ecto/lib/ecto/query/builder.ex new file mode 100644 index 0000000..aa815fe --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder.ex @@ -0,0 +1,1631 @@ +defmodule Ecto.Query.Builder do + @moduledoc false + + alias Ecto.Query + + @comparisons [ + is_nil: 1, + ==: 2, + !=: 2, + <: 2, + >: 2, + <=: 2, + >=: 2 + ] + + @dynamic_aggregates [ + max: 1, + min: 1, + first_value: 1, + last_value: 1, + nth_value: 2, + lag: 3, + lead: 3, + lag: 2, + lead: 2, + lag: 1, + lead: 1 + ] + + @static_aggregates [ + count: {0, :integer}, + count: {1, :integer}, + count: {2, :integer}, + avg: {1, :any}, + sum: {1, :any}, + row_number: {0, :integer}, + rank: {0, :integer}, + dense_rank: {0, :integer}, + percent_rank: {0, :any}, + cume_dist: {0, 
:any}, + ntile: {1, :integer} + ] + + @select_alias_dummy_value [] + + @typedoc """ + Quoted types store primitive types and types in the format + {source, quoted}. The latter are handled directly in the planner, + never forwarded to Ecto.Type. + + The Ecto.Type module concerns itself only with runtime types, + which include all primitive types and custom user types. Also + note custom user types do not show up during compilation time. + """ + @type quoted_type :: Ecto.Type.primitive() | {non_neg_integer, atom | Macro.t()} + + @typedoc """ + The accumulator during escape. + + If the subqueries field is available, subquery escaping must take place. + """ + @type acc :: %{ + optional(:subqueries) => list(Macro.t()), + optional(:take) => %{non_neg_integer => Macro.t()}, + optional(any) => any + } + + @doc """ + Smart escapes a query expression and extracts interpolated values in + a map. + + Everything that is a query expression will be escaped, interpolated + expressions (`^foo`) will be moved to a map unescaped and replaced + with `^index` in the query where index is a number indexing into the + map. 
+ """ + @spec escape( + Macro.t(), + quoted_type | {:in, quoted_type} | {:out, quoted_type} | {:splice, quoted_type}, + {list, acc}, + Keyword.t(), + Macro.Env.t() | {Macro.Env.t(), fun} + ) :: {Macro.t(), {list, acc}} + def escape(expr, type, params_acc, vars, env) + + # var.x - where var is bound + def escape({{:., _, [callee, field]}, _, []}, _type, params_acc, vars, _env) + when is_atom(field) do + {escape_field!(callee, field, vars), params_acc} + end + + # field macro + def escape({:field, _, [callee, field]}, _type, params_acc, vars, _env) do + {escape_field!(callee, field, vars), params_acc} + end + + # param interpolation + def escape({:^, _, [arg]}, type, {params, acc}, _vars, _env) do + expr = {:{}, [], [:^, [], [length(params)]]} + params = [{arg, type} | params] + {expr, {params, acc}} + end + + # tagged types + def escape({:type, _, [{:^, _, [arg]}, type]}, _type, {params, acc}, vars, env) do + type = validate_type!(type, vars, env) + expr = {:{}, [], [:type, [], [{:{}, [], [:^, [], [length(params)]]}, type]]} + params = [{arg, type} | params] + {expr, {params, acc}} + end + + def escape( + {:type, _, [{{:., _, [{var, _, context}, field]}, _, []} = expr, type]}, + _type, + params_acc, + vars, + env + ) + when is_atom(var) and is_atom(context) and is_atom(field) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{:coalesce, _, [_ | _]} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{:field, _, [_ | _]} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{math_op, _, [_, _]} = op_expr, type]}, _type, params_acc, vars, env) + when math_op in ~w(+ - * /)a do + escape_with_type(op_expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{fun, _, args} = expr, type]}, _type, params_acc, vars, env) + when is_list(args) and fun in 
~w(fragment avg count max min sum over filter)a do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape( + {:type, _, [{:json_extract_path, _, [_ | _]} = expr, type]}, + _type, + params_acc, + vars, + env + ) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape( + {:type, _, [{{:., _, [Access, :get]}, _, _} = access_expr, type]}, + _type, + params_acc, + vars, + env + ) do + escape_with_type(access_expr, type, params_acc, vars, env) + end + + def escape( + {:type, _, [{{:., _, [{:parent_as, _, [_parent]}, _field]}, _, []} = expr, type]}, + _type, + params_acc, + vars, + env + ) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, meta, [expr, type]}, given_type, params_acc, vars, env) do + case Macro.expand_once(expr, get_env(env)) do + ^expr -> + error!(""" + the first argument of type/2 must be one of: + + * interpolations, such as ^value + * fields, such as p.foo or field(p, :foo) + * fragments, such as fragment("foo(?)", value) + * an arithmetic expression (+, -, *, /) + * an aggregation or window expression (avg, count, min, max, sum, over, filter) + * a conditional expression (coalesce) + * access/json paths (p.column[0].field) + * parent_as/1 (parent_as(:parent).field) + + Got: #{Macro.to_string(expr)} + """) + + expanded -> + escape({:type, meta, [expanded, type]}, given_type, params_acc, vars, env) + end + end + + # fragments + def escape({:fragment, _, [query]}, _type, params_acc, vars, env) when is_list(query) do + {escaped, params_acc} = + Enum.map_reduce(query, params_acc, &escape_kw_fragment(&1, &2, vars, env)) + + {{:{}, [], [:fragment, [], [escaped]]}, params_acc} + end + + def escape({:fragment, _, [{:^, _, [var]} = _expr]}, _type, params_acc, _vars, _env) do + expr = quote do: Ecto.Query.Builder.fragment!(unquote(var)) + {{:{}, [], [:fragment, [], [expr]]}, params_acc} + end + + def escape({:fragment, _, [query | frags]}, _type, params_acc, vars, env) do + pieces = 
expand_and_split_fragment(query, env) + + if length(pieces) != length(frags) + 1 do + error!( + "fragment(...) expects extra arguments in the same amount of question marks in string. " <> + "It received #{length(frags)} extra argument(s) but expected #{length(pieces) - 1}" + ) + end + + {frags, params_acc} = Enum.map_reduce(frags, params_acc, &escape_fragment(&1, &2, vars, env)) + {{:{}, [], [:fragment, [], merge_fragments(pieces, frags)]}, params_acc} + end + + # subqueries + def escape({:subquery, _, [expr]}, _, {params, %{subqueries: subqueries} = acc}, _vars, _env) do + subquery = quote(do: Ecto.Query.subquery(unquote(expr))) + index = length(subqueries) + # used both in ast and in parameters, as a placeholder. + expr = {:subquery, index} + acc = %{acc | subqueries: [subquery | subqueries]} + {expr, {[expr | params], acc}} + end + + # interval + + def escape({:from_now, meta, [count, interval]}, type, params_acc, vars, env) do + utc = quote do: ^DateTime.utc_now() + escape({:datetime_add, meta, [utc, count, interval]}, type, params_acc, vars, env) + end + + def escape({:ago, meta, [count, interval]}, type, params_acc, vars, env) do + utc = quote do: ^DateTime.utc_now() + + count = + case count do + {:^, meta, [value]} -> + negate = quote do: Ecto.Query.Builder.negate!(unquote(value)) + {:^, meta, [negate]} + + value -> + {:-, [], [value]} + end + + escape({:datetime_add, meta, [utc, count, interval]}, type, params_acc, vars, env) + end + + def escape({:datetime_add, _, [datetime, count, interval]} = expr, type, params_acc, vars, env) do + assert_type!(expr, type, {:supertype, :datetime}) + {datetime, params_acc} = escape(datetime, {:supertype, :datetime}, params_acc, vars, env) + {count, interval, params_acc} = escape_interval(count, interval, params_acc, vars, env) + {{:{}, [], [:datetime_add, [], [datetime, count, interval]]}, params_acc} + end + + def escape({:date_add, _, [date, count, interval]} = expr, type, params_acc, vars, env) do + assert_type!(expr, 
type, :date) + {date, params_acc} = escape(date, :date, params_acc, vars, env) + {count, interval, params_acc} = escape_interval(count, interval, params_acc, vars, env) + {{:{}, [], [:date_add, [], [date, count, interval]]}, params_acc} + end + + # json + def escape({:json_extract_path, _, [field, path]}, type, params_acc, vars, env) do + validate_json_field!(field) + + path = escape_json_path(path, vars) + {field, params_acc} = escape(field, type, params_acc, vars, env) + {{:{}, [], [:json_extract_path, [], [field, path]]}, params_acc} + end + + def escape({{:., meta, [Access, :get]}, _, [left, _]} = expr, type, params_acc, vars, env) do + case left do + {{:., _, _}, _, _} -> + {expr, path} = parse_access_get(expr, []) + escape({:json_extract_path, meta, [expr, path]}, type, params_acc, vars, env) + + _ -> + error!("`#{Macro.to_string(expr)}` is not a valid query expression") + end + end + + # sigils + def escape({name, _, [_, []]} = sigil, type, params_acc, vars, _env) + when name in ~w(sigil_s sigil_S sigil_w sigil_W)a do + {literal(sigil, type, vars), params_acc} + end + + # lists + def escape(list, type, params_acc, vars, env) when is_list(list) do + if Enum.all?(list, &(is_binary(&1) or is_number(&1) or is_boolean(&1))) do + {literal(list, type, vars), params_acc} + else + fun = + case type do + {:array, inner_type} -> + &escape(&1, inner_type, &2, vars, env) + + _ -> + # In case we don't have an array nor a literal at compile-time, + # such as p.links == [^value], we don't do any casting nor validation. + # We may want to tackle this if the expression above is ever used. + &escape(&1, :any, &2, vars, env) + end + + Enum.map_reduce(list, params_acc, fun) + end + end + + # literals + def escape({:<<>>, _, args} = expr, type, params_acc, vars, _env) do + valid? = + Enum.all?(args, fn + {:"::", _, [left, _]} -> is_integer(left) or is_binary(left) + left -> is_integer(left) or is_binary(left) + end) + + unless valid? 
do + error!( + "`#{Macro.to_string(expr)}` is not a valid query expression. " <> + "Only literal binaries and strings are allowed, " <> + "dynamic values need to be explicitly interpolated in queries with ^" + ) + end + + {literal(expr, type, vars), params_acc} + end + + def escape({:-, _, [number]}, type, params_acc, vars, _env) when is_number(number), + do: {literal(-number, type, vars), params_acc} + + def escape(number, type, params_acc, vars, _env) when is_number(number), + do: {literal(number, type, vars), params_acc} + + def escape(binary, type, params_acc, vars, _env) when is_binary(binary), + do: {literal(binary, type, vars), params_acc} + + def escape(nil, _type, params_acc, _vars, _env), + do: {nil, params_acc} + + def escape(atom, type, params_acc, vars, _env) when is_atom(atom), + do: {literal(atom, type, vars), params_acc} + + # negate any expression + def escape({:-, meta, arg}, type, params_acc, vars, env) do + {escaped_arg, params_acc} = escape(arg, type, params_acc, vars, env) + expr = {:{}, [], [:-, meta, escaped_arg]} + {expr, params_acc} + end + + # comparison operators + def escape({comp_op, _, [left, right]} = expr, type, params_acc, vars, env) + when comp_op in ~w(== != < > <= >=)a do + assert_type!(expr, type, :boolean) + + if is_nil(left) or is_nil(right) do + error!( + "comparison with nil in `#{Macro.to_string(expr)}` is forbidden as it is unsafe. 
" <> + "If you want to check if a value is nil, use is_nil/1 instead" + ) + end + + ltype = quoted_type(right, vars) + rtype = quoted_type(left, vars) + + {escaped_left, params_acc} = escape(left, ltype, params_acc, vars, env) + {escaped_right, params_acc} = escape(right, rtype, params_acc, vars, env) + + {params, acc} = params_acc + + params = + params + |> wrap_nil(escaped_left, right) + |> wrap_nil(escaped_right, left) + + {{:{}, [], [comp_op, [], [escaped_left, escaped_right]]}, {params, acc}} + end + + # mathematical operators + def escape({math_op, _, [left, right]}, type, params_acc, vars, env) + when math_op in ~w(+ - * /)a do + {left, params_acc} = escape(left, type, params_acc, vars, env) + {right, params_acc} = escape(right, type, params_acc, vars, env) + + {{:{}, [], [math_op, [], [left, right]]}, params_acc} + end + + # in operator + def escape({:in, _, [left, right]} = expr, type, params_acc, vars, env) + when is_list(right) + when is_tuple(right) and elem(right, 0) in ~w(sigil_w sigil_W @)a do + assert_type!(expr, type, :boolean) + + right = Macro.expand_once(right, get_env(env)) + {:array, ltype} = quoted_type(right, vars) + rtype = {:array, quoted_type(left, vars)} + + {left, params_acc} = escape(left, ltype, params_acc, vars, env) + {right, params_acc} = escape(right, rtype, params_acc, vars, env) + {{:{}, [], [:in, [], [left, right]]}, params_acc} + end + + def escape({:in, _, [left, right]} = expr, type, params_acc, vars, env) do + assert_type!(expr, type, :boolean) + + ltype = {:out, quoted_type(right, vars)} + rtype = {:in, quoted_type(left, vars)} + + {left, params_acc} = escape(left, ltype, params_acc, vars, env) + {right, params_acc} = escape(right, rtype, params_acc, vars, env) + + # Remove any type wrapper from the right side + right = + case right do + {:{}, [], [:type, [], [right, _]]} -> right + _ -> right + end + + {{:{}, [], [:in, [], [left, right]]}, params_acc} + end + + def escape({:count, _, [arg, :distinct]}, type, params_acc, 
vars, env) do + {arg, params_acc} = escape(arg, type, params_acc, vars, env) + expr = {:{}, [], [:count, [], [arg, :distinct]]} + {expr, params_acc} + end + + def escape({:filter, _, [aggregate]}, type, params_acc, vars, env) do + escape(aggregate, type, params_acc, vars, env) + end + + def escape({:filter, _, [aggregate, filter_expr]}, type, params_acc, vars, env) do + {aggregate, params_acc} = escape(aggregate, type, params_acc, vars, env) + {filter_expr, params_acc} = escape(filter_expr, :boolean, params_acc, vars, env) + {{:{}, [], [:filter, [], [aggregate, filter_expr]]}, params_acc} + end + + def escape({:coalesce, _, [left, right]}, type, params_acc, vars, env) do + {left, params_acc} = escape(left, type, params_acc, vars, env) + {right, params_acc} = escape(right, type, params_acc, vars, env) + {{:{}, [], [:coalesce, [], [left, right]]}, params_acc} + end + + def escape({:over, _, [{agg_name, _, agg_args} | over_args]}, type, params_acc, vars, env) do + aggregate = {agg_name, [], agg_args || []} + {aggregate, params_acc} = escape_window_function(aggregate, type, params_acc, vars, env) + {window, params_acc} = escape_window_description(over_args, params_acc, vars, env) + {{:{}, [], [:over, [], [aggregate, window]]}, params_acc} + end + + def escape({:selected_as, _, [_expr, _name]}, _type, _params_acc, _vars, _env) do + error!(""" + selected_as/2 can only be used at the root of a select statement. \ + If you are trying to use it inside of an expression, consider putting the \ + expression inside of `selected_as/2` instead. 
For instance, instead of: + + from p in Post, select: coalesce(selected_as(p.visits, :v), 0) + + use: + + from p in Post, select: selected_as(coalesce(p.visits, 0), :v) + """) + end + + def escape({:selected_as, _, [name]}, _type, params_acc, _vars, _env) do + name = quoted_atom!(name, "selected_as/1") + expr = {:{}, [], [:selected_as, [], [name]]} + {expr, params_acc} + end + + def escape({quantifier, meta, [subquery]}, type, params_acc, vars, env) + when quantifier in [:all, :any, :exists] do + {subquery, params_acc} = escape({:subquery, meta, [subquery]}, type, params_acc, vars, env) + {{:{}, [], [quantifier, [], [subquery]]}, params_acc} + end + + def escape({:=, _, _} = expr, _type, _params_acc, _vars, _env) do + error!( + "`#{Macro.to_string(expr)}` is not a valid query expression. " <> + "The match operator is not supported: `=`. " <> + "Did you mean to use `==` instead?" + ) + end + + def escape({op, _, _}, _type, _params_acc, _vars, _env) when op in ~w(|| && !)a do + error!( + "short-circuit operators are not supported: `#{op}`. 
" <> + "Instead use boolean operators: `and`, `or`, and `not`" + ) + end + + # Tuple + def escape({left, right}, type, params_acc, vars, env) do + escape({:{}, [], [left, right]}, type, params_acc, vars, env) + end + + # Tuple + def escape({:{}, _, list}, {:tuple, types}, params_acc, vars, env) do + if Enum.count(list) == Enum.count(types) do + {list, params_acc} = + list + |> Enum.zip(types) + |> Enum.map_reduce(params_acc, fn {expr, type}, params_acc -> + escape(expr, type, params_acc, vars, env) + end) + + expr = {:{}, [], [:{}, [], list]} + {expr, params_acc} + else + escape({:{}, [], list}, :any, params_acc, vars, env) + end + end + + # Tuple + def escape({:{}, _, _}, _, _, _, _) do + error!("Tuples can only be used in comparisons with literal tuples of the same size") + end + + # Unnecessary parentheses around an expression + def escape({:__block__, _, [expr]}, type, params_acc, vars, env) do + escape(expr, type, params_acc, vars, env) + end + + # Other functions - no type casting + def escape({name, _, args} = expr, type, params_acc, vars, env) + when is_atom(name) and is_list(args) do + case call_type(name, length(args)) do + {in_type, out_type} -> + assert_type!(expr, type, out_type) + escape_call(expr, in_type, params_acc, vars, env) + + nil -> + try_expansion(expr, type, params_acc, vars, env) + end + end + + # Finally handle vars + def escape({var, _, context}, _type, params_acc, vars, _env) + when is_atom(var) and is_atom(context) do + {escape_var!(var, vars), params_acc} + end + + # Raise nice error messages for fun calls. + def escape({fun, _, args} = other, _type, _params_acc, _vars, _env) + when is_atom(fun) and is_list(args) do + error!(""" + `#{Macro.to_string(other)}` is not a valid query expression. 
\ + If you are trying to invoke a function that is not supported by Ecto, \ + you can use fragments: + + fragment("some_function(?, ?, ?)", m.some_field, 1) + + See Ecto.Query.API to learn more about the supported functions and \ + Ecto.Query.API.fragment/1 to learn more about fragments. + """) + end + + # Raise nice error message for remote calls + def escape({{:., _, [_, fun]}, _, _} = other, type, params_acc, vars, env) + when is_atom(fun) do + try_expansion(other, type, params_acc, vars, env) + end + + # For everything else we raise + def escape(other, _type, _params_acc, _vars, _env) do + error!("`#{Macro.to_string(other)}` is not a valid query expression") + end + + defp escape_with_type(expr, {:^, _, [type]}, params_acc, vars, env) do + {expr, params_acc} = escape(expr, :any, params_acc, vars, env) + {{:{}, [], [:type, [], [expr, type]]}, params_acc} + end + + defp escape_with_type(expr, type, params_acc, vars, env) do + type = validate_type!(type, vars, env) + {expr, params_acc} = escape(expr, type, params_acc, vars, env) + {{:{}, [], [:type, [], [expr, escape_type(type)]]}, params_acc} + end + + defp escape_type({:parameterized, _} = param), do: Macro.escape(param) + defp escape_type(type), do: type + + defp validate_json_field!({{:., _, _}, _, _}), do: :ok + defp validate_json_field!({:field, _, _}), do: :ok + + defp validate_json_field!(unsupported_field), + do: error!("`#{Macro.to_string(unsupported_field)}` is not a valid json field") + + defp wrap_nil(params, {:{}, _, [:^, _, [ix]]}, to_compare), + do: wrap_nil(params, length(params) - ix - 1, to_compare, []) + + defp wrap_nil(params, _other, _to_compare), do: params + + defp wrap_nil([{val, type} | params], 0, to_compare, acc) do + val = + quote do: Ecto.Query.Builder.not_nil!(unquote(val), unquote(Macro.to_string(to_compare))) + + Enum.reverse(acc, [{val, type} | params]) + end + + defp wrap_nil([pair | params], i, to_compare, acc) do + wrap_nil(params, i - 1, to_compare, [pair | acc]) + end + + 
defp expand_and_split_fragment(query, env) do + case Macro.expand(query, get_env(env)) do + binary when is_binary(binary) -> + split_fragment(binary, "") + + _ -> + error!(bad_fragment_message(Macro.to_string(query))) + end + end + + defp bad_fragment_message(arg) do + "to prevent SQL injection attacks, fragment(...) does not allow strings " <> + "to be interpolated as the first argument via the `^` operator, got: `#{arg}`" + end + + defp split_fragment(<<>>, consumed), + do: [consumed] + + defp split_fragment(<>, consumed), + do: [consumed | split_fragment(rest, "")] + + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) + + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) + + @doc "Returns fragment pieces, given a fragment string and arguments." + def fragment_pieces(frag, args) do + frag + |> split_fragment("") + |> merge_fragments(args) + end + + defp escape_window_description([], params_acc, _vars, _env), + do: {[], params_acc} + + defp escape_window_description([window_name], params_acc, _vars, _env) + when is_atom(window_name), + do: {window_name, params_acc} + + defp escape_window_description([kw], params_acc, vars, env) do + case Ecto.Query.Builder.Windows.escape(kw, params_acc, vars, env) do + {runtime, [], params_acc} -> + {runtime, params_acc} + + {_, [{key, _} | _], _} -> + error!( + "windows definitions given to over/2 do not allow interpolations at the root of " <> + "`#{key}`. 
Please use Ecto.Query.windows/3 to explicitly define a window instead" + ) + end + end + + defp escape_window_function(expr, type, params_acc, vars, env) do + expr + |> validate_window_function!(env) + |> escape(type, params_acc, vars, env) + end + + defp validate_window_function!({:fragment, _, _} = expr, _env), do: expr + + defp validate_window_function!({agg, _, args} = expr, env) + when is_atom(agg) and is_list(args) do + if Code.ensure_loaded?(Ecto.Query.WindowAPI) and + function_exported?(Ecto.Query.WindowAPI, agg, length(args)) do + expr + else + case Macro.expand_once(expr, get_env(env)) do + ^expr -> + error!( + "unknown window function #{agg}/#{length(args)}. " <> + "See Ecto.Query.WindowAPI for all available functions" + ) + + expr -> + validate_window_function!(expr, env) + end + end + end + + defp validate_window_function!(expr, _), do: expr + + defp escape_call({name, _, args}, type, params_acc, vars, env) do + {args, params_acc} = Enum.map_reduce(args, params_acc, &escape(&1, type, &2, vars, env)) + expr = {:{}, [], [name, [], args]} + {expr, params_acc} + end + + defp escape_field!({var, _, context}, field, vars) + when is_atom(var) and is_atom(context) do + var = escape_var!(var, vars) + field = quoted_atom_or_string!(field, "field/2") + dot = {:{}, [], [:., [], [var, field]]} + {:{}, [], [dot, [], []]} + end + + defp escape_field!({kind, _, [value]}, field, _vars) + when kind in [:as, :parent_as] do + value = late_binding!(kind, value) + as = {:{}, [], [kind, [], [value]]} + field = quoted_atom_or_string!(field, "field/2") + dot = {:{}, [], [:., [], [as, field]]} + {:{}, [], [dot, [], []]} + end + + defp escape_field!(expr, field, _vars) do + error!(""" + cannot fetch field `#{Macro.to_string(field)}` from `#{Macro.to_string(expr)}`. 
Can only fetch fields from: + + * sources, such as `p` in `from p in Post` + * named bindings, such as `as(:post)` in `from Post, as: :post` + * parent named bindings, such as `parent_as(:post)` in a subquery + """) + end + + defp escape_interval(count, interval, params_acc, vars, env) do + type = + cond do + is_float(count) -> :float + is_integer(count) -> :integer + true -> :decimal + end + + {count, params_acc} = escape(count, type, params_acc, vars, env) + {count, quoted_interval!(interval), params_acc} + end + + defp escape_kw_fragment({key, [{_, _} | _] = exprs}, params_acc, vars, env) when is_atom(key) do + {escaped, params_acc} = + Enum.map_reduce(exprs, params_acc, &escape_kw_fragment(&1, &2, vars, env)) + + {{key, escaped}, params_acc} + end + + defp escape_kw_fragment({key, expr}, params_acc, vars, env) when is_atom(key) do + {escaped, params_acc} = escape(expr, :any, params_acc, vars, env) + {{key, escaped}, params_acc} + end + + defp escape_kw_fragment({key, _expr}, _params_acc, _vars, _env) do + error!( + "fragment(...) 
with keywords accepts only atoms as keys, got `#{Macro.to_string(key)}`" + ) + end + + defp escape_fragment({:literal, meta, [expr]}, params_acc, vars, env) do + env = + case env do + {env, _fun} -> env + env -> env + end + + escape_fragment({:identifier, meta, [expr]}, params_acc, vars, env) + end + + defp escape_fragment({:identifier, _meta, [expr]}, params_acc, _vars, _env) do + case expr do + {:^, _, [expr]} -> + checked = quote do: Ecto.Query.Builder.identifier!(unquote(expr)) + escaped = {:{}, [], [:identifier, [], [checked]]} + {escaped, params_acc} + + _ -> + error!( + "identifier/1 in fragment expects an interpolated value, such as identifier(^value), got `#{Macro.to_string(expr)}`" + ) + end + end + + defp escape_fragment({:constant, _meta, [expr]}, params_acc, _vars, _env) do + case expr do + {:^, _, [expr]} -> + checked = quote do: Ecto.Query.Builder.constant!(unquote(expr)) + escaped = {:{}, [], [:constant, [], [checked]]} + {escaped, params_acc} + + _ -> + error!( + "constant/1 in fragment expects an interpolated value, such as constant(^value), got `#{Macro.to_string(expr)}`" + ) + end + end + + defp escape_fragment({:splice, _meta, [splice]}, params_acc, vars, env) do + case splice do + {:^, _, [value]} = expr -> + checked = quote do: Ecto.Query.Builder.splice!(unquote(value)) + length = quote do: length(unquote(checked)) + {expr, params_acc} = escape(expr, {:splice, :any}, params_acc, vars, env) + escaped = {:{}, [], [:splice, [], [expr, length]]} + {escaped, params_acc} + + _ -> + error!( + "splice/1 in fragment expects an interpolated value, such as splice(^value), got `#{Macro.to_string(splice)}`" + ) + end + end + + defp escape_fragment(expr, params_acc, vars, env) do + escape(expr, :any, params_acc, vars, env) + end + + defp merge_fragments([h1 | t1], [h2 | t2]), + do: [{:raw, h1}, {:expr, h2} | merge_fragments(t1, t2)] + + defp merge_fragments([h1], []), + do: [{:raw, h1}] + + for {agg, arity} <- @dynamic_aggregates do + defp 
call_type(unquote(agg), unquote(arity)), do: {:any, :any} + end + + for {agg, {arity, return}} <- @static_aggregates do + defp call_type(unquote(agg), unquote(arity)), do: {:any, unquote(return)} + end + + for {comp, arity} <- @comparisons do + defp call_type(unquote(comp), unquote(arity)), do: {:any, :boolean} + end + + defp call_type(:or, 2), do: {:boolean, :boolean} + defp call_type(:and, 2), do: {:boolean, :boolean} + defp call_type(:not, 1), do: {:boolean, :boolean} + defp call_type(:like, 2), do: {:string, :boolean} + defp call_type(:ilike, 2), do: {:string, :boolean} + defp call_type(_, _), do: nil + + defp assert_type!(expr, type, actual) do + cond do + not is_atom(type) and not Ecto.Type.primitive?(type) -> + :ok + + Ecto.Type.match?(type, actual) -> + :ok + + true -> + error!( + "expression `#{Macro.to_string(expr)}` does not type check. " <> + "It returns a value of type #{inspect(actual)} but a value of " <> + "type #{inspect(type)} is expected" + ) + end + end + + @doc """ + Validates the type with the given vars. 
+ """ + def validate_type!({:parameterized, _} = type, _vars, _env), + do: type + + def validate_type!({composite, type}, vars, env), + do: {composite, validate_type!(type, vars, env)} + + def validate_type!({:^, _, [type]}, _vars, _env), + do: type + + def validate_type!({:__aliases__, _, _} = type, _vars, env), + do: Macro.expand(type, get_env(env)) + + def validate_type!(type, _vars, _env) when is_atom(type), + do: type + + def validate_type!({{:., _, [{var, _, context}, field]}, _, []}, vars, _env) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + + def validate_type!({:field, _, [{var, _, context}, field]}, vars, _env) + when is_atom(var) and is_atom(context) and (is_atom(field) or is_binary(field)), + do: {find_var!(var, vars), field} + + def validate_type!({:field, _, [{var, _, context}, {:^, _, [field]}]}, vars, _env) + when is_atom(var) and is_atom(context), + do: {find_var!(var, vars), field} + + def validate_type!(type, _vars, _env) do + error!( + "type/2 expects an alias, atom, initialized parameterized type or " <> + "source.field as second argument, got: `#{Macro.to_string(type)}`" + ) + end + + @always_tagged [:binary] + + defp literal(value, expected, vars), + do: do_literal(value, expected, quoted_type(value, vars)) + + defp do_literal(value, _, current) when current in @always_tagged, + do: + {:%, [], + [Ecto.Query.Tagged, {:%{}, [], [value: value, type: normalize_type(value, current)]}]} + + defp do_literal(value, :any, _current), + do: value + + defp do_literal(value, expected, expected), + do: value + + defp do_literal(value, expected, _current), + do: {:%, [], [Ecto.Query.Tagged, {:%{}, [], [value: value, type: expected]}]} + + @doc """ + Escape the params entries list. 
+ """ + @spec escape_params(list()) :: list() + def escape_params(list), do: Enum.reverse(list) + + @doc """ + Escape the select alias map + """ + @spec escape_select_aliases(map()) :: Macro.t() + def escape_select_aliases(%{} = aliases), do: {:%{}, [], Map.to_list(aliases)} + + @doc """ + Escapes a variable according to the given binds. + + A escaped variable is represented internally as + `&0`, `&1` and so on. + """ + @spec escape_var!(atom, Keyword.t()) :: Macro.t() + def escape_var!(var, vars) do + {:{}, [], [:&, [], [find_var!(var, vars)]]} + end + + @doc """ + Escapes a list of bindings as a list of atoms. + + Only variables or `{:atom, value}` tuples are allowed in the `bindings` list, + otherwise an `Ecto.Query.CompileError` is raised. + + ## Examples + + iex> escape_binding(%Ecto.Query{}, quote(do: [x, y, z]), __ENV__) + {%Ecto.Query{}, [x: 0, y: 1, z: 2]} + + iex> escape_binding(%Ecto.Query{}, quote(do: [{x, 0}, {z, 2}]), __ENV__) + {%Ecto.Query{}, [x: 0, z: 2]} + + iex> escape_binding(%Ecto.Query{}, quote(do: [x, y, x]), __ENV__) + ** (Ecto.Query.CompileError) variable `x` is bound twice + + iex> escape_binding(%Ecto.Query{}, quote(do: [a, b, :foo]), __ENV__) + ** (Ecto.Query.CompileError) binding list should contain only variables or `{as, var}` tuples, got: :foo + + """ + @spec escape_binding(Macro.t(), list, Macro.Env.t()) :: {Macro.t(), Keyword.t()} + def escape_binding(query, binding, _env) when is_list(binding) do + vars = binding |> Enum.with_index() |> Enum.map(&escape_bind/1) + assert_no_duplicate_binding!(vars) + + {positional_vars, named_vars} = Enum.split_while(vars, &(not named_bind?(&1))) + assert_named_binds_in_tail!(named_vars, binding) + + {query, positional_binds} = calculate_positional_binds(query, positional_vars) + {query, named_binds} = calculate_named_binds(query, named_vars) + {query, positional_binds ++ named_binds} + end + + def escape_binding(_query, bind, _env) do + error!( + "binding should be list of variables and `{as, 
var}` tuples " <> + "at the end, got: #{Macro.to_string(bind)}" + ) + end + + defp named_bind?({kind, _, _}), do: kind == :named + + defp assert_named_binds_in_tail!(named_vars, binding) do + if Enum.all?(named_vars, &named_bind?/1) do + :ok + else + error!( + "named binds in the form of `{as, var}` tuples must be at the end " <> + "of the binding list, got: #{Macro.to_string(binding)}" + ) + end + end + + defp assert_no_duplicate_binding!(vars) do + bound_vars = for {_, var, _} <- vars, var != :_, do: var + + case bound_vars -- Enum.uniq(bound_vars) do + [] -> :ok + [var | _] -> error!("variable `#{var}` is bound twice") + end + end + + defp calculate_positional_binds(query, vars) do + case Enum.split_while(vars, &(elem(&1, 1) != :...)) do + {vars, []} -> + vars = for {:pos, var, count} <- vars, do: {var, count} + {query, vars} + + {vars, [_ | tail]} -> + var = Macro.unique_var(:query, __MODULE__) + + query = + quote do + unquote(var) = Ecto.Queryable.to_query(unquote(query)) + escape_count = Ecto.Query.Builder.count_binds(unquote(var)) + unquote(var) + end + + tail = + tail + |> Enum.with_index(-length(tail)) + |> Enum.map(fn {{:pos, k, _}, count} -> + {k, quote(do: escape_count + unquote(count))} + end) + + vars = for {:pos, var, count} <- vars, do: {var, count} + {query, vars ++ tail} + end + end + + defp calculate_named_binds(query, []), do: {query, []} + + defp calculate_named_binds(query, vars) do + var = Macro.unique_var(:query, __MODULE__) + + assignments = + for {:named, key, name} <- vars do + quote do + unquote({key, [], __MODULE__}) = + unquote(__MODULE__).count_alias!(unquote(var), unquote(name)) + end + end + + query = + quote do + unquote(var) = Ecto.Queryable.to_query(unquote(query)) + unquote_splicing(assignments) + unquote(var) + end + + pairs = + for {:named, key, _name} <- vars do + {key, {key, [], __MODULE__}} + end + + {query, pairs} + end + + @doc """ + Count the alias for the given query. 
+ """ + def count_alias!(%{aliases: aliases} = query, name) do + case aliases do + %{^name => ix} -> + ix + + %{} -> + raise Ecto.QueryError, message: "unknown bind name `#{inspect(name)}`", query: query + end + end + + defp escape_bind({{:..., _, _context}, ix}), + do: {:pos, :..., ix} + + defp escape_bind({{var, _, context}, ix}) when is_atom(var) and is_atom(context), + do: {:pos, var, ix} + + defp escape_bind({{{var, _, context}, ix}, _}) when is_atom(var) and is_atom(context), + do: {:pos, var, ix} + + defp escape_bind({{name, {var, _, context}}, _ix}) + when is_atom(name) and is_atom(var) and is_atom(context), + do: {:named, var, name} + + defp escape_bind({{{:^, _, [expr]}, {var, _, context}}, _ix}) + when is_atom(var) and is_atom(context), + do: {:named, var, expr} + + defp escape_bind({bind, _ix}), + do: + error!( + "binding list should contain only variables or " <> + "`{as, var}` tuples, got: #{Macro.to_string(bind)}" + ) + + defp try_expansion(expr, type, params, vars, %Macro.Env{} = env) do + try_expansion(expr, type, params, vars, {env, &escape/5}) + end + + defp try_expansion(expr, type, params, vars, {env, fun}) do + case Macro.expand_once(expr, env) do + ^expr -> + error!(""" + `#{Macro.to_string(expr)}` is not a valid query expression. + + * If you intended to call an Elixir function or introduce a value, + you need to explicitly interpolate it with ^ + + * If you intended to call a database function, please check the documentation + for Ecto.Query.API to see the supported database expressions + + * If you intended to extend Ecto's query DSL, make sure that you have required + the module or imported the relevant function. Note that you need macros to + extend Ecto's querying capabilities + """) + + expanded -> + fun.(expanded, type, params, vars, env) + end + end + + @doc """ + Finds the index value for the given var in vars or raises. + """ + def find_var!(var, vars) do + vars[var] || + error!( + "unbound variable `#{var}` in query. 
If you are attempting to interpolate a value, use ^var" + ) + end + + @doc """ + Checks if the field is an atom at compilation time or + delegates the check to runtime for interpolation. + """ + def quoted_atom!({:^, _, [expr]}, used_ref), + do: quote(do: Ecto.Query.Builder.atom!(unquote(expr), unquote(used_ref))) + + def quoted_atom!(atom, _used_ref) when is_atom(atom), + do: atom + + def quoted_atom!(other, used_ref), + do: + error!( + "expected literal atom or interpolated value in #{used_ref}, got: " <> + "`#{Macro.to_string(other)}`" + ) + + @doc """ + Checks if the field is an atom or string at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_atom_or_string!({:^, _, [expr]}, used_ref), + do: quote(do: Ecto.Query.Builder.atom_or_string!(unquote(expr), unquote(used_ref))) + + def quoted_atom_or_string!(atom, _used_ref) when is_atom(atom), + do: atom + + def quoted_atom_or_string!(string, _used_ref) when is_binary(string), + do: string + + def quoted_atom_or_string!(other, used_ref), + do: + error!( + "expected literal atom or string or interpolated value in #{used_ref}, got: " <> + "`#{Macro.to_string(other)}`" + ) + + @doc """ + Called by escaper at runtime to verify that value is an atom. + """ + def atom!(atom, _used_ref) when is_atom(atom), + do: atom + + def atom!(other, used_ref), + do: error!("expected atom in #{used_ref}, got: `#{inspect(other)}`") + + @doc """ + Called by escaper at runtime to verify that value is an atom or string. + """ + def atom_or_string!(atom, _used_ref) when is_atom(atom), + do: atom + + def atom_or_string!(string, _used_ref) when is_binary(string), + do: string + + def atom_or_string!(other, used_ref), + do: error!("expected atom or string in #{used_ref}, got: `#{inspect(other)}`") + + @doc """ + Checks if the value of a late binding is an interpolation or + a quoted atom. 
+ """ + def late_binding!(kind, value) do + case value do + {:^, _, [value]} -> + value + + other -> + quoted_atom!(other, "#{kind}/1") + end + end + + defp escape_json_path(path, vars) when is_list(path) do + Enum.map(path, "ed_json_path_element!(&1, vars)) + end + + defp escape_json_path({:^, _, [path]}, _vars) do + quote do + path = Ecto.Query.Builder.json_path!(unquote(path)) + Enum.map(path, &Ecto.Query.Builder.json_path_element!/1) + end + end + + defp escape_json_path(other, _vars) do + error!( + "expected JSON path to be a literal list or interpolated value, got: `#{Macro.to_string(other)}`" + ) + end + + defp quoted_json_path_element!({:^, _, [expr]}, _vars), + do: quote(do: Ecto.Query.Builder.json_path_element!(unquote(expr))) + + defp quoted_json_path_element!(binary, _vars) when is_binary(binary), + do: binary + + defp quoted_json_path_element!(integer, _vars) when is_integer(integer), + do: integer + + defp quoted_json_path_element!({{:., _, [callee, field]}, _, []}, vars) do + escape_field!(callee, field, vars) + end + + defp quoted_json_path_element!({:field, _, [callee, field]}, vars) do + escape_field!(callee, field, vars) + end + + defp quoted_json_path_element!(other, _vars), + do: + error!( + "expected JSON path to contain literal strings, literal integers, fields, or interpolated values, got: " <> + "`#{Macro.to_string(other)}`" + ) + + @doc """ + Called by escaper at runtime to verify that value is a string or an integer. 
+ """ + def json_path_element!(binary) when is_binary(binary), + do: binary + + def json_path_element!(integer) when is_integer(integer), + do: integer + + def json_path_element!(other), + do: error!("expected string or integer in json_extract_path/2, got: `#{inspect(other)}`") + + @doc """ + Called by escaper at runtime to verify that path is a list + """ + def json_path!(path) when is_list(path), + do: path + + def json_path!(path), + do: error!("expected `path` to be a list in json_extract_path/2, got: `#{inspect(path)}`") + + @doc """ + Called by escaper at runtime to verify that a value is not nil. + """ + def not_nil!(nil, compare_str) do + raise ArgumentError, + "comparing `#{compare_str}` with `nil` is forbidden as it is unsafe. " <> + "If you want to check if a value is nil, use is_nil/1 instead" + end + + def not_nil!(not_nil, _compare_str) do + not_nil + end + + @doc """ + Checks if the field is a valid interval at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_interval!({:^, _, [expr]}), + do: quote(do: Ecto.Query.Builder.interval!(unquote(expr))) + + def quoted_interval!(other), + do: interval!(other) + + @doc """ + Called by escaper at runtime to verify fragment keywords. + """ + def fragment!(kw) do + if Keyword.keyword?(kw) do + kw + else + raise ArgumentError, bad_fragment_message(inspect(kw)) + end + end + + @doc """ + Called by escaper at runtime to verify identifier in fragments. + """ + def identifier!(identifier) do + if is_binary(identifier) do + identifier + else + raise ArgumentError, + "identifier(^value) expects `value` to be a string, got `#{inspect(identifier)}`" + end + end + + @doc """ + Called by escaper at runtime to verify constant in fragments. 
+ """ + def constant!(constant) do + if is_binary(constant) or is_number(constant) do + constant + else + raise ArgumentError, + "constant(^value) expects `value` to be a string or a number, got `#{inspect(constant)}`" + end + end + + @doc """ + Called by escaper at runtime to verify splice in fragments. + """ + def splice!(value) do + if is_list(value) do + value + else + raise ArgumentError, + "splice(^value) expects `value` to be a list, got `#{inspect(value)}`" + end + end + + @doc """ + Called by escaper at runtime to verify that value is a valid interval. + """ + @interval ~w(year month week day hour minute second millisecond microsecond) + def interval!(interval) when interval in @interval, + do: interval + + def interval!(other_string) when is_binary(other_string), + do: + error!( + "invalid interval: `#{inspect(other_string)}` (expected one of #{Enum.join(@interval, ", ")})" + ) + + def interval!(not_string), + do: error!("invalid interval: `#{inspect(not_string)}` (expected a string)") + + @doc """ + Negates the given number. + """ + def negate!(%Decimal{} = decimal), do: Decimal.negate(decimal) + def negate!(number) when is_number(number), do: -number + + @doc """ + Returns the type of an expression at build time. 
+ """ + @spec quoted_type(Macro.t(), Keyword.t()) :: quoted_type + + # Fields + def quoted_type({{:., _, [{var, _, context}, field]}, _, []}, vars) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + + def quoted_type({{:., _, [{kind, _, [value]}, field]}, _, []}, _vars) + when kind in [:as, :parent_as] do + value = late_binding!(kind, value) + {{:{}, [], [kind, [], [value]]}, field} + end + + def quoted_type({:field, _, [{var, _, context}, field]}, vars) + when is_atom(var) and is_atom(context) and (is_atom(field) or is_binary(field)), + do: {find_var!(var, vars), field} + + def quoted_type({:field, _, [{kind, _, [value]}, field]}, _vars) + when kind in [:as, :parent_as] and (is_atom(field) or is_binary(field)) do + value = late_binding!(kind, value) + {{:{}, [], [kind, [], [value]]}, field} + end + + # Unquoting code here means the second argument of field will + # always be unquoted twice, one by the type checking and another + # in the query itself. We are assuming this is not an issue + # as the solution is somewhat complicated. 
+ def quoted_type({:field, _, [{var, _, context}, {:^, _, [code]}]}, vars) + when is_atom(var) and is_atom(context), + do: {find_var!(var, vars), code} + + def quoted_type({:field, _, [{kind, _, [value]}, {:^, _, [code]}]}, _vars) + when kind in [:as, :parent_as] do + value = late_binding!(kind, value) + {{:{}, [], [kind, [], [value]]}, code} + end + + # Interval + def quoted_type({:datetime_add, _, [_, _, _]}, _vars), do: :naive_datetime + def quoted_type({:date_add, _, [_, _, _]}, _vars), do: :date + + # Tagged + def quoted_type({:<<>>, _, _}, _vars), do: :binary + def quoted_type({:type, _, [_, type]}, _vars), do: type + + # Sigils + def quoted_type({sigil, _, [_, []]}, _vars) when sigil in ~w(sigil_s sigil_S)a, do: :string + + def quoted_type({sigil, _, [_, []]}, _vars) when sigil in ~w(sigil_w sigil_W)a, + do: {:array, :string} + + # Lists + def quoted_type(list, vars) when is_list(list) do + case list |> Enum.map("ed_type(&1, vars)) |> Enum.uniq() do + [type] -> {:array, type} + _ -> {:array, :any} + end + end + + # Negative numbers + def quoted_type({:-, _, [number]}, _vars) when is_integer(number), do: :integer + def quoted_type({:-, _, [number]}, _vars) when is_float(number), do: :float + + # Dynamic aggregates + for {agg, arity} <- @dynamic_aggregates do + args = 1..arity |> Enum.map(fn _ -> Macro.var(:_, __MODULE__) end) |> tl() + + def quoted_type({unquote(agg), _, [expr, unquote_splicing(args)]}, vars) do + quoted_type(expr, vars) + end + end + + # Literals + def quoted_type(literal, _vars) when is_float(literal), do: :float + def quoted_type(literal, _vars) when is_binary(literal), do: :string + def quoted_type(literal, _vars) when is_boolean(literal), do: :boolean + def quoted_type(literal, _vars) when is_atom(literal) and not is_nil(literal), do: :atom + def quoted_type(literal, _vars) when is_integer(literal), do: :integer + + # Tuples + def quoted_type({left, right}, vars), do: quoted_type({:{}, [], [left, right]}, vars) + def quoted_type({:{}, _, 
elems}, vars), do: {:tuple, Enum.map(elems, "ed_type(&1, vars))} + + def quoted_type({name, _, args}, _vars) when is_atom(name) and is_list(args) do + case call_type(name, length(args)) do + {_in, out} -> out + nil -> :any + end + end + + def quoted_type(_, _vars), do: :any + + defp get_env({env, _}), do: env + defp get_env(env), do: env + + defp normalize_type(value, :binary), + do: quote(do: (is_binary(unquote(value)) && :binary) || :bitstring) + + @doc """ + Raises a query building error. + """ + def error!(message) when is_binary(message) do + {:current_stacktrace, [_ | t]} = Process.info(self(), :current_stacktrace) + + t = + Enum.drop_while(t, fn + {mod, _, _, _} -> + String.starts_with?(Atom.to_string(mod), ["Elixir.Ecto.Query.", "Elixir.Enum"]) + + _ -> + false + end) + + reraise Ecto.Query.CompileError, [message: message], t + end + + @doc """ + Counts the bindings in a query expression. + + ## Examples + + iex> count_binds(%Ecto.Query{joins: [1,2,3]}) + 4 + + """ + @spec count_binds(Ecto.Query.t()) :: non_neg_integer + def count_binds(%Query{joins: joins}) do + 1 + length(joins) + end + + @doc """ + Bump interpolations by the length of parameters. + """ + def bump_interpolations(expr, []), do: expr + + def bump_interpolations(expr, params) do + len = length(params) + + Macro.prewalk(expr, fn + {:^, meta, [counter]} when is_integer(counter) -> {:^, meta, [len + counter]} + other -> other + end) + end + + @doc """ + Bump subqueries by the count of pre-existing subqueries. 
+ """ + def bump_subqueries(expr, []), do: expr + + def bump_subqueries(expr, subqueries) do + len = length(subqueries) + + Macro.prewalk(expr, fn + {:subquery, counter} -> {:subquery, len + counter} + other -> other + end) + end + + @doc """ + Called by the select escaper at compile time and dynamic builder at runtime to track select aliases + """ + def add_select_alias(aliases, name) when is_map(aliases) and is_atom(name) do + case aliases do + %{^name => _} -> + error!( + "the alias `#{inspect(name)}` has been specified more than once using `selected_as/2`" + ) + + aliases -> + Map.put(aliases, name, @select_alias_dummy_value) + end + end + + def add_select_alias(aliases, name) do + aliases = + case aliases do + %{} -> Macro.escape(aliases) + aliases -> aliases + end + + quote do: Ecto.Query.Builder.add_select_alias(unquote(aliases), unquote(name)) + end + + @doc """ + Applies a query at compilation time or at runtime. + + This function is responsible for checking if a given query is an + `Ecto.Query` struct at compile time. If it is not it will act + accordingly. + + If a query is available, it invokes the `apply` function in the + given `module`, otherwise, it delegates the call to runtime. + + It is important to keep in mind the complexities introduced + by this function. In particular, a %Query{} is a mixture of escaped + and unescaped expressions which makes it impossible for this + function to properly escape or unescape it at compile/runtime. + For this reason, the apply function should be ready to handle + arguments in both escaped and unescaped form. + + For example, take into account the `Builder.OrderBy`: + + select = %Ecto.Query.QueryExpr{expr: expr, file: env.file, line: env.line} + Builder.apply_query(query, __MODULE__, [order_by], env) + + `expr` is already an escaped expression and we must not escape + it again. However, it is wrapped in an Ecto.Query.QueryExpr, + which must be escaped! 
Furthermore, the `apply/2` function + in `Builder.OrderBy` very likely will inject the QueryExpr inside + Query, which again, is a mixture of escaped and unescaped expressions. + + That said, you need to obey the following rules: + + 1. In order to call this function, the arguments must be escapable + values supported by the `escape/1` function below; + + 2. The apply function may not manipulate the given arguments, + with exception to the query. + + In particular, when invoked at compilation time, all arguments + (except the query) will be escaped, so they can be injected into + the query properly, but they will be in their runtime form + when invoked at runtime. + """ + @spec apply_query(Macro.t(), Macro.t(), Macro.t(), Macro.Env.t()) :: Macro.t() + def apply_query(query, module, args, env) do + case Macro.expand(query, env) |> unescape_query() do + %Query{} = compiletime_query -> + apply(module, :apply, [compiletime_query | args]) + |> escape_query() + + runtime_query -> + quote do + # Unquote the query before `module.apply()` for any binding variable. + query = unquote(runtime_query) + unquote(module).apply(query, unquote_splicing(args)) + end + end + end + + # Unescapes an `Ecto.Query` struct. + @spec unescape_query(Macro.t()) :: Query.t() | Macro.t() + defp unescape_query({:%, _, [Query, {:%{}, _, list}]}) do + struct(Query, list) + end + + defp unescape_query({:%{}, _, list} = ast) do + if List.keyfind(list, :__struct__, 0) == {:__struct__, Query} do + Map.new(list) + else + ast + end + end + + defp unescape_query(other) do + other + end + + # Escapes an `Ecto.Query` and associated structs. 
+ @spec escape_query(Query.t()) :: Macro.t() + defp escape_query(%Query{} = query), do: {:%{}, [], Map.to_list(query)} + + defp parse_access_get({{:., _, [Access, :get]}, _, [left, right]}, acc) do + parse_access_get(left, [right | acc]) + end + + defp parse_access_get({{:., _, [{var, _, context}, field]}, _, []} = expr, acc) + when is_atom(var) and is_atom(context) and is_atom(field) do + {expr, acc} + end + + defp parse_access_get({{:., _, [{kind, _, [_]}, field]}, _, []} = expr, acc) + when kind in [:as, :parent_as] and is_atom(field) do + {expr, acc} + end +end diff --git a/deps/ecto/lib/ecto/query/builder/combination.ex b/deps/ecto/lib/ecto/query/builder/combination.ex new file mode 100644 index 0000000..00f1306 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/combination.ex @@ -0,0 +1,36 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Combination do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(atom, Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def build(kind, query, {:^, _, [expr]}, env) do + expr = quote do: Ecto.Queryable.to_query(unquote(expr)) + Builder.apply_query(query, __MODULE__, [[{kind, expr}]], env) + end + + def build(kind, _query, other, _env) do + Builder.error! "`#{Macro.to_string(other)}` is not a valid #{kind}. " <> + "#{kind} must always be an interpolated query, such as ^existing_query" + end + + @doc """ + The callback applied by `build/4` to build the query. 
+ """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{combinations: combinations} = query, value) do + %{query | combinations: combinations ++ value} + end + def apply(query, value) do + apply(Ecto.Queryable.to_query(query), value) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/cte.ex b/deps/ecto/lib/ecto/query/builder/cte.ex new file mode 100644 index 0000000..a253fd2 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/cte.ex @@ -0,0 +1,121 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.CTE do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes the CTE name. + + iex> escape(quote(do: "FOO"), __ENV__) + "FOO" + + """ + @spec escape(Macro.t, Macro.Env.t) :: Macro.t + def escape(name, _env) when is_bitstring(name), do: name + + def escape({:^, _, [expr]}, _env), do: expr + + def escape(expr, env) do + case Macro.expand_once(expr, env) do + ^expr -> + Builder.error! "`#{Macro.to_string(expr)}` is not a valid CTE name. " <> + "It must be a literal string or an interpolated variable." + + expr -> + escape(expr, env) + end + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t, Macro.t, Macro.t, nil | boolean(), nil | :all | :update_all | :delete_all , Macro.Env.t) :: Macro.t + def build(query, name, cte, materialized, operation, env) do + Builder.apply_query(query, __MODULE__, [escape(name, env), build_cte(name, cte, env), materialized, operation], env) + end + + @spec build_cte(Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def build_cte(_name, {:^, _, [expr]}, _env) do + quote do: Ecto.Queryable.to_query(unquote(expr)) + end + + def build_cte(_name, {:fragment, _, _} = fragment, env) do + {expr, {params, _acc}} = Builder.escape(fragment, :any, {[], %{}}, [], env) + params = Builder.escape_params(params) + + quote do + %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line) + } + end + end + + def build_cte(name, cte, env) do + case Macro.expand_once(cte, env) do + ^cte -> + Builder.error! "`#{Macro.to_string(cte)}` is not a valid CTE (named: #{Macro.to_string(name)}). " <> + "The CTE must be an interpolated query, such as ^existing_query or a fragment." + + cte -> + build_cte(name, cte, env) + end + end + + @doc """ + The callback applied by `build/4` to build the query. 
+ """ + @spec apply(Ecto.Queryable.t, bitstring, Ecto.Queryable.t, nil | boolean(), nil | :all | :update_all | :delete_all) :: Ecto.Query.t + # Runtime + def apply(query, name, with_query, materialized, nil) do + apply(query, name, with_query, materialized, :all) + end + + def apply(_query, _name, _with_query, _materialized, operation) + when operation not in [:all, :update_all, :delete_all] do + Builder.error!("`operation` option must be one of :all, :update_all, or :delete_all") + end + + def apply(%Ecto.Query{with_ctes: with_expr} = query, name, %_{} = with_query, materialized, operation) do + %{query | with_ctes: apply_cte(with_expr, name, with_query, materialized, operation)} + end + + # Compile + def apply(%Ecto.Query{with_ctes: with_expr} = query, name, with_query, materialized, operation) do + update = quote do + Ecto.Query.Builder.CTE.apply_cte(unquote(with_expr), unquote(name), unquote(with_query), unquote(materialized), unquote(operation)) + end + + %{query | with_ctes: update} + end + + # Runtime catch-all + def apply(query, name, with_query, materialized, operation) do + apply(Ecto.Queryable.to_query(query), name, with_query, materialized, operation) + end + + @doc false + def apply_cte(nil, name, with_query, materialized, operation) when is_boolean(materialized) do + %Ecto.Query.WithExpr{queries: [{name, %{materialized: materialized, operation: operation}, with_query}]} + end + + def apply_cte(nil, name, with_query, _materialized, operation) do + %Ecto.Query.WithExpr{queries: [{name, %{operation: operation}, with_query}]} + end + + def apply_cte(%Ecto.Query.WithExpr{queries: queries} = with_expr, name, with_query, materialized, operation) when is_boolean(materialized) do + %{with_expr | queries: List.keystore(queries, name, 0, {name, %{materialized: materialized, operation: operation}, with_query})} + end + + def apply_cte(%Ecto.Query.WithExpr{queries: queries} = with_expr, name, with_query, _materialized, operation) do + %{with_expr | queries: 
List.keystore(queries, name, 0, {name, %{operation: operation}, with_query})} + end +end diff --git a/deps/ecto/lib/ecto/query/builder/distinct.ex b/deps/ecto/lib/ecto/query/builder/distinct.ex new file mode 100644 index 0000000..a919428 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/distinct.ex @@ -0,0 +1,91 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Distinct do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. + + iex> escape(quote do true end, {[], %{}}, [], __ENV__) + {true, {[], %{}}} + + iex> escape(quote do [x.x, 13] end, {[], %{}}, [x: 0], __ENV__) + {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + asc: 13], + {[], %{}}} + + """ + @spec escape(Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: {Macro.t, {list, term}} + def escape(expr, params_acc, _vars, _env) when is_boolean(expr) do + {expr, params_acc} + end + + def escape(expr, params_acc, vars, env) do + Builder.OrderBy.escape(:distinct, expr, params_acc, vars, env) + end + + @doc """ + Called at runtime to verify distinct. + """ + def distinct!(query, distinct, file, line) when is_boolean(distinct) do + apply(query, %Ecto.Query.ByExpr{expr: distinct, params: [], line: line, file: file}) + end + def distinct!(query, distinct, file, line) do + {expr, params, subqueries} = + Builder.OrderBy.order_by_or_distinct!(:distinct, query, distinct, []) + + expr = %Ecto.Query.ByExpr{ + expr: expr, + params: Enum.reverse(params), + line: line, + file: file, + subqueries: subqueries + } + + apply(query, expr) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Distinct.distinct!(unquote(query), unquote(var), unquote(env.file), unquote(env.line)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, acc}} = escape(expr, {[], %{subqueries: []}}, binding, env) + params = Builder.escape_params(params) + + distinct = quote do: %Ecto.Query.ByExpr{ + expr: unquote(expr), + params: unquote(params), + subqueries: unquote(acc.subqueries), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [distinct], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{distinct: nil} = query, expr) do + %{query | distinct: expr} + end + def apply(%Ecto.Query{}, _expr) do + Builder.error! "only one distinct expression is allowed in query" + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/dynamic.ex b/deps/ecto/lib/ecto/query/builder/dynamic.ex new file mode 100644 index 0000000..32802a1 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/dynamic.ex @@ -0,0 +1,112 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Dynamic do + @moduledoc false + + alias Ecto.Query.Builder + alias Ecto.Query.Builder.Select + + @doc """ + Builds a dynamic expression. 
+ """ + @spec build([Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(binding, expr, env) do + {query, vars} = Builder.escape_binding(quote(do: query), binding, env) + {expr, {params, acc}} = escape(expr, {[], %{subqueries: [], aliases: %{}}}, vars, env) + aliases = escape_select_aliases(acc.aliases) + params = Builder.escape_params(params) + + quote do + %Ecto.Query.DynamicExpr{fun: fn query -> + _ = unquote(query) + {unquote(expr), unquote(params), unquote(Enum.reverse(acc.subqueries)), unquote(aliases)} + end, + binding: unquote(Macro.escape(binding)), + file: unquote(env.file), + line: unquote(env.line)} + end + end + + defp escape({:selected_as, _, [_, _]} = expr, _params_acc, vars, env) do + Select.escape(expr, vars, env) + end + + defp escape({:%{}, _, _} = expr, _params_acc, vars, env) do + Select.escape(expr, vars, env) + end + + defp escape(expr, params_acc, vars, env) do + Builder.escape(expr, :any, params_acc, vars, {env, &escape_expansion/5}) + end + + defp escape_expansion(expr, _type, params_acc, vars, env) do + escape(expr, params_acc, vars, env) + end + + defp escape_select_aliases(%{} = aliases), do: Builder.escape_select_aliases(aliases) + defp escape_select_aliases(aliases), do: aliases + + @doc """ + Expands a dynamic expression for insertion into the given query. + """ + def fully_expand(query, %{file: file, line: line, binding: binding} = dynamic) do + {expr, {binding, params, subqueries, _aliases, _count}} = expand(query, dynamic, {binding, [], [], %{}, 0}) + {expr, binding, Enum.reverse(params), Enum.reverse(subqueries), file, line} + end + + @doc """ + Expands a dynamic expression as part of an existing expression. + + Any dynamic expression parameter is prepended and the parameters + list is not reversed. This is useful when the dynamic expression + is given in the middle of an expression. 
+ """ + def partially_expand(query, %{binding: binding} = dynamic, params, subqueries, aliases, count) do + {expr, {_binding, params, subqueries, aliases, count}} = + expand(query, dynamic, {binding, params, subqueries, aliases, count}) + + {expr, params, subqueries, aliases, count} + end + + def partially_expand(kind, query, %{binding: binding} = dynamic, params, count) do + {expr, {_binding, params, subqueries, _aliases, count}} = + expand(query, dynamic, {binding, params, [], %{}, count}) + + if subqueries != [] do + raise ArgumentError, "subqueries are not allowed in `#{kind}` expressions" + end + + {expr, params, count} + end + + defp expand(query, %{fun: fun}, {binding, params, subqueries, aliases, count}) do + {dynamic_expr, dynamic_params, dynamic_subqueries, dynamic_aliases} = fun.(query) + aliases = merge_aliases(aliases, dynamic_aliases) + + Macro.postwalk(dynamic_expr, {binding, params, subqueries, aliases, count}, fn + {:^, meta, [ix]}, {binding, params, subqueries, aliases, count} -> + case Enum.fetch!(dynamic_params, ix) do + {%Ecto.Query.DynamicExpr{binding: new_binding} = dynamic, _} -> + binding = if length(new_binding) > length(binding), do: new_binding, else: binding + expand(query, dynamic, {binding, params, subqueries, aliases, count}) + + param -> + {{:^, meta, [count]}, {binding, [param | params], subqueries, aliases, count + 1}} + end + + {:subquery, i}, {binding, params, subqueries, aliases, count} -> + subquery = Enum.fetch!(dynamic_subqueries, i) + ix = length(subqueries) + {{:subquery, ix}, {binding, [{:subquery, ix} | params], [subquery | subqueries], aliases, count + 1}} + + expr, acc -> + {expr, acc} + end) + end + + defp merge_aliases(old_aliases, new_aliases) do + Enum.reduce(new_aliases, old_aliases, fn {alias, _}, aliases -> + Builder.add_select_alias(aliases, alias) + end) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/filter.ex b/deps/ecto/lib/ecto/query/builder/filter.ex new file mode 100644 index 0000000..12a374a 
--- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/filter.ex @@ -0,0 +1,196 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.Filter do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a where or having clause. + + It allows query expressions that evaluate to a boolean + or a keyword list of field names and values. In a keyword + list multiple key value pairs will be joined with "and". + + Returned is `{expression, {params, %{subqueries: subqueries}}}` which is + a valid escaped expression, see `Macro.escape/2`. Both `params` + and `subqueries` are reversed. + """ + @spec escape(:where | :having | :on, Macro.t, non_neg_integer, Keyword.t, Macro.Env.t) :: {Macro.t, {list, Builder.acc()}} + def escape(_kind, [], _binding, _vars, _env) do + {true, {[], %{subqueries: []}}} + end + + def escape(kind, expr, binding, vars, env) when is_list(expr) do + {parts, params_acc} = + Enum.map_reduce(expr, {[], %{subqueries: []}}, fn + {field, nil}, _params_acc -> + Builder.error! "nil given for `#{field}`. Comparison with nil is forbidden as it is unsafe. " <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + + {field, value}, params_acc when is_atom(field) -> + value = check_for_nils(value, field) + {value, params_acc} = Builder.escape(value, {binding, field}, params_acc, vars, env) + {{:{}, [], [:==, [], [to_escaped_field(binding, field), value]]}, params_acc} + + _, _params_acc -> + Builder.error! "expected a keyword list at compile time in #{kind}, " <> + "got: `#{Macro.to_string expr}`. If you would like to " <> + "pass a list dynamically, please interpolate the whole list with ^" + end) + + expr = Enum.reduce parts, &{:{}, [], [:and, [], [&2, &1]]} + {expr, params_acc} + end + + def escape(_kind, expr, _binding, vars, env) do + Builder.escape(expr, :boolean, {[], %{subqueries: []}}, vars, env) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. 
+ If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(:where | :having, :and | :or, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(kind, op, query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Filter.filter!(unquote(kind), unquote(op), unquote(query), + unquote(var), 0, unquote(env.file), unquote(env.line)) + end + end + + def build(kind, op, query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, acc}} = escape(kind, expr, 0, binding, env) + + params = Builder.escape_params(params) + subqueries = Enum.reverse(acc.subqueries) + + expr = quote do: %Ecto.Query.BooleanExpr{ + expr: unquote(expr), + op: unquote(op), + params: unquote(params), + subqueries: unquote(subqueries), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [kind, expr], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, :where | :having, term) :: Ecto.Query.t + def apply(query, _, %{expr: true}) do + query + end + def apply(%Ecto.Query{wheres: wheres} = query, :where, expr) do + %{query | wheres: wheres ++ [expr]} + end + def apply(%Ecto.Query{havings: havings} = query, :having, expr) do + %{query | havings: havings ++ [expr]} + end + def apply(query, kind, expr) do + apply(Ecto.Queryable.to_query(query), kind, expr) + end + + @doc """ + Builds a filter based on the given arguments. + + This is shared by having, where and join's on expressions. 
+ """ + def filter!(kind, query, %Ecto.Query.DynamicExpr{} = dynamic, _binding, _file, _line) do + {expr, _binding, params, subqueries, file, line} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + if subqueries != [] do + raise ArgumentError, "subqueries are not allowed in `#{kind}` expressions" + end + + {expr, params, file, line} + end + + def filter!(_kind, _query, bool, _binding, file, line) when is_boolean(bool) do + {bool, [], file, line} + end + + def filter!(kind, _query, kw, binding, file, line) when is_list(kw) do + {expr, params} = kw!(kind, kw, binding) + {expr, params, file, line} + end + + def filter!(kind, _query, other, _binding, _file, _line) do + raise ArgumentError, "expected a keyword list or dynamic expression in `#{kind}`, got: `#{inspect other}`" + end + + @doc """ + Builds the filter and applies it to the given query as boolean operator. + """ + def filter!(:where, op, query, %Ecto.Query.DynamicExpr{} = dynamic, _binding, _file, _line) do + {expr, _binding, params, subqueries, file, line} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + boolean = %Ecto.Query.BooleanExpr{ + expr: expr, + params: params, + line: line, + file: file, + op: op, + subqueries: subqueries + } + + apply(query, :where, boolean) + end + + def filter!(kind, op, query, expr, binding, file, line) do + {expr, params, file, line} = filter!(kind, query, expr, binding, file, line) + boolean = %Ecto.Query.BooleanExpr{expr: expr, params: params, line: line, file: file, op: op} + apply(query, kind, boolean) + end + + defp kw!(kind, kw, binding) do + case kw!(kw, binding, 0, [], [], kind, kw) do + {[], params} -> {true, params} + {parts, params} -> {Enum.reduce(parts, &{:and, [], [&2, &1]}), params} + end + end + + defp kw!([{field, nil}|_], _binding, _counter, _exprs, _params, _kind, _original) when is_atom(field) do + raise ArgumentError, "nil given for #{inspect field}. Comparison with nil is forbidden as it is unsafe. 
" <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + end + defp kw!([{field, value}|t], binding, counter, exprs, params, kind, original) when is_atom(field) do + kw!(t, binding, counter + 1, + [{:==, [], [to_field(binding, field), {:^, [], [counter]}]}|exprs], + [{value, {binding, field}}|params], + kind, original) + end + defp kw!([], _binding, _counter, exprs, params, _kind, _original) do + {Enum.reverse(exprs), Enum.reverse(params)} + end + defp kw!(_, _binding, _counter, _exprs, _params, kind, original) do + raise ArgumentError, "expected a keyword list in `#{kind}`, got: `#{inspect original}`" + end + + defp to_field(binding, field), + do: {{:., [], [{:&, [], [binding]}, field]}, [], []} + defp to_escaped_field(binding, field), + do: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [binding]]}, field]]}, [], []]} + + defp check_for_nils({:^, _, [var]}, field) do + quote do + ^Ecto.Query.Builder.Filter.not_nil!(unquote(var), unquote(field)) + end + end + + defp check_for_nils(value, _field), do: value + + def not_nil!(nil, field) do + raise ArgumentError, "nil given for `#{field}`. comparison with nil is forbidden as it is unsafe. " <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + end + + def not_nil!(other, _field), do: other +end diff --git a/deps/ecto/lib/ecto/query/builder/from.ex b/deps/ecto/lib/ecto/query/builder/from.ex new file mode 100644 index 0000000..9622cd9 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/from.ex @@ -0,0 +1,237 @@ +defmodule Ecto.Query.Builder.From do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Handles from expressions. + + The expressions may either contain an `in` expression or not. + The right side is always expected to Queryable. 
+ + ## Examples + + iex> escape(quote(do: MySchema), __ENV__) + {MySchema, []} + + iex> escape(quote(do: p in posts), __ENV__) + {quote(do: posts), [p: 0]} + + iex> escape(quote(do: p in {"posts", MySchema}), __ENV__) + {{"posts", MySchema}, [p: 0]} + + iex> escape(quote(do: [p, q] in posts), __ENV__) + {quote(do: posts), [p: 0, q: 1]} + + iex> escape(quote(do: [_, _] in abc), __ENV__) + {quote(do: abc), [_: 0, _: 1]} + + iex> escape(quote(do: other), __ENV__) + {quote(do: other), []} + + iex> escape(quote(do: x() in other), __ENV__) + ** (Ecto.Query.CompileError) binding list should contain only variables or `{as, var}` tuples, got: x() + + """ + @spec escape(Macro.t(), Macro.Env.t()) :: {Macro.t(), Keyword.t()} + def escape({:in, _, [var, query]}, env) do + query = escape_source(query, env) + Builder.escape_binding(query, List.wrap(var), env) + end + + def escape(query, env) do + query = escape_source(query, env) + {query, []} + end + + defp escape_source(query, env) do + case Macro.expand_once(query, env) do + {:fragment, _, _} = fragment -> + {fragment, {params, _acc}} = Builder.escape(fragment, :any, {[], %{}}, [], env) + {fragment, Builder.escape_params(params)} + + {:values, _, [values_list, types]} -> + prelude = quote do: values = Ecto.Query.Values.new(unquote(values_list), unquote(types)) + types = quote do: values.types + num_rows = quote do: values.num_rows + params = quote do: Ecto.Query.Builder.escape_params(values.params) + {{:{}, [], [:values, [], [types, num_rows]]}, prelude, params} + + ^query -> + case query do + {left, right} -> {left, Macro.expand(right, env)} + _ -> query + end + + other -> + escape_source(other, env) + end + end + + @typep hints :: [String.t() | Macro.t()] + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t(), Macro.Env.t(), atom, {:ok, Ecto.Schema.prefix | nil} | nil, hints) :: + {Macro.t(), Keyword.t(), non_neg_integer | nil} + def build(query, env, as, prefix, hints) do + hints = Enum.map(hints, &hint!(&1)) + + prefix = case prefix do + nil -> nil + {:ok, prefix} when is_binary(prefix) or is_nil(prefix) -> {:ok, prefix} + {:ok, {:^, _, [prefix]}} -> {:ok, prefix} + {:ok, prefix} -> Builder.error!("`prefix` must be a compile time string or an interpolated value using ^, got: #{Macro.to_string(prefix)}") + end + + as = case as do + {:^, _, [as]} -> as + as when is_atom(as) -> as + as -> Builder.error!("`as` must be a compile time atom or an interpolated value using ^, got: #{Macro.to_string(as)}") + end + + {query, binds} = escape(query, env) + + case query do + schema when is_atom(schema) -> + # Get the source at runtime so no unnecessary compile time + # dependencies between modules are added + source = quote(do: unquote(schema).__schema__(:source)) + {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} + {query(prefix, {source, schema}, [], as, hints, env.file, env.line), binds, 1} + + source when is_binary(source) -> + {:ok, prefix} = prefix || {:ok, nil} + # When a binary is used, there is no schema + {query(prefix, {source, nil}, [], as, hints, env.file, env.line), binds, 1} + + {source, schema} when is_binary(source) and is_atom(schema) -> + {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} + {query(prefix, {source, schema}, [], as, hints, env.file, env.line), binds, 1} + + {{:{}, _, [:fragment, _, _]} = fragment, params} -> + {:ok, prefix} = prefix || {:ok, nil} + {query(prefix, fragment, params, as, hints, env.file, env.line), binds, 1} + + {{:{}, _, [:values, _, _]} = values, prelude, params} -> + {:ok, prefix} = prefix || {:ok, nil} + query = query(prefix, values, params, as, hints, env.file, env.line) + + quoted = + quote do + unquote(prelude) + unquote(query) + end + + 
{quoted, binds, 1} + + _other -> + quoted = + quote do + Ecto.Query.Builder.From.apply(unquote(query), unquote(length(binds)), unquote(as), unquote(prefix), unquote(hints)) + end + + {quoted, binds, nil} + end + end + + defp query(prefix, source, params, as, hints, file, line) do + aliases = if as, do: [{as, 0}], else: [] + from_fields = [source: source, params: params, as: as, prefix: prefix, hints: hints, file: file, line: line] + + query_fields = [ + from: {:%, [], [Ecto.Query.FromExpr, {:%{}, [], from_fields}]}, + aliases: {:%{}, [], aliases} + ] + + {:%, [], [Ecto.Query, {:%{}, [], query_fields}]} + end + + @doc """ + Validates hints at compile time and runtime + """ + def hint!(hint) when is_binary(hint), do: hint + + def hint!({:unsafe_fragment, _, [fragment]}) do + case fragment do + {:^, _, [value]} -> + quote do: Ecto.Query.Builder.From.hint!(unquote(value)) + + _ -> + Builder.error!( + "`unsafe_fragment/1` in `hints` expects an interpolated value, such as " <> + "unsafe_fragment(^value), got: `#{Macro.to_string(fragment)}`" + ) + end + end + + def hint!(other) do + Builder.error!( + "`hints` must be a compile time string, unsafe fragment of the form `unsafe_fragment(^...)`, " <> + "or list containing either, got: `#{Macro.to_string(other)}`" + ) + end + + @doc """ + The callback applied by `build/2` to build the query. 
+ """ + @spec apply(Ecto.Queryable.t(), non_neg_integer, Macro.t(), {:ok, Ecto.Schema.prefix} | nil, hints) :: Ecto.Query.t() + def apply(query, binds, as, prefix, hints) do + query = + query + |> Ecto.Queryable.to_query() + |> maybe_apply_as(as) + |> maybe_apply_prefix(prefix) + |> maybe_apply_hints(hints) + + check_binds(query, binds) + query + end + + defp maybe_apply_as(query, nil), do: query + + defp maybe_apply_as(%{from: %{as: from_as}}, as) when not is_nil(from_as) do + Builder.error!( + "can't apply alias `#{inspect(as)}`, binding in `from` is already aliased to `#{inspect(from_as)}`" + ) + end + + defp maybe_apply_as(%{from: from, aliases: aliases} = query, as) do + if Map.has_key?(aliases, as) do + Builder.error!("alias `#{inspect(as)}` already exists") + else + %{query | aliases: Map.put(aliases, as, 0), from: %{from | as: as}} + end + end + + defp maybe_apply_prefix(query, nil), do: query + + defp maybe_apply_prefix(query, {:ok, prefix}) do + update_in query.from.prefix, fn + nil -> + prefix + + from_prefix -> + Builder.error!( + "can't apply prefix `#{inspect(prefix)}`, `from` is already prefixed to `#{inspect(from_prefix)}`" + ) + end + end + + defp maybe_apply_hints(query, []), do: query + defp maybe_apply_hints(query, hints), do: update_in(query.from.hints, &(&1 ++ hints)) + + defp check_binds(query, count) do + if count > 1 and count > Builder.count_binds(query) do + Builder.error!( + "`from` in query expression specified #{count} " <> + "binds but query contains #{Builder.count_binds(query)} binds" + ) + end + end +end diff --git a/deps/ecto/lib/ecto/query/builder/group_by.ex b/deps/ecto/lib/ecto/query/builder/group_by.ex new file mode 100644 index 0000000..0965d11 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/group_by.ex @@ -0,0 +1,118 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.GroupBy do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. 
+ + See `Ecto.Builder.escape/2`. + + iex> escape(:group_by, quote do [x.x, 13] end, {[], %{}}, [x: 0], __ENV__) + {[{:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + 13], + {[], %{}}} + """ + @spec escape(:group_by | :partition_by, Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: + {Macro.t, {list, term}} + def escape(kind, expr, params_acc, vars, env) do + expr + |> List.wrap + |> Enum.map_reduce(params_acc, &do_escape(&1, &2, kind, vars, env)) + end + + defp do_escape({:^, _, [expr]}, params_acc, kind, _vars, _env) do + {quote(do: Ecto.Query.Builder.GroupBy.field!(unquote(kind), unquote(expr))), params_acc} + end + + defp do_escape(field, params_acc, _kind, _vars, _env) when is_atom(field) do + {Macro.escape(to_field(field)), params_acc} + end + + defp do_escape(expr, params_acc, _kind, vars, env) do + Builder.escape(expr, :any, params_acc, vars, env) + end + + @doc """ + Called at runtime to verify a field. + """ + def field!(_kind, field) when is_atom(field), + do: to_field(field) + def field!(kind, other) do + raise ArgumentError, + "expected a field as an atom in `#{kind}`, got: `#{inspect other}`" + end + + @doc """ + Shared between group_by and partition_by. 
+ """ + def group_or_partition_by!(kind, query, exprs, params) do + {expr, {params, _, subqueries}} = + Enum.map_reduce(List.wrap(exprs), {params, length(params), []}, fn + field, params_count when is_atom(field) -> + {to_field(field), params_count} + + %Ecto.Query.DynamicExpr{} = dynamic, {params, count, subqueries} -> + {expr, params, subqueries, _aliases, count} = Builder.Dynamic.partially_expand(query, dynamic, params, subqueries, %{}, count) + {expr, {params, count, subqueries}} + + other, _params_count -> + raise ArgumentError, + "expected a list of fields and dynamics in `#{kind}`, got: `#{inspect other}`" + end) + + {expr, params, subqueries} + end + + defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []} + + @doc """ + Called at runtime to assemble group_by. + """ + def group_by!(query, group_by, file, line) do + {expr, params, subqueries} = group_or_partition_by!(:group_by, query, group_by, []) + expr = %Ecto.Query.ByExpr{expr: expr, params: Enum.reverse(params), line: line, file: file, subqueries: subqueries} + apply(query, expr) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.GroupBy.group_by!(unquote(query), unquote(var), unquote(env.file), unquote(env.line)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, acc}} = escape(:group_by, expr, {[], %{subqueries: []}}, binding, env) + params = Builder.escape_params(params) + + group_by = quote do: %Ecto.Query.ByExpr{ + expr: unquote(expr), + params: unquote(params), + subqueries: unquote(acc.subqueries), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [group_by], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{group_bys: group_bys} = query, expr) do + %{query | group_bys: group_bys ++ [expr]} + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/join.ex b/deps/ecto/lib/ecto/query/builder/join.ex new file mode 100644 index 0000000..65e88da --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/join.ex @@ -0,0 +1,426 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Join do + @moduledoc false + + alias Ecto.Query.Builder + alias Ecto.Query.{JoinExpr, QueryExpr} + + @doc """ + Escapes a join expression (not including the `on` expression). + + It returns a tuple containing the binds, the on expression (if available) + and the association expression. 
+ + ## Examples + + iex> escape(quote(do: x in "foo"), [], __ENV__) + {:x, {"foo", nil}, nil, nil, []} + + iex> escape(quote(do: "foo"), [], __ENV__) + {:_, {"foo", nil}, nil, nil, []} + + iex> escape(quote(do: x in Sample), [], __ENV__) + {:x, {nil, Sample}, nil, nil, []} + + iex> escape(quote(do: x in __MODULE__), [], __ENV__) + {:x, {nil, __MODULE__}, nil, nil, []} + + iex> escape(quote(do: x in {"foo", :sample}), [], __ENV__) + {:x, {"foo", :sample}, nil, nil, []} + + iex> escape(quote(do: x in {"foo", Sample}), [], __ENV__) + {:x, {"foo", Sample}, nil, nil, []} + + iex> escape(quote(do: x in {"foo", __MODULE__}), [], __ENV__) + {:x, {"foo", __MODULE__}, nil, nil, []} + + iex> escape(quote(do: c in assoc(p, :comments)), [p: 0], __ENV__) + {:c, nil, {0, :comments}, nil, []} + + iex> escape(quote(do: x in fragment("foo")), [], __ENV__) + {:x, {:{}, [], [:fragment, [], [raw: "foo"]]}, nil, nil, []} + + """ + @spec escape(Macro.t(), Keyword.t(), Macro.Env.t()) :: + {atom, Macro.t() | nil, Macro.t() | nil, list} + def escape({:in, _, [{var, _, context}, expr]}, vars, env) + when is_atom(var) and is_atom(context) do + {_, expr, assoc, prelude, params} = escape(expr, vars, env) + {var, expr, assoc, prelude, params} + end + + def escape({:subquery, _, [expr]}, _vars, _env) do + {:_, quote(do: Ecto.Query.subquery(unquote(expr))), nil, nil, []} + end + + def escape({:subquery, _, [expr, opts]}, _vars, _env) do + {:_, quote(do: Ecto.Query.subquery(unquote(expr), unquote(opts))), nil, nil, []} + end + + def escape({:fragment, _, [_ | _]} = expr, vars, env) do + {expr, {params, _acc}} = Builder.escape(expr, :any, {[], %{}}, vars, env) + {:_, expr, nil, nil, params} + end + + def escape({:values, _, [values_list, types]}, _vars, _env) do + prelude = quote do: values = Ecto.Query.Values.new(unquote(values_list), unquote(types)) + types = quote do: values.types + num_rows = quote do: values.num_rows + params = quote do: values.params + {:_, {:{}, [], [:values, [], [types, 
num_rows]]}, nil, prelude, params} + end + + def escape({string, schema} = join, _vars, env) when is_binary(string) do + case Macro.expand(schema, env) do + schema when is_atom(schema) -> + {:_, {string, schema}, nil, nil, []} + + _ -> + Builder.error!("malformed join `#{Macro.to_string(join)}` in query expression") + end + end + + def escape({:assoc, _, [{var, _, context}, field]}, vars, _env) + when is_atom(var) and is_atom(context) do + ensure_field!(field) + var = Builder.find_var!(var, vars) + field = Builder.quoted_atom!(field, "field/2") + {:_, nil, {var, field}, nil, []} + end + + def escape({:^, _, [expr]}, _vars, _env) do + {:_, quote(do: Ecto.Query.Builder.Join.join!(unquote(expr))), nil, nil, []} + end + + def escape(string, _vars, _env) when is_binary(string) do + {:_, {string, nil}, nil, nil, []} + end + + def escape(schema, _vars, _env) when is_atom(schema) do + {:_, {nil, schema}, nil, nil, []} + end + + def escape(join, vars, env) do + case Macro.expand(join, env) do + ^join -> + Builder.error!("malformed join `#{Macro.to_string(join)}` in query expression") + + join -> + escape(join, vars, env) + end + end + + @doc """ + Called at runtime to check dynamic joins. + """ + def join!(expr) when is_atom(expr), + do: {nil, expr} + + def join!(expr) when is_binary(expr), + do: {expr, nil} + + def join!({source, module}) when is_binary(source) and is_atom(module), + do: {source, module} + + def join!(expr), + do: Ecto.Queryable.to_query(expr) + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build( + Macro.t(), + atom, + [Macro.t()], + Macro.t(), + Macro.t(), + Macro.t(), + atom, + nil | {:ok, Ecto.Schema.prefix()}, + nil | String.t() | [String.t()], + Macro.Env.t() + ) :: + {Macro.t(), Keyword.t(), non_neg_integer | nil} + def build(query, qual, binding, expr, count_bind, on, as, prefix, maybe_hints, env) do + {:ok, prefix} = prefix || {:ok, nil} + hints = List.wrap(maybe_hints) + + unless Enum.all?(hints, &is_binary/1) do + Builder.error!( + "`hints` must be a compile time string or list of strings, " <> + "got: `#{Macro.to_string(maybe_hints)}`" + ) + end + + prefix = + case prefix do + nil -> + nil + + prefix when is_binary(prefix) -> + prefix + + {:^, _, [prefix]} -> + prefix + + prefix -> + Builder.error!( + "`prefix` must be a compile time string or an interpolated value using ^, got: #{Macro.to_string(prefix)}" + ) + end + + as = + case as do + {:^, _, [as]} -> + as + + as when is_atom(as) -> + as + + as -> + Builder.error!( + "`as` must be a compile time atom or an interpolated value using ^, got: #{Macro.to_string(as)}" + ) + end + + {query, binding} = Builder.escape_binding(query, binding, env) + {join_bind, join_source, join_assoc, join_prelude, join_params} = escape(expr, binding, env) + join_params = escape_params(join_params) + + join_qual = validate_qual(qual) + validate_bind(join_bind, binding) + + {count_bind, query} = + if is_nil(count_bind) do + var = Macro.unique_var(:query, __MODULE__) + + query = + quote do + unquote(var) = Ecto.Queryable.to_query(unquote(query)) + end + + {quote(do: Builder.count_binds(unquote(var))), query} + else + {count_bind, query} + end + + binding = binding ++ [{join_bind, count_bind}] + + next_bind = + if is_integer(count_bind) do + count_bind + 1 + else + quote(do: unquote(count_bind) + 1) + end + + join = [ + as: as, + assoc: join_assoc, + file: env.file, + line: env.line, + params: join_params, + prefix: prefix, + qual: join_qual, + source: join_source, + hints: hints + ] + + on = 
ensure_on(on, join_assoc, join_qual, join_source, env) + query = build_on(on, join_prelude, join, as, query, binding, count_bind, env) + {query, binding, next_bind} + end + + defp escape_params(params) when is_list(params) do + Builder.escape_params(params) + end + + defp escape_params(params) do + quote do: Builder.escape_params(unquote(params)) + end + + def build_on({:^, _, [var]}, join_prelude, join, as, query, _binding, count_bind, env) do + quote do + query = unquote(query) + unquote(join_prelude) + + Ecto.Query.Builder.Join.join!( + query, + %JoinExpr{unquote_splicing(join), on: %QueryExpr{}}, + unquote(var), + unquote(as), + unquote(count_bind), + unquote(env.file), + unquote(env.line) + ) + end + end + + def build_on(on, join_prelude, join, as, query, binding, count_bind, env) do + case Ecto.Query.Builder.Filter.escape(:on, on, count_bind, binding, env) do + {_on_expr, {_on_params, %{subqueries: [_ | _]}}} -> + raise ArgumentError, "invalid expression for join `:on`, subqueries aren't supported" + + {on_expr, {on_params, _acc}} -> + on_params = Builder.escape_params(on_params) + + join = + quote do + unquote(join_prelude) + + %JoinExpr{ + unquote_splicing(join), + on: %QueryExpr{ + expr: unquote(on_expr), + params: unquote(on_params), + line: unquote(env.line), + file: unquote(env.file) + } + } + end + + Builder.apply_query(query, __MODULE__, [join, as, count_bind], env) + end + end + + defp ensure_on(on, _assoc, _qual, _source, _env) when on != nil, do: on + + defp ensure_on(nil, _assoc = nil, qual, source, env) + when qual not in [:cross, :cross_lateral] do + maybe_source = + with {source, alias} <- source, + source when source != nil <- source || alias do + " on #{inspect(source)}" + else + _ -> "" + end + + stacktrace = Macro.Env.stacktrace(env) + IO.warn("missing `:on` in join#{maybe_source}, defaulting to `on: true`.", stacktrace) + + true + end + + defp ensure_on(nil, _assoc, _qual, _source, _env), do: true + + @doc """ + Applies the join expression 
to the query. + """ + def apply(%Ecto.Query{joins: joins} = query, expr, nil, _count_bind) do + %{query | joins: joins ++ [expr]} + end + + def apply(%Ecto.Query{joins: joins, aliases: aliases} = query, expr, as, count_bind) do + aliases = + case aliases do + %{} -> runtime_aliases(aliases, as, count_bind) + _ -> compile_aliases(aliases, as, count_bind) + end + + %{query | joins: joins ++ [expr], aliases: aliases} + end + + def apply(query, expr, as, count_bind) do + apply(Ecto.Queryable.to_query(query), expr, as, count_bind) + end + + @doc """ + Called at runtime to build aliases. + """ + def runtime_aliases(aliases, nil, _), do: aliases + + def runtime_aliases(aliases, name, join_count) when is_integer(join_count) do + if Map.has_key?(aliases, name) do + Builder.error!("alias `#{inspect(name)}` already exists") + else + Map.put(aliases, name, join_count) + end + end + + defp compile_aliases({:%{}, meta, aliases}, name, join_count) + when is_atom(name) and is_integer(join_count) do + {:%{}, meta, aliases |> Map.new() |> runtime_aliases(name, join_count) |> Map.to_list()} + end + + defp compile_aliases(aliases, name, join_count) do + quote do + Ecto.Query.Builder.Join.runtime_aliases( + unquote(aliases), + unquote(name), + unquote(join_count) + ) + end + end + + @doc """ + Called at runtime to build a join. 
+ """ + def join!(query, join, expr, as, count_bind, file, line) do + # join without expanded :on is built and applied to the query, + # so that expansion of dynamic :on accounts for the new binding + {on_expr, on_params, on_file, on_line} = + Ecto.Query.Builder.Filter.filter!( + :on, + apply(query, join, as, count_bind), + expr, + count_bind, + file, + line + ) + + join = %{ + join + | on: %QueryExpr{expr: on_expr, params: on_params, line: on_line, file: on_file} + } + + apply(query, join, as, count_bind) + end + + defp validate_qual(qual) when is_atom(qual) do + qual!(qual) + end + + defp validate_qual(qual) do + quote(do: Ecto.Query.Builder.Join.qual!(unquote(qual))) + end + + defp validate_bind(bind, all) do + if bind != :_ and bind in all do + Builder.error!("variable `#{bind}` is already defined in query") + end + end + + @qualifiers [ + :inner, + :inner_lateral, + :left, + :left_lateral, + :right, + :full, + :cross, + :cross_lateral + ] + + @doc """ + Called at runtime to check dynamic qualifier. + """ + def qual!(qual) when qual in @qualifiers, do: qual + + def qual!(qual) do + raise ArgumentError, + "invalid join qualifier `#{inspect(qual)}`, accepted qualifiers are: " <> + Enum.map_join(@qualifiers, ", ", &"`#{inspect(&1)}`") + end + + defp ensure_field!({var, _, _}) when var != :^ do + Builder.error!( + "you passed the variable `#{var}` to `assoc/2`. Did you mean to pass the atom `:#{var}`?" + ) + end + + defp ensure_field!(_), do: true +end diff --git a/deps/ecto/lib/ecto/query/builder/limit_offset.ex b/deps/ecto/lib/ecto/query/builder/limit_offset.ex new file mode 100644 index 0000000..310d2a5 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/limit_offset.ex @@ -0,0 +1,111 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.LimitOffset do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Validates `with_ties` at runtime. 
+ """ + @spec with_ties!(any) :: boolean + def with_ties!(with_ties) when is_boolean(with_ties), do: with_ties + + def with_ties!(with_ties), + do: raise("`with_ties` expression must evaluate to a boolean at runtime, got: `#{inspect(with_ties)}`") + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(:limit | :with_ties | :offset, Macro.t(), [Macro.t()], Macro.t(), Macro.Env.t()) :: + Macro.t() + def build(type, query, binding, expr, env) do + {query, vars} = Builder.escape_binding(query, binding, env) + {expr, {params, _acc}} = escape(type, expr, {[], %{}}, vars, env) + params = Builder.escape_params(params) + quoted = build_quoted(type, expr, params, env) + + Builder.apply_query(query, __MODULE__, [type, quoted], env) + end + + defp escape(type, expr, params_acc, vars, env) when type in [:limit, :offset] do + Builder.escape(expr, :integer, params_acc, vars, env) + end + + defp escape(:with_ties, expr, params_acc, _vars, _env) when is_boolean(expr) do + {expr, params_acc} + end + + defp escape(:with_ties, {:^, _, [expr]}, params_acc, _vars, _env) do + {quote(do: Ecto.Query.Builder.LimitOffset.with_ties!(unquote(expr))), params_acc} + end + + defp escape(:with_ties, expr, _params_acc, _vars, _env) do + Builder.error!( + "`with_ties` expression must be a compile time boolean or an interpolated value using ^, got: `#{Macro.to_string(expr)}`" + ) + end + + defp build_quoted(:limit, expr, params, env) do + quote do: %Ecto.Query.LimitExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line) + } + end + + defp build_quoted(:offset, expr, params, env) do + quote do: %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line) + } + end + + defp build_quoted(:with_ties, expr, _params, _env), do: expr + + 
@doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t(), :limit | :with_ties | :offset, term) :: Ecto.Query.t() + def apply(%Ecto.Query{} = query, :limit, expr) do + %{query | limit: expr} + end + + def apply(%Ecto.Query{limit: limit} = query, :with_ties, expr) do + %{query | limit: apply_limit(limit, expr)} + end + + def apply(%Ecto.Query{} = query, :offset, expr) do + %{query | offset: expr} + end + + def apply(query, kind, expr) do + apply(Ecto.Queryable.to_query(query), kind, expr) + end + + @doc """ + Applies the `with_ties` value to the `limit` struct. + """ + def apply_limit(nil, _with_ties) do + Builder.error!("`with_ties` can only be applied to queries containing a `limit`") + end + + # Runtime + def apply_limit(%_{} = limit, with_ties) do + %{limit | with_ties: with_ties} + end + + # Compile + def apply_limit(limit, with_ties) do + quote do + Ecto.Query.Builder.LimitOffset.apply_limit(unquote(limit), unquote(with_ties)) + end + end +end diff --git a/deps/ecto/lib/ecto/query/builder/lock.ex b/deps/ecto/lib/ecto/query/builder/lock.ex new file mode 100644 index 0000000..ce99030 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/lock.ex @@ -0,0 +1,59 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Lock do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes the lock code. + + iex> escape(quote(do: "FOO"), [], __ENV__) + "FOO" + + """ + @spec escape(Macro.t(), Keyword.t, Macro.Env.t) :: Macro.t() + def escape(lock, _vars, _env) when is_binary(lock), do: lock + + def escape({:fragment, _, [_ | _]} = expr, vars, env) do + {expr, {params, _acc}} = Builder.escape(expr, :any, {[], %{}}, vars, env) + + if params != [] do + Builder.error!("value interpolation is not allowed in :lock") + end + + expr + end + + def escape(other, _, _) do + Builder.error!( + "`#{Macro.to_string(other)}` is not a valid lock. 
" <> + "For security reasons, a lock must always be a literal string or a fragment" + ) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t(), Macro.t(), Macro.t(), Macro.Env.t()) :: Macro.t() + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + Builder.apply_query(query, __MODULE__, [escape(expr, binding, env)], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t(), term) :: Ecto.Query.t() + def apply(%Ecto.Query{} = query, value) do + %{query | lock: value} + end + + def apply(query, value) do + apply(Ecto.Queryable.to_query(query), value) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/order_by.ex b/deps/ecto/lib/ecto/query/builder/order_by.ex new file mode 100644 index 0000000..25f04e0 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/order_by.ex @@ -0,0 +1,282 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.OrderBy do + @moduledoc false + + alias Ecto.Query.Builder + + @directions [ + :asc, + :asc_nulls_last, + :asc_nulls_first, + :desc, + :desc_nulls_last, + :desc_nulls_first + ] + + @doc """ + Returns `true` if term is a valid order_by direction; otherwise returns `false`. + + ## Examples + + iex> valid_direction?(:asc) + true + + iex> valid_direction?(:desc) + true + + iex> valid_direction?(:invalid) + false + + """ + def valid_direction?(term), do: term in @directions + + @doc """ + Escapes an order by query. + + The query is escaped to a list of `{direction, expression}` + pairs at runtime. Escaping also validates direction is one of + `:asc`, `:asc_nulls_last`, `:asc_nulls_first`, `:desc`, + `:desc_nulls_last` or `:desc_nulls_first`. 
+ + ## Examples + + iex> escape(:order_by, quote do [x.x, desc: 13] end, {[], %{}}, [x: 0], __ENV__) + {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + desc: 13], + {[], %{}}} + + """ + @spec escape(:order_by | :distinct, Macro.t(), {list, term}, Keyword.t(), Macro.Env.t()) :: + {Macro.t(), {list, term}} + def escape(kind, expr, params_acc, vars, env) do + expr + |> List.wrap() + |> Enum.flat_map_reduce(params_acc, &do_escape(&1, &2, kind, vars, env)) + end + + defp do_escape({dir, {:^, _, [expr]}}, params_acc, kind, _vars, _env) do + {[{quoted_dir!(kind, dir), + quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}], params_acc} + end + + defp do_escape({:^, _, [expr]}, params_acc, kind, _vars, _env) do + {[{:asc, quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}], + params_acc} + end + + defp do_escape({dir, field}, params_acc, kind, _vars, _env) when is_atom(field) do + {[{quoted_dir!(kind, dir), Macro.escape(to_field(field))}], params_acc} + end + + defp do_escape(field, params_acc, _kind, _vars, _env) when is_atom(field) do + {[{:asc, Macro.escape(to_field(field))}], params_acc} + end + + defp do_escape({dir, expr}, params_acc, kind, vars, env) do + fun = &escape_expansion(kind, &1, &2, &3, &4, &5) + {ast, params_acc} = Builder.escape(expr, :any, params_acc, vars, {get_env(env), fun}) + {[{quoted_dir!(kind, dir), ast}], params_acc} + end + + defp do_escape(expr, params_acc, kind, vars, env) do + fun = &escape_expansion(kind, &1, &2, &3, &4, &5) + {ast, params_acc} = Builder.escape(expr, :any, params_acc, vars, {get_env(env), fun}) + + if is_list(ast) do + {ast, params_acc} + else + {[{:asc, ast}], params_acc} + end + end + + defp get_env({env, _}), do: env + defp get_env(env), do: env + + defp escape_expansion(kind, expr, _type, params_acc, vars, env) when is_list(expr) do + escape(kind, expr, params_acc, vars, env) + end + + defp escape_expansion(_kind, field, _type, params_acc, 
_vars, _env) when is_atom(field) do + {Macro.escape(to_field(field)), params_acc} + end + + defp escape_expansion(kind, expr, type, params_acc, vars, env) do + fun = &escape_expansion(kind, &1, &2, &3, &4, &5) + Builder.escape(expr, type, params_acc, vars, {env, fun}) + end + + @doc """ + Checks the variable is a quoted direction at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_dir!(kind, {:^, _, [expr]}), + do: quote(do: Ecto.Query.Builder.OrderBy.dir!(unquote(kind), unquote(expr))) + + def quoted_dir!(_kind, dir) when dir in @directions, + do: dir + + def quoted_dir!(kind, other) do + Builder.error!( + "expected #{Enum.map_join(@directions, ", ", &inspect/1)} or interpolated value " <> + "in `#{kind}`, got: `#{inspect(other)}`" + ) + end + + @doc """ + Called at runtime to verify the direction. + """ + def dir!(_kind, dir) when dir in @directions, + do: dir + + def dir!(kind, other) do + raise ArgumentError, + "expected one of #{Enum.map_join(@directions, ", ", &inspect/1)} " <> + "in `#{kind}`, got: `#{inspect(other)}`" + end + + @doc """ + Called at runtime to verify a field. + """ + def field!(_kind, field) when is_atom(field) do + to_field(field) + end + + def field!(kind, %Ecto.Query.DynamicExpr{} = dynamic_expression) do + raise ArgumentError, + "expected a field as an atom in `#{kind}`, got: `#{inspect(dynamic_expression)}`. " <> + "To use dynamic expressions, you need to interpolate at root level, as in: " <> + "`^[asc: dynamic, desc: :id]`" + end + + def field!(kind, other) do + raise ArgumentError, "expected a field as an atom in `#{kind}`, got: `#{inspect(other)}`" + end + + defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []} + + @doc """ + Shared between order_by and distinct. 
+ """ + def order_by_or_distinct!(kind, query, exprs, params) do + {expr, {params, _, subqueries}} = + Enum.map_reduce(List.wrap(exprs), {params, length(params), []}, fn + {dir, expr}, params_count when dir in @directions -> + {expr, params} = dynamic_or_field!(kind, expr, query, params_count) + {{dir, expr}, params} + + expr, params_count -> + {expr, params} = dynamic_or_field!(kind, expr, query, params_count) + {{:asc, expr}, params} + end) + + {expr, params, subqueries} + end + + @doc """ + Called at runtime to assemble order_by. + """ + def order_by!(query, exprs, op, file, line) do + {expr, params, subqueries} = order_by_or_distinct!(:order_by, query, exprs, []) + + expr = %Ecto.Query.ByExpr{ + expr: expr, + params: Enum.reverse(params), + line: line, + file: file, + subqueries: subqueries + } + + apply(query, expr, op) + end + + defp dynamic_or_field!( + _kind, + %Ecto.Query.DynamicExpr{} = dynamic, + query, + {params, count, subqueries} + ) do + {expr, params, subqueries, _aliases, count} = + Ecto.Query.Builder.Dynamic.partially_expand( + query, + dynamic, + params, + subqueries, + %{}, + count + ) + + {expr, {params, count, subqueries}} + end + + defp dynamic_or_field!(_kind, field, _query, params_count) when is_atom(field) do + {to_field(field), params_count} + end + + defp dynamic_or_field!(kind, other, _query, _params_count) do + raise ArgumentError, + "`#{kind}` interpolated on root expects a field or a keyword list " <> + "with the direction as keys and fields or dynamics as values, got: `#{inspect(other)}`" + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t(), [Macro.t()], Macro.t(), :append | :prepend, Macro.Env.t()) :: Macro.t() + def build(query, _binding, {:^, _, [var]}, op, env) do + quote do + Ecto.Query.Builder.OrderBy.order_by!( + unquote(query), + unquote(var), + unquote(op), + unquote(env.file), + unquote(env.line) + ) + end + end + + def build(query, binding, expr, op, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, acc}} = escape(:order_by, expr, {[], %{subqueries: []}}, binding, env) + params = Builder.escape_params(params) + + order_by = + quote do: %Ecto.Query.ByExpr{ + expr: unquote(expr), + params: unquote(params), + subqueries: unquote(acc.subqueries), + file: unquote(env.file), + line: unquote(env.line) + } + + Builder.apply_query(query, __MODULE__, [order_by, op], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t(), term, term) :: Ecto.Query.t() + def apply(%Ecto.Query{order_bys: orders} = query, expr, op) do + %{query | order_bys: update_order_bys(orders, expr, op)} + end + + def apply(query, expr, op) do + apply(Ecto.Queryable.to_query(query), expr, op) + end + + @doc """ + Updates the `order_bys` value for a query. + """ + def update_order_bys(orders, expr, :append), do: orders ++ [expr] + def update_order_bys(orders, expr, :prepend), do: [expr | orders] + + def update_order_bys(orders, expr, mode) do + quote do + Ecto.Query.Builder.OrderBy.update_order_bys(unquote(orders), unquote(expr), unquote(mode)) + end + end +end diff --git a/deps/ecto/lib/ecto/query/builder/preload.ex b/deps/ecto/lib/ecto/query/builder/preload.ex new file mode 100644 index 0000000..4b7455a --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/preload.ex @@ -0,0 +1,337 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.Preload do + @moduledoc false + alias Ecto.Query.Builder + + @doc """ + Escapes a preload. 
+ + A preload may be an atom, a list of atoms or a keyword list + nested as a rose tree. + + iex> escape(:foo, []) + {[:foo], []} + + iex> escape([foo: :bar], []) + {[foo: [:bar]], []} + + iex> escape([:foo, :bar], []) + {[:foo, :bar], []} + + iex> escape([foo: [:bar, bar: :bat]], []) + {[foo: [:bar, bar: [:bat]]], []} + + iex> escape([foo: {:^, [], ["external"]}], []) + {[foo: "external"], []} + + iex> escape([foo: [:bar, {:^, [], ["external"]}], baz: :bat], []) + {[foo: [:bar, "external"], baz: [:bat]], []} + + iex> escape([foo: {:c, [], nil}], [c: 1]) + {[], [foo: {1, []}]} + + iex> escape([foo: {{:c, [], nil}, bar: {:l, [], nil}}], [c: 1, l: 2]) + {[], [foo: {1, [bar: {2, []}]}]} + + iex> escape([foo: {:c, [], nil}, bar: {:l, [], nil}], [c: 1, l: 2]) + {[], [foo: {1, []}, bar: {2, []}]} + + iex> escape([foo: {{:c, [], nil}, :bar}], [c: 1]) + {[foo: [:bar]], [foo: {1, []}]} + + iex> escape([foo: [bar: {:c, [], nil}]], [c: 1]) + ** (Ecto.Query.CompileError) cannot preload join association `:bar` with binding `c` because parent preload is not a join association + + """ + @spec escape(Macro.t(), Keyword.t()) :: {[Macro.t()], [Macro.t()]} + def escape(preloads, vars) do + {preloads, assocs} = escape(preloads, :both, [], [], vars) + {Enum.reverse(preloads), Enum.reverse(assocs)} + end + + defp escape(atom, _mode, preloads, assocs, _vars) when is_atom(atom) do + {[atom | preloads], assocs} + end + + defp escape(list, mode, preloads, assocs, vars) when is_list(list) do + Enum.reduce(list, {preloads, assocs}, fn item, acc -> + escape_each(item, mode, acc, vars) + end) + end + + defp escape({:^, _, [inner]}, _mode, preloads, assocs, _vars) do + {[inner | preloads], assocs} + end + + defp escape(other, _mode, _preloads, _assocs, _vars) do + Builder.error!( + "`#{Macro.to_string(other)}` is not a valid preload expression. " <> + "preload expects an atom, a list of atoms or a keyword list with " <> + "more preloads as values. 
Use ^ on the outermost preload to interpolate a value" + ) + end + + defp escape_each({key, {:^, _, [inner]}}, _mode, {preloads, assocs}, _vars) do + key = escape_key(key) + {[{key, inner} | preloads], assocs} + end + + defp escape_each({key, {var, _, context}}, mode, {preloads, assocs}, vars) + when is_atom(context) do + assert_assoc!(mode, key, var) + key = escape_key(key) + idx = Builder.find_var!(var, vars) + {preloads, [{key, {idx, []}} | assocs]} + end + + defp escape_each({key, {{var, _, context}, list}}, mode, {preloads, assocs}, vars) + when is_atom(context) do + assert_assoc!(mode, key, var) + key = escape_key(key) + idx = Builder.find_var!(var, vars) + {inner_preloads, inner_assocs} = escape(list, :assoc, [], [], vars) + assocs = [{key, {idx, Enum.reverse(inner_assocs)}} | assocs] + + case inner_preloads do + [] -> {preloads, assocs} + _ -> {[{key, Enum.reverse(inner_preloads)} | preloads], assocs} + end + end + + defp escape_each({key, list}, _mode, {preloads, assocs}, vars) do + key = escape_key(key) + {inner_preloads, []} = escape(list, :preload, [], [], vars) + {[{key, Enum.reverse(inner_preloads)} | preloads], assocs} + end + + defp escape_each(other, mode, {preloads, assocs}, vars) do + escape(other, mode, preloads, assocs, vars) + end + + defp escape_key(atom) when is_atom(atom) do + atom + end + + defp escape_key({:^, _, [expr]}) do + quote(do: Ecto.Query.Builder.Preload.key!(unquote(expr))) + end + + defp escape_key(other) do + Builder.error!("malformed key in preload `#{Macro.to_string(other)}` in query expression") + end + + @doc """ + Called at runtime to check dynamic preload keys. + """ + def key!(key) when is_atom(key), + do: key + + def key!(key) do + raise ArgumentError, + "expected key in preload to be an atom, got: `#{inspect(key)}`" + end + + @doc """ + Applies the preloaded value into the query. + + The quoted expression should evaluate to a query at runtime. 
+ If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t(), [Macro.t()], Macro.t(), Macro.Env.t()) :: Macro.t() + def build(query, _binding, {:^, _, [expr]}, _env) do + quote do + Ecto.Query.Builder.Preload.preload!(unquote(query), unquote(expr)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {preloads, assocs} = escape(expr, binding) + Builder.apply_query(query, __MODULE__, [Enum.reverse(preloads), Enum.reverse(assocs)], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t(), term, term) :: Ecto.Query.t() + def apply(%Ecto.Query{preloads: p, assocs: a} = query, preloads, assocs) do + %{query | preloads: p ++ preloads, assocs: a ++ assocs} + end + + def apply(query, preloads, assocs) do + apply(Ecto.Queryable.to_query(query), preloads, assocs) + end + + @doc """ + Called at runtime to assemble preload. + """ + def preload!(query, preload) do + {preloads, assocs} = expand(preload, query) + apply(query, preloads, assocs) + end + + @doc """ + Expands preloads at runtime. + + ## Examples + + iex> expand(:foo, []) + {[:foo], []} + + iex> expand([foo: :bar], []) + {[foo: :bar], []} + + iex> expand([:foo, :bar], []) + {[:foo, :bar], []} + + iex> expand([foo: [:bar, bar: :bat]], []) + {[foo: [:bar, bar: :bat]], []} + + iex> expand([:a, :b, c: [:d]], []) + {[:a, :b, c: [:d]], []} + + iex> expand([foo: ["external"]], []) + ** (ArgumentError) `"external"` is not a valid preload expression, expected an atom or a list. 
+ + iex> require Ecto.Query + iex> expand([b: Ecto.Query.dynamic([_a, b], b)], Ecto.Query.from(a in "a", join: b in "b", on: true)) + {[], [b: {1, []}]} + + iex> require Ecto.Query + iex> expand( + ...> [b: {Ecto.Query.dynamic([_a, b], b), c: Ecto.Query.dynamic([_a, _b, c], c)}], + ...> Ecto.Query.from(a in "a", join: b in "b", on: true, join: c in "c", on: true) + ...> ) + {[], [b: {1, [c: {2, []}]}]} + """ + def expand(preloads, query) do + expand(preloads, query, :both, [], []) + end + + defp expand(atom, _query, _mode, preloads, assocs) when is_atom(atom) do + {[atom | preloads], assocs} + end + + defp expand(list, query, mode, preloads, assocs) when is_list(list) do + {preloads, assocs} = + Enum.reduce(list, {preloads, assocs}, fn item, acc -> + expand_each(item, query, mode, acc) + end) + + {Enum.reverse(preloads), Enum.reverse(assocs)} + end + + defp expand(other, _query, _mode, _preloads, _assocs) do + raise ArgumentError, + "`#{inspect(other)}` is not a valid preload expression, " <> + "expected an atom or a list." 
+ end + + defp expand_each(atom, _query, _mode, {preloads, assocs}) when is_atom(atom) do + {[atom | preloads], assocs} + end + + defp expand_each({key, atom}, _query, _mode, {preloads, assocs}) when is_atom(atom) do + assert_key!(key) + + {[{key, atom} | preloads], assocs} + end + + defp expand_each({key, %Ecto.Query.DynamicExpr{} = dynamic}, query, mode, {preloads, assocs}) do + assert_key!(key) + assert_assoc!(mode, key) + + idx = expand_dynamic(dynamic, query) + {preloads, [{key, {idx, []}} | assocs]} + end + + defp expand_each( + {key, {%Ecto.Query.DynamicExpr{} = dynamic, inner}}, + query, + mode, + {preloads, assocs} + ) do + assert_key!(key) + assert_assoc!(mode, key) + + idx = expand_dynamic(dynamic, query) + {inner_preloads, inner_assocs} = expand(inner, query, :assoc, [], []) + assocs = [{key, {idx, inner_assocs}} | assocs] + + case inner_preloads do + [] -> {preloads, assocs} + _ -> {[{key, inner_preloads} | preloads], assocs} + end + end + + defp expand_each({key, {query_or_fun, inner}}, query, _mode, {preloads, assocs}) do + assert_key!(key) + assert_query_or_fun!(query_or_fun, key) + + {inner_preloads, []} = expand(inner, query, :preload, [], []) + {[{key, {query_or_fun, inner_preloads}} | preloads], assocs} + end + + defp expand_each({key, list}, query, _mode, {preloads, assocs}) when is_list(list) do + assert_key!(key) + + {inner_preloads, []} = expand(list, query, :preload, [], []) + {[{key, inner_preloads} | preloads], assocs} + end + + defp expand_each({key, query_or_fun}, _query, _mode, {preloads, assocs}) do + assert_key!(key) + assert_query_or_fun!(query_or_fun, key) + + {[{key, query_or_fun} | preloads], assocs} + end + + defp expand_each(other, query, mode, {preloads, assocs}) do + expand(other, query, mode, preloads, assocs) + end + + defp expand_dynamic(%Ecto.Query.DynamicExpr{} = dynamic, query) do + case Builder.Dynamic.fully_expand(query, dynamic) do + {{:&, [], [idx]}, _, _, _, _, _} when is_integer(idx) -> + idx + + _ -> + raise 
ArgumentError, + "invalid dynamic in preload: `#{inspect(dynamic)}`. " <> + "Dynamic expressions in preload must evaluate to a single binding, as in: " <> + "`dynamic([comments: c], c)`" + end + end + + defp assert_key!(key), do: key!(key) && :ok + + defp assert_query_or_fun!(%Ecto.Query{}, _key), do: :ok + defp assert_query_or_fun!(fun, _key) when is_function(fun, 1), do: :ok + defp assert_query_or_fun!(fun, _key) when is_function(fun, 2), do: :ok + + defp assert_query_or_fun!(other, key) do + raise ArgumentError, + "invalid preload for key `#{inspect(key)}`: #{inspect(other)}. " <> + "Preloads can be a query, a function expecting one or two arguments, " <> + "or a dynamic that evaluates to a single binding" + end + + defp assert_assoc!(mode, _atom) when mode in [:both, :assoc], do: :ok + + defp assert_assoc!(_mode, atom) do + raise ArgumentError, + "cannot preload join association `#{inspect(atom)}` " <> + "because parent preload is not a join association" + end + + defp assert_assoc!(mode, _atom, _var) when mode in [:both, :assoc], do: :ok + + defp assert_assoc!(_mode, atom, var) do + Builder.error!( + "cannot preload join association `#{Macro.to_string(atom)}` with binding `#{var}` " <> + "because parent preload is not a join association" + ) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/select.ex b/deps/ecto/lib/ecto/query/builder/select.ex new file mode 100644 index 0000000..01c34ad --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/select.ex @@ -0,0 +1,579 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Select do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a select. + + It allows tuples, lists and variables at the top level. Inside the + tuples and lists query expressions are allowed. 
+ + ## Examples + + iex> escape({1, 2}, [], __ENV__) + {{:{}, [], [:{}, [], [1, 2]]}, {[], %{take: %{}, subqueries: [], aliases: %{}}}} + + iex> escape([1, 2], [], __ENV__) + {[1, 2], {[], %{take: %{}, subqueries: [], aliases: %{}}}} + + iex> escape(quote(do: x), [x: 0], __ENV__) + {{:{}, [], [:&, [], [0]]}, {[], %{take: %{}, subqueries: [], aliases: %{}}}} + + """ + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, {list, %{take: map, subqueries: list}}} + def escape(atom, _vars, _env) + when is_atom(atom) and not is_boolean(atom) and atom != nil do + Builder.error! """ + #{inspect(atom)} is not a valid query expression, :select expects a query expression or a list of fields + """ + end + + def escape(other, vars, env) do + cond do + take?(other) -> + { + {:{}, [], [:&, [], [0]]}, + {[], %{take: %{0 => {:any, Macro.expand(other, env)}}, subqueries: [], aliases: %{}}} + } + + maybe_take?(other) -> + Builder.error! """ + Cannot mix fields with interpolations, such as: `select: [:foo, ^:bar, :baz]`. \ + Instead interpolate all fields at once, such as: `select: ^[:foo, :bar, :baz]`. \ + Got: #{Macro.to_string(other)}. 
+ """ + + true -> + {expr, {params, acc}} = escape(other, {[], %{take: %{}, subqueries: [], aliases: %{}}}, vars, env) + acc = %{acc | subqueries: Enum.reverse(acc.subqueries)} + {expr, {params, acc}} + end + end + + # Tuple + defp escape({left, right}, params_acc, vars, env) do + escape({:{}, [], [left, right]}, params_acc, vars, env) + end + + # Tuple + defp escape({:{}, _, list}, params_acc, vars, env) do + {list, params_acc} = Enum.map_reduce(list, params_acc, &escape(&1, &2, vars, env)) + expr = {:{}, [], [:{}, [], list]} + {expr, params_acc} + end + + # Struct + defp escape({:%, _, [name, map]}, params_acc, vars, env) do + name = Macro.expand(name, env) + {escaped_map, params_acc} = escape(map, params_acc, vars, env) + {{:{}, [], [:%, [], [name, escaped_map]]}, params_acc} + end + + # Map + defp escape({:%{}, _, [{:|, _, [data, pairs]}]}, params_acc, vars, env) do + {escaped_data, params_acc} = escape(data, params_acc, vars, env) + {pairs, params_acc} = escape_pairs(pairs, data, params_acc, vars, env) + {{:{}, [], [:%{}, [], [{:{}, [], [:|, [], [escaped_data, pairs]]}]]}, params_acc} + end + + # Merge + defp escape({:merge, _, [left, {kind, _, _} = right]}, params_acc, vars, env) + when kind in [:%{}, :map] do + {left, params_acc} = escape(left, params_acc, vars, env) + {right, params_acc} = escape(right, params_acc, vars, env) + {{:{}, [], [:merge, [], [left, right]]}, params_acc} + end + + defp escape({:merge, _, [_left, right]}, _params_acc, _vars, _env) do + Builder.error! 
"expected the second argument of merge/2 in select to be a map, got: `#{Macro.to_string(right)}`" + end + + # Map + defp escape({:%{}, _, pairs}, params_acc, vars, env) do + {pairs, params_acc} = escape_pairs(pairs, nil, params_acc, vars, env) + {{:{}, [], [:%{}, [], pairs]}, params_acc} + end + + # List + defp escape(list, params_acc, vars, env) when is_list(list) do + Enum.map_reduce(list, params_acc, &escape(&1, &2, vars, env)) + end + + # map/struct(var, [:foo, :bar]) + defp escape({tag, _, [{var, _, context}, fields]}, {params, acc}, vars, env) + when tag in [:map, :struct] and is_atom(var) and is_atom(context) do + taken = escape_fields(fields, tag, env) + expr = Builder.escape_var!(var, vars) + acc = add_take(acc, Builder.find_var!(var, vars), {tag, taken}) + {expr, {params, acc}} + end + + # aliased values + defp escape({:selected_as, _, [expr, name]}, {params, acc}, vars, env) do + name = Builder.quoted_atom!(name, "selected_as/2") + {escaped, {params, acc}} = Builder.escape(expr, :any, {params, acc}, vars, env) + expr = {:{}, [], [:selected_as, [], [escaped, name]]} + aliases = Builder.add_select_alias(acc.aliases, name) + {expr, {params, %{acc | aliases: aliases}}} + end + + defp escape(expr, params_acc, vars, env) do + Builder.escape(expr, :any, params_acc, vars, {env, &escape_expansion/5}) + end + + defp escape_expansion(expr, _type, params_acc, vars, env) do + escape(expr, params_acc, vars, env) + end + + defp escape_pairs(pairs, update_data, params_acc, vars, env) do + Enum.map_reduce(pairs, params_acc, fn {k, v}, acc -> + v = tag_update_param(update_data, k, v) + {k, acc} = escape_key(k, acc, vars, env) + {v, acc} = escape(v, acc, vars, env) + {{k, v}, acc} + end) + end + + defp tag_update_param({var, _, context}, field, {:^, _,[_]} = param) when is_atom(var) and is_atom(context) do + {:type, [], [param, {{:., [], [{var, [], context}, field]}, [], []}]} + end + + defp tag_update_param(_, _, value), do: value + + defp escape_key(k, params_acc, _vars, 
_env) when is_atom(k) do + {k, params_acc} + end + + defp escape_key({:^, _, [k]}, params_acc, _vars, _env) do + checked = quote do: Ecto.Query.Builder.Select.map_key!(unquote(k)) + {checked, params_acc} + end + + defp escape_key(k, params_acc, vars, env) do + escape(k, params_acc, vars, env) + end + + defp escape_fields({:^, _, [interpolated]}, tag, _env) do + quote do + Ecto.Query.Builder.Select.fields!(unquote(tag), unquote(interpolated)) + end + end + defp escape_fields(expr, tag, env) do + case Macro.expand(expr, env) do + fields when is_list(fields) -> + fields + _ -> + Builder.error!( + "`#{tag}/2` in `select` expects either a literal or " <> + "an interpolated (1) list of atom fields, (2) dynamic, or " <> + "(3) map with dynamic values" + ) + end + end + + @doc """ + Called at runtime to verify a field. + """ + def fields!(tag, fields) do + if take?(fields) do + fields + else + raise ArgumentError, + "expected a list of fields in `#{tag}/2` inside `select`, got: `#{inspect fields}`" + end + end + + @doc """ + Called at runtime to verify a map key + """ + def map_key!(key) when is_binary(key), do: key + def map_key!(key) when is_integer(key), do: key + def map_key!(key) when is_float(key), do: key + def map_key!(key) when is_atom(key), do: key + + def map_key!(other) do + Builder.error!( + "interpolated map keys in `:select` can only be atoms, strings or numbers, got: #{inspect(other)}" + ) + end + + # atom list sigils + defp take?({name, _, [_, modifiers]}) when name in ~w(sigil_w sigil_W)a do + ?a in modifiers + end + + defp take?(fields) do + is_list(fields) and Enum.all?(fields, fn + {k, v} when is_atom(k) -> take?(List.wrap(v)) + k when is_atom(k) -> true + _ -> false + end) + end + + defp maybe_take?(fields) do + is_list(fields) and Enum.any?(fields, fn + {k, v} when is_atom(k) -> maybe_take?(List.wrap(v)) + k when is_atom(k) -> true + _ -> false + end) + end + + @doc """ + Called at runtime for interpolated/dynamic selects. 
+ """ + def select!(kind, query, fields, file, line) when is_map(fields) do + {expr, {params, subqueries, aliases, _count}} = expand_nested(fields, {[], [], %{}, 0}, query) + + %Ecto.Query.SelectExpr{ + expr: expr, + params: Enum.reverse(params), + subqueries: Enum.reverse(subqueries), + aliases: aliases, + file: file, + line: line + } + |> apply_or_merge(kind, query) + end + + def select!(kind, query, fields, file, line) do + take = %{0 => {:any, fields!(:select, fields)}} + + %Ecto.Query.SelectExpr{expr: {:&, [], [0]}, take: take, file: file, line: line} + |> apply_or_merge(kind, query) + end + + defp apply_or_merge(select, kind, query) do + if kind == :select do + apply(query, select) + else + merge(query, select) + end + end + + defp expand_nested(%Ecto.Query.DynamicExpr{} = dynamic, {params, subqueries, aliases, count}, query) do + {expr, params, subqueries, aliases, count} = + Ecto.Query.Builder.Dynamic.partially_expand(query, dynamic, params, subqueries, aliases, count) + + {expr, {params, subqueries, aliases, count}} + end + + defp expand_nested(%Ecto.SubQuery{} = subquery, {params, subqueries, aliases, count}, _query) do + index = length(subqueries) + # used both in ast and in parameters, as a placeholder. 
+ expr = {:subquery, index} + params = [expr | params] + subqueries = [subquery | subqueries] + count = count + 1 + + {expr, {params, subqueries, aliases, count}} + end + + defp expand_nested(%type{} = fields, acc, query) do + {fields, acc} = fields |> Map.from_struct() |> expand_nested(acc, query) + {{:%, [], [type, fields]}, acc} + end + + defp expand_nested(fields, acc, query) when is_map(fields) do + {fields, acc} = fields |> Enum.map_reduce(acc, &expand_nested_pair(&1, &2, query)) + {{:%{}, [], fields}, acc} + end + + defp expand_nested(invalid, _acc, query) when is_list(invalid) or is_tuple(invalid) do + raise Ecto.QueryError, + query: query, + message: + "Interpolated map values in :select can only be " <> + "maps, structs, dynamics, subqueries and literals. Got #{inspect(invalid)}" + end + + defp expand_nested(other, acc, _query) do + {other, acc} + end + + defp expand_nested_pair({key, val}, acc, query) do + {val, acc} = expand_nested(val, acc, query) + {{key, val}, acc} + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(:select | :merge, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + + def build(kind, query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Select.select!(unquote(kind), unquote(query), unquote(var), + unquote(env.file), unquote(env.line)) + end + end + + def build(kind, query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, acc}} = escape(expr, binding, env) + params = Builder.escape_params(params) + take = {:%{}, [], Map.to_list(acc.take)} + aliases = escape_aliases(acc.aliases) + + select = quote do: %Ecto.Query.SelectExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line), + take: unquote(take), + subqueries: unquote(acc.subqueries), + aliases: unquote(aliases)} + + if kind == :select do + Builder.apply_query(query, __MODULE__, [select], env) + else + quote do + query = unquote(query) + Builder.Select.merge(query, unquote(select)) + end + end + end + + defp escape_aliases(%{} = aliases), do: {:%{}, [], Map.to_list(aliases)} + defp escape_aliases(aliases), do: aliases + + @doc """ + The callback applied by `build/5` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{select: nil} = query, expr) do + %{query | select: expr} + end + def apply(%Ecto.Query{}, _expr) do + Builder.error! "only one select expression is allowed in query" + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end + + @doc """ + The callback applied by `build/5` when merging. 
+ """ + def merge(%Ecto.Query{select: nil} = query, new_select) do + merge(query, new_select, {:&, [], [0]}, [], [], %{}, %{}, new_select) + end + def merge(%Ecto.Query{select: old_select} = query, new_select) do + %{expr: old_expr, params: old_params, subqueries: old_subqueries, take: old_take, aliases: old_aliases} = old_select + merge(query, old_select, old_expr, old_params, old_subqueries, old_take, old_aliases, new_select) + end + def merge(query, expr) do + merge(Ecto.Queryable.to_query(query), expr) + end + + defp merge(query, select, old_expr, old_params, old_subqueries, old_take, old_aliases, new_select) do + %{expr: new_expr, params: new_params, subqueries: new_subqueries, take: new_take, aliases: new_aliases} = new_select + + new_expr = + new_expr + |> Ecto.Query.Builder.bump_interpolations(old_params) + |> Ecto.Query.Builder.bump_subqueries(old_subqueries) + + expr = + case {classify_merge(old_expr, old_take), classify_merge(new_expr, new_take)} do + {_, _} when old_expr == new_expr -> + new_expr + + {{:source, meta, ix}, {:source, _, ix}} -> + {:&, meta, [ix]} + + {{:struct, meta, name, old_fields}, {:map, _, new_fields}} when old_params == [] -> + cond do + new_fields == [] -> + old_expr + + Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) -> + {:%, meta, [name, {:%{}, meta, Keyword.merge(old_fields, new_fields)}]} + + true -> + {:merge, [], [old_expr, new_expr]} + end + + {{:map, meta, old_fields}, {:map, _, new_fields}} -> + cond do + old_fields == [] -> + new_expr + + new_fields == [] -> + old_expr + + true -> + require_distinct_keys? = old_params != [] + + case merge_map_fields(old_fields, new_fields, require_distinct_keys?) 
do + fields when is_list(fields) -> + {:%{}, meta, fields} + + :error -> + {:merge, [], [old_expr, new_expr]} + end + end + + {_, {:map, _, _}} -> + {:merge, [], [old_expr, new_expr]} + + {_, _} -> + message = """ + cannot select_merge #{merge_argument_to_error(new_expr, query)} into \ + #{merge_argument_to_error(old_expr, query)}, those select expressions \ + are incompatible. You can only select_merge: + + * a source (such as post) with another source (of the same type) + * a source (such as post) with a map + * a struct with a map + * a map with a map + + Incompatible merge found + """ + + raise Ecto.QueryError, query: query, message: message + end + + select = %{ + select | expr: expr, + params: old_params ++ bump_subquery_params(new_params, old_subqueries), + subqueries: old_subqueries ++ new_subqueries, + take: merge_take(query, old_expr, old_take, new_take), + aliases: merge_aliases(old_aliases, new_aliases) + } + + %{query | select: select} + end + + defp classify_merge({:&, meta, [ix]}, take) when is_integer(ix) do + case take do + %{^ix => {:map, _}} -> {:map, meta, :runtime} + _ -> {:source, meta, ix} + end + end + + defp classify_merge({:%, meta, [name, {:%{}, _, fields}]}, _take) + when fields == [] or tuple_size(hd(fields)) == 2 do + {:struct, meta, name, fields} + end + + defp classify_merge({:%{}, meta, fields}, _take) + when fields == [] or tuple_size(hd(fields)) == 2 do + {:map, meta, fields} + end + + defp classify_merge({:%{}, meta, _}, _take) do + {:map, meta, :runtime} + end + + defp classify_merge(_, _take) do + :error + end + + defp merge_map_fields(old_fields, new_fields, false) do + if Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) do + Keyword.merge(old_fields, new_fields) + else + :error + end + end + + defp merge_map_fields(old_fields, new_fields, true) when is_list(old_fields) do + if Keyword.keyword?(new_fields) do + valid? 
= + Enum.reduce_while(old_fields, true, fn + {k, _v}, _ when is_atom(k) -> + if Keyword.has_key?(new_fields, k), + do: {:halt, false}, + else: {:cont, true} + + _, _ -> + {:halt, false} + end) + + if valid?, do: old_fields ++ new_fields, else: :error + else + :error + end + end + + defp merge_map_fields(_, _, true), do: :error + + defp merge_argument_to_error({:&, _, [0]}, %{from: %{source: {source, alias}}}) do + "source #{inspect(source || alias)}" + end + + defp merge_argument_to_error({:&, _, [ix]}, _query) do + "join (at position #{ix})" + end + + defp merge_argument_to_error(other, _query) do + Macro.to_string(other) + end + + defp add_take(acc, key, value) do + take = Map.update(acc.take, key, value, &merge_take_kind_and_fields(key, &1, value)) + %{acc | take: take} + end + + defp bump_subquery_params(new_params, old_subqueries) do + len = length(old_subqueries) + + Enum.map(new_params, fn + {:subquery, counter} -> {:subquery, len + counter} + other -> other + end) + end + + defp merge_take(query, old_expr, %{} = old_take, %{} = new_take) do + Enum.reduce(new_take, old_take, fn {binding, {new_kind, new_fields} = new_value}, acc -> + case acc do + %{^binding => old_value} -> + Map.put(acc, binding, merge_take_kind_and_fields(binding, old_value, new_value)) + + %{} -> + # If merging with a schema, add the schema's query fields. This comes in handy if the user + # is merging fields with load_in_query = false. + # If merging with a schemaless source, do nothing so the planner can take all the fields. 
+ case old_expr do + {:&, _, [^binding]} -> + source = Enum.at([query.from | query.joins], binding).source + + case source do + {_, schema} when schema != nil -> + Map.put(acc, binding, {new_kind, Enum.uniq(new_fields ++ schema.__schema__(:query_fields))}) + + _ -> + acc + end + + _ -> + Map.put(acc, binding, new_value) + end + end + end) + end + + defp merge_take_kind_and_fields(binding, {old_kind, old_fields}, {new_kind, new_fields}) do + {merge_take_kind(binding, old_kind, new_kind), Enum.uniq(old_fields ++ new_fields)} + end + + defp merge_take_kind(_, kind, kind), do: kind + defp merge_take_kind(_, :any, kind), do: kind + defp merge_take_kind(_, kind, :any), do: kind + defp merge_take_kind(binding, old, new) do + Builder.error! "cannot select_merge because the binding at position #{binding} " <> + "was previously specified as a `#{old}` and later as `#{new}`" + end + + defp merge_aliases(old_aliases, new_aliases) do + Enum.reduce(new_aliases, old_aliases, fn {alias, _}, aliases -> + Builder.add_select_alias(aliases, alias) + end) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/update.ex b/deps/ecto/lib/ecto/query/builder/update.ex new file mode 100644 index 0000000..29c0e69 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/update.ex @@ -0,0 +1,200 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Update do + @moduledoc false + + @keys [:set, :inc, :push, :pull] + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. 
+ + iex> escape([], [], __ENV__) + {[], [], []} + + iex> escape([set: []], [], __ENV__) + {[], [], []} + + iex> escape(quote(do: ^[set: []]), [], __ENV__) + {[], [set: []], []} + + iex> escape(quote(do: [set: ^[foo: 1]]), [], __ENV__) + {[], [set: [foo: 1]], []} + + iex> escape(quote(do: [set: [foo: ^1]]), [], __ENV__) + {[], [set: [foo: 1]], []} + + """ + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, Macro.t, list} + def escape(expr, vars, env) when is_list(expr) do + escape_op(expr, [], [], [], vars, env) + end + + def escape({:^, _, [v]}, _vars, _env) do + {[], v, []} + end + + def escape(expr, _vars, _env) do + compile_error!(expr) + end + + defp escape_op([{k, v}|t], compile, runtime, params, vars, env) when is_atom(k) and is_list(v) do + validate_op!(k) + {compile_values, runtime_values, params} = escape_kw(k, v, params, vars, env) + compile = + if compile_values == [], do: compile, else: [{k, Enum.reverse(compile_values)} | compile] + runtime = + if runtime_values == [], do: runtime, else: [{k, Enum.reverse(runtime_values)} | runtime] + escape_op(t, compile, runtime, params, vars, env) + end + + defp escape_op([{k, {:^, _, [v]}}|t], compile, runtime, params, vars, env) when is_atom(k) do + validate_op!(k) + escape_op(t, compile, [{k, v}|runtime], params, vars, env) + end + + defp escape_op([], compile, runtime, params, _vars, _env) do + {Enum.reverse(compile), Enum.reverse(runtime), params} + end + + defp escape_op(expr, _compile, _runtime, _params, _vars, _env) do + compile_error!(expr) + end + + defp escape_kw(op, kw, params, vars, env) do + Enum.reduce kw, {[], [], params}, fn + {k, {:^, _, [v]}}, {compile, runtime, params} when is_atom(k) -> + {compile, [{k, v} | runtime], params} + {k, v}, {compile, runtime, params} -> + k = escape_field!(k) + {v, {params, _acc}} = Builder.escape(v, type_for_key(op, {0, k}), {params, %{}}, vars, env) + {[{k, v} | compile], runtime, params} + _, _acc -> + Builder.error! 
"malformed #{inspect op} in update `#{Macro.to_string(kw)}`, " <> + "expected a keyword list" + end + end + + defp escape_field!({:^, _, [k]}), do: quote(do: Ecto.Query.Builder.Update.field!(unquote(k))) + defp escape_field!(k) when is_atom(k), do: k + + defp escape_field!(k) do + Builder.error!( + "expected an atom field or an interpolated field in `update`, got `#{inspect(k)}`" + ) + end + + def field!(field) when is_atom(field), do: field + + def field!(other) do + raise ArgumentError, "expected a field as an atom in `update`, got: `#{inspect other}`" + end + + defp compile_error!(expr) do + Builder.error! "malformed update `#{Macro.to_string(expr)}` in query expression, " <> + "expected a keyword list with set/push/pull as keys with field-value " <> + "pairs as values" + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {compile, runtime, params} = escape(expr, binding, env) + + query = + if compile == [] do + query + else + params = Builder.escape_params(params) + + update = quote do + %Ecto.Query.QueryExpr{expr: unquote(compile), params: unquote(params), + file: unquote(env.file), line: unquote(env.line)} + end + + Builder.apply_query(query, __MODULE__, [update], env) + end + + if runtime == [] do + query + else + quote do + Ecto.Query.Builder.Update.update!(unquote(query), unquote(runtime), + unquote(env.file), unquote(env.line)) + end + end + end + + @doc """ + The callback applied by `build/4` to build the query. 
+ """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{updates: updates} = query, expr) do + %{query | updates: updates ++ [expr]} + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end + + @doc """ + If there are interpolated updates at compile time, + we need to handle them at runtime. We do such in + this callback. + """ + def update!(query, runtime, file, line) when is_list(runtime) do + {runtime, {params, _count}} = + Enum.map_reduce runtime, {[], 0}, fn + {k, v}, acc when is_atom(k) and is_list(v) -> + validate_op!(k) + {v, params} = runtime_field!(query, k, v, acc) + {{k, v}, params} + _, _ -> + runtime_error!(runtime) + end + + expr = %Ecto.Query.QueryExpr{expr: runtime, params: Enum.reverse(params), + file: file, line: line} + + apply(query, expr) + end + + def update!(_query, runtime, _file, _line) do + runtime_error!(runtime) + end + + defp runtime_field!(query, key, kw, acc) do + Enum.map_reduce kw, acc, fn + {k, %Ecto.Query.DynamicExpr{} = v}, {params, count} when is_atom(k) -> + {v, params, count} = Ecto.Query.Builder.Dynamic.partially_expand(:update, query, v, params, count) + {{k, v}, {params, count}} + {k, v}, {params, count} when is_atom(k) -> + params = [{v, type_for_key(key, {0, k})} | params] + {{k, {:^, [], [count]}}, {params, count + 1}} + _, _acc -> + raise ArgumentError, "malformed #{inspect key} in update `#{inspect(kw)}`, " <> + "expected a keyword list" + end + end + + defp runtime_error!(value) do + raise ArgumentError, + "malformed update `#{inspect(value)}` in query expression, " <> + "expected a keyword list with set/push/pull as keys with field-value pairs as values" + end + + defp validate_op!(key) when key in @keys, do: :ok + defp validate_op!(key), do: Builder.error! "unknown key `#{inspect(key)}` in update" + + # Out means the given type must be taken out of an array + # It is the opposite of "left in right" in the query API. 
+ defp type_for_key(:push, type), do: {:out, type} + defp type_for_key(:pull, type), do: {:out, type} + defp type_for_key(_, type), do: type +end diff --git a/deps/ecto/lib/ecto/query/builder/windows.ex b/deps/ecto/lib/ecto/query/builder/windows.ex new file mode 100644 index 0000000..c5bfb93 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/windows.ex @@ -0,0 +1,204 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Windows do + @moduledoc false + + alias Ecto.Query.Builder + alias Ecto.Query.Builder.{GroupBy, OrderBy} + @sort_order [:partition_by, :order_by, :frame] + + @doc """ + Escapes a window params. + + ## Examples + + iex> escape(quote do [order_by: [desc: 13]] end, {[], %{}}, [x: 0], __ENV__) + {[order_by: [desc: 13]], [], {[], %{}}} + + """ + @spec escape([Macro.t], {list, term}, Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) + :: {Macro.t, [{atom, term}], {list, term}} + def escape(kw, params_acc, vars, env) when is_list(kw) do + {compile, runtime} = sort(@sort_order, kw, :compile, [], []) + {compile, params_acc} = Enum.map_reduce(compile, params_acc, &escape_compile(&1, &2, vars, env)) + {compile, runtime, params_acc} + end + + def escape(kw, _params_acc, _vars, _env) do + error!(kw) + end + + defp sort([key | keys], kw, mode, compile, runtime) do + case Keyword.pop(kw, key) do + {nil, kw} -> + sort(keys, kw, mode, compile, runtime) + + {{:^, _, [var]}, kw} -> + sort(keys, kw, :runtime, compile, [{key, var} | runtime]) + + {_, _} when mode == :runtime -> + [{runtime_key, _} | _] = runtime + raise ArgumentError, "window has an interpolated value under `#{runtime_key}` " <> + "and therefore `#{key}` must also be interpolated" + + {expr, kw} -> + sort(keys, kw, mode, [{key, expr} | compile], runtime) + end + end + + defp sort([], [], _mode, compile, runtime) do + {Enum.reverse(compile), Enum.reverse(runtime)} + end + + defp sort([], kw, _mode, _compile, _runtime) do + error!(kw) + end + + defp escape_compile({:partition_by, fields}, 
params_acc, vars, env) do + {fields, params_acc} = GroupBy.escape(:partition_by, fields, params_acc, vars, env) + {{:partition_by, fields}, params_acc} + end + + defp escape_compile({:order_by, fields}, params_acc, vars, env) do + {fields, params_acc} = OrderBy.escape(:order_by, fields, params_acc, vars, env) + {{:order_by, fields}, params_acc} + end + + defp escape_compile({:frame, frame_clause}, params_acc, vars, env) do + {frame_clause, params_acc} = escape_frame(frame_clause, params_acc, vars, env) + {{:frame, frame_clause}, params_acc} + end + + defp escape_frame({:fragment, _, _} = fragment, params_acc, vars, env) do + Builder.escape(fragment, :any, params_acc, vars, env) + end + defp escape_frame(other, _, _, _) do + Builder.error!("expected a dynamic or fragment in `:frame`, got: `#{inspect other}`") + end + + defp error!(other) do + Builder.error!( + "expected window definition to be a keyword list " <> + "with partition_by, order_by or frame as keys, got: `#{inspect other}`" + ) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t, [Macro.t], Keyword.t, Macro.Env.t) :: Macro.t + def build(query, binding, windows, env) when is_list(windows) do + {query, binding} = Builder.escape_binding(query, binding, env) + + {compile, runtime} = + windows + |> Enum.map(&escape_window(binding, &1, env)) + |> Enum.split_with(&elem(&1, 2) == []) + + compile = Enum.map(compile, &build_compile_window(&1, env)) + runtime = Enum.map(runtime, &build_runtime_window(&1, env)) + query = Builder.apply_query(query, __MODULE__, [compile], env) + + if runtime == [] do + query + else + quote do + Ecto.Query.Builder.Windows.runtime!( + unquote(query), + unquote(runtime), + unquote(env.file), + unquote(env.line) + ) + end + end + end + + def build(_, _, windows, _) do + Builder.error!( + "expected window definitions to be a keyword list with window names as keys and " <> + "a keyword list with the window definition as value, got: `#{inspect windows}`" + ) + end + + defp escape_window(vars, {name, expr}, env) do + {compile_acc, runtime_acc, {params, acc}} = escape(expr, {[], %{subqueries: []}}, vars, env) + {name, compile_acc, runtime_acc, Builder.escape_params(params), acc} + end + + defp build_compile_window({name, compile_acc, _, params, acc}, env) do + {name, + quote do + %Ecto.Query.ByExpr{ + expr: unquote(compile_acc), + params: unquote(params), + subqueries: unquote(acc.subqueries), + file: unquote(env.file), + line: unquote(env.line) + } + end} + end + + defp build_runtime_window({name, compile_acc, runtime_acc, params, acc}, _env) do + {:{}, [], [name, Enum.reverse(compile_acc), runtime_acc, Enum.reverse(params), {:%{}, [], Map.to_list(acc)}]} + end + + @doc """ + Invoked for runtime windows. 
+ """ + def runtime!(query, runtime, file, line) do + windows = + Enum.map(runtime, fn {name, compile_acc, runtime_acc, params, escape_acc} -> + {{acc, subqueries}, params} = do_runtime_window!(runtime_acc, query, {compile_acc, escape_acc.subqueries}, params) + expr = %Ecto.Query.ByExpr{expr: Enum.reverse(acc), params: Enum.reverse(params), file: file, line: line, subqueries: subqueries} + {name, expr} + end) + + apply(query, windows) + end + + defp do_runtime_window!([{:order_by, order_by} | kw], query, {acc, subqueries_acc}, params) do + {order_by, params, subqueries} = OrderBy.order_by_or_distinct!(:order_by, query, order_by, params) + + do_runtime_window!(kw, query, {[{:order_by, order_by} | acc], subqueries_acc ++ subqueries}, params) + end + + defp do_runtime_window!([{:partition_by, partition_by} | kw], query, {acc, subqueries_acc}, params) do + {partition_by, params, subqueries} = GroupBy.group_or_partition_by!(:partition_by, query, partition_by, params) + + do_runtime_window!(kw, query, {[{:partition_by, partition_by} | acc], subqueries_acc ++ subqueries}, params) + end + + defp do_runtime_window!([{:frame, frame} | kw], query, {acc, subqueries_acc}, params) do + case frame do + %Ecto.Query.DynamicExpr{} -> + {frame, params, _count} = Builder.Dynamic.partially_expand(:windows, query, frame, params, length(params)) + do_runtime_window!(kw, query, {[{:frame, frame} | acc], subqueries_acc}, params) + + _ -> + raise ArgumentError, + "expected a dynamic or fragment in `:frame`, got: `#{inspect frame}`" + end + end + + defp do_runtime_window!([], _query, acc, params), do: {acc, params} + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, Keyword.t) :: Ecto.Query.t + def apply(%Ecto.Query{windows: windows} = query, definitions) do + merged = Keyword.merge(windows, definitions, fn name, _, _ -> + Builder.error! 
"window with name #{name} is already defined" + end) + + %{query | windows: merged} + end + + def apply(query, definitions) do + apply(Ecto.Queryable.to_query(query), definitions) + end +end diff --git a/deps/ecto/lib/ecto/query/inspect.ex b/deps/ecto/lib/ecto/query/inspect.ex new file mode 100644 index 0000000..0a50972 --- /dev/null +++ b/deps/ecto/lib/ecto/query/inspect.ex @@ -0,0 +1,442 @@ +import Inspect.Algebra +import Kernel, except: [to_string: 1] + +alias Ecto.Query.{DynamicExpr, JoinExpr, QueryExpr, WithExpr, LimitExpr} + +defimpl Inspect, for: Ecto.Query.DynamicExpr do + def inspect(%DynamicExpr{binding: binding} = dynamic, opts) do + binding = + Enum.map(binding, fn + {{:^, _, [as]}, bind} when is_atom(as) -> {as, bind} + other -> other + end) + + dynamic = %{dynamic | binding: binding} + + joins = + binding + |> Enum.drop(1) + |> Enum.with_index() + |> Enum.map(&%JoinExpr{ix: &1}) + + aliases = + for({as, _} when is_atom(as) <- binding, do: as) + |> Enum.with_index() + |> Map.new() + + query = %Ecto.Query{joins: joins, aliases: aliases} + + {expr, binding, params, subqueries, _, _} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + names = + Enum.map(binding, fn + {_, {name, _, _}} -> name + {name, _, _} -> name + end) + + query_expr = %{expr: expr, params: params, subqueries: subqueries} + inspected = Inspect.Ecto.Query.expr(expr, List.to_tuple(names), query_expr) + + container_doc("dynamic(", [Macro.to_string(binding), inspected], ")", opts, fn str, _ -> + str + end) + end +end + +defimpl Inspect, for: Ecto.Query do + @doc false + def inspect(query, opts) do + list = + Enum.map(to_list(query), fn + {key, string} -> + concat(Atom.to_string(key) <> ": ", string) + + string -> + string + end) + + result = container_doc("#Ecto.Query<", list, ">", opts, fn str, _ -> str end) + + case query.with_ctes do + %WithExpr{recursive: recursive, queries: [_ | _] = queries} -> + with_ctes = + Enum.map(queries, fn {name, cte_opts, query} -> + cte = + case 
query do + %Ecto.Query{} -> __MODULE__.inspect(query, opts) + %Ecto.Query.QueryExpr{} -> expr(query, {}) + end + + concat([ + "|> with_cte(\"" <> name <> "\", materialized: ", + inspect(cte_opts[:materialized]), + ", as: ", + cte, + ")" + ]) + end) + + result = if recursive, do: glue(result, "\n", "|> recursive_ctes(true)"), else: result + [result | with_ctes] |> Enum.intersperse(break("\n")) |> concat() + + _ -> + result + end + end + + @doc false + def to_string(query) do + Enum.map_join(to_list(query), ",\n ", fn + {key, string} -> + Atom.to_string(key) <> ": " <> string + + string -> + string + end) + end + + defp to_list(query) do + names = + query + |> collect_sources() + |> generate_letters() + |> generate_names() + |> List.to_tuple() + + from = bound_from(query.from, elem(names, 0), names) + joins = joins(query.joins, names) + preloads = preloads(query.preloads) + assocs = assocs(query.assocs, names) + windows = windows(query.windows, names) + combinations = combinations(query.combinations) + limit = limit(query.limit, names) + + wheres = bool_exprs(%{and: :where, or: :or_where}, query.wheres, names) + group_bys = kw_exprs(:group_by, query.group_bys, names) + havings = bool_exprs(%{and: :having, or: :or_having}, query.havings, names) + order_bys = kw_exprs(:order_by, query.order_bys, names) + updates = kw_exprs(:update, query.updates, names) + + lock = kw_inspect(:lock, query.lock) + offset = kw_expr(:offset, query.offset, names) + select = kw_expr(:select, query.select, names) + distinct = kw_expr(:distinct, query.distinct, names) + + Enum.concat([ + from, + joins, + wheres, + group_bys, + havings, + windows, + combinations, + order_bys, + limit, + offset, + lock, + distinct, + updates, + select, + preloads, + assocs + ]) + end + + defp bound_from(nil, name, _names), do: ["from #{name} in query"] + + defp bound_from(from, name, names) do + ["from #{name} in #{inspect_source(from, names)}"] ++ kw_as_and_prefix(from) + end + + defp inspect_source(%{source: 
%Ecto.Query{} = query}, _names), do: "^" <> inspect(query) + + defp inspect_source(%{source: %Ecto.SubQuery{query: query}}, _names), + do: "subquery(#{to_string(query)})" + + defp inspect_source(%{source: {source, nil}}, _names), do: inspect(source) + defp inspect_source(%{source: {nil, schema}}, _names), do: inspect(schema) + + defp inspect_source(%{source: {:fragment, _, _} = source} = part, names), + do: "#{expr(source, names, part)}" + + defp inspect_source(%{source: {:values, _, [types | _]}}, _names) do + fields = Keyword.keys(types) + "values (#{Enum.join(fields, ", ")})" + end + + defp inspect_source(%{source: {source, schema}}, _names) do + inspect(if source == schema.__schema__(:source), do: schema, else: {source, schema}) + end + + defp joins(joins, names) do + joins + |> Enum.with_index() + |> Enum.flat_map(fn {expr, ix} -> join(expr, elem(names, expr.ix || ix + 1), names) end) + end + + defp join(%JoinExpr{qual: qual, assoc: {ix, right}, on: on} = join, name, names) do + string = "#{name} in assoc(#{elem(names, ix)}, #{inspect(right)})" + [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ maybe_on(on, names) + end + + defp join(%JoinExpr{qual: qual, on: on} = join, name, names) do + string = "#{name} in #{inspect_source(join, names)}" + [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ [on: expr(on, names)] + end + + defp maybe_on(%QueryExpr{expr: true}, _names), do: [] + defp maybe_on(%QueryExpr{} = on, names), do: [on: expr(on, names)] + + defp preloads([]), do: [] + defp preloads(preloads), do: [preload: inspect(preloads)] + + defp assocs([], _names), do: [] + defp assocs(assocs, names), do: [preload: expr(assocs(assocs), names, %{})] + + defp assocs(assocs) do + Enum.map(assocs, fn + {field, {idx, []}} -> + {field, {:&, [], [idx]}} + + {field, {idx, children}} -> + {field, {{:&, [], [idx]}, assocs(children)}} + end) + end + + defp windows(windows, names) do + Enum.map(windows, &window(&1, names)) + end + + defp window({name, %{expr: 
definition} = part}, names) do + {:windows, "[#{name}: " <> expr(definition, names, part) <> "]"} + end + + defp combinations(combinations) do + Enum.map(combinations, fn {key, val} -> {key, "(" <> to_string(val) <> ")"} end) + end + + defp limit(nil, _names), do: [] + + defp limit(%LimitExpr{with_ties: false} = limit, names) do + [{:limit, expr(limit, names)}] + end + + defp limit(%LimitExpr{with_ties: with_ties} = limit, names) do + [{:limit, expr(limit, names)}] ++ kw_inspect(:with_ties, with_ties) + end + + defp bool_exprs(keys, exprs, names) do + Enum.map(exprs, fn %{expr: expr, op: op} = part -> + {Map.fetch!(keys, op), expr(expr, names, part)} + end) + end + + defp kw_exprs(key, exprs, names) do + Enum.map(exprs, &{key, expr(&1, names)}) + end + + defp kw_expr(_key, nil, _names), do: [] + defp kw_expr(key, expr, names), do: [{key, expr(expr, names)}] + + defp kw_inspect(_key, nil), do: [] + defp kw_inspect(key, val), do: [{key, inspect(val)}] + + defp kw_as_and_prefix(%{as: as, prefix: prefix}) do + kw_inspect(:as, as) ++ kw_inspect(:prefix, prefix) + end + + defp expr(%{expr: expr} = part, names) do + expr(expr, names, part) + end + + @doc false + def expr(expr, names, part) do + expr + |> Macro.traverse(:ok, &{prewalk(&1, names), &2}, &{postwalk(&1, names, part), &2}) + |> elem(0) + |> macro_to_string() + end + + defp macro_to_string(expr), do: Macro.to_string(expr) + + # Tagged values + defp prewalk(%Ecto.Query.Tagged{value: value, tag: nil}, _) do + value + end + + defp prewalk(%Ecto.Query.Tagged{value: value, tag: tag}, _) do + {:type, [], [value, tag]} + end + + defp prewalk({{:., dot_meta, [{:&, _, [ix]}, field]}, meta, []}, names) do + {{:., dot_meta, [binding(names, ix), field]}, meta, []} + end + + defp prewalk(node, _) do + node + end + + # Convert variables to proper names + defp postwalk({:&, _, [ix]}, names, part) do + binding_to_expr(ix, names, part) + end + + # Format field/2 with string name + defp postwalk({{:., _, [{_, _, _} = binding, 
field]}, meta, []}, _names, _part) + when is_binary(field) do + {:field, meta, [binding, field]} + end + + # Remove parens from field calls + defp postwalk({{:., _, [_, _]} = dot, meta, []}, _names, _part) do + {dot, [no_parens: true] ++ meta, []} + end + + # Interpolated unknown value + defp postwalk({:^, _, [_ix, _len]}, _names, _part) do + {:^, [], [{:..., [], nil}]} + end + + # Interpolated known value + defp postwalk({:^, _, [ix]}, _, %{params: params}) do + value = + case Enum.at(params || [], ix) do + # Wrap the head in a block so it is not treated as a charlist + {[head | tail], _type} -> [{:__block__, [], [head]} | tail] + {value, _type} -> value + _ -> {:..., [], nil} + end + + {:^, [], [value]} + end + + # Types need to be converted back to AST for fields + defp postwalk({:type, meta, [expr, type]}, names, part) do + {:type, meta, [expr, type_to_expr(type, names, part)]} + end + + # For keyword and interpolated fragments use normal escaping + defp postwalk({:fragment, _, [{_, _} | _] = parts}, _names, _part) do + {:fragment, [], unmerge_fragments(parts, "", [])} + end + + # Subqueries + defp postwalk({:subquery, i}, _names, %{subqueries: subqueries}) do + {:subquery, [], [Enum.fetch!(subqueries, i).query]} + end + + # Jason + defp postwalk({:json_extract_path, _, [expr, path]}, _names, _part) do + Enum.reduce(path, expr, fn element, acc -> + {{:., [], [Access, :get]}, [], [acc, element]} + end) + end + + defp postwalk(node, _names, _part) do + node + end + + defp binding_to_expr(ix, names, part) do + case part do + %{take: %{^ix => {:any, fields}}} when ix == 0 -> + fields + + %{take: %{^ix => {tag, fields}}} -> + {tag, [], [binding(names, ix), fields]} + + _ -> + binding(names, ix) + end + end + + defp type_to_expr({ix, type}, names, part) when is_integer(ix) do + {{:., [], [binding_to_expr(ix, names, part), type]}, [no_parens: true], []} + end + + defp type_to_expr({composite, type}, names, part) when is_atom(composite) do + {composite, 
type_to_expr(type, names, part)} + end + + defp type_to_expr(type, _names, _part) do + type + end + + defp unmerge_fragments([{:raw, s}, {:expr, v} | t], frag, args) do + unmerge_fragments(t, frag <> s <> "?", [v | args]) + end + + defp unmerge_fragments([{:raw, s}], frag, args) do + [frag <> s | Enum.reverse(args)] + end + + defp join_qual(:inner), do: :join + defp join_qual(:inner_lateral), do: :inner_lateral_join + defp join_qual(:left), do: :left_join + defp join_qual(:left_lateral), do: :left_lateral_join + defp join_qual(:right), do: :right_join + defp join_qual(:full), do: :full_join + defp join_qual(:cross), do: :cross_join + defp join_qual(:cross_lateral), do: :cross_lateral_join + + defp collect_sources(%{from: nil, joins: joins}) do + ["query" | join_sources(joins)] + end + + defp collect_sources(%{from: %{source: source}, joins: joins}) do + [from_sources(source) | join_sources(joins)] + end + + defp from_sources(%Ecto.SubQuery{query: query}), do: from_sources(query.from.source) + defp from_sources({source, schema}), do: schema || source + defp from_sources(nil), do: "query" + defp from_sources({:fragment, _, _}), do: "fragment" + defp from_sources({:values, _, _}), do: "values" + + defp join_sources(joins) do + joins + |> Enum.sort_by(& &1.ix) + |> Enum.map(fn + %JoinExpr{assoc: {_var, assoc}} -> + assoc + + %JoinExpr{source: {:fragment, _, _}} -> + "fragment" + + %JoinExpr{source: %Ecto.Query{from: from}} -> + from_sources(from.source) + + %JoinExpr{source: source} -> + from_sources(source) + end) + end + + defp generate_letters(sources) do + Enum.map(sources, fn source -> + source + |> Kernel.to_string() + |> normalize_source() + |> String.first() + |> String.downcase() + end) + end + + defp generate_names(letters) do + {names, _} = Enum.map_reduce(letters, 0, &{:"#{&1}#{&2}", &2 + 1}) + names + end + + defp binding(names, pos) do + try do + {elem(names, pos), [], nil} + rescue + ArgumentError -> {:"unknown_binding_#{pos}!", [], nil} + end + end + + 
defp normalize_source("Elixir." <> _ = source), + do: source |> Module.split() |> List.last() + + defp normalize_source(source), + do: source +end diff --git a/deps/ecto/lib/ecto/query/planner.ex b/deps/ecto/lib/ecto/query/planner.ex new file mode 100644 index 0000000..35d32b7 --- /dev/null +++ b/deps/ecto/lib/ecto/query/planner.ex @@ -0,0 +1,2751 @@ +defmodule Ecto.Query.Planner do + # Normalizes a query and its parameters. + @moduledoc false + + alias Ecto.Query.{ + BooleanExpr, + ByExpr, + DynamicExpr, + FromExpr, + JoinExpr, + QueryExpr, + SelectExpr, + LimitExpr + } + + if map_size(%Ecto.Query{}) != 21 do + raise "Ecto.Query match out of date in builder" + end + + @parent_as __MODULE__ + @aggs ~w(count avg min max sum row_number rank dense_rank percent_rank cume_dist ntile lag lead first_value last_value nth_value)a + + @doc """ + Converts a query to a list of joins. + + The from is moved as last join with the where conditions as its "on" + in order to keep proper binding order. + """ + def query_to_joins(qual, source, %{wheres: wheres, joins: joins}, position) do + on = %QueryExpr{file: __ENV__.file, line: __ENV__.line, expr: true, params: []} + + on = + Enum.reduce(wheres, on, fn %BooleanExpr{op: op, expr: expr, params: params}, acc -> + merge_expr_and_params(op, acc, expr, params) + end) + + join = %JoinExpr{qual: qual, source: source, file: __ENV__.file, line: __ENV__.line, on: on} + last = length(joins) + position + + mapping = fn + 0 -> last + ix -> ix + position - 1 + end + + for {%{on: on} = join, ix} <- Enum.with_index(joins ++ [join]) do + %{join | on: rewrite_sources(on, mapping), ix: ix + position} + end + end + + defp merge_expr_and_params( + op, + %QueryExpr{expr: left_expr, params: left_params} = struct, + right_expr, + right_params + ) do + right_expr = Ecto.Query.Builder.bump_interpolations(right_expr, left_params) + %{struct | expr: merge_expr(op, left_expr, right_expr), params: left_params ++ right_params} + end + + defp merge_expr(_op, 
left, true), do: left + defp merge_expr(_op, true, right), do: right + defp merge_expr(op, left, right), do: {op, [], [left, right]} + + @doc """ + Rewrites the given query expression sources using the given mapping. + """ + def rewrite_sources(%{expr: expr, params: params} = part, mapping) do + expr = + Macro.prewalk(expr, fn + %Ecto.Query.Tagged{type: type, tag: tag} = tagged -> + %{tagged | type: rewrite_type(type, mapping), tag: rewrite_type(tag, mapping)} + + {:&, meta, [ix]} -> + {:&, meta, [mapping.(ix)]} + + other -> + other + end) + + params = + Enum.map(params, fn + {val, type} -> + {val, rewrite_type(type, mapping)} + + val -> + val + end) + + %{part | expr: expr, params: params} + end + + defp rewrite_type({composite, {ix, field}}, mapping) when is_integer(ix) do + {composite, {mapping.(ix), field}} + end + + defp rewrite_type({ix, field}, mapping) when is_integer(ix) do + {mapping.(ix), field} + end + + defp rewrite_type(other, _mapping) do + other + end + + @doc """ + Define the query cache table. + """ + def new_query_cache(atom_name) do + :ets.new(atom_name || __MODULE__, [:set, :public, read_concurrency: true]) + end + + @doc """ + Plans the query for execution. + + Planning happens in multiple steps: + + 1. First the query is planned by retrieving + its cache key, casting and merging parameters + + 2. Then a cache lookup is done, if the query is + cached, we are done + + 3. If there is no cache, we need to actually + normalize and validate the query, asking the + adapter to prepare it + + 4. The query is sent to the adapter to be generated + + ## Cache + + All entries in the query, except the preload and sources + field, should be part of the cache key. + + The cache value is the compiled query by the adapter + along-side the select expression. 
+ """ + def query(query, operation, cache, adapter, counter) do + {query, params, key} = plan(query, operation, adapter) + {cast_params, dump_params} = Enum.unzip(params) + query_with_cache(key, query, operation, cache, adapter, counter, cast_params, dump_params) + end + + defp query_with_cache(key, query, operation, cache, adapter, counter, cast_params, dump_params) do + case query_lookup(key, query, operation, cache, adapter, counter) do + {_, select, prepared} -> + {build_meta(query, select), {:nocache, prepared}, cast_params, dump_params} + + {_key, :cached, select, cached} -> + update = &cache_update(cache, key, &1) + reset = &cache_reset(cache, key, &1) + {build_meta(query, select), {:cached, update, reset, cached}, cast_params, dump_params} + + {_key, :cache, select, prepared} -> + update = &cache_update(cache, key, &1) + {build_meta(query, select), {:cache, update, prepared}, cast_params, dump_params} + end + end + + defp query_lookup(:nocache, query, operation, _cache, adapter, counter) do + query_without_cache(query, operation, adapter, counter) + end + + defp query_lookup(key, query, operation, cache, adapter, counter) do + case :ets.lookup(cache, key) do + [term] -> term + [] -> query_prepare(query, operation, adapter, counter, cache, key) + end + end + + defp query_prepare(query, operation, adapter, counter, cache, key) do + case query_without_cache(query, operation, adapter, counter) do + {:cache, select, prepared} -> + cache_insert(cache, key, {key, :cache, select, prepared}) + + {:nocache, _, _} = nocache -> + nocache + end + end + + defp cache_insert(cache, key, elem) do + case :ets.insert_new(cache, elem) do + true -> + elem + + false -> + [elem] = :ets.lookup(cache, key) + elem + end + end + + defp cache_update(cache, key, cached) do + _ = :ets.update_element(cache, key, [{2, :cached}, {4, cached}]) + :ok + end + + defp cache_reset(cache, key, prepared) do + _ = :ets.update_element(cache, key, [{2, :cache}, {4, prepared}]) + :ok + end + + defp 
query_without_cache(query, operation, adapter, counter) do + {query, select} = normalize(query, operation, adapter, counter) + {cache, prepared} = adapter.prepare(operation, query) + {cache, select, prepared} + end + + defp build_meta(%{sources: sources, preloads: preloads}, select) do + %{select: select, preloads: preloads, sources: sources} + end + + @doc """ + Prepares the query for cache. + + This means all the parameters from query expressions are + merged into a single value and their entries are pruned + from the query. + + This function is called by the backend before invoking + any cache mechanism. + """ + @spec plan(Ecto.Query.t(), atom(), module, map()) :: + {planned_query :: Ecto.Query.t(), parameters :: list(), cache_key :: any()} + def plan(query, operation, adapter, cte_names \\ %{}) do + {query, cte_names} = plan_ctes(query, adapter, cte_names) + query = plan_sources(query, adapter, cte_names) + plan_subquery = &plan_subquery(&1, query, nil, adapter, false, cte_names) + + query + |> plan_assocs() + |> plan_combinations(adapter, cte_names) + |> plan_expr_subqueries(:wheres, plan_subquery) + |> plan_expr_subqueries(:havings, plan_subquery) + |> plan_expr_subqueries(:order_bys, plan_subquery) + |> plan_expr_subqueries(:group_bys, plan_subquery) + |> plan_expr_subquery(:distinct, plan_subquery) + |> plan_expr_subquery(:select, plan_subquery) + |> plan_windows(plan_subquery) + |> plan_cache(operation, adapter) + rescue + e -> + # Reraise errors so we ignore the planner inner stacktrace + filter_and_reraise(e, __STACKTRACE__) + end + + @doc """ + Prepare all sources, by traversing and expanding from, joins, subqueries. 
+ """ + def plan_sources(query, adapter, cte_names) do + {from, source} = plan_from(query, adapter, cte_names) + + # Set up the initial source so we can refer + # to the parent in subqueries in joins + query = %{query | sources: {source}} + + {joins, sources, tail_sources} = + plan_joins(query, [source], length(query.joins), adapter, cte_names) + + %{ + query + | from: from, + joins: joins |> Enum.reverse(), + sources: (tail_sources ++ sources) |> Enum.reverse() |> List.to_tuple() + } + end + + defp plan_from(%{from: nil} = query, _adapter, _cte_names) do + error!(query, "query must have a from expression") + end + + defp plan_from( + %{from: %{source: {kind, _, _}}, preloads: preloads, assocs: assocs} = query, + _adapter, + _cte_names + ) + when kind in [:fragment, :values] and (assocs != [] or preloads != []) do + error!(query, "cannot preload associations with a #{kind} source") + end + + defp plan_from(%{from: from} = query, adapter, cte_names) do + plan_source(query, from, adapter, cte_names) + end + + defp plan_source( + query, + %{source: %Ecto.SubQuery{} = subquery, prefix: prefix} = expr, + adapter, + cte_names + ) do + subquery = plan_subquery(subquery, query, prefix, adapter, true, cte_names) + {%{expr | source: subquery}, subquery} + end + + defp plan_source(query, %{source: {nil, schema}} = expr, _adapter, cte_names) + when is_atom(schema) and schema != nil do + source = schema.__schema__(:source) + source_prefix = plan_source_schema_prefix(expr, schema) + + prefix = + case cte_names do + %{^source => _} -> source_prefix + _ -> source_prefix || query.prefix + end + + {%{expr | source: {source, schema}}, {source, schema, prefix}} + end + + defp plan_source(query, %{source: {source, schema}, prefix: prefix} = expr, _adapter, cte_names) + when is_binary(source) and is_atom(schema) do + prefix = + case cte_names do + %{^source => _} -> prefix + _ -> prefix || query.prefix + end + + {expr, {source, schema, prefix}} + end + + defp plan_source( + _query, + 
%{source: {kind, _, _} = source, prefix: nil} = expr, + _adapter, + _cte_names + ) + when kind in [:fragment, :values], + do: {expr, source} + + defp plan_source(query, %{source: {kind, _, _}, prefix: prefix} = expr, _adapter, _cte_names) + when kind in [:fragment, :values], + do: error!(query, expr, "cannot set prefix: #{inspect(prefix)} option for #{kind} sources") + + defp plan_subquery(subquery, query, prefix, adapter, source?, cte_names) do + %{query: inner_query} = subquery + + inner_query = %{ + inner_query + | prefix: prefix || subquery.query.prefix || query.prefix, + aliases: Map.put(inner_query.aliases, @parent_as, query) + } + + {inner_query, params, key} = plan(inner_query, :all, adapter, cte_names) + assert_no_subquery_assocs!(inner_query) + + {inner_query, select} = + inner_query + |> ensure_select(true) + |> normalize_subquery_select(adapter, source?) + + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + %{subquery | query: inner_query, params: params, cache: key, select: select} + rescue + e -> raise Ecto.SubQueryError, query: query, exception: e + end + + # The prefix for form are computed upfront, but not for joins + defp plan_source_schema_prefix(%FromExpr{prefix: prefix}, _schema), + do: prefix + + defp plan_source_schema_prefix(%JoinExpr{prefix: prefix}, schema), + do: prefix || schema.__schema__(:prefix) + + defp assert_no_subquery_assocs!(%{assocs: assocs, preloads: preloads} = query) + when assocs != [] or preloads != [] do + error!(query, "cannot preload associations in subquery") + end + + defp assert_no_subquery_assocs!(query) do + query + end + + defp normalize_subquery_select(query, adapter, source?) do + {schema_or_source, expr, %{select: select} = query} = + rewrite_subquery_select_expr(query, source?) 
+ + {expr, _} = prewalk(expr, :select, query, select, 0, adapter) + + {{:map, types}, fields, _from} = + collect_fields(expr, [], :none, query, select.take, true, %{}) + + # types must take into account selected_as/2 aliases so that the correct fields are + # referenced when the outer query selects the entire subquery + types = normalize_subquery_types(types, Enum.reverse(fields), query.select.aliases, []) + {query, subquery_source(schema_or_source, types)} + end + + defp normalize_subquery_types(types, _fields, select_aliases, _acc) + when select_aliases == %{} do + types + end + + defp normalize_subquery_types([], [], _aliases, acc) do + Enum.reverse(acc) + end + + defp normalize_subquery_types( + [{alias, _} = type | types], + [{alias, _} | fields], + select_aliases, + acc + ) do + normalize_subquery_types(types, fields, select_aliases, [type | acc]) + end + + defp normalize_subquery_types( + [{source_alias, type_value} | types], + [field | fields], + select_aliases, + acc + ) do + if Map.has_key?(select_aliases, source_alias) do + raise ArgumentError, """ + the alias, #{inspect(source_alias)}, provided to `selected_as/2` conflicts + with the subquery's automatic aliasing. 
+ + For example, the following query is not allowed because the alias `:y` + given to `selected_as/2` is also used by the subquery to automatically + alias `s.y`: + + s = from(s in Schema, select: %{x: selected_as(s.x, :y), y: s.y}) + from s in subquery(s) + """ + end + + type = + case field do + {select_alias, _} -> {select_alias, type_value} + _ -> {source_alias, type_value} + end + + normalize_subquery_types(types, fields, select_aliases, [type | acc]) + end + + defp subquery_source(nil, types), do: {:map, types} + defp subquery_source(name, types) when is_atom(name), do: {:struct, name, types} + + defp subquery_source({:source, schema, prefix, types}, only) do + types = + Enum.map(only, fn {field, {:value, type}} -> {field, Keyword.get(types, field, type)} end) + + {:source, schema, prefix, types} + end + + defp rewrite_subquery_select_expr(%{select: select} = query, source?) do + %{expr: expr, take: take} = select + + case subquery_select(expr, take, query) do + {schema_or_source, fields} -> + expr = {:%{}, [], fields} + {schema_or_source, expr, put_in(query.select.expr, expr)} + + :error when source? 
-> + error!( + query, + "subquery/cte must select a source (t), a field (t.field) or a map, got: `#{Macro.to_string(expr)}`" + ) + + :error -> + expr = {:%{}, [], [result: expr]} + {nil, expr, put_in(query.select.expr, expr)} + end + end + + defp subquery_select({:merge, _, [left, right]}, take, query) do + {left_struct, left_fields} = subquery_select(left, take, query) + {right_struct, right_fields} = subquery_select(right, take, query) + {left_struct || right_struct, Keyword.merge(left_fields, right_fields)} + end + + defp subquery_select({:%, _, [name, map]}, take, query) do + {_, fields} = subquery_select(map, take, query) + {name, fields} + end + + defp subquery_select({:%{}, _, [{:|, _, [{:&, [], [ix]}, pairs]}]} = expr, take, query) do + assert_subquery_fields!(query, expr, pairs) + drop = Map.new(pairs, fn {key, _} -> {key, nil} end) + {source, _} = source_take!(:select, query, take, ix, ix, drop) + + # In case of map updates, we need to remove duplicated fields + # at query time because we use the field names as aliases and + # duplicate aliases will lead to invalid queries. 
+ kept_keys = subquery_source_fields(source) -- Keyword.keys(pairs) + {keep_source_or_struct(source), subquery_fields(kept_keys, ix) ++ pairs} + end + + defp subquery_select({:%{}, _, pairs} = expr, _take, query) do + assert_subquery_fields!(query, expr, pairs) + {nil, pairs} + end + + defp subquery_select({:&, _, [ix]}, take, query) do + {source, _} = source_take!(:select, query, take, ix, ix, %{}) + fields = subquery_source_fields(source) + {keep_source_or_struct(source), subquery_fields(fields, ix)} + end + + defp subquery_select({{:., _, [{:&, _, [_]}, field]}, _, []} = expr, _take, _query) do + {nil, [{field, expr}]} + end + + defp subquery_select(_expr, _take, _query) do + :error + end + + defp subquery_fields(fields, ix) do + for field <- fields do + {field, {{:., [], [{:&, [], [ix]}, field]}, [], []}} + end + end + + defp keep_source_or_struct({:source, _, _, _} = source), do: source + defp keep_source_or_struct({:struct, name, _}), do: name + defp keep_source_or_struct(_), do: nil + + defp subquery_source_fields({:source, _, _, types}), do: Keyword.keys(types) + defp subquery_source_fields({:struct, _, types}), do: Keyword.keys(types) + defp subquery_source_fields({:map, types}), do: Keyword.keys(types) + + defp subquery_type_for({:source, _, _, fields}, field), do: Keyword.fetch(fields, field) + + defp subquery_type_for({:struct, _name, types}, field), + do: subquery_type_for_value(types, field) + + defp subquery_type_for({:map, types}, field), do: subquery_type_for_value(types, field) + + defp subquery_type_for_value(types, field) do + case Keyword.fetch(types, field) do + {:ok, {:value, type}} -> {:ok, type} + {:ok, _} -> {:ok, :any} + :error -> :error + end + end + + defp assert_subquery_fields!(query, expr, pairs) do + Enum.each(pairs, fn + {key, _} when not is_atom(key) -> + error!( + query, + "only atom keys are allowed when selecting a map in subquery, got: `#{Macro.to_string(expr)}`" + ) + + {key, value} -> + if valid_subquery_value?(value) do + 
{key, value} + else + error!( + query, + "atoms, structs, maps, lists, tuples and sources are not allowed as map values in subquery, got: `#{Macro.to_string(expr)}`" + ) + end + end) + end + + defp valid_subquery_value?({_, _}), do: false + defp valid_subquery_value?(args) when is_list(args), do: false + + defp valid_subquery_value?({container, _, args}) + when container in [:{}, :%{}, :&, :%] and is_list(args), + do: false + + defp valid_subquery_value?(nil), do: true + defp valid_subquery_value?(arg) when is_atom(arg), do: is_boolean(arg) + defp valid_subquery_value?(_), do: true + + defp plan_joins(query, sources, offset, adapter, cte_names) do + plan_joins(query.joins, query, [], sources, [], 1, offset, adapter, cte_names) + end + + defp plan_joins( + [%JoinExpr{assoc: {ix, assoc}, qual: qual, on: on, prefix: prefix} = join | t], + query, + joins, + sources, + tail_sources, + counter, + offset, + adapter, + cte_names + ) do + source = fetch_source!(sources, ix) + schema = schema_for_association_join!(query, join, source) + refl = schema.__schema__(:association, assoc) + + unless refl do + error!(query, join, "could not find association `#{assoc}` on schema #{inspect(schema)}") + end + + # If we have the following join: + # + # from p in Post, + # join: p in assoc(p, :comments) + # + # The callback below will return a query that contains only + # joins in a way it starts with the Post and ends in the + # Comment. + # + # This means we need to rewrite the joins below to properly + # shift the &... identifier in a way that: + # + # &0 -> becomes assoc ix + # &LAST_JOIN -> becomes counter + # + # All values in the middle should be shifted by offset, + # all values after join are already correct. + child = refl.__struct__.joins_query(refl) + + # Rewrite prefixes: + # 1. the child query has the parent query prefix + # (note the child query should NEVER have a prefix) + # 2. 
from and joins can have their prefixes explicitly + # overwritten by the join prefix + child = rewrite_prefix(child, query.prefix) + child = update_in(child.from, &rewrite_prefix(&1, prefix)) + child = update_in(child.joins, &Enum.map(&1, fn join -> rewrite_prefix(join, prefix) end)) + + last_ix = length(child.joins) + source_ix = counter + + {_, child_from_source} = plan_source(child, child.from, adapter, cte_names) + + {child_joins, child_sources, child_tail} = + plan_joins(child, [child_from_source], offset + last_ix - 1, adapter, cte_names) + + # Rewrite joins indexes as mentioned above + child_joins = Enum.map(child_joins, &rewrite_join(&1, qual, ix, last_ix, source_ix, offset)) + + # Drop the last resource which is the association owner (it is reversed) + child_sources = Enum.drop(child_sources, -1) + + [current_source | child_sources] = child_sources + child_sources = child_tail ++ child_sources + + plan_joins( + t, + query, + attach_on(child_joins, on) ++ joins, + [current_source | sources], + child_sources ++ tail_sources, + counter + 1, + offset + length(child_sources), + adapter, + cte_names + ) + end + + defp plan_joins( + [ + %JoinExpr{source: %Ecto.Query{} = join_query, qual: qual, on: on, prefix: prefix} = + join + | t + ], + query, + joins, + sources, + tail_sources, + counter, + offset, + adapter, + cte_names + ) do + case join_query do + %{ + order_bys: [], + limit: nil, + offset: nil, + group_bys: [], + joins: [], + havings: [], + preloads: [], + assocs: [], + distinct: nil, + lock: nil + } -> + join_query = rewrite_prefix(join_query, query.prefix) + from = rewrite_prefix(join_query.from, prefix) + {from, source} = plan_source(join_query, from, adapter, cte_names) + [join] = attach_on(query_to_joins(qual, from.source, join_query, counter), on) + + plan_joins( + t, + query, + [join | joins], + [source | sources], + tail_sources, + counter + 1, + offset, + adapter, + cte_names + ) + + _ -> + error!(query, join, """ + invalid query was interpolated 
in a join. + If you want to pass a query to a join, you must either: + + 1. Make sure the query only has `where` conditions (which will be converted to ON clauses) + 2. Or wrap the query in a subquery by calling subquery(query) + """) + end + end + + defp plan_joins( + [%JoinExpr{} = join | t], + query, + joins, + sources, + tail_sources, + counter, + offset, + adapter, + cte_names + ) do + {join, source} = plan_source(query, %{join | ix: counter}, adapter, cte_names) + + plan_joins( + t, + query, + [join | joins], + [source | sources], + tail_sources, + counter + 1, + offset, + adapter, + cte_names + ) + end + + defp plan_joins( + [], + _query, + joins, + sources, + tail_sources, + _counter, + _offset, + _adapter, + _cte_names + ) do + {joins, sources, tail_sources} + end + + defp attach_on([%{on: on} = h | t], %{expr: expr, params: params}) do + [%{h | on: merge_expr_and_params(:and, on, expr, params)} | t] + end + + defp rewrite_prefix(expr, nil), do: expr + defp rewrite_prefix(%{prefix: nil} = expr, prefix), do: %{expr | prefix: prefix} + defp rewrite_prefix(expr, _prefix), do: expr + + defp rewrite_join(%{on: on, ix: join_ix} = join, qual, ix, last_ix, source_ix, inc_ix) do + expr = + Macro.prewalk(on.expr, fn + {:&, meta, [join_ix]} -> + {:&, meta, [rewrite_ix(join_ix, ix, last_ix, source_ix, inc_ix)]} + + expr = %Ecto.Query.Tagged{type: {type_ix, type}} when is_integer(type_ix) -> + %{expr | type: {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), type}} + + other -> + other + end) + + params = Enum.map(on.params, &rewrite_param_ix(&1, ix, last_ix, source_ix, inc_ix)) + + %{ + join + | on: %{on | expr: expr, params: params}, + qual: qual, + ix: rewrite_ix(join_ix, ix, last_ix, source_ix, inc_ix) + } + end + + # We need to replace the source by the one from the assoc + defp rewrite_ix(0, ix, _last_ix, _source_ix, _inc_x), do: ix + + # The last entry will have the current source index + defp rewrite_ix(last_ix, _ix, last_ix, source_ix, _inc_x), do: 
source_ix + + # All above last are already correct + defp rewrite_ix(join_ix, _ix, last_ix, _source_ix, _inc_ix) when join_ix > last_ix, do: join_ix + + # All others need to be incremented by the offset sources + defp rewrite_ix(join_ix, _ix, _last_ix, _source_ix, inc_ix), do: join_ix + inc_ix + + defp rewrite_param_ix({value, {upper, {type_ix, field}}}, ix, last_ix, source_ix, inc_ix) + when is_integer(type_ix) do + {value, {upper, {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), field}}} + end + + defp rewrite_param_ix({value, {type_ix, field}}, ix, last_ix, source_ix, inc_ix) + when is_integer(type_ix) do + {value, {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), field}} + end + + defp rewrite_param_ix(param, _, _, _, _), do: param + + defp fetch_source!(sources, ix) when is_integer(ix) do + case Enum.reverse(sources) |> Enum.fetch(ix) do + {:ok, source} -> + source + + :error -> + raise ArgumentError, "could not find a source with index `#{ix}` in `#{inspect(sources)}" + end + end + + defp fetch_source!(_, ix) do + raise ArgumentError, + "invalid binding index: `#{inspect(ix)}` (check if you're binding using a valid :as atom)" + end + + defp schema_for_association_join!(query, join, source) do + case source do + {:fragment, _, _} -> + error!(query, join, "cannot perform association joins on fragment sources") + + {source, nil, _} -> + error!( + query, + join, + "cannot perform association join on #{inspect(source)} " <> + "because it does not have a schema" + ) + + {_, schema, _} -> + schema + + %Ecto.SubQuery{select: {:source, {_, schema}, _, _}} -> + schema + + %Ecto.SubQuery{select: {:struct, schema, _}} -> + schema + + %Ecto.SubQuery{} -> + error!( + query, + join, + "can only perform association joins on subqueries " <> + "that return a source with schema in select" + ) + + _ -> + error!(query, join, "can only perform association joins on sources with a schema") + end + end + + # An optimized version of plan subqueries that only modifies the 
query when necessary. + defp plan_expr_subqueries(query, key, fun) do + query + |> Map.fetch!(key) + |> plan_expr_subqueries([], query, key, fun) + end + + defp plan_expr_subqueries([%{subqueries: []} = head | tail], acc, query, key, fun) do + plan_expr_subqueries(tail, [head | acc], query, key, fun) + end + + defp plan_expr_subqueries([head | tail], acc, query, key, fun) do + exprs = + Enum.reduce([head | tail], acc, fn + %{subqueries: []} = expr, acc -> + [expr | acc] + + %{subqueries: subqueries} = expr, acc -> + [%{expr | subqueries: Enum.map(subqueries, fun)} | acc] + end) + + %{query | key => Enum.reverse(exprs)} + end + + defp plan_expr_subqueries([], _acc, query, _key, _fun) do + query + end + + defp plan_expr_subquery(query, key, fun) do + with %{^key => %{subqueries: [_ | _] = subqueries} = expr} <- query do + %{query | key => %{expr | subqueries: Enum.map(subqueries, fun)}} + end + end + + defp plan_windows(%{windows: []} = query, _fun), do: query + + defp plan_windows(query, fun) do + windows = + Enum.map(query.windows, fn + {key, %{subqueries: []} = window} -> + {key, window} + + {key, %{subqueries: subqueries} = window} -> + {key, %{window | subqueries: Enum.map(subqueries, fun)}} + end) + + %{query | windows: windows} + end + + @doc """ + Prepare the parameters by merging and casting them according to sources. 
+ """ + def plan_cache(query, operation, adapter) do + {query, params, cache} = traverse_cache(query, operation, {[], []}, adapter) + {query, Enum.reverse(params), cache} + end + + defp traverse_cache(query, operation, cache_params, adapter) do + fun = &{&3, merge_cache(&1, &2, &3, &4, operation, adapter)} + {query, {cache, params}} = traverse_exprs(query, operation, cache_params, fun) + {query, params, finalize_cache(query, operation, cache)} + end + + defp merge_cache(:from, query, from, {cache, params}, _operation, adapter) do + {key, params} = source_cache(from, params) + {params, source_cacheable?} = cast_and_merge_params(:from, query, from, params, adapter) + {merge_cache({:from, key, from.hints}, cache, source_cacheable? and key != :nocache), params} + end + + defp merge_cache(kind, query, expr, {cache, params}, _operation, adapter) + when kind in ~w(select distinct limit offset)a do + if expr do + {params, cacheable?} = cast_and_merge_params(kind, query, expr, params, adapter) + {merge_cache({kind, expr_to_cache(expr)}, cache, cacheable?), params} + else + {cache, params} + end + end + + defp merge_cache(kind, query, exprs, {cache, params}, _operation, adapter) + when kind in ~w(where update group_by having order_by)a do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce(exprs, {params, true}, fn expr, {params, cacheable?} -> + {params, current_cacheable?} = cast_and_merge_params(kind, query, expr, params, adapter) + {expr_to_cache(expr), {params, cacheable? 
and current_cacheable?}} + end) + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({kind, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:join, query, exprs, {cache, params}, _operation, adapter) do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce(exprs, {params, true}, fn + %JoinExpr{on: on, qual: qual, hints: hints} = join, {params, cacheable?} -> + {key, params} = source_cache(join, params) + {params, join_cacheable?} = cast_and_merge_params(:join, query, join, params, adapter) + {params, on_cacheable?} = cast_and_merge_params(:join, query, on, params, adapter) + + {{qual, key, on.expr, hints}, + {params, cacheable? and join_cacheable? and on_cacheable? and key != :nocache}} + end) + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({:join, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:windows, query, exprs, {cache, params}, _operation, adapter) do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce(exprs, {params, true}, fn {key, expr}, {params, cacheable?} -> + {params, current_cacheable?} = + cast_and_merge_params(:windows, query, expr, params, adapter) + + {{key, expr_to_cache(expr)}, {params, cacheable? and current_cacheable?}} + end) + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({:windows, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:combination, _query, combinations, cache_and_params, operation, adapter) do + # In here we add each combination as its own entry in the cache key. + # We could group them to avoid multiple keys, but since they are uncommon, we keep it simple. 
+ Enum.reduce(combinations, cache_and_params, fn {modifier, query}, {cache, params} -> + {_, params, inner_cache} = traverse_cache(query, operation, {[], params}, adapter) + {merge_cache({modifier, inner_cache}, cache, inner_cache != :nocache), params} + end) + end + + defp merge_cache(:with_cte, _query, nil, cache_and_params, _operation, _adapter) do + cache_and_params + end + + defp merge_cache(:with_cte, query, with_expr, cache_and_params, _operation, adapter) do + %{queries: queries, recursive: recursive} = with_expr + key = if recursive, do: :recursive_cte, else: :non_recursive_cte + + # In here we add each cte as its own entry in the cache key. + # We could group them to avoid multiple keys, but since they are uncommon, we keep it simple. + Enum.reduce(queries, cache_and_params, fn + {name, opts, %Ecto.Query{} = query}, {cache, params} -> + {_, params, inner_cache} = traverse_cache(query, :all, {[], params}, adapter) + + {merge_cache( + {key, name, opts[:materialized], opts[:operation], inner_cache}, + cache, + inner_cache != :nocache + ), params} + + {name, opts, %Ecto.Query.QueryExpr{} = query_expr}, {cache, params} -> + {params, cacheable?} = + cast_and_merge_params(:with_cte, query, query_expr, params, adapter) + + {merge_cache( + {key, name, opts[:materialized], opts[:operation], expr_to_cache(query_expr)}, + cache, + cacheable? 
+ ), params} + end) + end + + defp expr_to_cache(%QueryExpr{expr: expr}), do: expr + + defp expr_to_cache(%SelectExpr{expr: expr, subqueries: []}), do: expr + + defp expr_to_cache(%SelectExpr{expr: expr, subqueries: subqueries}) do + {expr, Enum.map(subqueries, fn %{cache: cache} -> {:subquery, cache} end)} + end + + defp expr_to_cache(%ByExpr{expr: expr, subqueries: []}), do: expr + + defp expr_to_cache(%ByExpr{expr: expr, subqueries: subqueries}) do + {expr, Enum.map(subqueries, fn %{cache: cache} -> {:subquery, cache} end)} + end + + defp expr_to_cache(%BooleanExpr{op: op, expr: expr, subqueries: []}), do: {op, expr} + + defp expr_to_cache(%BooleanExpr{op: op, expr: expr, subqueries: subqueries}) do + # Alternate implementation could be replace {:subquery, i} expression in expr. + # Current strategy appends [{:subquery, i, cache}], where cache is the cache key for this subquery. + {op, expr, Enum.map(subqueries, fn %{cache: cache} -> {:subquery, cache} end)} + end + + defp expr_to_cache(%LimitExpr{expr: expr, with_ties: with_ties}), do: {with_ties, expr} + + @spec cast_and_merge_params(atom, Ecto.Query.t(), any, list, module) :: + {params :: list, cacheable? :: boolean} + defp cast_and_merge_params(kind, query, expr, params, adapter) do + Enum.reduce(expr.params, {params, true}, fn + {:subquery, i}, {acc, cacheable?} -> + # This is the place holder to intersperse subquery parameters. + %Ecto.SubQuery{params: subparams, cache: cache} = Enum.fetch!(expr.subqueries, i) + {Enum.reverse(subparams, acc), cacheable? 
and cache != :nocache} + + {v, type}, {acc, cacheable?} -> + case cast_param(kind, query, expr, v, type, adapter) do + {cast_v, {:in, dump_v}} -> {split_variadic_params(cast_v, dump_v, acc), false} + {cast_v, {:splice, dump_v}} -> {split_variadic_params(cast_v, dump_v, acc), cacheable?} + cast_v_and_dump_v -> {[cast_v_and_dump_v | acc], cacheable?} + end + end) + end + + defp split_variadic_params(cast_v, dump_v, acc) do + Enum.zip(cast_v, dump_v) |> Enum.reverse(acc) + end + + defp merge_cache(_left, _right, false), do: :nocache + defp merge_cache(_left, :nocache, true), do: :nocache + defp merge_cache(left, right, true), do: [left | right] + + defp finalize_cache(_query, _operation, :nocache) do + :nocache + end + + defp finalize_cache(query, operation, cache) do + %{assocs: assocs, prefix: prefix, lock: lock, select: select, aliases: aliases} = query + aliases = Map.delete(aliases, @parent_as) + + cache = + case select do + %{take: take} when take != %{} -> + [take: take] ++ cache + + _ -> + cache + end + + cache = + cache + |> prepend_if(assocs != [], assocs: assocs) + |> prepend_if(prefix != nil, prefix: prefix) + |> prepend_if(lock != nil, lock: lock) + |> prepend_if(aliases != %{}, aliases: aliases) + + [operation | cache] + end + + defp prepend_if(cache, true, prepend), do: prepend ++ cache + defp prepend_if(cache, false, _prepend), do: cache + + defp source_cache(%{source: {_, nil} = source, prefix: prefix}, params), + do: {{source, prefix}, params} + + defp source_cache(%{source: {bin, schema}, prefix: prefix}, params), + do: {{bin, schema, schema.__schema__(:hash), prefix}, params} + + defp source_cache(%{source: {:fragment, _, _} = source, prefix: prefix}, params), + do: {{source, prefix}, params} + + defp source_cache(%{source: {:values, _, _}}, params), + do: {:nocache, params} + + defp source_cache(%{source: %Ecto.SubQuery{params: inner, cache: key}}, params), + do: {key, Enum.reverse(inner, params)} + + defp cast_param(_kind, query, expr, 
%DynamicExpr{}, _type, _value) do + error!( + query, + expr, + "invalid dynamic expression", + "dynamic expressions can only be interpolated at the top level of where, having, group_by, order_by, select, update or a join's on" + ) + end + + defp cast_param(_kind, query, expr, [{key, _} | _], _type, _value) when is_atom(key) do + error!( + query, + expr, + "invalid keyword list", + "keyword lists are only allowed at the top level of where, having, distinct, order_by, update or a join's on" + ) + end + + defp cast_param(_kind, query, expr, %x{}, {:in, _type}, _value) + when x in [Ecto.Query, Ecto.SubQuery] do + error!( + query, + expr, + "an #{inspect(x)} struct is not supported as right-side value of `in` operator", + "Did you mean to write `expr in subquery(query)` instead?" + ) + end + + defp cast_param(kind, query, expr, v, type, adapter) do + type = field_type!(kind, query, expr, type) + + with {:ok, type} <- normalize_param(kind, type, v), + {:ok, cast_v} <- cast_param(kind, type, v), + {:ok, dump_v} <- dump_param(adapter, type, cast_v) do + {cast_v, dump_v} + else + {:error, message} -> + e = + Ecto.QueryError.exception( + message: message, + query: query, + file: expr.file, + line: expr.line + ) + + raise Ecto.Query.CastError, value: v, type: type, message: Exception.message(e) + end + end + + @doc """ + Prepare association fields found in the query. + """ + def plan_assocs(query) do + plan_assocs(query, 0, query.assocs) + query + end + + defp plan_assocs(_query, _ix, []), do: :ok + + defp plan_assocs(query, ix, assocs) do + # We validate the schema exists when preparing joins. 
+ parent_schema = + case get_preload_source!(query, ix) do + {_, schema, _} -> + schema + + %Ecto.SubQuery{select: {:source, {_, schema}, _, _}} -> + schema + end + + Enum.each(assocs, fn {assoc, {child_ix, child_assocs}} -> + refl = parent_schema.__schema__(:association, assoc) + + unless refl do + error!( + query, + "field `#{inspect(parent_schema)}.#{assoc}` " <> + "in preload is not an association" + ) + end + + case find_source_expr(query, child_ix) do + %JoinExpr{qual: qual} when qual in [:inner, :left, :inner_lateral, :left_lateral] -> + :ok + + %JoinExpr{qual: qual} -> + error!( + query, + "association `#{inspect(parent_schema)}.#{assoc}` " <> + "in preload requires an inner, left or lateral join, got #{qual} join" + ) + + _ -> + :ok + end + + plan_assocs(query, child_ix, child_assocs) + end) + end + + defp plan_combinations(query, adapter, cte_names) do + combinations = + Enum.map(query.combinations, fn {type, combination_query} -> + {prepared_query, _params, _key} = + combination_query |> attach_prefix(query) |> plan(:all, adapter, cte_names) + + prepared_query = prepared_query |> ensure_select(true) + {type, prepared_query} + end) + + %{query | combinations: combinations} + end + + defp plan_ctes(%Ecto.Query{with_ctes: nil} = query, _adapter, cte_names), do: {query, cte_names} + + defp plan_ctes(%Ecto.Query{with_ctes: %{queries: queries}} = query, adapter, cte_names) do + {queries, cte_names} = + Enum.map_reduce(queries, cte_names, fn + {name, opts, %Ecto.Query{} = cte_query}, cte_names -> + cte_names = Map.put(cte_names, name, []) + + {planned_query, _params, _key} = + cte_query |> attach_prefix(query) |> plan(:all, adapter, cte_names) + + planned_query = planned_query |> ensure_select(true) + {{name, opts, planned_query}, cte_names} + + {name, opts, other}, cte_names -> + {{name, opts, other}, cte_names} + end) + + {put_in(query.with_ctes.queries, queries), cte_names} + end + + defp find_source_expr(query, 0) do + query.from + end + + defp 
find_source_expr(query, ix) do + Enum.find(query.joins, &(&1.ix == ix)) + end + + @doc """ + Used for customizing the query returning result. + """ + def ensure_select(%{select: select} = query, _fields) when select != nil do + query + end + + def ensure_select(%{select: nil}, []) do + raise ArgumentError, ":returning expects at least one field to be given, got an empty list" + end + + def ensure_select(%{select: nil} = query, fields) when is_list(fields) do + %{ + query + | select: %SelectExpr{ + expr: {:&, [], [0]}, + take: %{0 => {:any, fields}}, + line: __ENV__.line, + file: __ENV__.file + } + } + end + + def ensure_select(%{select: nil, from: %{source: {_, nil}}} = query, true) do + error!(query, "queries that do not have a schema need to explicitly pass a :select clause") + end + + def ensure_select(%{select: nil, from: %{source: {:fragment, _, _}}} = query, true) do + error!(query, "queries from a fragment need to explicitly pass a :select clause") + end + + def ensure_select(%{select: nil} = query, true) do + %{query | select: %SelectExpr{expr: {:&, [], [0]}, line: __ENV__.line, file: __ENV__.file}} + end + + def ensure_select(%{select: nil} = query, false) do + query + end + + @doc """ + Normalizes and validates the query. + + After the query was planned and there is no cache + entry, we need to update its interpolations and check + its fields and associations exist and are valid. 
+ """ + def normalize(query, operation, adapter, counter) do + query + |> normalize_query(operation, adapter, counter) + |> elem(0) + |> normalize_select(keep_literals?(operation, query)) + rescue + e -> + # Reraise errors so we ignore the planner inner stacktrace + filter_and_reraise(e, __STACKTRACE__) + end + + defp keep_literals?(:insert_all, _), do: true + defp keep_literals?(_, %{combinations: combinations}), do: combinations != [] + + defp normalize_query(query, operation, adapter, counter) do + case operation do + :all -> + assert_no_update!(query, operation) + + :insert_all -> + assert_no_update!(query, operation) + + :update_all -> + assert_update!(query, operation) + assert_only_filter_expressions!(query, operation) + + :delete_all -> + assert_no_update!(query, operation) + assert_only_filter_expressions!(query, operation) + end + + traverse_exprs( + query, + operation, + counter, + &validate_and_increment(&1, &2, &3, &4, operation, adapter) + ) + end + + defp validate_and_increment(:from, query, %{source: %Ecto.SubQuery{}}, _counter, kind, _adapter) + when kind not in ~w(all insert_all)a do + error!(query, "`#{kind}` does not allow subqueries in `from`") + end + + defp validate_and_increment(:from, query, %{source: source} = expr, counter, _kind, adapter) do + {source, acc} = prewalk_source(source, :from, query, expr, counter, adapter) + {%{expr | source: source}, acc} + end + + defp validate_and_increment(kind, query, expr, counter, _operation, adapter) + when kind in ~w(select distinct limit offset)a do + if expr do + prewalk(kind, query, expr, counter, adapter) + else + {nil, counter} + end + end + + defp validate_and_increment(kind, query, exprs, counter, _operation, adapter) + when kind in ~w(where group_by having order_by update)a do + {exprs, counter} = + Enum.reduce(exprs, {[], counter}, fn + %{expr: []}, {list, acc} -> + {list, acc} + + expr, {list, acc} -> + {expr, acc} = prewalk(kind, query, expr, acc, adapter) + {[expr | list], acc} + end) + 
+ {Enum.reverse(exprs), counter} + end + + defp validate_and_increment(:with_cte, _query, nil, counter, _operation, _adapter) do + {nil, counter} + end + + defp validate_and_increment(:with_cte, query, with_expr, counter, _operation, adapter) do + fun = &validate_and_increment(&1, &2, &3, &4, :all, adapter) + + {queries, counter} = + Enum.reduce(with_expr.queries, {[], counter}, fn + {name, opts, %Ecto.Query{} = inner_query}, {queries, counter} -> + inner_query = put_in(inner_query.aliases[@parent_as], query) + + # We don't want to use normalize_subquery_select because we are + # going to prepare the whole query ourselves next. + {_, _, inner_query} = rewrite_subquery_select_expr(inner_query, true) + {inner_query, counter} = traverse_exprs(inner_query, opts.operation, counter, fun) + + # Now compute the fields as keyword lists so we emit AS in Ecto query. + %{select: %{expr: expr, take: take, aliases: aliases}} = inner_query + + {{:map, types}, fields, _from} = + collect_fields(expr, [], :none, inner_query, take, true, %{}) + + fields = cte_fields(Keyword.keys(types), Enum.reverse(fields), aliases) + inner_query = put_in(inner_query.select.fields, fields) + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + + {[{name, opts, inner_query} | queries], counter} + + {name, opts, %QueryExpr{expr: {:fragment, _, _} = fragment} = query_expr}, + {queries, counter} -> + {fragment, counter} = + prewalk_source(fragment, :with_cte, query, with_expr, counter, adapter) + + query_expr = %{query_expr | expr: fragment} + {[{name, opts, query_expr} | queries], counter} + end) + + {%{with_expr | queries: Enum.reverse(queries)}, counter} + end + + defp validate_and_increment(:join, query, exprs, counter, _operation, adapter) do + Enum.map_reduce(exprs, counter, fn join, acc -> + {source, acc} = prewalk_source(join.source, :join, query, join, acc, adapter) + {on, acc} = prewalk(:join, query, join.on, acc, adapter) + {%{join | on: on, source: source, params: nil}, acc} + end) + 
end + + defp validate_and_increment(:windows, query, exprs, counter, _operation, adapter) do + {exprs, counter} = + Enum.reduce(exprs, {[], counter}, fn {name, expr}, {list, acc} -> + {expr, acc} = prewalk(:windows, query, expr, acc, adapter) + {[{name, expr} | list], acc} + end) + + {Enum.reverse(exprs), counter} + end + + defp validate_and_increment(:combination, query, combinations, counter, operation, adapter) do + fun = &validate_and_increment(&1, &2, &3, &4, operation, adapter) + parent_aliases = query.aliases[@parent_as] + + {combinations, counter} = + Enum.reduce(combinations, {[], counter}, fn {type, combination_query}, + {combinations, counter} -> + combination_query = put_in(combination_query.aliases[@parent_as], parent_aliases) + {combination_query, counter} = traverse_exprs(combination_query, operation, counter, fun) + {combination_query, _} = combination_query |> normalize_select(true) + {_, combination_query} = pop_in(combination_query.aliases[@parent_as]) + {[{type, combination_query} | combinations], counter} + end) + + {Enum.reverse(combinations), counter} + end + + defp validate_json_path!([path_field | rest], field, {:parameterized, {Ecto.Embedded, embed}}) + when is_binary(path_field) or is_integer(path_field) do + case embed do + %{related: related, cardinality: :one} -> + unless Enum.any?(related.__schema__(:fields), &(Atom.to_string(&1) == path_field)) do + raise "field `#{path_field}` does not exist in #{inspect(related)}" + end + + type = related.__schema__(:type, String.to_atom(path_field)) + validate_json_path!(rest, path_field, type) + + %{related: _, cardinality: :many} -> + unless is_integer(path_field) do + raise "cannot use `#{path_field}` to refer to an item in `embeds_many`" + end + + updated_embed = %{embed | cardinality: :one} + validate_json_path!(rest, path_field, {:parameterized, {Ecto.Embedded, updated_embed}}) + + other -> + raise "expected field `#{field}` to be of type embed, got: `#{inspect(other)}`" + end + end + + defp 
validate_json_path!([path_field | rest], field, {:parameterized, {Ecto.Embedded, embed}}) do + case embed do + %{related: _, cardinality: :one} -> + # A source field cannot be used to validate whether the next step in the + # path exists in the embedded schema, so we stop here. If there is an error + # later in the path it will be caught by the driver. + :ok + + %{related: _, cardinality: :many} -> + # The source field may not be an integer but for the sake of validating + # the rest of the path, we assume it is. The error will be caught later + # by the driver if it is not. + updated_embed = %{embed | cardinality: :one} + validate_json_path!(rest, path_field, {:parameterized, {Ecto.Embedded, updated_embed}}) + + other -> + raise "expected field `#{field}` to be of type embed, got: `#{inspect(other)}`" + end + end + + defp validate_json_path!([_path_field | _rest] = path, field, other_type) do + case Ecto.Type.type(other_type) do + :any -> + :ok + + :map -> + :ok + + {:map, _} -> + :ok + + {:parameterized, {type, _}} -> + validate_json_path!(path, field, type) + + type -> + raise "expected field `#{field}` to be an embed or a map, got: `#{inspect(type)}`" + end + end + + defp validate_json_path!([], _field, _type) do + :ok + end + + defp prewalk_source({:fragment, meta, fragments}, kind, query, expr, acc, adapter) do + {fragments, acc} = prewalk(fragments, kind, query, expr, acc, adapter) + {{:fragment, meta, fragments}, acc} + end + + defp prewalk_source({:values, meta, [types, num_rows]}, _kind, _query, _expr, acc, _adapter) do + length = num_rows * length(types) + # Adapters will use the schema types to cast the values + schema_types = Enum.map(types, fn {field, type} -> {field, Ecto.Type.type(type)} end) + {{:values, meta, [schema_types, acc, num_rows]}, acc + length} + end + + defp prewalk_source( + %Ecto.SubQuery{query: inner_query} = subquery, + kind, + query, + _expr, + counter, + adapter + ) do + try do + inner_query = 
put_in(inner_query.aliases[@parent_as], query) + {inner_query, counter} = normalize_query(inner_query, :all, adapter, counter) + {inner_query, _} = normalize_select(inner_query, true) + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + + # If the subquery comes from a select, we are not really interested on the fields + inner_query = + if kind == :where do + inner_query + else + update_in(inner_query.select.fields, fn fields -> + # fields are aliased by the subquery source, unless + # already aliased by selected_as/2 + subquery.select + |> subquery_source_fields() + |> Enum.zip(fields) + |> Enum.map(fn + {_source_alias, {select_alias, field}} -> {select_alias, field} + {source_alias, field} -> {source_alias, field} + end) + end) + end + + {%{subquery | query: inner_query}, counter} + rescue + e -> raise Ecto.SubQueryError, query: query, exception: e + end + end + + defp prewalk_source(source, _kind, _query, _expr, acc, _adapter) do + {source, acc} + end + + defp prewalk(:update, query, expr, counter, adapter) do + source = get_source!(:update, query, 0) + + {inner, acc} = + Enum.map_reduce(expr.expr, counter, fn {op, kw}, counter -> + {kw, acc} = + Enum.map_reduce(kw, counter, fn {field, value}, counter -> + {value, acc} = prewalk(value, :update, query, expr, counter, adapter) + {{field_source(source, field), value}, acc} + end) + + {{op, kw}, acc} + end) + + {%{expr | expr: inner, params: nil}, acc} + end + + defp prewalk(kind, query, expr, counter, adapter) do + {inner, acc} = prewalk(expr.expr, kind, query, expr, counter, adapter) + {%{expr | expr: inner, params: nil}, acc} + end + + defp prewalk({:subquery, i}, kind, query, expr, acc, adapter) do + prewalk_source(Enum.fetch!(expr.subqueries, i), kind, query, expr, acc, adapter) + end + + defp prewalk({:in, in_meta, [left, {:^, meta, [param]}]}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = validate_in(meta, expr, param, acc, 
adapter) + {{:in, in_meta, [left, right]}, acc} + end + + defp prewalk({:in, in_meta, [left, {:subquery, _} = right]}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = prewalk(right, kind, query, expr, acc, adapter) + + case right.query.select.fields do + [_] -> + :ok + + _ -> + error!( + query, + "subquery must return a single field in order to be used on the right-side of `in`" + ) + end + + {{:in, in_meta, [left, right]}, acc} + end + + defp prewalk({quantifier, meta, [{:subquery, _} = subquery]}, kind, query, expr, acc, adapter) + when quantifier in [:exists, :any, :all] do + {subquery, acc} = prewalk(subquery, kind, query, expr, acc, adapter) + + case {quantifier, subquery.query.select.fields} do + {:exists, _} -> + :ok + + {_, [_]} -> + :ok + + _ -> + error!( + query, + "subquery must return a single field in order to be used with #{quantifier}" + ) + end + + {{quantifier, meta, [subquery]}, acc} + end + + defp prewalk( + {:splice, splice_meta, [{:^, meta, [_]}, length]}, + _kind, + _query, + _expr, + acc, + _adapter + ) do + param = {:^, meta, [acc, length]} + {{:splice, splice_meta, [param]}, acc + length} + end + + defp prewalk({{:., dot_meta, [left, field]}, meta, []}, kind, query, expr, acc, _adapter) do + {ix, ix_expr, ix_query} = get_ix!(left, kind, query) + extra = if kind == :select, do: [type: type!(kind, ix_query, expr, ix, field)], else: [] + field = field_source(get_source!(kind, ix_query, ix), field) + {{{:., extra ++ dot_meta, [ix_expr, field]}, meta, []}, acc} + end + + defp prewalk({:^, meta, [ix]}, _kind, _query, _expr, acc, _adapter) when is_integer(ix) do + {{:^, meta, [acc]}, acc + 1} + end + + defp prewalk({:type, _, [arg, type]}, kind, query, expr, acc, adapter) do + {arg, acc} = prewalk(arg, kind, query, expr, acc, adapter) + type = field_type!(kind, query, expr, type, true) + {%Ecto.Query.Tagged{value: arg, tag: type, type: Ecto.Type.type(type)}, acc} + end + + defp 
prewalk({:json_extract_path, meta, [json_field, path]}, kind, query, expr, acc, _adapter) do + {{:., dot_meta, [left, field]}, expr_meta, []} = json_field + {ix, ix_expr, ix_query} = get_ix!(left, kind, query) + + type = type!(kind, ix_query, expr, ix, field) + validate_json_path!(path, field, type) + + field_source = kind |> get_source!(ix_query, ix) |> field_source(field) + + json_field = {{:., dot_meta, [ix_expr, field_source]}, expr_meta, []} + {{:json_extract_path, meta, [json_field, path]}, acc} + end + + defp prewalk({:selected_as, [], [name]}, _kind, query, _expr, acc, _adapter) do + name = selected_as!(query.select.aliases, name) + {{:selected_as, [], [name]}, acc} + end + + defp prewalk(%Ecto.Query.Tagged{value: v, type: type} = tagged, kind, query, expr, acc, adapter) do + if Ecto.Type.base?(type) do + {tagged, acc} + else + type = field_type!(kind, query, expr, type) + + with {:ok, type} <- normalize_param(kind, type, v), + {:ok, value} <- dump_param(adapter, type, v) do + # We cannot encode binary/uuid in queries because they would emit + # invalid queries with binary parts in them. In theory, we could + # wrap them in Ecto.Query.Tagged, but a tagged UUID would most + # likely wrap its string representation, not its binary one. + # So it is best to be consistent and not support query-dumping of + # non-base types. + if is_binary(value) and Ecto.Type.type(type) in [:binary_id, :binary, :uuid] do + error = + "cannot encode value `#{inspect(v)}` of type `#{inspect(type)}` within a query, please interpolate (using ^) instead" + + error!(query, expr, error) + else + {value, acc} + end + else + {:error, error} -> + error = + error <> + ". 
Or the value is incompatible or it must be " <> + "interpolated (using ^) so it may be cast accordingly" + + error!(query, expr, error) + end + end + end + + defp prewalk({left, right}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = prewalk(right, kind, query, expr, acc, adapter) + {{left, right}, acc} + end + + defp prewalk({left, meta, args}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {args, acc} = prewalk(args, kind, query, expr, acc, adapter) + {{left, meta, args}, acc} + end + + defp prewalk(list, kind, query, expr, acc, adapter) when is_list(list) do + Enum.map_reduce(list, acc, &prewalk(&1, kind, query, expr, &2, adapter)) + end + + defp prewalk(other, _kind, _query, _expr, acc, _adapter) do + {other, acc} + end + + defp selected_as!(select_aliases, name) do + case select_aliases do + %{^name => _} -> + name + + _ -> + raise ArgumentError, + "invalid alias: `#{inspect(name)}`. Use `selected_as/2` to define aliases in the outer most `select` expression." + end + end + + defp validate_in(meta, expr, param, acc, adapter) do + {v, t} = Enum.fetch!(expr.params, param) + length = length(v) + + case adapter.dumpers(t, t) do + [{:in, _} | _] -> {{:^, meta, [acc, length]}, acc + length} + _ -> {{:^, meta, [acc, length]}, acc + 1} + end + end + + defp normalize_select(%{select: nil} = query, _keep_literals?) do + {query, nil} + end + + defp normalize_select(query, keep_literals?) do + %{assocs: assocs, preloads: preloads, select: select} = query + %{take: take, expr: expr} = select + {tag, from_take} = Map.get(take, 0, {:any, []}) + source = get_source!(:select, query, 0) + assocs = merge_assocs(assocs, query) + + # In from, if there is a schema and we have a map tag with preloads, + # it needs to be converted to a map in a later pass. 
+ {take, from_tag} = + case source do + {source, schema, _} + when tag == :map and preloads != [] and is_binary(source) and schema != nil -> + {Map.put(take, 0, {:struct, from_take}), :map} + + _ -> + {take, :any} + end + + {postprocess, fields, from} = + collect_fields(expr, [], :none, query, take, keep_literals?, %{}) + + {fields, preprocess, from} = + case from do + {from_expr, from_source, from_fields} -> + {assoc_exprs, assoc_fields} = collect_assocs([], [], query, tag, from_take, assocs) + fields = from_fields ++ Enum.reverse(assoc_fields, Enum.reverse(fields)) + preprocess = [from_expr | Enum.reverse(assoc_exprs)] + {fields, preprocess, {from_tag, from_source}} + + :none when preloads != [] or assocs != [] -> + error!( + query, + "the binding used in `from` must be selected in `select` when using `preload`" + ) + + :none -> + {Enum.reverse(fields), [], :none} + end + + select = %{ + preprocess: preprocess, + postprocess: postprocess, + take: from_take, + assocs: assocs, + from: from + } + + {put_in(query.select.fields, fields), select} + end + + # Handling of source + + # The idea of collect_fields is to collect all fields used in select. + # However, special care is taken in for `from`. Because `from` is used + # earlier in assoc/preloads, any operation done on `from` is separately + # collected in the `from` information. Then, everything else refers to + # the preprocessed `from` as `{:source, :from}`. 
+ + defp collect_fields( + {:merge, _, [left, right]}, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + case collect_fields(left, fields, from, query, take, keep_literals?, %{}) do + {{:source, :from}, fields, left_from} -> + {right, right_fields, _} = + collect_fields(right, [], left_from, query, take, keep_literals?, %{}) + + {from_expr, from_source, from_fields} = left_from + + from = + {{:merge, from_expr, right}, from_source, from_fields ++ Enum.reverse(right_fields)} + + {{:source, :from}, fields, from} + + {left, left_fields, left_from} -> + {right, right_fields, right_from} = + collect_fields(right, left_fields, left_from, query, take, keep_literals?, %{}) + + {{:merge, left, right}, right_fields, right_from} + end + end + + defp collect_fields({:&, _, [0]}, fields, :none, query, take, _keep_literals?, drop) do + {expr, taken} = source_take!(:select, query, take, 0, 0, drop) + {{:source, :from}, fields, {{:source, :from}, expr, taken}} + end + + defp collect_fields({:&, _, [0]}, fields, from, _query, _take, _keep_literals?, _drop) do + {{:source, :from}, fields, from} + end + + defp collect_fields({:&, _, [ix]}, fields, from, query, take, _keep_literals?, drop) do + {expr, taken} = source_take!(:select, query, take, ix, ix, drop) + {expr, Enum.reverse(taken, fields), from} + end + + # Expression handling + + defp collect_fields( + {agg, _, [{{:., dot_meta, [{:&, _, [_]}, _]}, _, []} | _]} = expr, + fields, + from, + _query, + _take, + _keep_literals?, + _drop + ) + when agg in @aggs do + type = + case agg do + :count -> :integer + :row_number -> :integer + :rank -> :integer + :dense_rank -> :integer + :ntile -> :integer + # If it is possible to upcast, we do it, otherwise keep the DB value. + # For example, an average of integers will return a decimal, which can't be cast + # as an integer. But an average of "moneys" should be upcast. 
+ _ -> {:try, Keyword.fetch!(dot_meta, :type)} + end + + {{:value, type}, [expr | fields], from} + end + + defp collect_fields( + {:filter, _, [call, _]} = expr, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + case call do + {agg, _, _} when agg in @aggs -> + :ok + + {:fragment, _, [_ | _]} -> + :ok + + _ -> + error!( + query, + "filter(...) expects the first argument to be an aggregate expression, got: `#{Macro.to_string(expr)}`" + ) + end + + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?, %{}) + {type, [expr | fields], from} + end + + defp collect_fields( + {:coalesce, _, [left, right]} = expr, + fields, + from, + query, + take, + _keep_literals?, + _drop + ) do + {left_type, _, _} = collect_fields(left, fields, from, query, take, true, %{}) + {right_type, _, _} = collect_fields(right, fields, from, query, take, true, %{}) + + type = if left_type == right_type, do: left_type, else: {:value, :any} + {type, [expr | fields], from} + end + + defp collect_fields( + {:over, _, [call, window]} = expr, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + if is_atom(window) and not Keyword.has_key?(query.windows, window) do + error!(query, "unknown window #{inspect(window)} given to over/2") + end + + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?, %{}) + {type, [expr | fields], from} + end + + defp collect_fields( + {{:., dot_meta, [{:&, _, [_]}, _]}, _, []} = expr, + fields, + from, + _query, + _take, + _keep_literals?, + _drop + ) do + {{:value, Keyword.fetch!(dot_meta, :type)}, [expr | fields], from} + end + + defp collect_fields({left, right}, fields, from, query, take, keep_literals?, _drop) do + {args, fields, from} = + collect_args([left, right], fields, from, query, take, keep_literals?, []) + + {{:tuple, args}, fields, from} + end + + defp collect_fields({:{}, _, args}, fields, from, query, take, keep_literals?, _drop) do + {args, fields, from} = 
collect_args(args, fields, from, query, take, keep_literals?, []) + {{:tuple, args}, fields, from} + end + + defp collect_fields( + {:%{}, _, [{:|, _, [data, args]}]}, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + drop = Map.new(args, fn {key, _} -> {key, nil} end) + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?, drop) + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + {{:map, data, args}, fields, from} + end + + defp collect_fields({:%{}, _, args}, fields, from, query, take, keep_literals?, _drop) do + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + {{:map, args}, fields, from} + end + + defp collect_fields( + {:%, _, [name, {:%{}, _, [{:|, _, [data, args]}]}]}, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + drop = Map.new(args, fn {key, _} -> {key, nil} end) + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?, drop) + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + struct!(name, args) + {{:struct, name, data, args}, fields, from} + end + + defp collect_fields( + {:%, _, [name, {:%{}, _, args}]}, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + struct!(name, args) + {{:struct, name, args}, fields, from} + end + + defp collect_fields( + {:date_add, _, [arg | _]} = expr, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + case collect_fields(arg, fields, from, query, take, keep_literals?, %{}) do + {{:value, :any}, _, _} -> {{:value, :date}, [expr | fields], from} + {type, _, _} -> {type, [expr | fields], from} + end + end + + defp collect_fields( + {:datetime_add, _, [arg | _]} = expr, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + case collect_fields(arg, fields, from, query, 
take, keep_literals?, %{}) do + {{:value, :any}, _, _} -> {{:value, :naive_datetime}, [expr | fields], from} + {type, _, _} -> {type, [expr | fields], from} + end + end + + defp collect_fields(args, fields, from, query, take, keep_literals?, _drop) + when is_list(args) do + {args, fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) + {{:list, args}, fields, from} + end + + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_binary(expr) do + {{:value, :binary}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_integer(expr) do + {{:value, :integer}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_float(expr) do + {{:value, :float}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true, _drop) when is_boolean(expr) do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields(nil, fields, from, _query, _take, true, _drop) do + {{:value, :any}, [nil | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?, _drop) + when is_atom(expr) do + {expr, fields, from} + end + + defp collect_fields(expr, fields, from, _query, _take, false, _drop) + when is_binary(expr) or is_number(expr) do + {expr, fields, from} + end + + defp collect_fields( + %Ecto.Query.Tagged{tag: tag} = expr, + fields, + from, + _query, + _take, + _keep_literals?, + _drop + ) do + {{:value, tag}, [expr | fields], from} + end + + defp collect_fields({op, _, [_]} = expr, fields, from, _query, _take, _keep_literals?, _drop) + when op in ~w(not is_nil)a do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields({op, _, [_, _]} = expr, fields, from, _query, _take, _keep_literals?, _drop) + when op in ~w(< > <= >= == != and or like ilike)a do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields( + 
{:selected_as, _, [select_expr, name]}, + fields, + from, + query, + take, + keep_literals?, + _drop + ) do + {type, _, _} = collect_fields(select_expr, fields, from, query, take, keep_literals?, %{}) + {type, [{name, select_expr} | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?, _drop) do + {{:value, :any}, [expr | fields], from} + end + + defp collect_kv([{key, value} | elems], fields, from, query, take, keep_literals?, acc) do + {key, fields, from} = collect_fields(key, fields, from, query, take, keep_literals?, %{}) + {value, fields, from} = collect_fields(value, fields, from, query, take, keep_literals?, %{}) + collect_kv(elems, fields, from, query, take, keep_literals?, [{key, value} | acc]) + end + + defp collect_kv([], fields, from, _query, _take, _keep_literals?, acc) do + {Enum.reverse(acc), fields, from} + end + + defp collect_args([elem | elems], fields, from, query, take, keep_literals?, acc) do + {elem, fields, from} = collect_fields(elem, fields, from, query, take, keep_literals?, %{}) + collect_args(elems, fields, from, query, take, keep_literals?, [elem | acc]) + end + + defp collect_args([], fields, from, _query, _take, _keep_literals?, acc) do + {Enum.reverse(acc), fields, from} + end + + defp merge_assocs(assocs, query) do + assocs + |> Enum.reduce(%{}, fn {field, {index, children}}, acc -> + children = merge_assocs(children, query) + + Map.update(acc, field, {index, children}, fn + {^index, current_children} -> + {index, merge_assocs(children ++ current_children, query)} + + {other_index, _} -> + error!( + query, + "association `#{field}` is being set to binding at position #{index} " <> + "and at position #{other_index} at the same time" + ) + end) + end) + |> Map.to_list() + end + + defp collect_assocs(exprs, fields, query, tag, take, [{assoc, {ix, children}} | tail]) do + to_take = get_preload_source!(query, ix) + {fetch, take_children} = fetch_assoc(tag, take, assoc) + {expr, taken} = 
take!(to_take, query, fetch, assoc, ix, %{}) + exprs = [expr | exprs] + fields = Enum.reverse(taken, fields) + {exprs, fields} = collect_assocs(exprs, fields, query, tag, take_children, children) + {exprs, fields} = collect_assocs(exprs, fields, query, tag, take, tail) + {exprs, fields} + end + + defp collect_assocs(exprs, fields, _query, _tag, _take, []) do + {exprs, fields} + end + + defp fetch_assoc(tag, take, assoc) do + case Access.fetch(take, assoc) do + {:ok, value} -> {{:ok, {tag, value}}, value} + :error -> {:error, []} + end + end + + defp source_take!(kind, query, take, field, ix, drop) do + source = get_source!(kind, query, ix) + take!(source, query, Access.fetch(take, field), field, ix, drop) + end + + defp take!(source, query, fetched, field, ix, drop) do + case {fetched, source} do + {{:ok, {:struct, _}}, {:fragment, _, _}} -> + error!(query, "it is not possible to return a struct subset of a fragment") + + {{:ok, {:struct, fields}}, %Ecto.SubQuery{select: select}} -> + subquery_select_fields(select, fields, ix, query) + + {{:ok, {_, []}}, {_, _, _}} -> + error!( + query, + "at least one field must be selected for binding `#{field}`, got an empty list" + ) + + {{:ok, {:struct, _}}, {_, nil, _}} -> + error!(query, "struct/2 in select expects a source with a schema") + + {{:ok, {kind, fields}}, {source, schema, prefix}} when is_binary(source) -> + dumper = if schema, do: schema.__schema__(:dump), else: %{} + schema = if kind == :map, do: nil, else: schema + {types, fields} = select_dump(List.wrap(fields), dumper, ix, drop) + {{:source, {source, schema}, prefix || query.prefix, types}, fields} + + {{:ok, {_, fields}}, _} -> + {{:map, Enum.map(fields, &{&1, {:value, :any}})}, + Enum.map(fields, &select_field(&1, ix, :always))} + + {:error, {:fragment, _, _}} -> + {{:value, :map}, [{:&, [], [ix]}]} + + {:error, {:values, _, [types, _]}} -> + fields = Keyword.keys(types) + + dumper = + types + |> Enum.map(fn {field, type} -> {field, {field, type, :always}} 
end) + |> Enum.into(%{}) + + {types, fields} = select_dump(fields, dumper, ix, drop) + {{:source, :values, nil, types}, fields} + + {:error, {_, nil, _}} -> + {{:value, :map}, [{:&, [], [ix]}]} + + {:error, {source, schema, prefix}} -> + {types, fields} = + select_dump(schema.__schema__(:query_fields), schema.__schema__(:dump), ix, drop) + + {{:source, {source, schema}, prefix || query.prefix, types}, fields} + + {:error, %Ecto.SubQuery{select: select}} -> + fields = subquery_source_fields(select) + {select, Enum.map(fields, &select_field(&1, ix, :always))} + end + end + + defp select_dump(fields, dumper, ix, drop) do + fields + |> Enum.reverse() + |> Enum.reduce({[], []}, fn + field, {types, exprs} when is_atom(field) and not is_map_key(drop, field) -> + {source, type, writable} = Map.get(dumper, field, {field, :any, :always}) + {[{field, type} | types], [select_field(source, ix, writable) | exprs]} + + _field, acc -> + acc + end) + end + + defp subquery_select_fields(select, requested_fields, ix, query) do + available_fields = subquery_source_fields(select) + requested_fields = List.wrap(requested_fields) + + schema = + case select do + {:source, {_, schema}, _, _} when not is_nil(schema) -> schema + + _ -> + error!(query, "it is not possible to return a struct subset of a subquery that does not return a schema struct") + end + + types = + Enum.map(requested_fields, fn field -> + case subquery_type_for(select, field) do + {:ok, type} -> + {field, type} + + :error -> + error!(query, "field `#{field}` in struct/2 is not available in the subquery. 
" <> + "Subquery only returns fields: #{inspect(available_fields)}") + end + end) + + field_exprs = Enum.map(requested_fields, &select_field(&1, ix, :always)) + + {{:source, {nil, schema}, nil, types}, field_exprs} + end + + defp select_field(field, ix, writable) do + {{:., [writable: writable], [{:&, [], [ix]}, field]}, [], []} + end + + defp get_ix!({:&, _, [ix]} = expr, _kind, query) do + {ix, expr, query} + end + + defp get_ix!({:as, meta, [as]}, _kind, query) do + case query.aliases do + %{^as => ix} -> {ix, {:&, meta, [ix]}, query} + %{} -> error!(query, "could not find named binding `as(#{inspect(as)})`") + end + end + + defp get_ix!({:parent_as, meta, [as]}, kind, query) do + case query.aliases[@parent_as] do + %{aliases: %{^as => ix}, sources: sources} = query -> + if kind == :select and not (ix < tuple_size(sources)) do + error!( + query, + "the parent_as in a subquery select used as a join can only access the `from` binding" + ) + else + {ix, {:parent_as, [], [as]}, query} + end + + %{} = parent -> + get_ix!({:parent_as, meta, [as]}, kind, parent) + + nil -> + error!(query, "could not find named binding `parent_as(#{inspect(as)})`") + end + end + + defp get_source!(where, %{sources: sources} = query, ix) do + elem(sources, ix) + rescue + ArgumentError -> + error!( + query, + "invalid query has specified more bindings than bindings available " <> + "in `#{where}` (look for `unknown_binding!` in the printed query below)" + ) + end + + defp get_preload_source!(query, ix) do + case get_source!(:preload, query, ix) do + {source, schema, _} = all when is_binary(source) and schema != nil -> + all + + %Ecto.SubQuery{select: {:source, {source, schema}, _, _}} = subquery + when is_binary(source) and schema != nil -> + subquery + + _ -> + error!( + query, + "can only preload sources with a schema " <> + "(fragments, binaries, and subqueries that do not select a from/join schema are not supported)" + ) + end + end + + @doc """ + Puts the prefix given via `opts` into 
the given query, if available. + """ + def attach_prefix(%{prefix: nil} = query, opts) when is_list(opts) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + %{query | prefix: prefix} + + :error -> + query + end + end + + def attach_prefix(%{prefix: nil} = query, %{prefix: prefix}) do + %{query | prefix: prefix} + end + + def attach_prefix(query, _), do: query + + ## Helpers + + @all_exprs [ + with_cte: :with_ctes, + distinct: :distinct, + select: :select, + from: :from, + join: :joins, + where: :wheres, + group_by: :group_bys, + having: :havings, + windows: :windows, + combination: :combinations, + order_by: :order_bys, + limit: :limit, + offset: :offset + ] + + # Although joins come before updates in the actual query, + # the on fields are moved to where, so they effectively + # need to come later for MySQL. This means subqueries + # with parameters are not supported as a join on MySQL. + # The only way to address it is by splitting how join + # and their on expressions are processed. + @update_all_exprs [ + with_cte: :with_ctes, + from: :from, + update: :updates, + join: :joins, + where: :wheres, + select: :select + ] + + @delete_all_exprs [ + with_cte: :with_ctes, + from: :from, + join: :joins, + where: :wheres, + select: :select + ] + + # Traverse all query components with expressions. + # Therefore from, preload, assocs and lock are not traversed. + defp traverse_exprs(query, operation, acc, fun) do + exprs = + case operation do + :all -> @all_exprs + :insert_all -> @all_exprs + :update_all -> @update_all_exprs + :delete_all -> @delete_all_exprs + end + + Enum.reduce(exprs, {query, acc}, fn {kind, key}, {query, acc} -> + {traversed, acc} = fun.(kind, query, Map.fetch!(query, key), acc) + {%{query | key => traversed}, acc} + end) + end + + defp field_type!(kind, query, expr, type, allow_virtuals? \\ false) + + defp field_type!(kind, query, expr, {composite, {ix, field}}, allow_virtuals?) 
+ when is_integer(ix) do + {composite, type!(kind, query, expr, ix, field, allow_virtuals?)} + end + + defp field_type!( + kind, + query, + expr, + {composite, {{bind_kind, _, [_]} = bind_expr, field}}, + allow_virtuals? + ) + when bind_kind in [:as, :parent_as] do + {ix, _, ix_query} = get_ix!(bind_expr, kind, query) + {composite, type!(kind, ix_query, expr, ix, field, allow_virtuals?)} + end + + defp field_type!(kind, query, expr, {{bind_kind, _, [_]} = bind_expr, field}, allow_virtuals?) + when bind_kind in [:as, :parent_as] do + {ix, _, ix_query} = get_ix!(bind_expr, kind, query) + type!(kind, ix_query, expr, ix, field, allow_virtuals?) + end + + defp field_type!(kind, query, expr, {ix, field}, allow_virtuals?) when is_integer(ix) do + type!(kind, query, expr, ix, field, allow_virtuals?) + end + + defp field_type!(_kind, _query, _expr, type, _) do + type + end + + defp type!(kind, query, expr, schema, field, allow_virtuals? \\ false) + + defp type!(_kind, _query, _expr, nil, _field, _allow_virtuals?), do: :any + + defp type!(_kind, _query, _expr, _ix, field, _allow_virtuals?) when is_binary(field), do: :any + + defp type!(kind, query, expr, ix, field, allow_virtuals?) when is_integer(ix) do + case get_source!(kind, query, ix) do + {:fragment, _, _} -> + :any + + {:values, _, [types, _]} -> + case Keyword.fetch(types, field) do + {:ok, type} -> + type + + :error -> + error!(query, expr, "field `#{field}` in `#{kind}` does not exist in values list") + end + + {_, schema, _} -> + type!(kind, query, expr, schema, field, allow_virtuals?) + + %Ecto.SubQuery{select: select} -> + case subquery_type_for(select, field) do + {:ok, type} -> type + :error -> error!(query, expr, "field `#{field}` does not exist in subquery") + end + end + end + + defp type!(kind, query, expr, schema, field, allow_virtuals?) when is_atom(schema) do + cond do + type = schema.__schema__(:type, field) -> + type + + type = allow_virtuals? 
&& schema.__schema__(:virtual_type, field) -> + type + + Map.has_key?(schema.__struct__(), field) -> + case schema.__schema__(:association, field) do + %Ecto.Association.BelongsTo{owner_key: owner_key} -> + error!( + query, + expr, + "field `#{field}` in `#{kind}` is an association in schema #{inspect(schema)}. " <> + "Did you mean to use `#{owner_key}`?" + ) + + %_{} -> + error!( + query, + expr, + "field `#{field}` in `#{kind}` is an association in schema #{inspect(schema)}" + ) + + _ -> + error!( + query, + expr, + "field `#{field}` in `#{kind}` is a virtual field in schema #{inspect(schema)}" + ) + end + + true -> + hint = closest_fields_hint(field, schema) + + error!( + query, + expr, + "field `#{field}` in `#{kind}` does not exist in schema #{inspect(schema)}", + hint + ) + end + end + + defp closest_fields_hint(input, schema) do + input_string = Atom.to_string(input) + + schema.__schema__(:fields) + |> Enum.map(fn field -> {field, String.jaro_distance(input_string, Atom.to_string(field))} end) + |> Enum.filter(fn {_field, score} -> score >= 0.77 end) + |> Enum.sort(&(elem(&1, 0) >= elem(&2, 0))) + |> Enum.take(5) + |> Enum.map(&elem(&1, 0)) + |> case do + [] -> + nil + + [suggestion] -> + "Did you mean `#{suggestion}`?" 
+ + suggestions -> + Enum.reduce(suggestions, "Did you mean one of: \n", fn suggestion, acc -> + acc <> "\n * `#{suggestion}`" + end) + end + end + + defp normalize_param(_kind, {:out, {:array, type}}, _value) do + {:ok, type} + end + + defp normalize_param(_kind, {:out, :any}, _value) do + {:ok, :any} + end + + defp normalize_param(kind, {:out, other}, value) do + {:error, + "value `#{inspect(value)}` in `#{kind}` expected to be part of an array " <> + "but matched type is #{inspect(other)}"} + end + + defp normalize_param(_kind, type, _value) do + {:ok, type} + end + + defp cast_param(kind, type, v) do + case Ecto.Type.cast(type, v) do + {:ok, v} -> + {:ok, v} + + :error -> + {:error, + "value `#{inspect(v)}` in `#{kind}` cannot be cast to type #{Ecto.Type.format(type)}"} + + {:error, _meta} -> + {:error, + "value `#{inspect(v)}` in `#{kind}` cannot be cast to type #{Ecto.Type.format(type)}"} + + other -> + raise "expected #{inspect(type)}.cast/1 to return {:ok, v}, :error, or {:error, meta}" <> + ", got: #{inspect(other)}" + end + end + + defp dump_param(adapter, type, v) do + case Ecto.Type.adapter_dump(adapter, type, v) do + {:ok, v} -> + {:ok, v} + + :error -> + {:error, "value `#{inspect(v)}` cannot be dumped to type #{Ecto.Type.format(type)}"} + end + end + + defp field_source({source, schema, _}, field) when is_binary(source) and schema != nil do + # If the field is not found we return the field itself + # which will be checked and raise later. + schema.__schema__(:field_source, field) || field + end + + defp field_source(_, field) do + field + end + + defp cte_fields([key | rest_keys], [{key, select_expr} | rest_fields], aliases) do + [{key, select_expr} | cte_fields(rest_keys, rest_fields, aliases)] + end + + defp cte_fields([key | rest_keys], [field | rest_fields], aliases) do + if Map.has_key?(aliases, key) do + raise ArgumentError, + "the alias, #{inspect(key)}, provided to `selected_as/2` conflicts" <> + "with the CTE's automatic aliasing. 
When using `selected_as/2`" <> + "inside of a CTE, you must ensure it does not conflict with any of the other" <> + "field names" + end + + {key, field} = + case field do + {alias, select_expr} -> {alias, select_expr} + field -> {key, field} + end + + [{key, field} | cte_fields(rest_keys, rest_fields, aliases)] + end + + defp cte_fields([], [], _aliases), do: [] + + defp assert_update!(%Ecto.Query{updates: updates} = query, operation) do + dumper = dumper_for_update(query) + + changes = + Enum.reduce(updates, %{}, fn update, acc -> + Enum.reduce(update.expr, acc, fn {_op, kw}, acc -> + Enum.reduce(kw, acc, fn {k, v}, acc -> + if Map.has_key?(acc, k) do + error!(query, "duplicate field `#{k}` for `#{operation}`") + end + + case dumper do + %{^k => {_, _, :always}} -> :ok + %{} -> error!(query, "cannot update non-updatable field `#{inspect(k)}`") + nil -> :ok + end + + Map.put(acc, k, v) + end) + end) + end) + + if changes == %{} do + error!(query, "`#{operation}` requires at least one field to be updated") + end + end + + defp assert_no_update!(query, operation) do + case query do + %Ecto.Query{updates: []} -> + query + + _ -> + error!(query, "`#{operation}` does not allow `update` expressions") + end + end + + defp assert_only_filter_expressions!(query, operation) do + case query do + %Ecto.Query{ + order_bys: [], + limit: nil, + offset: nil, + group_bys: [], + havings: [], + preloads: [], + assocs: [], + distinct: nil, + lock: nil, + windows: [], + combinations: [] + } -> + query + + _ when operation == :delete_all -> + error!( + query, + "`#{operation}` allows only `with_cte`, `where`, `select`, and `join` expressions. " <> + "You can exclude unwanted expressions from a query by using " <> + "Ecto.Query.exclude/2. Error found" + ) + + _ -> + error!( + query, + "`#{operation}` allows only `with_cte`, `where` and `join` expressions. " <> + "You can exclude unwanted expressions from a query by using " <> + "Ecto.Query.exclude/2. 
Error found" + ) + end + end + + defp dumper_for_update(query) do + case get_source!(:updates, query, 0) do + {source, schema, _} when is_binary(source) and schema != nil -> + schema.__schema__(:dump) + + _ -> + nil + end + end + + defp filter_and_reraise(exception, stacktrace) do + reraise exception, Enum.reject(stacktrace, &match?({__MODULE__, _, _, _}, &1)) + end + + defp error!(query, message) do + raise Ecto.QueryError, message: message, query: query + end + + defp error!(query, expr, message) do + raise Ecto.QueryError, message: message, query: query, file: expr.file, line: expr.line + end + + defp error!(query, expr, message, hint) do + raise Ecto.QueryError, + message: message, + query: query, + file: expr.file, + line: expr.line, + hint: hint + end +end diff --git a/deps/ecto/lib/ecto/query/window_api.ex b/deps/ecto/lib/ecto/query/window_api.ex new file mode 100644 index 0000000..6e6a900 --- /dev/null +++ b/deps/ecto/lib/ecto/query/window_api.ex @@ -0,0 +1,232 @@ +defmodule Ecto.Query.WindowAPI do + @moduledoc """ + Lists all windows functions. + + Windows functions must always be used as the first argument + of `over/2` where the second argument is the name of a window: + + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), :department)}, + windows: [department: [partition_by: e.depname]] + + In the example above, we get the average salary per department. + `:department` is the window name, partitioned by `e.depname` + and `avg/1` is the window function. + + However, note that defining a window is not necessary, as the + window definition can be given as the second argument to `over`: + + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), partition_by: e.depname)} + + Both queries are equivalent. However, if you are using the same + partitioning over and over again, defining a window will reduce + the query size. 
See `Ecto.Query.windows/3` for all possible window + expressions, such as `:partition_by` and `:order_by`. + """ + + @dialyzer :no_return + + @doc """ + Counts the entries in the table. + + from p in Post, select: count() + """ + def count, do: doc! [] + + @doc """ + Counts the given entry. + + from p in Post, select: count(p.id) + """ + def count(value), do: doc! [value] + + @doc """ + Calculates the average for the given entry. + + from p in Payment, select: avg(p.value) + """ + def avg(value), do: doc! [value] + + @doc """ + Calculates the sum for the given entry. + + from p in Payment, select: sum(p.value) + """ + def sum(value), do: doc! [value] + + @doc """ + Calculates the minimum for the given entry. + + from p in Payment, select: min(p.value) + """ + def min(value), do: doc! [value] + + @doc """ + Calculates the maximum for the given entry. + + from p in Payment, select: max(p.value) + """ + def max(value), do: doc! [value] + + @doc """ + Defines a value based on the function and the window. See moduledoc for more information. + + from e in Employee, select: over(avg(e.salary), partition_by: e.depname) + """ + def over(window_function, window_name), do: doc! [window_function, window_name] + + @doc """ + Returns number of the current row within its partition, counting from 1. + + from p in Post, + select: row_number() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def row_number(), do: doc! [] + + @doc """ + Returns rank of the current row with gaps; same as `row_number/0` of its first peer. + + from p in Post, + select: rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def rank(), do: doc! [] + + @doc """ + Returns rank of the current row without gaps; this function counts peer groups. 
+ + from p in Post, + select: dense_rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def dense_rank(), do: doc! [] + + @doc """ + Returns relative rank of the current row: (rank - 1) / (total rows - 1). + + from p in Post, + select: percent_rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def percent_rank(), do: doc! [] + + @doc """ + Returns relative rank of the current row: + (number of rows preceding or peer with current row) / (total rows). + + from p in Post, + select: cume_dist() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def cume_dist(), do: doc! [] + + @doc """ + Returns integer ranging from 1 to the argument value, dividing the partition as equally as possible. + + from p in Post, + select: ntile(10) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def ntile(num_buckets), do: doc! [num_buckets] + + @doc """ + Returns value evaluated at the row that is the first row of the window frame. + + from p in Post, + select: first_value(p.id) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def first_value(value), do: doc! [value] + + @doc """ + Returns value evaluated at the row that is the last row of the window frame. + + from p in Post, + select: last_value(p.id) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def last_value(value), do: doc! [value] + + + @doc """ + Applies the given expression as a FILTER clause against an + aggregate. This is currently only supported by Postgres. 
+ + from p in Post, + select: avg(p.value) + |> filter(p.value > 0 and p.value < 100) + |> over(partition_by: p.category_id, order_by: p.date) + """ + + def filter(value, filter), do: doc! [value, filter] + + @doc """ + Returns value evaluated at the row that is the nth row of the window + frame (counting from 1); `nil` if no such row. + + from p in Post, + select: nth_value(p.id, 4) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def nth_value(value, nth), do: doc! [value, nth] + + @doc """ + Returns value evaluated at the row that is offset rows before + the current row within the partition. + + If there is no such row, instead return default (which must be of the + same type as value). Both offset and default are evaluated with respect + to the current row. If omitted, offset defaults to 1 and default to `nil`. + + from e in Events, + windows: [w: [partition_by: e.name, order_by: e.tick]], + select: { + e.tick, + e.action, + e.name, + lag(e.action) |> over(:w), # previous_action + lead(e.action) |> over(:w) # next_action + } + + Note that this function must be invoked using window function syntax. + """ + def lag(value, offset \\ 1, default \\ nil), do: doc! [value, offset, default] + + @doc """ + Returns value evaluated at the row that is offset rows after + the current row within the partition. + + If there is no such row, instead return default (which must be of the + same type as value). Both offset and default are evaluated with respect + to the current row. If omitted, offset defaults to 1 and default to `nil`. + + from e in Events, + windows: [w: [partition_by: e.name, order_by: e.tick]], + select: { + e.tick, + e.action, + e.name, + lag(e.action) |> over(:w), # previous_action + lead(e.action) |> over(:w) # next_action + } + + Note that this function must be invoked using window function syntax. + """ + def lead(value, offset \\ 1, default \\ nil), do: doc! 
[value, offset, default] + + defp doc!(_) do + raise "the functions in Ecto.Query.WindowAPI should not be invoked directly, " <> + "they serve for documentation purposes only" + end +end diff --git a/deps/ecto/lib/ecto/queryable.ex b/deps/ecto/lib/ecto/queryable.ex new file mode 100644 index 0000000..584b690 --- /dev/null +++ b/deps/ecto/lib/ecto/queryable.ex @@ -0,0 +1,151 @@ +defprotocol Ecto.Queryable do + @moduledoc """ + Converts a data structure into an `Ecto.Query`. + + This is used by `Ecto.Repo` and also by the [`from`](`Ecto.Query.from/2`) macro. + For example, [`Repo.all`](`c:Ecto.Repo.all/2`) + expects any queryable as argument, which is why you can do `Repo.all(MySchema)` + or `Repo.all(query)`. Furthermore, when you write `from ALIAS in QUERYABLE`, + `QUERYABLE` accepts any data structure that implements `Ecto.Queryable`. + + This module defines a few default implementations so let us go over each and + how to use them. + + ## Atom + + The most common use case for this protocol is to convert atoms representing + an `Ecto.Schema` module into a query. This is what happens when you write: + + query = from(p in Person) + + Or when you directly pass a schema to a repository: + + Repo.all(Person) + + In case you did not know, Elixir modules are just atoms. This implementation + takes the provided module name and then tries to load the associated schema. + If no schema exists, it will raise `Protocol.UndefinedError`. + + ## BitString + + This implementation allows you to directly specify a table that you would like + to query from: + + from( + p in "people", + select: {p.first_name, p.last_name} + ) + + Or: + + Repo.delete_all("people") + + While this is quite simple to use, some repository operations, such as + `Repo.all`, require a `select` clause. When you query a schema, the + select is automatically defined for you based on the schema fields, + but when you pass a table directly, you need to explicitly list them. 
+ This limitation now brings us to our next implementation! + + ## Tuple + + Similar to the `BitString` implementation, this allows you to specify the + underlying table that you would like to query; however, this additionally + allows you to specify the schema you would like to use: + + from(p in {"filtered_people", Person}) + + This can be particularly useful if you have database views that filter or + aggregate the underlying data of a table but share the same schema. This means + that you can reuse the same schema while specifying a separate "source" for + the data. + + ## Ecto.Query + + This is a simple pass through. After all, all `Ecto.Query` instances + can be converted into `Ecto.Query`: + + Repo.all(from u in User, where: u.active) + + This also enables Ecto queries to compose, since we can pass one query + as the source of another: + + active_users = from u in User, where: u.active + ordered_active_users = from u in active_users, order_by: u.created_at + + ## Ecto.SubQuery + + Ecto also allows you to compose queries using subqueries. Imagine you + have a table of "people". Now imagine that you want to do something with + people with the most common last names. To get that list, you could write + something like: + + sub = from( + p in Person, + group_by: p.last_name, + having: count(p.last_name) > 1, + select: %{last_name: p.last_name, count: count(p.last_name)} + ) + + Now if you want to do something else with this data, perhaps join on + additional tables and perform some calculations, you can do that as so: + + from( + p in subquery(sub), + # other filtering etc here + ) + + Please note that the `Ecto.Query.subquery/2` is needed here to convert the + `Ecto.Query` into an instance of `Ecto.SubQuery`. This protocol then wraps + it into an `Ecto.Query`, but using the provided subquery in the FROM clause. + Please see `Ecto.Query.subquery/2` for more information. + """ + + @doc """ + Converts the given `data` into an `Ecto.Query`. 
+ """ + def to_query(data) +end + +defimpl Ecto.Queryable, for: Ecto.Query do + def to_query(query), do: query +end + +defimpl Ecto.Queryable, for: Ecto.SubQuery do + def to_query(subquery) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: subquery}} + end +end + +defimpl Ecto.Queryable, for: BitString do + def to_query(source) when is_binary(source) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: {source, nil}}} + end +end + +defimpl Ecto.Queryable, for: Atom do + def to_query(module) do + try do + module.__schema__(:query) + rescue + UndefinedFunctionError -> + message = + if :code.is_loaded(module) do + "the given module does not provide a schema" + else + "the given module does not exist" + end + + raise Protocol.UndefinedError, protocol: @protocol, value: module, description: message + + FunctionClauseError -> + raise Protocol.UndefinedError, protocol: @protocol, value: module, description: "the given module is an embedded schema" + end + end +end + +defimpl Ecto.Queryable, for: Tuple do + def to_query({source, schema} = from) + when is_binary(source) and is_atom(schema) and not is_nil(schema) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: from, prefix: schema.__schema__(:prefix)}} + end +end diff --git a/deps/ecto/lib/ecto/repo.ex b/deps/ecto/lib/ecto/repo.ex new file mode 100644 index 0000000..6af07e8 --- /dev/null +++ b/deps/ecto/lib/ecto/repo.ex @@ -0,0 +1,2537 @@ +defmodule Ecto.Repo do + @moduledoc """ + Defines a repository. + + A repository maps to an underlying data store, controlled by the + adapter. For example, Ecto ships with a Postgres adapter that + stores data into a PostgreSQL database. + + When used, the repository expects the `:otp_app` and `:adapter` as + option. The `:otp_app` should point to an OTP application that has + the repository configuration. 
For example, the repository: + + defmodule Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres + end + + Could be configured with: + + config :my_app, Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost" + + Most of the configuration that goes into the `config` is specific + to the adapter. For this particular example, you can check + [`Ecto.Adapters.Postgres`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.Postgres.html) + for more information. In spite of this, the following configuration values + are common across all adapters: + + * `:name`- The name of the Repo supervisor process. Notice that + it must be unique across **all repo modules** + + * `:priv` - the directory where to keep repository data, like + migrations, schema and more. Defaults to "priv/YOUR_REPO". + It must always point to a subdirectory inside the priv directory + + * `:url` - an URL that specifies storage information. Read below + for more information + + * `:log` - the log level used when logging the query with Elixir's + Logger. Can be any of `Logger.level/0` values or `false`. If false, + disables logging for that repository. Defaults to `:debug` + + * `:pool_size` - the size of the pool used by the connection module. + Defaults to `10` + + * `:pool_count` - the number of pools to run concurrently, + increase this option when the pool itself may be under contention. + When running multiple pools, queries are randomly routed to different + pools, without taking into account how many connections are available + in each. So in some circumstances, you may be routed to a fully busy + pool while others have connections available. The overall number of + connections used will be `pool_size * pool_count`. Defaults to `1` + + * `:telemetry_prefix` - we recommend adapters to publish events + using the [Telemetry](`:telemetry`) library. 
By default, the telemetry prefix + is based on the module name, so if your module is called + `MyApp.Repo`, the prefix will be `[:my_app, :repo]`. See the + ["Telemetry Events"](#module-telemetry-events) section to see which events we recommend + adapters to publish. Note that if you have multiple databases, you + should keep the `:telemetry_prefix` consistent for each repo and + use the `:repo` property in the event metadata for distinguishing + between repos. + + * `:stacktrace`- when `true`, publishes the stacktrace in telemetry events + and allows more advanced logging. + + * `:log_stacktrace_mfa` - A `{module, function, arguments}` tuple that customizes + which part of the stacktrace is included in query logs. The specified function + must accept at least two arguments (stacktrace and metadata) and return + a filtered stacktrace. The metadata is a map with keys such as `:repo` and other + adapter specific information. Additional arguments can be passed in the third + element of the tuple. For `Ecto.Adapters.SQL`, defaults to + `{Ecto.Adapters.SQL, :first_non_ecto_stacktrace, [1]}`, which filters the + stacktrace to show only the first call originating from outside + Ecto's internal code. Only relevant when `:stacktrace` is `true`. + + ## URLs + + Repositories by default support URLs. For example, the configuration + above could be rewritten to: + + config :my_app, Repo, + url: "ecto://postgres:postgres@localhost/ecto_simple" + + The schema can be of any value and the path represents the database name. + The URL will be used generate the relevant Repo configuration values, such + as `:database`, `:username`, `:password`, `:hostname` and `:port`. These + values take precedence over those already specified in the Repo's configuration. + + URL can include query parameters to override shared and adapter-specific + options, like `ssl`, `timeout` and `pool_size`. 
The following example + shows how to pass these configuration values: + + config :my_app, Repo, + url: "ecto://postgres:postgres@localhost/ecto_simple?ssl=true&pool_size=10" + + ### IPv6 support + + If your database's host resolves to ipv6 address you should + add `socket_options: [:inet6]` to configuration block like below: + + import Mix.Config + + config :my_app, MyApp.Repo, + hostname: "db12.dc0.comp.any", + socket_options: [:inet6], + ... + + ## `use` options + + When you `use Ecto.Repo`, the following options are supported: + + * `:otp_app` (required) - the name of the Erlang/OTP application + to find your repository configuration (usually your Elixir app name) + + * `:adapter` (required) - the module of the database adapter you want to use + + * `:read_only` - when true, marks the repository as `:read_only`. + In such cases, none of the functions that perform write operations, such as + `c:insert/2`, `c:insert_all/3`, `c:update_all/3`, and friends are defined + + ## Shared options + + Almost all of the repository functions outlined in this module accept the following + options: + + * `:timeout` - The time in milliseconds (as an integer) to wait for the query call to + finish. `:infinity` will wait indefinitely (default: `15_000`) + * `:log` - Can be any of the `Logger.level/0` values or `false`. If `false`, + logging is disabled. Defaults to the configured Repo logger level + * `:telemetry_event` - The telemetry event name to dispatch the event under. + See the next section for more information + * `:telemetry_options` - Extra options to attach to telemetry event name. + See the next section for more information + + ## Adapter-Specific Errors + + Many of the functions in this module may raise adapter-specific errors, such as `PostgrexError`. + This can happen, for example, when the underlying database cannot execute the specified query. + + ## Telemetry events + + There are two types of telemetry events. 
The ones emitted by Ecto and the + ones that are adapter specific. + + ### Ecto telemetry events + + The following events are emitted by all Ecto repositories: + + * `[:ecto, :repo, :init]` - it is invoked whenever a repository starts. + The measurement is a single `system_time` entry in native unit. The + metadata is the `:repo` and all initialization options under `:opts`. + + ### Adapter-specific events + + We recommend adapters to publish certain `Telemetry` events listed below. + Those events will use the `:telemetry_prefix` outlined above which defaults + to `[:my_app, :repo]`. + + For instance, to receive all query events published by a repository called + `MyApp.Repo`, one would define a module: + + defmodule MyApp.Telemetry do + def handle_event([:my_app, :repo, :query], measurements, metadata, config) do + IO.inspect binding() + end + end + + Then, in the `Application.start/2` callback, attach the handler to this event using + a unique handler id: + + :ok = :telemetry.attach("my-app-handler-id", [:my_app, :repo, :query], &MyApp.Telemetry.handle_event/4, %{}) + + For details, see [the telemetry documentation](https://hexdocs.pm/telemetry/). + + Below we list all events developers should expect from Ecto. All examples + below consider a repository named `MyApp.Repo`: + + #### `[:my_app, :repo, :query]` + + This event should be invoked on every query sent to the adapter, including + queries that are related to the transaction management. 
+ + The `:measurements` map may include the following, all given in the + `:native` time unit: + + * `:idle_time` - the time the connection spent waiting before being checked out for the query + * `:queue_time` - the time spent waiting to check out a database connection + * `:query_time` - the time spent executing the query + * `:decode_time` - the time spent decoding the data received from the database + * `:total_time` - the sum of (`queue_time`, `query_time`, and `decode_time`) + + All measurements are given in the `:native` time unit. You can read more + about it in the docs for `System.convert_time_unit/3`. + + The telemetry `:metadata` map includes the following fields. Each database + adapter may emit different information here. For Ecto.SQL databases, it + will look like this: + + * `:type` - the type of the Ecto query. For example, for Ecto.SQL + databases, it would be `:ecto_sql_query` + * `:repo` - the Ecto repository + * `:result` - the query result + * `:params` - the dumped query parameters (formatted for database drivers like Postgrex) + * `:cast_params` - the casted query parameters (normalized before dumping) + * `:query` - the query sent to the database as a string + * `:source` - the source the query was made on (may be `nil`) + * `:stacktrace` - the stacktrace information, if enabled, or `nil` + * `:options` - extra options given to the repo operation under + `:telemetry_options` + + """ + + @moduledoc groups: [ + %{title: "Query API", description: "Functions that operate on an `Ecto.Query`."}, + %{ + title: "Schema API", + description: "Functions that operate on an `Ecto.Schema` or a `Ecto.Changeset`." + }, + %{ + title: "Transaction API", + description: "Functions to work with database transactions and connections." + }, + %{ + title: "Process API", + description: "Functions to work with repository processes." + }, + "Config API", + "User callbacks" + ] + + @type t :: module + + @doc """ + Returns all running Ecto repositories. 
+ + The list is returned in no particular order. The list + contains either atoms, for named Ecto repositories, or + PIDs. + """ + @doc group: "Process API" + @spec all_running() :: [atom() | pid()] + defdelegate all_running(), to: Ecto.Repo.Registry + + @doc false + defmacro __using__(opts) do + quote bind_quoted: [opts: opts] do + @behaviour Ecto.Repo + + {otp_app, adapter, behaviours} = + Ecto.Repo.Supervisor.compile_config(__MODULE__, opts) + + @otp_app otp_app + @adapter adapter + @default_dynamic_repo opts[:default_dynamic_repo] || __MODULE__ + @read_only opts[:read_only] || false + @before_compile adapter + @aggregates [:count, :avg, :max, :min, :sum] + + def config do + {:ok, config} = Ecto.Repo.Supervisor.init_config(:runtime, __MODULE__, @otp_app, []) + config + end + + def __adapter__ do + @adapter + end + + def child_spec(opts) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]}, + type: :supervisor + } + end + + def start_link(opts \\ []) do + Ecto.Repo.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts) + end + + def stop(timeout \\ 5000) do + Supervisor.stop(get_dynamic_repo(), :normal, timeout) + end + + def load(schema_or_types, data) do + Ecto.Repo.Schema.load(@adapter, schema_or_types, data) + end + + def checkout(fun, opts \\ []) when is_function(fun) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(get_dynamic_repo()) + adapter.checkout(meta, opts, fun) + end + + def checked_out? 
do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(get_dynamic_repo()) + adapter.checked_out?(meta) + end + + @compile {:inline, get_dynamic_repo: 0} + + def get_dynamic_repo() do + Process.get({__MODULE__, :dynamic_repo}, @default_dynamic_repo) + end + + def put_dynamic_repo(dynamic) when is_atom(dynamic) or is_pid(dynamic) do + Process.put({__MODULE__, :dynamic_repo}, dynamic) || @default_dynamic_repo + end + + def default_options(_operation), do: [] + defoverridable default_options: 1 + + defp prepare_opts(operation_name, []), do: default_options(operation_name) + + defp prepare_opts(operation_name, [{key, _} | _rest] = opts) when is_atom(key) do + operation_name + |> default_options() + |> Keyword.merge(opts) + end + + ## Transactions + + if Ecto.Adapter.Transaction in behaviours do + def transact(fun_or_multi, opts \\ []) do + repo = get_dynamic_repo() + + {adapter_meta, opts} = + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:transaction, opts)) + + {fun_or_multi, opts} = prepare_transaction(fun_or_multi, opts) + + Ecto.Repo.Transaction.transact( + __MODULE__, + repo, + fun_or_multi, + {adapter_meta, opts} + ) + end + + def transaction(fun_or_multi, opts \\ []) + + def transaction(fun, opts) when is_function(fun, 0) do + fun = fn -> {:ok, fun.()} end + transact(fun, opts) + end + + def transaction(fun, opts) when is_function(fun, 1) do + fun = fn repo -> {:ok, fun.(repo)} end + transact(fun, opts) + end + + def transaction(%Ecto.Multi{} = multi, opts) do + transact(multi, opts) + end + + def in_transaction? 
do + Ecto.Repo.Transaction.in_transaction?(get_dynamic_repo()) + end + + @spec rollback(term) :: no_return + def rollback(value) do + Ecto.Repo.Transaction.rollback(get_dynamic_repo(), value) + end + end + + ## Schemas + + if Ecto.Adapter.Schema in behaviours and not @read_only do + def insert(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end + + def update(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.update( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update, opts)) + ) + end + + def insert_or_update(changeset, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert_or_update( + __MODULE__, + repo, + changeset, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_or_update, opts)) + ) + end + + def delete(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.delete( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete, opts)) + ) + end + + def insert!(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert!( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end + + def update!(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.update!( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update, opts)) + ) + end + + def insert_or_update!(changeset, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert_or_update!( + __MODULE__, + repo, + changeset, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_or_update, opts)) + ) + end + + def delete!(struct, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.delete!( + __MODULE__, + repo, + struct, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete, opts)) + ) + end + + def 
insert_all(schema_or_source, entries, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert_all( + __MODULE__, + repo, + schema_or_source, + entries, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_all, opts)) + ) + end + end + + ## Queryable + + if Ecto.Adapter.Queryable in behaviours do + if not @read_only do + def update_all(queryable, updates, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.update_all( + repo, + queryable, + updates, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update_all, opts)) + ) + end + + def delete_all(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.delete_all( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete_all, opts)) + ) + end + end + + def all(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.all( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def all_by(queryable, clauses, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.all_by( + repo, + queryable, + clauses, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def stream(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.stream( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:stream, opts)) + ) + end + + def get(queryable, id, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.get( + repo, + queryable, + id, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def get!(queryable, id, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.get!( + repo, + queryable, + id, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def get_by(queryable, clauses, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.get_by( + repo, + queryable, + clauses, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def 
get_by!(queryable, clauses, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.get_by!( + repo, + queryable, + clauses, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def reload(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.reload( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:reload, opts)) + ) + end + + def reload!(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.reload!( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:reload, opts)) + ) + end + + def one(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.one( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def one!(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.one!( + repo, + queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def aggregate(queryable, aggregate, opts \\ []) + + def aggregate(queryable, aggregate, opts) + when aggregate in [:count] and is_list(opts) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.aggregate( + repo, + queryable, + aggregate, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def aggregate(queryable, aggregate, field) + when aggregate in @aggregates and is_atom(field) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.aggregate( + repo, + queryable, + aggregate, + field, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, [])) + ) + end + + def aggregate(queryable, aggregate, field, opts) + when aggregate in @aggregates and is_atom(field) and is_list(opts) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.aggregate( + repo, + queryable, + aggregate, + field, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def exists?(queryable, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Queryable.exists?( + repo, + 
queryable, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts)) + ) + end + + def preload(struct_or_structs_or_nil, preloads, opts \\ []) do + repo = get_dynamic_repo() + + Ecto.Repo.Preloader.preload( + struct_or_structs_or_nil, + repo, + preloads, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:preload, opts)) + ) + end + + def prepare_query(operation, query, opts), do: {query, opts} + defoverridable prepare_query: 3 + + def prepare_transaction(fun_or_multi, opts), do: {fun_or_multi, opts} + defoverridable prepare_transaction: 2 + end + end + end + + ## User callbacks + + @optional_callbacks init: 2 + + @doc """ + A callback executed when the repo starts or when configuration is read. + + This callback is available for backwards compatibility purposes. Most + runtime configuration in Elixir today can be done via config/runtime.exs. + + The first argument is the context the callback is being invoked. If it + is called because the Repo supervisor is starting, it will be `:supervisor`. + It will be `:runtime` if it is called for reading configuration without + actually starting a process. + + The second argument is the repository configuration as stored in the + application environment. It must return `{:ok, keyword}` with the updated + list of configuration or `:ignore` (only in the `:supervisor` case). + """ + @doc group: "User callbacks" + @callback init(context :: :supervisor | :runtime, config :: Keyword.t()) :: + {:ok, Keyword.t()} | :ignore + + ## Ecto.Adapter + + @doc """ + Returns the adapter tied to the repository. + """ + @doc group: "Config API" + @callback __adapter__ :: Ecto.Adapter.t() + + @doc """ + Returns the adapter configuration stored in the `:otp_app` environment. + + If the `c:init/2` callback is implemented in the repository, + it will be invoked with the first argument set to `:runtime`. + It does not consider the options given on `c:start_link/1`. 
+ """ + @doc group: "Config API" + @callback config() :: Keyword.t() + + @doc """ + Starts the Repo supervision tree. + + Returns `{:error, {:already_started, pid}}` if the repo is already + started or `{:error, term}` in case anything else goes wrong. + + ## Options + + See the configuration in the moduledoc for options shared between adapters, + for adapter-specific configuration see the adapter's documentation. + """ + @doc group: "Process API" + @callback start_link(opts :: Keyword.t()) :: + {:ok, pid} + | {:error, {:already_started, pid}} + | {:error, term} + + @doc """ + Shuts down the repository. + """ + @doc group: "Process API" + @callback stop(timeout) :: :ok + + @doc """ + Checks out a connection for the duration of the function. + + It returns the result of the function. This is useful when + you need to perform multiple operations against the repository + in a row and you want to avoid checking out the connection + multiple times. + + `checkout/2` and `transaction/2` can be combined and nested + multiple times. If `checkout/2` is called inside the function + of another `checkout/2` call, the function is simply executed, + without checking out a new connection. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + """ + @doc group: "Transaction API" + @callback checkout((-> result), opts :: Keyword.t()) :: result when result: var + + @doc """ + Returns true if a connection has been checked out. + + This is true if inside a `c:Ecto.Repo.checkout/2` or + `c:Ecto.Repo.transact/2`. + + ## Examples + + MyRepo.checked_out?() + #=> false + + MyRepo.transact(fn -> + MyRepo.checked_out?() #=> true + end) + + MyRepo.checkout(fn -> + MyRepo.checked_out?() #=> true + end) + + """ + @doc group: "Transaction API" + @callback checked_out?() :: boolean + + @doc """ + Loads `data` into a schema or a map. + + The first argument can be a schema module or a map (of types). 
+ The first argument determines the return value: a struct or a map, + respectively. + + The second argument `data` specifies fields and values that are to be loaded. + It can be a map, a keyword list, or a `{fields, values}` tuple. + Fields can be atoms or strings. + + Fields that are not present in the schema (or `types` map) are ignored. + If any of the values has invalid type, an error is raised. + + To load data from non-database sources, use `Ecto.embedded_load/3`. + + ## Examples + + iex> MyRepo.load(User, %{name: "Alice", age: 25}) + %User{name: "Alice", age: 25} + + iex> MyRepo.load(User, [name: "Alice", age: 25]) + %User{name: "Alice", age: 25} + + `data` can also take form of `{fields, values}`: + + iex> MyRepo.load(User, {[:name, :age], ["Alice", 25]}) + %User{name: "Alice", age: 25, ...} + + The first argument can also be a `types` map: + + iex> types = %{name: :string, age: :integer} + iex> MyRepo.load(types, %{name: "Alice", age: 25}) + %{name: "Alice", age: 25} + + This function is especially useful when parsing raw query results: + + iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT * FROM users", []) + iex> Enum.map(result.rows, &MyRepo.load(User, {result.columns, &1})) + [%User{...}, ...] + + """ + @doc group: "Schema API" + @callback load( + schema_or_map :: module | map(), + data :: map() | Keyword.t() | {list, list} + ) :: Ecto.Schema.t() | map() + + @doc """ + Returns the atom name or pid of the current repository. + + See `c:put_dynamic_repo/1` for more information. + """ + @doc group: "Process API" + @callback get_dynamic_repo() :: atom() | pid() + + @doc """ + Sets the dynamic repository to be used in further interactions. + + Sometimes you may want a single Ecto repository to talk to + many different database instances. By default, when you call + `MyApp.Repo.start_link/1`, it will start a repository with + name `MyApp.Repo`. 
But if you want to start multiple repositories, + you can give each of them a different name: + + MyApp.Repo.start_link(name: :tenant_foo, hostname: "foo.example.com") + MyApp.Repo.start_link(name: :tenant_bar, hostname: "bar.example.com") + + You can also start repositories without names by explicitly + setting the name to nil: + + MyApp.Repo.start_link(name: nil, hostname: "temp.example.com") + + However, once the repository is started, you can't directly interact with + it, since all operations in `MyApp.Repo` are sent by default to the repository + named `MyApp.Repo`. You can change the default repo at compile time with: + + use Ecto.Repo, default_dynamic_repo: :name_of_repo + + Or you can change it anytime at runtime by calling `put_dynamic_repo/1`: + + MyApp.Repo.put_dynamic_repo(:tenant_foo) + + From this moment on, all future queries done by the current process will + run on `:tenant_foo`. + + > ### Global repo names {: .warning} + > + > The repo name resolution is global across all repo modules. When using + > `put_dynamic_repo/1`, ensure you're referencing the intended repo, as + > it is possible to accidentally reference repos from other modules: + > + > ```elixir + > Repo.start_link(name: :primary) + > AnalyticsRepo.start_link(name: :analytics) + > + > # This works but may not be intended - queries will use AnalyticsRepo's connection + > Repo.put_dynamic_repo(:analytics) + > Repo.all(User) # Executes against AnalyticsRepo's connection! + > ``` + """ + @doc group: "Process API" + @callback put_dynamic_repo(name_or_pid :: atom() | pid()) :: atom() | pid() + + ## Ecto.Adapter.Queryable + + @optional_callbacks get: 3, + get!: 3, + get_by: 3, + get_by!: 3, + reload: 2, + reload!: 2, + aggregate: 3, + aggregate: 4, + exists?: 2, + one: 2, + one!: 2, + preload: 3, + all: 2, + all_by: 3, + stream: 2, + update_all: 3, + delete_all: 2 + + @doc """ + Fetches a single struct from the data store where the primary key matches the + given id. 
+ + Returns `nil` if no result was found. If the struct in the queryable + has no or more than one primary key, it will raise an argument error. + + See also `c:get!/3`, `c:one/2`, and `c:all_by/3`. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get(Post, 42) + + MyRepo.get(Post, 42, prefix: "public") + + """ + @doc group: "Query API" + @callback get(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: + Ecto.Schema.t() | term | nil + + @doc """ + Similar to `c:get/3` but raises `Ecto.NoResultsError` if no record was found. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get!(Post, 42) + + MyRepo.get!(Post, 42, prefix: "public") + + """ + @doc group: "Query API" + @callback get!(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: + Ecto.Schema.t() | term + + @doc """ + Fetches a single result from the query. + + Returns `nil` if no result was found. 
Raises if more than one entry. + + See also `c:get/3`, `c:one/2`, and `c:all_by/3`. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get_by(Post, title: "My post") + + MyRepo.get_by(Post, [title: "My post"], prefix: "public") + + """ + @doc group: "Query API" + @callback get_by( + queryable :: Ecto.Queryable.t(), + clauses :: Keyword.t() | map, + opts :: Keyword.t() + ) :: Ecto.Schema.t() | term | nil + + @doc """ + Similar to `c:get_by/3` but raises `Ecto.NoResultsError` if no record was found. + + Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Example + + MyRepo.get_by!(Post, title: "My post") + + MyRepo.get_by!(Post, [title: "My post"], prefix: "public") + + """ + @doc group: "Query API" + @callback get_by!( + queryable :: Ecto.Queryable.t(), + clauses :: Keyword.t() | map, + opts :: Keyword.t() + ) :: Ecto.Schema.t() | term + + @doc """ + Reloads a given schema or schema list from the database. + + When using with lists, it is expected that all of the structs in the list belong + to the same schema. Ordering is guaranteed to be kept. Results not found in + the database will be returned as `nil`. + + Preloaded association will be discarded and need to be preloaded again. + + ## Example + + MyRepo.reload(post) + %Post{} + + MyRepo.reload([post1, post2]) + [%Post{}, %Post{}] + + MyRepo.reload([deleted_post, post1]) + [nil, %Post{}] + """ + @doc group: "Schema API" + @callback reload( + struct_or_structs :: Ecto.Schema.t() | [Ecto.Schema.t()], + opts :: Keyword.t() + ) :: Ecto.Schema.t() | [Ecto.Schema.t() | nil] | nil + + @doc """ + Similar to `c:reload/2`, but raises when something is not found. + + When using with lists, ordering is guaranteed to be kept. + + ## Example + + MyRepo.reload!(post) + %Post{} + + MyRepo.reload!([post1, post2]) + [%Post{}, %Post{}] + """ + @doc group: "Schema API" + @callback reload!(struct_or_structs, opts :: Keyword.t()) :: struct_or_structs + when struct_or_structs: Ecto.Schema.t() | [Ecto.Schema.t()] + + @doc """ + Calculate the given `aggregate`. + + Any preload or select in the query will be ignored in favor of + the column being aggregated. However, if the query has a limit, + offset, distinct or combination set, it will be automatically + wrapped in a subquery in order to return the proper result, + which requires the select field to follows certain rules: + it must return a `source`, a field (such as `source.field`), + or a map with atom keys and scalars (integers, floats, and + strings) or simple expressions as values. 
Those rules are shared + across all subqueries in Ecto. + + The aggregation will fail if any `group_by` field is set. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + # Returns the number of blog posts + Repo.aggregate(Post, :count) + + # Returns the number of blog posts in the "private" schema path + # (in Postgres) or database (in MySQL) + Repo.aggregate(Post, :count, prefix: "private") + + """ + @doc group: "Query API" + @callback aggregate( + queryable :: Ecto.Queryable.t(), + aggregate :: :count, + opts :: Keyword.t() + ) :: term | nil + + @doc """ + Calculate the given `aggregate` over the given `field`. + + See `c:aggregate/3` for general considerations and options. + + ## Examples + + # Returns the sum of the number of visits for every blog post + Repo.aggregate(Post, :sum, :visits) + + # Returns the sum of the number of visits for every blog post in the + # "private" schema path (in Postgres) or database (in MySQL) + Repo.aggregate(Post, :sum, :visits, prefix: "private") + + # Returns the average number of visits for the first 10 blog posts + query = from Post, limit: 10 + Repo.aggregate(query, :avg, :visits) + """ + @doc group: "Query API" + @callback aggregate( + queryable :: Ecto.Queryable.t(), + aggregate :: :avg | :count | :max | :min | :sum, + field :: atom, + opts :: Keyword.t() + ) :: term | nil + + @doc """ + Checks if there exists an entry that matches the given queryable. + + Returns a boolean. 
+ + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + > #### Generated Query {: .info} + > + > Ecto will take the provided queryable and modify it to reduce its footprint + > as much as possible. For example, by forcing `SELECT 1` and `LIMIT 1`. Any + > additional filtering must be provided directly on the queryable using expressions + > such as `where` and `having`. + + ## Examples + + # checks if any posts exist + Repo.exists?(Post) + + # checks if any posts exist in the "private" schema path (in Postgres) or + # database (in MySQL) + Repo.exists?(Post, prefix: "private") + + # checks if any post with a like count greater than 10 exists + query = from p in Post, where: p.like_count > 10 + Repo.exists?(query) + """ + @doc group: "Query API" + @callback exists?(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: boolean() + + @doc """ + Fetches a single result from the query. + + Returns `nil` if no result was found. Raises if more than one entry. + + See also `c:one!/2`, `c:get/3`, and `c:all/2`. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. 
+ + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + Repo.one(from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id) + + query = from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id + Repo.one(query, prefix: "private") + """ + @doc group: "Query API" + @callback one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() | term | nil + + @doc """ + Similar to `c:one/2` but raises `Ecto.NoResultsError` if no record was found. + + Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + """ + @doc group: "Query API" + @callback one!(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() | term + + @doc """ + Preloads all associations on the given struct or structs. + + This is similar to `Ecto.Query.preload/3` except it allows + you to preload structs after they have been fetched from the + database. + + In case the association was already loaded, preload won't attempt + to reload it. Preload assumes each association has the same nested + associations already loaded. If this is not the case, it is + possible to lose information. 
For example: + + comment1 = TestRepo.preload(comment1, [author: [:permalink]]) + TestRepo.preload([comment1, comment2], :author) + + If both comments are associated to the same author, the first comment + will lose its nested `:permalink` association because the second comment + does not have it preloaded. To avoid this, you must preload the nested + associations as well. + + If you want to reset the loaded fields, see `Ecto.reset_fields/2`. + + ## Options + + * `:force` - By default, Ecto won't preload associations that + are already loaded. By setting this option to true, any existing + association will be discarded and reloaded. + * `:in_parallel` - If the preloads must be done in parallel. It can + only be performed when we have more than one preload and the + repository is not in a transaction. Defaults to `true`. + * `:prefix` - the prefix to fetch preloads from. By default, queries + will use the same prefix as the first struct in the given collection. + This option allows the prefix to be changed. + * `:on_preloader_spawn` - when preloads are done in parallel, this function + will be called in the processes that perform the preloads. This can be useful + for context propagation for traces. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Examples + + # Use a single atom to preload an association + posts = Repo.preload posts, :comments + + # Use a list of atoms to preload multiple associations + posts = Repo.preload posts, [:comments, :authors] + + # Use a keyword list to preload nested associations as well + posts = Repo.preload posts, [comments: [:replies, :likes], authors: []] + + # You can mix atoms and keywords, but the atoms must come first + posts = Repo.preload posts, [:authors, comments: [:likes, replies: [:reactions]]] + + # Use a keyword list to customize how associations are queried + posts = Repo.preload posts, [comments: from(c in Comment, order_by: c.published_at)] + + # Use a two-element tuple for a custom query and nested association definition + query = from c in Comment, order_by: c.published_at + posts = Repo.preload posts, [comments: {query, [:replies, :likes]}] + + # Use a function for custom preloading + posts = Repo.preload posts, [comments: fn post_ids -> fetch_comments_by_post_ids(post_ids) end] + + The query given to preload may also preload its own associations. See the ["preload queries"](Ecto.Query.html#preload/3-preload-queries) and ["preload functions"](Ecto.Query.html#preload/3-preload-functions) section of the `Ecto.Query.preload/3` for details on those. + """ + @doc group: "Schema API" + @callback preload(structs_or_struct_or_nil, preloads :: term, opts :: Keyword.t()) :: + structs_or_struct_or_nil + when structs_or_struct_or_nil: [Ecto.Schema.t()] | Ecto.Schema.t() | nil + + @doc """ + A user customizable callback invoked for query-based operations. + + This callback can be used to further modify the query and options + before it is transformed and sent to the database. + + This callback is invoked for all query APIs, including the `stream` + functions. It is also invoked for `insert_all` if a source query is + given. It is not invoked for any of the other schema functions. 
+ + ## Examples + + Let's say you want to filter out records that were "soft-deleted" + (have `deleted_at` column set) from all operations unless an admin + is running the query; you can define the callback like this: + + @impl true + def prepare_query(_operation, query, opts) do + if opts[:admin] do + {query, opts} + else + query = from(x in query, where: is_nil(x.deleted_at)) + {query, opts} + end + end + + And then execute the query: + + Repo.all(query) # only non-deleted records are returned + Repo.all(query, admin: true) # all records are returned + + The callback will be invoked for all queries, including queries + made from associations and preloads. It is not invoked for each + individual join inside a query. + """ + @doc group: "User callbacks" + @callback prepare_query(operation, query :: Ecto.Query.t(), opts :: Keyword.t()) :: + {Ecto.Query.t(), Keyword.t()} + when operation: :all | :update_all | :delete_all | :stream | :insert_all + + @doc """ + A user-customizable callback invoked on transaction operations. + + This callback can be used to further modify the given Ecto Multi and options in a transaction operation + before it is transformed and sent to the database. + + This callback is only invoked in transactions. + + ## Examples + + Imagine you want to prepend a SQL comment to commit statements using the `commit_comment` option on transactions. + + @impl true + def prepare_transaction(multi_or_fun, opts) do + opts = Keyword.put_new_lazy(opts, :commit_comment, fn -> extract_comment(opts) end) + {multi_or_fun, opts} + end + + The callback will be invoked for every transaction operation, and it will try to extract the appropriate commit comment, + that will be subsequently used by the adapters if they support this option. 
+ """ + @doc group: "User callbacks" + @callback prepare_transaction(fun_or_multi :: fun | Ecto.Multi.t(), opts :: Keyword.t()) :: + {fun_or_multi :: fun | Ecto.Multi.t(), Keyword.t()} + + @doc """ + A user customizable callback invoked to retrieve default options + for operations. + + This can be used to provide default values per operation that + have higher precedence than the values given on configuration + or when starting the repository. It can also be used to set + query specific options, such as `:prefix`. + + This callback is invoked as the entry point for all repository + operations. For example, if you are executing a query with preloads, + this callback will be invoked once at the beginning, but the + options returned here will be passed to all following operations. + """ + @doc group: "User callbacks" + @callback default_options(operation) :: Keyword.t() + when operation: + :all + | :delete + | :delete_all + | :insert + | :insert_all + | :insert_or_update + | :preload + | :reload + | :stream + | :transaction + | :update + | :update_all + @doc """ + Fetches all entries from the data store matching the given query. + + May raise `Ecto.QueryError` if query validation fails. + + See also `c:all_by/3`, `c:one/2`, and `c:get/3`. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Example + + # Fetch all post titles + query = from p in Post, + select: p.title + MyRepo.all(query) + """ + @doc group: "Query API" + @callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t() | term] + + @doc """ + Fetches all entries from the data store matching the given query and conditions. + + May raise `Ecto.QueryError` if query validation fails. + + This function is a shortcut for `c:all/2` when adjusting the given query with simple conditions. + + See also `c:all/2` and `c:get_by/3`. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.all_by(Post, author_id: 1) + + query = from p in Post + MyRepo.all_by(query, author_id: 1) + """ + @doc group: "Query API" + @callback all_by( + queryable :: Ecto.Queryable.t(), + clauses :: Keyword.t() | map, + opts :: Keyword.t() + ) :: [Ecto.Schema.t() | term] + + @doc """ + Returns a lazy enumerable that emits all entries from the data store + matching the given query. + + SQL adapters, such as Postgres and MySQL, can only enumerate a stream + inside a transaction. + + May raise `Ecto.QueryError` if query validation fails. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. 
For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + * `:max_rows` - The number of rows to load from the database as we stream. + It is supported at least by Postgres and MySQL and defaults to 500. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + # Fetch all post titles + query = from p in Post, select: p.title + + stream = MyRepo.stream(query) + + MyRepo.transact(fn -> + Enum.to_list(stream) + end) + """ + @doc group: "Query API" + @callback stream(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: Enum.t() + + @doc """ + Updates all entries matching the given query with the given values. + + It returns a tuple containing the number of entries and any returned + result as second element. The second element is `nil` by default + unless a `select` is supplied in the update query. Note, however, + not all databases support returning data from UPDATEs. + + Keep in mind this `update_all` will not update autogenerated + fields like the `updated_at` columns. + + See `Ecto.Query.update/3` for update operations that can be + performed on fields. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. 
+ + ## Examples + + MyRepo.update_all(Post, set: [title: "New title"]) + + MyRepo.update_all(Post, inc: [visits: 1]) + + from(p in Post, where: p.id < 10, select: p.visits) + |> MyRepo.update_all(set: [title: "New title"]) + + from(p in Post, where: p.id < 10, update: [set: [title: "New title"]]) + |> MyRepo.update_all([]) + + from(p in Post, where: p.id < 10, update: [set: [title: ^new_title]]) + |> MyRepo.update_all([]) + + from(p in Post, where: p.id < 10, update: [set: [title: fragment("upper(?)", ^new_title)]]) + |> MyRepo.update_all([]) + + from(p in Post, where: p.id < 10, update: [set: [visits: p.visits * 1000]]) + |> MyRepo.update_all([]) + + """ + @doc group: "Query API" + @callback update_all( + queryable :: Ecto.Queryable.t(), + updates :: Keyword.t(), + opts :: Keyword.t() + ) :: {non_neg_integer, nil | [term]} + + @doc """ + Deletes all entries matching the given query. + + It returns a tuple containing the number of entries and any returned + result as second element. The second element is `nil` by default + unless a `select` is supplied in the delete query. Note, however, + not all databases support returning data from DELETEs. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the ["Query Prefix"](`m:Ecto.Query#module-query-prefix`) section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. + + ## Examples + + MyRepo.delete_all(Post) + + from(p in Post, where: p.id < 10) |> MyRepo.delete_all() + + # With returning results, if supported by the database. 
+ {_count, posts} = from(p in Post, where: p.id < 10, select: p) |> MyRepo.delete_all() + + """ + @doc group: "Query API" + @callback delete_all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + {non_neg_integer, nil | [term]} + + ## Ecto.Adapter.Schema + + @optional_callbacks insert_all: 3, + insert: 2, + insert!: 2, + update: 2, + update!: 2, + delete: 2, + delete!: 2, + insert_or_update: 2, + insert_or_update!: 2, + prepare_query: 3, + prepare_transaction: 2 + + @doc """ + Inserts all entries into the repository. + + It expects a schema module (`MyApp.User`) or a source (`"users"`) or + both (`{"users", MyApp.User}`) as the first argument. The second + argument is a list of entries to be inserted, either as keyword + lists or as maps. The keys of the entries are the field names as + atoms, when a schema module is specified in the first argument. + Otherwise, the keys can be either atoms or strings representing + the names of the columns in the underlying datastore. The value + should be the respective value for the field type or, optionally, + an `Ecto.Query` that returns a single entry with a single value. + + It returns a tuple containing the number of entries + and any returned result as second element. If the database + does not support RETURNING in INSERT statements or no + return result was selected, the second element will be `nil`. + + When a schema module is given, the entries given will be properly dumped + before being sent to the database. If the schema primary key has type + `:id` or `:binary_id`, it will be handled either at the adapter + or the storage layer. However any other primary key type or autogenerated + value, like `Ecto.UUID` and timestamps, won't be autogenerated when + using `c:insert_all/3`. You must set those fields explicitly. This is by + design as this function aims to be a more direct way to insert data into + the database without the conveniences of `c:insert/2`. 
This is also + consistent with `c:update_all/3` that does not handle auto generated + values as well. + + It is also not possible to use `insert_all` to insert across multiple + tables, therefore associations are not supported. + + If a source is given, without a schema module, the given fields are passed + as is to the adapter. + + ## Options + + * `:returning` - selects which fields to return. When `true`, + returns all fields in the given schema. May be a list of + fields, where a struct is still returned but only with the + given fields. Or `false`, where nothing is returned (the default). + This option is not supported by all databases. + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + * `:on_conflict` - It may be one of `:raise` (the default), `:nothing`, + `:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`, + a keyword list of update instructions or an `Ecto.Query` + query for updates. See the "[Upserts](#c:insert_all/3-upserts)" section for more information. + + * `:conflict_target` - A list of column names to verify for conflicts. + It is expected those columns to have unique indexes on them that may conflict. + If none is specified, the conflict target is left up to the database. + It may also be `{:unsafe_fragment, binary_fragment}` to pass any + expression to the database without any sanitization, this is useful + for partial index or index with expressions, such as + `{:unsafe_fragment, "(coalesce(firstname, ''), coalesce(lastname, '')) WHERE middlename IS NULL"}` for + `ON CONFLICT (coalesce(firstname, ''), coalesce(lastname, '')) WHERE middlename IS NULL` SQL query. + + * `:placeholders` - A map with placeholders. This feature is not supported + by all databases. See the ["Placeholders" section](#c:insert_all/3-placeholders) for more information. 
+ + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. + + ## Source query + + A query can be given instead of a list with entries. This query needs to select + into a map containing only keys that are available as writeable columns in the + schema. This will query and insert the values all inside one query, without + another round trip to the application. + + ## Examples + + MyRepo.insert_all(Post, [[title: "My first post"], [title: "My second post"]]) + + MyRepo.insert_all(Post, [%{title: "My first post"}, %{title: "My second post"}]) + + query = from p in Post, + join: c in assoc(p, :comments), + select: %{ + author_id: p.author_id, + posts: count(p.id, :distinct), + interactions: sum(p.likes) + count(c.id) + }, + group_by: p.author_id + MyRepo.insert_all(AuthorStats, query) + + ## Upserts + + `c:insert_all/3` provides upserts (update or inserts) via the `:on_conflict` + option. The `:on_conflict` option supports the following values: + + * `:raise` - raises if there is a conflicting primary key or unique index + + * `:nothing` - ignores the error in case of conflicts + + * `:replace_all` - replace **all** values on the existing row with the values + in the schema/changeset, including fields not explicitly set in the changeset, + such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`). + Do not use this option if you have auto-incrementing primary keys, as they + will also be replaced. You most likely want to use `{:replace_all_except, [:id]}` + or `{:replace, fields}` explicitly instead. This option requires a schema + + * `{:replace_all_except, fields}` - same as above except the given fields + (and the ones given as conflict target) are not replaced. This option + requires a schema + + * `{:replace, fields}` - replace only specific columns. This option requires + `:conflict_target`. 
Generally speaking, you want to make sure the given + fields to replace do not overlap with the `conflict_target` as databases + can then perform more efficient upserts + + * a keyword list of update instructions - such as the one given to + `c:update_all/3`, for example: `[set: [title: "new title"]]` + + * an `Ecto.Query` that will act as an `UPDATE` statement, such as the + one given to `c:update_all/3` + + Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY" + on databases such as MySQL. + + ## Return values + + By default, both Postgres and MySQL will return the number of entries + inserted on `c:insert_all/3`. However, when the `:on_conflict` option + is specified, Postgres and MySQL will return different results. + + Postgres will only count a row if it was affected and will + return 0 if no new entry was added. + + MySQL will return, at a minimum, the number of entries attempted. For example, + if `:on_conflict` is set to `:nothing`, MySQL will return + the number of entries attempted to be inserted, even when no entry + was added. + + Also note that if `:on_conflict` is a query, MySQL will return + the number of attempted entries plus the number of entries modified + by the UPDATE query. + + ## Placeholders + + Passing in a map for the `:placeholders` allows you to send less + data over the wire when you have many entries with the same value + for a field. To use a placeholder, replace its value in each of your + entries with `{:placeholder, key}`, where `key` is the key you + are using in the `:placeholders` option map. For example: + + placeholders = %{blob: large_blob_of_text(...)} + + entries = [ + %{title: "v1", body: {:placeholder, :blob}}, + %{title: "v2", body: {:placeholder, :blob}} + ] + + Repo.insert_all(Post, entries, placeholders: placeholders) + + Keep in mind that: + + * placeholders cannot be nested in other values. For example, you + cannot put a placeholder inside an array. 
Instead, the whole + array has to be the placeholder + + * a placeholder key can only be used with columns of the same type + + * placeholders require a database that supports index parameters, + so they are not currently compatible with MySQL + + """ + @doc group: "Schema API" + @callback insert_all( + schema_or_source :: binary() | {binary(), module()} | module(), + entries_or_query :: + [%{(atom() | String.t()) => value} | Keyword.t(value)] | Ecto.Query.t(), + opts :: Keyword.t() + ) :: {non_neg_integer(), nil | [term()]} + when value: term() | Ecto.Query.t() + + @doc """ + Inserts a struct defined via `Ecto.Schema` or a changeset. + + In case a struct is given, the struct is converted into a changeset + with all non-nil fields as part of the changeset. + + In case a changeset is given, the changes in the changeset are + merged with the struct fields, and all of them are sent to the + database. If more than one database operation is required, they're + automatically wrapped in a transaction. + + It returns `{:ok, struct}` if the struct has been successfully + inserted or `{:error, changeset}` if there was a validation + or a known constraint error. + + ## Options + + * `:returning` - selects which fields to return. It accepts a list + of fields to be returned from the database. When `true`, returns + all fields, including those marked as `load_in_query: false`. When + `false`, no extra fields are returned. It will always include all + fields in `read_after_writes` as well as any autogenerated id. Be + aware that the fields returned from the database overwrite what was + supplied by the user. Any field not returned by the database will be + present with the original value supplied by the user. Not all databases + support this option and it may not be available during upserts. + See the ["Upserts"](`c:insert/2#upserts`) section for more information. + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). 
This overrides the prefix set + in the query and any `@schema_prefix` set on any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + + * `:on_conflict` - It may be one of `:raise` (the default), `:nothing`, + `:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`, + a keyword list of update instructions or an `Ecto.Query` query for updates. + See the ["Upserts"](`c:insert/2#upserts`) section for more information. + + * `:conflict_target` - A list of column names to verify for conflicts. + It is expected those columns to have unique indexes on them that may conflict. + If none is specified, the conflict target is left up to the database. + It may also be `{:unsafe_fragment, binary_fragment}` to pass any + expression to the database without any sanitization, this is useful + for partial index or index with expressions, such as + `{:unsafe_fragment, "(coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL"}` for + `ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL` SQL query. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + * `:allow_stale` - Doesn't error when structs are stale. Defaults to `false`. + This may happen if there are rules or triggers in the database that + rejects the insert operation. This option cascades to associations. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Examples + + A typical example is calling `MyRepo.insert/1` with a struct + and acting on the return value: + + case MyRepo.insert(%Post{title: "Ecto is great"}) do + {:ok, struct} -> # Inserted with success + {:error, changeset} -> # Something went wrong + end + + ## Upserts + + `c:insert/2` provides upserts (update or inserts) via the `:on_conflict` + option. The `:on_conflict` option supports the following values: + + * `:raise` - raises if there is a conflicting primary key or unique index + + * `:nothing` - ignores the error in case of conflicts + + * `:replace_all` - replace **all** values on the existing row with the values + in the schema/changeset, including fields not explicitly set in the changeset, + such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`). + Do not use this option if you have auto-incrementing primary keys, as they + will also be replaced. You most likely want to use `{:replace_all_except, [:id]}` + or `{:replace, fields}` explicitly instead. This option requires a schema + + * `{:replace_all_except, fields}` - same as above except the given fields are + not replaced. This option requires a schema + + * `{:replace, fields}` - replace only specific columns. This option requires + `:conflict_target` + + * a keyword list of update instructions - such as the one given to + `c:update_all/3`, for example: `[set: [title: "new title"]]` + + * an `Ecto.Query` that will act as an `UPDATE` statement, such as the + one given to `c:update_all/3`. Similarly to `c:update_all/3`, auto + generated values, such as timestamps are not automatically updated. + If the struct cannot be found, `Ecto.StaleEntryError` will be raised. + + Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY" + on databases such as MySQL. 
+ + As an example, imagine `:title` is marked as a unique column in + the database: + + {:ok, inserted} = MyRepo.insert(%Post{title: "this is unique"}) + + Now we can insert with the same title but do nothing on conflicts: + + {:ok, ignored} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: :nothing) + + Because we used `on_conflict: :nothing`, instead of getting an error, + we got `{:ok, struct}`. However the returned struct does not reflect + the data in the database. If the primary key is auto-generated by the + database, the primary key in the `ignored` record will be nil if there + was no insertion. For example, if you use the default primary key + (which has name `:id` and a type of `:id`), then `ignored.id` above + will be nil if there was no insertion. + + If your id is generated by your application (typically the case for + `:binary_id`) or if you pass another value for `:on_conflict`, detecting + if an insert or update happened is slightly more complex, as the database + does not actually inform us what happened. Let's insert a post with the + same title but use a query to update the body column in case of conflicts: + + # In Postgres (it requires the conflict target for updates): + on_conflict = [set: [body: "updated"]] + {:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, + on_conflict: on_conflict, conflict_target: :title) + + # In MySQL (conflict target is not supported): + on_conflict = [set: [title: "updated"]] + {:ok, updated} = MyRepo.insert(%Post{id: inserted.id, title: "updated"}, + on_conflict: on_conflict) + + In the examples above, even though it returned `:ok`, we do not know + if we inserted new data or if we updated only the `:on_conflict` fields. + In case an update happened, the data in the struct most likely does + not match the data in the database. For example, autogenerated fields + such as `inserted_at` will point to now rather than the time the + struct was actually inserted. 
+ + If you need to guarantee the data in the returned struct mirrors the + database, you have three options: + + * Use `on_conflict: :replace_all`, although that will replace all + fields in the database with the ones in the struct/changeset, + including autogenerated fields such as `inserted_at` and `updated_at`: + + MyRepo.insert(%Post{title: "this is unique"}, + on_conflict: :replace_all, conflict_target: :title) + + * Specify `read_after_writes: true` in your schema for choosing + fields that are read from the database after every operation. + Or pass `returning: true` to `insert` to read all fields back. + (Note that it will only read from the database if at least one + field is updated). + + MyRepo.insert(%Post{title: "this is unique"}, returning: true, + on_conflict: on_conflict, conflict_target: :title) + + * Alternatively, read the data again from the database in a separate + query. This option requires the primary key to be generated by the + database: + + {:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: on_conflict) + Repo.get(Post, updated.id) + + Because of the inability to know if the struct is up to date or not, + inserting a struct with associations and using the `:on_conflict` option + at the same time is not recommended, as Ecto will be unable to actually + track the proper status of the association. + + ## Advanced Upserts + + Using an `Ecto.Query` for `:on_conflict` can allow us to use more advanced + database features. For example, PostgreSQL supports conditional upserts like + `DO UPDATE SET title = EXCLUDED.title, version = EXCLUDED.version + WHERE EXCLUDED.version > post.version`. + This means that the title and version will be updated only if the proposed + row has a greater version value than the existing row. 
+ + Ecto can support this as follows: + + conflict_query = + from(p in Post, + update: [set: [ + title: fragment("EXCLUDED.title"), + version: fragment("EXCLUDED.version") + ]], + where: fragment("EXCLUDED.version > ?", p.version) + ) + + MyRepo.insert( + %Post{id: 1, title: "Ecto Upserts (Dance Remix)", version: 2}, + conflict_target: [:id], + on_conflict: conflict_query + ) + """ + @doc group: "Schema API" + @callback insert( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Updates a changeset using its primary key. + + A changeset is required as it is the only mechanism for + tracking dirty changes. Only the fields present in the `changes` part + of the changeset are sent to the database. Any other, in-memory + changes done to the schema are ignored. If more than one database + operation is required, they're automatically wrapped in a transaction. + + If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError` + will be raised. + + If the struct cannot be found, `Ecto.StaleEntryError` will be raised. + + It returns `{:ok, struct}` if the struct has been successfully + updated or `{:error, changeset}` if there was a validation + or a known constraint error. + + ## Options + + * `:returning` - selects which fields to return. It accepts a list + of fields to be returned from the database. When `true`, returns + all fields, including those marked as `load_in_query: false`. When + `false`, no extra fields are returned. It will always include all + fields in `read_after_writes`. Be aware that the fields returned + from the database overwrite what was supplied by the user. Any field + not returned by the database will be present with the original value + supplied by the user. Not all databases support this option. + + * `:force` - By default, if there are no changes in the changeset, + `c:update/2` is a no-op. 
By setting this option to true, update + callbacks will always be executed, even if there are no changes + (including timestamps). + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set on any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + * `:allow_stale` - Doesn't error if update is stale. Defaults to `false`. + This may happen if the struct has been deleted from the database before + the update or if there is a rule or a trigger on the database that rejects + the update operation. This option cascades to associations. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + post = MyRepo.get!(Post, 42) + post = Ecto.Changeset.change(post, title: "New title") + case MyRepo.update(post) do + {:ok, struct} -> # Updated with success + {:error, changeset} -> # Something went wrong + end + """ + @doc group: "Schema API" + @callback update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Inserts or updates a changeset depending on whether the struct is persisted + or not. + + The distinction whether to insert or update will be made on the + `Ecto.Schema.Metadata` field `:state`. The `:state` is automatically set by + Ecto when loading or building a schema. + + Please note that for this to work, you will have to load existing structs from + the database. 
So even if the struct exists, this won't work: + + struct = %Post{id: "existing_id", ...} + MyRepo.insert_or_update(changeset) + # => {:error, changeset} # id already exists + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. Only applies to updates. + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + Only applies to updates. + * `:allow_stale` - Doesn't error when structs are stale. Defaults to `false`. + This option cascades to associations. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + result = + case MyRepo.get(Post, id) do + nil -> %Post{id: id} # Post not found, we build one + post -> post # Post exists, let's use it + end + |> Post.changeset(changes) + |> MyRepo.insert_or_update() + + case result do + {:ok, struct} -> # Inserted or updated with success + {:error, changeset} -> # Something went wrong + end + """ + @doc group: "Schema API" + @callback insert_or_update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Deletes a struct using its primary key. + + If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError` + will be raised. If the struct has been removed prior to the call, + `Ecto.StaleEntryError` will be raised. If more than one database + operation is required, they're automatically wrapped in a transaction. 
+ + It returns `{:ok, struct}` if the struct has been successfully + deleted or `{:error, changeset}` if there was a validation + or a known constraint error. By default, constraint errors will + raise the `Ecto.ConstraintError` exception, unless a changeset is + given as the first argument with the relevant constraints declared + in it (see `Ecto.Changeset`). + + ## Options + + * `:returning` - selects which fields to return. It accepts a list + of fields to be returned from the database. When `true`, returns + all fields, including those marked as `load_in_query: false`. When + `false`, no extra fields are returned. It will always include all + fields in `read_after_writes`. Be aware that the fields returned + from the database overwrite what was supplied by the user. Any field + not returned by the database will be present with the original value + supplied by the user. Not all databases support this option. + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + * `:allow_stale` - Doesn't error if delete is stale. Defaults to `false`. + This may happen if the struct has been deleted from the database before + this deletion or if there is a rule or a trigger on the database that rejects + the delete operation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Example + + post = MyRepo.get!(Post, 42) + case MyRepo.delete(post) do + {:ok, struct} -> # Deleted with success + {:error, changeset} -> # Something went wrong + end + + """ + @doc group: "Schema API" + @callback delete( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Same as `c:insert/2` but returns the struct or raises if the changeset is invalid. + """ + @doc group: "Schema API" + @callback insert!( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: Ecto.Schema.t() + + @doc """ + Same as `c:update/2` but returns the struct or raises if the changeset is invalid. + """ + @doc group: "Schema API" + @callback update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Same as `c:insert_or_update/2` but returns the struct or raises if the changeset + is invalid. + """ + @doc group: "Schema API" + @callback insert_or_update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Same as `c:delete/2` but returns the struct or raises if the changeset is invalid. + """ + @doc group: "Schema API" + @callback delete!( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: Ecto.Schema.t() + + ## Ecto.Adapter.Transaction + + @optional_callbacks transaction: 2, transact: 2, in_transaction?: 0, rollback: 1 + + @doc """ + Runs the given function or `Ecto.Multi` inside a transaction. + + Deprecated in favor of `c:transact/2`. + + ## Use with function + + `c:transaction/2` can be called with both a function of arity + zero or one. 
The arity zero function will just be executed as is: + + import Ecto.Changeset, only: [change: 2] + + MyRepo.transaction(fn -> + MyRepo.update!(change(alice, balance: alice.balance - 10)) + MyRepo.update!(change(bob, balance: bob.balance + 10)) + end) + + While the arity one function will receive the repo of the transaction + as its first argument: + + MyRepo.transaction(fn repo -> + repo.insert!(%Post{}) + end) + + If an Elixir exception occurs the transaction will be rolled back + and the exception will bubble up from the transaction function. + If no exception occurs, the transaction is committed when the + function returns. A transaction can be explicitly rolled back + by calling `c:rollback/1`, this will immediately leave the function + and return the value given to `rollback` as `{:error, value}`. + + A successful transaction returns the value returned by the function + wrapped in a tuple as `{:ok, value}`. + + See `c:transact/2` for further considerations. + + ## Use with Ecto.Multi + + `c:transaction/2` also accepts the `Ecto.Multi` struct as first argument. + `Ecto.Multi` allows you to compose transactions operations, step by step, + and manage what happens in case of success or failure. + + When an `Ecto.Multi` is given to this function, a transaction will be started, + all operations applied and in case of success committed returning `{:ok, changes}`: + + # With Ecto.Multi + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{}) + |> MyRepo.transaction() + + In case of any errors the transaction will be rolled back and + `{:error, failed_operation, failed_value, changes_so_far}` will be returned. + + Explore the `Ecto.Multi` documentation to learn more and find detailed examples. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ """ + @doc group: "Transaction API" + @doc deprecated: "Use Repo.transact/2" + @callback transaction(fun_or_multi :: fun | Ecto.Multi.t(), opts :: Keyword.t()) :: + {:ok, any} + | {:error, any} + | Ecto.Multi.failure() + + @doc """ + Runs the given function or `Ecto.Multi` inside a transaction. + + ## Use with function + + `c:transact/2` can be called with both a function of arity + zero or one. The arity zero function will just be executed as is: + + Repo.transact(fn -> + alice = Repo.insert!(alice_changeset) + bob = Repo.insert!(bob_changeset) + {:ok, [alice, bob]} + end) + + While the arity one function will receive the repo of the transaction + as its first argument: + + Repo.transact(fn repo -> + alice = repo.insert!(alice_changeset) + bob = repo.insert!(bob_changeset) + {:ok, [alice, bob]} + end) + + The return value is the same as of the given `fun` which must be + `{:ok, result}` or `{:error, reason}`. + + If this function returns `{:ok, result}`, it means the transaction + was successfully committed. On the other hand, if it returns `{:error, reason}`, + it means the transaction was rolled back. + + This function is commonly used with `with/1`: + + Repo.transact(fn -> + with {:ok, alice} <- Repo.insert(alice_changeset), + {:ok, bob} <- Repo.insert(bob_changeset) do + {:ok, [alice, bob]} + end + end) + + If an Elixir exception occurs the transaction will be rolled back + and the exception will bubble up from the transaction function. + If no exception occurs, the transaction is committed if the function + returns `{:ok, result}`. Returning `{:error, result}` will rollback the transaction + and this function will return `{:error, result}` as well. + A transaction can be explicitly rolled back + by calling `c:rollback/1`, this will immediately leave the function + and return the value given to `rollback` as `{:error, value}`. 
+ + ### Nested transactions + + If `c:transact/2` is called inside another transaction, the function + is simply executed, without wrapping the new transaction call in any + way. If there is an error in the inner transaction and the error is + rescued, or the inner transaction is rolled back, the whole outer + transaction is aborted, guaranteeing nothing will be committed. + + Below is an example of how rollbacks work with nested transactions: + + {:error, :rollback} = + Repo.transact(fn -> + {:error, :posting_not_allowed} = + Repo.transact(fn -> + # This function call causes the following to happen: + # + # * the transaction is rolled back in the database, + # * code execution is stopped within the current function, + # * and the value, passed to `rollback/1` is returned from + # `Repo.transaction/1` as the second element in the error + # tuple. + # + Repo.rollback(:posting_not_allowed) + + # `rollback/1` stops execution, so code here won't be run + end) + + # The transaction here is now aborted and any further + # operation will raise an exception. + end) + + See the ["Aborted transactions"](`c:transact/2#aborted-transactions`) section for more examples + of aborted transactions and how to handle them. + + In practice, managing nested transactions can become complex quickly. As a rule of thumb, avoid them + in favour of composing operations inside a single transaction using regular control flow and `with/1` + or use `Ecto.Multi` described next. + + ## Use with Ecto.Multi + + `c:transact/2` also accepts the `Ecto.Multi` struct as first argument. + `Ecto.Multi` allows you to compose transactions operations, step by step, + and manage what happens in case of success or failure. 
+ + When an `Ecto.Multi` is given to this function, a transaction will be started, + all operations applied and in case of success committed returning `{:ok, changes}`: + + # With Ecto.Multi + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{}) + |> Repo.transact() + + In case of any errors the transaction will be rolled back and + `{:error, failed_operation, failed_value, changes_so_far}` will be returned. + + Explore the `Ecto.Multi` documentation to learn more and find detailed examples. + + ## Aborted transactions + + When an operation inside a transaction fails, the transaction is aborted in the database. + For instance, if you attempt an insert that violates a unique constraint, the insert fails + and the transaction is aborted. In such cases, any further operation inside the transaction + will raise exceptions. + + Take the following transaction as an example: + + Repo.transact(fn repo -> + case Repo.insert(changeset) do + {:ok, post} -> + Repo.insert(%Status{value: "success"}) + + {:error, changeset} -> + Repo.insert(%Status{value: "failure"}) + end + end) + + If the changeset is valid, but the insert operation fails due to a database constraint, + the subsequent `Repo.insert(%Status{value: "failure"})` operation will raise an exception + because the database has already aborted the transaction and thus making the operation invalid. + In Postgres, the exception would look like this: + + ** (Postgrex.Error) ERROR 25P02 (in_failed_sql_transaction) current transaction is aborted, commands ignored until end of transaction block + + If the changeset is invalid before it reaches the database due to a validation error, + no statement is sent to the database, an `:error` tuple is returned, and `Repo.insert(%Status{value: "failure"})` + operation will execute as usual. 
+ + We have two options to deal with such scenarios: + + If you don't want to change the semantics of your code, you can also use the savepoints + feature by passing the `:mode` option like this: `Repo.insert(changeset, mode: :savepoint)`. + In case of an exception, the transaction will rollback to the savepoint and prevent + the transaction from failing. + + Another alternative is to handle this operation outside of the transaction: + + result = + Repo.transact(fn -> + with {:ok, post} <- Repo.insert(changeset) do + Repo.insert(%Status{value: "success"}) + end + end) + + case result do + {:ok, _} -> + :ok + + {:error, _changeset} -> + Repo.insert!(%Status{value: "failure"}) + end + + ## Working with processes + + The transaction is per process. A separate process started inside a + transaction won't be part of the same transaction and will use a separate + connection altogether. + + When using the `Ecto.Adapters.SQL.Sandbox` in tests, while it may be + possible to share the connection between processes, the parent process + will typically hold the connection until the transaction completes. This + may lead to a deadlock if the child process attempts to use the same connection. + See the docs for + [`Ecto.Adapters.SQL.Sandbox`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html) + for more information. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Examples + + If the transaction was successful, `{:ok, result}` is returned: + + iex> Repo.transact(fn -> + ...> Repo.insert(changeset) + ...> end) + {:ok, %User{}} + + If the transaction failed, `{:error, reason}` is returned: + + iex> Repo.transact(fn -> + ...> Repo.insert(changeset) + ...> end) + {:error, #Ecto.Changeset<...>} + + Transaction can be aborted by returning `{:error, reason}`, calling `c:rollback/1`, + or raising from the given `fun`: + + iex> Repo.transact(fn -> + ...> Repo.insert!(%User{}) # will be rolled back + ...> {:error, :oops} + ...> end) + {:error, :oops} + + iex> Repo.transact(fn -> + ...> Repo.insert!(%User{}) # will be rolled back + ...> Repo.rollback(:oops) + ...> end) + {:error, :oops} + + iex> Repo.transact(fn -> + ...> Repo.insert!(%User{}) # will be rolled back + ...> raise "oops" + ...> end) + ** (RuntimeError) oops + """ + @doc group: "Transaction API" + @callback transact(fun :: (-> result), opts :: Keyword.t()) :: result + when result: {:ok, any()} | {:error, any()} + @callback transact(multi :: Ecto.Multi.t(), opts :: Keyword.t()) :: + {:ok, map()} + | Ecto.Multi.failure() + + @doc """ + Returns true if the current process is inside a transaction. + + If you are using the `Ecto.Adapters.SQL.Sandbox` in tests, note that even + though each test is inside a transaction, `in_transaction?/0` will only + return true inside transactions explicitly created with `transaction/2`. This + is done so the test environment mimics dev and prod. + + ## Examples + + MyRepo.in_transaction?() + #=> false + + MyRepo.transact(fn -> + MyRepo.in_transaction?() #=> true + end) + + """ + @doc group: "Transaction API" + @callback in_transaction?() :: boolean + + @doc """ + Rolls back the current transaction. + + The transaction will return the value given as `{:error, value}`. + + Note that calling `rollback` causes the code in the transaction to stop executing. 
+ """ + @doc group: "Transaction API" + @callback rollback(value :: any) :: no_return +end diff --git a/deps/ecto/lib/ecto/repo/assoc.ex b/deps/ecto/lib/ecto/repo/assoc.ex new file mode 100644 index 0000000..f46bbcf --- /dev/null +++ b/deps/ecto/lib/ecto/repo/assoc.ex @@ -0,0 +1,141 @@ +defmodule Ecto.Repo.Assoc do + # The module invoked by repo modules + # for association related functionality. + @moduledoc false + + @doc """ + Transforms a result set based on query assocs, loading + the associations onto their parent schema. + """ + @spec query([list], list, tuple, (list -> list)) :: [Ecto.Schema.t] + def query(rows, assocs, sources, fun) + + def query([], _assocs, _sources, _fun), do: [] + def query(rows, [], _sources, fun), do: Enum.map(rows, fun) + + def query(rows, assocs, sources, fun) do + # Create rose tree of accumulator dicts in the same + # structure as the fields tree + accs = create_accs(0, assocs, sources, []) + + # Populate tree of dicts of associated entities from the result set + {_keys, _cache, rows, sub_dicts} = Enum.reduce(rows, accs, fn row, acc -> + merge(fun.(row), acc, 0) |> elem(0) + end) + + # Create the reflections that will be loaded into memory. + refls = create_refls(0, assocs, sub_dicts, sources) + + # Retrieve and load the assocs from cached dictionaries recursively + for {item, sub_structs} <- Enum.reverse(rows) do + [load_assocs(item, refls)|sub_structs] + end + end + + defp merge([struct|sub_structs], {primary_keys, cache, dict, sub_dicts}, parent_key) do + {struct, child_key} = + if struct do + {child_key, all_nil?} = + Enum.map_reduce(primary_keys, true, fn primary_key, all_nil? -> + case struct do + %_{^primary_key => nil} -> raise Ecto.NoPrimaryKeyValueError, struct: struct + # We allow maps to be returned with all `nil` values in queries without + # preloads. 
For preloads we have to treat maps with all `nil` values as + # `nil` instead of a map otherwise we can't associate the missing + # association to the parent struct + %{^primary_key => value} -> {value, all_nil? and value == nil} + %{} -> raise Ecto.NoPrimaryKeyValueError, struct: struct + end + end) + + if all_nil?, do: {nil, nil}, else: {struct, child_key} + else + {nil, nil} + end + + # Traverse sub_structs adding one by one to the tree. + # Note we need to traverse even if we don't have a child_key + # due to nested associations. + {sub_dicts, sub_structs} = Enum.map_reduce(sub_dicts, sub_structs, &merge(&2, &1, child_key)) + + cache_key = cache_key(parent_key, child_key, sub_structs, dict) + + if struct && parent_key && not Map.get(cache, cache_key, false) do + cache = Map.put(cache, cache_key, true) + item = {child_key, struct} + + # If we have a list, we are at the root, so we also store the sub structs + dict = update_dict(dict, parent_key, item, sub_structs) + + {{primary_keys, cache, dict, sub_dicts}, sub_structs} + else + {{primary_keys, cache, dict, sub_dicts}, sub_structs} + end + end + + defp cache_key(parent_key, child_key, sub_structs, dict) when is_list(dict) do + {parent_key, child_key, sub_structs} + end + + defp cache_key(parent_key, child_key, _sub_structs, dict) when is_map(dict) do + {parent_key, child_key} + end + + defp update_dict(dict, _parent_key, item, sub_structs) when is_list(dict) do + [{item, sub_structs} | dict] + end + + defp update_dict(dict, parent_key, item, _sub_structs) when is_map(dict) do + Map.update(dict, parent_key, [item], &[item | &1]) + end + + defp load_assocs({child_key, struct}, refls) do + Enum.reduce refls, struct, fn {dict, refl, sub_refls}, acc -> + %{field: field, cardinality: cardinality} = refl + loaded = + dict + |> Map.get(child_key, []) + |> Enum.reverse() + |> Enum.map(&load_assocs(&1, sub_refls)) + |> maybe_first(cardinality) + Map.put(acc, field, loaded) + end + end + + defp maybe_first(list, :one), 
do: List.first(list) + defp maybe_first(list, _), do: list + + defp create_refls(idx, fields, dicts, sources) do + schema = get_assoc_schema(sources, idx) + + Enum.map(:lists.zip(dicts, fields), fn + {{_primary_keys, _cache, dict, sub_dicts}, {field, {child_idx, child_fields}}} -> + refl = schema.__schema__(:association, field) + sub_refls = create_refls(child_idx, child_fields, sub_dicts, sources) + {dict, refl, sub_refls} + end) + end + + defp create_accs(idx, fields, sources, initial_dict) do + acc = Enum.map(fields, fn {_field, {child_idx, child_fields}} -> + create_accs(child_idx, child_fields, sources, %{}) + end) + + schema = get_assoc_schema(sources, idx) + + case schema.__schema__(:primary_key) do + [] -> raise Ecto.NoPrimaryKeyFieldError, schema: schema + pk -> {pk, %{}, initial_dict, acc} + end + end + + defp get_assoc_schema(sources, idx) do + case elem(sources, idx) do + {_, schema, _} -> + schema + + %Ecto.SubQuery{select: {:source, {_, schema}, _, _}} -> + schema + end + end +end diff --git a/deps/ecto/lib/ecto/repo/preloader.ex b/deps/ecto/lib/ecto/repo/preloader.ex new file mode 100644 index 0000000..376decc --- /dev/null +++ b/deps/ecto/lib/ecto/repo/preloader.ex @@ -0,0 +1,712 @@ +defmodule Ecto.Repo.Preloader do + # The module invoked by user defined repo_names + # for preload related functionality. + @moduledoc false + + require Ecto.Query + require Logger + + alias Ecto.Query.DynamicExpr + + @doc """ + Transforms a result set based on query preloads, loading + the associations onto their parent schema. 
+ """ + @spec query([list], Ecto.Repo.t, list, Access.t, list, fun, {adapter_meta :: map, opts :: Keyword.t}) :: [list] + def query([], _repo_name, _preloads, _take, _assocs, _fun, _tuplet), do: [] + def query(rows, _repo_name, [], _take, _assocs, fun, _tuplet), do: Enum.map(rows, fun) + + def query(rows, repo_name, preloads, take, assocs, fun, tuplet) do + assocs = normalize_query_assocs(assocs) + + rows + |> extract() + |> normalize_and_preload_each(repo_name, preloads, take, assocs, tuplet) + |> unextract(rows, fun) + end + + defp extract([[nil|_]|t2]), do: extract(t2) + defp extract([[h|_]|t2]), do: [h|extract(t2)] + defp extract([]), do: [] + + defp unextract(structs, [[nil|_] = h2|t2], fun), do: [fun.(h2)|unextract(structs, t2, fun)] + defp unextract([h1|structs], [[_|t1]|t2], fun), do: [fun.([h1|t1])|unextract(structs, t2, fun)] + defp unextract([], [], _fun), do: [] + + @doc """ + Implementation for `Ecto.Repo.preload/2`. + """ + @spec preload(structs, atom, atom | list, {adapter_meta :: map, opts :: Keyword.t}) :: + structs when structs: [Ecto.Schema.t] | Ecto.Schema.t | nil + def preload(nil, _repo_name, _preloads, _tuplet) do + nil + end + + def preload(structs, repo_name, preloads, {_adapter_meta, opts} = tuplet) when is_list(structs) do + normalize_and_preload_each(structs, repo_name, preloads, opts[:take], %{}, tuplet) + end + + def preload(struct, repo_name, preloads, {_adapter_meta, opts} = tuplet) when is_map(struct) do + normalize_and_preload_each([struct], repo_name, preloads, opts[:take], %{}, tuplet) |> hd() + end + + defp normalize_and_preload_each( + structs, + repo_name, + preloads, + take, + query_assocs, + {adapter_meta, opts} + ) do + tuplet = {adapter_meta, Keyword.put(opts, :ecto_query, :preload)} + preloads = normalize(preloads, take, preloads) + preload_each(structs, repo_name, preloads, query_assocs, tuplet) + rescue + e -> + # Reraise errors so we ignore the preload inner stacktrace + filter_and_reraise e, __STACKTRACE__ + end + + 
## Preloading + + defp preload_each(structs, _repo_name, [], _query_assocs, _tuplet), do: structs + defp preload_each([], _repo_name, _preloads, _query_assocs, _tuplet), do: [] + defp preload_each(structs, repo_name, preloads, query_assocs, tuplet) do + if sample = Enum.find(structs, & &1) do + module = sample.__struct__ + prefix = preload_prefix(tuplet, sample) + {assocs, throughs, embeds} = expand(module, preloads, query_assocs, {%{}, [], []}) + structs = preload_embeds(structs, embeds, repo_name, tuplet) + structs = preload_throughs(structs, throughs, repo_name, query_assocs, tuplet) + + {fetched_assocs, to_fetch_queries} = + prepare_queries(structs, module, assocs, prefix, repo_name, tuplet) + + fetched_queries = maybe_pmap(to_fetch_queries, repo_name, tuplet) + assocs = preload_assocs(fetched_assocs, fetched_queries, repo_name, query_assocs, tuplet) + + for struct <- structs do + struct = Enum.reduce assocs, struct, &load_assoc/2 + struct = Enum.reduce throughs, struct, &load_through/2 + struct + end + else + structs + end + end + + defp preload_prefix({_adapter_meta, opts}, sample) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + prefix + + :error -> + case sample do + %{__meta__: %{prefix: prefix}} -> prefix + # Must be an embedded schema + _ -> nil + end + end + end + + ## Association preloading + + # First we traverse all assocs and find which queries we need to run. 
+ defp prepare_queries(structs, module, assocs, prefix, repo_name, tuplet) do + Enum.reduce(assocs, {[], []}, fn + {_key, {{:assoc, assoc, related_key}, take, query, preloads}}, {assocs, queries} -> + {fetch_ids, loaded_ids, loaded_structs} = fetch_ids(structs, module, assoc, tuplet) + + queries = + if fetch_ids != [] do + [ + fn tuplet -> + fetch_query(fetch_ids, assoc, repo_name, query, prefix, related_key, take, tuplet) + end + | queries + ] + else + queries + end + + {[{assoc, fetch_ids != [], loaded_ids, loaded_structs, preloads} | assocs], queries} + end) + end + + # Then we execute queries in parallel + defp maybe_pmap(preloaders, _repo_name, {adapter_meta, opts}) do + if match?([_, _ | _] , preloaders) and not adapter_meta.adapter.checked_out?(adapter_meta) and + Keyword.get(opts, :in_parallel, true) do + # We pass caller: self() so the ownership pool knows where + # to fetch the connection from and set the proper timeouts. + # Note while the ownership pool uses '$callers' from pdict, + # it does not do so in automatic mode, hence this line is + # still necessary. 
+ opts = Keyword.put_new(opts, :caller, self()) + on_preloader_spawn = Keyword.get(opts, :on_preloader_spawn, fn -> :ok end) + + preloaders + |> Task.async_stream(fn preloader -> + on_preloader_spawn.() + preloader.({adapter_meta, opts}) + end, timeout: :infinity) + |> Enum.map(fn + {:ok, assoc} -> assoc + {:exit, reason} -> exit(reason) + end) + else + Enum.map(preloaders, &(&1.({adapter_meta, opts}))) + end + end + + # Then we unpack the query results, merge them, and preload recursively + defp preload_assocs( + [{assoc, query?, loaded_ids, loaded_structs, sub_preloads} | assocs], + queries, + repo_name, + query_assocs, + tuplet + ) do + {fetch_ids, fetch_structs, queries} = maybe_unpack_query(query?, queries) + sub_query_assocs = Map.get(query_assocs, assoc.field, %{}) + all = preload_each(Enum.reverse(loaded_structs, fetch_structs), repo_name, sub_preloads, sub_query_assocs, tuplet) + entry = {:assoc, assoc, assoc_map(assoc.cardinality, Enum.reverse(loaded_ids, fetch_ids), all)} + [entry | preload_assocs(assocs, queries, repo_name, query_assocs, tuplet)] + end + + defp preload_assocs([] = _assocs, [] = _queries, _, _, _), do: [] + + defp preload_embeds(structs, [] = _embeds, _, _), do: structs + + defp preload_embeds(structs, [embed | embeds], repo_name, tuplet) do + {%{field: field, cardinality: card}, sub_preloads} = embed + + {embed_structs, counts} = + Enum.flat_map_reduce(structs, [], fn + %{^field => embeds}, counts when is_list(embeds) -> {embeds, [length(embeds) | counts]} + %{^field => nil}, counts -> {[], [0 | counts]} + %{^field => embed}, counts -> {[embed], [1 | counts]} + nil, counts -> {[], [0 | counts]} + struct, _counts -> raise ArgumentError, "expected #{inspect(struct)} to contain embed `#{field}`" + end) + + # It is not possible for an embed to be preloaded through Ecto.Query.preload + # Therefore, we don't consider associations coming from queries + embed_structs = preload_each(embed_structs, repo_name, sub_preloads, %{}, tuplet) + structs 
= put_through_or_embed(card, field, structs, embed_structs, Enum.reverse(counts), []) + preload_embeds(structs, embeds, repo_name, tuplet) + end + + defp preload_throughs(structs, [] = _throughs, _, _, _), do: structs + + defp preload_throughs( + structs, + [{_, _, false = _from_query?} | throughs], + repo_name, + query_assocs, + tuplet + ) do + # Through associations will not be preloaded directly unless they were + # loaded through a join using Ecto.Query.preload. When using Ecto.Repo.preload + # or Ecto.Query.preload where the through association is not part of a join, + # the chain of associations making up the through association are preloaded instead. + preload_throughs(structs, throughs, repo_name, query_assocs, tuplet) + end + + defp preload_throughs(structs, [through | throughs], repo_name, query_assocs, tuplet) do + {{_, %{field: field, cardinality: card}, _}, sub_preloads, true} = through + sub_query_assocs = Map.get(query_assocs, field, %{}) + + {through_structs, counts} = + Enum.flat_map_reduce(structs, [], fn + %{^field => throughs}, counts when is_list(throughs) -> {throughs, [length(throughs) | counts]} + %{^field => nil}, counts -> {[], [0 | counts]} + %{^field => through}, counts -> {[through], [1 | counts]} + nil, counts -> {[], [0 | counts]} + struct, _counts -> raise ArgumentError, "expected #{inspect(struct)} to contain through association `#{field}`" + end) + + through_structs = preload_each(through_structs, repo_name, sub_preloads, sub_query_assocs, tuplet) + structs = put_through_or_embed(card, field, structs, through_structs, Enum.reverse(counts), []) + preload_throughs(structs, throughs, repo_name, query_assocs, tuplet) + end + + defp put_through_or_embed(_card, _field, [], [], [], acc), do: Enum.reverse(acc) + + defp put_through_or_embed(card, field, [struct | structs], loaded_structs, [0 | counts], acc), + do: put_through_or_embed(card, field, structs, loaded_structs, counts, [struct | acc]) + + defp put_through_or_embed(:one, field, 
[struct | structs], [loaded | loaded_structs], [1 | counts], acc), + do: put_through_or_embed(:one, field, structs, loaded_structs, counts, [Map.put(struct, field, loaded) | acc]) + + defp put_through_or_embed(:many, field, [struct | structs], loaded_structs, [count | counts], acc) do + {current_loaded, rest_loaded} = split_n(loaded_structs, count, []) + acc = [Map.put(struct, field, Enum.reverse(current_loaded)) | acc] + put_through_or_embed(:many, field, structs, rest_loaded, counts, acc) + end + + defp maybe_unpack_query(false, queries), do: {[], [], queries} + defp maybe_unpack_query(true, [{ids, structs} | queries]), do: {ids, structs, queries} + + defp fetch_ids(structs, module, assoc, {_adapter_meta, opts}) do + %{field: field, owner_key: owner_key, cardinality: card} = assoc + force? = Keyword.get(opts, :force, false) + + Enum.reduce structs, {[], [], []}, fn + nil, acc -> + acc + struct, {fetch_ids, loaded_ids, loaded_structs} -> + assert_struct!(module, struct) + %{^owner_key => id, ^field => value} = struct + loaded? = Ecto.assoc_loaded?(value) and not force? + + if loaded? and is_nil(id) and not Ecto.Changeset.Relation.empty?(assoc, value) do + Logger.warning """ + association `#{field}` for `#{inspect(module)}` has a loaded value but \ + its association key `#{owner_key}` is nil. This usually means one of: + + * `#{owner_key}` was not selected in a query + * the struct was set with default values for `#{field}` which now you want to override + + If this is intentional, set force: true to disable this warning + """ + end + + cond do + card == :one and loaded? -> + {fetch_ids, [id | loaded_ids], [value | loaded_structs]} + card == :many and loaded? 
-> + {fetch_ids, [{id, length(value)} | loaded_ids], value ++ loaded_structs} + is_nil(id) -> + {fetch_ids, loaded_ids, loaded_structs} + true -> + {[id | fetch_ids], loaded_ids, loaded_structs} + end + end + end + + defp fetch_query(ids, assoc, _repo_name, query, _prefix, related_key, _take, _tuplet) + when is_function(query, 1) or is_function(query, 2) do + # Note we use an explicit sort because we don't want + # to reorder based on the struct. Only the ID. + ids + |> Enum.uniq() + |> preload_function(assoc, query) + |> fetched_records_to_tuple_ids(assoc, related_key) + |> Enum.sort(fn {id1, _}, {id2, _} -> id1 <= id2 end) + |> unzip_ids([], []) + end + + defp fetch_query(ids, %{cardinality: card} = assoc, repo_name, query, prefix, related_key, take, tuplet) do + query = assoc.__struct__.assoc_query(assoc, query, Enum.uniq(ids)) + related_field_ast = related_key_to_field(query, related_key) + + # Normalize query + query = %{Ecto.Query.Planner.ensure_select(query, take || true) | prefix: prefix} + + # Add the related key to the query results + query = update_in query.select.expr, &{:{}, [], [related_field_ast, &1]} + + # If we are returning many results, we must sort by the key too + query = + case {card, query.combinations} do + {:many, [{kind, _} | []]} -> + raise ArgumentError, + "`#{kind}` queries must be wrapped inside of a subquery " <> + "when preloading a `has_many` or `many_to_many` association. 
" <> + "You must also ensure that all members of the `#{kind}` query " <> + "select the parent's foreign key" + + {:many, _} -> + query = add_preload_order(assoc.preload_order, query) + + update_in query.order_bys, fn order_bys -> + [%Ecto.Query.ByExpr{expr: [asc: related_field_ast], params: [], + file: __ENV__.file, line: __ENV__.line}|order_bys] + end + + {:one, _} -> + query + end + + unzip_ids Ecto.Repo.Queryable.all(repo_name, query, tuplet), [], [] + end + + defp preload_function(ids, _assoc, query) when is_function(query, 1), do: query.(ids) + defp preload_function(ids, assoc, query) when is_function(query, 2), do: query.(ids, assoc) + + defp fetched_records_to_tuple_ids([], _assoc, _related_key), + do: [] + + defp fetched_records_to_tuple_ids([%{} | _] = entries, _assoc, {0, key}), + do: Enum.map(entries, &{Map.fetch!(&1, key), &1}) + + defp fetched_records_to_tuple_ids([{_, %{}} | _] = entries, _assoc, _related_key), + do: entries + + defp fetched_records_to_tuple_ids([entry | _], assoc, _), + do: raise """ + invalid custom preload for `#{assoc.field}` on `#{inspect assoc.owner}`. + + For many_to_many associations, the custom function given to preload should \ + return a tuple with the associated key as first element and the struct as \ + second element. + + For example, imagine posts has many to many tags through a posts_tags table. \ + When preloading the tags, you may write: + + custom_tags = fn post_ids -> + Repo.all( + from t in Tag, + join: pt in "posts_tags", + where: t.custom and pt.post_id in ^post_ids and pt.tag_id == t.id + ) + end + + from Post, preload: [tags: ^custom_tags] + + Unfortunately the query above is not enough because Ecto won't know how to \ + associate the posts with the tags. In those cases, you need to return a tuple \ + with the `post_id` as first element and the tag struct as second. 
The new query \ + will have a select field as follows: + + from t in Tag, + join: pt in "posts_tags", + where: t.custom and pt.post_id in ^post_ids and pt.tag_id == t.id, + select: {pt.post_id, t} + + Expected a tuple with ID and struct, got: #{inspect(entry)} + """ + + defp related_key_to_field(query, {pos, key, field_type}) do + field_ast = related_key_to_field(query, {pos, key}) + + {:type, [], [field_ast, field_type]} + end + + defp related_key_to_field(query, {pos, key}) do + {{:., [], [{:&, [], [related_key_pos(query, pos)]}, key]}, [], []} + end + + defp related_key_pos(_query, pos) when pos >= 0, do: pos + defp related_key_pos(query, pos), do: Ecto.Query.Builder.count_binds(query) + pos + + defp add_preload_order([], query), do: query + + defp add_preload_order(order, query) when is_list(order) do + Ecto.Query.prepend_order_by(query, [q], ^order) + end + + defp add_preload_order({m, f, a}, query) do + order = + case apply(m, f, a) do + order when is_list(order) -> + order + + other -> + raise ArgumentError, + "`:preload_order` must resolve to a keyword list or a list of atoms/fields, " <> + "got: `#{inspect(other)}`" + end + + Enum.each(order, fn + {direction, field} when is_atom(field) or is_struct(field, DynamicExpr) -> + unless Ecto.Query.Builder.OrderBy.valid_direction?(direction) do + raise ArgumentError, + "`:preload_order` must specify valid directions, " <> + "got: `#{inspect(order)}`, `#{inspect(direction)}` is not a valid direction" + end + + :ok + + field when is_atom(field) or is_struct(field, DynamicExpr) -> + :ok + + other -> + raise ArgumentError, + "`:preload_order` must resolve to a keyword list or a list of atoms/fields, " <> + "got: `#{inspect(order)}`, `#{inspect(other)}` is not valid" + end) + + add_preload_order(order, query) + end + + defp unzip_ids([{k, v}|t], acc1, acc2), do: unzip_ids(t, [k|acc1], [v|acc2]) + defp unzip_ids([], acc1, acc2), do: {acc1, acc2} + + defp assert_struct!(mod, %{__struct__: mod}), do: true + defp 
assert_struct!(mod, %{__struct__: struct}) do + raise ArgumentError, "expected a homogeneous list containing the same struct, " <> + "got: #{inspect mod} and #{inspect struct}" + end + + defp assoc_map(:one, ids, structs) do + one_assoc_map(ids, structs, %{}) + end + defp assoc_map(:many, ids, structs) do + many_assoc_map(ids, structs, %{}) + end + + defp one_assoc_map([id|ids], [struct|structs], map) do + one_assoc_map(ids, structs, Map.put(map, id, struct)) + end + defp one_assoc_map([], [], map) do + map + end + + defp many_assoc_map([{id, n}|ids], structs, map) do + {acc, structs} = split_n(structs, n, []) + many_assoc_map(ids, structs, Map.put(map, id, acc)) + end + defp many_assoc_map([id|ids], [struct|structs], map) do + {ids, structs, acc} = split_while(ids, structs, id, [struct]) + many_assoc_map(ids, structs, Map.put(map, id, acc)) + end + defp many_assoc_map([], [], map) do + map + end + + defp split_n(structs, 0, acc), do: {acc, structs} + defp split_n([struct | structs], n, acc), do: split_n(structs, n - 1, [struct | acc]) + + defp split_while([id|ids], [struct|structs], id, acc), + do: split_while(ids, structs, id, [struct|acc]) + defp split_while(ids, structs, _id, acc), + do: {ids, structs, acc} + + ## Load preloaded data + + defp load_assoc({:assoc, _assoc, _ids}, nil) do + nil + end + + defp load_assoc({:assoc, assoc, ids}, struct) do + %{field: field, owner_key: owner_key, cardinality: cardinality} = assoc + key = Map.fetch!(struct, owner_key) + + loaded = + case ids do + %{^key => value} -> value + _ when cardinality == :many -> [] + _ -> nil + end + + Map.put(struct, field, loaded) + end + + defp load_through({_, _, _}, nil), do: nil + defp load_through({_, _, true = _from_query?}, struct), do: struct + + defp load_through({{:through, assoc, throughs}, _, false = _from_query?}, struct) do + %{cardinality: cardinality, field: field, owner: owner} = assoc + {loaded, _} = Enum.reduce(throughs, {[struct], owner}, &recur_through/2) + Map.put(struct, 
field, maybe_first(loaded, cardinality)) + end + + defp maybe_first(list, :one), do: List.first(list) + defp maybe_first(list, _), do: list + + defp recur_through(field, {structs, owner}) do + assoc = owner.__schema__(:association, field) + case assoc.__struct__.preload_info(assoc) do + {:assoc, %{related: related}, _} -> + pk_fields = + related.__schema__(:primary_key) + |> validate_has_pk_field!(related, assoc) + + {children, _} = + Enum.reduce(structs, {[], %{}}, fn struct, acc -> + struct + |> Map.fetch!(field) + |> List.wrap() + |> Enum.reduce(acc, fn child, {fresh, set} -> + pk_values = + child + |> through_pks(pk_fields, assoc) + |> validate_non_null_pk!(child, pk_fields, assoc) + + case set do + %{^pk_values => true} -> + {fresh, set} + _ -> + {[child|fresh], Map.put(set, pk_values, true)} + end + end) + end) + + {Enum.reverse(children), related} + + {:through, _, through} -> + Enum.reduce(through, {structs, owner}, &recur_through/2) + end + end + + defp validate_has_pk_field!([], related, assoc) do + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}`. Ecto expected the #{inspect related} schema " <> + "to have at least one primary key field" + end + + defp validate_has_pk_field!(pk_fields, _related, _assoc), do: pk_fields + + defp through_pks(map, pks, assoc) do + Enum.map(pks, fn pk -> + case map do + %{^pk => value} -> + value + + _ -> + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}`. 
Ecto expected a map/struct with " <> + "the key `#{pk}` but got: #{inspect map}" + end + end) + end + + defp validate_non_null_pk!(values, map, pks, assoc) do + case values do + [nil | _] -> + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}` because the primary key `#{hd(pks)}` " <> + "is nil for map/struct: #{inspect map}" + + _ -> + values + end + end + + ## Normalizer + + def normalize(preload, take, original) do + normalize_each(wrap(preload, original), [], take, original) + end + + defp normalize_each({atom, {query, list}}, acc, take, original) + when is_atom(atom) and (is_map(query) or is_function(query, 1) or is_function(query, 2)) do + fields = take(take, atom) + [{atom, {fields, query!(query), normalize_each(wrap(list, original), [], fields, original)}}|acc] + end + + defp normalize_each({atom, query}, acc, take, _original) + when is_atom(atom) and (is_map(query) or is_function(query, 1) or is_function(query, 2)) do + [{atom, {take(take, atom), query!(query), []}}|acc] + end + + defp normalize_each({atom, list}, acc, take, original) when is_atom(atom) do + fields = take(take, atom) + [{atom, {fields, nil, normalize_each(wrap(list, original), [], fields, original)}}|acc] + end + + defp normalize_each(atom, acc, take, _original) when is_atom(atom) do + [{atom, {take(take, atom), nil, []}}|acc] + end + + defp normalize_each(other, acc, take, original) do + Enum.reduce(wrap(other, original), acc, &normalize_each(&1, &2, take, original)) + end + + defp query!(query) when is_function(query, 1), do: query + defp query!(query) when is_function(query, 2), do: query + defp query!(%Ecto.Query{} = query), do: query + + defp take(take, field) do + case Access.fetch(take, field) do + {:ok, fields} -> List.wrap(fields) + :error -> nil + end + end + + defp wrap(list, _original) when is_list(list), + do: list + defp wrap(atom, _original) when is_atom(atom), + do: atom + defp wrap(other, original) do + raise 
ArgumentError, "invalid preload `#{inspect other}` in `#{inspect original}`. " <> + "preload expects an atom, a (nested) keyword or a (nested) list of atoms" + end + + defp normalize_query_assocs([]), do: %{} + + defp normalize_query_assocs(assocs) when is_list(assocs) do + Enum.reduce(assocs, %{}, &normalize_each_query_assoc(&1, &2)) + end + + defp normalize_each_query_assoc({field, {_idx, sub_assocs}}, acc) do + Map.put(acc, field, normalize_query_assocs(sub_assocs)) + end + + ## Expand + + def expand(schema, preloads, query_assocs, acc) do + Enum.reduce(preloads, acc, fn {preload, {fields, query, sub_preloads}}, + {assocs, throughs, embeds} -> + assoc_or_embed = association_or_embed!(schema, preload) + + info = assoc_or_embed.__struct__.preload_info(assoc_or_embed) + + case info do + {:assoc, _, _} -> + value = {info, fields, query, sub_preloads} + assocs = Map.update(assocs, preload, value, &merge_preloads(preload, value, &1)) + {assocs, throughs, embeds} + + {:through, _, through} -> + case query_assocs do + %{^preload => _} -> + {assocs, [{info, sub_preloads, true} | throughs], embeds} + + _ -> + {_, _, through} = + through + |> Enum.reverse() + |> Enum.reduce({fields, query, sub_preloads}, &{nil, nil, [{&1, &2}]}) + + expand(schema, through, query_assocs, {assocs, [{info, sub_preloads, false} | throughs], embeds}) + end + + :embed -> + if sub_preloads == [] do + raise ArgumentError, + "cannot preload embedded field #{inspect(assoc_or_embed.field)} " <> + "without also preloading one of its associations as it has no effect" + end + + embeds = [{assoc_or_embed, sub_preloads} | embeds] + {assocs, throughs, embeds} + end + end) + end + + defp merge_preloads(_preload, {info, _, nil, left}, {info, take, query, right}), + do: {info, take, query, left ++ right} + defp merge_preloads(_preload, {info, take, query, left}, {info, _, nil, right}), + do: {info, take, query, left ++ right} + defp merge_preloads(preload, {info, _, left, _}, {info, _, right, _}) do + raise 
ArgumentError, "cannot preload `#{preload}` as it has been supplied more than once " <> + "with different queries: #{inspect left} and #{inspect right}" + end + + defp association_or_embed!(schema, preload) do + schema.__schema__(:association, preload) || schema.__schema__(:embed, preload) || + raise ArgumentError, "schema #{inspect schema} does not have association or embed #{inspect preload}#{maybe_module(preload)}" + end + + defp maybe_module(assoc) do + case Atom.to_string(assoc) do + "Elixir." <> _ -> + " (if you were trying to pass a schema as a query to preload, " <> + "you have to explicitly convert it to a query by doing `from x in #{inspect assoc}` " <> + "or by calling Ecto.Queryable.to_query/1)" + + _ -> + "" + end + end + + defp filter_and_reraise(exception, stacktrace) do + reraise exception, Enum.reject(stacktrace, &match?({__MODULE__, _, _, _}, &1)) + end +end diff --git a/deps/ecto/lib/ecto/repo/queryable.ex b/deps/ecto/lib/ecto/repo/queryable.ex new file mode 100644 index 0000000..c609895 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/queryable.ex @@ -0,0 +1,610 @@ +defmodule Ecto.Repo.Queryable do + @moduledoc false + + alias Ecto.Queryable + alias Ecto.Query + alias Ecto.Query.Planner + alias Ecto.Query.SelectExpr + + import Ecto.Query.Planner, only: [attach_prefix: 2] + + require Ecto.Query + + def all(name, queryable, tuplet) do + query = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(true) + + execute(:all, name, query, tuplet) |> elem(1) + end + + def all_by(name, queryable, clauses, tuplet) do + query = + queryable + |> Ecto.Query.where([], ^Enum.to_list(clauses)) + |> Ecto.Query.Planner.ensure_select(true) + + execute(:all, name, query, tuplet) |> elem(1) + end + + def stream(_name, queryable, {adapter_meta, opts}) do + %{adapter: adapter, cache: cache, repo: repo} = adapter_meta + + query = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(true) + + {query, opts} = 
repo.prepare_query(:stream, query, opts) + query = attach_prefix(query, opts) + + {query_meta, prepared, cast_params, dump_params} = + Planner.query(query, :all, cache, adapter, 0) + + opts = [cast_params: cast_params] ++ opts + + case query_meta do + %{select: nil} -> + adapter_meta + |> adapter.stream(query_meta, prepared, dump_params, opts) + |> Stream.flat_map(fn {_, nil} -> [] end) + + %{select: select, preloads: preloads} -> + %{ + assocs: assocs, + preprocess: preprocess, + postprocess: postprocess, + take: take, + from: from + } = select + + if preloads != [] or assocs != [] do + raise Ecto.QueryError, query: query, message: "preloads are not supported on streams" + end + + preprocessor = preprocessor(from, preprocess, adapter) + stream = adapter.stream(adapter_meta, query_meta, prepared, dump_params, opts) + postprocessor = postprocessor(from, postprocess, take, adapter) + + stream + |> Stream.flat_map(fn {_, rows} -> rows end) + |> Stream.map(preprocessor) + |> Stream.map(postprocessor) + end + end + + def get(name, queryable, id, opts) do + one(name, query_for_get(queryable, id), opts) + end + + def get!(name, queryable, id, opts) do + one!(name, query_for_get(queryable, id), opts) + end + + def get_by(name, queryable, clauses, opts) do + one(name, query_for_get_by(queryable, clauses), opts) + end + + def get_by!(name, queryable, clauses, opts) do + one!(name, query_for_get_by(queryable, clauses), opts) + end + + def reload(name, [head | _] = structs, opts) when is_list(structs) do + results = all(name, query_for_reload(structs), opts) + + [pk] = head.__struct__.__schema__(:primary_key) + + for struct <- structs do + struct_pk = Map.fetch!(struct, pk) + Enum.find(results, &(Map.fetch!(&1, pk) == struct_pk)) + end + end + + def reload(name, struct, opts) do + one(name, query_for_reload([struct]), opts) + end + + def reload!(name, [head | _] = structs, opts) when is_list(structs) do + query = query_for_reload(structs) + results = all(name, query, opts) + + 
[pk] = head.__struct__.__schema__(:primary_key) + + for struct <- structs do + struct_pk = Map.fetch!(struct, pk) + + Enum.find(results, &(Map.fetch!(&1, pk) == struct_pk)) || + raise "could not reload #{inspect(struct)}, maybe it doesn't exist or was deleted" + end + end + + def reload!(name, struct, opts) do + query = query_for_reload([struct]) + one!(name, query, opts) + end + + def aggregate(name, queryable, aggregate, opts) do + one!(name, query_for_aggregate(queryable, aggregate), opts) + end + + def aggregate(name, queryable, aggregate, field, opts) do + one!(name, query_for_aggregate(queryable, aggregate, field), opts) + end + + def exists?(name, queryable, opts) do + queryable = + Query.exclude(queryable, :select) + |> Query.exclude(:preload) + |> Query.exclude(:order_by) + |> Query.exclude(:distinct) + |> Query.select(1) + |> Query.limit(1) + |> rewrite_combinations() + + case all(name, queryable, opts) do + [1] -> true + [] -> false + end + end + + defp rewrite_combinations(%{combinations: []} = query), do: query + + defp rewrite_combinations(%{combinations: combinations} = query) do + combinations = + Enum.map(combinations, fn {type, query} -> + {type, query |> Query.exclude(:select) |> Query.select(1)} + end) + + %{query | combinations: combinations} + end + + def one(name, queryable, tuplet) do + case all(name, queryable, tuplet) do + [one] -> one + [] -> nil + other -> raise Ecto.MultipleResultsError, queryable: queryable, count: length(other) + end + end + + def one!(name, queryable, tuplet) do + case all(name, queryable, tuplet) do + [one] -> one + [] -> raise Ecto.NoResultsError, queryable: queryable + other -> raise Ecto.MultipleResultsError, queryable: queryable, count: length(other) + end + end + + def update_all(name, queryable, [], tuplet) do + update_all(name, queryable, tuplet) + end + + def update_all(name, queryable, updates, tuplet) do + query = Query.from(queryable, update: ^updates) + update_all(name, query, tuplet) + end + + defp 
update_all(name, queryable, tuplet) do + query = Ecto.Queryable.to_query(queryable) + execute(:update_all, name, query, tuplet) + end + + def delete_all(name, queryable, tuplet) do + query = Ecto.Queryable.to_query(queryable) + execute(:delete_all, name, query, tuplet) + end + + @doc """ + Load structs from query. + """ + def struct_load!([{field, type} | types], [value | values], acc, all_nil?, struct, adapter) do + all_nil? = all_nil? and value == nil + value = load!(type, value, field, struct, adapter) + struct_load!(types, values, [{field, value} | acc], all_nil?, struct, adapter) + end + + def struct_load!([], values, _acc, true, struct, _adapter) when struct != %{} do + {nil, values} + end + + def struct_load!([], values, acc, _all_nil?, struct, _adapter) do + {Map.merge(struct, Map.new(acc)), values} + end + + ## Helpers + + defp execute(operation, name, query, {adapter_meta, opts} = tuplet) do + %{adapter: adapter, cache: cache, repo: repo} = adapter_meta + + {query, opts} = repo.prepare_query(operation, query, opts) + query = attach_prefix(query, opts) + + {query_meta, prepared, cast_params, dump_params} = + Planner.query(query, operation, cache, adapter, 0) + + opts = [cast_params: cast_params] ++ opts + + case query_meta do + %{select: nil} -> + adapter.execute(adapter_meta, query_meta, prepared, dump_params, opts) + + %{select: select, sources: sources, preloads: preloads} -> + %{ + preprocess: preprocess, + postprocess: postprocess, + take: take, + assocs: assocs, + from: from + } = select + + preprocessor = preprocessor(from, preprocess, adapter) + {count, rows} = adapter.execute(adapter_meta, query_meta, prepared, dump_params, opts) + postprocessor = postprocessor(from, postprocess, take, adapter) + + {count, + rows + |> Ecto.Repo.Assoc.query(assocs, sources, preprocessor) + |> Ecto.Repo.Preloader.query(name, preloads, take, assocs, postprocessor, tuplet)} + end + end + + defp preprocessor({_, {:source, {source, schema}, prefix, types}}, preprocess, 
adapter) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + + fn row -> + {entry, rest} = struct_load!(types, row, [], false, struct, adapter) + preprocess(rest, preprocess, entry, adapter) + end + end + + defp preprocessor({_, from}, preprocess, adapter) do + fn row -> + {entry, rest} = process(row, from, nil, adapter) + preprocess(rest, preprocess, entry, adapter) + end + end + + defp preprocessor(:none, preprocess, adapter) do + fn row -> + preprocess(row, preprocess, nil, adapter) + end + end + + defp preprocess(row, [], _from, _adapter) do + row + end + + defp preprocess(row, [source | sources], from, adapter) do + {entry, rest} = process(row, source, from, adapter) + [entry | preprocess(rest, sources, from, adapter)] + end + + defp postprocessor({:any, _}, postprocess, _take, adapter) do + fn [from | row] -> + row |> process(postprocess, from, adapter) |> elem(0) + end + end + + defp postprocessor({:map, _}, postprocess, take, adapter) do + fn [from | row] -> + row |> process(postprocess, to_map(from, take), adapter) |> elem(0) + end + end + + defp postprocessor(:none, postprocess, _take, adapter) do + fn row -> row |> process(postprocess, nil, adapter) |> elem(0) end + end + + defp process(row, {:source, :from}, from, _adapter) do + {from, row} + end + + defp process(row, {:source, {source, schema}, prefix, types}, _from, adapter) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + struct_load!(types, row, [], true, struct, adapter) + end + + defp process(row, {:source, :values, _prefix, types}, _from, adapter) do + values_list_load!(types, row, [], true, adapter) + end + + defp process(row, {:merge, left, right}, from, adapter) do + {left, row} = process(row, left, from, adapter) + {right, row} = process(row, right, from, adapter) + + data = + case {left, right} do + {%{__struct__: s}, %{__struct__: s}} -> + Map.merge(left, right) + + {%{__struct__: left_struct}, %{__struct__: right_struct}} -> + raise ArgumentError, + 
"cannot merge structs of different types, " <> + "got: #{inspect(left_struct)} and #{inspect(right_struct)}" + + {%{__struct__: name}, %{}} -> + for {key, _} <- right, not Map.has_key?(left, key) do + raise ArgumentError, "struct #{inspect(name)} does not have the key #{inspect(key)}" + end + + Map.merge(left, right) + + {%{}, %{}} -> + Map.merge(left, right) + + {%{}, nil} -> + left + + {_, %{}} -> + raise ArgumentError, + "cannot merge because the left side is not a map, got: #{inspect(left)}" + + {%{}, _} -> + raise ArgumentError, + "cannot merge because the right side is not a map, got: #{inspect(right)}" + end + + {data, row} + end + + defp process(row, {:struct, struct, data, args}, from, adapter) do + case process(row, data, from, adapter) do + {%{__struct__: ^struct} = data, row} -> + process_update(data, args, row, from, adapter) + + {data, _row} -> + raise ArgumentError, + "expected a struct named #{inspect(struct)}, got: #{inspect(data)}" + end + end + + defp process(row, {:struct, struct, args}, from, adapter) do + {fields, row} = process_kv(args, row, from, adapter) + + case Map.merge(struct.__struct__(), Map.new(fields)) do + %{__meta__: %Ecto.Schema.Metadata{state: state} = metadata} = struct + when state != :loaded -> + {Map.replace!(struct, :__meta__, %{metadata | state: :loaded}), row} + + map -> + {map, row} + end + end + + defp process(row, {:map, data, args}, from, adapter) do + {data, row} = process(row, data, from, adapter) + process_update(data, args, row, from, adapter) + end + + defp process(row, {:map, args}, from, adapter) do + {args, row} = process_kv(args, row, from, adapter) + {Map.new(args), row} + end + + defp process(row, {:list, args}, from, adapter) do + process_args(args, row, from, adapter) + end + + defp process(row, {:tuple, args}, from, adapter) do + {args, row} = process_args(args, row, from, adapter) + {List.to_tuple(args), row} + end + + defp process([value | row], {:value, :any}, _from, _adapter) do + {value, row} + end 
+ + defp process([value | row], {:value, type}, _from, adapter) do + {load!(type, value, nil, nil, adapter), row} + end + + defp process(row, value, _from, _adapter) + when is_binary(value) or is_number(value) or is_atom(value) do + {value, row} + end + + defp process_update(nil, args, row, from, adapter) do + {_args, row} = process_kv(args, row, from, adapter) + {nil, row} + end + + defp process_update(data, args, row, from, adapter) do + {args, row} = process_kv(args, row, from, adapter) + data = Enum.reduce(args, data, fn {key, value}, acc -> %{acc | key => value} end) + {data, row} + end + + defp process_args(args, row, from, adapter) do + Enum.map_reduce(args, row, fn arg, row -> + process(row, arg, from, adapter) + end) + end + + defp process_kv(kv, row, from, adapter) do + Enum.map_reduce(kv, row, fn {key, value}, row -> + {key, row} = process(row, key, from, adapter) + {value, row} = process(row, value, from, adapter) + {{key, value}, row} + end) + end + + @compile {:inline, load!: 5} + defp load!(type, value, field, struct, adapter) do + case Ecto.Type.adapter_load(adapter, type, value) do + {:ok, value} -> + value + + :error -> + field = field && " for field #{inspect(field)}" + struct = struct && " in #{inspect(struct)}" + + raise ArgumentError, + "cannot load `#{inspect(value)}` as type #{Ecto.Type.format(type)}#{field}#{struct}" + end + end + + defp values_list_load!([{field, type} | types], [value | values], acc, all_nil?, adapter) do + all_nil? = all_nil? 
and value == nil + value = load!(type, value, field, nil, adapter) + values_list_load!(types, values, [{field, value} | acc], all_nil?, adapter) + end + + defp values_list_load!([], values, _acc, true, _adapter) do + {nil, values} + end + + defp values_list_load!([], values, acc, false, _adapter) do + {Map.new(acc), values} + end + + defp to_map(nil, _fields) do + nil + end + + defp to_map(value, fields) when is_list(value) do + Enum.map(value, &to_map(&1, fields)) + end + + defp to_map(value, fields) do + for field <- fields, into: %{} do + case field do + {k, v} -> {k, to_map(Map.fetch!(value, k), List.wrap(v))} + k -> {k, Map.fetch!(value, k)} + end + end + end + + defp query_for_get(_queryable, nil) do + raise ArgumentError, "cannot perform Ecto.Repo.get/2 because the given value is nil" + end + + defp query_for_get(queryable, id) do + query = Queryable.to_query(queryable) + schema = assert_schema!(query) + + case schema.__schema__(:primary_key) do + [pk] -> + Query.from(x in query, where: field(x, ^pk) == ^id) + + pks -> + raise ArgumentError, + "Ecto.Repo.get/2 requires the schema #{inspect(schema)} " <> + "to have exactly one primary key, got: #{inspect(pks)}" + end + end + + defp query_for_get_by(queryable, clauses) do + Query.where(queryable, [], ^Enum.to_list(clauses)) + end + + defp query_for_reload([head | _] = structs) do + assert_structs!(structs) + + schema = head.__struct__ + %{prefix: prefix, source: source} = head.__meta__ + + case schema.__schema__(:primary_key) do + [pk] -> + keys = Enum.map(structs, &get_pk!(&1, pk)) + query = Query.from(x in {source, schema}, where: field(x, ^pk) in ^keys) + %{query | prefix: prefix} + + pks -> + raise ArgumentError, + "Ecto.Repo.reload/2 requires the schema #{inspect(schema)} " <> + "to have exactly one primary key, got: #{inspect(pks)}" + end + end + + defp query_for_aggregate(queryable, aggregate) do + query = + case prepare_for_aggregate(queryable) do + %{distinct: nil, limit: nil, offset: nil, 
combinations: []} = query -> + %{query | order_bys: []} + + %{prefix: prefix} = query -> + query = + query + |> Query.subquery() + |> Queryable.Ecto.SubQuery.to_query() + + %{query | prefix: prefix} + end + + select = %SelectExpr{expr: {aggregate, [], []}, file: __ENV__.file, line: __ENV__.line} + %{query | select: select} + end + + defp query_for_aggregate(queryable, aggregate, field) do + ast = field(0, field) + + query = + case prepare_for_aggregate(queryable) do + %{distinct: nil, limit: nil, offset: nil, combinations: []} = query -> + %{query | order_bys: []} + + %{prefix: prefix} = query -> + select = %SelectExpr{expr: ast, file: __ENV__.file, line: __ENV__.line} + + query = + %{query | select: select} + |> Query.subquery() + |> Queryable.Ecto.SubQuery.to_query() + + %{query | prefix: prefix} + end + + select = %SelectExpr{expr: {aggregate, [], [ast]}, file: __ENV__.file, line: __ENV__.line} + %{query | select: select} + end + + defp prepare_for_aggregate(queryable) do + case %{Queryable.to_query(queryable) | preloads: [], assocs: []} do + %{group_bys: [_ | _]} = query -> + raise Ecto.QueryError, message: "cannot aggregate on query with group_by", query: query + + %{} = query -> + query + end + end + + defp field(ix, field) when is_integer(ix) and is_atom(field) do + {{:., [], [{:&, [], [ix]}, field]}, [], []} + end + + defp assert_schema!(%{from: %{source: {_source, schema}}}) when schema != nil, do: schema + + defp assert_schema!(query) do + raise Ecto.QueryError, + query: query, + message: "expected a from expression with a schema" + end + + defp assert_structs!([head | _] = structs) when is_list(structs) do + unless Enum.all?(structs, &schema?/1) do + raise ArgumentError, "expected a struct or a list of structs, received #{inspect(structs)}" + end + + unless Enum.all?(structs, &(&1.__struct__ == head.__struct__)) do + raise ArgumentError, "expected an homogeneous list, received different struct types" + end + + :ok + end + + defp schema?(%{__meta__: _}), 
do: true + defp schema?(_), do: false + + defp get_pk!(struct, pk) do + struct + |> Map.fetch!(pk) + |> case do + nil -> + raise ArgumentError, + "Ecto.Repo.reload/2 expects existent structs, found a `nil` primary key" + + key -> + key + end + end +end diff --git a/deps/ecto/lib/ecto/repo/registry.ex b/deps/ecto/lib/ecto/repo/registry.ex new file mode 100644 index 0000000..88f9cd3 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/registry.ex @@ -0,0 +1,51 @@ +defmodule Ecto.Repo.Registry do + @moduledoc false + + use GenServer + + def start_link(_opts) do + GenServer.start_link(__MODULE__, :ok, name: __MODULE__) + end + + def associate(pid, name, value) when is_pid(pid) do + GenServer.call(__MODULE__, {:associate, pid, name, value}) + end + + def all_running() do + for [pid, name] <- :ets.match(__MODULE__, {:"$1", :_, :"$2", :_}) do + name || pid + end + end + + def lookup(repo) when is_atom(repo) do + GenServer.whereis(repo) + |> Kernel.||(raise "could not lookup Ecto repo #{inspect repo} because it was not started or it does not exist") + |> lookup() + end + + def lookup(pid) when is_pid(pid) do + :ets.lookup_element(__MODULE__, pid, 4) + end + + ## Callbacks + + @impl true + def init(:ok) do + table = :ets.new(__MODULE__, [:named_table, read_concurrency: true]) + {:ok, table} + end + + @impl true + def handle_call({:associate, pid, name, value}, _from, table) do + ref = Process.monitor(pid) + true = :ets.insert(table, {pid, ref, name, value}) + {:reply, :ok, table} + end + + @impl true + def handle_info({:DOWN, ref, _type, pid, _reason}, table) do + [{^pid, ^ref, _, _}] = :ets.lookup(table, pid) + :ets.delete(table, pid) + {:noreply, table} + end +end diff --git a/deps/ecto/lib/ecto/repo/schema.ex b/deps/ecto/lib/ecto/repo/schema.ex new file mode 100644 index 0000000..06b24e9 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/schema.ex @@ -0,0 +1,1313 @@ +defmodule Ecto.Repo.Schema do + # The module invoked by user defined repos + # for schema related functionality. 
+ @moduledoc false + + alias Ecto.Changeset + alias Ecto.Changeset.Relation + require Ecto.Query + + import Ecto.Query.Planner, only: [attach_prefix: 2] + + @doc """ + Implementation for `Ecto.Repo.insert_all/3`. + """ + def insert_all(repo, name, schema, rows, tuplet) when is_atom(schema) do + do_insert_all( + repo, + name, + schema, + schema.__schema__(:prefix), + schema.__schema__(:source), + rows, + tuplet + ) + end + + def insert_all(repo, name, table, rows, tuplet) when is_binary(table) do + do_insert_all(repo, name, nil, nil, table, rows, tuplet) + end + + def insert_all(repo, name, {source, schema}, rows, tuplet) when is_atom(schema) do + do_insert_all(repo, name, schema, schema.__schema__(:prefix), source, rows, tuplet) + end + + defp do_insert_all(_repo, _name, _schema, _prefix, _source, [], {_adapter_meta, opts}) do + if opts[:returning] do + {0, []} + else + {0, nil} + end + end + + defp do_insert_all(repo, _name, schema, prefix, source, rows_or_query, {adapter_meta, opts}) do + %{adapter: adapter} = adapter_meta + autogen_id = schema && schema.__schema__(:autogenerate_id) + dumper = schema && schema.__schema__(:dump) + placeholder_map = Keyword.get(opts, :placeholders, %{}) + + {return_fields_or_types, return_sources} = + schema + |> returning(opts) + |> fields_to_sources(dumper) + + {rows_or_query, header, row_cast_params, placeholder_cast_params, placeholder_dump_params, + counter} = + extract_header_and_fields( + repo, + rows_or_query, + schema, + dumper, + autogen_id, + placeholder_map, + adapter, + opts + ) + + schema_meta = metadata(schema, prefix, source, autogen_id, nil, opts) + + on_conflict = Keyword.get(opts, :on_conflict, :raise) + conflict_target = Keyword.get(opts, :conflict_target, []) + conflict_target = conflict_target(conflict_target, dumper) + + {on_conflict, conflict_cast_params} = + on_conflict(on_conflict, conflict_target, schema_meta, counter, dumper, adapter) + + opts = + Keyword.put( + opts, + :cast_params, + 
placeholder_cast_params ++ row_cast_params ++ conflict_cast_params + ) + + {count, rows_or_query} = + adapter.insert_all( + adapter_meta, + schema_meta, + header, + rows_or_query, + on_conflict, + return_sources, + placeholder_dump_params, + opts + ) + + {count, postprocess(rows_or_query, return_fields_or_types, adapter, schema, schema_meta)} + end + + defp postprocess(nil, [], _adapter, _schema, _schema_meta) do + nil + end + + defp postprocess(rows, fields, _adapter, nil, _schema_meta) do + for row <- rows, do: Map.new(Enum.zip(fields, row)) + end + + defp postprocess(rows, types, adapter, schema, %{prefix: prefix, source: source}) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + + for row <- rows do + {loaded, _} = Ecto.Repo.Queryable.struct_load!(types, row, [], false, struct, adapter) + loaded + end + end + + defp extract_header_and_fields( + _repo, + rows, + schema, + dumper, + autogen_id, + placeholder_map, + adapter, + _opts + ) + when is_list(rows) do + mapper = init_mapper(schema, dumper, adapter, placeholder_map) + + {rows, {header, placeholder_dump, _}} = + Enum.map_reduce(rows, {%{}, %{}, 1}, fn fields, acc -> + {fields, {header, placeholder_dump, counter}} = Enum.map_reduce(fields, acc, mapper) + {fields, header} = autogenerate_id(autogen_id, fields, header, adapter) + {fields, {header, placeholder_dump, counter}} + end) + + header = Map.keys(header) + + placeholder_size = map_size(placeholder_dump) + + {placeholder_cast_params, placeholder_dump_params} = + placeholder_dump + |> Enum.map(fn {_, {idx, _, cast_value, dump_value}} -> {idx, cast_value, dump_value} end) + |> Enum.sort() + |> Enum.map(&{elem(&1, 1), elem(&1, 2)}) + |> Enum.unzip() + + {rows, row_cast_params, counter} = plan_query_in_rows(rows, header, adapter, placeholder_size) + + {rows, header, row_cast_params, placeholder_cast_params, placeholder_dump_params, + fn -> counter end} + end + + defp extract_header_and_fields( + repo, + %Ecto.Query{} = query, + _schema, + 
dumper, + _autogen_id, + _placeholder_map, + adapter, + opts + ) do + {query, opts} = repo.prepare_query(:insert_all, query, opts) + query = attach_prefix(query, opts) + + {query, cast_params, dump_params} = + Ecto.Adapter.Queryable.plan_query(:insert_all, adapter, query) + + ix = + case query.select do + %Ecto.Query.SelectExpr{expr: {:&, _, [ix]}} -> ix + _ -> nil + end + + header = + case query.select do + %Ecto.Query.SelectExpr{expr: {:%{}, [], [{:|, _, [{:&, _, [ix]}, args]}]}, fields: fields} -> + {updated_fields, updated_set} = + Enum.map_reduce(args, MapSet.new(), fn {field, _}, set -> + dumped_field = insert_all_select_dump!(field, dumper) + {dumped_field, MapSet.put(set, dumped_field)} + end) + + unchanged_fields = + for {{:., _, [{:&, _, [^ix]}, field]}, [], []} = expr <- fields, + not MapSet.member?(updated_set, field), + do: insert_all_select_dump!(expr) + + unchanged_fields ++ updated_fields + + %Ecto.Query.SelectExpr{expr: {:%{}, _ctx, args}} -> + Enum.map(args, fn {field, _} -> insert_all_select_dump!(field, dumper) end) + + %Ecto.Query.SelectExpr{take: %{^ix => {_fun, fields}}} -> + Enum.map(fields, &insert_all_select_dump!(&1, dumper)) + + %Ecto.Query.SelectExpr{expr: {:&, _, [_ix]}, fields: fields} -> + Enum.map(fields, &insert_all_select_dump!(&1)) + + _ -> + raise ArgumentError, """ + cannot generate a fields list for insert_all from the given source query: + + #{inspect(query)} + + The select clause must be one of the following: + + * A single `map/2` or several `map/2` expressions combined with `select_merge` + * A single `struct/2` or several `struct/2` expressions combined with `select_merge` + * A source such as `p` in the query `from p in Post` + * A single literal map or several literal maps combined with `select_merge`. If + combining several literal maps, there cannot be any query interpolations + except in the last `select_merge`. 
Consider using `Ecto.Query.exclude/2` + to rebuild the select expression from scratch if you need multiple `select_merge` + statements with interpolations + + All keys must exist in the schema that is being inserted into + """ + end + + counter = fn -> length(dump_params) end + + {{query, dump_params}, header, cast_params, [], [], counter} + end + + defp extract_header_and_fields( + _repo, + rows_or_query, + _schema, + _dumper, + _autogen_id, + _placeholder_map, + _adapter, + _opts + ) do + raise ArgumentError, + "expected a list of rows or a query, but got #{inspect(rows_or_query)} as rows_or_query argument in insert_all" + end + + defp init_mapper(nil, _dumper, _adapter, placeholder_map) do + fn {field, value}, acc -> + extract_value(field, value, :any, placeholder_map, acc, & &1) + end + end + + defp init_mapper(schema, dumper, adapter, placeholder_map) do + fn {field, value}, acc -> + case dumper do + %{^field => {source, type, writable}} when writable != :never -> + extract_value(source, value, type, placeholder_map, acc, fn val -> + dump_field!(:insert_all, schema, field, type, val, adapter) + end) + + %{} -> + raise ArgumentError, + "unknown field `#{inspect(field)}` in schema #{inspect(schema)} given to " <> + "insert_all. Unwritable fields, such as virtual and read only fields " <> + "are not supported. 
Associations are also not supported" + end + end + end + + defp extract_value(source, value, type, placeholder_map, acc, dumper) do + {header, placeholder_dump, counter} = acc + + case value do + %Ecto.Query{} = query -> + {{source, query}, {Map.put(header, source, true), placeholder_dump, counter}} + + {:placeholder, key} -> + {value, placeholder_dump, counter} = + extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, dumper) + + {{source, value}, {Map.put(header, source, true), placeholder_dump, counter}} + + cast_value -> + {{source, cast_value, dumper.(value)}, + {Map.put(header, source, true), placeholder_dump, counter}} + end + end + + defp extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, dumper) do + case placeholder_dump do + %{^key => {idx, ^type, _, _}} -> + {{:placeholder, idx}, placeholder_dump, counter} + + %{^key => {_, type, _}} -> + raise ArgumentError, + "a placeholder key can only be used with columns of the same type. " <> + "The key #{inspect(key)} has already been dumped as a #{inspect(type)}" + + %{} -> + {cast_value, dump_value} = + case placeholder_map do + %{^key => cast_value} -> + {cast_value, dumper.(cast_value)} + + _ -> + raise KeyError, + "placeholder key #{inspect(key)} not found in #{inspect(placeholder_map)}" + end + + placeholder_dump = Map.put(placeholder_dump, key, {counter, type, cast_value, dump_value}) + {{:placeholder, counter}, placeholder_dump, counter + 1} + end + end + + defp plan_query_in_rows(rows, header, adapter, counter) do + {rows, {cast_params, counter}} = + Enum.map_reduce(rows, {[], counter}, fn fields, {cast_param_acc, counter} -> + Enum.flat_map_reduce(header, {cast_param_acc, counter}, fn key, + {cast_param_acc, counter} -> + case :lists.keyfind(key, 1, fields) do + {^key, %Ecto.Query{} = query} -> + {query, params, _} = Ecto.Query.Planner.plan(query, :all, adapter) + {cast_params, dump_params} = Enum.unzip(params) + {query, _} = 
Ecto.Query.Planner.normalize(query, :all, adapter, counter) + num_params = length(dump_params) + + {[{key, {query, dump_params}}], + {Enum.reverse(cast_params, cast_param_acc), counter + num_params}} + + {^key, {:placeholder, _} = value} -> + {[{key, value}], {cast_param_acc, counter}} + + {^key, cast_value, dump_value} -> + {[{key, dump_value}], {[cast_value | cast_param_acc], counter + 1}} + + false -> + {[], {cast_param_acc, counter}} + end + end) + end) + + {rows, Enum.reverse(cast_params), counter} + end + + defp insert_all_select_dump!({{:., dot_meta, [{:&, _, [_]}, field]}, [], []}) do + if dot_meta[:writable] == :never do + raise ArgumentError, "cannot select unwritable field `#{inspect(field)}` for insert_all" + else + field + end + end + + defp insert_all_select_dump!(field, dumper) when is_atom(field) do + case dumper do + %{^field => {source, _, writable}} when writable != :never -> + source + + %{} -> + raise ArgumentError, "cannot select unwritable field `#{inspect(field)}` for insert_all" + + nil -> + field + end + end + + defp autogenerate_id(nil, fields, header, _adapter) do + {fields, header} + end + + defp autogenerate_id({key, source, type}, fields, header, adapter) do + case :lists.keyfind(key, 1, fields) do + {^key, _, _} -> + {fields, header} + + false -> + if dump_value = Ecto.Type.adapter_autogenerate(adapter, type) do + {:ok, cast_value} = Ecto.Type.adapter_load(adapter, type, dump_value) + {[{source, cast_value, dump_value} | fields], Map.put(header, source, true)} + else + {fields, header} + end + end + end + + @doc """ + Implementation for `Ecto.Repo.insert!/2`. + """ + def insert!(repo, name, struct_or_changeset, tuplet) do + case insert(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :insert, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.update!/2`. 
+ """ + def update!(repo, name, struct_or_changeset, tuplet) do + case update(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :update, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.delete!/2`. + """ + def delete!(repo, name, struct_or_changeset, tuplet) do + case delete(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :delete, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.insert/2`. + """ + def insert(repo, name, %Changeset{} = changeset, tuplet) do + do_insert(repo, name, changeset, tuplet) + end + + def insert(repo, name, %{__struct__: _} = struct, tuplet) do + do_insert(repo, name, Ecto.Changeset.change(struct), tuplet) + end + + defp do_insert(repo, _name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:insert, changeset) + schema = struct.__struct__ + dumper = schema.__schema__(:dump) + {keep_fields, drop_fields} = schema.__schema__(:insertable_fields) + assocs = schema.__schema__(:associations) + embeds = schema.__schema__(:embeds) + + {return_types, return_sources} = + schema + |> returning(opts) + |> add_read_after_writes(schema) + |> fields_to_sources(dumper) + + on_conflict = Keyword.get(opts, :on_conflict, :raise) + conflict_target = Keyword.get(opts, :conflict_target, []) + conflict_target = conflict_target(conflict_target, dumper) + + # On insert, we always merge the whole struct into the + # changeset as changes, except the primary key if it is nil. 
+ changeset = put_repo_and_action(changeset, :insert, repo, tuplet) + changeset = Relation.surface_changes(changeset, struct, keep_fields ++ assocs) + changeset = update_in(changeset.changes, &Map.drop(&1, drop_fields)) + + wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fn -> + assoc_opts = assoc_opts(assocs, opts) + user_changeset = run_prepare(changeset, prepare) + + {changeset, parents, children, _} = pop_assocs(user_changeset, assocs) + changeset = process_parents(changeset, user_changeset, parents, [], adapter, assoc_opts) + + if changeset.valid? do + embeds = Ecto.Embedded.prepare(changeset, embeds, adapter, :insert) + + autogen_id = schema.__schema__(:autogenerate_id) + schema_meta = metadata(struct, autogen_id, opts) + changes = Map.merge(changeset.changes, embeds) + + {changes, cast_extra, dump_extra, return_types, return_sources} = + autogenerate_id(autogen_id, changes, return_types, return_sources, adapter) + + changes = Map.take(changes, keep_fields) + autogen = autogenerate_changes(schema, :insert, changes) + + dump_changes = + dump_changes!(:insert, changes, autogen, schema, dump_extra, dumper, adapter) + + {on_conflict, conflict_cast_params} = + on_conflict( + on_conflict, + conflict_target, + schema_meta, + fn -> length(dump_changes) end, + dumper, + adapter + ) + + change_values = Enum.map(changes, &elem(&1, 1)) + autogen_values = Enum.map(autogen, &elem(&1, 1)) + + opts = + Keyword.put( + opts, + :cast_params, + change_values ++ autogen_values ++ cast_extra ++ conflict_cast_params + ) + + args = [adapter_meta, schema_meta, dump_changes, on_conflict, return_sources, opts] + + case apply(user_changeset, adapter, :insert, args) do + {:ok, values} -> + values = dump_extra ++ values + + changeset + |> load_changes(:loaded, return_types, values, embeds, autogen, adapter, schema_meta) + |> process_children(user_changeset, children, adapter, assoc_opts) + + {:error, _} = error -> + error + end + else + {:error, 
changeset} + end + end) + end + + defp do_insert(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :insert, repo, tuplet)} + end + + @doc """ + Implementation for `Ecto.Repo.update/2`. + """ + def update(repo, name, %Changeset{} = changeset, tuplet) do + do_update(repo, name, changeset, tuplet) + end + + def update(_repo, _name, %{__struct__: _}, _tuplet) do + raise ArgumentError, + "giving a struct to Ecto.Repo.update/2 is not supported. " <> + "Ecto is unable to properly track changes when a struct is given, " <> + "an Ecto.Changeset must be given instead" + end + + defp do_update(repo, _name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:update, changeset) + schema = struct.__struct__ + dumper = schema.__schema__(:dump) + {keep_fields, drop_fields} = schema.__schema__(:updatable_fields) + assocs = schema.__schema__(:associations) + embeds = schema.__schema__(:embeds) + + force? = !!opts[:force] + filters = add_pk_filter!(changeset.filters, struct) + + {return_types, return_sources} = + schema + |> returning(opts) + |> add_read_after_writes(schema) + |> fields_to_sources(dumper) + + # Differently from insert, update does not copy the struct + # fields into the changeset. All changes must be in the + # changeset before hand. + changeset = put_repo_and_action(changeset, :update, repo, tuplet) + changeset = update_in(changeset.changes, &Map.drop(&1, drop_fields)) + + if changeset.changes != %{} or force? 
do + wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fn -> + assoc_opts = assoc_opts(assocs, opts) + user_changeset = run_prepare(changeset, prepare) + + {changeset, parents, children, reset_parents} = pop_assocs(user_changeset, assocs) + + changeset = + process_parents(changeset, user_changeset, parents, reset_parents, adapter, assoc_opts) + + if changeset.valid? do + embeds = Ecto.Embedded.prepare(changeset, embeds, adapter, :update) + + changes = changeset.changes |> Map.merge(embeds) |> Map.take(keep_fields) + autogen = autogenerate_changes(schema, :update, changes) + dump_changes = dump_changes!(:update, changes, autogen, schema, [], dumper, adapter) + + schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) + dump_filters = dump_fields!(:update, schema, filters, dumper, adapter) + + change_values = Enum.map(changes, &elem(&1, 1)) + autogen_values = Enum.map(autogen, &elem(&1, 1)) + filter_values = Enum.map(filters, &elem(&1, 1)) + opts = Keyword.put(opts, :cast_params, change_values ++ autogen_values ++ filter_values) + args = [adapter_meta, schema_meta, dump_changes, dump_filters, return_sources, opts] + + # If there are no changes or all the changes were autogenerated but not forced, we skip + {action, autogen} = + if changes != %{} or (autogen != [] and force?), + do: {:update, autogen}, + else: {:noop, []} + + case apply(user_changeset, adapter, action, args) do + {:ok, values} -> + changeset + |> load_changes( + :loaded, + return_types, + values, + embeds, + autogen, + adapter, + schema_meta + ) + |> process_children(user_changeset, children, adapter, assoc_opts) + + {:error, _} = error -> + error + end + else + {:error, changeset} + end + end) + else + {:ok, changeset.data} + end + end + + defp do_update(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :update, repo, tuplet)} + end + + @doc """ + Implementation for 
`Ecto.Repo.insert_or_update/2`. + """ + def insert_or_update(repo, name, changeset, tuplet) do + case get_state(changeset) do + :built -> + insert(repo, name, changeset, tuplet) + + :loaded -> + update(repo, name, changeset, tuplet) + + state -> + raise ArgumentError, + "the changeset has an invalid state " <> + "for Repo.insert_or_update/2: #{state}" + end + end + + @doc """ + Implementation for `Ecto.Repo.insert_or_update!/2`. + """ + def insert_or_update!(repo, name, changeset, tuplet) do + case get_state(changeset) do + :built -> + insert!(repo, name, changeset, tuplet) + + :loaded -> + update!(repo, name, changeset, tuplet) + + state -> + raise ArgumentError, + "the changeset has an invalid state " <> + "for Repo.insert_or_update!/2: #{state}" + end + end + + defp get_state(%Changeset{data: %{__meta__: %{state: state}}}), do: state + + defp get_state(%{__struct__: _}) do + raise ArgumentError, + "giving a struct to Repo.insert_or_update/2 or " <> + "Repo.insert_or_update!/2 is not supported. " <> + "Please use an Ecto.Changeset" + end + + @doc """ + Implementation for `Ecto.Repo.delete/2`. 
+ """ + def delete(repo, name, %Changeset{} = changeset, tuplet) do + do_delete(repo, name, changeset, tuplet) + end + + def delete(repo, name, %{__struct__: _} = struct, tuplet) do + changeset = Ecto.Changeset.change(struct) + do_delete(repo, name, changeset, tuplet) + end + + defp do_delete(repo, name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:delete, changeset) + schema = struct.__struct__ + assocs = to_delete_assocs(schema) + dumper = schema.__schema__(:dump) + changeset = put_repo_and_action(changeset, :delete, repo, tuplet) + + {return_types, return_sources} = + schema + |> returning(opts) + |> add_read_after_writes(schema) + |> fields_to_sources(dumper) + + wrap_in_transaction(adapter, adapter_meta, opts, assocs != [], prepare, fn -> + changeset = run_prepare(changeset, prepare) + + if changeset.valid? 
do + filters = add_pk_filter!(changeset.filters, struct) + dump_filters = dump_fields!(:delete, schema, filters, dumper, adapter) + + # Delete related associations + for %{__struct__: mod, on_delete: on_delete} = reflection <- assocs do + apply(mod, on_delete, [reflection, changeset.data, name, tuplet]) + end + + schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) + filter_values = Enum.map(filters, &elem(&1, 1)) + opts = Keyword.put(opts, :cast_params, filter_values) + # Remove backwards compatibility in later release + args = + if function_exported?(adapter, :delete, 5) do + [adapter_meta, schema_meta, dump_filters, return_sources, opts] + else + [adapter_meta, schema_meta, dump_filters, opts] + end + + case apply(changeset, adapter, :delete, args) do + {:ok, values} -> + changeset = + load_changes( + changeset, + :deleted, + return_types, + values, + %{}, + [], + adapter, + schema_meta + ) + + {:ok, changeset.data} + + {:error, _} = error -> + error + end + else + {:error, changeset} + end + end) + end + + defp do_delete(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :delete, repo, tuplet)} + end + + def load(adapter, schema_or_types, data) do + do_load(schema_or_types, data, &Ecto.Type.adapter_load(adapter, &1, &2)) + end + + defp do_load(schema, data, loader) when is_list(data), + do: do_load(schema, Map.new(data), loader) + + defp do_load(schema, {fields, values}, loader) when is_list(fields) and is_list(values), + do: do_load(schema, Enum.zip(fields, values), loader) + + defp do_load(schema, data, loader) when is_atom(schema), + do: Ecto.Schema.Loader.unsafe_load(schema, data, loader) + + defp do_load(types, data, loader) when is_map(types), + do: Ecto.Schema.Loader.unsafe_load(%{}, types, data, loader) + + ## Helpers + + defp returning(schema, opts) do + case Keyword.get(opts, :returning, false) do + [_ | _] = fields -> + fields + + [] -> + raise ArgumentError, + ":returning 
expects at least one field to be given, got an empty list" + + true when is_nil(schema) -> + raise ArgumentError, ":returning option can only be set to true if a schema is given" + + true -> + schema.__schema__(:fields) + + false -> + [] + end + end + + defp add_read_after_writes([], schema), + do: schema.__schema__(:read_after_writes) + + defp add_read_after_writes(return, schema), + do: Enum.uniq(return ++ schema.__schema__(:read_after_writes)) + + defp fields_to_sources(fields, nil) do + {fields, fields} + end + + defp fields_to_sources(fields, dumper) do + Enum.reduce(fields, {[], []}, fn field, {types, sources} -> + {source, type, _writable} = Map.fetch!(dumper, field) + {[{field, type} | types], [source | sources]} + end) + end + + defp struct_from_changeset!(action, %{data: nil}), + do: raise(ArgumentError, "cannot #{action} a changeset without :data") + + defp struct_from_changeset!(_action, %{data: struct}), + do: struct + + defp put_repo_and_action( + %{action: :ignore, valid?: valid?} = changeset, + action, + repo, + {_adapter_meta, opts} + ) do + if valid? do + raise ArgumentError, + "a valid changeset with action :ignore was given to " <> + "#{inspect(repo)}.#{action}/2. 
Changesets can only be ignored " <> + "in a repository action if they are also invalid" + else + %{changeset | action: action, repo: repo, repo_opts: opts} + end + end + + defp put_repo_and_action(%{action: given}, action, repo, _tuplet) + when given != nil and given != action, + do: + raise( + ArgumentError, + "a changeset with action #{inspect(given)} was given to #{inspect(repo)}.#{action}/2" + ) + + defp put_repo_and_action(changeset, action, repo, {_adapter_meta, opts}), + do: %{changeset | action: action, repo: repo, repo_opts: opts} + + defp run_prepare(changeset, prepare) do + Enum.reduce(Enum.reverse(prepare), changeset, fn fun, acc -> + case fun.(acc) do + %Ecto.Changeset{} = acc -> + acc + + other -> + raise "expected function #{inspect(fun)} given to Ecto.Changeset.prepare_changes/2 " <> + "to return an Ecto.Changeset, got: `#{inspect(other)}`" + end + end) + end + + defp metadata(schema, prefix, source, autogen_id, context, opts) do + %{ + autogenerate_id: autogen_id, + context: context, + schema: schema, + source: source, + prefix: Keyword.get(opts, :prefix, prefix) + } + end + + defp metadata( + %{__struct__: schema, __meta__: %{context: context, source: source, prefix: prefix}}, + autogen_id, + opts + ) do + metadata(schema, prefix, source, autogen_id, context, opts) + end + + defp metadata(%{__struct__: schema}, _, _) do + raise ArgumentError, "#{inspect(schema)} needs to be a schema with source" + end + + defp conflict_target({:unsafe_fragment, fragment}, _dumper) when is_binary(fragment) do + {:unsafe_fragment, fragment} + end + + defp conflict_target(conflict_target, dumper) do + for target <- List.wrap(conflict_target) do + case dumper do + %{^target => {alias, _, _}} -> + alias + + %{} when is_atom(target) -> + raise ArgumentError, "unknown field `#{inspect(target)}` in conflict_target" + + _ -> + target + end + end + end + + defp on_conflict(on_conflict, conflict_target, schema_meta, counter_fun, dumper, adapter) do + %{source: source, 
schema: schema, prefix: prefix} = schema_meta + + case on_conflict do + :raise when conflict_target == [] -> + {{:raise, [], []}, []} + + :raise -> + raise ArgumentError, ":conflict_target option is forbidden when :on_conflict is :raise" + + :nothing -> + {{:nothing, [], conflict_target}, []} + + {:replace, []} -> + raise ArgumentError, + ":on_conflict option with `{:replace, fields}` requires a non-empty list of fields" + + {:replace, keys} when is_list(keys) -> + {{replace_fields!(dumper, keys), [], conflict_target}, []} + + :replace_all -> + # Remove the conflict targets from the replacing fields + # since the values don't change and this allows postgres to + # possibly perform a HOT optimization: https://www.postgresql.org/docs/current/storage-hot.html + to_remove = List.wrap(conflict_target) + replace = replace_all_fields!(:replace_all, schema, to_remove) + + if replace == [], do: raise(ArgumentError, "empty list of fields to update, use the `:replace` option instead") + + {{replace, [], conflict_target}, []} + + {:replace_all_except, fields} -> + to_remove = List.wrap(conflict_target) ++ fields + replace = replace_all_fields!(:replace_all_except, schema, to_remove) + + if replace == [], do: raise(ArgumentError, "empty list of fields to update, use the `:replace` option instead") + + {{replace, [], conflict_target}, []} + + [_ | _] = on_conflict -> + from = if schema, do: {source, schema}, else: source + query = Ecto.Query.from(from, update: ^on_conflict) + on_conflict_query(query, {source, schema}, prefix, counter_fun, adapter, conflict_target) + + %Ecto.Query{} = query -> + on_conflict_query(query, {source, schema}, prefix, counter_fun, adapter, conflict_target) + + other -> + raise ArgumentError, "unknown value for :on_conflict, got: #{inspect(other)}" + end + end + + defp replace_fields!(nil, fields), do: fields + + defp replace_fields!(dumper, fields) do + Enum.map(fields, fn field -> + case dumper do + %{^field => {source, _type, :always}} -> + source + 
+ _ -> + raise ArgumentError, + "cannot replace non-updatable field `#{inspect(field)}` in :on_conflict option" + end + end) + end + + defp replace_all_fields!(kind, nil, _to_remove) do + raise ArgumentError, "cannot use #{inspect(kind)} on operations without a schema" + end + + defp replace_all_fields!(_kind, schema, to_remove) do + {updatable_fields, _} = schema.__schema__(:updatable_fields) + Enum.map(updatable_fields -- to_remove, &field_source!(schema, &1)) + end + + defp field_source!(nil, field) do + field + end + + defp field_source!(schema, field) do + schema.__schema__(:field_source, field) || + raise ArgumentError, "unknown field for :on_conflict, got: #{inspect(field)}" + end + + defp on_conflict_query(query, from, prefix, counter_fun, adapter, conflict_target) do + {query, params, _} = + Ecto.Query.Planner.plan(%{query | prefix: prefix}, :update_all, adapter) + + {cast_params, dump_params} = Enum.unzip(params) + + unless query.from.source == from do + raise ArgumentError, + "cannot run on_conflict: query because the query " <> + "has a different {source, schema} pair than the " <> + "original struct/changeset/query. 
Got #{inspect(query.from)} " <> + "and #{inspect(from)} respectively" + end + + {query, _} = Ecto.Query.Planner.normalize(query, :update_all, adapter, counter_fun.()) + {{query, dump_params, conflict_target}, cast_params} + end + + defp apply(_user_changeset, _adapter, :noop, _args) do + {:ok, []} + end + + defp apply(user_changeset, adapter, action, args) do + case apply(adapter, action, args) do + {:ok, values} -> + {:ok, values} + + {:invalid, constraints} -> + {:error, constraints_to_errors(user_changeset, action, constraints)} + + {:error, :stale} -> + opts = List.last(args) + + if Keyword.get(opts, :allow_stale, false) do + {:ok, []} + else + case Keyword.fetch(opts, :stale_error_field) do + {:ok, stale_error_field} when is_atom(stale_error_field) -> + stale_message = Keyword.get(opts, :stale_error_message, "is stale") + + user_changeset = + Changeset.add_error(user_changeset, stale_error_field, stale_message, stale: true) + + {:error, user_changeset} + + _other -> + raise Ecto.StaleEntryError, changeset: user_changeset, action: action + end + end + end + end + + defp constraints_to_errors( + %{constraints: user_constraints, errors: errors} = changeset, + action, + constraints + ) do + constraint_errors = + Enum.map(constraints, fn {type, constraint} -> + user_constraint = + Enum.find(user_constraints, fn c -> + case {c.type, c.constraint, c.match} do + {^type, ^constraint, :exact} -> true + {^type, cc, :suffix} -> String.ends_with?(constraint, cc) + {^type, cc, :prefix} -> String.starts_with?(constraint, cc) + {^type, %Regex{} = r, _match} -> Regex.match?(r, constraint) + _ -> false + end + end) + + case user_constraint do + %{field: field, error_message: error_message, error_type: error_type} -> + {field, {error_message, [constraint: error_type, constraint_name: constraint]}} + + nil -> + raise Ecto.ConstraintError, + action: action, + type: type, + constraint: constraint, + changeset: changeset + end + end) + + %{changeset | errors: constraint_errors ++ 
errors, valid?: false} + end + + defp load_changes(changeset, state, types, values, embeds, autogen, adapter, schema_meta) do + %{data: data, changes: changes} = changeset + + data = + data + |> merge_changes(changes) + |> Map.merge(embeds) + |> merge_autogen(autogen) + |> apply_metadata(state, schema_meta) + |> load_each(values, types, adapter) + + Map.put(changeset, :data, data) + end + + defp merge_changes(data, changes) do + changes = + Enum.reduce(changes, changes, fn {key, _value}, changes -> + if Map.has_key?(data, key), do: changes, else: Map.delete(changes, key) + end) + + Map.merge(data, changes) + end + + defp merge_autogen(data, autogen) do + Enum.reduce(autogen, data, fn {k, v}, acc -> %{acc | k => v} end) + end + + defp apply_metadata(%{__meta__: meta} = data, state, %{source: source, prefix: prefix}) do + %{data | __meta__: %{meta | state: state, source: source, prefix: prefix}} + end + + defp load_each(struct, [{_, value} | kv], [{key, type} | types], adapter) do + case Ecto.Type.adapter_load(adapter, type, value) do + {:ok, value} -> + load_each(%{struct | key => value}, kv, types, adapter) + + :error -> + raise ArgumentError, + "cannot load `#{inspect(value)}` as type #{Ecto.Type.format(type)} " <> + "for field `#{key}` in schema #{inspect(struct.__struct__)}" + end + end + + defp load_each(struct, [], _types, _adapter) do + struct + end + + defp pop_assocs(changeset, []) do + {changeset, [], [], []} + end + + defp pop_assocs(%{changes: changes, types: types, data: data} = changeset, assocs) do + {changes, parent, child, reset} = + Enum.reduce(assocs, {changes, [], [], []}, fn assoc, {changes, parent, child, reset} -> + case changes do + %{^assoc => value} -> + changes = Map.delete(changes, assoc) + + case types do + %{^assoc => {:assoc, %{relationship: :parent} = refl}} -> + {changes, [{refl, value} | parent], child, reset} + + %{^assoc => {:assoc, %{relationship: :child} = refl}} -> + {changes, parent, [{refl, value} | child], reset} + end + + 
%{} -> + with %{^assoc => {:assoc, %{relationship: :parent} = refl}} <- types, + true <- reset_parent?(changes, data, refl) do + {changes, parent, child, [assoc | reset]} + else + _ -> {changes, parent, child, reset} + end + end + end) + + {%{changeset | changes: changes}, parent, child, reset} + end + + defp reset_parent?(changes, data, assoc) do + %{field: field, owner_key: owner_key, related_key: related_key} = assoc + + with %{^owner_key => owner_value} <- changes, + %{^field => %{^related_key => related_value}} when owner_value != related_value <- data do + true + else + _ -> false + end + end + + # Don't mind computing options if there are no assocs + defp assoc_opts([], _opts), do: [] + + defp assoc_opts(_assocs, opts) do + Keyword.take(opts, [:timeout, :log, :telemetry_event, :prefix, :allow_stale]) + end + + defp process_parents(changeset, user_changeset, assocs, reset_assocs, adapter, opts) do + %{changes: changes, valid?: valid?} = changeset + + # Even if the changeset is invalid, we want to run parent callbacks + # to collect feedback. But if all is ok, still return the user changeset. + case Ecto.Association.on_repo_change(changeset, assocs, adapter, opts) do + {:ok, struct} when valid? 
-> + changes = change_parents(changes, struct, assocs) + struct = Ecto.reset_fields(struct, reset_assocs) + %{changeset | changes: changes, data: struct} + + {:ok, _} -> + user_changeset + + {:error, changes} -> + %{user_changeset | changes: Map.merge(user_changeset.changes, changes), valid?: false} + end + end + + defp change_parents(changes, struct, assocs) do + Enum.reduce(assocs, changes, fn {refl, _}, acc -> + %{field: field, owner_key: owner_key, related_key: related_key} = refl + related = Map.get(struct, field) + value = related && Map.fetch!(related, related_key) + + case Map.fetch(changes, owner_key) do + {:ok, current} when current != value -> + raise ArgumentError, + "cannot change belongs_to association `#{field}` because there is " <> + "already a change setting its foreign key `#{owner_key}` to `#{inspect(current)}`" + + _ -> + Map.put(acc, owner_key, value) + end + end) + end + + defp process_children(changeset, user_changeset, assocs, adapter, opts) do + case Ecto.Association.on_repo_change(changeset, assocs, adapter, opts) do + {:ok, struct} -> + {:ok, struct} + + {:error, changes} -> + changes = Map.merge(user_changeset.changes, changes) + {:error, %{user_changeset | changes: changes, valid?: false}} + end + end + + defp to_delete_assocs(schema) do + for assoc <- schema.__schema__(:associations), + reflection = schema.__schema__(:association, assoc), + match?(%{on_delete: on_delete} when on_delete != :nothing, reflection), + do: reflection + end + + defp autogenerate_id(nil, changes, return_types, return_sources, _adapter) do + {changes, [], [], return_types, return_sources} + end + + defp autogenerate_id({key, source, type}, changes, return_types, return_sources, adapter) do + cond do + # Set by user + Map.has_key?(changes, key) -> + {changes, [], [], return_types, return_sources} + + # Autogenerated now + dump_value = Ecto.Type.adapter_autogenerate(adapter, type) -> + {:ok, cast_value} = Ecto.Type.adapter_load(adapter, type, dump_value) + + 
{changes, [cast_value], [{source, dump_value}], [{key, type} | return_types], + return_sources} + + # Autogenerated in storage + true -> + {changes, [], [], [{key, type} | return_types], + [source | List.delete(return_sources, source)]} + end + end + + defp dump_changes!(action, changes, autogen, schema, extra, dumper, adapter) do + dump_fields!(action, schema, changes, dumper, adapter) ++ + dump_fields!(action, schema, autogen, dumper, adapter) ++ + extra + end + + defp autogenerate_changes(schema, action, changes) do + autogen_fields = action |> action_to_auto() |> schema.__schema__() + + Enum.flat_map(autogen_fields, fn {fields, {mod, fun, args}} -> + case Enum.reject(fields, &Map.has_key?(changes, &1)) do + [] -> + [] + + fields -> + generated = apply(mod, fun, args) + Enum.map(fields, &{&1, generated}) + end + end) + end + + defp action_to_auto(:insert), do: :autogenerate + defp action_to_auto(:update), do: :autoupdate + + defp add_pk_filter!(filters, struct) do + Enum.reduce(Ecto.primary_key!(struct), filters, fn + {_k, nil}, _acc -> + raise Ecto.NoPrimaryKeyValueError, struct: struct + + {k, v}, acc -> + Map.put(acc, k, v) + end) + end + + defp wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fun) do + %{changes: changes} = changeset + changed = &Map.has_key?(changes, &1) + relations_changed? = Enum.any?(assocs, changed) or Enum.any?(embeds, changed) + wrap_in_transaction(adapter, adapter_meta, opts, relations_changed?, prepare, fun) + end + + defp wrap_in_transaction(adapter, adapter_meta, opts, relations_changed?, prepare, fun) do + if (relations_changed? 
or prepare != []) and + function_exported?(adapter, :transaction, 3) and + not adapter.in_transaction?(adapter_meta) do + adapter.transaction(adapter_meta, opts, fn -> + case fun.() do + {:ok, struct} -> struct + {:error, changeset} -> adapter.rollback(adapter_meta, changeset) + end + end) + else + fun.() + end + end + + defp dump_field!(action, schema, field, type, value, adapter) do + case Ecto.Type.adapter_dump(adapter, type, value) do + {:ok, value} -> + value + + :error -> + raise Ecto.ChangeError, + "value `#{inspect(value)}` for `#{inspect(schema)}.#{field}` " <> + "in `#{action}` does not match type #{Ecto.Type.format(type)}" + end + end + + defp dump_fields!(action, schema, kw, dumper, adapter) do + for {field, value} <- kw do + {alias, type, _writable} = Map.fetch!(dumper, field) + {alias, dump_field!(action, schema, field, type, value, adapter)} + end + end +end diff --git a/deps/ecto/lib/ecto/repo/supervisor.ex b/deps/ecto/lib/ecto/repo/supervisor.ex new file mode 100644 index 0000000..1be968d --- /dev/null +++ b/deps/ecto/lib/ecto/repo/supervisor.ex @@ -0,0 +1,227 @@ +defmodule Ecto.Repo.Supervisor do + @moduledoc false + use Supervisor + require Logger + + @defaults [timeout: 15000, pool_size: 10] + @integer_url_query_params ["timeout", "pool_size", "idle_interval"] + + @doc """ + Starts the repo supervisor. + """ + def start_link(repo, otp_app, adapter, opts) do + name = Keyword.get(opts, :name, repo) + sup_opts = if name, do: [name: name], else: [] + Supervisor.start_link(__MODULE__, {name, repo, otp_app, adapter, opts}, sup_opts) + end + + @doc """ + Retrieves the runtime configuration. 
+ """ + def init_config(type, repo, otp_app, opts) do + config = Application.get_env(otp_app, repo, []) + config = [otp_app: otp_app] ++ (@defaults |> Keyword.merge(config) |> Keyword.merge(opts)) + config = Keyword.put_new_lazy(config, :telemetry_prefix, fn -> telemetry_prefix(repo) end) + + case repo_init(type, repo, config) do + {:ok, config} -> + {url, config} = Keyword.pop(config, :url) + url_config = parse_url(url || "") + + url_config = + if is_list(config[:ssl]) and url_config[:ssl] == true do + Logger.warning( + "ignoring `ssl=true` parameter in URL because `ssl` is already set in the configuration: #{inspect(config[:ssl])}" + ) + + Keyword.delete(url_config, :ssl) + else + url_config + end + + {:ok, Keyword.merge(config, url_config)} + + :ignore -> + :ignore + end + end + + defp telemetry_prefix(repo) do + repo + |> Module.split() + |> Enum.map(&(&1 |> Macro.underscore() |> String.to_atom())) + end + + defp repo_init(type, repo, config) do + if Code.ensure_loaded?(repo) and function_exported?(repo, :init, 2) do + repo.init(type, config) + else + {:ok, config} + end + end + + @doc """ + Retrieves the compile time configuration. + """ + def compile_config(_repo, opts) do + otp_app = Keyword.fetch!(opts, :otp_app) + adapter = opts[:adapter] + + unless adapter do + raise ArgumentError, "missing :adapter option on use Ecto.Repo" + end + + if Code.ensure_compiled(adapter) != {:module, adapter} do + raise ArgumentError, + "adapter #{inspect(adapter)} was not compiled, " <> + "ensure it is correct and it is included as a project dependency" + end + + behaviours = + for {:behaviour, behaviours} <- adapter.__info__(:attributes), + behaviour <- behaviours, + do: behaviour + + unless Ecto.Adapter in behaviours do + raise ArgumentError, + "expected :adapter option given to Ecto.Repo to list Ecto.Adapter as a behaviour" + end + + {otp_app, adapter, behaviours} + end + + @doc """ + Parses an Ecto URL allowed in configuration. 
+ + The format must be: + + "ecto://username:password@hostname:port/database?ssl=true&timeout=1000" + + """ + def parse_url(""), do: [] + + def parse_url(url) when is_binary(url) do + info = URI.parse(url) + + if is_nil(info.host) do + raise Ecto.InvalidURLError, url: url, message: "host is not present" + end + + if is_nil(info.path) or not (info.path =~ ~r"^/([^/])+$") do + raise Ecto.InvalidURLError, url: url, message: "path should be a database name" + end + + destructure [username, password], info.userinfo && String.split(info.userinfo, ":") + "/" <> database = info.path + + url_opts = [ + scheme: info.scheme, + username: username, + password: password, + database: database, + port: info.port + ] + + url_opts = put_hostname_if_present(url_opts, info.host) + query_opts = parse_uri_query(info) + + for {k, v} <- url_opts ++ query_opts, + not is_nil(v), + do: {k, if(is_binary(v), do: URI.decode(v), else: v)} + end + + defp put_hostname_if_present(keyword, "") do + keyword + end + + defp put_hostname_if_present(keyword, hostname) when is_binary(hostname) do + Keyword.put(keyword, :hostname, hostname) + end + + defp parse_uri_query(%URI{query: nil}), + do: [] + + defp parse_uri_query(%URI{query: query} = url) do + query + |> URI.query_decoder() + |> Enum.reduce([], fn + {"ssl", "true"}, acc -> + [{:ssl, true}] ++ acc + + {"ssl", "false"}, acc -> + [{:ssl, false}] ++ acc + + {key, value}, acc when key in @integer_url_query_params -> + [{String.to_atom(key), parse_integer!(key, value, url)}] ++ acc + + {key, value}, acc -> + [{String.to_atom(key), value}] ++ acc + end) + end + + defp parse_integer!(key, value, url) do + case Integer.parse(value) do + {int, ""} -> + int + + _ -> + raise Ecto.InvalidURLError, + url: url, + message: "cannot parse value `#{value}` for parameter `#{key}` as an integer" + end + end + + @doc false + def tuplet(name, opts) do + adapter_meta = Ecto.Repo.Registry.lookup(name) + + if opts[:stacktrace] || Map.get(adapter_meta, :stacktrace) do + 
{:current_stacktrace, stacktrace} = :erlang.process_info(self(), :current_stacktrace) + {adapter_meta, Keyword.put(opts, :stacktrace, stacktrace)} + else + {adapter_meta, opts} + end + end + + ## Callbacks + + @doc false + def init({name, repo, otp_app, adapter, opts}) do + case init_config(:supervisor, repo, otp_app, opts) do + {:ok, opts} -> + :telemetry.execute( + [:ecto, :repo, :init], + %{system_time: System.system_time()}, + %{repo: repo, opts: opts} + ) + + {:ok, child, meta} = adapter.init([repo: repo] ++ opts) + + # Normalize name to atom, ignore via/global names + name = if is_atom(name), do: name, else: nil + cache = Ecto.Query.Planner.new_query_cache(name) + meta = Map.merge(meta, %{repo: repo, cache: cache}) + child_spec = wrap_child_spec(child, [name, adapter, meta]) + Supervisor.init([child_spec], strategy: :one_for_one, max_restarts: 0) + + :ignore -> + :ignore + end + end + + def start_child({mod, fun, args}, name, adapter, meta) do + case apply(mod, fun, args) do + {:ok, pid} -> + meta = Map.merge(meta, %{pid: pid, adapter: adapter}) + Ecto.Repo.Registry.associate(self(), name, meta) + {:ok, pid} + + other -> + other + end + end + + defp wrap_child_spec(%{start: start} = spec, args) do + %{spec | start: {__MODULE__, :start_child, [start | args]}} + end +end diff --git a/deps/ecto/lib/ecto/repo/transaction.ex b/deps/ecto/lib/ecto/repo/transaction.ex new file mode 100644 index 0000000..19d8932 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/transaction.ex @@ -0,0 +1,59 @@ +defmodule Ecto.Repo.Transaction do + @moduledoc false + @dialyzer :no_opaque + + def transact(repo, name, fun, adapter_opts) when is_function(fun, 0) do + transact(repo, name, fn _repo -> fun.() end, adapter_opts) + end + + def transact(repo, _name, fun, {adapter_meta, opts}) when is_function(fun, 1) do + adapter_meta.adapter.transaction(adapter_meta, opts, fn -> + case fun.(repo) do + {:ok, result} -> + result + + {:error, reason} -> + adapter_meta.adapter.rollback(adapter_meta, 
reason) + + other -> + raise ArgumentError, + "expected to return {:ok, _} or {:error, _}, got: #{inspect(other)}" + end + end) + end + + def transact(repo, _name, %Ecto.Multi{} = multi, {adapter_meta, opts}) do + %{adapter: adapter} = adapter_meta + wrap = &adapter.transaction(adapter_meta, opts, &1) + return = &adapter.rollback(adapter_meta, &1) + + case Ecto.Multi.__apply__(multi, repo, wrap, return) do + {:ok, values} -> + {:ok, values} + + {:error, {key, error_value, values}} -> + {:error, key, error_value, values} + + {:error, operation} -> + raise """ + operation #{inspect(operation)} is rolling back unexpectedly. + + This can happen if `repo.rollback/1` is manually called, which is not \ + supported by `Ecto.Multi`. It can also occur if a nested transaction \ + has rolled back and its error is not bubbled up to the outer multi. \ + Nested transactions are discouraged when using `Ecto.Multi`. Consider \ + flattening out the transaction instead. + """ + end + end + + def in_transaction?(name) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(name) + adapter.in_transaction?(meta) + end + + def rollback(name, value) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(name) + adapter.rollback(meta, value) + end +end diff --git a/deps/ecto/lib/ecto/schema.ex b/deps/ecto/lib/ecto/schema.ex new file mode 100644 index 0000000..d54072c --- /dev/null +++ b/deps/ecto/lib/ecto/schema.ex @@ -0,0 +1,2674 @@ +defmodule Ecto.Schema do + @moduledoc ~S""" + An Ecto schema maps external data into Elixir structs. + + The definition of the schema is possible through two main APIs: + `schema/2` and `embedded_schema/1`. + + `schema/2` is typically used to map data from a persisted source, + usually a database table, into Elixir structs and vice-versa via + the `Ecto.Repo` module. For this reason, the first argument of `schema/2` + is the source (table) name. 
Structs defined with `schema/2` also contain + a `__meta__` field with metadata holding the status of the struct, + for example, if it has been built, loaded or deleted. Schemas also support + associations, through APIs such as `has_one/3` and `belongs_to/3`. + Check out the [Associations cheatsheet](associations.cheatmd) for a reference + on the different associations types and their migrations. + + On the other hand, `embedded_schema/1` is used for defining schemas + that are embedded in other schemas or only exist in-memory. For example, + you can use such schemas to receive data from a command line interface + or a contact form, and validate it, without ever persisting it elsewhere. + Such structs do not contain a `__meta__` field, as they are never persisted. + + Both schemas can be used alongside changesets to filter, cast, and validate + data. Besides working as data mappers, `embedded_schema/1` and `schema/2` + can also be used together to decouple how the data is represented in your + applications from the database. + + ## Example + + defmodule User do + use Ecto.Schema + + schema "users" do + field :name, :string + field :age, :integer, default: 0 + field :password, :string, redact: true + has_many :posts, Post + end + end + + By default, a schema will automatically generate a primary key which is named + `id` and of type `:integer`. The [`field`](`field/3`) macro defines a field in the schema + with given name and type. `has_many` associates many posts with the user + schema. Schemas are regular structs and can be created and manipulated directly + using Elixir's struct API: + + iex> user = %User{name: "jane"} + iex> %{user | age: 30} + + However, most commonly, structs are cast, validated and manipulated with the + `Ecto.Changeset` module. + + The first argument of `schema/2` is the name of database's table, which does + not need to correlate to your module name (commonly referred to as the schema/schema name). 
+ For example, if you are working with a legacy database, you can reference the table name + (`legacy_users`) when you define your schema (`User`): + + defmodule User do + use Ecto.Schema + + schema "legacy_users" do + # ... fields ... + end + end + + Source-based schemas are queryable by default, which means we can pass them + to `Ecto.Repo` modules and also build queries: + + MyRepo.all(User) + MyRepo.all(from u in User, where: u.id == 13) + + The repository will then run the query against the source/table. + + Embedded schemas are defined similarly to source-based schemas. For example, + you can use an embedded schema to represent your UI, mapping and validating + its inputs, and then you convert such embedded schema to other schemas that + are persisted to the database: + + defmodule SignUp do + use Ecto.Schema + + embedded_schema do + field :name, :string + field :age, :integer + field :email, :string + field :accepts_conditions, :boolean + end + end + + defmodule Profile do + use Ecto.Schema + + schema "profiles" do + field :name + field :age + belongs_to :account, Account + end + end + + defmodule Account do + use Ecto.Schema + + schema "accounts" do + field :email + end + end + + The `SignUp` schema can be cast and validated with the help of the + `Ecto.Changeset` module, and afterwards, you can copy its data to + the `Profile` and `Account` structs that will be persisted to the + database with the help of `Ecto.Repo`. On the other hand, embedded + schemas cannot be queried directly (they are not queryable). + + > #### `use Ecto.Schema` {: .info} + > + > When you `use Ecto.Schema`, it will: + > + > - import `Ecto.Schema` macros `schema/2` and `embedded_schema/1` + > - register default values for module attributes that can be overridden, such as + > `@primary_key` and `@timestamps_opts` + > - define reflection functions such as `__schema__/1` and `__changeset__/1` + > + > We detail those throughout the module documentation. 
+ + ## Redacting fields + + A field marked with `redact: true` will display a value of `**redacted**` + when inspected in changes inside a `Ecto.Changeset` and be excluded from + inspect on the schema unless the schema module is tagged with + the option `@derive_inspect_for_redacted_fields false`. + + A schema module tagged with `@schema_redact :all_except_primary_keys` will + redact all fields except primary keys. + + ## Schema attributes + + Supported attributes for configuring the defined schema. They must + be set after the `use Ecto.Schema` call and before the `schema/2` + definition. + + These attributes are: + + * `@primary_key` - configures the schema primary key. It expects + a tuple `{field_name, type, options}` with the primary key field + name, type (typically `:id` or `:binary_id`, but can be any type) and + options. It also accepts `false` to disable the generation of a primary + key field. Defaults to `{:id, :id, autogenerate: true}`. + + * `@schema_prefix` - configures the schema prefix. Defaults to `nil`, + which generates structs and queries without prefix. When set, the + prefix will be used by every built struct and on queries whenever + the schema is used in a `from` or a `join`. In PostgreSQL, the prefix + is called "SCHEMA" (typically set via Postgres' `search_path`). + In MySQL the prefix points to databases. + + * `@schema_context` - configures the schema context. Defaults to `nil`, + which generates structs and queries without context. Context are not used + by the built-in SQL adapters. + + * `@schema_redact` - If set to `:all_except_primary_keys`, Ecto will + treat all non-primary key fields as if they were individually marked + as redacted. Defaults to `false`, as no fields are redacted by default. + The value set here can be changed per field through the `:redact` option. + + * `@foreign_key_type` - configures the default foreign key type + used by `belongs_to` associations. 
It must be set in the same + module that defines the `belongs_to`. Defaults to `:id`; + + * `@timestamps_opts` - configures the default timestamps type + used by `timestamps`. Defaults to `[type: :naive_datetime]`; + + * `@derive` - the same as `@derive` available in `Kernel.defstruct/1` + as the schema defines a struct behind the scenes; + + * `@derive_inspect_for_redacted_fields false` - Ecto will automatically + derive the `Inspect` protocol if any redacted fields are set. This option + sets it to false; + + * `@field_source_mapper` - a function that receives the current field name + and returns the mapping of this field name in the underlying source. + In other words, it is a mechanism to automatically generate the `:source` + option for the [`field`](`field/3`) macro. It defaults to `fn x -> x end`, + where no field transformation is done; + + The advantage of configuring the schema via those attributes is + that they can be set with a macro to configure application wide + defaults. + + For example, if your database does not support autoincrementing + primary keys and requires something like UUID or a RecordID, you + can configure and use `:binary_id` as your primary key type as follows: + + # Define a module to be used as base + defmodule MyApp.Schema do + defmacro __using__(_) do + quote do + use Ecto.Schema + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id + end + end + end + + # Now use MyApp.Schema to define new schemas + defmodule MyApp.Comment do + use MyApp.Schema + + schema "comments" do + belongs_to :post, MyApp.Post + end + end + + Any schemas using `MyApp.Schema` will get the `:id` field with type + `:binary_id` as the primary key. We explain what the `:binary_id` type + entails in the next section. + + The `belongs_to` association on `MyApp.Comment` will also define + a `:post_id` field with `:binary_id` type that references the `:id` + field of the `MyApp.Post` schema. 
+ + ## Primary keys + + Ecto supports two ID types, called `:id` and `:binary_id`, which are + often used as the type for primary keys and associations. + + The `:id` type is used when the primary key is an integer while the + `:binary_id` is used for primary keys in particular binary formats, + which may be `Ecto.UUID` for databases like PostgreSQL and MySQL, + or some specific ObjectID or RecordID often imposed by NoSQL databases. + + In both cases, both types have their semantics specified by the + underlying adapter/database. If you use the `:id` type with + `:autogenerate`, it means the database will be responsible for + auto-generation of the id. This is often the case for primary keys + in relational databases which are auto-incremented. + + There are two ways to define primary keys in Ecto: using the `@primary_key` + module attribute and using `primary_key: true` as option for `field/3` in + your schema definition. They are not mutually exclusive and can be used + together. + + Using `@primary_key` should be preferred for single field primary keys and + sharing primary key definitions between multiple schemas using macros. + Setting `@primary_key` also automatically configures the reference types + for `has_one` and `has_many` associations. + + Ecto also supports composite primary keys, which is where you need to use + `primary_key: true` for the fields in your schema. This usually goes along + with setting `@primary_key false` to disable generation of additional + primary key fields. + + Besides `:id` and `:binary_id`, which are often used by primary + and foreign keys, Ecto provides a huge variety of types to be used + by any field. + + ## Types and casting + + When defining the schema, types need to be given. Types are split + into two categories, primitive types and custom types. 
+ + ### Primitive types + + The primitive types are: + + Ecto type | Elixir type | Literal syntax in query + :---------------------- | :---------------------- | :--------------------- + `:id` | `integer` | 1, 2, 3 + `:binary_id` | `binary` | `<>` + `:integer` | `integer` | 1, 2, 3 + `:float` | `float` | 1.0, 2.0, 3.0 + `:boolean` | `boolean` | true, false + `:string` | UTF-8 encoded `string` | "hello" + `:binary` | `binary` | `<>` + `:bitstring` | `bitstring` | `<<_::size>>` + `{:array, inner_type}` | `list` | `[value, value, value, ...]` + `:map` | `map` | + `{:map, inner_type}` | `map` | + `:decimal` | [`Decimal`](https://github.com/ericmj/decimal) | + `:date` | `Date` | + `:time` | `Time` | + `:time_usec` | `Time` | + `:naive_datetime` | `NaiveDateTime` | + `:naive_datetime_usec` | `NaiveDateTime` | + `:utc_datetime` | `DateTime` | + `:utc_datetime_usec` | `DateTime` | + `:duration` | `Duration` | + + **Notes:** + + * When using database migrations provided by "Ecto SQL", you can pass + your Ecto type as the column type. However, note the same Ecto type + may support multiple database types. For example, all of `:varchar`, + `:text`, `:bytea`, etc. translate to Ecto's `:string`. Similarly, + Ecto's `:decimal` can be used for `:numeric` and other database + types. For more information, see [all migration types](https://hexdocs.pm/ecto_sql/Ecto.Migration.html#module-field-types). + + * For the `{:array, inner_type}` and `{:map, inner_type}` type, + replace `inner_type` with one of the valid types, such as `:string`. + + * For the `:decimal` type, `+Infinity`, `-Infinity`, and `NaN` values + are not supported, even though the `Decimal` library handles them. + To support them, you can create a custom type. + + * For calendar types with and without microseconds, the precision is + enforced when persisting to the DB. 
For example, casting `~T[09:00:00]` + as `:time_usec` will succeed and result in `~T[09:00:00.000000]`, but + persisting a type without microseconds as `:time_usec` will fail. + Similarly, casting `~T[09:00:00.000000]` as `:time` will succeed, but + persisting will not. This is the same behaviour as seen in other types, + where casting has to be done explicitly and is never performed + implicitly when loading from or dumping to the database. + + * For the `:duration` type, you may need to enable `Duration` support in + your adapter. For information on how to enable it in Postgrex, see their + [HexDocs page](https://hexdocs.pm/postgrex/readme.html#data-representation). + + ### Custom types + + Besides providing primitive types, Ecto allows custom types to be + implemented by developers, allowing Ecto behaviour to be extended. + + A custom type is a module that implements one of the `Ecto.Type` + or `Ecto.ParameterizedType` behaviours. By default, Ecto provides + the following custom types: + + Custom type | Database type | Elixir type + :---------------------- | :---------------------- | :--------------------- + `Ecto.UUID` | `:uuid` (as a binary) | `string()` (as a UUID) + `Ecto.Enum` | `:string` | `atom()` + + Finally, schemas can also have virtual fields by passing the + `virtual: true` option. These fields are not persisted to the database + and can optionally not be type checked by declaring type `:any`. + + ### The datetime types + + Four different datetime primitive types are available: + + * `naive_datetime` - has a precision of seconds and casts values + to Elixir's `NaiveDateTime` struct which has no timezone information. + + * `naive_datetime_usec` - has a default precision of microseconds and + also casts values to `NaiveDateTime` with no timezone information. + + * `utc_datetime` - has a precision of seconds and casts values to + Elixir's `DateTime` struct and expects the time zone to be set to UTC. 
+ + * `utc_datetime_usec` has a default precision of microseconds and also + casts values to `DateTime` expecting the time zone be set to UTC. + + All of those types are represented by the same timestamp/datetime in the + underlying data storage, the difference are in their precision and how the + data is loaded into Elixir. + + Having different precisions allows developers to choose a type that will + be compatible with the database and your project's precision requirements. + For example, some older versions of MySQL do not support microseconds in + datetime fields. + + When choosing what datetime type to work with, keep in mind that Elixir + functions like `NaiveDateTime.utc_now/0` have a default precision of 6. + Casting a value with a precision greater than 0 to a non-`usec` type will + truncate all microseconds and set the precision to 0. + + ### The map type + + The map type allows developers to store an Elixir map directly + in the database: + + # In your migration + create table(:users) do + add :data, :map + end + + # In your schema + field :data, :map + + # Now in your code + user = Repo.insert! %User{data: %{"foo" => "bar"}} + + Keep in mind that we advise the map keys to be strings or integers + instead of atoms. Atoms may be accepted depending on how maps are + serialized but the database will always convert atom keys to strings + due to security reasons. + + In order to support maps, different databases may employ different + techniques. For example, PostgreSQL will store those values in jsonb + fields, allowing you to just query parts of it. MSSQL, on + the other hand, does not yet provide a JSON type, so the value will be + stored in a text field. + + For maps to work in such databases, Ecto will need a JSON library. + By default Ecto will use [Jason](https://github.com/michalmuskala/jason) + which needs to be added to your deps in `mix.exs`: + + {:jason, "~> 1.0"} + + You can however configure the adapter to use another library. 
For example, + if using Postgres: + + config :postgrex, :json_library, YourLibraryOfChoice + + Or if using MySQL: + + config :myxql, :json_library, YourLibraryOfChoice + + If changing the JSON library, remember to recompile the adapter afterwards + by cleaning the current build: + + mix deps.clean --build postgrex + + ### Casting + + When directly manipulating the struct, it is the responsibility of + the developer to ensure the field values have the proper type. For + example, you can create a user struct with an invalid value + for `age`: + + iex> user = %User{age: "0"} + iex> user.age + "0" + + However, if you attempt to persist the struct above, an error will + be raised since Ecto validates the types when sending them to the + adapter/database. + + Therefore, when working with and manipulating external data, it is + recommended to use `Ecto.Changeset`'s that are able to filter + and properly cast external data: + + changeset = Ecto.Changeset.cast(%User{}, %{"age" => "0"}, [:age]) + user = Repo.insert!(changeset) + + **You can use Ecto schemas and changesets to cast and validate any kind + of data, regardless if the data will be persisted to an Ecto repository + or not**. 
+ + ## Reflection + + Any schema module will generate the `__schema__` function that can be + used for runtime introspection of the schema: + + * `__schema__(:source)` - Returns the source as given to `schema/2`; + * `__schema__(:prefix)` - Returns optional prefix for source provided by + `@schema_prefix` schema attribute; + * `__schema__(:primary_key)` - Returns a list of primary key fields (empty if there is none); + + * `__schema__(:fields)` - Returns a list of all non-virtual field names; + * `__schema__(:virtual_fields)` - Returns a list of all virtual field names; + * `__schema__(:field_source, field)` - Returns the alias of the given field; + + * `__schema__(:type, field)` - Returns the type of the given non-virtual field; + * `__schema__(:virtual_type, field)` - Returns the type of the given virtual field; + + * `__schema__(:associations)` - Returns a list of all association field names; + * `__schema__(:association, assoc)` - Returns the association reflection of the given assoc; + + * `__schema__(:embeds)` - Returns a list of all embedded field names; + * `__schema__(:embed, embed)` - Returns the embedding reflection of the given embed; + + * `__schema__(:read_after_writes)` - Non-virtual fields that must be read back + from the database after every write (insert, update, and delete); + + * `__schema__(:autogenerate_id)` - Primary key that is auto generated on insert; + * `__schema__(:autogenerate_fields)` - Returns a list of fields names that are auto + generated on insert, except for the primary key; + + * `__schema__(:redact_fields)` - Returns a list of redacted field names; + + Furthermore, both `__struct__` and `__changeset__` functions are + defined so structs and changeset functionalities are available. + + The `__schema__` function may accept other values, but those values + are not part of the public API. Any values that are not in the list + above may change at any time without notice. 
+ + ## Working with typespecs + + Generating typespecs for schemas is out of the scope of `Ecto.Schema`. + + In order to be able to use types such as `User.t()`, `t/0` has to be defined manually: + + defmodule User do + use Ecto.Schema + + @type t :: %__MODULE__{ + name: String.t(), + age: non_neg_integer() + } + + # ... schema ... + end + + Defining the type of each field is not mandatory, but it is preferable. + """ + + alias Ecto.Schema.Metadata + + @type source :: String.t() + @type prefix :: any() + @type schema :: %{optional(atom) => any, __struct__: atom, __meta__: Metadata.t()} + @type embedded_schema :: %{optional(atom) => any, __struct__: atom} + @type t :: schema | embedded_schema + @type belongs_to(t) :: t | Ecto.Association.NotLoaded.t() + @type has_one(t) :: t | Ecto.Association.NotLoaded.t() + @type has_many(t) :: [t] | Ecto.Association.NotLoaded.t() + @type many_to_many(t) :: [t] | Ecto.Association.NotLoaded.t() + @type embeds_one(t) :: t + @type embeds_many(t) :: [t] + + @doc false + defmacro __using__(_) do + quote do + import Ecto.Schema, only: [schema: 2, embedded_schema: 1] + + Module.register_attribute(__MODULE__, :ecto_primary_keys, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_virtual_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_query_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_field_sources, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_embeds, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_raw, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_autogenerate, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_autoupdate, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_redact_fields, accumulate: true) + end + end + + @field_opts [ + :default, + :source, 
+ :autogenerate, + :read_after_writes, + :virtual, + :primary_key, + :load_in_query, + :redact, + :foreign_key, + :on_replace, + :defaults, + :type, + :where, + :references, + :skip_default_validation, + :writable + ] + + @doc """ + Defines an embedded schema with the given field definitions. + + An embedded schema is either embedded into another + schema or kept exclusively in memory. For this reason, + an embedded schema does not require a source name and + it does not include a metadata field. + + Embedded schemas by default set the primary key type + to `:binary_id` but such can be configured with the + `@primary_key` attribute. + + `belongs_to/3` associations may be defined inside of + embedded schemas. However, any association nested inside + of an embedded schema won't be persisted to the database + when calling `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`. + """ + defmacro embedded_schema(do: block) do + schema(nil, false, :binary_id, block) + end + + @doc """ + Defines a schema struct with a source name and field definitions. + + An additional field called `__meta__` is added to the struct for storing + internal Ecto state. This field always has a `Ecto.Schema.Metadata` struct + as value and can be manipulated with the `Ecto.put_meta/2` function. + """ + defmacro schema(source, do: block) do + schema(source, true, :id, block) + end + + defp schema(source, meta?, type, block) do + prelude = + quote do + meta? = unquote(meta?) + source = unquote(source) + prefix = Ecto.Schema.__schema__(__MODULE__, __ENV__.line, source, meta?, unquote(type)) + + try do + import Ecto.Schema + unquote(block) + after + :ok + end + end + + postlude = + quote unquote: false do + {struct_fields, bags_of_clauses} = Ecto.Schema.__schema__(__MODULE__) + defstruct struct_fields + + def __changeset__ do + %{unquote_splicing(Macro.escape(@ecto_changeset_fields))} + end + + if meta? 
do + def __schema__(:query) do + %Ecto.Query{ + from: %Ecto.Query.FromExpr{ + source: {unquote(source), __MODULE__}, + prefix: unquote(Macro.escape(prefix)) + } + } + end + end + + def __schema__(:source), do: unquote(source) + def __schema__(:prefix), do: unquote(Macro.escape(prefix)) + + for clauses <- bags_of_clauses, {args, body} <- clauses do + def __schema__(unquote_splicing(args)), do: unquote(body) + end + + :ok + end + + quote do + unquote(prelude) + unquote(postlude) + end + end + + ## API + + @doc """ + Defines a field on the schema with given name and type. + + The field name will be used as is to read and write to the database + by all of the built-in adapters unless overridden with the `:source` + option. + + ## Options + + * `:default` - Sets the default value on the schema and the struct. + + The default value is calculated at compilation time, so don't use + expressions like `DateTime.utc_now` or `Ecto.UUID.generate` as + they would then be the same for all records: in this scenario you can use + the `:autogenerate` option to generate at insertion time. + + The default value is validated against the field's type at compilation time + and it will raise an ArgumentError if there is a type mismatch. If you cannot + infer the field's type at compilation time, you can use the + `:skip_default_validation` option on the field to skip validations. + + Once a default value is set, if you send changes to the changeset that + contains the same value defined as default, validations will not be performed + since there are no changes after all. + + * `:source` - Defines the name that is to be used in the database for this field. + This is useful when attaching to an existing database. The value should be + an atom. This is a last minute translation before the query goes to the database. + All references within your Elixir code must still be to the field name, + such as in association foreign keys. 
+ + * `:autogenerate` - a `{module, function, args}` tuple for a function + to call to generate the field value before insertion if value is not set. + A shorthand value of `true` is equivalent to `{type, :autogenerate, []}`. + + * `:read_after_writes` - When true, the field is always read back + from the database after inserts, updates, and deletes. + + For relational databases, this means the RETURNING option of those + statements is used. For this reason, MySQL does not support this + option and will raise an error if a schema is inserted/updated with + read after writes fields. + + * `:virtual` - When true, the field is not persisted to the database. + Notice virtual fields do not support `:autogenerate` nor + `:read_after_writes`. + + * `:primary_key` - When true, the field is used as part of the + composite primary key. + + * `:load_in_query` - When false, the field will not be loaded when + selecting the whole struct in a query, such as `from p in Post, select: p`. + Defaults to `true`. + + * `:redact` - When true, it will display a value of `**redacted**` + when inspected in changes inside a `Ecto.Changeset` and be excluded + from inspect on the schema. Defaults to `false`. + + * `:skip_default_validation` - When true, it will skip the type validation + step at compile time. + + * `:writable` - Defines when a field is allowed to be modified. Must be one of + `:always`, `:insert`, or `:never`. If set to `:always`, the field can be modified + by any repo operation. If set to `:insert`, the field can be inserted but cannot + be further modified, even in an upsert. If set to `:never`, the field becomes + read only. Defaults to `:always`. + + """ + defmacro field(name, type \\ :string, opts \\ []) do + quote do + Ecto.Schema.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts)) + end + end + + @doc """ + Generates `:inserted_at` and `:updated_at` timestamp fields. 
+ + The fields generated by this macro will automatically be set to + the current time when inserting and updating values in a repository. + + ## Options + + * `:inserted_at` - the Ecto schema name of the field for insertion times or `false` + * `:updated_at` - the Ecto schema name of the field for update times or `false` + * `:inserted_at_source` - the name of the database column for insertion times or `false` + * `:updated_at_source` - the name of the database column for update times or `false` + * `:type` - the timestamps type, defaults to `:naive_datetime`. + * `:autogenerate` - a module-function-args tuple used for generating + both `inserted_at` and `updated_at` timestamps + + All options can be pre-configured by setting `@timestamps_opts`. + """ + defmacro timestamps(opts \\ []) do + quote bind_quoted: binding() do + Ecto.Schema.__define_timestamps__(__MODULE__, opts) + end + end + + @doc ~S""" + Indicates a one-to-many association with another schema. + + The current schema has zero or more records of the other schema. The other + schema often has a `belongs_to` field with the reverse association. + + ## Options + + * `:foreign_key` - Sets the foreign key, this should map to a field on the + other schema, defaults to the underscored name of the current schema + suffixed by `_id` + + * `:references` - Sets the key on the current schema to be used for the + association, defaults to the primary key on the schema + + * `:through` - Allow this association to be defined in terms of existing + associations. Read the [section on `:through` associations](#has_many/3-has_many-has_one-through) + for more info + + * `:on_delete` - The action taken on associations when parent record + is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`. + Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. 
+ Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`) and it never cascades. If posts has many comments, + which has many tags, and you delete a post, only comments will be deleted. + If your database does not support references, cascading can be manually + implemented by using `Ecto.Multi` or `Ecto.Changeset.prepare_changes/2`. + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:delete` or + `:delete_if_exists`. See `Ecto.Changeset`'s section about `:on_replace` for + more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or an `atom`/`{module, function, args}` that receives the association struct + and the owner struct as arguments. For example, if you set + `Post.has_many :comments, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :comments)`, the comment will have + `comment.public == true`. Alternatively, you can set it to + `Post.has_many :comments, defaults: :update_comment`, which will invoke + `Post.update_comment(comment, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(comment, post, arg3, arg4)` + + * `:where` - A filter for the association. See "Filtering associations" below. + It does not apply to `:through` associations. + + * `:preload_order` - Sets the default `order_by` when preloading the association. + It may be a keyword list/list of fields or an MFA tuple, such as `{Mod, fun, []}`. + Both cases must resolve to a valid `order_by` expression. 
+ For example, if you set `Post.has_many :comments, preload_order: [asc: :content]`, + whenever the `:comments` associations is preloaded, + the comments will be ordered by the `:content` field. + See `Ecto.Query.order_by/3` to learn more about ordering expressions. + + ## Examples + + defmodule Post do + use Ecto.Schema + schema "posts" do + has_many :comments, Comment + end + end + + # Get all comments for a given post + post = Repo.get(Post, 42) + comments = Repo.all assoc(post, :comments) + + # The comments can come preloaded on the post struct + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments)) + post.comments #=> [%Comment{...}, ...] + + If using [EctoSQL](https://hexdocs.pm/ecto_sql), the foreign key should be + defined in the `comments` table, as shown in `belongs_to/3` examples. + You may also see the [Associations cheatsheet](associations.cheatmd) + for more examples. + + `has_many` can be used to define hierarchical relationships within a single + schema, for example threaded comments. + + defmodule Comment do + use Ecto.Schema + schema "comments" do + field :content, :string + field :parent_id, :integer + belongs_to :parent, Comment, foreign_key: :parent_id, references: :id, define_field: false + has_many :children, Comment, foreign_key: :parent_id, references: :id + end + end + + ## Filtering associations + + It is possible to specify a `:where` option that will filter the records + returned by the association. 
Querying, joining or preloading the association + will use the given conditions as shown next: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :public_comments, Comment, + where: [public: true] + end + end + + The `:where` option expects a keyword list where the key is an atom + representing the field and the value is either: + + * `nil` - which specifies the field must be nil + * `{:not, nil}` - which specifies the field must not be nil + * `{:in, list}` - which specifies the field must be one of the values in a list + * `{:fragment, expr}` - which specifies a fragment string as the filter + (see `Ecto.Query.API.fragment/1`) with the field's value given to it + as the only argument + * or any other value which the field is compared directly against + + Note the values above are distinctly different from the values you + would pass to `where` when building a query. For example, if you + attempt to build a query such as + + from Post, where: [id: nil] + + it will emit an error. This is because queries can be built dynamically, + and therefore passing `nil` can lead to security errors. However, the + `:where` values for an association are given at compile-time, which is + less dynamic and cannot leverage the full power of Ecto queries, which + explains why they have different APIs. + + **Important!** Please use this feature only when strictly necessary, + otherwise it is very easy to end-up with large schemas with dozens of + different associations polluting your schema and affecting your + application performance. For instance, if you are using associations + only for different querying purposes, then it is preferable to build + and compose queries. 
For instance, instead of having two associations, + one for comments and another for deleted comments, you might have + a single comments association and filter it instead: + + posts + |> Ecto.assoc(:comments) + |> Comment.deleted() + + Or when preloading: + + from posts, preload: [comments: ^Comment.deleted()] + + ## has_many/has_one :through + + Ecto also supports defining associations in terms of other associations + via the `:through` option. Let's see an example: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, Comment + has_one :permalink, Permalink + + # In the has_many :through example below, the `:comments` + # in the list [:comments, :author] refers to the + # `has_many :comments` in the Post own schema and the + # `:author` refers to the `belongs_to :author` of the + # Comment's schema (the module below). + # (see the description below for more details) + has_many :comments_authors, through: [:comments, :author] + + # Specify the association with custom source + has_many :tags, {"posts_tags", Tag} + end + end + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + belongs_to :author, Author + belongs_to :post, Post + has_one :post_permalink, through: [:post, :permalink] + end + end + + In the example above, we have defined a `has_many :through` association + named `:comments_authors`. A `:through` association always expects a list + and the first element of the list must be a previously defined association + in the current module. For example, `:comments_authors` first points to + `:comments` in the same module (Post), which then points to `:author` in + the next schema, `Comment`. + + This `:through` association will return all authors for all comments + that belongs to that post: + + # Get all comments authors for a given post + post = Repo.get(Post, 42) + authors = Repo.all assoc(post, :comments_authors) + + `:through` associations can also be preloaded. 
In such cases, not only + the `:through` association is preloaded but all intermediate steps are + preloaded too: + + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments_authors)) + post.comments_authors #=> [%Author{...}, ...] + + # The comments for each post will be preloaded too + post.comments #=> [%Comment{...}, ...] + + # And the author for each comment too + hd(post.comments).author #=> %Author{...} + + When the `:through` association is expected to return one or zero items, + `has_one :through` should be used instead, as in the example at the beginning + of this section: + + # How we defined the association above in Comments + has_one :post_permalink, through: [:post, :permalink] + + # Get a preloaded comment + [comment] = Repo.all(Comment) |> Repo.preload(:post_permalink) + comment.post_permalink #=> %Permalink{...} + + If possible, Ecto will avoid traversing intermediate associations in + queries. For example, in the example above, `Comment` has a `post_id` + column (defined by `belongs_to :post`) and it is expected for + `Permalink` to have the same. Therefore, when preloading the permalinks, + Ecto may avoid traversing the "posts" table altogether. Of course, this + assumes your database guarantees those references are valid, which can + be done by defining foreign key constraints and references your database + (often done via `EctoSQL` migrations). + + Note `:through` associations are read-only. For example, you cannot use + `Ecto.Changeset.cast_assoc/3` to modify through associations. + """ + defmacro has_many(name, schema, opts \\ []) do + schema = expand_literals(schema, __CALLER__) + opts = expand_literals(opts, __CALLER__) + + quote do + Ecto.Schema.__has_many__(__MODULE__, unquote(name), unquote(schema), unquote(opts)) + end + end + + @doc ~S""" + Indicates a one-to-one association with another schema. + + The current schema has zero or one records of the other schema. 
The other + schema often has a `belongs_to` field with the reverse association. + + ## Options + + * `:foreign_key` - Sets the foreign key, this should map to a field on the + other schema, defaults to the underscored name of the current module + suffixed by `_id` + + * `:references` - Sets the key on the current schema to be used for the + association, defaults to the primary key on the schema + + * `:through` - If this association must be defined in terms of existing + associations. Read the section in `has_many/3` for more information + + * `:on_delete` - The action taken on associations when parent record + is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`. + Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. + Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`) and it never cascades. If posts has many comments, + which has many tags, and you delete a post, only comments will be deleted. + If your database does not support references, cascading can be manually + implemented by using `Ecto.Multi` or `Ecto.Changeset.prepare_changes/2` + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or + `:delete`. See `Ecto.Changeset`'s section on related data for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or an `atom`/`{module, function, args}` that receives the association struct + and the owner struct as arguments. 
For example, if you set + `Post.has_one :banner, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :banner)`, the banner will have + `banner.public == true`. Alternatively, you can set it to + `Post.has_one :banner, defaults: :update_banner`, which will invoke + `Post.update_banner(banner, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(banner, post, arg3, arg4)` + + * `:where` - A filter for the association. When loading `has_one` associations, + Ecto emits a query with `LIMIT` set to one. If your association may return + multiple entries, you can use this option to guarantee it returns a single + unique result. See "Filtering associations" in `has_many/3`. It does not + apply to `:through` associations. + + ## Examples + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_one :permalink, Permalink + + # Specify the association with custom source + has_one :category, {"posts_categories", Category} + end + end + + # The permalink can come preloaded on the post struct + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink)) + post.permalink #=> %Permalink{...} + + If using [EctoSQL](https://hexdocs.pm/ecto_sql), a foreign key must be defined + in the `permalinks` and `categories` tables, as shown in `belongs_to/3` + examples. You may also see the [Associations cheatsheet](associations.cheatmd) + for more examples. + """ + defmacro has_one(name, schema, opts \\ []) do + schema = expand_literals(schema, __CALLER__) + + quote do + Ecto.Schema.__has_one__(__MODULE__, unquote(name), unquote(schema), unquote(opts)) + end + end + + @doc ~S""" + Indicates a one-to-one or many-to-one association with another schema. + + The current schema belongs to zero or one records of the other schema. The other + schema often has a `has_one` or a `has_many` field with the reverse association. + + You should use `belongs_to` in the table that contains the foreign key. 
Imagine + a company <-> employee relationship. If the employee contains the `company_id` in + the underlying database table, we say the employee belongs to company. + + In fact, when you invoke this macro, a field with the name of foreign key is + automatically defined in the schema for you. + + ## Options + + * `:foreign_key` - Sets the foreign key field name, defaults to the name + of the association suffixed by `_id`. For example, `belongs_to :company` + will define foreign key of `:company_id`. The associated `has_one` or `has_many` + field in the other schema should also have its `:foreign_key` option set + with the same value. + + * `:references` - Sets the key on the other schema to be used for the + association, defaults to: `:id` + + * `:define_field` - When false, does not automatically define a `:foreign_key` + field, implying the user is defining the field manually elsewhere + + * `:type` - Sets the type of automatically defined `:foreign_key`. + Defaults to: `:integer` and can be set per schema via `@foreign_key_type` + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or an `atom`/`{module, function, args}` that receives the association struct + and the owner struct as arguments. For example, if you set + `Comment.belongs_to :post, defaults: [public: true]`, + then when using `Ecto.build_assoc(comment, :post)`, the post will have + `post.public == true`. 
Alternatively, you can set it to + `Comment.belongs_to :post, defaults: :update_post`, which will invoke + `Comment.update_post(post, comment)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(post, comment, arg3, arg4)` + + * `:primary_key` - If the underlying belongs_to field is a primary key + + * `:source` - Defines the name that is to be used in database for this field + + * `:where` - A filter for the association. See "Filtering associations" + in `has_many/3`. + + ## Examples + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + belongs_to :post, Post + end + end + + # The post can come preloaded on the comment record + [comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post)) + comment.post #=> %Post{...} + + If you need custom options on the underlying field, you can define the + field explicitly and then pass `define_field: false` to `belongs_to`: + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + field :post_id, :integer, ... # custom options + belongs_to :post, Post, define_field: false + end + end + + If using [EctoSQL](https://hexdocs.pm/ecto_sql), the `comments` table + should have a `post_id` column that references the `posts` table. + In your migrations, this can be done as: + + add :post_id, + references(:posts, on_delete: :delete_all), + null: false + + See the [Associations cheatsheet](associations.cheatmd) for more examples. + + ## Polymorphic associations + + One common use case for belongs to associations is to handle + polymorphism. For example, imagine you have defined a Comment + schema and you wish to use it for commenting on both tasks and + posts. + + Some abstractions would force you to define some sort of + polymorphic association with two fields in your database: + + * commentable_type + * commentable_id + + The problem with this approach is that it breaks references in + the database. 
You can't use foreign keys and it is very inefficient, + both in terms of query time and storage. + + In Ecto, we have three ways to solve this issue. The simplest + is to define multiple fields in the Comment schema, one for each + association: + + * task_id + * post_id + + Unless you have dozens of columns, this is simpler for the developer, + more DB friendly and more efficient in all aspects. + + Alternatively, because Ecto does not tie a schema to a given table, + we can use separate tables for each association. Let's start over + and define a new Comment schema: + + defmodule Comment do + use Ecto.Schema + + schema "abstract table: comments" do + # This will be used by associations on each "concrete" table + field :assoc_id, :integer + end + end + + Notice we have changed the table name to "abstract table: comments". + You can choose whatever name you want, the point here is that this + particular table will never exist. + + Now in your Post and Task schemas: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, {"posts_comments", Comment}, foreign_key: :assoc_id + end + end + + defmodule Task do + use Ecto.Schema + + schema "tasks" do + has_many :comments, {"tasks_comments", Comment}, foreign_key: :assoc_id + end + end + + Now each association uses its own specific table, "posts_comments" + and "tasks_comments", which must be created on migrations. The + advantage of this approach is that we never store unrelated data + together, also ensuring we keep database references fast and correct. + + When using this technique, the only limitation is that you cannot + build comments directly. For example, the command below + + Repo.insert!(%Comment{}) + + will attempt to use the abstract table. Instead, one should use + + Repo.insert!(build_assoc(post, :comments)) + + leveraging the `Ecto.build_assoc/3` function. 
You can also + use `Ecto.assoc/2` or pass a tuple in the query syntax + to easily retrieve associated comments to a given post or + task: + + # Fetch all comments associated with the given task + Repo.all(Ecto.assoc(task, :comments)) + + Or all comments in a given table: + + Repo.all from(c in {"posts_comments", Comment}), ...) + + The third and final option is to use `many_to_many/3` to + define the relationships between the resources. In this case, + the `comments` table won't have the foreign key, instead there + is an intermediary table responsible for associating the entries: + + defmodule Comment do + use Ecto.Schema + schema "comments" do + # ... + end + end + + In your posts and tasks: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + many_to_many :comments, Comment, join_through: "posts_comments" + end + end + + defmodule Task do + use Ecto.Schema + + schema "tasks" do + many_to_many :comments, Comment, join_through: "tasks_comments" + end + end + + See `many_to_many/3` for more information on this particular approach. + """ + defmacro belongs_to(name, schema, opts \\ []) do + schema = expand_literals(schema, __CALLER__) + + quote do + Ecto.Schema.__belongs_to__(__MODULE__, unquote(name), unquote(schema), unquote(opts)) + end + end + + @doc ~S""" + Indicates a many-to-many association with another schema. + + The association happens through a join schema or source, containing + foreign keys to the associated schemas. For example, the association + below: + + # from MyApp.Post + many_to_many :tags, MyApp.Tag, join_through: "posts_tags" + + is backed by relational databases through a join table as follows: + + [Post] <-> [posts_tags] <-> [Tag] + id <-- post_id + tag_id --> id + + More information on the migration for creating such a schema is shown + below. + + ## Options + + * `:join_through` - Specifies the source of the associated data. 
+ It may be a string, like "posts_tags", representing the + underlying storage table or an atom, like `MyApp.PostTag`, + representing a schema. This option is required. + + * `:join_keys` - Specifies how the schemas are associated. It + expects a keyword list with two entries, the first being how + the join table should reach the current schema and the second + how the join table should reach the associated schema. In the + example above, it defaults to: `[post_id: :id, tag_id: :id]`. + The keys are inflected from the schema names. + + * `:on_delete` - The action taken on associations when the parent record + is deleted. May be `:nothing` (default) or `:delete_all`. + Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. + Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`). This option can only remove data from the + join source, never the associated records, and it never cascades. + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, or `:delete`. + `:delete` will only remove data from the join source, never the + associated records. See `Ecto.Changeset`'s section on related data + for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or an `atom`/`{module, function, args}` that receives the association struct + and the owner struct as arguments. For example, if you set + `Post.many_to_many :tags, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :tags)`, the tag will have + `tag.public == true`. 
Alternatively, you can set it to + `Post.many_to_many :tags, defaults: :update_tag`, which will invoke + `Post.update_tag(tag, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(tag, post, arg3, arg4)` + + * `:join_defaults` - The same as `:defaults` but it applies to the join schema + instead. This option will raise if it is given and the `:join_through` value + is not a schema. + + * `:unique` - When true, checks if the associated entries are unique + whenever the association is cast or changed via the parent record. + For instance, it would verify that a given tag cannot be attached to + the same post more than once. This exists mostly as a quick check + for user feedback, as it does not guarantee uniqueness at the database + level. Therefore, you should also set a unique index in the database + join table, such as: `create unique_index(:posts_tags, [:post_id, :tag_id])` + + * `:where` - A filter for the association. See "Filtering associations" + in `has_many/3` + + * `:join_where` - A filter for the join table. See "Filtering associations" + in `has_many/3` + + * `:preload_order` - Sets the default `order_by` when preloading the association. + It may be a keyword list/list of fields or an MFA tuple, such as `{Mod, fun, []}`. + Both cases must resolve to a valid `order_by` expression. See `Ecto.Query.order_by/3` + to learn more about ordering expressions. + See the [preload order](#many_to_many/3-preload-order) section below to learn how + this option can be utilized + + ## Using Ecto.assoc/2 + + One of the benefits of using `many_to_many` is that Ecto will avoid + loading the intermediate whenever possible, making your queries more + efficient. For this reason, developers should not refer to the join + table of `many_to_many` in queries. The join table is accessible in + few occasions, such as in `Ecto.assoc/2`. 
For example, if you do this: + + post + |> Ecto.assoc(:tags) + |> where([t, _pt, p], p.public == t.public) + + It may not work as expected because the `posts_tags` table may not be + included in the query. You can address this problem in multiple ways. + One option is to use `...`: + + post + |> Ecto.assoc(:tags) + |> where([t, ..., p], p.public == t.public) + + Another and preferred option is to rewrite to an explicit `join`, which + leaves out the intermediate bindings as they are resolved only later on: + + # keyword syntax + from t in Tag, + join: p in assoc(t, :post), on: p.id == ^post.id + + # pipe syntax + Tag + |> join(:inner, [t], p in assoc(t, :post), on: p.id == ^post.id) + + If you need to access the join table, then you likely want to use + `has_many/3` with the `:through` option instead. + + ## Removing data + + If you attempt to remove associated `many_to_many` data, **Ecto will + always remove data from the join schema and never from the target + associations** be it by setting `:on_replace` to `:delete`, `:on_delete` + to `:delete_all` or by using changeset functions such as + `Ecto.Changeset.put_assoc/3`. For example, if a `Post` has a many to many + relationship with `Tag`, setting `:on_delete` to `:delete_all` will + only delete entries from the "posts_tags" table in case `Post` is + deleted. + + ## Migration + + How your migration should be structured depends on the value you pass + in `:join_through`. 
If `:join_through` is simply a string, representing + a table, you may define a table without primary keys and you must not + include any further columns, as those values won't be set by Ecto: + + create table(:posts_tags, primary_key: false) do + add :post_id, references(:posts, on_delete: :delete_all), null: false + add :tag_id, references(:tags, on_delete: :delete_all), null: false + end + + However, if your `:join_through` is a schema, like `MyApp.PostTag`, your + join table may be structured as any other table in your codebase, + including timestamps: + + create table(:posts_tags) do + add :post_id, references(:posts, on_delete: :delete_all), null: false + add :tag_id, references(:tags, on_delete: :delete_all), null: false + timestamps() + end + + Because `:join_through` contains a schema, in such cases, autogenerated + values and primary keys will be automatically handled by Ecto. + + ## Preload Order + + The `:preload_order` option may be used to return the preloaded structs + in a deterministic order. It accepts either a compile-time keyword list/list + or an MFA tuple, such as `{Mod, fun, []}`. The MFA tuple will be used to + generate the `order_by` expression at runtime. + + When specifying a compile-time keyword list/list, the ordering applies to the + association's table and not the join table. Ordering by the join table can be + achieved by specifying an MFA tuple that utilizes `Ecto.Query.dynamic/2`. + + For example, say we have an association `Assoc` being joined through the table + `join_through`. The default preload query generated by Ecto is roughly: + + from a in Assoc, join: jt in "join_through", on: ... + + If `:preload_order` is given as `[asc: :field]` then the preload query will be + changed to the following: + + from a in Assoc, join: jt in "join_through", on: ..., order_by: [asc: a.field] + + Similarly, any compile-time keyword list/list will have its fields interpreted + as belonging to the association's table. 
To order by a field from the join table, + an MFA tuple can be specified that utilizes `Ecto.Query.dynamic/2`. + + For example, if `:preload_order` is given as `{Mod, fun, []}`, corresponding to + the following function: + + defmodule Mod do + def fun() do + [desc: dynamic([assoc, join], join.field)] + end + end + + then the preload query will be changed to the following: + + from a in Assoc, join: jt in "join_through", on: ..., order_by: [desc: jt.field] + + Note the ordering of the bindings. The join table always comes last. + + ## Examples + + defmodule Post do + use Ecto.Schema + schema "posts" do + many_to_many :tags, Tag, join_through: "posts_tags" + end + end + + # Let's create a post and a tag + post = Repo.insert!(%Post{}) + tag = Repo.insert!(%Tag{name: "introduction"}) + + # We can associate at any time post and tags together using changesets + post + |> Repo.preload(:tags) # Load existing data + |> Ecto.Changeset.change() # Build the changeset + |> Ecto.Changeset.put_assoc(:tags, [tag]) # Set the association + |> Repo.update! + + # In a later moment, we may get all tags for a given post + post = Repo.get(Post, 42) + tags = Repo.all(assoc(post, :tags)) + + # The tags may also be preloaded on the post struct for reading + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :tags)) + post.tags #=> [%Tag{...}, ...] + + ## Join Schema Example + + You may prefer to use a join schema to handle many_to_many associations. The + decoupled nature of Ecto allows us to create a "join" struct which + `belongs_to` both sides of the many to many association. 

  In our example, a `User` has and belongs to many `Organization`s:

      defmodule MyApp.Repo.Migrations.CreateUserOrganization do
        use Ecto.Migration

        def change do
          create table(:users_organizations) do
            add :user_id, references(:users)
            add :organization_id, references(:organizations)

            timestamps()
          end
        end
      end

      defmodule UserOrganization do
        use Ecto.Schema

        @primary_key false
        schema "users_organizations" do
          belongs_to :user, User
          belongs_to :organization, Organization
          timestamps() # Added bonus, a join schema will also allow you to set timestamps
        end

        def changeset(struct, params \\ %{}) do
          struct
          |> Ecto.Changeset.cast(params, [:user_id, :organization_id])
          |> Ecto.Changeset.validate_required([:user_id, :organization_id])
          # Maybe do some counter caching here!
        end
      end

      defmodule User do
        use Ecto.Schema

        schema "users" do
          many_to_many :organizations, Organization, join_through: UserOrganization
        end
      end

      defmodule Organization do
        use Ecto.Schema

        schema "organizations" do
          many_to_many :users, User, join_through: UserOrganization
        end
      end

  To create the association, pass in the IDs of an existing `User` and
  `Organization` to `UserOrganization.changeset/2`:

      changeset = UserOrganization.changeset(%UserOrganization{}, %{user_id: id, organization_id: id})

      case Repo.insert(changeset) do
        {:ok, assoc} -> # Assoc was created!
        {:error, changeset} -> # Handle the error
      end
  """
  defmacro many_to_many(name, schema, opts \\ []) do
    # Expand literal aliases at definition time so the generated
    # __many_to_many__/4 call receives resolved module names.
    schema = expand_literals(schema, __CALLER__)
    opts = expand_literals(opts, __CALLER__)

    quote do
      Ecto.Schema.__many_to_many__(__MODULE__, unquote(name), unquote(schema), unquote(opts))
    end
  end

  ## Embeds

  @doc ~S"""
  Indicates an embedding of a schema.

  The current schema has zero or one records of the other schema embedded
  inside of it.
It uses a field similar to the `:map` type for storage, + but allows embeds to have all the things regular schema can. + + You must declare your `embeds_one/3` field with type `:map` at the + database level. + + The embedded may or may not have a primary key. Ecto uses the primary keys + to detect if an embed is being updated or not. If a primary key is not present, + `:on_replace` should be set to either `:update` or `:delete` if there is a + desire to either update or delete the current embed when a new one is set. + + ## Options + + * `:primary_key` - The `:primary_key` option can be used with the same arguments + as `@primary_key` (see the [Schema attributes](#module-schema-attributes) + section for more info). Primary keys are automatically set up for embedded + schemas as well, defaulting to `{:id, :binary_id, autogenerate: true}`. + Note `:primary_key`s are not automatically read back on `insert/2`, + unless one of `autogenerate: true` or `read_after_writes: true` is set. + + * `:on_replace` - The action taken on associations when the embed is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:update`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:source` - Defines the name that is to be used in database for this field. + This is useful when attaching to an existing database. The value should be + an atom. + + * `:load_in_query` - When false, the field will not be loaded when + selecting the whole struct in a query, such as `from p in Post, select: p`. + Defaults to `true`. + + * `:defaults_to_struct` - When true, the field will default to the initialized + struct instead of nil, the same you would get from something like `%Order.Item{}`. + One important thing is that if the underlying data is explicitly nil when loading + the schema, it will still be loaded as nil, similar to how `:default` works in fields. + Defaults to `false`. 

  ## Examples

      defmodule Order do
        use Ecto.Schema

        schema "orders" do
          embeds_one :item, Item
        end
      end

      defmodule Item do
        use Ecto.Schema

        embedded_schema do
          field :title
        end
      end

      # The item is loaded with the order
      order = Repo.get!(Order, 42)
      order.item #=> %Item{...}

  Adding and removal of embeds can only be done via the `Ecto.Changeset`
  API so Ecto can properly track the embed life-cycle:

      order = Repo.get!(Order, 42)
      item = %Item{title: "Soap"}

      # Generate a changeset
      changeset = Ecto.Changeset.change(order)

      # Put a new embed to the changeset
      changeset = Ecto.Changeset.put_embed(changeset, :item, item)

      # Update the order, and fetch the item
      item = Repo.update!(changeset).item

      # Item is generated with a unique identification
      item
      # => %Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}

  ## Inline embedded schema

  The schema module can be defined inline in the parent schema in simple
  cases:

      defmodule Parent do
        use Ecto.Schema

        schema "parents" do
          field :name, :string

          embeds_one :child, Child do
            field :name, :string
            field :age, :integer
          end
        end
      end

  Options should be passed before the `do` block like this:

      embeds_one :child, Child, on_replace: :delete, primary_key: false do
        field :name, :string
        field :age, :integer
      end

  Defining an embedded schema in such a way will define a `Parent.Child` module
  with the appropriate struct. In order to properly cast the inline-defined
  embedded schema, you need to use the `:with` option of
  `Ecto.Changeset.cast_embed/3` to provide the proper function to do the casting.
  For example:

      def changeset(schema, params) do
        schema
        |> cast(params, [:name])
        |> cast_embed(:child, with: &child_changeset/2)
      end

      defp child_changeset(schema, params) do
        schema
        |> cast(params, [:name, :age])
      end

  ## Encoding and decoding

  Because many databases do not support direct encoding and decoding
  of embeds, it is often emulated by Ecto by using specific encoding
  and decoding rules.

  For example, PostgreSQL will store embeds on top of JSONB columns,
  which means types in embedded schemas won't go through the usual
  dump->DB->load cycle but rather encode->DB->decode->cast. This means
  that, when using embedded schemas with databases like PG or MySQL,
  make sure all of your types can be JSON encoded/decoded correctly.
  Ecto provides this guarantee for all built-in types.

  When decoding, if a key exists in the database not defined in the
  schema, it'll be ignored. If a field exists in the schema that's not
  in the database, its value will be `nil`.
  """
  defmacro embeds_one(name, schema, opts \\ [])

  # `do` block given without options: delegate to embeds_one/4 with empty opts.
  defmacro embeds_one(name, schema, do: block) do
    quote do
      embeds_one(unquote(name), unquote(schema), [], do: unquote(block))
    end
  end

  defmacro embeds_one(name, schema, opts) do
    # Expand literal aliases at definition time before code generation.
    schema = expand_literals(schema, __CALLER__)

    quote do
      Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), unquote(schema), unquote(opts))
    end
  end

  @doc """
  Indicates an embedding of a schema.

  For options and examples see documentation of `embeds_one/3`.
  """
  defmacro embeds_one(name, schema, opts, do: block) do
    schema = expand_nested_module_alias(schema, __CALLER__)

    quote do
      # __embeds_module__/4 creates the inline child schema module
      # from the given block, then the embed is registered as usual.
      {schema, opts} =
        Ecto.Schema.__embeds_module__(
          __ENV__,
          unquote(schema),
          unquote(opts),
          unquote(Macro.escape(block))
        )

      Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), schema, opts)
    end
  end

  @doc ~S"""
  Indicates an embedding of many schemas.
+ + The current schema has zero or more records of the other schema embedded + inside of it. Embeds have all the things regular schemas have. + + It is recommended to declare your `embeds_many/3` field with type `:map` + in your migrations, instead of using `{:array, :map}`. Ecto can work with + both maps and arrays as the container for embeds (and in most databases + maps are represented as JSON which allows Ecto to choose what works best). + + The embedded may or may not have a primary key. Ecto uses the primary keys + to detect if an embed is being updated or not. If a primary key is not + present and you still want the list of embeds to be updated, `:on_replace` + must be set to `:delete`, forcing all current embeds to be deleted and + replaced by new ones whenever a new list of embeds is set. + + For encoding and decoding of embeds, please read the docs for + `embeds_one/3`. + + ## Options + + * `:on_replace` - The action taken on associations when the embed is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:source` - Defines the name that is to be used in database for this field. + This is useful when attaching to an existing database. The value should be + an atom. + + * `:load_in_query` - When false, the field will not be loaded when + selecting the whole struct in a query, such as `from p in Post, select: p`. + Defaults to `true`. + + ## Examples + + defmodule Order do + use Ecto.Schema + + schema "orders" do + embeds_many :items, Item + end + end + + defmodule Item do + use Ecto.Schema + + embedded_schema do + field :title + end + end + + # The items are loaded with the order + order = Repo.get!(Order, 42) + order.items #=> [%Item{...}, ...] 

  Adding and removal of embeds can only be done via the `Ecto.Changeset`
  API so Ecto can properly track the embed life-cycle:

      # Order has no items
      order = Repo.get!(Order, 42)
      order.items
      # => []

      items = [%Item{title: "Soap"}]

      # Generate a changeset
      changeset = Ecto.Changeset.change(order)

      # Put one or more new items
      changeset = Ecto.Changeset.put_embed(changeset, :items, items)

      # Update the order and fetch items
      items = Repo.update!(changeset).items

      # Items are generated with a unique identification
      items
      # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}]

  Updating of embeds must be done using a changeset for each changed embed.

      # Order has existing items
      order = Repo.get!(Order, 42)
      order.items
      # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}]

      # Generate a changeset
      changeset = Ecto.Changeset.change(order)

      # Put the updated item as a changeset
      current_item = List.first(order.items)
      item_changeset = Ecto.Changeset.change(current_item, title: "Mujju's Soap")
      order_changeset = Ecto.Changeset.put_embed(changeset, :items, [item_changeset])

      # Update the order and fetch items
      items = Repo.update!(order_changeset).items

      # Item has the updated title
      items
      # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Mujju's Soap"}]

  ## Inline embedded schema

  The schema module can be defined inline in the parent schema in simple
  cases:

      defmodule Parent do
        use Ecto.Schema

        schema "parents" do
          field :name, :string

          embeds_many :children, Child do
            field :name, :string
            field :age, :integer
          end
        end
      end

  Primary keys are automatically set up for embedded schemas as well,
  defaulting to `{:id, :binary_id, autogenerate: true}`.
  You can
  customize it by passing a `:primary_key` option with the same arguments
  as `@primary_key` (see the [Schema attributes](https://hexdocs.pm/ecto/Ecto.Schema.html#module-schema-attributes)
  section for more info).

  Defining an embedded schema in such a way will define a `Parent.Child` module
  with the appropriate struct. In order to properly cast the inline-defined
  embedded schemas, you need to use the `:with` option of `cast_embed/3`
  to provide the proper function to do the casting.
  For example:

      def changeset(schema, params) do
        schema
        |> cast(params, [:name])
        |> cast_embed(:children, with: &child_changeset/2)
      end

      defp child_changeset(schema, params) do
        schema
        |> cast(params, [:name, :age])
      end

  """
  defmacro embeds_many(name, schema, opts \\ [])

  # `do` block given without options: delegate to embeds_many/4 with empty opts.
  defmacro embeds_many(name, schema, do: block) do
    quote do
      embeds_many(unquote(name), unquote(schema), [], do: unquote(block))
    end
  end

  defmacro embeds_many(name, schema, opts) do
    # Expand literal aliases at definition time before code generation.
    schema = expand_literals(schema, __CALLER__)

    quote do
      Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), unquote(schema), unquote(opts))
    end
  end

  @doc """
  Indicates an embedding of many schemas.

  For options and examples see documentation of `embeds_many/3`.
  """
  defmacro embeds_many(name, schema, opts, do: block) do
    schema = expand_nested_module_alias(schema, __CALLER__)

    quote do
      # __embeds_module__/4 creates the inline child schema module
      # from the given block, then the embed is registered as usual.
      {schema, opts} =
        Ecto.Schema.__embeds_module__(
          __ENV__,
          unquote(schema),
          unquote(opts),
          unquote(Macro.escape(block))
        )

      Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), schema, opts)
    end
  end

  # Internal function for integrating associations into schemas.
  #
  # This function exists as an extension point for libraries to
  # experiment new types of associations to Ecto, although it may
  # break at any time (as with any of the association callbacks).
  #
  # This function expects the current schema, the association cardinality,
  # the association name, the association module (that implements
  # `Ecto.Association` callbacks) and a keyword list of options.
  @doc false
  @spec association(module, :one | :many, atom(), module, Keyword.t()) :: Ecto.Association.t()
  def association(schema, cardinality, name, association, opts) do
    # The struct field is initialized with a NotLoaded placeholder;
    # the association reflection itself is accumulated in :ecto_assocs.
    not_loaded = %Ecto.Association.NotLoaded{
      __owner__: schema,
      __field__: name,
      __cardinality__: cardinality
    }

    put_struct_field(schema, name, not_loaded)
    opts = [cardinality: cardinality] ++ opts
    struct = association.struct(schema, name, opts)
    Module.put_attribute(schema, :ecto_assocs, {name, struct})
    struct
  end

  ## Callbacks

  # Timestamp generators. The non-usec variants zero out the
  # microsecond component to get second precision.
  @doc false
  def __timestamps__(:naive_datetime) do
    %{NaiveDateTime.utc_now() | microsecond: {0, 0}}
  end

  def __timestamps__(:naive_datetime_usec) do
    NaiveDateTime.utc_now()
  end

  def __timestamps__(:utc_datetime) do
    %{DateTime.utc_now() | microsecond: {0, 0}}
  end

  def __timestamps__(:utc_datetime_usec) do
    DateTime.utc_now()
  end

  # Any other type is expected to build its value from a unix
  # timestamp in microseconds.
  def __timestamps__(type) do
    type.from_unix!(System.os_time(:microsecond), :microsecond)
  end

  # Validates the field type and options, then registers the field.
  @doc false
  def __field__(mod, name, type, opts) do
    # Check the field type before we check options because it is
    # better to raise unknown type first than unsupported option.
    type = check_field_type!(mod, name, type, opts)

    if type == :any && !opts[:virtual] do
      raise ArgumentError,
            "only virtual fields can have type :any, " <>
              "invalid type for field #{inspect(name)}"
    end

    check_options!(type, opts, @field_opts, "field/3")
    Module.put_attribute(mod, :ecto_changeset_fields, {name, type})
    validate_default!(type, opts[:default], opts[:skip_default_validation])
    define_field(mod, name, type, opts)
  end

  # Shared field registration used by __field__/4, __belongs_to__/4
  # (for the foreign key) and embed/5. Accumulates the field in the
  # relevant module attributes depending on the given options.
  defp define_field(mod, name, type, opts) do
    virtual? = opts[:virtual] || false
    pk? = opts[:primary_key] || false
    writable = opts[:writable] || :always
    put_struct_field(mod, name, Keyword.get(opts, :default))

    redact_field? =
      Keyword.get_lazy(opts, :redact, fn ->
        case Module.get_attribute(mod, :schema_redact, false) do
          :all_except_primary_keys -> not pk?
          false -> false
        end
      end)

    if redact_field? do
      Module.put_attribute(mod, :ecto_redact_fields, name)
    end

    if virtual? do
      Module.put_attribute(mod, :ecto_virtual_fields, {name, type})
    else
      source =
        opts[:source] ||
          Module.get_attribute(mod, :field_source_mapper, &Function.identity/1).(name)

      if not is_atom(source) do
        raise ArgumentError,
              "the :source for field `#{name}` must be an atom, got: #{inspect(source)}"
      end

      if name != source do
        Module.put_attribute(mod, :ecto_field_sources, {name, source})
      end

      if raw = opts[:read_after_writes] do
        Module.put_attribute(mod, :ecto_raw, name)
      end

      case gen = opts[:autogenerate] do
        {_, _, _} ->
          store_mfa_autogenerate!(mod, name, type, gen)

        true ->
          store_type_autogenerate!(mod, name, source || name, type, pk?)

        _ ->
          :ok
      end

      if raw && gen do
        raise ArgumentError, "cannot mark the same field as autogenerate and read_after_writes"
      end

      if writable != :always && gen do
        raise ArgumentError, "autogenerated fields must always be writable"
      end

      if pk? do
        Module.put_attribute(mod, :ecto_primary_keys, name)
      end

      if Keyword.get(opts, :load_in_query, true) do
        Module.put_attribute(mod, :ecto_query_fields, {name, type})
      end

      Module.put_attribute(mod, :ecto_fields, {name, {type, writable}})
    end
  end

  # Defines the inserted_at/updated_at fields (unless disabled) and
  # registers their autogenerate/autoupdate callbacks.
  @doc false
  def __define_timestamps__(mod, opts) do
    timestamps = Keyword.merge(Module.get_attribute(mod, :timestamps_opts, []), opts)
    type = Keyword.get(timestamps, :type, :naive_datetime)
    autogen = timestamps[:autogenerate] || {Ecto.Schema, :__timestamps__, [type]}

    inserted_at = Keyword.get(timestamps, :inserted_at, :inserted_at)
    updated_at = Keyword.get(timestamps, :updated_at, :updated_at)

    if inserted_at do
      opts = if source = timestamps[:inserted_at_source], do: [source: source], else: []
      Ecto.Schema.__field__(mod, inserted_at, type, opts)
    end

    if updated_at do
      opts = if source = timestamps[:updated_at_source], do: [source: source], else: []
      Ecto.Schema.__field__(mod, updated_at, type, opts)
      Module.put_attribute(mod, :ecto_autoupdate, {[updated_at], autogen})
    end

    with [_ | _] = fields <- Enum.filter([inserted_at, updated_at], & &1) do
      Module.put_attribute(mod, :ecto_autogenerate, {fields, autogen})
    end

    :ok
  end

  @valid_has_options [
    :foreign_key,
    :references,
    :through,
    :on_delete,
    :defaults,
    :on_replace,
    :where,
    :preload_order
  ]

  @doc false
  def __has_many__(mod, name, queryable, opts) do
    # A keyword list with :through defines a has-through association;
    # otherwise a regular has association backed by a queryable.
    if is_list(queryable) and Keyword.has_key?(queryable, :through) do
      check_options!(queryable, @valid_has_options, "has_many/3")
      association(mod, :many, name, Ecto.Association.HasThrough, queryable)
    else
      check_options!(opts, @valid_has_options, "has_many/3")
      struct = association(mod, :many, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
      Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}})
    end
  end

  @doc false
  def __has_one__(mod, name, queryable, opts) do
    # Same shape as __has_many__/4 but with :one cardinality.
    if is_list(queryable) and Keyword.has_key?(queryable, :through) do
      check_options!(queryable, @valid_has_options, "has_one/3")
      association(mod, :one, name, Ecto.Association.HasThrough, queryable)
    else
      check_options!(opts, @valid_has_options, "has_one/3")
      struct = association(mod, :one, name, Ecto.Association.Has, [queryable: queryable] ++ opts)
      Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}})
    end
  end

  # :primary_key is valid here to support associative entity
  # https://en.wikipedia.org/wiki/Associative_entity
  @valid_belongs_to_options [
    :foreign_key,
    :references,
    :define_field,
    :type,
    :on_replace,
    :defaults,
    :primary_key,
    :source,
    :where
  ]

  @doc false
  def __belongs_to__(mod, name, queryable, opts) do
    # The foreign key defaults to `<name>_id` and, unless
    # `define_field: false` is given, is defined as a regular field.
    opts = Keyword.put_new(opts, :foreign_key, :"#{name}_id")

    foreign_key_name = opts[:foreign_key]
    foreign_key_type = opts[:type] || Module.get_attribute(mod, :foreign_key_type, :id)
    foreign_key_type = check_field_type!(mod, name, foreign_key_type, opts)
    check_options!(foreign_key_type, opts, @valid_belongs_to_options, "belongs_to/3")

    if foreign_key_name == name do
      raise ArgumentError,
            "foreign_key #{inspect(name)} must be distinct from corresponding association name"
    end

    if Keyword.get(opts, :define_field, true) do
      Module.put_attribute(mod, :ecto_changeset_fields, {foreign_key_name, foreign_key_type})
      define_field(mod, foreign_key_name, foreign_key_type, opts)
    end

    struct =
      association(mod, :one, name, Ecto.Association.BelongsTo, [queryable: queryable] ++ opts)

    Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}})
  end

  @valid_many_to_many_options [
    :join_through,
    :join_defaults,
    :join_keys,
    :on_delete,
    :defaults,
    :on_replace,
    :unique,
    :where,
    :join_where,
    :preload_order
  ]

  @doc false
  def __many_to_many__(mod, name, queryable, opts) do
    check_options!(opts, @valid_many_to_many_options, "many_to_many/3")

    struct =
      association(mod, :many, name, Ecto.Association.ManyToMany, [queryable: queryable] ++ opts)

    Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}})
  end

  @valid_embeds_one_options [:on_replace, :source, :load_in_query, :defaults_to_struct]

  @doc false
  def __embeds_one__(mod, name, schema, opts) when is_atom(schema) do
    check_options!(opts, @valid_embeds_one_options, "embeds_one/3")

    # :defaults_to_struct translates into defaulting the field to the
    # embedded schema's loaded struct instead of nil.
    opts =
      if Keyword.get(opts, :defaults_to_struct) do
        Keyword.put(opts, :default, schema.__schema__(:loaded))
      else
        opts
      end

    embed(mod, :one, name, schema, opts)
  end

  def __embeds_one__(_mod, _name, schema, _opts) do
    raise ArgumentError,
          "`embeds_one/3` expects `schema` to be a module name, but received #{inspect(schema)}"
  end

  @valid_embeds_many_options [:on_replace, :source, :load_in_query]

  @doc false
  def __embeds_many__(mod, name, schema, opts) when is_atom(schema) do
    check_options!(opts, @valid_embeds_many_options, "embeds_many/3")
    opts = Keyword.put(opts, :default, [])
    embed(mod, :many, name, schema, opts)
  end

  def __embeds_many__(_mod, _name, schema, _opts) do
    raise ArgumentError,
          "`embeds_many/3` expects `schema` to be a module name, but received #{inspect(schema)}"
  end

  # Creates the module for an inline embedded schema (embeds_one/4 and
  # embeds_many/4 `do` blocks), wrapping the block in embedded_schema/1.
  @doc false
  def __embeds_module__(env, module, opts, block) do
    {pk, opts} = Keyword.pop(opts, :primary_key, {:id, :binary_id, autogenerate: true})

    block =
      quote do
        use Ecto.Schema

        @primary_key unquote(Macro.escape(pk))
        embedded_schema do
          unquote(block)
        end
      end

    Module.create(module, block, env)
    {module, opts}
  end

  ## Quoted callbacks

  @doc false
  def __after_verify__(module) do
    # If we are compiling code, we can validate associations now,
    # as the Elixir compiler will solve dependencies.
    for name <- module.__schema__(:associations) do
      assoc = module.__schema__(:association, name)

      case assoc.__struct__.after_verify_validation(assoc) do
        :ok ->
          :ok

        {:error, message} ->
          IO.warn(
            "invalid association `#{assoc.field}` in schema #{inspect(module)}: #{message}",
            module: module,
            file: to_string(module.__info__(:compile)[:source] || "nofile")
          )
      end
    end

    :ok
  end

  # Sets up schema compilation: guards against a second schema in the
  # same module, registers the accumulating attributes, optionally adds
  # the __meta__ field (for non-embedded schemas), defines the primary
  # key field and returns the schema prefix.
  @doc false
  def __schema__(module, line, source, meta?, type) do
    if previous_line = Module.get_attribute(module, :ecto_schema_defined) do
      raise "schema already defined for #{inspect(module)} on line #{previous_line}"
    end

    Module.put_attribute(module, :ecto_schema_defined, line)

    if Code.can_await_module_compilation?() do
      Module.put_attribute(module, :after_verify, Ecto.Schema)
    end

    Module.register_attribute(module, :ecto_changeset_fields, accumulate: true)
    Module.register_attribute(module, :ecto_struct_fields, accumulate: true)

    # Those module attributes are accessed only dynamically
    # so we explicitly reference them here to avoid warnings.
    Module.get_attribute(module, :foreign_key_type)
    Module.get_attribute(module, :timestamps_opts)

    prefix = Module.get_attribute(module, :schema_prefix)
    context = Module.get_attribute(module, :schema_context)

    if meta? do
      unless is_binary(source) do
        raise ArgumentError, "schema source must be a string, got: #{inspect(source)}"
      end

      meta = %Metadata{
        state: :built,
        source: source,
        prefix: prefix,
        context: context,
        schema: module
      }

      Module.put_attribute(module, :ecto_struct_fields, {:__meta__, meta})
    end

    if Module.get_attribute(module, :primary_key) == nil do
      Module.put_attribute(module, :primary_key, {:id, type, autogenerate: true})
    end

    case Module.get_attribute(module, :primary_key) do
      false ->
        []

      {name, type, opts} ->
        Ecto.Schema.__field__(module, name, type, [primary_key: true] ++ opts)
        [name]

      _other ->
        raise ArgumentError, "@primary_key must be false or {name, type, opts}"
    end

    prefix
  end

  # Collects everything accumulated while the schema body executed and
  # returns the struct fields plus the "bags of clauses" from which the
  # __schema__ reflection functions are generated.
  @doc false
  def __schema__(module) do
    fields = Module.get_attribute(module, :ecto_fields) |> Enum.reverse()
    field_sources = Module.get_attribute(module, :ecto_field_sources) |> Enum.reverse()
    assocs = Module.get_attribute(module, :ecto_assocs) |> Enum.reverse()
    embeds = Module.get_attribute(module, :ecto_embeds) |> Enum.reverse()
    virtual_fields = Module.get_attribute(module, :ecto_virtual_fields) |> Enum.reverse()
    redacted_fields = Module.get_attribute(module, :ecto_redact_fields)
    primary_key_fields = Module.get_attribute(module, :ecto_primary_keys) |> Enum.reverse()
    query_fields = Module.get_attribute(module, :ecto_query_fields) |> Enum.reverse()
    autogenerate = Module.get_attribute(module, :ecto_autogenerate) |> Enum.reverse()
    autoupdate = Module.get_attribute(module, :ecto_autoupdate) |> Enum.reverse()
    read_after_writes = Module.get_attribute(module, :ecto_raw) |> Enum.reverse()
    autogenerate_id = Module.get_attribute(module, :ecto_autogenerate_id)

    struct_fields = Module.get_attribute(module, :ecto_struct_fields) |> Enum.reverse()
    derive = Module.get_attribute(module, :derive)

    # Derive Inspect excluding redacted fields, unless the user already
    # derived Inspect themselves or opted out of the behaviour.
    if redacted_fields != [] and not List.keymember?(derive, Inspect, 0) and
         derive_inspect?(module) do
      Module.put_attribute(module, :derive, {Inspect, except: redacted_fields})
    end

    loaded =
      case Map.new([{:__struct__, module} | struct_fields]) do
        %{__meta__: meta} = struct -> %{struct | __meta__: Map.put(meta, :state, :loaded)}
        struct -> struct
      end

    load =
      for {name, {type, _writable}} <- fields do
        if alias = field_sources[name] do
          {name, {:source, alias, type}}
        else
          {name, type}
        end
      end

    dump =
      for {name, {type, writable}} <- fields do
        {name, {field_sources[name] || name, type, writable}}
      end

    field_sources_quoted =
      for {name, {_type, _writable}} <- fields do
        {[:field_source, name], field_sources[name] || name}
      end

    types_quoted =
      for {name, {type, _writable}} <- fields do
        {[:type, name], Macro.escape(type)}
      end

    virtual_types_quoted =
      for {name, type} <- virtual_fields do
        {[:virtual_type, name], Macro.escape(type)}
      end

    assoc_quoted =
      for {name, refl} <- assocs do
        {[:association, name], Macro.escape(refl)}
      end

    assoc_names = Enum.map(assocs, &elem(&1, 0))

    embed_quoted =
      for {name, refl} <- embeds do
        {[:embed, name], Macro.escape(refl)}
      end

    embed_names = Enum.map(embeds, &elem(&1, 0))

    updatable =
      for {name, {_, writable}} <- fields, reduce: {[], []} do
        {keep, drop} ->
          case writable do
            :always -> {[name | keep], drop}
            _ -> {keep, [name | drop]}
          end
      end

    insertable =
      for {name, {_, writable}} <- fields, reduce: {[], []} do
        {keep, drop} ->
          case writable do
            :never -> {keep, [name | drop]}
            _ -> {[name | keep], drop}
          end
      end

    single_arg = [
      {[:dump], dump |> Map.new() |> Macro.escape()},
      {[:load], load |> Macro.escape()},
      {[:associations], assoc_names},
      {[:embeds], embed_names},
      {[:updatable_fields], updatable},
      {[:insertable_fields], insertable},
      {[:redact_fields], redacted_fields},
      {[:autogenerate_fields], Enum.flat_map(autogenerate, &elem(&1, 0))},
      {[:virtual_fields], Enum.map(virtual_fields, &elem(&1, 0))},
      {[:fields], Enum.map(fields, &elem(&1, 0))},
      {[:query_fields], Enum.map(query_fields, &elem(&1, 0))},
      {[:primary_key], primary_key_fields},
      {[:hash], :erlang.phash2({primary_key_fields, query_fields})},
      {[:read_after_writes], read_after_writes},
      {[:autogenerate_id], Macro.escape(autogenerate_id)},
      {[:autogenerate], Macro.escape(autogenerate)},
      {[:autoupdate], Macro.escape(autoupdate)},
      {[:loaded], Macro.escape(loaded)}
    ]

    catch_all = [
      {[:field_source, quote(do: _)], nil},
      {[:type, quote(do: _)], nil},
      {[:virtual_type, quote(do: _)], nil},
      {[:association, quote(do: _)], nil},
      {[:embed, quote(do: _)], nil}
    ]

    bags_of_clauses =
      [
        single_arg,
        field_sources_quoted,
        types_quoted,
        virtual_types_quoted,
        assoc_quoted,
        embed_quoted,
        catch_all
      ]

    {struct_fields, bags_of_clauses}
  end

  defp derive_inspect?(module) do
    case Module.get_attribute(module, :ecto_derive_inspect_for_redacted_fields) do
      false ->
        IO.warn(
          "@ecto_derive_inspect_for_redacted_fields is deprecated, set @derive_inspect_for_redacted_fields instead"
        )

        false

      _ ->
        Module.get_attribute(module, :derive_inspect_for_redacted_fields, true)
    end
  end

  ## Private

  # Registers an embed: accumulates the Ecto.Embedded reflection and
  # defines the backing field as a parameterized Ecto.Embedded type.
  defp embed(mod, cardinality, name, schema, opts) do
    opts = [cardinality: cardinality, related: schema, owner: mod, field: name] ++ opts
    struct = Ecto.Embedded.init(opts)

    Module.put_attribute(mod, :ecto_changeset_fields, {name, {:embed, struct}})
    Module.put_attribute(mod, :ecto_embeds, {name, struct})
    define_field(mod, name, {:parameterized, {Ecto.Embedded, struct}}, opts)
  end

  defp put_struct_field(mod, name, assoc) do
    fields = Module.get_attribute(mod, :ecto_struct_fields)

    if List.keyfind(fields, name, 0) do
      raise ArgumentError,
            "field/association #{inspect(name)} already exists on schema, you must either remove the duplication or choose a different name"
    end

    Module.put_attribute(mod, :ecto_struct_fields, {name, assoc})
  end

  # Default values must be dumpable by the field type, unless the
  # check is explicitly skipped via :skip_default_validation.
  defp validate_default!(_type, _value, true), do: :ok

  defp validate_default!(type, value, _skip) do
    case Ecto.Type.dump(type, value) do
      {:ok, _} ->
        :ok

      _ ->
        raise ArgumentError,
              "value #{inspect(value)} is invalid for type #{Ecto.Type.format(type)}, can't set default"
    end
  end

  defp check_options!(opts, valid, fun_arity) do
    case Enum.find(opts, fn {k, _} -> k not in valid end) do
      {k, _} -> raise ArgumentError, "invalid option #{inspect(k)} for #{fun_arity}"
      nil -> :ok
    end
  end

  # 4-arity variants dispatch on the (possibly composite) type first:
  # parameterized types skip validation, composite types recurse on
  # their inner type, anything else validates the options directly.
  defp check_options!({:parameterized, _}, _opts, _valid, _fun_arity) do
    :ok
  end

  defp check_options!({_, type}, opts, valid, fun_arity) do
    check_options!(type, opts, valid, fun_arity)
  end

  defp check_options!(_type, opts, valid, fun_arity) do
    check_options!(opts, valid, fun_arity)
  end

  defp check_field_type!(_mod, name, :datetime, _opts) do
    raise ArgumentError,
          "invalid type :datetime for field #{inspect(name)}. " <>
            "You probably meant to choose one between :naive_datetime " <>
            "(no time zone information) or :utc_datetime (time zone is set to UTC)"
  end

  defp check_field_type!(mod, name, type, opts) do
    cond do
      composite?(type, name) ->
        {outer_type, inner_type} = type
        {outer_type, check_field_type!(mod, name, inner_type, opts)}

      not is_atom(type) ->
        raise ArgumentError, "invalid type #{Ecto.Type.format(type)} for field #{inspect(name)}"

      Ecto.Type.base?(type) ->
        type

      Code.ensure_compiled(type) == {:module, type} ->
        cond do
          function_exported?(type, :type, 0) ->
            type

          function_exported?(type, :type, 1) ->
            Ecto.ParameterizedType.init(type, Keyword.merge(opts, field: name, schema: mod))

          function_exported?(type, :__schema__, 1) ->
            raise ArgumentError,
                  "schema #{inspect(type)} is not a valid type for field #{inspect(name)}." <>
                    " Did you mean to use belongs_to, has_one, has_many, embeds_one, or embeds_many instead?"

          true ->
            raise ArgumentError,
                  "module #{inspect(type)} given as type for field #{inspect(name)} is not an Ecto.Type/Ecto.ParameterizedType"
        end

      true ->
        raise ArgumentError, "unknown type #{inspect(type)} for field #{inspect(name)}"
    end
  end

  defp composite?({composite, _} = type, name) do
    if Ecto.Type.composite?(composite) do
      true
    else
      raise ArgumentError,
            "invalid or unknown composite #{inspect(type)} for field #{inspect(name)}. " <>
              "Did you mean to use :array or :map as first element of the tuple instead?"
    end
  end

  defp composite?(_type, _name), do: false

  defp store_mfa_autogenerate!(mod, name, type, mfa) do
    if autogenerate_id?(type) do
      raise ArgumentError, ":autogenerate with {m, f, a} not supported by ID types"
    end

    Module.put_attribute(mod, :ecto_autogenerate, {[name], mfa})
  end

  defp store_type_autogenerate!(mod, name, source, {:parameterized, typemod_params} = type, pk?) do
    {typemod, params} = typemod_params

    cond do
      store_autogenerate_id!(mod, name, source, type, pk?) ->
        :ok

      not function_exported?(typemod, :autogenerate, 1) ->
        raise ArgumentError,
              "field #{inspect(name)} does not support :autogenerate because it uses a " <>
                "parameterized type #{Ecto.Type.format(type)} that does not define autogenerate/1"

      true ->
        Module.put_attribute(
          mod,
          :ecto_autogenerate,
          {[name], {typemod, :autogenerate, [params]}}
        )
    end
  end

  defp store_type_autogenerate!(mod, name, source, type, pk?) do
    cond do
      store_autogenerate_id!(mod, name, source, type, pk?) ->
        :ok

      Ecto.Type.primitive?(type) ->
        raise ArgumentError,
              "field #{inspect(name)} does not support :autogenerate because it uses a " <>
                "primitive type #{Ecto.Type.format(type)}"

      # Note the custom type has already been loaded in check_type!/3
      not function_exported?(type, :autogenerate, 0) ->
        raise ArgumentError,
              "field #{inspect(name)} does not support :autogenerate because it uses a " <>
                "custom type #{Ecto.Type.format(type)} that does not define autogenerate/0"

      true ->
        Module.put_attribute(mod, :ecto_autogenerate, {[name], {type, :autogenerate, []}})
    end
  end

  # Returns true when the field is an ID-typed autogenerated primary key
  # (stored in :ecto_autogenerate_id); raises on invalid configurations.
  defp store_autogenerate_id!(mod, name, source, type, pk?) do
    cond do
      not autogenerate_id?(type) ->
        false

      not pk? ->
        raise ArgumentError,
              "only primary keys allow :autogenerate for type #{Ecto.Type.format(type)}, " <>
                "field #{inspect(name)} is not a primary key"

      Module.get_attribute(mod, :ecto_autogenerate_id) ->
        raise ArgumentError, "only one primary key with ID type may be marked as autogenerated"

      true ->
        Module.put_attribute(mod, :ecto_autogenerate_id, {name, source, type})
        true
    end
  end

  defp autogenerate_id?(type), do: Ecto.Type.type(type) in [:id, :binary_id]

  defp expand_literals(ast, env) do
    if Macro.quoted_literal?(ast) do
      Macro.prewalk(ast, &expand_alias(&1, env))
    else
      ast
    end
  end

  defp expand_alias({:__aliases__, _, _} = alias, env),
    do: Macro.expand(alias, %{env | function: {:__schema__, 2}})

  defp expand_alias(other, _env), do: other

  defp expand_nested_module_alias({:__aliases__, _, [Elixir, _ | _] = alias}, _env),
    do: Module.concat(alias)

  defp expand_nested_module_alias({:__aliases__, _, [h | t]}, env) when is_atom(h),
    do: Module.concat([env.module, h | t])

  defp expand_nested_module_alias(other, _env), do: other
end
diff --git a/deps/ecto/lib/ecto/schema/loader.ex b/deps/ecto/lib/ecto/schema/loader.ex
new file mode 100644
index 0000000..8ee6471
--- /dev/null
+++
b/deps/ecto/lib/ecto/schema/loader.ex @@ -0,0 +1,106 @@ +defmodule Ecto.Schema.Loader do + @moduledoc false + + alias Ecto.Schema.Metadata + + @doc """ + Loads a struct to be used as a template in further operations. + """ + def load_struct(nil, _prefix, _source), do: %{} + + def load_struct(schema, prefix, source) do + case schema.__schema__(:loaded) do + %{__meta__: %Metadata{prefix: ^prefix, source: ^source}} = struct -> + struct + + %{__meta__: %Metadata{} = metadata} = struct -> + Map.put(struct, :__meta__, %{metadata | source: source, prefix: prefix}) + + %{} = struct -> + struct + end + end + + @doc """ + Loads data coming from the user/embeds into schema. + + Assumes data does not all belong to schema/struct + and that it may also require source-based renaming. + """ + def unsafe_load(schema, data, loader) do + types = schema.__schema__(:load) + struct = schema.__schema__(:loaded) + unsafe_load(struct, types, data, loader) + end + + @doc """ + Loads data coming from the user/embeds into struct and types. + + Assumes data does not all belong to schema/struct + and that it may also require source-based renaming. 
+ """ + def unsafe_load(struct, types, map, loader) when is_map(map) do + Enum.reduce(types, struct, fn pair, acc -> + {field, source, type} = field_source_and_type(pair) + + case fetch_string_or_atom_field(map, source) do + {:ok, value} -> Map.put(acc, field, load!(struct, field, type, value, loader)) + :error -> acc + end + end) + end + + @compile {:inline, field_source_and_type: 1, fetch_string_or_atom_field: 2} + defp field_source_and_type({field, {:source, source, type}}) do + {field, source, type} + end + + defp field_source_and_type({field, type}) do + {field, field, type} + end + + defp fetch_string_or_atom_field(map, field) when is_atom(field) do + case Map.fetch(map, Atom.to_string(field)) do + {:ok, value} -> {:ok, value} + :error -> Map.fetch(map, field) + end + end + + @compile {:inline, load!: 5} + defp load!(struct, field, type, value, loader) do + case loader.(type, value) do + {:ok, value} -> + value + + :error -> + raise ArgumentError, + "cannot load `#{inspect(value)}` as type #{Ecto.Type.format(type)} " <> + "for field `#{field}`#{error_data(struct)}" + end + end + + defp error_data(%{__struct__: atom}) do + " in schema #{inspect(atom)}" + end + + defp error_data(other) when is_map(other) do + "" + end + + @doc """ + Dumps the given data. + """ + def safe_dump(struct, types, dumper) do + Enum.reduce(types, %{}, fn {field, {source, type, _writable}}, acc -> + value = Map.get(struct, field) + + case dumper.(type, value) do + {:ok, value} -> + Map.put(acc, source, value) + :error -> + raise ArgumentError, "cannot dump `#{inspect value}` as type #{Ecto.Type.format(type)} " <> + "for field `#{field}` in schema #{inspect struct.__struct__}" + end + end) + end +end diff --git a/deps/ecto/lib/ecto/schema/metadata.ex b/deps/ecto/lib/ecto/schema/metadata.ex new file mode 100644 index 0000000..cfbec0a --- /dev/null +++ b/deps/ecto/lib/ecto/schema/metadata.ex @@ -0,0 +1,66 @@ +defmodule Ecto.Schema.Metadata do + @moduledoc """ + Stores metadata of a struct. 
+ + ## State + + The state of the schema is stored in the `:state` field and allows + following values: + + * `:built` - the struct was constructed in memory and is not persisted + to database yet; + * `:loaded` - the struct was loaded from database and represents + persisted data; + * `:deleted` - the struct was deleted and no longer represents persisted + data. + + ## Source + + The `:source` tracks the (table or collection) where the struct is or should + be persisted to. + + ## Prefix + + Tracks the source prefix in the data storage. + + ## Context + + The `:context` field represents additional state some databases require + for proper updates of data. It is not used by the built-in adapters of + `Ecto.Adapters.Postgres` and `Ecto.Adapters.MySQL`. + + ## Schema + + The `:schema` field refers the module name for the schema this metadata belongs to. + """ + defstruct [:state, :source, :context, :schema, :prefix] + + @type state :: :built | :loaded | :deleted + + @type context :: any + + @type t(schema) :: %__MODULE__{ + context: context, + prefix: Ecto.Schema.prefix(), + schema: schema, + source: Ecto.Schema.source(), + state: state + } + + @type t :: t(module) + + defimpl Inspect do + import Inspect.Algebra + + def inspect(metadata, opts) do + %{source: source, prefix: prefix, state: state, context: context} = metadata + + entries = + for entry <- [state, prefix, source, context], + entry != nil, + do: to_doc(entry, opts) + + concat(["#Ecto.Schema.Metadata<"] ++ Enum.intersperse(entries, ", ") ++ [">"]) + end + end +end diff --git a/deps/ecto/lib/ecto/type.ex b/deps/ecto/lib/ecto/type.ex new file mode 100644 index 0000000..8892cb5 --- /dev/null +++ b/deps/ecto/lib/ecto/type.ex @@ -0,0 +1,1591 @@ +defmodule Ecto.Type do + @moduledoc """ + Defines functions and the `Ecto.Type` behaviour for implementing + basic custom types. + + Ecto provides two types of custom types: basic types and + parameterized types. 
Basic types are simple, requiring only four + callbacks to be implemented, and are enough for most occasions. + Parameterized types can be customized on the field definition and + provide a wide variety of callbacks. + + The definition of basic custom types and all of their callbacks are + available in this module. You can learn more about parameterized + types in `Ecto.ParameterizedType`. If in doubt, prefer to use + basic custom types and rely on parameterized types if you need + the extra functionality. + + ## External vs internal vs database representation + + The core functionality of a custom type is the mapping between + external, internal and database representations of a value belonging + to the type. + + For a definition of external and internal data take a look at the + [related section](`Ecto.Changeset#module-external-vs-internal-data`) + in the changeset documentation. + + ```mermaid + stateDiagram-v2 + external: External Data + internal: Internal Data + database: Database Data + external --> internal: cast/1 + external --> database: dump/1 + internal --> database: dump/1 + database --> internal: load/1 + ``` + + ## Example + + Imagine you want to store a URI struct as part of a schema in a + url-shortening service. There isn't an Ecto field type to support + that value at runtime therefore a custom one is needed. + + You also want to query not only by the full url, but for example + by specific ports used. This is possible by putting the URI data + into a map field instead of just storing the plain + string representation. + + from s in ShortUrl, + where: fragment("?->>? ILIKE ?", s.original_url, "port", "443") + + So the custom type does need to handle the conversion from + external data to runtime data (`c:cast/1`) as well as + transforming that runtime data into the `:map` Ecto native type and + back (`c:dump/1` and `c:load/1`). + + defmodule EctoURI do + use Ecto.Type + def type, do: :map + + # Provide custom casting rules. 
+ # Cast strings into the URI struct to be used at runtime + def cast(uri) when is_binary(uri) do + {:ok, URI.parse(uri)} + end + + # Accept casting of URI structs as well + def cast(%URI{} = uri), do: {:ok, uri} + + # Everything else is a failure though + def cast(_), do: :error + + # When loading data from the database, as long as it's a map, + # we just put the data back into a URI struct to be stored in + # the loaded schema struct. + def load(data) when is_map(data) do + data = + for {key, val} <- data do + {String.to_existing_atom(key), val} + end + {:ok, struct!(URI, data)} + end + + # When dumping data to the database, we *expect* a URI struct + # but any value could be inserted into the schema struct at runtime, + # so we need to guard against them. + def dump(%URI{} = uri), do: {:ok, Map.from_struct(uri)} + def dump(_), do: :error + end + + Now we can use our new field type above in our schemas: + + defmodule ShortUrl do + use Ecto.Schema + + schema "posts" do + field :original_url, EctoURI + end + end + + Note: `nil` values are always bypassed and cannot be handled by + custom types. + + > #### `use Ecto.Type` {: .info} + > + > When you `use Ecto.Type`, it will set `@behaviour Ecto.Type` and define + > default, overridable implementations for `c:embed_as/1` and `c:equal?/2`. + > You must implement your own `c:embed_as/1` function if you want + > your `c:dump/1` to be called when exporting from Ecto. + + ## Custom types and primary keys + + Remember that, if you change the type of your primary keys, + you will also need to change the type of all associations that + point to said primary key. + + Imagine you want to encode the ID so they cannot enumerate the + content in your application. An Ecto type could handle the conversion + between the encoded version of the id and its representation in the + database. 
For the sake of simplicity, we'll use base64 encoding in + this example: + + defmodule EncodedId do + use Ecto.Type + + def type, do: :id + + def cast(id) when is_integer(id) do + {:ok, encode_id(id)} + end + def cast(_), do: :error + + def dump(id) when is_binary(id) do + {:ok, id_decoded} = Base.decode64(id) + {:ok, String.to_integer(id_decoded)} + end + + def load(id) when is_integer(id) do + {:ok, encode_id(id)} + end + + defp encode_id(id) do + id + |> Integer.to_string() + |> Base.encode64() + end + end + + To use it as the type for the id in our schema, we can use the + `@primary_key` module attribute: + + defmodule BlogPost do + use Ecto.Schema + + @primary_key {:id, EncodedId, autogenerate: true} + schema "posts" do + belongs_to :author, Author, type: EncodedId + field :content, :string + end + end + + defmodule Author do + use Ecto.Schema + + @primary_key {:id, EncodedId, autogenerate: true} + schema "authors" do + field :name, :string + has_many :posts, BlogPost + end + end + + The `@primary_key` attribute will tell ecto which type to + use for the id. + + Note the `type: EncodedId` option given to `belongs_to` in + the `BlogPost` schema. By default, Ecto will treat + associations as if their keys were `:integer`s. Our primary + keys are a custom type, so when Ecto tries to cast those + ids, it will fail. + + Alternatively, you can set `@foreign_key_type EncodedId` + after `@primary_key` to automatically configure the type + of all `belongs_to` fields. + """ + + import Kernel, except: [match?: 2] + + @doc false + defmacro __using__(_opts) do + quote location: :keep do + @behaviour Ecto.Type + def embed_as(_), do: :self + def equal?(term1, term2), do: term1 == term2 + defoverridable embed_as: 1, equal?: 2 + end + end + + @typedoc "An Ecto type, primitive or custom." + @type t :: primitive | custom + + @typedoc "Primitive Ecto types (handled by Ecto)." 
+ @type primitive :: base | composite + + @typedoc "Custom types are represented by user-defined modules." + @type custom :: module | {:parameterized, {module, term}} + + @type base :: + :integer + | :float + | :boolean + | :string + | :bitstring + | :map + | :binary + | :decimal + | :id + | :binary_id + | :utc_datetime + | :naive_datetime + | :date + | :time + | :any + | :utc_datetime_usec + | :naive_datetime_usec + | :time_usec + | :duration + + @type composite :: {:array, t} | {:map, t} | private_composite + + @typep private_composite :: {:try, t} | {:in, t} | {:supertype, :datetime} + + @base ~w( + integer float decimal boolean string bitstring map binary id binary_id any + utc_datetime naive_datetime date time + utc_datetime_usec naive_datetime_usec time_usec + duration + )a + @composite ~w(array map try in param)a + @variadic ~w(in splice)a + + @doc """ + Returns the underlying schema type for the custom type. + + For example, if you want to provide your own date + structures, the type function should return `:date`. + + Note this function is not required to return Ecto primitive + types, the type is only required to be known by the adapter. + """ + @callback type :: t + + @doc """ + Casts the given input to the custom type. + + This callback is called on external input and can return any type, + as long as the `dump/1` function is able to convert the returned + value into an Ecto native type. There are two situations where + this callback is called: + + 1. When casting values by `Ecto.Changeset` + 2. When passing arguments to `Ecto.Query` + + You can return `:error` if the given term cannot be cast. + A default error message of "is invalid" will be added to the + changeset. + + You may also return `{:error, keyword()}` to customize the + changeset error message and its metadata. Passing a `:message` + key, will override the default message. It is not possible to + override the `:type` key. 
+ + For `{:array, CustomType}` or `{:map, CustomType}` the returned + keyword list will be erased and the default error will be shown. + """ + @callback cast(term) :: {:ok, term} | :error | {:error, keyword()} + + @doc """ + Loads the given term into a custom type. + + This callback is called when loading data from the database and + receives an Ecto native type. It can return any type, as long as + the `dump/1` function is able to convert the returned value back + into an Ecto native type. + """ + @callback load(term) :: {:ok, term} | :error + + @doc """ + Dumps the given term into an Ecto native type. + + This callback is called with any term that was stored in the struct + and it needs to validate them and convert it to an Ecto native type. + """ + @callback dump(term) :: {:ok, term} | :error + + @doc """ + Checks if two terms are semantically equal. + + This callback is used for determining equality of types in + `Ecto.Changeset`. + + By default the terms are compared with the equal operator `==/2`. + """ + @callback equal?(term, term) :: boolean + + @doc """ + Dictates how the type should be treated inside embeds. + + By default, the type is sent as itself, without calling + dumping to keep the higher level representation. But + it can be set to `:dump` so that it is dumped before + being encoded. + """ + @callback embed_as(format :: atom) :: :self | :dump + + @doc """ + Generates a loaded version of the data. + + This is callback is invoked when a custom type is given + to `field` with the `:autogenerate` flag. + """ + @callback autogenerate() :: term() + + @optional_callbacks autogenerate: 0 + + ## Functions + + @doc """ + Checks if we have a primitive type. 
+ + iex> primitive?(:string) + true + iex> primitive?(Another) + false + + iex> primitive?({:array, :string}) + true + iex> primitive?({:array, Another}) + true + + """ + @spec primitive?(t) :: boolean + def primitive?({:parameterized, _}), do: true + def primitive?({composite, _}) when composite in @composite, do: true + def primitive?(base) when base in @base, do: true + def primitive?(_), do: false + + @doc """ + Checks if the given type is parameterized by the given module. + + iex> type = Ecto.ParameterizedType.init(Ecto.Enum, values: [a: 1]) + iex> Ecto.Type.parameterized?(type, Ecto.Enum) + true + iex> Ecto.Type.parameterized?(type, MyEnum) + false + + """ + @spec parameterized?(t, module) :: boolean + def parameterized?({:parameterized, {module, _}}, module), do: true + def parameterized?(_, _), do: false + + @doc """ + Checks if the given atom can be used as composite type. + + iex> composite?(:array) + true + iex> composite?(:string) + false + + """ + @spec composite?(atom) :: boolean + def composite?(atom), do: atom in @composite + + @doc """ + Checks if the given atom can be used as base type. + + iex> base?(:string) + true + iex> base?(:array) + false + iex> base?(Custom) + false + + """ + @spec base?(atom) :: boolean + def base?(atom), do: atom in @base + + @doc """ + Gets how the type is treated inside embeds for the given format. + + See `c:embed_as/1`. + """ + def embed_as({:parameterized, {module, params}}, format), do: module.embed_as(format, params) + def embed_as({composite, type}, format) when composite in @composite, do: embed_as(type, format) + def embed_as(base, _format) when base in @base, do: :self + def embed_as(mod, format), do: mod.embed_as(format) + + @doc """ + Dumps the `value` for `type` considering it will be embedded in `format`. 
+ + ## Examples + + iex> Ecto.Type.embedded_dump(:decimal, Decimal.new("1"), :json) + {:ok, Decimal.new("1")} + + """ + def embedded_dump(type, value, format) do + case embed_as(type, format) do + :self -> {:ok, value} + :dump -> dump(type, value, &embedded_dump(&1, &2, format)) + end + end + + @doc """ + Loads the `value` for `type` considering it was embedded in `format`. + + ## Examples + + iex> Ecto.Type.embedded_load(:decimal, "1", :json) + {:ok, Decimal.new("1")} + + """ + def embedded_load(type, value, format) do + case embed_as(type, format) do + :self -> + case cast(type, value) do + {:ok, _} = ok -> ok + _ -> :error + end + + :dump -> + load(type, value, &embedded_load(&1, &2, format)) + end + end + + @doc """ + Retrieves the underlying schema type for the given, possibly custom, type. + + iex> type(:string) + :string + iex> type(Ecto.UUID) + :uuid + + iex> type({:array, :string}) + {:array, :string} + iex> type({:array, Ecto.UUID}) + {:array, :uuid} + + iex> type({:map, Ecto.UUID}) + {:map, :uuid} + + """ + @spec type(t) :: t + def type(type) + def type({:parameterized, {type, params}}), do: type.type(params) + def type({:array, type}), do: {:array, type(type)} + def type({:map, type}), do: {:map, type(type)} + def type({:try, type}), do: type(type) + def type(type) when type in @base, do: type + def type(type) when is_atom(type), do: type.type() + def type(type), do: type + + @doc """ + Checks if a given type matches with a primitive type + that can be found in queries. 
+ + iex> match?(:string, :any) + true + iex> match?(:any, :string) + true + iex> match?(:string, :string) + true + + iex> match?({:array, :string}, {:array, :any}) + true + + iex> match?(Ecto.UUID, :uuid) + true + iex> match?(Ecto.UUID, :string) + false + + """ + @spec match?(t, primitive) :: boolean + def match?(schema_type, query_type) do + if primitive?(schema_type) do + do_match?(schema_type, query_type) + else + do_match?(schema_type.type(), query_type) + end + end + + defp do_match?(_left, :any), do: true + defp do_match?(:any, _right), do: true + defp do_match?({outer, left}, {outer, right}), do: match?(left, right) + defp do_match?(:decimal, type) when type in [:float, :integer], do: true + defp do_match?(:binary_id, :binary), do: true + defp do_match?(:id, :integer), do: true + defp do_match?(type, type), do: true + defp do_match?(:naive_datetime, {:supertype, :datetime}), do: true + defp do_match?(:naive_datetime_usec, {:supertype, :datetime}), do: true + defp do_match?(:utc_datetime, {:supertype, :datetime}), do: true + defp do_match?(:utc_datetime_usec, {:supertype, :datetime}), do: true + defp do_match?(_, _), do: false + + @doc """ + Dumps a value to the given type. + + Opposite to casting, dumping requires the returned value + to be a valid Ecto type, as it will be sent to the + underlying data store. 
+ + iex> dump(:string, nil) + {:ok, nil} + iex> dump(:string, "foo") + {:ok, "foo"} + + iex> dump(:integer, 1) + {:ok, 1} + iex> dump(:integer, "10") + :error + + iex> dump(:binary, "foo") + {:ok, "foo"} + iex> dump(:binary, 1) + :error + + iex> dump({:array, :integer}, [1, 2, 3]) + {:ok, [1, 2, 3]} + iex> dump({:array, :integer}, [1, "2", 3]) + :error + iex> dump({:array, :binary}, ["1", "2", "3"]) + {:ok, ["1", "2", "3"]} + + """ + @spec dump(t, term) :: {:ok, term} | :error + @spec dump(t, term, (t, term -> {:ok, term} | :error)) :: {:ok, term} | :error + def dump(type, value, dumper \\ &dump/2) + + def dump({:parameterized, {module, params}}, value, dumper) do + module.dump(value, dumper, params) + end + + def dump(_type, nil, _dumper) do + {:ok, nil} + end + + def dump({:try, type}, value, dumper) do + case dump(type, value, dumper) do + {:ok, _} = ok -> ok + :error -> {:ok, value} + end + end + + def dump({qual, type}, value, dumper) when qual in @variadic do + case dump({:array, type}, value, dumper) do + {:ok, value} -> {:ok, {qual, value}} + :error -> :error + end + end + + def dump({:array, {:parameterized, _} = type}, value, dumper), + do: array_with_type(value, type, dumper, false, []) + + def dump({:array, type}, value, dumper), do: array_with_type(value, type, dumper, true, []) + def dump({:map, type}, value, dumper), do: map(value, type, dumper, false, %{}) + + def dump(:any, value, _dumper), do: {:ok, value} + def dump(:integer, value, _dumper), do: same_integer(value) + def dump(:float, value, _dumper), do: dump_float(value) + def dump(:boolean, value, _dumper), do: same_boolean(value) + def dump(:map, value, _dumper), do: same_map(value) + def dump(:string, value, _dumper), do: same_binary(value) + def dump(:binary, value, _dumper), do: same_binary(value) + def dump(:bitstring, value, _dumper), do: same_bitstring(value) + def dump(:id, value, _dumper), do: same_integer(value) + def dump(:binary_id, value, _dumper), do: same_binary(value) + def 
dump(:decimal, value, _dumper), do: same_decimal(value) + def dump(:date, value, _dumper), do: same_date(value) + def dump(:time, value, _dumper), do: dump_time(value) + def dump(:time_usec, value, _dumper), do: dump_time_usec(value) + def dump(:naive_datetime, value, _dumper), do: dump_naive_datetime(value) + def dump(:naive_datetime_usec, value, _dumper), do: dump_naive_datetime_usec(value) + def dump(:utc_datetime, value, _dumper), do: dump_utc_datetime(value) + def dump(:utc_datetime_usec, value, _dumper), do: dump_utc_datetime_usec(value) + def dump(:duration, value, _dumper), do: same_duration(value) + def dump({:supertype, :datetime}, value, _dumper), do: dump_any_datetime(value) + def dump(mod, value, _dumper) when is_atom(mod), do: mod.dump(value) + + defp dump_float(term) when is_float(term), do: {:ok, term} + defp dump_float(_), do: :error + + defp dump_time(%Time{} = term), do: {:ok, check_no_usec!(term, :time)} + defp dump_time(_), do: :error + + defp dump_time_usec(%Time{} = term), do: {:ok, check_usec!(term, :time_usec)} + defp dump_time_usec(_), do: :error + + defp dump_any_datetime(%NaiveDateTime{} = term), do: {:ok, term} + defp dump_any_datetime(%DateTime{} = term), do: {:ok, term} + defp dump_any_datetime(_), do: :error + + defp dump_naive_datetime(%NaiveDateTime{} = term), + do: {:ok, check_no_usec!(term, :naive_datetime)} + + defp dump_naive_datetime(_), do: :error + + defp dump_naive_datetime_usec(%NaiveDateTime{} = term), + do: {:ok, check_usec!(term, :naive_datetime_usec)} + + defp dump_naive_datetime_usec(_), do: :error + + defp dump_utc_datetime(%DateTime{} = datetime) do + kind = :utc_datetime + {:ok, datetime |> check_utc_timezone!(kind) |> check_no_usec!(kind)} + end + + defp dump_utc_datetime(_), do: :error + + defp dump_utc_datetime_usec(%DateTime{} = datetime) do + kind = :utc_datetime_usec + {:ok, datetime |> check_utc_timezone!(kind) |> check_usec!(kind)} + end + + defp dump_utc_datetime_usec(_), do: :error + + @doc """ + Loads a 
value with the given type. + + iex> load(:string, nil) + {:ok, nil} + iex> load(:string, "foo") + {:ok, "foo"} + + iex> load(:integer, 1) + {:ok, 1} + iex> load(:integer, "10") + :error + + """ + @spec load(t, term) :: {:ok, term} | :error + @spec load(t, term, (t, term -> {:ok, term} | :error)) :: {:ok, term} | :error + def load(type, value, loader \\ &load/2) + + def load({:parameterized, {module, params}}, value, loader) do + module.load(value, loader, params) + end + + def load(_type, nil, _loader) do + {:ok, nil} + end + + def load({:try, type}, value, loader) do + case load(type, value, loader) do + {:ok, _} = ok -> ok + :error -> {:ok, value} + end + end + + def load({:array, {:parameterized, _} = type}, value, loader), + do: array_with_type(value, type, loader, false, []) + + def load({:array, type}, value, loader), do: array_with_type(value, type, loader, true, []) + def load({:map, type}, value, loader), do: map(value, type, loader, false, %{}) + + def load(:any, value, _loader), do: {:ok, value} + def load(:integer, value, _loader), do: same_integer(value) + def load(:float, value, _loader), do: load_float(value) + def load(:boolean, value, _loader), do: same_boolean(value) + def load(:map, value, _loader), do: same_map(value) + def load(:string, value, _loader), do: same_binary(value) + def load(:binary, value, _loader), do: same_binary(value) + def load(:bitstring, value, _loader), do: same_bitstring(value) + def load(:id, value, _loader), do: same_integer(value) + def load(:binary_id, value, _loader), do: same_binary(value) + def load(:decimal, value, _loader), do: same_decimal(value) + def load(:date, value, _loader), do: same_date(value) + def load(:time, value, _loader), do: load_time(value) + def load(:time_usec, value, _loader), do: load_time_usec(value) + def load(:naive_datetime, value, _loader), do: load_naive_datetime(value) + def load(:naive_datetime_usec, value, _loader), do: load_naive_datetime_usec(value) + def load(:utc_datetime, value, 
_loader), do: load_utc_datetime(value) + def load(:utc_datetime_usec, value, _loader), do: load_utc_datetime_usec(value) + def load(:duration, value, _loader), do: same_duration(value) + def load(mod, value, _loader), do: mod.load(value) + + defp load_float(term) when is_float(term), do: {:ok, term} + defp load_float(term) when is_integer(term), do: {:ok, :erlang.float(term)} + defp load_float(_), do: :error + + defp load_time(%Time{} = time), do: {:ok, truncate_usec(time)} + defp load_time(_), do: :error + + defp load_time_usec(%Time{} = time), do: {:ok, pad_usec(time)} + defp load_time_usec(_), do: :error + + # This is a downcast, which is always fine, and in case + # we try to send a naive datetime where a datetime is expected, + # the adapter will either explicitly error (Postgres) or it will + # accept the data (MySQL), which is fine as we always assume UTC + defp load_naive_datetime(%DateTime{} = datetime), + do: + {:ok, + datetime |> check_utc_timezone!(:naive_datetime) |> DateTime.to_naive() |> truncate_usec()} + + defp load_naive_datetime(%NaiveDateTime{} = naive_datetime), + do: {:ok, truncate_usec(naive_datetime)} + + defp load_naive_datetime(_), do: :error + + defp load_naive_datetime_usec(%DateTime{} = datetime), + do: + {:ok, + datetime |> check_utc_timezone!(:naive_datetime_usec) |> DateTime.to_naive() |> pad_usec()} + + defp load_naive_datetime_usec(%NaiveDateTime{} = naive_datetime), + do: {:ok, pad_usec(naive_datetime)} + + defp load_naive_datetime_usec(_), do: :error + + # This is an upcast but because we assume the database + # is always in UTC, we can perform it. 
+ defp load_utc_datetime(%NaiveDateTime{} = naive_datetime), + do: {:ok, naive_datetime |> truncate_usec() |> DateTime.from_naive!("Etc/UTC")} + + defp load_utc_datetime(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:utc_datetime) |> truncate_usec()} + + defp load_utc_datetime(_), + do: :error + + defp load_utc_datetime_usec(%NaiveDateTime{} = naive_datetime), + do: {:ok, naive_datetime |> pad_usec() |> DateTime.from_naive!("Etc/UTC")} + + defp load_utc_datetime_usec(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:utc_datetime_usec) |> pad_usec()} + + defp load_utc_datetime_usec(_), + do: :error + + @doc """ + Casts a value to the given type. + + `cast/2` is used by the finder queries and changesets to cast outside values to + specific types. + + Note that nil can be cast to all primitive types as data stores allow nil to be + set on any column. + + NaN and infinite decimals are not supported, use custom types instead. + + iex> cast(:any, "whatever") + {:ok, "whatever"} + + iex> cast(:any, nil) + {:ok, nil} + iex> cast(:string, nil) + {:ok, nil} + + iex> cast(:integer, 1) + {:ok, 1} + iex> cast(:integer, "1") + {:ok, 1} + iex> cast(:integer, "1.0") + :error + + iex> cast(:id, 1) + {:ok, 1} + iex> cast(:id, "1") + {:ok, 1} + iex> cast(:id, "1.0") + :error + + iex> cast(:float, 1.0) + {:ok, 1.0} + iex> cast(:float, 1) + {:ok, 1.0} + iex> cast(:float, "1") + {:ok, 1.0} + iex> cast(:float, "1.0") + {:ok, 1.0} + iex> cast(:float, "1-foo") + :error + + iex> cast(:boolean, true) + {:ok, true} + iex> cast(:boolean, false) + {:ok, false} + iex> cast(:boolean, "1") + {:ok, true} + iex> cast(:boolean, "0") + {:ok, false} + iex> cast(:boolean, "whatever") + :error + + iex> cast(:string, "beef") + {:ok, "beef"} + iex> cast(:binary, "beef") + {:ok, "beef"} + + iex> cast(:decimal, Decimal.new("1.0")) + {:ok, Decimal.new("1.0")} + iex> cast(:decimal, "1.0bad") + :error + + iex> cast({:array, :integer}, [1, 2, 3]) + {:ok, [1, 2, 3]} + 
iex> cast({:array, :integer}, ["1", "2", "3"]) + {:ok, [1, 2, 3]} + iex> cast({:array, :string}, [1, 2, 3]) + :error + iex> cast(:string, [1, 2, 3]) + :error + + iex> cast(:utc_datetime, "2014-04-17T14:00:00Z") + {:ok, ~U[2014-04-17 14:00:00Z]} + iex> cast(:utc_datetime, "2014-04-17T14:00:00.030Z") + {:ok, ~U[2014-04-17 14:00:00Z]} + iex> cast(:utc_datetime, "2014-04-17T12:00:00-02:00") + {:ok, ~U[2014-04-17 14:00:00Z]} + + """ + @spec cast(t, term) :: {:ok, term} | {:error, keyword()} | :error + def cast({:parameterized, {type, params}}, value), do: type.cast(value, params) + def cast({:in, _type}, nil), do: :error + def cast(_type, nil), do: {:ok, nil} + + def cast({:try, type}, value) do + case cast(type, value) do + {:ok, _} = ok -> ok + _ -> {:ok, value} + end + end + + def cast(type, value) do + cast_fun(type).(value) + end + + defp cast_fun(:integer), do: &cast_integer/1 + defp cast_fun(:float), do: &cast_float/1 + defp cast_fun(:boolean), do: &cast_boolean/1 + defp cast_fun(:map), do: &cast_map/1 + defp cast_fun(:string), do: &cast_binary/1 + defp cast_fun(:binary), do: &cast_binary/1 + defp cast_fun(:bitstring), do: &cast_bitstring/1 + defp cast_fun(:id), do: &cast_integer/1 + defp cast_fun(:binary_id), do: &cast_binary/1 + defp cast_fun(:any), do: &{:ok, &1} + defp cast_fun(:decimal), do: &cast_decimal/1 + defp cast_fun(:date), do: &cast_date/1 + defp cast_fun(:time), do: &maybe_truncate_usec(cast_time(&1)) + defp cast_fun(:time_usec), do: &maybe_pad_usec(cast_time(&1)) + defp cast_fun(:naive_datetime), do: &maybe_truncate_usec(cast_naive_datetime(&1)) + defp cast_fun(:naive_datetime_usec), do: &maybe_pad_usec(cast_naive_datetime(&1)) + defp cast_fun(:utc_datetime), do: &maybe_truncate_usec(cast_utc_datetime(&1)) + defp cast_fun(:utc_datetime_usec), do: &maybe_pad_usec(cast_utc_datetime(&1)) + defp cast_fun(:duration), do: &cast_duration/1 + defp cast_fun({:supertype, :datetime}), do: &cast_any_datetime(&1) + defp cast_fun({:parameterized, {mod, 
params}}), do: &mod.cast(&1, params) + defp cast_fun({qual, type}) when qual in @variadic, do: cast_fun({:array, type}) + + defp cast_fun({:array, {:parameterized, _} = type}) do + fun = cast_fun(type) + &array_with_index(&1, fun, false, 0, []) + end + + defp cast_fun({:array, type}) do + fun = cast_fun(type) + &array_with_index(&1, fun, true, 0, []) + end + + defp cast_fun({:map, {:parameterized, _} = type}) do + fun = cast_fun(type) + &map(&1, fun, false, %{}) + end + + defp cast_fun({:map, type}) do + fun = cast_fun(type) + &map(&1, fun, true, %{}) + end + + defp cast_fun(mod) when is_atom(mod) do + fn + nil -> {:ok, nil} + value -> mod.cast(value) + end + end + + # We check for the byte size to avoid creating unnecessary large integers + # which would never map to a database key (u64 is 20 digits only). + defp cast_integer(term) when is_binary(term) and byte_size(term) < 32 do + case Integer.parse(term) do + {integer, ""} -> {:ok, integer} + _ -> :error + end + end + + defp cast_integer(term) when is_integer(term), do: {:ok, term} + defp cast_integer(_), do: :error + + defp cast_float(term) when is_binary(term) do + case Float.parse(term) do + {float, ""} -> {:ok, float} + _ -> :error + end + end + + defp cast_float(term) when is_float(term), do: {:ok, term} + defp cast_float(term) when is_integer(term), do: {:ok, :erlang.float(term)} + defp cast_float(_), do: :error + + defp cast_decimal(term) when is_binary(term) do + case Decimal.parse(term) do + {decimal, ""} -> check_decimal(decimal, false) + {_, remainder} when is_binary(remainder) and byte_size(remainder) > 0 -> :error + :error -> :error + end + end + + defp cast_decimal(term), do: same_decimal(term) + + defp cast_boolean(term) when term in ~w(true 1), do: {:ok, true} + defp cast_boolean(term) when term in ~w(false 0), do: {:ok, false} + defp cast_boolean(term) when is_boolean(term), do: {:ok, term} + defp cast_boolean(_), do: :error + + defp cast_binary(term) when is_binary(term), do: {:ok, term} + defp 
cast_binary(_), do: :error + + defp cast_bitstring(term) when is_bitstring(term), do: {:ok, term} + defp cast_bitstring(_), do: :error + + defp cast_map(term) when is_map(term), do: {:ok, term} + defp cast_map(_), do: :error + + if Code.ensure_loaded?(Duration) do + defp cast_duration(%Duration{} = term), do: {:ok, term} + end + + defp cast_duration(_), do: :error + + @doc """ + Casts a value to the given type or raises an error. + + See `cast/2` for more information. + + ## Examples + + iex> Ecto.Type.cast!(:integer, "1") + 1 + iex> Ecto.Type.cast!(:integer, 1) + 1 + iex> Ecto.Type.cast!(:integer, nil) + nil + + iex> Ecto.Type.cast!(:integer, 1.0) + ** (Ecto.CastError) cannot cast 1.0 to :integer + """ + def cast!(type, value) do + case Ecto.Type.cast(type, value) do + {:ok, value} -> + value + + :error -> + raise Ecto.CastError, type: type, value: value + + {:error, metadata} -> + raise Ecto.CastError, [type: type, value: value] ++ Keyword.take(metadata, [:message]) + end + end + + ## Shared helpers + + @compile {:inline, same_integer: 1, same_boolean: 1, same_map: 1, same_decimal: 1, same_date: 1} + defp same_integer(term) when is_integer(term), do: {:ok, term} + defp same_integer(_), do: :error + + defp same_boolean(term) when is_boolean(term), do: {:ok, term} + defp same_boolean(_), do: :error + + defp same_binary(term) when is_binary(term), do: {:ok, term} + defp same_binary(_), do: :error + + defp same_bitstring(term) when is_bitstring(term), do: {:ok, term} + defp same_bitstring(_), do: :error + + defp same_map(term) when is_map(term), do: {:ok, term} + defp same_map(_), do: :error + + defp same_decimal(term) when is_integer(term), do: {:ok, Decimal.new(term)} + defp same_decimal(term) when is_float(term), do: {:ok, Decimal.from_float(term)} + defp same_decimal(%Decimal{} = term), do: check_decimal(term, true) + defp same_decimal(_), do: :error + + defp same_date(%Date{} = term), do: {:ok, term} + defp same_date(_), do: :error + + if 
Code.ensure_loaded?(Duration) do + defp same_duration(%Duration{} = term), do: {:ok, term} + end + + defp same_duration(_), do: :error + + @doc false + def empty_trimmed?(value, :binary), do: value == "" + def empty_trimmed?(value, _type), do: is_binary(value) and String.trim_leading(value) == "" + + ## Adapter related + + @doc false + def adapter_autogenerate(adapter, type) do + type + |> type() + |> adapter.autogenerate() + end + + @doc false + def adapter_load(adapter, type, value) do + if of_base_type?(type, value) do + {:ok, value} + else + process_loaders(adapter.loaders(type(type), type), {:ok, value}, adapter) + end + end + + defp process_loaders(_, :error, _adapter), + do: :error + + defp process_loaders([fun | t], {:ok, value}, adapter) when is_function(fun), + do: process_loaders(t, fun.(value), adapter) + + defp process_loaders([type | t], {:ok, value}, adapter), + do: process_loaders(t, load(type, value, &adapter_load(adapter, &1, &2)), adapter) + + defp process_loaders([], {:ok, _} = acc, _adapter), + do: acc + + @doc false + def adapter_dump(adapter, type, value) do + process_dumpers(adapter.dumpers(type(type), type), {:ok, value}, adapter) + end + + defp process_dumpers(_, :error, _adapter), + do: :error + + defp process_dumpers([fun | t], {:ok, value}, adapter) when is_function(fun), + do: process_dumpers(t, fun.(value), adapter) + + defp process_dumpers([type | t], {:ok, value}, adapter), + do: process_dumpers(t, dump(type, value, &adapter_dump(adapter, &1, &2)), adapter) + + defp process_dumpers([], {:ok, _} = acc, _adapter), + do: acc + + ## Date + + defp cast_date(binary) when is_binary(binary) do + case Date.from_iso8601(binary) do + {:ok, _} = ok -> + ok + + {:error, _} -> + case NaiveDateTime.from_iso8601(binary) do + {:ok, naive_datetime} -> {:ok, NaiveDateTime.to_date(naive_datetime)} + {:error, _} -> :error + end + end + end + + defp cast_date(%{"year" => empty, "month" => empty, "day" => empty}) when empty in ["", nil], + do: {:ok, nil} 
+ + defp cast_date(%{year: empty, month: empty, day: empty}) when empty in ["", nil], + do: {:ok, nil} + + defp cast_date(%{"year" => year, "month" => month, "day" => day}), + do: cast_date(to_i(year), to_i(month), to_i(day)) + + defp cast_date(%{year: year, month: month, day: day}), + do: cast_date(to_i(year), to_i(month), to_i(day)) + + defp cast_date(_), + do: :error + + defp cast_date(year, month, day) + when is_integer(year) and is_integer(month) and is_integer(day) do + case Date.new(year, month, day) do + {:ok, _} = ok -> ok + {:error, _} -> :error + end + end + + defp cast_date(_, _, _), + do: :error + + ## Time + + defp cast_time(<>), + do: cast_time(to_i(hour), to_i(minute), 0, nil) + + defp cast_time(binary) when is_binary(binary) do + case Time.from_iso8601(binary) do + {:ok, _} = ok -> ok + {:error, _} -> :error + end + end + + defp cast_time(%{"hour" => empty, "minute" => empty}) when empty in ["", nil], + do: {:ok, nil} + + defp cast_time(%{hour: empty, minute: empty}) when empty in ["", nil], + do: {:ok, nil} + + defp cast_time(%{"hour" => hour, "minute" => minute} = map), + do: + cast_time( + to_i(hour), + to_i(minute), + to_i(Map.get(map, "second")), + to_i(Map.get(map, "microsecond")) + ) + + defp cast_time(%{ + hour: hour, + minute: minute, + second: second, + microsecond: {microsecond, precision} + }), + do: cast_time(to_i(hour), to_i(minute), to_i(second), {to_i(microsecond), to_i(precision)}) + + defp cast_time(%{hour: hour, minute: minute} = map), + do: + cast_time( + to_i(hour), + to_i(minute), + to_i(Map.get(map, :second)), + to_i(Map.get(map, :microsecond)) + ) + + defp cast_time(_), + do: :error + + defp cast_time(hour, minute, sec, usec) when is_integer(usec) do + cast_time(hour, minute, sec, {usec, 6}) + end + + defp cast_time(hour, minute, sec, nil) do + cast_time(hour, minute, sec, {0, 0}) + end + + defp cast_time(hour, minute, sec, {usec, precision}) + when is_integer(hour) and is_integer(minute) and + (is_integer(sec) or 
is_nil(sec)) and is_integer(usec) and is_integer(precision) do + case Time.new(hour, minute, sec || 0, {usec, precision}) do + {:ok, _} = ok -> ok + {:error, _} -> :error + end + end + + defp cast_time(_, _, _, _) do + :error + end + + defp cast_any_datetime(%DateTime{} = datetime), do: cast_utc_datetime(datetime) + defp cast_any_datetime(other), do: cast_naive_datetime(other) + + ## Naive datetime + + defp cast_naive_datetime("-" <> rest) do + with {:ok, naive_datetime} <- cast_naive_datetime(rest) do + {:ok, %{naive_datetime | year: naive_datetime.year * -1}} + end + end + + defp cast_naive_datetime( + <> + ) + when sep in [?\s, ?T] do + case NaiveDateTime.new(to_i(year), to_i(month), to_i(day), to_i(hour), to_i(minute), 0) do + {:ok, _} = ok -> ok + _ -> :error + end + end + + defp cast_naive_datetime(binary) when is_binary(binary) do + case NaiveDateTime.from_iso8601(binary) do + {:ok, _} = ok -> ok + {:error, _} -> :error + end + end + + defp cast_naive_datetime(%{ + "year" => empty, + "month" => empty, + "day" => empty, + "hour" => empty, + "minute" => empty + }) + when empty in ["", nil], + do: {:ok, nil} + + defp cast_naive_datetime(%{year: empty, month: empty, day: empty, hour: empty, minute: empty}) + when empty in ["", nil], + do: {:ok, nil} + + defp cast_naive_datetime(%{} = map) do + with {:ok, %Date{} = date} <- cast_date(map), + {:ok, %Time{} = time} <- cast_time(map) do + NaiveDateTime.new(date, time) + else + _ -> :error + end + end + + defp cast_naive_datetime(_) do + :error + end + + ## UTC datetime + + defp cast_utc_datetime("-" <> rest) do + with {:ok, utc_datetime} <- cast_utc_datetime(rest) do + {:ok, %{utc_datetime | year: utc_datetime.year * -1}} + end + end + + defp cast_utc_datetime( + <> + ) + when sep in [?\s, ?T] do + case NaiveDateTime.new(to_i(year), to_i(month), to_i(day), to_i(hour), to_i(minute), 0) do + {:ok, naive_datetime} -> {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")} + _ -> :error + end + end + + defp 
cast_utc_datetime(binary) when is_binary(binary) do + case DateTime.from_iso8601(binary) do + {:ok, datetime, _offset} -> + {:ok, datetime} + + {:error, :missing_offset} -> + case NaiveDateTime.from_iso8601(binary) do + {:ok, naive_datetime} -> {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")} + {:error, _} -> :error + end + + {:error, _} -> + :error + end + end + + defp cast_utc_datetime(%DateTime{time_zone: "Etc/UTC"} = datetime), do: {:ok, datetime} + + defp cast_utc_datetime(%DateTime{} = datetime) do + case datetime |> DateTime.to_unix(:microsecond) |> DateTime.from_unix(:microsecond) do + {:ok, _} = ok -> ok + {:error, _} -> :error + end + end + + defp cast_utc_datetime(value) do + case cast_naive_datetime(value) do + {:ok, %NaiveDateTime{} = naive_datetime} -> + {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")} + + {:ok, _} = ok -> + ok + + :error -> + :error + end + end + + @doc """ + Checks if two terms are equal. + + Depending on the given `type` performs a structural or semantical comparison. + + ## Examples + + iex> equal?(:integer, 1, 1) + true + iex> equal?(:decimal, Decimal.new("1"), Decimal.new("1.00")) + true + + """ + @spec equal?(t, term, term) :: boolean + def equal?(_, nil, nil), do: true + + def equal?(type, term1, term2) do + if fun = equal_fun(type) do + fun.(term1, term2) + else + term1 == term2 + end + end + + @doc """ + Checks if `collection` includes a `term`. + + Depending on the given `type` performs a structural or semantical comparison. 
+ + ## Examples + + iex> include?(:integer, 1, 1..3) + true + iex> include?(:decimal, Decimal.new("1"), [Decimal.new("1.00"), Decimal.new("2.00")]) + true + + """ + @spec include?(t, term, Enum.t()) :: boolean + def include?(type, term, collection) do + if fun = equal_fun(type) do + Enum.any?(collection, &fun.(term, &1)) + else + term in collection + end + end + + defp equal_fun(:decimal), do: &equal_decimal?/2 + defp equal_fun(t) when t in [:time, :time_usec], do: &equal_time?/2 + defp equal_fun(t) when t in [:utc_datetime, :utc_datetime_usec], do: &equal_utc_datetime?/2 + + defp equal_fun(t) when t in [:naive_datetime, :naive_datetime_usec], + do: &equal_naive_datetime?/2 + + defp equal_fun(t) when t in @base, do: nil + + defp equal_fun({:array, type}) do + if fun = equal_fun(type) do + &equal_list?(fun, &1, &2) + end + end + + defp equal_fun({:map, type}) do + if fun = equal_fun(type) do + &equal_map?(fun, &1, &2) + end + end + + defp equal_fun({:parameterized, {mod, params}}) do + &mod.equal?(&1, &2, params) + end + + defp equal_fun(mod) when is_atom(mod), do: &mod.equal?/2 + + defp equal_decimal?(%Decimal{} = a, %Decimal{} = b), do: Decimal.equal?(a, b) + defp equal_decimal?(_, _), do: false + + defp equal_time?(%Time{} = a, %Time{} = b), do: Time.compare(a, b) == :eq + defp equal_time?(_, _), do: false + + defp equal_utc_datetime?(%DateTime{} = a, %DateTime{} = b), do: DateTime.compare(a, b) == :eq + defp equal_utc_datetime?(_, _), do: false + + defp equal_naive_datetime?(%NaiveDateTime{} = a, %NaiveDateTime{} = b), + do: NaiveDateTime.compare(a, b) == :eq + + defp equal_naive_datetime?(_, _), + do: false + + defp equal_list?(fun, [nil | xs], [nil | ys]), do: equal_list?(fun, xs, ys) + defp equal_list?(fun, [x | xs], [y | ys]), do: fun.(x, y) and equal_list?(fun, xs, ys) + defp equal_list?(_fun, [], []), do: true + defp equal_list?(_fun, _, _), do: false + + defp equal_map?(_fun, map1, map2) when map_size(map1) != map_size(map2) do + false + end + + defp 
equal_map?(fun, %{} = map1, %{} = map2) do + equal_map?(fun, Map.to_list(map1), map2) + end + + defp equal_map?(fun, [{key, nil} | tail], other_map) do + case other_map do + %{^key => nil} -> equal_map?(fun, tail, other_map) + _ -> false + end + end + + defp equal_map?(fun, [{key, val} | tail], other_map) do + case other_map do + %{^key => other_val} -> fun.(val, other_val) and equal_map?(fun, tail, other_map) + _ -> false + end + end + + defp equal_map?(_fun, [], _) do + true + end + + defp equal_map?(_fun, _, _) do + false + end + + @doc """ + Format type for error messaging and logs. + """ + def format({:parameterized, {type, params}}) do + if function_exported?(type, :format, 1) do + apply(type, :format, [params]) + else + "##{inspect(type)}<#{inspect(params)}>" + end + end + + def format({composite, type}) when composite in [:array, :map, :in] do + "{#{inspect(composite)}, #{format(type)}}" + end + + def format(type), do: inspect(type) + + ## Helpers + + # Checks if a value is of the given primitive type. 
+ defp of_base_type?(:any, _), do: true + defp of_base_type?(:id, term), do: is_integer(term) + defp of_base_type?(:float, term), do: is_float(term) + defp of_base_type?(:integer, term), do: is_integer(term) + defp of_base_type?(:boolean, term), do: is_boolean(term) + defp of_base_type?(:binary, term), do: is_binary(term) + defp of_base_type?(:string, term), do: is_binary(term) + defp of_base_type?(:map, term), do: is_map(term) and not Map.has_key?(term, :__struct__) + defp of_base_type?(:decimal, value), do: Kernel.match?(%Decimal{}, value) + defp of_base_type?(:date, value), do: Kernel.match?(%Date{}, value) + defp of_base_type?(_, _), do: false + + defp array_with_index([nil | t], fun, true, index, acc) do + array_with_index(t, fun, true, index + 1, [nil | acc]) + end + + defp array_with_index([h | t], fun, skip_nil?, index, acc) do + case fun.(h) do + {:ok, h} -> + array_with_index(t, fun, skip_nil?, index + 1, [h | acc]) + + :error -> + :error + + {:error, custom_errors} -> + {:error, Keyword.update(custom_errors, :source, [index], &[index | &1])} + end + end + + defp array_with_index([], _fun, _skip_nil?, _index, acc) do + {:ok, Enum.reverse(acc)} + end + + defp array_with_index(%_{} = struct, fun, skip_nil?, index, acc) do + case Enumerable.impl_for(struct) do + nil -> :error + _ -> struct |> Enum.to_list() |> array_with_index(fun, skip_nil?, index, acc) + end + end + + defp array_with_index(_, _, _, _, _) do + :error + end + + defp map(map, fun, skip_nil?, acc) when is_map(map) do + map_each(Map.to_list(map), fun, skip_nil?, acc) + end + + defp map(_, _, _, _) do + :error + end + + defp map_each([{key, nil} | t], fun, true, acc) do + map_each(t, fun, true, Map.put(acc, key, nil)) + end + + defp map_each([{key, value} | t], fun, skip_nil?, acc) do + case fun.(value) do + {:ok, value} -> + map_each(t, fun, skip_nil?, Map.put(acc, key, value)) + + :error -> + :error + + {:error, custom_errors} -> + {:error, Keyword.update(custom_errors, :source, [key], &[key | 
&1])} + end + end + + defp map_each([], _fun, _skip_nil?, acc) do + {:ok, acc} + end + + defp array_with_type([nil | t], type, fun, true, acc) do + array_with_type(t, type, fun, true, [nil | acc]) + end + + defp array_with_type([h | t], type, fun, skip_nil?, acc) do + case fun.(type, h) do + {:ok, h} -> array_with_type(t, type, fun, skip_nil?, [h | acc]) + :error -> :error + end + end + + defp array_with_type([], _type, _fun, _skip_nil?, acc) do + {:ok, Enum.reverse(acc)} + end + + defp array_with_type(_, _, _, _, _) do + :error + end + + defp map(map, type, fun, skip_nil?, acc) when is_map(map) do + map_each(Map.to_list(map), type, fun, skip_nil?, acc) + end + + defp map(_, _, _, _, _) do + :error + end + + defp map_each([{key, value} | t], type, fun, skip_nil?, acc) do + case fun.(type, value) do + {:ok, value} -> map_each(t, type, fun, skip_nil?, Map.put(acc, key, value)) + :error -> :error + end + end + + defp map_each([], _type, _fun, _skip_nil?, acc) do + {:ok, acc} + end + + defp to_i(bin) when is_binary(bin) and byte_size(bin) < 32 do + case Integer.parse(bin) do + {int, ""} -> int + _ -> nil + end + end + + defp to_i(int) when is_integer(int), do: int + defp to_i(_), do: nil + + defp maybe_truncate_usec({:ok, struct}), do: {:ok, truncate_usec(struct)} + defp maybe_truncate_usec(:error), do: :error + + defp maybe_pad_usec({:ok, struct}), do: {:ok, pad_usec(struct)} + defp maybe_pad_usec(:error), do: :error + + defp truncate_usec(nil), do: nil + defp truncate_usec(%{microsecond: {0, 0}} = struct), do: struct + defp truncate_usec(struct), do: %{struct | microsecond: {0, 0}} + + defp pad_usec(nil), do: nil + defp pad_usec(%{microsecond: {_, 6}} = struct), do: struct + + defp pad_usec(%{microsecond: {microsecond, _}} = struct), + do: %{struct | microsecond: {microsecond, 6}} + + defp check_utc_timezone!(%{time_zone: "Etc/UTC"} = datetime, _kind), do: datetime + + defp check_utc_timezone!(datetime, kind) do + raise ArgumentError, + "#{inspect(kind)} expects the 
time zone to be \"Etc/UTC\", got `#{inspect(datetime)}`" + end + + defp check_usec!(%{microsecond: {_, 6}} = datetime, _kind), do: datetime + + defp check_usec!(datetime, kind) do + raise ArgumentError, + "#{inspect(kind)} expects microsecond precision, got: #{inspect(datetime)}" + end + + defp check_no_usec!(%{microsecond: {0, 0}} = datetime, _kind), do: datetime + + defp check_no_usec!(%struct{} = datetime, kind) do + raise ArgumentError, """ + #{inspect(kind)} expects microseconds to be empty, got: #{inspect(datetime)} + + Use `#{inspect(struct)}.truncate(#{kind}, :second)` (available in Elixir v1.6+) to remove microseconds. + """ + end + + defp check_decimal(%Decimal{coef: coef} = decimal, _) when is_integer(coef), do: {:ok, decimal} + defp check_decimal(_decimal, false), do: :error + + defp check_decimal(decimal, true) do + raise ArgumentError, """ + #{inspect(decimal)} is not allowed for type :decimal + + `+Infinity`, `-Infinity`, and `NaN` values are not supported, even though the `Decimal` library handles them. \ + To support them, you can create a custom type. + """ + end +end diff --git a/deps/ecto/lib/ecto/uuid.ex b/deps/ecto/lib/ecto/uuid.ex new file mode 100644 index 0000000..75a5974 --- /dev/null +++ b/deps/ecto/lib/ecto/uuid.ex @@ -0,0 +1,234 @@ +defmodule Ecto.UUID do + @moduledoc """ + An Ecto type for UUID strings. + """ + + use Ecto.Type + + @typedoc """ + A hex-encoded UUID string. + """ + @type t :: <<_::288>> + + @typedoc """ + A raw binary representation of a UUID. + """ + @type raw :: <<_::128>> + + @doc false + def type, do: :uuid + + @doc """ + Casts either a string in the canonical, human-readable UUID format or a + 16-byte binary to a UUID in its canonical, human-readable UUID format. + + If `uuid` is neither of these, `:error` will be returned. 
+ + Since both binaries and strings are represented as binaries, this means some + strings you may not expect are actually also valid UUIDs in their binary form + and so will be casted into their string form. + + If you need further-restricted behavior or validation, you should define your + own custom `Ecto.Type`. There is also `Ecto.UUID.load/1` if you only want to + process `raw` UUIDs, which may be a more suitable reverse operation to + `Ecto.UUID.dump/1`. + + ## Examples + + iex> Ecto.UUID.cast(<<0x60, 0x1D, 0x74, 0xE4, 0xA8, 0xD3, 0x4B, 0x6E, + ...> 0x83, 0x65, 0xED, 0xDB, 0x4C, 0x89, 0x33, 0x27>>) + {:ok, "601d74e4-a8d3-4b6e-8365-eddb4c893327"} + + iex> Ecto.UUID.cast("601d74e4-a8d3-4b6e-8365-eddb4c893327") + {:ok, "601d74e4-a8d3-4b6e-8365-eddb4c893327"} + + iex> Ecto.UUID.cast("warehouse worker") + {:ok, "77617265-686f-7573-6520-776f726b6572"} + """ + @spec cast(t | raw | any) :: {:ok, t} | :error + def cast(uuid) + def cast( + <> + ) do + <> + catch + :error -> :error + else + hex_uuid -> {:ok, hex_uuid} + end + + def cast(<<_::128>> = raw_uuid), do: {:ok, encode(raw_uuid)} + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. + """ + @spec cast!(t | raw | any) :: t + def cast!(uuid) do + case cast(uuid) do + {:ok, hex_uuid} -> hex_uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: uuid + end + end + + @compile {:inline, c: 1} + + defp c(?0), do: ?0 + defp c(?1), do: ?1 + defp c(?2), do: ?2 + defp c(?3), do: ?3 + defp c(?4), do: ?4 + defp c(?5), do: ?5 + defp c(?6), do: ?6 + defp c(?7), do: ?7 + defp c(?8), do: ?8 + defp c(?9), do: ?9 + defp c(?A), do: ?a + defp c(?B), do: ?b + defp c(?C), do: ?c + defp c(?D), do: ?d + defp c(?E), do: ?e + defp c(?F), do: ?f + defp c(?a), do: ?a + defp c(?b), do: ?b + defp c(?c), do: ?c + defp c(?d), do: ?d + defp c(?e), do: ?e + defp c(?f), do: ?f + defp c(_), do: throw(:error) + + @doc """ + Converts a string representing a UUID into a raw binary. 
+ """ + @spec dump(uuid_string :: t | any) :: {:ok, raw} | :error + def dump(uuid_string) + def dump( + <> + ) do + <> + catch + :error -> :error + else + raw_uuid -> {:ok, raw_uuid} + end + + def dump(_), do: :error + + @compile {:inline, d: 1} + + defp d(?0), do: 0 + defp d(?1), do: 1 + defp d(?2), do: 2 + defp d(?3), do: 3 + defp d(?4), do: 4 + defp d(?5), do: 5 + defp d(?6), do: 6 + defp d(?7), do: 7 + defp d(?8), do: 8 + defp d(?9), do: 9 + defp d(?A), do: 10 + defp d(?B), do: 11 + defp d(?C), do: 12 + defp d(?D), do: 13 + defp d(?E), do: 14 + defp d(?F), do: 15 + defp d(?a), do: 10 + defp d(?b), do: 11 + defp d(?c), do: 12 + defp d(?d), do: 13 + defp d(?e), do: 14 + defp d(?f), do: 15 + defp d(_), do: throw(:error) + + @doc """ + Same as `dump/1` but raises `Ecto.ArgumentError` on invalid arguments. + """ + @spec dump!(t | any) :: raw + def dump!(uuid) do + case dump(uuid) do + {:ok, raw_uuid} -> raw_uuid + :error -> raise ArgumentError, "cannot dump given UUID to binary: #{inspect(uuid)}" + end + end + + @doc """ + Converts a binary UUID into a string. + """ + @spec load(raw | any) :: {:ok, t} | :error + def load(<<_::128>> = raw_uuid), do: {:ok, encode(raw_uuid)} + + def load(<<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> = string) do + raise ArgumentError, + "trying to load string UUID as Ecto.UUID: #{inspect(string)}. " <> + "Maybe you wanted to declare :uuid as your database field?" + end + + def load(_), do: :error + + @doc """ + Same as `load/1` but raises `Ecto.ArgumentError` on invalid arguments. + """ + @spec load!(raw | any) :: t + def load!(value) do + case load(value) do + {:ok, hex_uuid} -> hex_uuid + :error -> raise ArgumentError, "cannot load given binary as UUID: #{inspect(value)}" + end + end + + @doc """ + Generates a random, version 4 UUID. + """ + @spec generate() :: t + def generate(), do: encode(bingenerate()) + + @doc """ + Generates a random, version 4 UUID in the binary format. 
+ """ + @spec bingenerate() :: raw + def bingenerate() do + <> = :crypto.strong_rand_bytes(16) + <> + end + + # Callback invoked by autogenerate fields. + @doc false + def autogenerate, do: generate() + + @spec encode(raw) :: t + defp encode( + <> + ) do + <> + end + + @compile {:inline, e: 1} + + defp e(0), do: ?0 + defp e(1), do: ?1 + defp e(2), do: ?2 + defp e(3), do: ?3 + defp e(4), do: ?4 + defp e(5), do: ?5 + defp e(6), do: ?6 + defp e(7), do: ?7 + defp e(8), do: ?8 + defp e(9), do: ?9 + defp e(10), do: ?a + defp e(11), do: ?b + defp e(12), do: ?c + defp e(13), do: ?d + defp e(14), do: ?e + defp e(15), do: ?f +end diff --git a/deps/ecto/lib/mix/ecto.ex b/deps/ecto/lib/mix/ecto.ex new file mode 100644 index 0000000..7f5a37a --- /dev/null +++ b/deps/ecto/lib/mix/ecto.ex @@ -0,0 +1,148 @@ +defmodule Mix.Ecto do + @moduledoc """ + Conveniences for writing Ecto related Mix tasks. + """ + + @doc """ + Parses the repository option from the given command line args list. + + If no repo option is given, it is retrieved from the application environment. + """ + @spec parse_repo([term]) :: [Ecto.Repo.t] + def parse_repo(args) do + parse_repo(args, []) + end + + defp parse_repo([key, value|t], acc) when key in ~w(--repo -r) do + parse_repo t, [Module.concat([value])|acc] + end + + defp parse_repo([_|t], acc) do + parse_repo t, acc + end + + defp parse_repo([], []) do + apps = + if apps_paths = Mix.Project.apps_paths() do + Enum.filter(Mix.Project.deps_apps(), &is_map_key(apps_paths, &1)) + else + [Mix.Project.config()[:app]] + end + + apps + |> Enum.flat_map(fn app -> + Application.load(app) + Application.get_env(app, :ecto_repos, []) + end) + |> Enum.uniq() + |> case do + [] -> + Mix.shell().error """ + warning: could not find Ecto repos in any of the apps: #{inspect apps}. + + You can avoid this warning by passing the -r flag or by setting the + repositories managed by those applications in your config/config.exs: + + config #{inspect hd(apps)}, ecto_repos: [...] 
+ """ + [] + repos -> + repos + end + end + + defp parse_repo([], acc) do + Enum.reverse(acc) + end + + @doc """ + Ensures the given module is an Ecto.Repo. + """ + @spec ensure_repo(module, list) :: Ecto.Repo.t + def ensure_repo(repo, args) do + # Do not pass the --force switch used by some tasks downstream + args = List.delete(args, "--force") + Mix.Task.run("app.config", args) + + case Code.ensure_compiled(repo) do + {:module, _} -> + if function_exported?(repo, :__adapter__, 0) do + repo + else + Mix.raise "Module #{inspect repo} is not an Ecto.Repo. " <> + "Please configure your app accordingly or pass a repo with the -r option." + end + + {:error, error} -> + Mix.raise "Could not load #{inspect repo}, error: #{inspect error}. " <> + "Please configure your app accordingly or pass a repo with the -r option." + end + end + + @doc """ + Asks if the user wants to open a file based on ECTO_EDITOR. + + By default, it attempts to open the file and line using the + `file:line` notation. For example, if your editor is called + `subl`, it will open the file as: + + subl path/to/file:line + + It is important that you choose an editor command that does + not block nor that attempts to run an editor directly in the + terminal. Command-line based editors likely need extra + configuration so they open up the given file and line in a + separate window. + + Custom editors are supported by using the `__FILE__` and + `__LINE__` notations, for example: + + ECTO_EDITOR="my_editor +__LINE__ __FILE__" + + and Elixir will properly interpolate values. 
+ + """ + @spec open?(binary, non_neg_integer) :: boolean + def open?(file, line \\ 1) do + editor = System.get_env("ECTO_EDITOR") || "" + + if editor != "" do + command = + if editor =~ "__FILE__" or editor =~ "__LINE__" do + editor + |> String.replace("__FILE__", inspect(file)) + |> String.replace("__LINE__", Integer.to_string(line)) + else + "#{editor} #{inspect(file)}:#{line}" + end + + Mix.shell().cmd(command) + true + else + false + end + end + + @doc """ + Gets a path relative to the application path. + + Raises on umbrella application. + """ + def no_umbrella!(task) do + if Mix.Project.umbrella?() do + Mix.raise "Cannot run task #{inspect task} from umbrella project root. " <> + "Change directory to one of the umbrella applications and try again" + end + end + + @doc """ + Returns `true` if module implements behaviour. + """ + def ensure_implements(module, behaviour, message) do + all = Keyword.take(module.__info__(:attributes), [:behaviour]) + unless [behaviour] in Keyword.values(all) do + Mix.raise "Expected #{inspect module} to implement #{inspect behaviour} " <> + "in order to #{message}" + end + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.create.ex b/deps/ecto/lib/mix/tasks/ecto.create.ex new file mode 100644 index 0000000..c9a03f1 --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.create.ex @@ -0,0 +1,77 @@ +defmodule Mix.Tasks.Ecto.Create do + use Mix.Task + import Mix.Ecto + + @shortdoc "Creates the repository storage" + + @switches [ + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean + ] + + @aliases [ + r: :repo, + q: :quiet + ] + + @moduledoc """ + Create the storage for the given repository. + + The repositories to create are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. 
+ + Since Ecto tasks can only be executed once, if you need to create + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + ## Examples + + $ mix ecto.create + $ mix ecto.create -r Custom.Repo + + ## Command line options + + * `-r`, `--repo` - the repo to create + * `--quiet` - do not log output + * `--no-compile` - do not compile before creating + * `--no-deps-check` - do not compile before creating + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + Enum.each(repos, fn repo -> + ensure_repo(repo, args) + + ensure_implements( + repo.__adapter__(), + Ecto.Adapter.Storage, + "create storage for #{inspect(repo)}" + ) + + case repo.__adapter__().storage_up(repo.config()) do + :ok -> + unless opts[:quiet] do + Mix.shell().info("The database for #{inspect(repo)} has been created") + end + + {:error, :already_up} -> + unless opts[:quiet] do + Mix.shell().info("The database for #{inspect(repo)} has already been created") + end + + {:error, term} when is_binary(term) -> + Mix.raise("The database for #{inspect(repo)} couldn't be created: #{term}") + + {:error, term} -> + Mix.raise("The database for #{inspect(repo)} couldn't be created: #{inspect(term)}") + end + end) + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.drop.ex b/deps/ecto/lib/mix/tasks/ecto.drop.ex new file mode 100644 index 0000000..69275c8 --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.drop.ex @@ -0,0 +1,106 @@ +defmodule Mix.Tasks.Ecto.Drop do + use Mix.Task + import Mix.Ecto + + @shortdoc "Drops the repository storage" + @default_opts [force: false, force_drop: false] + + @aliases [ + f: :force, + q: :quiet, + r: :repo + ] + + @switches [ + force: :boolean, + force_drop: :boolean, + quiet: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean + ] + + @moduledoc """ + Drop the storage for the given repository. 
+ + The repositories to drop are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + Since Ecto tasks can only be executed once, if you need to drop + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + ## Examples + + $ mix ecto.drop + $ mix ecto.drop -r Custom.Repo + + ## Command line options + + * `-r`, `--repo` - the repo to drop + * `-q`, `--quiet` - run the command quietly + * `-f`, `--force` - do not ask for confirmation when dropping the database. + Configuration is asked only when `:start_permanent` is set to true + (typically in production) + * `--force-drop` - force the database to be dropped even + if it has connections to it (requires PostgreSQL 13+) + * `--no-compile` - do not compile before dropping + * `--no-deps-check` - do not compile before dropping + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + opts = Keyword.merge(@default_opts, opts) + + Enum.each(repos, fn repo -> + ensure_repo(repo, args) + + ensure_implements( + repo.__adapter__(), + Ecto.Adapter.Storage, + "drop storage for #{inspect(repo)}" + ) + + if skip_safety_warnings?() or + opts[:force] or + Mix.shell().yes?( + "Are you sure you want to drop the database for repo #{inspect(repo)}?" + ) do + drop_database(repo, opts) + end + end) + end + + defp skip_safety_warnings? 
do + Mix.Project.config()[:start_permanent] != true + end + + defp drop_database(repo, opts) do + config = + opts + |> Keyword.take([:force_drop]) + |> Keyword.merge(repo.config()) + + case repo.__adapter__().storage_down(config) do + :ok -> + unless opts[:quiet] do + Mix.shell().info("The database for #{inspect(repo)} has been dropped") + end + + {:error, :already_down} -> + unless opts[:quiet] do + Mix.shell().info("The database for #{inspect(repo)} has already been dropped") + end + + {:error, term} when is_binary(term) -> + Mix.raise("The database for #{inspect(repo)} couldn't be dropped: #{term}") + + {:error, term} -> + Mix.raise("The database for #{inspect(repo)} couldn't be dropped: #{inspect(term)}") + end + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.ex b/deps/ecto/lib/mix/tasks/ecto.ex new file mode 100644 index 0000000..14d10da --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.ex @@ -0,0 +1,30 @@ +defmodule Mix.Tasks.Ecto do + use Mix.Task + + @shortdoc "Prints Ecto help information" + + @moduledoc """ + Prints Ecto tasks and their information. + + $ mix ecto + + """ + + @impl true + def run(args) do + {_opts, args} = OptionParser.parse!(args, strict: []) + + case args do + [] -> general() + _ -> Mix.raise "Invalid arguments, expected: mix ecto" + end + end + + defp general() do + Application.ensure_all_started(:ecto) + Mix.shell().info "Ecto v#{Application.spec(:ecto, :vsn)}" + Mix.shell().info "A toolkit for data mapping and language integrated query for Elixir." 
+ Mix.shell().info "\nAvailable tasks:\n" + Mix.Tasks.Help.run(["--search", "ecto."]) + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex b/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex new file mode 100644 index 0000000..e6f7a31 --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex @@ -0,0 +1,110 @@ +defmodule Mix.Tasks.Ecto.Gen.Repo do + use Mix.Task + + import Mix.Ecto + import Mix.Generator + + @shortdoc "Generates a new repository" + + @switches [ + repo: [:string, :keep], + ] + + @aliases [ + r: :repo, + ] + + @moduledoc """ + Generates a new repository. + + The repository will be placed in the `lib` directory. + + ## Examples + + $ mix ecto.gen.repo -r Custom.Repo + + This generator will automatically open the config/config.exs + after generation if you have `ECTO_EDITOR` set in your environment + variable. + + ## Command line options + + * `-r`, `--repo` - the repo to generate + + """ + + @impl true + def run(args) do + no_umbrella!("ecto.gen.repo") + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + repo = + case Keyword.get_values(opts, :repo) do + [] -> Mix.raise "ecto.gen.repo expects the repository to be given as -r MyApp.Repo" + [repo] -> Module.concat([repo]) + [_ | _] -> Mix.raise "ecto.gen.repo expects a single repository to be given" + end + + config = Mix.Project.config() + underscored = Macro.underscore(inspect(repo)) + + base = Path.basename(underscored) + file = Path.join("lib", underscored) <> ".ex" + app = config[:app] || :YOUR_APP_NAME + opts = [mod: repo, app: app, base: base] + + create_directory Path.dirname(file) + create_file file, repo_template(opts) + config_path = config[:config_path] || "config/config.exs" + + case File.read(config_path) do + {:ok, contents} -> + check = String.contains?(contents, "import Config") + config_first_line = get_first_config_line(check) <> "\n" + new_contents = config_first_line <> "\n" <> config_template(opts) + Mix.shell().info [:green, "* updating ", 
:reset, config_path] + File.write! config_path, String.replace(contents, config_first_line, new_contents) + + {:error, _} -> + create_file config_path, "import Config\n\n" <> config_template(opts) + end + + open?(config_path, 3) + + Mix.shell().info """ + Don't forget to add your new repo to your supervision tree + (typically in lib/#{app}/application.ex): + + def start(_type, _args) do + children = [ + #{inspect repo}, + ] + + And to add it to the list of Ecto repositories in your + configuration files (so Ecto tasks work as expected): + + config #{inspect app}, + ecto_repos: [#{inspect repo}] + + """ + end + + defp get_first_config_line(true), do: "import Config" + defp get_first_config_line(false), do: "use Mix.Config" + + embed_template :repo, """ + defmodule <%= inspect @mod %> do + use Ecto.Repo, + otp_app: <%= inspect @app %>, + adapter: Ecto.Adapters.Postgres + end + """ + + embed_template :config, """ + config <%= inspect @app %>, <%= inspect @mod %>, + database: "<%= @app %>_<%= @base %>", + username: "user", + password: "pass", + hostname: "localhost" + """ +end diff --git a/deps/ecto/mix.exs b/deps/ecto/mix.exs new file mode 100644 index 0000000..10ab424 --- /dev/null +++ b/deps/ecto/mix.exs @@ -0,0 +1,182 @@ +defmodule Ecto.MixProject do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/ecto" + @version "3.13.6" + + def project do + [ + app: :ecto, + version: @version, + elixir: "~> 1.14", + deps: deps(), + consolidate_protocols: Mix.env() != :test, + elixirc_paths: elixirc_paths(Mix.env()), + + # Hex + description: "A toolkit for data mapping and language integrated query for Elixir", + package: package(), + + # Docs + name: "Ecto", + docs: docs() + ] + end + + def application do + [ + extra_applications: [:logger, :crypto, :eex], + mod: {Ecto.Application, []} + ] + end + + defp deps do + [ + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:decimal, "~> 2.0 or ~> 3.0"}, + {:jason, "~> 1.0", optional: true}, + {:ex_doc, "~> 0.38", only: :docs} + ] 
+ end + + defp package do + [ + maintainers: ["Eric Meadows-Jönsson", "José Valim", "Felipe Stival", "Greg Rychlewski"], + licenses: ["Apache-2.0"], + links: %{ + "GitHub" => @source_url, + "Changelog" => "https://hexdocs.pm/ecto/changelog.html" + }, + files: + ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ + ~w(integration_test/cases integration_test/support) + ] + end + + defp docs do + [ + search: [ + %{ + name: "Latest", + help: "Search latest versions of Ecto + Ecto.SQL", + packages: [:ecto, :ecto_sql] + }, + %{ + name: "Current version", + help: "Search only this project" + } + ], + main: "Ecto", + source_ref: "v#{@version}", + logo: "guides/images/e.png", + extra_section: "GUIDES", + source_url: @source_url, + skip_undefined_reference_warnings_on: ["CHANGELOG.md"], + extras: extras(), + groups_for_extras: groups_for_extras(), + groups_for_modules: [ + # Ecto, + # Ecto.Changeset, + # Ecto.Multi, + # Ecto.Query, + # Ecto.Repo, + # Ecto.Schema, + # Ecto.Schema.Metadata, + # Mix.Ecto, + + Types: [ + Ecto.Enum, + Ecto.ParameterizedType, + Ecto.Type, + Ecto.UUID + ], + "Query APIs": [ + Ecto.Query.API, + Ecto.Query.WindowAPI, + Ecto.Queryable, + Ecto.SubQuery + ], + "Adapter specification": [ + Ecto.Adapter, + Ecto.Adapter.Queryable, + Ecto.Adapter.Schema, + Ecto.Adapter.Storage, + Ecto.Adapter.Transaction + ], + "Relation structs": [ + Ecto.Association.BelongsTo, + Ecto.Association.Has, + Ecto.Association.HasThrough, + Ecto.Association.ManyToMany, + Ecto.Association.NotLoaded, + Ecto.Embedded + ] + ], + before_closing_body_tag: fn + :html -> + """ + + + """ + + _ -> + "" + end + ] + end + + def extras() do + [ + "guides/introduction/Getting Started.md", + "guides/howtos/Aggregates and subqueries.md", + "guides/howtos/Constraints and Upserts.md", + "guides/howtos/Data mapping and validation.md", + "guides/howtos/Duration Types with Postgrex.md", + "guides/howtos/Dynamic queries.md", + "guides/howtos/Embedded Schemas.md", + "guides/howtos/Multi tenancy 
with query prefixes.md", + "guides/howtos/Multi tenancy with foreign keys.md", + "guides/howtos/Self-referencing many to many.md", + "guides/howtos/Polymorphic associations with many to many.md", + "guides/howtos/Replicas and dynamic repositories.md", + "guides/howtos/Schemaless queries.md", + "guides/howtos/Test factories.md", + "guides/testing/Testing with Ecto.md", + "guides/cheatsheets/crud.cheatmd", + "guides/cheatsheets/associations.cheatmd", + "CHANGELOG.md" + ] + end + + defp groups_for_extras do + [ + Introduction: ~r/guides\/introduction\/.?/, + Cheatsheets: ~r/cheatsheets\/.?/, + "How-To's": ~r/guides\/howtos\/.?/, + Testing: ~r/testing\/.?/ + ] + end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] +end diff --git a/deps/ecto_sql/.formatter.exs b/deps/ecto_sql/.formatter.exs new file mode 100644 index 0000000..3497908 --- /dev/null +++ b/deps/ecto_sql/.formatter.exs @@ -0,0 +1,36 @@ +locals_without_parens = [ + add: 2, + add: 3, + add_if_not_exists: 2, + add_if_not_exists: 3, + alter: 2, + create: 1, + create: 2, + create_if_not_exists: 1, + create_if_not_exists: 2, + drop: 1, + drop: 2, + drop_if_exists: 1, + drop_if_exists: 2, + execute: 1, + execute: 2, + modify: 2, + modify: 3, + remove: 1, + remove: 2, + remove: 3, + remove_if_exists: 1, + remove_if_exists: 2, + rename: 2, + rename: 3, + timestamps: 1 +] + +[ + import_deps: [:ecto], + locals_without_parens: locals_without_parens, + export: [ + locals_without_parens: locals_without_parens + ], + inputs: ["{lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/ecto_sql/.hex b/deps/ecto_sql/.hex new file mode 100644 index 0000000..db3e81c Binary files /dev/null and b/deps/ecto_sql/.hex differ diff --git a/deps/ecto_sql/CHANGELOG.md b/deps/ecto_sql/CHANGELOG.md new file mode 100644 index 0000000..9e1ddfb --- /dev/null +++ b/deps/ecto_sql/CHANGELOG.md @@ -0,0 +1,579 @@ +# Changelog for v3.x + +## v3.13.5 (2026-03-03) + + * [postgrex] Map `:restrict_violation` to 
`:foreign_key` constraint (required by PostgreSQL 18) + +## v3.13.4 (2025-12-27) + +### Bug fixes + + * [mysql] Do not crash `mix ecto.load` with large dumped databases + +## v3.13.3 (2025-12-08) + +### Enhancements + + * [sql] Tag generated functions as `:generated` + * [sql] Add `:wrap_in_transaction` option to explain + +### Bug fixes + + * [mysql] Fix `structure_load/2` for MySQL 9.4+ + +## v3.13.2 (2025-06-24) + +### Enhancements + + * [sandbox] Allow passing through opts in `Ecto.Adapters.SQL.Sandbox.allow/4` calls + * [sql] Add support for `ON DELETE SET DEFAULT` + +### Bug fixes + + * [postgres] Fix nested array generated time columns + +## v3.13.1 (2025-06-20) + +### Bug fixes + + * [postgres] Fix nested array generated columns + +## v3.13.0 (2025-06-18) + +### Enhancements + + * [Ecto.Migration] Add support for index directions + * [sql] Support `:log_stacktrace_mfa` for filtering or modifying stacktrace-derived info in query logs + * [mysql] Support arrays using JSON for MariaDB + * [mysql] Allow to specify `:prepare` per operation + * [postgres] Add support for collations in Postgres + * [postgres] Allow source fields in `json_extract_path` + +## v3.12.1 (2024-10-07) + +### Enhancements + + * [sql] Support `:pool_count` option + +## v3.12.0 (2024-08-12) + +### Enhancements + + * [Ecto.Migration] Add `Ecto.Migration.remove_if_exists/1` + * [Ecto.Migrator] Warn for migration files that end in `.ex` + * [sql] Support for subqueries in order_bys and group_bys + * [mysql] Add check constraints for MySQL + * [postgres] Add native bitstring support to Postgres + * [postgres] Add support for `:duration` type + * [postgres] Add `:plan` explain option for Postgres + * [tds] Allow passing `%Tds.Parameter` structs as params for named parameter usage in `query` + +### Bug fix + + * [mysql] Type cast of integers in MySQL should use signed integers + +## v3.11.3 (2024-06-13) + +### Enhancements + + * [mysql] Relax `myxql` dependency + +## v3.11.2 (2024-05-18) + +### 
Enhancements + + * [postgres] Relax `postgrex` dependency + +## v3.11.1 (2023-12-07) + +### Enhancements + + * [Ecto.Migration] Add `:generated` option to columns + * [Ecto.Migration] Add index storage parameters (via :options) for Postgres + +### Bug fixes + + * [Ecto.Migration] Support `:prefix` on index rename + * [Ecto.Migrator] Stop runner if migration fails + +## v3.11.0 (2023-11-14) + +### Enhancements + + * [mix ecto.migrate] Add `--log-level` to ecto.migrate + * [mix ecto.rollback] Add `--log-level` to ecto.rollback + * [sql] Support fragment splicing + * [sql] Support data-modifying CTEs + * [sql] Add source to insert_ll, insert, update, and delete telemetry events + * [tds] Include `exec` before stored procedure for TDS (for earlier SQLServer versions) + +### Bug fixes + + * [mix ecto.migrate] Read existing dynamic repo in migrations + * [mix ecto.migrate] Don't add primary key on remove migration + +## v3.10.2 (2023-08-21) + +### Enhancements + + * [migrations] Handle `from: {reference, opts}` in FK migrations + * [mysql] Support MariaDB versioned tables + +### Bug fixes + + * [migrations] Don't add comment to removed columns + * [migrations] Ensure module is loaded before checking for migration + * [mysql] Fix for casting boolean values in MySQL + +## v3.10.1 (2023-04-11) + +### Enhancements + + * [postgres] Allow Postgrex v0.17.x + +## v3.10.0 (2023-04-10) + +### Enhancements + + * [Ecto.Migrator] Allow running the migrator in your supervision tree + * [Ecto.Migrator] Allow renaming an index + * [Ecto.Migrator] Add `execute_file/1` and `execute_file/2` + * [mix ecto.dump] Support dumping multiple prefixes on PostgreSQL and MySQL + * [mysql] Improve constraint matching support on alternative implementations + * [postgres] Allow `CASCADE` when dropping a constraint on postgres + +### Bug fixes + + * [mix ecto.load] Suppress query logs in mix ecto.load when quiet flag is given + +## v3.9.2 (2022-12-20) + +### Enhancements + + * [migrator] Raise if target 
version in `to`/`exclusive_to` is not an integer + * [mysql] Add support for cross lateral joins + * [postgres] Add support for cross lateral joins + * [postgres] Add support for materialized CTEs + * [telemetry] Send `cast_params` metadata to telemetry events + +## v3.9.1 (2022-11-18) + +### Enhancements + + * [mysql] Support `:format` option on `explain` + * [postgres] Permit outer joins when using `update_all` + * [sql] Add support for `ONLY` in index creation + +### Bug fixes + + * [mysql] Ensure locks are quoted + * [mysql] Do not crash on `mix ecto.drop` when the database is unreachable + * [postgres] Fix empty array compare in PostgreSQL + * [sql] Allow function sources whose name begins with 'select' + +## v3.9.0 (2022-09-27) + +### Enhancements + + * [migrations] Support `primary_key` configuration options in `table` + * [migrations] Add `:nulls_distinct` option for unique indexes + * [postgres] Support the use of advisory locks for migrations + * [sql] Add `dump_cmd` to `postgrex` and `myxql` adapters + * [sql] Log human-readable UUIDs by using pre-dumped query parameters + * [sql] Support select aliases from `selected_as/1` and `selected_as/2` + * [telemetry] Emit `schema_migration: true` under `telemetry_options` + +## v3.8.3 (2022-06-04) + +### Enhancements + + * [sql] Implement `literal/1` support in fragments + +## v3.8.2 (2022-05-18) + +### Bug fixes + + * [postgres] Fix possible breaking change on `json_extract_path` for boolean values introduced in v3.8.0 + * [sql] Colorize stacktrace and use `:` before printing line number + +## v3.8.1 (2022-04-29) + +### Bug fixes + + * [mysql] Raise on a subquery with parameter on MySQL join + * [sql] Do not invoke dynamic repositories in direct `Ecto.Adapters.SQL` operations + +## v3.8.0 (2022-04-26) + +### Enhancements + + * [migrations] Support `--to-exclusive` in `mix ecto.migrate` and `mix ecto.rollback` + * [mysql] Add `:comment` support on MySQL migrations + * [postgres] Support `:prepare` option per 
operation + * [postgres] Optimize `json_extract_path` comparisons in PostgreSQL + * [sql] Optionally log last known call, publish stacktrace in telemetry + * [telemetry] Include `:repo` option in telemetry events + +### Bug fixes + + * [sql] Ensure `:timeout` option is respected in `Ecto.Adapters.SQL.explain/3` + +## v3.7.2 (2022-01-23) + +### Enhancements + + * [adapters] Support latest `myxql` and `postgrex` + +## v3.7.1 (2021-10-12) + +### Enhancements + + * [migrations] Add `:cascade` option to `drop` + * [migrations] Support `--prefix` in `mix ecto.migrations` + * [migrations] Add `--log-migrator-sql` and `--log-migrations-sql` + * [mysql] Cache more insert/update queries and allow `:cache_statement` to be set + * [mssql] Support more recent tds versions + +### Bug fixes + + * [migrations] Consider the database prefix when locking tables + +## v3.7.0 (2021-08-19) + +### Enhancements + + * [mysql] Support lateral joins + +### Bug fixes + + * [sql] Fix CTE subqueries not finding parent bindings + +## v3.6.2 (2021-05-28) + +### Bug fixes + + * [migration] Improve error message on invalid migration type + * [postgres] Avoid duplicate order_by with distinct + * [sql] Implement new checked_out? callback required by latest Ecto + +## v3.6.1 (2021-04-12) + +### Bug fixes + + * [migrations] Ensure migration_source option is respected in PostgreSQL adapter + +## v3.6.0 (2021-04-03) + +### Bug fixes + + * [migrations] Fix a bug where the migration lock would not apply on the first migration (when the schema migrations table is empty). 
This fix changes how migration tables are locked, therefore let us know of any possible regressions in your workflow + +### Enhancements + + * [migrations] Allow generating migrations from within umbrella app + * [postgres] Add `:format` option to PostgreSQL explain + * [postgres] Support `:socket_dir` connection option when using `mix ecto.load` or `mix ecto.dump` + * [sandbox] Support locally registered processes in `allow/3` + * [storage] Do not fail `storage_up` if the user has access to an already-created database + * [tds] Support for `:inner_lateral` and `:left_lateral` + +## v3.5.4 (2020-01-20) + +### Enhancements + + * [mysql] Support defaults for JSON columns + * [postgres] Allow Postgrex v1.0 + +## v3.5.3 (2020-10-27) + +### Enhancements + + * [migrations] Pass `:schema_migration` option to repo operations for `prepare_query` checks + * [psql] Support `:force_drop` configuration to force a DB to be dropped + +## v3.5.2 (2020-10-24) + +### Enhancements + + * [migrations] Support `:with` option in `references` for composite foreign keys + * [migrations] Support `:match` option in `references` + * [tds] Support TDS 3-part and 4-part prefixes + +## v3.5.1 (2020-10-12) + +### Enhancements + + * [tds] Support explain plan for the TDS adapter + +### Bug fix + + * [migrations] Reload all migrations once the lock is free to avoid running the same migration more than once + * [query] Support nested subqueries + +## v3.5.0 (2020-10-03) + +### Enhancements + + * [migrations] Add option to skip schema migrations table checks + * [migrations] Add `:migration_repo` configuration to allow a different repository to host the schema migrations + * [migrations] Support `validate: false` on references and constraints + * [migrations] Accept `:migration_primary_key` as false and add `:migration_foreign_key` repo config + * [postgres] Support for `:identity` key types in Postgres 10 or later + * [postgres] Use IF NOT EXIST when creating index with `create_if_not_exists`, this 
requires PG 9.5+ or later + * [repo] Support `Repo.explain(:all | :update_all | :delete_all, query)` for Ecto adapters + * [sandbox] Allow for dynamic repos to be checked out in sandbox + +### Bug fixes + + * [migrations] Flush migration commands before executing `before_commit` callback + * [migrations] Do not swallow errors when migration lock is disabled + +## v3.4.5 (2020-07-05) + +### Bug fixes + + * [ecto] Fix warnings on Elixir v1.11 + * [migrations] Migration prefix should have higher preference than `default_options` + +## v3.4.4 (2020-05-19) + +### Enhancements + + * [sandbox] Add `Ecto.Adapters.SQL.start_owner!/2` and `Ecto.Adapters.SQL.stop_owner/1` + * [myxql] Decode BIT columns when using MyXQL and `:boolean` type + * [migrations] Use one line per migration in the schema dump + +## v3.4.3 (2020-04-27) + +### Bug fixes + + * [ecto] Support `as` and `parent_as` from Ecto v3.4.3+ + * [ecto] Support `x in subquery(query)` from Ecto v3.4.3+ + +## v3.4.2 (2020-04-02) + +### Bug fixes + + * [myxql] A binary with size should be a varbinary + * [mssql] A binary without size should be a varbinary(max) + +## v3.4.1 (2020-03-25) + +### Bug fixes + + * [myxql] Assume the reference does not change in MyXQL and prepare for v0.4.0 + +## v3.4.0 (2020-03-24) + +### Enhancements + + * [adapters] Support Ecto's v3.4 `json_extract_path/2` + * [migrations] Support multiple migration paths to be given with `--migration-path` + * [mssql] Add built-in support to MSSQL via the TDS adapter + * [repo] Support custom options on telemetry + +## v3.3.4 (2020-02-14) + +### Enhancements + + * [adapters] Support fragments in locks + * [migration] Add `:include` option to support covering indexes + +## v3.3.3 (2020-01-28) + +### Enhancements + + * [myxql] Allow not setting the encoding when creating a database + +### Bug fixes + + * [myxql] Removing prefixed table name from constraints on latest MySQL versions + * [sql] Fix precedence of `is_nil` when inside a comparison operator + +## 
v3.3.2 (2019-12-15) + +### Bug fixes + + * [adapters] Start StorageSupervisor before using it + +## v3.3.1 (2019-12-15) + +### Bug fixes + + * [adapters] Do not leak PIDs on storage commands + * [migrations] Use :migration_primary_key in create/1 + +## v3.3.0 (2019-12-11) + +### Enhancements + + * [ecto] Upgrade and support Ecto v3.3 + * [repo] Include `:idle_time` on telemetry measurements + * [migration] Support anonymous functions in `Ecto.Migration.execute/2` + +### Bug fixes + + * [migration] Ensure that flush() will raise on rollback if called from `change/0` + +## v3.2.2 (2019-11-25) + +### Enhancements + + * [mysql] Support myxql v0.3 + +## v3.2.1 (2019-11-02) + +### Enhancements + + * [migration] Support anonymous functions in execute + +### Bug fixes + + * [mix ecto.create] Change default charset in MyXQL to utf8mb4 + +## v3.2.0 (2019-09-07) + +This new version requires Elixir v1.6+. Note also the previously soft-deprecated `Ecto.Adapters.MySQL` has been removed in favor of `Ecto.Adapters.MyXQL`. We announced the intent to remove `Ecto.Adapters.MySQL` back in v3.0 and `Ecto.Adapters.MyXQL` has been tested since then and ready for prime time since v3.1. 
+ +### Enhancements + + * [sql] Use `get_dynamic_repo` on SQL-specific functions + * [sql] Respect `Ecto.Type.embed_as/2` choice when loading/dumping embeds (Ecto 3.2+ compat) + * [sql] Support CTE expressions (Ecto 3.2+ compat) + +### Bug fixes + + * [sql] Fix generated "COMMENT ON INDEX" for PostgreSQL + +## v3.1.6 (2019-06-27) + +### Enhancements + + * [sql] Set `cache_statement` for `insert_all` + +## v3.1.5 (2019-06-13) + +### Enhancements + + * [migration] Add `@disable_migration_lock` to be better handle concurrent indexes + * [mysql] Set `cache_statement` for inserts + +### Deprecations + + * [mysql] Deprecate Ecto.Adapters.MySQL + +## v3.1.4 (2019-05-28) + +### Enhancements + + * [migrator] Print warning message if concurrent indexes are used with migration lock + +## v3.1.3 (2019-05-19) + +### Enhancements + + * [migrator] Add `--migrations-path` to ecto.migrate/ecto.rollback/ecto.migrations Mix tasks + +### Bug fixes + + * [migrator] Make sure an unboxed run is performed when running migrations with the ownership pool + +## v3.1.2 (2019-05-11) + +### Enhancements + + * [migrator] Add `Ecto.Migrator.with_repo/2` to start repo and apps + * [mix] Add `--skip-if-loaded` for `ecto.load` + * [sql] Add `Ecto.Adapters.SQL.table_exists?/2` + +## v3.1.1 (2019-04-16) + +### Bug fixes + + * [repo] Fix backwards incompatible change in Telemetry metadata + +## v3.1.0 (2019-04-02) + +v3.1 requires Elixir v1.5+. 
+ +### Enhancements + + * [mysql] Introduce Ecto.Adapters.MyXQL as an alternative library for MySQL + * [migrations] Run all migrations in subdirectories + * [repo] Update to Telemetry v0.4.0 (note the measurements value differ from previous versions) + +### Bug fixes + + * [sandbox] Respect `:ownership_timeout` repo configuration on SQL Sandbox + * [migrations] Commit and relock after every migration to avoid leaving the DB in an inconsistent state under certain failures + +### Backwards incompatible changes + + * [migrations] If you are creating indexes concurrently, you need to disable the migration lock: `config :app, App.Repo, migration_lock: nil`. This will migrations behave the same way as they did in Ecto 2.0. + +## v3.0.5 (2019-02-05) + +### Enhancements + + * [repo] Add `:repo` and `:type` keys to telemetry events + * [migrations] Add `:add_if_not_exists` and `:remove_if_exists` to columns in migrations + +### Bug fixes + + * [migrations] Load all migrations before running them + * [sandbox] Include `:queue_target` and `:queue_interval` in SQL Sandbox checkout + +## v3.0.4 (2018-12-31) + +### Enhancements + + * [repo] Bump telemetry dependency + * [migrations] Perform strict argument parsing in `ecto.migrate`, `ecto.rollback`, `ecto.load` and `ecto.dump` + +### Bug fixes + + * [migrations] Do not log migration versions query + +### Deprecations + + * [repo] `Telemetry.attach/5` and `Telemetry.attach_many/5` are deprecated in favor of `:telemetry.attach/5` and `:telemetry.attach_many/5` + +## v3.0.3 (2018-11-29) + +### Enhancements + + * [migration] Support `after_begin` and `before_commit` migration callbacks + * [migration] Add `:prefix` option to `references/2` + +### Bug fixes + + * [migration] Do not start a transaction for migrated versions if there is no `:migration_lock` + * [migration] Fix removing an reference column inside alter table + * [migration] Warn on removed `:pool_timeout` option + +## v3.0.2 (2018-11-20) + +### Enhancements + + * 
[query] Support `Ecto.Query` in `insert_all` values + * [migration] Add `Ecto.Migration.repo/0` + +## v3.0.1 (2018-11-17) + +### Enhancements + + * [migrations] Support `drop_if_exists` for constraints + +### Bug fixes + + * [migrations] Only commit migration transaction if migration can be inserted into the DB + * [migrations] Do not run migrations from `_build` when using Mix + * [migrations] Improve errors when checking in already committed sandboxes + * [mysql] Do not pass nil for `--user` to mysqldump + * [package] Require Ecto 3.0.2 with bug fixes + * [package] Require Mariaex 0.9.1 which fixes a bug when used with Ecto 3.0.2 + * [sandbox] Raise when using sandbox on non-sandbox pools + +## v3.0.0 (2018-10-29) + + * Initial release diff --git a/deps/ecto_sql/README.md b/deps/ecto_sql/README.md new file mode 100644 index 0000000..0c2d755 --- /dev/null +++ b/deps/ecto_sql/README.md @@ -0,0 +1,73 @@ +Ecto SQL +========= + +[![Build Status](https://github.com/elixir-ecto/ecto_sql/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto_sql/actions) + +Ecto SQL ([documentation](https://hexdocs.pm/ecto_sql)) provides building blocks for writing SQL adapters for Ecto. It features: + + * The Ecto.Adapters.SQL module as an entry point for all SQL-based adapters + * Default implementations for Postgres (Ecto.Adapters.Postgres), MySQL (Ecto.Adapters.MyXQL), and MSSQL (Ecto.Adapters.Tds) + * A test sandbox (Ecto.Adapters.SQL.Sandbox) that concurrently runs database tests inside transactions + * Support for database migrations via Mix tasks + +To learn more about getting started, [see the Ecto repository](https://github.com/elixir-ecto/ecto). 
+ +## Running tests + +Clone the repo and fetch its dependencies: + + $ git clone https://github.com/elixir-ecto/ecto_sql.git + $ cd ecto_sql + $ mix deps.get + $ mix test + +In case you are modifying Ecto and EctoSQL at the same time, you can configure EctoSQL to use an Ecto version from your machine by running: + + $ ECTO_PATH=../ecto mix test.all + +### Running integration tests + +The command above will run unit tests. EctoSQL also has a suite of integration tests for its built-in adapters: `pg`, `myxql` and `tds`. If you are changing logic specific to a database, we recommend running its respective integration test suite as well. Doing so requires you to have the database available locally. MySQL and PostgreSQL can be installed directly on most systems. For MSSQL, you may need to run it as a Docker image: + + docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' mcr.microsoft.com/mssql/server:2017-latest + +Once the database is running, you can run tests against a specific Ecto adapter by using the `ECTO_ADAPTER` environment variable: + + $ ECTO_ADAPTER=pg mix test + +You may also run `mix test.all` to run the unit tests and all integration tests. You can also use a local Ecto checkout if desired: + + $ ECTO_PATH=../ecto mix test.all + +### Running containerized tests + +It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly). You will also need Docker installed on your system. 
Then you can run: + + $ earthly -P +all + +You can also use this to interactively debug any failing integration tests using the corresponding commands: + + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MYSQL=5.7 +integration-test-mysql + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MSSQL=2019 +integration-test-mssql + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg POSTGRES=11.11 +integration-test-postgres + +Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: + + PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test + MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test + sqlcmd -U sa -P 'some!Password' + +## License + +Copyright (c) 2012 Plataformatec \ +Copyright (c) 2020 Dashbit + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/ecto_sql/hex_metadata.config b/deps/ecto_sql/hex_metadata.config new file mode 100644 index 0000000..5ca967d --- /dev/null +++ b/deps/ecto_sql/hex_metadata.config @@ -0,0 +1,74 @@ +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-ecto/ecto_sql">>}]}. +{<<"name">>,<<"ecto_sql">>}. +{<<"version">>,<<"3.13.5">>}. +{<<"description">>,<<"SQL-based adapters for Ecto and database migrations">>}. +{<<"elixir">>,<<"~> 1.14">>}. 
+{<<"files">>, + [<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"CHANGELOG.md">>, + <<"lib">>,<<"lib/ecto">>,<<"lib/ecto/adapter">>, + <<"lib/ecto/adapter/migration.ex">>,<<"lib/ecto/adapter/structure.ex">>, + <<"lib/ecto/adapters">>,<<"lib/ecto/adapters/tds.ex">>, + <<"lib/ecto/adapters/myxql">>,<<"lib/ecto/adapters/myxql/connection.ex">>, + <<"lib/ecto/adapters/tds">>,<<"lib/ecto/adapters/tds/types.ex">>, + <<"lib/ecto/adapters/tds/connection.ex">>,<<"lib/ecto/adapters/sql.ex">>, + <<"lib/ecto/adapters/postgres">>, + <<"lib/ecto/adapters/postgres/connection.ex">>, + <<"lib/ecto/adapters/myxql.ex">>,<<"lib/ecto/adapters/postgres.ex">>, + <<"lib/ecto/adapters/sql">>,<<"lib/ecto/adapters/sql/stream.ex">>, + <<"lib/ecto/adapters/sql/sandbox.ex">>, + <<"lib/ecto/adapters/sql/connection.ex">>, + <<"lib/ecto/adapters/sql/application.ex">>,<<"lib/ecto/migration.ex">>, + <<"lib/ecto/migrator.ex">>,<<"lib/ecto/migration">>, + <<"lib/ecto/migration/runner.ex">>, + <<"lib/ecto/migration/schema_migration.ex">>,<<"lib/mix">>, + <<"lib/mix/tasks">>,<<"lib/mix/tasks/ecto.migrations.ex">>, + <<"lib/mix/tasks/ecto.dump.ex">>,<<"lib/mix/tasks/ecto.rollback.ex">>, + <<"lib/mix/tasks/ecto.migrate.ex">>, + <<"lib/mix/tasks/ecto.gen.migration.ex">>,<<"lib/mix/tasks/ecto.load.ex">>, + <<"lib/mix/ecto_sql.ex">>,<<"integration_test/sql">>, + <<"integration_test/sql/migrator.exs">>, + <<"integration_test/sql/query_many.exs">>, + <<"integration_test/sql/lock.exs">>, + <<"integration_test/sql/transaction.exs">>, + <<"integration_test/sql/alter.exs">>,<<"integration_test/sql/stream.exs">>, + <<"integration_test/sql/sql.exs">>,<<"integration_test/sql/sandbox.exs">>, + <<"integration_test/sql/subquery.exs">>, + <<"integration_test/sql/logging.exs">>, + <<"integration_test/sql/migration.exs">>,<<"integration_test/support">>, + <<"integration_test/support/file_helpers.exs">>, + <<"integration_test/support/migration.exs">>, + <<"integration_test/support/repo.exs">>]}. 
+{<<"app">>,<<"ecto_sql">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"ecto">>}, + {<<"app">>,<<"ecto">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 3.13.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4.0 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"db_connection">>}, + {<<"app">>,<<"db_connection">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 2.5 or ~> 2.4.1">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"postgrex">>}, + {<<"app">>,<<"postgrex">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 0.19 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"myxql">>}, + {<<"app">>,<<"myxql">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 0.7">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"tds">>}, + {<<"app">>,<<"tds">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 2.1.1 or ~> 2.2">>}, + {<<"repository">>,<<"hexpm">>}]]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/ecto_sql/integration_test/sql/alter.exs b/deps/ecto_sql/integration_test/sql/alter.exs new file mode 100644 index 0000000..b7ec655 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/alter.exs @@ -0,0 +1,90 @@ +defmodule Ecto.Integration.AlterTest do + use Ecto.Integration.Case, async: false + + alias Ecto.Integration.PoolRepo + + defmodule AlterMigrationOne do + use Ecto.Migration + + def up do + create table(:alter_col_type) do + add :value, :integer + end + + execute "INSERT INTO alter_col_type (value) VALUES (1)" + end + + def down do + drop table(:alter_col_type) + end + end + + defmodule AlterMigrationTwo do + use Ecto.Migration + + def up do + alter table(:alter_col_type) do + modify :value, :numeric + end + end + + def down do + alter table(:alter_col_type) do + modify :value, :integer + end + end + end + + import Ecto.Query, only: [from: 1, from: 2] + + defp run(direction, repo, module) do + Ecto.Migration.Runner.run(repo, repo.config(), 1, module, :forward, direction, direction, log: false) + end + + test "reset cache on returning query after alter column type" do + values = from v in "alter_col_type", select: v.value + + assert :ok == run(:up, PoolRepo, AlterMigrationOne) + assert PoolRepo.all(values) == [1] + + assert :ok == run(:up, PoolRepo, AlterMigrationTwo) + [%Decimal{}] = PoolRepo.all(values) + + PoolRepo.transaction(fn() -> + assert [%Decimal{}] = PoolRepo.all(values) + assert :ok == run(:down, PoolRepo, AlterMigrationTwo) + + # Optionally fail once with database error when + # already prepared on connection (and clear cache) + try do + PoolRepo.all(values, [mode: :savepoint]) + rescue + _ -> + assert PoolRepo.all(values) == [1] + else + result -> + assert result == [1] + end + end) + after + assert :ok == run(:down, PoolRepo, AlterMigrationOne) + end + + test "reset cache on parameterized query after alter column type" do + values = from v in "alter_col_type" + + assert :ok == run(:up, PoolRepo, AlterMigrationOne) + assert 
PoolRepo.update_all(values, [set: [value: 2]]) == {1, nil} + + assert :ok == run(:up, PoolRepo, AlterMigrationTwo) + assert PoolRepo.update_all(values, [set: [value: 3]]) == {1, nil} + + PoolRepo.transaction(fn() -> + assert PoolRepo.update_all(values, [set: [value: Decimal.new(5)]]) == {1, nil} + assert :ok == run(:down, PoolRepo, AlterMigrationTwo) + assert PoolRepo.update_all(values, [set: [value: 6]]) == {1, nil} + end) + after + assert :ok == run(:down, PoolRepo, AlterMigrationOne) + end +end diff --git a/deps/ecto_sql/integration_test/sql/lock.exs b/deps/ecto_sql/integration_test/sql/lock.exs new file mode 100644 index 0000000..eb99ad5 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/lock.exs @@ -0,0 +1,59 @@ +defmodule Ecto.Integration.LockTest do + # We can keep this test async as long as it + # is the only one accessing the lock_test table. + use ExUnit.Case, async: true + + import Ecto.Query + alias Ecto.Integration.PoolRepo + + defmodule LockCounter do + use Ecto.Schema + + schema "lock_counters" do + field :count, :integer + end + end + + setup do + PoolRepo.delete_all(LockCounter) + :ok + end + + test "lock for update" do + %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) + pid = self() + + lock_for_update = + Application.get_env(:ecto_sql, :lock_for_update) || + raise ":lock_for_update not set in :ecto application" + + # Here we are manually inserting the lock in the query + # to test multiple adapters. Never do this in actual + # application code: it is not safe and not public. + query = from(lc in LockCounter, where: lc.id == ^id) + query = %{query | lock: lock_for_update} + + {:ok, new_pid} = + Task.start_link fn -> + assert_receive :select_for_update, 5000 + + PoolRepo.transaction(fn -> + [post] = PoolRepo.all(query) # this should block until the other trans. commit + post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 
+ end) + + send pid, :updated + end + + PoolRepo.transaction(fn -> + [post] = PoolRepo.all(query) # select and lock the row + send new_pid, :select_for_update # signal second process to begin a transaction + post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! + end) + + assert_receive :updated, 5000 + + # Final count will be 3 if SELECT ... FOR UPDATE worked and 2 otherwise + assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) + end +end diff --git a/deps/ecto_sql/integration_test/sql/logging.exs b/deps/ecto_sql/integration_test/sql/logging.exs new file mode 100644 index 0000000..0f92e9a --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/logging.exs @@ -0,0 +1,857 @@ +defmodule Ecto.Integration.LoggingTest do + use Ecto.Integration.Case, async: true + + alias Ecto.Integration.TestRepo + alias Ecto.Integration.PoolRepo + alias Ecto.Integration.{Post, Logging, ArrayLogging} + + import ExUnit.CaptureLog + import Ecto.Query, only: [from: 2] + + describe "telemetry" do + test "dispatches event" do + log = fn event_name, measurements, metadata -> + assert Enum.at(event_name, -1) == :query + assert %{result: {:ok, _res}} = metadata + + assert measurements.total_time == + measurements.query_time + measurements.decode_time + measurements.queue_time + + assert measurements.idle_time + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post) + assert_received :logged + end + + test "dispatches event with stacktrace" do + log = fn _event_name, _measurements, metadata -> + assert %{stacktrace: [_ | _]} = metadata + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post, stacktrace: true) + assert_received :logged + end + + test "dispatches event with custom options" do + log = fn event_name, _measurements, metadata -> + assert Enum.at(event_name, -1) == :query + assert metadata.options == [:custom_metadata] + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = 
PoolRepo.all(Post, telemetry_options: [:custom_metadata]) + assert_received :logged + end + + test "dispatches under another event name" do + log = fn [:custom], measurements, metadata -> + assert %{result: {:ok, _res}} = metadata + + assert measurements.total_time == + measurements.query_time + measurements.decode_time + measurements.queue_time + + assert measurements.idle_time + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post, telemetry_event: [:custom]) + assert_received :logged + end + + test "is not dispatched with no event name" do + Process.put(:telemetry, fn _, _ -> raise "never called" end) + _ = TestRepo.all(Post, telemetry_event: nil) + refute_received :logged + end + + test "cast params" do + uuid_module = + if TestRepo.__adapter__() == Ecto.Adapters.Tds do + Tds.Ecto.UUID + else + Ecto.UUID + end + + uuid = uuid_module.generate() + dumped_uuid = uuid_module.dump!(uuid) + + log = fn _event_name, _measurements, metadata -> + assert [dumped_uuid] == metadata.params + assert [uuid] == metadata.cast_params + send(self(), :logged) + end + + Process.put(:telemetry, log) + TestRepo.all(from l in Logging, where: l.uuid == ^uuid) + assert_received :logged + end + end + + describe "logs" do + @stacktrace_opts [stacktrace: true, log: :error] + + defp stacktrace_entry(line) do + ~r/↳ anonymous fn\/0 in Ecto.Integration.LoggingTest.\"test logs includes stacktraces\"\/1, at: .*integration_test\/sql\/logging.exs:#{line - 3}/ + end + + test "when some measurements are nil" do + assert capture_log(fn -> TestRepo.query("BEG", [], log: :error) end) =~ + "[error]" + end + + test "includes stacktraces" do + assert capture_log(fn -> + TestRepo.all(Post, @stacktrace_opts) + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + TestRepo.insert(%Post{}, @stacktrace_opts) + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + # Test cascading options + Ecto.Multi.new() + |> 
Ecto.Multi.insert(:post, %Post{}) + |> TestRepo.transaction(@stacktrace_opts) + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + # In theory we should point to the call _inside_ run + # but all multi calls point to the transaction starting point. + Ecto.Multi.new() + |> Ecto.Multi.run(:all, fn _, _ -> {:ok, TestRepo.all(Post, @stacktrace_opts)} end) + |> TestRepo.transaction() + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + out = capture_log(fn -> + TestRepo.all(Post, Keyword.put(@stacktrace_opts, :log_stacktrace_mfa, {Ecto.Adapters.SQL, :first_non_ecto_stacktrace, [2]})) + + :ok + end) + + assert out =~ stacktrace_entry(__ENV__.line - 2) + + # We are a bit liberal with what we expect as we don't want to tie to internal ExUnit code + assert out =~ ~r/ ExUnit.CaptureLog.*/ + end + + test "with custom log level" do + assert capture_log(fn -> TestRepo.insert!(%Post{title: "1"}, log: :error) end) =~ + "[error]" + + # We cannot assert on the result because it depends on the suite log level + capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}, log: true) + end) + + # But this assertion is always true + assert capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}, log: false) + end) == "" + end + + test "with a log: true override when logging is disabled" do + refute capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}, log: true) + end) =~ "an exception was raised logging" + end + + test "with unspecified :log option when logging is disabled" do + refute capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}) + end) =~ "an exception was raised logging" + end + end + + describe "parameter logging" do + @describetag :parameter_logging + + @uuid_regex ~r/[0-9a-f]{2}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i + @naive_datetime_regex ~r/~N\[[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\]/ + + test "for insert_all with query" do + # Source query + int = 1 + uuid = Ecto.UUID.generate() + + source_query = + from l in 
Logging, + where: l.int == ^int and l.uuid == ^uuid, + select: %{uuid: l.uuid, int: l.int} + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, source_query, log: :info) + end) + + param_regex = ~r/\[(?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + # Query parameters + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + end + + @tag :insert_select + test "for insert_all with entries" do + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + datetime = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: datetime, + updated_at: datetime + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + datetime2 = NaiveDateTime.add(datetime, 1) + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: datetime2, + updated_at: datetime2 + ] + + # Extract the parameters from the log: + # 1. Remove the colour codes + # 2. Remove the log level + # 3. Capture everything inside of the square brackets + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], log: :info) + end) + + log = Regex.replace(~r/\e\[[0-9]+m/, log, "") + log = Regex.replace(~r/\[info\]/, log, "") + log = Regex.named_captures(~r/\[(?.+)\]/, log) + log_params = String.split(log["params"], ",") + + # Compute the expected parameters in the right order. + # This involves recreating the headers in the same order + # as `insert_all`. 
The user values come first and then + # the autogenerated id + headers = + row1 + |> Enum.reduce(%{}, fn {field, _}, headers -> Map.put(headers, field, true) end) + |> Map.put(:bid, true) + |> Map.keys() + + row1_regex = [ + int: "#{int}", + uuid: ["#{int}", uuid], + inserted_at: inspect(datetime), + updated_at: inspect(datetime), + bid: @uuid_regex + ] + + row2_regex = [ + int: ["#{int2}", uuid2], + uuid: uuid2, + inserted_at: inspect(datetime2), + updated_at: inspect(datetime2), + bid: @uuid_regex + ] + + expected_param_regex = + Enum.flat_map([row1_regex, row2_regex], fn row -> + Enum.flat_map(headers, fn field -> + case Keyword.get(row, field) do + params when is_list(params) -> params + param -> [param] + end + end) + end) + + assert length(log_params) == length(expected_param_regex) + + Enum.zip(log_params, expected_param_regex) + |> Enum.each(fn {log, expected_regex} -> + assert log =~ expected_regex + end) + end + + @tag :insert_select + @tag :placeholders + test "for insert_all with entries and placeholders" do + # Placeholders + datetime = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + datetime2 = NaiveDateTime.add(datetime, 1) + placeholder_map = %{datetime: datetime, datetime2: datetime2} + + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: {:placeholder, :datetime}, + updated_at: {:placeholder, :datetime} + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: {:placeholder, :datetime2}, + updated_at: {:placeholder, :datetime2} + ] + + # Extract the parameters from the log: + # 1. Remove the colour codes + # 2. Remove the log level + # 3. 
Capture everything inside of the square brackets + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], + placeholders: placeholder_map, + log: :info + ) + end) + + log = Regex.replace(~r/\e\[[0-9]+m/, log, "") + log = Regex.replace(~r/\[info\]/, log, "") + log = Regex.named_captures(~r/\[(?.+)\]/, log) + log_params = String.split(log["params"], ",") + + # Compute the expected parameters in the right order. + # This involves recreating the headers in the same order + # as `insert_all`. The placeholders come first and then + # the user values and then the autogenerated id + headers = + row1 + |> Enum.reduce(%{}, fn {field, _}, headers -> Map.put(headers, field, true) end) + |> Map.put(:bid, true) + |> Map.drop([:inserted_at, :updated_at]) + |> Map.keys() + + row1_regex = [ + int: "#{int}", + uuid: ["#{int}", uuid], + bid: @uuid_regex + ] + + row2_regex = [ + int: ["#{int2}", uuid2], + uuid: uuid2, + bid: @uuid_regex + ] + + row_param_regex = + Enum.flat_map([row1_regex, row2_regex], fn row -> + Enum.flat_map(headers, fn field -> + case Keyword.get(row, field) do + params when is_list(params) -> params + param -> [param] + end + end) + end) + + placeholder_regex = [inspect(datetime), inspect(datetime2)] + expected_param_regex = placeholder_regex ++ row_param_regex + + assert length(log_params) == length(expected_param_regex) + + Enum.zip(log_params, expected_param_regex) + |> Enum.each(fn {log, expected_regex} -> + assert log =~ expected_regex + end) + end + + @tag :with_conflict_target + test "for insert_all with query with conflict query" do + # Source query + int = 1 + uuid = Ecto.UUID.generate() + + source_query = + from l in Logging, + where: l.int == ^int and l.uuid == ^uuid, + select: %{uuid: l.uuid, int: l.int} + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: 
^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert_all(Logging, source_query, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + param_regex = + ~r/\[(?.+), \"(?.+)\", (?.+), (?.+), \"(?.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # Query parameters + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + @tag :insert_select + @tag :with_conflict_target + test "for insert_all with entries conflict query" do + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + datetime = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + + row1 = [ + int: int, + uuid: uuid_query, + inserted_at: datetime, + updated_at: datetime + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + datetime2 = NaiveDateTime.add(datetime, 1) + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: datetime2, + updated_at: datetime2 + ] + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Extract the parameters from the log: + # 1. Remove the colour codes + # 2. Remove the log level + # 3. 
Capture everything inside of the square brackets + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + log = Regex.replace(~r/\e\[[0-9]+m/, log, "") + log = Regex.replace(~r/\[info\]/, log, "") + log = Regex.named_captures(~r/\[(?.+)\]/, log) + log_params = String.split(log["params"], ",") + + # Compute the expected parameters in the right order. + # This involves recreating the headers in the same order + # as `insert_all`. The user values come first, then + # the autogenerated id and then the conflict params + headers = + row1 + |> Enum.reduce(%{}, fn {field, _}, headers -> Map.put(headers, field, true) end) + |> Map.put(:bid, true) + |> Map.keys() + + row1_regex = [ + int: "#{int}", + uuid: ["#{int}", uuid], + bid: @uuid_regex, + inserted_at: inspect(datetime), + updated_at: inspect(datetime) + ] + + row2_regex = [ + int: ["#{int2}", uuid2], + uuid: uuid2, + bid: @uuid_regex, + inserted_at: inspect(datetime2), + updated_at: inspect(datetime2) + ] + + row_param_regex = + Enum.flat_map([row1_regex, row2_regex], fn row -> + Enum.flat_map(headers, fn field -> + case Keyword.get(row, field) do + row_params when is_list(row_params) -> row_params + row_param -> [row_param] + end + end) + end) + + conflict_param_regex = ["#{conflict_update}", "#{conflict_int}", conflict_uuid] + expected_param_regex = row_param_regex ++ conflict_param_regex + + assert length(log_params) == length(expected_param_regex) + + Enum.zip(log_params, expected_param_regex) + |> Enum.each(fn {log, expected_regex} -> + assert log =~ expected_regex + end) + end + + @tag :insert_select + @tag :placeholders + @tag :with_conflict_target + test "for insert_all with entries, placeholders and conflict query" do + # Row 1 + int = 1 + uuid = Ecto.UUID.generate() + uuid_query = from l in Logging, where: l.int == ^int and l.uuid == ^uuid, select: l.uuid + + row1 = [ + int: int, + uuid: uuid_query, + 
inserted_at: {:placeholder, :datetime}, + updated_at: {:placeholder, :datetime2} + ] + + # Row 2 + int2 = 2 + uuid2 = Ecto.UUID.generate() + int_query = from l in Logging, where: l.int == ^int2 and l.uuid == ^uuid2, select: l.int + + row2 = [ + int: int_query, + uuid: uuid2, + inserted_at: {:placeholder, :datetime}, + updated_at: {:placeholder, :datetime2} + ] + + # Placeholders + datetime = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + datetime2 = NaiveDateTime.add(datetime, 1) + placeholder_map = %{datetime: datetime, datetime2: datetime2} + + # Conflict query + conflict_int = 0 + conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Extract the parameters from the log: + # 1. Remove the colour codes + # 2. Remove the log level + # 3. Capture everything inside of the square brackets + log = + capture_log(fn -> + TestRepo.insert_all(Logging, [row1, row2], + placeholders: placeholder_map, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + log = Regex.replace(~r/\e\[[0-9]+m/, log, "") + log = Regex.replace(~r/\[info\]/, log, "") + log = Regex.named_captures(~r/\[(?.+)\]/, log) + log_params = String.split(log["params"], ",") + + # Compute the expected parameters in the right order. + # This involves recreating the headers in the same order + # as `insert_all`. 
The placeholders come first, then the + # user value, then the autogenerated id and then the conflict + # params + headers = + row1 + |> Enum.reduce(%{}, fn {field, _}, headers -> Map.put(headers, field, true) end) + |> Map.put(:bid, true) + |> Map.drop([:inserted_at, :updated_at]) + |> Map.keys() + + row1_regex = [ + int: "#{int}", + uuid: ["#{int}", uuid], + bid: @uuid_regex + ] + + row2_regex = [ + int: ["#{int2}", uuid2], + uuid: uuid2, + bid: @uuid_regex + ] + + row_param_regex = + Enum.flat_map([row1_regex, row2_regex], fn row -> + Enum.flat_map(headers, fn field -> + case Keyword.get(row, field) do + row_params when is_list(row_params) -> row_params + row_param -> [row_param] + end + end) + end) + + placeholder_regex = [inspect(datetime), inspect(datetime2)] + conflict_param_regex = ["#{conflict_update}", "#{conflict_int}", conflict_uuid] + expected_param_regex = placeholder_regex ++ row_param_regex ++ conflict_param_regex + + assert length(log_params) == length(expected_param_regex) + + Enum.zip(log_params, expected_param_regex) + |> Enum.each(fn {log, expected_regex} -> + assert log =~ expected_regex + end) + end + + test "for insert" do + # Insert values + int = 1 + uuid = Ecto.UUID.generate() + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert!(%Logging{uuid: uuid, int: int}, + log: :info + ) + end) + + param_regex = + ~r/\[(?.+), \"(?.+)\", (?.+), (?.+), \"(?.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["inserted_at"] =~ @naive_datetime_regex + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] =~ @uuid_regex + end + + @tag :with_conflict_target + test "for insert with conflict query" do + # Insert values + int = 1 + uuid = Ecto.UUID.generate() + + # Conflict query + conflict_int = 0 + 
conflict_uuid = Ecto.UUID.generate() + conflict_update = 2 + + conflict_query = + from l in Logging, + where: l.int == ^conflict_int and l.uuid == ^conflict_uuid, + update: [set: [int: ^conflict_update]] + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.insert!(%Logging{uuid: uuid, int: int}, + on_conflict: conflict_query, + conflict_target: :bid, + log: :info + ) + end) + + param_regex = + ~r/\[(?.+), \"(?.+)\", (?.+), (?.+), \"(?.+)\", (?.+), (?.+), \"(?.+)\"\]/ + + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["inserted_at"] =~ @naive_datetime_regex + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] =~ @uuid_regex + # Conflict query parameters + assert param_logs["conflict_update"] == Integer.to_string(conflict_update) + assert param_logs["conflict_int"] == Integer.to_string(conflict_int) + assert param_logs["conflict_uuid"] == conflict_uuid + end + + test "for update" do + # Update values + int = 1 + uuid = Ecto.UUID.generate() + current = TestRepo.insert!(%Logging{}) + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.update!(Ecto.Changeset.change(current, %{uuid: uuid, int: int}), log: :info) + end) + + param_regex = ~r/\[(?.+), \"(?.+)\", (?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + # User changes + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + # Autogenerated changes + assert param_logs["updated_at"] =~ @naive_datetime_regex + # Filters + assert param_logs["bid"] == current.bid + end + + test "for delete" do + current = TestRepo.insert!(%Logging{}) + + # Ensure parameters are complete and in correct order + log = + capture_log(fn -> + TestRepo.delete!(current, log: :info) + end) + + 
param_regex = ~r/\[\"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + # Filters + assert param_logs["bid"] == current.bid + end + + test "for queries" do + int = 1 + uuid = Ecto.UUID.generate() + + # all + log = + capture_log(fn -> + TestRepo.all( + from(l in Logging, + select: type(^"1", :integer), + where: l.int == ^int and l.uuid == ^uuid + ), + log: :info + ) + end) + + param_regex = ~r/\[(?.+), (?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["tagged_int"] == Integer.to_string(int) + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + + # update_all + update = 2 + + log = + capture_log(fn -> + from(l in Logging, + where: l.int == ^int and l.uuid == ^uuid, + update: [set: [int: ^update]] + ) + |> TestRepo.update_all([], log: :info) + end) + + param_regex = ~r/\[(?.+), (?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["update"] == Integer.to_string(update) + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + + # delete_all + log = + capture_log(fn -> + TestRepo.delete_all(from(l in Logging, where: l.int == ^int and l.uuid == ^uuid), + log: :info + ) + end) + + param_regex = ~r/\[(?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + end + + @tag :stream + test "for queries with stream" do + int = 1 + uuid = Ecto.UUID.generate() + + log = + capture_log(fn -> + stream = + TestRepo.stream(from(l in Logging, where: l.int == ^int and l.uuid == ^uuid), + log: :info + ) + + TestRepo.transaction(fn -> Enum.to_list(stream) end) + end) + + param_regex = ~r/\[(?.+), \"(?.+)\"\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["int"] == Integer.to_string(int) + assert param_logs["uuid"] == uuid + end + + @tag :array_type + test "for queries with array 
type" do + uuid = Ecto.UUID.generate() + uuid2 = Ecto.UUID.generate() + + log = + capture_log(fn -> + TestRepo.all(from(a in ArrayLogging, where: a.uuids == ^[uuid, uuid2]), + log: :info + ) + end) + + param_regex = ~r/\[(?\[.+\])\]/ + param_logs = Regex.named_captures(param_regex, log) + + assert param_logs["uuids"] == "[\"#{uuid}\", \"#{uuid2}\"]" + end + end +end diff --git a/deps/ecto_sql/integration_test/sql/migration.exs b/deps/ecto_sql/integration_test/sql/migration.exs new file mode 100644 index 0000000..96b216e --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/migration.exs @@ -0,0 +1,775 @@ +defmodule Ecto.Integration.MigrationTest do + use ExUnit.Case, async: true + + alias Ecto.Integration.{TestRepo, PoolRepo} + + defmodule CreateMigration do + use Ecto.Migration + + @table table(:create_table_migration) + @index index(:create_table_migration, [:value], unique: true) + + def up do + create @table do + add :value, :integer + end + create @index + end + + def down do + drop @index + drop @table + end + end + + defmodule AddColumnMigration do + use Ecto.Migration + + def up do + create table(:add_col_migration) do + add :value, :integer + end + + alter table(:add_col_migration) do + add :to_be_added, :integer + end + + execute "INSERT INTO add_col_migration (value, to_be_added) VALUES (1, 2)" + end + + def down do + drop table(:add_col_migration) + end + end + + defmodule AlterColumnMigration do + use Ecto.Migration + + def up do + create table(:alter_col_migration) do + add :from_null_to_not_null, :integer + add :from_not_null_to_null, :integer, null: false + + add :from_default_to_no_default, :integer, default: 0 + add :from_no_default_to_default, :integer + end + + alter table(:alter_col_migration) do + modify :from_null_to_not_null, :string, null: false + modify :from_not_null_to_null, :string, null: true + + modify :from_default_to_no_default, :integer, default: nil + modify :from_no_default_to_default, :integer, default: 0 + end + + execute 
"INSERT INTO alter_col_migration (from_null_to_not_null) VALUES ('foo')" + end + + def down do + drop table(:alter_col_migration) + end + end + + defmodule AlterColumnFromMigration do + use Ecto.Migration + + def change do + create table(:modify_from_products) do + add :value, :integer + add :nullable, :integer, null: false + end + + if direction() == :up do + flush() + PoolRepo.insert_all "modify_from_products", [[value: 1, nullable: 1]] + end + + alter table(:modify_from_products) do + modify :value, :bigint, from: :integer + modify :nullable, :bigint, null: true, from: {:integer, null: false} + end + end + end + + defmodule AlterColumnFromPkeyMigration do + use Ecto.Migration + + def change do + create table(:modify_from_authors, primary_key: false) do + add :id, :integer, primary_key: true + end + create table(:modify_from_posts) do + add :author_id, references(:modify_from_authors, type: :integer) + end + + if direction() == :up do + flush() + PoolRepo.insert_all "modify_from_authors", [[id: 1]] + PoolRepo.insert_all "modify_from_posts", [[author_id: 1]] + end + + alter table(:modify_from_posts) do + # remove the constraints modify_from_posts_author_id_fkey + modify :author_id, :integer, from: references(:modify_from_authors, type: :integer) + end + alter table(:modify_from_authors) do + modify :id, :bigint, from: :integer + end + alter table(:modify_from_posts) do + # add the constraints modify_from_posts_author_id_fkey + modify :author_id, references(:modify_from_authors, type: :bigint), from: :integer + end + end + end + + defmodule AlterForeignKeyOnDeleteMigration do + use Ecto.Migration + + def up do + create table(:alter_fk_users) + + create table(:alter_fk_posts) do + add :alter_fk_user_id, :id + end + + alter table(:alter_fk_posts) do + modify :alter_fk_user_id, references(:alter_fk_users, on_delete: :nilify_all) + end + end + + def down do + drop table(:alter_fk_posts) + drop table(:alter_fk_users) + end + end + + defmodule 
AlterForeignKeyOnUpdateMigration do + use Ecto.Migration + + def up do + create table(:alter_fk_users) + + create table(:alter_fk_posts) do + add :alter_fk_user_id, :id + end + + alter table(:alter_fk_posts) do + modify :alter_fk_user_id, references(:alter_fk_users, on_update: :update_all) + end + end + + def down do + drop table(:alter_fk_posts) + drop table(:alter_fk_users) + end + end + + defmodule DropColumnMigration do + use Ecto.Migration + + def up do + create table(:drop_col_migration) do + add :value, :integer + add :to_be_removed, :integer + end + + execute "INSERT INTO drop_col_migration (value, to_be_removed) VALUES (1, 2)" + + alter table(:drop_col_migration) do + remove :to_be_removed + end + end + + def down do + drop table(:drop_col_migration) + end + end + + defmodule RenameColumnMigration do + use Ecto.Migration + + def up do + create table(:rename_col_migration) do + add :to_be_renamed, :integer + end + + rename table(:rename_col_migration), :to_be_renamed, to: :was_renamed + + execute "INSERT INTO rename_col_migration (was_renamed) VALUES (1)" + end + + def down do + drop table(:rename_col_migration) + end + end + + defmodule OnDeleteMigration do + use Ecto.Migration + + def up do + create table(:parent1) + create table(:parent2) + + create table(:ref_migration) do + add :parent1, references(:parent1, on_delete: :nilify_all) + end + + alter table(:ref_migration) do + add :parent2, references(:parent2, on_delete: :delete_all) + end + end + + def down do + drop table(:ref_migration) + drop table(:parent1) + drop table(:parent2) + end + end + + defmodule OnDeleteNilifyColumnsMigration do + use Ecto.Migration + + def up do + create table(:parent) do + add :col1, :integer + add :col2, :integer + end + + create unique_index(:parent, [:id, :col1, :col2]) + + create table(:ref) do + add :col1, :integer + add :col2, :integer + add :parent_id, + references(:parent, + with: [col1: :col1, col2: :col2], + on_delete: {:nilify, [:parent_id, :col2]} + ) + end + 
end + + def down do + drop table(:ref) + drop table(:parent) + end + end + + defmodule OnDeleteDefaultAllMigration do + use Ecto.Migration + + def up do + create table(:parent, primary_key: [type: :bigint]) do + add :col1, :integer + add :col2, :integer + end + + create unique_index(:parent, [:id, :col1, :col2]) + + create table(:ref) do + add :col1, :integer, default: 2 + add :col2, :integer, default: 3 + + add :parent_id, + references(:parent, + with: [col1: :col1, col2: :col2], + on_delete: :default_all + ), default: 1 + end + end + + def down do + drop table(:ref) + drop table(:parent) + end + end + + defmodule OnDeleteDefaultColumnsMigration do + use Ecto.Migration + + def up do + create table(:parent, primary_key: [type: :bigint]) do + add :col1, :integer + add :col2, :integer + end + + create unique_index(:parent, [:id, :col1, :col2]) + + create table(:ref) do + add :col1, :integer, default: 2 + add :col2, :integer, default: 3 + + add :parent_id, + references(:parent, + with: [col1: :col1, col2: :col2], + on_delete: {:default, [:parent_id, :col2]} + ), default: 1 + end + end + + def down do + drop table(:ref) + drop table(:parent) + end + end + + defmodule CompositeForeignKeyMigration do + use Ecto.Migration + + def change do + create table(:composite_parent) do + add :key_id, :integer + end + + create unique_index(:composite_parent, [:id, :key_id]) + + create table(:composite_child) do + add :parent_key_id, :integer + add :parent_id, references(:composite_parent, with: [parent_key_id: :key_id]) + end + end + end + + defmodule RenameIndexMigration do + use Ecto.Migration + + def change do + create table(:composite_parent) do + add :key_id, :integer + end + + create unique_index(:composite_parent, [:id, :key_id], name: "old_index_name") + + rename index(:composite_parent, [:id, :key_id], name: "old_index_name"), to: "new_index_name" + end + end + + defmodule ReferencesRollbackMigration do + use Ecto.Migration + + def change do + create table(:parent) do + add 
:name, :string + end + + create table(:child) do + add :parent_id, references(:parent) + end + end + end + + defmodule RenameMigration do + use Ecto.Migration + + @table_current table(:posts_migration) + @table_new table(:new_posts_migration) + + def up do + create @table_current + rename @table_current, to: @table_new + end + + def down do + drop @table_new + end + end + + defmodule PrefixMigration do + use Ecto.Migration + + @prefix "ecto_prefix_test" + + def up do + execute TestRepo.create_prefix(@prefix) + create table(:first, prefix: @prefix) + create table(:second, prefix: @prefix) do + add :first_id, references(:first) + end + end + + def down do + drop table(:second, prefix: @prefix) + drop table(:first, prefix: @prefix) + execute TestRepo.drop_prefix(@prefix) + end + end + + defmodule NoSQLMigration do + use Ecto.Migration + + def up do + create table(:collection, options: [capped: true]) + execute create: "collection" + end + end + + defmodule Parent do + use Ecto.Schema + + schema "parent" do + end + end + + defmodule NoErrorTableMigration do + use Ecto.Migration + + def change do + create_if_not_exists table(:existing) do + add :name, :string + end + + create_if_not_exists table(:existing) do + add :name, :string + end + + create_if_not_exists table(:existing) + + drop_if_exists table(:existing) + drop_if_exists table(:existing) + end + end + + defmodule NoErrorIndexMigration do + use Ecto.Migration + + def change do + create_if_not_exists index(:posts, [:title]) + create_if_not_exists index(:posts, [:title]) + drop_if_exists index(:posts, [:title]) + drop_if_exists index(:posts, [:title]) + end + end + + defmodule InferredDropIndexMigration do + use Ecto.Migration + + def change do + create index(:posts, [:title]) + end + end + + defmodule AlterPrimaryKeyMigration do + use Ecto.Migration + + def change do + create table(:no_pk, primary_key: false) do + add :dummy, :string + end + alter table(:no_pk) do + add :id, :serial, primary_key: true + end + end 
+ end + + + defmodule AddColumnIfNotExistsMigration do + use Ecto.Migration + + def up do + create table(:add_col_if_not_exists_migration) + + alter table(:add_col_if_not_exists_migration) do + add_if_not_exists :value, :integer + add_if_not_exists :to_be_added, :integer + end + + execute "INSERT INTO add_col_if_not_exists_migration (value, to_be_added) VALUES (1, 2)" + end + + def down do + drop table(:add_col_if_not_exists_migration) + end + end + + defmodule DropColumnIfExistsMigration do + use Ecto.Migration + + def up do + create table(:drop_col_if_exists_migration) do + add :value, :integer + add :to_be_removed, :integer + end + + execute "INSERT INTO drop_col_if_exists_migration (value, to_be_removed) VALUES (1, 2)" + + alter table(:drop_col_if_exists_migration) do + remove_if_exists :to_be_removed, :integer + end + end + + def down do + drop table(:drop_col_if_exists_migration) + end + end + + defmodule NoErrorOnConditionalColumnMigration do + use Ecto.Migration + + def up do + create table(:no_error_on_conditional_column_migration) + + alter table(:no_error_on_conditional_column_migration) do + add_if_not_exists :value, :integer + add_if_not_exists :value, :integer + + remove_if_exists :value, :integer + remove_if_exists :value, :integer + end + end + + def down do + drop table(:no_error_on_conditional_column_migration) + end + end + + import Ecto.Query, only: [from: 2] + import Ecto.Migrator, only: [up: 4, down: 4] + + # Avoid migration out of order warnings + @moduletag :capture_log + @base_migration 1_000_000 + + setup do + {:ok, migration_number: System.unique_integer([:positive]) + @base_migration} + end + + test "create and drop table and indexes", %{migration_number: num} do + assert :ok == up(PoolRepo, num, CreateMigration, log: false) + assert :ok == down(PoolRepo, num, CreateMigration, log: false) + end + + test "correctly infers how to drop index", %{migration_number: num} do + assert :ok == up(PoolRepo, num, InferredDropIndexMigration, log: 
false) + assert :ok == down(PoolRepo, num, InferredDropIndexMigration, log: false) + end + + test "supports on delete", %{migration_number: num} do + assert :ok == up(PoolRepo, num, OnDeleteMigration, log: false) + + parent1 = PoolRepo.insert! Ecto.put_meta(%Parent{}, source: "parent1") + parent2 = PoolRepo.insert! Ecto.put_meta(%Parent{}, source: "parent2") + + writer = "INSERT INTO ref_migration (parent1, parent2) VALUES (#{parent1.id}, #{parent2.id})" + PoolRepo.query!(writer) + + reader = from r in "ref_migration", select: {r.parent1, r.parent2} + assert PoolRepo.all(reader) == [{parent1.id, parent2.id}] + + PoolRepo.delete!(parent1) + assert PoolRepo.all(reader) == [{nil, parent2.id}] + + PoolRepo.delete!(parent2) + assert PoolRepo.all(reader) == [] + + assert :ok == down(PoolRepo, num, OnDeleteMigration, log: false) + end + + test "composite foreign keys", %{migration_number: num} do + assert :ok == up(PoolRepo, num, CompositeForeignKeyMigration, log: false) + + PoolRepo.insert_all("composite_parent", [[key_id: 2]]) + assert [id] = PoolRepo.all(from p in "composite_parent", select: p.id) + + catch_error(PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 1]])) + assert {1, nil} = PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 2]]) + + assert :ok == down(PoolRepo, num, CompositeForeignKeyMigration, log: false) + end + + test "rename index", %{migration_number: num} do + assert :ok == up(PoolRepo, num, RenameIndexMigration, log: false) + assert :ok == down(PoolRepo, num, RenameIndexMigration, log: false) + end + + test "rolls back references in change/1", %{migration_number: num} do + assert :ok == up(PoolRepo, num, ReferencesRollbackMigration, log: false) + assert :ok == down(PoolRepo, num, ReferencesRollbackMigration, log: false) + end + + test "create table if not exists and drop table if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorTableMigration, log: 
false) + end + + @tag :create_index_if_not_exists + test "create index if not exists and drop index if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorIndexMigration, log: false) + end + + test "raises on NoSQL migrations", %{migration_number: num} do + assert_raise ArgumentError, ~r"does not support keyword lists in :options", fn -> + up(PoolRepo, num, NoSQLMigration, log: false) + end + end + + @tag :add_column + test "add column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AddColumnMigration, log: false) + assert [2] == PoolRepo.all from p in "add_col_migration", select: p.to_be_added + :ok = down(PoolRepo, num, AddColumnMigration, log: false) + end + + @tag :modify_column + test "modify column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterColumnMigration, log: false) + + assert ["foo"] == + PoolRepo.all from p in "alter_col_migration", select: p.from_null_to_not_null + assert [nil] == + PoolRepo.all from p in "alter_col_migration", select: p.from_not_null_to_null + assert [nil] == + PoolRepo.all from p in "alter_col_migration", select: p.from_default_to_no_default + assert [0] == + PoolRepo.all from p in "alter_col_migration", select: p.from_no_default_to_default + + query = "INSERT INTO `alter_col_migration` (\"from_not_null_to_null\") VALUES ('foo')" + assert catch_error(PoolRepo.query!(query)) + + :ok = down(PoolRepo, num, AlterColumnMigration, log: false) + end + + @tag :modify_column + test "modify column with from", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterColumnFromMigration, log: false) + + assert [1] == + PoolRepo.all from p in "modify_from_products", select: p.value + + :ok = down(PoolRepo, num, AlterColumnFromMigration, log: false) + end + + @tag :alter_primary_key + test "modify column with from and pkey", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) + + assert [1] == + 
PoolRepo.all from p in "modify_from_posts", select: p.author_id + + :ok = down(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) + end + + @tag :alter_foreign_key + test "modify foreign key's on_delete constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) + + PoolRepo.insert_all("alter_fk_users", [[]]) + assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id + + PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) + PoolRepo.delete_all("alter_fk_users") + assert [nil] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id + + :ok = down(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) + end + + @tag :assigns_id_type + test "modify foreign key's on_update constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) + + PoolRepo.insert_all("alter_fk_users", [[]]) + assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id + + PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) + PoolRepo.update_all("alter_fk_users", set: [id: 12345]) + assert [12345] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id + + PoolRepo.delete_all("alter_fk_posts") + :ok = down(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) + end + + @tag :remove_column + test "remove column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, DropColumnMigration, log: false) + assert catch_error(PoolRepo.all from p in "drop_col_migration", select: p.to_be_removed) + :ok = down(PoolRepo, num, DropColumnMigration, log: false) + end + + @tag :rename_column + test "rename column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, RenameColumnMigration, log: false) + assert [1] == PoolRepo.all from p in "rename_col_migration", select: p.was_renamed + :ok = down(PoolRepo, num, RenameColumnMigration, log: false) + end + + @tag :rename_table + test 
"rename table", %{migration_number: num} do + assert :ok == up(PoolRepo, num, RenameMigration, log: false) + assert :ok == down(PoolRepo, num, RenameMigration, log: false) + end + + @tag :prefix + test "prefix", %{migration_number: num} do + assert :ok == up(PoolRepo, num, PrefixMigration, log: false) + assert :ok == down(PoolRepo, num, PrefixMigration, log: false) + end + + @tag :alter_primary_key + test "alter primary key", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterPrimaryKeyMigration, log: false) + assert :ok == down(PoolRepo, num, AlterPrimaryKeyMigration, log: false) + end + + @tag :add_column_if_not_exists + @tag :remove_column_if_exists + test "add if not exists and remove if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) + assert :ok == down(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) + end + + @tag :add_column_if_not_exists + test "add column if not exists", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) + assert [2] == PoolRepo.all from p in "add_col_if_not_exists_migration", select: p.to_be_added + :ok = down(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) + end + + @tag :remove_column_if_exists + test "remove column when exists", %{migration_number: num} do + assert :ok == up(PoolRepo, num, DropColumnIfExistsMigration, log: false) + assert catch_error(PoolRepo.all from p in "drop_col_if_exists_migration", select: p.to_be_removed) + :ok = down(PoolRepo, num, DropColumnIfExistsMigration, log: false) + end + + @tag :on_delete_nilify_column_list + test "nilify list of columns on_delete constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, OnDeleteNilifyColumnsMigration, log: false) + + PoolRepo.insert_all("parent", [%{col1: 1, col2: 2}]) + assert [{id, col1, col2}] = PoolRepo.all from p in "parent", select: {p.id, p.col1, 
p.col2} + + PoolRepo.insert_all("ref", [[parent_id: id, col1: col1, col2: col2]]) + PoolRepo.delete_all("parent") + assert [{nil, col1, nil}] == PoolRepo.all from r in "ref", select: {r.parent_id, r.col1, r.col2} + + :ok = down(PoolRepo, num, OnDeleteNilifyColumnsMigration, log: false) + end + + @tag :on_delete_default_all + test "default all on_delete constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, OnDeleteDefaultAllMigration, log: false) + + PoolRepo.insert_all("parent", [%{id: 1, col1: 2, col2: 3}]) + {id, col1, col2} = {Enum.random(10..1000), Enum.random(10..1000), Enum.random(10..1000)} + + PoolRepo.insert_all("parent", [%{id: id, col1: col1, col2: col2}]) + PoolRepo.insert_all("ref", [%{parent_id: id, col1: col1, col2: col2}]) + PoolRepo.delete_all(from p in "parent", where: p.id == ^id) + assert [{1, 2, 3}] == PoolRepo.all from r in "ref", select: {r.parent_id, r.col1, r.col2} + + :ok = down(PoolRepo, num, OnDeleteDefaultAllMigration, log: false) + end + + @tag :on_delete_default_column_list + test "default list of columns on_delete constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, OnDeleteDefaultColumnsMigration, log: false) + + PoolRepo.insert_all("parent", [%{id: 1, col1: 20, col2: 3}]) + + {id, col2} = {Enum.random(10..1000), Enum.random(10..1000)} + + PoolRepo.insert_all("parent", [%{id: id, col1: 20, col2: col2}]) + PoolRepo.insert_all("ref", [%{parent_id: id, col1: 20, col2: col2}]) + PoolRepo.delete_all(from p in "parent", where: p.id == ^id) + assert [{1, 20, 3}] == PoolRepo.all from r in "ref", select: {r.parent_id, r.col1, r.col2} + + :ok = down(PoolRepo, num, OnDeleteDefaultColumnsMigration, log: false) + end +end diff --git a/deps/ecto_sql/integration_test/sql/migrator.exs b/deps/ecto_sql/integration_test/sql/migrator.exs new file mode 100644 index 0000000..903ed87 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/migrator.exs @@ -0,0 +1,264 @@ +Code.require_file 
"../support/file_helpers.exs", __DIR__ + +defmodule Ecto.Integration.MigratorTest do + use Ecto.Integration.Case + + import Support.FileHelpers + import ExUnit.CaptureLog + import Ecto.Migrator + + alias Ecto.Integration.{TestRepo, PoolRepo} + alias Ecto.Migration.SchemaMigration + + setup config do + Process.register(self(), config.test) + PoolRepo.delete_all(SchemaMigration) + :ok + end + + defmodule AnotherSchemaMigration do + use Ecto.Migration + + def change do + execute TestRepo.create_prefix("bad_schema_migrations"), + TestRepo.drop_prefix("bad_schema_migrations") + + create table(:schema_migrations, prefix: "bad_schema_migrations") do + add :version, :string + add :inserted_at, :integer + end + end + end + + defmodule BrokenLinkMigration do + use Ecto.Migration + + def change do + Task.start_link(fn -> raise "oops" end) + Process.sleep(:infinity) + end + end + + defmodule GoodMigration do + use Ecto.Migration + + def up do + create table(:good_migration) + end + + def down do + drop table(:good_migration) + end + end + + defmodule BadMigration do + use Ecto.Migration + + def change do + execute "CREATE WHAT" + end + end + + test "migrations up and down" do + assert migrated_versions(PoolRepo) == [] + assert up(PoolRepo, 31, GoodMigration, log: false) == :ok + + [migration] = PoolRepo.all(SchemaMigration) + assert migration.version == 31 + assert migration.inserted_at + + assert migrated_versions(PoolRepo) == [31] + assert up(PoolRepo, 31, GoodMigration, log: false) == :already_up + assert migrated_versions(PoolRepo) == [31] + assert down(PoolRepo, 32, GoodMigration, log: false) == :already_down + assert migrated_versions(PoolRepo) == [31] + assert down(PoolRepo, 31, GoodMigration, log: false) == :ok + assert migrated_versions(PoolRepo) == [] + end + + @tag :prefix + test "does not commit migration if insert into schema migration fails" do + # First we create a new schema migration table in another prefix + assert up(PoolRepo, 33, AnotherSchemaMigration, 
log: false) == :ok + assert migrated_versions(PoolRepo) == [33] + + catch_error(up(PoolRepo, 34, GoodMigration, log: false, prefix: "bad_schema_migrations")) + catch_error(PoolRepo.all("good_migration")) + catch_error(PoolRepo.all("good_migration", prefix: "bad_schema_migrations")) + + assert down(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok + end + + test "ecto-generated migration queries pass schema_migration in telemetry options" do + handler = fn _event_name, _measurements, metadata -> + send(self(), metadata) + end + + # migration table creation + Process.put(:telemetry, handler) + migrated_versions(PoolRepo, log: false) + assert_received %{options: [schema_migration: true]} + + # transaction begin statement + Process.put(:telemetry, handler) + migrated_versions(PoolRepo, skip_table_creation: true, log: false) + assert_received %{options: [schema_migration: true]} + + # retrieving the migration versions + Process.put(:telemetry, handler) + migrated_versions(PoolRepo, migration_lock: false, skip_table_creation: true, log: false) + assert_received %{options: [schema_migration: true]} + end + + test "bad execute migration" do + assert catch_error(up(PoolRepo, 31, BadMigration, log: false)) + assert DynamicSupervisor.which_children(Ecto.MigratorSupervisor) == [] + end + + test "broken link migration" do + Process.flag(:trap_exit, true) + + assert capture_log(fn -> + {:ok, pid} = Task.start_link(fn -> up(PoolRepo, 31, BrokenLinkMigration, log: false) end) + assert_receive {:EXIT, ^pid, _} + end) =~ "oops" + + assert capture_log(fn -> + catch_exit(up(PoolRepo, 31, BrokenLinkMigration, log: false)) + end) =~ "oops" + end + + test "run up to/step migration", config do + in_tmp fn path -> + create_migration(47, config) + create_migration(48, config) + + assert [47] = run(PoolRepo, path, :up, step: 1, log: false) + assert count_entries() == 1 + + assert [48] = run(PoolRepo, path, :up, to: 48, log: false) + end + end + + test "run down to/step migration", 
config do + in_tmp fn path -> + migrations = [ + create_migration(49, config), + create_migration(50, config), + ] + + assert [49, 50] = run(PoolRepo, path, :up, all: true, log: false) + purge migrations + + assert [50] = run(PoolRepo, path, :down, step: 1, log: false) + purge migrations + + assert count_entries() == 1 + assert [50] = run(PoolRepo, path, :up, to: 50, log: false) + end + end + + test "runs all migrations", config do + in_tmp fn path -> + migrations = [ + create_migration(53, config), + create_migration(54, config), + ] + + assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) + assert [] = run(PoolRepo, path, :up, all: true, log: false) + purge migrations + + assert [54, 53] = run(PoolRepo, path, :down, all: true, log: false) + purge migrations + + assert count_entries() == 0 + assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) + end + end + + test "does not commit half transactions on bad syntax", config do + in_tmp fn path -> + migrations = [ + create_migration(64, config), + create_migration("65_+", config) + ] + + assert_raise SyntaxError, fn -> + run(PoolRepo, path, :up, all: true, log: false) + end + + refute_received {:up, _} + assert count_entries() == 0 + purge migrations + end + end + + @tag :lock_for_migrations + test "raises when connection pool is too small" do + config = Application.fetch_env!(:ecto_sql, PoolRepo) + config = Keyword.merge(config, pool_size: 1) + Application.put_env(:ecto_sql, __MODULE__.SingleConnectionRepo, config) + + defmodule SingleConnectionRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() + end + + {:ok, _pid} = SingleConnectionRepo.start_link() + + in_tmp fn path -> + exception_message = ~r/Migrations failed to run because the connection pool size is less than 2/ + + assert_raise Ecto.MigrationError, exception_message, fn -> + run(SingleConnectionRepo, path, :up, all: true, log: false) + end + end + end + + test "does not raise when connection pool is too 
small but there is no lock" do + config = Application.fetch_env!(:ecto_sql, PoolRepo) + config = Keyword.merge(config, pool_size: 1, migration_lock: nil) + Application.put_env(:ecto_sql, __MODULE__.SingleConnectionNoLockRepo, config) + + defmodule SingleConnectionNoLockRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() + end + + {:ok, _pid} = SingleConnectionNoLockRepo.start_link() + + in_tmp fn path -> + run(SingleConnectionNoLockRepo, path, :up, all: true, log: false) + end + end + + defp count_entries() do + PoolRepo.aggregate(SchemaMigration, :count, :version) + end + + defp create_migration(num, config) do + module = Module.concat(__MODULE__, "Migration#{num}") + + File.write! "#{num}_migration_#{num}.exs", """ + defmodule #{module} do + use Ecto.Migration + + def up do + send #{inspect config.test}, {:up, #{inspect num}} + end + + def down do + send #{inspect config.test}, {:down, #{inspect num}} + end + end + """ + + module + end + + defp purge(modules) do + Enum.each(List.wrap(modules), fn m -> + :code.delete m + :code.purge m + end) + end +end diff --git a/deps/ecto_sql/integration_test/sql/query_many.exs b/deps/ecto_sql/integration_test/sql/query_many.exs new file mode 100644 index 0000000..885e62c --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/query_many.exs @@ -0,0 +1,15 @@ +defmodule Ecto.Integration.QueryManyTest do + use Ecto.Integration.Case, async: true + + alias Ecto.Integration.TestRepo + + test "query_many!/4" do + results = TestRepo.query_many!("SELECT 1; SELECT 2;") + assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results + end + + test "query_many!/4 with iodata" do + results = TestRepo.query_many!(["SELECT", ?\s, ?1, ";", ?\s, "SELECT", ?\s, ?2, ";"]) + assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results + end +end diff --git a/deps/ecto_sql/integration_test/sql/sandbox.exs b/deps/ecto_sql/integration_test/sql/sandbox.exs new file mode 100644 index 
0000000..fc4e3a8 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/sandbox.exs @@ -0,0 +1,316 @@ +defmodule Ecto.Integration.SandboxTest do + use ExUnit.Case + + alias Ecto.Adapters.SQL.Sandbox + alias Ecto.Integration.{PoolRepo, TestRepo} + alias Ecto.Integration.Post + + import ExUnit.CaptureLog + + Application.put_env(:ecto_sql, __MODULE__.DynamicRepo, Application.compile_env(:ecto_sql, TestRepo)) + + defmodule DynamicRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: TestRepo.__adapter__() + end + + describe "errors" do + test "raises if repo doesn't exist" do + assert_raise UndefinedFunctionError, ~r"function UnknownRepo.get_dynamic_repo/0 is undefined", fn -> + Sandbox.mode(UnknownRepo, :manual) + end + end + + test "raises if repo is not started" do + assert_raise RuntimeError, ~r"could not lookup Ecto repo #{inspect DynamicRepo} because it was not started", fn -> + Sandbox.mode(DynamicRepo, :manual) + end + end + + test "raises if repo is not using sandbox" do + assert_raise RuntimeError, ~r"cannot invoke sandbox operation with pool DBConnection", fn -> + Sandbox.mode(PoolRepo, :manual) + end + + assert_raise RuntimeError, ~r"cannot invoke sandbox operation with pool DBConnection", fn -> + Sandbox.checkout(PoolRepo) + end + end + + test "includes link to SQL sandbox on ownership errors" do + assert_raise DBConnection.OwnershipError, + ~r"See Ecto.Adapters.SQL.Sandbox docs for more information.", fn -> + TestRepo.all(Post) + end + end + end + + describe "mode" do + test "uses the repository when checked out" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + Sandbox.checkout(TestRepo) + assert TestRepo.all(Post) == [] + Sandbox.checkin(TestRepo) + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + end + + test "uses the repository when allowed from another process" do + assert_raise DBConnection.OwnershipError, ~r"cannot find 
ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link fn -> + Sandbox.checkout(TestRepo) + Sandbox.allow(TestRepo, self(), parent) + send(parent, :allowed) + Process.sleep(:infinity) + end + + assert_receive :allowed + assert TestRepo.all(Post) == [] + end + + test "uses the repository when allowed from another process by registered name" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + Process.register(parent, __MODULE__) + + Task.start_link fn -> + Sandbox.checkout(TestRepo) + Sandbox.allow(TestRepo, self(), __MODULE__) + send(parent, :allowed) + Process.sleep(:infinity) + end + + assert_receive :allowed + assert TestRepo.all(Post) == [] + + Process.unregister(__MODULE__) + end + + test "uses the repository when shared from another process" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link(fn -> + Sandbox.checkout(TestRepo) + Sandbox.mode(TestRepo, {:shared, self()}) + send(parent, :shared) + Process.sleep(:infinity) + end) + + assert_receive :shared + assert Task.async(fn -> TestRepo.all(Post) end) |> Task.await == [] + after + Sandbox.mode(TestRepo, :manual) + end + + test "works with a dynamic repo" do + repo_pid = start_supervised!({DynamicRepo, name: nil}) + DynamicRepo.put_dynamic_repo(repo_pid) + + assert Sandbox.mode(DynamicRepo, :manual) == :ok + + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + DynamicRepo.all(Post) + end + + Sandbox.checkout(DynamicRepo) + assert DynamicRepo.all(Post) == [] + end + + test "works with a repo pid" do + repo_pid = start_supervised!({DynamicRepo, name: nil}) + DynamicRepo.put_dynamic_repo(repo_pid) + + assert Sandbox.mode(repo_pid, :manual) == :ok + + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + DynamicRepo.all(Post) + 
end + + Sandbox.checkout(repo_pid) + assert DynamicRepo.all(Post) == [] + end + end + + describe "savepoints" do + test "runs inside a sandbox that is rolled back on checkin" do + Sandbox.checkout(TestRepo) + assert TestRepo.insert(%Post{}) + assert TestRepo.all(Post) != [] + Sandbox.checkin(TestRepo) + Sandbox.checkout(TestRepo) + assert TestRepo.all(Post) == [] + Sandbox.checkin(TestRepo) + end + + test "runs inside a sandbox that may be disabled" do + Sandbox.checkout(TestRepo, sandbox: false) + assert TestRepo.insert(%Post{}) + assert TestRepo.all(Post) != [] + Sandbox.checkin(TestRepo) + + Sandbox.checkout(TestRepo) + assert {1, _} = TestRepo.delete_all(Post) + Sandbox.checkin(TestRepo) + + Sandbox.checkout(TestRepo, sandbox: false) + assert {1, _} = TestRepo.delete_all(Post) + Sandbox.checkin(TestRepo) + end + + test "runs inside a sandbox with caller data when preloading associations" do + Sandbox.checkout(TestRepo) + assert TestRepo.insert(%Post{}) + parent = self() + + Task.start_link fn -> + Sandbox.allow(TestRepo, parent, self()) + assert [_] = TestRepo.all(Post) |> TestRepo.preload([:author, :comments]) + send parent, :success + end + + assert_receive :success + end + + test "runs inside a sidebox with custom ownership timeout" do + :ok = Sandbox.checkout(TestRepo, ownership_timeout: 200) + parent = self() + + assert capture_log(fn -> + {:ok, pid} = + Task.start(fn -> + Sandbox.allow(TestRepo, parent, self()) + TestRepo.transaction(fn -> Process.sleep(500) end) + end) + + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, _, ^pid, _}, 1000 + end) =~ "it owned the connection for longer than 200ms" + end + + test "does not taint the sandbox on query errors" do + Sandbox.checkout(TestRepo) + + {:ok, _} = TestRepo.insert(%Post{}, skip_transaction: true) + {:error, _} = TestRepo.query("INVALID") + {:ok, _} = TestRepo.insert(%Post{}, skip_transaction: true) + + Sandbox.checkin(TestRepo) + end + end + + describe "transactions" do + @tag 
:transaction_isolation + test "with custom isolation level" do + Sandbox.checkout(TestRepo, isolation: "READ UNCOMMITTED") + + # Setting it to the same level later on works + TestRepo.query!("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") + + # Even inside a transaction + TestRepo.transaction fn -> + TestRepo.query!("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") + end + end + + test "disconnects on transaction timeouts" do + Sandbox.checkout(TestRepo) + + assert capture_log(fn -> + {:error, :rollback} = + TestRepo.transaction(fn -> Process.sleep(1000) end, timeout: 100) + end) =~ "timed out" + + Sandbox.checkin(TestRepo) + end + end + + describe "checkouts" do + test "with transaction inside checkout" do + Sandbox.checkout(TestRepo) + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + + TestRepo.checkout(fn -> + assert TestRepo.checked_out?() + refute TestRepo.in_transaction?() + TestRepo.transaction(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + assert TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end) + + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end + + test "with checkout inside transaction" do + Sandbox.checkout(TestRepo) + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + + TestRepo.transaction(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + TestRepo.checkout(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end + end + + describe "start_owner!/2" do + test "checks out the connection" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + owner = Sandbox.start_owner!(TestRepo) + assert TestRepo.all(Post) == [] + + :ok = Sandbox.stop_owner(owner) + refute 
Process.alive?(owner) + end + + test "can set shared mode" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link(fn -> + owner = Sandbox.start_owner!(TestRepo, shared: true) + send(parent, {:owner, owner}) + Process.sleep(:infinity) + end) + + assert_receive {:owner, owner} + assert TestRepo.all(Post) == [] + :ok = Sandbox.stop_owner(owner) + after + Sandbox.mode(TestRepo, :manual) + end + end +end diff --git a/deps/ecto_sql/integration_test/sql/sql.exs b/deps/ecto_sql/integration_test/sql/sql.exs new file mode 100644 index 0000000..46d1eb8 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/sql.exs @@ -0,0 +1,178 @@ +defmodule Ecto.Integration.SQLTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.PoolRepo + alias Ecto.Integration.TestRepo + alias Ecto.Integration.Barebone + alias Ecto.Integration.Post + alias Ecto.Integration.CorruptedPk + alias Ecto.Integration.Tag + import Ecto.Query, only: [from: 2] + + test "fragmented types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + query = from p in Post, where: fragment("? 
>= ?", p.inserted_at, ^datetime), select: p.inserted_at + assert [^datetime] = TestRepo.all(query) + end + + test "fragmented schemaless types" do + TestRepo.insert!(%Post{visits: 123}) + assert [123] = TestRepo.all(from p in "posts", select: type(fragment("visits"), :integer)) + end + + test "type casting negative integers" do + TestRepo.insert!(%Post{visits: -42}) + assert [-42] = TestRepo.all(from(p in Post, select: type(p.visits, :integer))) + end + + @tag :array_type + test "fragment array types" do + text1 = "foo" + text2 = "bar" + result = TestRepo.query!("SELECT $1::text[]", [[text1, text2]]) + assert result.rows == [[[text1, text2]]] + end + + @tag :array_type + test "Converts empty array correctly" do + result = TestRepo.query!("SELECT array[1,2,3] = $1", [[]]) + assert result.rows == [[false]] + + result = TestRepo.query!("SELECT array[]::integer[] = $1", [[]]) + assert result.rows == [[true]] + + %{id: tag_id} = TestRepo.insert!(%Tag{uuids: []}) + query = from t in Tag, where: t.uuids == [] + assert [%{id: ^tag_id}] = TestRepo.all(query) + end + + test "query!/4 with dynamic repo" do + TestRepo.put_dynamic_repo(:unknown) + assert_raise RuntimeError, ~r/:unknown/, fn -> TestRepo.query!("SELECT 1") end + end + + test "query!/4" do + result = TestRepo.query!("SELECT 1") + assert result.rows == [[1]] + end + + test "query!/4 with iodata" do + result = TestRepo.query!(["SELECT", ?\s, ?1]) + assert result.rows == [[1]] + end + + test "disconnect_all/2" do + assert :ok = PoolRepo.disconnect_all(0) + end + + test "to_sql/3" do + {sql, []} = TestRepo.to_sql(:all, Barebone) + assert sql =~ "SELECT" + assert sql =~ "barebones" + + {sql, [0]} = TestRepo.to_sql(:update_all, from(b in Barebone, update: [set: [num: ^0]])) + assert sql =~ "UPDATE" + assert sql =~ "barebones" + assert sql =~ "SET" + + {sql, []} = TestRepo.to_sql(:delete_all, Barebone) + assert sql =~ "DELETE" + assert sql =~ "barebones" + end + + test "raises when primary key is not unique on struct 
operation" do + schema = %CorruptedPk{a: "abc"} + TestRepo.insert!(schema) + TestRepo.insert!(schema) + TestRepo.insert!(schema) + + assert_raise Ecto.MultiplePrimaryKeyError, + ~r|expected delete on corrupted_pk to return at most one entry but got 3 entries|, + fn -> TestRepo.delete!(schema) end + end + + test "Repo.insert! escape" do + TestRepo.insert!(%Post{title: "'"}) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + test "Repo.update! escape" do + p = TestRepo.insert!(%Post{title: "hello"}) + TestRepo.update!(Ecto.Changeset.change(p, title: "'")) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + @tag :insert_cell_wise_defaults + test "Repo.insert_all escape" do + TestRepo.insert_all(Post, [%{title: "'"}]) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + test "Repo.update_all escape" do + TestRepo.insert!(%Post{title: "hello"}) + + TestRepo.update_all(Post, set: [title: "'"]) + reader = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(reader) + + query = from(Post, where: "'" != "") + TestRepo.update_all(query, set: [title: "''"]) + assert ["''"] == TestRepo.all(reader) + end + + test "Repo.delete_all escape" do + TestRepo.insert!(%Post{title: "hello"}) + assert [_] = TestRepo.all(Post) + + TestRepo.delete_all(from(Post, where: "'" == "'")) + assert [] == TestRepo.all(Post) + end + + test "load" do + inserted_at = ~N[2016-01-01 09:00:00] + TestRepo.insert!(%Post{title: "title1", inserted_at: inserted_at, public: false}) + + result = Ecto.Adapters.SQL.query!(TestRepo, "SELECT * FROM posts", []) + posts = Enum.map(result.rows, &TestRepo.load(Post, {result.columns, &1})) + assert [%Post{title: "title1", inserted_at: ^inserted_at, public: false}] = posts + end + + test "returns true when table exists" do + assert Ecto.Adapters.SQL.table_exists?(TestRepo, "posts") + end + + test "returns false table doesn't exists" do + 
refute Ecto.Adapters.SQL.table_exists?(TestRepo, "unknown") + end + + test "returns result as a formatted table" do + TestRepo.insert_all(Post, [%{title: "my post title", counter: 1, public: nil}]) + + # resolve correct query for each adapter + query = from(p in Post, select: [p.title, p.counter, p.public]) + {query, _} = Ecto.Adapters.SQL.to_sql(:all, TestRepo, query) + + table = + query + |> TestRepo.query!() + |> Ecto.Adapters.SQL.format_table() + + assert table == "+---------------+---------+--------+\n| title | counter | public |\n+---------------+---------+--------+\n| my post title | 1 | NULL |\n+---------------+---------+--------+" + end + + test "format_table edge cases" do + assert Ecto.Adapters.SQL.format_table(nil) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: nil, rows: nil}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: []}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: [["test"]]}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: []}) == "+------+\n| test |\n+------+\n+------+" + assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: nil}) == "+------+\n| test |\n+------+\n+------+" + end +end diff --git a/deps/ecto_sql/integration_test/sql/stream.exs b/deps/ecto_sql/integration_test/sql/stream.exs new file mode 100644 index 0000000..e304918 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/stream.exs @@ -0,0 +1,44 @@ +defmodule Ecto.Integration.StreamTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + import Ecto.Query + + test "stream empty" do + assert {:ok, []} = TestRepo.transaction(fn() -> + TestRepo.stream(Post) + |> Enum.to_list() + end) + + assert {:ok, []} = TestRepo.transaction(fn() -> + TestRepo.stream(from p in Post) + |> Enum.to_list() + end) + end + + test "stream without schema" do + %Post{} 
= TestRepo.insert!(%Post{title: "title1"}) + %Post{} = TestRepo.insert!(%Post{title: "title2"}) + + assert {:ok, ["title1", "title2"]} = TestRepo.transaction(fn() -> + TestRepo.stream(from(p in "posts", order_by: p.title, select: p.title)) + |> Enum.to_list() + end) + end + + test "stream with assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + + stream = TestRepo.stream(Ecto.assoc(p1, :comments)) + assert {:ok, [c1, c2]} = TestRepo.transaction(fn() -> + Enum.to_list(stream) + end) + assert c1.id == cid1 + assert c2.id == cid2 + end +end diff --git a/deps/ecto_sql/integration_test/sql/subquery.exs b/deps/ecto_sql/integration_test/sql/subquery.exs new file mode 100644 index 0000000..d820924 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/subquery.exs @@ -0,0 +1,153 @@ +defmodule Ecto.Integration.SubQueryTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + + test "from: subqueries with select source" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: p + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [post] = + TestRepo.all(from p in subquery(query), select: p) + + assert %NaiveDateTime{} = post.inserted_at + assert post.__meta__.state == :loaded + end + + @tag :map_boolean_in_expression + test "from: subqueries with map and select expression" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: %{title: p.title, pub: not p.public} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [%{title: "hello", pub: false}] = + TestRepo.all(from p in subquery(query), select: p) + 
assert [{"hello", %{title: "hello", pub: false}}] = + TestRepo.all(from p in subquery(query), select: {p.title, p}) + assert [{%{title: "hello", pub: false}, false}] = + TestRepo.all(from p in subquery(query), select: {p, p.pub}) + end + + @tag :map_boolean_in_expression + test "from: subqueries with map update and select expression" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: %{p | public: not p.public} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [%Post{title: "hello", public: false}] = + TestRepo.all(from p in subquery(query), select: p) + assert [{"hello", %Post{title: "hello", public: false}}] = + TestRepo.all(from p in subquery(query), select: {p.title, p}) + assert [{%Post{title: "hello", public: false}, false}] = + TestRepo.all(from p in subquery(query), select: {p, p.public}) + end + + test "from: subqueries with map update on virtual field and select expression" do + TestRepo.insert!(%Post{title: "hello"}) + + query = from p in Post, select: %{p | temp: p.title} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.temp) + assert [%Post{title: "hello", temp: "hello"}] = + TestRepo.all(from p in subquery(query), select: p) + end + + @tag :subquery_aggregates + test "from: subqueries with aggregates" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 11}) + TestRepo.insert!(%Post{visits: 13}) + + query = from p in Post, select: [:visits], order_by: [asc: :visits] + assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + query = from p in Post, select: [:visits], order_by: [asc: :visits], limit: 2 + assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + + query = from p in Post, order_by: [asc: :visits] + assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + query = from p in Post, order_by: [asc: :visits], limit: 2 + assert [11] = TestRepo.all(from p 
in subquery(query), select: max(p.visits)) + end + + test "from: subqueries with parameters" do + TestRepo.insert!(%Post{visits: 10, title: "hello"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + TestRepo.insert!(%Post{visits: 13, title: "world"}) + + query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 + query = from p in subquery(query), where: p.title == ^"hello", select: fragment("? + ?", p.visits, ^1) + assert [12] = TestRepo.all(query) + end + + test "join: subqueries with select source" do + %{id: id} = TestRepo.insert!(%Post{title: "hello", public: true}) + TestRepo.insert!(%Comment{post_id: id}) + + query = from p in Post, select: p + assert ["hello"] = + TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p.title) + assert [%Post{inserted_at: %NaiveDateTime{}}] = + TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p) + end + + test "join: subqueries with parameters" do + TestRepo.insert!(%Post{visits: 10, title: "hello"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + TestRepo.insert!(%Post{visits: 13, title: "world"}) + TestRepo.insert!(%Comment{}) + TestRepo.insert!(%Comment{}) + + query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 + query = from c in Comment, + join: p in subquery(query), + on: true, + where: p.title == ^"hello", + select: fragment("? 
+ ?", p.visits, ^1) + assert [12, 12] = TestRepo.all(query) + end + + @tag :subquery_in_order_by + test "subqueries in order by" do + TestRepo.insert!(%Post{visits: 10, title: "hello"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + + query = from p in Post, as: :p, order_by: [asc: exists(from p in Post, where: p.visits > parent_as(:p).visits)] + + assert [%{visits: 11}, %{visits: 10}] = TestRepo.all(query) + end + + @tag :multicolumn_distinct + @tag :subquery_in_distinct + test "subqueries in distinct" do + TestRepo.insert!(%Post{visits: 10, title: "hello1"}) + TestRepo.insert!(%Post{visits: 10, title: "hello2"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + + query = from p in Post, as: :p, distinct: exists(from p in Post, where: p.visits > parent_as(:p).visits), order_by: [asc: :title] + + assert [%{title: "hello"}, %{title: "hello1"}] = TestRepo.all(query) + end + + @tag :subquery_in_group_by + test "subqueries in group by" do + TestRepo.insert!(%Post{visits: 10, title: "hello1"}) + TestRepo.insert!(%Post{visits: 10, title: "hello2"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + + query = from p in Post, as: :p, select: sum(p.visits), group_by: exists(from p in Post, where: p.visits > parent_as(:p).visits), order_by: [sum(p.visits)] + + query + |> TestRepo.all() + |> Enum.map(&Decimal.new/1) + |> Enum.zip([Decimal.new(11), Decimal.new(20)]) + |> Enum.all?(fn {a, b} -> Decimal.eq?(a, b) end) + |> assert() + end +end diff --git a/deps/ecto_sql/integration_test/sql/transaction.exs b/deps/ecto_sql/integration_test/sql/transaction.exs new file mode 100644 index 0000000..6ac9334 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/transaction.exs @@ -0,0 +1,277 @@ +defmodule Ecto.Integration.TransactionTest do + # We can keep this test async as long as it + # is the only one access the transactions table + use Ecto.Integration.Case, async: true + + import Ecto.Query + alias Ecto.Integration.PoolRepo # Used for writes + alias 
Ecto.Integration.TestRepo # Used for reads + + @moduletag :capture_log + + defmodule UniqueError do + defexception message: "unique error" + end + + setup do + PoolRepo.delete_all "transactions" + :ok + end + + defmodule Trans do + use Ecto.Schema + + schema "transactions" do + field :num, :integer + end + end + + test "transaction returns value" do + refute PoolRepo.in_transaction?() + {:ok, val} = PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + {:ok, val} = + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + 42 + end) + assert PoolRepo.in_transaction?() + val + end) + refute PoolRepo.in_transaction?() + assert val == 42 + end + + test "transaction re-raises" do + assert_raise UniqueError, fn -> + PoolRepo.transaction(fn -> + PoolRepo.transaction(fn -> + raise UniqueError + end) + end) + end + end + + # tag is required for TestRepo, since it is checkout in + # Ecto.Integration.Case setup + @tag isolation_level: :snapshot + test "transaction commits" do + # mssql requires that all transactions that use same shared lock are set + # to :snapshot isolation level + opts = [isolation_level: :snapshot] + + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 1}) + assert [^e] = PoolRepo.all(Trans) + assert [] = TestRepo.all(Trans) + end, opts) + + assert [%Trans{num: 1}] = PoolRepo.all(Trans) + end + + @tag isolation_level: :snapshot + test "transaction rolls back" do + opts = [isolation_level: :snapshot] + try do + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 2}) + assert [^e] = PoolRepo.all(Trans) + assert [] = TestRepo.all(Trans) + raise UniqueError + end, opts) + rescue + UniqueError -> :ok + end + + assert [] = TestRepo.all(Trans) + end + + test "transaction rolls back per repository" do + message = "cannot call rollback outside of transaction" + + assert_raise RuntimeError, message, fn -> + PoolRepo.rollback(:done) + end + + assert_raise RuntimeError, message, fn -> + TestRepo.transaction fn -> + 
PoolRepo.rollback(:done) + end + end + end + + @tag :assigns_id_type + test "transaction rolls back with reason on aborted transaction" do + e1 = PoolRepo.insert!(%Trans{num: 13}) + + assert_raise Ecto.ConstraintError, fn -> + TestRepo.transaction fn -> + PoolRepo.insert!(%Trans{id: e1.id, num: 14}) + end + end + end + + test "nested transaction partial rollback" do + assert PoolRepo.transaction(fn -> + e1 = PoolRepo.insert!(%Trans{num: 3}) + assert [^e1] = PoolRepo.all(Trans) + + try do + PoolRepo.transaction(fn -> + e2 = PoolRepo.insert!(%Trans{num: 4}) + assert [^e1, ^e2] = PoolRepo.all(from(t in Trans, order_by: t.num)) + raise UniqueError + end) + rescue + UniqueError -> :ok + end + + assert_raise DBConnection.ConnectionError, "transaction rolling back", + fn() -> PoolRepo.insert!(%Trans{num: 5}) end + end) == {:error, :rollback} + + assert TestRepo.all(Trans) == [] + end + + test "manual rollback doesn't bubble up" do + x = PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 6}) + assert [^e] = PoolRepo.all(Trans) + PoolRepo.rollback(:oops) + end) + + assert x == {:error, :oops} + assert [] = TestRepo.all(Trans) + end + + test "manual rollback bubbles up on nested transaction" do + assert PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 7}) + assert [^e] = PoolRepo.all(Trans) + assert {:error, :oops} = PoolRepo.transaction(fn -> + PoolRepo.rollback(:oops) + end) + assert_raise DBConnection.ConnectionError, "transaction rolling back", + fn() -> PoolRepo.insert!(%Trans{num: 8}) end + end) == {:error, :rollback} + + assert [] = TestRepo.all(Trans) + end + + test "transactions are not shared in repo" do + pid = self() + opts = [isolation_level: :snapshot] + + new_pid = spawn_link fn -> + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 9}) + assert [^e] = PoolRepo.all(Trans) + send(pid, :in_transaction) + receive do + :commit -> :ok + after + 5000 -> raise "timeout" + end + end, opts) + send(pid, :committed) + end + + receive 
do + :in_transaction -> :ok + after + 5000 -> raise "timeout" + end + + # mssql requires that all transactions that use same shared lock + # set transaction isolation level to "snapshot" so this must be wrapped into + # explicit transaction + PoolRepo.transaction(fn -> + assert [] = PoolRepo.all(Trans) + end, opts) + + send(new_pid, :commit) + receive do + :committed -> :ok + after + 5000 -> raise "timeout" + end + + assert [%Trans{num: 9}] = PoolRepo.all(Trans) + end + + ## Checkout + + describe "with checkouts" do + test "transaction inside checkout" do + PoolRepo.checkout(fn -> + refute PoolRepo.in_transaction?() + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + end) + refute PoolRepo.in_transaction?() + end) + end + + test "checkout inside transaction" do + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + PoolRepo.checkout(fn -> + assert PoolRepo.in_transaction?() + end) + assert PoolRepo.in_transaction?() + end) + end + + @tag :transaction_checkout_raises + test "checkout raises on transaction attempt" do + assert_raise DBConnection.ConnectionError, ~r"connection was checked out with status", fn -> + PoolRepo.checkout(fn -> PoolRepo.query!("BEGIN") end) + end + end + end + + ## Logging + + defp register_telemetry() do + Process.put(:telemetry, fn _, measurements, event -> send(self(), {measurements, event}) end) + end + + test "log begin, commit and rollback" do + register_telemetry() + + PoolRepo.transaction(fn -> + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + assert is_integer(measurements.queue_time) and measurements.queue_time >= 0 + + refute_received %{} + register_telemetry() + end) + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + + assert PoolRepo.transaction(fn -> + 
refute_received %{} + register_telemetry() + PoolRepo.rollback(:log_rollback) + end) == {:error, :log_rollback} + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + end + + test "log queries inside transactions" do + PoolRepo.transaction(fn -> + register_telemetry() + assert [] = PoolRepo.all(Trans) + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + assert is_integer(measurements.decode_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + end) + end +end diff --git a/deps/ecto_sql/integration_test/support/file_helpers.exs b/deps/ecto_sql/integration_test/support/file_helpers.exs new file mode 100644 index 0000000..72ba741 --- /dev/null +++ b/deps/ecto_sql/integration_test/support/file_helpers.exs @@ -0,0 +1,45 @@ +defmodule Support.FileHelpers do + import ExUnit.Assertions + + @doc """ + Returns the `tmp_path` for tests. + """ + def tmp_path do + Path.expand("../../tmp", __DIR__) + end + + @doc """ + Executes the given function in a temp directory + tailored for this test case and test. + """ + defmacro in_tmp(fun) do + {name, _arity} = __CALLER__.function || raise "in_tmp must be called inside a function" + path = Path.join([tmp_path(), "#{__CALLER__.module}", "#{name}"]) + + quote do + path = unquote(path) + File.rm_rf!(path) + File.mkdir_p!(path) + File.cd!(path, fn -> unquote(fun).(path) end) + end + end + + @doc """ + Asserts a file was generated. + """ + def assert_file(file) do + assert File.regular?(file), "Expected #{file} to exist, but does not" + end + + @doc """ + Asserts a file was generated and that it matches a given pattern. 
+ """ + def assert_file(file, callback) when is_function(callback, 1) do + assert_file(file) + callback.(File.read!(file)) + end + + def assert_file(file, match) do + assert_file(file, &assert(&1 =~ match)) + end +end diff --git a/deps/ecto_sql/integration_test/support/migration.exs b/deps/ecto_sql/integration_test/support/migration.exs new file mode 100644 index 0000000..6ae53bc --- /dev/null +++ b/deps/ecto_sql/integration_test/support/migration.exs @@ -0,0 +1,166 @@ +defmodule Ecto.Integration.Migration do + use Ecto.Migration + + def change do + # IO.puts "TESTING MIGRATION LOCK" + # Process.sleep(10000) + + create table(:users, comment: "users table") do + add :name, :string, comment: "name column" + add :custom_id, :uuid + timestamps() + end + + create table(:posts) do + add :title, :string, size: 100 + add :counter, :integer + add :blob, :binary + add :bid, :binary_id + add :uuid, :uuid + add :meta, :map + add :links, {:map, :string} + add :intensities, {:map, :float} + add :public, :boolean + add :cost, :decimal, precision: 2, scale: 1 + add :visits, :integer + add :wrapped_visits, :integer + add :intensity, :float + add :author_id, :integer + add :posted, :date + add :read_only, :string + timestamps(null: true) + end + + create table(:posts_users, primary_key: false) do + add :post_id, references(:posts) + add :user_id, references(:users) + end + + create table(:posts_users_pk) do + add :post_id, references(:posts) + add :user_id, references(:users) + timestamps() + end + + # Add a unique index on uuid. We use this + # to verify the behaviour that the index + # only matters if the UUID column is not NULL. 
+ create unique_index(:posts, [:uuid], comment: "posts index") + + create table(:permalinks) do + add :uniform_resource_locator, :string + add :title, :string + add :post_id, references(:posts) + add :user_id, references(:users) + end + + create unique_index(:permalinks, [:post_id]) + create unique_index(:permalinks, [:uniform_resource_locator]) + + create table(:comments) do + add :text, :string, size: 100 + add :lock_version, :integer, default: 1 + add :post_id, references(:posts) + add :author_id, references(:users) + end + + create table(:customs, primary_key: false) do + add :bid, :binary_id, primary_key: true + add :uuid, :uuid + end + + create unique_index(:customs, [:uuid]) + + create table(:customs_customs, primary_key: false) do + add :custom_id1, references(:customs, column: :bid, type: :binary_id) + add :custom_id2, references(:customs, column: :bid, type: :binary_id) + end + + create table(:barebones) do + add :num, :integer + end + + create table(:transactions) do + add :num, :integer + end + + create table(:lock_counters) do + add :count, :integer + end + + create table(:orders) do + add :label, :string + add :item, :map + add :items, :map + add :meta, :map + add :permalink_id, references(:permalinks) + end + + unless :array_type in ExUnit.configuration()[:exclude] do + create table(:tags) do + add :ints, {:array, :integer} + add :uuids, {:array, :uuid}, default: [] + add :items, {:array, :map} + end + + create table(:array_loggings) do + add :uuids, {:array, :uuid}, default: [] + timestamps() + end + end + + unless :bitstring_type in ExUnit.configuration()[:exclude] do + create table(:bitstrings) do + add :bs, :bitstring + add :bs_with_default, :bitstring, default: <<42::6>> + add :bs_with_size, :bitstring, size: 10 + end + end + + if Code.ensure_loaded?(Duration) do + unless :duration_type in ExUnit.configuration()[:exclude] do + create table(:durations) do + add :dur, :duration + add :dur_with_fields, :duration, fields: "MONTH" + add 
:dur_with_precision, :duration, precision: 4 + add :dur_with_fields_and_precision, :duration, fields: "HOUR TO SECOND", precision: 1 + add :dur_with_default, :duration, default: "10 MONTH" + end + end + end + + create table(:composite_pk, primary_key: false) do + add :a, :integer, primary_key: true + add :b, :integer, primary_key: true + add :name, :string + end + + create table(:corrupted_pk, primary_key: false) do + add :a, :string + end + + create table(:posts_users_composite_pk) do + add :post_id, references(:posts), primary_key: true + add :user_id, references(:users), primary_key: true + timestamps() + end + + create unique_index(:posts_users_composite_pk, [:post_id, :user_id]) + + create table(:usecs) do + add :naive_datetime_usec, :naive_datetime_usec + add :utc_datetime_usec, :utc_datetime_usec + end + + create table(:bits) do + add :bit, :bit + end + + create table(:loggings, primary_key: false) do + add :bid, :binary_id, primary_key: true + add :int, :integer + add :uuid, :uuid + timestamps() + end + end +end diff --git a/deps/ecto_sql/integration_test/support/repo.exs b/deps/ecto_sql/integration_test/support/repo.exs new file mode 100644 index 0000000..f17c838 --- /dev/null +++ b/deps/ecto_sql/integration_test/support/repo.exs @@ -0,0 +1,23 @@ +defmodule Ecto.Integration.Repo do + defmacro __using__(opts) do + quote do + use Ecto.Repo, unquote(opts) + + @query_event __MODULE__ + |> Module.split() + |> Enum.map(& &1 |> Macro.underscore() |> String.to_atom()) + |> Kernel.++([:query]) + + def init(_, opts) do + fun = &Ecto.Integration.Repo.handle_event/4 + :telemetry.attach_many(__MODULE__, [[:custom], @query_event], fun, :ok) + {:ok, opts} + end + end + end + + def handle_event(event, latency, metadata, _config) do + handler = Process.delete(:telemetry) || fn _, _, _ -> :ok end + handler.(event, latency, metadata) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapter/migration.ex b/deps/ecto_sql/lib/ecto/adapter/migration.ex new file mode 100644 index 
0000000..d627459 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapter/migration.ex @@ -0,0 +1,72 @@ +defmodule Ecto.Adapter.Migration do + @moduledoc """ + Specifies the adapter migrations API. + """ + + alias Ecto.Migration.Constraint + alias Ecto.Migration.Table + alias Ecto.Migration.Index + alias Ecto.Migration.Reference + + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @type drop_mode :: :restrict | :cascade + + @typedoc "All migration commands" + @type command :: + raw :: + String.t() + | {:create, Table.t(), [table_subcommand]} + | {:create_if_not_exists, Table.t(), [table_subcommand]} + | {:alter, Table.t(), [table_subcommand]} + | {:drop, Table.t(), drop_mode()} + | {:drop_if_exists, Table.t(), drop_mode()} + | {:create, Index.t()} + | {:create_if_not_exists, Index.t()} + | {:drop, Index.t(), drop_mode()} + | {:drop_if_exists, Index.t(), drop_mode()} + | {:create, Constraint.t()} + | {:drop, Constraint.t(), drop_mode()} + | {:drop_if_exists, Constraint.t(), drop_mode()} + + @typedoc "All commands allowed within the block passed to `table/2`" + @type table_subcommand :: + {:add, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} + | {:add_if_not_exists, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), + Keyword.t()} + | {:modify, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), + Keyword.t()} + | {:remove, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), + Keyword.t()} + | {:remove, field :: atom} + | {:remove_if_exists, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary()} + | {:remove_if_exists, field :: atom} + + @typedoc """ + A struct that represents a table or index in a database schema. + + These database objects can be modified through the use of a Data + Definition Language, hence the name DDL object. + """ + @type ddl_object :: Table.t() | Index.t() + + @doc """ + Checks if the adapter supports ddl transaction. + """ + @callback supports_ddl_transaction? 
:: boolean + + @doc """ + Executes migration commands. + """ + @callback execute_ddl(adapter_meta, command, options :: Keyword.t()) :: + {:ok, [{Logger.level(), Logger.message(), Logger.metadata()}]} + + @doc """ + Locks the migrations table and emits the locked versions for callback execution. + + It returns the result of calling the given function with a list of versions. + """ + @callback lock_for_migrations(adapter_meta, options :: Keyword.t(), fun) :: + result + when fun: (-> result), result: var +end diff --git a/deps/ecto_sql/lib/ecto/adapter/structure.ex b/deps/ecto_sql/lib/ecto/adapter/structure.ex new file mode 100644 index 0000000..6f9c8b3 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapter/structure.ex @@ -0,0 +1,62 @@ +defmodule Ecto.Adapter.Structure do + @moduledoc """ + Specifies the adapter structure (dump/load) API. + """ + + @doc """ + Dumps the given structure. + + The path will be looked in the `config` under :dump_path or + default to the structure path inside `default`. + + Returns an `:ok` tuple if it was dumped successfully, an error tuple otherwise. + + ## Examples + + structure_dump("priv/repo", username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback structure_dump(default :: String.t(), config :: Keyword.t()) :: + {:ok, String.t()} | {:error, term} + + @doc """ + Loads the given structure. + + The path will be looked in the `config` under :dump_path or + default to the structure path inside `default`. + + Returns an `:ok` tuple if it was loaded successfully, an error tuple otherwise. + + ## Examples + + structure_load("priv/repo", username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback structure_load(default :: String.t(), config :: Keyword.t()) :: + {:ok, String.t()} | {:error, term} + + @doc """ + Runs the dump command for the given repo / config. + + Calling this function will setup authentication and run the dump cli + command with your provided `args`. 
+ + The options in `opts` are passed directly to `System.cmd/3`. + + Returns `{output, exit_status}` where `output` is a string of the stdout + (as long as no option `into` is provided, see `System.cmd/3`) and `exit_status` + is the exit status of the invocation. (`0` for success) + + ## Examples + + iex> dump_cmd(["--data-only", "--table", "table_name"], [stdout_to_stderr: true], Acme.Repo.config()) + {"--\n-- PostgreSQL database dump\n--\n" <> _rest, 0} + + """ + @callback dump_cmd(args :: [String.t()], opts :: Keyword.t(), config :: Keyword.t()) :: + {output :: Collectable.t(), exit_status :: non_neg_integer()} +end diff --git a/deps/ecto_sql/lib/ecto/adapters/myxql.ex b/deps/ecto_sql/lib/ecto/adapters/myxql.ex new file mode 100644 index 0000000..521d97c --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/myxql.ex @@ -0,0 +1,602 @@ +defmodule Ecto.Adapters.MyXQL do + @moduledoc """ + Adapter module for MySQL. + + It uses `MyXQL` for communicating to the database. + + ## Options + + MySQL options split in different categories described + below. All options can be given via the repository + configuration: + + config :your_app, YourApp.Repo, + ... + + The `:prepare` option may be specified per operation: + + YourApp.Repo.all(Queryable, prepare: :unnamed) + + ### Connection options + + * `:protocol` - Set to `:socket` for using UNIX domain socket, or `:tcp` for TCP + (default: `:socket`) + * `:socket` - Connect to MySQL via UNIX sockets in the given path. + * `:hostname` - Server hostname + * `:port` - Server port (default: 3306) + * `:username` - Username + * `:password` - User password + * `:database` - the database to connect to + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Accepts a list of options to enable TLS for the client connection, + or `false` to disable it. 
See the documentation for [Erlang's `ssl` module](`e:ssl:ssl`) + for a list of options (default: false) + * `:connect_timeout` - The timeout for establishing new connections (default: 5000) + * `:cli_protocol` - The protocol used for the mysql client connection (default: `"tcp"`). + This option is only used for `mix ecto.load` and `mix ecto.dump`, + via the `mysql` command. For more information, please check + [MySQL docs](https://dev.mysql.com/doc/en/connecting.html) + * `:socket_options` - Specifies socket configuration + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + The `:socket_options` are particularly useful when configuring the size + of both send and receive buffers. For example, when Ecto starts with a + pool of 20 connections, the memory usage may quickly grow from 20MB to + 50MB based on the operating system default values for TCP buffers. It is + advised to stick with the operating system defaults but they can be + tweaked if desired: + + socket_options: [recbuf: 8192, sndbuf: 8192] + + We also recommend developers to consult the `MyXQL.start_link/1` documentation + for a complete listing of all supported options. + + ### Storage options + + * `:charset` - the database encoding (default: "utf8mb4") + * `:collation` - the collation order + * `:dump_path` - where to place dumped structures + * `:dump_prefixes` - list of prefixes that will be included in the + structure dump. For MySQL, this list must be of length 1. Multiple + prefixes are not supported. When specified, the prefixes will have + their definitions dumped along with the data in their migration table. + When it is not specified, only the configured database and its migration + table are dumped. + + ### After connect callback + + If you want to execute a callback as soon as connection is established + to the database, you can use the `:after_connect` configuration. 
For + example, in your repository configuration you can add: + + after_connect: {MyXQL, :query!, ["SET variable = value", []]} + + You can also specify your own module that will receive the MyXQL + connection as argument. + + ## Limitations + + There are some limitations when using Ecto with MySQL that one + needs to be aware of. + + ### Engine + + Tables created by Ecto are guaranteed to use InnoDB, regardless + of the MySQL version. + + ### UUIDs + + MySQL does not support UUID types. Ecto emulates them by using + `binary(16)`. + + ### Read after writes + + Because MySQL does not support RETURNING clauses in INSERT and + UPDATE, it does not support the `:read_after_writes` option of + `Ecto.Schema.field/3`. + + ### DDL Transaction + + MySQL does not support migrations inside transactions as it + automatically commits after some commands like CREATE TABLE. + Therefore MySQL migrations does not run inside transactions. + + ## Old MySQL versions + + ### JSON support + + MySQL introduced a native JSON type in v5.7.8, if your server is + using this version or higher, you may use `:map` type for your + column in migration: + + add :some_field, :map + + If you're using older server versions, use a `TEXT` field instead: + + add :some_field, :text + + in either case, the adapter will automatically encode/decode the + value from JSON. + + ### usec in datetime + + Old MySQL versions did not support usec in datetime while + more recent versions would round or truncate the usec value. + + Therefore, in case the user decides to use microseconds in + datetimes and timestamps with MySQL, be aware of such + differences and consult the documentation for your MySQL + version. + + If your version of MySQL supports microsecond precision, you + will be able to utilize Ecto's usec types. + + ## Multiple Result Support + + MyXQL supports the execution of queries that return multiple + results, such as text queries with multiple statements separated + by semicolons or stored procedures. 
These can be executed with + `Ecto.Adapters.SQL.query_many/4` or the `YourRepo.query_many/3` + shortcut. + + Be default, these queries will be executed with the `:query_type` + option set to `:text`. To take advantage of prepared statements + when executing a stored procedure, set the `:query_type` option + to `:binary`. + """ + + # Inherit all behaviour from Ecto.Adapters.SQL + use Ecto.Adapters.SQL, driver: :myxql + + # And provide a custom storage implementation + @behaviour Ecto.Adapter.Storage + @behaviour Ecto.Adapter.Structure + + @default_prepare_opt :named + + ## Custom MySQL types + + @impl true + def loaders({:array, _}, type), do: [&json_decode/1, type] + def loaders({:map, _}, type), do: [&json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:map, type), do: [&json_decode/1, type] + def loaders(:float, type), do: [&float_decode/1, type] + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_, type), do: [type] + + defp bool_decode(<<0>>), do: {:ok, false} + defp bool_decode(<<1>>), do: {:ok, true} + defp bool_decode(<<0::size(1)>>), do: {:ok, false} + defp bool_decode(<<1::size(1)>>), do: {:ok, true} + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + defp bool_decode(x), do: {:ok, x} + + defp float_decode(%Decimal{} = decimal), do: {:ok, Decimal.to_float(decimal)} + defp float_decode(x), do: {:ok, x} + + defp json_decode(x) when is_binary(x), do: {:ok, MyXQL.json_library().decode!(x)} + defp json_decode(x), do: {:ok, x} + + ## Query API + + @impl Ecto.Adapter.Queryable + def execute(adapter_meta, query_meta, query, params, opts) do + prepare = Keyword.get(opts, :prepare, @default_prepare_opt) + + unless valid_prepare?(prepare) do + raise ArgumentError, + "expected option `:prepare` to be either `:named` or `:unnamed`, got: #{inspect(prepare)}" + end + + Ecto.Adapters.SQL.execute(prepare, adapter_meta, query_meta, query, params, 
opts) + end + + defp valid_prepare?(prepare) when prepare in [:named, :unnamed], do: true + defp valid_prepare?(_), do: false + + ## Storage API + + @impl true + def storage_up(opts) do + database = Keyword.fetch!(opts, :database) + + opts = Keyword.delete(opts, :database) + charset = opts[:charset] || "utf8mb4" + + check_existence_command = + "SELECT TRUE FROM information_schema.schemata WHERE schema_name = '#{database}'" + + case run_query(check_existence_command, opts) do + {:ok, %{num_rows: 1}} -> + {:error, :already_up} + + _ -> + create_command = + ~s(CREATE DATABASE `#{database}` DEFAULT CHARACTER SET = #{charset}) + |> concat_if(opts[:collation], &"DEFAULT COLLATE = #{&1}") + + case run_query(create_command, opts) do + {:ok, _} -> + :ok + + {:error, %{mysql: %{name: :ER_DB_CREATE_EXISTS}}} -> + {:error, :already_up} + + {:error, error} -> + {:error, Exception.message(error)} + + {:exit, exit} -> + {:error, exit_to_exception(exit)} + end + end + end + + defp concat_if(content, nil, _fun), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @impl true + def storage_down(opts) do + database = Keyword.fetch!(opts, :database) + + opts = Keyword.delete(opts, :database) + command = "DROP DATABASE `#{database}`" + + case run_query(command, opts) do + {:ok, _} -> + :ok + + {:error, %{mysql: %{name: :ER_DB_DROP_EXISTS}}} -> + {:error, :already_down} + + {:error, %{mysql: %{name: :ER_BAD_DB_ERROR}}} -> + {:error, :already_down} + + {:error, error} -> + {:error, Exception.message(error)} + + {:exit, :killed} -> + {:error, :already_down} + + {:exit, exit} -> + {:error, exit_to_exception(exit)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = Keyword.fetch!(opts, :database) + + opts = Keyword.delete(opts, :database) + + check_database_query = + "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '#{database}'" + + case run_query(check_database_query, opts) do + {:ok, 
%{num_rows: 0}} -> :down + {:ok, %{num_rows: _num_rows}} -> :up + other -> {:error, other} + end + end + + @impl true + def supports_ddl_transaction? do + false + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts, repo: repo} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.merge(opts, timeout: :infinity, telemetry_options: [schema_migration: true]) + + {:ok, result} = + transaction(meta, opts, fn -> + lock_name = "\'ecto_#{inspect(repo)}\'" + + try do + {:ok, _} = Ecto.Adapters.SQL.query(meta, "SELECT GET_LOCK(#{lock_name}, -1)", [], opts) + fun.() + after + {:ok, _} = Ecto.Adapters.SQL.query(meta, "SELECT RELEASE_LOCK(#{lock_name})", [], opts) + end + end) + + result + end + + @impl true + def insert(adapter_meta, schema_meta, params, on_conflict, returning, opts) do + %{source: source, prefix: prefix} = schema_meta + {_, query_params, _} = on_conflict + + key = primary_key!(schema_meta, returning) + {fields, values} = :lists.unzip(params) + sql = @conn.insert(prefix, source, fields, [fields], on_conflict, [], []) + + opts = + if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_insert_#{source}_#{length(fields)}"} | opts] + else + opts + end + + case Ecto.Adapters.SQL.query(adapter_meta, sql, values ++ query_params, opts) do + {:ok, %{num_rows: 0}} -> + raise "insert operation failed to insert any row in the database. " <> + "This may happen if you have trigger or other database conditions rejecting operations. " <> + "The emitted SQL was: #{sql}" + + # We were used to check if num_rows was 1 or 2 (in case of upserts) + # but MariaDB supports tables with System Versioning, and in those + # cases num_rows can be more than 2. 
+ {:ok, %{last_insert_id: last_insert_id}} -> + {:ok, last_insert_id(key, last_insert_id)} + + {:error, err} -> + case @conn.to_constraints(err, source: source) do + [] -> raise err + constraints -> {:invalid, constraints} + end + end + end + + defp primary_key!(%{autogenerate_id: {_, key, _type}}, [key]), do: key + defp primary_key!(_, []), do: nil + + defp primary_key!(%{schema: schema}, returning) do + raise ArgumentError, + "MySQL does not support :read_after_writes in schemas for non-primary keys. " <> + "The following fields in #{inspect(schema)} are tagged as such: #{inspect(returning)}" + end + + defp last_insert_id(nil, _last_insert_id), do: [] + defp last_insert_id(_key, 0), do: [] + defp last_insert_id(key, last_insert_id), do: [{key, last_insert_id}] + + @impl true + def structure_dump(default, config) do + table = config[:migration_source] || "schema_migrations" + path = config[:dump_path] || Path.join(default, "structure.sql") + database = dump_database!(config[:dump_prefixes], config[:database]) + + with {:ok, versions} <- select_versions(database, table, config), + {:ok, contents} <- mysql_dump(database, config), + {:ok, contents} <- append_versions(table, versions, contents) do + File.mkdir_p!(Path.dirname(path)) + File.write!(path, contents) + {:ok, path} + end + end + + defp dump_database!([prefix], _), do: prefix + defp dump_database!(nil, config_database), do: config_database + + defp dump_database!(_, _) do + raise ArgumentError, + "cannot dump multiple prefixes with MySQL. Please run the command separately for each prefix." 
+ end + + defp select_versions(database, table, config) do + case run_query(~s[SELECT version FROM `#{database}`.`#{table}` ORDER BY version], config) do + {:ok, %{rows: rows}} -> {:ok, Enum.map(rows, &hd/1)} + {:error, %{mysql: %{name: :ER_NO_SUCH_TABLE}}} -> {:ok, []} + {:error, _} = error -> error + {:exit, exit} -> {:error, exit_to_exception(exit)} + end + end + + defp mysql_dump(database, config) do + args = ["--no-data", "--routines", "--no-create-db", database] + + case run_with_cmd("mysqldump", config, args) do + {output, 0} -> {:ok, output} + {output, _} -> {:error, output} + end + end + + defp append_versions(_table, [], contents) do + {:ok, contents} + end + + defp append_versions(table, versions, contents) do + {:ok, + contents <> + Enum.map_join(versions, &~s[INSERT INTO `#{table}` (version) VALUES (#{&1});\n])} + end + + @impl true + def structure_load(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + + case File.read(path) do + {:ok, contents} -> + args = [ + "--silent", + "--batch", + "--unbuffered", + "--init-command=SET FOREIGN_KEY_CHECKS = 0;", + "--database", + config[:database] + ] + + case run_with_port("mysql", config, args, contents) do + {_output, 0} -> {:ok, path} + {output, _} -> {:error, output} + end + + {:error, reason} -> + {:error, "could not read #{inspect(path)}: #{:file.format_error(reason)}"} + end + end + + @impl true + def dump_cmd(args, opts \\ [], config) when is_list(config) and is_list(args) do + args = + if database = config[:database] do + args ++ [database] + else + args + end + + run_with_cmd("mysqldump", config, args, opts) + end + + ## Helpers + + defp run_query(sql, opts) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = Application.ensure_all_started(:myxql) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + task = + 
Task.Supervisor.async_nolink(Ecto.Adapters.SQL.StorageSupervisor, fn -> + {:ok, conn} = MyXQL.start_link(opts) + + value = MyXQL.query(conn, sql, [], opts) + GenServer.stop(conn) + value + end) + + timeout = Keyword.get(opts, :timeout, 15_000) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + + {:ok, {:error, error}} -> + {:error, error} + + {:exit, exit} -> + {:exit, exit} + + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + defp exit_to_exception({%{__struct__: struct} = error, _}) + when struct in [MyXQL.Error, DBConnection.Error], + do: error + + defp exit_to_exception(reason), do: RuntimeError.exception(Exception.format_exit(reason)) + + defp run_with_cmd(cmd, opts, opt_args, cmd_opts \\ []) do + unless System.find_executable(cmd) do + raise "could not find executable `#{cmd}` in path, " <> + "please guarantee it is available before running ecto commands" + end + + {args, env} = args_env(opts, opt_args) + + cmd_opts = + cmd_opts + |> Keyword.put_new(:stderr_to_stdout, true) + |> Keyword.update(:env, env, &Enum.concat(env, &1)) + + System.cmd(cmd, args, cmd_opts) + end + + defp run_with_port(cmd, opts, opt_args, contents) do + abs_cmd = System.find_executable(cmd) + + unless abs_cmd do + raise "could not find executable `#{cmd}` in path, " <> + "please guarantee it is available before running ecto commands" + end + + abs_cmd = String.to_charlist(abs_cmd) + {args, env} = args_env(opts, opt_args) + + port_opts = [ + :use_stdio, + :exit_status, + :binary, + :hide, + :stderr_to_stdout, + env: validate_env(env), + args: args + ] + + port = Port.open({:spawn_executable, abs_cmd}, port_opts) + Port.command(port, contents) + # Use this as a signal to close the port since we cannot + # send an exit command to mysql in batch mode + Port.command(port, ";SELECT '__ECTO_EOF__';\n") + collect_output(port, "") + end + + defp args_env(opts, opt_args) do + env = + if password = opts[:password] 
do + [{"MYSQL_PWD", password}] + else + [] + end + + host = opts[:hostname] || System.get_env("MYSQL_HOST") || "localhost" + port = opts[:port] || System.get_env("MYSQL_TCP_PORT") || "3306" + protocol = opts[:cli_protocol] || System.get_env("MYSQL_CLI_PROTOCOL") || "tcp" + + user_args = + if username = opts[:username] do + ["--user", username] + else + [] + end + + args = + [ + "--host", + host, + "--port", + to_string(port), + "--protocol", + protocol + ] ++ user_args ++ opt_args + + {args, env} + end + + defp validate_env(enum) do + Enum.map(enum, fn + {k, nil} -> + {String.to_charlist(k), false} + + {k, v} -> + {String.to_charlist(k), String.to_charlist(v)} + + other -> + raise ArgumentError, "invalid environment key-value #{inspect(other)}" + end) + end + + defp collect_output(port, acc) do + receive do + {^port, {:data, data}} -> + acc = acc <> data + + if acc =~ "__ECTO_EOF__" do + Port.close(port) + {acc, 0} + else + collect_output(port, acc) + end + + {^port, {:exit_status, status}} -> + {acc, status} + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex b/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex new file mode 100644 index 0000000..63d3552 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex @@ -0,0 +1,1630 @@ +if Code.ensure_loaded?(MyXQL) do + defmodule Ecto.Adapters.MyXQL.Connection do + @moduledoc false + alias Ecto.Adapters.SQL + + @behaviour Ecto.Adapters.SQL.Connection + + ## Connection + + @impl true + def child_spec(opts) do + MyXQL.child_spec(opts) + end + + ## Query + + @impl true + def prepare_execute(conn, name, sql, params, opts) do + ensure_list_params!(params) + MyXQL.prepare_execute(conn, name, sql, params, opts) + end + + @impl true + def query(conn, sql, params, opts) do + ensure_list_params!(params) + opts = Keyword.put_new(opts, :query_type, :binary_then_text) + MyXQL.query(conn, sql, params, opts) + end + + @impl true + def query_many(conn, sql, params, opts) do + 
ensure_list_params!(params) + opts = Keyword.put_new(opts, :query_type, :text) + MyXQL.query_many(conn, sql, params, opts) + end + + @impl true + def execute(conn, query, params, opts) do + ensure_list_params!(params) + + case MyXQL.execute(conn, query, params, opts) do + {:ok, _, result} -> {:ok, result} + {:error, _} = error -> error + end + end + + @impl true + def stream(conn, sql, params, opts) do + ensure_list_params!(params) + MyXQL.stream(conn, sql, params, opts) + end + + defp ensure_list_params!(params) do + unless is_list(params) do + raise ArgumentError, "expected params to be a list, got: #{inspect(params)}" + end + end + + @quotes ~w(" ' `) + + @impl true + def to_constraints(%MyXQL.Error{mysql: %{name: :ER_DUP_ENTRY}, message: message}, opts) do + with [_, quoted] <- :binary.split(message, " for key "), + [_, index | _] <- :binary.split(quoted, @quotes, [:global]) do + [unique: strip_source(index, opts[:source])] + else + _ -> [] + end + end + + def to_constraints(%MyXQL.Error{mysql: %{name: name}, message: message}, _opts) + when name in [:ER_ROW_IS_REFERENCED_2, :ER_NO_REFERENCED_ROW_2] do + with [_, quoted] <- :binary.split(message, [" CONSTRAINT ", " FOREIGN KEY "]), + [_, index | _] <- :binary.split(quoted, @quotes, [:global]) do + [foreign_key: index] + else + _ -> [] + end + end + + def to_constraints( + %MyXQL.Error{mysql: %{name: :ER_CHECK_CONSTRAINT_VIOLATED}, message: message}, + _opts + ) do + with [_, quoted] <- :binary.split(message, ["Check constraint "]), + [_, constraint | _] <- :binary.split(quoted, @quotes, [:global]) do + [check: constraint] + else + _ -> [] + end + end + + def to_constraints(_, _), + do: [] + + defp strip_source(name, nil), do: name + defp strip_source(name, source), do: String.trim_leading(name, "#{source}.") + + ## Query + + @parent_as __MODULE__ + alias Ecto.Query.{BooleanExpr, ByExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) 
+ + cte = cte(query, sources) + from = from(query, sources) + select = select(query, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, sources) + having = having(query, sources) + window = window(query, sources) + combinations = combinations(query, as_prefix) + order_by = order_by(query, sources) + limit = limit(query, sources) + offset = offset(query, sources) + lock = lock(query, sources) + + [ + cte, + select, + from, + join, + where, + group_by, + having, + window, + combinations, + order_by, + limit, + offset | lock + ] + end + + @impl true + def update_all(query, prefix \\ nil) do + %{from: %{source: source}, select: select} = query + + if select do + error!(nil, ":select is not supported in update_all by MySQL") + end + + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, source) + + fields = + if prefix do + update_fields(:on_conflict, query, sources) + else + update_fields(:update, query, sources) + end + + {join, wheres} = using_join(query, :update_all, sources) + prefix = prefix || ["UPDATE ", from, " AS ", name, join, " SET "] + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, prefix, fields | where] + end + + @impl true + def delete_all(query) do + if query.select do + error!(nil, ":select is not supported in delete_all by MySQL") + end + + sources = create_names(query, []) + cte = cte(query, sources) + {_, name, _} = elem(sources, 0) + + from = from(query, sources) + join = join(query, sources) + where = where(query, sources) + + [cte, "DELETE ", name, ".*", from, join | where] + end + + @impl true + def insert(prefix, table, header, rows, on_conflict, [], []) do + fields = quote_names(header) + + [ + "INSERT INTO ", + quote_table(prefix, table), + " (", + fields, + ") ", + insert_all(rows) | on_conflict(on_conflict, header) + ] + end + + def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, []) do + 
error!(nil, ":returning is not supported in insert/insert_all by MySQL") + end + + def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, _placeholders) do + error!(nil, ":placeholders is not supported by MySQL") + end + + defp on_conflict({_, _, [_ | _]}, _header) do + error!(nil, ":conflict_target is not supported in insert/insert_all by MySQL") + end + + defp on_conflict({:raise, _, []}, _header) do + [] + end + + defp on_conflict({:nothing, _, []}, [field | _]) do + quoted = quote_name(field) + [" ON DUPLICATE KEY UPDATE ", quoted, " = " | quoted] + end + + defp on_conflict({fields, _, []}, _header) when is_list(fields) do + [ + " ON DUPLICATE KEY UPDATE " + | Enum.map_intersperse(fields, ?,, fn field -> + quoted = quote_name(field) + [quoted, " = VALUES(", quoted, ?)] + end) + ] + end + + defp on_conflict({%{wheres: []} = query, _, []}, _header) do + [" ON DUPLICATE KEY " | update_all(query, "UPDATE ")] + end + + defp on_conflict({_query, _, []}, _header) do + error!( + nil, + "Using a query with :where in combination with the :on_conflict option is not supported by MySQL" + ) + end + + defp insert_all(rows) when is_list(rows) do + [ + "VALUES ", + Enum.map_intersperse(rows, ?,, fn row -> + [?(, Enum.map_intersperse(row, ?,, &insert_all_value/1), ?)] + end) + ] + end + + defp insert_all(%Ecto.Query{} = query) do + [?(, all(query), ?)] + end + + defp insert_all_value(nil), do: "DEFAULT" + defp insert_all_value({%Ecto.Query{} = query, _params_counter}), do: [?(, all(query), ?)] + defp insert_all_value(_), do: ~c"?" 
+ + @impl true + def update(prefix, table, fields, filters, _returning) do + fields = Enum.map_intersperse(fields, ", ", &[quote_name(&1), " = ?"]) + + filters = + Enum.map_intersperse(filters, " AND ", fn + {field, nil} -> + [quote_name(field), " IS NULL"] + + {field, _value} -> + [quote_name(field), " = ?"] + end) + + ["UPDATE ", quote_table(prefix, table), " SET ", fields, " WHERE " | filters] + end + + @impl true + def delete(prefix, table, filters, _returning) do + filters = + Enum.map_intersperse(filters, " AND ", fn + {field, nil} -> + [quote_name(field), " IS NULL"] + + {field, _value} -> + [quote_name(field), " = ?"] + end) + + ["DELETE FROM ", quote_table(prefix, table), " WHERE " | filters] + end + + @impl true + # DB explain opts, except format, are deprecated. + # See Notes at https://dev.mysql.com/doc/refman/5.7/en/explain.html + def explain_query(conn, query, params, opts) do + {explain_opts, opts} = Keyword.split(opts, ~w[format]a) + map_format? = {:format, :map} in explain_opts + + case query(conn, build_explain_query(query, explain_opts), params, opts) do + {:ok, %MyXQL.Result{rows: rows}} when map_format? 
-> + json_library = MyXQL.json_library() + decoded_result = Enum.map(rows, &json_library.decode!(&1)) + {:ok, decoded_result} + + {:ok, %MyXQL.Result{} = result} -> + {:ok, SQL.format_table(result)} + + error -> + error + end + end + + def build_explain_query(query, []) do + ["EXPLAIN ", query] + |> IO.iodata_to_binary() + end + + def build_explain_query(query, format: value) do + ["EXPLAIN #{String.upcase("#{format_to_sql(value)}")} ", query] + |> IO.iodata_to_binary() + end + + ## Query generation + + binary_ops = [ + ==: " = ", + !=: " != ", + <=: " <= ", + >=: " >= ", + <: " < ", + >: " > ", + +: " + ", + -: " - ", + *: " * ", + /: " / ", + and: " AND ", + or: " OR ", + like: " LIKE " + ] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do + ["SELECT ", distinct(distinct, sources, query) | select(fields, sources, query)] + end + + defp distinct(nil, _sources, _query), do: [] + defp distinct(%ByExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%ByExpr{expr: false}, _sources, _query), do: [] + + defp distinct(%ByExpr{expr: exprs}, _sources, query) when is_list(exprs) do + error!(query, "DISTINCT with multiple columns is not supported by MySQL") + end + + defp select([], _sources, _query), + do: "TRUE" + + defp select(fields, sources, query) do + Enum.map_intersperse(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {nil, source, nil} -> + error!( + query, + "MySQL adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select" + ) + + {source, _, nil} -> + error!( + query, + "MySQL adapter does not support selecting all fields from #{source} without a schema. 
" <> + "Please specify a schema or specify exactly which fields you want to select" + ) + + {_, source, _} -> + source + end + + {key, value} -> + [expr(value, sources, query), " AS ", quote_name(key)] + + value -> + expr(value, sources, query) + end) + end + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + [" FROM ", from, " AS ", name | Enum.map(hints, &[?\s | &1])] + end + + defp cte(%{with_ctes: %WithExpr{queries: [_ | _]}} = query, sources) do + %{with_ctes: with} = query + recursive_opt = if with.recursive, do: "RECURSIVE ", else: "" + ctes = Enum.map_intersperse(with.queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", recursive_opt, ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({_name, %{materialized: materialized}, _cte}, _sources, query) + when is_boolean(materialized) do + error!(query, "MySQL adapter does not support materialized CTEs") + end + + defp cte_expr({name, opts, cte}, sources, query) do + operation_opt = Map.get(opts, :operation) + + [quote_name(name), " AS ", cte_query(cte, sources, query, operation_opt)] + end + + defp cte_query(query, sources, parent_query, nil) do + cte_query(query, sources, parent_query, :all) + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query, :all) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", all(query, subquery_as_prefix(sources)), ")"] + end + + defp cte_query(%Ecto.Query{} = query, _sources, _parent_query, operation) do + error!( + query, + "MySQL adapter does not support data-modifying CTEs (operation: #{operation})" + ) + end + + defp cte_query(%QueryExpr{expr: expr}, sources, query, _operation) do + expr(expr, sources, query) + end + + defp update_fields(type, %{updates: updates} = query, sources) do + fields = + for( + %{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, update_key(type, key, query, sources), 
value, sources, query) + ) + + Enum.intersperse(fields, ", ") + end + + defp update_key(:update, key, %{from: from} = query, sources) do + {_from, name} = get_source(query, sources, 0, from) + + [name, ?. | quote_name(key)] + end + + defp update_key(:on_conflict, key, _query, _sources) do + quote_name(key) + end + + defp update_op(:set, quoted_key, value, sources, query) do + [quoted_key, " = " | expr(value, sources, query)] + end + + defp update_op(:inc, quoted_key, value, sources, query) do + [quoted_key, " = ", quoted_key, " + " | expr(value, sources, query)] + end + + defp update_op(command, _quoted_key, _value, _sources, query) do + error!(query, "Unknown update operation #{inspect(command)} for MySQL") + end + + defp using_join(%{joins: []}, _kind, _sources), do: {[], []} + + defp using_join(%{joins: joins} = query, kind, sources) do + froms = + Enum.map_intersperse(joins, ", ", fn + %JoinExpr{source: %Ecto.SubQuery{params: [_ | _]}} -> + error!( + query, + "MySQL adapter does not support subqueries with parameters in update_all/delete_all joins" + ) + + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | name] + + %JoinExpr{qual: qual} -> + error!(query, "MySQL adapter supports only inner joins on #{kind}, got: `#{qual}`") + end) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?,, ?\s | froms], wheres} + end + + defp join(%{joins: []}, _sources), do: [] + + defp join(%{joins: joins} = query, sources) do + Enum.map(joins, fn + %JoinExpr{on: %QueryExpr{expr: expr}, qual: qual, ix: ix, source: source, hints: hints} -> + {join, name} = get_source(query, sources, ix, source) + + [ + join_qual(qual, query), + join, + " AS ", + name, + Enum.map(hints, &[?\s | &1]) | join_on(qual, expr, sources, query) + ] + end) + end + + defp join_on(:cross, true, _sources, _query), do: [] + 
defp join_on(:cross_lateral, true, _sources, _query), do: [] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner, _), do: " INNER JOIN " + defp join_qual(:inner_lateral, _), do: " INNER JOIN LATERAL " + defp join_qual(:left, _), do: " LEFT OUTER JOIN " + defp join_qual(:left_lateral, _), do: " LEFT OUTER JOIN LATERAL " + defp join_qual(:right, _), do: " RIGHT OUTER JOIN " + defp join_qual(:full, _), do: " FULL OUTER JOIN " + defp join_qual(:cross, _), do: " CROSS JOIN " + defp join_qual(:cross_lateral, _), do: " CROSS JOIN LATERAL " + + defp join_qual(qual, query), + do: error!(query, "join qualifier #{inspect(qual)} is not supported in the MySQL adapter") + + defp where(%{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp group_by(%{group_bys: []}, _sources), do: [] + + defp group_by(%{group_bys: group_bys} = query, sources) do + [ + " GROUP BY " + | Enum.map_intersperse(group_bys, ", ", fn %ByExpr{expr: expr} -> + Enum.map_intersperse(expr, ", ", &top_level_expr(&1, sources, query)) + end) + ] + end + + defp window(%{windows: []}, _sources), do: [] + + defp window(%{windows: windows} = query, sources) do + [ + " WINDOW " + | Enum.map_intersperse(windows, ", ", fn {name, %{expr: kw}} -> + [quote_name(name), " AS " | window_exprs(kw, sources, query)] + end) + ] + end + + defp window_exprs(kw, sources, query) do + [?(, Enum.map_intersperse(kw, ?\s, &window_expr(&1, sources, query)), ?)] + end + + defp window_expr({:partition_by, fields}, sources, query) do + ["PARTITION BY " | Enum.map_intersperse(fields, ", ", &expr(&1, sources, query))] + end + + defp window_expr({:order_by, fields}, sources, query) do + ["ORDER BY " | Enum.map_intersperse(fields, ", ", &order_by_expr(&1, sources, query))] + end + + defp window_expr({:frame, {:fragment, _, _} = 
fragment}, sources, query) do + expr(fragment, sources, query) + end + + defp order_by(%{order_bys: []}, _sources), do: [] + + defp order_by(%{order_bys: order_bys} = query, sources) do + [ + " ORDER BY " + | Enum.map_intersperse(order_bys, ", ", fn %ByExpr{expr: expr} -> + Enum.map_intersperse(expr, ", ", &order_by_expr(&1, sources, query)) + end) + ] + end + + defp order_by_expr({dir, expr}, sources, query) do + str = top_level_expr(expr, sources, query) + + case dir do + :asc -> str + :desc -> [str | " DESC"] + _ -> error!(query, "#{dir} is not supported in ORDER BY in MySQL") + end + end + + defp limit(%{limit: nil}, _sources), do: [] + + defp limit(%{limit: %{with_ties: true}} = query, _sources) do + error!(query, "MySQL adapter does not support the `:with_ties` limit option") + end + + defp limit(%{limit: %{expr: expr}} = query, sources) do + [" LIMIT " | expr(expr, sources, query)] + end + + defp offset(%{offset: nil}, _sources), do: [] + + defp offset(%{offset: %QueryExpr{expr: expr}} = query, sources) do + [" OFFSET " | expr(expr, sources, query)] + end + + defp combinations(%{combinations: combinations}, as_prefix) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query, as_prefix), ")"] + {:union_all, query} -> [" UNION ALL (", all(query, as_prefix), ")"] + {:except, query} -> [" EXCEPT (", all(query, as_prefix), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query, as_prefix), ")"] + {:intersect, query} -> [" INTERSECT (", all(query, as_prefix), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query, as_prefix), ")"] + end) + end + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [?\s | binary] + defp lock(%{lock: expr} = query, sources), do: [?\s | expr(expr, sources, query)] + + defp boolean(_name, [], _sources, _query), do: [] + + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [ + name, + Enum.reduce(query_exprs, {op, 
paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op) | paren_expr(expr, sources, query)]} + + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op) | paren_expr(expr, sources, query)]} + end) + |> elem(1) + ] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + defp top_level_expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + combinations = + Enum.map(query.combinations, fn {type, combination_query} -> + {type, put_in(combination_query.aliases[@parent_as], {parent_query, sources})} + end) + + query = put_in(query.combinations, combinations) + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [all(query, subquery_as_prefix(sources))] + end + + defp top_level_expr(other, sources, parent_query) do + expr(other, sources, parent_query) + end + + defp expr({:^, [], [_ix]}, _sources, _query) do + ~c"?" + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) or is_binary(field) do + {ix, sources} = get_parent_sources_ix(query, as) + {_, name, _} = elem(sources, ix) + [name, ?. | quote_name(field)] + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) or is_binary(field) do + {_, name, _} = elem(sources, idx) + [name, ?. 
| quote_name(field)] + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_, source, _} = elem(sources, idx) + source + end + + defp expr({:in, _, [_left, []]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = Enum.map_intersperse(right, ?,, &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, {:^, _, [_, length]}]}, sources, query) do + args = Enum.intersperse(List.duplicate(??, length), ?,) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query), ?)] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + [expr(arg, sources, query) | " IS NULL"] + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query), ?)] + end + + defp expr({:filter, _, _}, _sources, query) do + error!(query, "MySQL adapter does not support aggregate filters") + end + + defp expr(%Ecto.SubQuery{} = subquery, sources, parent_query) do + [?(, top_level_expr(subquery, sources, parent_query), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "MySQL adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:values, _, [types, _idx, num_rows]}, _, query) do + [?(, values_list(types, num_rows, query), ?)] + end + + defp expr({:identifier, _, [literal]}, _sources, _query) do + 
quote_name(literal) + end + + defp expr({:constant, _, [literal]}, _sources, _query) when is_binary(literal) do + [?', escape_string(literal), ?'] + end + + defp expr({:constant, _, [literal]}, _sources, _query) when is_number(literal) do + [to_string(literal)] + end + + defp expr({:splice, _, [{:^, _, [_, length]}]}, _sources, _query) do + Enum.intersperse(List.duplicate(??, length), ?,) + end + + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [ + "date_add(", + expr(datetime, sources, query), + ", ", + interval(count, interval, sources, query) | ")" + ] + end + + defp expr({:date_add, _, [date, count, interval]}, sources, query) do + [ + "CAST(date_add(", + expr(date, sources, query), + ", ", + interval(count, interval, sources, query) | ") AS date)" + ] + end + + defp expr({:ilike, _, [_, _]}, _sources, query) do + error!(query, "ilike is not supported by MySQL") + end + + defp expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | quote_name(name)] + end + + defp expr({:over, _, [agg, kw]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | window_exprs(kw, sources, query)] + end + + defp expr({:{}, _, elems}, sources, query) do + [?(, Enum.map_intersperse(elems, ?,, &expr(&1, sources, query)), ?)] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:json_extract_path, _, [expr, path]}, sources, query) do + path = + Enum.map(path, fn + binary when is_binary(binary) -> + [?., ?", escape_json_key(binary), ?"] + + integer when is_integer(integer) -> + "[#{integer}]" + + _ -> + error!( + query, + "MySQL adapter does not support references to source fields inside of `json_extract_path`" + ) + end) + + ["json_extract(", expr(expr, sources, query), ", '$", path, "')"] + end + + defp expr({fun, _, args}, 
sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {[], args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = args + [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)] + + {:fun, fun} -> + [ + fun, + ?(, + modifier, + Enum.map_intersperse(args, ", ", &expr(&1, sources, query)), + ?) + ] + end + end + + defp expr(list, _sources, query) when is_list(list) do + error!(query, "Array type is not supported by MySQL") + end + + defp expr(%Decimal{} = decimal, _sources, _query) do + Decimal.to_string(decimal, :normal) + end + + defp expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query) + when is_binary(binary) do + hex = Base.encode16(binary, case: :lower) + [?x, ?', hex, ?'] + end + + defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) + when type in [:decimal, :float] do + [expr(other, sources, query), " + 0"] + end + + defp expr(%Ecto.Query.Tagged{value: other, type: :boolean}, sources, query) do + ["IF(", expr(other, sources, query), ", TRUE, FALSE)"] + end + + defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do + ["CAST(", expr(other, sources, query), " AS ", ecto_cast_to_db(type, query), ?)] + end + + defp expr(nil, _sources, _query), do: "NULL" + defp expr(true, _sources, _query), do: "TRUE" + defp expr(false, _sources, _query), do: "FALSE" + + defp expr(literal, _sources, _query) when is_binary(literal) do + [?', escape_string(literal), ?'] + end + + defp expr(literal, _sources, _query) when is_integer(literal) do + Integer.to_string(literal) + end + + defp expr(literal, _sources, _query) when is_float(literal) do + # MySQL doesn't support float cast + ["(0 + ", Float.to_string(literal), ?)] + end + + defp expr(expr, _sources, query) do + error!(query, "unsupported expression: #{inspect(expr)}") + end + + defp values_list(types, num_rows, query) 
do + rows = :lists.seq(1, num_rows, 1) + + [ + "VALUES ", + Enum.map_intersperse(rows, ?,, fn _ -> + ["ROW(", values_expr(types, query), ?)] + end) + ] + end + + defp values_expr(types, query) do + Enum.map_intersperse(types, ?,, fn {_field, type} -> + ["CAST(", ??, " AS ", ecto_cast_to_db(type, query), ?)] + end) + end + + defp interval(count, "millisecond", sources, query) do + ["INTERVAL (", expr(count, sources, query) | " * 1000) microsecond"] + end + + defp interval(count, interval, sources, query) do + ["INTERVAL ", expr(count, sources, query), ?\s | interval] + end + + defp op_to_binary({op, _, [_, _]} = expr, sources, query) when op in @binary_ops, + do: paren_expr(expr, sources, query) + + defp op_to_binary({:is_nil, _, [_]} = expr, sources, query), + do: paren_expr(expr, sources, query) + + defp op_to_binary(expr, sources, query), + do: expr(expr, sources, query) + + defp create_names(%{sources: sources}, as_prefix) do + create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() + end + + defp create_names(sources, pos, limit, as_prefix) when pos < limit do + [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)] + end + + defp create_names(_sources, pos, pos, as_prefix) do + [as_prefix] + end + + defp subquery_as_prefix(sources) do + [?s | :erlang.element(tuple_size(sources), sources)] + end + + defp create_name(sources, pos, as_prefix) do + case elem(sources, pos) do + {:fragment, _, _} -> + {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} + + {:values, _, _} -> + {nil, as_prefix ++ [?v | Integer.to_string(pos)], nil} + + {table, schema, prefix} -> + name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] + {quote_table(prefix, table), name, schema} + + %Ecto.SubQuery{} -> + {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil} + end + end + + defp create_alias(<>) when first in ?a..?z when first in ?A..?Z do + first + end + + defp create_alias(_) do + ?t + end + + ## DDL + + alias 
Ecto.Migration.{Table, Index, Reference, Constraint} + + @impl true + def execute_ddl({command, %Table{} = table, columns}) + when command in [:create, :create_if_not_exists] do + table_structure = + case column_definitions(table, columns) ++ pk_definitions(columns, ", ") do + [] -> [] + list -> [?\s, ?(, list, ?)] + end + + [ + [ + "CREATE TABLE ", + if_do(command == :create_if_not_exists, "IF NOT EXISTS "), + quote_table(table.prefix, table.name), + table_structure, + comment_expr(table.comment, true), + engine_expr(table.engine), + options_expr(table.options) + ] + ] + end + + def execute_ddl({command, %Table{} = table, mode}) when command in [:drop, :drop_if_exists] do + [ + [ + "DROP TABLE ", + if_do(command == :drop_if_exists, "IF EXISTS "), + quote_table(table.prefix, table.name), + drop_mode(mode) + ] + ] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + [ + [ + "ALTER TABLE ", + quote_table(table.prefix, table.name), + ?\s, + column_changes(table, changes), + pk_definitions(changes, ", ADD ") + ] + ] ++ + if_do( + table.comment, + [["ALTER TABLE ", quote_table(table.prefix, table.name), comment_expr(table.comment)]] + ) + end + + def execute_ddl({:create, %Index{} = index}) do + if index.where do + error!(nil, "MySQL adapter does not support where in indexes") + end + + if index.nulls_distinct == false do + error!(nil, "MySQL adapter does not support nulls_distinct set to false in indexes") + end + + [ + [ + "CREATE", + if_do(index.unique, " UNIQUE"), + " INDEX ", + quote_name(index.name), + " ON ", + quote_table(index.prefix, index.table), + ?\s, + ?(, + Enum.map_intersperse(index.columns, ", ", &index_expr/1), + ?), + if_do(index.using, [" USING ", to_string(index.using)]), + if_do(index.concurrently, " LOCK=NONE") + ] + ] + end + + def execute_ddl({:create_if_not_exists, %Index{}}), + do: error!(nil, "MySQL adapter does not support create if not exists for index") + + def execute_ddl({:create, %Constraint{check: check} = constraint}) 
when is_binary(check) do + table_name = quote_name(constraint.prefix, constraint.table) + [["ALTER TABLE ", table_name, " ADD ", new_constraint_expr(constraint)]] + end + + def execute_ddl({:create, %Constraint{exclude: exclude}}) when is_binary(exclude), + do: error!(nil, "MySQL adapter does not support exclusion constraints") + + def execute_ddl({:drop, %Constraint{}, :cascade}), + do: error!(nil, "MySQL does not support `CASCADE` in `DROP CONSTRAINT` commands") + + def execute_ddl({:drop, %Constraint{} = constraint, _}) do + [ + [ + "ALTER TABLE ", + quote_name(constraint.prefix, constraint.table), + " DROP CONSTRAINT ", + quote_name(constraint.name) + ] + ] + end + + def execute_ddl({:drop_if_exists, %Constraint{}, _}), + do: error!(nil, "MySQL adapter does not support `drop_if_exists` for constraints") + + def execute_ddl({:drop, %Index{}, :cascade}), + do: error!(nil, "MySQL adapter does not support cascade in drop index") + + def execute_ddl({:drop, %Index{} = index, :restrict}) do + [ + [ + "DROP INDEX ", + quote_name(index.name), + " ON ", + quote_table(index.prefix, index.table), + if_do(index.concurrently, " LOCK=NONE") + ] + ] + end + + def execute_ddl({:drop_if_exists, %Index{}, _}), + do: error!(nil, "MySQL adapter does not support drop if exists for index") + + def execute_ddl({:rename, %Index{} = index, new_name}) do + [ + [ + "ALTER TABLE ", + quote_table(index.table), + " RENAME INDEX ", + quote_name(index.name), + " TO ", + quote_name(new_name) + ] + ] + end + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [ + [ + "RENAME TABLE ", + quote_table(current_table.prefix, current_table.name), + " TO ", + quote_table(new_table.prefix, new_table.name) + ] + ] + end + + def execute_ddl({:rename, %Table{} = table, current_column, new_column}) do + [ + [ + "ALTER TABLE ", + quote_table(table.prefix, table.name), + " RENAME COLUMN ", + quote_name(current_column), + " TO ", + quote_name(new_column) + ] + ] + end + + def 
execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "MySQL adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(_), do: [] + + @impl true + def table_exists_query(table) do + {"SELECT true FROM information_schema.tables WHERE table_name = ? AND table_schema = DATABASE() LIMIT 1", + [table]} + end + + defp drop_mode(:cascade), do: " CASCADE" + defp drop_mode(:restrict), do: [] + + defp pk_definitions(columns, prefix) do + pks = + for {action, name, _, opts} <- columns, + action != :remove, + opts[:primary_key], + do: name + + case pks do + [] -> [] + _ -> [[prefix, "PRIMARY KEY (", quote_names(pks), ?)]] + end + end + + defp column_definitions(table, columns) do + Enum.map_intersperse(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do + [ + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(opts), + reference_expr(ref, table, name) + ] + end + + defp column_definition(_table, {:add, name, type, opts}) do + [quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_changes(table, columns) do + Enum.map_intersperse(columns, ", ", &column_change(table, &1)) + end + + defp column_change(_table, {_command, _name, %Reference{validate: false}, _opts}) do + error!(nil, "validate: false on references is not supported in MyXQL") + end + + defp column_change(table, {:add, name, %Reference{} = ref, opts}) do + [ + "ADD ", + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(opts), + constraint_expr(ref, table, name) + ] + end + + defp column_change(_table, {:add, name, type, opts}) do + ["ADD ", quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_change(table, {:add_if_not_exists, name, %Reference{} = ref, opts}) do + [ + "ADD IF NOT EXISTS ", + quote_name(name), 
+ ?\s, + reference_column_type(ref.type, opts), + column_options(opts), + constraint_if_not_exists_expr(ref, table, name) + ] + end + + defp column_change(_table, {:add_if_not_exists, name, type, opts}) do + [ + "ADD IF NOT EXISTS ", + quote_name(name), + ?\s, + column_type(type, opts), + column_options(opts) + ] + end + + defp column_change(table, {:modify, name, %Reference{} = ref, opts}) do + [ + drop_constraint_expr(opts[:from], table, name), + "MODIFY ", + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(opts), + constraint_expr(ref, table, name) + ] + end + + defp column_change(table, {:modify, name, type, opts}) do + [ + drop_constraint_expr(opts[:from], table, name), + "MODIFY ", + quote_name(name), + ?\s, + column_type(type, opts), + column_options(opts) + ] + end + + defp column_change(_table, {:remove, name}), do: ["DROP ", quote_name(name)] + + defp column_change(table, {:remove, name, %Reference{} = ref, _opts}) do + [drop_constraint_expr(ref, table, name), "DROP ", quote_name(name)] + end + + defp column_change(_table, {:remove, name, _type, _opts}), do: ["DROP ", quote_name(name)] + + defp column_change(table, {:remove_if_exists, name, %Reference{} = ref}) do + [drop_constraint_if_exists_expr(ref, table, name), "DROP IF EXISTS ", quote_name(name)] + end + + defp column_change(table, {:remove_if_exists, name, _type}), + do: column_change(table, {:remove_if_exists, name}) + + defp column_change(_table, {:remove_if_exists, name}), + do: ["DROP IF EXISTS ", quote_name(name)] + + defp column_options(opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + after_column = Keyword.get(opts, :after) + comment = Keyword.get(opts, :comment) + collation = Keyword.fetch(opts, :collation) + + [ + default_expr(default), + collation_expr(collation), + null_expr(null), + comment_expr(comment), + after_expr(after_column) + ] + end + + defp comment_expr(comment, create_table? 
\\ false) + + defp comment_expr(comment, true) when is_binary(comment), + do: " COMMENT = '#{escape_string(comment)}'" + + defp comment_expr(comment, false) when is_binary(comment), + do: " COMMENT '#{escape_string(comment)}'" + + defp comment_expr(_, _), do: [] + + defp after_expr(nil), do: [] + defp after_expr(column) when is_atom(column) or is_binary(column), do: " AFTER `#{column}`" + defp after_expr(_), do: [] + + defp null_expr(false), do: " NOT NULL" + defp null_expr(true), do: " NULL" + defp null_expr(_), do: [] + + defp collation_expr({:ok, collation_name}), do: " COLLATE \"#{collation_name}\"" + defp collation_expr(_), do: [] + + defp new_constraint_expr(%Constraint{check: check} = constraint) when is_binary(check) do + [ + "CONSTRAINT ", + quote_name(constraint.name), + " CHECK (", + check, + ")", + validate(constraint.validate) + ] + end + + defp default_expr({:ok, nil}), + do: " DEFAULT NULL" + + defp default_expr({:ok, literal}) when is_binary(literal), + do: [" DEFAULT '", escape_string(literal), ?'] + + defp default_expr({:ok, literal}) when is_number(literal) or is_boolean(literal), + do: [" DEFAULT ", to_string(literal)] + + defp default_expr({:ok, {:fragment, expr}}), + do: [" DEFAULT ", expr] + + defp default_expr({:ok, value}) when is_map(value) do + library = Application.get_env(:myxql, :json_library, Jason) + expr = IO.iodata_to_binary(library.encode_to_iodata!(value)) + [" DEFAULT ", ?(, ?', escape_string(expr), ?', ?)] + end + + defp default_expr(:error), + do: [] + + defp index_expr({dir, literal}) + when is_binary(literal), + do: index_dir(dir, literal) + + defp index_expr({dir, literal}), + do: index_dir(dir, quote_name(literal)) + + defp index_expr(literal) when is_binary(literal), + do: literal + + defp index_expr(literal), do: quote_name(literal) + + defp index_dir(dir, str) + when dir in [ + :asc, + :asc_nulls_first, + :asc_nulls_last, + :desc, + :desc_nulls_first, + :desc_nulls_last + ] do + case dir do + :asc -> [str | " ASC"] + 
:desc -> [str | " DESC"] + _ -> error!(nil, "#{dir} is not supported in indexes in MySQL") + end + end + + defp engine_expr(storage_engine), + do: [" ENGINE = ", String.upcase(to_string(storage_engine || "INNODB"))] + + defp options_expr(nil), + do: [] + + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "MySQL adapter does not support keyword lists in :options") + + defp options_expr(options), + do: [?\s, to_string(options)] + + defp column_type(type, opts) when type in ~w(time utc_datetime naive_datetime)a do + generated = Keyword.get(opts, :generated) + [ecto_to_db(type), generated_expr(generated)] + end + + defp column_type(type, opts) + when type in ~w(time_usec utc_datetime_usec naive_datetime_usec)a do + precision = Keyword.get(opts, :precision, 6) + generated = Keyword.get(opts, :generated) + type_name = ecto_to_db(type) + + [type_name, ?(, to_string(precision), ?), generated_expr(generated)] + end + + defp column_type(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + generated = Keyword.get(opts, :generated) + scale = Keyword.get(opts, :scale) + + type = + cond do + size -> [ecto_size_to_db(type), ?(, to_string(size), ?)] + precision -> [ecto_to_db(type), ?(, to_string(precision), ?,, to_string(scale || 0), ?)] + type == :string -> ["varchar(255)"] + true -> ecto_to_db(type) + end + + [type, generated_expr(generated)] + end + + defp generated_expr(nil), do: [] + + defp generated_expr(expr) when is_binary(expr) do + [" AS ", expr] + end + + defp generated_expr(other) do + raise ArgumentError, + "the `:generated` option only accepts strings, received: #{inspect(other)}" + end + + defp reference_expr(type, ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + if ref.match do + error!(nil, ":match is not supported in references for tds") + end + + [ + "CONSTRAINT ", + reference_name(ref, table, name), + " ", + type, + " (", + 
quote_names(current_columns), + ?), + " REFERENCES ", + quote_table(Keyword.get(ref.options, :prefix, table.prefix), ref.table), + ?(, + quote_names(reference_columns), + ?), + reference_on_delete(ref.on_delete), + reference_on_update(ref.on_update) + ] + end + + defp reference_expr(%Reference{} = ref, table, name), + do: [", " | reference_expr("FOREIGN KEY", ref, table, name)] + + defp constraint_expr(%Reference{} = ref, table, name), + do: [", ADD " | reference_expr("FOREIGN KEY", ref, table, name)] + + defp constraint_if_not_exists_expr(%Reference{} = ref, table, name), + do: [", ADD " | reference_expr("FOREIGN KEY IF NOT EXISTS", ref, table, name)] + + defp drop_constraint_expr({%Reference{} = ref, _opts}, table, name), + do: drop_constraint_expr(ref, table, name) + + defp drop_constraint_expr(%Reference{} = ref, table, name), + do: ["DROP FOREIGN KEY ", reference_name(ref, table, name), ", "] + + defp drop_constraint_expr(_, _, _), + do: [] + + defp drop_constraint_if_exists_expr(%Reference{} = ref, table, name), + do: ["DROP FOREIGN KEY IF EXISTS ", reference_name(ref, table, name), ", "] + + defp drop_constraint_if_exists_expr(_, _, _), + do: [] + + defp reference_name(%Reference{name: nil}, table, column), + do: quote_name("#{table.name}_#{column}_fkey") + + defp reference_name(%Reference{name: name}, _table, _column), + do: quote_name(name) + + defp reference_column_type(:serial, _opts), do: "BIGINT UNSIGNED" + defp reference_column_type(:bigserial, _opts), do: "BIGINT UNSIGNED" + defp reference_column_type(type, opts), do: column_type(type, opts) + + defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" + + defp reference_on_delete({:nilify, _columns}) do + error!( + nil, + "MySQL adapter does not support the `{:nilify, columns}` action for `:on_delete`" + ) + end + + defp reference_on_delete(:default_all) do + error!( + nil, + "MySQL adapter does not support the `:default_all` action for `:on_delete`" + ) + end + + defp 
reference_on_delete({:default, _columns}) do + error!( + nil, + "MySQL adapter does not support the `{:default, columns}` action for `:on_delete`" + ) + end + + defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" + defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT" + defp reference_on_delete(_), do: [] + + defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT" + defp reference_on_update(_), do: [] + + defp validate(false), do: " NOT ENFORCED" + defp validate(_), do: [] + + ## Helpers + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + name = maybe_add_column_names(source, name) + {expr || expr(source, sources, query), name} + end + + defp get_parent_sources_ix(query, as) do + case query.aliases[@parent_as] do + {%{aliases: %{^as => ix}}, sources} -> {ix, sources} + {%{} = parent, _sources} -> get_parent_sources_ix(parent, as) + end + end + + defp maybe_add_column_names({:values, _, [types, _, _]}, name) do + fields = Keyword.keys(types) + [name, ?\s, ?(, quote_names(fields), ?)] + end + + defp maybe_add_column_names(_, name), do: name + + defp quote_name(nil, name), do: quote_name(name) + + defp quote_name(prefix, name), do: [quote_name(prefix), ?., quote_name(name)] + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, "`") do + error!(nil, "bad literal/field/table name #{inspect(name)} (` is not permitted)") + end + + [?`, name, ?`] + end + + defp quote_names(names), do: Enum.map_intersperse(names, ?,, "e_name/1) + + defp quote_table(nil, name), do: quote_table(name) + defp quote_table(prefix, name), do: [quote_table(prefix), ?., quote_table(name)] + + defp quote_table(name) when is_atom(name), + do: quote_table(Atom.to_string(name)) + + defp 
quote_table(name) do + if String.contains?(name, "`") do + error!(nil, "bad table name #{inspect(name)}") + end + + [?`, name, ?`] + end + + defp format_to_sql(:map), do: "FORMAT=JSON" + defp format_to_sql(:text), do: "FORMAT=TRADITIONAL" + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + value + |> :binary.replace("'", "''", [:global]) + |> :binary.replace("\\", "\\\\", [:global]) + end + + defp escape_json_key(value) when is_binary(value) do + value + |> escape_string() + |> :binary.replace("\"", "\\\\\"", [:global]) + end + + defp ecto_cast_to_db(:id, _query), do: "unsigned" + defp ecto_cast_to_db(:integer, _query), do: "signed" + defp ecto_cast_to_db(:string, _query), do: "char" + defp ecto_cast_to_db(:utc_datetime_usec, _query), do: "datetime(6)" + defp ecto_cast_to_db(:naive_datetime_usec, _query), do: "datetime(6)" + defp ecto_cast_to_db(type, query), do: ecto_to_db(type, query) + + defp ecto_size_to_db(:binary), do: "varbinary" + defp ecto_size_to_db(type), do: ecto_to_db(type) + + defp ecto_to_db(type, query \\ nil) + defp ecto_to_db({:array, _}, query), do: error!(query, "Array type is not supported by MySQL") + defp ecto_to_db(:id, _query), do: "integer" + defp ecto_to_db(:serial, _query), do: "bigint unsigned not null auto_increment" + defp ecto_to_db(:bigserial, _query), do: "bigint unsigned not null auto_increment" + defp ecto_to_db(:binary_id, _query), do: "binary(16)" + defp ecto_to_db(:string, _query), do: "varchar" + defp ecto_to_db(:float, _query), do: "double" + defp ecto_to_db(:binary, _query), do: "blob" + # MySQL does not support uuid + defp ecto_to_db(:uuid, _query), do: "binary(16)" + defp ecto_to_db(:map, _query), do: "json" + defp ecto_to_db({:map, _}, _query), do: "json" + defp ecto_to_db(:time_usec, _query), do: "time" + defp ecto_to_db(:utc_datetime, _query), do: "datetime" + defp ecto_to_db(:utc_datetime_usec, _query), do: "datetime" + defp 
ecto_to_db(:naive_datetime, _query), do: "datetime" + defp ecto_to_db(:naive_datetime_usec, _query), do: "datetime" + defp ecto_to_db(atom, _query) when is_atom(atom), do: Atom.to_string(atom) + + defp ecto_to_db(type, _query) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` where `t` itself follows the same conditions." + end + + defp error!(nil, message) do + raise ArgumentError, message + end + + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/postgres.ex b/deps/ecto_sql/lib/ecto/adapters/postgres.ex new file mode 100644 index 0000000..94420a9 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/postgres.ex @@ -0,0 +1,548 @@ +defmodule Ecto.Adapters.Postgres do + @moduledoc """ + Adapter module for PostgreSQL. + + It uses `Postgrex` for communicating to the database. + + ## Features + + * Full query support (including joins, preloads and associations) + * Support for transactions + * Support for data migrations + * Support for ecto.create and ecto.drop operations + * Support for transactional tests via `Ecto.Adapters.SQL` + + ## Options + + Postgres options split in different categories described + below. All options can be given via the repository + configuration: + + config :your_app, YourApp.Repo, + ... + + The `:prepare` option may be specified per operation: + + YourApp.Repo.all(Queryable, prepare: :unnamed) + + ### Migration options + + * `:migration_lock` - prevent multiple nodes from running migrations at the same + time by obtaining a lock. The value `:table_lock` will lock migrations by wrapping + the entire migration inside a database transaction, including inserting the + migration version into the migration source (by default, "schema_migrations"). 
+ You may alternatively select `:pg_advisory_lock` which has the benefit + of allowing concurrent operations such as creating indexes. (default: `:table_lock`) + + When using the `:pg_advisory_lock` migration lock strategy and Ecto cannot obtain + the lock due to another instance occupying the lock, Ecto will wait for 5 seconds + and then retry infinity times. This is configurable on the repo with keys + `:migration_advisory_lock_retry_interval_ms` and `:migration_advisory_lock_max_tries`. + If the retries are exhausted, the migration will fail. + + Some downsides to using advisory locks is that some Postgres-compatible systems or plugins + may not support session level locks well and therefore result in inconsistent behavior. + For example, PgBouncer when using pool_modes other than session won't work well with + advisory locks. CockroachDB is another system that is designed in a way that advisory + locks don't make sense for their distributed database. + + ### Connection options + + * `:hostname` - Server hostname + * `:socket_dir` - Connect to Postgres via UNIX sockets in the given directory + The socket name is derived based on the port. This is the preferred method + for configuring sockets and it takes precedence over the hostname. If you are + connecting to a socket outside of the Postgres convention, use `:socket` instead; + * `:socket` - Connect to Postgres via UNIX sockets in the given path. + This option takes precedence over the `:hostname` and `:socket_dir` + * `:username` - Username + * `:password` - User password + * `:port` - Server port (default: 5432) + * `:database` - the database to connect to + * `:maintenance_database` - Specifies the name of the database to connect to when + creating or dropping the database. Defaults to `"postgres"` + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Accepts a list of options to enable TLS for the client connection, + or `false` to disable it. 
See the documentation for [Erlang's `ssl` module](`e:ssl:ssl`) + for a list of options (default: false) + * `:parameters` - Keyword list of connection parameters + * `:connect_timeout` - The timeout for establishing new connections (default: 5000) + * `:prepare` - How to prepare queries, either `:named` to use named queries + or `:unnamed` to force unnamed queries (default: `:named`) + * `:socket_options` - Specifies socket configuration + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + The `:socket_options` are particularly useful when configuring the size + of both send and receive buffers. For example, when Ecto starts with a + pool of 20 connections, the memory usage may quickly grow from 20MB to + 50MB based on the operating system default values for TCP buffers. It is + advised to stick with the operating system defaults but they can be + tweaked if desired: + + socket_options: [recbuf: 8192, sndbuf: 8192] + + We also recommend developers to consult the `Postgrex.start_link/1` + documentation for a complete listing of all supported options. + + ### Storage options + + * `:encoding` - the database encoding (default: "UTF8") + or `:unspecified` to remove encoding parameter (alternative engine compatibility) + * `:template` - the template to create the database from + * `:lc_collate` - the collation order + * `:lc_ctype` - the character classification + * `:dump_path` - where to place dumped structures + * `:dump_prefixes` - list of prefixes that will be included in the structure dump. + When specified, the prefixes will have their definitions dumped along with the + data in their migration table. When it is not specified, the configured + database has the definitions dumped from all of its schemas but only + the data from the migration table from the `public` schema is included. 
+ * `:force_drop` - force the database to be dropped even + if it has connections to it (requires PostgreSQL 13+) + + ### After connect callback + + If you want to execute a callback as soon as connection is established + to the database, you can use the `:after_connect` configuration. For + example, in your repository configuration you can add: + + after_connect: {Postgrex, :query!, ["SET search_path TO global_prefix", []]} + + You can also specify your own module that will receive the Postgrex + connection as argument. + + ## Extensions + + Both PostgreSQL and its adapter for Elixir, Postgrex, support an + extension system. If you want to use custom extensions for Postgrex + alongside Ecto, you must define a type module with your extensions. + Create a new file anywhere in your application with the following: + + Postgrex.Types.define(MyApp.PostgresTypes, [MyExtension.Foo, MyExtensionBar]) + + Once your type module is defined, you can configure the repository to use it: + + config :my_app, MyApp.Repo, types: MyApp.PostgresTypes + + ## Unix socket connection + + You may desire to communicate with Postgres via Unix sockets. + If your PG server is started on the same machine as your code, you could check that: + + ```bash + % sudo grep unix_socket_directories /var/lib/postgres/data/postgresql.conf + unix_socket_directories = '/run/postgresql' + ``` + + ```bash + % ls -lah /run/postgresql + итого 4,0K + drwxr-xr-x 2 postgres postgres 80 июн 4 10:58 . + drwxr-xr-x 35 root root 840 июн 4 21:02 .. + srwxrwxrwx 1 postgres postgres 0 июн 5 07:41 .s.PGSQL.5432 + -rw------- 1 postgres postgres 61 июн 5 07:41 .s.PGSQL.5432.lock + ``` + + So you have postgresql started and listening on the socket. 
+ Then you may use it as follows: + + config :your_app, YourApp.Repo, + socket_dir: "/run/postgresql" + """ + + # Inherit all behaviour from Ecto.Adapters.SQL + use Ecto.Adapters.SQL, driver: :postgrex + + require Logger + + # And provide a custom storage implementation + @behaviour Ecto.Adapter.Storage + @behaviour Ecto.Adapter.Structure + + @default_maintenance_database "postgres" + @default_prepare_opt :named + + @doc """ + All Ecto extensions for Postgrex. + + Currently Ecto does not define any of its own extensions for Postgrex. + If this changes in a future release, you will need to call this function + when defining your own custom extensions: + + Postgrex.Types.define(MyApp.PostgresTypes, + [MyExtension.Foo, MyExtensionBar] ++ Ecto.Adapters.Postgres.extensions()) + """ + def extensions do + [] + end + + # Support arrays in place of IN + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}] + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_, type), do: [type] + + ## Query API + + @impl Ecto.Adapter.Queryable + def execute(adapter_meta, query_meta, query, params, opts) do + prepare = Keyword.get(opts, :prepare, @default_prepare_opt) + + unless valid_prepare?(prepare) do + raise ArgumentError, + "expected option `:prepare` to be either `:named` or `:unnamed`, got: #{inspect(prepare)}" + end + + Ecto.Adapters.SQL.execute(prepare, adapter_meta, query_meta, query, params, opts) + end + + defp valid_prepare?(prepare) when prepare in [:named, :unnamed], do: true + defp valid_prepare?(_), do: false + + ## Storage API + + @impl true + def storage_up(opts) do + database = Keyword.fetch!(opts, :database) + + encoding = if opts[:encoding] == :unspecified, do: nil, else: opts[:encoding] || "UTF8" + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + 
check_existence_command = "SELECT FROM pg_database WHERE datname = '#{database}'" + + case run_query(check_existence_command, opts) do + {:ok, %{num_rows: 1}} -> + {:error, :already_up} + + _ -> + create_command = + ~s(CREATE DATABASE "#{database}") + |> concat_if(encoding, &"ENCODING '#{&1}'") + |> concat_if(opts[:template], &"TEMPLATE=#{&1}") + |> concat_if(opts[:lc_ctype], &"LC_CTYPE='#{&1}'") + |> concat_if(opts[:lc_collate], &"LC_COLLATE='#{&1}'") + + case run_query(create_command, opts) do + {:ok, _} -> + :ok + + {:error, %{postgres: %{code: :duplicate_database}}} -> + {:error, :already_up} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + end + + defp concat_if(content, nil, _), do: content + defp concat_if(content, false, _), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @impl true + def storage_down(opts) do + database = Keyword.fetch!(opts, :database) + + command = + "DROP DATABASE \"#{database}\"" + |> concat_if(opts[:force_drop], fn _ -> "WITH (FORCE)" end) + + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + case run_query(command, opts) do + {:ok, _} -> + :ok + + {:error, %{postgres: %{code: :invalid_catalog_name}}} -> + {:error, :already_down} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = Keyword.fetch!(opts, :database) + + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + check_database_query = + "SELECT datname FROM pg_catalog.pg_database WHERE datname = '#{database}'" + + case run_query(check_database_query, opts) do + {:ok, %{num_rows: 0}} -> :down + {:ok, %{num_rows: _num_rows}} -> :up + other -> {:error, other} + end + end + + @impl true + def 
supports_ddl_transaction? do + true + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts, repo: repo} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.merge(opts, timeout: :infinity, telemetry_options: [schema_migration: true]) + config = repo.config() + lock_strategy = Keyword.get(config, :migration_lock, :table_lock) + do_lock_for_migrations(lock_strategy, meta, opts, config, fun) + end + + defp do_lock_for_migrations(:pg_advisory_lock, meta, opts, config, fun) do + lock = :erlang.phash2({:ecto, opts[:prefix], meta.repo}) + + retry_state = %{ + retry_interval_ms: config[:migration_advisory_lock_retry_interval_ms] || 5000, + max_tries: config[:migration_advisory_lock_max_tries] || :infinity, + tries: 0 + } + + advisory_lock(meta, opts, lock, retry_state, fun) + end + + defp do_lock_for_migrations(:table_lock, meta, opts, _config, fun) do + {:ok, res} = + transaction(meta, opts, fn -> + # SHARE UPDATE EXCLUSIVE MODE is the first lock that locks + # itself but still allows updates to happen, see + # # https://www.postgresql.org/docs/9.4/explicit-locking.html + source = Keyword.get(opts, :migration_source, "schema_migrations") + table = if prefix = opts[:prefix], do: ~s|"#{prefix}"."#{source}"|, else: ~s|"#{source}"| + lock_statement = "LOCK TABLE #{table} IN SHARE UPDATE EXCLUSIVE MODE" + {:ok, _} = Ecto.Adapters.SQL.query(meta, lock_statement, [], opts) + + fun.() + end) + + res + end + + defp advisory_lock(meta, opts, lock, retry_state, fun) do + result = + checkout(meta, opts, fn -> + case Ecto.Adapters.SQL.query(meta, "SELECT pg_try_advisory_lock(#{lock})", [], opts) do + {:ok, %{rows: [[true]]}} -> + try do + {:ok, fun.()} + after + release_advisory_lock(meta, opts, lock) + end + + _ -> + :no_advisory_lock + end + end) + + case result do + {:ok, fun_result} -> + fun_result + + :no_advisory_lock -> + 
maybe_retry_advisory_lock(meta, opts, lock, retry_state, fun) + end + end + + defp release_advisory_lock(meta, opts, lock) do + case Ecto.Adapters.SQL.query(meta, "SELECT pg_advisory_unlock(#{lock})", [], opts) do + {:ok, %{rows: [[true]]}} -> + :ok + + _ -> + raise "failed to release advisory lock" + end + end + + defp maybe_retry_advisory_lock(meta, opts, lock, retry_state, fun) do + %{retry_interval_ms: interval, max_tries: max_tries, tries: tries} = retry_state + + if max_tries != :infinity && max_tries <= tries do + raise "failed to obtain advisory lock. Tried #{max_tries} times waiting #{interval}ms between tries" + else + if Keyword.get(opts, :log_migrator_sql, false) do + Logger.info( + "Migration lock occupied for #{inspect(meta.repo)}. Retry #{tries + 1}/#{max_tries} at #{interval}ms intervals." + ) + end + + Process.sleep(interval) + retry_state = %{retry_state | tries: tries + 1} + advisory_lock(meta, opts, lock, retry_state, fun) + end + end + + @impl true + def structure_dump(default, config) do + table = config[:migration_source] || "schema_migrations" + + with {:ok, versions} <- select_versions(table, config), + {:ok, path} <- pg_dump(default, config), + do: append_versions(table, versions, path) + end + + defp select_versions(table, config) do + prefixes = config[:dump_prefixes] || ["public"] + + result = + Enum.reduce_while(prefixes, [], fn prefix, versions -> + case run_query(~s[SELECT version FROM #{prefix}."#{table}" ORDER BY version], config) do + {:ok, %{rows: rows}} -> {:cont, Enum.map(rows, &{prefix, hd(&1)}) ++ versions} + {:error, %{postgres: %{code: :undefined_table}}} -> {:cont, versions} + {:error, _} = error -> {:halt, error} + end + end) + + case result do + {:error, _} = error -> error + versions -> {:ok, versions} + end + end + + defp pg_dump(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + prefixes = config[:dump_prefixes] || [] + non_prefix_args = ["--file", path, "--schema-only", 
"--no-acl", "--no-owner"] + + args = + Enum.reduce(prefixes, non_prefix_args, fn prefix, acc -> + ["-n", prefix | acc] + end) + + File.mkdir_p!(Path.dirname(path)) + + case run_with_cmd("pg_dump", config, args) do + {_output, 0} -> + {:ok, path} + + {output, _} -> + {:error, output} + end + end + + defp append_versions(_table, [], path) do + {:ok, path} + end + + defp append_versions(table, versions, path) do + sql = + Enum.map_join(versions, fn {prefix, version} -> + ~s[INSERT INTO #{prefix}."#{table}" (version) VALUES (#{version});\n] + end) + + File.open!(path, [:append], fn file -> + IO.write(file, sql) + end) + + {:ok, path} + end + + @impl true + def structure_load(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + args = ["--quiet", "--file", path, "-vON_ERROR_STOP=1", "--single-transaction"] + + case run_with_cmd("psql", config, args) do + {_output, 0} -> {:ok, path} + {output, _} -> {:error, output} + end + end + + @impl true + def dump_cmd(args, opts \\ [], config) when is_list(config) and is_list(args), + do: run_with_cmd("pg_dump", config, args, opts) + + ## Helpers + + defp run_query(sql, opts) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = Application.ensure_all_started(:postgrex) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + task = + Task.Supervisor.async_nolink(Ecto.Adapters.SQL.StorageSupervisor, fn -> + {:ok, conn} = Postgrex.start_link(opts) + + value = Postgrex.query(conn, sql, [], opts) + GenServer.stop(conn) + value + end) + + timeout = Keyword.get(opts, :timeout, 15_000) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + + {:ok, {:error, error}} -> + {:error, error} + + {:exit, {%{__struct__: struct} = error, _}} + when struct in [Postgrex.Error, DBConnection.Error] -> + {:error, error} + + {:exit, reason} -> + {:error, 
RuntimeError.exception(Exception.format_exit(reason))} + + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + defp run_with_cmd(cmd, opts, opt_args, cmd_opts \\ []) do + unless System.find_executable(cmd) do + raise "could not find executable `#{cmd}` in path, " <> + "please guarantee it is available before running ecto commands" + end + + env = [{"PGCONNECT_TIMEOUT", "10"}] + + env = + if password = opts[:password] do + [{"PGPASSWORD", password} | env] + else + env + end + + args = [] + args = if username = opts[:username], do: ["--username", username | args], else: args + args = if port = opts[:port], do: ["--port", to_string(port) | args], else: args + args = if database = opts[:database], do: ["--dbname", database | args], else: args + + host = opts[:socket_dir] || opts[:hostname] || System.get_env("PGHOST") || "localhost" + + if opts[:socket] do + IO.warn( + ":socket option is ignored when connecting in structure_load/2 and structure_dump/2," <> + " use :socket_dir or :hostname instead" + ) + end + + args = ["--host", host | args] + args = args ++ opt_args + + cmd_opts = + cmd_opts + |> Keyword.put_new(:stderr_to_stdout, true) + |> Keyword.update(:env, env, &Enum.concat(env, &1)) + + System.cmd(cmd, args, cmd_opts) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex b/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex new file mode 100644 index 0000000..fcdcf91 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex @@ -0,0 +1,2080 @@ +if Code.ensure_loaded?(Postgrex) do + defmodule Ecto.Adapters.Postgres.Connection do + @moduledoc false + + @default_port 5432 + @behaviour Ecto.Adapters.SQL.Connection + @explain_prepared_statement_name "ecto_explain_statement" + + ## Module and Options + + @impl true + def child_spec(opts) do + opts + |> Keyword.put_new(:port, @default_port) + |> Postgrex.child_spec() + end + + @impl true + def to_constraints( + %Postgrex.Error{postgres: %{code: 
:unique_violation, constraint: constraint}}, + _opts + ), + do: [unique: constraint] + + def to_constraints( + %Postgrex.Error{postgres: %{code: :foreign_key_violation, constraint: constraint}}, + _opts + ), + do: [foreign_key: constraint] + + def to_constraints( + %Postgrex.Error{postgres: %{code: :restrict_violation, constraint: constraint}}, + _opts + ), + do: [foreign_key: constraint] + + def to_constraints( + %Postgrex.Error{postgres: %{code: :exclusion_violation, constraint: constraint}}, + _opts + ), + do: [exclusion: constraint] + + def to_constraints( + %Postgrex.Error{postgres: %{code: :check_violation, constraint: constraint}}, + _opts + ), + do: [check: constraint] + + # Postgres 9.2 and earlier does not provide the constraint field + @impl true + def to_constraints( + %Postgrex.Error{postgres: %{code: :unique_violation, message: message}}, + _opts + ) do + case :binary.split(message, " unique constraint ") do + [_, quoted] -> [unique: strip_quotes(quoted)] + _ -> [] + end + end + + def to_constraints( + %Postgrex.Error{postgres: %{code: :foreign_key_violation, message: message}}, + _opts + ) do + case :binary.split(message, " foreign key constraint ") do + [_, quoted] -> + [quoted | _] = :binary.split(quoted, " on table ") + [foreign_key: strip_quotes(quoted)] + + _ -> + [] + end + end + + def to_constraints( + %Postgrex.Error{postgres: %{code: :exclusion_violation, message: message}}, + _opts + ) do + case :binary.split(message, " exclusion constraint ") do + [_, quoted] -> [exclusion: strip_quotes(quoted)] + _ -> [] + end + end + + def to_constraints( + %Postgrex.Error{postgres: %{code: :check_violation, message: message}}, + _opts + ) do + case :binary.split(message, " check constraint ") do + [_, quoted] -> [check: strip_quotes(quoted)] + _ -> [] + end + end + + def to_constraints(_, _opts), + do: [] + + defp strip_quotes(quoted) do + binary_part(quoted, 1, byte_size(quoted) - 2) + end + + ## Query + + @impl true + def prepare_execute(conn, name, 
sql, params, opts) do + ensure_list_params!(params) + + case Postgrex.prepare_execute(conn, name, sql, params, opts) do + {:error, %Postgrex.Error{postgres: %{pg_code: "22P02", message: message}} = error} -> + context = """ + . If you are trying to query a JSON field, the parameter may need to be interpolated. \ + Instead of + + p.json["field"] != "value" + + do + + p.json["field"] != ^"value" + """ + + {:error, put_in(error.postgres.message, message <> context)} + + other -> + other + end + end + + @impl true + def query(conn, sql, params, opts) do + ensure_list_params!(params) + Postgrex.query(conn, sql, params, opts) + end + + @impl true + def query_many(_conn, _sql, _params, _opts) do + raise RuntimeError, "query_many is not supported in the PostgreSQL adapter" + end + + @impl true + def execute(conn, %{ref: ref} = query, params, opts) do + ensure_list_params!(params) + + case Postgrex.execute(conn, query, params, opts) do + {:ok, %{ref: ^ref}, result} -> + {:ok, result} + + {:ok, _, _} = ok -> + ok + + {:error, %Postgrex.QueryError{} = err} -> + {:reset, err} + + {:error, %Postgrex.Error{postgres: %{code: :feature_not_supported}} = err} -> + {:reset, err} + + {:error, _} = error -> + error + end + end + + @impl true + def stream(conn, sql, params, opts) do + ensure_list_params!(params) + Postgrex.stream(conn, sql, params, opts) + end + + defp ensure_list_params!(params) do + unless is_list(params) do + raise ArgumentError, "expected params to be a list, got: #{inspect(params)}" + end + end + + @parent_as __MODULE__ + alias Ecto.Query.{BooleanExpr, ByExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) + {select_distinct, order_by_distinct} = distinct(query.distinct, sources, query) + + cte = cte(query, sources) + from = from(query, sources) + select = select(query, select_distinct, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, 
sources) + having = having(query, sources) + window = window(query, sources) + combinations = combinations(query, as_prefix) + order_by = order_by(query, order_by_distinct, sources) + limit = limit(query, sources) + offset = offset(query, sources) + lock = lock(query, sources) + + [ + cte, + select, + from, + join, + where, + group_by, + having, + window, + combinations, + order_by, + limit, + offset | lock + ] + end + + @impl true + def update_all(%{from: %{source: source}} = query, prefix \\ nil) do + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, source) + + prefix = prefix || ["UPDATE ", from, " AS ", name | " SET "] + fields = update_fields(query, sources) + {join, wheres} = using_join(query, :update_all, "FROM", sources) + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, prefix, fields, join, where | returning(query, sources)] + end + + @impl true + def delete_all(%{from: from} = query) do + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, from) + + {join, wheres} = using_join(query, :delete_all, "USING", sources) + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, "DELETE FROM ", from, " AS ", name, join, where | returning(query, sources)] + end + + @impl true + def insert(prefix, table, header, rows, on_conflict, returning, placeholders) do + counter_offset = length(placeholders) + 1 + + values = + if header == [] do + [" VALUES " | Enum.map_intersperse(rows, ?,, fn _ -> "(DEFAULT)" end)] + else + [" (", quote_names(header), ") " | insert_all(rows, counter_offset)] + end + + [ + "INSERT INTO ", + quote_name(prefix, table), + insert_as(on_conflict), + values, + on_conflict(on_conflict) | returning(returning) + ] + end + + defp insert_as({%{sources: sources}, _, _}) do + {_expr, name, _schema} = create_name(sources, 0, []) + [" AS " | name] + end + + defp insert_as({_, _, _}) do + [] + 
end + + defp on_conflict({:raise, _, []}), + do: [] + + defp on_conflict({:nothing, _, targets}), + do: [" ON CONFLICT ", conflict_target(targets) | "DO NOTHING"] + + defp on_conflict({fields, _, targets}) when is_list(fields), + do: [" ON CONFLICT ", conflict_target!(targets), "DO " | replace(fields)] + + defp on_conflict({query, _, targets}), + do: [" ON CONFLICT ", conflict_target!(targets), "DO " | update_all(query, "UPDATE SET ")] + + defp conflict_target!([]), + do: error!(nil, "the :conflict_target option is required on upserts by PostgreSQL") + + defp conflict_target!(target), + do: conflict_target(target) + + defp conflict_target({:unsafe_fragment, fragment}), + do: [fragment, ?\s] + + defp conflict_target([]), + do: [] + + defp conflict_target(targets), + do: [?(, quote_names(targets), ?), ?\s] + + defp replace(fields) do + [ + "UPDATE SET " + | Enum.map_intersperse(fields, ?,, fn field -> + quoted = quote_name(field) + [quoted, " = ", "EXCLUDED." | quoted] + end) + ] + end + + defp insert_all(query = %Ecto.Query{}, _counter) do + [?(, all(query), ?)] + end + + defp insert_all(rows, counter) do + [ + "VALUES ", + intersperse_reduce(rows, ?,, counter, fn row, counter -> + {row, counter} = insert_each(row, counter) + {[?(, row, ?)], counter} + end) + |> elem(0) + ] + end + + defp insert_each(values, counter) do + intersperse_reduce(values, ?,, counter, fn + nil, counter -> + {"DEFAULT", counter} + + {%Ecto.Query{} = query, params_counter}, counter -> + {[?(, all(query), ?)], counter + params_counter} + + {:placeholder, placeholder_index}, counter -> + {[?$ | placeholder_index], counter} + + _, counter -> + {[?$ | Integer.to_string(counter)], counter + 1} + end) + end + + @impl true + def update(prefix, table, fields, filters, returning) do + {fields, count} = + intersperse_reduce(fields, ", ", 1, fn field, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + {filters, _count} = + intersperse_reduce(filters, " AND ", count, fn 
+ {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc} + + {field, _value}, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + [ + "UPDATE ", + quote_name(prefix, table), + " SET ", + fields, + " WHERE ", + filters | returning(returning) + ] + end + + @impl true + def delete(prefix, table, filters, returning) do + {filters, _} = + intersperse_reduce(filters, " AND ", 1, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc} + + {field, _value}, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + ["DELETE FROM ", quote_name(prefix, table), " WHERE ", filters | returning(returning)] + end + + @impl true + def explain_query(conn, query, params, opts) do + {explain_opts, opts} = + Keyword.split( + opts, + ~w[analyze verbose costs settings buffers timing summary format plan]a + ) + + {plan_type, explain_opts} = Keyword.pop(explain_opts, :plan) + fallback_generic? = plan_type == :fallback_generic + + result = + cond do + fallback_generic? and explain_opts[:analyze] -> + raise ArgumentError, + "analyze cannot be used with a `:fallback_generic` explain plan " <> + "as the actual parameter values are ignored under this plan type." <> + "You may either change the plan type to `:custom` or remove the `:analyze` option." + + fallback_generic? -> + explain_queries = build_fallback_generic_queries(query, length(params), explain_opts) + fallback_generic_query(conn, explain_queries, opts) + + true -> + query(conn, build_explain_query(query, explain_opts), params, opts) + end + + map_format? = explain_opts[:format] == :map + + case result do + {:ok, %Postgrex.Result{rows: rows}} when map_format? 
-> + {:ok, List.flatten(rows)} + + {:ok, %Postgrex.Result{rows: rows}} -> + {:ok, Enum.map_join(rows, "\n", & &1)} + + error -> + error + end + end + + def build_fallback_generic_queries(query, num_params, opts) do + prepare_args = + if num_params > 0, + do: ["( ", Enum.map_intersperse(1..num_params, ", ", fn _ -> "unknown" end), " )"], + else: [] + + prepare = + [ + "PREPARE ", + @explain_prepared_statement_name, + prepare_args, + " AS ", + query + ] + |> IO.iodata_to_binary() + + set = "SET LOCAL plan_cache_mode = force_generic_plan" + + execute_args = + if num_params > 0, + do: ["( ", Enum.map_intersperse(1..num_params, ", ", fn _ -> "NULL" end), " )"], + else: [] + + execute = + [ + "EXPLAIN ", + build_explain_opts(opts), + "EXECUTE ", + @explain_prepared_statement_name, + execute_args + ] + |> IO.iodata_to_binary() + + deallocate = "DEALLOCATE #{@explain_prepared_statement_name}" + + {prepare, set, execute, deallocate} + end + + def build_explain_query(query, opts) do + ["EXPLAIN ", build_explain_opts(opts), query] + |> IO.iodata_to_binary() + end + + defp build_explain_opts([]), do: [] + + defp build_explain_opts(opts) do + {analyze, opts} = Keyword.pop(opts, :analyze) + {verbose, opts} = Keyword.pop(opts, :verbose) + + # Given only ANALYZE or VERBOSE opts we assume the legacy format + # to support all Postgres versions, otherwise assume the new + # syntax supported since v9.0 + case opts do + [] -> + [ + if_do(quote_boolean(analyze) == "TRUE", "ANALYZE "), + if_do(quote_boolean(verbose) == "TRUE", "VERBOSE ") + ] + + opts -> + opts = + ([analyze: analyze, verbose: verbose] ++ opts) + |> Enum.reduce([], fn + {_, nil}, acc -> + acc + + {:format, value}, acc -> + [String.upcase("#{format_to_sql(value)}") | acc] + + {opt, value}, acc -> + [String.upcase("#{opt} #{quote_boolean(value)}") | acc] + end) + |> Enum.reverse() + |> Enum.join(", ") + + ["( ", opts, " ) "] + end + end + + defp fallback_generic_query(conn, queries, opts) do + {prepare, set, execute, 
deallocate} = queries + + with {:ok, _} <- query(conn, prepare, [], opts), + {:ok, _} <- query(conn, set, [], opts), + {:ok, result} <- query(conn, execute, [], opts), + {:ok, _} <- query(conn, deallocate, [], opts) do + {:ok, result} + end + end + + ## Query generation + + binary_ops = [ + ==: " = ", + !=: " != ", + <=: " <= ", + >=: " >= ", + <: " < ", + >: " > ", + +: " + ", + -: " - ", + *: " * ", + /: " / ", + and: " AND ", + or: " OR ", + ilike: " ILIKE ", + like: " LIKE " + ] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}} = query, select_distinct, sources) do + ["SELECT", select_distinct, ?\s | select_fields(fields, sources, query)] + end + + defp select_fields([], _sources, _query), + do: "TRUE" + + defp select_fields(fields, sources, query) do + Enum.map_intersperse(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {nil, source, nil} -> + error!( + query, + "PostgreSQL adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select" + ) + + {source, _, nil} -> + error!( + query, + "PostgreSQL adapter does not support selecting all fields from #{source} without a schema. 
" <> + "Please specify a schema or specify exactly which fields you want to select" + ) + + {_, source, _} -> + source + end + + {key, value} -> + [expr(value, sources, query), " AS " | quote_name(key)] + + value -> + expr(value, sources, query) + end) + end + + defp distinct(nil, _, _), do: {[], []} + defp distinct(%ByExpr{expr: []}, _, _), do: {[], []} + defp distinct(%ByExpr{expr: true}, _, _), do: {" DISTINCT", []} + defp distinct(%ByExpr{expr: false}, _, _), do: {[], []} + + defp distinct(%ByExpr{expr: exprs}, sources, query) do + {[ + " DISTINCT ON (", + Enum.map_intersperse(exprs, ", ", fn {_, expr} -> expr(expr, sources, query) end), + ?) + ], exprs} + end + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + [" FROM ", from, " AS ", name | Enum.map(hints, &[?\s | &1])] + end + + defp cte(%{with_ctes: %WithExpr{queries: [_ | _]}} = query, sources) do + %{with_ctes: with} = query + recursive_opt = if with.recursive, do: "RECURSIVE ", else: "" + ctes = Enum.map_intersperse(with.queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", recursive_opt, ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({name, opts, cte}, sources, query) do + materialized_opt = + case opts[:materialized] do + nil -> "" + true -> "MATERIALIZED" + false -> "NOT MATERIALIZED" + end + + operation_opt = Map.get(opts, :operation) + + [quote_name(name), " AS ", materialized_opt, cte_query(cte, sources, query, operation_opt)] + end + + defp cte_query(query, sources, parent_query, nil) do + cte_query(query, sources, parent_query, :all) + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query, :update_all) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", update_all(query), ")"] + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query, :delete_all) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", 
delete_all(query), ")"] + end + + defp cte_query(%Ecto.Query{} = query, _sources, _parent_query, :insert_all) do + error!(query, "Postgres adapter does not support CTE operation :insert_all") + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query, :all) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", all(query, subquery_as_prefix(sources)), ")"] + end + + defp cte_query(%QueryExpr{expr: expr}, sources, query, _operation) do + expr(expr, sources, query) + end + + defp update_fields(%{updates: updates} = query, sources) do + for( + %{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, key, value, sources, query) + ) + |> Enum.intersperse(", ") + end + + defp update_op(:set, key, value, sources, query) do + [quote_name(key), " = " | expr(value, sources, query)] + end + + defp update_op(:inc, key, value, sources, query) do + [ + quote_name(key), + " = ", + quote_qualified_name(key, sources, 0), + " + " + | expr(value, sources, query) + ] + end + + defp update_op(:push, key, value, sources, query) do + [ + quote_name(key), + " = array_append(", + quote_qualified_name(key, sources, 0), + ", ", + expr(value, sources, query), + ?) + ] + end + + defp update_op(:pull, key, value, sources, query) do + [ + quote_name(key), + " = array_remove(", + quote_qualified_name(key, sources, 0), + ", ", + expr(value, sources, query), + ?) 
+ ] + end + + defp update_op(command, _key, _value, _sources, query) do + error!(query, "unknown update operation #{inspect(command)} for PostgreSQL") + end + + defp using_join(%{joins: []}, _kind, _prefix, _sources), do: {[], []} + + defp using_join(%{joins: joins} = query, :update_all, prefix, sources) do + {inner_joins, other_joins} = Enum.split_while(joins, &(&1.qual == :inner)) + + if inner_joins == [] and other_joins != [] do + error!( + query, + "Need at least one inner join at the beginning to use other joins with update_all" + ) + end + + froms = + Enum.map_intersperse(inner_joins, ", ", fn + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | [name]] + end) + + join_clauses = join(%{query | joins: other_joins}, sources) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- inner_joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?\s, prefix, ?\s, froms | join_clauses], wheres} + end + + defp using_join(%{joins: joins} = query, kind, prefix, sources) do + froms = + Enum.map_intersperse(joins, ", ", fn + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | name] + + %JoinExpr{qual: qual} -> + error!(query, "PostgreSQL supports only inner joins on #{kind}, got: `#{qual}`") + end) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?\s, prefix, ?\s | froms], wheres} + end + + defp join(%{joins: []}, _sources), do: [] + + defp join(%{joins: joins} = query, sources) do + [ + ?\s + | Enum.map_intersperse(joins, ?\s, fn + %JoinExpr{ + on: %QueryExpr{expr: expr}, + qual: qual, + ix: ix, + source: source, + hints: hints + } -> + if hints != [] do + error!(query, "table hints are not supported by PostgreSQL") + end + + {join, name} = 
get_source(query, sources, ix, source) + [join_qual(qual, query), join, " AS ", name | join_on(qual, expr, sources, query)] + end) + ] + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(:cross_lateral, true, _sources, _query), do: [] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner, _), do: "INNER JOIN " + defp join_qual(:inner_lateral, _), do: "INNER JOIN LATERAL " + defp join_qual(:left, _), do: "LEFT OUTER JOIN " + defp join_qual(:left_lateral, _), do: "LEFT OUTER JOIN LATERAL " + defp join_qual(:right, _), do: "RIGHT OUTER JOIN " + defp join_qual(:full, _), do: "FULL OUTER JOIN " + defp join_qual(:cross, _), do: "CROSS JOIN " + defp join_qual(:cross_lateral, _), do: "CROSS JOIN LATERAL " + + defp join_qual(qual, query), + do: + error!( + query, + "join qualifier #{inspect(qual)} is not supported in the PostgreSQL adapter" + ) + + defp where(%{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp group_by(%{group_bys: []}, _sources), do: [] + + defp group_by(%{group_bys: group_bys} = query, sources) do + [ + " GROUP BY " + | Enum.map_intersperse(group_bys, ", ", fn + %ByExpr{expr: expr} -> + Enum.map_intersperse(expr, ", ", &expr(&1, sources, query)) + end) + ] + end + + defp window(%{windows: []}, _sources), do: [] + + defp window(%{windows: windows} = query, sources) do + [ + " WINDOW " + | Enum.map_intersperse(windows, ", ", fn {name, %{expr: kw}} -> + [quote_name(name), " AS " | window_exprs(kw, sources, query)] + end) + ] + end + + defp window_exprs(kw, sources, query) do + [?(, Enum.map_intersperse(kw, ?\s, &window_expr(&1, sources, query)), ?)] + end + + defp window_expr({:partition_by, fields}, sources, query) do + ["PARTITION BY " | Enum.map_intersperse(fields, ", ", &expr(&1, sources, query))] + end + + defp 
window_expr({:order_by, fields}, sources, query) do + ["ORDER BY " | Enum.map_intersperse(fields, ", ", &order_by_expr(&1, sources, query))] + end + + defp window_expr({:frame, {:fragment, _, _} = fragment}, sources, query) do + expr(fragment, sources, query) + end + + defp order_by(%{order_bys: []}, _distinct, _sources), do: [] + + defp order_by(%{order_bys: order_bys} = query, distinct, sources) do + order_bys = Enum.flat_map(order_bys, & &1.expr) + order_bys = order_by_concat(distinct, order_bys) + [" ORDER BY " | Enum.map_intersperse(order_bys, ", ", &order_by_expr(&1, sources, query))] + end + + defp order_by_concat([head | left], [head | right]), do: [head | order_by_concat(left, right)] + defp order_by_concat(left, right), do: left ++ right + + defp order_by_expr({dir, expr}, sources, query) do + str = expr(expr, sources, query) + + case dir do + :asc -> str + :asc_nulls_last -> [str | " ASC NULLS LAST"] + :asc_nulls_first -> [str | " ASC NULLS FIRST"] + :desc -> [str | " DESC"] + :desc_nulls_last -> [str | " DESC NULLS LAST"] + :desc_nulls_first -> [str | " DESC NULLS FIRST"] + end + end + + defp limit(%{limit: nil}, _sources), do: [] + + defp limit(%{limit: %{with_ties: true}, order_bys: []} = query, _sources) do + error!( + query, + "PostgreSQL adapter requires an `order_by` clause if the " <> + "`:with_ties` limit option is `true`" + ) + end + + defp limit(%{limit: %{expr: expr, with_ties: true}} = query, sources) do + [" FETCH FIRST ", expr(expr, sources, query) | " ROWS WITH TIES"] + end + + defp limit(%{limit: %{expr: expr}} = query, sources) do + [" LIMIT " | expr(expr, sources, query)] + end + + defp offset(%{offset: nil}, _sources), do: [] + + defp offset(%{offset: %QueryExpr{expr: expr}} = query, sources) do + [" OFFSET " | expr(expr, sources, query)] + end + + defp combinations(%{combinations: combinations}, as_prefix) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query, as_prefix), ")"] + {:union_all, query} -> [" UNION 
ALL (", all(query, as_prefix), ")"] + {:except, query} -> [" EXCEPT (", all(query, as_prefix), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query, as_prefix), ")"] + {:intersect, query} -> [" INTERSECT (", all(query, as_prefix), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query, as_prefix), ")"] + end) + end + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [?\s | binary] + defp lock(%{lock: expr} = query, sources), do: [?\s | expr(expr, sources, query)] + + defp boolean(_name, [], _sources, _query), do: [] + + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [ + name + | Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op), paren_expr(expr, sources, query)]} + + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op), paren_expr(expr, sources, query)]} + end) + |> elem(1) + ] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select\s/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + defp expr({:^, [], [ix]}, _sources, _query) do + [?$ | Integer.to_string(ix + 1)] + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) or is_binary(field) do + {ix, sources} = get_parent_sources_ix(query, as) + quote_qualified_name(field, sources, ix) + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) or is_binary(field) do + quote_qualified_name(field, sources, idx) + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_, source, _} = elem(sources, idx) + source + end + + 
defp expr({:in, _, [_left, []]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = Enum.map_intersperse(right, ?,, &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [left, {:^, _, [ix, _]}]}, sources, query) do + [expr(left, sources, query), " = ANY($", Integer.to_string(ix + 1), ?)] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query), ?)] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + [expr(arg, sources, query) | " IS NULL"] + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query), ?)] + end + + defp expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + combinations = + Enum.map(query.combinations, fn {type, combination_query} -> + {type, put_in(combination_query.aliases[@parent_as], {parent_query, sources})} + end) + + query = put_in(query.combinations, combinations) + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "PostgreSQL adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:values, _, [types, idx, num_rows]}, _, _query) do + [?(, values_list(types, idx + 1, num_rows), ?)] + end + + defp expr({:identifier, _, [literal]}, _sources, _query) do + quote_name(literal) + end + + defp expr({:constant, _, [literal]}, _sources, _query) when 
is_binary(literal) do + [?', escape_string(literal), ?'] + end + + defp expr({:constant, _, [literal]}, _sources, _query) when is_number(literal) do + [to_string(literal)] + end + + defp expr({:splice, _, [{:^, _, [idx, length]}]}, _sources, _query) do + Enum.map_join(1..length, ",", &"$#{idx + &1}") + end + + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [ + expr(datetime, sources, query), + type_unless_typed(datetime, "timestamp"), + " + ", + interval(count, interval, sources, query) + ] + end + + defp expr({:date_add, _, [date, count, interval]}, sources, query) do + [ + ?(, + expr(date, sources, query), + type_unless_typed(date, "date"), + " + ", + interval(count, interval, sources, query) | ")::date" + ] + end + + defp expr({:json_extract_path, _, [expr, path]}, sources, query) do + json_extract_path(expr, path, sources, query) + end + + defp expr({:filter, _, [agg, filter]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " FILTER (WHERE ", expr(filter, sources, query), ?)] + end + + defp expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | quote_name(name)] + end + + defp expr({:over, _, [agg, kw]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER ", window_exprs(kw, sources, query)] + end + + defp expr({:{}, _, elems}, sources, query) do + [?(, Enum.map_intersperse(elems, ?,, &expr(&1, sources, query)), ?)] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:==, _, [{:json_extract_path, _, [expr, path]} = left, right]}, sources, query) + when is_binary(right) or is_integer(right) or is_boolean(right) do + case Enum.split(path, -1) do + {path, [last]} when is_binary(last) -> + extracted = json_extract_path(expr, path, sources, query) + [?(, extracted, "@>'{", 
escape_json(last), ": ", escape_json(right) | "}')"] + + _ -> + [maybe_paren(left, sources, query), " = " | maybe_paren(right, sources, query)] + end + end + + defp expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {[], args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = args + [maybe_paren(left, sources, query), op | maybe_paren(right, sources, query)] + + {:fun, fun} -> + [fun, ?(, modifier, Enum.map_intersperse(args, ", ", &expr(&1, sources, query)), ?)] + end + end + + defp expr([], _sources, _query) do + # We cannot compare in postgres with the empty array + # i. e. `where array_column = ARRAY[];` + # as that will result in an error: + # ERROR: cannot determine type of empty array + # HINT: Explicitly cast to the desired type, for example ARRAY[]::integer[]. + # + # On the other side comparing with '{}' works + # because '{}' represents the pseudo-type "unknown" + # and thus the type gets inferred based on the column + # it is being compared to so `where array_column = '{}';` works. 
+ "'{}'" + end + + defp expr(list, sources, query) when is_list(list) do + ["ARRAY[", Enum.map_intersperse(list, ?,, &expr(&1, sources, query)), ?]] + end + + defp expr(%Decimal{} = decimal, _sources, _query) do + Decimal.to_string(decimal, :normal) + end + + defp expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query) + when is_binary(binary) do + ["'\\x", Base.encode16(binary, case: :lower) | "'::bytea"] + end + + defp expr(%Ecto.Query.Tagged{value: bitstring, type: :bitstring}, _sources, _query) + when is_bitstring(bitstring) do + bitstring_literal(bitstring) + end + + defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do + [maybe_paren(other, sources, query), ?:, ?: | tagged_to_db(type)] + end + + defp expr(nil, _sources, _query), do: "NULL" + defp expr(true, _sources, _query), do: "TRUE" + defp expr(false, _sources, _query), do: "FALSE" + + defp expr(literal, _sources, _query) when is_binary(literal) do + [?\', escape_string(literal), ?\'] + end + + defp expr(literal, _sources, _query) when is_integer(literal) do + Integer.to_string(literal) + end + + defp expr(literal, _sources, _query) when is_float(literal) do + [Float.to_string(literal) | "::float"] + end + + defp expr(expr, _sources, query) do + error!(query, "unsupported expression: #{inspect(expr)}") + end + + defp json_extract_path(expr, [], sources, query) do + expr(expr, sources, query) + end + + defp json_extract_path(expr, path, sources, query) do + path = Enum.map_intersperse(path, ?,, &escape_json(&1, sources, query)) + [?(, expr(expr, sources, query), "#>array[", path, "]::text[])"] + end + + defp values_list(types, idx, num_rows) do + rows = :lists.seq(1, num_rows, 1) + + [ + "VALUES ", + intersperse_reduce(rows, ?,, idx, fn _, idx -> + {value, idx} = values_expr(types, idx) + {[?(, value, ?)], idx} + end) + |> elem(0) + ] + end + + defp values_expr(types, idx) do + intersperse_reduce(types, ?,, idx, fn {_field, type}, idx -> + {[?$, Integer.to_string(idx), 
?:, ?: | tagged_to_db(type)], idx + 1} + end) + end + + defp type_unless_typed(%Ecto.Query.Tagged{}, _type), do: [] + defp type_unless_typed(_, type), do: [?:, ?: | type] + + # Always use the largest possible type for integers + defp tagged_to_db(:id), do: "bigint" + defp tagged_to_db(:integer), do: "bigint" + defp tagged_to_db({:array, type}), do: [tagged_to_db(type), ?[, ?]] + defp tagged_to_db(type), do: ecto_to_db(type) + + defp interval(count, interval, _sources, _query) when is_integer(count) do + ["interval '", String.Chars.Integer.to_string(count), ?\s, interval, ?\'] + end + + defp interval(count, interval, _sources, _query) when is_float(count) do + count = :erlang.float_to_binary(count, [:compact, decimals: 16]) + ["interval '", count, ?\s, interval, ?\'] + end + + defp interval(count, interval, sources, query) do + [?(, expr(count, sources, query), "::numeric * ", interval(1, interval, sources, query), ?)] + end + + defp maybe_paren({op, _, [_, _]} = expr, sources, query) when op in @binary_ops, + do: paren_expr(expr, sources, query) + + defp maybe_paren({:is_nil, _, [_]} = expr, sources, query), + do: paren_expr(expr, sources, query) + + defp maybe_paren(expr, sources, query), + do: expr(expr, sources, query) + + defp returning(%{select: nil}, _sources), + do: [] + + defp returning(%{select: %{fields: fields}} = query, sources), + do: [" RETURNING " | select_fields(fields, sources, query)] + + defp returning([]), + do: [] + + defp returning(returning), + do: [" RETURNING " | quote_names(returning)] + + defp create_names(%{sources: sources}, as_prefix) do + create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() + end + + defp create_names(sources, pos, limit, as_prefix) when pos < limit do + [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)] + end + + defp create_names(_sources, pos, pos, as_prefix) do + [as_prefix] + end + + defp subquery_as_prefix(sources) do + [?s | 
:erlang.element(tuple_size(sources), sources)] + end + + defp create_name(sources, pos, as_prefix) do + case elem(sources, pos) do + {:fragment, _, _} -> + {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} + + {:values, _, _} -> + {nil, as_prefix ++ [?v | Integer.to_string(pos)], nil} + + {table, schema, prefix} -> + name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] + {quote_name(prefix, table), name, schema} + + %Ecto.SubQuery{} -> + {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil} + end + end + + defp create_alias(<>) when first in ?a..?z when first in ?A..?Z do + first + end + + defp create_alias(_) do + ?t + end + + # DDL + + alias Ecto.Migration.{Table, Index, Reference, Constraint} + + @creates [:create, :create_if_not_exists] + @drops [:drop, :drop_if_exists] + + @impl true + def execute_ddl({command, %Table{} = table, columns}) when command in @creates do + table_name = quote_name(table.prefix, table.name) + + query = [ + "CREATE TABLE ", + if_do(command == :create_if_not_exists, "IF NOT EXISTS "), + table_name, + ?\s, + ?(, + column_definitions(table, columns), + pk_definition(columns, ", "), + ?), + options_expr(table.options) + ] + + [query] ++ + comments_on("TABLE", table_name, table.comment) ++ + comments_for_columns(table_name, columns) + end + + def execute_ddl({command, %Table{} = table, mode}) when command in @drops do + [ + [ + "DROP TABLE ", + if_do(command == :drop_if_exists, "IF EXISTS "), + quote_name(table.prefix, table.name), + drop_mode(mode) + ] + ] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + table_name = quote_name(table.prefix, table.name) + + query = [ + "ALTER TABLE ", + table_name, + ?\s, + column_changes(table, changes), + pk_definition(changes, ", ADD ") + ] + + [query] ++ + comments_on("TABLE", table_name, table.comment) ++ + comments_for_columns(table_name, changes) + end + + def execute_ddl({command, %Index{} = index}) when command in @creates do + fields = 
Enum.map_intersperse(index.columns, ", ", &index_expr/1) + include_fields = Enum.map_intersperse(index.include, ", ", &include_expr/1) + + maybe_nulls_distinct = + case index.nulls_distinct do + nil -> [] + true -> " NULLS DISTINCT" + false -> " NULLS NOT DISTINCT" + end + + queries = [ + [ + "CREATE ", + if_do(index.unique, "UNIQUE "), + "INDEX ", + if_do(index.concurrently, "CONCURRENTLY "), + if_do(command == :create_if_not_exists, "IF NOT EXISTS "), + quote_name(index.name), + " ON ", + if_do(index.only, "ONLY "), + quote_name(index.prefix, index.table), + if_do(index.using, [" USING ", to_string(index.using)]), + ?\s, + ?(, + fields, + ?), + if_do(include_fields != [], [" INCLUDE ", ?(, include_fields, ?)]), + maybe_nulls_distinct, + if_do(index.options != nil, [" WITH ", ?(, index.options, ?)]), + if_do(index.where, [" WHERE ", to_string(index.where)]) + ] + ] + + queries ++ comments_on("INDEX", quote_name(index.prefix, index.name), index.comment) + end + + def execute_ddl({command, %Index{} = index, mode}) when command in @drops do + [ + [ + "DROP INDEX ", + if_do(index.concurrently, "CONCURRENTLY "), + if_do(command == :drop_if_exists, "IF EXISTS "), + quote_name(index.prefix, index.name), + drop_mode(mode) + ] + ] + end + + def execute_ddl({:rename, %Index{} = current_index, new_name}) do + [ + [ + "ALTER INDEX ", + quote_name(current_index.prefix, current_index.name), + " RENAME TO ", + quote_name(new_name) + ] + ] + end + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [ + [ + "ALTER TABLE ", + quote_name(current_table.prefix, current_table.name), + " RENAME TO ", + quote_name(nil, new_table.name) + ] + ] + end + + def execute_ddl({:rename, %Table{} = table, current_column, new_column}) do + [ + [ + "ALTER TABLE ", + quote_name(table.prefix, table.name), + " RENAME ", + quote_name(current_column), + " TO ", + quote_name(new_column) + ] + ] + end + + def execute_ddl({:create, %Constraint{} = constraint}) do + table_name = 
quote_name(constraint.prefix, constraint.table) + queries = [["ALTER TABLE ", table_name, " ADD ", new_constraint_expr(constraint)]] + + queries ++ comments_on("CONSTRAINT", constraint.name, constraint.comment, table_name) + end + + def execute_ddl({command, %Constraint{} = constraint, mode}) when command in @drops do + [ + [ + "ALTER TABLE ", + quote_name(constraint.prefix, constraint.table), + " DROP CONSTRAINT ", + if_do(command == :drop_if_exists, "IF EXISTS "), + quote_name(constraint.name), + drop_mode(mode) + ] + ] + end + + def execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "PostgreSQL adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(%Postgrex.Result{} = result) do + %{messages: messages} = result + + for message <- messages do + %{message: message, severity: severity} = message + + {ddl_log_level(severity), message, []} + end + end + + @impl true + def table_exists_query(table) do + {"SELECT true FROM information_schema.tables WHERE table_name = $1 AND table_schema = current_schema() LIMIT 1", + [table]} + end + + defp drop_mode(:cascade), do: " CASCADE" + defp drop_mode(:restrict), do: [] + + # From https://www.postgresql.org/docs/current/protocol-error-fields.html. 
+ defp ddl_log_level("DEBUG"), do: :debug + defp ddl_log_level("LOG"), do: :info + defp ddl_log_level("INFO"), do: :info + defp ddl_log_level("NOTICE"), do: :info + defp ddl_log_level("WARNING"), do: :warn + defp ddl_log_level("ERROR"), do: :error + defp ddl_log_level("FATAL"), do: :error + defp ddl_log_level("PANIC"), do: :error + defp ddl_log_level(_severity), do: :info + + defp pk_definition(columns, prefix) do + pks = + for {action, name, _, opts} <- columns, + action != :remove, + opts[:primary_key], + do: name + + case pks do + [] -> [] + _ -> [prefix, "PRIMARY KEY (", quote_names(pks), ")"] + end + end + + defp comments_on(_object, _name, nil), do: [] + + defp comments_on(object, name, comment) do + [["COMMENT ON ", object, ?\s, name, " IS ", single_quote(comment)]] + end + + defp comments_on(_object, _name, nil, _table_name), do: [] + + defp comments_on(object, name, comment, table_name) do + [ + [ + "COMMENT ON ", + object, + ?\s, + quote_name(name), + " ON ", + table_name, + " IS ", + single_quote(comment) + ] + ] + end + + defp comments_for_columns(table_name, columns) do + Enum.flat_map(columns, fn + {:remove, _column_name, _column_type, _opts} -> + [] + + {_operation, column_name, _column_type, opts} -> + column_name = [table_name, ?. 
| quote_name(column_name)] + comments_on("COLUMN", column_name, opts[:comment]) + + _ -> + [] + end) + end + + defp column_definitions(table, columns) do + Enum.map_intersperse(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do + [ + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(ref.type, opts), + ", ", + reference_expr(ref, table, name) + ] + end + + defp column_definition(_table, {:add, name, type, opts}) do + [quote_name(name), ?\s, column_type(type, opts), column_options(type, opts)] + end + + defp column_changes(table, columns) do + Enum.map_intersperse(columns, ", ", &column_change(table, &1)) + end + + defp column_change(table, {:add, name, %Reference{} = ref, opts}) do + [ + "ADD COLUMN ", + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(ref.type, opts), + ", ADD ", + reference_expr(ref, table, name) + ] + end + + defp column_change(_table, {:add, name, type, opts}) do + ["ADD COLUMN ", quote_name(name), ?\s, column_type(type, opts), column_options(type, opts)] + end + + defp column_change(table, {:add_if_not_exists, name, %Reference{} = ref, opts}) do + [ + "ADD COLUMN IF NOT EXISTS ", + quote_name(name), + ?\s, + reference_column_type(ref.type, opts), + column_options(ref.type, opts), + ", ADD ", + reference_expr(ref, table, name) + ] + end + + defp column_change(_table, {:add_if_not_exists, name, type, opts}) do + [ + "ADD COLUMN IF NOT EXISTS ", + quote_name(name), + ?\s, + column_type(type, opts), + column_options(type, opts) + ] + end + + defp column_change(table, {:modify, name, %Reference{} = ref, opts}) do + collation = Keyword.fetch(opts, :collation) + + [ + drop_reference_expr(opts[:from], table, name), + "ALTER COLUMN ", + quote_name(name), + " TYPE ", + reference_column_type(ref.type, opts), + ", ADD ", + reference_expr(ref, table, name), + modify_null(name, opts), + modify_default(name, 
ref.type, opts), + collation_expr(collation) + ] + end + + defp column_change(table, {:modify, name, type, opts}) do + collation = Keyword.fetch(opts, :collation) + + [ + drop_reference_expr(opts[:from], table, name), + "ALTER COLUMN ", + quote_name(name), + " TYPE ", + column_type(type, opts), + modify_null(name, opts), + modify_default(name, type, opts), + collation_expr(collation) + ] + end + + defp column_change(_table, {:remove, name}), do: ["DROP COLUMN ", quote_name(name)] + + defp column_change(table, {:remove, name, %Reference{} = ref, _opts}) do + [drop_reference_expr(ref, table, name), "DROP COLUMN ", quote_name(name)] + end + + defp column_change(_table, {:remove, name, _type, _opts}), + do: ["DROP COLUMN ", quote_name(name)] + + defp column_change(table, {:remove_if_exists, name, %Reference{} = ref}) do + [ + drop_reference_if_exists_expr(ref, table, name), + "DROP COLUMN IF EXISTS ", + quote_name(name) + ] + end + + defp column_change(table, {:remove_if_exists, name, _type}), + do: column_change(table, {:remove_if_exists, name}) + + defp column_change(_table, {:remove_if_exists, name}), + do: ["DROP COLUMN IF EXISTS ", quote_name(name)] + + defp modify_null(name, opts) do + case Keyword.get(opts, :null) do + true -> [", ALTER COLUMN ", quote_name(name), " DROP NOT NULL"] + false -> [", ALTER COLUMN ", quote_name(name), " SET NOT NULL"] + nil -> [] + end + end + + defp modify_default(name, type, opts) do + case Keyword.fetch(opts, :default) do + {:ok, val} -> + [", ALTER COLUMN ", quote_name(name), " SET", default_expr({:ok, val}, type)] + + :error -> + [] + end + end + + defp column_options(type, opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + collation = Keyword.fetch(opts, :collation) + + [default_expr(default, type), null_expr(null), collation_expr(collation)] + end + + defp null_expr(false), do: " NOT NULL" + defp null_expr(true), do: " NULL" + defp null_expr(_), do: [] + + defp collation_expr({:ok, 
collation_name}), do: " COLLATE \"#{collation_name}\"" + defp collation_expr(_), do: [] + + defp new_constraint_expr(%Constraint{check: check} = constraint) when is_binary(check) do + [ + "CONSTRAINT ", + quote_name(constraint.name), + " CHECK (", + check, + ")", + validate(constraint.validate) + ] + end + + defp new_constraint_expr(%Constraint{exclude: exclude} = constraint) + when is_binary(exclude) do + [ + "CONSTRAINT ", + quote_name(constraint.name), + " EXCLUDE USING ", + exclude, + validate(constraint.validate) + ] + end + + defp default_expr({:ok, nil}, _type), do: " DEFAULT NULL" + defp default_expr({:ok, literal}, type), do: [" DEFAULT ", default_type(literal, type)] + defp default_expr(:error, _), do: [] + + defp default_type(list, {:array, inner} = type) when is_list(list) do + [ + "ARRAY[", + Enum.map_intersperse(list, ?,, &default_type(&1, inner)), + "]::", + ecto_to_db(type) + ] + end + + defp default_type(literal, _type) when is_binary(literal) do + if :binary.match(literal, <<0>>) == :nomatch and String.valid?(literal) do + single_quote(literal) + else + encoded = "\\x" <> Base.encode16(literal, case: :lower) + + raise ArgumentError, + "default values are interpolated as UTF-8 strings and cannot contain null bytes. " <> + "`#{inspect(literal)}` is invalid. 
If you want to write it as a binary, use \"#{encoded}\", " <> + "otherwise refer to PostgreSQL documentation for instructions on how to escape this SQL type" + end + end + + defp default_type(literal, _type) when is_bitstring(literal) do + bitstring_literal(literal) + end + + defp default_type(literal, _type) when is_number(literal), do: to_string(literal) + defp default_type(literal, _type) when is_boolean(literal), do: to_string(literal) + + defp default_type(%{} = map, :map) do + library = Application.get_env(:postgrex, :json_library, Jason) + default = IO.iodata_to_binary(library.encode_to_iodata!(map)) + [single_quote(default)] + end + + defp default_type({:fragment, expr}, _type), + do: [expr] + + defp default_type(expr, type), + do: + raise( + ArgumentError, + "unknown default `#{inspect(expr)}` for type `#{inspect(type)}`. " <> + ":default may be a string, number, boolean, list of strings, list of integers, map (when type is Map), or a fragment(...)" + ) + + defp index_expr({dir, literal}) when is_binary(literal), + do: index_dir(dir, literal) + + defp index_expr({dir, literal}), + do: index_dir(dir, quote_name(literal)) + + defp index_expr(literal) when is_binary(literal), + do: literal + + defp index_expr(literal), + do: quote_name(literal) + + defp index_dir(dir, str) + when dir in [ + :asc, + :asc_nulls_first, + :asc_nulls_last, + :desc, + :desc_nulls_first, + :desc_nulls_last + ] do + case dir do + :asc -> [str | " ASC"] + :asc_nulls_first -> [str | " ASC NULLS FIRST"] + :asc_nulls_last -> [str | " ASC NULLS LAST"] + :desc -> [str | " DESC"] + :desc_nulls_first -> [str | " DESC NULLS FIRST"] + :desc_nulls_last -> [str | " DESC NULLS LAST"] + end + end + + defp include_expr(literal) when is_binary(literal), + do: literal + + defp include_expr(literal), + do: quote_name(literal) + + defp options_expr(nil), + do: [] + + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "PostgreSQL adapter does not support keyword lists in :options") + + 
defp options_expr(options), + do: [?\s, options] + + defp column_type(type, opts) do + type_name = column_type_name(type, opts) + + case Keyword.get(opts, :generated) do + nil when type == :identity -> + cleanup = fn v -> is_integer(v) and v > 0 end + + sequence = + [Keyword.get(opts, :start_value)] + |> Enum.filter(cleanup) + |> Enum.map(&"START WITH #{&1}") + |> Kernel.++( + [Keyword.get(opts, :increment)] + |> Enum.filter(cleanup) + |> Enum.map(&"INCREMENT BY #{&1}") + ) + + case sequence do + [] -> [type_name, " GENERATED BY DEFAULT AS IDENTITY"] + _ -> [type_name, " GENERATED BY DEFAULT AS IDENTITY(", Enum.join(sequence, " "), ") "] + end + + nil -> + type_name + + expr when is_binary(expr) -> + [type_name, " GENERATED ", expr] + + other -> + raise ArgumentError, + "the `:generated` option only accepts strings, received: #{inspect(other)}" + end + end + + defp column_type_name({:array, type}, opts) do + [column_type_name(type, opts), "[]"] + end + + defp column_type_name(type, _opts) when type in ~w(time utc_datetime naive_datetime)a do + [ecto_to_db(type), "(0)"] + end + + defp column_type_name(type, opts) + when type in ~w(time_usec utc_datetime_usec naive_datetime_usec)a do + precision = Keyword.get(opts, :precision) + type_name = ecto_to_db(type) + + if precision do + [type_name, ?(, to_string(precision), ?)] + else + type_name + end + end + + defp column_type_name(:duration, opts) do + precision = Keyword.get(opts, :precision) + fields = Keyword.get(opts, :fields) + type_name = ecto_to_db(:duration) + + cond do + fields && precision -> [type_name, " ", fields, ?(, to_string(precision), ?)] + precision -> [type_name, ?(, to_string(precision), ?)] + fields -> [type_name, " ", fields] + true -> [type_name] + end + end + + defp column_type_name(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + scale = Keyword.get(opts, :scale) + type_name = ecto_to_db(type) + + cond do + size -> [type_name, ?(, to_string(size), ?)] 
+ precision -> [type_name, ?(, to_string(precision), ?,, to_string(scale || 0), ?)] + type == :string -> [type_name, "(255)"] + true -> type_name + end + end + + defp reference_expr(%Reference{} = ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + [ + "CONSTRAINT ", + reference_name(ref, table, name), + ?\s, + "FOREIGN KEY (", + quote_names(current_columns), + ") REFERENCES ", + quote_name(Keyword.get(ref.options, :prefix, table.prefix), ref.table), + ?(, + quote_names(reference_columns), + ?), + reference_match(ref.match), + reference_on_delete(ref.on_delete), + reference_on_update(ref.on_update), + validate(ref.validate) + ] + end + + defp drop_reference_expr({%Reference{} = ref, _opts}, table, name), + do: drop_reference_expr(ref, table, name) + + defp drop_reference_expr(%Reference{} = ref, table, name), + do: ["DROP CONSTRAINT ", reference_name(ref, table, name), ", "] + + defp drop_reference_expr(_, _, _), + do: [] + + defp drop_reference_if_exists_expr(%Reference{} = ref, table, name), + do: ["DROP CONSTRAINT IF EXISTS ", reference_name(ref, table, name), ", "] + + defp drop_reference_if_exists_expr(_, _, _), + do: [] + + defp reference_name(%Reference{name: nil}, table, column), + do: quote_name("#{table.name}_#{column}_fkey") + + defp reference_name(%Reference{name: name}, _table, _column), + do: quote_name(name) + + defp reference_column_type(:serial, _opts), do: "integer" + defp reference_column_type(:bigserial, _opts), do: "bigint" + defp reference_column_type(:identity, _opts), do: "bigint" + defp reference_column_type(type, opts), do: column_type(type, opts) + + defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" + + defp reference_on_delete({:nilify, columns}), + do: [" ON DELETE SET NULL (", quote_names(columns), ")"] + + defp reference_on_delete(:default_all), do: " ON DELETE SET DEFAULT" + + defp reference_on_delete({:default, columns}), + do: [" ON DELETE SET DEFAULT (", 
quote_names(columns), ")"] + + defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" + defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT" + defp reference_on_delete(_), do: [] + + defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT" + defp reference_on_update(_), do: [] + + defp reference_match(nil), do: [] + defp reference_match(:full), do: " MATCH FULL" + defp reference_match(:simple), do: " MATCH SIMPLE" + defp reference_match(:partial), do: " MATCH PARTIAL" + + defp validate(false), do: " NOT VALID" + defp validate(_), do: [] + + ## Helpers + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + name = maybe_add_column_names(source, name) + {expr || expr(source, sources, query), name} + end + + defp get_parent_sources_ix(query, as) do + case query.aliases[@parent_as] do + {%{aliases: %{^as => ix}}, sources} -> {ix, sources} + {%{} = parent, _sources} -> get_parent_sources_ix(parent, as) + end + end + + defp maybe_add_column_names({:values, _, [types, _, _]}, name) do + fields = Keyword.keys(types) + [name, ?\s, ?(, quote_names(fields), ?)] + end + + defp maybe_add_column_names(_, name), do: name + + defp quote_qualified_name(name, sources, ix) do + {_, source, _} = elem(sources, ix) + [source, ?. 
| quote_name(name)] + end + + defp quote_names(names) do + Enum.map_intersperse(names, ?,, "e_name/1) + end + + defp quote_name(nil, name), do: quote_name(name) + + defp quote_name(prefix, name), do: [quote_name(prefix), ?., quote_name(name)] + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, "\"") do + error!(nil, "bad literal/field/index/table name #{inspect(name)} (\" is not permitted)") + end + + [?", name, ?"] + end + + # TRUE, ON, or 1 to enable the option, and FALSE, OFF, or 0 to disable it + defp quote_boolean(nil), do: nil + defp quote_boolean(true), do: "TRUE" + defp quote_boolean(false), do: "FALSE" + defp quote_boolean(value), do: error!(nil, "bad boolean value #{value}") + + defp format_to_sql(:text), do: "FORMAT TEXT" + defp format_to_sql(:map), do: "FORMAT JSON" + defp format_to_sql(:yaml), do: "FORMAT YAML" + + defp single_quote(value), do: [?', escape_string(value), ?'] + + defp bitstring_literal(value) do + size = bit_size(value) + <> = value + + [?b, ?', val |> Integer.to_string(2) |> String.pad_leading(size, ["0"]), ?'] + end + + defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) + + defp intersperse_reduce([], _separator, user_acc, _reducer, acc), + do: {acc, user_acc} + + defp intersperse_reduce([elem], _separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + {[acc | elem], user_acc} + end + + defp intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator]) + end + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + :binary.replace(value, "'", "''", [:global]) + end + + defp escape_json(value) when is_binary(value) do + escaped = + value + |> escape_string() + |> 
:binary.replace("\"", "\\\"", [:global]) + + [?", escaped, ?"] + end + + defp escape_json(value) when is_integer(value) do + Integer.to_string(value) + end + + defp escape_json(true), do: ["true"] + defp escape_json(false), do: ["false"] + + # To allow columns in json paths, we use the array[...] syntax + # which requires special handling for strings and column references. + # We still keep the escape_json/1 variant for strings because it is + # needed for the queries using @> + defp escape_json(value, _, _) when is_binary(value) do + [?', escape_string(value), ?'] + end + + defp escape_json({{:., _, [{:&, _, [_]}, _]}, _, []} = expr, sources, query) do + expr(expr, sources, query) + end + + defp escape_json({{:., _, [{:parent_as, _, [_]}, _]}, _, []} = expr, sources, query) do + expr(expr, sources, query) + end + + defp escape_json(other, _, _) do + escape_json(other) + end + + defp ecto_to_db({:array, t}), do: [ecto_to_db(t), ?[, ?]] + defp ecto_to_db(:id), do: "integer" + defp ecto_to_db(:identity), do: "bigint" + defp ecto_to_db(:serial), do: "serial" + defp ecto_to_db(:bigserial), do: "bigserial" + defp ecto_to_db(:binary_id), do: "uuid" + defp ecto_to_db(:string), do: "varchar" + defp ecto_to_db(:bitstring), do: "varbit" + defp ecto_to_db(:binary), do: "bytea" + defp ecto_to_db(:map), do: Application.fetch_env!(:ecto_sql, :postgres_map_type) + defp ecto_to_db({:map, _}), do: Application.fetch_env!(:ecto_sql, :postgres_map_type) + defp ecto_to_db(:time_usec), do: "time" + defp ecto_to_db(:utc_datetime), do: "timestamp" + defp ecto_to_db(:utc_datetime_usec), do: "timestamp" + defp ecto_to_db(:naive_datetime), do: "timestamp" + defp ecto_to_db(:naive_datetime_usec), do: "timestamp" + defp ecto_to_db(:duration), do: "interval" + defp ecto_to_db(atom) when is_atom(atom), do: Atom.to_string(atom) + + defp ecto_to_db(type) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. 
The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` or `{:array, t}` where `t` itself follows the same conditions." + end + + defp error!(nil, message) do + raise ArgumentError, message + end + + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql.ex b/deps/ecto_sql/lib/ecto/adapters/sql.ex new file mode 100644 index 0000000..e0c8cd1 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql.ex @@ -0,0 +1,1522 @@ +defmodule Ecto.Adapters.SQL do + @moduledoc ~S""" + This application provides functionality for working with + SQL databases in `Ecto`. + + ## Built-in adapters + + By default, we support the following adapters: + + * `Ecto.Adapters.Postgres` for Postgres + * `Ecto.Adapters.MyXQL` for MySQL + * `Ecto.Adapters.Tds` for SQLServer + + ## Additional functions + + If your `Ecto.Repo` is backed by any of the SQL adapters above, + this module will inject additional functions into your repository: + + * `disconnect_all(interval, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.disconnect_all/3` + + * `explain(type, query, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.explain/4` + + * `query(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query/4` + + * `query!(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query!/4` + + * `query_many(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query_many/4` + + * `query_many!(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query_many!/4` + + * `to_sql(type, query)` - + shortcut for `Ecto.Adapters.SQL.to_sql/3` + + Generally speaking, you must invoke those functions directly from + your repository, for example: `MyApp.Repo.query("SELECT true")`. + + You can also invoke them directly from `Ecto.Adapters.SQL`, but + keep in mind that in such cases the "dynamic repository" functionality + is not available by default. 
Instead, you must explicitly call + `YouRepo.get_dynamic_repo()` and pass it as first argument. + + ## Migrations + + `ecto_sql` supports database migrations. You can generate a migration + with: + + $ mix ecto.gen.migration create_posts + + This will create a new file inside `priv/repo/migrations` with the + `change` function. Check `Ecto.Migration` for more information. + + To interface with migrations, developers typically use mix tasks: + + * `mix ecto.migrations` - lists all available migrations and their status + * `mix ecto.migrate` - runs a migration + * `mix ecto.rollback` - rolls back a previously run migration + + If you want to run migrations programmatically, see `Ecto.Migrator`. + + ## SQL sandbox + + `ecto_sql` provides a sandbox for testing. The sandbox wraps each + test in a transaction, making sure the tests are isolated and can + run concurrently. See `Ecto.Adapters.SQL.Sandbox` for more information. + + ## Structure load and dumping + + If you have an existing database, you may want to dump its existing + structure and make it reproducible from within Ecto. This can be + achieved with two Mix tasks: + + * `mix ecto.load` - loads an existing structure into the database + * `mix ecto.dump` - dumps the existing database structure to the filesystem + + For creating and dropping databases, see `mix ecto.create` + and `mix ecto.drop` that are included as part of Ecto. + + ## Custom adapters + + Developers can implement their own SQL adapters by using + `Ecto.Adapters.SQL` and by implementing the callbacks required + by `Ecto.Adapters.SQL.Connection` for handling connections and + performing queries. The connection handling and pooling for SQL + adapters should be built using the `DBConnection` library. + + When using `Ecto.Adapters.SQL`, the following options are required: + + * `:driver` (required) - the database driver library. 
+ For example: `:postgrex` + + """ + + require Logger + + @type query_result :: %{ + :rows => nil | [[term] | binary], + :num_rows => non_neg_integer, + optional(atom) => any + } + + @type query_params :: [term] | %{(atom | String.t()) => term} + + @doc false + defmacro __using__(opts) do + quote do + @behaviour Ecto.Adapter + @behaviour Ecto.Adapter.Migration + @behaviour Ecto.Adapter.Queryable + @behaviour Ecto.Adapter.Schema + @behaviour Ecto.Adapter.Transaction + + opts = unquote(opts) + @conn __MODULE__.Connection + @driver Keyword.fetch!(opts, :driver) + + @impl true + defmacro __before_compile__(env) do + Ecto.Adapters.SQL.__before_compile__(@driver, env) + end + + @impl true + def ensure_all_started(config, type) do + Ecto.Adapters.SQL.ensure_all_started(@driver, config, type) + end + + @impl true + def init(config) do + Ecto.Adapters.SQL.init(@conn, @driver, config) + end + + @impl true + def checkout(meta, opts, fun) do + Ecto.Adapters.SQL.checkout(meta, opts, fun) + end + + @impl true + def checked_out?(meta) do + Ecto.Adapters.SQL.checked_out?(meta) + end + + @impl true + def loaders({:map, _}, type), do: [&Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_, type), do: [type] + + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_, type), do: [type] + + ## Query + + @impl true + def prepare(:all, query) do + {:cache, {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.all(query))}} + end + + def prepare(:update_all, query) do + {:cache, + {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.update_all(query))}} + end + + def prepare(:delete_all, query) do + {:cache, + {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.delete_all(query))}} + end + + @impl true + def execute(adapter_meta, query_meta, query, params, opts) do + 
Ecto.Adapters.SQL.execute(:named, adapter_meta, query_meta, query, params, opts) + end + + @impl true + def stream(adapter_meta, query_meta, query, params, opts) do + Ecto.Adapters.SQL.stream(adapter_meta, query_meta, query, params, opts) + end + + ## Schema + + @impl true + def autogenerate(:id), do: nil + def autogenerate(:embed_id), do: Ecto.UUID.generate() + def autogenerate(:binary_id), do: Ecto.UUID.bingenerate() + + @impl true + def insert_all( + adapter_meta, + schema_meta, + header, + rows, + on_conflict, + returning, + placeholders, + opts + ) do + Ecto.Adapters.SQL.insert_all( + adapter_meta, + schema_meta, + @conn, + header, + rows, + on_conflict, + returning, + placeholders, + opts + ) + end + + @impl true + def insert(adapter_meta, schema_meta, params, on_conflict, returning, opts) do + %{source: source, prefix: prefix} = schema_meta + {kind, conflict_params, _} = on_conflict + {fields, values} = :lists.unzip(params) + sql = @conn.insert(prefix, source, fields, [fields], on_conflict, returning, []) + + Ecto.Adapters.SQL.struct( + adapter_meta, + @conn, + sql, + :insert, + source, + [], + values ++ conflict_params, + kind, + returning, + opts + ) + end + + @impl true + def update(adapter_meta, schema_meta, fields, params, returning, opts) do + %{source: source, prefix: prefix} = schema_meta + {fields, field_values} = :lists.unzip(fields) + filter_values = Keyword.values(params) + sql = @conn.update(prefix, source, fields, params, returning) + + Ecto.Adapters.SQL.struct( + adapter_meta, + @conn, + sql, + :update, + source, + params, + field_values ++ filter_values, + :raise, + returning, + opts + ) + end + + @impl true + def delete(adapter_meta, schema_meta, params, returning, opts) do + %{source: source, prefix: prefix} = schema_meta + filter_values = Keyword.values(params) + sql = @conn.delete(prefix, source, params, returning) + + Ecto.Adapters.SQL.struct( + adapter_meta, + @conn, + sql, + :delete, + source, + params, + filter_values, + :raise, + 
returning, + opts + ) + end + + ## Transaction + + @impl true + def transaction(meta, opts, fun) do + Ecto.Adapters.SQL.transaction(meta, opts, fun) + end + + @impl true + def in_transaction?(meta) do + Ecto.Adapters.SQL.in_transaction?(meta) + end + + @impl true + def rollback(meta, value) do + Ecto.Adapters.SQL.rollback(meta, value) + end + + ## Migration + + @impl true + def execute_ddl(meta, definition, opts) do + Ecto.Adapters.SQL.execute_ddl(meta, @conn, definition, opts) + end + + defoverridable prepare: 2, + execute: 5, + insert: 6, + update: 6, + delete: 5, + insert_all: 8, + execute_ddl: 3, + loaders: 2, + dumpers: 2, + autogenerate: 1, + checkout: 3, + ensure_all_started: 2, + __before_compile__: 1 + end + end + + @timeout 15_000 + + @query_doc """ + Runs a custom SQL query. + + If the query was successful, it will return an `:ok` tuple containing + a map with at least two keys: + + * `:num_rows` - the number of rows affected + * `:rows` - the result set as a list. `nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + ## Options + + * `:log` - When false, does not log the query + * `:timeout` - Execute request timeout, accepts: `:infinity` (default: `#{@timeout}`); + + ## Examples + + iex> MyRepo.query("SELECT $1::integer + $2", [40, 2]) + {:ok, %{rows: [[42]], num_rows: 1}} + + iex> Ecto.Adapters.SQL.query(MyRepo, "SELECT $1::integer + $2", [40, 2]) + {:ok, %{rows: [[42]], num_rows: 1}} + """ + + @query_bang_doc """ + Same as `query/3` but returns result directly without `:ok` tuple + and raises on invalid queries + """ + + @query_many_doc """ + Runs a custom SQL query that returns multiple results on the given repo. + + In case of success, it must return an `:ok` tuple containing a list of + maps with at least two keys: + + * `:num_rows` - the number of rows affected + + * `:rows` - the result set as a list. 
`nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + ## Options + + * `:log` - When false, does not log the query + * `:timeout` - Execute request timeout, accepts: `:infinity` (default: `#{@timeout}`); + + ## Examples + + iex> MyRepo.query_many("SELECT $1; SELECT $2;", [40, 2]) + {:ok, [%{rows: [[40]], num_rows: 1}, %{rows: [[2]], num_rows: 1}]} + + iex> Ecto.Adapters.SQL.query_many(MyRepo, "SELECT $1; SELECT $2;", [40, 2]) + {:ok, [%{rows: [[40]], num_rows: 1}, %{rows: [[2]], num_rows: 1}]} + """ + + @query_many_bang_doc """ + Same as `query_many/4` but returns result directly without `:ok` tuple + and raises on invalid queries + """ + + @to_sql_doc """ + Converts the given query to SQL according to its kind and the + adapter in the given repository. + + ## Examples + + The examples below are meant for reference. Each adapter will + return a different result: + + iex> MyRepo.to_sql(:all, Post) + {"SELECT p.id, p.title, p.inserted_at, p.created_at FROM posts as p", []} + + iex> MyRepo.to_sql(:update_all, from(p in Post, update: [set: [title: ^"hello"]])) + {"UPDATE posts AS p SET title = $1", ["hello"]} + + iex> Ecto.Adapters.SQL.to_sql(:all, MyRepo, Post) + {"SELECT p.id, p.title, p.inserted_at, p.created_at FROM posts as p", []} + """ + + @explain_doc """ + Executes an EXPLAIN statement or similar for the given query according to its kind and the + adapter in the given repository. 
+ + ## Examples + + # Postgres + iex> MyRepo.explain(:all, Post) + "Seq Scan on posts p0 (cost=0.00..12.12 rows=1 width=443)" + + iex> Ecto.Adapters.SQL.explain(Repo, :all, Post) + "Seq Scan on posts p0 (cost=0.00..12.12 rows=1 width=443)" + + # MySQL + iex> MyRepo.explain(:all, from(p in Post, where: p.title == "title")) |> IO.puts() + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + | id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra | + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + | 1 | SIMPLE | p0 | NULL | ALL | NULL | NULL | NULL | NULL | 1 | 100.0 | Using where | + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + + # Shared opts + iex> MyRepo.explain(:all, Post, analyze: true, timeout: 20_000) + "Seq Scan on posts p0 (cost=0.00..11.70 rows=170 width=443) (actual time=0.013..0.013 rows=0 loops=1)\\nPlanning Time: 0.031 ms\\nExecution Time: 0.021 ms" + + It's safe to execute it for updates and deletes, no data change will be committed: + + iex> MyRepo.explain(Repo, :update_all, from(p in Post, update: [set: [title: "new title"]])) + "Update on posts p0 (cost=0.00..11.70 rows=170 width=449)\\n -> Seq Scan on posts p0 (cost=0.00..11.70 rows=170 width=449)" + + This function is also available under the repository with name `explain`: + + iex> MyRepo.explain(:all, from(p in Post, where: p.title == "title")) + "Seq Scan on posts p0 (cost=0.00..12.12 rows=1 width=443)\\n Filter: ((title)::text = 'title'::text)" + + ### Options + + Built-in adapters support passing `opts` to the EXPLAIN statement according to the following: + + Adapter | Supported opts + ---------------- | -------------- + Postgrex | `analyze`, `verbose`, `costs`, `settings`, `buffers`, `timing`, `summary`, `format`, 
`plan`, `wrap_in_transaction` + MyXQL | `format`, `wrap_in_transaction` + + All options except `format` are boolean valued and default to `false`. + + The allowed `format` values are `:map`, `:yaml`, and `:text`: + * `:map` is the deserialized JSON encoding. + * `:yaml` and `:text` return the result as a string. + + The built-in adapters support the following formats: + * Postgrex: `:map`, `:yaml` and `:text` + * MyXQL: `:map` and `:text` + + The `wrap_in_transaction` option is a boolean that controls whether the command is run inside of a + transaction that is rolled back. This is useful when, for example, you'd like to use `analyze: true` + on an update or delete query without modifying data. Defaults to `true`. + + The `:plan` option in Postgrex can take the values `:custom` or `:fallback_generic`. When `:custom` + is specified, the explain plan generated will consider the specific values of the query parameters + that are supplied. When using `:fallback_generic`, the specific values of the query parameters will + be ignored. `:fallback_generic` does not use PostgreSQL's built-in support for a generic explain + plan (available as of PostgreSQL 16), but instead uses a special implementation that works for PostgreSQL + versions 12 and above. Defaults to `:custom`. + + Any other value passed to `opts` will be forwarded to the underlying adapter query function, including + shared Repo options such as `:timeout`. Non built-in adapters may have specific behaviour and you should + consult their documentation for more details. + + For version compatibility, please check your database's documentation: + + * _Postgrex_: [PostgreSQL doc](https://www.postgresql.org/docs/current/sql-explain.html). + * _MyXQL_: [MySQL doc](https://dev.mysql.com/doc/refman/8.0/en/explain.html). + + """ + + @disconnect_all_doc """ + Forces all connections in the repo pool to disconnect within the given interval. 
+ + Once this function is called, the pool will disconnect all of its connections + as they are checked in or as they are pinged. Checked in connections will be + randomly disconnected within the given time interval. Pinged connections are + immediately disconnected - as they are idle (according to `:idle_interval`). + + If the connection has a backoff configured (which is the case by default), + disconnecting means an attempt at a new connection will be done immediately + after, without starting a new process for each connection. However, if backoff + has been disabled, the connection process will terminate. In such cases, + disconnecting all connections may cause the pool supervisor to restart + depending on the max_restarts/max_seconds configuration of the pool, + so you will want to set those carefully. + """ + + @doc @to_sql_doc + @spec to_sql(:all | :update_all | :delete_all, Ecto.Repo.t(), Ecto.Queryable.t()) :: + {String.t(), query_params} + def to_sql(kind, repo, queryable) do + case Ecto.Adapter.Queryable.prepare_query(kind, repo, queryable) do + {{:cached, _update, _reset, {_id, cached}}, params} -> + {String.Chars.to_string(cached), params} + + {{:cache, _update, {_id, prepared}}, params} -> + {prepared, params} + + {{:nocache, {_id, prepared}}, params} -> + {prepared, params} + end + end + + @doc @explain_doc + @spec explain( + pid() | Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + :all | :update_all | :delete_all, + Ecto.Queryable.t(), + opts :: Keyword.t() + ) :: String.t() | Exception.t() | list(map) + def explain(repo, operation, queryable, opts \\ []) + + def explain(repo, operation, queryable, opts) when is_atom(repo) or is_pid(repo) do + wrap_in_transaction? 
= Keyword.get(opts, :wrap_in_transaction, true) + explain(Ecto.Adapter.lookup_meta(repo), operation, queryable, wrap_in_transaction?, opts) + end + + def explain(%{repo: repo} = adapter_meta, operation, queryable, true, opts) do + Ecto.Multi.new() + |> Ecto.Multi.run(:explain, fn _, _ -> + {prepared, prepared_params} = to_sql(operation, repo, queryable) + sql_call(adapter_meta, :explain_query, [prepared], prepared_params, opts) + end) + |> Ecto.Multi.run(:rollback, fn _, _ -> + {:error, :forced_rollback} + end) + |> repo.transaction(opts) + |> case do + {:error, :rollback, :forced_rollback, %{explain: result}} -> result + {:error, :explain, error, _} -> raise error + _ -> raise "unable to execute explain" + end + end + + def explain(%{repo: repo} = adapter_meta, operation, queryable, false, opts) do + {prepared, prepared_params} = to_sql(operation, repo, queryable) + sql_call(adapter_meta, :explain_query, [prepared], prepared_params, opts) + end + + @doc @disconnect_all_doc + @spec disconnect_all( + pid | Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + non_neg_integer, + opts :: Keyword.t() + ) :: :ok + def disconnect_all(repo, interval, opts \\ []) + + def disconnect_all(repo, interval, opts) when is_atom(repo) or is_pid(repo) do + disconnect_all(Ecto.Adapter.lookup_meta(repo), interval, opts) + end + + def disconnect_all(adapter_meta, interval, opts) do + case adapter_meta do + %{partition_supervisor: {name, count}} -> + 1..count + |> Enum.map(fn i -> + Task.async(fn -> + DBConnection.disconnect_all({:via, PartitionSupervisor, {name, i}}, interval, opts) + end) + end) + |> Task.await_many(:infinity) + + :ok + + %{pid: pool} -> + DBConnection.disconnect_all(pool, interval, opts) + end + end + + @doc """ + Returns a stream that runs a custom SQL query on given repo when reduced. + + In case of success it is a enumerable containing maps with at least two keys: + + * `:num_rows` - the number of rows affected + + * `:rows` - the result set as a list. 
`nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + In case of failure it raises an exception. + + If the adapter supports a collectable stream, the stream may also be used as + the collectable in `Enum.into/3`. Behaviour depends on the adapter. + + ## Options + + * `:log` - When false, does not log the query + * `:max_rows` - The number of rows to load from the database as we stream + + ## Examples + + iex> Ecto.Adapters.SQL.stream(MyRepo, "SELECT $1::integer + $2", [40, 2]) |> Enum.to_list() + [%{rows: [[42]], num_rows: 1}] + + """ + @spec stream(Ecto.Repo.t(), String.t(), query_params, Keyword.t()) :: Enum.t() + def stream(repo, sql, params \\ [], opts \\ []) do + repo + |> Ecto.Adapter.lookup_meta() + |> Ecto.Adapters.SQL.Stream.build(sql, params, opts) + end + + @doc @query_bang_doc + @spec query!( + pid() | Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + iodata, + query_params, + Keyword.t() + ) :: + query_result + def query!(repo, sql, params \\ [], opts \\ []) do + case query(repo, sql, params, opts) do + {:ok, result} -> result + {:error, err} -> raise_sql_call_error(err) + end + end + + @doc @query_doc + @spec query( + pid() | Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + iodata, + query_params, + Keyword.t() + ) :: + {:ok, query_result} | {:error, Exception.t()} + def query(repo, sql, params \\ [], opts \\ []) + + def query(repo, sql, params, opts) when is_atom(repo) or is_pid(repo) do + query(Ecto.Adapter.lookup_meta(repo), sql, params, opts) + end + + def query(adapter_meta, sql, params, opts) do + sql_call(adapter_meta, :query, [sql], params, opts) + end + + @doc @query_many_bang_doc + @spec query_many!( + Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + iodata, + query_params, + Keyword.t() + ) :: + [query_result] + def query_many!(repo, sql, params \\ [], opts \\ []) do + case query_many(repo, sql, params, 
opts) do + {:ok, result} -> result + {:error, err} -> raise_sql_call_error(err) + end + end + + @doc @query_many_doc + @spec query_many( + pid() | Ecto.Repo.t() | Ecto.Adapter.adapter_meta(), + iodata, + query_params, + Keyword.t() + ) :: {:ok, [query_result]} | {:error, Exception.t()} + def query_many(repo, sql, params \\ [], opts \\ []) + + def query_many(repo, sql, params, opts) when is_atom(repo) or is_pid(repo) do + query_many(Ecto.Adapter.lookup_meta(repo), sql, params, opts) + end + + def query_many(adapter_meta, sql, params, opts) do + sql_call(adapter_meta, :query_many, [sql], params, opts) + end + + defp sql_call(adapter_meta, callback, args, params, opts) do + %{ + pid: pool, + telemetry: telemetry, + sql: sql, + opts: default_opts, + log_stacktrace_mfa: log_stacktrace_mfa + } = adapter_meta + + conn = get_conn_or_pool(pool, adapter_meta) + opts = with_log(telemetry, log_stacktrace_mfa, params, opts ++ default_opts) + args = args ++ [params, opts] + apply(sql, callback, [conn | args]) + end + + defp put_source(opts, %{sources: sources}) when is_binary(elem(elem(sources, 0), 0)) do + {source, _, _} = elem(sources, 0) + [source: source] ++ opts + end + + defp put_source(opts, _) do + opts + end + + @doc """ + Checks if the given `table` exists. + + Returns `true` if the `table` exists in the `repo`, otherwise `false`. + The table is checked against the current database/schema in the connection. + """ + @spec table_exists?(Ecto.Repo.t(), table :: String.t(), opts :: Keyword.t()) :: boolean + def table_exists?(repo, table, opts \\ []) when is_atom(repo) do + %{sql: sql} = adapter_meta = Ecto.Adapter.lookup_meta(repo) + {query, params} = sql.table_exists_query(table) + query!(adapter_meta, query, params, opts).num_rows != 0 + end + + # Returns a formatted table for a given query `result`. 
+ # + # ## Examples + # + # iex> Ecto.Adapters.SQL.format_table(query) |> IO.puts() + # +---------------+---------+--------+ + # | title | counter | public | + # +---------------+---------+--------+ + # | My Post Title | 1 | NULL | + # +---------------+---------+--------+ + @doc false + @spec format_table(%{ + :columns => [String.t()] | nil, + :rows => [term()] | nil, + optional(atom) => any() + }) :: String.t() + def format_table(result) + + def format_table(nil), do: "" + def format_table(%{columns: nil}), do: "" + def format_table(%{columns: []}), do: "" + + def format_table(%{columns: columns, rows: nil}), + do: format_table(%{columns: columns, rows: []}) + + def format_table(%{columns: columns, rows: rows}) do + column_widths = + [columns | rows] + |> Enum.zip() + |> Enum.map(&Tuple.to_list/1) + |> Enum.map(fn column_with_rows -> + column_with_rows |> Enum.map(&binary_length/1) |> Enum.max() + end) + + [ + separator(column_widths), + "\n", + cells(columns, column_widths), + "\n", + separator(column_widths), + "\n", + Enum.map(rows, &(cells(&1, column_widths) ++ ["\n"])), + separator(column_widths) + ] + |> IO.iodata_to_binary() + end + + # NULL + defp binary_length(nil), do: 4 + defp binary_length(binary) when is_binary(binary), do: String.length(binary) + defp binary_length(other), do: other |> inspect() |> String.length() + + defp separator(widths) do + Enum.map(widths, &[?+, ?-, String.duplicate("-", &1), ?-]) ++ [?+] + end + + defp cells(items, widths) do + cell = + [items, widths] + |> Enum.zip() + |> Enum.map(fn {item, width} -> [?|, " ", format_item(item, width), " "] end) + + [cell | [?|]] + end + + defp format_item(nil, width), do: String.pad_trailing("NULL", width) + defp format_item(item, width) when is_binary(item), do: String.pad_trailing(item, width) + + defp format_item(item, width) when is_number(item), + do: item |> inspect() |> String.pad_leading(width) + + defp format_item(item, width), do: item |> inspect() |> String.pad_trailing(width) + + 
## Callbacks + + @doc false + def __before_compile__(_driver, _env) do + query_doc = @query_doc + query_bang_doc = @query_bang_doc + query_many_doc = @query_many_doc + query_many_bang_doc = @query_many_bang_doc + to_sql_doc = @to_sql_doc + explain_doc = @explain_doc + disconnect_all_doc = @disconnect_all_doc + + quote generated: true do + @doc unquote(query_doc) + @spec query(iodata(), Ecto.Adapters.SQL.query_params(), Keyword.t()) :: + {:ok, Ecto.Adapters.SQL.query_result()} | {:error, Exception.t()} + def query(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query(get_dynamic_repo(), sql, params, opts) + end + + @doc unquote(query_bang_doc) + @spec query!(iodata(), Ecto.Adapters.SQL.query_params(), Keyword.t()) :: + Ecto.Adapters.SQL.query_result() + def query!(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query!(get_dynamic_repo(), sql, params, opts) + end + + @doc unquote(query_many_doc) + @spec query_many(iodata, Ecto.Adapters.SQL.query_params(), Keyword.t()) :: + {:ok, [Ecto.Adapters.SQL.query_result()]} | {:error, Exception.t()} + def query_many(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query_many(get_dynamic_repo(), sql, params, opts) + end + + @doc unquote(query_many_bang_doc) + @spec query_many!(iodata, Ecto.Adapters.SQL.query_params(), Keyword.t()) :: + [Ecto.Adapters.SQL.query_result()] + def query_many!(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query_many!(get_dynamic_repo(), sql, params, opts) + end + + @doc unquote(to_sql_doc) + @spec to_sql(:all | :update_all | :delete_all, Ecto.Queryable.t()) :: + {String.t(), Ecto.Adapters.SQL.query_params()} + def to_sql(operation, queryable) do + Ecto.Adapters.SQL.to_sql(operation, get_dynamic_repo(), queryable) + end + + @doc unquote(explain_doc) + @spec explain(:all | :update_all | :delete_all, Ecto.Queryable.t(), opts :: Keyword.t()) :: + String.t() | Exception.t() | list(map) + def explain(operation, queryable, opts \\ []) do + 
Ecto.Adapters.SQL.explain(get_dynamic_repo(), operation, queryable, opts) + end + + @doc unquote(disconnect_all_doc) + @spec disconnect_all(non_neg_integer, opts :: Keyword.t()) :: :ok + def disconnect_all(interval, opts \\ []) do + Ecto.Adapters.SQL.disconnect_all(get_dynamic_repo(), interval, opts) + end + end + end + + @doc false + def ensure_all_started(driver, _config, type) do + Application.ensure_all_started(driver, type) + end + + @pool_opts [:timeout, :pool, :pool_size] ++ + [:queue_target, :queue_interval, :ownership_timeout, :repo] + + @valid_log_levels ~w(false debug info notice warning error critical alert emergency)a + + @doc false + def init(connection, driver, config) do + unless Code.ensure_loaded?(connection) do + raise """ + could not find #{inspect(connection)}. + + Please verify you have added #{inspect(driver)} as a dependency: + + {#{inspect(driver)}, ">= 0.0.0"} + + And remember to recompile Ecto afterwards by cleaning the current build: + + mix deps.clean --build ecto + """ + end + + log = Keyword.get(config, :log, :debug) + + log_stacktrace_mfa = + Keyword.get(config, :log_stacktrace_mfa, {__MODULE__, :first_non_ecto_stacktrace, [1]}) + + if log not in @valid_log_levels do + raise """ + invalid value for :log option in Repo config + + The accepted values for the :log option are: + #{Enum.map_join(@valid_log_levels, ", ", &inspect/1)} + + See https://hexdocs.pm/ecto/Ecto.Repo.html for more information. 
+ """ + end + + stacktrace = Keyword.get(config, :stacktrace) + telemetry_prefix = Keyword.fetch!(config, :telemetry_prefix) + telemetry = {config[:repo], log, telemetry_prefix ++ [:query]} + + {name, config} = Keyword.pop(config, :name, config[:repo]) + {pool_count, config} = Keyword.pop(config, :pool_count, 1) + {pool, config} = pool_config(config) + child_spec = connection.child_spec(config) + + meta = %{ + telemetry: telemetry, + sql: connection, + stacktrace: stacktrace, + log_stacktrace_mfa: log_stacktrace_mfa, + opts: Keyword.take(config, @pool_opts) + } + + if pool_count > 1 do + if name == nil do + raise ArgumentError, "the option :pool_count requires a :name" + end + + if pool == DBConnection.Ownership do + raise ArgumentError, "the option :pool_count does not work with the SQL sandbox" + end + + name = Module.concat(name, PartitionSupervisor) + partition_opts = [name: name, child_spec: child_spec, partitions: pool_count] + child_spec = Supervisor.child_spec({PartitionSupervisor, partition_opts}, []) + {:ok, child_spec, Map.put(meta, :partition_supervisor, {name, pool_count})} + else + {:ok, child_spec, meta} + end + end + + defp pool_config(config) do + {pool, config} = Keyword.pop(config, :pool, DBConnection.ConnectionPool) + + pool = + if Code.ensure_loaded?(pool) && function_exported?(pool, :unboxed_run, 2) do + DBConnection.Ownership + else + pool + end + + {pool, [pool: pool] ++ config} + end + + @doc false + def checkout(adapter_meta, opts, callback) do + checkout_or_transaction(:run, adapter_meta, opts, callback) + end + + @doc false + def checked_out?(adapter_meta) do + %{pid: pool} = adapter_meta + get_conn(pool) != nil + end + + ## Query + + @doc false + def insert_all( + adapter_meta, + schema_meta, + conn, + header, + rows, + on_conflict, + returning, + placeholders, + opts + ) do + %{source: source, prefix: prefix} = schema_meta + {_, conflict_params, _} = on_conflict + + {rows, params} = + case rows do + {%Ecto.Query{} = query, params} -> 
{query, Enum.reverse(params)} + rows -> unzip_inserts(header, rows) + end + + sql = conn.insert(prefix, source, header, rows, on_conflict, returning, placeholders) + + opts = + if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_insert_all_#{source}"} | opts] + else + opts + end + + all_params = placeholders ++ Enum.reverse(params, conflict_params) + + %{num_rows: num, rows: rows} = query!(adapter_meta, sql, all_params, [source: source] ++ opts) + {num, rows} + end + + defp unzip_inserts(header, rows) do + Enum.map_reduce(rows, [], fn fields, params -> + Enum.map_reduce(header, params, fn key, acc -> + case :lists.keyfind(key, 1, fields) do + {^key, {%Ecto.Query{} = query, query_params}} -> + {{query, length(query_params)}, Enum.reverse(query_params, acc)} + + {^key, {:placeholder, placeholder_index}} -> + {{:placeholder, Integer.to_string(placeholder_index)}, acc} + + {^key, value} -> + {key, [value | acc]} + + false -> + {nil, acc} + end + end) + end) + end + + @doc false + def execute(prepare, adapter_meta, query_meta, prepared, params, opts) do + %{num_rows: num, rows: rows} = + execute!(prepare, adapter_meta, prepared, params, put_source(opts, query_meta)) + + {num, rows} + end + + defp execute!(prepare, adapter_meta, {:cache, update, {id, prepared}}, params, opts) do + name = prepare_name(prepare, id) + + case sql_call(adapter_meta, :prepare_execute, [name, prepared], params, opts) do + {:ok, query, result} -> + maybe_update_cache(prepare, update, {id, query}) + result + + {:error, err} -> + raise_sql_call_error(err) + end + end + + defp execute!( + :unnamed = prepare, + adapter_meta, + {:cached, _update, _reset, {id, cached}}, + params, + opts + ) do + name = prepare_name(prepare, id) + prepared = String.Chars.to_string(cached) + + case sql_call(adapter_meta, :prepare_execute, [name, prepared], params, opts) do + {:ok, _query, result} -> + result + + {:error, err} -> + raise_sql_call_error(err) + end + end + + defp execute!( + 
:named = _prepare, + adapter_meta, + {:cached, update, reset, {id, cached}}, + params, + opts + ) do + case sql_call(adapter_meta, :execute, [cached], params, opts) do + {:ok, query, result} -> + update.({id, query}) + result + + {:ok, result} -> + result + + {:error, err} -> + raise_sql_call_error(err) + + {:reset, err} -> + reset.({id, String.Chars.to_string(cached)}) + raise_sql_call_error(err) + end + end + + defp execute!(_prepare, adapter_meta, {:nocache, {_id, prepared}}, params, opts) do + case sql_call(adapter_meta, :query, [prepared], params, opts) do + {:ok, res} -> res + {:error, err} -> raise_sql_call_error(err) + end + end + + defp prepare_name(:named, id), do: "ecto_" <> Integer.to_string(id) + defp prepare_name(:unnamed, _id), do: "" + + defp maybe_update_cache(:named = _prepare, update, value), do: update.(value) + defp maybe_update_cache(:unnamed = _prepare, _update, _value), do: :noop + + @doc false + def stream(adapter_meta, query_meta, prepared, params, opts) do + do_stream(adapter_meta, prepared, params, put_source(opts, query_meta)) + end + + defp do_stream(adapter_meta, {:cache, _, {_, prepared}}, params, opts) do + prepare_stream(adapter_meta, prepared, params, opts) + end + + defp do_stream(adapter_meta, {:cached, _, _, {_, cached}}, params, opts) do + prepare_stream(adapter_meta, String.Chars.to_string(cached), params, opts) + end + + defp do_stream(adapter_meta, {:nocache, {_id, prepared}}, params, opts) do + prepare_stream(adapter_meta, prepared, params, opts) + end + + defp prepare_stream(adapter_meta, prepared, params, opts) do + adapter_meta + |> Ecto.Adapters.SQL.Stream.build(prepared, params, opts) + |> Stream.map(fn %{num_rows: nrows, rows: rows} -> {nrows, rows} end) + end + + defp raise_sql_call_error(%DBConnection.OwnershipError{} = err) do + message = err.message <> "\nSee Ecto.Adapters.SQL.Sandbox docs for more information." 
+ raise %{err | message: message} + end + + defp raise_sql_call_error(err), do: raise(err) + + @doc false + def reduce(adapter_meta, statement, params, opts, acc, fun) do + %{ + pid: pool, + telemetry: telemetry, + sql: sql, + log_stacktrace_mfa: log_stacktrace_mfa, + opts: default_opts + } = adapter_meta + + opts = with_log(telemetry, log_stacktrace_mfa, params, opts ++ default_opts) + + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> + sql + |> apply(:stream, [conn, statement, params, opts]) + |> Enumerable.reduce(acc, fun) + + _ -> + raise "cannot reduce stream outside of transaction" + end + end + + @doc false + def into(adapter_meta, statement, params, opts) do + %{ + pid: pool, + telemetry: telemetry, + sql: sql, + opts: default_opts, + log_stacktrace_mfa: log_stacktrace_mfa + } = adapter_meta + + opts = with_log(telemetry, log_stacktrace_mfa, params, opts ++ default_opts) + + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> + sql + |> apply(:stream, [conn, statement, params, opts]) + |> Collectable.into() + + _ -> + raise "cannot collect into stream outside of transaction" + end + end + + @doc false + def struct( + adapter_meta, + conn, + sql, + operation, + source, + params, + values, + on_conflict, + returning, + opts + ) do + opts = + if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_#{operation}_#{source}_#{length(params)}"} | opts] + else + opts + end + + case query(adapter_meta, sql, values, [source: source] ++ opts) do + {:ok, %{rows: nil, num_rows: 1}} -> + {:ok, []} + + {:ok, %{rows: [values], num_rows: 1}} -> + {:ok, Enum.zip(returning, values)} + + {:ok, %{num_rows: 0}} -> + if on_conflict == :nothing, do: {:ok, []}, else: {:error, :stale} + + {:ok, %{num_rows: num_rows}} when num_rows > 1 -> + raise Ecto.MultiplePrimaryKeyError, + source: source, + params: params, + count: num_rows, + operation: operation + + {:error, err} -> + case conn.to_constraints(err, source: 
source) do + [] -> raise_sql_call_error(err) + constraints -> {:invalid, constraints} + end + end + end + + ## Transactions + + @doc false + def transaction(adapter_meta, opts, callback) do + checkout_or_transaction(:transaction, adapter_meta, opts, callback) + end + + @doc false + def in_transaction?(%{pid: pool}) do + match?(%DBConnection{conn_mode: :transaction}, get_conn(pool)) + end + + @doc false + def rollback(%{pid: pool}, value) do + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> DBConnection.rollback(conn, value) + _ -> raise "cannot call rollback outside of transaction" + end + end + + ## Migrations + + @doc false + def execute_ddl(meta, conn, definition, opts) do + ddl_logs = + definition + |> conn.execute_ddl() + |> List.wrap() + |> Enum.map(&query!(meta, &1, [], opts)) + |> Enum.flat_map(&conn.ddl_logs/1) + + {:ok, ddl_logs} + end + + @doc false + def raise_migration_pool_size_error do + raise Ecto.MigrationError, """ + Migrations failed to run because the connection pool size is less than 2. + + Ecto requires a pool size of at least 2 to support concurrent migrators. + When migrations run, Ecto uses one connection to maintain a lock and + another to run migrations. 
+ + If you are running migrations with Mix, you can increase the number + of connections via the pool size option: + + mix ecto.migrate --pool-size 2 + + If you are running the Ecto.Migrator programmatically, you can configure + the pool size via your application config: + + config :my_app, Repo, + ..., + pool_size: 2 # at least + """ + end + + ## Log + + defp with_log(telemetry, log_stacktrace_mfa, params, opts) do + [log: &log(telemetry, log_stacktrace_mfa, params, &1, opts)] ++ opts + end + + defp log({repo, log, event_name}, log_stacktrace_mfa, params, entry, opts) do + %{ + connection_time: query_time, + decode_time: decode_time, + pool_time: queue_time, + idle_time: idle_time, + result: result, + query: query + } = entry + + source = Keyword.get(opts, :source) + query = String.Chars.to_string(query) + result = with {:ok, _query, res} <- result, do: {:ok, res} + stacktrace = Keyword.get(opts, :stacktrace) + log_params = opts[:cast_params] || params + + acc = if idle_time, do: [idle_time: idle_time], else: [] + + measurements = + log_measurements( + [query_time: query_time, decode_time: decode_time, queue_time: queue_time], + 0, + acc + ) + + metadata = %{ + type: :ecto_sql_query, + repo: repo, + result: result, + params: params, + cast_params: opts[:cast_params], + query: query, + source: source, + stacktrace: stacktrace, + options: Keyword.get(opts, :telemetry_options, []) + } + + if event_name = Keyword.get(opts, :telemetry_event, event_name) do + :telemetry.execute(event_name, measurements, metadata) + end + + case {opts[:log], log} do + {false, _level} -> + :ok + + {opts_level, false} when opts_level in [nil, true] -> + :ok + + {true, level} -> + Logger.log( + level, + fn -> + log_iodata( + measurements, + repo, + source, + query, + log_params, + result, + stacktrace, + opts[:log_stacktrace_mfa] || log_stacktrace_mfa + ) + end, + ansi_color: sql_color(query) + ) + + {opts_level, args_level} -> + Logger.log( + opts_level || args_level, + fn -> + log_iodata( 
+ measurements, + repo, + source, + query, + log_params, + result, + stacktrace, + opts[:log_stacktrace_mfa] || log_stacktrace_mfa + ) + end, + ansi_color: sql_color(query) + ) + end + + :ok + end + + defp log_measurements([{_, nil} | rest], total, acc), + do: log_measurements(rest, total, acc) + + defp log_measurements([{key, value} | rest], total, acc), + do: log_measurements(rest, total + value, [{key, value} | acc]) + + defp log_measurements([], total, acc), + do: Map.new([total_time: total] ++ acc) + + defp log_iodata(measurements, repo, source, query, params, result, stacktrace, stacktrace_mfa) do + [ + "QUERY", + ?\s, + log_ok_error(result), + log_ok_source(source), + log_time("db", measurements, :query_time, true), + log_time("decode", measurements, :decode_time, false), + log_time("queue", measurements, :queue_time, false), + log_time("idle", measurements, :idle_time, true), + ?\n, + query, + ?\s, + inspect(params, charlists: false), + log_stacktrace(stacktrace, repo, stacktrace_mfa) + ] + end + + defp log_ok_error({:ok, _res}), do: "OK" + defp log_ok_error({:error, _err}), do: "ERROR" + + defp log_ok_source(nil), do: "" + defp log_ok_source(source), do: " source=#{inspect(source)}" + + defp log_time(label, measurements, key, force) do + case measurements do + %{^key => time} -> + us = System.convert_time_unit(time, :native, :microsecond) + ms = div(us, 100) / 10 + + if force or ms > 0 do + [?\s, label, ?=, :io_lib_format.fwrite_g(ms), ?m, ?s] + else + [] + end + + %{} -> + [] + end + end + + defp log_stacktrace([_ | _] = stacktrace, repo, {module, function, args}) do + entries = apply(module, function, [stacktrace, %{repo: repo} | args]) + + Enum.with_index(entries, fn {module, function, arity, info}, idx -> + [ + ?\n, + IO.ANSI.light_black(), + if(idx == 0, do: "↳ ", else: " "), + Exception.format_mfa(module, function, arity), + log_stacktrace_info(info), + IO.ANSI.reset() + ] + end) + end + + defp log_stacktrace(_, _, _), do: [] + + defp 
log_stacktrace_info([file: file, line: line] ++ _) do + [", at: ", file, ?:, Integer.to_string(line)] + end + + defp log_stacktrace_info(_) do + [] + end + + @repo_modules [Ecto.Repo.Queryable, Ecto.Repo.Schema, Ecto.Repo.Transaction] + + @doc """ + Receives a stacktrace, and return the first N items before Ecto entries + + This function is used by default in the `:log_stacktrace_mfa` config, with + a size of 1. + """ + @spec first_non_ecto_stacktrace( + Exception.stacktrace(), + %{repo: Ecto.Repo.t()}, + non_neg_integer() + ) :: Exception.stacktrace() + def first_non_ecto_stacktrace(stacktrace, %{repo: repo}, size) do + stacktrace + |> Enum.reverse() + |> last_non_ecto_entries(repo, []) + |> Enum.take(size) + end + + defp last_non_ecto_entries([{mod, _, _, _} | _], repo, acc) + when mod == repo or mod in @repo_modules, + do: acc + + defp last_non_ecto_entries([entry | rest], repo, acc), + do: last_non_ecto_entries(rest, repo, [entry | acc]) + + defp last_non_ecto_entries([], _, acc), do: acc + + ## Connection helpers + + defp checkout_or_transaction(fun, adapter_meta, opts, callback) do + %{pid: pool, telemetry: telemetry, opts: default_opts, log_stacktrace_mfa: log_stacktrace_mfa} = + adapter_meta + + opts = with_log(telemetry, log_stacktrace_mfa, [], opts ++ default_opts) + + callback = fn conn -> + previous_conn = put_conn(pool, conn) + + try do + callback.() + after + reset_conn(pool, previous_conn) + end + end + + apply(DBConnection, fun, [get_conn_or_pool(pool, adapter_meta), callback, opts]) + end + + defp get_conn_or_pool(pool, adapter_meta) do + case :erlang.get(key(pool)) do + :undefined -> + case adapter_meta do + %{partition_supervisor: {name, _}} -> {:via, PartitionSupervisor, {name, self()}} + _ -> pool + end + + conn -> + conn + end + end + + defp get_conn(pool) do + Process.get(key(pool)) + end + + defp put_conn(pool, conn) do + Process.put(key(pool), conn) + end + + defp reset_conn(pool, conn) do + if conn do + put_conn(pool, conn) + else + 
Process.delete(key(pool)) + end + end + + defp key(pool), do: {__MODULE__, pool} + + defp sql_color("SELECT" <> _), do: :cyan + defp sql_color("ROLLBACK" <> _), do: :red + defp sql_color("LOCK" <> _), do: :white + defp sql_color("INSERT" <> _), do: :green + defp sql_color("UPDATE" <> _), do: :yellow + defp sql_color("DELETE" <> _), do: :red + defp sql_color("begin" <> _), do: :magenta + defp sql_color("commit" <> _), do: :magenta + defp sql_color(_), do: nil +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/application.ex b/deps/ecto_sql/lib/ecto/adapters/sql/application.ex new file mode 100644 index 0000000..88712fd --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/application.ex @@ -0,0 +1,14 @@ +defmodule Ecto.Adapters.SQL.Application do + @moduledoc false + use Application + + def start(_type, _args) do + children = [ + {DynamicSupervisor, strategy: :one_for_one, name: Ecto.MigratorSupervisor}, + {Task.Supervisor, name: Ecto.Adapters.SQL.StorageSupervisor} + ] + + opts = [strategy: :one_for_one, name: Ecto.Adapters.SQL.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex b/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex new file mode 100644 index 0000000..0b31175 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex @@ -0,0 +1,154 @@ +defmodule Ecto.Adapters.SQL.Connection do + @moduledoc """ + Specifies the behaviour to be implemented by all SQL connections. + """ + + @typedoc "The query name" + @type name :: String.t() + + @typedoc "The SQL statement" + @type statement :: String.t() + + @typedoc "The cached query which is a DBConnection Query" + @type cached :: map + + @type connection :: DBConnection.conn() + @type params :: [term] + + @doc """ + Receives options and returns `DBConnection` supervisor child + specification. 
+ """ + @callback child_spec(options :: Keyword.t()) :: :supervisor.child_spec() | {module, Keyword.t()} + + @doc """ + Prepares and executes the given query with `DBConnection`. + """ + @callback prepare_execute(connection, name, statement, params, options :: Keyword.t()) :: + {:ok, cached, term} | {:error, Exception.t()} + + @doc """ + Executes a cached query. + """ + @callback execute(connection, cached, params, options :: Keyword.t()) :: + {:ok, cached, term} | {:ok, term} | {:error | :reset, Exception.t()} + + @doc """ + Runs the given statement as a query. + """ + @callback query(connection, statement, params, options :: Keyword.t()) :: + {:ok, term} | {:error, Exception.t()} + + @doc """ + Runs the given statement as a multi-result query. + """ + @callback query_many(connection, statement, params, options :: Keyword.t()) :: + {:ok, term} | {:error, Exception.t()} + + @doc """ + Returns a stream that prepares and executes the given query with + `DBConnection`. + """ + @callback stream(connection, statement, params, options :: Keyword.t()) :: + Enum.t() + + @doc """ + Receives the exception returned by `c:query/4`. + + The constraints are in the keyword list and must return the + constraint type, like `:unique`, and the constraint name as + a string, for example: + + [unique: "posts_title_index"] + + Must return an empty list if the error does not come + from any constraint. + """ + @callback to_constraints(exception :: Exception.t(), options :: Keyword.t()) :: Keyword.t() + + ## Queries + + @doc """ + Receives a query and must return a SELECT query. + """ + @callback all(query :: Ecto.Query.t()) :: iodata + + @doc """ + Receives a query and values to update and must return an UPDATE query. + """ + @callback update_all(query :: Ecto.Query.t()) :: iodata + + @doc """ + Receives a query and must return a DELETE query. 
+ """ + @callback delete_all(query :: Ecto.Query.t()) :: iodata + + @doc """ + Returns an INSERT for the given `rows` in `table` returning + the given `returning`. + """ + @callback insert( + prefix :: String.t(), + table :: String.t(), + header :: [atom], + rows :: [[atom | nil]], + on_conflict :: Ecto.Adapter.Schema.on_conflict(), + returning :: [atom], + placeholders :: [term] + ) :: iodata + + @doc """ + Returns an UPDATE for the given `fields` in `table` filtered by + `filters` returning the given `returning`. + """ + @callback update( + prefix :: String.t(), + table :: String.t(), + fields :: [atom], + filters :: [atom], + returning :: [atom] + ) :: iodata + + @doc """ + Returns a DELETE for the `filters` returning the given `returning`. + """ + @callback delete( + prefix :: String.t(), + table :: String.t(), + filters :: [atom], + returning :: [atom] + ) :: iodata + + @doc """ + Executes an EXPLAIN query or similar depending on the adapter to obtains statistics of the given query. + + Receives the `connection`, `query`, `params` for the query, + and all `opts` including those related to the EXPLAIN statement and shared opts. + + Must execute the explain query and return the result. + """ + @callback explain_query( + connection, + query :: String.t(), + params :: Keyword.t(), + opts :: Keyword.t() + ) :: + {:ok, term} | {:error, Exception.t()} + + ## DDL + + @doc """ + Receives a DDL command and returns a query that executes it. + """ + @callback execute_ddl(command :: Ecto.Adapter.Migration.command()) :: String.t() | [iodata] + + @doc """ + Receives a query result and returns a list of logs. + """ + @callback ddl_logs(result :: term) :: [{Logger.level(), Logger.message(), Logger.metadata()}] + + @doc """ + Returns a queryable to check if the given `table` exists. 
+ """ + @callback table_exists_query(table :: String.t()) :: {iodata, [term]} +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex b/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex new file mode 100644 index 0000000..59caf4a --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex @@ -0,0 +1,705 @@ +defmodule Ecto.Adapters.SQL.Sandbox do + @moduledoc ~S""" + A pool for concurrent transactional tests. + + The sandbox pool is implemented on top of an ownership mechanism. + When started, the pool is in automatic mode, which means the + repository will automatically check connections out as with any + other pool. + + The `mode/2` function can be used to change the pool mode from + automatic to either manual or shared. In the latter two modes, + the connection must be explicitly checked out before use. + When explicit checkouts are made, the sandbox will wrap the + connection in a transaction by default and control who has + access to it. This means developers have a safe mechanism for + running concurrent tests against the database. + + ## Database support + + While both PostgreSQL and MySQL support SQL Sandbox, only PostgreSQL + supports concurrent tests while running the SQL Sandbox. Therefore, do + not run concurrent tests with MySQL as you may run into deadlocks due to + its transaction implementation. + + ## Example + + The first step is to configure your database to use the + `Ecto.Adapters.SQL.Sandbox` pool. 
You set those options in your + `config/config.exs` (or preferably `config/test.exs`) if you + haven't yet: + + config :my_app, Repo, + pool: Ecto.Adapters.SQL.Sandbox + + Now with the test database properly configured, you can write + transactional tests: + + # At the end of your test_helper.exs + # Set the pool mode to manual for explicit checkouts + Ecto.Adapters.SQL.Sandbox.mode(Repo, :manual) + + defmodule PostTest do + # Once the mode is manual, tests can also be async + use ExUnit.Case, async: true + + setup do + # Explicitly get a connection before each test + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + :ok + end + + test "create post" do + # Use the repository as usual + assert %Post{} = Repo.insert!(%Post{}) + end + end + + ## Collaborating processes + + The example above is straight-forward because we have only + a single process using the database connection. However, + sometimes a test may need to interact with multiple processes, + all using the same connection so they all belong to the same + transaction. + + Before we discuss solutions, let's see what happens if we try + to use a connection from a new process without explicitly + checking it out first: + + setup do + # Explicitly get a connection before each test + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + end + + test "calls worker that runs a query" do + GenServer.call(MyApp.Worker, :run_query) + end + + The test above will fail with an error similar to: + + ** (DBConnection.OwnershipError) cannot find ownership process for #PID<0.35.0> + + That's because the `setup` block is checking out the connection only + for the test process. Once the worker attempts to perform a query, + there is no connection assigned to it and it will fail. + + The sandbox module provides two ways of doing so, via allowances or + by running in shared mode. 
+ + ### Allowances + + The idea behind allowances is that you can explicitly tell a process + which checked out connection it should use, allowing multiple processes + to collaborate over the same connection. Let's give it a try: + + test "calls worker that runs a query" do + allow = Process.whereis(MyApp.Worker) + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), allow) + GenServer.call(MyApp.Worker, :run_query) + end + + And that's it, by calling `allow/3`, we are explicitly assigning + the parent's connection (i.e. the test process' connection) to + the task. + + Besides calling `allow/3` allowance can also be provided to processes + via [Caller Tracking](`m:Task#module-ancestor-and-caller-tracking`). + + Because allowances use an explicit mechanism, their advantage + is that you can still run your tests in async mode. The downside + is that you need to explicitly control and allow every single + process. This is not always possible. In such cases, you will + want to use shared mode. + + ### Shared mode + + Shared mode allows a process to share its connection with any other + process automatically, without relying on explicit allowances. + Let's change the example above to use shared mode: + + setup do + # Explicitly get a connection before each test + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + # Setting the shared mode must be done only after checkout + Ecto.Adapters.SQL.Sandbox.mode(Repo, {:shared, self()}) + end + + test "calls worker that runs a query" do + GenServer.call(MyApp.Worker, :run_query) + end + + By calling `mode({:shared, self()})`, any process that needs + to talk to the database will now use the same connection as the + one checked out by the test process during the `setup` block. + + Make sure to always check a connection out before setting the mode + to `{:shared, self()}`. 
+ + The advantage of shared mode is that by calling a single function, + you will ensure all upcoming processes and operations will use that + shared connection, without a need to explicitly allow them. The + downside is that tests can no longer run concurrently in shared mode. + + Also, beware that if the test process terminates while the worker is + using the connection, the connection will be taken away from the worker, + which will error. Therefore it is important to guarantee the work is done + before the test concludes. In the example above, we are using a `call`, + which is synchronous, avoiding the problem, but you may need to explicitly + flush the worker or terminate it under such scenarios in your tests. + + ### Summing up + + There are two mechanisms for explicit ownerships: + + * Using allowances - requires explicit allowances. + Tests may run concurrently. + + * Using shared mode - does not require explicit allowances. + Tests cannot run concurrently. + + ## FAQ + + When running the sandbox mode concurrently, developers may run into + issues we explore in the upcoming sections. + + ### "owner exited" + + In some situations, you may see error reports similar to the one below: + + 23:59:59.999 [error] Postgrex.Protocol (#PID<>) disconnected: + ** (DBConnection.Error) owner #PID<> exited + Client #PID<> is still using a connection from owner + + Such errors are usually followed by another error report from another + process that failed while executing a database query. + + To understand the failure, we need to answer the question: who are the + owner and client processes? The owner process is the one that checks + out the connection, which, in the majority of cases, is the test process, + the one running your tests. In other words, the error happens because + the test process has finished, either because the test succeeded or + because it failed, while the client process was trying to get information + from the database. 
Since the owner process, the one that owns the + connection, no longer exists, Ecto will check the connection back in + and notify the client process using the connection that the connection + owner is no longer available. + + This can happen in different situations. For example, imagine you query + a GenServer in your test that is using a database connection: + + test "gets results from GenServer" do + {:ok, pid} = MyAppServer.start_link() + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid) + assert MyAppServer.get_my_data_fast(timeout: 1000) == [...] + end + + In the test above, we spawn the server and allow it to perform database + queries using the connection owned by the test process. Since we gave + a timeout of 1 second, in case the database takes longer than one second + to reply, the test process will fail, due to the timeout, making the + "owner down" message to be printed because the server process is still + waiting on a connection reply. + + In some situations, such failures may be intermittent. Imagine that you + allow a process that queries the database every half second: + + test "queries periodically" do + {:ok, pid} = PeriodicServer.start_link() + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid) + # assertions + end + + Because the server is querying the database from time to time, there is + a chance that, when the test exits, the periodic process may be querying + the database, regardless of test success or failure. + + To address this, you can tell ExUnit to manage your processes: + + test "queries periodically" do + pid = start_supervised!(PeriodicServer) + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid) + # assertions + end + + By using `start_supervised!/1`, ExUnit guarantees the process finishes + before your test (the connection owner). + + In some situations, however, the dynamic processes are directly started + inside a `DynamicSupervisor` or a `Task.Supervisor`. 
You can guarantee + proper termination in such scenarios by adding an `on_exit` callback + that waits until all supervised children terminate: + + on_exit(fn -> + for {_, pid, _, _} <- DynamicSupervisor.which_children(MyApp.DynamicSupervisor) do + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, _, _, _}, :infinity + end + end) + + ### "owner timed out because it owned the connection for longer than Nms" + + In some situations, you may see error reports similar to the one below: + + 09:56:43.081 [error] Postgrex.Protocol (#PID<>) disconnected: + ** (DBConnection.ConnectionError) owner #PID<> timed out + because it owned the connection for longer than 120000ms + + If you have a long running test (or you're debugging with IEx.pry), + the timeout for the connection ownership may be too short. You can + increase the timeout by setting the `:ownership_timeout` options for + your repo config in `config/config.exs` (or preferably in `config/test.exs`): + + config :my_app, MyApp.Repo, + ownership_timeout: NEW_TIMEOUT_IN_MILLISECONDS + + The `:ownership_timeout` option is part of `DBConnection.Ownership` + and defaults to 120000ms. Timeouts are given as integers in milliseconds. + + Alternately, if this is an issue for only a handful of long-running tests, + you can pass an `:ownership_timeout` option when calling + `Ecto.Adapters.SQL.Sandbox.checkout/2` instead of setting a longer timeout + globally in your config. + + ### Deferred constraints + + Some databases allow to defer constraint validation to the transaction + commit time, instead of the particular statement execution time. This + feature, for instance, allows for a cyclic foreign key referencing. + Since the SQL Sandbox mode rolls back transactions, tests might report + false positives because deferred constraints are never checked by the + database. 
To manually force deferred constraints validation when using + PostgreSQL use the following line right at the end of your test case: + + Repo.query!("SET CONSTRAINTS ALL IMMEDIATE") + + ### Database locks and deadlocks + + Since the sandbox relies on concurrent transactional tests, there is + a chance your tests may trigger deadlocks in your database. This is + specially true with MySQL, where the solutions presented here are not + enough to avoid deadlocks and therefore making the use of concurrent tests + with MySQL prohibited. + + However, even on databases like PostgreSQL, performance degradations or + deadlocks may still occur. For example, imagine a "users" table with a + unique index on the "email" column. Now consider multiple tests are + trying to insert the same user email to the database. They will attempt + to retrieve the same database lock, causing only one test to succeed and + run while all other tests wait for the lock. + + In other situations, two different tests may proceed in a way that + each test retrieves locks desired by the other, leading to a situation + that cannot be resolved, a deadlock. For instance: + + ```text + Transaction 1: Transaction 2: + begin + begin + update posts where id = 1 + update posts where id = 2 + update posts where id = 1 + update posts where id = 2 + **deadlock** + ``` + + There are different ways to avoid such problems. One of them is + to make sure your tests work on distinct data. Regardless of + your choice between using fixtures or factories for test data, + make sure you get a new set of data per test. This is specially + important for data that is meant to be unique like user emails. 
+ + For example, instead of: + + def insert_user do + Repo.insert!(%User{email: "sample@example.com"}) + end + + prefer: + + def insert_user do + Repo.insert!(%User{email: "sample-#{counter()}@example.com"}) + end + + defp counter do + System.unique_integer([:positive]) + end + + In fact, avoiding unique emails like above can also have a positive + impact on the test suite performance, as it reduces contention and + wait between concurrent tests. We have heard reports where using + dynamic values for uniquely indexed columns, as we did for email + above, made a test suite run between 2x to 3x faster. + + Deadlocks may happen in other circumstances. If you believe you + are hitting a scenario that has not been described here, please + report an issue so we can improve our examples. As a last resort, + you can always disable the test triggering the deadlock from + running asynchronously by setting "async: false". + """ + + defmodule Connection do + @moduledoc false + if Code.ensure_loaded?(DBConnection) do + @behaviour DBConnection + end + + def connect(_opts) do + raise "should never be invoked" + end + + def disconnect(err, {conn_mod, state, _in_transaction?}) do + conn_mod.disconnect(err, state) + end + + def checkout(state), do: proxy(:checkout, state, []) + def checkin(state), do: proxy(:checkin, state, []) + def ping(state), do: proxy(:ping, state, []) + + def handle_begin(opts, {conn_mod, state, false}) do + opts = [mode: :savepoint] ++ opts + + case conn_mod.handle_begin(opts, state) do + {:ok, value, state} -> + {:ok, value, {conn_mod, state, true}} + + {kind, err, state} -> + {kind, err, {conn_mod, state, false}} + end + end + + def handle_commit(opts, {conn_mod, state, true}) do + opts = [mode: :savepoint] ++ opts + proxy(:handle_commit, {conn_mod, state, false}, [opts]) + end + + def handle_rollback(opts, {conn_mod, state, _}) do + opts = [mode: :savepoint] ++ opts + proxy(:handle_rollback, {conn_mod, state, false}, [opts]) + end + + def 
handle_status(opts, state), + do: proxy(:handle_status, state, [maybe_savepoint(opts, state)]) + + def handle_prepare(query, opts, state), + do: proxy(:handle_prepare, state, [query, maybe_savepoint(opts, state)]) + + def handle_execute(query, params, opts, state), + do: proxy(:handle_execute, state, [query, params, maybe_savepoint(opts, state)]) + + def handle_close(query, opts, state), + do: proxy(:handle_close, state, [query, maybe_savepoint(opts, state)]) + + def handle_declare(query, params, opts, state), + do: proxy(:handle_declare, state, [query, params, maybe_savepoint(opts, state)]) + + def handle_fetch(query, cursor, opts, state), + do: proxy(:handle_fetch, state, [query, cursor, maybe_savepoint(opts, state)]) + + def handle_deallocate(query, cursor, opts, state), + do: proxy(:handle_deallocate, state, [query, cursor, maybe_savepoint(opts, state)]) + + defp maybe_savepoint(opts, {_, _, in_transaction?}) do + if not in_transaction? and Keyword.get(opts, :sandbox_subtransaction, true) do + [mode: :savepoint] ++ opts + else + opts + end + end + + defp proxy(fun, {conn_mod, state, in_transaction?}, args) do + result = apply(conn_mod, fun, args ++ [state]) + pos = :erlang.tuple_size(result) + :erlang.setelement(pos, result, {conn_mod, :erlang.element(pos, result), in_transaction?}) + end + end + + @doc """ + Starts a process that will check out and own a connection, then returns that process's pid. + + The process is not linked to the caller, so it is your responsibility to ensure that it will be + stopped with `stop_owner/1`. In tests, this is done in an `ExUnit.Callbacks.on_exit/2` callback: + + setup tags do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(MyApp.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + :ok + end + + ## `start_owner!/2` vs `checkout/2` + + `start_owner!/2` should be used in place of `checkout/2`. 
+ + `start_owner!/2` solves the problem of unlinked processes started in a test outliving the test process and causing ownership errors. + For example, `LiveView`'s `live(...)` test helper starts a process linked to the LiveView supervisor, not the test process. + These errors can be eliminated by having the owner of the connection be a separate process from the test process. + + Outside of that scenario, `checkout/2` involves less overhead than this function and so can be preferable. + + ## Options + + * `:shared` - if `true`, the pool runs in the shared mode. Defaults to `false` + + The remaining options are passed to `checkout/2`. + """ + @doc since: "3.4.4" + @spec start_owner!(Ecto.Repo.t() | pid(), keyword()) :: pid() + def start_owner!(repo, opts \\ []) do + parent = self() + + {:ok, pid} = + Agent.start(fn -> + {shared, opts} = Keyword.pop(opts, :shared, false) + :ok = checkout(repo, opts) + + if shared do + :ok = mode(repo, {:shared, self()}) + else + :ok = allow(repo, self(), parent) + end + end) + + pid + end + + @doc """ + Stops an owner process started by `start_owner!/2`. + """ + @doc since: "3.4.4" + @spec stop_owner(pid()) :: :ok + def stop_owner(pid) do + GenServer.stop(pid) + end + + @doc """ + Sets the mode for the `repo` pool. + + The modes can be: + + * `:auto` - this is the default mode. When trying to use the repository, + processes can automatically checkout a connection without calling + `checkout/2` or `start_owner/2` before. This is the mode you will run + on before your test suite starts + + * `:manual` - in this mode, the connection always has to be explicitly + checked before used. Other processes are allowed to use the same + connection if they are explicitly allowed via `allow/4`. You usually + set the mode to manual at the end of your `test/test_helper.exs` file. 
+ This is also the mode you will run your async tests in + + * `{:shared, pid}` - after checking out a connection in manual mode, + you can change the mode to `{:shared, pid}`, where pid is the process + that owns the connection, most often `{:shared, self()}`. This makes it + so all processes can use the same connection as the one owned by the + current process. This is the mode you will run your sync tests in + + Whenever you change the mode to `:manual` or `:auto`, all existing + connections are checked in. Therefore, it is recommend to set those + modes before your test suite starts, as otherwise you will check in + connections being used in any other test running concurrently. + + If successful, returns `:ok` (this is always successful for `:auto` + and `:manual` modes). It may return `:not_owner` or `:not_found` + when setting `{:shared, pid}` and the given `pid` does not own any + connection for the repo. May return `:already_shared` if another + process set the ownership mode to `{:shared, _}` and is still alive. + """ + @spec mode(Ecto.Repo.t() | pid(), :auto | :manual | {:shared, pid()}) :: + :ok | :already_shared | :not_owner | :not_found + def mode(repo, mode) + when (is_atom(repo) or is_pid(repo)) and mode in [:auto, :manual] + when (is_atom(repo) or is_pid(repo)) and elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do + %{pid: pool, opts: opts} = lookup_meta!(repo) + DBConnection.Ownership.ownership_mode(pool, mode, opts) + end + + @doc """ + Checks a connection out for the given `repo`. + + The process calling `checkout/2` will own the connection + until it calls `checkin/2` or until it crashes in which case + the connection will be automatically reclaimed by the pool. + + If successful, returns `:ok`. If the caller already has a + connection, it returns `{:already, :owner | :allowed}`. + + ## Options + + * `:sandbox` - when true the connection is wrapped in + a transaction. Defaults to true. 
+ + * `:isolation` - set the query to the given isolation level. + + * `:ownership_timeout` - limits how long the connection can be + owned. Defaults to the value in your repo config in + `config/config.exs` (or preferably in `config/test.exs`), or + 120000 ms if not set. The timeout exists for sanity checking + purposes, to ensure there is no connection leakage, and can + be bumped whenever necessary. + + """ + @spec checkout(Ecto.Repo.t() | pid(), keyword()) :: :ok | {:already, :owner | :allowed} + def checkout(repo, opts \\ []) when is_atom(repo) or is_pid(repo) do + %{pid: pool, opts: pool_opts} = lookup_meta!(repo) + + pool_opts = + if Keyword.get(opts, :sandbox, true) do + [ + post_checkout: &post_checkout(&1, &2, opts), + pre_checkin: &pre_checkin(&1, &2, &3, opts) + ] ++ pool_opts + else + pool_opts + end + + pool_opts_overrides = Keyword.take(opts, [:ownership_timeout, :isolation_level]) + pool_opts = Keyword.merge(pool_opts, pool_opts_overrides) + + case DBConnection.Ownership.ownership_checkout(pool, pool_opts) do + :ok -> + if isolation = opts[:isolation] do + set_transaction_isolation_level(repo, isolation) + end + + :ok + + other -> + other + end + end + + defp set_transaction_isolation_level(repo, isolation) do + query = "SET TRANSACTION ISOLATION LEVEL #{isolation}" + + case Ecto.Adapters.SQL.query(repo, query, [], sandbox_subtransaction: false) do + {:ok, _} -> + :ok + + {:error, error} -> + checkin(repo, []) + raise error + end + end + + @doc """ + Checks in the connection back into the sandbox pool. + """ + @spec checkin(Ecto.Repo.t() | pid()) :: :ok | :not_owner | :not_found + def checkin(repo, _opts \\ []) when is_atom(repo) or is_pid(repo) do + %{pid: pool, opts: opts} = lookup_meta!(repo) + DBConnection.Ownership.ownership_checkin(pool, opts) + end + + @doc """ + Allows the `allow` process to use the same connection as `parent`. + + `allow` may be a PID or a locally registered name. 
+ + If the allowance is successful, this function returns `:ok`. If `allow` is already an + owner or already allowed, it returns `{:already, :owner | :allowed}`. If `parent` has not + checked out a connection from the repo, it returns `:not_found`. + """ + @spec allow(Ecto.Repo.t() | pid(), pid(), term()) :: + :ok | {:already, :owner | :allowed} | :not_found + def allow(repo, parent, allow, opts \\ []) when is_atom(repo) or is_pid(repo) do + case GenServer.whereis(allow) do + pid when is_pid(pid) -> + %{pid: pool, opts: meta_opts} = lookup_meta!(repo) + opts = Keyword.merge(meta_opts, opts) + DBConnection.Ownership.ownership_allow(pool, parent, pid, opts) + + other -> + raise """ + only PID or a locally registered process can be allowed to \ + use the same connection as parent but the lookup returned #{inspect(other)} + """ + end + end + + @doc """ + Runs a function outside of the sandbox. + """ + @spec unboxed_run(Ecto.Repo.t() | pid(), (-> result)) :: result when result: var + def unboxed_run(repo, fun) when is_atom(repo) or is_pid(repo) do + checkin(repo) + checkout(repo, sandbox: false) + + try do + fun.() + after + checkin(repo) + end + end + + defp lookup_meta!(repo) do + %{opts: opts} = + meta = + repo + |> find_repo() + |> Ecto.Adapter.lookup_meta() + + if opts[:pool] != DBConnection.Ownership do + raise """ + cannot invoke sandbox operation with pool #{inspect(opts[:pool])}. + To use the SQL Sandbox, configure your repository pool as: + + pool: #{inspect(__MODULE__)} + """ + end + + meta + end + + defp find_repo(repo) when is_atom(repo), do: repo.get_dynamic_repo() + defp find_repo(repo), do: repo + + defp post_checkout(conn_mod, conn_state, opts) do + case conn_mod.handle_begin([mode: :transaction] ++ opts, conn_state) do + {:ok, _, conn_state} -> + {:ok, Connection, {conn_mod, conn_state, false}} + + {:transaction, _conn_state} -> + raise """ + Ecto SQL sandbox transaction cannot be started because there is already\ + a transaction running. 
+ + This either means some code is starting a transaction before the sandbox\ + or a connection was not appropriately rolled back after use. + """ + + {_error_or_disconnect, err, conn_state} -> + {:disconnect, err, conn_mod, conn_state} + end + end + + defp pre_checkin(:checkin, Connection, {conn_mod, conn_state, _in_transaction?}, opts) do + case conn_mod.handle_rollback([mode: :transaction] ++ opts, conn_state) do + {:ok, _, conn_state} -> + {:ok, conn_mod, conn_state} + + {:idle, _conn_state} -> + raise """ + Ecto SQL sandbox transaction was already committed/rolled back. + + The sandbox works by running each test in a transaction and closing the\ + transaction afterwards. However, the transaction has already terminated.\ + Your test code is likely committing or rolling back transactions manually,\ + either by invoking procedures or running custom SQL commands. + + One option is to manually checkout a connection without a sandbox: + + Ecto.Adapters.SQL.Sandbox.checkout(repo, sandbox: false) + + But remember you will have to undo any database changes performed by such tests. 
+ """ + + {_error_or_disconnect, err, conn_state} -> + {:disconnect, err, conn_mod, conn_state} + end + end + + defp pre_checkin(_, Connection, {conn_mod, conn_state, _in_transaction?}, _opts) do + {:ok, conn_mod, conn_state} + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex b/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex new file mode 100644 index 0000000..eb815e7 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex @@ -0,0 +1,43 @@ +defmodule Ecto.Adapters.SQL.Stream do + @moduledoc false + + defstruct [:meta, :statement, :params, :opts] + + def build(meta, statement, params, opts) do + %__MODULE__{meta: meta, statement: statement, params: params, opts: opts} + end +end + +alias Ecto.Adapters.SQL.Stream + +defimpl Enumerable, for: Stream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun) do + %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream + Ecto.Adapters.SQL.reduce(meta, statement, params, opts, acc, fun) + end +end + +defimpl Collectable, for: Stream do + def into(stream) do + %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream + {state, fun} = Ecto.Adapters.SQL.into(meta, statement, params, opts) + {state, make_into(fun, stream)} + end + + defp make_into(fun, stream) do + fn + state, :done -> + fun.(state, :done) + stream + + state, acc -> + fun.(state, acc) + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds.ex b/deps/ecto_sql/lib/ecto/adapters/tds.ex new file mode 100644 index 0000000..0d45093 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds.ex @@ -0,0 +1,296 @@ +defmodule Ecto.Adapters.Tds do + @moduledoc """ + Adapter module for MSSQL Server using the TDS protocol. + + ## Options + + Tds options split in different categories described + below. All options can be given via the repository + configuration. 
+ + ### Connection options + + * `:hostname` - Server hostname + * `:port` - Server port (default: 1433) + * `:username` - Username + * `:password` - User password + * `:database` - the database to connect to + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Set to true if ssl should be used (default: false) + * `:ssl_opts` - A list of ssl options, see Erlang's `ssl` docs + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + We also recommend developers to consult the `Tds.start_link/1` documentation + for a complete list of all supported options for driver. + + ### Storage options + + * `:collation` - the database collation. Used during database creation but + it is ignored later + + If you need collation other than Latin1, add `tds_encoding` as dependency to + your project `mix.exs` file then amend `config/config.ex` by adding: + + config :tds, :text_encoder, Tds.Encoding + + This should give you extended set of most encoding. For complete list check + `Tds.Encoding` [documentation](https://hexdocs.pm/tds_encoding). + + ### After connect flags + + After connecting to MSSQL server, TDS will check if there are any flags set in + connection options that should affect connection session behaviour. All flags are + MSSQL standard *SET* options. 
The following flags are currently supported: + + * `:set_language` - sets session language (consult stored procedure output + `exec sp_helplanguage` for valid values) + * `:set_datefirst` - number in range 1..7 + * `:set_dateformat` - atom, one of `:mdy | :dmy | :ymd | :ydm | :myd | :dym` + * `:set_deadlock_priority` - atom, one of `:low | :high | :normal | -10..10` + * `:set_lock_timeout` - number in milliseconds > 0 + * `:set_remote_proc_transactions` - atom, one of `:on | :off` + * `:set_implicit_transactions` - atom, one of `:on | :off` + * `:set_allow_snapshot_isolation` - atom, one of `:on | :off` + (required if `Repo.transaction(fn -> ... end, isolation_level: :snapshot)` is used) + * `:set_read_committed_snapshot` - atom, one of `:on | :off` + + ## Limitations + + ### UUIDs + + MSSQL server has slightly different binary storage format for UUIDs (`uniqueidentifier`). + If you use `:binary_id`, the proper choice is made. Otherwise you must use the `Tds.Ecto.UUID` + type. Avoid using `Ecto.UUID` since it may cause unpredictable application behaviour. + + ### SQL `Char`, `VarChar` and `Text` types + + When working with binaries and strings,there are some limitations you should be aware of: + + - Strings that should be stored in mentioned sql types must be encoded to column + codepage (defined in collation). If collation is different than database collation, + it is not possible to store correct value into database since the connection + respects the database collation. Ecto does not provide way to override parameter + codepage. + + - If you need other than Latin1 or other than your database default collation, as + mentioned in "Storage Options" section, then manually encode strings using + `Tds.Encoding.encode/2` into desired codepage and then tag parameter as `:binary`. + Please be aware that queries that use this approach in where clauses can be 10x slower + due increased logical reads in database. 
+ + - You can't store VarChar codepoints encoded in one collation/codepage to column that + is encoded in different collation/codepage. You will always get wrong result. This is + not adapter or driver limitation but rather how string encoding works for single byte + encoded strings in MSSQL server. Don't be confused if you are always seeing latin1 chars, + they are simply in each codepoint table. + + In particular, if a field has the type `:text`, only raw binaries will be allowed. + To avoid above limitations always use `:string` (NVarChar) type for text if possible. + If you really need to use VarChar's column type, you can use the `Tds.Ecto.VarChar` + Ecto type. + + ### JSON support + + Even though the adapter will convert `:map` fields into JSON back and forth, + actual value is stored in NVarChar column. + + ### Query hints and table hints + + MSSQL supports both query hints and table hints: https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query + + For Ecto compatibility, the query hints must be given via the `lock` option, and they + will be translated to MSSQL's "OPTION". If you need to pass multiple options, you + can separate them by comma: + + from query, lock: "HASH GROUP, FAST 10" + + Table hints are specified as a list alongside a `from` or `join`: + + from query, hints: ["INDEX (IX_Employee_ManagerID)"] + + The `:migration_lock` will be treated as a table hint and defaults to "UPDLOCK". + + ### Multi Repo calls in transactions + + To avoid deadlocks in your app, we exposed `:isolation_level` repo transaction option. + This will tell to SQL Server Transaction Manager how to begin transaction. + By default, if this option is omitted, isolation level is set to `:read_committed`. 
+ + Any attempt to manually set the transaction isolation via queries, such as + + Ecto.Adapter.SQL.query("SET TRANSACTION ISOLATION LEVEL XYZ") + + will fail once explicit transaction is started using `c:Ecto.Repo.transaction/2` + and reset back to :read_committed. + + There is `Ecto.Query.lock/3` function can help by setting it to `WITH(NOLOCK)`. + This should allow you to do eventually consistent reads and avoid locks on given + table if you don't need to write to database. + + NOTE: after explicit transaction ends (commit or rollback) implicit transactions + will run as READ_COMMITTED. + """ + + use Ecto.Adapters.SQL, + driver: :tds + + require Logger + require Ecto.Query + + @behaviour Ecto.Adapter.Storage + + @doc false + def autogenerate(:binary_id), do: Tds.Ecto.UUID.bingenerate() + def autogenerate(:embed_id), do: Tds.Ecto.UUID.generate() + def autogenerate(type), do: super(type) + + @doc false + @impl true + def loaders({:map, _}, type), do: [&json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:map, type), do: [&json_decode/1, type] + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(:binary_id, type), do: [Tds.Ecto.UUID, type] + def loaders(_, type), do: [type] + + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers(:binary_id, type), do: [type, Tds.Ecto.UUID] + def dumpers(_, type), do: [type] + + defp bool_decode(<<0>>), do: {:ok, false} + defp bool_decode(<<1>>), do: {:ok, true} + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + defp bool_decode(x), do: {:ok, x} + + defp json_decode(x) when is_binary(x), do: {:ok, Tds.json_library().decode!(x)} + defp json_decode(x), do: {:ok, x} + + # Storage API + @doc false + @impl true + def storage_up(opts) do + database = Keyword.fetch!(opts, :database) + + command = + ~s(CREATE DATABASE [#{database}]) + |> concat_if(opts[:collation], &"COLLATE=#{&1}") + + case run_query(Keyword.put(opts, 
:database, "master"), command) do + {:ok, _} -> + :ok + + {:error, %{mssql: %{number: 1801}}} -> + {:error, :already_up} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + + defp concat_if(content, nil, _fun), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @doc false + @impl true + def storage_down(opts) do + database = Keyword.fetch!(opts, :database) + + case run_query(Keyword.put(opts, :database, "master"), "DROP DATABASE [#{database}]") do + {:ok, _} -> + :ok + + {:error, %{mssql: %{number: 3701}}} -> + {:error, :already_down} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = Keyword.fetch!(opts, :database) + + opts = Keyword.put(opts, :database, "master") + + check_database_query = + "SELECT [name] FROM [master].[sys].[databases] WHERE [name] = '#{database}'" + + case run_query(opts, check_database_query) do + {:ok, %{num_rows: 0}} -> :down + {:ok, %{num_rows: _}} -> :up + other -> {:error, other} + end + end + + defp run_query(opts, sql_command) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = Application.ensure_all_started(:tds) + + timeout = Keyword.get(opts, :timeout, 15_000) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + {:ok, pid} = Task.Supervisor.start_link() + + task = + Task.Supervisor.async_nolink(pid, fn -> + {:ok, conn} = Tds.start_link(opts) + value = Ecto.Adapters.Tds.Connection.execute(conn, sql_command, [], opts) + GenServer.stop(conn) + value + end) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + + {:ok, {:error, error}} -> + {:error, error} + + {:exit, {%{__struct__: struct} = error, _}} + when struct in [Tds.Error, DBConnection.Error] -> + {:error, error} + + {:exit, reason} -> + {:error, 
RuntimeError.exception(Exception.format_exit(reason))} + + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + @impl true + def supports_ddl_transaction? do + true + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts, repo: repo} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.merge(opts, timeout: :infinity, telemetry_options: [schema_migration: true]) + + {:ok, result} = + transaction(meta, opts, fn -> + query = + "exec sp_getapplock @Resource = 'ecto_#{inspect(repo)}', @LockMode = 'Exclusive', @LockOwner = 'Transaction', @LockTimeout = -1" + + Ecto.Adapters.SQL.query!(meta, query, [], opts) + fun.() + end) + + result + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex b/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex new file mode 100644 index 0000000..206d2f4 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex @@ -0,0 +1,1987 @@ +if Code.ensure_loaded?(Tds) do + defmodule Ecto.Adapters.Tds.Connection do + @moduledoc false + require Logger + alias Tds.Query + alias Ecto.Query.Tagged + alias Ecto.Adapters.SQL + require Ecto.Schema + + @behaviour Ecto.Adapters.SQL.Connection + + @impl true + def child_spec(opts) do + opts + |> Keyword.put_new(:use_elixir_calendar_types, true) + |> Tds.child_spec() + end + + @impl true + def prepare_execute(pid, _name, statement, params, opts \\ []) do + query = %Query{statement: statement} + params = prepare_params(params) + + opts = Keyword.put(opts, :parameters, params) + DBConnection.prepare_execute(pid, query, params, opts) + end + + @impl true + def execute(pid, statement, params, opts) when is_binary(statement) or is_list(statement) do + query = %Query{statement: statement} + params = prepare_params(params) + opts = Keyword.put(opts, :parameters, params) + + case DBConnection.prepare_execute(pid, query, params, opts) do + 
{:ok, _, %Tds.Result{columns: nil, num_rows: num_rows, rows: []}} + when num_rows >= 0 -> + {:ok, %Tds.Result{columns: nil, num_rows: num_rows, rows: nil}} + + {:ok, _, query} -> + {:ok, query} + + {:error, _} = err -> + err + end + end + + def execute(pid, %{} = query, params, opts) do + opts = Keyword.put_new(opts, :parameters, params) + params = prepare_params(params) + opts = Keyword.put(opts, :parameters, params) + + case DBConnection.prepare_execute(pid, query, params, opts) do + {:ok, _, query} -> {:ok, query} + {:error, _} = err -> err + end + end + + @impl true + def stream(_conn, _sql, _params, _opts) do + error!(nil, "Repo.stream is not supported in the Tds adapter") + end + + @impl true + def query(conn, sql, params, opts) do + params = prepare_params(params) + Tds.query(conn, sql, params, opts) + end + + @impl true + def query_many(_conn, _sql, _params, _opts) do + error!(nil, "query_many is not supported in the Tds adapter") + end + + @impl true + def to_constraints(%Tds.Error{mssql: %{number: code, msg_text: message}}, _opts) do + Tds.Error.get_constraint_violations(code, message) + end + + def to_constraints(_, _opts), do: [] + + def prepare_params(params) do + unless is_list(params) do + raise ArgumentError, "expected params to be a list, got: #{inspect(params)}" + end + + {params, _} = + Enum.map_reduce(params, 1, fn param, acc -> + case prepare_param(param) do + {value, type} -> {%Tds.Parameter{name: "@#{acc}", value: value, type: type}, acc + 1} + %Tds.Parameter{name: ""} = param -> {%{param | name: "@#{acc}"}, acc + 1} + %Tds.Parameter{name: <<"@", _::binary>>} = param -> {param, acc} + _ -> error!(nil, "Tds parameter names must begin with @") + end + end) + + params + end + + # Decimal + defp prepare_param(%Decimal{} = value) do + {value, :decimal} + end + + defp prepare_param(%NaiveDateTime{} = value) do + {value, :datetime2} + end + + defp prepare_param(%DateTime{} = value) do + {value, :datetimeoffset} + end + + defp prepare_param(%Date{} = 
value) do + {value, :date} + end + + defp prepare_param(%Time{} = value) do + {value, :time} + end + + defp prepare_param(%Tds.Parameter{type: nil, value: value} = param) do + {_value, type} = prepare_param(value) + %{param | type: type} + end + + defp prepare_param(%Tds.Parameter{} = param) do + param + end + + defp prepare_param(%{__struct__: module} = _value) do + # just in case dumpers/loaders are not defined for the this struct + error!( + nil, + "Tds adapter is unable to convert struct `#{inspect(module)}` into supported MSSQL types" + ) + end + + defp prepare_param(%{} = value), do: {json_library().encode!(value), :string} + defp prepare_param(value), do: prepare_raw_param(value) + + defp prepare_raw_param(value) when is_binary(value) do + type = if String.printable?(value), do: :string, else: :binary + {value, type} + end + + defp prepare_raw_param(value) when value == true, do: {1, :boolean} + defp prepare_raw_param(value) when value == false, do: {0, :boolean} + defp prepare_raw_param({_, :varchar} = value), do: value + defp prepare_raw_param(value), do: {value, nil} + + defp json_library(), do: Application.get_env(:tds, :json_library, Jason) + + ## Query + + @parent_as __MODULE__ + alias Ecto.Query + alias Ecto.Query.{BooleanExpr, ByExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) + + cte = cte(query, sources) + from = from(query, sources) + select = select(query, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, sources) + having = having(query, sources) + _window = window(query, sources) + combinations = combinations(query, as_prefix) + order_by = order_by(query, sources) + # limit = is handled in select (TOP X) + offset = offset(query, sources) + lock = lock(query, sources) + + if query.offset != nil and query.order_bys == [], + do: error!(query, "ORDER BY is mandatory when OFFSET is set") + + [cte, select, from, join, 
where, group_by, having, combinations, order_by, lock | offset] + end + + @impl true + def update_all(query) do + sources = create_names(query, []) + cte = cte(query, sources) + {table, name, _model} = elem(sources, 0) + + fields = update_fields(query, sources) + from = " FROM #{table} AS #{name}" + join = join(query, sources) + where = where(query, sources) + lock = lock(query, sources) + + [ + cte, + "UPDATE ", + name, + " SET ", + fields, + returning(query, 0, "INSERTED"), + from, + join, + where | lock + ] + end + + @impl true + def delete_all(query) do + sources = create_names(query, []) + cte = cte(query, sources) + {table, name, _model} = elem(sources, 0) + + delete = "DELETE #{name}" + from = " FROM #{table} AS #{name}" + join = join(query, sources) + where = where(query, sources) + lock = lock(query, sources) + + [cte, delete, returning(query, 0, "DELETED"), from, join, where | lock] + end + + @impl true + def insert(prefix, table, header, rows, on_conflict, returning, placeholders) do + counter_offset = length(placeholders) + 1 + [] = on_conflict(on_conflict, header) + returning = returning(returning, "INSERTED") + + values = + if header == [] do + [returning, " DEFAULT VALUES"] + else + [ + ?\s, + ?(, + quote_names(header), + ?), + returning + | insert_all(rows, counter_offset) + ] + end + + ["INSERT INTO ", quote_table(prefix, table), values] + end + + defp on_conflict({:raise, _, []}, _header) do + [] + end + + defp on_conflict({_, _, _}, _header) do + error!(nil, "Tds adapter supports only on_conflict: :raise") + end + + defp insert_all(%Ecto.Query{} = query, _counter) do + [?\s, all(query)] + end + + defp insert_all(rows, counter) do + sql = + intersperse_reduce(rows, ",", counter, fn row, counter -> + {row, counter} = insert_each(row, counter) + {[?(, row, ?)], counter} + end) + |> elem(0) + + [" VALUES " | sql] + end + + defp insert_each(values, counter) do + intersperse_reduce(values, ", ", counter, fn + nil, counter -> + {"DEFAULT", counter} + + 
{%Query{} = query, params_counter}, counter -> + {[?(, all(query), ?)], counter + params_counter} + + {:placeholder, placeholder_index}, counter -> + {[?@ | placeholder_index], counter} + + _, counter -> + {[?@ | Integer.to_string(counter)], counter + 1} + end) + end + + @impl true + def update(prefix, table, fields, filters, returning) do + {fields, count} = + intersperse_reduce(fields, ", ", 1, fn field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + {filters, _count} = + intersperse_reduce(filters, " AND ", count, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc + 1} + + {field, _value}, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + + field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + [ + "UPDATE ", + quote_table(prefix, table), + " SET ", + fields, + returning(returning, "INSERTED"), + " WHERE " | filters + ] + end + + @impl true + def delete(prefix, table, filters, returning) do + {filters, _} = + intersperse_reduce(filters, " AND ", 1, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc + 1} + + {field, _value}, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + + field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + [ + "DELETE FROM ", + quote_table(prefix, table), + returning(returning, "DELETED"), + " WHERE " | filters + ] + end + + @impl true + def explain_query(conn, query, params, opts) do + params = prepare_params(params) + + case Tds.query_multi(conn, build_explain_query(query), params, opts) do + {:ok, [_, %Tds.Result{} = result, _]} -> + {:ok, SQL.format_table(result)} + + error -> + error + end + end + + def build_explain_query(query) do + [ + "SET STATISTICS XML ON; ", + "SET STATISTICS PROFILE ON; ", + query, + "; ", + "SET STATISTICS XML OFF; ", + "SET STATISTICS PROFILE OFF;" + ] + |> IO.iodata_to_binary() + end + + ## Query generation + + binary_ops = [ 
+ ==: " = ", + !=: " <> ", + <=: " <= ", + >=: " >= ", + <: " < ", + >: " > ", + +: " + ", + -: " - ", + *: " * ", + /: " / ", + and: " AND ", + or: " OR ", + ilike: " LIKE ", + like: " LIKE " + ] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do + [ + "SELECT ", + distinct(distinct, sources, query), + limit(query, sources), + select(fields, sources, query) + ] + end + + defp distinct(nil, _sources, _query), do: [] + defp distinct(%ByExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%ByExpr{expr: false}, _sources, _query), do: [] + + defp distinct(%ByExpr{expr: exprs}, _sources, query) when is_list(exprs) do + error!( + query, + "DISTINCT with multiple columns is not supported by MsSQL. " <> + "Please use distinct(true) if you need distinct resultset" + ) + end + + defp select([], _sources, _query) do + "CAST(1 as bit)" + end + + defp select(fields, sources, query) do + Enum.map_intersperse(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {nil, source, nil} -> + error!( + query, + "Tds adapter does not support selecting all fields from fragment #{source}. " <> + "Please specify exactly which fields you want to select" + ) + + {source, _, nil} -> + error!( + query, + "Tds adapter does not support selecting all fields from #{source} without a schema. 
" <> + "Please specify a schema or specify exactly which fields you want in projection" + ) + + {_, source, _} -> + source + end + + {key, value} -> + [select_expr(value, sources, query), " AS ", quote_name(key)] + + value -> + select_expr(value, sources, query) + end) + end + + defp select_expr({:not, _, [expr]}, sources, query) do + [?~, ?(, select_expr(expr, sources, query), ?)] + end + + defp select_expr(value, sources, query), do: expr(value, sources, query) + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + + [" FROM ", from, " AS ", name, hints(hints)] + end + + defp cte(%{with_ctes: %WithExpr{queries: [_ | _] = queries}} = query, sources) do + ctes = Enum.map_intersperse(queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({_name, %{materialized: materialized}, _cte}, _sources, query) + when is_boolean(materialized) do + error!(query, "Tds adapter does not support materialized CTEs") + end + + defp cte_expr({name, opts, cte}, sources, query) do + operation_opt = Map.get(opts, :operation) + + [ + quote_name(name), + cte_header(cte, query), + " AS ", + cte_query(cte, sources, query, operation_opt) + ] + end + + defp cte_header(%QueryExpr{}, query) do + error!( + query, + "Tds adapter does not support fragment in CTE" + ) + end + + defp cte_header(%Ecto.Query{select: %{fields: fields}} = query, _) do + [ + " (", + Enum.map_intersperse(fields, ",", fn + {key, _} -> + quote_name(key) + + other -> + error!( + query, + "Tds adapter expected field name or alias in CTE header," <> + " instead got #{inspect(other)}" + ) + end), + ?) 
+ ] + end + + defp cte_query(query, sources, parent_query, nil) do + cte_query(query, sources, parent_query, :all) + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query, :all) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp cte_query(%Ecto.Query{} = query, _sources, _parent_query, operation) do + error!(query, "Tds adapter does not support data-modifying CTEs (operation: #{operation})") + end + + defp update_fields(%Query{updates: updates} = query, sources) do + for( + %{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, key, value, sources, query) + ) + |> Enum.intersperse(", ") + end + + defp update_op(:set, key, value, sources, query) do + {_table, name, _model} = elem(sources, 0) + [name, ?., quote_name(key), " = " | expr(value, sources, query)] + end + + defp update_op(:inc, key, value, sources, query) do + {_table, name, _model} = elem(sources, 0) + quoted = quote_name(key) + + [name, ?., quoted, " = ", name, ?., quoted, " + " | expr(value, sources, query)] + end + + defp update_op(command, _key, _value, _sources, query) do + error!(query, "Unknown update operation #{inspect(command)} for TDS") + end + + defp join(%{joins: []}, _sources), do: [] + + defp join(%{joins: joins} = query, sources) do + [ + ?\s, + Enum.map_intersperse(joins, ?\s, fn + %JoinExpr{on: %QueryExpr{expr: expr}, qual: qual, ix: ix, source: source, hints: hints} -> + {join, name} = get_source(query, sources, ix, source) + qual_text = join_qual(qual, query) + join = join || ["(", expr(source, sources, query) | ")"] + [qual_text, join, " AS ", name, hints(hints) | join_on(qual, expr, sources, query)] + end) + ] + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(:inner_lateral, true, _sources, _query), do: [] + defp join_on(:left_lateral, true, _sources, _query), do: [] + defp join_on(_qual, true, _sources, _query), do: [" ON 
1 = 1"] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner, _), do: "INNER JOIN " + defp join_qual(:left, _), do: "LEFT OUTER JOIN " + defp join_qual(:right, _), do: "RIGHT OUTER JOIN " + defp join_qual(:full, _), do: "FULL OUTER JOIN " + defp join_qual(:cross, _), do: "CROSS JOIN " + defp join_qual(:inner_lateral, _), do: "CROSS APPLY " + defp join_qual(:left_lateral, _), do: "OUTER APPLY " + + defp join_qual(qual, query), + do: error!(query, "join qualifier #{inspect(qual)} is not supported in the Tds adapter") + + defp where(%Query{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%Query{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp window(%{windows: []}, _sources), do: [] + + defp window(_query, _sources), + do: raise(RuntimeError, "Tds adapter does not support window functions") + + defp group_by(%{group_bys: []}, _sources), do: [] + + defp group_by(%{group_bys: group_bys} = query, sources) do + [ + " GROUP BY " + | Enum.map_intersperse(group_bys, ", ", fn %ByExpr{expr: expr} -> + Enum.map_intersperse(expr, ", ", &top_level_expr(&1, sources, query)) + end) + ] + end + + defp order_by(%{order_bys: []}, _sources), do: [] + + defp order_by(%{order_bys: order_bys} = query, sources) do + [ + " ORDER BY " + | Enum.map_intersperse(order_bys, ", ", fn %ByExpr{expr: expr} -> + Enum.map_intersperse(expr, ", ", &order_by_expr(&1, sources, query)) + end) + ] + end + + defp order_by_expr({dir, expr}, sources, query) do + str = top_level_expr(expr, sources, query) + + case dir do + :asc -> str + :desc -> [str | " DESC"] + _ -> error!(query, "#{dir} is not supported in ORDER BY in MSSQL") + end + end + + defp limit(%Query{limit: nil}, _sources), do: [] + + defp limit(%Query{limit: %{with_ties: true}} = query, _sources) do + error!(query, "Tds adapter does not support the `:with_ties` limit option") + end + 
+ defp limit( + %Query{ + limit: %{ + expr: expr + } + } = query, + sources + ) do + case Map.get(query, :offset) do + nil -> + ["TOP(", expr(expr, sources, query), ") "] + + _ -> + [] + end + end + + defp offset(%{offset: nil}, _sources), do: [] + + defp offset(%Query{offset: _, limit: nil} = query, _sources) do + error!(query, "You must provide a limit while using an offset") + end + + defp offset(%{offset: offset, limit: limit} = query, sources) do + [ + " OFFSET ", + expr(offset.expr, sources, query), + " ROW", + " FETCH NEXT ", + expr(limit.expr, sources, query), + " ROWS ONLY" + ] + end + + defp hints([_ | _] = hints), do: [" WITH (", Enum.intersperse(hints, ", "), ?)] + defp hints([]), do: [] + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [" OPTION (", binary, ?)] + defp lock(%{lock: expr} = query, sources), do: [" OPTION (", expr(expr, sources, query), ?)] + + defp combinations(%{combinations: combinations}, as_prefix) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query, as_prefix), ")"] + {:union_all, query} -> [" UNION ALL (", all(query, as_prefix), ")"] + {:except, query} -> [" EXCEPT (", all(query, as_prefix), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query, as_prefix), ")"] + {:intersect, query} -> [" INTERSECT (", all(query, as_prefix), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query, as_prefix), ")"] + end) + end + + defp boolean(_name, [], _sources, _query), do: [] + + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [ + name + | Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op), paren_expr(expr, sources, query)]} + + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op), paren_expr(expr, sources, query)]} + end) + |> elem(1) + ] + end + + defp 
operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select\s/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(true, _sources, _query) do + ["(1 = 1)"] + end + + defp paren_expr(false, _sources, _query) do + ["(1 = 0)"] + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + defp top_level_expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + combinations = + Enum.map(query.combinations, fn {type, combination_query} -> + {type, put_in(combination_query.aliases[@parent_as], {parent_query, sources})} + end) + + query = put_in(query.combinations, combinations) + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [all(query, subquery_as_prefix(sources))] + end + + defp top_level_expr(other, sources, parent_query) do + expr(other, sources, parent_query) + end + + # :^ - represents parameter ix is index number + defp expr({:^, [], [idx]}, _sources, _query) do + "@#{idx + 1}" + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) or is_binary(field) do + {ix, sources} = get_parent_sources_ix(query, as) + {_, name, _} = elem(sources, ix) + [name, ?. | quote_name(field)] + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) or is_binary(field) do + {_, name, _} = elem(sources, idx) + [name, ?. | quote_name(field)] + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_table, source, _schema} = elem(sources, idx) + source + end + + defp expr({:&, _, [idx, fields, _counter]}, sources, query) do + {_table, name, schema} = elem(sources, idx) + + if is_nil(schema) and is_nil(fields) do + error!( + query, + "Tds adapter requires a schema module when using selector #{inspect(name)} but " <> + "none was given. 
Please specify schema " <> + "or specify exactly which fields from #{inspect(name)} you what in projection" + ) + end + + Enum.map_join(fields, ", ", &"#{name}.#{quote_name(&1)}") + end + + # example from {:in, [], [1, {:^, [], [0, 0]}]} + defp expr({:in, _, [_left, []]}, _sources, _query) do + "0=1" + end + + # example from(p in Post, where: p.id in [1,2, ^some_id]) + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = Enum.map_join(right, ",", &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args | ")"] + end + + # example from(p in Post, where: p.id in []) + defp expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query), do: "0=1" + + # example from(p in Post, where: p.id in ^some_list) + # or from(p in Post, where: p.id in ^[]) + defp expr({:in, _, [left, {:^, _, [idx, length]}]}, sources, query) do + args = list_param_to_args(idx, length) + [expr(left, sources, query), " IN (", args | ")"] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query) | ")"] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + "#{expr(arg, sources, query)} IS NULL" + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query) | ")"] + end + + defp expr({:filter, _, _}, _sources, query) do + error!(query, "Tds adapter does not support aggregate filters") + end + + defp expr(%Ecto.SubQuery{} = subquery, sources, parent_query) do + [?(, top_level_expr(subquery, sources, parent_query), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "Tds adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> 
part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:values, _, [types, idx, num_rows]}, _, _query) do + [?(, values_list(types, idx + 1, num_rows), ?)] + end + + defp expr({:identifier, _, [literal]}, _sources, _query) do + quote_name(literal) + end + + defp expr({:constant, _, [literal]}, _sources, _query) when is_binary(literal) do + [?', escape_string(literal), ?'] + end + + defp expr({:constant, _, [literal]}, _sources, _query) when is_number(literal) do + [to_string(literal)] + end + + defp expr({:splice, _, [{:^, _, [idx, length]}]}, _sources, _query) do + list_param_to_args(idx, length) + end + + defp expr({:selected_as, _, [name]}, _sources, _query) do + [quote_name(name)] + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [ + "DATEADD(", + interval, + ", ", + interval_count(count, sources, query), + ", CAST(", + expr(datetime, sources, query), + " AS datetime2(6)))" + ] + end + + defp expr({:date_add, _, [date, count, interval]}, sources, query) do + [ + "CAST(DATEADD(", + interval, + ", ", + interval_count(count, sources, query), + ", CAST(", + expr(date, sources, query), + " AS datetime2(6))" | ") AS date)" + ] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:json_extract_path, _, _}, _sources, query) do + error!( + query, + "Tds adapter does not support json_extract_path expression" <> + ", use fragment with JSON_VALUE/JSON_QUERY" + ) + end + + defp expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {"", args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = args + [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)] + + {:fun, fun} -> + [ + fun, + ?(, + modifier, + Enum.map_intersperse(args, ", ", &top_level_expr(&1, sources, query)), + ?) 
+ ] + end + end + + defp expr(list, sources, query) when is_list(list) do + Enum.map_join(list, ", ", &expr(&1, sources, query)) + end + + defp expr({string, :varchar}, _sources, _query) + when is_binary(string) do + "'#{escape_string(string)}'" + end + + defp expr(string, _sources, _query) when is_binary(string) do + "N'#{escape_string(string)}'" + end + + defp expr(%Decimal{exp: exp} = decimal, _sources, _query) do + # this should help gaining precision for decimals values embedded in query + # but this is still not good enough, for instance: + # + # from(p in Post, select: type(2.0 + ^"2", p.cost()))) + # + # Post.cost is :decimal, but we don't know precision and scale since + # such info is only available in migration files. So query compilation + # will yield + # + # SELECT CAST(CAST(2.0 as decimal(38, 1)) + @1 AS decimal) + # FROM [posts] AS p0 + # + # as long as we have CAST(... as DECIMAL) without precision and scale + # value could be truncated + [ + "CAST(", + Decimal.to_string(decimal, :normal), + " as decimal(38, #{abs(exp)})", + ?) 
+ ] + end + + defp expr(%Tagged{value: binary, type: :binary}, _sources, _query) when is_binary(binary) do + hex = Base.encode16(binary, case: :lower) + "0x#{hex}" + end + + defp expr(%Tagged{value: binary, type: :uuid}, _sources, _query) when is_binary(binary) do + case binary do + <<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> -> + {:ok, value} = Tds.Ecto.UUID.dump(binary) + value + + any -> + any + end + end + + defp expr(%Tagged{value: other, type: type}, sources, query) + when type in [:varchar, :nvarchar] do + "CAST(#{expr(other, sources, query)} AS #{column_type(type, [])}(max))" + end + + defp expr(%Tagged{value: other, type: :integer}, sources, query) do + "CAST(#{expr(other, sources, query)} AS bigint)" + end + + defp expr(%Tagged{value: other, type: type}, sources, query) do + "CAST(#{expr(other, sources, query)} AS #{column_type(type, [])})" + end + + defp expr(nil, _sources, _query), do: "NULL" + defp expr(true, _sources, _query), do: "1" + defp expr(false, _sources, _query), do: "0" + + defp expr(literal, _sources, _query) when is_binary(literal) do + "'#{escape_string(literal)}'" + end + + defp expr(literal, _sources, _query) when is_integer(literal) do + Integer.to_string(literal) + end + + defp expr(literal, _sources, _query) when is_float(literal) do + Float.to_string(literal) + end + + defp expr(field, _sources, query) do + error!(query, "unsupported MSSQL expressions: `#{inspect(field)}`") + end + + defp values_list(types, idx, num_rows) do + rows = :lists.seq(1, num_rows, 1) + + [ + "VALUES ", + intersperse_reduce(rows, ?,, idx, fn _, idx -> + {value, idx} = values_expr(types, idx) + {[?(, value, ?)], idx} + end) + |> elem(0) + ] + end + + defp values_expr(types, idx) do + intersperse_reduce(types, ?,, idx, fn {_field, type}, idx -> + {["CAST(", ?@, Integer.to_string(idx), " AS ", column_type(type, []), ?)], idx + 1} + end) + end + + defp op_to_binary({op, _, [_, _]} = expr, sources, query) when op in @binary_ops do + paren_expr(expr, 
sources, query) + end + + defp op_to_binary({:is_nil, _, [_]} = expr, sources, query) do + paren_expr(expr, sources, query) + end + + defp op_to_binary(expr, sources, query) do + expr(expr, sources, query) + end + + defp interval_count(count, _sources, _query) when is_integer(count) do + Integer.to_string(count) + end + + defp interval_count(count, _sources, _query) when is_float(count) do + :erlang.float_to_binary(count, [:compact, decimals: 16]) + end + + defp interval_count(count, sources, query) do + expr(count, sources, query) + end + + defp returning([], _verb), do: [] + + defp returning(returning, verb) when is_list(returning) do + [" OUTPUT ", Enum.map_intersperse(returning, ", ", &[verb, ?., quote_name(&1)])] + end + + defp returning(%{select: nil}, _, _), + do: [] + + defp returning(%{select: %{fields: fields}} = query, idx, verb), + do: [ + " OUTPUT " + | Enum.map_intersperse(fields, ", ", fn + {{:., _, [{:&, _, [^idx]}, key]}, _, _} -> [verb, ?., quote_name(key)] + _ -> error!(query, "MSSQL can only return table #{verb} columns") + end) + ] + + defp create_names(%{sources: sources}, as_prefix) do + create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() + end + + defp create_names(sources, pos, limit, as_prefix) when pos < limit do + [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)] + end + + defp create_names(_sources, pos, pos, as_prefix) do + [as_prefix] + end + + defp subquery_as_prefix(sources) do + [?s | :erlang.element(tuple_size(sources), sources)] + end + + defp create_name(sources, pos, as_prefix) do + case elem(sources, pos) do + {:fragment, _, _} -> + {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} + + {:values, _, _} -> + {nil, as_prefix ++ [?v | Integer.to_string(pos)], nil} + + {table, model, prefix} -> + name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] + {quote_table(prefix, table), name, model} + + %Ecto.SubQuery{} -> + {nil, as_prefix ++ [?s | 
Integer.to_string(pos)], nil} + end + end + + defp create_alias(<>) when first in ?a..?z when first in ?A..?Z do + first + end + + defp create_alias(_) do + ?t + end + + # DDL + alias Ecto.Migration.{Table, Index, Reference, Constraint} + + @creates [:create, :create_if_not_exists] + @drops [:drop, :drop_if_exists] + + @impl true + def execute_ddl({command, %Table{} = table, columns}) when command in @creates do + prefix = table.prefix + + pk_name = + if table.prefix, + do: "#{table.prefix}_#{table.name}", + else: table.name + + table_structure = + table + |> column_definitions(columns) + |> Kernel.++(pk_definitions(columns, ", CONSTRAINT [#{pk_name}_pkey] ")) + |> case do + [] -> [] + list -> [" (", list, ?)] + end + + create_if_not_exists = + if_table_not_exists(command == :create_if_not_exists, table.name, prefix) + + [ + [ + create_if_not_exists, + "CREATE TABLE ", + quote_table(prefix, table.name), + table_structure, + engine_expr(table.engine), + options_expr(table.options), + "; " + ] + ] + end + + def execute_ddl({command, %Table{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP TABLE commands") + + def execute_ddl({command, %Table{} = table, :restrict}) when command in @drops do + prefix = table.prefix + + [ + [ + if_table_exists(command == :drop_if_exists, table.name, prefix), + "DROP TABLE ", + quote_table(prefix, table.name), + "; " + ] + ] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + statement_prefix = ["ALTER TABLE ", quote_table(table.prefix, table.name), " "] + + pk_name = + if table.prefix, + do: "#{table.prefix}_#{table.name}", + else: table.name + + pkeys = + case pk_definitions(changes, " CONSTRAINT [#{pk_name}_pkey] ") do + [] -> [] + sql -> [statement_prefix, "ADD", sql] + end + + [ + [ + column_changes(statement_prefix, table, changes), + pkeys + ] + ] + end + + def execute_ddl({command, %Index{} = index}) when command in @creates do + prefix = index.prefix + + if 
index.using do + error!(nil, "MSSQL does not support `using` in indexes") + end + + if index.nulls_distinct == true do + error!(nil, "MSSQL does not support nulls_distinct set to true in indexes") + end + + with_options = + if index.concurrently or index.options != nil do + [ + " WITH", + ?(, + if_do(index.concurrently, "ONLINE=ON"), + if_do(index.concurrently and index.options != nil, ","), + if_do(index.options != nil, index.options), + ?) + ] + else + [] + end + + include = + index.include + |> List.wrap() + |> Enum.map_intersperse(", ", &include_expr/1) + + [ + [ + if_index_not_exists( + command == :create_if_not_exists, + index.name, + unquoted_name(prefix, index.table) + ), + "CREATE", + if_do(index.unique, " UNIQUE"), + " INDEX ", + quote_name(index.name), + " ON ", + quote_table(prefix, index.table), + " (", + Enum.map_intersperse(index.columns, ", ", &index_expr/1), + ?), + if_do(include != [], [" INCLUDE ", ?(, include, ?)]), + if_do(index.where, [" WHERE (", index.where, ?)]), + with_options, + ?; + ] + ] + end + + def execute_ddl({:create, %Constraint{exclude: exclude}}) when exclude != nil do + msg = + "`:exclude` is not supported Tds adapter check constraint parameter, instead " <> + "set `:check` attribute with negated expression." 
+ + error!(nil, msg) + end + + def execute_ddl({:create, %Constraint{validate: false}}) do + error!(nil, "`:validate` is not supported by the Tds adapter") + end + + def execute_ddl({:create, %Constraint{} = constraint}) do + table_name = quote_table(constraint.prefix, constraint.table) + + [ + [ + "ALTER TABLE ", + table_name, + " ADD CONSTRAINT ", + quote_name(constraint.name), + " ", + "CHECK (", + constraint.check, + "); " + ] + ] + end + + def execute_ddl({:rename, %Index{} = current_index, new_name}) do + [ + [ + "sp_rename ", + "N'#{current_index.table}.#{current_index.name}', ", + "N'#{new_name}', ", + "N'INDEX'" + ] + ] + end + + def execute_ddl({command, %Index{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP INDEX commands") + + def execute_ddl({command, %Index{} = index, :restrict}) when command in @drops do + prefix = index.prefix + + [ + [ + if_index_exists( + command == :drop_if_exists, + index.name, + unquoted_name(prefix, index.table) + ), + "DROP INDEX ", + quote_name(index.name), + " ON ", + quote_table(prefix, index.table), + if_do(index.concurrently, " LOCK=NONE"), + "; " + ] + ] + end + + def execute_ddl({command, %Constraint{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP CONSTRAINT commands") + + def execute_ddl({command, %Constraint{} = constraint, _}) when command in @drops do + table_name = quote_table(constraint.prefix, constraint.table) + + [ + [ + if_check_constraint_exists( + command == :drop_if_exists, + constraint.name, + constraint.prefix + ), + "ALTER TABLE ", + table_name, + " DROP CONSTRAINT ", + quote_name(constraint.name), + "; " + ] + ] + end + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [ + [ + "EXEC sp_rename '", + unquoted_name(current_table.prefix, current_table.name), + "', '", + unquoted_name(new_table.prefix, new_table.name), + "'" + ] + ] + end + + def execute_ddl({:rename, table, 
current_column, new_column}) do + [ + [ + "EXEC sp_rename '", + unquoted_name(table.prefix, table.name, current_column), + "', '", + unquoted_name(new_column), + "', 'COLUMN'" + ] + ] + end + + def execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "Tds adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(_), do: [] + + @impl true + def table_exists_query(table) do + {"SELECT 1 FROM sys.tables WHERE [name] = @1", [table]} + end + + defp pk_definitions(columns, prefix) do + pks = + for {action, name, _, opts} <- columns, + action != :remove, + opts[:primary_key], + do: name + + case pks do + [] -> + [] + + _ -> + [prefix, "PRIMARY KEY CLUSTERED (", quote_names(pks), ?)] + end + end + + defp column_definitions(table, columns) do + Enum.map_intersperse(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do + [ + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + reference_expr(ref, table, name) + ] + end + + defp column_definition(table, {:add, name, type, opts}) do + [quote_name(name), " ", column_type(type, opts), column_options(table, name, opts)] + end + + defp column_changes(statement, table, columns) do + for column <- columns do + column_change(statement, table, column) + end + end + + defp column_change(_, _, {_, _, %Reference{validate: false}, _}) do + error!(nil, "validate: false on references is not supported in Tds") + end + + defp column_change(statement_prefix, table, {:add, name, %Reference{} = ref, opts}) do + [ + [ + statement_prefix, + "ADD ", + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + "; " + ], + [statement_prefix, "ADD", constraint_expr(ref, table, name), "; "] + ] + end + + defp column_change(statement_prefix, table, {:add, name, type, opts}) do + [ + [ + 
statement_prefix, + "ADD ", + quote_name(name), + " ", + column_type(type, opts), + column_options(table, name, opts), + "; " + ] + ] + end + + defp column_change( + statement_prefix, + %{name: table_name, prefix: prefix} = table, + {:add_if_not_exists, column_name, type, opts} + ) do + [ + [ + if_column_not_exists(prefix, table_name, column_name), + statement_prefix, + "ADD ", + quote_name(column_name), + " ", + column_type(type, opts), + column_options(table, column_name, opts), + "; " + ] + ] + end + + defp column_change(statement_prefix, table, {:modify, name, %Reference{} = ref, opts}) do + [ + drop_constraint_from_expr(opts[:from], table, name, statement_prefix), + maybe_drop_default_expr(statement_prefix, table, name, opts), + [ + statement_prefix, + "ALTER COLUMN ", + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + "; " + ], + [statement_prefix, "ADD", constraint_expr(ref, table, name), "; "], + [column_default_value(statement_prefix, table, name, opts)] + ] + end + + defp column_change(statement_prefix, table, {:modify, name, type, opts}) do + collation = Keyword.fetch(opts, :collation) + + [ + drop_constraint_from_expr(opts[:from], table, name, statement_prefix), + maybe_drop_default_expr(statement_prefix, table, name, opts), + [ + statement_prefix, + "ALTER COLUMN ", + quote_name(name), + " ", + column_type(type, opts), + null_expr(Keyword.get(opts, :null)), + collation_expr(collation), + "; " + ], + [column_default_value(statement_prefix, table, name, opts)] + ] + end + + defp column_change(statement_prefix, _table, {:remove, name}) do + [statement_prefix, "DROP COLUMN ", quote_name(name), "; "] + end + + defp column_change(statement_prefix, _table, {:remove, name, _type, _opts}), + do: [statement_prefix, "DROP COLUMN ", quote_name(name)] + + defp column_change(statement_prefix, table, {:remove_if_exists, column_name, _}), + do: column_change(statement_prefix, table, {:remove_if_exists, 
column_name}) + + defp column_change( + statement_prefix, + %{name: table, prefix: prefix}, + {:remove_if_exists, column_name} + ) do + [ + [ + if_column_exists(prefix, table, column_name), + statement_prefix, + "DROP COLUMN ", + quote_name(column_name), + "; " + ] + ] + end + + defp column_options(table, name, opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + collation = Keyword.fetch(opts, :collation) + + [null_expr(null), default_expr(table, name, default), collation_expr(collation)] + end + + defp column_default_value(statement_prefix, table, name, opts) do + default_expression = default_expr(table, name, Keyword.fetch(opts, :default)) + + case default_expression do + [] -> [] + _ -> [statement_prefix, "ADD", default_expression, " FOR ", quote_name(name), "; "] + end + end + + defp null_expr(false), do: [" NOT NULL"] + defp null_expr(true), do: [" NULL"] + defp null_expr(_), do: [] + + defp collation_expr({:ok, collation_name}), do: " COLLATE #{collation_name}" + defp collation_expr(_), do: [] + + defp default_expr(_table, _name, {:ok, nil}), + do: [] + + defp default_expr(table, name, {:ok, literal}) when is_binary(literal), + do: [ + " CONSTRAINT ", + constraint_name("DF", table, name), + " DEFAULT (N'", + escape_string(literal), + "')" + ] + + defp default_expr(table, name, {:ok, true}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (1)"] + + defp default_expr(table, name, {:ok, false}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (0)"] + + defp default_expr(table, name, {:ok, literal}) when is_number(literal), + do: [ + " CONSTRAINT ", + constraint_name("DF", table, name), + " DEFAULT (", + to_string(literal), + ")" + ] + + defp default_expr(table, name, {:ok, {:fragment, expr}}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (", expr, ")"] + + defp default_expr(_table, _name, :error), do: [] + + defp drop_constraint_from_expr({%Reference{} = ref, 
_opts}, table, name, stm_prefix), + do: drop_constraint_from_expr(ref, table, name, stm_prefix) + + defp drop_constraint_from_expr(%Reference{} = ref, table, name, stm_prefix) do + [stm_prefix, "DROP CONSTRAINT ", reference_name(ref, table, name), "; "] + end + + defp drop_constraint_from_expr(_, _, _, _), + do: [] + + defp maybe_drop_default_expr(statement_prefix, table, name, opts) do + if Keyword.has_key?(opts, :default) do + constraint_name = constraint_name("DF", table, name) + if_exists_drop_constraint(constraint_name, statement_prefix) + else + [] + end + end + + defp constraint_name(type, table, name), + do: quote_name("#{type}_#{table.prefix}_#{table.name}_#{name}") + + defp index_expr({dir, literal}) + when is_binary(literal), + do: index_dir(dir, literal) + + defp index_expr({dir, literal}), + do: index_dir(dir, quote_name(literal)) + + defp index_expr(literal) when is_binary(literal), do: literal + defp index_expr(literal), do: quote_name(literal) + + defp index_dir(dir, str) + when dir in [ + :asc, + :asc_nulls_first, + :asc_nulls_last, + :desc, + :desc_nulls_first, + :desc_nulls_last + ] do + case dir do + :asc -> [str | " ASC"] + :desc -> [str | " DESC"] + _ -> error!(nil, "#{dir} is not supported in indexes in Tds adapter") + end + end + + defp include_expr(literal) when is_binary(literal), do: literal + defp include_expr(literal), do: quote_name(literal) + + defp engine_expr(_storage_engine), do: [""] + + defp options_expr(nil), do: [] + + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "Tds adapter does not support keyword lists in :options") + + defp options_expr(options), do: [" ", to_string(options)] + + defp column_type(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + scale = Keyword.get(opts, :scale) + generated = Keyword.get(opts, :generated) + [ecto_to_db(type, size, precision, scale), generated_expr(generated)] + end + + defp generated_expr(nil), do: [] + + defp 
generated_expr(expr) when is_binary(expr) do + [" AS ", expr] + end + + defp generated_expr(other) do + raise ArgumentError, + "the `:generated` option only accepts strings, received: #{inspect(other)}" + end + + defp constraint_expr(%Reference{} = ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + if ref.match do + error!(nil, ":match is not supported in references for tds") + end + + [ + " CONSTRAINT ", + reference_name(ref, table, name), + " FOREIGN KEY (#{quote_names(current_columns)})", + " REFERENCES ", + quote_table(Keyword.get(ref.options, :prefix, table.prefix), ref.table), + "(#{quote_names(reference_columns)})", + reference_on_delete(ref.on_delete), + reference_on_update(ref.on_update) + ] + end + + defp reference_expr(%Reference{} = ref, table, name) do + [",", constraint_expr(ref, table, name)] + end + + defp reference_name(%Reference{name: nil}, table, column), + do: quote_name("#{table.name}_#{column}_fkey") + + defp reference_name(%Reference{name: name}, _table, _column), do: quote_name(name) + + defp reference_column_type(:id, _opts), do: "BIGINT" + defp reference_column_type(:serial, _opts), do: "INT" + defp reference_column_type(:bigserial, _opts), do: "BIGINT" + defp reference_column_type(type, opts), do: column_type(type, opts) + + defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" + + defp reference_on_delete({:nilify, _columns}) do + error!(nil, "Tds adapter does not support the `{:nilify, columns}` action for `:on_delete`") + end + + defp reference_on_delete(:default_all), do: " ON DELETE SET DEFAULT" + + defp reference_on_delete({:default, _columns}) do + error!( + nil, + "Tds adapter does not support the `{:default, columns}` action for `:on_delete`" + ) + end + + defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" + defp reference_on_delete(:nothing), do: " ON DELETE NO ACTION" + defp reference_on_delete(_), do: [] + + defp 
reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp reference_on_update(:nothing), do: " ON UPDATE NO ACTION" + defp reference_on_update(_), do: [] + + ## Helpers + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + name = maybe_add_column_names(source, name) + {expr || expr(source, sources, query), name} + end + + defp get_parent_sources_ix(query, as) do + case query.aliases[@parent_as] do + {%{aliases: %{^as => ix}}, sources} -> {ix, sources} + {%{} = parent, _sources} -> get_parent_sources_ix(parent, as) + end + end + + defp maybe_add_column_names({:values, _, [types, _, _]}, name) do + fields = Keyword.keys(types) + [name, ?\s, ?(, quote_names(fields), ?)] + end + + defp maybe_add_column_names(_, name), do: name + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, ["[", "]"]) do + error!( + nil, + "bad literal/field/table name #{inspect(name)} ('[' and ']' are not permitted)" + ) + end + + "[#{name}]" + end + + defp quote_names(names), do: Enum.map_intersperse(names, ?,, "e_name/1) + + defp quote_table(nil, name), do: quote_table(name) + + defp quote_table({server, db, schema}, name), + do: [ + quote_table(server), + ".", + quote_table(db), + ".", + quote_table(schema), + ".", + quote_table(name) + ] + + defp quote_table({db, schema}, name), + do: [quote_table(db), ".", quote_table(schema), ".", quote_table(name)] + + defp quote_table(prefix, name), + do: [quote_table(prefix), ".", quote_table(name)] + + defp quote_table(name) when is_atom(name), do: quote_table(Atom.to_string(name)) + + defp quote_table(name) do + if String.contains?(name, "[") or String.contains?(name, "]") do + error!(nil, "bad table name #{inspect(name)} '[' and ']' are not permitted") + end + + "[#{name}]" + end + + defp unquoted_name(prefix, name, 
column_name), + do: unquoted_name(unquoted_name(prefix, name), column_name) + + defp unquoted_name(nil, name), do: unquoted_name(name) + + defp unquoted_name(prefix, name) do + prefix = if is_atom(prefix), do: Atom.to_string(prefix), else: prefix + name = if is_atom(name), do: Atom.to_string(name), else: name + + [prefix, ".", name] + end + + defp unquoted_name(name) when is_atom(name), do: unquoted_name(Atom.to_string(name)) + + defp unquoted_name(name) do + if String.contains?(name, ["[", "]"]) do + error!(nil, "bad table name #{inspect(name)} '[' and ']' are not permitted") + end + + name + end + + defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) + + defp intersperse_reduce([], _separator, user_acc, _reducer, acc), + do: {acc, user_acc} + + defp intersperse_reduce([elem], _separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + {[acc | elem], user_acc} + end + + defp intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator]) + end + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + value |> :binary.replace("'", "''", [:global]) + end + + defp ecto_to_db(type, size, precision, scale, query \\ nil) + + defp ecto_to_db({:array, _}, _, _, _, query), + do: error!(query, "Array type is not supported by TDS") + + defp ecto_to_db(:id, _, _, _, _), do: "bigint" + defp ecto_to_db(:serial, _, _, _, _), do: "int IDENTITY(1,1)" + defp ecto_to_db(:bigserial, _, _, _, _), do: "bigint IDENTITY(1,1)" + defp ecto_to_db(:binary_id, _, _, _, _), do: "uniqueidentifier" + defp ecto_to_db(:boolean, _, _, _, _), do: "bit" + defp ecto_to_db(:string, nil, _, _, _), do: "nvarchar(255)" + defp ecto_to_db(:string, :max, _, _, _), do: "nvarchar(max)" + defp ecto_to_db(:string, s, _, _, _) when s in 1..4_000, do: 
"nvarchar(#{s})" + defp ecto_to_db(:float, nil, _, _, _), do: "float" + defp ecto_to_db(:float, s, _, _, _) when s in 1..53, do: "float(#{s})" + defp ecto_to_db(:binary, nil, _, _, _), do: "varbinary(max)" + defp ecto_to_db(:binary, s, _, _, _) when s in 1..8_000, do: "varbinary(#{s})" + defp ecto_to_db(:uuid, _, _, _, _), do: "uniqueidentifier" + defp ecto_to_db(:map, nil, _, _, _), do: "nvarchar(max)" + defp ecto_to_db(:map, s, _, _, _) when s in 0..4_000, do: "nvarchar(#{s})" + defp ecto_to_db({:map, _}, nil, _, _, _), do: "nvarchar(max)" + defp ecto_to_db({:map, _}, s, _, _, _) when s in 1..4_000, do: "nvarchar(#{s})" + defp ecto_to_db(:time, _, _, _, _), do: "time(0)" + defp ecto_to_db(:time_usec, _, p, _, _) when p in 0..7, do: "time(#{p})" + defp ecto_to_db(:time_usec, _, _, _, _), do: "time(6)" + defp ecto_to_db(:utc_datetime, _, _, _, _), do: "datetime" + defp ecto_to_db(:utc_datetime_usec, _, p, _, _) when p in 0..7, do: "datetime2(#{p})" + defp ecto_to_db(:utc_datetime_usec, _, _, _, _), do: "datetime2(6)" + defp ecto_to_db(:naive_datetime, _, _, _, _), do: "datetime" + defp ecto_to_db(:naive_datetime_usec, _, p, _, _) when p in 0..7, do: "datetime2(#{p})" + defp ecto_to_db(:naive_datetime_usec, _, _, _, _), do: "datetime2(6)" + + defp ecto_to_db(other, size, _, _, _) when is_integer(size) do + "#{Atom.to_string(other)}(#{size})" + end + + defp ecto_to_db(other, _, precision, scale, _) when is_integer(precision) do + "#{Atom.to_string(other)}(#{precision},#{scale || 0})" + end + + defp ecto_to_db(atom, nil, nil, nil, _) when is_atom(atom) do + Atom.to_string(atom) + end + + defp ecto_to_db(type, _, _, _, _) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` where `t` itself follows the same conditions." 
+ end + + defp error!(nil, message) do + raise ArgumentError, message + end + + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + + defp if_table_not_exists(condition, name, prefix) do + if_do(condition, [ + "IF NOT EXISTS (SELECT * FROM [INFORMATION_SCHEMA].[TABLES] ", + "WHERE ", + "[TABLE_NAME] = ", + ?', + "#{name}", + ?', + if_do(prefix != nil, [ + " AND [TABLE_SCHEMA] = ", + ?', + "#{prefix}", + ?' + ]), + ") " + ]) + end + + defp if_table_exists(condition, name, prefix) do + if_do(condition, [ + "IF EXISTS (SELECT * FROM [INFORMATION_SCHEMA].[TABLES] ", + "WHERE ", + "[TABLE_NAME] = ", + ?', + "#{name}", + ?', + if_do(prefix != nil, [ + " AND [TABLE_SCHEMA] = ", + ?', + "#{prefix}", + ?' + ]), + ") " + ]) + end + + defp if_column_exists(prefix, table, column_name) do + [ + "IF EXISTS (SELECT 1 FROM [sys].[columns] ", + "WHERE [name] = N'#{column_name}' AND ", + "[object_id] = OBJECT_ID(N'", + if_do(prefix != nil, ["#{prefix}", ?.]), + "#{table}", + "')) " + ] + end + + defp if_column_not_exists(prefix, table, column_name) do + [ + "IF NOT EXISTS (SELECT 1 FROM [sys].[columns] ", + "WHERE [name] = N'#{column_name}' AND ", + "[object_id] = OBJECT_ID(N'", + if_do(prefix != nil, ["#{prefix}", ?.]), + "#{table}", + "')) " + ] + end + + defp list_param_to_args(idx, length) do + Enum.map_join(1..length, ",", &"@#{idx + &1}") + end + + defp as_string(atom) when is_atom(atom), do: Atom.to_string(atom) + defp as_string(str), do: str + + defp if_index_exists(condition, index_name, table_name) do + if_do(condition, [ + "IF EXISTS (SELECT name FROM sys.indexes WHERE name = N'", + as_string(index_name), + "' AND object_id = OBJECT_ID(N'", + as_string(table_name), + "')) " + ]) + end + + defp if_index_not_exists(condition, index_name, table_name) do + if_do(condition, [ + "IF NOT EXISTS (SELECT name FROM sys.indexes WHERE name = N'", + as_string(index_name), + "' AND object_id = OBJECT_ID(N'", + as_string(table_name), + "')) " + 
]) + end + + defp if_check_constraint_exists(condition, name, prefix) do + if_do(condition, [ + "IF NOT EXISTS (SELECT * ", + "FROM [INFORMATION_SCHEMA].[CHECK_CONSTRAINTS] ", + "WHERE [CONSTRAINT_NAME] = N'#{name}'", + if_do(prefix != nil, [ + " AND [CONSTRAINT_SCHEMA] = N'#{prefix}'" + ]), + ") " + ]) + end + + # types + # "U" - table, + # "C", "PK", "UQ", "F ", "D " - constraints + defp if_object_exists(name, type, statement) do + [ + "IF (OBJECT_ID(N'", + name, + "', '", + type, + "') IS NOT NULL) ", + statement + ] + end + + defp if_exists_drop_constraint(name, statement_prefix) do + [ + if_object_exists( + name, + "D", + "#{statement_prefix}DROP CONSTRAINT #{name}; " + ) + ] + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds/types.ex b/deps/ecto_sql/lib/ecto/adapters/tds/types.ex new file mode 100644 index 0000000..780a70d --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds/types.ex @@ -0,0 +1,293 @@ +if Code.ensure_loaded?(Tds) do + defmodule Tds.Ecto.UUID do + @moduledoc """ + A TDS adapter type for UUIDs strings. + + If you are using Tds adapter and UUIDs in your project, instead of `Ecto.UUID` + you should use Tds.Ecto.UUID to generate correct bytes that should be stored + in database. + """ + + use Ecto.Type + + @typedoc """ + A hex-encoded UUID string. + """ + @type t :: <<_::288>> + + @typedoc """ + A raw binary representation of a UUID. + """ + @type raw :: <<_::128>> + + @doc false + @impl true + def type(), do: :uuid + + @doc """ + Casts to UUID. + """ + @impl true + @spec cast(t | raw | any) :: {:ok, t} | :error + def cast( + <> + ) do + <> + catch + :error -> :error + else + casted -> {:ok, casted} + end + + def cast(<>), do: encode(bin) + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. 
+ """ + def cast!(value) do + case cast(value) do + {:ok, uuid} -> uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: value + end + end + + @compile {:inline, c: 1} + + defp c(?0), do: ?0 + defp c(?1), do: ?1 + defp c(?2), do: ?2 + defp c(?3), do: ?3 + defp c(?4), do: ?4 + defp c(?5), do: ?5 + defp c(?6), do: ?6 + defp c(?7), do: ?7 + defp c(?8), do: ?8 + defp c(?9), do: ?9 + defp c(?A), do: ?a + defp c(?B), do: ?b + defp c(?C), do: ?c + defp c(?D), do: ?d + defp c(?E), do: ?e + defp c(?F), do: ?f + defp c(?a), do: ?a + defp c(?b), do: ?b + defp c(?c), do: ?c + defp c(?d), do: ?d + defp c(?e), do: ?e + defp c(?f), do: ?f + defp c(_), do: throw(:error) + + @doc """ + Converts a string representing a UUID into a binary. + """ + @impl true + @spec dump(t | any) :: {:ok, raw} | :error + def dump( + <> + ) do + try do + <> + catch + :error -> :error + else + binary -> + {:ok, binary} + end + end + + def dump(_), do: :error + + def dump!(value) do + case dump(value) do + {:ok, binary} -> binary + :error -> raise ArgumentError, "Invalid uuid value #{inspect(value)}" + end + end + + @compile {:inline, d: 1} + + defp d(?0), do: 0 + defp d(?1), do: 1 + defp d(?2), do: 2 + defp d(?3), do: 3 + defp d(?4), do: 4 + defp d(?5), do: 5 + defp d(?6), do: 6 + defp d(?7), do: 7 + defp d(?8), do: 8 + defp d(?9), do: 9 + defp d(?A), do: 10 + defp d(?B), do: 11 + defp d(?C), do: 12 + defp d(?D), do: 13 + defp d(?E), do: 14 + defp d(?F), do: 15 + defp d(?a), do: 10 + defp d(?b), do: 11 + defp d(?c), do: 12 + defp d(?d), do: 13 + defp d(?e), do: 14 + defp d(?f), do: 15 + defp d(_), do: throw(:error) + + @doc """ + Converts a binary UUID into a string. + """ + @impl true + @spec load(raw | any) :: {:ok, t} | :error + def load(<<_::128>> = uuid) do + encode(uuid) + end + + def load(<<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> = string) do + raise ArgumentError, + "trying to load string UUID as Tds.Ecto.UUID: #{inspect(string)}. 
" <> + "Maybe you wanted to declare :uuid as your database field?" + end + + def load(_), do: :error + + @doc """ + Generates a version 4 (random) UUID. + """ + @spec generate() :: t + def generate do + {:ok, uuid} = encode(bingenerate()) + uuid + end + + @doc """ + Generates a version 4 (random) UUID in the binary format. + """ + @spec bingenerate() :: raw + def bingenerate do + <> = :crypto.strong_rand_bytes(15) + <> + end + + # Callback invoked by autogenerate fields. + @impl true + def autogenerate, do: generate() + + defp encode( + <> + ) do + <> + catch + :error -> :error + else + encoded -> {:ok, encoded} + end + + @compile {:inline, e: 1} + + defp e(0), do: ?0 + defp e(1), do: ?1 + defp e(2), do: ?2 + defp e(3), do: ?3 + defp e(4), do: ?4 + defp e(5), do: ?5 + defp e(6), do: ?6 + defp e(7), do: ?7 + defp e(8), do: ?8 + defp e(9), do: ?9 + defp e(10), do: ?a + defp e(11), do: ?b + defp e(12), do: ?c + defp e(13), do: ?d + defp e(14), do: ?e + defp e(15), do: ?f + end + + defmodule Tds.Ecto.VarChar do + @moduledoc """ + A Tds adapter Ecto Type that wraps erlang string into tuple so TDS driver + can understand if erlang string should be encoded as NVarChar or Varchar. + + Due to some limitations in Ecto and Tds driver, it is not possible to + support collations other than the one set on connection during login. + Please be aware of this limitation if you plan to store varchar values in + your database using Ecto since you will probably lose some codepoints in + the value during encoding. Instead use `tds_encoding` library and first + encode value and then annotate it as `:binary` by calling `Ecto.Query.API.type/2` + in your query. This way all codepoints will be properly preserved during + insert to database. + """ + use Ecto.Type + + @typedoc """ + An erlang string + """ + @type t :: String.t() + + @typedoc """ + A value annotated as varchar. 
+ """ + @type varchar :: {String.t(), :varchar} + + @doc false + @impl true + def type(), do: :varchar + + @doc """ + Casts to string. + """ + @spec cast(t | varchar | any) :: {:ok, t} | :error + @impl true + def cast({value, :varchar}) do + # In case we get already dumped value + {:ok, value} + end + + def cast(value) when is_binary(value) do + {:ok, value} + end + + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. + """ + @spec cast!(t | varchar | any) :: t + def cast!(value) do + case cast(value) do + {:ok, uuid} -> uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: value + end + end + + @doc """ + Loads the DB type as is. + """ + @impl true + @spec load(t | any) :: {:ok, t} | :error + def load(value) do + {:ok, value} + end + + @doc """ + Converts a string representing a VarChar into a tuple `{value, :varchar}`. + + Returns `:error` if value is not binary. + """ + @impl true + @spec dump(t | any) :: {:ok, varchar} | :error + def dump(value) when is_binary(value) do + {:ok, {value, :varchar}} + end + + def dump(_), do: :error + end +end diff --git a/deps/ecto_sql/lib/ecto/migration.ex b/deps/ecto_sql/lib/ecto/migration.ex new file mode 100644 index 0000000..c36dcfd --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration.ex @@ -0,0 +1,1778 @@ +defmodule Ecto.Migration do + @moduledoc """ + Migrations are used to modify your database schema over time. + + This module provides many helpers for migrating the database, + allowing developers to use Elixir to alter their storage in + a way that is database independent. + + Migrations typically provide two operations: `up` and `down`, + allowing us to migrate the database forward or roll it back + in case of errors. + + In order to manage migrations, Ecto creates a table called + `schema_migrations` in the database, which stores all migrations + that have already been executed. 
You can configure the name of + this table with the `:migration_source` configuration option + and the name of the repository that manages it with `:migration_repo`. + + Ecto locks the `schema_migrations` table when running + migrations, guaranteeing two different servers cannot run the same + migration at the same time. + + ## Creating your first migration + + Migrations are defined inside the "priv/REPO/migrations" where REPO + is the last part of the repository name in underscore. For example, + migrations for `MyApp.Repo` would be found in "priv/repo/migrations". + For `MyApp.CustomRepo`, it would be found in "priv/custom_repo/migrations". + + Each file in the migrations directory has the following structure: + + ```text + NUMBER_NAME.exs + ``` + + The NUMBER is a unique number that identifies the migration. It is + usually the timestamp of when the migration was created. The NAME + must also be unique and it quickly identifies what the migration + does. For example, if you need to track the "weather" in your system, + you can start a new file at "priv/repo/migrations/20190417140000_add_weather_table.exs" + that will have the following contents: + + defmodule MyRepo.Migrations.AddWeatherTable do + use Ecto.Migration + + def up do + create table("weather") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + + timestamps() + end + end + + def down do + drop table("weather") + end + end + + The `up/0` function is responsible to migrate your database forward. + the `down/0` function is executed whenever you want to rollback. + The `down/0` function must always do the opposite of `up/0`. + Inside those functions, we invoke the API defined in this module, + you will find conveniences for managing tables, indexes, columns, + references, as well as running custom SQL commands. + + To run a migration, we generally use Mix tasks. 
For example, you can + run the migration above by going to the root of your project and + typing: + + $ mix ecto.migrate + + You can also roll it back by calling: + + $ mix ecto.rollback --step 1 + + Note rollback requires us to say how much we want to rollback. + On the other hand, `mix ecto.migrate` will always run all pending + migrations. + + In practice, we don't create migration files by hand either, we + typically use `mix ecto.gen.migration` to generate the file with + the proper timestamp and then we just fill in its contents: + + $ mix ecto.gen.migration add_weather_table + + For the rest of this document, we will cover the migration APIs + provided by Ecto. For a in-depth discussion of migrations and how + to use them safely within your application and data, see the + [Safe Ecto Migrations guide](https://github.com/fly-apps/safe-ecto-migrations). + + ## Mix tasks + + As seen above, Ecto provides many Mix tasks to help developers work + with migrations. We summarize them below: + + * `mix ecto.gen.migration` - generates a + migration that the user can fill in with particular commands + * `mix ecto.migrate` - migrates a repository + * `mix ecto.migrations` - shows all migrations and their status + * `mix ecto.rollback` - rolls back a particular migration + + Run `mix help COMMAND` for more information on a particular command. + For a lower level API for running migrations, see `Ecto.Migrator`. + + ## Change + + Having to write both `up/0` and `down/0` functions for every + migration is tedious and error prone. For this reason, Ecto allows + you to define a `change/0` callback with all of the code you want + to execute when migrating and Ecto will automatically figure out + the `down/0` for you. 
For example, the migration above can be + written as: + + defmodule MyRepo.Migrations.AddWeatherTable do + use Ecto.Migration + + def change do + create table("weather") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + + timestamps() + end + end + end + + However, note that not all commands are reversible. Trying to rollback + a non-reversible command will raise an `Ecto.MigrationError`. + + A notable command in this regard is `execute/2`, which is reversible in + `change/0` by accepting a pair of plain SQL strings. The first is run on + forward migrations (`up/0`) and the second when rolling back (`down/0`). + + If `up/0` and `down/0` are implemented in a migration, they take precedence, + and `change/0` isn't invoked. + + ## Field Types + + The [Ecto primitive types](https://hexdocs.pm/ecto/Ecto.Schema.html#module-primitive-types) are mapped to the appropriate database + type by the various database adapters. For example, `:string` is + converted to `:varchar`, `:binary` to `:bytea` or `:blob`, and so on. + + In particular, note that: + + * the `:string` type in migrations by default has a limit of 255 characters. + If you need more or less characters, pass the `:size` option, such + as `add :field, :string, size: 10`. If you don't want to impose a limit, + most databases support a `:text` type or similar + + * the `:binary` type in migrations by default has no size limit. If you want + to impose a limit, pass the `:size` option accordingly. In MySQL, passing + the size option changes the underlying field from "blob" to "varbinary" + + Any other type will be given as is to the database. For example, you + can use `:text`, `:char`, or `:varchar` as types. Types that have spaces + in their names can be wrapped in double quotes, such as `:"int unsigned"`, + `:"time without time zone"`, etc. 
+ + ## Executing and flushing + + Most functions in this module, when executed inside of migrations, are not + executed immediately. Instead they are performed after the relevant `up`, + `change`, or `down` callback terminates. Any other functions, such as + functions provided by `Ecto.Repo`, will be executed immediately unless they + are called from within an anonymous function passed to `execute/1`. + + In some situations you may want to guarantee that all of the previous steps + have been executed before continuing. This is useful when you need to apply a + set of changes to the table before continuing with the migration. This can be + done with `flush/0`: + + def up do + ... + flush() + ... + end + + However `flush/0` will raise if it would be called from `change` function when doing a rollback. + To avoid that we recommend to use `execute/2` with anonymous functions instead. + For more information and example usage please take a look at `execute/2` function. + + ## Formatter configuration + + To enable Ecto's custom `mix format` rules in your migrations, you can create a new formatter + config file in your project called `priv/[your_repo]/migrations/.formatter.exs` with the + following content: + + ```elixir + [ + import_deps: [:ecto_sql], + inputs: ["*.exs"] + ] + ``` + + You will also need to add a line or two to your project's main formatter config so that the + formatter knows where to find the new config file. Update (or create) your project's main + `.formatter.exs` file: + + ```elixir + [ + # Add this line to enable Ecto formatter rules + import_deps: [:ecto], + + # Add this line to enable Ecto's formatter rules in your migrations directory + subdirectories: ["priv/*/migrations"], + + # Default Elixir project rules + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] + ] + ``` + + Now, when you run `mix format`, the formatter should apply Ecto's custom rules when formatting + your migrations (e.g. 
no brackets are automatically added when creating columns with `add/3`). + + ## Repo configuration + + ### Migrator configuration + + These options configure where Ecto stores and how Ecto runs your migrations: + + * `:migration_source` - Version numbers of migrations will be saved in a + table named `schema_migrations` by default. You can configure the name of + the table via: + + config :app, App.Repo, migration_source: "my_migrations" + + * `:migration_lock` - By default, Ecto will lock the migration source to throttle + multiple nodes to run migrations one at a time. You can disable the `migration_lock` + by setting it to `false`. You may also select a different locking strategy if + supported by the adapter. See the adapter docs for more information. + + config :app, App.Repo, migration_lock: false + + # Or use a different locking strategy. For example, Postgres can use advisory + # locks but be aware that your database configuration might not make this a good + # fit. See the Ecto.Adapters.Postgres for more information: + config :app, App.Repo, migration_lock: :pg_advisory_lock + + * `:migration_repo` - The migration repository is where the table managing the + migrations will be stored (`migration_source` defines the table name). It defaults + to the given repository itself but you can configure it via: + + config :app, App.Repo, migration_repo: App.MigrationRepo + + * `:migration_cast_version_column` - Ecto uses a `version` column of type + `bigint` for the underlying migrations table (usually `schema_migrations`). By + default, Ecto doesn't cast this to a different type when reading or writing to + the database when running migrations. However, some web frameworks store this + column as a string. For compatibility reasons, you can set this option to `true`, + which makes Ecto perform a `CAST(version AS int)`. 
This used to be the default + behavior up to Ecto 3.10, so if you are upgrading to 3.11+ and want to keep the + old behavior, set this option to `true`. + + * `:priv` - the priv directory for the repo with the location of important assets, + such as migrations. For a repository named `MyApp.FooRepo`, `:priv` defaults to + "priv/foo_repo" and migrations should be placed at "priv/foo_repo/migrations" + + * `:start_apps_before_migration` - A list of applications to be started before + running migrations. Used by `Ecto.Migrator.with_repo/3` and the migration tasks: + + config :app, App.Repo, start_apps_before_migration: [:ssl, :some_custom_logger] + + ### Migrations configuration + + These options configure the default values used by migrations. **It is generally + discouraged to change any of those configurations after your database is deployed + to production, as changing these options will retroactively change how all + migrations work**. + + * `:migration_primary_key` - By default, Ecto uses the `:id` column with type + `:bigserial`, but you can configure it via: + + config :app, App.Repo, migration_primary_key: [name: :uuid, type: :binary_id] + + config :app, App.Repo, migration_primary_key: false + + For Postgres version >= 10 `:identity` key may be used. + By default, all :identity column will be bigints. You may provide optional + parameters for `:start_value` and `:increment` to customize the created + sequence. 
Config example: + + config :app, App.Repo, migration_primary_key: [type: :identity] + + * `:migration_foreign_key` - By default, Ecto uses the `primary_key` type + for foreign keys when `references/2` is used, but you can configure it via: + + config :app, App.Repo, migration_foreign_key: [column: :uuid, type: :binary_id] + + * `:migration_timestamps` - By default, Ecto uses the `:naive_datetime` as the type, + `:inserted_at` as the name of the column for storing insertion times, `:updated_at` as + the name of the column for storing last-updated-at times, but you can configure it + via: + + config :app, App.Repo, migration_timestamps: [ + type: :utc_datetime, + inserted_at: :created_at, + updated_at: :changed_at + ] + + * `:migration_default_prefix` - Ecto defaults to `nil` for the database prefix for + migrations, but you can configure it via: + + config :app, App.Repo, migration_default_prefix: "my_prefix" + + ## Collations + + Collations can be set on a column with the option `:collation`. This can be + useful when relying on ASCII sorting of characters when using a fractional index + for example. All supported collations and types that support setting a collation + are not known by `ecto_sql` and specifying an incorrect collation or a collation on + an unsupported type might cause a migration to fail. Be sure to match the collation + on any column that references another column. + + def change do + create table(:collate_reference) do + add :name, :string, collation: "POSIX" + end + + create table(:collate) do + add :string, :string, collation: "POSIX" + add :name_ref, references(:collate_reference, type: :string, column: :name), collation: "POSIX" + end + end + + ## Comments + + Migrations where you create or alter a table support specifying table + and column comments. The same can be done when creating constraints + and indexes. Not all databases support this feature. 
+ + def up do + create index("posts", [:name], comment: "Index Comment") + create constraint("products", "price_must_be_positive", check: "price > 0", comment: "Constraint Comment") + create table("weather", prefix: "north_america", comment: "Table Comment") do + add :city, :string, size: 40, comment: "Column Comment" + timestamps() + end + end + + ## Prefixes + + Migrations support specifying a table prefix or index prefix which will + target either a schema (if using PostgreSQL) or a different database (if using + MySQL). If no prefix is provided, the default schema or database is used. + + Any reference declared in the table migration refers by default to the table + with the same declared prefix. The prefix is specified in the table options: + + def up do + create table("weather", prefix: "north_america") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + add :group_id, references(:groups) + + timestamps() + end + + create index("weather", [:city], prefix: "north_america") + end + + Note: if using MySQL with a prefixed table, you must use the same prefix + for the references since cross-database references are not supported. + + When using a prefixed table with either MySQL or PostgreSQL, you must use the + same prefix for the index field to ensure that you index the prefix-qualified + table. + + ## Transaction Callbacks + + If possible, each migration runs inside a transaction. This is true for Postgres, + but not true for MySQL, as the latter does not support DDL transactions. + + In some rare cases, you may need to execute some common behavior after beginning + a migration transaction, or before committing that transaction. For instance, one + might desire to set a `lock_timeout` for each lock in the migration transaction. + + You can do so by defining `c:after_begin/0` and `c:before_commit/0` callbacks to + your migration. 
+ + However, if you need to do so for every migration module, implementing this callback + for every migration can be quite repetitive. Luckily, you can handle this by + providing your migration module: + + defmodule MyApp.Migration do + defmacro __using__(_) do + quote do + use Ecto.Migration + + def after_begin() do + repo().query! "SET lock_timeout TO '5s'" + end + end + end + end + + Then in your migrations you can `use MyApp.Migration` to share this behavior + among all your migrations. + + ## Additional resources + + * The [Safe Ecto Migrations guide](https://github.com/fly-apps/safe-ecto-migrations) + + """ + + @doc """ + Migration code to run immediately after the transaction is opened. + + Keep in mind that it is treated like any normal migration code, and should + consider both the up *and* down cases of the migration. + """ + @callback after_begin() :: term + + @doc """ + Migration code to run immediately before the transaction is closed. + + Keep in mind that it is treated like any normal migration code, and should + consider both the up *and* down cases of the migration. + """ + @callback before_commit() :: term + @optional_callbacks after_begin: 0, before_commit: 0 + + defmodule Index do + @moduledoc """ + Used internally by adapters. + + To define an index in a migration, see `Ecto.Migration.index/3`. 
+ """ + defstruct table: nil, + prefix: nil, + name: nil, + columns: [], + unique: false, + concurrently: false, + using: nil, + include: [], + only: false, + nulls_distinct: nil, + where: nil, + comment: nil, + options: nil + + @type column :: atom | String.t() | {index_dir(), atom | String.t()} + + @type index_dir :: + :asc + | :asc_nulls_first + | :asc_nulls_last + | :desc + | :desc_nulls_first + | :desc_nulls_last + + @type t :: %__MODULE__{ + table: String.t(), + prefix: String.t() | nil, + name: String.t() | atom, + columns: [column()], + unique: boolean, + concurrently: boolean, + using: atom | String.t(), + only: boolean, + include: [atom | String.t()], + nulls_distinct: boolean | nil, + where: atom | String.t(), + comment: String.t() | nil, + options: String.t() + } + end + + defmodule Table do + @moduledoc """ + Used internally by adapters. + + To define a table in a migration, see `Ecto.Migration.table/2`. + """ + defstruct name: nil, prefix: nil, comment: nil, primary_key: true, engine: nil, options: nil + + @type t :: %__MODULE__{ + name: String.t(), + prefix: String.t() | nil, + comment: String.t() | nil, + primary_key: boolean | keyword(), + engine: atom, + options: String.t() + } + end + + defmodule Reference do + @moduledoc """ + Used internally by adapters. + + To define a reference in a migration, see `Ecto.Migration.references/2`. + """ + defstruct name: nil, + prefix: nil, + table: nil, + column: :id, + type: :bigserial, + on_delete: :nothing, + on_update: :nothing, + validate: true, + with: [], + match: nil, + options: [] + + @typedoc """ + The reference struct. + + The `:prefix` field is deprecated and should instead be stored in the `:options` field. 
+ """ + @type t :: %__MODULE__{ + table: String.t(), + prefix: String.t() | nil, + column: atom, + type: atom, + on_delete: atom, + on_update: atom, + validate: boolean, + with: list, + match: atom | nil, + options: [{:prefix, String.t() | nil}] + } + end + + defmodule Constraint do + @moduledoc """ + Used internally by adapters. + + To define a constraint in a migration, see `Ecto.Migration.constraint/3`. + """ + defstruct name: nil, + table: nil, + check: nil, + exclude: nil, + prefix: nil, + comment: nil, + validate: true + + @type t :: %__MODULE__{ + name: atom, + table: String.t(), + prefix: String.t() | nil, + check: String.t() | nil, + exclude: String.t() | nil, + comment: String.t() | nil, + validate: boolean + } + end + + defmodule Command do + @moduledoc """ + Used internally by adapters. + + This represents the up and down legs of a reversible raw command + that is usually defined with `Ecto.Migration.execute/1`. + + To define a reversible command in a migration, see `Ecto.Migration.execute/2`. + """ + defstruct up: nil, down: nil + @type t :: %__MODULE__{up: String.t(), down: String.t()} + end + + alias Ecto.Migration.Runner + + @doc false + defmacro __using__(_) do + quote location: :keep do + import Ecto.Migration + @disable_ddl_transaction false + @disable_migration_lock false + @before_compile Ecto.Migration + end + end + + @doc false + defmacro __before_compile__(_env) do + quote do + def __migration__ do + [ + disable_ddl_transaction: @disable_ddl_transaction, + disable_migration_lock: @disable_migration_lock + ] + end + end + end + + @doc """ + Creates a table. + + By default, the table will also include an `:id` primary key field that + has a type of `:bigserial`. Check the `table/2` docs for more information. 
+ + ## Examples + + create table(:posts) do + add :title, :string, default: "Untitled" + add :body, :text + + timestamps() + end + + """ + defmacro create(object, do: block) do + expand_create(object, :create, block) + end + + @doc """ + Creates a table if it does not exist. + + Works just like `create/2` but does not raise an error when the table + already exists. + """ + defmacro create_if_not_exists(object, do: block) do + expand_create(object, :create_if_not_exists, block) + end + + defp expand_create(object, command, block) do + quote do + table = %Table{} = unquote(object) + Runner.start_command({unquote(command), Ecto.Migration.__prefix__(table)}) + + if primary_key = Ecto.Migration.__primary_key__(table) do + {name, type, opts} = primary_key + add(name, type, opts) + end + + unquote(block) + Runner.end_command() + table + end + end + + @doc """ + Alters a table. + + ## Examples + + alter table("posts") do + add :summary, :text + modify :title, :text + remove :views + end + + """ + defmacro alter(object, do: block) do + quote do + table = %Table{} = unquote(object) + Runner.start_command({:alter, Ecto.Migration.__prefix__(table)}) + unquote(block) + Runner.end_command() + end + end + + @doc """ + Creates one of the following: + + * an index + * a table with only the :id primary key + * a constraint + + When reversing (in a `change/0` running backwards), indexes are only dropped + if they exist, and no errors are raised. To enforce dropping an index, use + `drop/1`. 
+ + ## Examples + + create index("posts", [:name]) + create table("version") + create constraint("products", "price_must_be_positive", check: "price > 0") + + """ + def create(%Index{} = index) do + Runner.execute({:create, __prefix__(index)}) + index + end + + def create(%Constraint{} = constraint) do + Runner.execute({:create, __prefix__(constraint)}) + constraint + end + + def create(%Table{} = table) do + do_create(table, :create) + table + end + + @doc """ + Creates an index or a table with only `:id` field if one does not yet exist. + + ## Examples + + create_if_not_exists index("posts", [:name]) + + create_if_not_exists table("version") + + """ + def create_if_not_exists(%Index{} = index) do + Runner.execute({:create_if_not_exists, __prefix__(index)}) + end + + def create_if_not_exists(%Table{} = table) do + do_create(table, :create_if_not_exists) + end + + defp do_create(table, command) do + columns = + if primary_key = Ecto.Migration.__primary_key__(table) do + {name, type, opts} = primary_key + [{:add, name, type, opts}] + else + [] + end + + Runner.execute({command, __prefix__(table), columns}) + end + + @doc """ + Drops one of the following: + + * an index + * a table + * a constraint + + ## Examples + + drop index("posts", [:name]) + drop table("posts") + drop constraint("products", "price_must_be_positive") + drop index("posts", [:name]), mode: :cascade + drop table("posts"), mode: :cascade + + ## Options + + * `:mode` - when set to `:cascade`, automatically drop objects that depend + on the index, and in turn all objects that depend on those objects + on the table. 
Default is `:restrict` + + """ + def drop(%{} = index_or_table_or_constraint, opts \\ []) when is_list(opts) do + Runner.execute( + {:drop, __prefix__(index_or_table_or_constraint), Keyword.get(opts, :mode, :restrict)} + ) + + index_or_table_or_constraint + end + + @doc """ + Drops one of the following if it exists: + + * an index + * a table + * a constraint + + Does not raise an error if the specified table or index does not exist. + + ## Examples + + drop_if_exists index("posts", [:name]) + drop_if_exists table("posts") + drop_if_exists constraint("products", "price_must_be_positive") + drop_if_exists index("posts", [:name]), mode: :cascade + drop_if_exists table("posts"), mode: :cascade + + ## Options + + * `:mode` - when set to `:cascade`, automatically drop objects that depend + on the index, and in turn all objects that depend on those objects + on the table. Default is `:restrict` + + """ + def drop_if_exists(%{} = index_or_table_or_constraint, opts \\ []) when is_list(opts) do + mode = Keyword.get(opts, :mode, :restrict) + Runner.execute({:drop_if_exists, __prefix__(index_or_table_or_constraint), mode}) + index_or_table_or_constraint + end + + @doc """ + Returns a table struct that can be given to `create/2`, `alter/2`, `drop/1`, + etc. + + ## Examples + + create table("products") do + add :name, :string + add :price, :decimal + end + + drop table("products") + + create table("products", primary_key: false) do + add :name, :string + add :price, :decimal + end + + create table("daily_prices", primary_key: false, options: "PARTITION BY RANGE (date)") do + add :name, :string, primary_key: true + add :date, :date, primary_key: true + add :price, :decimal + end + + create table("users", primary_key: false) do + add :id, :identity, primary_key: true, start_value: 100, increment: 20 + end + + ## Options + + * `:primary_key` - when `false`, a primary key field is not generated on table + creation. 
Alternatively, a keyword list in the same style of the + `:migration_primary_key` repository configuration can be supplied + to control the generation of the primary key field. The keyword list + must include `:name` and `:type`. See `add/3` for further options. + * `:engine` - customizes the table storage for supported databases. For MySQL, + the default is InnoDB. + * `:prefix` - the prefix for the table. This prefix will automatically be used + for all constraints and references defined for this table unless explicitly + overridden in said constraints/references. + * `:comment` - adds a comment to the table. + * `:options` - provide custom options that will be appended after the generated + statement. For example, "WITH", "INHERITS", or "ON COMMIT" clauses. "PARTITION BY" + can be provided for databases that support table partitioning. + + """ + def table(name, opts \\ []) + + def table(name, opts) when is_atom(name) do + table(Atom.to_string(name), opts) + end + + def table(name, opts) when is_binary(name) and is_list(opts) do + struct!(%Table{name: name}, opts) + end + + @doc ~S""" + Returns an index struct that can be given to `create/1`, `drop/1`, etc. + + Expects the table name as the first argument and the index field(s) as + the second. The fields can be atoms, representing columns, or strings, + representing expressions that are sent as-is to the database. + + ## Options + + * `:name` - the name of the index. Can be provided as a string or an atom. + Defaults to "#{table}_#{column}_index". + * `:prefix` - specify an optional prefix for the index. + * `:unique` - indicates whether the index should be unique. Defaults to `false`. + * `:comment` - adds a comment to the index. + * `:using` - configures the index type. + + Some options are supported only by some databases: + + * `:concurrently` - indicates whether the index should be created/dropped + concurrently in MSSQL and PostgreSQL. 
+ * `:include` - specify fields for a covering index, + [supported by PostgreSQL only](https://www.postgresql.org/docs/current/indexes-index-only-scans.html). + * `:nulls_distinct` - specify whether null values should be considered + distinct for a unique index. Defaults to `nil`, which will not add the + parameter to the generated SQL and thus use the database default. + This option is currently only supported by PostgreSQL 15+. + For MySQL, it is always false. For MSSQL, it is always true. + See the dedicated section on this option for more information. + * `:only` - indicates to not recurse creating indexes on partitions. + [supported by PostgreSQL only](https://www.postgresql.org/docs/current/ddl-partitioning.html#DDL-PARTITIONING-DECLARATIVE-MAINTENANCE). + * `:options` - configures index options (WITH clause) for both PostgreSQL + and MSSQL + * `:where` - specify conditions for a partial index (PostgreSQL) / + filtered index (MSSQL). + + ## Adding/dropping indexes concurrently + + PostgreSQL supports adding/dropping indexes concurrently (see the + [docs](http://www.postgresql.org/docs/current/static/sql-createindex.html)). + However, this feature does not work well with the transactions used by + Ecto to guarantee integrity during migrations. + + You can address this with two changes: + + 1. Change your repository to use PG advisory locks as the migration lock. + Note this may not be supported by Postgres-like databases and proxies. + + 2. Disable DDL transactions. Doing this removes the guarantee that all of + the changes in the migration will happen at once, so you will want to + keep it short. + + If the database adapter supports several migration lock strategies, such as + Postgrex, then review those strategies and consider using a strategy that + utilizes advisory locks to facilitate running migrations one at a time even + across multiple nodes. 
For example: + + ### Config file (PostgreSQL) + + config MyApp.Repo, migration_lock: :pg_advisory_lock + + ### Migration file + + defmodule MyRepo.Migrations.CreateIndexes do + use Ecto.Migration + @disable_ddl_transaction true + + def change do + create index("posts", [:slug], concurrently: true) + end + end + + Alternately, you can add `@disable_migration_lock true` to your migration file. + This would mean that different nodes in a multi-node setup could run the same + migration at once. It is recommended to isolate your migrations to a single node + when using concurrent index creation without an advisory lock. + + ## Index types + + When creating an index, the index type can be specified with the `:using` + option. The `:using` option can be an atom or a string, and its value is + passed to the generated `USING` clause as-is. + + For example, PostgreSQL supports several index types like B-tree (the + default), Hash, GIN, and GiST. More information on index types can be found + in the [PostgreSQL docs](http://www.postgresql.org/docs/current/indexes-types.html). + + ## Partial indexes + + Databases like PostgreSQL and MSSQL support partial indexes. + + A partial index is an index built over a subset of a table. The subset + is defined by a conditional expression using the `:where` option. + The `:where` option can be an atom or a string; its value is passed + to the generated `WHERE` clause as-is. + + More information on partial indexes can be found in the [PostgreSQL + docs](http://www.postgresql.org/docs/current/indexes-partial.html). + + ## The `:nulls_distinct` option + + A unique index does not prevent multiple null values by default in most databases. + + For example, imagine we have a "products" table and need to guarantee that + sku's are unique within their category, but the category is optional. 
+ Creating a regular unique index over the sku and category_id fields with: + + create index("products", [:sku, :category_id], unique: true) + + will allow products with the same sku to be inserted if their category_id is `nil`. + The `:nulls_distinct` option can be used to change this behavior by considering + null values as equal, i.e. not distinct: + + create index("products", [:sku, :category_id], unique: true, nulls_distinct: false) + + This option is currently only supported by PostgreSQL 15+. + As a workaround for older PostgreSQL versions and other databases, an + additional partial unique index for the sku can be created: + + create index("products", [:sku, :category_id], unique: true) + create index("products", [:sku], unique: true, where: "category_id IS NULL") + + ## Sorting direction + + You can specify the sorting direction of the index by using a keyword list: + + create index("products", [desc: :sku]) + + The following keywords are supported: + + * `:asc` + * `:asc_nulls_last` + * `:asc_nulls_first` + * `:desc` + * `:desc_nulls_last` + * `:desc_nulls_first` + + The `*_nulls_first` and `*_nulls_last` variants are not supported by all + databases. 
+ + ## Examples + + # With no name provided, the name of the below index defaults to + # products_category_id_sku_index + create index("products", [:category_id, :sku], unique: true) + + # The name can also be set explicitly + create index("products", [:category_id, :sku], name: :my_special_name) + + # Indexes can be added concurrently + create index("products", [:category_id, :sku], concurrently: true) + + # The index type can be specified + create index("products", [:name], using: :hash) + + # Partial indexes are created by specifying a :where option + create index("products", [:user_id], where: "price = 0", name: :free_products_index) + + # Covering indexes are created by specifying a :include option + create index("products", [:user_id], include: [:category_id]) + + Indexes also support custom expressions. Some databases may require the + index expression to be written between parentheses: + + # Create an index on a custom expression + create index("products", ["(lower(name))"], name: :products_lower_name_index) + + # Create a tsvector GIN index on PostgreSQL + create index("products", ["(to_tsvector('english', name))"], + name: :products_name_vector, using: "GIN") + + If the expression is a column name, it will not be quoted. This may cause issues + when the column is named after a reserved word. Consider using an atom instead. + For example, the name `offset` is reserved in many databases so the following + could produce an error: `create index("products", ["offset"])`. 
+ """ + def index(table, columns, opts \\ []) + + def index(table, columns, opts) when is_atom(table) do + index(Atom.to_string(table), columns, opts) + end + + def index(table, column, opts) when is_binary(table) and is_atom(column) do + index(table, [column], opts) + end + + def index(table, columns, opts) when is_binary(table) and is_list(columns) and is_list(opts) do + validate_index_opts!(opts) + index = struct!(%Index{table: table, columns: columns}, opts) + %{index | name: index.name || default_index_name(index)} + end + + @doc """ + Shortcut for creating a unique index. + + See `index/3` for more information. + """ + def unique_index(table, columns, opts \\ []) + + def unique_index(table, columns, opts) when is_list(opts) do + index(table, columns, [unique: true] ++ opts) + end + + defp default_index_name(index) do + [index.table, index.columns, "index"] + |> List.flatten() + |> Enum.map_join("_", &column_name/1) + |> String.to_atom() + end + + defp column_name({_dir, column}), do: column_name(column) + + defp column_name(column) do + column + |> to_string() + |> String.replace(~r"[^\w]", "_") + |> String.replace_trailing("_", "") + end + + @doc """ + Executes arbitrary SQL, anonymous function or a keyword command. + + The argument is typically a string, containing the SQL command to be executed. + Keyword commands exist for non-SQL adapters and are not used in most + situations. + + You may instead run arbitrary code as part of your migration by supplying an + anonymous function. This defers execution of the anonymous function until + the migration callback has terminated (see [Executing and + flushing](#module-executing-and-flushing)). This is most often used in + combination with `repo/0` by library authors who want to create high-level + migration helpers. + + Reversible commands can be defined by calling `execute/2`. 
+ + ## Examples + + execute "CREATE EXTENSION postgres_fdw" + + execute create: "posts", capped: true, size: 1024 + + execute(fn -> repo().query!("SELECT $1::integer + $2", [40, 2], [log: :info]) end) + + execute(fn -> repo().update_all("posts", set: [published: true]) end) + """ + def execute(command) when is_binary(command) or is_function(command, 0) or is_list(command) do + Runner.execute(command) + end + + @doc """ + Executes reversible SQL commands. + + This is useful for database-specific functionality that does not + warrant special support in Ecto, for example, creating and dropping + a PostgreSQL extension. The `execute/2` form avoids having to define + separate `up/0` and `down/0` blocks that each contain an `execute/1` + expression. + + The allowed parameters are explained in `execute/1`. + + ## Examples + + defmodule MyApp.MyMigration do + use Ecto.Migration + + def change do + execute "CREATE EXTENSION postgres_fdw", "DROP EXTENSION postgres_fdw" + execute(&execute_up/0, &execute_down/0) + end + + defp execute_up, do: repo().query!("select 'Up query …';", [], [log: :info]) + defp execute_down, do: repo().query!("select 'Down query …';", [], [log: :info]) + end + """ + def execute(up, down) + when (is_binary(up) or is_function(up, 0) or is_list(up)) and + (is_binary(down) or is_function(down, 0) or is_list(down)) do + Runner.execute(%Command{up: up, down: down}) + end + + @doc """ + Executes a SQL command from a file. + + The argument must be a path to a file containing a SQL command. + + Reversible commands can be defined by calling `execute_file/2`. + """ + def execute_file(path) when is_binary(path) do + command = File.read!(path) + Runner.execute(command) + end + + @doc """ + Executes reversible SQL commands from files. + + Each argument must be a path to a file containing a SQL command. + + See `execute/2` for more information on executing SQL commands. 
+ """ + def execute_file(up_path, down_path) when is_binary(up_path) and is_binary(down_path) do + up = File.read!(up_path) + down = File.read!(down_path) + Runner.execute(%Command{up: up, down: down}) + end + + @doc """ + Gets the migrator direction. + """ + @spec direction :: :up | :down + def direction do + Runner.migrator_direction() + end + + @doc """ + Gets the migrator repo. + """ + @spec repo :: Ecto.Repo.t() + def repo do + Runner.repo() + end + + @doc """ + Gets the migrator prefix. + """ + def prefix do + Runner.prefix() + end + + @doc """ + Adds a column when creating or altering a table. + + This function also accepts Ecto primitive types as column types + that are normalized by the database adapter. For example, + `:string` is converted to `:varchar`, `:binary` to `:bits` or `:blob`, + and so on. + + However, the column type is not always the same as the type used in your + schema. For example, a schema that has a `:string` field can be supported by + columns of type `:char`, `:varchar`, `:text`, and others. For this reason, + this function also accepts `:text` and other type annotations that are native + to the database. These are passed to the database as-is. + + To sum up, the column type may be either an Ecto primitive type, + which is normalized in cases where the database does not understand it, + such as `:string` or `:binary`, or a database type which is passed as-is. + Custom Ecto types like `Ecto.UUID` are not supported because + they are application-level concerns and may not always map to the database. + + Note: It may be necessary to quote case-sensitive, user-defined type names. + For example, PostgreSQL normalizes all identifiers to lower case unless + they are wrapped in double quotes. To ensure a case-sensitive type name + is sent properly, it must be defined `:'"LikeThis"'` or `:"\"LikeThis\""`. + This is not necessary for column names because Ecto quotes them automatically. 
+ Type names are not automatically quoted because they may be expressions such + as `varchar(255)`. + + ## Examples + + create table("posts") do + add :title, :string, default: "Untitled" + end + + alter table("posts") do + add :summary, :text # Database type + add :object, :map # Elixir type which is handled by the database + add :custom, :'"UserDefinedType"' # A case-sensitive, user-defined type name + add :identity, :integer, generated: "BY DEFAULT AS IDENTITY" # Postgres generated identity column + add :generated_psql, :string, generated: "ALWAYS AS (id::text) STORED" # Postgres calculated column + add :generated_other, :string, generated: "CAST(id AS char)" # MySQL and TDS calculated column + end + + ## Options + + * `:primary_key` - when `true`, marks this field as the primary key. + If multiple fields are marked, a composite primary key will be created. + * `:default` - the column's default value. It can be a string, number, empty + list, list of strings, list of numbers, or a fragment generated by + `fragment/1`. + * `:null` - determines whether the column accepts null values. When not specified, + the database will use its default behaviour (which is to treat the column as nullable + in most databases). + * `:size` - the size of the type (for example, the number of characters). + The default is no size, except for `:string`, which defaults to `255`. + * `:precision` - the precision for a numeric type. Required when `:scale` is + specified. + * `:scale` - the scale of a numeric type. Defaults to `0`. + * `:comment` - adds a comment to the added column. + * `:collation` - the collation of the text type. + * `:after` - positions field after the specified one. Only supported on MySQL, + it is ignored by other databases. + * `:generated` - a string representing the expression for a generated column. See + above for a comprehensive set of examples for each of the built-in adapters. 
If + specified alongside `:start_value`/`:increment`, those options will be ignored. + * `:start_value` - option for `:identity` key, represents initial value in sequence + generation. Default is defined by the database. + * `:increment` - option for `:identity` key, represents increment value for + sequence generation. Default is defined by the database. + * `:fields` - option for `:duration` type. Restricts the set of stored interval fields + in the database. + + """ + def add(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand({:add, column, type, opts}) + end + + @doc """ + Adds a column if it does not exist yet when altering a table. + + If the `type` value is a `%Reference{}`, it is used to add a constraint. + + `type` and `opts` are exactly the same as in `add/3`. + + This command is not reversible as Ecto does not know about column existence before the creation attempt. + + ## Examples + + alter table("posts") do + add_if_not_exists :title, :string, default: "" + end + + """ + def add_if_not_exists(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand({:add_if_not_exists, column, type, opts}) + end + + @doc """ + Renames a table or index. + + ## Examples + + # rename a table + rename table("posts"), to: table("new_posts") + + # rename an index + rename(index(:people, [:name], name: "persons_name_index"), to: "people_name_index") + """ + def rename(%Table{} = table_current, to: %Table{} = table_new) do + Runner.execute({:rename, __prefix__(table_current), __prefix__(table_new)}) + table_new + end + + def rename(%Index{} = current_index, to: new_name) do + Runner.execute({:rename, __prefix__(current_index), new_name}) + %{current_index | name: new_name} + end + + @doc """ + Renames a column. + + Note that this occurs outside of the `alter` statement. 
+ + ## Examples + + rename table("posts"), :title, to: :summary + """ + def rename(%Table{} = table, current_column, to: new_column) + when is_atom(current_column) and is_atom(new_column) do + Runner.execute({:rename, __prefix__(table), current_column, new_column}) + table + end + + @doc """ + Generates a fragment to be used as a default value. + + ## Examples + + create table("posts") do + add :inserted_at, :naive_datetime, default: fragment("now()") + end + """ + def fragment(expr) when is_binary(expr) do + {:fragment, expr} + end + + @doc """ + Adds `:inserted_at` and `:updated_at` timestamp columns. + + Those columns are of `:naive_datetime` type. A list of `opts` can be given + to customize the generated fields. + + Following options will override the repo configuration specified by + `:migration_timestamps` option. + + ## Options + + * `:inserted_at` - the name of the column for storing insertion times. + Setting it to `false` disables the column. + * `:updated_at` - the name of the column for storing last-updated-at times. + Setting it to `false` disables the column. + * `:type` - the type of the `:inserted_at` and `:updated_at` columns. + Defaults to `:naive_datetime`. + * `:default` - the columns' default value. It can be a string, number, empty + list, list of strings, list of numbers, or a fragment generated by + `fragment/1`. + * `:null` - determines whether the column accepts null values. Defaults to + `false`. 
+ + """ + def timestamps(opts \\ []) when is_list(opts) do + opts = Keyword.merge(Runner.repo_config(:migration_timestamps, []), opts) + opts = Keyword.put_new(opts, :null, false) + + {type, opts} = Keyword.pop(opts, :type, :naive_datetime) + {inserted_at, opts} = Keyword.pop(opts, :inserted_at, :inserted_at) + {updated_at, opts} = Keyword.pop(opts, :updated_at, :updated_at) + + if inserted_at != false, do: add(inserted_at, type, opts) + if updated_at != false, do: add(updated_at, type, opts) + end + + @doc """ + Modifies the type of a column when altering a table. + + This command is not reversible unless the `:from` option is provided. + When the `:from` option is set, the adapter will try to drop + the corresponding foreign key constraints before modifying the type. + Generally speaking, you want to pass the type and each option + you are modifying to `:from`, so the column can be rolled back properly. + However, note that `:from` cannot be used to modify primary keys, + as those are generally trickier to revert. + + See `add/3` for more information on supported types. + + If you want to modify a column without changing its type, + such as adding or dropping a null constraints, consider using + the `execute/2` command with the relevant SQL command instead + of `modify/3`, if supported by your database. This may avoid + redundant type updates and be more efficient, as an unnecessary + type update can lock the table, even if the type actually + doesn't change. 
+ + ## Examples + + alter table("posts") do + modify :title, :text + end + + # Self rollback when using the :from option + alter table("posts") do + modify :title, :text, from: :string + end + + # Modify column with rollback options + alter table("posts") do + modify :title, :text, null: false, from: {:string, null: true} + end + + # Modify the :on_delete option of an existing foreign key + alter table("comments") do + modify :post_id, references(:posts, on_delete: :delete_all), + from: references(:posts, on_delete: :nothing) + end + + ## Options + + * `:null` - determines whether the column accepts null values. If this option is + not set, the nullable behaviour of the underlying column is not modified. + * `:default` - changes the default value of the column. + * `:from` - specifies the current type and options of the column. + * `:size` - specifies the size of the type (for example, the number of characters). + The default is no size. + * `:precision` - the precision for a numeric type. Required when `:scale` is + specified. + * `:scale` - the scale of a numeric type. Defaults to `0`. + * `:comment` - adds a comment to the modified column. + * `:collation` - the collation of the text type. + """ + def modify(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand({:modify, column, type, opts}) + end + + @doc """ + Removes a column when altering a table. + + This command is not reversible as Ecto does not know what type it should add + the column back as. See `remove/3` as a reversible alternative. + + ## Examples + + alter table("posts") do + remove :title + end + + """ + def remove(column) when is_atom(column) do + Runner.subcommand({:remove, column}) + end + + @doc """ + Removes a column in a reversible way when altering a table. + + `type` and `opts` are exactly the same as in `add/3`, and + they are used when the command is reversed. 
+ + If the `type` value is a `%Reference{}`, it is used to remove the constraint. + + ## Examples + + alter table("posts") do + remove :title, :string, default: "" + end + + """ + def remove(column, type, opts \\ []) when is_atom(column) do + validate_type!(type) + Runner.subcommand({:remove, column, type, opts}) + end + + @doc """ + Removes a column if the column exists. + + This command is not reversible as Ecto does not know whether or not the column existed before the removal attempt. + + ## Examples + + alter table("posts") do + remove_if_exists :title + end + + """ + def remove_if_exists(column) when is_atom(column) do + Runner.subcommand({:remove_if_exists, column}) + end + + @doc """ + Removes a column if the column exists. + + If the type is a reference, removes the foreign key constraint for the reference first, if it exists. + + This command is not reversible as Ecto does not know whether or not the column existed before the removal attempt. + + ## Examples + + alter table("posts") do + remove_if_exists :author_id, references(:authors) + end + + """ + def remove_if_exists(column, type) when is_atom(column) do + validate_type!(type) + Runner.subcommand({:remove_if_exists, column, type}) + end + + @doc ~S""" + Defines a foreign key. + + By default it assumes you are linking to the referenced table + via its primary key with name `:id`. If you are using a non-default + key setup (e.g. using `uuid` type keys) you must ensure you set the + options, such as `:column` and `:type`, to match your target key. + + ## Examples + + create table("products") do + add :group_id, references("groups") + end + + create table("categories") do + add :group_id, :integer + # A composite foreign that points from categories (product_id, group_id) + # to products (id, group_id) + add :product_id, references("products", with: [group_id: :group_id]) + end + + ## Options + + * `:name` - The name of the underlying reference, which defaults to + "#{table}_#{column}_fkey". 
+ * `:column` - The column name in the referenced table, which defaults to `:id`. + * `:prefix` - The prefix for the reference. Defaults to the prefix + defined by the block's `table/2` struct (the "products" table in + the example above), or `nil`. + * `:type` - The foreign key type, which defaults to `:bigserial`. + * `:on_delete` - What to do if the referenced entry is deleted. May be + `:nothing` (default), `:delete_all`, `:nilify_all`, `{:nilify, columns}`, `:default_all`, `{:default, columns}` + or `:restrict`. `{:nilify, columns}` and `{:default, columns}` expect a list of atoms for `columns` + and is not supported by all databases. + * `:on_update` - What to do if the referenced entry is updated. May be + `:nothing` (default), `:update_all`, `:nilify_all`, or `:restrict`. + * `:validate` - Whether or not to validate the foreign key constraint on + creation or not. Only available in PostgreSQL, and should be followed by + a command to validate the foreign key in a following migration if false. + * `:with` - defines additional keys to the foreign key in order to build + a composite foreign key + * `:match` - select if the match is `:simple`, `:partial`, or `:full`. This is + [supported only by PostgreSQL](https://www.postgresql.org/docs/current/sql-createtable.html) + at the moment. 
+ + """ + def references(table, opts \\ []) + + def references(table, opts) when is_atom(table) do + references(Atom.to_string(table), opts) + end + + def references(table, opts) when is_binary(table) and is_list(opts) do + reference_options = Keyword.take(opts, [:prefix]) + + opts = + foreign_key_repo_opts() + |> Keyword.merge(opts) + |> Keyword.put(:options, reference_options) + + reference = struct!(%Reference{table: table}, opts) + check_on_delete!(reference.on_delete) + check_on_update!(reference.on_update) + + reference + end + + defp foreign_key_repo_opts() do + case Runner.repo_config(:migration_primary_key, []) do + false -> [] + opts -> opts + end + |> Keyword.take([:type]) + |> Keyword.merge(Runner.repo_config(:migration_foreign_key, [])) + end + + defp check_on_delete!(on_delete) + when on_delete in [:nothing, :delete_all, :nilify_all, :default_all, :restrict], + do: :ok + + defp check_on_delete!({option, columns}) + when option in [:nilify, :default] and is_list(columns) do + unless Enum.all?(columns, &is_atom/1) do + raise ArgumentError, + "expected `columns` in `{#{inspect(option)}, columns}` to be a list of atoms, got: #{inspect(columns)}" + end + + :ok + end + + defp check_on_delete!(on_delete) do + raise ArgumentError, "unknown :on_delete value: #{inspect(on_delete)}" + end + + defp check_on_update!(on_update) + when on_update in [:nothing, :update_all, :nilify_all, :restrict], + do: :ok + + defp check_on_update!(on_update) do + raise ArgumentError, "unknown :on_update value: #{inspect(on_update)}" + end + + @doc ~S""" + Defines a constraint (either a check constraint or an exclusion constraint) + to be evaluated by the database when a row is inserted or updated. 
+ + ## Examples + + create constraint("users", :price_must_be_positive, check: "price > 0") + create constraint("size_ranges", :no_overlap, exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|) + drop constraint("products", "price_must_be_positive") + + ## Options + + * `:check` - A check constraint expression. Required when creating a check constraint. + * `:exclude` - An exclusion constraint expression. Required when creating an exclusion constraint. + * `:prefix` - The prefix for the table. + * `:validate` - Whether or not to validate the constraint on creation (true by default). See the section below for more information + * `:comment` - adds a comment to the constraint. + + + ## Using `validate: false` + + Validation/Enforcement of a constraint is enabled by default, but disabling on constraint + creation is supported by PostgreSQL, and MySQL, and can be done by setting `validate: false`. + + Setting `validate: false` as an option can be useful, as the creation of a constraint will cause + a full table scan to check existing rows. The constraint will still be enforced for subsequent + inserts and updates, but should then be updated in a following command or migration to enforce + the new constraint. + + Validating / Enforcing the constraint in a later command, or migration, can be done like so: + + ``` + def change do + # PostgreSQL +   execute "ALTER TABLE products VALIDATE CONSTRAINT price_must_be_positive", "" + + # MySQL +   execute "ALTER TABLE products ALTER CONSTRAINT price_must_be_positive ENFORCED", "" + end + ``` + + See the [Safe Ecto Migrations guide](https://github.com/fly-apps/safe-ecto-migrations) for an + in-depth explanation of the benefits of this approach. 
+ """ + def constraint(table, name, opts \\ []) + + def constraint(table, name, opts) when is_atom(table) do + constraint(Atom.to_string(table), name, opts) + end + + def constraint(table, name, opts) when is_binary(table) and is_list(opts) do + struct!(%Constraint{table: table, name: name}, opts) + end + + @doc """ + Execute all changes specified by the migration so far. + + See [Executing and flushing](#module-executing-and-flushing). + """ + defmacro flush do + quote do + if direction() == :down and not function_exported?(__MODULE__, :down, 0) do + raise "calling flush() inside change when doing rollback is not supported." + else + Runner.flush() + end + end + end + + # Validation helpers + defp validate_type!(:datetime) do + raise ArgumentError, + "the :datetime type in migrations is not supported, " <> + "please use :utc_datetime or :naive_datetime instead" + end + + defp validate_type!(type) when is_atom(type) do + case Atom.to_string(type) do + "Elixir." <> _ -> + raise_invalid_migration_type!(type) + + _ -> + :ok + end + end + + defp validate_type!({type, subtype}) when is_atom(type) and is_atom(subtype) do + validate_type!(subtype) + end + + defp validate_type!({type, subtype}) when is_atom(type) and is_tuple(subtype) do + for t <- Tuple.to_list(subtype), do: validate_type!(t) + end + + defp validate_type!(%Reference{} = reference) do + reference + end + + defp validate_type!(type) do + raise_invalid_migration_type!(type) + end + + defp raise_invalid_migration_type!(type) do + raise ArgumentError, """ + invalid migration type: #{inspect(type)}. Expected one of: + + * an atom, such as :string + * a quoted atom, such as :"integer unsigned" + * a tuple representing a composite type, such as {:array, :integer} or {:map, :string} + * a reference, such as references(:users) + + Ecto types are automatically translated to database types. All other types + are sent to the database as is. 
+ + Types defined through Ecto.Type or Ecto.ParameterizedType aren't allowed, + use their underlying types instead. + """ + end + + defp validate_index_opts!(opts) when is_list(opts) do + if opts[:nulls_distinct] != nil and opts[:unique] != true do + raise ArgumentError, "the `nulls_distinct` option can only be used with unique indexes" + end + + case Keyword.get_values(opts, :where) do + [_, _ | _] -> + raise ArgumentError, + "only one `where` keyword is supported when declaring a partial index. " <> + "To specify multiple conditions, write a single WHERE clause using AND between them" + + _ -> + :ok + end + end + + defp validate_index_opts!(opts), do: opts + + defp validate_precision_opts!(opts, column) when is_list(opts) do + if opts[:scale] && !opts[:precision] do + raise ArgumentError, "column #{Atom.to_string(column)} is missing precision option" + end + end + + @doc false + def __prefix__(%{prefix: prefix} = index_or_table) do + runner_prefix = Runner.prefix() + + cond do + is_nil(prefix) -> + prefix = runner_prefix || Runner.repo_config(:migration_default_prefix, nil) + %{index_or_table | prefix: prefix} + + is_nil(runner_prefix) or runner_prefix == to_string(prefix) -> + index_or_table + + true -> + raise Ecto.MigrationError, + message: + "the :prefix option `#{prefix}` does not match the migrator prefix `#{runner_prefix}`" + end + end + + @doc false + def __primary_key__(table) do + case table.primary_key do + false -> + false + + true -> + case Runner.repo_config(:migration_primary_key, []) do + false -> false + opts when is_list(opts) -> pk_opts_to_tuple(opts) + end + + opts when is_list(opts) -> + pk_opts_to_tuple(opts) + + _ -> + raise ArgumentError, + ":primary_key option must be either a boolean or a keyword list of options" + end + end + + defp pk_opts_to_tuple(opts) do + opts = Keyword.put(opts, :primary_key, true) + {name, opts} = Keyword.pop(opts, :name, :id) + {type, opts} = Keyword.pop(opts, :type, :bigserial) + {name, type, opts} + end +end 
diff --git a/deps/ecto_sql/lib/ecto/migration/runner.ex b/deps/ecto_sql/lib/ecto/migration/runner.ex new file mode 100644 index 0000000..38170b5 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration/runner.ex @@ -0,0 +1,503 @@ +defmodule Ecto.Migration.Runner do + @moduledoc false + use Agent, restart: :temporary + + require Logger + + alias Ecto.Migration.Table + alias Ecto.Migration.Index + alias Ecto.Migration.Constraint + alias Ecto.Migration.Command + + @doc """ + Runs the given migration. + """ + def run(repo, config, version, module, direction, operation, migrator_direction, opts) do + level = Keyword.get(opts, :log, :info) + sql = Keyword.get(opts, :log_migrations_sql, false) + log = %{level: level, sql: sql} + args = {self(), repo, config, module, direction, migrator_direction, log} + + {:ok, runner} = DynamicSupervisor.start_child(Ecto.MigratorSupervisor, {__MODULE__, args}) + metadata(runner, opts) + + log(level, "== Running #{version} #{inspect(module)}.#{operation}/0 #{direction}") + {time, _} = :timer.tc(fn -> perform_operation(repo, module, operation) end) + log(level, "== Migrated #{version} in #{inspect(div(time, 100_000) / 10)}s") + after + stop() + end + + @doc """ + Stores the runner metadata. + """ + def metadata(runner, opts) do + prefix = opts[:prefix] + Process.put(:ecto_migration, %{runner: runner, prefix: prefix && to_string(prefix)}) + end + + @doc """ + Starts the runner for the specified repo. + """ + def start_link({parent, repo, config, module, direction, migrator_direction, log}) do + Agent.start_link(fn -> + Process.link(parent) + + %{ + direction: direction, + repo: repo, + migration: module, + migrator_direction: migrator_direction, + command: nil, + subcommands: [], + log: log, + commands: [], + config: config + } + end) + end + + @doc """ + Stops the runner. + """ + def stop() do + Agent.stop(runner()) + end + + @doc """ + Accesses the given repository configuration. 
+ """ + def repo_config(key, default) do + Agent.get(runner(), &Keyword.get(&1.config, key, default)) + end + + @doc """ + Returns the migrator command (up or down). + + * forward + up: up + * forward + down: down + * forward + change: up + * backward + change: down + + """ + def migrator_direction do + Agent.get(runner(), & &1.migrator_direction) + end + + @doc """ + Gets the repo for this migration + """ + def repo do + Agent.get(runner(), & &1.repo) + end + + @doc """ + Gets the prefix for this migration + """ + def prefix do + case Process.get(:ecto_migration) do + %{prefix: prefix} -> prefix + _ -> raise "could not find migration runner process for #{inspect(self())}" + end + end + + @doc """ + Executes queue migration commands. + + Reverses the order commands are executed when doing a rollback + on a change/0 function and resets commands queue. + """ + def flush do + %{commands: commands, direction: direction, repo: repo, log: log, migration: migration} = + Agent.get_and_update(runner(), fn state -> {state, %{state | commands: []}} end) + + commands = if direction == :backward, do: commands, else: Enum.reverse(commands) + + for command <- commands do + execute_in_direction(repo, migration, direction, log, command) + end + end + + @doc """ + Queues command tuples or strings for execution. + + Ecto.MigrationError will be raised when the server + is in `:backward` direction and `command` is irreversible. + """ + def execute(command) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:ok, %{state | subcommands: [], commands: [command | state.commands]}} + + %{command: _} = state -> + {:error, %{state | command: nil}} + end) + + case reply do + :ok -> + :ok + + :error -> + raise Ecto.MigrationError, "cannot execute nested commands" + end + end + + @doc """ + Starts a command. 
+ """ + def start_command(command) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:ok, %{state | command: command}} + + %{command: _} = state -> + {:error, %{state | command: command}} + end) + + case reply do + :ok -> + :ok + + :error -> + raise Ecto.MigrationError, "cannot execute nested commands" + end + end + + @doc """ + Queues and clears current command. Must call `start_command/1` first. + """ + def end_command do + Agent.update(runner(), fn state -> + {operation, object} = state.command + command = {operation, object, Enum.reverse(state.subcommands)} + %{state | command: nil, subcommands: [], commands: [command | state.commands]} + end) + end + + @doc """ + Adds a subcommand to the current command. Must call `start_command/1` first. + """ + def subcommand(subcommand) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:error, state} + + state -> + {:ok, update_in(state.subcommands, &[subcommand | &1])} + end) + + case reply do + :ok -> + :ok + + :error -> + raise Ecto.MigrationError, message: "cannot execute command outside of block" + end + end + + ## Execute + + defp execute_in_direction(repo, migration, :forward, log, %Command{up: up}) do + log_and_execute_ddl(repo, migration, log, up) + end + + defp execute_in_direction(repo, migration, :forward, log, command) do + log_and_execute_ddl(repo, migration, log, command) + end + + defp execute_in_direction(repo, migration, :backward, log, %Command{down: down}) do + log_and_execute_ddl(repo, migration, log, down) + end + + defp execute_in_direction(repo, migration, :backward, log, command) do + if reversed = reverse(command) do + log_and_execute_ddl(repo, migration, log, reversed) + else + raise Ecto.MigrationError, + message: + "cannot reverse migration command: #{command(command)}. 
" <> + "You will need to explicitly define up/0 and down/0 in your migration" + end + end + + defp reverse({:create, %Index{} = index}), + do: {:drop, index, :restrict} + + defp reverse({:create_if_not_exists, %Index{} = index}), + do: {:drop_if_exists, index, :restrict} + + defp reverse({:drop, %Index{} = index, _}), + do: {:create, index} + + defp reverse({:drop_if_exists, %Index{} = index, _}), + do: {:create_if_not_exists, index} + + defp reverse({:rename, %Index{} = index, new_name}), + do: {:rename, %{index | name: new_name}, index.name} + + defp reverse({:create, %Table{} = table, _columns}), + do: {:drop, table, :restrict} + + defp reverse({:create_if_not_exists, %Table{} = table, _columns}), + do: {:drop_if_exists, table, :restrict} + + defp reverse({:rename, %Table{} = table_current, %Table{} = table_new}), + do: {:rename, table_new, table_current} + + defp reverse({:rename, %Table{} = table, current_column, new_column}), + do: {:rename, table, new_column, current_column} + + defp reverse({:alter, %Table{} = table, changes}) do + if reversed = table_reverse(changes, []) do + {:alter, table, reversed} + end + end + + # It is not a good idea to reverse constraints because + # we can't guarantee data integrity when applying them back. 
+ defp reverse({:create_if_not_exists, %Constraint{} = constraint}), + do: {:drop_if_exists, constraint, :restrict} + + defp reverse({:create, %Constraint{} = constraint}), + do: {:drop, constraint, :restrict} + + defp reverse(_command), do: false + + defp table_reverse([{:remove, name, type, opts} | t], acc) do + table_reverse(t, [{:add, name, type, opts} | acc]) + end + + defp table_reverse([{:modify, name, type, opts} | t], acc) do + case opts[:from] do + nil -> + false + + {reverse_type, from_opts} when is_list(from_opts) -> + reverse_from = {type, Keyword.delete(opts, :from)} + reverse_opts = Keyword.put(from_opts, :from, reverse_from) + table_reverse(t, [{:modify, name, reverse_type, reverse_opts} | acc]) + + reverse_type -> + reverse_opts = Keyword.put(opts, :from, type) + table_reverse(t, [{:modify, name, reverse_type, reverse_opts} | acc]) + end + end + + defp table_reverse([{:add, name, type, _opts} | t], acc) do + table_reverse(t, [{:remove, name, type, []} | acc]) + end + + defp table_reverse([_ | _], _acc) do + false + end + + defp table_reverse([], acc) do + acc + end + + ## Helpers + + defp perform_operation(repo, module, operation) do + if function_exported?(repo, :in_transaction?, 0) and repo.in_transaction?() do + if function_exported?(module, :after_begin, 0) do + module.after_begin() + flush() + end + + apply(module, operation, []) + flush() + + if function_exported?(module, :before_commit, 0) do + module.before_commit() + flush() + end + else + apply(module, operation, []) + flush() + end + end + + defp runner do + case Process.get(:ecto_migration) do + %{runner: runner} -> runner + _ -> raise "could not find migration runner process for #{inspect(self())}" + end + end + + defp log_and_execute_ddl(repo, migration, log, {instruction, %Index{} = index}) do + maybe_warn_index_ddl_transaction(index, migration) + maybe_warn_index_migration_lock(index, repo, migration) + log_and_execute_ddl(repo, log, {instruction, index}) + end + + defp 
log_and_execute_ddl(repo, _migration, log, command) do + log_and_execute_ddl(repo, log, command) + end + + defp log_and_execute_ddl(_repo, _log, func) when is_function(func, 0) do + func.() + :ok + end + + defp log_and_execute_ddl(repo, %{level: level, sql: sql}, command) do + log(level, command(command)) + meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + {:ok, logs} = repo.__adapter__().execute_ddl(meta, command, timeout: :infinity, log: sql) + + Enum.each(logs, fn {ddl_log_level, message, metadata} -> + ddl_log(ddl_log_level, level, message, metadata) + end) + + :ok + end + + defp ddl_log(_level, false, _msg, _metadata), do: :ok + defp ddl_log(level, _, msg, metadata), do: log(level, msg, metadata) + + defp log(level, msg, metadata \\ []) + defp log(false, _msg, _metadata), do: :ok + defp log(true, msg, metadata), do: Logger.log(:info, msg, metadata) + defp log(level, msg, metadata), do: Logger.log(level, msg, metadata) + + defp maybe_warn_index_ddl_transaction(%{concurrently: true} = index, migration) do + migration_config = migration.__migration__() + + if not migration_config[:disable_ddl_transaction] do + IO.warn( + """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable ddl transaction. 
\ + Please set: + + use Ecto.Migration + @disable_ddl_transaction true + """, + [] + ) + end + end + + defp maybe_warn_index_ddl_transaction(_index, _migration), do: :ok + + defp maybe_warn_index_migration_lock(%{concurrently: true} = index, repo, migration) do + migration_lock_disabled = migration.__migration__()[:disable_migration_lock] + lock_strategy = repo.config()[:migration_lock] + adapter = repo.__adapter__() + + case {migration_lock_disabled, adapter, lock_strategy} do + {false, Ecto.Adapters.Postgres, :pg_advisory_lock} -> + :ok + + {false, Ecto.Adapters.Postgres, _} -> + IO.warn( + """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable migration lock. \ + Please set: + + use Ecto.Migration + @disable_migration_lock true + + Alternatively, consider using advisory locks during migrations in the \ + repo configuration: + + config #{inspect(repo)}, migration_lock: :pg_advisory_lock + """, + [] + ) + + {false, _adapter, _migration_lock} -> + IO.warn( + """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable migration lock. 
\ + Please set: + + use Ecto.Migration + @disable_migration_lock true + """, + [] + ) + + _ -> + :ok + end + end + + defp maybe_warn_index_migration_lock(_index, _repo, _migration), do: :ok + + defp command(ddl) when is_binary(ddl) or is_list(ddl), + do: "execute #{inspect(ddl)}" + + defp command({:create, %Table{} = table, _}), + do: "create table #{quote_name(table.prefix, table.name)}" + + defp command({:create_if_not_exists, %Table{} = table, _}), + do: "create table if not exists #{quote_name(table.prefix, table.name)}" + + defp command({:alter, %Table{} = table, _}), + do: "alter table #{quote_name(table.prefix, table.name)}" + + defp command({:drop, %Table{} = table, mode}), + do: "drop table #{quote_name(table.prefix, table.name)}#{drop_mode(mode)}" + + defp command({:drop_if_exists, %Table{} = table, mode}), + do: "drop table if exists #{quote_name(table.prefix, table.name)}#{drop_mode(mode)}" + + defp command({:create, %Index{} = index}), + do: "create index #{quote_name(index.prefix, index.name)}" + + defp command({:create_if_not_exists, %Index{} = index}), + do: "create index if not exists #{quote_name(index.prefix, index.name)}" + + defp command({:drop, %Index{} = index, mode}), + do: "drop index #{quote_name(index.prefix, index.name)}#{drop_mode(mode)}" + + defp command({:drop_if_exists, %Index{} = index, mode}), + do: "drop index if exists #{quote_name(index.prefix, index.name)}#{drop_mode(mode)}" + + defp command({:rename, %Index{} = index_current, new_name}), + do: "rename index #{quote_name(index_current.prefix, index_current.name)} to #{new_name}" + + defp command({:rename, %Table{} = current_table, %Table{} = new_table}), + do: + "rename table #{quote_name(current_table.prefix, current_table.name)} to #{quote_name(new_table.prefix, new_table.name)}" + + defp command({:rename, %Table{} = table, current_column, new_column}), + do: + "rename column #{current_column} to #{new_column} on table #{quote_name(table.prefix, table.name)}" + + defp 
command({:create, %Constraint{check: nil, exclude: nil}}), + do: raise(ArgumentError, "a constraint must have either a check or exclude option") + + defp command({:create, %Constraint{check: check, exclude: exclude}}) + when is_binary(check) and is_binary(exclude), + do: raise(ArgumentError, "a constraint must not have both check and exclude options") + + defp command({:create, %Constraint{check: check} = constraint}) when is_binary(check), + do: + "create check constraint #{constraint.name} on table #{quote_name(constraint.prefix, constraint.table)}" + + defp command({:create, %Constraint{exclude: exclude} = constraint}) when is_binary(exclude), + do: + "create exclude constraint #{constraint.name} on table #{quote_name(constraint.prefix, constraint.table)}" + + defp command({:drop, %Constraint{} = constraint, _}), + do: + "drop constraint #{constraint.name} from table #{quote_name(constraint.prefix, constraint.table)}" + + defp command({:drop_if_exists, %Constraint{} = constraint, _}), + do: + "drop constraint if exists #{constraint.name} from table #{quote_name(constraint.prefix, constraint.table)}" + + defp drop_mode(:restrict), do: "" + defp drop_mode(:cascade), do: " cascade" + + defp quote_name(nil, name), do: quote_name(name) + defp quote_name(prefix, name), do: quote_name(prefix) <> "." <> quote_name(name) + defp quote_name(name) when is_atom(name), do: quote_name(Atom.to_string(name)) + defp quote_name(name), do: name +end diff --git a/deps/ecto_sql/lib/ecto/migration/schema_migration.ex b/deps/ecto_sql/lib/ecto/migration/schema_migration.ex new file mode 100644 index 0000000..a3d792e --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration/schema_migration.ex @@ -0,0 +1,81 @@ +defmodule Ecto.Migration.SchemaMigration do + # Defines a schema that works with a table that tracks schema migrations. + # The table name defaults to `schema_migrations`. 
+ @moduledoc false + use Ecto.Schema + + import Ecto.Query, only: [from: 2] + + @primary_key false + schema "schema_migrations" do + field :version, :integer, primary_key: true + timestamps updated_at: false + end + + # The migration flag is used to signal to the repository + # we are in a migration operation. + @default_opts [ + timeout: :infinity, + log: false, + # Keep schema_migration for backwards compatibility + schema_migration: true, + ecto_query: :schema_migration, + telemetry_options: [schema_migration: true] + ] + + def ensure_schema_migrations_table!(repo, config, opts) do + {repo, source} = get_repo_and_source(repo, config) + table_name = String.to_atom(source) + table = %Ecto.Migration.Table{name: table_name, prefix: opts[:prefix]} + meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + + commands = [ + {:add, :version, :bigint, primary_key: true}, + {:add, :inserted_at, :naive_datetime, []} + ] + + repo.__adapter__().execute_ddl(meta, {:create_if_not_exists, table, commands}, @default_opts) + end + + def versions(repo, config, prefix) do + {repo, source} = get_repo_and_source(repo, config) + from_opts = [prefix: prefix] ++ @default_opts + + query = + if Keyword.get(config, :migration_cast_version_column, false) do + from(m in source, select: type(m.version, :integer)) + else + from(m in source, select: m.version) + end + + {repo, query, from_opts} + end + + def up(repo, config, version, opts) do + {repo, source} = get_repo_and_source(repo, config) + + %__MODULE__{version: version} + |> Ecto.put_meta(source: source) + |> repo.insert(default_opts(opts)) + end + + def down(repo, config, version, opts) do + {repo, source} = get_repo_and_source(repo, config) + + from(m in source, where: m.version == type(^version, :integer)) + |> repo.delete_all(default_opts(opts)) + end + + def get_repo_and_source(repo, config) do + {Keyword.get(config, :migration_repo, repo), + Keyword.get(config, :migration_source, "schema_migrations")} + end + + defp 
default_opts(opts) do + Keyword.merge( + @default_opts, + prefix: opts[:prefix], + log: Keyword.get(opts, :log_migrator_sql, false) + ) + end +end diff --git a/deps/ecto_sql/lib/ecto/migrator.ex b/deps/ecto_sql/lib/ecto/migrator.ex new file mode 100644 index 0000000..20ae7bc --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migrator.ex @@ -0,0 +1,861 @@ +defmodule Ecto.Migrator do + @moduledoc """ + Lower level API for managing migrations. + + EctoSQL provides three mix tasks for running and managing migrations: + + * `mix ecto.migrate` - migrates a repository + * `mix ecto.rollback` - rolls back a particular migration + * `mix ecto.migrations` - shows all migrations and their status + + Those tasks are built on top of the functions in this module. + While the tasks above cover most use cases, it may be necessary + from time to time to jump into the lower level API. For example, + if you are assembling an Elixir release, Mix is not available, + so this module provides a nice complement to still migrate your + system. + + To learn more about migrations in general, see `Ecto.Migration`. + + ## Example: Running an individual migration + + Imagine you have this migration: + + defmodule MyApp.MigrationExample do + use Ecto.Migration + + def up do + execute "CREATE TABLE users(id serial PRIMARY_KEY, username text)" + end + + def down do + execute "DROP TABLE users" + end + end + + You can execute it manually with: + + Ecto.Migrator.up(Repo, 20080906120000, MyApp.MigrationExample) + + ## Example: Running migrations in a release + + Elixir v1.9 introduces `mix release`, which generates a self-contained + directory that consists of your application code, all of its dependencies, + plus the whole Erlang Virtual Machine (VM) and runtime. + + When a release is assembled, Mix is no longer available inside a release + and therefore none of the Mix tasks. Users may still need a mechanism to + migrate their databases. 
This can be achieved with using the `Ecto.Migrator` + module: + + defmodule MyApp.Release do + @app :my_app + + def migrate do + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.load(@app) + Application.fetch_env!(@app, :ecto_repos) + end + end + + The example above uses `with_repo/3` to make sure the repository is + started and then runs all migrations up or a given migration down. + Note you will have to replace `MyApp` and `:my_app` on the first two + lines by your actual application name. Once the file above is added + to your application, you can assemble a new release and invoke the + commands above in the release root like this: + + $ bin/my_app eval "MyApp.Release.migrate" + $ bin/my_app eval "MyApp.Release.rollback(MyApp.Repo, 20190417140000)" + + ## Example: Running migrations on application startup + + Add the following to the top of your application children spec: + + {Ecto.Migrator, + repos: Application.fetch_env!(:my_app, :ecto_repos), + skip: System.get_env("SKIP_MIGRATIONS") == "true"} + + To skip migrations you can also pass `skip: true` or as in the example + set the environment variable `SKIP_MIGRATIONS` to a truthy value. + + And all other options described in `up/4` are allowed, + for example if you want to log the SQL commands, + and run migrations in a prefix: + + {Ecto.Migrator, + repos: Application.fetch_env!(:my_app, :ecto_repos), + log_migrator_sql: true, + prefix: "my_app"} + + To roll back you'd do it normally: + + $ mix ecto.rollback + + """ + + require Logger + require Ecto.Query + + alias Ecto.Migration.Runner + alias Ecto.Migration.SchemaMigration + + @doc """ + Ensures the repo is started to perform migration operations. 
+ + All of the application required to run the repo will be started + before hand with chosen mode. If the repo has not yet been started, + it is manually started, with a `:pool_size` of 2, before the given + function is executed, and the repo is then terminated. If the repo + was already started, then the function is directly executed, without + terminating the repo afterwards. + + Although this function was designed to start repositories for running + migrations, it can be used by any code, Mix task, or release tooling + that needs to briefly start a repository to perform a certain operation + and then terminate. + + The repo may also configure a `:start_apps_before_migration` option + which is a list of applications to be started before the migration + runs. + + It returns `{:ok, fun_return, apps}`, with all apps that have been + started, or `{:error, term}`. + + ## Options + + * `:pool_size` - The pool size to start the repo for migrations. + Defaults to 2. + * `:mode` - The mode to start all applications. + Defaults to `:permanent`. 
+ + ## Examples + + {:ok, _, _} = + Ecto.Migrator.with_repo(repo, fn repo -> + Ecto.Migrator.run(repo, :up, all: true) + end) + + """ + def with_repo(repo, fun, opts \\ []) do + config = repo.config() + mode = Keyword.get(opts, :mode, :permanent) + apps = [:ecto_sql | config[:start_apps_before_migration] || []] + + extra_started = + Enum.flat_map(apps, fn app -> + {:ok, started} = Application.ensure_all_started(app, mode) + started + end) + + {:ok, repo_started} = repo.__adapter__().ensure_all_started(config, mode) + started = extra_started ++ repo_started + pool_size = Keyword.get(opts, :pool_size, 2) + migration_repo = config[:migration_repo] || repo + + case ensure_repo_started(repo, pool_size) do + {:ok, repo_after} -> + case ensure_migration_repo_started(migration_repo, repo) do + {:ok, migration_repo_after} -> + try do + {:ok, fun.(repo), started} + after + after_action(repo, repo_after) + after_action(migration_repo, migration_repo_after) + end + + {:error, _} = error -> + after_action(repo, repo_after) + error + end + + {:error, _} = error -> + error + end + end + + @doc """ + Gets the migrations path from a repository. + + This function accepts an optional second parameter to customize the + migrations directory. This can be used to specify a custom migrations + path. + """ + @spec migrations_path(Ecto.Repo.t(), String.t()) :: String.t() + def migrations_path(repo, directory \\ "migrations") do + config = repo.config() + priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}" + app = Keyword.fetch!(config, :otp_app) + Application.app_dir(app, Path.join(priv, directory)) + end + + @doc """ + Gets all migrated versions. + + This function ensures the migration table exists + if no table has been defined yet. + + ## Options + + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. 
+ * `:skip_table_creation` - skips any attempt to create the migration table + Useful for situations where user needs to check migrations but has + insufficient permissions to create the table. Note that migrations + commands may fail if this is set to true. Defaults to `false`. Accepts a + boolean. + """ + @spec migrated_versions(Ecto.Repo.t(), Keyword.t()) :: [integer] + def migrated_versions(repo, opts \\ []) do + lock_for_migrations(true, repo, opts, fn _config, versions -> versions end) + end + + @doc """ + Runs an up migration on the given repository. + + ## Options + + * `:log` - the level to use for logging of migration instructions. + Defaults to `:info`. Can be any of `Logger.level/0` values or a boolean. + If `false`, it also avoids logging messages from the database. + * `:log_migrations_sql` - the level to use for logging of SQL commands + generated by migrations. Can be any of the `Logger.level/0` values + or a boolean. If `false`, logging is disabled. If `true`, uses the configured + Repo logger level. Defaults to `false` + * `:log_migrator_sql` - the level to use for logging of SQL commands emitted + by the migrator, such as transactions, locks, etc. Can be any of the `Logger.level/0` + values or a boolean. If `false`, logging is disabled. If `true`, uses the configured + Repo logger level. Defaults to `false` + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. 
+ * `:strict_version_order` - abort when applying a migration with old timestamp + (otherwise it emits a warning) + """ + @spec up(Ecto.Repo.t(), integer, module, Keyword.t()) :: :ok | :already_up + def up(repo, version, module, opts \\ []) do + conditional_lock_for_migrations(module, version, repo, opts, fn config, versions -> + if version in versions do + :already_up + else + result = do_up(repo, config, version, module, opts) + + if version != Enum.max([version | versions]) do + latest = Enum.max(versions) + + message = """ + You are running migration #{version} but an older \ + migration with version #{latest} has already run. + + This can be an issue if you have already ran #{latest} in production \ + because a new deployment may migrate #{version} but a rollback command \ + would revert #{latest} instead of #{version}. + + If this can be an issue, we recommend to rollback #{version} and change \ + it to a version later than #{latest}. + """ + + if opts[:strict_version_order] do + raise Ecto.MigrationError, message + else + Logger.warning(message) + end + end + + result + end + end) + end + + defp do_up(repo, config, version, module, opts) do + async_migrate_maybe_in_transaction(repo, config, version, module, :up, opts, fn -> + attempt(repo, config, version, module, :forward, :up, :up, opts) || + attempt(repo, config, version, module, :forward, :change, :up, opts) || + {:error, + Ecto.MigrationError.exception( + "#{inspect(module)} does not implement a `up/0` or `change/0` function" + )} + end) + end + + @doc """ + Runs a down migration on the given repository. + + ## Options + + * `:log` - the level to use for logging of migration commands. Defaults to `:info`. + Can be any of `Logger.level/0` values or a boolean. + * `:log_migrations_sql` - the level to use for logging of SQL commands + generated by migrations. Can be any of the `Logger.level/0` values + or a boolean. If `false`, logging is disabled. If `true`, uses the configured + Repo logger level. 
Defaults to `false` + * `:log_migrator_sql` - the level to use for logging of SQL commands emitted + by the migrator, such as transactions, locks, etc. Can be any of the `Logger.level/0` + values or a boolean. If `false`, logging is disabled. If `true`, uses the configured + Repo logger level. Defaults to `false` + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. + + """ + @spec down(Ecto.Repo.t(), integer, module) :: :ok | :already_down + def down(repo, version, module, opts \\ []) do + conditional_lock_for_migrations(module, version, repo, opts, fn config, versions -> + if version in versions do + do_down(repo, config, version, module, opts) + else + :already_down + end + end) + end + + defp do_down(repo, config, version, module, opts) do + async_migrate_maybe_in_transaction(repo, config, version, module, :down, opts, fn -> + attempt(repo, config, version, module, :forward, :down, :down, opts) || + attempt(repo, config, version, module, :backward, :change, :down, opts) || + {:error, + Ecto.MigrationError.exception( + "#{inspect(module)} does not implement a `down/0` or `change/0` function" + )} + end) + end + + defp async_migrate_maybe_in_transaction(repo, config, version, module, direction, opts, fun) do + dynamic_repo = repo.get_dynamic_repo() + + fun_with_status = fn -> + result = fun.() + apply(SchemaMigration, direction, [repo, config, version, opts]) + result + end + + fn -> run_maybe_in_transaction(repo, dynamic_repo, module, fun_with_status, opts) end + |> Task.async() + |> Task.await(:infinity) + end + + defp run_maybe_in_transaction(repo, dynamic_repo, module, fun, opts) do + repo.put_dynamic_repo(dynamic_repo) + + if module.__migration__()[:disable_ddl_transaction] || + not repo.__adapter__().supports_ddl_transaction?() do + fun.() + else + {:ok, result} = repo.transaction(fun, log: migrator_log(opts), timeout: :infinity) + + result + end + catch + 
kind, reason -> + {kind, reason, __STACKTRACE__} + end + + defp attempt(repo, config, version, module, direction, operation, reference, opts) do + if Code.ensure_loaded?(module) and function_exported?(module, operation, 0) do + Runner.run(repo, config, version, module, direction, operation, reference, opts) + :ok + end + end + + @doc """ + Runs migrations for the given repository. + + Equivalent to: + + Ecto.Migrator.run(repo, [Ecto.Migrator.migrations_path(repo)], direction, opts) + + See `run/4` for more information. + """ + @spec run(Ecto.Repo.t(), atom, Keyword.t()) :: [integer] + def run(repo, direction, opts) do + run(repo, [migrations_path(repo)], direction, opts) + end + + @doc ~S""" + Apply migrations to a repository with a given strategy. + + The second argument identifies where the migrations are sourced from. + A binary representing directory (or a list of binaries representing + directories) may be passed, in which case we will load all files + following the "#{VERSION}_#{NAME}.exs" schema. The `migration_source` + may also be a list of tuples that identify the version number and + migration modules to be run, for example: + + Ecto.Migrator.run(Repo, [{0, MyApp.Migration1}, {1, MyApp.Migration2}, ...], :up, opts) + + A strategy (which is one of `:all`, `:step`, `:to`, or `:to_exclusive`) must be given as + an option. + + ## Execution model + + In order to run migrations, at least two database connections are + necessary. One is used to lock the "schema_migrations" table and + the other one to effectively run the migrations. This allows multiple + nodes to run migrations at the same time, but guarantee that only one + of them will effectively migrate the database. + + A downside of this approach is that migrations cannot run dynamically + during test under the `Ecto.Adapters.SQL.Sandbox`, as the sandbox has + to share a single connection across processes to guarantee the changes + can be reverted. 
+ + ## Options + + * `:all` - runs all available if `true` + + * `:step` - runs the specific number of migrations + + * `:to` - runs all until the supplied version is reached + (including the version given in `:to`) + + * `:to_exclusive` - runs all until the supplied version is reached + (excluding the version given in `:to_exclusive`) + + Plus all other options described in `up/4`. + """ + @spec run(Ecto.Repo.t(), String.t() | [String.t()] | [{integer, module}], atom, Keyword.t()) :: + [integer] + def run(repo, migration_source, direction, opts) do + migration_source = List.wrap(migration_source) + + pending = + lock_for_migrations(true, repo, opts, fn _config, versions -> + cond do + opts[:all] -> + pending_all(versions, migration_source, direction) + + to = opts[:to] -> + pending_to(versions, migration_source, direction, to) + + to_exclusive = opts[:to_exclusive] -> + pending_to_exclusive(versions, migration_source, direction, to_exclusive) + + step = opts[:step] -> + pending_step(versions, migration_source, direction, step) + + true -> + {:error, + ArgumentError.exception( + "expected one of :all, :to, :to_exclusive, or :step strategies" + )} + end + end) + + # The lock above already created the table, so we can now skip it. + opts = Keyword.put(opts, :skip_table_creation, true) + + ensure_no_duplication!(pending) + migrate(Enum.map(pending, &load_migration!/1), direction, repo, opts) + end + + @doc """ + Returns an array of tuples as the migration status of the given repo, + without actually running any migrations. + + Equivalent to: + + Ecto.Migrator.migrations(repo, [Ecto.Migrator.migrations_path(repo)]) + + """ + @spec migrations(Ecto.Repo.t()) :: [{:up | :down, id :: integer(), name :: String.t()}] + def migrations(repo) do + migrations(repo, [migrations_path(repo)]) + end + + @doc """ + Returns an array of tuples as the migration status of the given repo, + without actually running any migrations. 
+ """ + @spec migrations(Ecto.Repo.t(), String.t() | [String.t()], Keyword.t()) :: + [{:up | :down, id :: integer(), name :: String.t()}] + def migrations(repo, directories, opts \\ []) do + directories = List.wrap(directories) + + repo + |> migrated_versions(opts) + |> collect_migrations(directories) + |> Enum.sort_by(fn {_, version, _} -> version end) + end + + use GenServer + + @doc """ + Runs migrations as part of your supervision tree. + + ## Options + + * `:repos` - Required option to tell the migrator which Repo's to + migrate. Example: `repos: [MyApp.Repo]` + + * `:skip` - Option to skip migrations. Defaults to `false`. + + Plus all other options described in `up/4`. + + See "Example: Running migrations on application startup" for more info. + """ + def start_link(opts) do + GenServer.start_link(__MODULE__, opts, name: __MODULE__) + end + + @impl true + def init(opts) do + {repos, opts} = Keyword.pop!(opts, :repos) + {skip?, opts} = Keyword.pop(opts, :skip, false) + {migrator, opts} = Keyword.pop(opts, :migrator, &Ecto.Migrator.run/3) + opts = Keyword.put(opts, :all, true) + + unless skip? 
do + for repo <- repos do + {:ok, _, _} = with_repo(repo, &migrator.(&1, :up, opts)) + end + end + + :ignore + end + + defp collect_migrations(versions, migration_source) do + ups_with_file = + versions + |> pending_in_direction(migration_source, :down) + |> Enum.map(fn {version, name, _} -> {:up, version, name} end) + + ups_without_file = + versions + |> versions_without_file(migration_source) + |> Enum.map(fn version -> {:up, version, "** FILE NOT FOUND **"} end) + + downs = + versions + |> pending_in_direction(migration_source, :up) + |> Enum.map(fn {version, name, _} -> {:down, version, name} end) + + ups_with_file ++ ups_without_file ++ downs + end + + defp versions_without_file(versions, migration_source) do + versions_with_file = + migration_source + |> migrations_for() + |> Enum.map(fn {version, _, _} -> version end) + + versions -- versions_with_file + end + + defp lock_for_migrations(lock_or_migration_number, repo, opts, fun) do + dynamic_repo = Keyword.get(opts, :dynamic_repo, repo.get_dynamic_repo()) + skip_table_creation = Keyword.get(opts, :skip_table_creation, false) + previous_dynamic_repo = repo.put_dynamic_repo(dynamic_repo) + + try do + config = repo.config() + + unless skip_table_creation do + verbose_schema_migration(repo, "create schema migrations table", fn -> + SchemaMigration.ensure_schema_migrations_table!(repo, config, opts) + end) + end + + {migration_repo, query, all_opts} = SchemaMigration.versions(repo, config, opts[:prefix]) + + migration_lock? = + Keyword.get(opts, :migration_lock, Keyword.get(config, :migration_lock, true)) + + opts = + opts + |> Keyword.put(:migration_source, config[:migration_source] || "schema_migrations") + |> Keyword.put(:log, migrator_log(opts)) + + result = + if lock_or_migration_number && migration_lock? do + # If there is a migration_repo, it wins over dynamic_repo, + # otherwise the dynamic_repo is the one locked in migrations. 
+ meta_repo = if migration_repo != repo, do: migration_repo, else: dynamic_repo + meta = Ecto.Adapter.lookup_meta(meta_repo) + + migration_repo.__adapter__().lock_for_migrations(meta, opts, fn -> + fun.(config, migration_repo.all(query, all_opts)) + end) + else + fun.(config, migration_repo.all(query, all_opts)) + end + + case result do + {kind, reason, stacktrace} -> + :erlang.raise(kind, reason, stacktrace) + + {:error, error} -> + raise error + + result -> + result + end + after + repo.put_dynamic_repo(previous_dynamic_repo) + end + end + + defp conditional_lock_for_migrations(module, version, repo, opts, fun) do + lock = if module.__migration__()[:disable_migration_lock], do: false, else: version + lock_for_migrations(lock, repo, opts, fun) + end + + defp pending_to(versions, migration_source, direction, target) when is_integer(target) do + within_target_version? = fn + {version, _, _}, target, :up -> + version <= target + + {version, _, _}, target, :down -> + version >= target + end + + pending_in_direction(versions, migration_source, direction) + |> Enum.take_while(&within_target_version?.(&1, target, direction)) + end + + defp pending_to_exclusive(versions, migration_source, direction, target) + when is_integer(target) do + within_target_version? 
= fn + {version, _, _}, target, :up -> + version < target + + {version, _, _}, target, :down -> + version > target + end + + pending_in_direction(versions, migration_source, direction) + |> Enum.take_while(&within_target_version?.(&1, target, direction)) + end + + defp pending_step(versions, migration_source, direction, count) do + pending_in_direction(versions, migration_source, direction) + |> Enum.take(count) + end + + defp pending_all(versions, migration_source, direction) do + pending_in_direction(versions, migration_source, direction) + end + + defp pending_in_direction(versions, migration_source, :up) do + migration_source + |> migrations_for() + |> Enum.filter(fn {version, _name, _file} -> version not in versions end) + end + + defp pending_in_direction(versions, migration_source, :down) do + migration_source + |> migrations_for() + |> Enum.filter(fn {version, _name, _file} -> version in versions end) + |> Enum.reverse() + end + + defp migrations_for(migration_source) when is_list(migration_source) do + migration_source + |> Enum.flat_map(fn + directory when is_binary(directory) -> + Path.join([directory, "**", "*.{ex,exs}"]) + |> Path.wildcard() + |> Enum.map(&extract_migration_info/1) + |> Enum.filter(& &1) + + {version, module} -> + [{version, module, module}] + end) + |> Enum.sort() + end + + defp extract_migration_info(file) do + base = Path.basename(file) + + case Integer.parse(Path.rootname(base)) do + {integer, "_" <> name} -> + if Path.extname(base) == ".ex" do + # See: https://github.com/elixir-ecto/ecto_sql/issues/599 + IO.warn( + """ + file looks like a migration but ends in .ex. \ + Migration files should end in .exs. 
Use "mix ecto.gen.migration" to generate \ + migration files with the correct extension.\ + """, + stacktrace_info(file: file) + ) + + nil + else + {integer, name, file} + end + + _ -> + nil + end + end + + # TODO: Remove when we require Elixir 1.14 + if Version.match?(System.version(), ">= 1.14.0") do + defp stacktrace_info(info), do: info + else + defp stacktrace_info(_info), do: [] + end + + defp ensure_no_duplication!([{version, name, _} | t]) do + cond do + List.keyfind(t, version, 0) -> + raise Ecto.MigrationError, + "migrations can't be executed, migration version #{version} is duplicated" + + List.keyfind(t, name, 1) -> + raise Ecto.MigrationError, + "migrations can't be executed, migration name #{name} is duplicated" + + true -> + ensure_no_duplication!(t) + end + end + + defp ensure_no_duplication!([]), do: :ok + + defp load_migration!({version, _, mod}) when is_atom(mod) do + if migration?(mod) do + {version, mod} + else + raise Ecto.MigrationError, "module #{inspect(mod)} is not an Ecto.Migration" + end + end + + defp load_migration!({version, _, file}) when is_binary(file) do + loaded_modules = file |> Code.compile_file() |> Enum.map(&elem(&1, 0)) + + if mod = Enum.find(loaded_modules, &migration?/1) do + {version, mod} + else + raise Ecto.MigrationError, + "file #{Path.relative_to_cwd(file)} does not define an Ecto.Migration" + end + end + + defp migration?(mod) do + Code.ensure_loaded?(mod) and function_exported?(mod, :__migration__, 0) + end + + defp migrate([], direction, _repo, opts) do + level = Keyword.get(opts, :log, :info) + log(level, "Migrations already #{direction}") + [] + end + + defp migrate(migrations, direction, repo, opts) do + for {version, mod} <- migrations, + do_direction(direction, repo, version, mod, opts), + do: version + end + + defp do_direction(:up, repo, version, mod, opts) do + conditional_lock_for_migrations(mod, version, repo, opts, fn config, versions -> + unless version in versions do + do_up(repo, config, version, 
mod, opts) + end + end) + end + + defp do_direction(:down, repo, version, mod, opts) do + conditional_lock_for_migrations(mod, version, repo, opts, fn config, versions -> + if version in versions do + do_down(repo, config, version, mod, opts) + end + end) + end + + defp verbose_schema_migration(repo, reason, fun) do + try do + fun.() + rescue + error -> + Logger.error(""" + Could not #{reason}. This error usually happens due to the following: + + * The database does not exist + * The "schema_migrations" table, which Ecto uses for managing + migrations, was defined by another library + * There is a deadlock while migrating (such as using concurrent + indexes with a migration_lock) + + To fix the first issue, run "mix ecto.create" for the desired MIX_ENV. + + To address the second, you can run "mix ecto.drop" followed by + "mix ecto.create", both for the desired MIX_ENV. Alternatively you may + configure Ecto to use another table and/or repository for managing + migrations: + + config #{inspect(repo.config()[:otp_app])}, #{inspect(repo)}, + migration_source: "some_other_table_for_schema_migrations", + migration_repo: AnotherRepoForSchemaMigrations + + The full error report is shown below. 
+ """) + + reraise error, __STACKTRACE__ + end + end + + defp log(false, _msg), do: :ok + defp log(true, msg), do: Logger.info(msg) + defp log(level, msg), do: Logger.log(level, msg) + + defp migrator_log(opts) do + Keyword.get(opts, :log_migrator_sql, false) + end + + defp ensure_repo_started(repo, pool_size) do + case repo.start_link(pool_size: pool_size) do + {:ok, _} -> + {:ok, :stop} + + {:error, {:already_started, _pid}} -> + {:ok, :restart} + + {:error, _} = error -> + error + end + end + + defp ensure_migration_repo_started(repo, repo) do + {:ok, :noop} + end + + defp ensure_migration_repo_started(migration_repo, _repo) do + case migration_repo.start_link() do + {:ok, _} -> + {:ok, :stop} + + {:error, {:already_started, _pid}} -> + {:ok, :noop} + + {:error, _} = error -> + error + end + end + + defp after_action(repo, :restart) do + if Process.whereis(repo) do + %{pid: pid} = Ecto.Adapter.lookup_meta(repo) + Supervisor.restart_child(repo, pid) + end + end + + defp after_action(repo, :stop) do + repo.stop() + end + + defp after_action(_repo, :noop) do + :noop + end +end diff --git a/deps/ecto_sql/lib/mix/ecto_sql.ex b/deps/ecto_sql/lib/mix/ecto_sql.ex new file mode 100644 index 0000000..bf76566 --- /dev/null +++ b/deps/ecto_sql/lib/mix/ecto_sql.ex @@ -0,0 +1,45 @@ +defmodule Mix.EctoSQL do + @moduledoc false + + @doc """ + Ensures the given repository's migrations paths exists on the file system. 
+ """ + @spec ensure_migrations_paths(Ecto.Repo.t(), Keyword.t()) :: [String.t()] + def ensure_migrations_paths(repo, opts) do + paths = Keyword.get_values(opts, :migrations_path) + paths = if paths == [], do: [Path.join(source_repo_priv(repo), "migrations")], else: paths + + if not Mix.Project.umbrella?() do + for path <- paths, not File.dir?(path) do + raise_missing_migrations(Path.relative_to_cwd(path), repo) + end + end + + paths + end + + defp raise_missing_migrations(path, repo) do + Mix.raise(""" + Could not find migrations directory #{inspect(path)} + for repo #{inspect(repo)}. + + This may be because you are in a new project and the + migration directory has not been created yet. Creating an + empty directory at the path above will fix this error. + + If you expected existing migrations to be found, please + make sure your repository has been properly configured + and the configured path exists. + """) + end + + @doc """ + Returns the private repository path relative to the source. 
+ """ + def source_repo_priv(repo) do + config = repo.config() + priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}" + app = Keyword.fetch!(config, :otp_app) + Path.join(Mix.Project.deps_paths()[app] || File.cwd!(), priv) + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex b/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex new file mode 100644 index 0000000..fef0310 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex @@ -0,0 +1,115 @@ +defmodule Mix.Tasks.Ecto.Dump do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Dumps the repository database structure" + @default_opts [quiet: false] + + @aliases [ + d: :dump_path, + q: :quiet, + r: :repo + ] + + @switches [ + dump_path: :string, + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean, + prefix: [:string, :keep] + ] + + @moduledoc """ + Dumps the current environment's database structure for the + given repository into a structure file. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + This task needs some shell utility to be present on the machine + running the task. + + Database | Utility needed + :--------- | :------------- + PostgreSQL | pg_dump + MySQL | mysqldump + + ## Example + + $ mix ecto.dump + + ## Command line options + + * `-r`, `--repo` - the repo to load the structure info from + * `-d`, `--dump-path` - the path of the dump file to create + * `-q`, `--quiet` - run the command quietly + * `--no-compile` - does not compile applications before dumping + * `--no-deps-check` - does not check dependencies before dumping + * `--prefix` - prefix that will be included in the structure dump. + Can include multiple prefixes (ex. `--prefix foo --prefix bar`) with + PostgreSQL but not MySQL. When specified, the prefixes will have + their definitions dumped along with the data in their migration table. 
+ The default behavior is dependent on the adapter for backwards compatibility + reasons. For PostgreSQL, the configured database has the definitions dumped + from all of its schemas but only the data from the migration table + from the `public` schema is included. For MySQL, only the configured + database and its migration table are dumped. + """ + + @impl true + def run(args) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + dump_prefixes = + case Keyword.get_values(opts, :prefix) do + [_ | _] = prefixes -> prefixes + [] -> nil + end + + opts = + @default_opts + |> Keyword.merge(opts) + |> Keyword.put(:dump_prefixes, dump_prefixes) + + Enum.each(parse_repo(args), fn repo -> + ensure_repo(repo, args) + + ensure_implements( + repo.__adapter__(), + Ecto.Adapter.Structure, + "dump structure for #{inspect(repo)}" + ) + + migration_repo = repo.config()[:migration_repo] || repo + + for repo <- Enum.uniq([repo, migration_repo]) do + config = Keyword.merge(repo.config(), opts) + start_time = System.system_time() + + case repo.__adapter__().structure_dump(source_repo_priv(repo), config) do + {:ok, location} -> + unless opts[:quiet] do + elapsed = + System.convert_time_unit(System.system_time() - start_time, :native, :microsecond) + + Mix.shell().info( + "The structure for #{inspect(repo)} has been dumped to #{location} in #{format_time(elapsed)}" + ) + end + + {:error, term} when is_binary(term) -> + Mix.raise("The structure for #{inspect(repo)} couldn't be dumped: #{term}") + + {:error, term} -> + Mix.raise("The structure for #{inspect(repo)} couldn't be dumped: #{inspect(term)}") + end + end + end) + end + + defp format_time(microsec) when microsec < 1_000, do: "#{microsec} μs" + defp format_time(microsec) when microsec < 1_000_000, do: "#{div(microsec, 1_000)} ms" + defp format_time(microsec), do: "#{Float.round(microsec / 1_000_000.0)} s" +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex 
b/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex new file mode 100644 index 0000000..51d2bfc --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex @@ -0,0 +1,130 @@ +defmodule Mix.Tasks.Ecto.Gen.Migration do + use Mix.Task + + import Macro, only: [camelize: 1, underscore: 1] + import Mix.Generator + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Generates a new migration for the repo" + + @aliases [ + r: :repo + ] + + @switches [ + change: :string, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :string + ] + + @moduledoc """ + Generates a migration. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + ## Examples + + $ mix ecto.gen.migration add_posts_table + $ mix ecto.gen.migration add_posts_table -r Custom.Repo + + The generated migration filename will be prefixed with the current + timestamp in UTC which is used for versioning and ordering. + + By default, the migration will be generated to the + "priv/YOUR_REPO/migrations" directory of the current application + but it can be configured to be any subdirectory of `priv` by + specifying the `:priv` key under the repository configuration. + + This generator will automatically open the generated file if + you have `ECTO_EDITOR` set in your environment variable. 
+ + ## Command line options + + * `-r`, `--repo` - the repo to generate migration for + * `--no-compile` - does not compile applications before running + * `--no-deps-check` - does not check dependencies before running + * `--migrations-path` - the path to run the migrations from, defaults to `priv/repo/migrations` + + ## Configuration + + If the current app configuration specifies a custom migration module + the generated migration code will use that rather than the default + `Ecto.Migration`: + + config :ecto_sql, migration_module: MyApplication.CustomMigrationModule + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + + Enum.map(repos, fn repo -> + case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do + {opts, [name]} -> + ensure_repo(repo, args) + path = opts[:migrations_path] || Path.join(source_repo_priv(repo), "migrations") + base_name = "#{underscore(name)}.exs" + file = Path.join(path, "#{timestamp()}_#{base_name}") + unless File.dir?(path), do: create_directory(path) + + fuzzy_path = Path.join(path, "*_#{base_name}") + + if Path.wildcard(fuzzy_path) != [] do + Mix.raise( + "migration can't be created, there is already a migration file with name #{name}." 
+ ) + end + + # The :change option may be used by other tasks but not the CLI + assigns = [ + mod: Module.concat([repo, Migrations, camelize(name)]), + change: opts[:change] + ] + + create_file(file, migration_template(assigns)) + + if open?(file) and Mix.shell().yes?("Do you want to run this migration?") do + Mix.Task.run("ecto.migrate", ["-r", inspect(repo), "--migrations-path", path]) + end + + file + + {_, _} -> + Mix.raise( + "expected ecto.gen.migration to receive the migration file name, " <> + "got: #{inspect(Enum.join(args, " "))}" + ) + end + end) + end + + defp timestamp do + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + + defp pad(i) when i < 10, do: <> + defp pad(i), do: to_string(i) + + defp migration_module do + case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do + migration_module when is_atom(migration_module) -> migration_module + other -> Mix.raise("Expected :migration_module to be a module, got: #{inspect(other)}") + end + end + + embed_template(:migration, """ + defmodule <%= inspect @mod %> do + use <%= inspect migration_module() %> + + def change do + <%= @change %> + end + end + """) +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.load.ex b/deps/ecto_sql/lib/mix/tasks/ecto.load.ex new file mode 100644 index 0000000..94370d2 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.load.ex @@ -0,0 +1,146 @@ +defmodule Mix.Tasks.Ecto.Load do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Loads previously dumped database structure" + @default_opts [force: false, quiet: false] + + @aliases [ + d: :dump_path, + f: :force, + q: :quiet, + r: :repo + ] + + @switches [ + dump_path: :string, + force: :boolean, + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean, + skip_if_loaded: :boolean + ] + + @moduledoc """ + Loads the current environment's database structure for the + given repository from 
a previously dumped structure file. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + This task needs some shell utility to be present on the machine + running the task. + + Database | Utility needed + :--------- | :------------- + PostgreSQL | psql + MySQL | mysql + + ## Example + + $ mix ecto.load + + ## Command line options + + * `-r`, `--repo` - the repo to load the structure info into + * `-d`, `--dump-path` - the path of the dump file to load from + * `-q`, `--quiet` - run the command quietly + * `-f`, `--force` - do not ask for confirmation when loading data. + Configuration is asked only when `:start_permanent` is set to true + (typically in production) + * `--no-compile` - does not compile applications before loading + * `--no-deps-check` - does not check dependencies before loading + * `--skip-if-loaded` - does not load the dump file if the repo has the migrations table up + + """ + + @impl true + def run(args, table_exists? \\ &Ecto.Adapters.SQL.table_exists?/3) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + opts = Keyword.merge(@default_opts, opts) + opts = if opts[:quiet], do: Keyword.put(opts, :log, false), else: opts + + Enum.each(parse_repo(args), fn repo -> + ensure_repo(repo, args) + + ensure_implements( + repo.__adapter__(), + Ecto.Adapter.Structure, + "load structure for #{inspect(repo)}" + ) + + {migration_repo, source} = + Ecto.Migration.SchemaMigration.get_repo_and_source(repo, repo.config()) + + {:ok, loaded?, _} = + Ecto.Migrator.with_repo(migration_repo, table_exists_closure(table_exists?, source, opts)) + + for repo <- Enum.uniq([repo, migration_repo]) do + cond do + loaded? and opts[:skip_if_loaded] -> + :ok + + (skip_safety_warnings?() and not loaded?) or opts[:force] or confirm_load(repo, loaded?) 
-> + load_structure(repo, opts) + + true -> + :ok + end + end + end) + end + + defp table_exists_closure(fun, source, opts) when is_function(fun, 3) do + &fun.(&1, source, opts) + end + + defp table_exists_closure(fun, source, _opts) when is_function(fun, 2) do + &fun.(&1, source) + end + + defp skip_safety_warnings? do + Mix.Project.config()[:start_permanent] != true + end + + defp confirm_load(repo, false) do + Mix.shell().yes?( + "Are you sure you want to load a new structure for #{inspect(repo)}? Any existing data in this repo may be lost." + ) + end + + defp confirm_load(repo, true) do + Mix.shell().yes?(""" + It looks like a structure was already loaded for #{inspect(repo)}. Any attempt to load it again might fail. + Are you sure you want to proceed? + """) + end + + defp load_structure(repo, opts) do + config = Keyword.merge(repo.config(), opts) + start_time = System.system_time() + + case repo.__adapter__().structure_load(source_repo_priv(repo), config) do + {:ok, location} -> + unless opts[:quiet] do + elapsed = + System.convert_time_unit(System.system_time() - start_time, :native, :microsecond) + + Mix.shell().info( + "The structure for #{inspect(repo)} has been loaded from #{location} in #{format_time(elapsed)}" + ) + end + + {:error, term} when is_binary(term) -> + Mix.raise("The structure for #{inspect(repo)} couldn't be loaded: #{term}") + + {:error, term} -> + Mix.raise("The structure for #{inspect(repo)} couldn't be loaded: #{inspect(term)}") + end + end + + defp format_time(microsec) when microsec < 1_000, do: "#{microsec} μs" + defp format_time(microsec) when microsec < 1_000_000, do: "#{div(microsec, 1_000)} ms" + defp format_time(microsec), do: "#{Float.round(microsec / 1_000_000.0)} s" +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex b/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex new file mode 100644 index 0000000..dae7164 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex @@ -0,0 +1,162 @@ +defmodule 
Mix.Tasks.Ecto.Migrate do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Runs the repository migrations" + + @aliases [ + n: :step, + r: :repo + ] + + @switches [ + all: :boolean, + step: :integer, + to: :integer, + to_exclusive: :integer, + quiet: :boolean, + prefix: :string, + pool_size: :integer, + log_level: :string, + log_migrations_sql: :boolean, + log_migrator_sql: :boolean, + strict_version_order: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep + ] + + @moduledoc """ + Runs the pending migrations for the given repository. + + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application, where "YOUR_REPO" is the last segment + in your repository name. For example, the repository `MyApp.Repo` + will use "priv/repo/migrations". The repository `Whatever.MyRepo` + will use "priv/my_repo/migrations". + + You can configure a repository to use another directory by specifying + the `:priv` key under the repository configuration. The "migrations" + part will be automatically appended to it. For instance, to use + "priv/custom_repo/migrations": + + config :my_app, MyApp.Repo, priv: "priv/custom_repo" + + This task runs all pending migrations by default. To migrate up to a + specific version number, supply `--to version_number`. To migrate a + specific number of times, use `--step n`. + + The repositories to migrate are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + Since Ecto tasks can only be executed once, if you need to migrate + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + If a repository has not yet been started, one will be started outside + your application supervision tree and shutdown afterwards. 
+ + ## Examples + + $ mix ecto.migrate + $ mix ecto.migrate -r Custom.Repo + + $ mix ecto.migrate -n 3 + $ mix ecto.migrate --step 3 + + $ mix ecto.migrate --to 20080906120000 + + ## Command line options + + * `--all` - run all pending migrations + + * `--log-migrations-sql` - log SQL generated by migration commands + + * `--log-migrator-sql` - log SQL generated by the migrator, such as + transactions, table locks, etc + + * `--log-level` (since v3.11.0) - the level to set for `Logger`. This task + does not start your application, so whatever level you have configured in + your config files will not be used. If this is not provided, no level + will be set, so that if you set it yourself before calling this task + then this won't interfere. Can be any of the `t:Logger.level/0` levels + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted + as if they were in the same one + + * `--no-compile` - does not compile applications before migrating + + * `--no-deps-check` - does not check dependencies before migrating + + * `--pool-size` - the pool size if the repository is started + only for the task (defaults to 2) + + * `--prefix` - the prefix to run migrations on + + * `--quiet` - do not log migration commands + + * `-r`, `--repo` - the repo to migrate + + * `--step`, `-n` - run n number of pending migrations + + * `--strict-version-order` - abort when applying a migration with old + timestamp (otherwise it emits a warning) + + * `--to` - run all migrations up to and including version + + * `--to-exclusive` - run all migrations up to and excluding version + + """ + + @impl true + def run(args, migrator \\ &Ecto.Migrator.run/4) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + opts = + if opts[:to] || opts[:to_exclusive] || opts[:step] || 
opts[:all], + do: opts, + else: Keyword.put(opts, :all, true) + + opts = + if opts[:quiet], + do: Keyword.merge(opts, log: false, log_migrations_sql: false, log_migrator_sql: false), + else: opts + + if log_level = opts[:log_level] do + Logger.configure(level: String.to_existing_atom(log_level)) + end + + # Start ecto_sql explicitly before as we don't need + # to restart those apps if migrated. + {:ok, _} = Application.ensure_all_started(:ecto_sql) + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + pool = repo.config()[:pool] + + fun = + if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do + &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :up, opts) end) + else + &migrator.(&1, paths, :up, opts) + end + + case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do + {:ok, _migrated, _apps} -> + :ok + + {:error, error} -> + Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") + end + end + + :ok + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex b/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex new file mode 100644 index 0000000..5ebc5a6 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex @@ -0,0 +1,97 @@ +defmodule Mix.Tasks.Ecto.Migrations do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Displays the repository migration status" + + @aliases [ + r: :repo + ] + + @switches [ + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep, + prefix: :string + ] + + @moduledoc """ + Displays the up / down migration status for the given repository. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + By default, migrations are expected at "priv/YOUR_REPO/migrations" + directory of the current application but it can be configured + by specifying the `:priv` key under the repository configuration. 
+ + If the repository has not been started yet, one will be + started outside our application supervision tree and shutdown + afterwards. + + ## Examples + + $ mix ecto.migrations + $ mix ecto.migrations -r Custom.Repo + + ## Command line options + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted as + if they were in the same one. + + Note, if you have previously run migrations from paths `a/` and `b/`, and now + run `mix ecto.migrations --migrations-path a/` (omitting path `b/`), the + migrations from the path `b/` will be shown in the output as `** FILE NOT FOUND **`. + + * `--no-compile` - does not compile applications before running + + * `--no-deps-check` - does not check dependencies before running + + * `--prefix` - the prefix to check migrations on + + * `-r`, `--repo` - the repo to obtain the status for + + """ + + @impl true + def run(args, migrations \\ &Ecto.Migrator.migrations/3, puts \\ &IO.puts/1) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + + case Ecto.Migrator.with_repo(repo, &migrations.(&1, paths, opts), mode: :temporary) do + {:ok, repo_status, _} -> + puts.( + """ + + Repo: #{inspect(repo)} + + Status Migration ID Migration Name + -------------------------------------------------- + """ <> + Enum.map_join(repo_status, "\n", fn {status, number, description} -> + " #{format(status, 10)}#{format(number, 16)}#{description}" + end) <> "\n" + ) + + {:error, error} -> + Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") + end + end + + :ok + end + + defp format(content, pad) do + content + |> to_string + |> String.pad_trailing(pad) + end +end diff --git 
a/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex b/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex new file mode 100644 index 0000000..a2906c2 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex @@ -0,0 +1,158 @@ +defmodule Mix.Tasks.Ecto.Rollback do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Rolls back the repository migrations" + + @aliases [ + r: :repo, + n: :step + ] + + @switches [ + all: :boolean, + step: :integer, + to: :integer, + to_exclusive: :integer, + quiet: :boolean, + prefix: :string, + pool_size: :integer, + log_level: :string, + log_migrations_sql: :boolean, + log_migrator_sql: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep + ] + + @moduledoc """ + Reverts applied migrations in the given repository. + + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application, where "YOUR_REPO" is the last segment + in your repository name. For example, the repository `MyApp.Repo` + will use "priv/repo/migrations". The repository `Whatever.MyRepo` + will use "priv/my_repo/migrations". + + You can configure a repository to use another directory by specifying + the `:priv` key under the repository configuration. The "migrations" + part will be automatically appended to it. For instance, to use + "priv/custom_repo/migrations": + + config :my_app, MyApp.Repo, priv: "priv/custom_repo" + + This task rolls back the last applied migration by default. To roll + back to a version number, supply `--to version_number`. To roll + back a specific number of times, use `--step n`. To undo all applied + migrations, provide `--all`. + + The repositories to rollback are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. 
+ + If a repository has not yet been started, one will be started outside + your application supervision tree and shutdown afterwards. + + ## Examples + + $ mix ecto.rollback + $ mix ecto.rollback -r Custom.Repo + + $ mix ecto.rollback -n 3 + $ mix ecto.rollback --step 3 + + $ mix ecto.rollback --to 20080906120000 + + ## Command line options + + * `--all` - run all pending migrations + + * `--log-migrations-sql` - log SQL generated by migration commands + + * `--log-migrator-sql` - log SQL generated by the migrator, such as + transactions, table locks, etc + + * `--log-level` (since v3.11.0) - the level to set for `Logger`. This task + does not start your application, so whatever level you have configured in + your config files will not be used. If this is not provided, no level + will be set, so that if you set it yourself before calling this task + then this won't interfere. Can be any of the `t:Logger.level/0` levels + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. 
This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted + as if they were in the same one + + * `--no-compile` - does not compile applications before migrating + + * `--no-deps-check` - does not check dependencies before migrating + + * `--pool-size` - the pool size if the repository is started + only for the task (defaults to 2) + + * `--prefix` - the prefix to run migrations on + + * `--quiet` - do not log migration commands + + * `-r`, `--repo` - the repo to migrate + + * `--step`, `-n` - revert n migrations + + * `--strict-version-order` - abort when applying a migration with old + timestamp (otherwise it emits a warning) + + * `--to` - revert all migrations down to and including version + + * `--to-exclusive` - revert all migrations down to and excluding version + + """ + + @impl true + def run(args, migrator \\ &Ecto.Migrator.run/4) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + opts = + if opts[:to] || opts[:to_exclusive] || opts[:step] || opts[:all], + do: opts, + else: Keyword.put(opts, :step, 1) + + opts = + if opts[:quiet], + do: Keyword.merge(opts, log: false, log_migrations_sql: false, log_migrator_sql: false), + else: opts + + if log_level = opts[:log_level] do + Logger.configure(level: String.to_existing_atom(log_level)) + end + + # Start ecto_sql explicitly before as we don't need + # to restart those apps if migrated. 
+ {:ok, _} = Application.ensure_all_started(:ecto_sql) + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + pool = repo.config()[:pool] + + fun = + if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do + &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :down, opts) end) + else + &migrator.(&1, paths, :down, opts) + end + + case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do + {:ok, _migrated, _apps} -> + :ok + + {:error, error} -> + Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") + end + end + + :ok + end +end diff --git a/deps/ecto_sql/mix.exs b/deps/ecto_sql/mix.exs new file mode 100644 index 0000000..a0c11fd --- /dev/null +++ b/deps/ecto_sql/mix.exs @@ -0,0 +1,207 @@ +defmodule EctoSQL.MixProject do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/ecto_sql" + @version "3.13.5" + @adapters ~w(pg myxql tds) + + def project do + [ + app: :ecto_sql, + version: @version, + elixir: "~> 1.14", + deps: deps(), + test_paths: test_paths(System.get_env("ECTO_ADAPTER")), + xref: [ + exclude: [ + MyXQL, + Ecto.Adapters.MyXQL.Connection, + Postgrex, + Ecto.Adapters.Postgres.Connection, + Tds, + Tds.Ecto.UUID, + Ecto.Adapters.Tds.Connection + ] + ], + + # Custom testing + aliases: [ + "test.all": ["test", "test.adapters", "test.as_a_dep"], + "test.adapters": &test_adapters/1, + "test.as_a_dep": &test_as_a_dep/1 + ], + preferred_cli_env: ["test.all": :test, "test.adapters": :test], + + # Hex + description: "SQL-based adapters for Ecto and database migrations", + package: package(), + + # Docs + name: "Ecto SQL", + docs: docs() + ] + end + + def application do + [ + extra_applications: [:logger, :eex], + env: [postgres_map_type: "jsonb"], + mod: {Ecto.Adapters.SQL.Application, []} + ] + end + + defp deps do + [ + ecto_dep(), + {:telemetry, "~> 0.4.0 or ~> 1.0"}, + + # Drivers + {:db_connection, "~> 2.5 or ~> 2.4.1"}, + postgrex_dep(), + 
myxql_dep(), + tds_dep(), + + # Bring something in for JSON during tests + {:jason, ">= 0.0.0", only: [:test, :docs]}, + + # Docs + {:ex_doc, "~> 0.21", only: :docs}, + + # Benchmarks + {:benchee, "~> 1.0", only: :bench} + ] + end + + defp ecto_dep do + if path = System.get_env("ECTO_PATH") do + {:ecto, path: path} + else + {:ecto, "~> 3.13.0"} + end + end + + defp postgrex_dep do + if path = System.get_env("POSTGREX_PATH") do + {:postgrex, path: path} + else + {:postgrex, "~> 0.19 or ~> 1.0", optional: true} + end + end + + defp myxql_dep do + if path = System.get_env("MYXQL_PATH") do + {:myxql, path: path} + else + {:myxql, "~> 0.7", optional: true} + end + end + + defp tds_dep do + if path = System.get_env("TDS_PATH") do + {:tds, path: path} + else + {:tds, "~> 2.1.1 or ~> 2.2", optional: true} + end + end + + defp test_paths(adapter) when adapter in @adapters, do: ["integration_test/#{adapter}"] + defp test_paths(nil), do: ["test"] + defp test_paths(other), do: raise("unknown adapter #{inspect(other)}") + + defp package do + [ + maintainers: ["Eric Meadows-Jönsson", "José Valim", "James Fish", "Michał Muskała"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url}, + files: + ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ + ~w(integration_test/sql integration_test/support) + ] + end + + defp test_as_a_dep(args) do + IO.puts("==> Compiling ecto_sql from a dependency") + File.rm_rf!("tmp/as_a_dep") + File.mkdir_p!("tmp/as_a_dep") + + File.cd!("tmp/as_a_dep", fn -> + File.write!("mix.exs", """ + defmodule DepsOnEctoSQL.MixProject do + use Mix.Project + + def project do + [ + app: :deps_on_ecto_sql, + version: "0.0.1", + deps: [{:ecto_sql, path: "../.."}] + ] + end + end + """) + + mix_cmd_with_status_check(["do", "deps.get,", "compile", "--force" | args]) + end) + end + + defp test_adapters(args) do + for adapter <- @adapters, do: env_run(adapter, args) + end + + defp env_run(adapter, args) do + IO.puts("==> Running tests for 
ECTO_ADAPTER=#{adapter} mix test") + + mix_cmd_with_status_check( + ["test", ansi_option() | args], + env: [{"ECTO_ADAPTER", adapter}] + ) + end + + defp ansi_option do + if IO.ANSI.enabled?(), do: "--color", else: "--no-color" + end + + defp mix_cmd_with_status_check(args, opts \\ []) do + {_, res} = System.cmd("mix", args, [into: IO.binstream(:stdio, :line)] ++ opts) + + if res > 0 do + System.at_exit(fn _ -> exit({:shutdown, 1}) end) + end + end + + defp docs do + [ + main: "Ecto.Adapters.SQL", + source_ref: "v#{@version}", + canonical: "http://hexdocs.pm/ecto_sql", + source_url: @source_url, + extras: ["CHANGELOG.md"], + skip_undefined_reference_warnings_on: ["CHANGELOG.md"], + groups_for_modules: [ + # Ecto.Adapters.SQL, + # Ecto.Adapters.SQL.Sandbox, + # Ecto.Migration, + # Ecto.Migrator, + + "Built-in adapters": [ + Ecto.Adapters.MyXQL, + Ecto.Adapters.Tds, + Ecto.Adapters.Postgres + ], + "TDS Types": [ + Tds.Ecto.UUID, + Tds.Ecto.VarChar + ], + "Adapter specification": [ + Ecto.Adapter.Migration, + Ecto.Adapter.Structure, + Ecto.Adapters.SQL.Connection, + Ecto.Migration.Command, + Ecto.Migration.Constraint, + Ecto.Migration.Index, + Ecto.Migration.Reference, + Ecto.Migration.Table + ] + ] + ] + end +end diff --git a/deps/file_system/.hex b/deps/file_system/.hex new file mode 100644 index 0000000..de0edc8 Binary files /dev/null and b/deps/file_system/.hex differ diff --git a/deps/file_system/README.md b/deps/file_system/README.md new file mode 100644 index 0000000..aecb0ed --- /dev/null +++ b/deps/file_system/README.md @@ -0,0 +1,117 @@ +# FileSystem + +[![Module Version](https://img.shields.io/hexpm/v/file_system.svg)](https://hex.pm/packages/file_system) +[![Hex Docs](https://img.shields.io/badge/hex-docs-lightgreen.svg)](https://hexdocs.pm/file_system/) +[![Total Download](https://img.shields.io/hexpm/dt/file_system.svg)](https://hex.pm/packages/file_system) 
+[![License](https://img.shields.io/hexpm/l/file_system.svg)](https://github.com/falood/file_system/blob/master/LICENSE) +[![Last Updated](https://img.shields.io/github/last-commit/falood/file_system.svg)](https://github.com/falood/file_system/commits/master) +[![CI Linux](https://github.com/falood/file_system/actions/workflows/ci-linux.yml/badge.svg)](https://github.com/falood/file_system/actions) +[![CI MacOS](https://github.com/falood/file_system/actions/workflows/ci-macos.yml/badge.svg)](https://github.com/falood/file_system/actions) +[![CI Windows](https://github.com/falood/file_system/actions/workflows/ci-windows.yml/badge.svg)](https://github.com/falood/file_system/actions) + +An Elixir file change watcher wrapper based on +[FS](https://github.com/synrc/fs), the native file system listener. + +## System Support + +- MacOS - [fsevent](https://github.com/thibaudgg/rb-fsevent) +- GNU/Linux, FreeBSD, DragonFly and OpenBSD - [inotify](https://github.com/rvoicilas/inotify-tools/wiki) +- Windows - [inotify-win](https://github.com/thekid/inotify-win) + +On MacOS 10.14, to compile `mac_listener`, run: + +```console +open /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10.14.pkg +``` + +On newer versions this file doesn't exist. But it still should work just fine as long as you have xcode installed. + +## Usage + +Add `file_system` to the `deps` of your mix.exs + +``` elixir +defmodule MyApp.Mixfile do + use Mix.Project + + def project do + ... + end + + defp deps do + [ + {:file_system, "~> 1.0", only: :test}, + ] + end + ... 
+end +``` + +### Subscription API + +You can spawn a worker and subscribe to events from it: + +```elixir +{:ok, pid} = FileSystem.start_link(dirs: ["/path/to/some/files"]) +FileSystem.subscribe(pid) +``` + +or + +```elixir +{:ok, pid} = FileSystem.start_link(dirs: ["/path/to/some/files"], name: :my_monitor_name) +FileSystem.subscribe(:my_monitor_name) +``` + +The `pid` you subscribed from will now receive messages like: + +``` +{:file_event, worker_pid, {file_path, events}} +``` +and + +``` +{:file_event, worker_pid, :stop} +``` + +### Example Using GenServer + +```elixir +defmodule Watcher do + use GenServer + + def start_link(args) do + GenServer.start_link(__MODULE__, args) + end + + def init(args) do + {:ok, watcher_pid} = FileSystem.start_link(args) + FileSystem.subscribe(watcher_pid) + {:ok, %{watcher_pid: watcher_pid}} + end + + def handle_info({:file_event, watcher_pid, {path, events}}, %{watcher_pid: watcher_pid} = state) do + # Your own logic for path and events + {:noreply, state} + end + + def handle_info({:file_event, watcher_pid, :stop}, %{watcher_pid: watcher_pid} = state) do + # Your own logic when monitor stop + {:noreply, state} + end +end +``` + +## Backend Options + +For each platform, you can pass extra options to the underlying listener +process. + +Each backend supports different extra options, check backend module +documentation for more details. + +Here is an example to get instant notifications on file changes for MacOS: + +```elixir +FileSystem.start_link(dirs: ["/path/to/some/files"], latency: 0, watch_root: true) +``` diff --git a/deps/file_system/c_src/mac/cli.c b/deps/file_system/c_src/mac/cli.c new file mode 100644 index 0000000..84b0c86 --- /dev/null +++ b/deps/file_system/c_src/mac/cli.c @@ -0,0 +1,180 @@ +#include +#include "cli.h" + +const char* cli_info_purpose = "A flexible command-line interface for the FSEvents API"; +const char* cli_info_usage = "Usage: fsevent_watch [OPTIONS]... 
[PATHS]..."; +const char* cli_info_help[] = { + " -h, --help you're looking at it", + " -V, --version print version number and exit", + " -p, --show-plist display the embedded Info.plist values", + " -s, --since-when=EventID fire historical events since ID", + " -l, --latency=seconds latency period (default='0.5')", + " -n, --no-defer enable no-defer latency modifier", + " -r, --watch-root watch for when the root path has changed", + // " -i, --ignore-self ignore current process", + " -F, --file-events provide file level event data", + " -f, --format=name output format (ignored)", + 0 +}; + +static void default_args (struct cli_info* args_info) +{ + args_info->since_when_arg = kFSEventStreamEventIdSinceNow; + args_info->latency_arg = 0.5; + args_info->no_defer_flag = false; + args_info->watch_root_flag = false; + args_info->ignore_self_flag = false; + args_info->file_events_flag = false; + args_info->mark_self_flag = false; + args_info->format_arg = 0; +} + +static void cli_parser_release (struct cli_info* args_info) +{ + unsigned int i; + + for (i=0; i < args_info->inputs_num; ++i) { + free(args_info->inputs[i]); + } + + if (args_info->inputs_num) { + free(args_info->inputs); + } + + args_info->inputs_num = 0; +} + +void cli_parser_init (struct cli_info* args_info) +{ + default_args(args_info); + + args_info->inputs = 0; + args_info->inputs_num = 0; +} + +void cli_parser_free (struct cli_info* args_info) +{ + cli_parser_release(args_info); +} + +static void cli_print_info_dict (const void *key, + const void *value, + void *context) +{ + CFStringRef entry = CFStringCreateWithFormat(NULL, NULL, + CFSTR("%@:\n %@"), key, value); + if (entry) { + CFShow(entry); + CFRelease(entry); + } +} + +void cli_show_plist (void) +{ + CFBundleRef mainBundle = CFBundleGetMainBundle(); + CFRetain(mainBundle); + CFDictionaryRef mainBundleDict = CFBundleGetInfoDictionary(mainBundle); + if (mainBundleDict) { + CFRetain(mainBundleDict); + printf("Embedded Info.plist metadata:\n\n"); + 
CFDictionaryApplyFunction(mainBundleDict, cli_print_info_dict, NULL); + CFRelease(mainBundleDict); + } + CFRelease(mainBundle); + printf("\n"); +} + +void cli_print_version (void) +{ + printf("%s %s\n\n", "VXZ", "1.0"); +} + +void cli_print_help (void) +{ + cli_print_version(); + + printf("\n%s\n", cli_info_purpose); + printf("\n%s\n", cli_info_usage); + printf("\n"); + + int i = 0; + while (cli_info_help[i]) { + printf("%s\n", cli_info_help[i++]); + } +} + +int cli_parser (int argc, const char** argv, struct cli_info* args_info) +{ + static struct option longopts[] = { + { "help", no_argument, NULL, 'h' }, + { "version", no_argument, NULL, 'V' }, + { "show-plist", no_argument, NULL, 'p' }, + { "since-when", required_argument, NULL, 's' }, + { "latency", required_argument, NULL, 'l' }, + { "no-defer", no_argument, NULL, 'n' }, + { "watch-root", no_argument, NULL, 'r' }, + { "ignore-self", no_argument, NULL, 'i' }, + { "file-events", no_argument, NULL, 'F' }, + { "mark-self", no_argument, NULL, 'm' }, + { "format", required_argument, NULL, 'f' }, + { 0, 0, 0, 0 } + }; + + const char* shortopts = "hVps:l:nriFf:"; + + int c = -1; + + while ((c = getopt_long(argc, (char * const*)argv, shortopts, longopts, NULL)) != -1) { + switch(c) { + case 's': // since-when + args_info->since_when_arg = strtoull(optarg, NULL, 0); + break; + case 'l': // latency + args_info->latency_arg = strtod(optarg, NULL); + break; + case 'n': // no-defer + args_info->no_defer_flag = true; + break; + case 'r': // watch-root + args_info->watch_root_flag = true; + break; + case 'i': // ignore-self + args_info->ignore_self_flag = true; + break; + case 'F': // file-events + args_info->file_events_flag = true; + break; + case 'm': // mark-self + args_info->mark_self_flag = true; + break; + case 'f': // format + // XXX: ignored + break; + case 'V': // version + cli_print_version(); + exit(EXIT_SUCCESS); + case 'p': // show-plist + cli_show_plist(); + exit(EXIT_SUCCESS); + case 'h': // help + case '?': 
// invalid option + case ':': // missing argument + cli_print_help(); + exit((c == 'h') ? EXIT_SUCCESS : EXIT_FAILURE); + } + } + + if (optind < argc) { + int i = 0; + args_info->inputs_num = (unsigned int)(argc - optind); + args_info->inputs = + (char**)(malloc ((args_info->inputs_num)*sizeof(char*))); + while (optind < argc) + if (argv[optind++] != argv[0]) { + args_info->inputs[i++] = strdup(argv[optind-1]); + } + } + + return EXIT_SUCCESS; +} + diff --git a/deps/file_system/c_src/mac/cli.h b/deps/file_system/c_src/mac/cli.h new file mode 100644 index 0000000..f176cf0 --- /dev/null +++ b/deps/file_system/c_src/mac/cli.h @@ -0,0 +1,36 @@ +#ifndef CLI_H +#define CLI_H + +#include "common.h" + +#ifndef CLI_NAME +#define CLI_NAME "fsevent_watch" +#endif /* CLI_NAME */ + +struct cli_info { + UInt64 since_when_arg; + double latency_arg; + bool no_defer_flag; + bool watch_root_flag; + bool ignore_self_flag; + bool file_events_flag; + bool mark_self_flag; + int format_arg; + + char** inputs; + unsigned inputs_num; +}; + +extern const char* cli_info_purpose; +extern const char* cli_info_usage; +extern const char* cli_info_help[]; + +void cli_print_help(void); +void cli_print_version(void); + +int cli_parser (int argc, const char** argv, struct cli_info* args_info); +void cli_parser_init (struct cli_info* args_info); +void cli_parser_free (struct cli_info* args_info); + + +#endif /* CLI_H */ diff --git a/deps/file_system/c_src/mac/common.h b/deps/file_system/c_src/mac/common.h new file mode 100644 index 0000000..70bd648 --- /dev/null +++ b/deps/file_system/c_src/mac/common.h @@ -0,0 +1,55 @@ +#ifndef fsevent_watch_common_h +#define fsevent_watch_common_h + +#include +#include +#include +#include +#include "compat.h" + +#define _str(s) #s +#define _xstr(s) _str(s) + +#define COMPILED_AT __DATE__ " " __TIME__ + +#define FPRINTF_FLAG_CHECK(flags, flag, msg, fd) \ + do { \ + if ((flags) & (flag)) { \ + fprintf(fd, "%s\n", msg); } } \ + while (0) + +#define 
FLAG_CHECK_STDERR(flags, flag, msg) \ + FPRINTF_FLAG_CHECK(flags, flag, msg, stderr) + +/* + * FSEVENTSBITS: + * generated by `make printflags` (and pasted here) + * flags MUST be ordered (bits ascending) and sorted + * + * idea from: http://www.openbsd.org/cgi-bin/cvsweb/src/sbin/ifconfig/ifconfig.c (see printb()) + */ +#define FSEVENTSBITS \ +"\1mustscansubdirs\2userdropped\3kerneldropped\4eventidswrapped\5historydone\6rootchanged\7mount\10unmount\11created\12removed\13inodemetamod\14renamed\15modified\16finderinfomod\17changeowner\20xattrmod\21isfile\22isdir\23issymlink\24ownevent" + +static inline void +sprintb(char *buf, unsigned short v, char *bits) +{ + int i, any = 0; + char c; + char *bufp = buf; + + while ((i = *bits++)) { + if (v & (1 << (i-1))) { + if (any) + *bufp++ = ','; + any = 1; + for (; (c = *bits) > 32; bits++) + *bufp++ = c; + } else + for (; *bits > 32; bits++) + ; + } + *bufp = '\0'; +} + +#endif /* fsevent_watch_common_h */ diff --git a/deps/file_system/c_src/mac/compat.c b/deps/file_system/c_src/mac/compat.c new file mode 100644 index 0000000..ab84dfd --- /dev/null +++ b/deps/file_system/c_src/mac/compat.c @@ -0,0 +1,25 @@ +#include "compat.h" + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1060 +FSEventStreamCreateFlags kFSEventStreamCreateFlagIgnoreSelf = 0x00000008; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1070 +FSEventStreamCreateFlags kFSEventStreamCreateFlagFileEvents = 0x00000010; +FSEventStreamEventFlags kFSEventStreamEventFlagItemCreated = 0x00000100; +FSEventStreamEventFlags kFSEventStreamEventFlagItemRemoved = 0x00000200; +FSEventStreamEventFlags kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400; +FSEventStreamEventFlags kFSEventStreamEventFlagItemRenamed = 0x00000800; +FSEventStreamEventFlags kFSEventStreamEventFlagItemModified = 0x00001000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemChangeOwner = 0x00004000; +FSEventStreamEventFlags 
kFSEventStreamEventFlagItemXattrMod = 0x00008000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsFile = 0x00010000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsDir = 0x00020000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsSymlink = 0x00040000; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1090 +FSEventStreamCreateFlags kFSEventStreamCreateFlagMarkSelf = 0x00000020; +FSEventStreamEventFlags kFSEventStreamEventFlagOwnEvent = 0x00080000; +#endif diff --git a/deps/file_system/c_src/mac/compat.h b/deps/file_system/c_src/mac/compat.h new file mode 100644 index 0000000..d44c0c8 --- /dev/null +++ b/deps/file_system/c_src/mac/compat.h @@ -0,0 +1,47 @@ +/** + * @headerfile compat.h + * FSEventStream flag compatibility shim + * + * In order to compile a binary against an older SDK yet still support the + * features present in later OS releases, we need to define any missing enum + * constants not present in the older SDK. This allows us to safely defer + * feature detection to runtime (and avoid recompilation). 
+ */ + + +#ifndef fsevent_watch_compat_h +#define fsevent_watch_compat_h + +#ifndef __CORESERVICES__ +#include +#endif // __CORESERVICES__ + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1060 +// ignoring events originating from the current process introduced in 10.6 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagIgnoreSelf; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1070 +// file-level events introduced in 10.7 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagFileEvents; +extern FSEventStreamEventFlags kFSEventStreamEventFlagItemCreated, + kFSEventStreamEventFlagItemRemoved, + kFSEventStreamEventFlagItemInodeMetaMod, + kFSEventStreamEventFlagItemRenamed, + kFSEventStreamEventFlagItemModified, + kFSEventStreamEventFlagItemFinderInfoMod, + kFSEventStreamEventFlagItemChangeOwner, + kFSEventStreamEventFlagItemXattrMod, + kFSEventStreamEventFlagItemIsFile, + kFSEventStreamEventFlagItemIsDir, + kFSEventStreamEventFlagItemIsSymlink; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1090 +// marking, rather than ignoring, events originating from the current process introduced in 10.9 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagMarkSelf; +extern FSEventStreamEventFlags kFSEventStreamEventFlagOwnEvent; +#endif + + +#endif // fsevent_watch_compat_h diff --git a/deps/file_system/c_src/mac/main.c b/deps/file_system/c_src/mac/main.c new file mode 100644 index 0000000..392529b --- /dev/null +++ b/deps/file_system/c_src/mac/main.c @@ -0,0 +1,234 @@ +#include "common.h" +#include "cli.h" + +// TODO: set on fire. cli.{h,c} handle both parsing and defaults, so there's +// no need to set those here. also, in order to scope metadata by path, +// each stream will need its own configuration... so this won't work as +// a global any more. 
In the end the goal is to make the output format +// able to declare not just that something happened and what flags were +// attached, but what path it was watching that caused those events (so +// that the path itself can be used for routing that information to the +// relevant callback). +// +// Structure for storing metadata parsed from the commandline +static struct { + FSEventStreamEventId sinceWhen; + CFTimeInterval latency; + FSEventStreamCreateFlags flags; + CFMutableArrayRef paths; + int format; +} config = { + (UInt64) kFSEventStreamEventIdSinceNow, + (double) 0.3, + (CFOptionFlags) kFSEventStreamCreateFlagNone, + NULL, + 0 +}; + +// Prototypes +static void append_path(const char* path); +static inline void parse_cli_settings(int argc, const char* argv[]); +static void callback(FSEventStreamRef streamRef, + void* clientCallBackInfo, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]); + + +static void append_path(const char* path) +{ + CFStringRef pathRef = CFStringCreateWithCString(kCFAllocatorDefault, + path, + kCFStringEncodingUTF8); + CFArrayAppendValue(config.paths, pathRef); + CFRelease(pathRef); +} + +// Parse commandline settings +static inline void parse_cli_settings(int argc, const char* argv[]) +{ + // runtime os version detection + SInt32 osMajorVersion, osMinorVersion; + if (!(Gestalt(gestaltSystemVersionMajor, &osMajorVersion) == noErr)) { + osMajorVersion = 0; + } + if (!(Gestalt(gestaltSystemVersionMinor, &osMinorVersion) == noErr)) { + osMinorVersion = 0; + } + + if ((osMajorVersion == 10) & (osMinorVersion < 5)) { + fprintf(stderr, "The FSEvents API is unavailable on this version of macos!\n"); + exit(EXIT_FAILURE); + } + + struct cli_info args_info; + cli_parser_init(&args_info); + + if (cli_parser(argc, argv, &args_info) != 0) { + exit(EXIT_FAILURE); + } + + config.paths = CFArrayCreateMutable(NULL, + (CFIndex)0, + &kCFTypeArrayCallBacks); + + config.sinceWhen 
= args_info.since_when_arg; + config.latency = args_info.latency_arg; + config.format = args_info.format_arg; + + if (args_info.no_defer_flag) { + config.flags |= kFSEventStreamCreateFlagNoDefer; + } + if (args_info.watch_root_flag) { + config.flags |= kFSEventStreamCreateFlagWatchRoot; + } + + if (args_info.ignore_self_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 6))) { + config.flags |= kFSEventStreamCreateFlagIgnoreSelf; + } else { + fprintf(stderr, "MacOSX 10.6 or later is required for --ignore-self\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.file_events_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 7))) { + config.flags |= kFSEventStreamCreateFlagFileEvents; + } else { + fprintf(stderr, "MacOSX 10.7 or later required for --file-events\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.mark_self_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 9))) { + config.flags |= kFSEventStreamCreateFlagMarkSelf; + } else { + fprintf(stderr, "MacOSX 10.9 or later required for --mark-self\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.inputs_num == 0) { + append_path("."); + } else { + for (unsigned int i=0; i < args_info.inputs_num; ++i) { + append_path(args_info.inputs[i]); + } + } + + cli_parser_free(&args_info); + +#ifdef DEBUG + fprintf(stderr, "config.sinceWhen %llu\n", config.sinceWhen); + fprintf(stderr, "config.latency %f\n", config.latency); + fprintf(stderr, "config.flags %#.8x\n", config.flags); + + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagUseCFTypes, + " Using CF instead of C types"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagNoDefer, + " NoDefer latency modifier enabled"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagWatchRoot, + " WatchRoot notifications enabled"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagIgnoreSelf, + " IgnoreSelf enabled"); + FLAG_CHECK_STDERR(config.flags, 
kFSEventStreamCreateFlagFileEvents, + " FileEvents enabled"); + + fprintf(stderr, "config.paths\n"); + + long numpaths = CFArrayGetCount(config.paths); + + for (long i = 0; i < numpaths; i++) { + char path[PATH_MAX]; + CFStringGetCString(CFArrayGetValueAtIndex(config.paths, i), + path, + PATH_MAX, + kCFStringEncodingUTF8); + fprintf(stderr, " %s\n", path); + } + + fprintf(stderr, "\n"); +#endif +} + +static void callback(__attribute__((unused)) FSEventStreamRef streamRef, + __attribute__((unused)) void* clientCallBackInfo, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]) +{ + char** paths = eventPaths; + char *buf = calloc(sizeof(FSEVENTSBITS), sizeof(char)); + + for (size_t i = 0; i < numEvents; i++) { + sprintb(buf, eventFlags[i], FSEVENTSBITS); + printf("%llu\t%#.8x=[%s]\t%s\n", eventIds[i], eventFlags[i], buf, paths[i]); + } + fflush(stdout); + free(buf); + + if (fcntl(STDIN_FILENO, F_GETFD) == -1) { + CFRunLoopStop(CFRunLoopGetCurrent()); + } +} + +static void stdin_callback(CFFileDescriptorRef fdref, CFOptionFlags callBackTypes, void *info) +{ + char buf[1024]; + int nread; + + do { + nread = read(STDIN_FILENO, buf, sizeof(buf)); + if (nread == -1 && errno == EAGAIN) { + CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack); + return; + } else if (nread == 0) { + exit(1); + return; + } + } while (nread > 0); +} + +int main(int argc, const char* argv[]) +{ + parse_cli_settings(argc, argv); + + FSEventStreamContext context = {0, NULL, NULL, NULL, NULL}; + FSEventStreamRef stream; + stream = FSEventStreamCreate(kCFAllocatorDefault, + (FSEventStreamCallback)&callback, + &context, + config.paths, + config.sinceWhen, + config.latency, + config.flags); + +#ifdef DEBUG + FSEventStreamShow(stream); + fprintf(stderr, "\n"); +#endif + + fcntl(STDIN_FILENO, F_SETFL, O_NONBLOCK); + + CFFileDescriptorRef fdref = CFFileDescriptorCreate(kCFAllocatorDefault, STDIN_FILENO, false, 
stdin_callback, NULL); + CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack); + CFRunLoopSourceRef source = CFFileDescriptorCreateRunLoopSource(kCFAllocatorDefault, fdref, 0); + CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopDefaultMode); + CFRelease(source); + + FSEventStreamScheduleWithRunLoop(stream, + CFRunLoopGetCurrent(), + kCFRunLoopDefaultMode); + FSEventStreamStart(stream); + CFRunLoopRun(); + FSEventStreamFlushSync(stream); + FSEventStreamStop(stream); + + return 0; +} + +// vim: ts=2 sts=2 et sw=2 diff --git a/deps/file_system/hex_metadata.config b/deps/file_system/hex_metadata.config new file mode 100644 index 0000000..320a957 --- /dev/null +++ b/deps/file_system/hex_metadata.config @@ -0,0 +1,20 @@ +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/falood/file_system">>}]}. +{<<"name">>,<<"file_system">>}. +{<<"version">>,<<"1.1.1">>}. +{<<"description">>, + <<"An Elixir file system change watcher wrapper based on FS, the native file\nsystem listener.">>}. +{<<"elixir">>,<<"~> 1.11">>}. +{<<"app">>,<<"file_system">>}. +{<<"files">>, + [<<"lib">>,<<"lib/file_system">>,<<"lib/file_system/worker.ex">>, + <<"lib/file_system/backends">>,<<"lib/file_system/backends/fs_inotify.ex">>, + <<"lib/file_system/backends/fs_mac.ex">>, + <<"lib/file_system/backends/fs_poll.ex">>, + <<"lib/file_system/backends/fs_windows.ex">>, + <<"lib/file_system/backend.ex">>,<<"lib/file_system.ex">>,<<"README.md">>, + <<"mix.exs">>,<<"c_src/mac/cli.c">>,<<"c_src/mac/cli.h">>, + <<"c_src/mac/common.h">>,<<"c_src/mac/compat.c">>,<<"c_src/mac/compat.h">>, + <<"c_src/mac/main.c">>,<<"priv/inotifywait.exe">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>,[]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/file_system/lib/file_system.ex b/deps/file_system/lib/file_system.ex new file mode 100644 index 0000000..3a03d6c --- /dev/null +++ b/deps/file_system/lib/file_system.ex @@ -0,0 +1,62 @@ +defmodule FileSystem do + @moduledoc """ + A `GenServer` process to watch file system changes. + + The process receives data from Port, parse event, and send it to the worker + process. + """ + + @doc """ + Starts a `GenServer` process and linked to the current process. + + ## Options + + * `:dirs` ([string], required), the list of directory to monitor. + + * `:backend` (atom, optional), default backends: `:fs_mac`. Available + backends: `:fs_mac`, `:fs_inotify`, and `:fs_windows`. + + * `:name` (atom, optional), the `name` of the worker process to subscribe + to the file system listener. Alternative to using `pid` of the worker + process. + + * Additional backend implementation options. See backend module documents + for more details. + + ## Examples + + Start monitoring `/tmp/fs` directory using the default `:fs_mac` backend of + the current process: + + iex> {:ok, pid} = FileSystem.start_link(dirs: ["/tmp/fs"]) + iex> FileSystem.subscribe(pid) + + Get instant (`latench: 0`) notifications on file changes: + + iex> FileSystem.start_link(dirs: ["/path/to/some/files"], latency: 0) + + Monitor a directory by a process name: + + iex> FileSystem.start_link(backend: :fs_mac, dirs: ["/tmp/fs"], name: :worker) + iex> FileSystem.subscribe(:worker) + + """ + @spec start_link(Keyword.t()) :: GenServer.on_start() + def start_link(options) do + FileSystem.Worker.start_link(options) + end + + @doc """ + Register the current process as a subscriber of a `file_system` worker. 
+ + The `pid` you subscribed from will now receive messages like: + + {:file_event, worker_pid, {file_path, events}} + {:file_event, worker_pid, :stop} + + """ + @spec subscribe(GenServer.server()) :: :ok + def subscribe(pid) do + GenServer.call(pid, :subscribe) + end +end diff --git a/deps/file_system/lib/file_system/backend.ex b/deps/file_system/lib/file_system/backend.ex new file mode 100644 index 0000000..499d7ea --- /dev/null +++ b/deps/file_system/lib/file_system/backend.ex @@ -0,0 +1,89 @@ +require Logger + +defmodule FileSystem.Backend do + @moduledoc """ + A behaviour module for implementing different file system backend. + """ + + @callback bootstrap() :: :ok | {:error, atom()} + @callback supported_systems() :: [{atom(), atom()}] + @callback known_events() :: [atom()] + + @doc """ + Get and validate backend module. + + Returns `{:ok, backend_module}` upon success and `{:error, reason}` upon + failure. + + When `nil` is given, will return default backend by OS. + + When a custom module is given, make sure `start_link/1`, `bootstrap/0` and + `supported_system/0` are defnied. 
+ """ + @spec backend(atom) :: {:ok, atom()} | {:error, atom()} + def backend(backend) do + with {:ok, module} <- backend_module(backend), + :ok <- validate_os(backend, module), + :ok <- module.bootstrap() do + {:ok, module} + else + {:error, reason} -> {:error, reason} + end + end + + defp backend_module(nil) do + case :os.type() do + {:unix, :darwin} -> :fs_mac + {:unix, :linux} -> :fs_inotify + {:unix, :freebsd} -> :fs_inotify + {:unix, :dragonfly} -> :fs_inotify + {:unix, :openbsd} -> :fs_inotify + {:win32, :nt} -> :fs_windows + system -> {:unsupported_system, system} + end + |> backend_module + end + + defp backend_module(:fs_mac), do: {:ok, FileSystem.Backends.FSMac} + defp backend_module(:fs_inotify), do: {:ok, FileSystem.Backends.FSInotify} + defp backend_module(:fs_windows), do: {:ok, FileSystem.Backends.FSWindows} + defp backend_module(:fs_poll), do: {:ok, FileSystem.Backends.FSPoll} + + defp backend_module({:unsupported_system, system}) do + Logger.error( + "I'm so sorry but `file_system` does NOT support your current system #{inspect(system)} for now." + ) + + {:error, :unsupported_system} + end + + defp backend_module(module) do + functions = module.__info__(:functions) + + ({:start_link, 1} in functions && + {:bootstrap, 0} in functions && + {:supported_systems, 0} in functions) || + raise "illegal backend" + rescue + _ -> + Logger.error( + "You are using custom backend `#{inspect(module)}`, make sure it's a legal file_system backend module." + ) + + {:error, :illegal_backend} + end + + defp validate_os(backend, module) do + os_type = :os.type() + + if os_type in module.supported_systems() do + :ok + else + Logger.error( + "The backend `#{backend}` you are using does NOT support your current system #{inspect(os_type)}." 
+ ) + + {:error, :unsupported_system} + end + end +end diff --git a/deps/file_system/lib/file_system/backends/fs_inotify.ex b/deps/file_system/lib/file_system/backends/fs_inotify.ex new file mode 100644 index 0000000..f9ef358 --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_inotify.ex @@ -0,0 +1,222 @@ +require Logger + +defmodule FileSystem.Backends.FSInotify do + @moduledoc """ + File system backend for GNU/Linux, FreeBSD, DragonFly and OpenBSD. + + This file is a fork from https://github.com/synrc/fs. + + ## Backend Options + + * `:recursive` (bool, default: true), monitor directories and their contents recursively. + + ## Executable File Path + + Useful when running `:file_system` with escript. + + The default listener executable file is found through finding `inotifywait` from + `$PATH`. + + Two ways to customize the executable file path: + + * Module config with `config.exs`: + + ```elixir + config :file_system, :fs_inotify, + executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + ``` + + * System environment variable: + + ``` + export FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE="YOUR_EXECUTABLE_FILE_PATH"` + ``` + """ + + use GenServer + @behaviour FileSystem.Backend + @sep_char <<1>> + + def bootstrap do + exec_file = executable_path() + + if is_nil(exec_file) do + Logger.error( + "`inotify-tools` is needed to run `file_system` for your system, check https://github.com/rvoicilas/inotify-tools/wiki for more information about how to install it. If it's already installed but not be found, appoint executable file with `config.exs` or `FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE` env." 
+ ) + + {:error, :fs_inotify_bootstrap_error} + else + :ok + end + end + + def supported_systems do + [{:unix, :linux}, {:unix, :freebsd}, {:unix, :dragonfly}, {:unix, :openbsd}] + end + + def known_events do + [:created, :deleted, :closed, :modified, :isdir, :attribute, :undefined] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_inotify)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable("inotifywait") + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error("required argument `dirs` is missing") + {:error, :missing_dirs_argument} + + {dirs, rest} -> + format = ["%w", "%e", "%f"] |> Enum.join(@sep_char) |> to_charlist + + args = [ + ~c"-e", + ~c"modify", + ~c"-e", + ~c"close_write", + ~c"-e", + ~c"moved_to", + ~c"-e", + ~c"moved_from", + ~c"-e", + ~c"create", + ~c"-e", + ~c"delete", + ~c"-e", + ~c"attrib", + ~c"--format", + format, + ~c"--quiet", + ~c"-m", + ~c"-r" + | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1) + ] + + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + + defp parse_options([{:recursive, true} | t], result) do + parse_options(t, result) + end + + defp parse_options([{:recursive, false} | t], result) do + parse_options(t, result -- [~c"-r"]) + end + + defp parse_options([{:recursive, value} | t], result) do + Logger.error("unknown value `#{inspect(value)}` for recursive, ignore") + parse_options(t, result) + end + + defp parse_options([h | t], result) do + Logger.error("unknown option `#{inspect(h)}`, ignore") + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) 
do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + + case parse_options(rest) do + {:ok, port_args} -> + bash_args = [ + ~c"-c", + ~c"#{executable_path()} \"$0\" \"$@\" & PID=$!; read a; kill -KILL $PID" + ] + + all_args = + case :os.type() do + {:unix, :freebsd} -> + bash_args ++ [~c"--"] ++ port_args + + {:unix, :dragonfly} -> + bash_args ++ [~c"--"] ++ port_args + + _ -> + bash_args ++ port_args + end + + port = + Port.open( + {:spawn_executable, ~c"/bin/sh"}, + [ + :binary, + :stream, + :exit_status, + {:line, 16384}, + {:args, all_args} + ] + ) + + Process.link(port) + Process.flag(:trap_exit, true) + + {:ok, %{port: port, worker_pid: worker_pid}} + + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port} = state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info({:EXIT, port, _reason}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + {path, flags} = + case String.split(line, @sep_char, trim: true) do + [dir, flags, file] -> {Path.join(dir, file), flags} + [path, flags] -> {path, flags} + end + + {path, flags |> String.split(",") |> Enum.map(&convert_flag/1)} + end + + defp convert_flag("CREATE"), do: :created + defp convert_flag("MOVED_TO"), do: :moved_to + defp convert_flag("DELETE"), do: :deleted + defp convert_flag("MOVED_FROM"), do: :moved_from + defp convert_flag("ISDIR"), do: :isdir + defp convert_flag("MODIFY"), do: :modified + defp convert_flag("CLOSE_WRITE"), do: :modified + defp convert_flag("CLOSE"), do: :closed + defp convert_flag("ATTRIB"), do: 
:attribute + defp convert_flag(_), do: :undefined +end diff --git a/deps/file_system/lib/file_system/backends/fs_mac.ex b/deps/file_system/lib/file_system/backends/fs_mac.ex new file mode 100644 index 0000000..dc8fe4e --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_mac.ex @@ -0,0 +1,236 @@ +require Logger + +defmodule FileSystem.Backends.FSMac do + @moduledoc """ + File system backend for MacOS. + + The built-in executable file will be compile upon first use. + + This file is a fork from https://github.com/synrc/fs. + + ## Backend Options + + * `:latency` (float, default: 0.5), latency period. + + * `:no_defer` (bool, default: false), enable no-defer latency modifier. + Works with latency parameter. + + See `FSEvent` API documents + https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagnodefer. + + * `:watch_root` (bool, default: false), watch for when the root path has changed. + Set the flag `true` to monitor events when watching `/tmp/fs/dir` and run + `mv /tmp/fs /tmp/fx`. + + See `FSEvent` API documents + https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagwatchroot. + + * recursive is enabled by default and it can'b be disabled for now. + + ## Executable File Path + + Useful when running `:file_system` with escript. + + The default listener executable file is `priv/mac_listener` within the folder of + `:file_system` application. 
+ + Two ways to customize the executable file path: + + * Module config with `config.exs`: + + ```elixir + config :file_system, :fs_mac, + executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + ``` + + * System environment variable: + + ``` + export FILESYSTEM_FSMAC_EXECUTABLE_FILE="YOUR_EXECUTABLE_FILE_PATH"` + ``` + """ + + use GenServer + @behaviour FileSystem.Backend + + @default_exec_file "mac_listener" + + def bootstrap do + exec_file = executable_path() + + if not is_nil(exec_file) and File.exists?(exec_file) do + :ok + else + Logger.error("Can't find executable `mac_listener`") + {:error, :fs_mac_bootstrap_error} + end + end + + def supported_systems do + [{:unix, :darwin}] + end + + def known_events do + [ + :mustscansubdirs, + :userdropped, + :kerneldropped, + :eventidswrapped, + :historydone, + :rootchanged, + :mount, + :unmount, + :created, + :removed, + :inodemetamod, + :renamed, + :modified, + :finderinfomod, + :changeowner, + :xattrmod, + :isfile, + :isdir, + :issymlink, + :ownevent + ] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) || + executable_path(:priv) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_mac)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSMAC_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable(@default_exec_file) + end + + defp executable_path(:priv) do + case :code.priv_dir(:file_system) do + {:error, _} -> + Logger.error( + "`priv` dir for `:file_system` application is not available in current runtime, appoint executable file with `config.exs` or `FILESYSTEM_FSMAC_EXECUTABLE_FILE` env." 
+ ) + + nil + + dir when is_list(dir) -> + Path.join(dir, @default_exec_file) + end + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error("required argument `dirs` is missing") + {:error, :missing_dirs_argument} + + {dirs, rest} -> + args = [~c"-F" | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1)] + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + + defp parse_options([{:latency, latency} | t], result) do + result = + if is_float(latency) or is_integer(latency) do + [~c"--latency=#{latency / 1}" | result] + else + Logger.error("latency should be integer or float, got `#{inspect(latency)}, ignore") + result + end + + parse_options(t, result) + end + + defp parse_options([{:no_defer, true} | t], result) do + parse_options(t, [~c"--no-defer" | result]) + end + + defp parse_options([{:no_defer, false} | t], result) do + parse_options(t, result) + end + + defp parse_options([{:no_defer, value} | t], result) do + Logger.error("unknown value `#{inspect(value)}` for no_defer, ignore") + parse_options(t, result) + end + + defp parse_options([{:watch_root, true} | t], result) do + parse_options(t, [~c"--watch-root" | result]) + end + + defp parse_options([{:watch_root, false} | t], result) do + parse_options(t, result) + end + + defp parse_options([{:watch_root, value} | t], result) do + Logger.error("unknown value `#{inspect(value)}` for watch_root, ignore") + parse_options(t, result) + end + + defp parse_options([h | t], result) do + Logger.error("unknown option `#{inspect(h)}`, ignore") + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + + case parse_options(rest) do + {:ok, port_args} -> + port = + Port.open( + {:spawn_executable, to_charlist(executable_path())}, + [:stream, :exit_status, {:line, 16384}, {:args, port_args}, 
{:cd, System.tmp_dir!()}] + ) + + Process.link(port) + Process.flag(:trap_exit, true) + {:ok, %{port: port, worker_pid: worker_pid}} + + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port} = state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info({:EXIT, port, _reason}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + [_, _, events, path] = line |> to_string |> String.split(["\t", "="], parts: 4) + + {path, + events |> String.split(["[", ",", "]"], trim: true) |> Enum.map(&String.to_existing_atom/1)} + end +end diff --git a/deps/file_system/lib/file_system/backends/fs_poll.ex b/deps/file_system/lib/file_system/backends/fs_poll.ex new file mode 100644 index 0000000..8e9d380 --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_poll.ex @@ -0,0 +1,117 @@ +require Logger + +defmodule FileSystem.Backends.FSPoll do + @moduledoc """ + File system backend for any OS. + + ## Backend Options + + * `:interval` (integer, default: 1000), polling interval + + ## Using FSPoll Backend + + Unlike other backends, polling backend is never automatically chosen in any + OS environment, despite being usable on all platforms. + + To use polling backend, one has to explicitly specify in the backend option. 
+ """ + + use GenServer + @behaviour FileSystem.Backend + + def bootstrap, do: :ok + + def supported_systems do + [ + {:unix, :linux}, + {:unix, :freebsd}, + {:unix, :dragonfly}, + {:unix, :openbsd}, + {:unix, :darwin}, + {:win32, :nt} + ] + end + + def known_events do + [:created, :deleted, :modified] + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + worker_pid = Keyword.fetch!(args, :worker_pid) + dirs = Keyword.fetch!(args, :dirs) + interval = Keyword.get(args, :interval, 1000) + + Logger.info("Polling file changes every #{interval}ms...") + send(self(), :first_check) + + {:ok, {worker_pid, dirs, interval, %{}}} + end + + def handle_info(:first_check, {worker_pid, dirs, interval, _empty_map}) do + schedule_check(interval) + {:noreply, {worker_pid, dirs, interval, files_mtimes(dirs)}} + end + + def handle_info(:check, {worker_pid, dirs, interval, stale_mtimes}) do + fresh_mtimes = files_mtimes(dirs) + + diff(stale_mtimes, fresh_mtimes) + |> Tuple.to_list() + |> Enum.zip([:created, :deleted, :modified]) + |> Enum.each(&report_change(&1, worker_pid)) + + schedule_check(interval) + {:noreply, {worker_pid, dirs, interval, fresh_mtimes}} + end + + defp schedule_check(interval) do + Process.send_after(self(), :check, interval) + end + + defp files_mtimes(dirs, files_mtimes_map \\ %{}) do + Enum.reduce(dirs, files_mtimes_map, fn dir, map -> + case File.stat!(dir) do + %{type: :regular, mtime: mtime} -> + Map.put(map, dir, mtime) + + %{type: :directory} -> + dir + |> Path.join("*") + |> Path.wildcard() + |> files_mtimes(map) + + %{type: _other} -> + map + end + end) + end + + @doc false + def diff(stale_mtimes, fresh_mtimes) do + fresh_file_paths = fresh_mtimes |> Map.keys() |> MapSet.new() + stale_file_paths = stale_mtimes |> Map.keys() |> MapSet.new() + + created_file_paths = + MapSet.difference(fresh_file_paths, stale_file_paths) |> MapSet.to_list() + + deleted_file_paths = + 
MapSet.difference(stale_file_paths, fresh_file_paths) |> MapSet.to_list() + + modified_file_paths = + for file_path <- MapSet.intersection(stale_file_paths, fresh_file_paths), + stale_mtimes[file_path] != fresh_mtimes[file_path], + do: file_path + + {created_file_paths, deleted_file_paths, modified_file_paths} + end + + defp report_change({file_paths, event}, worker_pid) do + for file_path <- file_paths do + send(worker_pid, {:backend_file_event, self(), {file_path, [event]}}) + end + end +end diff --git a/deps/file_system/lib/file_system/backends/fs_windows.ex b/deps/file_system/lib/file_system/backends/fs_windows.ex new file mode 100644 index 0000000..c06f638 --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_windows.ex @@ -0,0 +1,195 @@ +require Logger + +defmodule FileSystem.Backends.FSWindows do + @moduledoc """ + File system backend for Windows. + + Need binary executable file packaged in to use this backend. + + This file is a fork from https://github.com/synrc/fs. + + ## Backend Options + + * `:recursive` (bool, default: true), monitor directories and their contents recursively + + ## Executable File Path + + Useful when running `:file_system` with escript. + + The default listener executable file is `priv/inotifywait.exe` within the + folder of `:file_system` application. 
+ + Two ways to customize the executable file path: + + * Module config with `config.exs`: + + ```elixir + config :file_system, :fs_windows, + executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + ``` + + * System environment variable: + + ``` + export FILESYSTEM_FSWINDOWS_EXECUTABLE_FILE="YOUR_EXECUTABLE_FILE_PATH"` + ``` + """ + + use GenServer + @behaviour FileSystem.Backend + @sep_char <<1>> + + @default_exec_file "inotifywait.exe" + + def bootstrap do + exec_file = executable_path() + + if not is_nil(exec_file) and File.exists?(exec_file) do + :ok + else + Logger.error("Can't find executable `inotifywait.exe`") + {:error, :fs_windows_bootstrap_error} + end + end + + def supported_systems do + [{:win32, :nt}] + end + + def known_events do + [:created, :modified, :removed, :renamed, :undefined] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) || + executable_path(:priv) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_windows)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSMWINDOWS_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable(@default_exec_file) + end + + defp executable_path(:priv) do + case :code.priv_dir(:file_system) do + {:error, _} -> + Logger.error( + "`priv` dir for `:file_system` application is not available in current runtime, appoint executable file with `config.exs` or `FILESYSTEM_FSWINDOWS_EXECUTABLE_FILE` env." 
+ ) + + nil + + dir when is_list(dir) -> + Path.join(dir, @default_exec_file) + end + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error("required argument `dirs` is missing") + {:error, :missing_dirs_argument} + + {dirs, rest} -> + format = ["%w", "%e", "%f"] |> Enum.join(@sep_char) |> to_charlist + + args = [ + ~c"--format", + format, + ~c"--quiet", + ~c"-m", + ~c"-r" + | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1) + ] + + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + + defp parse_options([{:recursive, true} | t], result) do + parse_options(t, result) + end + + defp parse_options([{:recursive, false} | t], result) do + parse_options(t, result -- [~c"-r"]) + end + + defp parse_options([{:recursive, value} | t], result) do + Logger.error("unknown value `#{inspect(value)}` for recursive, ignore") + parse_options(t, result) + end + + defp parse_options([h | t], result) do + Logger.error("unknown option `#{inspect(h)}`, ignore") + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + + case parse_options(rest) do + {:ok, port_args} -> + port = + Port.open( + {:spawn_executable, to_charlist(executable_path())}, + [:stream, :exit_status, {:line, 16384}, {:args, port_args}, {:cd, System.tmp_dir!()}] + ) + + Process.link(port) + Process.flag(:trap_exit, true) + {:ok, %{port: port, worker_pid: worker_pid}} + + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port} = state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} 
+ end + + def handle_info({:EXIT, port, _reason}, %{port: port} = state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + {path, flags} = + case line |> to_string |> String.split(@sep_char, trim: true) do + [dir, flags, file] -> {Enum.join([dir, file], "\\"), flags} + [path, flags] -> {path, flags} + end + + {path |> Path.split() |> Path.join(), flags |> String.split(",") |> Enum.map(&convert_flag/1)} + end + + defp convert_flag("CREATE"), do: :created + defp convert_flag("MODIFY"), do: :modified + defp convert_flag("DELETE"), do: :removed + defp convert_flag("MOVED_TO"), do: :renamed + defp convert_flag(_), do: :undefined +end diff --git a/deps/file_system/lib/file_system/worker.ex b/deps/file_system/lib/file_system/worker.ex new file mode 100644 index 0000000..be1f9a1 --- /dev/null +++ b/deps/file_system/lib/file_system/worker.ex @@ -0,0 +1,59 @@ +require Logger + +defmodule FileSystem.Worker do + @moduledoc """ + FileSystem Worker Process with the backend GenServer, receive events from Port Process + and forward it to subscribers. 
+ """ + + use GenServer + + @doc false + def start_link(args) do + {opts, args} = Keyword.split(args, [:name]) + GenServer.start_link(__MODULE__, args, opts) + end + + @doc false + def init(args) do + {backend, rest} = Keyword.pop(args, :backend) + + with {:ok, backend} <- FileSystem.Backend.backend(backend), + {:ok, backend_pid} <- backend.start_link([{:worker_pid, self()} | rest]) do + {:ok, %{backend_pid: backend_pid, subscribers: %{}}} + else + reason -> + Logger.warning("Not able to start file_system worker, reason: #{inspect(reason)}") + :ignore + end + end + + @doc false + def handle_call(:subscribe, {pid, _}, state) do + ref = Process.monitor(pid) + state = put_in(state, [:subscribers, ref], pid) + {:reply, :ok, state} + end + + @doc false + def handle_info( + {:backend_file_event, backend_pid, file_event}, + %{backend_pid: backend_pid} = state + ) do + state.subscribers + |> Enum.each(fn {_ref, subscriber_pid} -> + send(subscriber_pid, {:file_event, self(), file_event}) + end) + + {:noreply, state} + end + + def handle_info({:DOWN, ref, _, _pid, _reason}, state) do + subscribers = Map.drop(state.subscribers, [ref]) + {:noreply, %{state | subscribers: subscribers}} + end + + def handle_info(_, state) do + {:noreply, state} + end +end diff --git a/deps/file_system/mix.exs b/deps/file_system/mix.exs new file mode 100644 index 0000000..9fe826e --- /dev/null +++ b/deps/file_system/mix.exs @@ -0,0 +1,100 @@ +defmodule FileSystem.MixProject do + use Mix.Project + + @source_url "https://github.com/falood/file_system" + @version "1.1.1" + + def project do + [ + app: :file_system, + version: @version, + elixir: "~> 1.11", + deps: deps(), + description: description(), + package: package(), + consolidate_protocols: Mix.env() != :test, + compilers: [:file_system | Mix.compilers()], + aliases: ["compile.file_system": &file_system/1], + docs: [ + extras: ["README.md"], + main: "readme", + source_url: @source_url, + source_ref: "v#{@version}" + ] + ] + end + + def 
application do + [ + extra_applications: [:logger] + ] + end + + defp description do + """ + An Elixir file system change watcher wrapper based on FS, the native file + system listener. + """ + end + + defp deps do + [ + {:ex_doc, ">= 0.0.0", only: :docs} + ] + end + + defp file_system(_args) do + case :os.type() do + {:unix, :darwin} -> compile_mac() + _ -> :ok + end + end + + defp compile_mac do + require Logger + source = "c_src/mac/*.c" + target = "priv/mac_listener" + + if Mix.Utils.stale?(Path.wildcard(source), [target]) do + Logger.info("Compiling file system watcher for Mac...") + + cflags = System.get_env("CFLAGS", "") + ldflags = System.get_env("LDFLAGS", "") + + cmd = + "xcrun -r clang #{cflags} #{ldflags} -framework CoreFoundation -framework CoreServices -Wno-deprecated-declarations #{source} -o #{target}" + + if Mix.shell().cmd(cmd) > 0 do + Logger.error( + "Could not compile file system watcher for Mac, try to run #{inspect(cmd)} manually inside the dependency." + ) + else + Logger.info("Done.") + end + + :ok + else + :noop + end + end + + defp package do + %{ + maintainers: ["Xiangrong Hao", "Max Veytsman"], + files: [ + "lib", + "README.md", + "mix.exs", + "c_src/mac/cli.c", + "c_src/mac/cli.h", + "c_src/mac/common.h", + "c_src/mac/compat.c", + "c_src/mac/compat.h", + "c_src/mac/main.c", + "priv/inotifywait.exe" + ], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url} + } + end +end diff --git a/deps/file_system/priv/inotifywait.exe b/deps/file_system/priv/inotifywait.exe new file mode 100644 index 0000000..2ce5d49 Binary files /dev/null and b/deps/file_system/priv/inotifywait.exe differ diff --git a/deps/finch/.formatter.exs b/deps/finch/.formatter.exs new file mode 100644 index 0000000..d2cda26 --- /dev/null +++ b/deps/finch/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/finch/.hex b/deps/finch/.hex new file mode 100644 index 
0000000..39a9ad4 Binary files /dev/null and b/deps/finch/.hex differ diff --git a/deps/finch/CHANGELOG.md b/deps/finch/CHANGELOG.md new file mode 100644 index 0000000..646f8cd --- /dev/null +++ b/deps/finch/CHANGELOG.md @@ -0,0 +1,264 @@ +# Changelog + +## v0.21.0 (2026-01-22) + +### Enhancements + +- Add support for querying default pool metrics via `Finch.get_pool_status/2` #329 +- Add more details on `Finch.request/3` docs #327 + +### Bug Fixes + +- Prevent idle HTTP/1 pools from being terminated while connections are in use #292 +- Disable unsupported HTTP/2 server push responses to avoid crashes #333 +- Drop `:cacerts` from defaults for HTTP connections to avoid breaking plain HTTP pools #333 +- Only track default pool metrics when `start_pool_metrics?` is enabled #329 + +### Other + +- Elixir 1.19 compatibility updates for HTTP/1 pool state handling #331 +- CI: update test matrix and x509/deps for Elixir 1.19 #330 +- Add ALPN large-body regression test coverage (issue #265) #332 + +## v0.20.0 (2025-07-04) + +### Enhancements + +- Support manual pool termination #299 +- Refactor HTTP1 pool state for better maintainability #308 +- Add `:supported_groups` to list of TLS options #307 +- Be more explicit about the `:default` pool in documentation #314 +- Upgrade `nimble_options` to document deprecations #315 + +### Bug Fixes + +- Fix Finch.stream_while/5 on halt for both HTTP/1 and HTTP/2 #320 +- Return accumulator when Finch.stream/5 and Finch.stream_while/5 fail #295 +- Fix documentation reference for get_pool_status/2 #301 + +### Other + +- Upgrade CI VM to Ubuntu 24 #321 +- CI housekeeping: support Elixir 1.17/Erlang OTP 27, bump Credo and deps #303 +- Update GitHub CI badge URL #304 + +## v0.19.0 (2024-09-04) + +### Enhancements + +- Update @mint_tls_opts in pool_manager.ex #266 +- Document there is no backpressure on HTTP2 #283 +- Fix test: compare file size instead of map #284 +- Finch.request/3: Use improper list and avoid Enum.reverse #286 +- Require Mint 
1.6 #287 +- Remove castore dependency #274 +- Fix typos and improve language in docs and comments #285 +- fix logo size in README #275 + +### Bug Fixes + +- Tweak Finch supervisor children startup order #289, fixes #277 +- implement handle_cancelled/2 pool callback #268, fixes #257 +- type Finch.request_opt() was missing the :request_timeout option #278 + +## v0.18.0 (2024-02-09) + +### Enhancements + +- Add Finch name to telemetry events #252 + +### Bug Fixes + +- Fix several minor dialyzer errors and run dialyzer in CI #259, #261 + +## v0.17.0 (2024-01-07) + +### Enhancements + +- Add support for async requests #228, #231 +- Add stream example to docs #230 +- Fix calls to deprecated Logger.warn/2 #232 +- Fix typos #233 +- Docs: do not use streams with async_request #238 +- Add Finch.stream_while/5 #239 +- Set MIX_ENV=test on CI #241 +- Update HTTP/2 pool log level to warning for retried action #240 +- Split trailers from headers #242 +- Introduce :request_timeout option #244 +- Support ALPN over HTTP1 pools #250 +- Deprecate :protocol in favour of :protocols #251 +- Implement pool telemetry #248 + +## v0.16.0 (2023-04-13) + +### Enhancements + +- add `Finch.request!/3` #219 +- allow usage with nimble_pool 1.0 #220 + +## v0.15.0 (2023-03-16) + +### Enhancements + +- allow usage with nimble_options 1.0 #218 +- allow usage with castore 1.0 #210 + +## v0.14.0 (2022-11-30) + +### Enhancements + +- Improve error message for pool timeouts #126 +- Relax nimble_options version to allow usage with 0.5.0 #204 + +## v0.13.0 (2022-07-26) + +### Enhancements + +- Define `Finch.child_spec/1` which will automatically use the `Finch` `:name` as the `:id`, allowing users to start multiple instances under the same Supervisor without any additional configuration #202 +- Include the changelog in the generated HexDocs #201 +- Fix typo in `Finch.Telemetry` docs #198 + +## v0.12.0 (2022-05-03) + +### Enhancements + +- Add support for private request metadata #180 +- Hide docs for 
deprecated `Finch.request/6` #195 +- Add support for Mint.UnsafeProxy connections #184 + +### Bug Fixes + +- In v0.11.0 headers and status codes were added to Telemetry events in a way that made invalid assumptions + regarding the shape of the response accumulator, this has been resolved in #196 + +### Breaking Changes + +- Telemetry updates #176 + - Rename the telemetry event `:request` to `:send` and `:response` to `:recv`. + - Introduce a new `:request` field which contains the full `Finch.Request.t()` in place of the `:scheme`, `:host`, `:port`, `:path`, `:method` fields wherever possible. The new `:request` field can be found on the `:request`, `:queue`, `:send`, and `:recv` events. + - Rename the meta data field `:error` to `:reason` for all `:exception` events to follow the standard introduced in [telemetry](https://github.com/beam-telemetry/telemetry/blob/3f069cfd2193396bee221d0709287c1bdaa4fabf/src/telemetry.erl#L335) + - Introduce a new `[:finch, :request, :start | :stop | :exception]` telemetry event that emits + whenever `Finch.request/3` or `Finch.stream/5` are called. + +## v0.11.0 (2022-03-28) + +- Add `:pool_max_idle_time` option to enable termination of idle HTTP/1 pools. +- Add `:conn_max_idle_time` and deprecate `:max_idle_time` to make the distinction from + `:pool_max_idle_time` more obvious. +- Add headers and status code to Telemetry events. + +## v0.10.2 (2022-01-12) + +- Complete the typespec for Finch.Request.t() +- Fix the typespec for Finch.build/5 +- Update deps + +## v0.10.1 (2021-12-27) + +- Fix handling of iodata in HTTP/2 request streams. + +## v0.10.0 (2021-12-12) + +- Add ability to stream the request body for HTTP/2 requests. +- Check and respect window sizes during HTTP/2 requests. + +## v0.9.1 (2021-10-17) + +- Upgrade NimbleOptions dep to 0.4.0. + +## v0.9.0 (2021-10-17) + +- Add support for unix sockets. + +## v0.8.3 (2021-10-15) + +- Return Error struct when HTTP2 connection is closed and a timeout occurs. 
+- Do not leak messages/connections when cancelling streaming requests. + +## v0.8.2 (2021-09-09) + +- Demonitor http/2 connections when the request is done. + +## v0.8.1 (2021-07-27) + +- Update mix.exs to allow compatibility with Telemetry v1.0 +- Avoid appending "?" to request_path when query string is an empty string + +## v0.8.0 (2021-06-23) + +- HTTP2 connections will now always return Exceptions. + +## v0.7.0 (2021-05-10) + +- Add support for SSLKEYLOGFILE. +- Drop HTTPS options for default HTTP pools to avoid `:badarg` errors. + +## v0.6.3 (2021-02-22) + +- Return more verbose errors when finch is configured with bad URLs. + +## v0.6.2 (2021-02-19) + +- Fix incorrect type spec for stream/5 +- Add default transport options for keepalive, timeouts, and nodelay. + +## v0.6.1 (2021-02-17) + +- Update Mint to 1.2.1, which properly handles HTTP/1.0 style responses that close + the connection at the same time as sending the response. +- Update NimblePool to 0.2.4 which includes a bugfix that prevents extra connections + being opened. +- Fix the typespec for Finch.stream/5. +- Fix assertion that was not actually being called in a test case. + +## v0.6.0 (2020-12-15) + +- Add ability to stream the request body for HTTP/1.x requests. + +## v0.5.2 (2020-11-10) + +- Fix deprecation in nimble_options. + +## v0.5.1 (2020-10-27) + +- Fix crash in http2 pools when a message is received in disconnected state. + +## v0.5.0 (2020-10-26) + +- Add `:max_idle_time` option for http1 pools +- Optimize http2 connection closing. +- Use new lazy pools in NimblePool +- Additional `idle_time` measurements for all http1 connection telemetry + +## v0.4.0 (2020-10-2) + +- Update all dependencies. This includes bug fixes for Mint. 
+ +## v0.3.2 (2020-09-18) + +- Add metadata to connection start telemetry in http/2 pools + +## v0.3.1 (2020-08-29) + +- Add HTTP method to telemetry events +- BUGFIX - Include query parameters in HTTP/2 requests + +## v0.3.0 (2020-06-24) + +- HTTP/2 support +- Streaming support for both http/1.1 and http/2 pools +- New api for building and making requests +- typespec fixes + +## v0.2.0 (2020-05-06) + +- Response body now defaults to an empty string instead of nil + +## v0.1.1 (2020-05-04) + +- Accepts a URI struct in request/3/4/5/6, Todd Resudek +- Fix `http_method()` typespec, Ryan Johnson + +## v0.1.0 (2020-04-25) + +- Initial Release diff --git a/deps/finch/LICENSE.md b/deps/finch/LICENSE.md new file mode 100644 index 0000000..ebbf43c --- /dev/null +++ b/deps/finch/LICENSE.md @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2020 Christopher Jon Keathley & Nico Daniel Piderman + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/deps/finch/README.md b/deps/finch/README.md new file mode 100644 index 0000000..f43e173 --- /dev/null +++ b/deps/finch/README.md @@ -0,0 +1,121 @@ +Finch +Finch + +[![CI](https://github.com/sneako/finch/actions/workflows/elixir.yml/badge.svg)](https://github.com/sneako/finch/actions/workflows/elixir.yml) +[![Hex pm](https://img.shields.io/hexpm/v/finch.svg?style=flat)](https://hex.pm/packages/finch) +[![Hexdocs.pm](https://img.shields.io/badge/hex-docs-lightgreen.svg)](https://hexdocs.pm/finch/) + + + +An HTTP client with a focus on performance, built on top of +[Mint](https://github.com/elixir-mint/mint) and [NimblePool](https://github.com/dashbitco/nimble_pool). + +We attempt to achieve this goal by providing efficient connection pooling strategies and avoiding copying of memory wherever possible. + +Most developers will most likely prefer to use the fabulous HTTP client [Req](https://github.com/wojtekmach/req) which takes advantage of Finch's pooling and provides an extremely friendly and pleasant to use API. + +## Usage + +In order to use Finch, you must start it and provide a `:name`. Often in your +supervision tree: + +```elixir +children = [ + {Finch, name: MyFinch} +] +``` + +Or, in rare cases, dynamically: + +```elixir +Finch.start_link(name: MyFinch) +``` + +Once you have started your instance of Finch, you are ready to start making requests: + +```elixir +Finch.build(:get, "https://hex.pm") |> Finch.request(MyFinch) +``` + +When using HTTP/1, Finch will parse the passed in URL into a `{scheme, host, port}` +tuple, and maintain one or more connection pools for each `{scheme, host, port}` you +interact with. + +You can also configure a pool size and count to be used for specific URLs that are +known before starting Finch. The passed URLs will be parsed into `{scheme, host, port}`, +and the corresponding pools will be started. See `Finch.start_link/1` for configuration +options. 
+ +```elixir +children = [ + {Finch, + name: MyConfiguredFinch, + pools: %{ + :default => [size: 10, count: 2], + "https://hex.pm" => [size: 32, count: 8] + }} +] +``` + +Pools will be started for each configured `{scheme, host, port}` when Finch is started. +For any unconfigured `{scheme, host, port}`, the pool will be started the first time +it is requested using the `:default` configuration. This means given the pool +configuration above each origin/`{scheme, host, port}` will launch 2 (`:count`) new pool +processes. So, if you encountered 10 separate combinations, that'd be 20 pool processes. + +Note pools are not automatically terminated by default, if you need to +terminate them after some idle time, use the `pool_max_idle_time` option (available only for HTTP1 pools). + +## Telemetry + +Finch uses Telemetry to provide instrumentation. See the `Finch.Telemetry` +module for details on specific events. + +## Logging TLS Secrets + +Finch supports logging TLS secrets to a file. These can be later used in a tool such as +Wireshark to decrypt HTTPS sessions. To use this feature you must specify the file to +which the secrets should be written. If you are using TLSv1.3 you must also add +`keep_secrets: true` to your pool `:transport_opts`. For example: + +```elixir +{Finch, + name: MyFinch, + pools: %{ + default: [conn_opts: [transport_opts: [keep_secrets: true]]] + }} +``` + +There are two different ways to specify this file: + +1. The `:ssl_key_log_file` connection option in your pool configuration. For example: + +```elixir +{Finch, + name: MyFinch, + pools: %{ + default: [ + conn_opts: [ + ssl_key_log_file: "/writable/path/to/the/sslkey.log" + ] + ] + }} +``` + +2. Alternatively, you could also set the `SSLKEYLOGFILE` environment variable. 
+ + + +## Installation + +The package can be installed by adding `finch` to your list of dependencies in `mix.exs`: + +```elixir +def deps do + [ + {:finch, "~> 0.20"} + ] +end +``` + +The docs can be found at [https://hexdocs.pm/finch](https://hexdocs.pm/finch). diff --git a/deps/finch/hex_metadata.config b/deps/finch/hex_metadata.config new file mode 100644 index 0000000..86fd582 --- /dev/null +++ b/deps/finch/hex_metadata.config @@ -0,0 +1,46 @@ +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/finch/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/sneako/finch">>}]}. +{<<"name">>,<<"finch">>}. +{<<"version">>,<<"0.21.0">>}. +{<<"description">>,<<"An HTTP client focused on performance.">>}. +{<<"elixir">>,<<"~> 1.13">>}. +{<<"app">>,<<"finch">>}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"mint">>}, + {<<"app">>,<<"mint">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.6.2 or ~> 1.7">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"nimble_pool">>}, + {<<"app">>,<<"nimble_pool">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.1">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"nimble_options">>}, + {<<"app">>,<<"nimble_options">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"mime">>}, + {<<"app">>,<<"mime">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.0 or ~> 2.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. 
+{<<"files">>, + [<<"lib">>,<<"lib/finch.ex">>,<<"lib/finch">>,<<"lib/finch/http1">>, + <<"lib/finch/http1/conn.ex">>,<<"lib/finch/http1/pool.ex">>, + <<"lib/finch/http1/pool_metrics.ex">>,<<"lib/finch/telemetry.ex">>, + <<"lib/finch/response.ex">>,<<"lib/finch/request.ex">>, + <<"lib/finch/pool_manager.ex">>,<<"lib/finch/error.ex">>, + <<"lib/finch/http2">>,<<"lib/finch/http2/request_stream.ex">>, + <<"lib/finch/http2/pool.ex">>,<<"lib/finch/http2/pool_metrics.ex">>, + <<"lib/finch/ssl.ex">>,<<"lib/finch/pool.ex">>,<<".formatter.exs">>, + <<"mix.exs">>,<<"README.md">>,<<"LICENSE.md">>,<<"CHANGELOG.md">>]}. +{<<"build_tools">>,[<<"mix">>]}. diff --git a/deps/finch/lib/finch.ex b/deps/finch/lib/finch.ex new file mode 100644 index 0000000..32cb483 --- /dev/null +++ b/deps/finch/lib/finch.ex @@ -0,0 +1,754 @@ +defmodule Finch do + @external_resource "README.md" + @moduledoc "README.md" + |> File.read!() + |> String.split("") + |> Enum.fetch!(1) + + alias Finch.{PoolManager, Request, Response} + require Finch.Pool + + use Supervisor + + @default_pool_size 50 + @default_pool_count 1 + + @default_connect_timeout 5_000 + + @pool_config_schema [ + protocol: [ + type: {:in, [:http2, :http1]}, + deprecated: "Use `:protocols` instead." + ], + protocols: [ + type: {:list, {:in, [:http1, :http2]}}, + doc: """ + The type of connections to support. + + If using `:http1` only, an HTTP1 pool without multiplexing is used. \ + If using `:http2` only, an HTTP2 pool with multiplexing is used. \ + If both are listed, then both HTTP1/HTTP2 connections are \ + supported (via ALPN), but there is no multiplexing. + """, + default: [:http1] + ], + size: [ + type: :pos_integer, + doc: """ + Number of connections to maintain in each pool. Used only by HTTP1 pools \ + since HTTP2 is able to multiplex requests through a single connection. In \ + other words, for HTTP2, the size is always 1 and the `:count` should be \ + configured in order to increase capacity. 
+ """, + default: @default_pool_size + ], + count: [ + type: :pos_integer, + doc: """ + Number of pools to start. HTTP1 pools are able to re-use connections in the \ + same pool and establish new ones only when necessary. However, if there is a \ + high pool count and few requests are made, these requests will be scattered \ + across pools, reducing connection reuse. It is recommended to increase the pool \ + count for HTTP1 only if you are experiencing high checkout times. + """, + default: @default_pool_count + ], + max_idle_time: [ + type: :timeout, + doc: """ + The maximum number of milliseconds an HTTP1 connection is allowed to be idle \ + before being closed during a checkout attempt. + """, + deprecated: "Use :conn_max_idle_time instead." + ], + conn_opts: [ + type: :keyword_list, + doc: """ + These options are passed to `Mint.HTTP.connect/4` whenever a new connection is established. \ + `:mode` is not configurable as Finch must control this setting. Typically these options are \ + used to configure proxying, https settings, or connect timeouts. + """, + default: [] + ], + pool_max_idle_time: [ + type: :timeout, + doc: """ + The maximum number of milliseconds that a pool can be idle before being terminated, used only by HTTP1 pools. \ + This options is forwarded to NimblePool and it starts and idle verification cycle that may impact \ + performance if misused. For instance setting a very low timeout may lead to pool restarts. \ + For more information see NimblePool's `handle_ping/2` documentation. + """, + default: :infinity + ], + conn_max_idle_time: [ + type: :timeout, + doc: """ + The maximum number of milliseconds an HTTP1 connection is allowed to be idle \ + before being closed during a checkout attempt. 
+ """, + default: :infinity + ], + start_pool_metrics?: [ + type: :boolean, + doc: "When true, pool metrics will be collected and available through `get_pool_status/2`", + default: false + ] + ] + + @typedoc """ + The `:name` provided to Finch in `start_link/1`. + """ + @type name() :: atom() + + @type scheme() :: :http | :https + + @type scheme_host_port() :: {scheme(), host :: String.t(), port :: :inet.port_number()} + + @typedoc """ + Pool metrics returned by `get_pool_status/2` for a single pool. + """ + @type pool_metrics() :: + [Finch.HTTP1.PoolMetrics.t()] + | [Finch.HTTP2.PoolMetrics.t()] + + @typedoc """ + Pool metrics grouped by SHP when querying the `:default` configuration. + """ + @type default_pool_metrics() :: %{required(scheme_host_port()) => pool_metrics()} + + @type request_opt() :: + {:pool_timeout, timeout()} + | {:receive_timeout, timeout()} + | {:request_timeout, timeout()} + + @typedoc """ + Options used by request functions. + """ + @type request_opts() :: [request_opt()] + + @typedoc """ + The reference used to identify a request sent using `async_request/3`. + """ + @opaque request_ref() :: Finch.Pool.request_ref() + + @typedoc """ + The stream function given to `stream/5`. + """ + @type stream(acc) :: + ({:status, integer} + | {:headers, Mint.Types.headers()} + | {:data, binary} + | {:trailers, Mint.Types.headers()}, + acc -> + acc) + + @typedoc """ + The stream function given to `stream_while/5`. + """ + @type stream_while(acc) :: + ({:status, integer} + | {:headers, Mint.Types.headers()} + | {:data, binary} + | {:trailers, Mint.Types.headers()}, + acc -> + {:cont, acc} | {:halt, acc}) + + @doc """ + Start an instance of Finch. + + ## Options + + * `:name` - The name of your Finch instance. This field is required. + + * `:pools` - A map specifying the configuration for your pools. 
The keys should be URLs + provided as binaries, a tuple `{scheme, {:local, unix_socket}}` where `unix_socket` is the path for + the socket, or the atom `:default` to provide a catch-all configuration to be used for any + unspecified URLs - meaning that new pools for unspecified URLs will be started using the `:default` + configuration. See "Pool Configuration Options" below for details on the possible map + values. Default value is `%{default: [size: #{@default_pool_size}, count: #{@default_pool_count}]}`. + + ### Pool Configuration Options + + #{NimbleOptions.docs(@pool_config_schema)} + """ + def start_link(opts) do + name = finch_name!(opts) + pools = Keyword.get(opts, :pools, []) |> pool_options!() + {default_pool_config, pools} = Map.pop(pools, :default) + + config = %{ + registry_name: name, + manager_name: manager_name(name), + supervisor_name: pool_supervisor_name(name), + default_pool_config: default_pool_config, + pools: pools + } + + Supervisor.start_link(__MODULE__, config, name: supervisor_name(name)) + end + + def child_spec(opts) do + %{ + id: finch_name!(opts), + start: {__MODULE__, :start_link, [opts]} + } + end + + @impl true + def init(config) do + children = [ + {Registry, [keys: :duplicate, name: config.registry_name, meta: [config: config]]}, + {DynamicSupervisor, name: config.supervisor_name, strategy: :one_for_one}, + {PoolManager, config} + ] + + Supervisor.init(children, strategy: :one_for_all) + end + + defp finch_name!(opts) do + Keyword.get(opts, :name) || raise(ArgumentError, "must supply a name") + end + + defp pool_options!(pools) do + {:ok, default} = NimbleOptions.validate([], @pool_config_schema) + + Enum.reduce(pools, %{default: valid_opts_to_map(default)}, fn {destination, opts}, acc -> + with {:ok, valid_destination} <- cast_destination(destination), + {:ok, valid_pool_opts} <- cast_pool_opts(opts) do + Map.put(acc, valid_destination, valid_pool_opts) + else + {:error, reason} -> + raise reason + end + end) + end + + defp 
cast_destination(destination) do + case destination do + :default -> + {:ok, destination} + + {scheme, {:local, path}} when is_atom(scheme) and is_binary(path) -> + {:ok, {scheme, {:local, path}, 0}} + + url when is_binary(url) -> + cast_binary_destination(url) + + _ -> + {:error, %ArgumentError{message: "invalid destination: #{inspect(destination)}"}} + end + end + + defp cast_binary_destination(url) when is_binary(url) do + {scheme, host, port, _path, _query} = Finch.Request.parse_url(url) + {:ok, {scheme, host, port}} + end + + defp cast_pool_opts(opts) do + with {:ok, valid} <- NimbleOptions.validate(opts, @pool_config_schema) do + {:ok, valid_opts_to_map(valid)} + end + end + + defp valid_opts_to_map(valid) do + # We need to enable keepalive and set the nodelay flag to true by default. + transport_opts = + valid + |> get_in([:conn_opts, :transport_opts]) + |> List.wrap() + |> Keyword.put_new(:timeout, @default_connect_timeout) + |> Keyword.put_new(:nodelay, true) + |> Keyword.put(:keepalive, true) + + conn_opts = valid[:conn_opts] |> List.wrap() + + # Only relevant to HTTP2, but just gracefully ignored in HTTP1. + # Since we cannot handle server push responses, we need to disable the feature. 
+ client_settings = + conn_opts + |> Keyword.get(:client_settings, []) + |> Keyword.put(:enable_push, false) + + ssl_key_log_file = + Keyword.get(conn_opts, :ssl_key_log_file) || System.get_env("SSLKEYLOGFILE") + + ssl_key_log_file_device = ssl_key_log_file && File.open!(ssl_key_log_file, [:append]) + + conn_opts = + conn_opts + |> Keyword.put(:ssl_key_log_file_device, ssl_key_log_file_device) + |> Keyword.put(:transport_opts, transport_opts) + |> Keyword.put(:protocols, valid[:protocols]) + |> Keyword.put(:client_settings, client_settings) + + # TODO: Remove :protocol on v0.18 + mod = + case valid[:protocol] do + :http1 -> + Finch.HTTP1.Pool + + :http2 -> + Finch.HTTP2.Pool + + nil -> + if :http1 in valid[:protocols] do + Finch.HTTP1.Pool + else + Finch.HTTP2.Pool + end + end + + %{ + mod: mod, + size: valid[:size], + count: valid[:count], + conn_opts: conn_opts, + conn_max_idle_time: to_native(valid[:max_idle_time] || valid[:conn_max_idle_time]), + pool_max_idle_time: valid[:pool_max_idle_time], + start_pool_metrics?: valid[:start_pool_metrics?] + } + end + + defp to_native(:infinity), do: :infinity + defp to_native(time), do: System.convert_time_unit(time, :millisecond, :native) + + defp supervisor_name(name), do: :"#{name}.Supervisor" + defp manager_name(name), do: :"#{name}.PoolManager" + defp pool_supervisor_name(name), do: :"#{name}.PoolSupervisor" + + defmacrop request_span(request, name, do: block) do + quote do + start_meta = %{request: unquote(request), name: unquote(name)} + + Finch.Telemetry.span(:request, start_meta, fn -> + result = unquote(block) + end_meta = Map.put(start_meta, :result, result) + {result, end_meta} + end) + end + end + + @doc """ + Builds an HTTP request to be sent with `request/3` or `stream/4`. + + It is possible to send the request body in a streaming fashion. In order to do so, the + `body` parameter needs to take form of a tuple `{:stream, body_stream}`, where `body_stream` + is a `Stream`. 
+ """ + @spec build(Request.method(), Request.url(), Request.headers(), Request.body(), Keyword.t()) :: + Request.t() + defdelegate build(method, url, headers \\ [], body \\ nil, opts \\ []), to: Request + + @doc """ + Streams an HTTP request and returns the accumulator. + + A function of arity 2 is expected as argument. The first argument + is a tuple, as listed below, and the second argument is the + accumulator. The function must return a potentially updated + accumulator. + + See also `stream_while/5`. + + > ### HTTP2 streaming and back-pressure {: .warning} + > + > At the moment, streaming over HTTP2 connections do not provide + > any back-pressure mechanism: this means the response will be + > sent to the client as quickly as possible. Therefore, you must + > not use streaming over HTTP2 for non-terminating responses or + > when streaming large responses which you do not intend to keep + > in memory. + + ## Stream commands + + * `{:status, status}` - the http response status + * `{:headers, headers}` - the http response headers + * `{:data, data}` - a streaming section of the http response body + * `{:trailers, trailers}` - the http response trailers + + ## Options + + Shares options with `request/3`. 
+ + ## Examples + + path = "/tmp/archive.zip" + file = File.open!(path, [:write, :exclusive]) + url = "https://example.com/archive.zip" + request = Finch.build(:get, url) + + Finch.stream(request, MyFinch, nil, fn + {:status, status}, _acc -> + IO.inspect(status) + + {:headers, headers}, _acc -> + IO.inspect(headers) + + {:data, data}, _acc -> + IO.binwrite(file, data) + end) + + File.close(file) + """ + @spec stream(Request.t(), name(), acc, stream(acc), request_opts()) :: + {:ok, acc} | {:error, Exception.t(), acc} + when acc: term() + def stream(%Request{} = req, name, acc, fun, opts \\ []) when is_function(fun, 2) do + fun = fn entry, acc -> + {:cont, fun.(entry, acc)} + end + + stream_while(req, name, acc, fun, opts) + end + + @doc """ + Streams an HTTP request until it finishes or `fun` returns `{:halt, acc}`. + + A function of arity 2 is expected as argument. The first argument + is a tuple, as listed below, and the second argument is the + accumulator. + + The function must return: + + * `{:cont, acc}` to continue streaming + * `{:halt, acc}` to halt streaming + + See also `stream/5`. + + > ### HTTP2 streaming and back-pressure {: .warning} + > + > At the moment, streaming over HTTP2 connections do not provide + > any back-pressure mechanism: this means the response will be + > sent to the client as quickly as possible. Therefore, you must + > not use streaming over HTTP2 for non-terminating responses or + > when streaming large responses which you do not intend to keep + > in memory. + + ## Stream commands + + * `{:status, status}` - the http response status + * `{:headers, headers}` - the http response headers + * `{:data, data}` - a streaming section of the http response body + * `{:trailers, trailers}` - the http response trailers + + ## Options + + Shares options with `request/3`. 
+ + ## Examples + + path = "/tmp/archive.zip" + file = File.open!(path, [:write, :exclusive]) + url = "https://example.com/archive.zip" + request = Finch.build(:get, url) + + Finch.stream_while(request, MyFinch, nil, fn + {:status, status}, acc -> + IO.inspect(status) + {:cont, acc} + + {:headers, headers}, acc -> + IO.inspect(headers) + {:cont, acc} + + {:data, data}, acc -> + IO.binwrite(file, data) + {:cont, acc} + end) + + File.close(file) + """ + @spec stream_while(Request.t(), name(), acc, stream_while(acc), request_opts()) :: + {:ok, acc} | {:error, Exception.t(), acc} + when acc: term() + def stream_while(%Request{} = req, name, acc, fun, opts \\ []) when is_function(fun, 2) do + request_span req, name do + __stream__(req, name, acc, fun, opts) + end + end + + defp __stream__(%Request{} = req, name, acc, fun, opts) do + {pool, pool_mod} = get_pool(req, name) + pool_mod.request(pool, req, acc, fun, name, opts) + end + + @doc """ + Sends an HTTP request and returns a `Finch.Response` struct. + + It can still raise exceptions if it was not possible to check out a connection in the given `:pool_timeout`. + + ## Options + + * `:pool_timeout` - This timeout is applied when we check out a connection from the pool. + Default value is `5_000`. + + * `:receive_timeout` - The maximum time to wait for each chunk to be received before returning an error. + Default value is `15_000`. + + * `:request_timeout` - The amount of time to wait for a complete response before returning an error. + This timeout only applies to HTTP/1, and its current implementation is a best effort timeout, + it does not guarantee the call will return precisely when the time has elapsed. + Default value is `:infinity`. 
+ + """ + @spec request(Request.t(), name(), request_opts()) :: + {:ok, Response.t()} + | {:error, Exception.t()} + def request(req, name, opts \\ []) + + def request(%Request{} = req, name, opts) do + request_span req, name do + acc = {nil, [], [], []} + + fun = fn + {:status, value}, {_, headers, body, trailers} -> + {:cont, {value, headers, body, trailers}} + + {:headers, value}, {status, headers, body, trailers} -> + {:cont, {status, headers ++ value, body, trailers}} + + {:data, value}, {status, headers, body, trailers} -> + {:cont, {status, headers, [body | value], trailers}} + + {:trailers, value}, {status, headers, body, trailers} -> + {:cont, {status, headers, body, trailers ++ value}} + end + + case __stream__(req, name, acc, fun, opts) do + {:ok, {status, headers, body, trailers}} -> + {:ok, + %Response{ + status: status, + headers: headers, + body: IO.iodata_to_binary(body), + trailers: trailers + }} + + {:error, error, _acc} -> + {:error, error} + end + end + end + + # Catch-all for backwards compatibility below + def request(name, method, url) do + request(name, method, url, []) + end + + @doc false + def request(name, method, url, headers, body \\ nil, opts \\ []) do + IO.warn("Finch.request/6 is deprecated, use Finch.build/5 + Finch.request/3 instead") + + build(method, url, headers, body) + |> request(name, opts) + end + + @doc """ + Sends an HTTP request and returns a `Finch.Response` struct + or raises an exception in case of failure. + + See `request/3` for more detailed information. + """ + @spec request!(Request.t(), name(), request_opts()) :: + Response.t() + def request!(%Request{} = req, name, opts \\ []) do + case request(req, name, opts) do + {:ok, resp} -> resp + {:error, exception} -> raise exception + end + end + + @doc """ + Sends an HTTP request asynchronously, returning a request reference. + + If the request is sent using HTTP1, an extra process is spawned to + consume messages from the underlying socket. 
The messages are sent + to the current process as soon as they arrive, as a firehose. If + you wish to maximize request rate or have more control over how + messages are streamed, a strategy using `request/3` or `stream/5` + should be used instead. + + ## Receiving the response + + Response information is sent to the calling process as it is received + in `{ref, response}` tuples. + + If the calling process exits before the request has completed, the + request will be canceled. + + Responses include: + + * `{:status, status}` - HTTP response status + * `{:headers, headers}` - HTTP response headers + * `{:data, data}` - section of the HTTP response body + * `{:error, exception}` - an error occurred during the request + * `:done` - request has completed successfully + + On a successful request, a single `:status` message will be followed + by a single `:headers` message, after which more than one `:data` + messages may be sent. If trailing headers are present, a final + `:headers` message may be sent. Any `:done` or `:error` message + indicates that the request has succeeded or failed and no further + messages are expected. + + ## Example + + iex> req = Finch.build(:get, "https://httpbin.org/stream/5") + iex> ref = Finch.async_request(req, MyFinch) + iex> flush() + {ref, {:status, 200}} + {ref, {:headers, [...]}} + {ref, {:data, "..."}} + {ref, :done} + + ## Options + + Shares options with `request/3`. + """ + @spec async_request(Request.t(), name(), request_opts()) :: request_ref() + def async_request(%Request{} = req, name, opts \\ []) do + {pool, pool_mod} = get_pool(req, name) + pool_mod.async_request(pool, req, name, opts) + end + + @doc """ + Cancels a request sent with `async_request/3`. 
+ """ + @spec cancel_async_request(request_ref()) :: :ok + def cancel_async_request(request_ref) when Finch.Pool.is_request_ref(request_ref) do + {pool_mod, _cancel_ref} = request_ref + pool_mod.cancel_async_request(request_ref) + end + + defp get_pool(%Request{scheme: scheme, unix_socket: unix_socket}, name) + when is_binary(unix_socket) do + PoolManager.get_pool(name, {scheme, {:local, unix_socket}, 0}) + end + + defp get_pool(%Request{scheme: scheme, host: host, port: port}, name) do + PoolManager.get_pool(name, {scheme, host, port}) + end + + @doc """ + Get pool metrics. + + When given a URL or SHP tuple, this returns the metrics list for that specific + pool. The number of items in the metrics list depends on the configured + `:count` option and each entry will have a `pool_index` going from 1 to + `:count`. + + When `:default` is provided, Finch returns the metrics for all pools started + from the `:default` configuration. In this case the return value is a map + keyed by each pool's `{scheme, host, port}` tuple with the corresponding + metrics list as the value. + + The metrics struct depends on the pool scheme defined in the `:protocols` + option: `Finch.HTTP1.PoolMetrics` for `:http1` and `Finch.HTTP2.PoolMetrics` + for `:http2`. See the documentation for those modules for more details. + + `{:error, :not_found}` is returned in the following scenarios: + + * There is no pool registered for the given Finch instance and URL/SHP. + * The pool has `start_pool_metrics?: false` (the default). + * `:default` is provided but no pools have been started from the + `:default` configuration (or none have metrics enabled). 
+ + ## Examples + + iex> Finch.get_pool_status(MyFinch, "https://httpbin.org") + {:ok, [ + %Finch.HTTP1.PoolMetrics{ + pool_index: 1, + pool_size: 50, + available_connections: 43, + in_use_connections: 7 + }, + %Finch.HTTP1.PoolMetrics{ + pool_index: 2, + pool_size: 50, + available_connections: 37, + in_use_connections: 13 + }] + } + + iex> Finch.get_pool_status(MyFinch, :default) + {:ok, + %{ + {:https, "httpbin.org", 443} => [ + %Finch.HTTP1.PoolMetrics{ + pool_index: 1, + pool_size: 50, + available_connections: 43, + in_use_connections: 7 + } + ] + }} + """ + @spec get_pool_status(name(), url :: String.t() | scheme_host_port() | :default) :: + {:ok, pool_metrics()} + | {:ok, default_pool_metrics()} + | {:error, :not_found} + def get_pool_status(finch_name, url) when is_binary(url) do + {s, h, p, _, _} = Request.parse_url(url) + get_pool_status(finch_name, {s, h, p}) + end + + def get_pool_status(finch_name, :default) do + finch_name + |> PoolManager.get_default_shps() + |> Enum.reduce(%{}, fn shp, acc -> + case get_pool_status(finch_name, shp) do + {:ok, metrics} -> Map.put(acc, shp, metrics) + {:error, :not_found} -> acc + end + end) + |> case do + result when result == %{} -> {:error, :not_found} + result -> {:ok, result} + end + end + + def get_pool_status(finch_name, shp) when is_tuple(shp) do + case PoolManager.get_pool(finch_name, shp, auto_start?: false) do + {_pool, pool_mod} -> + pool_mod.get_pool_status(finch_name, shp) + + :not_found -> + {:error, :not_found} + end + end + + @doc """ + Stops the pool of processes associated with the given scheme, host, port (aka SHP). + + This function can be invoked to manually stop the pool to the given SHP when you know it's not + going to be used anymore. + + Note that this function is not safe with respect to concurrent requests. Invoking it while + another request to the same SHP is taking place might result in the failure of that request. 
It + is the responsibility of the client to ensure that no request to the same SHP is taking place + while this function is being invoked. + """ + @spec stop_pool(name(), url :: String.t() | scheme_host_port()) :: :ok | {:error, :not_found} + def stop_pool(finch_name, url) when is_binary(url) do + {s, h, p, _, _} = Request.parse_url(url) + stop_pool(finch_name, {s, h, p}) + end + + def stop_pool(finch_name, shp) when is_tuple(shp) do + case PoolManager.all_pool_instances(finch_name, shp) do + [] -> + {:error, :not_found} + + children -> + Enum.each( + children, + fn {pid, _module} -> + DynamicSupervisor.terminate_child(pool_supervisor_name(finch_name), pid) + end + ) + + PoolManager.maybe_remove_default_shp(finch_name, shp) + :ok + end + end +end diff --git a/deps/finch/lib/finch/error.ex b/deps/finch/lib/finch/error.ex new file mode 100644 index 0000000..f9e2c8b --- /dev/null +++ b/deps/finch/lib/finch/error.ex @@ -0,0 +1,21 @@ +defmodule Finch.Error do + @moduledoc """ + An HTTP error. + + This exception struct is used to represent errors of all sorts for the HTTP/2 protocol. 
+ """ + + @type t() :: %__MODULE__{reason: atom()} + + defexception [:reason] + + @impl true + def exception(reason) when is_atom(reason) do + %__MODULE__{reason: reason} + end + + @impl true + def message(%__MODULE__{reason: reason}) do + "#{reason}" + end +end diff --git a/deps/finch/lib/finch/http1/conn.ex b/deps/finch/lib/finch/http1/conn.ex new file mode 100644 index 0000000..0e6c60e --- /dev/null +++ b/deps/finch/lib/finch/http1/conn.ex @@ -0,0 +1,372 @@ +defmodule Finch.HTTP1.Conn do + @moduledoc false + + alias Finch.SSL + alias Finch.Telemetry + + def new(scheme, host, port, opts, parent) do + %{ + scheme: scheme, + host: host, + port: port, + opts: opts.conn_opts, + parent: parent, + last_checkin: System.monotonic_time(), + max_idle_time: opts.conn_max_idle_time, + mint: nil + } + end + + def connect(%{mint: mint} = conn, name) when not is_nil(mint) do + meta = %{ + scheme: conn.scheme, + host: conn.host, + port: conn.port, + name: name + } + + Telemetry.event(:reused_connection, %{}, meta) + {:ok, conn} + end + + def connect(%{mint: nil} = conn, name) do + meta = %{ + scheme: conn.scheme, + host: conn.host, + port: conn.port, + name: name + } + + start_time = Telemetry.start(:connect, meta) + + # By default we force HTTP1, but we allow someone to set + # custom protocols in case they don't know if a connection + # is HTTP1/HTTP2, but they are fine as treating HTTP2 + # connections has HTTP2. 
+ + conn_opts = + conn.opts + |> Keyword.put(:mode, :passive) + |> Keyword.put_new(:protocols, [:http1]) + + case Mint.HTTP.connect(conn.scheme, conn.host, conn.port, conn_opts) do + {:ok, mint} -> + Telemetry.stop(:connect, start_time, meta) + SSL.maybe_log_secrets(conn.scheme, conn_opts, mint) + {:ok, %{conn | mint: mint}} + + {:error, error} -> + meta = Map.put(meta, :error, error) + Telemetry.stop(:connect, start_time, meta) + {:error, conn, error} + end + end + + def transfer(conn, pid) do + case Mint.HTTP.controlling_process(conn.mint, pid) do + # Mint.HTTP.controlling_process causes a side-effect, but it doesn't actually + # change the conn, so we can ignore the value returned above. + {:ok, _} -> {:ok, conn} + {:error, error} -> {:error, conn, error} + end + end + + def open?(%{mint: nil}), do: false + def open?(%{mint: mint}), do: Mint.HTTP.open?(mint) + + def idle_time(conn, unit \\ :native) do + idle_time = System.monotonic_time() - conn.last_checkin + + System.convert_time_unit(idle_time, :native, unit) + end + + def reusable?(%{max_idle_time: :infinity}, _idle_time), do: true + def reusable?(%{max_idle_time: max_idle_time}, idle_time), do: idle_time <= max_idle_time + + def set_mode(conn, mode) when mode in [:active, :passive] do + case Mint.HTTP.set_mode(conn.mint, mode) do + {:ok, mint} -> {:ok, %{conn | mint: mint}} + _ -> {:error, "Connection is dead"} + end + end + + def discard(%{mint: nil}, _), do: :unknown + + def discard(conn, message) do + case Mint.HTTP.stream(conn.mint, message) do + {:ok, mint, _responses} -> {:ok, %{conn | mint: mint}} + {:error, _, reason, _} -> {:error, reason} + :unknown -> :unknown + end + end + + def request(%{mint: nil} = conn, _, _, _, _, _, _, _), do: {:error, conn, "Could not connect"} + + def request(conn, req, acc, fun, name, receive_timeout, request_timeout, idle_time) do + full_path = Finch.Request.request_path(req) + + metadata = %{request: req, name: name} + + extra_measurements = %{idle_time: idle_time} + 
+ start_time = Telemetry.start(:send, metadata, extra_measurements) + + try do + case Mint.HTTP.request( + conn.mint, + req.method, + full_path, + req.headers, + stream_or_body(req.body) + ) do + {:ok, mint, ref} -> + case maybe_stream_request_body(mint, ref, req.body) do + {:ok, mint} -> + Telemetry.stop(:send, start_time, metadata, extra_measurements) + start_time = Telemetry.start(:recv, metadata, extra_measurements) + resp_metadata = %{status: nil, headers: [], trailers: []} + timeouts = %{receive_timeout: receive_timeout, request_timeout: request_timeout} + + response = + receive_response( + [], + acc, + fun, + mint, + ref, + timeouts, + :headers, + resp_metadata + ) + + handle_response(response, conn, metadata, start_time, extra_measurements) + + {:error, mint, error} -> + handle_request_error( + conn, + mint, + error, + acc, + metadata, + start_time, + extra_measurements + ) + end + + {:error, mint, error} -> + handle_request_error(conn, mint, error, acc, metadata, start_time, extra_measurements) + end + catch + kind, error -> + close(conn) + Telemetry.exception(:recv, start_time, kind, error, __STACKTRACE__, metadata) + :erlang.raise(kind, error, __STACKTRACE__) + end + end + + defp stream_or_body({:stream, _}), do: :stream + defp stream_or_body(body), do: body + + defp handle_request_error(conn, mint, error, acc, metadata, start_time, extra_measurements) do + metadata = Map.put(metadata, :error, error) + Telemetry.stop(:send, start_time, metadata, extra_measurements) + {:error, %{conn | mint: mint}, error, acc} + end + + defp maybe_stream_request_body(mint, ref, {:stream, stream}) do + with {:ok, mint} <- stream_request_body(mint, ref, stream) do + Mint.HTTP.stream_request_body(mint, ref, :eof) + end + end + + defp maybe_stream_request_body(mint, _, _), do: {:ok, mint} + + defp stream_request_body(mint, ref, stream) do + Enum.reduce_while(stream, {:ok, mint}, fn + chunk, {:ok, mint} -> {:cont, Mint.HTTP.stream_request_body(mint, ref, chunk)} + _chunk, 
error -> {:halt, error} + end) + end + + def close(%{mint: nil} = conn), do: conn + + def close(conn) do + {:ok, mint} = Mint.HTTP.close(conn.mint) + %{conn | mint: mint} + end + + defp handle_response(response, conn, metadata, start_time, extra_measurements) do + case response do + {:ok, mint, acc, resp_metadata} -> + metadata = Map.merge(metadata, resp_metadata) + Telemetry.stop(:recv, start_time, metadata, extra_measurements) + {:ok, %{conn | mint: mint}, acc} + + {:error, mint, error, acc, resp_metadata} -> + metadata = Map.merge(metadata, Map.put(resp_metadata, :error, error)) + Telemetry.stop(:recv, start_time, metadata, extra_measurements) + {:error, %{conn | mint: mint}, error, acc} + end + end + + defp receive_response( + entries, + acc, + fun, + mint, + ref, + timeouts, + fields, + resp_metadata + ) + + defp receive_response( + [{:done, ref} | _], + acc, + _fun, + mint, + ref, + _timeouts, + _fields, + resp_metadata + ) do + {:ok, mint, acc, resp_metadata} + end + + defp receive_response( + _, + acc, + _fun, + mint, + _ref, + timeouts, + _fields, + resp_metadata + ) + when timeouts.request_timeout < 0 do + {:ok, mint} = Mint.HTTP.close(mint) + {:error, mint, %Mint.TransportError{reason: :timeout}, acc, resp_metadata} + end + + defp receive_response( + [], + acc, + fun, + mint, + ref, + timeouts, + fields, + resp_metadata + ) do + start_time = System.monotonic_time(:millisecond) + + case Mint.HTTP.recv(mint, 0, timeouts.receive_timeout) do + {:ok, mint, entries} -> + timeouts = + if is_integer(timeouts.request_timeout) do + elapsed_time = System.monotonic_time(:millisecond) - start_time + update_in(timeouts.request_timeout, &(&1 - elapsed_time)) + else + timeouts + end + + receive_response( + entries, + acc, + fun, + mint, + ref, + timeouts, + fields, + resp_metadata + ) + + {:error, mint, error, _responses} -> + {:error, mint, error, acc, resp_metadata} + end + end + + defp receive_response( + [entry | entries], + acc, + fun, + mint, + ref, + timeouts, + 
fields, + resp_metadata + ) do + case entry do + {:status, ^ref, value} -> + case fun.({:status, value}, acc) do + {:cont, acc} -> + receive_response( + entries, + acc, + fun, + mint, + ref, + timeouts, + fields, + %{resp_metadata | status: value} + ) + + {:halt, acc} -> + {:ok, mint} = Mint.HTTP.close(mint) + {:ok, mint, acc, resp_metadata} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {:headers, ^ref, value} -> + resp_metadata = update_in(resp_metadata, [fields], &(&1 ++ value)) + + case fun.({fields, value}, acc) do + {:cont, acc} -> + receive_response( + entries, + acc, + fun, + mint, + ref, + timeouts, + fields, + resp_metadata + ) + + {:halt, acc} -> + {:ok, mint} = Mint.HTTP.close(mint) + {:ok, mint, acc, resp_metadata} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {:data, ^ref, value} -> + case fun.({:data, value}, acc) do + {:cont, acc} -> + receive_response( + entries, + acc, + fun, + mint, + ref, + timeouts, + :trailers, + resp_metadata + ) + + {:halt, acc} -> + {:ok, mint} = Mint.HTTP.close(mint) + {:ok, mint, acc, resp_metadata} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {:error, ^ref, error} -> + {:error, mint, error, acc, resp_metadata} + end + end +end diff --git a/deps/finch/lib/finch/http1/pool.ex b/deps/finch/lib/finch/http1/pool.ex new file mode 100644 index 0000000..715ea75 --- /dev/null +++ b/deps/finch/lib/finch/http1/pool.ex @@ -0,0 +1,377 @@ +defmodule Finch.HTTP1.Pool do + @moduledoc false + @behaviour NimblePool + @behaviour Finch.Pool + + defmodule State do + @moduledoc false + defstruct [ + :registry, + :shp, + :pool_idx, + :metric_ref, + :opts, + :activity_info + ] + end + + alias Finch.HTTP1.Conn + alias Finch.Telemetry + alias Finch.HTTP1.PoolMetrics + + def child_spec(opts) do + { + _shp, + _registry_name, + _pool_size, + _conn_opts, + 
pool_max_idle_time, + _start_pool_metrics?, + _pool_idx + } = opts + + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]}, + restart: restart_option(pool_max_idle_time) + } + end + + def start_link( + {shp, registry_name, pool_size, conn_opts, pool_max_idle_time, start_pool_metrics?, + pool_idx} + ) do + NimblePool.start_link( + worker: + {__MODULE__, {registry_name, shp, pool_idx, pool_size, start_pool_metrics?, conn_opts}}, + pool_size: pool_size, + lazy: true, + worker_idle_timeout: pool_idle_timeout(pool_max_idle_time) + ) + end + + @impl Finch.Pool + def request(pool, req, acc, fun, name, opts) do + pool_timeout = Keyword.get(opts, :pool_timeout, 5_000) + receive_timeout = Keyword.get(opts, :receive_timeout, 15_000) + request_timeout = Keyword.get(opts, :request_timeout, :infinity) + + metadata = %{request: req, pool: pool, name: name} + + start_time = Telemetry.start(:queue, metadata) + + try do + NimblePool.checkout!( + pool, + :checkout, + fn from, {state, conn, idle_time} -> + Telemetry.stop(:queue, start_time, metadata, %{idle_time: idle_time}) + + case Conn.connect(conn, name) do + {:ok, conn} -> + Conn.request(conn, req, acc, fun, name, receive_timeout, request_timeout, idle_time) + |> case do + {:ok, conn, acc} -> + {{:ok, acc}, transfer_if_open(conn, state, from)} + + {:error, conn, error, acc} -> + {{:error, error, acc}, transfer_if_open(conn, state, from)} + end + + {:error, conn, error} -> + {{:error, error, acc}, transfer_if_open(conn, state, from)} + end + end, + pool_timeout + ) + catch + :exit, data -> + Telemetry.exception(:queue, start_time, :exit, data, __STACKTRACE__, metadata) + + # Provide helpful error messages for known errors + case data do + {:timeout, {NimblePool, :checkout, _affected_pids}} -> + reraise( + """ + Finch was unable to provide a connection within the timeout due to excess queuing \ + for connections. 
Consider adjusting the pool size, count, timeout or reducing the \ + rate of requests if it is possible that the downstream service is unable to keep up \ + with the current rate. + """, + __STACKTRACE__ + ) + + _ -> + exit(data) + end + end + end + + @impl Finch.Pool + def async_request(pool, req, name, opts) do + owner = self() + + pid = + spawn_link(fn -> + monitor = Process.monitor(owner) + request_ref = {__MODULE__, self()} + + case request( + pool, + req, + {owner, monitor, request_ref}, + &send_async_response/2, + name, + opts + ) do + {:ok, _} -> send(owner, {request_ref, :done}) + {:error, error, _acc} -> send(owner, {request_ref, {:error, error}}) + end + end) + + {__MODULE__, pid} + end + + defp send_async_response(response, {owner, monitor, request_ref}) do + if process_down?(monitor) do + exit(:shutdown) + end + + send(owner, {request_ref, response}) + {:cont, {owner, monitor, request_ref}} + end + + defp process_down?(monitor) do + receive do + {:DOWN, ^monitor, _, _, _} -> true + after + 0 -> false + end + end + + @impl Finch.Pool + def cancel_async_request({_, pid} = _request_ref) do + Process.unlink(pid) + Process.exit(pid, :shutdown) + :ok + end + + @impl Finch.Pool + def get_pool_status(finch_name, shp) do + case Finch.PoolManager.get_pool_count(finch_name, shp) do + nil -> + {:error, :not_found} + + count -> + 1..count + |> Enum.map(&PoolMetrics.get_pool_status(finch_name, shp, &1)) + |> Enum.filter(&match?({:ok, _}, &1)) + |> Enum.map(&elem(&1, 1)) + |> case do + [] -> {:error, :not_found} + result -> {:ok, result} + end + end + end + + @impl NimblePool + def init_pool({registry, shp, pool_idx, pool_size, start_pool_metrics?, opts}) do + {:ok, metric_ref} = + if start_pool_metrics?, + do: PoolMetrics.init(registry, shp, pool_idx, pool_size), + else: {:ok, nil} + + # Register our pool with our module name as the key. 
This allows the caller + # to determine the correct pool module to use to make the request + {:ok, _} = Registry.register(registry, shp, __MODULE__) + + acitivity_info = + if opts[:pool_max_idle_time] != :infinity, do: init_activity_info(), else: nil + + state = %__MODULE__.State{ + registry: registry, + shp: shp, + pool_idx: pool_idx, + metric_ref: metric_ref, + opts: opts, + activity_info: acitivity_info + } + + {:ok, state} + end + + @impl NimblePool + def init_worker(%__MODULE__.State{shp: {scheme, host, port}, opts: opts} = pool_state) do + {:ok, Conn.new(scheme, host, port, opts, self()), pool_state} + end + + @impl NimblePool + def handle_checkout(:checkout, _, %{mint: nil} = conn, %__MODULE__.State{} = pool_state) do + idle_time = System.monotonic_time() - conn.last_checkin + PoolMetrics.maybe_add(pool_state.metric_ref, in_use_connections: 1) + {:ok, {:fresh, conn, idle_time}, conn, pool_state} + end + + def handle_checkout(:checkout, _from, conn, %__MODULE__.State{} = pool_state) do + idle_time = System.monotonic_time() - conn.last_checkin + + %__MODULE__.State{ + shp: {scheme, host, port}, + metric_ref: metric_ref + } = pool_state + + with true <- Conn.reusable?(conn, idle_time), + {:ok, conn} <- Conn.set_mode(conn, :passive) do + PoolMetrics.maybe_add(metric_ref, in_use_connections: 1) + {:ok, {:reuse, conn, idle_time}, conn, update_activity_info(:checkout, pool_state)} + else + false -> + meta = %{ + scheme: scheme, + host: host, + port: port + } + + # Deprecated, remember to delete when we remove the :max_idle_time pool config option! 
+ Telemetry.event(:max_idle_time_exceeded, %{idle_time: idle_time}, meta) + + Telemetry.event(:conn_max_idle_time_exceeded, %{idle_time: idle_time}, meta) + + {:remove, :closed, pool_state} + + _ -> + {:remove, :closed, pool_state} + end + end + + @impl NimblePool + def handle_checkin(checkin, _from, _old_conn, %__MODULE__.State{} = pool_state) do + %__MODULE__.State{metric_ref: metric_ref} = pool_state + PoolMetrics.maybe_add(metric_ref, in_use_connections: -1) + + with {:ok, conn} <- checkin, + {:ok, conn} <- Conn.set_mode(conn, :active) do + { + :ok, + %{conn | last_checkin: System.monotonic_time()}, + update_activity_info(:checkin, pool_state) + } + else + _ -> + {:remove, :closed, update_activity_info(:checkin, pool_state)} + end + end + + @impl NimblePool + def handle_update(new_conn, _old_conn, %__MODULE__.State{} = pool_state) do + {:ok, new_conn, pool_state} + end + + @impl NimblePool + def handle_info(message, conn) do + case Conn.discard(conn, message) do + {:ok, conn} -> {:ok, conn} + :unknown -> {:ok, conn} + {:error, _error} -> {:remove, :closed} + end + end + + @impl NimblePool + def handle_ping(conn, %__MODULE__.State{} = pool_state) do + %__MODULE__.State{ + shp: {scheme, host, port}, + opts: opts, + activity_info: activity_info + } = pool_state + + max_idle_time = Map.get(opts, :pool_max_idle_time, :infinity) + now = System.monotonic_time(:millisecond) + diff_from_last_checkout = now - activity_info.last_checkout_ts + + is_idle? = diff_from_last_checkout > max_idle_time + max_idle_time_configured? = is_number(max_idle_time) + any_connection_in_use? = activity_info.in_use_count > 0 + + cond do + not max_idle_time_configured? -> + {:ok, conn} + + any_connection_in_use? -> + {:ok, conn} + + is_idle? -> + meta = %{ + scheme: scheme, + host: host, + port: port + } + + Telemetry.event(:pool_max_idle_time_exceeded, %{}, meta) + {:stop, :idle_timeout} + + true -> + {:ok, conn} + end + end + + @impl NimblePool + # On terminate, effectively close it. 
+ # This will succeed even if it was already closed or if we don't own it. + def terminate_worker(_reason, conn, %__MODULE__.State{} = pool_state) do + Conn.close(conn) + {:ok, pool_state} + end + + @impl NimblePool + def handle_cancelled(:checked_out, %__MODULE__.State{} = pool_state) do + %__MODULE__.State{metric_ref: metric_ref} = pool_state + PoolMetrics.maybe_add(metric_ref, in_use_connections: -1) + :ok + end + + def handle_cancelled(:queued, _pool_state), do: :ok + + defp transfer_if_open(conn, state, {pid, _} = from) do + if Conn.open?(conn) do + if state == :fresh do + NimblePool.update(from, conn) + + case Conn.transfer(conn, pid) do + {:ok, conn} -> {:ok, conn} + {:error, _, _} -> :closed + end + else + {:ok, conn} + end + else + :closed + end + end + + defp restart_option(:infinity), do: :permanent + defp restart_option(_pool_max_idle_time), do: :transient + + defp pool_idle_timeout(:infinity), do: nil + defp pool_idle_timeout(pool_max_idle_time), do: pool_max_idle_time + + defp init_activity_info() do + %{in_use_count: 0, last_checkout_ts: System.monotonic_time(:millisecond)} + end + + defp update_activity_info( + _checkout_or_checkin, + %__MODULE__.State{activity_info: nil} = pool_state + ) do + pool_state + end + + defp update_activity_info(:checkout, %__MODULE__.State{} = pool_state) do + update_in(pool_state.activity_info, fn %{in_use_count: count} -> + %{in_use_count: count + 1, last_checkout_ts: System.monotonic_time(:millisecond)} + end) + end + + defp update_activity_info(:checkin, %__MODULE__.State{} = pool_state) do + update_in(pool_state.activity_info.in_use_count, &max(&1 - 1, 0)) + end +end diff --git a/deps/finch/lib/finch/http1/pool_metrics.ex b/deps/finch/lib/finch/http1/pool_metrics.ex new file mode 100644 index 0000000..3fb44c4 --- /dev/null +++ b/deps/finch/lib/finch/http1/pool_metrics.ex @@ -0,0 +1,81 @@ +defmodule Finch.HTTP1.PoolMetrics do + @moduledoc """ + HTTP1 Pool metrics. 
+ + Available metrics: + + * `:pool_index` - Index of the pool + * `:pool_size` - Total number of connections of the pool + * `:available_connections` - Number of available connections + * `:in_use_connections` - Number of connections currently in use + + Caveats: + + * A given number X of `available_connections` does not mean that currently + exists X connections to the server sitting on the pool. Because Finch uses + a lazy strategy for workers initialization, every pool starts with it's + size as available connections even if they are not started yet. In practice + this means that `available_connections` may be connections sitting on the pool + or available space on the pool for a new one if required. + + """ + @type t :: %__MODULE__{} + + defstruct [ + :pool_index, + :pool_size, + :available_connections, + :in_use_connections + ] + + @atomic_idx [ + pool_idx: 1, + pool_size: 2, + in_use_connections: 3 + ] + + def init(registry, shp, pool_idx, pool_size) do + ref = :atomics.new(length(@atomic_idx), []) + :atomics.add(ref, @atomic_idx[:pool_idx], pool_idx) + :atomics.add(ref, @atomic_idx[:pool_size], pool_size) + + :persistent_term.put({__MODULE__, registry, shp, pool_idx}, ref) + {:ok, ref} + end + + def maybe_add(nil, _metrics_list), do: :ok + + def maybe_add(ref, metrics_list) do + Enum.each(metrics_list, fn {metric_name, val} -> + :atomics.add(ref, @atomic_idx[metric_name], val) + end) + end + + def get_pool_status(name, shp, pool_idx) do + {__MODULE__, name, shp, pool_idx} + |> :persistent_term.get(nil) + |> get_pool_status() + end + + def get_pool_status(nil), do: {:error, :not_found} + + def get_pool_status(ref) do + %{ + pool_idx: pool_idx, + pool_size: pool_size, + in_use_connections: in_use_connections + } = + @atomic_idx + |> Enum.map(fn {k, idx} -> {k, :atomics.get(ref, idx)} end) + |> Map.new() + + result = %__MODULE__{ + pool_index: pool_idx, + pool_size: pool_size, + available_connections: pool_size - in_use_connections, + in_use_connections: 
in_use_connections + } + + {:ok, result} + end +end diff --git a/deps/finch/lib/finch/http2/pool.ex b/deps/finch/lib/finch/http2/pool.ex new file mode 100644 index 0000000..454cbbc --- /dev/null +++ b/deps/finch/lib/finch/http2/pool.ex @@ -0,0 +1,844 @@ +defmodule Finch.HTTP2.Pool do + @moduledoc false + + @behaviour :gen_statem + @behaviour Finch.Pool + + alias Mint.HTTP2 + alias Mint.HTTPError + alias Finch.Error + alias Finch.Telemetry + alias Finch.SSL + alias Finch.HTTP2.RequestStream + + alias Finch.HTTP2.PoolMetrics + + require Logger + + @default_receive_timeout 15_000 + + @impl true + def callback_mode(), do: [:state_functions, :state_enter] + + def child_spec(opts) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]} + } + end + + # Call the pool with the request. The pool will multiplex multiple requests + # and stream the result set back to the calling process using `send` + @impl Finch.Pool + def request(pool, request, acc, fun, name, opts) do + opts = Keyword.put_new(opts, :receive_timeout, @default_receive_timeout) + timeout = opts[:receive_timeout] + request_ref = make_request_ref(pool) + + case :gen_statem.call(pool, {:request, request_ref, request, opts}) do + {:ok, recv_start} -> + monitor = Process.monitor(pool) + # If the timeout is an integer, we add a fail-safe "after" clause that fires + # after a timeout that is double the original timeout (min 2000ms). This means + # that if there are no bugs in our code, then the normal :request_timeout is + # returned, but otherwise we have a way to escape this code, raise an error, and + # get the process unstuck. 
+ fail_safe_timeout = if is_integer(timeout), do: max(2000, timeout * 2), else: :infinity + + try do + response_waiting_loop(acc, fun, request_ref, monitor, fail_safe_timeout, :headers) + catch + kind, error -> + metadata = %{request: request, name: name} + Telemetry.exception(:recv, recv_start, kind, error, __STACKTRACE__, metadata) + + :ok = :gen_statem.call(pool, {:cancel, request_ref}) + clean_responses(request_ref) + Process.demonitor(monitor) + + :erlang.raise(kind, error, __STACKTRACE__) + end + + {:error, error} -> + {:error, error, acc} + end + end + + @impl Finch.Pool + def async_request(pool, req, _name, opts) do + opts = Keyword.put_new(opts, :receive_timeout, @default_receive_timeout) + request_ref = make_request_ref(pool) + + :ok = :gen_statem.cast(pool, {:async_request, self(), request_ref, req, opts}) + + request_ref + end + + @impl Finch.Pool + def cancel_async_request({_, {pool, _}} = request_ref) do + :ok = :gen_statem.call(pool, {:cancel, request_ref}) + clean_responses(request_ref) + end + + @impl Finch.Pool + def get_pool_status(finch_name, shp) do + case Finch.PoolManager.get_pool_count(finch_name, shp) do + nil -> + {:error, :not_found} + + count -> + 1..count + |> Enum.map(&PoolMetrics.get_pool_status(finch_name, shp, &1)) + |> Enum.filter(&match?({:ok, _}, &1)) + |> Enum.map(&elem(&1, 1)) + |> case do + [] -> {:error, :not_found} + result -> {:ok, result} + end + end + end + + defp make_request_ref(pool) do + {__MODULE__, {pool, make_ref()}} + end + + defp response_waiting_loop(acc, fun, request_ref, monitor_ref, fail_safe_timeout, fields) + + defp response_waiting_loop(acc, fun, request_ref, monitor_ref, fail_safe_timeout, fields) do + receive do + {^request_ref, {:status, value}} -> + case fun.({:status, value}, acc) do + {:cont, acc} -> + response_waiting_loop( + acc, + fun, + request_ref, + monitor_ref, + fail_safe_timeout, + fields + ) + + {:halt, acc} -> + cancel_async_request(request_ref) + Process.demonitor(monitor_ref) + {:ok, 
acc} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {^request_ref, {:headers, value}} -> + case fun.({fields, value}, acc) do + {:cont, acc} -> + response_waiting_loop( + acc, + fun, + request_ref, + monitor_ref, + fail_safe_timeout, + fields + ) + + {:halt, acc} -> + cancel_async_request(request_ref) + Process.demonitor(monitor_ref) + {:ok, acc} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {^request_ref, {:data, value}} -> + case fun.({:data, value}, acc) do + {:cont, acc} -> + response_waiting_loop( + acc, + fun, + request_ref, + monitor_ref, + fail_safe_timeout, + :trailers + ) + + {:halt, acc} -> + cancel_async_request(request_ref) + Process.demonitor(monitor_ref) + {:ok, acc} + + other -> + raise ArgumentError, "expected {:cont, acc} or {:halt, acc}, got: #{inspect(other)}" + end + + {^request_ref, :done} -> + Process.demonitor(monitor_ref) + {:ok, acc} + + {^request_ref, {:error, error}} -> + Process.demonitor(monitor_ref) + {:error, error, acc} + + {:DOWN, ^monitor_ref, _, _, _} -> + {:error, :connection_process_went_down} + after + fail_safe_timeout -> + Process.demonitor(monitor_ref) + + raise "no response was received even after waiting #{fail_safe_timeout}ms. " <> + "This is likely a bug in Finch, but we're raising so that your system doesn't " <> + "get stuck in an infinite receive." 
+ end + end + + defp clean_responses(request_ref) do + receive do + {^request_ref, _} -> clean_responses(request_ref) + after + 0 -> :ok + end + end + + def start_link({_shp, _finch_name, _pool_config, _start_pool_metrics?, _pool_idx} = opts) do + :gen_statem.start_link(__MODULE__, opts, []) + end + + @impl true + def init({{scheme, host, port} = shp, registry, pool_opts, start_pool_metrics?, pool_idx}) do + {:ok, metrics_ref} = + if start_pool_metrics?, + do: PoolMetrics.init(registry, shp, pool_idx), + else: {:ok, nil} + + {:ok, _} = Registry.register(registry, shp, __MODULE__) + + data = %{ + conn: nil, + finch_name: registry, + scheme: scheme, + host: host, + port: port, + pool_idx: pool_idx, + requests: %{}, + refs: %{}, + requests_by_pid: %{}, + backoff_base: 500, + backoff_max: 10_000, + connect_opts: pool_opts[:conn_opts] || [], + metrics_ref: metrics_ref + } + + {:ok, :disconnected, data, {:next_event, :internal, {:connect, 0}}} + end + + @doc false + def disconnected(event, content, data) + + def disconnected(:enter, :disconnected, _) do + :keep_state_and_data + end + + # When entering a disconnected state we need to fail all of the pending + # requests + def disconnected(:enter, _, data) do + :ok = + Enum.each(data.requests, fn {_ref, request} -> + send( + request.from_pid, + {request.request_ref, {:error, Error.exception(:connection_closed)}} + ) + end) + + # It's possible that we're entering this state before we are alerted of the + # fact that the socket is closed. This most often happens if we're in a read + # only state but have no pending requests to wait on. In this case we can just + # close the connection and throw it away. 
+ if data.conn do + HTTP2.close(data.conn) + end + + data = + data + |> Map.put(:requests, %{}) + |> Map.put(:conn, nil) + + actions = [{{:timeout, :reconnect}, data.backoff_base, 1}] + + {:keep_state, data, actions} + end + + def disconnected(:internal, {:connect, failure_count}, data) do + metadata = %{ + scheme: data.scheme, + host: data.host, + port: data.port, + name: data.finch_name + } + + start = Telemetry.start(:connect, metadata) + + case HTTP2.connect(data.scheme, data.host, data.port, data.connect_opts) do + {:ok, conn} -> + Telemetry.stop(:connect, start, metadata) + SSL.maybe_log_secrets(data.scheme, data.connect_opts, conn) + data = %{data | conn: conn} + {:next_state, :connected, data} + + {:error, error} -> + metadata = Map.put(metadata, :error, error) + Telemetry.stop(:connect, start, metadata) + + Logger.warning([ + "Failed to connect to #{data.scheme}://#{data.host}:#{data.port}: ", + Exception.message(error) + ]) + + delay = backoff(data.backoff_base, data.backoff_max, failure_count) + {:keep_state_and_data, {{:timeout, :reconnect}, delay, failure_count + 1}} + end + end + + # Capture timeout after trying to reconnect. 
Immediately attempt to reconnect + # to the upstream server + def disconnected({:timeout, :reconnect}, failure_count, _data) do + {:keep_state_and_data, {:next_event, :internal, {:connect, failure_count}}} + end + + # Immediately fail a request if we're disconnected + def disconnected({:call, from}, {:request, _, _, _}, _data) do + {:keep_state_and_data, {:reply, from, {:error, Error.exception(:disconnected)}}} + end + + # Ignore cancel requests if we are disconnected + def disconnected({:call, from}, {:cancel, _request_ref}, _data) do + {:keep_state_and_data, {:reply, from, {:error, Error.exception(:disconnected)}}} + end + + # Immediately fail a request if we're disconnected + def disconnected(:cast, {:async_request, pid, request_ref, _, _}, _data) do + send(pid, {request_ref, {:error, Error.exception(:disconnected)}}) + :keep_state_and_data + end + + # We cancel all request timeouts as soon as we enter the :disconnected state, but + # some timeouts might fire while changing states, so we need to handle them here. + # Since we replied to all pending requests when entering the :disconnected state, + # we can just do nothing here. + def disconnected({:timeout, {:request_timeout, _ref}}, _content, _data) do + :keep_state_and_data + end + + # Its possible that we can receive an info message telling us that a socket + # has been closed. This happens after we enter a disconnected state from a + # read_only state but we don't have any requests that are open. We've already + # closed the connection and thrown it away at this point so we can just retain + # our current state. + def disconnected(:info, _message, _data) do + :keep_state_and_data + end + + @doc false + def connected(event, content, data) + + def connected(:enter, _old_state, _data) do + :keep_state_and_data + end + + # Issue request to the upstream server. 
We store a ref to the request so we + # know who to respond to when we've completed everything + def connected({:call, {from_pid, _from_ref} = from}, {:request, request_ref, req, opts}, data) do + send_request(from, from_pid, request_ref, req, opts, data) + end + + def connected({:call, from}, {:cancel, request_ref}, data) do + data = cancel_request(data, request_ref) + {:keep_state, data, {:reply, from, :ok}} + end + + def connected(:cast, {:async_request, pid, request_ref, req, opts}, data) do + if is_nil(data.requests_by_pid[pid]) do + Process.monitor(pid) + end + + send_request(nil, pid, request_ref, req, opts, data) + end + + def connected(:info, {:DOWN, _, :process, pid, _}, data) do + {:keep_state, cancel_requests(data, pid)} + end + + def connected(:info, message, data) do + case HTTP2.stream(data.conn, message) do + {:ok, conn, responses} -> + data = put_in(data.conn, conn) + {data, response_actions} = handle_responses(data, responses) + + cond do + HTTP2.open?(data.conn, :write) -> + data = continue_requests(data) + {:keep_state, data, response_actions} + + HTTP2.open?(data.conn, :read) && Enum.any?(data.requests) -> + {:next_state, :connected_read_only, data, response_actions} + + true -> + {:next_state, :disconnected, data, response_actions} + end + + {:error, conn, error, responses} -> + Logger.error([ + "Received error from server #{data.scheme}:#{data.host}:#{data.port}: ", + Exception.message(error) + ]) + + data = put_in(data.conn, conn) + {data, actions} = handle_responses(data, responses) + + if HTTP2.open?(conn, :read) && Enum.any?(data.requests) do + {:next_state, :connected_read_only, data, actions} + else + {:next_state, :disconnected, data, actions} + end + + :unknown -> + Logger.warning(["Received unknown message: ", inspect(message)]) + :keep_state_and_data + end + end + + def connected({:timeout, {:request_timeout, ref}}, _content, data) do + with {:pop, {request, data}} when not is_nil(request) <- {:pop, pop_request(data, ref)}, + {:ok, 
conn} <- HTTP2.cancel_request(data.conn, ref) do + data = put_in(data.conn, conn) + send(request.from_pid, {request.request_ref, {:error, Error.exception(:request_timeout)}}) + {:keep_state, data} + else + {:error, conn, _error} -> + data = put_in(data.conn, conn) + + cond do + HTTP2.open?(conn, :write) -> + {:keep_state, data} + + # Don't bother entering read only mode if we don't have any pending requests. + HTTP2.open?(conn, :read) && Enum.any?(data.requests) -> + {:next_state, :connected_read_only, data} + + true -> + {:next_state, :disconnected, data} + end + + # The timer might have fired while we were receiving :done/:error for this + # request, so we don't have the request stored anymore but we still get the + # timer event. In those cases, we do nothing. + {:pop, {nil, _data}} -> + :keep_state_and_data + end + end + + @doc false + def connected_read_only(event, content, data) + + def connected_read_only(:enter, _old_state, data) do + data = + Enum.reduce(data.requests, data, fn + # request is awaiting a response and should stay in state + {_ref, %{stream: %{status: :done}}}, data -> + data + + # request is still sending data and should be discarded + {ref, %{stream: %{status: :streaming}} = request}, data -> + {^request, data} = pop_request(data, ref) + reply(request, {:error, Error.exception(:read_only)}) + data + end) + + {:keep_state, data} + end + + # If we're in a read only state then respond with an error immediately + def connected_read_only({:call, from}, {:request, _, _, _}, _) do + {:keep_state_and_data, {:reply, from, {:error, Error.exception(:read_only)}}} + end + + def connected_read_only({:call, from}, {:cancel, request_ref}, data) do + data = cancel_request(data, request_ref) + {:keep_state, data, {:reply, from, :ok}} + end + + def connected_read_only(:cast, {:async_request, pid, request_ref, _, _}, _) do + send(pid, {request_ref, {:error, Error.exception(:read_only)}}) + :keep_state_and_data + end + + def connected_read_only(:info, {:DOWN, 
_, :process, pid, _}, data) do + {:keep_state, cancel_requests(data, pid)} + end + + def connected_read_only(:info, message, data) do + case HTTP2.stream(data.conn, message) do + {:ok, conn, responses} -> + data = put_in(data.conn, conn) + {data, actions} = handle_responses(data, responses) + + # If the connection is still open for reading and we have pending requests + # to receive, we should try to wait for the responses. Otherwise enter + # the disconnected state so we can try to re-establish a connection. + if HTTP2.open?(conn, :read) && Enum.any?(data.requests) do + {:keep_state, data, actions} + else + {:next_state, :disconnected, data, actions} + end + + {:error, conn, error, responses} -> + Logger.error([ + "Received error from server #{data.scheme}://#{data.host}:#{data.port}: ", + Exception.message(error) + ]) + + data = put_in(data.conn, conn) + {data, actions} = handle_responses(data, responses) + + # Same as above, if we're still waiting on responses, we should stay in + # this state. Otherwise, we should enter the disconnected state and try + # to re-establish a connection. + if HTTP2.open?(conn, :read) && Enum.any?(data.requests) do + {:keep_state, data, actions} + else + {:next_state, :disconnected, data, actions} + end + + :unknown -> + Logger.warning(["Received unknown message: ", inspect(message)]) + :keep_state_and_data + end + end + + # In this state, we don't need to call HTTP2.cancel_request/2 since the connection + # is closed for writing, so we can't tell the server to cancel the request anymore. + def connected_read_only({:timeout, {:request_timeout, ref}}, _content, data) do + # We might get a request timeout that fired in the moment when we received the + # whole request, so we don't have the request in the state but we get the + # timer event anyways. In those cases, we don't do anything. + {request, data} = pop_request(data, ref) + + # Its possible that the request doesn't exist so we guard against that here. 
+ if request != nil do + send(request.from_pid, {request.request_ref, {:error, Error.exception(:request_timeout)}}) + end + + # If we're out of requests then we should enter the disconnected state. + # Otherwise wait for the remaining responses. + if Enum.empty?(data.requests) do + {:next_state, :disconnected, data} + else + {:keep_state, data} + end + end + + defp send_request(from, from_pid, request_ref, req, opts, data) do + telemetry_metadata = %{request: req, name: data.finch_name} + + request = %{ + stream: RequestStream.new(req.body), + from: from, + from_pid: from_pid, + request_ref: request_ref, + telemetry: %{ + metadata: telemetry_metadata, + send: Telemetry.start(:send, telemetry_metadata) + } + } + + body = if req.body == nil, do: nil, else: :stream + + data + |> start_request(req.method, Finch.Request.request_path(req), req.headers, body) + |> stream_request(request, opts) + end + + defp start_request(data, method, path, headers, body) do + case HTTP2.request(data.conn, method, path, headers, body) do + {:ok, conn, ref} -> + {:ok, put_in(data.conn, conn), ref} + + {:error, conn, reason} -> + {:error, put_in(data.conn, conn), reason} + end + end + + defp stream_request({:ok, data, ref}, request, opts) do + data = put_request(data, ref, request) + + case continue_request(data, ref, request) do + {:ok, data} -> + # Set a timeout to close the request after a given timeout + request_timeout = {{:timeout, {:request_timeout, ref}}, opts[:receive_timeout], nil} + + {:keep_state, data, [request_timeout]} + + error -> + stream_request(error, request, opts) + end + end + + defp stream_request({:error, data, %HTTPError{reason: :closed_for_writing}}, request, _opts) do + reply(request, {:error, Error.exception(:read_only)}) + + if HTTP2.open?(data.conn, :read) && Enum.any?(data.requests) do + {:next_state, :connected_read_only, data} + else + {:next_state, :disconnected, data} + end + end + + defp stream_request({:error, data, error}, request, _opts) do + 
reply(request, {:error, error}) + + if HTTP2.open?(data.conn) do + {:keep_state, data} + else + {:next_state, :disconnected, data} + end + end + + defp handle_responses(data, responses) do + Enum.reduce(responses, {data, _actions = []}, fn response, {data, actions} -> + handle_response(data, response, actions) + end) + end + + defp handle_response(data, {kind, ref, value}, actions) + when kind in [:status, :headers] do + data = + if request = data.requests[ref] do + send(request.from_pid, {request.request_ref, {kind, value}}) + request = put_in(request.telemetry.metadata[kind], value) + put_in(data.requests[ref], request) + else + data + end + + {data, actions} + end + + defp handle_response(data, {:data, ref, value}, actions) do + if request = data.requests[ref] do + send(request.from_pid, {request.request_ref, {:data, value}}) + end + + {data, actions} + end + + defp handle_response(data, {:done, ref}, actions) do + {request, data} = pop_request(data, ref) + + if request do + send(request.from_pid, {request.request_ref, :done}) + Telemetry.stop(:recv, request.telemetry.recv, request.telemetry.metadata) + end + + {data, [cancel_request_timeout_action(ref) | actions]} + end + + defp handle_response(data, {:error, ref, error}, actions) do + {request, data} = pop_request(data, ref) + + if request do + send(request.from_pid, {request.request_ref, {:error, error}}) + + Telemetry.stop( + :recv, + request.telemetry.recv, + Map.put(request.telemetry.metadata, :error, error) + ) + end + + {data, [cancel_request_timeout_action(ref) | actions]} + end + + defp cancel_request_timeout_action(request_ref) do + # By setting the timeout to :infinity, we cancel this timeout as per + # gen_statem documentation. + {{:timeout, {:request_timeout, request_ref}}, :infinity, nil} + end + + # Exponential backoff with jitter + # The backoff algorithm optimizes for tight bounds on completing a request successfully. 
+ # It does this by first calculating an exponential backoff factor based on the + # number of retries that have been performed. It then multiplies this factor against the + # base delay. The total maximum delay is found by taking the minimum of either the calculated delay + # or the maximum delay specified. This creates an upper bound on the maximum delay + # we can see. + # + # In order to find the actual delay value we take a random number between 0 and + # the maximum delay based on a uniform distribution. This randomness ensures that + # our retried requests don't "harmonize" making it harder for the downstream + # service to heal. + defp backoff(base_backoff, max_backoff, failure_count) do + factor = :math.pow(2, failure_count) + max_sleep = trunc(min(max_backoff, base_backoff * factor)) + :rand.uniform(max_sleep) + end + + # this is also a wrapper (Mint.HTTP2.stream_request_body/3) + defp stream_request_body(data, ref, body) do + case HTTP2.stream_request_body(data.conn, ref, body) do + {:ok, conn} -> {:ok, put_in(data.conn, conn)} + {:error, conn, reason} -> {:error, put_in(data.conn, conn), reason} + end + end + + defp stream_chunks(data, ref, body, %{stream: %{status: :done}}) do + with {:ok, data} <- stream_request_body(data, ref, body) do + stream_request_body(data, ref, :eof) + end + end + + defp stream_chunks(data, ref, body, _), do: stream_request_body(data, ref, body) + + defp continue_requests(data) do + Enum.reduce(data.requests, data, fn {ref, request}, data -> + with true <- request.stream.status == :streaming, + true <- HTTP2.open?(data.conn, :write), + {:ok, data} <- continue_request(data, ref, request) do + data + else + false -> + data + + {:error, data, %HTTPError{reason: :closed_for_writing}} -> + reply(request, {:error, Error.exception(:read_only)}) + data + + {:error, data, reason} -> + reply(request, {:error, reason}) + data + end + end) + end + + defp continue_request(data, ref, request) do + with :streaming <- request.stream.status, + 
window = smallest_window(data.conn, ref), + {stream, chunks} = RequestStream.next_chunk(request.stream, window), + request = %{request | stream: stream}, + {:ok, data} <- stream_chunks(data, ref, chunks, request) do + {:ok, complete_request_if_done(data, ref, request)} + else + :done -> + {:ok, complete_request_if_done(data, ref, request)} + + {:error, data, reason} -> + {_from, data} = pop_request(data, ref) + + {:error, data, reason} + end + end + + defp complete_request_if_done(data, ref, %{stream: %{status: :done}} = request) do + %{from: from, telemetry: telemetry} = request + Telemetry.stop(:send, telemetry.send, telemetry.metadata) + recv_start = Telemetry.start(:recv, telemetry.metadata) + request = put_in(request.telemetry[:recv], recv_start) + + if from do + reply(request, {:ok, recv_start}) + end + + put_in(data.requests[ref], request) + end + + defp complete_request_if_done(data, ref, request) do + put_in(data.requests[ref], request) + end + + defp smallest_window(conn, ref) do + min( + HTTP2.get_window_size(conn, :connection), + HTTP2.get_window_size(conn, {:request, ref}) + ) + end + + defp cancel_requests(data, pid) do + if request_refs = data.requests_by_pid[pid] do + Enum.reduce(request_refs, data, fn request_ref, data -> + cancel_request(data, request_ref) + end) + else + data + end + end + + defp cancel_request(data, request_ref) do + # If the Mint ref isn't present, it was removed because the request + # already completed and there's nothing to cancel. 
+ if ref = data.refs[request_ref] do + conn = + case HTTP2.cancel_request(data.conn, ref) do + {:ok, conn} -> conn + {:error, conn, _error} -> conn + end + + data = put_in(data.conn, conn) + {_from, data} = pop_request(data, ref) + data + else + data + end + end + + defp put_request(data, ref, request) do + PoolMetrics.maybe_add(data.metrics_ref, in_flight_requests: 1) + + data + |> put_in([:requests, ref], request) + |> put_in([:refs, request.request_ref], ref) + |> put_pid(request.from_pid, request.request_ref) + end + + defp pop_request(data, ref) do + PoolMetrics.maybe_add(data.metrics_ref, in_flight_requests: -1) + + case pop_in(data.requests[ref]) do + {nil, data} -> + {nil, data} + + {request, data} -> + {_ref, data} = + data + |> pop_pid(request.from_pid, request.request_ref) + |> pop_in([:refs, request.request_ref]) + + {request, data} + end + end + + defp put_pid(data, pid, request_ref) do + update_in(data.requests_by_pid, fn requests_by_pid -> + Map.update(requests_by_pid, pid, MapSet.new([request_ref]), &MapSet.put(&1, request_ref)) + end) + end + + defp pop_pid(data, pid, request_ref) do + update_in(data.requests_by_pid, fn requests_by_pid -> + requests = + requests_by_pid + |> Map.get(pid, MapSet.new()) + |> MapSet.delete(request_ref) + + if Enum.empty?(requests) do + Map.delete(requests_by_pid, pid) + else + Map.put(requests_by_pid, pid, requests) + end + end) + end + + defp reply(%{from: nil, from_pid: pid, request_ref: request_ref}, reply) do + send(pid, {request_ref, reply}) + :ok + end + + defp reply(%{from: from}, reply) do + :gen_statem.reply(from, reply) + end +end diff --git a/deps/finch/lib/finch/http2/pool_metrics.ex b/deps/finch/lib/finch/http2/pool_metrics.ex new file mode 100644 index 0000000..b7c59a6 --- /dev/null +++ b/deps/finch/lib/finch/http2/pool_metrics.ex @@ -0,0 +1,68 @@ +defmodule Finch.HTTP2.PoolMetrics do + @moduledoc """ + HTTP2 Pool metrics. 
+ + Available metrics: + + * `:pool_index` - Index of the pool + * `:in_flight_requests` - Number of requests currently on the connection + + Caveats: + + * HTTP2 pools have only one connection and leverage the multiplex nature + of the protocol. That's why we only keep the in flight requests, representing + the number of streams currently running on the connection. + """ + @type t :: %__MODULE__{} + + defstruct [ + :pool_index, + :in_flight_requests + ] + + @atomic_idx [ + pool_idx: 1, + in_flight_requests: 2 + ] + + def init(finch_name, shp, pool_idx) do + ref = :atomics.new(length(@atomic_idx), []) + :atomics.put(ref, @atomic_idx[:pool_idx], pool_idx) + + :persistent_term.put({__MODULE__, finch_name, shp, pool_idx}, ref) + {:ok, ref} + end + + def maybe_add(nil, _metrics_list), do: :ok + + def maybe_add(ref, metrics_list) do + Enum.each(metrics_list, fn {metric_name, val} -> + :atomics.add(ref, @atomic_idx[metric_name], val) + end) + end + + def get_pool_status(name, shp, pool_idx) do + {__MODULE__, name, shp, pool_idx} + |> :persistent_term.get(nil) + |> get_pool_status() + end + + def get_pool_status(nil), do: {:error, :not_found} + + def get_pool_status(ref) do + %{ + pool_idx: pool_idx, + in_flight_requests: in_flight_requests + } = + @atomic_idx + |> Enum.map(fn {k, idx} -> {k, :atomics.get(ref, idx)} end) + |> Map.new() + + result = %__MODULE__{ + pool_index: pool_idx, + in_flight_requests: in_flight_requests + } + + {:ok, result} + end +end diff --git a/deps/finch/lib/finch/http2/request_stream.ex b/deps/finch/lib/finch/http2/request_stream.ex new file mode 100644 index 0000000..8fc4d8b --- /dev/null +++ b/deps/finch/lib/finch/http2/request_stream.ex @@ -0,0 +1,83 @@ +defmodule Finch.HTTP2.RequestStream do + @moduledoc false + + defstruct [:body, :status, :buffer, :continuation] + + def new(body) do + enumerable = + case body do + {:stream, stream} -> Stream.map(stream, &with_byte_size/1) + nil -> [with_byte_size("")] + io_data -> 
[with_byte_size(io_data)] + end + + reducer = &reduce_with_suspend/2 + + %__MODULE__{ + body: body, + status: if(body == nil, do: :done, else: :streaming), + buffer: <<>>, + continuation: &Enumerable.reduce(enumerable, &1, reducer) + } + end + + defp with_byte_size(binary) when is_binary(binary), do: {binary, byte_size(binary)} + defp with_byte_size(io_data), do: io_data |> IO.iodata_to_binary() |> with_byte_size() + + defp reduce_with_suspend( + {message, message_size}, + {message_buffer, message_buffer_size, window} + ) + when message_size + message_buffer_size > window do + {:suspend, + {[{message, message_size} | message_buffer], message_size + message_buffer_size, window}} + end + + defp reduce_with_suspend( + {message, message_size}, + {message_buffer, message_buffer_size, window} + ) do + {:cont, {[message | message_buffer], message_size + message_buffer_size, window}} + end + + # gets the next chunk of data that will fit into the given window size + def next_chunk(request, window) + + # when the buffer is empty, continue reducing the stream + def next_chunk(%__MODULE__{buffer: <<>>} = request, window) do + continue_reduce(request, {[], 0, window}) + end + + def next_chunk(%__MODULE__{buffer: buffer} = request, window) do + case buffer do + <> -> + # when the buffer contains more bytes than a window, send as much of the + # buffer as we can + {put_in(request.buffer, rest), bytes_to_send} + + _ -> + # when the buffer can fit in the windows, continue reducing using the buffer + # as the accumulator + continue_reduce(request, {[buffer], byte_size(buffer), window}) + end + end + + defp continue_reduce(request, acc) do + case request.continuation.({:cont, acc}) do + {finished, {messages, _size, _window}} when finished in [:done, :halted] -> + {put_in(request.status, :done), Enum.reverse(messages)} + + {:suspended, + {[{overload_message, overload_message_size} | messages_that_fit], total_size, window_size}, + next_continuation} -> + fittable_size = window_size - 
(total_size - overload_message_size) + + <> = + overload_message + + request = %{request | continuation: next_continuation, buffer: overload_binary} + + {request, Enum.reverse([fittable_binary | messages_that_fit])} + end + end +end diff --git a/deps/finch/lib/finch/pool.ex b/deps/finch/lib/finch/pool.ex new file mode 100644 index 0000000..a568aac --- /dev/null +++ b/deps/finch/lib/finch/pool.ex @@ -0,0 +1,32 @@ +defmodule Finch.Pool do + @moduledoc false + # Defines a behaviour that both http1 and http2 pools need to implement. + + @type request_ref :: {pool_mod :: module(), cancel_ref :: term()} + + @callback request( + pid(), + Finch.Request.t(), + acc, + Finch.stream(acc), + Finch.name(), + list() + ) :: {:ok, acc} | {:error, term(), acc} + when acc: term() + + @callback async_request( + pid(), + Finch.Request.t(), + Finch.name(), + list() + ) :: request_ref() + + @callback cancel_async_request(request_ref()) :: :ok + + @callback get_pool_status( + finch_name :: atom(), + {schema :: atom(), host :: String.t(), port :: integer()} + ) :: {:ok, list(map)} | {:error, :not_found} + + defguard is_request_ref(ref) when tuple_size(ref) == 2 and is_atom(elem(ref, 0)) +end diff --git a/deps/finch/lib/finch/pool_manager.ex b/deps/finch/lib/finch/pool_manager.ex new file mode 100644 index 0000000..43ce2d2 --- /dev/null +++ b/deps/finch/lib/finch/pool_manager.ex @@ -0,0 +1,208 @@ +defmodule Finch.PoolManager do + @moduledoc false + use GenServer + + @mint_tls_opts [ + :cacertfile, + :cacerts, + :ciphers, + :depth, + :eccs, + :hibernate_after, + :partial_chain, + :reuse_sessions, + :secure_renegotiate, + :server_name_indication, + :signature_algs, + :signature_algs_cert, + :supported_groups, + :verify, + :verify_fun, + :versions + ] + + @default_conn_hostname "localhost" + + def start_link(config) do + GenServer.start_link(__MODULE__, config, name: config.manager_name) + end + + @impl true + def init(config) do + if config.default_pool_config.start_pool_metrics? 
do + :ets.new(default_shp_table(config.registry_name), [ + :set, + :public, + :named_table + ]) + end + + Enum.each(config.pools, fn {shp, _} -> + do_start_pools(shp, config) + end) + + {:ok, config} + end + + def get_pool(registry_name, {_scheme, _host, _port} = key, opts \\ []) do + case lookup_pool(registry_name, key) do + {pid, _} = pool when is_pid(pid) -> + pool + + :none -> + if Keyword.get(opts, :auto_start?, true), + do: start_pools(registry_name, key), + else: :not_found + end + end + + def lookup_pool(registry, key) do + case all_pool_instances(registry, key) do + [] -> + :none + + [pool] -> + pool + + pools -> + # TODO implement alternative strategies + Enum.random(pools) + end + end + + def all_pool_instances(registry, key), do: Registry.lookup(registry, key) + + def start_pools(registry_name, shp) do + {:ok, config} = Registry.meta(registry_name, :config) + GenServer.call(config.manager_name, {:start_pools, shp}) + end + + @impl true + def handle_call({:start_pools, shp}, _from, state) do + reply = + case lookup_pool(state.registry_name, shp) do + :none -> do_start_pools(shp, state) + pool -> pool + end + + {:reply, reply, state} + end + + defp do_start_pools(shp, config) do + pool_config = pool_config(config, shp) + + if pool_config.start_pool_metrics? do + maybe_track_default_shp(config, shp) + put_pool_count(config, shp, pool_config.count) + end + + Enum.map(1..pool_config.count, fn pool_idx -> + pool_args = pool_args(shp, config, pool_config, pool_idx) + # Choose pool type here... 
+ {:ok, pid} = + DynamicSupervisor.start_child(config.supervisor_name, {pool_config.mod, pool_args}) + + {pid, pool_config.mod} + end) + |> hd() + end + + defp put_pool_count(%{registry_name: name}, shp, val), + do: :persistent_term.put({__MODULE__, :pool_count, name, shp}, val) + + def get_pool_count(finch_name, shp), + do: :persistent_term.get({__MODULE__, :pool_count, finch_name, shp}, nil) + + defp maybe_track_default_shp(%{pools: pools, registry_name: name}, shp) do + if Map.has_key?(pools, shp), + do: :ok, + else: add_default_shp(name, shp) + end + + defp default_shp_table(name), do: :"#{name}.default_shp_table" + + defp add_default_shp(name, shp) do + true = + name + |> default_shp_table() + |> :ets.insert({shp}) + + :ok + end + + def get_default_shps(name) do + tname = default_shp_table(name) + + if :ets.whereis(tname) == :undefined do + [] + else + tname + |> :ets.tab2list() + |> Enum.map(fn {shp} -> shp end) + end + end + + def maybe_remove_default_shp(name, shp) do + tname = default_shp_table(name) + + if :ets.whereis(tname) == :undefined do + :ok + else + true = :ets.delete(tname, shp) + :ok + end + end + + defp pool_config(%{pools: config, default_pool_config: default}, shp) do + config + |> Map.get(shp, default) + |> maybe_drop_tls_options(shp) + |> maybe_add_hostname(shp) + end + + # Drop TLS options from :conn_opts for default pools with :http scheme, + # otherwise you will get :badarg error from :gen_tcp + defp maybe_drop_tls_options(config, {:http, _, _} = _shp) when is_map(config) do + with conn_opts when is_list(conn_opts) <- config[:conn_opts], + trns_opts when is_list(trns_opts) <- conn_opts[:transport_opts] do + trns_opts = Keyword.drop(trns_opts, @mint_tls_opts) + conn_opts = Keyword.put(conn_opts, :transport_opts, trns_opts) + Map.put(config, :conn_opts, conn_opts) + else + _ -> config + end + end + + defp maybe_drop_tls_options(config, _), do: config + + # Hostname is required when the address is not a URL (binary) so we need to specify + 
# a default value in case the configuration does not specify one. + defp maybe_add_hostname(config, {_scheme, {:local, _path}, _port} = _shp) when is_map(config) do + conn_opts = + config |> Map.get(:conn_opts, []) |> Keyword.put_new(:hostname, @default_conn_hostname) + + Map.put(config, :conn_opts, conn_opts) + end + + defp maybe_add_hostname(config, _), do: config + + defp pool_args(shp, config, %{mod: Finch.HTTP1.Pool} = pool_config, pool_idx), + do: { + shp, + config.registry_name, + pool_config.size, + pool_config, + pool_config.pool_max_idle_time, + pool_config.start_pool_metrics?, + pool_idx + } + + defp pool_args(shp, config, %{mod: Finch.HTTP2.Pool} = pool_config, pool_idx), + do: { + shp, + config.registry_name, + pool_config, + pool_config.start_pool_metrics?, + pool_idx + } +end diff --git a/deps/finch/lib/finch/request.ex b/deps/finch/lib/finch/request.ex new file mode 100644 index 0000000..8eb394e --- /dev/null +++ b/deps/finch/lib/finch/request.ex @@ -0,0 +1,157 @@ +defmodule Finch.Request do + @moduledoc """ + A request struct. + """ + + @enforce_keys [:scheme, :host, :port, :method, :path, :headers, :body, :query] + defstruct [ + :scheme, + :host, + :port, + :method, + :path, + :headers, + :body, + :query, + :unix_socket, + private: %{} + ] + + @atom_methods [ + :get, + :post, + :put, + :patch, + :delete, + :head, + :options + ] + @methods [ + "GET", + "POST", + "PUT", + "PATCH", + "DELETE", + "HEAD", + "OPTIONS" + ] + @atom_to_method Enum.zip(@atom_methods, @methods) |> Enum.into(%{}) + + @typedoc """ + An HTTP request method represented as an `atom()` or a `String.t()`. + + The following atom methods are supported: `#{Enum.map_join(@atom_methods, "`, `", &inspect/1)}`. + You can use any arbitrary method by providing it as a `String.t()`. + """ + @type method() :: :get | :post | :head | :patch | :delete | :options | :put | String.t() + + @typedoc """ + A Uniform Resource Locator, the address of a resource on the Web. 
+ """ + @type url() :: String.t() | URI.t() + + @typedoc """ + Request headers. + """ + @type headers() :: Mint.Types.headers() + + @typedoc """ + Optional request body. + """ + @type body() :: iodata() | {:stream, Enumerable.t()} | nil + + @type private_metadata() :: %{optional(atom()) => term()} + + @type t :: %__MODULE__{ + scheme: Mint.Types.scheme(), + host: String.t() | nil, + port: :inet.port_number(), + method: String.t(), + path: String.t(), + headers: headers(), + body: body(), + query: String.t() | nil, + unix_socket: String.t() | nil, + private: private_metadata() + } + + @doc """ + Sets a new **private** key and value in the request metadata. This storage is meant to be used by libraries + and frameworks to inject information about the request that needs to be retrieved later on, for example, + from handlers that consume `Finch.Telemetry` events. + """ + @spec put_private(t(), key :: atom(), value :: term()) :: t() + def put_private(%__MODULE__{private: private} = request, key, value) when is_atom(key) do + %{request | private: Map.put(private, key, value)} + end + + def put_private(%__MODULE__{}, key, _) do + raise ArgumentError, """ + got unsupported private metadata key #{inspect(key)} + only atoms are allowed as keys of the `:private` field. 
+ """ + end + + @doc false + def request_path(%{path: path, query: nil}), do: path + def request_path(%{path: path, query: ""}), do: path + def request_path(%{path: path, query: query}), do: "#{path}?#{query}" + + @doc false + def build(method, url, headers, body, opts) do + unix_socket = Keyword.get(opts, :unix_socket) + {scheme, host, port, path, query} = parse_url(url) + + %Finch.Request{ + scheme: scheme, + host: host, + port: port, + method: build_method(method), + path: path, + headers: headers, + body: body, + query: query, + unix_socket: unix_socket + } + end + + @doc false + def parse_url(url) when is_binary(url) do + url |> URI.parse() |> parse_url() + end + + def parse_url(%URI{} = parsed_uri) do + normalized_path = parsed_uri.path || "/" + + scheme = + case parsed_uri.scheme do + "https" -> + :https + + "http" -> + :http + + nil -> + raise ArgumentError, "scheme is required for url: #{URI.to_string(parsed_uri)}" + + scheme -> + raise ArgumentError, + "invalid scheme \"#{scheme}\" for url: #{URI.to_string(parsed_uri)}" + end + + {scheme, parsed_uri.host, parsed_uri.port, normalized_path, parsed_uri.query} + end + + defp build_method(method) when is_binary(method), do: method + defp build_method(method) when method in @atom_methods, do: @atom_to_method[method] + + defp build_method(method) do + supported = Enum.map_join(@atom_methods, ", ", &inspect/1) + + raise ArgumentError, """ + got unsupported atom method #{inspect(method)}. + Only the following methods can be provided as atoms: #{supported}. + Otherwise you must pass a binary. + """ + end +end diff --git a/deps/finch/lib/finch/response.ex b/deps/finch/lib/finch/response.ex new file mode 100644 index 0000000..839152d --- /dev/null +++ b/deps/finch/lib/finch/response.ex @@ -0,0 +1,21 @@ +defmodule Finch.Response do + @moduledoc """ + A response to a request. 
+ """ + + alias __MODULE__ + + defstruct [ + :status, + body: "", + headers: [], + trailers: [] + ] + + @type t :: %Response{ + status: Mint.Types.status(), + body: binary(), + headers: Mint.Types.headers(), + trailers: Mint.Types.headers() + } +end diff --git a/deps/finch/lib/finch/ssl.ex b/deps/finch/lib/finch/ssl.ex new file mode 100644 index 0000000..f522c3c --- /dev/null +++ b/deps/finch/lib/finch/ssl.ex @@ -0,0 +1,26 @@ +defmodule Finch.SSL do + @moduledoc false + + alias Mint.HTTP + + def maybe_log_secrets(:https, conn_opts, mint) do + ssl_key_log_file_device = Keyword.get(conn_opts, :ssl_key_log_file_device) + + if ssl_key_log_file_device != nil do + socket = HTTP.get_socket(mint) + # Note: not every ssl library version returns information for :keylog. By using `with` here, + # anything other than the expected return value is silently ignored. + with {:ok, [{:keylog, keylog_items}]} <- :ssl.connection_information(socket, [:keylog]) do + for keylog_item <- keylog_items do + :ok = IO.puts(ssl_key_log_file_device, keylog_item) + end + end + else + :ok + end + end + + def maybe_log_secrets(_scheme, _conn_opts, _mint) do + :ok + end +end diff --git a/deps/finch/lib/finch/telemetry.ex b/deps/finch/lib/finch/telemetry.ex new file mode 100644 index 0000000..dd288cf --- /dev/null +++ b/deps/finch/lib/finch/telemetry.ex @@ -0,0 +1,318 @@ +defmodule Finch.Telemetry do + @moduledoc """ + Telemetry integration. + + Unless specified, all times are in `:native` units. + + Finch executes the following events: + + ### Request Start + + `[:finch, :request, :start]` - Executed when `Finch.request/3` or `Finch.stream/5` is called. + + #### Measurements + + * `:system_time` - The system time. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + + ### Request Stop + + `[:finch, :request, :stop]` - Executed after `Finch.request/3` or `Finch.stream/5` ended. 
+ + #### Measurements + + * `:duration` - Time taken from the request start event. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + * `:result` - The result of the operation. In case of `Finch.stream/5` this is + `{:ok, acc} | {:error, Exception.t()}`, where `acc` is the accumulator result of the + reducer passed in `Finch.stream/5`. In case of `Finch.request/3` this is + `{:ok, Finch.Response.t()} | {:error, Exception.t()}`. + + ### Request Exception + + `[:finch, :request, :exception]` - Executed when an exception occurs while executing + `Finch.request/3` or `Finch.stream/5`. + + #### Measurements + + * `:duration` - The time it took since the start before raising the exception. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + * `:kind` - The type of exception. + * `:reason` - Error description or error data. + * `:stacktrace` - The stacktrace. + + ### Queue Start + + `[:finch, :queue, :start]` - Executed before checking out an HTTP1 connection from the pool. + + #### Measurements + + * `:system_time` - The system time. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:pool` - The pool's PID. + * `:request` - The request (`Finch.Request`). + + ### Queue Stop + + `[:finch, :queue, :stop]` - Executed after an HTTP1 connection is retrieved from the pool. + + #### Measurements + + * `:duration` - Time taken to check out a pool connection. + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:pool` - The pool's PID. + * `:request` - The request (`Finch.Request`). + + ### Queue Exception + + `[:finch, :queue, :exception]` - Executed if checking out an HTTP1 connection throws an exception. + + #### Measurements + + * `:duration` - The time it took since queue start event before raising an exception. 
+ + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + * `:kind` - The type of exception. + * `:reason` - Error description or error data. + * `:stacktrace` - The stacktrace. + + ### Connect Start + + `[:finch, :connect, :start]` - Executed before opening a new connection. + If a connection is being re-used this event will *not* be executed. + + #### Measurements + + * `:system_time` - The system time. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + + ### Connect Stop + + `[:finch, :connect, :stop]` - Executed after a connection is opened. + + #### Measurements + + * `:duration` - Time taken to connect to the host. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + * `:error` - This value is optional. It includes any errors that occurred while opening the connection. + + ### Send Start + + `[:finch, :send, :start]` - Executed before sending a request. + + #### Measurements + + * `:name` - The name of the Finch instance. + * `:system_time` - The system time. + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:request` - The request (`Finch.Request`). + + ### Send Stop + + `[:finch, :send, :stop]` - Executed after a request is finished. + + #### Measurements + + * `:name` - The name of the Finch instance. + * `:duration` - Time taken to make the request. + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:request` - The request (`Finch.Request`). + * `:error` - This value is optional. It includes any errors that occurred while making the request. 
+ + ### Receive Start + + `[:finch, :recv, :start]` - Executed before receiving the response. + + #### Measurements + + * `:system_time` - The system time. + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + + ### Receive Stop + + `[:finch, :recv, :stop]` - Executed after a response has been fully received. + + #### Measurements + + * `:duration` - Duration to receive the response. + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + * `:status` - The response status (`Mint.Types.status()`). + * `:headers` - The response headers (`Mint.Types.headers()`). + * `:error` - This value is optional. It includes any errors that occurred while receiving the response. + + ### Receive Exception + + `[:finch, :recv, :exception]` - Executed if an exception is thrown before the response has + been fully received. + + #### Measurements + + * `:duration` - The time it took before raising an exception + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:request` - The request (`Finch.Request`). + * `:kind` - The type of exception. + * `:reason` - Error description or error data. + * `:stacktrace` - The stacktrace. + + ### Reused Connection + + `[:finch, :reused_connection]` - Executed if an existing HTTP1 connection is reused. There are no measurements provided with this event. + + #### Metadata + + * `:name` - The name of the Finch instance. + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + + ### Conn Max Idle Time Exceeded + + `[:finch, :conn_max_idle_time_exceeded]` - Executed if an HTTP1 connection was discarded because the `conn_max_idle_time` had been reached. 
+ + #### Measurements + + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + + ### Pool Max Idle Time Exceeded + + `[:finch, :pool_max_idle_time_exceeded]` - Executed if an HTTP1 pool was terminated because the `pool_max_idle_time` has been reached. There are no measurements provided with this event. + + #### Metadata + + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + + ### Max Idle Time Exceeded (Deprecated) + + `[:finch, :max_idle_time_exceeded]` - Executed if an HTTP1 connection was discarded because the `max_idle_time` had been reached. + + *Deprecated:* use `:conn_max_idle_time_exceeded` event instead. + + #### Measurements + + * `:idle_time` - Elapsed time since the connection was last checked in or initialized. + + #### Metadata + + * `:scheme` - The scheme used in the connection. either `http` or `https`. + * `:host` - The host address. + * `:port` - The port to connect on. + """ + + @doc false + # emits a `start` telemetry event and returns the the start time + def start(event, meta \\ %{}, extra_measurements \\ %{}) do + start_time = System.monotonic_time() + + :telemetry.execute( + [:finch, event, :start], + Map.merge(extra_measurements, %{system_time: System.system_time()}), + meta + ) + + start_time + end + + @doc false + # Emits a stop event. 
+ def stop(event, start_time, meta \\ %{}, extra_measurements \\ %{}) do + end_time = System.monotonic_time() + measurements = Map.merge(extra_measurements, %{duration: end_time - start_time}) + + :telemetry.execute( + [:finch, event, :stop], + measurements, + meta + ) + end + + @doc false + def exception(event, start_time, kind, reason, stack, meta \\ %{}, extra_measurements \\ %{}) do + end_time = System.monotonic_time() + measurements = Map.merge(extra_measurements, %{duration: end_time - start_time}) + + meta = + meta + |> Map.put(:kind, kind) + |> Map.put(:reason, reason) + |> Map.put(:stacktrace, stack) + + :telemetry.execute([:finch, event, :exception], measurements, meta) + end + + @doc false + # Used for reporting generic events + def event(event, measurements, meta) do + :telemetry.execute([:finch, event], measurements, meta) + end + + @doc false + # Used to easily create :start, :stop, :exception events. + def span(event, start_metadata, fun) do + :telemetry.span( + [:finch, event], + start_metadata, + fun + ) + end +end diff --git a/deps/finch/mix.exs b/deps/finch/mix.exs new file mode 100644 index 0000000..4e857d2 --- /dev/null +++ b/deps/finch/mix.exs @@ -0,0 +1,73 @@ +defmodule Finch.MixProject do + use Mix.Project + + @name "Finch" + @version "0.21.0" + @repo_url "https://github.com/sneako/finch" + + def project do + [ + app: :finch, + version: @version, + elixir: "~> 1.13", + description: "An HTTP client focused on performance.", + package: package(), + docs: docs(), + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + name: @name, + source_url: @repo_url, + deps: deps() + ] + end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(:dev), do: ["lib", "test/support/test_usage.ex"] + defp elixirc_paths(_), do: ["lib"] + + def application do + [ + extra_applications: [:logger] + ] + end + + defp deps do + [ + {:mint, "~> 1.6.2 or ~> 1.7"}, + {:nimble_pool, "~> 1.1"}, + {:nimble_options, "~> 
0.4 or ~> 1.0"}, + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:mime, "~> 1.0 or ~> 2.0"}, + {:ex_doc, "~> 0.28", only: :dev, runtime: false}, + {:credo, "~> 1.3", only: [:dev, :test]}, + {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false}, + {:bypass, "~> 2.0", only: :test}, + {:cowboy, "~> 2.7", only: [:dev, :test]}, + {:plug_cowboy, "~> 2.0", only: [:dev, :test]}, + {:x509, "~> 0.8", only: [:dev, :test]}, + {:mimic, "~> 1.7", only: :test} + ] + end + + defp package do + [ + licenses: ["MIT"], + links: %{ + "GitHub" => @repo_url, + "Changelog" => "https://hexdocs.pm/finch/changelog.html" + } + ] + end + + defp docs do + [ + logo: "assets/Finch_logo_all-White.png", + source_ref: "v#{@version}", + source_url: @repo_url, + main: @name, + extras: [ + "CHANGELOG.md" + ] + ] + end +end diff --git a/deps/hpax/.formatter.exs b/deps/hpax/.formatter.exs new file mode 100644 index 0000000..8c80937 --- /dev/null +++ b/deps/hpax/.formatter.exs @@ -0,0 +1,5 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"], + import_deps: [:stream_data] +] diff --git a/deps/hpax/.hex b/deps/hpax/.hex new file mode 100644 index 0000000..c24a39e Binary files /dev/null and b/deps/hpax/.hex differ diff --git a/deps/hpax/CHANGELOG.md b/deps/hpax/CHANGELOG.md new file mode 100644 index 0000000..73d4032 --- /dev/null +++ b/deps/hpax/CHANGELOG.md @@ -0,0 +1,34 @@ +# Changelog + +## v1.0.3 + + * Silence warnings on (upcoming, at this time) Elixir 1.19+. + +## v1.0.2 + + * The changes in v1.0.1 introduced some subtle compression errors with HPACK encoding. This has been fixed in this version. See [this issue](https://github.com/elixir-mint/hpax/issues/20) for more details. + +## v1.0.1 + + * Fix some issues with dynamic table resizing. You should not need to do anything to your code, it should Just Work™. If you want to read more, [this issue](https://github.com/elixir-mint/hpax/issues/18) has all the context. 
+ +## v1.0.0 + + * Silence warnings on Elixir 1.17+. + * Require Elixir 1.12+. + +## v0.2.0 + + * Add `HPAX.new/2`, which supports a list of options. For now, the only option + is `:huffman_encoding`, to choose whether to use Huffman encoding or not. + * Add `HPAX.encode/3`, which supports encoding all headers with the same + action. + * Add the `HPAX.table/0` opaque type. + +## v0.1.2 + + * Fix `use Bitwise` deprecation warning. + +## v0.1.1 + + * Improve checking of dynamic resize updates. diff --git a/deps/hpax/LICENSE.txt b/deps/hpax/LICENSE.txt new file mode 100644 index 0000000..d9a10c0 --- /dev/null +++ b/deps/hpax/LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/deps/hpax/README.md b/deps/hpax/README.md new file mode 100644 index 0000000..4cc27f7 --- /dev/null +++ b/deps/hpax/README.md @@ -0,0 +1,74 @@ +# HPAX + +![CI](https://github.com/elixir-mint/hpax/actions/workflows/main.yml/badge.svg) +[![Docs](https://img.shields.io/badge/api-docs-green.svg?style=flat)](https://hexdocs.pm/hpax) +[![Hex.pm Version](http://img.shields.io/hexpm/v/hpax.svg?style=flat)](https://hex.pm/packages/hpax) +[![Coverage Status](https://coveralls.io/repos/github/elixir-mint/hpax/badge.svg?branch=main)](https://coveralls.io/github/elixir-mint/hpax?branch=main) + +HPAX is an Elixir implementation of the HPACK header compression algorithm as used in HTTP/2 and +defined in RFC 7541. HPAX is used by several Elixir projects, including the +[Mint](https://github.com/elixir-mint/mint) HTTP client and +[bandit](https://github.com/mtrudel/bandit) HTTP server projects. + +## Installation + +To install HPAX, add it to your `mix.exs` file. + +```elixir +defp deps do + [ + {:hpax, "~> 0.1.0"} + ] +end +``` + +Then, run `$ mix deps.get`. + +## Usage + +HPAX is designed to be used in both encoding and decoding scenarios. In both cases, a context is +used to maintain state internal to the HPACK algorithm. In the common use case of using HPAX +within HTTP/2, this context is called a **table** and must be shared between any +subsequent encoding/decoding calls within +an endpoint. Note that the contexts used for encoding and decoding within HTTP/2 are completely +distinct from one another, even though they are structurally identical. 
+ +To encode a set of headers into a binary with HPAX: + +```elixir +context = HPAX.new(4096) +headers = [{:store, ":status", "201"}, {:store, "location", "http://example.com"}] +{encoded_headers, context} = HPAX.encode(headers, context) +#=> {iodata, updated_context} +``` + +To decode a binary into a set of headers with HPAX: + +```elixir +context = HPAX.new(4096) +encoded_headers = <<...>> +{:ok, headers, context} = HPAX.decode(encoded_headers, context) +#=> {:ok, [{:store, ":status", "201"}, {:store, "location", "http://example.com"}], updated_context} +``` + +For complete usage information, please see the HPAX [documentation](https://hex.pm/packages/hpax). + +## Contributing + +If you wish to contribute check out the [issue list](https://github.com/elixir-mint/hpax/issues) and let us know what you want to work on so we can discuss it and reduce duplicate work. + +## License + +Copyright 2021 Eric Meadows-Jönsson and Andrea Leopardi + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deps/hpax/hex_metadata.config b/deps/hpax/hex_metadata.config new file mode 100644 index 0000000..b8f0824 --- /dev/null +++ b/deps/hpax/hex_metadata.config @@ -0,0 +1,15 @@ +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-mint/hpax">>}]}. +{<<"name">>,<<"hpax">>}. +{<<"version">>,<<"1.0.3">>}. +{<<"description">>, + <<"Implementation of the HPACK protocol (RFC 7541) for Elixir">>}. +{<<"elixir">>,<<"~> 1.12">>}. +{<<"app">>,<<"hpax">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. 
+{<<"requirements">>,[]}. +{<<"files">>, + [<<"lib">>,<<"lib/hpax">>,<<"lib/hpax/huffman_table">>, + <<"lib/hpax/types.ex">>,<<"lib/hpax/huffman.ex">>,<<"lib/hpax/table.ex">>, + <<"lib/hpax.ex">>,<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>, + <<"LICENSE.txt">>,<<"CHANGELOG.md">>]}. +{<<"build_tools">>,[<<"mix">>]}. diff --git a/deps/hpax/lib/hpax.ex b/deps/hpax/lib/hpax.ex new file mode 100644 index 0000000..91c9401 --- /dev/null +++ b/deps/hpax/lib/hpax.ex @@ -0,0 +1,360 @@ +defmodule HPAX do + @moduledoc """ + Support for the HPACK header compression algorithm. + + This module provides support for the HPACK header compression algorithm used mainly in HTTP/2. + + ## Encoding and decoding contexts + + The HPACK algorithm requires both + + * an encoding context on the encoder side + * a decoding context on the decoder side + + These contexts are semantically different but structurally the same. In HPACK they are + implemented as **HPACK tables**. This library uses the name "tables" everywhere internally + + HPACK tables can be created through the `new/1` function. + """ + + alias HPAX.{Table, Types} + + @typedoc """ + An HPACK table. + + This can be used for encoding or decoding. + """ + @typedoc since: "0.2.0" + @opaque table() :: Table.t() + + @typedoc """ + An HPACK header name. + """ + @type header_name() :: binary() + + @typedoc """ + An HPACK header value. + """ + @type header_value() :: binary() + + @valid_header_actions [:store, :store_name, :no_store, :never_store] + + @doc """ + Creates a new HPACK table. + + Same as `new/2` with default options. + """ + @spec new(non_neg_integer()) :: table() + def new(max_table_size), do: new(max_table_size, []) + + @doc """ + Create a new HPACK table that can be used as encoding or decoding context. + + See the "Encoding and decoding contexts" section in the module documentation. + + `max_table_size` is the maximum table size (in bytes) for the newly created table. 
+ + ## Options + + This function accepts the following `options`: + + * `:huffman_encoding` - (since 0.2.0) `:always` or `:never`. If `:always`, + then HPAX will always encode headers using Huffman encoding. If `:never`, + HPAX will not use any Huffman encoding. Defaults to `:never`. + + ## Examples + + encoding_context = HPAX.new(4096) + + """ + @doc since: "0.2.0" + @spec new(non_neg_integer(), [keyword()]) :: table() + def new(max_table_size, options) + when is_integer(max_table_size) and max_table_size >= 0 and is_list(options) do + options = Keyword.put_new(options, :huffman_encoding, :never) + + Enum.each(options, fn + {:huffman_encoding, _huffman_encoding} -> :ok + {key, _value} -> raise ArgumentError, "unknown option: #{inspect(key)}" + end) + + Table.new(max_table_size, Keyword.fetch!(options, :huffman_encoding)) + end + + @doc """ + Resizes the given table to the given maximum size. + + This is intended for use where the overlying protocol has signaled a change to the table's + maximum size, such as when an HTTP/2 `SETTINGS` frame is received. + + If the indicated size is less than the table's current size, entries + will be evicted as needed to fit within the specified size, and the table's + maximum size will be decreased to the specified value. A flag will also be + set which will enqueue a "dynamic table size update" command to be prefixed + to the next block encoded with this table, per + [RFC9113§4.3.1](https://www.rfc-editor.org/rfc/rfc9113.html#section-4.3.1). + + If the indicated size is greater than or equal to the table's current max size, no entries are evicted + and the table's maximum size changes to the specified value. + + ## Examples + + decoding_context = HPAX.new(4096) + HPAX.resize(decoding_context, 8192) + + """ + @spec resize(table(), non_neg_integer()) :: table() + defdelegate resize(table, new_max_size), to: Table + + @doc """ + Decodes a header block fragment (HBF) through a given table. 
+ + If decoding is successful, this function returns a `{:ok, headers, updated_table}` tuple where + `headers` is a list of decoded headers, and `updated_table` is the updated table. If there's + an error in decoding, this function returns `{:error, reason}`. + + ## Examples + + decoding_context = HPAX.new(1000) + hbf = get_hbf_from_somewhere() + HPAX.decode(hbf, decoding_context) + #=> {:ok, [{":method", "GET"}], decoding_context} + + """ + @spec decode(binary(), table()) :: + {:ok, [{header_name(), header_value()}], table()} | {:error, term()} + + # Dynamic resizes must occur only at the start of a block + # https://datatracker.ietf.org/doc/html/rfc7541#section-4.2 + def decode(<<0b001::3, rest::bitstring>>, %Table{} = table) do + {new_max_size, rest} = decode_integer(rest, 5) + + # Dynamic resizes must be less than protocol max table size + # https://datatracker.ietf.org/doc/html/rfc7541#section-6.3 + if new_max_size <= table.protocol_max_table_size do + decode(rest, Table.dynamic_resize(table, new_max_size)) + else + {:error, :protocol_error} + end + end + + def decode(block, %Table{} = table) when is_binary(block) do + decode_headers(block, table, _acc = []) + catch + :throw, {:hpax, error} -> {:error, error} + end + + @doc """ + Encodes a list of headers through the given table. + + Returns a two-element tuple where the first element is a binary representing the encoded headers + and the second element is an updated table. 
+ + ## Examples + + headers = [{:store, ":authority", "https://example.com"}] + encoding_context = HPAX.new(1000) + HPAX.encode(headers, encoding_context) + #=> {iodata, updated_encoding_context} + + """ + @spec encode([header], table()) :: {iodata(), table()} + when header: {action, header_name(), header_value()}, + action: :store | :store_name | :no_store | :never_store + def encode(headers, %Table{} = table) when is_list(headers) do + {table, pending_resizes} = Table.pop_pending_resizes(table) + acc = Enum.map(pending_resizes, &[<<0b001::3, Types.encode_integer(&1, 5)::bitstring>>]) + encode_headers(headers, table, acc) + end + + @doc """ + Encodes a list of headers through the given table, applying the same `action` to all of them. + + This function is the similar to `encode/2`, but `headers` are `{name, value}` tuples instead, + and the same `action` is applied to all headers. + + ## Examples + + headers = [{":authority", "https://example.com"}] + encoding_context = HPAX.new(1000) + HPAX.encode(:store, headers, encoding_context) + #=> {iodata, updated_encoding_context} + + """ + @doc since: "0.2.0" + @spec encode(action, [header], table()) :: {iodata(), table()} + when action: :store | :store_name | :no_store | :never_store, + header: {header_name(), header_value()} + def encode(action, headers, %Table{} = table) + when is_list(headers) and action in [:store, :store_name, :no_store, :never_store] do + headers + |> Enum.map(fn {name, value} -> {action, name, value} end) + |> encode(table) + end + + ## Helpers + + defp decode_headers(<<>>, table, acc) do + {:ok, Enum.reverse(acc), table} + end + + # Indexed header field + # http://httpwg.org/specs/rfc7541.html#rfc.section.6.1 + defp decode_headers(<<0b1::1, rest::bitstring>>, table, acc) do + {index, rest} = decode_integer(rest, 7) + decode_headers(rest, table, [lookup_by_index!(table, index) | acc]) + end + + # Literal header field with incremental indexing + # 
http://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1 + defp decode_headers(<<0b01::2, rest::bitstring>>, table, acc) do + {name, value, rest} = + case rest do + # The header name is a string. + <<0::6, rest::binary>> -> + {name, rest} = decode_binary(rest) + {value, rest} = decode_binary(rest) + {name, value, rest} + + # The header name is an index to be looked up in the table. + _other -> + {index, rest} = decode_integer(rest, 6) + {value, rest} = decode_binary(rest) + {name, _value} = lookup_by_index!(table, index) + {name, value, rest} + end + + decode_headers(rest, Table.add(table, name, value), [{name, value} | acc]) + end + + # Literal header field without indexing + # http://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2 + defp decode_headers(<<0b0000::4, rest::bitstring>>, table, acc) do + {name, value, rest} = + case rest do + <<0::4, rest::binary>> -> + {name, rest} = decode_binary(rest) + {value, rest} = decode_binary(rest) + {name, value, rest} + + _other -> + {index, rest} = decode_integer(rest, 4) + {value, rest} = decode_binary(rest) + {name, _value} = lookup_by_index!(table, index) + {name, value, rest} + end + + decode_headers(rest, table, [{name, value} | acc]) + end + + # Literal header field never indexed + # http://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3 + defp decode_headers(<<0b0001::4, rest::bitstring>>, table, acc) do + {name, value, rest} = + case rest do + <<0::4, rest::binary>> -> + {name, rest} = decode_binary(rest) + {value, rest} = decode_binary(rest) + {name, value, rest} + + _other -> + {index, rest} = decode_integer(rest, 4) + {value, rest} = decode_binary(rest) + {name, _value} = lookup_by_index!(table, index) + {name, value, rest} + end + + # TODO: enforce the "never indexed" part somehow. 
+ decode_headers(rest, table, [{name, value} | acc]) + end + + defp decode_headers(_other, _table, _acc) do + throw({:hpax, :protocol_error}) + end + + defp lookup_by_index!(table, index) do + case Table.lookup_by_index(table, index) do + {:ok, header} -> header + :error -> throw({:hpax, {:index_not_found, index}}) + end + end + + defp decode_integer(bitstring, prefix) do + case Types.decode_integer(bitstring, prefix) do + {:ok, int, rest} -> {int, rest} + :error -> throw({:hpax, :bad_integer_encoding}) + end + end + + defp decode_binary(binary) do + case Types.decode_binary(binary) do + {:ok, binary, rest} -> {binary, rest} + :error -> throw({:hpax, :bad_binary_encoding}) + end + end + + defp encode_headers([], table, acc) do + {acc, table} + end + + defp encode_headers([{action, name, value} | rest], table, acc) + when action in @valid_header_actions and is_binary(name) and is_binary(value) do + huffman? = table.huffman_encoding == :always + + {encoded, table} = + case Table.lookup_by_header(table, name, value) do + {:full, index} -> + {encode_indexed_header(index), table} + + {:name, index} when action == :store -> + {encode_literal_header_with_indexing(index, value, huffman?), + Table.add(table, name, value)} + + {:name, index} when action in [:store_name, :no_store] -> + {encode_literal_header_without_indexing(index, value, huffman?), table} + + {:name, index} when action == :never_store -> + {encode_literal_header_never_indexed(index, value, huffman?), table} + + :not_found when action in [:store, :store_name] -> + {encode_literal_header_with_indexing(name, value, huffman?), + Table.add(table, name, value)} + + :not_found when action == :no_store -> + {encode_literal_header_without_indexing(name, value, huffman?), table} + + :not_found when action == :never_store -> + {encode_literal_header_never_indexed(name, value, huffman?), table} + end + + encode_headers(rest, table, [acc, encoded]) + end + + defp encode_indexed_header(index) do + <<1::1, 
Types.encode_integer(index, 7)::bitstring>> + end + + defp encode_literal_header_with_indexing(index, value, huffman?) when is_integer(index) do + [<<1::2, Types.encode_integer(index, 6)::bitstring>>, Types.encode_binary(value, huffman?)] + end + + defp encode_literal_header_with_indexing(name, value, huffman?) when is_binary(name) do + [<<1::2, 0::6>>, Types.encode_binary(name, huffman?), Types.encode_binary(value, huffman?)] + end + + defp encode_literal_header_without_indexing(index, value, huffman?) when is_integer(index) do + [<<0::4, Types.encode_integer(index, 4)::bitstring>>, Types.encode_binary(value, huffman?)] + end + + defp encode_literal_header_without_indexing(name, value, huffman?) when is_binary(name) do + [<<0::4, 0::4>>, Types.encode_binary(name, huffman?), Types.encode_binary(value, huffman?)] + end + + defp encode_literal_header_never_indexed(index, value, huffman?) when is_integer(index) do + [<<1::4, Types.encode_integer(index, 4)::bitstring>>, Types.encode_binary(value, huffman?)] + end + + defp encode_literal_header_never_indexed(name, value, huffman?) when is_binary(name) do + [<<1::4, 0::4>>, Types.encode_binary(name, huffman?), Types.encode_binary(value, huffman?)] + end +end diff --git a/deps/hpax/lib/hpax/huffman.ex b/deps/hpax/lib/hpax/huffman.ex new file mode 100644 index 0000000..e337fab --- /dev/null +++ b/deps/hpax/lib/hpax/huffman.ex @@ -0,0 +1,94 @@ +defmodule HPAX.Huffman do + @moduledoc false + + import Bitwise, only: [>>>: 2] + + # This file is downloaded from the spec directly. 
+ # http://httpwg.org/specs/rfc7541.html#huffman.code + table_file = Path.absname("huffman_table", __DIR__) + @external_resource table_file + + entries = + Enum.map(File.stream!(table_file), fn line -> + [byte_value, bits, _hex, bit_count] = + line + |> case do + <> -> rest + "EOS " <> rest -> rest + _other -> line + end + |> String.replace(["|", "(", ")", "[", "]"], "") + |> String.split() + + byte_value = String.to_integer(byte_value) + bits = String.to_integer(bits, 2) + bit_count = String.to_integer(bit_count) + + {byte_value, bits, bit_count} + end) + + {regular_entries, [eos_entry]} = Enum.split(entries, -1) + {_eos_byte_value, eos_bits, eos_bit_count} = eos_entry + + ## Encoding + + @spec encode(binary()) :: binary() + def encode(binary) do + encode(binary, _acc = <<>>) + end + + for {byte_value, bits, bit_count} <- regular_entries do + defp encode(<>, acc) do + encode(rest, <>) + end + end + + defp encode(<<>>, acc) do + overflowing_bits = rem(bit_size(acc), 8) + + if overflowing_bits == 0 do + acc + else + bits_to_add = 8 - overflowing_bits + + value_of_bits_to_add = + take_significant_bits(unquote(eos_bits), unquote(eos_bit_count), bits_to_add) + + <> + end + end + + ## Decoding + + @spec decode(binary()) :: binary() + def decode(binary) + + for {byte_value, bits, bit_count} <- regular_entries do + def decode(<>) do + <> + end + end + + def decode(<<>>) do + <<>> + end + + # Use binary syntax for single match context optimization. 
+ def decode(<>) when bit_size(padding) in 1..7 do + padding_size = bit_size(padding) + <> = padding + + if take_significant_bits(unquote(eos_bits), unquote(eos_bit_count), padding_size) == padding do + <<>> + else + throw({:hpax, {:protocol_error, :invalid_huffman_encoding}}) + end + end + + ## Helpers + + @compile {:inline, take_significant_bits: 3} + defp take_significant_bits(value, bit_count, bits_to_take) do + value >>> (bit_count - bits_to_take) + end +end diff --git a/deps/hpax/lib/hpax/huffman_table b/deps/hpax/lib/hpax/huffman_table new file mode 100644 index 0000000..b116ba3 --- /dev/null +++ b/deps/hpax/lib/hpax/huffman_table @@ -0,0 +1,257 @@ +( 0) |11111111|11000 1ff8 [13] +( 1) |11111111|11111111|1011000 7fffd8 [23] +( 2) |11111111|11111111|11111110|0010 fffffe2 [28] +( 3) |11111111|11111111|11111110|0011 fffffe3 [28] +( 4) |11111111|11111111|11111110|0100 fffffe4 [28] +( 5) |11111111|11111111|11111110|0101 fffffe5 [28] +( 6) |11111111|11111111|11111110|0110 fffffe6 [28] +( 7) |11111111|11111111|11111110|0111 fffffe7 [28] +( 8) |11111111|11111111|11111110|1000 fffffe8 [28] +( 9) |11111111|11111111|11101010 ffffea [24] +( 10) |11111111|11111111|11111111|111100 3ffffffc [30] +( 11) |11111111|11111111|11111110|1001 fffffe9 [28] +( 12) |11111111|11111111|11111110|1010 fffffea [28] +( 13) |11111111|11111111|11111111|111101 3ffffffd [30] +( 14) |11111111|11111111|11111110|1011 fffffeb [28] +( 15) |11111111|11111111|11111110|1100 fffffec [28] +( 16) |11111111|11111111|11111110|1101 fffffed [28] +( 17) |11111111|11111111|11111110|1110 fffffee [28] +( 18) |11111111|11111111|11111110|1111 fffffef [28] +( 19) |11111111|11111111|11111111|0000 ffffff0 [28] +( 20) |11111111|11111111|11111111|0001 ffffff1 [28] +( 21) |11111111|11111111|11111111|0010 ffffff2 [28] +( 22) |11111111|11111111|11111111|111110 3ffffffe [30] +( 23) |11111111|11111111|11111111|0011 ffffff3 [28] +( 24) |11111111|11111111|11111111|0100 ffffff4 [28] +( 25) |11111111|11111111|11111111|0101 
ffffff5 [28] +( 26) |11111111|11111111|11111111|0110 ffffff6 [28] +( 27) |11111111|11111111|11111111|0111 ffffff7 [28] +( 28) |11111111|11111111|11111111|1000 ffffff8 [28] +( 29) |11111111|11111111|11111111|1001 ffffff9 [28] +( 30) |11111111|11111111|11111111|1010 ffffffa [28] +( 31) |11111111|11111111|11111111|1011 ffffffb [28] +' ' ( 32) |010100 14 [ 6] +'!' ( 33) |11111110|00 3f8 [10] +'"' ( 34) |11111110|01 3f9 [10] +'#' ( 35) |11111111|1010 ffa [12] +'$' ( 36) |11111111|11001 1ff9 [13] +'%' ( 37) |010101 15 [ 6] +'&' ( 38) |11111000 f8 [ 8] +''' ( 39) |11111111|010 7fa [11] +'(' ( 40) |11111110|10 3fa [10] +')' ( 41) |11111110|11 3fb [10] +'*' ( 42) |11111001 f9 [ 8] +'+' ( 43) |11111111|011 7fb [11] +',' ( 44) |11111010 fa [ 8] +'-' ( 45) |010110 16 [ 6] +'.' ( 46) |010111 17 [ 6] +'/' ( 47) |011000 18 [ 6] +'0' ( 48) |00000 0 [ 5] +'1' ( 49) |00001 1 [ 5] +'2' ( 50) |00010 2 [ 5] +'3' ( 51) |011001 19 [ 6] +'4' ( 52) |011010 1a [ 6] +'5' ( 53) |011011 1b [ 6] +'6' ( 54) |011100 1c [ 6] +'7' ( 55) |011101 1d [ 6] +'8' ( 56) |011110 1e [ 6] +'9' ( 57) |011111 1f [ 6] +':' ( 58) |1011100 5c [ 7] +';' ( 59) |11111011 fb [ 8] +'<' ( 60) |11111111|1111100 7ffc [15] +'=' ( 61) |100000 20 [ 6] +'>' ( 62) |11111111|1011 ffb [12] +'?' 
( 63) |11111111|00 3fc [10] +'@' ( 64) |11111111|11010 1ffa [13] +'A' ( 65) |100001 21 [ 6] +'B' ( 66) |1011101 5d [ 7] +'C' ( 67) |1011110 5e [ 7] +'D' ( 68) |1011111 5f [ 7] +'E' ( 69) |1100000 60 [ 7] +'F' ( 70) |1100001 61 [ 7] +'G' ( 71) |1100010 62 [ 7] +'H' ( 72) |1100011 63 [ 7] +'I' ( 73) |1100100 64 [ 7] +'J' ( 74) |1100101 65 [ 7] +'K' ( 75) |1100110 66 [ 7] +'L' ( 76) |1100111 67 [ 7] +'M' ( 77) |1101000 68 [ 7] +'N' ( 78) |1101001 69 [ 7] +'O' ( 79) |1101010 6a [ 7] +'P' ( 80) |1101011 6b [ 7] +'Q' ( 81) |1101100 6c [ 7] +'R' ( 82) |1101101 6d [ 7] +'S' ( 83) |1101110 6e [ 7] +'T' ( 84) |1101111 6f [ 7] +'U' ( 85) |1110000 70 [ 7] +'V' ( 86) |1110001 71 [ 7] +'W' ( 87) |1110010 72 [ 7] +'X' ( 88) |11111100 fc [ 8] +'Y' ( 89) |1110011 73 [ 7] +'Z' ( 90) |11111101 fd [ 8] +'[' ( 91) |11111111|11011 1ffb [13] +'\' ( 92) |11111111|11111110|000 7fff0 [19] +']' ( 93) |11111111|11100 1ffc [13] +'^' ( 94) |11111111|111100 3ffc [14] +'_' ( 95) |100010 22 [ 6] +'`' ( 96) |11111111|1111101 7ffd [15] +'a' ( 97) |00011 3 [ 5] +'b' ( 98) |100011 23 [ 6] +'c' ( 99) |00100 4 [ 5] +'d' (100) |100100 24 [ 6] +'e' (101) |00101 5 [ 5] +'f' (102) |100101 25 [ 6] +'g' (103) |100110 26 [ 6] +'h' (104) |100111 27 [ 6] +'i' (105) |00110 6 [ 5] +'j' (106) |1110100 74 [ 7] +'k' (107) |1110101 75 [ 7] +'l' (108) |101000 28 [ 6] +'m' (109) |101001 29 [ 6] +'n' (110) |101010 2a [ 6] +'o' (111) |00111 7 [ 5] +'p' (112) |101011 2b [ 6] +'q' (113) |1110110 76 [ 7] +'r' (114) |101100 2c [ 6] +'s' (115) |01000 8 [ 5] +'t' (116) |01001 9 [ 5] +'u' (117) |101101 2d [ 6] +'v' (118) |1110111 77 [ 7] +'w' (119) |1111000 78 [ 7] +'x' (120) |1111001 79 [ 7] +'y' (121) |1111010 7a [ 7] +'z' (122) |1111011 7b [ 7] +'{' (123) |11111111|1111110 7ffe [15] +'|' (124) |11111111|100 7fc [11] +'}' (125) |11111111|111101 3ffd [14] +'~' (126) |11111111|11101 1ffd [13] +(127) |11111111|11111111|11111111|1100 ffffffc [28] +(128) |11111111|11111110|0110 fffe6 [20] +(129) |11111111|11111111|010010 3fffd2 
[22] +(130) |11111111|11111110|0111 fffe7 [20] +(131) |11111111|11111110|1000 fffe8 [20] +(132) |11111111|11111111|010011 3fffd3 [22] +(133) |11111111|11111111|010100 3fffd4 [22] +(134) |11111111|11111111|010101 3fffd5 [22] +(135) |11111111|11111111|1011001 7fffd9 [23] +(136) |11111111|11111111|010110 3fffd6 [22] +(137) |11111111|11111111|1011010 7fffda [23] +(138) |11111111|11111111|1011011 7fffdb [23] +(139) |11111111|11111111|1011100 7fffdc [23] +(140) |11111111|11111111|1011101 7fffdd [23] +(141) |11111111|11111111|1011110 7fffde [23] +(142) |11111111|11111111|11101011 ffffeb [24] +(143) |11111111|11111111|1011111 7fffdf [23] +(144) |11111111|11111111|11101100 ffffec [24] +(145) |11111111|11111111|11101101 ffffed [24] +(146) |11111111|11111111|010111 3fffd7 [22] +(147) |11111111|11111111|1100000 7fffe0 [23] +(148) |11111111|11111111|11101110 ffffee [24] +(149) |11111111|11111111|1100001 7fffe1 [23] +(150) |11111111|11111111|1100010 7fffe2 [23] +(151) |11111111|11111111|1100011 7fffe3 [23] +(152) |11111111|11111111|1100100 7fffe4 [23] +(153) |11111111|11111110|11100 1fffdc [21] +(154) |11111111|11111111|011000 3fffd8 [22] +(155) |11111111|11111111|1100101 7fffe5 [23] +(156) |11111111|11111111|011001 3fffd9 [22] +(157) |11111111|11111111|1100110 7fffe6 [23] +(158) |11111111|11111111|1100111 7fffe7 [23] +(159) |11111111|11111111|11101111 ffffef [24] +(160) |11111111|11111111|011010 3fffda [22] +(161) |11111111|11111110|11101 1fffdd [21] +(162) |11111111|11111110|1001 fffe9 [20] +(163) |11111111|11111111|011011 3fffdb [22] +(164) |11111111|11111111|011100 3fffdc [22] +(165) |11111111|11111111|1101000 7fffe8 [23] +(166) |11111111|11111111|1101001 7fffe9 [23] +(167) |11111111|11111110|11110 1fffde [21] +(168) |11111111|11111111|1101010 7fffea [23] +(169) |11111111|11111111|011101 3fffdd [22] +(170) |11111111|11111111|011110 3fffde [22] +(171) |11111111|11111111|11110000 fffff0 [24] +(172) |11111111|11111110|11111 1fffdf [21] +(173) |11111111|11111111|011111 3fffdf 
[22] +(174) |11111111|11111111|1101011 7fffeb [23] +(175) |11111111|11111111|1101100 7fffec [23] +(176) |11111111|11111111|00000 1fffe0 [21] +(177) |11111111|11111111|00001 1fffe1 [21] +(178) |11111111|11111111|100000 3fffe0 [22] +(179) |11111111|11111111|00010 1fffe2 [21] +(180) |11111111|11111111|1101101 7fffed [23] +(181) |11111111|11111111|100001 3fffe1 [22] +(182) |11111111|11111111|1101110 7fffee [23] +(183) |11111111|11111111|1101111 7fffef [23] +(184) |11111111|11111110|1010 fffea [20] +(185) |11111111|11111111|100010 3fffe2 [22] +(186) |11111111|11111111|100011 3fffe3 [22] +(187) |11111111|11111111|100100 3fffe4 [22] +(188) |11111111|11111111|1110000 7ffff0 [23] +(189) |11111111|11111111|100101 3fffe5 [22] +(190) |11111111|11111111|100110 3fffe6 [22] +(191) |11111111|11111111|1110001 7ffff1 [23] +(192) |11111111|11111111|11111000|00 3ffffe0 [26] +(193) |11111111|11111111|11111000|01 3ffffe1 [26] +(194) |11111111|11111110|1011 fffeb [20] +(195) |11111111|11111110|001 7fff1 [19] +(196) |11111111|11111111|100111 3fffe7 [22] +(197) |11111111|11111111|1110010 7ffff2 [23] +(198) |11111111|11111111|101000 3fffe8 [22] +(199) |11111111|11111111|11110110|0 1ffffec [25] +(200) |11111111|11111111|11111000|10 3ffffe2 [26] +(201) |11111111|11111111|11111000|11 3ffffe3 [26] +(202) |11111111|11111111|11111001|00 3ffffe4 [26] +(203) |11111111|11111111|11111011|110 7ffffde [27] +(204) |11111111|11111111|11111011|111 7ffffdf [27] +(205) |11111111|11111111|11111001|01 3ffffe5 [26] +(206) |11111111|11111111|11110001 fffff1 [24] +(207) |11111111|11111111|11110110|1 1ffffed [25] +(208) |11111111|11111110|010 7fff2 [19] +(209) |11111111|11111111|00011 1fffe3 [21] +(210) |11111111|11111111|11111001|10 3ffffe6 [26] +(211) |11111111|11111111|11111100|000 7ffffe0 [27] +(212) |11111111|11111111|11111100|001 7ffffe1 [27] +(213) |11111111|11111111|11111001|11 3ffffe7 [26] +(214) |11111111|11111111|11111100|010 7ffffe2 [27] +(215) |11111111|11111111|11110010 fffff2 [24] +(216) 
|11111111|11111111|00100 1fffe4 [21] +(217) |11111111|11111111|00101 1fffe5 [21] +(218) |11111111|11111111|11111010|00 3ffffe8 [26] +(219) |11111111|11111111|11111010|01 3ffffe9 [26] +(220) |11111111|11111111|11111111|1101 ffffffd [28] +(221) |11111111|11111111|11111100|011 7ffffe3 [27] +(222) |11111111|11111111|11111100|100 7ffffe4 [27] +(223) |11111111|11111111|11111100|101 7ffffe5 [27] +(224) |11111111|11111110|1100 fffec [20] +(225) |11111111|11111111|11110011 fffff3 [24] +(226) |11111111|11111110|1101 fffed [20] +(227) |11111111|11111111|00110 1fffe6 [21] +(228) |11111111|11111111|101001 3fffe9 [22] +(229) |11111111|11111111|00111 1fffe7 [21] +(230) |11111111|11111111|01000 1fffe8 [21] +(231) |11111111|11111111|1110011 7ffff3 [23] +(232) |11111111|11111111|101010 3fffea [22] +(233) |11111111|11111111|101011 3fffeb [22] +(234) |11111111|11111111|11110111|0 1ffffee [25] +(235) |11111111|11111111|11110111|1 1ffffef [25] +(236) |11111111|11111111|11110100 fffff4 [24] +(237) |11111111|11111111|11110101 fffff5 [24] +(238) |11111111|11111111|11111010|10 3ffffea [26] +(239) |11111111|11111111|1110100 7ffff4 [23] +(240) |11111111|11111111|11111010|11 3ffffeb [26] +(241) |11111111|11111111|11111100|110 7ffffe6 [27] +(242) |11111111|11111111|11111011|00 3ffffec [26] +(243) |11111111|11111111|11111011|01 3ffffed [26] +(244) |11111111|11111111|11111100|111 7ffffe7 [27] +(245) |11111111|11111111|11111101|000 7ffffe8 [27] +(246) |11111111|11111111|11111101|001 7ffffe9 [27] +(247) |11111111|11111111|11111101|010 7ffffea [27] +(248) |11111111|11111111|11111101|011 7ffffeb [27] +(249) |11111111|11111111|11111111|1110 ffffffe [28] +(250) |11111111|11111111|11111101|100 7ffffec [27] +(251) |11111111|11111111|11111101|101 7ffffed [27] +(252) |11111111|11111111|11111101|110 7ffffee [27] +(253) |11111111|11111111|11111101|111 7ffffef [27] +(254) |11111111|11111111|11111110|000 7fffff0 [27] +(255) |11111111|11111111|11111011|10 3ffffee [26] +EOS (256) 
|11111111|11111111|11111111|111111 3fffffff [30] diff --git a/deps/hpax/lib/hpax/table.ex b/deps/hpax/lib/hpax/table.ex new file mode 100644 index 0000000..8873b45 --- /dev/null +++ b/deps/hpax/lib/hpax/table.ex @@ -0,0 +1,348 @@ +defmodule HPAX.Table do + @moduledoc false + + @enforce_keys [:max_table_size, :huffman_encoding] + defstruct [ + :protocol_max_table_size, + :max_table_size, + :huffman_encoding, + entries: [], + size: 0, + length: 0, + pending_minimum_resize: nil + ] + + @type huffman_encoding() :: :always | :never + + @type t() :: %__MODULE__{ + protocol_max_table_size: non_neg_integer(), + max_table_size: non_neg_integer(), + huffman_encoding: huffman_encoding(), + entries: [{binary(), binary()}], + size: non_neg_integer(), + length: non_neg_integer(), + pending_minimum_resize: non_neg_integer() | nil + } + + @static_table [ + {":authority", nil}, + {":method", "GET"}, + {":method", "POST"}, + {":path", "/"}, + {":path", "/index.html"}, + {":scheme", "http"}, + {":scheme", "https"}, + {":status", "200"}, + {":status", "204"}, + {":status", "206"}, + {":status", "304"}, + {":status", "400"}, + {":status", "404"}, + {":status", "500"}, + {"accept-charset", nil}, + {"accept-encoding", "gzip, deflate"}, + {"accept-language", nil}, + {"accept-ranges", nil}, + {"accept", nil}, + {"access-control-allow-origin", nil}, + {"age", nil}, + {"allow", nil}, + {"authorization", nil}, + {"cache-control", nil}, + {"content-disposition", nil}, + {"content-encoding", nil}, + {"content-language", nil}, + {"content-length", nil}, + {"content-location", nil}, + {"content-range", nil}, + {"content-type", nil}, + {"cookie", nil}, + {"date", nil}, + {"etag", nil}, + {"expect", nil}, + {"expires", nil}, + {"from", nil}, + {"host", nil}, + {"if-match", nil}, + {"if-modified-since", nil}, + {"if-none-match", nil}, + {"if-range", nil}, + {"if-unmodified-since", nil}, + {"last-modified", nil}, + {"link", nil}, + {"location", nil}, + {"max-forwards", nil}, + {"proxy-authenticate", 
nil}, + {"proxy-authorization", nil}, + {"range", nil}, + {"referer", nil}, + {"refresh", nil}, + {"retry-after", nil}, + {"server", nil}, + {"set-cookie", nil}, + {"strict-transport-security", nil}, + {"transfer-encoding", nil}, + {"user-agent", nil}, + {"vary", nil}, + {"via", nil}, + {"www-authenticate", nil} + ] + + @static_table_size length(@static_table) + @dynamic_table_start @static_table_size + 1 + + @doc """ + Creates a new HPACK table with the given maximum size. + + The maximum size is not the maximum number of entries but rather the maximum size as defined in + http://httpwg.org/specs/rfc7541.html#maximum.table.size. + """ + @spec new(non_neg_integer(), huffman_encoding()) :: t() + def new(protocol_max_table_size, huffman_encoding) + when is_integer(protocol_max_table_size) and protocol_max_table_size >= 0 and + huffman_encoding in [:always, :never] do + %__MODULE__{ + protocol_max_table_size: protocol_max_table_size, + max_table_size: protocol_max_table_size, + huffman_encoding: huffman_encoding + } + end + + @doc """ + Adds the given header to the given table. + + If the new entry does not fit within the max table size then the oldest entries will be evicted. + + Header names should be lowercase when added to the HPACK table + as per the [HTTP/2 spec](https://http2.github.io/http2-spec/#rfc.section.8.1.2): + + > header field names MUST be converted to lowercase prior to their encoding in HTTP/2 + + """ + @spec add(t(), binary(), binary()) :: t() + def add(%__MODULE__{} = table, name, value) do + %{max_table_size: max_table_size, size: size} = table + entry_size = entry_size(name, value) + + cond do + # An attempt to add an entry larger than the maximum size causes the table to be emptied of + # all existing entries and results in an empty table. 
+ entry_size > max_table_size -> + %{table | entries: [], size: 0, length: 0} + + size + entry_size > max_table_size -> + table + |> evict_to_size(max_table_size - entry_size) + |> add_header(name, value, entry_size) + + true -> + add_header(table, name, value, entry_size) + end + end + + defp add_header(%__MODULE__{} = table, name, value, entry_size) do + %{entries: entries, size: size, length: length} = table + %{table | entries: [{name, value} | entries], size: size + entry_size, length: length + 1} + end + + @doc """ + Looks up a header by index `index` in the given `table`. + + Returns `{:ok, {name, value}}` if a header is found at the given `index`, otherwise returns + `:error`. `value` can be a binary in case both the header name and value are present in the + table, or `nil` if only the name is present (this can only happen in the static table). + """ + @spec lookup_by_index(t(), pos_integer()) :: {:ok, {binary(), binary() | nil}} | :error + def lookup_by_index(table, index) + + # Static table + for {header, index} <- Enum.with_index(@static_table, 1) do + def lookup_by_index(%__MODULE__{}, unquote(index)), do: {:ok, unquote(header)} + end + + def lookup_by_index(%__MODULE__{length: 0}, _index) do + :error + end + + def lookup_by_index(%__MODULE__{entries: entries, length: length}, index) + when index >= @dynamic_table_start and index <= @dynamic_table_start + length - 1 do + {:ok, Enum.at(entries, index - @dynamic_table_start)} + end + + def lookup_by_index(%__MODULE__{}, _index) do + :error + end + + @doc """ + Looks up the index of a header by its name and value. 
+ + It returns: + + * `{:full, index}` if the full header (name and value) are present in the table at `index` + + * `{:name, index}` if `name` is present in the table but with a different value than `value` + + * `:not_found` if the header name is not in the table at all + + Header names should be lowercase when looked up in the HPACK table + as per the [HTTP/2 spec](https://http2.github.io/http2-spec/#rfc.section.8.1.2): + + > header field names MUST be converted to lowercase prior to their encoding in HTTP/2 + + """ + @spec lookup_by_header(t(), binary(), binary() | nil) :: + {:full, pos_integer()} | {:name, pos_integer()} | :not_found + def lookup_by_header(table, name, value) + + def lookup_by_header(%__MODULE__{entries: entries}, name, value) do + case static_lookup_by_header(name, value) do + {:full, _index} = result -> + result + + {:name, index} -> + # Check if we get full match in the dynamic tabble + case dynamic_lookup_by_header(entries, name, value, @dynamic_table_start, nil) do + {:full, _index} = result -> result + _other -> {:name, index} + end + + :not_found -> + dynamic_lookup_by_header(entries, name, value, @dynamic_table_start, nil) + end + end + + for {{name, value}, index} when is_binary(value) <- Enum.with_index(@static_table, 1) do + defp static_lookup_by_header(unquote(name), unquote(value)) do + {:full, unquote(index)} + end + end + + static_table_names = + @static_table + |> Enum.map(&elem(&1, 0)) + |> Enum.with_index(1) + |> Enum.uniq_by(&elem(&1, 0)) + + for {name, index} <- static_table_names do + defp static_lookup_by_header(unquote(name), _value) do + {:name, unquote(index)} + end + end + + defp static_lookup_by_header(_name, _value) do + :not_found + end + + defp dynamic_lookup_by_header([{name, value} | _rest], name, value, index, _name_index) do + {:full, index} + end + + defp dynamic_lookup_by_header([{name, _} | rest], name, value, index, _name_index) do + dynamic_lookup_by_header(rest, name, value, index + 1, index) + end + + 
defp dynamic_lookup_by_header([_other | rest], name, value, index, name_index) do + dynamic_lookup_by_header(rest, name, value, index + 1, name_index) + end + + defp dynamic_lookup_by_header([], _name, _value, _index, name_index) do + if name_index, do: {:name, name_index}, else: :not_found + end + + @doc """ + Changes the table's protocol negotiated maximum size, possibly evicting entries as needed to satisfy. + + If the indicated size is less than the table's current max size, entries + will be evicted as needed to fit within the specified size, and the table's + maximum size will be decreased to the specified value. An will also be + set which will enqueue a 'dynamic table size update' command to be prefixed + to the next block encoded with this table, per RFC9113§4.3.1. + + If the indicated size is greater than or equal to the table's current max size, no entries are evicted + and the table's maximum size changes to the specified value. + + In all cases, the table's `:protocol_max_table_size` is updated accordingly + """ + @spec resize(t(), non_neg_integer()) :: t() + def resize(%__MODULE__{} = table, new_protocol_max_table_size) do + pending_minimum_resize = + case table.pending_minimum_resize do + nil -> new_protocol_max_table_size + current -> min(current, new_protocol_max_table_size) + end + + %{ + evict_to_size(table, new_protocol_max_table_size) + | protocol_max_table_size: new_protocol_max_table_size, + max_table_size: new_protocol_max_table_size, + pending_minimum_resize: pending_minimum_resize + } + end + + def dynamic_resize(%__MODULE__{} = table, new_max_table_size) do + %{ + evict_to_size(table, new_max_table_size) + | max_table_size: new_max_table_size + } + end + + @doc """ + Returns (and clears) any pending resize events on the table which will need to be signalled to + the decoder via dynamic table size update messages. Intended to be called at the start of any + block encode to prepend such dynamic table size update(s) as needed. 
The value of + `pending_minimum_resize` indicates the smallest maximum size of this table which has not yet + been signalled to the decoder, and is always included in the list returned if it is set. + Additionally, if the current max table size is larger than this value, it is also included int + the list, per https://www.rfc-editor.org/rfc/rfc7541#section-4.2 + """ + def pop_pending_resizes(%__MODULE__{pending_minimum_resize: nil} = table), do: {table, []} + + def pop_pending_resizes(%__MODULE__{} = table) do + pending_resizes = + if table.max_table_size > table.pending_minimum_resize, + do: [table.pending_minimum_resize, table.max_table_size], + else: [table.pending_minimum_resize] + + {%{table | pending_minimum_resize: nil}, pending_resizes} + end + + # Removes records as necessary to have the total size of entries within the table be less than + # or equal to the specified value. Does not change the table's max size. + defp evict_to_size(%__MODULE__{size: size} = table, new_size) when size <= new_size, do: table + + defp evict_to_size(%__MODULE__{entries: entries, size: size} = table, new_size) do + {new_entries_reversed, new_size} = + evict_towards_size(Enum.reverse(entries), size, new_size) + + %{ + table + | entries: Enum.reverse(new_entries_reversed), + size: new_size, + length: length(new_entries_reversed) + } + end + + defp evict_towards_size([{name, value} | rest], size, max_target_size) do + new_size = size - entry_size(name, value) + + if new_size <= max_target_size do + {rest, new_size} + else + evict_towards_size(rest, new_size, max_target_size) + end + end + + defp evict_towards_size([], 0, _max_target_size) do + {[], 0} + end + + defp entry_size(name, value) do + byte_size(name) + byte_size(value) + 32 + end + + # Made public to be used in tests. 
+ @doc false + def __static_table__() do + @static_table + end +end diff --git a/deps/hpax/lib/hpax/types.ex b/deps/hpax/lib/hpax/types.ex new file mode 100644 index 0000000..8106555 --- /dev/null +++ b/deps/hpax/lib/hpax/types.ex @@ -0,0 +1,89 @@ +defmodule HPAX.Types do + @moduledoc false + + import Bitwise, only: [<<<: 2] + + alias HPAX.Huffman + + # This is used as a macro and not an inlined function because we want to be able to use it in + # guards. + defmacrop power_of_two(n) do + quote do: 1 <<< unquote(n) + end + + ## Encoding + + @spec encode_integer(non_neg_integer(), 1..8) :: bitstring() + def encode_integer(integer, prefix) + + def encode_integer(integer, prefix) when integer < power_of_two(prefix) - 1 do + <> + end + + def encode_integer(integer, prefix) do + initial = power_of_two(prefix) - 1 + remaining = integer - initial + <> + end + + defp encode_remaining_integer(remaining) when remaining >= 128 do + first = rem(remaining, 128) + 128 + <> + end + + defp encode_remaining_integer(remaining) do + <> + end + + @spec encode_binary(binary(), boolean()) :: iodata() + def encode_binary(binary, huffman?) 
do + binary = if huffman?, do: Huffman.encode(binary), else: binary + huffman_bit = if huffman?, do: 1, else: 0 + binary_size = encode_integer(byte_size(binary), 7) + [<>, binary] + end + + ## Decoding + + @spec decode_integer(bitstring, 1..8) :: {:ok, non_neg_integer(), binary()} | :error + def decode_integer(bitstring, prefix) when is_bitstring(bitstring) and prefix in 1..8 do + with <> <- bitstring do + if value < power_of_two(prefix) - 1 do + {:ok, value, rest} + else + decode_remaining_integer(rest, value, 0) + end + else + _ -> :error + end + end + + defp decode_remaining_integer(<<0::1, value::7, rest::binary>>, int, m) do + {:ok, int + (value <<< m), rest} + end + + defp decode_remaining_integer(<<1::1, value::7, rest::binary>>, int, m) do + decode_remaining_integer(rest, int + (value <<< m), m + 7) + end + + defp decode_remaining_integer(_, _, _) do + :error + end + + @spec decode_binary(binary) :: {:ok, binary(), binary()} | :error + def decode_binary(binary) when is_binary(binary) do + with <> <- binary, + {:ok, length, rest} <- decode_integer(rest, 7), + <> <- rest do + contents = + case huffman_bit do + 0 -> contents + 1 -> Huffman.decode(contents) + end + + {:ok, contents, rest} + else + _ -> :error + end + end +end diff --git a/deps/hpax/mix.exs b/deps/hpax/mix.exs new file mode 100644 index 0000000..a7ddaed --- /dev/null +++ b/deps/hpax/mix.exs @@ -0,0 +1,54 @@ +defmodule HPAX.MixProject do + use Mix.Project + + @version "1.0.3" + @repo_url "https://github.com/elixir-mint/hpax" + + def project do + [ + app: :hpax, + version: @version, + elixir: "~> 1.12", + start_permanent: Mix.env() == :prod, + deps: deps(), + + # Tests + test_coverage: [tool: ExCoveralls], + + # Hex + package: package(), + description: "Implementation of the HPACK protocol (RFC 7541) for Elixir", + + # Docs + name: "HPAX", + docs: [ + source_ref: "v#{@version}", + source_url: @repo_url + ] + ] + end + + def application do + [ + extra_applications: [] + ] + end + + defp deps do + [ 
+ {:ex_doc, "~> 0.34", only: :dev}, + {:hpack, ">= 0.0.0", hex: :hpack_erl, only: :test}, + {:stream_data, "~> 1.0", only: [:dev, :test]}, + {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false}, + {:excoveralls, "~> 0.18", only: :test}, + {:castore, "~> 1.0", only: :test} + ] + end + + defp package do + [ + licenses: ["Apache-2.0"], + links: %{"GitHub" => @repo_url} + ] + end +end diff --git a/deps/idna/.hex b/deps/idna/.hex new file mode 100644 index 0000000..b84567b Binary files /dev/null and b/deps/idna/.hex differ diff --git a/deps/idna/CHANGELOG b/deps/idna/CHANGELOG new file mode 100644 index 0000000..d21de4c --- /dev/null +++ b/deps/idna/CHANGELOG @@ -0,0 +1,29 @@ +# CHANGELOG + +== 6.1.1 - 2020-12-06 + +- fix license information + +== 6.1.0 - 2020-12-05 + +- update to Unicode 13.0.0 +- bump unicode_util_compat to 0.7.0 +- remove support of Erlang < 19.3 +- remove support of rebar 2 + +== 6.0.1 - 2020-05-14 + +- bump to unicode_compat 0.5.0 + +== 6.0.0 - 2018-08-30 + +- IDNA 2008 support [RFC5981](https://tools.ietf.org/html/rfc5891) +- International Domain Name validation +- fix [Punycode](https://tools.ietf.org/html/rfc3492) algorithm + +Breaking changes: +- `idna:to_ascii/1` in 5.1.x did not encode or enforce rules if the input is already all ascii + +== 5.1.2 - 2018-06-09 + +- support build with rebar 2 diff --git a/deps/idna/LICENSE b/deps/idna/LICENSE new file mode 100644 index 0000000..a6bf242 --- /dev/null +++ b/deps/idna/LICENSE @@ -0,0 +1,22 @@ +Copyright the authors and contributors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/idna/NOTICE b/deps/idna/NOTICE new file mode 100644 index 0000000..f2e21e4 --- /dev/null +++ b/deps/idna/NOTICE @@ -0,0 +1,11 @@ +This file is part of erlang-idna released under the MIT license. +See the LICENSE for more information + +Copyright 2014-2020 Benoît Chesneau +Copyright 2009-2014 Tim Fletcher + +Others: + +* idna_ucs.erl: +Under the Apache 2 license +Copyright Ericsson AB 2005-2016. All Rights Reserved \ No newline at end of file diff --git a/deps/idna/README.md b/deps/idna/README.md new file mode 100644 index 0000000..47cb6e4 --- /dev/null +++ b/deps/idna/README.md @@ -0,0 +1,71 @@ +## erlang-idna + +A pure Erlang IDNA implementation that folllow the [RFC5891](https://tools.ietf.org/html/rfc5891). + +* support IDNA 2008 and IDNA 2003. 
+* label validation: + - [x] **check NFC**: Label must be in Normalization Form C + - [x] **check hyphen**: The Unicode string MUST NOT contain "--" (two consecutive hyphens) in + the third and fourth character positions and MUST NOT start or end + with a "-" (hyphen). + - [x] **Leading Combining Marks**: The Unicode string MUST NOT begin with a combining mark or combining character (see The Unicode Standard, Section 2.11 [Unicode](https://tools.ietf.org/html/rfc5891#ref-Unicode) for an exact definition). + - [x] **Contextual Rules**: The Unicode string MUST NOT contain any characters whose validity is + context-dependent, unless the validity is positively confirmed by a contextual rule. To check this, each code point identified as CONTEXTJ or CONTEXTO in the Tables document [RFC5892](https://tools.ietf.org/html/rfc5892#section-2.7) MUST have a non-null rule. If such a code point is missing a rule, the label is invalid. If the rule exists but the result of applying the rule is negative or inconclusive, the proposed label is invalid. + - [x] **check BIDI**: label contains any characters from scripts that are + written from right to left, it MUST meet the Bidi criteria [rfc5893](https://tools.ietf.org/html/rfc5893) + + + + +## Usage + + + +`idna:encode/{1,2}` and `idna:decode/{1, 2}` functions are used to encode or decode an Internationalized Domain +Names using IDNA protocol. + +Input can be mapped to unicode using [uts46](https://unicode.org/reports/tr46/#Introduction) +by setting the `uts46` flag to true (default is false). If transition from IDNA 2003 to +IDNA 2008 is needed, the flag `transitional` can be set to `true`, (`default` is false). If +conformance to STD3 is needed, the flag `std3_rules` can be set to true. (default is `false`). + +example: + +```erlang +1> idna:encode("日本語。JP", [uts46]). +"xn--wgv71a119e.xn--jp-" +2> idna:encode("日本語.JP", [uts46]). +"xn--wgv71a119e.xn--jp-" +... 
+``` + + +Legacy support of IDNA 2003 is also available with `to_ascii` and `to_unicode` functions: + + +```erlang +1> Domain = "www.詹姆斯.com". +[119,119,119,46,35449,22982,26031,46,99,111,109] +2> Encoded = idna:to_ascii("www.詹姆斯.com"). +"www.xn--8ws00zhy3a.com" +3> idna:to_unicode(Encoded). +[119,119,119,46,35449,22982,26031,46,99,111,109] +``` + + + +Update Unicode data + +wget -O test/IdnaTestV2.txt https://www.unicode.org/Public/idna/latest/IdnaTestV2.txt +wget -O uc_spec/ArabicShaping.txt https://www.unicode.org/Public/UNIDATA/ArabicShaping.txt +wget -O uc_spec/IdnaMappingTable.txt https://www.unicode.org/Public/idna/latest/IdnaMappingTable.txt +wget -O uc_spec/Scripts.txt https://www.unicode.org/Public/UNIDATA/Scripts.txt +wget -O uc_spec/UnicodeData.txt https://www.unicode.org/Public/UNIDATA/UnicodeData.txt + +git clone https://github.com/kjd/idna.git +./idna/tools/idna-data make-table --version 13.0.0 > uc_spec/idna-table.txt + +cd uc_spec +./gen_idnadata_mod.escript +./gen_idna_table_mod.escript +./gen_idna_mapping_mod.escript diff --git a/deps/idna/_build/prod/lib/.rebar3/rebar_compiler_erl/source.dag b/deps/idna/_build/prod/lib/.rebar3/rebar_compiler_erl/source.dag new file mode 100644 index 0000000..84c194d Binary files /dev/null and b/deps/idna/_build/prod/lib/.rebar3/rebar_compiler_erl/source.dag differ diff --git a/deps/idna/hex_metadata.config b/deps/idna/hex_metadata.config new file mode 100644 index 0000000..7eb1a3b --- /dev/null +++ b/deps/idna/hex_metadata.config @@ -0,0 +1,19 @@ +{<<"app">>,<<"idna">>}. +{<<"build_tools">>,[<<"rebar3">>]}. +{<<"description">>,<<"A pure Erlang IDNA implementation">>}. 
+{<<"files">>, + [<<"CHANGELOG">>,<<"LICENSE">>,<<"NOTICE">>,<<"README.md">>, + <<"rebar.config">>,<<"rebar.lock">>,<<"src/idna.app.src">>, + <<"src/idna.erl">>,<<"src/idna_bidi.erl">>,<<"src/idna_context.erl">>, + <<"src/idna_data.erl">>,<<"src/idna_logger.hrl">>, + <<"src/idna_mapping.erl">>,<<"src/idna_table.erl">>,<<"src/idna_ucs.erl">>, + <<"src/punycode.erl">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>,[{<<"Github">>,<<"https://github.com/benoitc/erlang-idna">>}]}. +{<<"name">>,<<"idna">>}. +{<<"requirements">>, + [{<<"unicode_util_compat">>, + [{<<"app">>,<<"unicode_util_compat">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~>0.7.0">>}]}]}. +{<<"version">>,<<"6.1.1">>}. diff --git a/deps/idna/rebar.config b/deps/idna/rebar.config new file mode 100644 index 0000000..3af59e3 --- /dev/null +++ b/deps/idna/rebar.config @@ -0,0 +1,3 @@ +{erl_opts, []}. + +{deps, [{unicode_util_compat, "~>0.7.0"}]}. diff --git a/deps/idna/rebar.lock b/deps/idna/rebar.lock new file mode 100644 index 0000000..8813028 --- /dev/null +++ b/deps/idna/rebar.lock @@ -0,0 +1,8 @@ +{"1.2.0", +[{<<"unicode_util_compat">>,{pkg,<<"unicode_util_compat">>,<<"0.7.0">>},0}]}. +[ +{pkg_hash,[ + {<<"unicode_util_compat">>, <<"BC84380C9AB48177092F43AC89E4DFA2C6D62B40B8BD132B1059ECC7232F9A78">>}]}, +{pkg_hash_ext,[ + {<<"unicode_util_compat">>, <<"25EEE6D67DF61960CF6A794239566599B09E17E668D3700247BC498638152521">>}]} +]. diff --git a/deps/idna/src/idna.app.src b/deps/idna/src/idna.app.src new file mode 100644 index 0000000..b5eb0cb --- /dev/null +++ b/deps/idna/src/idna.app.src @@ -0,0 +1,8 @@ +{application,idna, + [{description,"A pure Erlang IDNA implementation"}, + {vsn,"6.1.1"}, + {modules,[]}, + {registered,[]}, + {applications,[kernel,stdlib,unicode_util_compat]}, + {licenses,["MIT"]}, + {links,[{"Github","https://github.com/benoitc/erlang-idna"}]}]}. 
diff --git a/deps/idna/src/idna.erl b/deps/idna/src/idna.erl new file mode 100644 index 0000000..61c115a --- /dev/null +++ b/deps/idna/src/idna.erl @@ -0,0 +1,421 @@ +%% -*- coding: utf-8 -*- +%%% +%%% This file is part of erlang-idna released under the MIT license. +%%% See the LICENSE for more information. +%%% +-module(idna). + +%% API +-export([encode/1, encode/2, + decode/1, decode/2]). + +%% compatibility API +-export([to_ascii/1, + to_unicode/1, + utf8_to_ascii/1, + from_ascii/1]). + + +-export([alabel/1, ulabel/1]). + +-export([check_hyphen/1, + check_nfc/1, + check_context/1, + check_initial_combiner/1, + check_label_length/1]). + +-export([check_label/1, check_label/4]). + +-define(ACE_PREFIX, "xn--"). + +-ifdef('OTP_RELEASE'). +-define(lower(C), string:lowercase(C)). +-else. +-define(lower(C), string:to_lower(C)). +-endif. + +-include("idna_logger.hrl"). + + +-type idna_flags() :: [{uts46, boolean()} | + {std3_rules, boolean()} | + {transitional, boolean()}]. + + + +%% @doc encode Internationalized Domain Names using IDNA protocol +-spec encode(string()) -> string(). +encode(Domain) -> + encode(Domain, []). + + +%% @doc encode Internationalized Domain Names using IDNA protocol. +%% Input can be mapped to unicode using [uts46](https://unicode.org/reports/tr46/#Introduction) +%% by setting the `uts46' flag to `true' (default is `false'). If transition from IDNA 2003 to +%% IDNA 2008 is needed, the flag `transitional' can be set to `true', (default is `false'). If +%% conformance to STD3 is needed, the flag `std3_rules' can be set to `true'. (default is `false'). +-spec encode(string(), idna_flags()) -> string(). 
+encode(Domain0, Options) -> + ok = validate_options(Options), + Domain = case proplists:get_value(uts46, Options, false) of + true -> + STD3Rules = proplists:get_value(std3_rules, Options, false), + Transitional = proplists:get_value(transitional, Options, false), + uts46_remap(Domain0, STD3Rules, Transitional); + false -> + Domain0 + end, + Labels = case proplists:get_value(strict, Options, false) of + false -> + re:split(Domain, "[.。.。]", [{return, list}, unicode]); + true -> + string:tokens(Domain, ".") + end, + case Labels of + [] -> exit(empty_domain); + _ -> + encode_1(Labels, []) + end. + +%% @doc decode an International Domain Name encoded with the IDNA protocol +-spec decode(string()) -> string(). +decode(Domain) -> + decode(Domain, []). + +%% @doc decode an International Domain Name encoded with the IDNA protocol +-spec decode(string(), idna_flags()) -> string(). +decode(Domain0, Options) -> + ok = validate_options(Options), + Domain = case proplists:get_value(uts46, Options, false) of + true -> + STD3Rules = proplists:get_value(std3_rules, Options, false), + Transitional = proplists:get_value(transitional, Options, false), + uts46_remap(Domain0, STD3Rules, Transitional); + false -> + Domain0 + end, + + Labels = case proplists:get_value(strict, Options, false) of + false -> + re:split(lowercase(Domain), "[.。.。]", [{return, list}, unicode]); + true -> + string:tokens(lowercase(Domain), ".") + end, + case Labels of + [] -> exit(empty_domain); + _ -> + decode_1(Labels, []) + end. + + +%% Compatibility API +%% + +%% @doc encode an International Domain Name to IDNA protocol (compatibility API) +-spec to_ascii(string()) -> string(). +to_ascii(Domain) -> encode(Domain). + +%% @doc decode an an encoded International Domain Name using the IDNA protocol (compatibility API) +-spec to_unicode(string()) -> string(). +to_unicode(Domain) -> decode(Domain). + + +utf8_to_ascii(Domain) -> + to_ascii(idna_ucs:from_utf8(Domain)). 
+ +%% @doc like `to_ascii/1' +-spec from_ascii(nonempty_string()) -> nonempty_string(). +from_ascii(Domain) -> + decode(Domain). + + +%% Helper functions +%% + +validate_options([]) -> ok; +validate_options([uts46|Rs]) -> validate_options(Rs); +validate_options([{uts46, B}|Rs]) when is_boolean(B) -> validate_options(Rs); +validate_options([strict|Rs]) -> validate_options(Rs); +validate_options([{strict, B}|Rs]) when is_boolean(B) -> validate_options(Rs); +validate_options([std3_rules|Rs]) -> validate_options(Rs); +validate_options([{std3_rules, B}|Rs]) when is_boolean(B) -> validate_options(Rs); +validate_options([transitional|Rs]) -> validate_options(Rs); +validate_options([{transitional, B}|Rs]) when is_boolean(B) -> validate_options(Rs); +validate_options([_]) -> erlang:error(badarg). + +encode_1([], Acc) -> + lists:reverse(Acc); +encode_1([Label|Labels], []) -> + encode_1(Labels, lists:reverse(alabel(Label))); +encode_1([Label|Labels], Acc) -> + encode_1(Labels, lists:reverse(alabel(Label), [$.|Acc])). + +check_nfc(Label) -> + case characters_to_nfc_list(Label) of + Label -> ok; + _ -> + erlang:exit({bad_label, {nfc, "Label must be in Normalization Form C"}}) + end. + +check_hyphen(Label) -> check_hyphen(Label, true). 
+ +check_hyphen(Label, true) when length(Label) >= 3 -> + case lists:nthtail(2, Label) of + [$-, $-|_] -> + ErrorMsg = error_msg("Label ~p has disallowed hyphens in 3rd and 4th position", [Label]), + erlang:exit({bad_label, {hyphen, ErrorMsg}}); + _ -> + case (lists:nth(1, Label) == $-) orelse (lists:last(Label) == $-) of + true -> + ErrorMsg = error_msg("Label ~p must not start or end with a hyphen", [Label]), + erlang:exit({bad_label, {hyphen, ErrorMsg}}); + false -> + ok + end + end; +check_hyphen(Label, true) -> + case (lists:nth(1, Label) == $-) orelse (lists:last(Label) == $-) of + true -> + ErrorMsg = error_msg("Label ~p must not start or end with a hyphen", [Label]), + erlang:exit({bad_label, {hyphen, ErrorMsg}}); + false -> + ok + end; +check_hyphen(_Label, false) -> + ok. + +check_initial_combiner([CP|_]) -> + case idna_data:lookup(CP) of + {[$M|_], _} -> + erlang:exit({bad_label, {initial_combiner, "Label begins with an illegal combining character"}}); + _ -> + ok + end. + +check_context(Label) -> + check_context(Label, Label, true, 0). + +check_context(Label, CheckJoiners) -> + check_context(Label, Label, CheckJoiners, 0). + +check_context([CP | Rest], Label, CheckJoiners, Pos) -> + case idna_table:lookup(CP) of + 'PVALID' -> + check_context(Rest, Label, CheckJoiners, Pos + 1); + 'CONTEXTJ' -> + ok = valid_contextj(CP, Label, Pos, CheckJoiners), + check_context(Rest, Label, CheckJoiners, Pos + 1); + 'CONTEXTO' -> + ok = valid_contexto(CP, Label, Pos, CheckJoiners), + check_context(Rest, Label, CheckJoiners, Pos + 1); + _Status -> + ErrorMsg = error_msg("Codepoint ~p not allowed (~p) at position ~p in ~p", [CP, _Status, Pos, Label]), + erlang:exit({bad_label, {context, ErrorMsg}}) + end; +check_context([], _, _, _) -> + ok. 
+ + +valid_contextj(CP, Label, Pos, true) -> + case idna_context:valid_contextj(CP, Label, Pos) of + true -> + ok; + false -> + ErrorMsg = error_msg("Joiner ~p not allowed at position ~p in ~p", [CP, Pos, Label]), + erlang:exit({bad_label, {contextj, ErrorMsg}}) + end; +valid_contextj(_CP, _Label, _Pos, false) -> + ok. + +valid_contexto(CP, Label, Pos, true) -> + case idna_context:valid_contexto(CP, Label, Pos) of + true -> + ok; + false -> + ErrorMsg = error_msg("Joiner ~p not allowed at position ~p in ~p", [CP, Pos, Label]), + erlang:exit({bad_label, {contexto, ErrorMsg}}) + end; +valid_contexto(_CP, _Label, _Pos, false) -> + ok. + + + +-spec check_label(string()) -> ok. +check_label(Label) -> + check_label(Label, true, true, true). + +%% @doc validate a label of a domain +-spec check_label(Label, CheckHyphens, CheckJoiners, CheckBidi) -> Result when + Label :: string(), + CheckHyphens :: boolean(), + CheckJoiners :: boolean(), + CheckBidi :: boolean(), + Result :: ok. +check_label(Label, CheckHyphens, CheckJoiners, CheckBidi) -> + ok = check_nfc(Label), + ok = check_hyphen(Label, CheckHyphens), + ok = check_initial_combiner(Label), + ok = check_context(Label, CheckJoiners), + ok = check_bidi(Label, CheckBidi), + ok. + + +check_bidi(Label, true) -> + idna_bidi:check_bidi(Label); +check_bidi(_, false) -> + ok. + +check_label_length(Label) when length(Label) > 63 -> + ErrorMsg = error_msg("The label ~p is too long", [Label]), + erlang:exit({bad_label, {too_long, ErrorMsg}}); +check_label_length(_) -> + ok. 
+ +alabel(Label0) -> + Label = case lists:all(fun(C) -> idna_ucs:is_ascii(C) end, Label0) of + true -> + _ = try ulabel(Label0) + catch + _:Error -> + ErrorMsg = error_msg("The label ~p is not a valid A-label: ulabel error=~p", [Label0, Error]), + erlang:exit({bad_label, {alabel, ErrorMsg}}) + end, + ok = check_label_length(Label0), + + Label0; + false -> + ok = check_label(Label0), + ?ACE_PREFIX ++ punycode:encode(Label0) + end, + ok = check_label_length(Label), + Label. + +decode_1([], Acc) -> + lists:reverse(Acc); +decode_1([Label|Labels], []) -> + decode_1(Labels, lists:reverse(ulabel(Label))); +decode_1([Label|Labels], Acc) -> + decode_1(Labels, lists:reverse(ulabel(Label), [$.|Acc])). + +ulabel([]) -> []; +ulabel(Label0) -> + Label = case lists:all(fun(C) -> idna_ucs:is_ascii(C) end, Label0) of + true -> + case Label0 of + [$x,$n,$-,$-|Label1] -> + punycode:decode(lowercase(Label1)); + _ -> + lowercase(Label0) + end; + false -> + lowercase(Label0) + end, + ok = check_label(Label), + Label. + +%% Lowercase all chars in Str +-spec lowercase(String::unicode:chardata()) -> unicode:chardata(). +lowercase(CD) when is_list(CD) -> + try lowercase_list(CD, false) + catch unchanged -> CD + end; +lowercase(<>=Orig) -> + try lowercase_bin(CP1, Rest, false) of + List -> unicode:characters_to_binary(List) + catch unchanged -> Orig + end; +lowercase(<<>>) -> + <<>>. + + +lowercase_list([CP1|[CP2|_]=Cont], _Changed) when $A =< CP1, CP1 =< $Z, CP2 < 256 -> + [CP1+32|lowercase_list(Cont, true)]; +lowercase_list([CP1|[CP2|_]=Cont], Changed) when CP1 < 128, CP2 < 256 -> + [CP1|lowercase_list(Cont, Changed)]; +lowercase_list([], true) -> + []; +lowercase_list([], false) -> + throw(unchanged); +lowercase_list(CPs0, Changed) -> + case unicode_util_compat:lowercase(CPs0) of + [Char|CPs] when Char =:= hd(CPs0) -> [Char|lowercase_list(CPs, Changed)]; + [Char|CPs] -> append(Char,lowercase_list(CPs, true)); + [] -> lowercase_list([], Changed) + end. 
+ +lowercase_bin(CP1, <>, _Changed) + when $A =< CP1, CP1 =< $Z, CP2 < 256 -> + [CP1+32|lowercase_bin(CP2, Bin, true)]; +lowercase_bin(CP1, <>, Changed) + when CP1 < 128, CP2 < 256 -> + [CP1|lowercase_bin(CP2, Bin, Changed)]; +lowercase_bin(CP1, Bin, Changed) -> + case unicode_util_compat:lowercase([CP1|Bin]) of + [CP1|CPs] -> + case unicode_util_compat:cp(CPs) of + [Next|Rest] -> + [CP1|lowercase_bin(Next, Rest, Changed)]; + [] when Changed -> + [CP1]; + [] -> + throw(unchanged) + end; + [Char|CPs] -> + case unicode_util_compat:cp(CPs) of + [Next|Rest] -> + [Char|lowercase_bin(Next, Rest, true)]; + [] -> + [Char] + end + end. + + +append(Char, <<>>) when is_integer(Char) -> [Char]; +append(Char, <<>>) when is_list(Char) -> Char; +append(Char, Bin) when is_binary(Bin) -> [Char,Bin]; +append(Char, Str) when is_integer(Char) -> [Char|Str]; +append(GC, Str) when is_list(GC) -> GC ++ Str. + + +characters_to_nfc_list(CD) -> + case unicode_util_compat:nfc(CD) of + [CPs|Str] when is_list(CPs) -> CPs ++ characters_to_nfc_list(Str); + [CP|Str] -> [CP|characters_to_nfc_list(Str)]; + [] -> [] + end. + + +uts46_remap(Str, Std3Rules, Transitional) -> + characters_to_nfc_list(uts46_remap_1(Str, Std3Rules, Transitional)). 
+ +uts46_remap_1([Cp|Rs], Std3Rules, Transitional) -> + Row = try idna_mapping:uts46_map(Cp) + catch + error:badarg -> + ?LOG_ERROR("codepoint ~p not found in mapping list~n", [Cp]), + erlang:exit({invalid_codepoint, Cp}) + end, + {Status, Replacement} = case Row of + {_, _} -> Row; + S -> {S, undefined} + end, + if + (Status =:= 'V'); + ((Status =:= 'D') andalso (Transitional =:= false)); + ((Status =:= '3') andalso (Std3Rules =:= true) andalso (Replacement =:= undefined)) -> + [Cp] ++ uts46_remap_1(Rs, Std3Rules, Transitional); + (Replacement =/= undefined) andalso ( + (Status =:= 'M') orelse + (Status =:= '3' andalso Std3Rules =:= false) orelse + (Status =:= 'D' andalso Transitional =:= true)) -> + Replacement ++ uts46_remap_1(Rs, Std3Rules, Transitional); + (Status =:= 'I') -> + uts46_remap_1(Rs, Std3Rules, Transitional); + true -> + erlang:exit({invalid_codepoint, Cp}) + end; +uts46_remap_1([], _, _) -> + []. + +error_msg(Msg, Fmt) -> + lists:flatten(io_lib:format(Msg, Fmt)). diff --git a/deps/idna/src/idna_bidi.erl b/deps/idna/src/idna_bidi.erl new file mode 100644 index 0000000..07cfa33 --- /dev/null +++ b/deps/idna/src/idna_bidi.erl @@ -0,0 +1,96 @@ +%% -*- coding: utf-8 -*- +%%% +%%% This file is part of erlang-idna released under the MIT license. +%%% See the LICENSE for more information. +%%% + +-module(idna_bidi). +-author("benoitc"). + +%% API +-export([check_bidi/1, check_bidi/2]). + +check_bidi(Label) -> check_bidi(Label, false). + +check_bidi(Label, CheckLtr) -> + %% Bidi rules should only be applied if string contains RTL characters + case {check_rtl(Label, Label), CheckLtr} of + {false, false} -> ok; + _ -> + [C | _Rest] = Label, + % bidi rule 1 + RTL = rtl(C, Label), + check_bidi1(Label, RTL, false, undefined) + end. 
+ +check_rtl([C | Rest], Label) -> + case idna_data:bidirectional(C) of + false -> + erlang:exit(bidi_error("unknown directionality in label=~p c=~w~n", [Label, C])); + Dir -> + case lists:member(Dir, ["R", "AL", "AN"]) of + true -> true; + false -> check_rtl(Rest, Label) + end + end; +check_rtl([], _Label) -> + false. + +rtl(C, Label) -> + case idna_data:bidirectional(C) of + "R" -> true; + "AL" -> true; + "L" -> false; + _ -> + erlang:exit(bidi_error("first codepoint in label ~p must be directionality L, R or AL ", [Label])) + end. + + +check_bidi1([C | Rest], true, ValidEnding, NumberType) -> + Dir = idna_data:bidirectional(C), + %% bidi rule 2 + ValidEnding2 = case lists:member(Dir, ["R", "AL", "AN", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]) of + true -> + % bidi rule 3 + case lists:member(Dir, ["R", "AL", "AN", "EN"]) of + true -> true; + false when Dir =/= "NSM" -> false; + false -> ValidEnding + end; + false -> + erlang:exit({bad_label, {bidi, "Invalid direction for codepoint in a right-to-left label"}}) + end, + % bidi rule 4 + NumberType2 = case lists:member(Dir, ["AN", "EN"]) of + true when NumberType =:= undefined -> + Dir; + true when NumberType /= Dir -> + erlang:exit({bad_label, {bidi, "Can not mix numeral types in a right-to-left label"}}); + _ -> + NumberType + end, + check_bidi1(Rest, true, ValidEnding2, NumberType2); +check_bidi1([C | Rest], false, ValidEnding, NumberType) -> + Dir = idna_data:bidirectional(C), + % bidi rule 5 + ValidEnding2 = case lists:member(Dir, ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]) of + true -> + % bidi rule 6 + case Dir of + "L" -> true; + "EN" -> true; + _ when Dir /= "NSM" -> false; + _ -> ValidEnding + end; + false -> + erlang:exit({bad_label, {bidi, "Invalid direction for codepoint in a left-to-right label"}}) + end, + check_bidi1(Rest, false, ValidEnding2, NumberType); +check_bidi1([], _, false, _) -> + erlang:exit({bad_label, {bidi, "Label ends with illegal codepoint directionality"}}); +check_bidi1([], 
_, true, _) -> + ok. + +bidi_error(Msg, Fmt) -> + ErrorMsg = lists:flatten(io_lib:format(Msg, Fmt)), + {bad_label, {bidi, ErrorMsg}}. diff --git a/deps/idna/src/idna_context.erl b/deps/idna/src/idna_context.erl new file mode 100644 index 0000000..faab65e --- /dev/null +++ b/deps/idna/src/idna_context.erl @@ -0,0 +1,154 @@ +%% -*- coding: utf-8 -*- +%%% +%%% This file is part of erlang-idna released under the MIT license. +%%% See the LICENSE for more information. +%%% +-module(idna_context). +-author("benoitc"). + +%% API +-export([ + valid_contextj/2, valid_contextj/3, + valid_contexto/2, valid_contexto/3, + contexto_with_rule/1 +]). + +-define(virama_combining_class, 9). + + +valid_contextj([], _Pos) -> true; + +valid_contextj(Label, Pos) -> + CP = lists:nth(Pos + 1, Label), + valid_contextj(CP, Label, Pos). + +valid_contextj(16#200c, Label, Pos) -> + if + Pos > 0 -> + case unicode_util_compat:lookup(lists:nth(Pos, Label)) of + #{ ccc := ?virama_combining_class } -> true; + _ -> + valid_contextj_1(Label, Pos) + end; + true -> + valid_contextj_1(Label, Pos) + end; + +valid_contextj(16#200d, Label, Pos) when Pos > 0 -> + case unicode_util_compat:lookup(lists:nth(Pos, Label)) of + #{ ccc := ?virama_combining_class } -> true; + _ -> false + end; +valid_contextj(_, _, _) -> + false. + +valid_contextj_1(Label, Pos) -> + case range(lists:reverse(lists:nthtail(Pos, Label))) of + true -> + range(lists:nthtail(Pos+2, Label)); + false -> + false + end. + +range([CP|Rest]) -> + case idna_data:joining_types(CP) of + "T" -> range(Rest); + "L" -> true; + "D" -> true; + _ -> + range(Rest) + end; +range([]) -> + false. + +valid_contexto([], _Pos) -> + io:format("ici", []), + true; +valid_contexto(Label, Pos) -> + CP = lists:nth(Pos + 1, Label), + valid_contexto(CP, Label, Pos). 
+ +valid_contexto(CP, Label, Pos) -> + Len = length(Label), + case CP of + 16#00B7 -> + + % MIDDLE DOT + if + (Pos > 0) andalso (Pos < (Len -1)) -> + case lists:sublist(Label, Pos, 3) of + [16#006C, _, 16#006C] -> true; + _ -> false + end; + true -> + false + end; + 16#0375 -> + % GREEK LOWER NUMERAL SIGN (KERAIA) + if + (Pos < (Len -1)) andalso (Len > 1) -> + case idna_data:scripts(lists:nth(Pos + 2, Label)) of + "greek" -> true; + _Else -> false + end; + true -> + false + end; + 16#30FB -> + % KATAKANA MIDDLE DOT + script_ok(Label); + CP when CP == 16#05F3; CP == 16#05F4 -> + % HEBREW PUNCTUATION GERESH or HEBREW PUNCTUATION GERSHAYIM + if + Pos > 0 -> + case idna_data:scripts(lists:nth(Pos, Label)) of + "hebrew" -> true; + _ -> false + end; + true -> + false + end; + CP when CP >= 16#660, CP =< 16#669 -> + % ARABIC-INDIC DIGITS + contexto_in_range(Label, 16#6F0, 16#6F9); + CP when 16#6F0 =< CP, CP =< 16#6F9 -> + % EXTENDED ARABIC-INDIC DIGIT + contexto_in_range(Label, 16#660, 16#669); + _ -> + + false + end. + + +contexto_in_range([CP | _], Start, End) when CP >= Start, CP =< End -> false; +contexto_in_range([_CP|Rest], Start, End) -> contexto_in_range(Rest, Start, End); +contexto_in_range([], _, _) -> true. + +script_ok([16#30fb| Rest]) -> + script_ok(Rest); +script_ok([C | Rest]) -> + case idna_data:scripts(C) of + "hiragana" -> true; + "katakana" -> true; + "han" -> true; + _ -> + script_ok(Rest) + end; +script_ok([]) -> + false. + +contexto_with_rule(16#00B7) -> true; +% MIDDLE DOT +contexto_with_rule(16#0375) -> true; +% GREEK LOWER NUMERAL SIGN (KERAIA) +contexto_with_rule(16#05F3) -> true; +% HEBREW PUNCTUATION GERESH +contexto_with_rule(16#05F4) -> true; +% HEBREW PUNCTUATION GERSHAYIM +contexto_with_rule(16#30FB) -> true; +% KATAKANA MIDDLE DOT +contexto_with_rule(CP) when 16#0660 =< CP, CP =< 16#0669 -> true; +% ARABIC-INDIC DIGITS +contexto_with_rule(CP) when 16#06F0 =< CP, CP =< 16#06F9 -> true; +% KATAKANA MIDDLE DOT +contexto_with_rule(_) -> false. 
diff --git a/deps/idna/src/idna_data.erl b/deps/idna/src/idna_data.erl new file mode 100644 index 0000000..4349796 --- /dev/null +++ b/deps/idna/src/idna_data.erl @@ -0,0 +1,34728 @@ +%% +%% this file is generated do not modify +%% see ../uc_spec/gen_idnadata_mod.escript + +-module(idna_data). +-compile(compressed). +-export([lookup/1, joining_types/1, scripts/1]). +-export([bidirectional/1]). +bidirectional(CP) -> + case lookup(CP) of + {_, C} -> C; + false -> bidirectional_1(CP) + end. + +bidirectional_1(CP) when 1424 =< CP, CP =< 1535 -> "R"; +bidirectional_1(CP) when 1536 =< CP, CP =< 1983 -> "AL"; +bidirectional_1(CP) when 1984 =< CP, CP =< 2143 -> "R"; +bidirectional_1(CP) when 2144 =< CP, CP =< 2159 -> "AL"; +bidirectional_1(CP) when 2160 =< CP, CP =< 2207 -> "R"; +bidirectional_1(CP) when 2208 =< CP, CP =< 2303 -> "AL"; +bidirectional_1(CP) when 8352 =< CP, CP =< 8399 -> "ET"; +bidirectional_1(CP) when 64285 =< CP, CP =< 64335 -> "R"; +bidirectional_1(CP) when 64336 =< CP, CP =< 64975 -> "AL"; +bidirectional_1(CP) when 65008 =< CP, CP =< 65023 -> "AL"; +bidirectional_1(CP) when 65136 =< CP, CP =< 65279 -> "AL"; +bidirectional_1(CP) when 67584 =< CP, CP =< 68863 -> "R"; +bidirectional_1(CP) when 68864 =< CP, CP =< 68927 -> "AL"; +bidirectional_1(CP) when 68928 =< CP, CP =< 69423 -> "R"; +bidirectional_1(CP) when 69424 =< CP, CP =< 69487 -> "AL"; +bidirectional_1(CP) when 69488 =< CP, CP =< 69631 -> "R"; +bidirectional_1(CP) when 124928 =< CP, CP =< 126063 -> "R"; +bidirectional_1(CP) when 126064 =< CP, CP =< 126143 -> "AL"; +bidirectional_1(CP) when 126144 =< CP, CP =< 126463 -> "R"; +bidirectional_1(CP) when 126464 =< CP, CP =< 126719 -> "AL"; +bidirectional_1(CP) when 126720 =< CP, CP =< 126975 -> "R"; +bidirectional_1(_) -> "L". 
+ +lookup(0) -> {"Cc","BN"}; +lookup(1) -> {"Cc","BN"}; +lookup(2) -> {"Cc","BN"}; +lookup(3) -> {"Cc","BN"}; +lookup(4) -> {"Cc","BN"}; +lookup(5) -> {"Cc","BN"}; +lookup(6) -> {"Cc","BN"}; +lookup(7) -> {"Cc","BN"}; +lookup(8) -> {"Cc","BN"}; +lookup(9) -> {"Cc","S"}; +lookup(10) -> {"Cc","B"}; +lookup(11) -> {"Cc","S"}; +lookup(12) -> {"Cc","WS"}; +lookup(13) -> {"Cc","B"}; +lookup(14) -> {"Cc","BN"}; +lookup(15) -> {"Cc","BN"}; +lookup(16) -> {"Cc","BN"}; +lookup(17) -> {"Cc","BN"}; +lookup(18) -> {"Cc","BN"}; +lookup(19) -> {"Cc","BN"}; +lookup(20) -> {"Cc","BN"}; +lookup(21) -> {"Cc","BN"}; +lookup(22) -> {"Cc","BN"}; +lookup(23) -> {"Cc","BN"}; +lookup(24) -> {"Cc","BN"}; +lookup(25) -> {"Cc","BN"}; +lookup(26) -> {"Cc","BN"}; +lookup(27) -> {"Cc","BN"}; +lookup(28) -> {"Cc","B"}; +lookup(29) -> {"Cc","B"}; +lookup(30) -> {"Cc","B"}; +lookup(31) -> {"Cc","S"}; +lookup(32) -> {"Zs","WS"}; +lookup(33) -> {"Po","ON"}; +lookup(34) -> {"Po","ON"}; +lookup(35) -> {"Po","ET"}; +lookup(36) -> {"Sc","ET"}; +lookup(37) -> {"Po","ET"}; +lookup(38) -> {"Po","ON"}; +lookup(39) -> {"Po","ON"}; +lookup(40) -> {"Ps","ON"}; +lookup(41) -> {"Pe","ON"}; +lookup(42) -> {"Po","ON"}; +lookup(43) -> {"Sm","ES"}; +lookup(44) -> {"Po","CS"}; +lookup(45) -> {"Pd","ES"}; +lookup(46) -> {"Po","CS"}; +lookup(47) -> {"Po","CS"}; +lookup(48) -> {"Nd","EN"}; +lookup(49) -> {"Nd","EN"}; +lookup(50) -> {"Nd","EN"}; +lookup(51) -> {"Nd","EN"}; +lookup(52) -> {"Nd","EN"}; +lookup(53) -> {"Nd","EN"}; +lookup(54) -> {"Nd","EN"}; +lookup(55) -> {"Nd","EN"}; +lookup(56) -> {"Nd","EN"}; +lookup(57) -> {"Nd","EN"}; +lookup(58) -> {"Po","CS"}; +lookup(59) -> {"Po","ON"}; +lookup(60) -> {"Sm","ON"}; +lookup(61) -> {"Sm","ON"}; +lookup(62) -> {"Sm","ON"}; +lookup(63) -> {"Po","ON"}; +lookup(64) -> {"Po","ON"}; +lookup(65) -> {"Lu","L"}; +lookup(66) -> {"Lu","L"}; +lookup(67) -> {"Lu","L"}; +lookup(68) -> {"Lu","L"}; +lookup(69) -> {"Lu","L"}; +lookup(70) -> {"Lu","L"}; +lookup(71) -> {"Lu","L"}; 
+lookup(72) -> {"Lu","L"}; +lookup(73) -> {"Lu","L"}; +lookup(74) -> {"Lu","L"}; +lookup(75) -> {"Lu","L"}; +lookup(76) -> {"Lu","L"}; +lookup(77) -> {"Lu","L"}; +lookup(78) -> {"Lu","L"}; +lookup(79) -> {"Lu","L"}; +lookup(80) -> {"Lu","L"}; +lookup(81) -> {"Lu","L"}; +lookup(82) -> {"Lu","L"}; +lookup(83) -> {"Lu","L"}; +lookup(84) -> {"Lu","L"}; +lookup(85) -> {"Lu","L"}; +lookup(86) -> {"Lu","L"}; +lookup(87) -> {"Lu","L"}; +lookup(88) -> {"Lu","L"}; +lookup(89) -> {"Lu","L"}; +lookup(90) -> {"Lu","L"}; +lookup(91) -> {"Ps","ON"}; +lookup(92) -> {"Po","ON"}; +lookup(93) -> {"Pe","ON"}; +lookup(94) -> {"Sk","ON"}; +lookup(95) -> {"Pc","ON"}; +lookup(96) -> {"Sk","ON"}; +lookup(97) -> {"Ll","L"}; +lookup(98) -> {"Ll","L"}; +lookup(99) -> {"Ll","L"}; +lookup(100) -> {"Ll","L"}; +lookup(101) -> {"Ll","L"}; +lookup(102) -> {"Ll","L"}; +lookup(103) -> {"Ll","L"}; +lookup(104) -> {"Ll","L"}; +lookup(105) -> {"Ll","L"}; +lookup(106) -> {"Ll","L"}; +lookup(107) -> {"Ll","L"}; +lookup(108) -> {"Ll","L"}; +lookup(109) -> {"Ll","L"}; +lookup(110) -> {"Ll","L"}; +lookup(111) -> {"Ll","L"}; +lookup(112) -> {"Ll","L"}; +lookup(113) -> {"Ll","L"}; +lookup(114) -> {"Ll","L"}; +lookup(115) -> {"Ll","L"}; +lookup(116) -> {"Ll","L"}; +lookup(117) -> {"Ll","L"}; +lookup(118) -> {"Ll","L"}; +lookup(119) -> {"Ll","L"}; +lookup(120) -> {"Ll","L"}; +lookup(121) -> {"Ll","L"}; +lookup(122) -> {"Ll","L"}; +lookup(123) -> {"Ps","ON"}; +lookup(124) -> {"Sm","ON"}; +lookup(125) -> {"Pe","ON"}; +lookup(126) -> {"Sm","ON"}; +lookup(127) -> {"Cc","BN"}; +lookup(128) -> {"Cc","BN"}; +lookup(129) -> {"Cc","BN"}; +lookup(130) -> {"Cc","BN"}; +lookup(131) -> {"Cc","BN"}; +lookup(132) -> {"Cc","BN"}; +lookup(133) -> {"Cc","B"}; +lookup(134) -> {"Cc","BN"}; +lookup(135) -> {"Cc","BN"}; +lookup(136) -> {"Cc","BN"}; +lookup(137) -> {"Cc","BN"}; +lookup(138) -> {"Cc","BN"}; +lookup(139) -> {"Cc","BN"}; +lookup(140) -> {"Cc","BN"}; +lookup(141) -> {"Cc","BN"}; +lookup(142) -> {"Cc","BN"}; +lookup(143) 
-> {"Cc","BN"}; +lookup(144) -> {"Cc","BN"}; +lookup(145) -> {"Cc","BN"}; +lookup(146) -> {"Cc","BN"}; +lookup(147) -> {"Cc","BN"}; +lookup(148) -> {"Cc","BN"}; +lookup(149) -> {"Cc","BN"}; +lookup(150) -> {"Cc","BN"}; +lookup(151) -> {"Cc","BN"}; +lookup(152) -> {"Cc","BN"}; +lookup(153) -> {"Cc","BN"}; +lookup(154) -> {"Cc","BN"}; +lookup(155) -> {"Cc","BN"}; +lookup(156) -> {"Cc","BN"}; +lookup(157) -> {"Cc","BN"}; +lookup(158) -> {"Cc","BN"}; +lookup(159) -> {"Cc","BN"}; +lookup(160) -> {"Zs","CS"}; +lookup(161) -> {"Po","ON"}; +lookup(162) -> {"Sc","ET"}; +lookup(163) -> {"Sc","ET"}; +lookup(164) -> {"Sc","ET"}; +lookup(165) -> {"Sc","ET"}; +lookup(166) -> {"So","ON"}; +lookup(167) -> {"Po","ON"}; +lookup(168) -> {"Sk","ON"}; +lookup(169) -> {"So","ON"}; +lookup(170) -> {"Lo","L"}; +lookup(171) -> {"Pi","ON"}; +lookup(172) -> {"Sm","ON"}; +lookup(173) -> {"Cf","BN"}; +lookup(174) -> {"So","ON"}; +lookup(175) -> {"Sk","ON"}; +lookup(176) -> {"So","ET"}; +lookup(177) -> {"Sm","ET"}; +lookup(178) -> {"No","EN"}; +lookup(179) -> {"No","EN"}; +lookup(180) -> {"Sk","ON"}; +lookup(181) -> {"Ll","L"}; +lookup(182) -> {"Po","ON"}; +lookup(183) -> {"Po","ON"}; +lookup(184) -> {"Sk","ON"}; +lookup(185) -> {"No","EN"}; +lookup(186) -> {"Lo","L"}; +lookup(187) -> {"Pf","ON"}; +lookup(188) -> {"No","ON"}; +lookup(189) -> {"No","ON"}; +lookup(190) -> {"No","ON"}; +lookup(191) -> {"Po","ON"}; +lookup(192) -> {"Lu","L"}; +lookup(193) -> {"Lu","L"}; +lookup(194) -> {"Lu","L"}; +lookup(195) -> {"Lu","L"}; +lookup(196) -> {"Lu","L"}; +lookup(197) -> {"Lu","L"}; +lookup(198) -> {"Lu","L"}; +lookup(199) -> {"Lu","L"}; +lookup(200) -> {"Lu","L"}; +lookup(201) -> {"Lu","L"}; +lookup(202) -> {"Lu","L"}; +lookup(203) -> {"Lu","L"}; +lookup(204) -> {"Lu","L"}; +lookup(205) -> {"Lu","L"}; +lookup(206) -> {"Lu","L"}; +lookup(207) -> {"Lu","L"}; +lookup(208) -> {"Lu","L"}; +lookup(209) -> {"Lu","L"}; +lookup(210) -> {"Lu","L"}; +lookup(211) -> {"Lu","L"}; +lookup(212) -> {"Lu","L"}; 
+lookup(213) -> {"Lu","L"}; +lookup(214) -> {"Lu","L"}; +lookup(215) -> {"Sm","ON"}; +lookup(216) -> {"Lu","L"}; +lookup(217) -> {"Lu","L"}; +lookup(218) -> {"Lu","L"}; +lookup(219) -> {"Lu","L"}; +lookup(220) -> {"Lu","L"}; +lookup(221) -> {"Lu","L"}; +lookup(222) -> {"Lu","L"}; +lookup(223) -> {"Ll","L"}; +lookup(224) -> {"Ll","L"}; +lookup(225) -> {"Ll","L"}; +lookup(226) -> {"Ll","L"}; +lookup(227) -> {"Ll","L"}; +lookup(228) -> {"Ll","L"}; +lookup(229) -> {"Ll","L"}; +lookup(230) -> {"Ll","L"}; +lookup(231) -> {"Ll","L"}; +lookup(232) -> {"Ll","L"}; +lookup(233) -> {"Ll","L"}; +lookup(234) -> {"Ll","L"}; +lookup(235) -> {"Ll","L"}; +lookup(236) -> {"Ll","L"}; +lookup(237) -> {"Ll","L"}; +lookup(238) -> {"Ll","L"}; +lookup(239) -> {"Ll","L"}; +lookup(240) -> {"Ll","L"}; +lookup(241) -> {"Ll","L"}; +lookup(242) -> {"Ll","L"}; +lookup(243) -> {"Ll","L"}; +lookup(244) -> {"Ll","L"}; +lookup(245) -> {"Ll","L"}; +lookup(246) -> {"Ll","L"}; +lookup(247) -> {"Sm","ON"}; +lookup(248) -> {"Ll","L"}; +lookup(249) -> {"Ll","L"}; +lookup(250) -> {"Ll","L"}; +lookup(251) -> {"Ll","L"}; +lookup(252) -> {"Ll","L"}; +lookup(253) -> {"Ll","L"}; +lookup(254) -> {"Ll","L"}; +lookup(255) -> {"Ll","L"}; +lookup(256) -> {"Lu","L"}; +lookup(257) -> {"Ll","L"}; +lookup(258) -> {"Lu","L"}; +lookup(259) -> {"Ll","L"}; +lookup(260) -> {"Lu","L"}; +lookup(261) -> {"Ll","L"}; +lookup(262) -> {"Lu","L"}; +lookup(263) -> {"Ll","L"}; +lookup(264) -> {"Lu","L"}; +lookup(265) -> {"Ll","L"}; +lookup(266) -> {"Lu","L"}; +lookup(267) -> {"Ll","L"}; +lookup(268) -> {"Lu","L"}; +lookup(269) -> {"Ll","L"}; +lookup(270) -> {"Lu","L"}; +lookup(271) -> {"Ll","L"}; +lookup(272) -> {"Lu","L"}; +lookup(273) -> {"Ll","L"}; +lookup(274) -> {"Lu","L"}; +lookup(275) -> {"Ll","L"}; +lookup(276) -> {"Lu","L"}; +lookup(277) -> {"Ll","L"}; +lookup(278) -> {"Lu","L"}; +lookup(279) -> {"Ll","L"}; +lookup(280) -> {"Lu","L"}; +lookup(281) -> {"Ll","L"}; +lookup(282) -> {"Lu","L"}; +lookup(283) -> {"Ll","L"}; 
+lookup(284) -> {"Lu","L"}; +lookup(285) -> {"Ll","L"}; +lookup(286) -> {"Lu","L"}; +lookup(287) -> {"Ll","L"}; +lookup(288) -> {"Lu","L"}; +lookup(289) -> {"Ll","L"}; +lookup(290) -> {"Lu","L"}; +lookup(291) -> {"Ll","L"}; +lookup(292) -> {"Lu","L"}; +lookup(293) -> {"Ll","L"}; +lookup(294) -> {"Lu","L"}; +lookup(295) -> {"Ll","L"}; +lookup(296) -> {"Lu","L"}; +lookup(297) -> {"Ll","L"}; +lookup(298) -> {"Lu","L"}; +lookup(299) -> {"Ll","L"}; +lookup(300) -> {"Lu","L"}; +lookup(301) -> {"Ll","L"}; +lookup(302) -> {"Lu","L"}; +lookup(303) -> {"Ll","L"}; +lookup(304) -> {"Lu","L"}; +lookup(305) -> {"Ll","L"}; +lookup(306) -> {"Lu","L"}; +lookup(307) -> {"Ll","L"}; +lookup(308) -> {"Lu","L"}; +lookup(309) -> {"Ll","L"}; +lookup(310) -> {"Lu","L"}; +lookup(311) -> {"Ll","L"}; +lookup(312) -> {"Ll","L"}; +lookup(313) -> {"Lu","L"}; +lookup(314) -> {"Ll","L"}; +lookup(315) -> {"Lu","L"}; +lookup(316) -> {"Ll","L"}; +lookup(317) -> {"Lu","L"}; +lookup(318) -> {"Ll","L"}; +lookup(319) -> {"Lu","L"}; +lookup(320) -> {"Ll","L"}; +lookup(321) -> {"Lu","L"}; +lookup(322) -> {"Ll","L"}; +lookup(323) -> {"Lu","L"}; +lookup(324) -> {"Ll","L"}; +lookup(325) -> {"Lu","L"}; +lookup(326) -> {"Ll","L"}; +lookup(327) -> {"Lu","L"}; +lookup(328) -> {"Ll","L"}; +lookup(329) -> {"Ll","L"}; +lookup(330) -> {"Lu","L"}; +lookup(331) -> {"Ll","L"}; +lookup(332) -> {"Lu","L"}; +lookup(333) -> {"Ll","L"}; +lookup(334) -> {"Lu","L"}; +lookup(335) -> {"Ll","L"}; +lookup(336) -> {"Lu","L"}; +lookup(337) -> {"Ll","L"}; +lookup(338) -> {"Lu","L"}; +lookup(339) -> {"Ll","L"}; +lookup(340) -> {"Lu","L"}; +lookup(341) -> {"Ll","L"}; +lookup(342) -> {"Lu","L"}; +lookup(343) -> {"Ll","L"}; +lookup(344) -> {"Lu","L"}; +lookup(345) -> {"Ll","L"}; +lookup(346) -> {"Lu","L"}; +lookup(347) -> {"Ll","L"}; +lookup(348) -> {"Lu","L"}; +lookup(349) -> {"Ll","L"}; +lookup(350) -> {"Lu","L"}; +lookup(351) -> {"Ll","L"}; +lookup(352) -> {"Lu","L"}; +lookup(353) -> {"Ll","L"}; +lookup(354) -> {"Lu","L"}; 
+lookup(355) -> {"Ll","L"}; +lookup(356) -> {"Lu","L"}; +lookup(357) -> {"Ll","L"}; +lookup(358) -> {"Lu","L"}; +lookup(359) -> {"Ll","L"}; +lookup(360) -> {"Lu","L"}; +lookup(361) -> {"Ll","L"}; +lookup(362) -> {"Lu","L"}; +lookup(363) -> {"Ll","L"}; +lookup(364) -> {"Lu","L"}; +lookup(365) -> {"Ll","L"}; +lookup(366) -> {"Lu","L"}; +lookup(367) -> {"Ll","L"}; +lookup(368) -> {"Lu","L"}; +lookup(369) -> {"Ll","L"}; +lookup(370) -> {"Lu","L"}; +lookup(371) -> {"Ll","L"}; +lookup(372) -> {"Lu","L"}; +lookup(373) -> {"Ll","L"}; +lookup(374) -> {"Lu","L"}; +lookup(375) -> {"Ll","L"}; +lookup(376) -> {"Lu","L"}; +lookup(377) -> {"Lu","L"}; +lookup(378) -> {"Ll","L"}; +lookup(379) -> {"Lu","L"}; +lookup(380) -> {"Ll","L"}; +lookup(381) -> {"Lu","L"}; +lookup(382) -> {"Ll","L"}; +lookup(383) -> {"Ll","L"}; +lookup(384) -> {"Ll","L"}; +lookup(385) -> {"Lu","L"}; +lookup(386) -> {"Lu","L"}; +lookup(387) -> {"Ll","L"}; +lookup(388) -> {"Lu","L"}; +lookup(389) -> {"Ll","L"}; +lookup(390) -> {"Lu","L"}; +lookup(391) -> {"Lu","L"}; +lookup(392) -> {"Ll","L"}; +lookup(393) -> {"Lu","L"}; +lookup(394) -> {"Lu","L"}; +lookup(395) -> {"Lu","L"}; +lookup(396) -> {"Ll","L"}; +lookup(397) -> {"Ll","L"}; +lookup(398) -> {"Lu","L"}; +lookup(399) -> {"Lu","L"}; +lookup(400) -> {"Lu","L"}; +lookup(401) -> {"Lu","L"}; +lookup(402) -> {"Ll","L"}; +lookup(403) -> {"Lu","L"}; +lookup(404) -> {"Lu","L"}; +lookup(405) -> {"Ll","L"}; +lookup(406) -> {"Lu","L"}; +lookup(407) -> {"Lu","L"}; +lookup(408) -> {"Lu","L"}; +lookup(409) -> {"Ll","L"}; +lookup(410) -> {"Ll","L"}; +lookup(411) -> {"Ll","L"}; +lookup(412) -> {"Lu","L"}; +lookup(413) -> {"Lu","L"}; +lookup(414) -> {"Ll","L"}; +lookup(415) -> {"Lu","L"}; +lookup(416) -> {"Lu","L"}; +lookup(417) -> {"Ll","L"}; +lookup(418) -> {"Lu","L"}; +lookup(419) -> {"Ll","L"}; +lookup(420) -> {"Lu","L"}; +lookup(421) -> {"Ll","L"}; +lookup(422) -> {"Lu","L"}; +lookup(423) -> {"Lu","L"}; +lookup(424) -> {"Ll","L"}; +lookup(425) -> {"Lu","L"}; 
+lookup(426) -> {"Ll","L"}; +lookup(427) -> {"Ll","L"}; +lookup(428) -> {"Lu","L"}; +lookup(429) -> {"Ll","L"}; +lookup(430) -> {"Lu","L"}; +lookup(431) -> {"Lu","L"}; +lookup(432) -> {"Ll","L"}; +lookup(433) -> {"Lu","L"}; +lookup(434) -> {"Lu","L"}; +lookup(435) -> {"Lu","L"}; +lookup(436) -> {"Ll","L"}; +lookup(437) -> {"Lu","L"}; +lookup(438) -> {"Ll","L"}; +lookup(439) -> {"Lu","L"}; +lookup(440) -> {"Lu","L"}; +lookup(441) -> {"Ll","L"}; +lookup(442) -> {"Ll","L"}; +lookup(443) -> {"Lo","L"}; +lookup(444) -> {"Lu","L"}; +lookup(445) -> {"Ll","L"}; +lookup(446) -> {"Ll","L"}; +lookup(447) -> {"Ll","L"}; +lookup(448) -> {"Lo","L"}; +lookup(449) -> {"Lo","L"}; +lookup(450) -> {"Lo","L"}; +lookup(451) -> {"Lo","L"}; +lookup(452) -> {"Lu","L"}; +lookup(453) -> {"Lt","L"}; +lookup(454) -> {"Ll","L"}; +lookup(455) -> {"Lu","L"}; +lookup(456) -> {"Lt","L"}; +lookup(457) -> {"Ll","L"}; +lookup(458) -> {"Lu","L"}; +lookup(459) -> {"Lt","L"}; +lookup(460) -> {"Ll","L"}; +lookup(461) -> {"Lu","L"}; +lookup(462) -> {"Ll","L"}; +lookup(463) -> {"Lu","L"}; +lookup(464) -> {"Ll","L"}; +lookup(465) -> {"Lu","L"}; +lookup(466) -> {"Ll","L"}; +lookup(467) -> {"Lu","L"}; +lookup(468) -> {"Ll","L"}; +lookup(469) -> {"Lu","L"}; +lookup(470) -> {"Ll","L"}; +lookup(471) -> {"Lu","L"}; +lookup(472) -> {"Ll","L"}; +lookup(473) -> {"Lu","L"}; +lookup(474) -> {"Ll","L"}; +lookup(475) -> {"Lu","L"}; +lookup(476) -> {"Ll","L"}; +lookup(477) -> {"Ll","L"}; +lookup(478) -> {"Lu","L"}; +lookup(479) -> {"Ll","L"}; +lookup(480) -> {"Lu","L"}; +lookup(481) -> {"Ll","L"}; +lookup(482) -> {"Lu","L"}; +lookup(483) -> {"Ll","L"}; +lookup(484) -> {"Lu","L"}; +lookup(485) -> {"Ll","L"}; +lookup(486) -> {"Lu","L"}; +lookup(487) -> {"Ll","L"}; +lookup(488) -> {"Lu","L"}; +lookup(489) -> {"Ll","L"}; +lookup(490) -> {"Lu","L"}; +lookup(491) -> {"Ll","L"}; +lookup(492) -> {"Lu","L"}; +lookup(493) -> {"Ll","L"}; +lookup(494) -> {"Lu","L"}; +lookup(495) -> {"Ll","L"}; +lookup(496) -> {"Ll","L"}; 
+lookup(497) -> {"Lu","L"}; +lookup(498) -> {"Lt","L"}; +lookup(499) -> {"Ll","L"}; +lookup(500) -> {"Lu","L"}; +lookup(501) -> {"Ll","L"}; +lookup(502) -> {"Lu","L"}; +lookup(503) -> {"Lu","L"}; +lookup(504) -> {"Lu","L"}; +lookup(505) -> {"Ll","L"}; +lookup(506) -> {"Lu","L"}; +lookup(507) -> {"Ll","L"}; +lookup(508) -> {"Lu","L"}; +lookup(509) -> {"Ll","L"}; +lookup(510) -> {"Lu","L"}; +lookup(511) -> {"Ll","L"}; +lookup(512) -> {"Lu","L"}; +lookup(513) -> {"Ll","L"}; +lookup(514) -> {"Lu","L"}; +lookup(515) -> {"Ll","L"}; +lookup(516) -> {"Lu","L"}; +lookup(517) -> {"Ll","L"}; +lookup(518) -> {"Lu","L"}; +lookup(519) -> {"Ll","L"}; +lookup(520) -> {"Lu","L"}; +lookup(521) -> {"Ll","L"}; +lookup(522) -> {"Lu","L"}; +lookup(523) -> {"Ll","L"}; +lookup(524) -> {"Lu","L"}; +lookup(525) -> {"Ll","L"}; +lookup(526) -> {"Lu","L"}; +lookup(527) -> {"Ll","L"}; +lookup(528) -> {"Lu","L"}; +lookup(529) -> {"Ll","L"}; +lookup(530) -> {"Lu","L"}; +lookup(531) -> {"Ll","L"}; +lookup(532) -> {"Lu","L"}; +lookup(533) -> {"Ll","L"}; +lookup(534) -> {"Lu","L"}; +lookup(535) -> {"Ll","L"}; +lookup(536) -> {"Lu","L"}; +lookup(537) -> {"Ll","L"}; +lookup(538) -> {"Lu","L"}; +lookup(539) -> {"Ll","L"}; +lookup(540) -> {"Lu","L"}; +lookup(541) -> {"Ll","L"}; +lookup(542) -> {"Lu","L"}; +lookup(543) -> {"Ll","L"}; +lookup(544) -> {"Lu","L"}; +lookup(545) -> {"Ll","L"}; +lookup(546) -> {"Lu","L"}; +lookup(547) -> {"Ll","L"}; +lookup(548) -> {"Lu","L"}; +lookup(549) -> {"Ll","L"}; +lookup(550) -> {"Lu","L"}; +lookup(551) -> {"Ll","L"}; +lookup(552) -> {"Lu","L"}; +lookup(553) -> {"Ll","L"}; +lookup(554) -> {"Lu","L"}; +lookup(555) -> {"Ll","L"}; +lookup(556) -> {"Lu","L"}; +lookup(557) -> {"Ll","L"}; +lookup(558) -> {"Lu","L"}; +lookup(559) -> {"Ll","L"}; +lookup(560) -> {"Lu","L"}; +lookup(561) -> {"Ll","L"}; +lookup(562) -> {"Lu","L"}; +lookup(563) -> {"Ll","L"}; +lookup(564) -> {"Ll","L"}; +lookup(565) -> {"Ll","L"}; +lookup(566) -> {"Ll","L"}; +lookup(567) -> {"Ll","L"}; 
+lookup(568) -> {"Ll","L"}; +lookup(569) -> {"Ll","L"}; +lookup(570) -> {"Lu","L"}; +lookup(571) -> {"Lu","L"}; +lookup(572) -> {"Ll","L"}; +lookup(573) -> {"Lu","L"}; +lookup(574) -> {"Lu","L"}; +lookup(575) -> {"Ll","L"}; +lookup(576) -> {"Ll","L"}; +lookup(577) -> {"Lu","L"}; +lookup(578) -> {"Ll","L"}; +lookup(579) -> {"Lu","L"}; +lookup(580) -> {"Lu","L"}; +lookup(581) -> {"Lu","L"}; +lookup(582) -> {"Lu","L"}; +lookup(583) -> {"Ll","L"}; +lookup(584) -> {"Lu","L"}; +lookup(585) -> {"Ll","L"}; +lookup(586) -> {"Lu","L"}; +lookup(587) -> {"Ll","L"}; +lookup(588) -> {"Lu","L"}; +lookup(589) -> {"Ll","L"}; +lookup(590) -> {"Lu","L"}; +lookup(591) -> {"Ll","L"}; +lookup(592) -> {"Ll","L"}; +lookup(593) -> {"Ll","L"}; +lookup(594) -> {"Ll","L"}; +lookup(595) -> {"Ll","L"}; +lookup(596) -> {"Ll","L"}; +lookup(597) -> {"Ll","L"}; +lookup(598) -> {"Ll","L"}; +lookup(599) -> {"Ll","L"}; +lookup(600) -> {"Ll","L"}; +lookup(601) -> {"Ll","L"}; +lookup(602) -> {"Ll","L"}; +lookup(603) -> {"Ll","L"}; +lookup(604) -> {"Ll","L"}; +lookup(605) -> {"Ll","L"}; +lookup(606) -> {"Ll","L"}; +lookup(607) -> {"Ll","L"}; +lookup(608) -> {"Ll","L"}; +lookup(609) -> {"Ll","L"}; +lookup(610) -> {"Ll","L"}; +lookup(611) -> {"Ll","L"}; +lookup(612) -> {"Ll","L"}; +lookup(613) -> {"Ll","L"}; +lookup(614) -> {"Ll","L"}; +lookup(615) -> {"Ll","L"}; +lookup(616) -> {"Ll","L"}; +lookup(617) -> {"Ll","L"}; +lookup(618) -> {"Ll","L"}; +lookup(619) -> {"Ll","L"}; +lookup(620) -> {"Ll","L"}; +lookup(621) -> {"Ll","L"}; +lookup(622) -> {"Ll","L"}; +lookup(623) -> {"Ll","L"}; +lookup(624) -> {"Ll","L"}; +lookup(625) -> {"Ll","L"}; +lookup(626) -> {"Ll","L"}; +lookup(627) -> {"Ll","L"}; +lookup(628) -> {"Ll","L"}; +lookup(629) -> {"Ll","L"}; +lookup(630) -> {"Ll","L"}; +lookup(631) -> {"Ll","L"}; +lookup(632) -> {"Ll","L"}; +lookup(633) -> {"Ll","L"}; +lookup(634) -> {"Ll","L"}; +lookup(635) -> {"Ll","L"}; +lookup(636) -> {"Ll","L"}; +lookup(637) -> {"Ll","L"}; +lookup(638) -> {"Ll","L"}; 
+lookup(639) -> {"Ll","L"}; +lookup(640) -> {"Ll","L"}; +lookup(641) -> {"Ll","L"}; +lookup(642) -> {"Ll","L"}; +lookup(643) -> {"Ll","L"}; +lookup(644) -> {"Ll","L"}; +lookup(645) -> {"Ll","L"}; +lookup(646) -> {"Ll","L"}; +lookup(647) -> {"Ll","L"}; +lookup(648) -> {"Ll","L"}; +lookup(649) -> {"Ll","L"}; +lookup(650) -> {"Ll","L"}; +lookup(651) -> {"Ll","L"}; +lookup(652) -> {"Ll","L"}; +lookup(653) -> {"Ll","L"}; +lookup(654) -> {"Ll","L"}; +lookup(655) -> {"Ll","L"}; +lookup(656) -> {"Ll","L"}; +lookup(657) -> {"Ll","L"}; +lookup(658) -> {"Ll","L"}; +lookup(659) -> {"Ll","L"}; +lookup(660) -> {"Lo","L"}; +lookup(661) -> {"Ll","L"}; +lookup(662) -> {"Ll","L"}; +lookup(663) -> {"Ll","L"}; +lookup(664) -> {"Ll","L"}; +lookup(665) -> {"Ll","L"}; +lookup(666) -> {"Ll","L"}; +lookup(667) -> {"Ll","L"}; +lookup(668) -> {"Ll","L"}; +lookup(669) -> {"Ll","L"}; +lookup(670) -> {"Ll","L"}; +lookup(671) -> {"Ll","L"}; +lookup(672) -> {"Ll","L"}; +lookup(673) -> {"Ll","L"}; +lookup(674) -> {"Ll","L"}; +lookup(675) -> {"Ll","L"}; +lookup(676) -> {"Ll","L"}; +lookup(677) -> {"Ll","L"}; +lookup(678) -> {"Ll","L"}; +lookup(679) -> {"Ll","L"}; +lookup(680) -> {"Ll","L"}; +lookup(681) -> {"Ll","L"}; +lookup(682) -> {"Ll","L"}; +lookup(683) -> {"Ll","L"}; +lookup(684) -> {"Ll","L"}; +lookup(685) -> {"Ll","L"}; +lookup(686) -> {"Ll","L"}; +lookup(687) -> {"Ll","L"}; +lookup(688) -> {"Lm","L"}; +lookup(689) -> {"Lm","L"}; +lookup(690) -> {"Lm","L"}; +lookup(691) -> {"Lm","L"}; +lookup(692) -> {"Lm","L"}; +lookup(693) -> {"Lm","L"}; +lookup(694) -> {"Lm","L"}; +lookup(695) -> {"Lm","L"}; +lookup(696) -> {"Lm","L"}; +lookup(697) -> {"Lm","ON"}; +lookup(698) -> {"Lm","ON"}; +lookup(699) -> {"Lm","L"}; +lookup(700) -> {"Lm","L"}; +lookup(701) -> {"Lm","L"}; +lookup(702) -> {"Lm","L"}; +lookup(703) -> {"Lm","L"}; +lookup(704) -> {"Lm","L"}; +lookup(705) -> {"Lm","L"}; +lookup(706) -> {"Sk","ON"}; +lookup(707) -> {"Sk","ON"}; +lookup(708) -> {"Sk","ON"}; +lookup(709) -> {"Sk","ON"}; 
+lookup(710) -> {"Lm","ON"}; +lookup(711) -> {"Lm","ON"}; +lookup(712) -> {"Lm","ON"}; +lookup(713) -> {"Lm","ON"}; +lookup(714) -> {"Lm","ON"}; +lookup(715) -> {"Lm","ON"}; +lookup(716) -> {"Lm","ON"}; +lookup(717) -> {"Lm","ON"}; +lookup(718) -> {"Lm","ON"}; +lookup(719) -> {"Lm","ON"}; +lookup(720) -> {"Lm","L"}; +lookup(721) -> {"Lm","L"}; +lookup(722) -> {"Sk","ON"}; +lookup(723) -> {"Sk","ON"}; +lookup(724) -> {"Sk","ON"}; +lookup(725) -> {"Sk","ON"}; +lookup(726) -> {"Sk","ON"}; +lookup(727) -> {"Sk","ON"}; +lookup(728) -> {"Sk","ON"}; +lookup(729) -> {"Sk","ON"}; +lookup(730) -> {"Sk","ON"}; +lookup(731) -> {"Sk","ON"}; +lookup(732) -> {"Sk","ON"}; +lookup(733) -> {"Sk","ON"}; +lookup(734) -> {"Sk","ON"}; +lookup(735) -> {"Sk","ON"}; +lookup(736) -> {"Lm","L"}; +lookup(737) -> {"Lm","L"}; +lookup(738) -> {"Lm","L"}; +lookup(739) -> {"Lm","L"}; +lookup(740) -> {"Lm","L"}; +lookup(741) -> {"Sk","ON"}; +lookup(742) -> {"Sk","ON"}; +lookup(743) -> {"Sk","ON"}; +lookup(744) -> {"Sk","ON"}; +lookup(745) -> {"Sk","ON"}; +lookup(746) -> {"Sk","ON"}; +lookup(747) -> {"Sk","ON"}; +lookup(748) -> {"Lm","ON"}; +lookup(749) -> {"Sk","ON"}; +lookup(750) -> {"Lm","L"}; +lookup(751) -> {"Sk","ON"}; +lookup(752) -> {"Sk","ON"}; +lookup(753) -> {"Sk","ON"}; +lookup(754) -> {"Sk","ON"}; +lookup(755) -> {"Sk","ON"}; +lookup(756) -> {"Sk","ON"}; +lookup(757) -> {"Sk","ON"}; +lookup(758) -> {"Sk","ON"}; +lookup(759) -> {"Sk","ON"}; +lookup(760) -> {"Sk","ON"}; +lookup(761) -> {"Sk","ON"}; +lookup(762) -> {"Sk","ON"}; +lookup(763) -> {"Sk","ON"}; +lookup(764) -> {"Sk","ON"}; +lookup(765) -> {"Sk","ON"}; +lookup(766) -> {"Sk","ON"}; +lookup(767) -> {"Sk","ON"}; +lookup(768) -> {"Mn","NSM"}; +lookup(769) -> {"Mn","NSM"}; +lookup(770) -> {"Mn","NSM"}; +lookup(771) -> {"Mn","NSM"}; +lookup(772) -> {"Mn","NSM"}; +lookup(773) -> {"Mn","NSM"}; +lookup(774) -> {"Mn","NSM"}; +lookup(775) -> {"Mn","NSM"}; +lookup(776) -> {"Mn","NSM"}; +lookup(777) -> {"Mn","NSM"}; +lookup(778) -> 
{"Mn","NSM"}; +lookup(779) -> {"Mn","NSM"}; +lookup(780) -> {"Mn","NSM"}; +lookup(781) -> {"Mn","NSM"}; +lookup(782) -> {"Mn","NSM"}; +lookup(783) -> {"Mn","NSM"}; +lookup(784) -> {"Mn","NSM"}; +lookup(785) -> {"Mn","NSM"}; +lookup(786) -> {"Mn","NSM"}; +lookup(787) -> {"Mn","NSM"}; +lookup(788) -> {"Mn","NSM"}; +lookup(789) -> {"Mn","NSM"}; +lookup(790) -> {"Mn","NSM"}; +lookup(791) -> {"Mn","NSM"}; +lookup(792) -> {"Mn","NSM"}; +lookup(793) -> {"Mn","NSM"}; +lookup(794) -> {"Mn","NSM"}; +lookup(795) -> {"Mn","NSM"}; +lookup(796) -> {"Mn","NSM"}; +lookup(797) -> {"Mn","NSM"}; +lookup(798) -> {"Mn","NSM"}; +lookup(799) -> {"Mn","NSM"}; +lookup(800) -> {"Mn","NSM"}; +lookup(801) -> {"Mn","NSM"}; +lookup(802) -> {"Mn","NSM"}; +lookup(803) -> {"Mn","NSM"}; +lookup(804) -> {"Mn","NSM"}; +lookup(805) -> {"Mn","NSM"}; +lookup(806) -> {"Mn","NSM"}; +lookup(807) -> {"Mn","NSM"}; +lookup(808) -> {"Mn","NSM"}; +lookup(809) -> {"Mn","NSM"}; +lookup(810) -> {"Mn","NSM"}; +lookup(811) -> {"Mn","NSM"}; +lookup(812) -> {"Mn","NSM"}; +lookup(813) -> {"Mn","NSM"}; +lookup(814) -> {"Mn","NSM"}; +lookup(815) -> {"Mn","NSM"}; +lookup(816) -> {"Mn","NSM"}; +lookup(817) -> {"Mn","NSM"}; +lookup(818) -> {"Mn","NSM"}; +lookup(819) -> {"Mn","NSM"}; +lookup(820) -> {"Mn","NSM"}; +lookup(821) -> {"Mn","NSM"}; +lookup(822) -> {"Mn","NSM"}; +lookup(823) -> {"Mn","NSM"}; +lookup(824) -> {"Mn","NSM"}; +lookup(825) -> {"Mn","NSM"}; +lookup(826) -> {"Mn","NSM"}; +lookup(827) -> {"Mn","NSM"}; +lookup(828) -> {"Mn","NSM"}; +lookup(829) -> {"Mn","NSM"}; +lookup(830) -> {"Mn","NSM"}; +lookup(831) -> {"Mn","NSM"}; +lookup(832) -> {"Mn","NSM"}; +lookup(833) -> {"Mn","NSM"}; +lookup(834) -> {"Mn","NSM"}; +lookup(835) -> {"Mn","NSM"}; +lookup(836) -> {"Mn","NSM"}; +lookup(837) -> {"Mn","NSM"}; +lookup(838) -> {"Mn","NSM"}; +lookup(839) -> {"Mn","NSM"}; +lookup(840) -> {"Mn","NSM"}; +lookup(841) -> {"Mn","NSM"}; +lookup(842) -> {"Mn","NSM"}; +lookup(843) -> {"Mn","NSM"}; +lookup(844) -> {"Mn","NSM"}; 
+lookup(845) -> {"Mn","NSM"}; +lookup(846) -> {"Mn","NSM"}; +lookup(847) -> {"Mn","NSM"}; +lookup(848) -> {"Mn","NSM"}; +lookup(849) -> {"Mn","NSM"}; +lookup(850) -> {"Mn","NSM"}; +lookup(851) -> {"Mn","NSM"}; +lookup(852) -> {"Mn","NSM"}; +lookup(853) -> {"Mn","NSM"}; +lookup(854) -> {"Mn","NSM"}; +lookup(855) -> {"Mn","NSM"}; +lookup(856) -> {"Mn","NSM"}; +lookup(857) -> {"Mn","NSM"}; +lookup(858) -> {"Mn","NSM"}; +lookup(859) -> {"Mn","NSM"}; +lookup(860) -> {"Mn","NSM"}; +lookup(861) -> {"Mn","NSM"}; +lookup(862) -> {"Mn","NSM"}; +lookup(863) -> {"Mn","NSM"}; +lookup(864) -> {"Mn","NSM"}; +lookup(865) -> {"Mn","NSM"}; +lookup(866) -> {"Mn","NSM"}; +lookup(867) -> {"Mn","NSM"}; +lookup(868) -> {"Mn","NSM"}; +lookup(869) -> {"Mn","NSM"}; +lookup(870) -> {"Mn","NSM"}; +lookup(871) -> {"Mn","NSM"}; +lookup(872) -> {"Mn","NSM"}; +lookup(873) -> {"Mn","NSM"}; +lookup(874) -> {"Mn","NSM"}; +lookup(875) -> {"Mn","NSM"}; +lookup(876) -> {"Mn","NSM"}; +lookup(877) -> {"Mn","NSM"}; +lookup(878) -> {"Mn","NSM"}; +lookup(879) -> {"Mn","NSM"}; +lookup(880) -> {"Lu","L"}; +lookup(881) -> {"Ll","L"}; +lookup(882) -> {"Lu","L"}; +lookup(883) -> {"Ll","L"}; +lookup(884) -> {"Lm","ON"}; +lookup(885) -> {"Sk","ON"}; +lookup(886) -> {"Lu","L"}; +lookup(887) -> {"Ll","L"}; +lookup(890) -> {"Lm","L"}; +lookup(891) -> {"Ll","L"}; +lookup(892) -> {"Ll","L"}; +lookup(893) -> {"Ll","L"}; +lookup(894) -> {"Po","ON"}; +lookup(895) -> {"Lu","L"}; +lookup(900) -> {"Sk","ON"}; +lookup(901) -> {"Sk","ON"}; +lookup(902) -> {"Lu","L"}; +lookup(903) -> {"Po","ON"}; +lookup(904) -> {"Lu","L"}; +lookup(905) -> {"Lu","L"}; +lookup(906) -> {"Lu","L"}; +lookup(908) -> {"Lu","L"}; +lookup(910) -> {"Lu","L"}; +lookup(911) -> {"Lu","L"}; +lookup(912) -> {"Ll","L"}; +lookup(913) -> {"Lu","L"}; +lookup(914) -> {"Lu","L"}; +lookup(915) -> {"Lu","L"}; +lookup(916) -> {"Lu","L"}; +lookup(917) -> {"Lu","L"}; +lookup(918) -> {"Lu","L"}; +lookup(919) -> {"Lu","L"}; +lookup(920) -> {"Lu","L"}; +lookup(921) -> 
{"Lu","L"}; +lookup(922) -> {"Lu","L"}; +lookup(923) -> {"Lu","L"}; +lookup(924) -> {"Lu","L"}; +lookup(925) -> {"Lu","L"}; +lookup(926) -> {"Lu","L"}; +lookup(927) -> {"Lu","L"}; +lookup(928) -> {"Lu","L"}; +lookup(929) -> {"Lu","L"}; +lookup(931) -> {"Lu","L"}; +lookup(932) -> {"Lu","L"}; +lookup(933) -> {"Lu","L"}; +lookup(934) -> {"Lu","L"}; +lookup(935) -> {"Lu","L"}; +lookup(936) -> {"Lu","L"}; +lookup(937) -> {"Lu","L"}; +lookup(938) -> {"Lu","L"}; +lookup(939) -> {"Lu","L"}; +lookup(940) -> {"Ll","L"}; +lookup(941) -> {"Ll","L"}; +lookup(942) -> {"Ll","L"}; +lookup(943) -> {"Ll","L"}; +lookup(944) -> {"Ll","L"}; +lookup(945) -> {"Ll","L"}; +lookup(946) -> {"Ll","L"}; +lookup(947) -> {"Ll","L"}; +lookup(948) -> {"Ll","L"}; +lookup(949) -> {"Ll","L"}; +lookup(950) -> {"Ll","L"}; +lookup(951) -> {"Ll","L"}; +lookup(952) -> {"Ll","L"}; +lookup(953) -> {"Ll","L"}; +lookup(954) -> {"Ll","L"}; +lookup(955) -> {"Ll","L"}; +lookup(956) -> {"Ll","L"}; +lookup(957) -> {"Ll","L"}; +lookup(958) -> {"Ll","L"}; +lookup(959) -> {"Ll","L"}; +lookup(960) -> {"Ll","L"}; +lookup(961) -> {"Ll","L"}; +lookup(962) -> {"Ll","L"}; +lookup(963) -> {"Ll","L"}; +lookup(964) -> {"Ll","L"}; +lookup(965) -> {"Ll","L"}; +lookup(966) -> {"Ll","L"}; +lookup(967) -> {"Ll","L"}; +lookup(968) -> {"Ll","L"}; +lookup(969) -> {"Ll","L"}; +lookup(970) -> {"Ll","L"}; +lookup(971) -> {"Ll","L"}; +lookup(972) -> {"Ll","L"}; +lookup(973) -> {"Ll","L"}; +lookup(974) -> {"Ll","L"}; +lookup(975) -> {"Lu","L"}; +lookup(976) -> {"Ll","L"}; +lookup(977) -> {"Ll","L"}; +lookup(978) -> {"Lu","L"}; +lookup(979) -> {"Lu","L"}; +lookup(980) -> {"Lu","L"}; +lookup(981) -> {"Ll","L"}; +lookup(982) -> {"Ll","L"}; +lookup(983) -> {"Ll","L"}; +lookup(984) -> {"Lu","L"}; +lookup(985) -> {"Ll","L"}; +lookup(986) -> {"Lu","L"}; +lookup(987) -> {"Ll","L"}; +lookup(988) -> {"Lu","L"}; +lookup(989) -> {"Ll","L"}; +lookup(990) -> {"Lu","L"}; +lookup(991) -> {"Ll","L"}; +lookup(992) -> {"Lu","L"}; +lookup(993) -> {"Ll","L"}; 
+lookup(994) -> {"Lu","L"}; +lookup(995) -> {"Ll","L"}; +lookup(996) -> {"Lu","L"}; +lookup(997) -> {"Ll","L"}; +lookup(998) -> {"Lu","L"}; +lookup(999) -> {"Ll","L"}; +lookup(1000) -> {"Lu","L"}; +lookup(1001) -> {"Ll","L"}; +lookup(1002) -> {"Lu","L"}; +lookup(1003) -> {"Ll","L"}; +lookup(1004) -> {"Lu","L"}; +lookup(1005) -> {"Ll","L"}; +lookup(1006) -> {"Lu","L"}; +lookup(1007) -> {"Ll","L"}; +lookup(1008) -> {"Ll","L"}; +lookup(1009) -> {"Ll","L"}; +lookup(1010) -> {"Ll","L"}; +lookup(1011) -> {"Ll","L"}; +lookup(1012) -> {"Lu","L"}; +lookup(1013) -> {"Ll","L"}; +lookup(1014) -> {"Sm","ON"}; +lookup(1015) -> {"Lu","L"}; +lookup(1016) -> {"Ll","L"}; +lookup(1017) -> {"Lu","L"}; +lookup(1018) -> {"Lu","L"}; +lookup(1019) -> {"Ll","L"}; +lookup(1020) -> {"Ll","L"}; +lookup(1021) -> {"Lu","L"}; +lookup(1022) -> {"Lu","L"}; +lookup(1023) -> {"Lu","L"}; +lookup(1024) -> {"Lu","L"}; +lookup(1025) -> {"Lu","L"}; +lookup(1026) -> {"Lu","L"}; +lookup(1027) -> {"Lu","L"}; +lookup(1028) -> {"Lu","L"}; +lookup(1029) -> {"Lu","L"}; +lookup(1030) -> {"Lu","L"}; +lookup(1031) -> {"Lu","L"}; +lookup(1032) -> {"Lu","L"}; +lookup(1033) -> {"Lu","L"}; +lookup(1034) -> {"Lu","L"}; +lookup(1035) -> {"Lu","L"}; +lookup(1036) -> {"Lu","L"}; +lookup(1037) -> {"Lu","L"}; +lookup(1038) -> {"Lu","L"}; +lookup(1039) -> {"Lu","L"}; +lookup(1040) -> {"Lu","L"}; +lookup(1041) -> {"Lu","L"}; +lookup(1042) -> {"Lu","L"}; +lookup(1043) -> {"Lu","L"}; +lookup(1044) -> {"Lu","L"}; +lookup(1045) -> {"Lu","L"}; +lookup(1046) -> {"Lu","L"}; +lookup(1047) -> {"Lu","L"}; +lookup(1048) -> {"Lu","L"}; +lookup(1049) -> {"Lu","L"}; +lookup(1050) -> {"Lu","L"}; +lookup(1051) -> {"Lu","L"}; +lookup(1052) -> {"Lu","L"}; +lookup(1053) -> {"Lu","L"}; +lookup(1054) -> {"Lu","L"}; +lookup(1055) -> {"Lu","L"}; +lookup(1056) -> {"Lu","L"}; +lookup(1057) -> {"Lu","L"}; +lookup(1058) -> {"Lu","L"}; +lookup(1059) -> {"Lu","L"}; +lookup(1060) -> {"Lu","L"}; +lookup(1061) -> {"Lu","L"}; +lookup(1062) -> {"Lu","L"}; 
+lookup(1063) -> {"Lu","L"}; +lookup(1064) -> {"Lu","L"}; +lookup(1065) -> {"Lu","L"}; +lookup(1066) -> {"Lu","L"}; +lookup(1067) -> {"Lu","L"}; +lookup(1068) -> {"Lu","L"}; +lookup(1069) -> {"Lu","L"}; +lookup(1070) -> {"Lu","L"}; +lookup(1071) -> {"Lu","L"}; +lookup(1072) -> {"Ll","L"}; +lookup(1073) -> {"Ll","L"}; +lookup(1074) -> {"Ll","L"}; +lookup(1075) -> {"Ll","L"}; +lookup(1076) -> {"Ll","L"}; +lookup(1077) -> {"Ll","L"}; +lookup(1078) -> {"Ll","L"}; +lookup(1079) -> {"Ll","L"}; +lookup(1080) -> {"Ll","L"}; +lookup(1081) -> {"Ll","L"}; +lookup(1082) -> {"Ll","L"}; +lookup(1083) -> {"Ll","L"}; +lookup(1084) -> {"Ll","L"}; +lookup(1085) -> {"Ll","L"}; +lookup(1086) -> {"Ll","L"}; +lookup(1087) -> {"Ll","L"}; +lookup(1088) -> {"Ll","L"}; +lookup(1089) -> {"Ll","L"}; +lookup(1090) -> {"Ll","L"}; +lookup(1091) -> {"Ll","L"}; +lookup(1092) -> {"Ll","L"}; +lookup(1093) -> {"Ll","L"}; +lookup(1094) -> {"Ll","L"}; +lookup(1095) -> {"Ll","L"}; +lookup(1096) -> {"Ll","L"}; +lookup(1097) -> {"Ll","L"}; +lookup(1098) -> {"Ll","L"}; +lookup(1099) -> {"Ll","L"}; +lookup(1100) -> {"Ll","L"}; +lookup(1101) -> {"Ll","L"}; +lookup(1102) -> {"Ll","L"}; +lookup(1103) -> {"Ll","L"}; +lookup(1104) -> {"Ll","L"}; +lookup(1105) -> {"Ll","L"}; +lookup(1106) -> {"Ll","L"}; +lookup(1107) -> {"Ll","L"}; +lookup(1108) -> {"Ll","L"}; +lookup(1109) -> {"Ll","L"}; +lookup(1110) -> {"Ll","L"}; +lookup(1111) -> {"Ll","L"}; +lookup(1112) -> {"Ll","L"}; +lookup(1113) -> {"Ll","L"}; +lookup(1114) -> {"Ll","L"}; +lookup(1115) -> {"Ll","L"}; +lookup(1116) -> {"Ll","L"}; +lookup(1117) -> {"Ll","L"}; +lookup(1118) -> {"Ll","L"}; +lookup(1119) -> {"Ll","L"}; +lookup(1120) -> {"Lu","L"}; +lookup(1121) -> {"Ll","L"}; +lookup(1122) -> {"Lu","L"}; +lookup(1123) -> {"Ll","L"}; +lookup(1124) -> {"Lu","L"}; +lookup(1125) -> {"Ll","L"}; +lookup(1126) -> {"Lu","L"}; +lookup(1127) -> {"Ll","L"}; +lookup(1128) -> {"Lu","L"}; +lookup(1129) -> {"Ll","L"}; +lookup(1130) -> {"Lu","L"}; +lookup(1131) -> 
{"Ll","L"}; +lookup(1132) -> {"Lu","L"}; +lookup(1133) -> {"Ll","L"}; +lookup(1134) -> {"Lu","L"}; +lookup(1135) -> {"Ll","L"}; +lookup(1136) -> {"Lu","L"}; +lookup(1137) -> {"Ll","L"}; +lookup(1138) -> {"Lu","L"}; +lookup(1139) -> {"Ll","L"}; +lookup(1140) -> {"Lu","L"}; +lookup(1141) -> {"Ll","L"}; +lookup(1142) -> {"Lu","L"}; +lookup(1143) -> {"Ll","L"}; +lookup(1144) -> {"Lu","L"}; +lookup(1145) -> {"Ll","L"}; +lookup(1146) -> {"Lu","L"}; +lookup(1147) -> {"Ll","L"}; +lookup(1148) -> {"Lu","L"}; +lookup(1149) -> {"Ll","L"}; +lookup(1150) -> {"Lu","L"}; +lookup(1151) -> {"Ll","L"}; +lookup(1152) -> {"Lu","L"}; +lookup(1153) -> {"Ll","L"}; +lookup(1154) -> {"So","L"}; +lookup(1155) -> {"Mn","NSM"}; +lookup(1156) -> {"Mn","NSM"}; +lookup(1157) -> {"Mn","NSM"}; +lookup(1158) -> {"Mn","NSM"}; +lookup(1159) -> {"Mn","NSM"}; +lookup(1160) -> {"Me","NSM"}; +lookup(1161) -> {"Me","NSM"}; +lookup(1162) -> {"Lu","L"}; +lookup(1163) -> {"Ll","L"}; +lookup(1164) -> {"Lu","L"}; +lookup(1165) -> {"Ll","L"}; +lookup(1166) -> {"Lu","L"}; +lookup(1167) -> {"Ll","L"}; +lookup(1168) -> {"Lu","L"}; +lookup(1169) -> {"Ll","L"}; +lookup(1170) -> {"Lu","L"}; +lookup(1171) -> {"Ll","L"}; +lookup(1172) -> {"Lu","L"}; +lookup(1173) -> {"Ll","L"}; +lookup(1174) -> {"Lu","L"}; +lookup(1175) -> {"Ll","L"}; +lookup(1176) -> {"Lu","L"}; +lookup(1177) -> {"Ll","L"}; +lookup(1178) -> {"Lu","L"}; +lookup(1179) -> {"Ll","L"}; +lookup(1180) -> {"Lu","L"}; +lookup(1181) -> {"Ll","L"}; +lookup(1182) -> {"Lu","L"}; +lookup(1183) -> {"Ll","L"}; +lookup(1184) -> {"Lu","L"}; +lookup(1185) -> {"Ll","L"}; +lookup(1186) -> {"Lu","L"}; +lookup(1187) -> {"Ll","L"}; +lookup(1188) -> {"Lu","L"}; +lookup(1189) -> {"Ll","L"}; +lookup(1190) -> {"Lu","L"}; +lookup(1191) -> {"Ll","L"}; +lookup(1192) -> {"Lu","L"}; +lookup(1193) -> {"Ll","L"}; +lookup(1194) -> {"Lu","L"}; +lookup(1195) -> {"Ll","L"}; +lookup(1196) -> {"Lu","L"}; +lookup(1197) -> {"Ll","L"}; +lookup(1198) -> {"Lu","L"}; +lookup(1199) -> {"Ll","L"}; 
+lookup(1200) -> {"Lu","L"}; +lookup(1201) -> {"Ll","L"}; +lookup(1202) -> {"Lu","L"}; +lookup(1203) -> {"Ll","L"}; +lookup(1204) -> {"Lu","L"}; +lookup(1205) -> {"Ll","L"}; +lookup(1206) -> {"Lu","L"}; +lookup(1207) -> {"Ll","L"}; +lookup(1208) -> {"Lu","L"}; +lookup(1209) -> {"Ll","L"}; +lookup(1210) -> {"Lu","L"}; +lookup(1211) -> {"Ll","L"}; +lookup(1212) -> {"Lu","L"}; +lookup(1213) -> {"Ll","L"}; +lookup(1214) -> {"Lu","L"}; +lookup(1215) -> {"Ll","L"}; +lookup(1216) -> {"Lu","L"}; +lookup(1217) -> {"Lu","L"}; +lookup(1218) -> {"Ll","L"}; +lookup(1219) -> {"Lu","L"}; +lookup(1220) -> {"Ll","L"}; +lookup(1221) -> {"Lu","L"}; +lookup(1222) -> {"Ll","L"}; +lookup(1223) -> {"Lu","L"}; +lookup(1224) -> {"Ll","L"}; +lookup(1225) -> {"Lu","L"}; +lookup(1226) -> {"Ll","L"}; +lookup(1227) -> {"Lu","L"}; +lookup(1228) -> {"Ll","L"}; +lookup(1229) -> {"Lu","L"}; +lookup(1230) -> {"Ll","L"}; +lookup(1231) -> {"Ll","L"}; +lookup(1232) -> {"Lu","L"}; +lookup(1233) -> {"Ll","L"}; +lookup(1234) -> {"Lu","L"}; +lookup(1235) -> {"Ll","L"}; +lookup(1236) -> {"Lu","L"}; +lookup(1237) -> {"Ll","L"}; +lookup(1238) -> {"Lu","L"}; +lookup(1239) -> {"Ll","L"}; +lookup(1240) -> {"Lu","L"}; +lookup(1241) -> {"Ll","L"}; +lookup(1242) -> {"Lu","L"}; +lookup(1243) -> {"Ll","L"}; +lookup(1244) -> {"Lu","L"}; +lookup(1245) -> {"Ll","L"}; +lookup(1246) -> {"Lu","L"}; +lookup(1247) -> {"Ll","L"}; +lookup(1248) -> {"Lu","L"}; +lookup(1249) -> {"Ll","L"}; +lookup(1250) -> {"Lu","L"}; +lookup(1251) -> {"Ll","L"}; +lookup(1252) -> {"Lu","L"}; +lookup(1253) -> {"Ll","L"}; +lookup(1254) -> {"Lu","L"}; +lookup(1255) -> {"Ll","L"}; +lookup(1256) -> {"Lu","L"}; +lookup(1257) -> {"Ll","L"}; +lookup(1258) -> {"Lu","L"}; +lookup(1259) -> {"Ll","L"}; +lookup(1260) -> {"Lu","L"}; +lookup(1261) -> {"Ll","L"}; +lookup(1262) -> {"Lu","L"}; +lookup(1263) -> {"Ll","L"}; +lookup(1264) -> {"Lu","L"}; +lookup(1265) -> {"Ll","L"}; +lookup(1266) -> {"Lu","L"}; +lookup(1267) -> {"Ll","L"}; +lookup(1268) -> 
{"Lu","L"}; +lookup(1269) -> {"Ll","L"}; +lookup(1270) -> {"Lu","L"}; +lookup(1271) -> {"Ll","L"}; +lookup(1272) -> {"Lu","L"}; +lookup(1273) -> {"Ll","L"}; +lookup(1274) -> {"Lu","L"}; +lookup(1275) -> {"Ll","L"}; +lookup(1276) -> {"Lu","L"}; +lookup(1277) -> {"Ll","L"}; +lookup(1278) -> {"Lu","L"}; +lookup(1279) -> {"Ll","L"}; +lookup(1280) -> {"Lu","L"}; +lookup(1281) -> {"Ll","L"}; +lookup(1282) -> {"Lu","L"}; +lookup(1283) -> {"Ll","L"}; +lookup(1284) -> {"Lu","L"}; +lookup(1285) -> {"Ll","L"}; +lookup(1286) -> {"Lu","L"}; +lookup(1287) -> {"Ll","L"}; +lookup(1288) -> {"Lu","L"}; +lookup(1289) -> {"Ll","L"}; +lookup(1290) -> {"Lu","L"}; +lookup(1291) -> {"Ll","L"}; +lookup(1292) -> {"Lu","L"}; +lookup(1293) -> {"Ll","L"}; +lookup(1294) -> {"Lu","L"}; +lookup(1295) -> {"Ll","L"}; +lookup(1296) -> {"Lu","L"}; +lookup(1297) -> {"Ll","L"}; +lookup(1298) -> {"Lu","L"}; +lookup(1299) -> {"Ll","L"}; +lookup(1300) -> {"Lu","L"}; +lookup(1301) -> {"Ll","L"}; +lookup(1302) -> {"Lu","L"}; +lookup(1303) -> {"Ll","L"}; +lookup(1304) -> {"Lu","L"}; +lookup(1305) -> {"Ll","L"}; +lookup(1306) -> {"Lu","L"}; +lookup(1307) -> {"Ll","L"}; +lookup(1308) -> {"Lu","L"}; +lookup(1309) -> {"Ll","L"}; +lookup(1310) -> {"Lu","L"}; +lookup(1311) -> {"Ll","L"}; +lookup(1312) -> {"Lu","L"}; +lookup(1313) -> {"Ll","L"}; +lookup(1314) -> {"Lu","L"}; +lookup(1315) -> {"Ll","L"}; +lookup(1316) -> {"Lu","L"}; +lookup(1317) -> {"Ll","L"}; +lookup(1318) -> {"Lu","L"}; +lookup(1319) -> {"Ll","L"}; +lookup(1320) -> {"Lu","L"}; +lookup(1321) -> {"Ll","L"}; +lookup(1322) -> {"Lu","L"}; +lookup(1323) -> {"Ll","L"}; +lookup(1324) -> {"Lu","L"}; +lookup(1325) -> {"Ll","L"}; +lookup(1326) -> {"Lu","L"}; +lookup(1327) -> {"Ll","L"}; +lookup(1329) -> {"Lu","L"}; +lookup(1330) -> {"Lu","L"}; +lookup(1331) -> {"Lu","L"}; +lookup(1332) -> {"Lu","L"}; +lookup(1333) -> {"Lu","L"}; +lookup(1334) -> {"Lu","L"}; +lookup(1335) -> {"Lu","L"}; +lookup(1336) -> {"Lu","L"}; +lookup(1337) -> {"Lu","L"}; +lookup(1338) 
-> {"Lu","L"}; +lookup(1339) -> {"Lu","L"}; +lookup(1340) -> {"Lu","L"}; +lookup(1341) -> {"Lu","L"}; +lookup(1342) -> {"Lu","L"}; +lookup(1343) -> {"Lu","L"}; +lookup(1344) -> {"Lu","L"}; +lookup(1345) -> {"Lu","L"}; +lookup(1346) -> {"Lu","L"}; +lookup(1347) -> {"Lu","L"}; +lookup(1348) -> {"Lu","L"}; +lookup(1349) -> {"Lu","L"}; +lookup(1350) -> {"Lu","L"}; +lookup(1351) -> {"Lu","L"}; +lookup(1352) -> {"Lu","L"}; +lookup(1353) -> {"Lu","L"}; +lookup(1354) -> {"Lu","L"}; +lookup(1355) -> {"Lu","L"}; +lookup(1356) -> {"Lu","L"}; +lookup(1357) -> {"Lu","L"}; +lookup(1358) -> {"Lu","L"}; +lookup(1359) -> {"Lu","L"}; +lookup(1360) -> {"Lu","L"}; +lookup(1361) -> {"Lu","L"}; +lookup(1362) -> {"Lu","L"}; +lookup(1363) -> {"Lu","L"}; +lookup(1364) -> {"Lu","L"}; +lookup(1365) -> {"Lu","L"}; +lookup(1366) -> {"Lu","L"}; +lookup(1369) -> {"Lm","L"}; +lookup(1370) -> {"Po","L"}; +lookup(1371) -> {"Po","L"}; +lookup(1372) -> {"Po","L"}; +lookup(1373) -> {"Po","L"}; +lookup(1374) -> {"Po","L"}; +lookup(1375) -> {"Po","L"}; +lookup(1376) -> {"Ll","L"}; +lookup(1377) -> {"Ll","L"}; +lookup(1378) -> {"Ll","L"}; +lookup(1379) -> {"Ll","L"}; +lookup(1380) -> {"Ll","L"}; +lookup(1381) -> {"Ll","L"}; +lookup(1382) -> {"Ll","L"}; +lookup(1383) -> {"Ll","L"}; +lookup(1384) -> {"Ll","L"}; +lookup(1385) -> {"Ll","L"}; +lookup(1386) -> {"Ll","L"}; +lookup(1387) -> {"Ll","L"}; +lookup(1388) -> {"Ll","L"}; +lookup(1389) -> {"Ll","L"}; +lookup(1390) -> {"Ll","L"}; +lookup(1391) -> {"Ll","L"}; +lookup(1392) -> {"Ll","L"}; +lookup(1393) -> {"Ll","L"}; +lookup(1394) -> {"Ll","L"}; +lookup(1395) -> {"Ll","L"}; +lookup(1396) -> {"Ll","L"}; +lookup(1397) -> {"Ll","L"}; +lookup(1398) -> {"Ll","L"}; +lookup(1399) -> {"Ll","L"}; +lookup(1400) -> {"Ll","L"}; +lookup(1401) -> {"Ll","L"}; +lookup(1402) -> {"Ll","L"}; +lookup(1403) -> {"Ll","L"}; +lookup(1404) -> {"Ll","L"}; +lookup(1405) -> {"Ll","L"}; +lookup(1406) -> {"Ll","L"}; +lookup(1407) -> {"Ll","L"}; +lookup(1408) -> {"Ll","L"}; 
+lookup(1409) -> {"Ll","L"}; +lookup(1410) -> {"Ll","L"}; +lookup(1411) -> {"Ll","L"}; +lookup(1412) -> {"Ll","L"}; +lookup(1413) -> {"Ll","L"}; +lookup(1414) -> {"Ll","L"}; +lookup(1415) -> {"Ll","L"}; +lookup(1416) -> {"Ll","L"}; +lookup(1417) -> {"Po","L"}; +lookup(1418) -> {"Pd","ON"}; +lookup(1421) -> {"So","ON"}; +lookup(1422) -> {"So","ON"}; +lookup(1423) -> {"Sc","ET"}; +lookup(1425) -> {"Mn","NSM"}; +lookup(1426) -> {"Mn","NSM"}; +lookup(1427) -> {"Mn","NSM"}; +lookup(1428) -> {"Mn","NSM"}; +lookup(1429) -> {"Mn","NSM"}; +lookup(1430) -> {"Mn","NSM"}; +lookup(1431) -> {"Mn","NSM"}; +lookup(1432) -> {"Mn","NSM"}; +lookup(1433) -> {"Mn","NSM"}; +lookup(1434) -> {"Mn","NSM"}; +lookup(1435) -> {"Mn","NSM"}; +lookup(1436) -> {"Mn","NSM"}; +lookup(1437) -> {"Mn","NSM"}; +lookup(1438) -> {"Mn","NSM"}; +lookup(1439) -> {"Mn","NSM"}; +lookup(1440) -> {"Mn","NSM"}; +lookup(1441) -> {"Mn","NSM"}; +lookup(1442) -> {"Mn","NSM"}; +lookup(1443) -> {"Mn","NSM"}; +lookup(1444) -> {"Mn","NSM"}; +lookup(1445) -> {"Mn","NSM"}; +lookup(1446) -> {"Mn","NSM"}; +lookup(1447) -> {"Mn","NSM"}; +lookup(1448) -> {"Mn","NSM"}; +lookup(1449) -> {"Mn","NSM"}; +lookup(1450) -> {"Mn","NSM"}; +lookup(1451) -> {"Mn","NSM"}; +lookup(1452) -> {"Mn","NSM"}; +lookup(1453) -> {"Mn","NSM"}; +lookup(1454) -> {"Mn","NSM"}; +lookup(1455) -> {"Mn","NSM"}; +lookup(1456) -> {"Mn","NSM"}; +lookup(1457) -> {"Mn","NSM"}; +lookup(1458) -> {"Mn","NSM"}; +lookup(1459) -> {"Mn","NSM"}; +lookup(1460) -> {"Mn","NSM"}; +lookup(1461) -> {"Mn","NSM"}; +lookup(1462) -> {"Mn","NSM"}; +lookup(1463) -> {"Mn","NSM"}; +lookup(1464) -> {"Mn","NSM"}; +lookup(1465) -> {"Mn","NSM"}; +lookup(1466) -> {"Mn","NSM"}; +lookup(1467) -> {"Mn","NSM"}; +lookup(1468) -> {"Mn","NSM"}; +lookup(1469) -> {"Mn","NSM"}; +lookup(1470) -> {"Pd","R"}; +lookup(1471) -> {"Mn","NSM"}; +lookup(1472) -> {"Po","R"}; +lookup(1473) -> {"Mn","NSM"}; +lookup(1474) -> {"Mn","NSM"}; +lookup(1475) -> {"Po","R"}; +lookup(1476) -> {"Mn","NSM"}; 
+lookup(1477) -> {"Mn","NSM"}; +lookup(1478) -> {"Po","R"}; +lookup(1479) -> {"Mn","NSM"}; +lookup(1488) -> {"Lo","R"}; +lookup(1489) -> {"Lo","R"}; +lookup(1490) -> {"Lo","R"}; +lookup(1491) -> {"Lo","R"}; +lookup(1492) -> {"Lo","R"}; +lookup(1493) -> {"Lo","R"}; +lookup(1494) -> {"Lo","R"}; +lookup(1495) -> {"Lo","R"}; +lookup(1496) -> {"Lo","R"}; +lookup(1497) -> {"Lo","R"}; +lookup(1498) -> {"Lo","R"}; +lookup(1499) -> {"Lo","R"}; +lookup(1500) -> {"Lo","R"}; +lookup(1501) -> {"Lo","R"}; +lookup(1502) -> {"Lo","R"}; +lookup(1503) -> {"Lo","R"}; +lookup(1504) -> {"Lo","R"}; +lookup(1505) -> {"Lo","R"}; +lookup(1506) -> {"Lo","R"}; +lookup(1507) -> {"Lo","R"}; +lookup(1508) -> {"Lo","R"}; +lookup(1509) -> {"Lo","R"}; +lookup(1510) -> {"Lo","R"}; +lookup(1511) -> {"Lo","R"}; +lookup(1512) -> {"Lo","R"}; +lookup(1513) -> {"Lo","R"}; +lookup(1514) -> {"Lo","R"}; +lookup(1519) -> {"Lo","R"}; +lookup(1520) -> {"Lo","R"}; +lookup(1521) -> {"Lo","R"}; +lookup(1522) -> {"Lo","R"}; +lookup(1523) -> {"Po","R"}; +lookup(1524) -> {"Po","R"}; +lookup(1536) -> {"Cf","AN"}; +lookup(1537) -> {"Cf","AN"}; +lookup(1538) -> {"Cf","AN"}; +lookup(1539) -> {"Cf","AN"}; +lookup(1540) -> {"Cf","AN"}; +lookup(1541) -> {"Cf","AN"}; +lookup(1542) -> {"Sm","ON"}; +lookup(1543) -> {"Sm","ON"}; +lookup(1544) -> {"Sm","AL"}; +lookup(1545) -> {"Po","ET"}; +lookup(1546) -> {"Po","ET"}; +lookup(1547) -> {"Sc","AL"}; +lookup(1548) -> {"Po","CS"}; +lookup(1549) -> {"Po","AL"}; +lookup(1550) -> {"So","ON"}; +lookup(1551) -> {"So","ON"}; +lookup(1552) -> {"Mn","NSM"}; +lookup(1553) -> {"Mn","NSM"}; +lookup(1554) -> {"Mn","NSM"}; +lookup(1555) -> {"Mn","NSM"}; +lookup(1556) -> {"Mn","NSM"}; +lookup(1557) -> {"Mn","NSM"}; +lookup(1558) -> {"Mn","NSM"}; +lookup(1559) -> {"Mn","NSM"}; +lookup(1560) -> {"Mn","NSM"}; +lookup(1561) -> {"Mn","NSM"}; +lookup(1562) -> {"Mn","NSM"}; +lookup(1563) -> {"Po","AL"}; +lookup(1564) -> {"Cf","AL"}; +lookup(1566) -> {"Po","AL"}; +lookup(1567) -> {"Po","AL"}; 
+lookup(1568) -> {"Lo","AL"}; +lookup(1569) -> {"Lo","AL"}; +lookup(1570) -> {"Lo","AL"}; +lookup(1571) -> {"Lo","AL"}; +lookup(1572) -> {"Lo","AL"}; +lookup(1573) -> {"Lo","AL"}; +lookup(1574) -> {"Lo","AL"}; +lookup(1575) -> {"Lo","AL"}; +lookup(1576) -> {"Lo","AL"}; +lookup(1577) -> {"Lo","AL"}; +lookup(1578) -> {"Lo","AL"}; +lookup(1579) -> {"Lo","AL"}; +lookup(1580) -> {"Lo","AL"}; +lookup(1581) -> {"Lo","AL"}; +lookup(1582) -> {"Lo","AL"}; +lookup(1583) -> {"Lo","AL"}; +lookup(1584) -> {"Lo","AL"}; +lookup(1585) -> {"Lo","AL"}; +lookup(1586) -> {"Lo","AL"}; +lookup(1587) -> {"Lo","AL"}; +lookup(1588) -> {"Lo","AL"}; +lookup(1589) -> {"Lo","AL"}; +lookup(1590) -> {"Lo","AL"}; +lookup(1591) -> {"Lo","AL"}; +lookup(1592) -> {"Lo","AL"}; +lookup(1593) -> {"Lo","AL"}; +lookup(1594) -> {"Lo","AL"}; +lookup(1595) -> {"Lo","AL"}; +lookup(1596) -> {"Lo","AL"}; +lookup(1597) -> {"Lo","AL"}; +lookup(1598) -> {"Lo","AL"}; +lookup(1599) -> {"Lo","AL"}; +lookup(1600) -> {"Lm","AL"}; +lookup(1601) -> {"Lo","AL"}; +lookup(1602) -> {"Lo","AL"}; +lookup(1603) -> {"Lo","AL"}; +lookup(1604) -> {"Lo","AL"}; +lookup(1605) -> {"Lo","AL"}; +lookup(1606) -> {"Lo","AL"}; +lookup(1607) -> {"Lo","AL"}; +lookup(1608) -> {"Lo","AL"}; +lookup(1609) -> {"Lo","AL"}; +lookup(1610) -> {"Lo","AL"}; +lookup(1611) -> {"Mn","NSM"}; +lookup(1612) -> {"Mn","NSM"}; +lookup(1613) -> {"Mn","NSM"}; +lookup(1614) -> {"Mn","NSM"}; +lookup(1615) -> {"Mn","NSM"}; +lookup(1616) -> {"Mn","NSM"}; +lookup(1617) -> {"Mn","NSM"}; +lookup(1618) -> {"Mn","NSM"}; +lookup(1619) -> {"Mn","NSM"}; +lookup(1620) -> {"Mn","NSM"}; +lookup(1621) -> {"Mn","NSM"}; +lookup(1622) -> {"Mn","NSM"}; +lookup(1623) -> {"Mn","NSM"}; +lookup(1624) -> {"Mn","NSM"}; +lookup(1625) -> {"Mn","NSM"}; +lookup(1626) -> {"Mn","NSM"}; +lookup(1627) -> {"Mn","NSM"}; +lookup(1628) -> {"Mn","NSM"}; +lookup(1629) -> {"Mn","NSM"}; +lookup(1630) -> {"Mn","NSM"}; +lookup(1631) -> {"Mn","NSM"}; +lookup(1632) -> {"Nd","AN"}; +lookup(1633) -> 
{"Nd","AN"}; +lookup(1634) -> {"Nd","AN"}; +lookup(1635) -> {"Nd","AN"}; +lookup(1636) -> {"Nd","AN"}; +lookup(1637) -> {"Nd","AN"}; +lookup(1638) -> {"Nd","AN"}; +lookup(1639) -> {"Nd","AN"}; +lookup(1640) -> {"Nd","AN"}; +lookup(1641) -> {"Nd","AN"}; +lookup(1642) -> {"Po","ET"}; +lookup(1643) -> {"Po","AN"}; +lookup(1644) -> {"Po","AN"}; +lookup(1645) -> {"Po","AL"}; +lookup(1646) -> {"Lo","AL"}; +lookup(1647) -> {"Lo","AL"}; +lookup(1648) -> {"Mn","NSM"}; +lookup(1649) -> {"Lo","AL"}; +lookup(1650) -> {"Lo","AL"}; +lookup(1651) -> {"Lo","AL"}; +lookup(1652) -> {"Lo","AL"}; +lookup(1653) -> {"Lo","AL"}; +lookup(1654) -> {"Lo","AL"}; +lookup(1655) -> {"Lo","AL"}; +lookup(1656) -> {"Lo","AL"}; +lookup(1657) -> {"Lo","AL"}; +lookup(1658) -> {"Lo","AL"}; +lookup(1659) -> {"Lo","AL"}; +lookup(1660) -> {"Lo","AL"}; +lookup(1661) -> {"Lo","AL"}; +lookup(1662) -> {"Lo","AL"}; +lookup(1663) -> {"Lo","AL"}; +lookup(1664) -> {"Lo","AL"}; +lookup(1665) -> {"Lo","AL"}; +lookup(1666) -> {"Lo","AL"}; +lookup(1667) -> {"Lo","AL"}; +lookup(1668) -> {"Lo","AL"}; +lookup(1669) -> {"Lo","AL"}; +lookup(1670) -> {"Lo","AL"}; +lookup(1671) -> {"Lo","AL"}; +lookup(1672) -> {"Lo","AL"}; +lookup(1673) -> {"Lo","AL"}; +lookup(1674) -> {"Lo","AL"}; +lookup(1675) -> {"Lo","AL"}; +lookup(1676) -> {"Lo","AL"}; +lookup(1677) -> {"Lo","AL"}; +lookup(1678) -> {"Lo","AL"}; +lookup(1679) -> {"Lo","AL"}; +lookup(1680) -> {"Lo","AL"}; +lookup(1681) -> {"Lo","AL"}; +lookup(1682) -> {"Lo","AL"}; +lookup(1683) -> {"Lo","AL"}; +lookup(1684) -> {"Lo","AL"}; +lookup(1685) -> {"Lo","AL"}; +lookup(1686) -> {"Lo","AL"}; +lookup(1687) -> {"Lo","AL"}; +lookup(1688) -> {"Lo","AL"}; +lookup(1689) -> {"Lo","AL"}; +lookup(1690) -> {"Lo","AL"}; +lookup(1691) -> {"Lo","AL"}; +lookup(1692) -> {"Lo","AL"}; +lookup(1693) -> {"Lo","AL"}; +lookup(1694) -> {"Lo","AL"}; +lookup(1695) -> {"Lo","AL"}; +lookup(1696) -> {"Lo","AL"}; +lookup(1697) -> {"Lo","AL"}; +lookup(1698) -> {"Lo","AL"}; +lookup(1699) -> {"Lo","AL"}; 
+lookup(1700) -> {"Lo","AL"}; +lookup(1701) -> {"Lo","AL"}; +lookup(1702) -> {"Lo","AL"}; +lookup(1703) -> {"Lo","AL"}; +lookup(1704) -> {"Lo","AL"}; +lookup(1705) -> {"Lo","AL"}; +lookup(1706) -> {"Lo","AL"}; +lookup(1707) -> {"Lo","AL"}; +lookup(1708) -> {"Lo","AL"}; +lookup(1709) -> {"Lo","AL"}; +lookup(1710) -> {"Lo","AL"}; +lookup(1711) -> {"Lo","AL"}; +lookup(1712) -> {"Lo","AL"}; +lookup(1713) -> {"Lo","AL"}; +lookup(1714) -> {"Lo","AL"}; +lookup(1715) -> {"Lo","AL"}; +lookup(1716) -> {"Lo","AL"}; +lookup(1717) -> {"Lo","AL"}; +lookup(1718) -> {"Lo","AL"}; +lookup(1719) -> {"Lo","AL"}; +lookup(1720) -> {"Lo","AL"}; +lookup(1721) -> {"Lo","AL"}; +lookup(1722) -> {"Lo","AL"}; +lookup(1723) -> {"Lo","AL"}; +lookup(1724) -> {"Lo","AL"}; +lookup(1725) -> {"Lo","AL"}; +lookup(1726) -> {"Lo","AL"}; +lookup(1727) -> {"Lo","AL"}; +lookup(1728) -> {"Lo","AL"}; +lookup(1729) -> {"Lo","AL"}; +lookup(1730) -> {"Lo","AL"}; +lookup(1731) -> {"Lo","AL"}; +lookup(1732) -> {"Lo","AL"}; +lookup(1733) -> {"Lo","AL"}; +lookup(1734) -> {"Lo","AL"}; +lookup(1735) -> {"Lo","AL"}; +lookup(1736) -> {"Lo","AL"}; +lookup(1737) -> {"Lo","AL"}; +lookup(1738) -> {"Lo","AL"}; +lookup(1739) -> {"Lo","AL"}; +lookup(1740) -> {"Lo","AL"}; +lookup(1741) -> {"Lo","AL"}; +lookup(1742) -> {"Lo","AL"}; +lookup(1743) -> {"Lo","AL"}; +lookup(1744) -> {"Lo","AL"}; +lookup(1745) -> {"Lo","AL"}; +lookup(1746) -> {"Lo","AL"}; +lookup(1747) -> {"Lo","AL"}; +lookup(1748) -> {"Po","AL"}; +lookup(1749) -> {"Lo","AL"}; +lookup(1750) -> {"Mn","NSM"}; +lookup(1751) -> {"Mn","NSM"}; +lookup(1752) -> {"Mn","NSM"}; +lookup(1753) -> {"Mn","NSM"}; +lookup(1754) -> {"Mn","NSM"}; +lookup(1755) -> {"Mn","NSM"}; +lookup(1756) -> {"Mn","NSM"}; +lookup(1757) -> {"Cf","AN"}; +lookup(1758) -> {"So","ON"}; +lookup(1759) -> {"Mn","NSM"}; +lookup(1760) -> {"Mn","NSM"}; +lookup(1761) -> {"Mn","NSM"}; +lookup(1762) -> {"Mn","NSM"}; +lookup(1763) -> {"Mn","NSM"}; +lookup(1764) -> {"Mn","NSM"}; +lookup(1765) -> {"Lm","AL"}; 
+lookup(1766) -> {"Lm","AL"}; +lookup(1767) -> {"Mn","NSM"}; +lookup(1768) -> {"Mn","NSM"}; +lookup(1769) -> {"So","ON"}; +lookup(1770) -> {"Mn","NSM"}; +lookup(1771) -> {"Mn","NSM"}; +lookup(1772) -> {"Mn","NSM"}; +lookup(1773) -> {"Mn","NSM"}; +lookup(1774) -> {"Lo","AL"}; +lookup(1775) -> {"Lo","AL"}; +lookup(1776) -> {"Nd","EN"}; +lookup(1777) -> {"Nd","EN"}; +lookup(1778) -> {"Nd","EN"}; +lookup(1779) -> {"Nd","EN"}; +lookup(1780) -> {"Nd","EN"}; +lookup(1781) -> {"Nd","EN"}; +lookup(1782) -> {"Nd","EN"}; +lookup(1783) -> {"Nd","EN"}; +lookup(1784) -> {"Nd","EN"}; +lookup(1785) -> {"Nd","EN"}; +lookup(1786) -> {"Lo","AL"}; +lookup(1787) -> {"Lo","AL"}; +lookup(1788) -> {"Lo","AL"}; +lookup(1789) -> {"So","AL"}; +lookup(1790) -> {"So","AL"}; +lookup(1791) -> {"Lo","AL"}; +lookup(1792) -> {"Po","AL"}; +lookup(1793) -> {"Po","AL"}; +lookup(1794) -> {"Po","AL"}; +lookup(1795) -> {"Po","AL"}; +lookup(1796) -> {"Po","AL"}; +lookup(1797) -> {"Po","AL"}; +lookup(1798) -> {"Po","AL"}; +lookup(1799) -> {"Po","AL"}; +lookup(1800) -> {"Po","AL"}; +lookup(1801) -> {"Po","AL"}; +lookup(1802) -> {"Po","AL"}; +lookup(1803) -> {"Po","AL"}; +lookup(1804) -> {"Po","AL"}; +lookup(1805) -> {"Po","AL"}; +lookup(1807) -> {"Cf","AL"}; +lookup(1808) -> {"Lo","AL"}; +lookup(1809) -> {"Mn","NSM"}; +lookup(1810) -> {"Lo","AL"}; +lookup(1811) -> {"Lo","AL"}; +lookup(1812) -> {"Lo","AL"}; +lookup(1813) -> {"Lo","AL"}; +lookup(1814) -> {"Lo","AL"}; +lookup(1815) -> {"Lo","AL"}; +lookup(1816) -> {"Lo","AL"}; +lookup(1817) -> {"Lo","AL"}; +lookup(1818) -> {"Lo","AL"}; +lookup(1819) -> {"Lo","AL"}; +lookup(1820) -> {"Lo","AL"}; +lookup(1821) -> {"Lo","AL"}; +lookup(1822) -> {"Lo","AL"}; +lookup(1823) -> {"Lo","AL"}; +lookup(1824) -> {"Lo","AL"}; +lookup(1825) -> {"Lo","AL"}; +lookup(1826) -> {"Lo","AL"}; +lookup(1827) -> {"Lo","AL"}; +lookup(1828) -> {"Lo","AL"}; +lookup(1829) -> {"Lo","AL"}; +lookup(1830) -> {"Lo","AL"}; +lookup(1831) -> {"Lo","AL"}; +lookup(1832) -> {"Lo","AL"}; 
+lookup(1833) -> {"Lo","AL"}; +lookup(1834) -> {"Lo","AL"}; +lookup(1835) -> {"Lo","AL"}; +lookup(1836) -> {"Lo","AL"}; +lookup(1837) -> {"Lo","AL"}; +lookup(1838) -> {"Lo","AL"}; +lookup(1839) -> {"Lo","AL"}; +lookup(1840) -> {"Mn","NSM"}; +lookup(1841) -> {"Mn","NSM"}; +lookup(1842) -> {"Mn","NSM"}; +lookup(1843) -> {"Mn","NSM"}; +lookup(1844) -> {"Mn","NSM"}; +lookup(1845) -> {"Mn","NSM"}; +lookup(1846) -> {"Mn","NSM"}; +lookup(1847) -> {"Mn","NSM"}; +lookup(1848) -> {"Mn","NSM"}; +lookup(1849) -> {"Mn","NSM"}; +lookup(1850) -> {"Mn","NSM"}; +lookup(1851) -> {"Mn","NSM"}; +lookup(1852) -> {"Mn","NSM"}; +lookup(1853) -> {"Mn","NSM"}; +lookup(1854) -> {"Mn","NSM"}; +lookup(1855) -> {"Mn","NSM"}; +lookup(1856) -> {"Mn","NSM"}; +lookup(1857) -> {"Mn","NSM"}; +lookup(1858) -> {"Mn","NSM"}; +lookup(1859) -> {"Mn","NSM"}; +lookup(1860) -> {"Mn","NSM"}; +lookup(1861) -> {"Mn","NSM"}; +lookup(1862) -> {"Mn","NSM"}; +lookup(1863) -> {"Mn","NSM"}; +lookup(1864) -> {"Mn","NSM"}; +lookup(1865) -> {"Mn","NSM"}; +lookup(1866) -> {"Mn","NSM"}; +lookup(1869) -> {"Lo","AL"}; +lookup(1870) -> {"Lo","AL"}; +lookup(1871) -> {"Lo","AL"}; +lookup(1872) -> {"Lo","AL"}; +lookup(1873) -> {"Lo","AL"}; +lookup(1874) -> {"Lo","AL"}; +lookup(1875) -> {"Lo","AL"}; +lookup(1876) -> {"Lo","AL"}; +lookup(1877) -> {"Lo","AL"}; +lookup(1878) -> {"Lo","AL"}; +lookup(1879) -> {"Lo","AL"}; +lookup(1880) -> {"Lo","AL"}; +lookup(1881) -> {"Lo","AL"}; +lookup(1882) -> {"Lo","AL"}; +lookup(1883) -> {"Lo","AL"}; +lookup(1884) -> {"Lo","AL"}; +lookup(1885) -> {"Lo","AL"}; +lookup(1886) -> {"Lo","AL"}; +lookup(1887) -> {"Lo","AL"}; +lookup(1888) -> {"Lo","AL"}; +lookup(1889) -> {"Lo","AL"}; +lookup(1890) -> {"Lo","AL"}; +lookup(1891) -> {"Lo","AL"}; +lookup(1892) -> {"Lo","AL"}; +lookup(1893) -> {"Lo","AL"}; +lookup(1894) -> {"Lo","AL"}; +lookup(1895) -> {"Lo","AL"}; +lookup(1896) -> {"Lo","AL"}; +lookup(1897) -> {"Lo","AL"}; +lookup(1898) -> {"Lo","AL"}; +lookup(1899) -> {"Lo","AL"}; +lookup(1900) -> 
{"Lo","AL"}; +lookup(1901) -> {"Lo","AL"}; +lookup(1902) -> {"Lo","AL"}; +lookup(1903) -> {"Lo","AL"}; +lookup(1904) -> {"Lo","AL"}; +lookup(1905) -> {"Lo","AL"}; +lookup(1906) -> {"Lo","AL"}; +lookup(1907) -> {"Lo","AL"}; +lookup(1908) -> {"Lo","AL"}; +lookup(1909) -> {"Lo","AL"}; +lookup(1910) -> {"Lo","AL"}; +lookup(1911) -> {"Lo","AL"}; +lookup(1912) -> {"Lo","AL"}; +lookup(1913) -> {"Lo","AL"}; +lookup(1914) -> {"Lo","AL"}; +lookup(1915) -> {"Lo","AL"}; +lookup(1916) -> {"Lo","AL"}; +lookup(1917) -> {"Lo","AL"}; +lookup(1918) -> {"Lo","AL"}; +lookup(1919) -> {"Lo","AL"}; +lookup(1920) -> {"Lo","AL"}; +lookup(1921) -> {"Lo","AL"}; +lookup(1922) -> {"Lo","AL"}; +lookup(1923) -> {"Lo","AL"}; +lookup(1924) -> {"Lo","AL"}; +lookup(1925) -> {"Lo","AL"}; +lookup(1926) -> {"Lo","AL"}; +lookup(1927) -> {"Lo","AL"}; +lookup(1928) -> {"Lo","AL"}; +lookup(1929) -> {"Lo","AL"}; +lookup(1930) -> {"Lo","AL"}; +lookup(1931) -> {"Lo","AL"}; +lookup(1932) -> {"Lo","AL"}; +lookup(1933) -> {"Lo","AL"}; +lookup(1934) -> {"Lo","AL"}; +lookup(1935) -> {"Lo","AL"}; +lookup(1936) -> {"Lo","AL"}; +lookup(1937) -> {"Lo","AL"}; +lookup(1938) -> {"Lo","AL"}; +lookup(1939) -> {"Lo","AL"}; +lookup(1940) -> {"Lo","AL"}; +lookup(1941) -> {"Lo","AL"}; +lookup(1942) -> {"Lo","AL"}; +lookup(1943) -> {"Lo","AL"}; +lookup(1944) -> {"Lo","AL"}; +lookup(1945) -> {"Lo","AL"}; +lookup(1946) -> {"Lo","AL"}; +lookup(1947) -> {"Lo","AL"}; +lookup(1948) -> {"Lo","AL"}; +lookup(1949) -> {"Lo","AL"}; +lookup(1950) -> {"Lo","AL"}; +lookup(1951) -> {"Lo","AL"}; +lookup(1952) -> {"Lo","AL"}; +lookup(1953) -> {"Lo","AL"}; +lookup(1954) -> {"Lo","AL"}; +lookup(1955) -> {"Lo","AL"}; +lookup(1956) -> {"Lo","AL"}; +lookup(1957) -> {"Lo","AL"}; +lookup(1958) -> {"Mn","NSM"}; +lookup(1959) -> {"Mn","NSM"}; +lookup(1960) -> {"Mn","NSM"}; +lookup(1961) -> {"Mn","NSM"}; +lookup(1962) -> {"Mn","NSM"}; +lookup(1963) -> {"Mn","NSM"}; +lookup(1964) -> {"Mn","NSM"}; +lookup(1965) -> {"Mn","NSM"}; +lookup(1966) -> 
{"Mn","NSM"}; +lookup(1967) -> {"Mn","NSM"}; +lookup(1968) -> {"Mn","NSM"}; +lookup(1969) -> {"Lo","AL"}; +lookup(1984) -> {"Nd","R"}; +lookup(1985) -> {"Nd","R"}; +lookup(1986) -> {"Nd","R"}; +lookup(1987) -> {"Nd","R"}; +lookup(1988) -> {"Nd","R"}; +lookup(1989) -> {"Nd","R"}; +lookup(1990) -> {"Nd","R"}; +lookup(1991) -> {"Nd","R"}; +lookup(1992) -> {"Nd","R"}; +lookup(1993) -> {"Nd","R"}; +lookup(1994) -> {"Lo","R"}; +lookup(1995) -> {"Lo","R"}; +lookup(1996) -> {"Lo","R"}; +lookup(1997) -> {"Lo","R"}; +lookup(1998) -> {"Lo","R"}; +lookup(1999) -> {"Lo","R"}; +lookup(2000) -> {"Lo","R"}; +lookup(2001) -> {"Lo","R"}; +lookup(2002) -> {"Lo","R"}; +lookup(2003) -> {"Lo","R"}; +lookup(2004) -> {"Lo","R"}; +lookup(2005) -> {"Lo","R"}; +lookup(2006) -> {"Lo","R"}; +lookup(2007) -> {"Lo","R"}; +lookup(2008) -> {"Lo","R"}; +lookup(2009) -> {"Lo","R"}; +lookup(2010) -> {"Lo","R"}; +lookup(2011) -> {"Lo","R"}; +lookup(2012) -> {"Lo","R"}; +lookup(2013) -> {"Lo","R"}; +lookup(2014) -> {"Lo","R"}; +lookup(2015) -> {"Lo","R"}; +lookup(2016) -> {"Lo","R"}; +lookup(2017) -> {"Lo","R"}; +lookup(2018) -> {"Lo","R"}; +lookup(2019) -> {"Lo","R"}; +lookup(2020) -> {"Lo","R"}; +lookup(2021) -> {"Lo","R"}; +lookup(2022) -> {"Lo","R"}; +lookup(2023) -> {"Lo","R"}; +lookup(2024) -> {"Lo","R"}; +lookup(2025) -> {"Lo","R"}; +lookup(2026) -> {"Lo","R"}; +lookup(2027) -> {"Mn","NSM"}; +lookup(2028) -> {"Mn","NSM"}; +lookup(2029) -> {"Mn","NSM"}; +lookup(2030) -> {"Mn","NSM"}; +lookup(2031) -> {"Mn","NSM"}; +lookup(2032) -> {"Mn","NSM"}; +lookup(2033) -> {"Mn","NSM"}; +lookup(2034) -> {"Mn","NSM"}; +lookup(2035) -> {"Mn","NSM"}; +lookup(2036) -> {"Lm","R"}; +lookup(2037) -> {"Lm","R"}; +lookup(2038) -> {"So","ON"}; +lookup(2039) -> {"Po","ON"}; +lookup(2040) -> {"Po","ON"}; +lookup(2041) -> {"Po","ON"}; +lookup(2042) -> {"Lm","R"}; +lookup(2045) -> {"Mn","NSM"}; +lookup(2046) -> {"Sc","R"}; +lookup(2047) -> {"Sc","R"}; +lookup(2048) -> {"Lo","R"}; +lookup(2049) -> {"Lo","R"}; +lookup(2050) 
-> {"Lo","R"}; +lookup(2051) -> {"Lo","R"}; +lookup(2052) -> {"Lo","R"}; +lookup(2053) -> {"Lo","R"}; +lookup(2054) -> {"Lo","R"}; +lookup(2055) -> {"Lo","R"}; +lookup(2056) -> {"Lo","R"}; +lookup(2057) -> {"Lo","R"}; +lookup(2058) -> {"Lo","R"}; +lookup(2059) -> {"Lo","R"}; +lookup(2060) -> {"Lo","R"}; +lookup(2061) -> {"Lo","R"}; +lookup(2062) -> {"Lo","R"}; +lookup(2063) -> {"Lo","R"}; +lookup(2064) -> {"Lo","R"}; +lookup(2065) -> {"Lo","R"}; +lookup(2066) -> {"Lo","R"}; +lookup(2067) -> {"Lo","R"}; +lookup(2068) -> {"Lo","R"}; +lookup(2069) -> {"Lo","R"}; +lookup(2070) -> {"Mn","NSM"}; +lookup(2071) -> {"Mn","NSM"}; +lookup(2072) -> {"Mn","NSM"}; +lookup(2073) -> {"Mn","NSM"}; +lookup(2074) -> {"Lm","R"}; +lookup(2075) -> {"Mn","NSM"}; +lookup(2076) -> {"Mn","NSM"}; +lookup(2077) -> {"Mn","NSM"}; +lookup(2078) -> {"Mn","NSM"}; +lookup(2079) -> {"Mn","NSM"}; +lookup(2080) -> {"Mn","NSM"}; +lookup(2081) -> {"Mn","NSM"}; +lookup(2082) -> {"Mn","NSM"}; +lookup(2083) -> {"Mn","NSM"}; +lookup(2084) -> {"Lm","R"}; +lookup(2085) -> {"Mn","NSM"}; +lookup(2086) -> {"Mn","NSM"}; +lookup(2087) -> {"Mn","NSM"}; +lookup(2088) -> {"Lm","R"}; +lookup(2089) -> {"Mn","NSM"}; +lookup(2090) -> {"Mn","NSM"}; +lookup(2091) -> {"Mn","NSM"}; +lookup(2092) -> {"Mn","NSM"}; +lookup(2093) -> {"Mn","NSM"}; +lookup(2096) -> {"Po","R"}; +lookup(2097) -> {"Po","R"}; +lookup(2098) -> {"Po","R"}; +lookup(2099) -> {"Po","R"}; +lookup(2100) -> {"Po","R"}; +lookup(2101) -> {"Po","R"}; +lookup(2102) -> {"Po","R"}; +lookup(2103) -> {"Po","R"}; +lookup(2104) -> {"Po","R"}; +lookup(2105) -> {"Po","R"}; +lookup(2106) -> {"Po","R"}; +lookup(2107) -> {"Po","R"}; +lookup(2108) -> {"Po","R"}; +lookup(2109) -> {"Po","R"}; +lookup(2110) -> {"Po","R"}; +lookup(2112) -> {"Lo","R"}; +lookup(2113) -> {"Lo","R"}; +lookup(2114) -> {"Lo","R"}; +lookup(2115) -> {"Lo","R"}; +lookup(2116) -> {"Lo","R"}; +lookup(2117) -> {"Lo","R"}; +lookup(2118) -> {"Lo","R"}; +lookup(2119) -> {"Lo","R"}; +lookup(2120) -> {"Lo","R"}; 
+lookup(2121) -> {"Lo","R"}; +lookup(2122) -> {"Lo","R"}; +lookup(2123) -> {"Lo","R"}; +lookup(2124) -> {"Lo","R"}; +lookup(2125) -> {"Lo","R"}; +lookup(2126) -> {"Lo","R"}; +lookup(2127) -> {"Lo","R"}; +lookup(2128) -> {"Lo","R"}; +lookup(2129) -> {"Lo","R"}; +lookup(2130) -> {"Lo","R"}; +lookup(2131) -> {"Lo","R"}; +lookup(2132) -> {"Lo","R"}; +lookup(2133) -> {"Lo","R"}; +lookup(2134) -> {"Lo","R"}; +lookup(2135) -> {"Lo","R"}; +lookup(2136) -> {"Lo","R"}; +lookup(2137) -> {"Mn","NSM"}; +lookup(2138) -> {"Mn","NSM"}; +lookup(2139) -> {"Mn","NSM"}; +lookup(2142) -> {"Po","R"}; +lookup(2144) -> {"Lo","AL"}; +lookup(2145) -> {"Lo","AL"}; +lookup(2146) -> {"Lo","AL"}; +lookup(2147) -> {"Lo","AL"}; +lookup(2148) -> {"Lo","AL"}; +lookup(2149) -> {"Lo","AL"}; +lookup(2150) -> {"Lo","AL"}; +lookup(2151) -> {"Lo","AL"}; +lookup(2152) -> {"Lo","AL"}; +lookup(2153) -> {"Lo","AL"}; +lookup(2154) -> {"Lo","AL"}; +lookup(2208) -> {"Lo","AL"}; +lookup(2209) -> {"Lo","AL"}; +lookup(2210) -> {"Lo","AL"}; +lookup(2211) -> {"Lo","AL"}; +lookup(2212) -> {"Lo","AL"}; +lookup(2213) -> {"Lo","AL"}; +lookup(2214) -> {"Lo","AL"}; +lookup(2215) -> {"Lo","AL"}; +lookup(2216) -> {"Lo","AL"}; +lookup(2217) -> {"Lo","AL"}; +lookup(2218) -> {"Lo","AL"}; +lookup(2219) -> {"Lo","AL"}; +lookup(2220) -> {"Lo","AL"}; +lookup(2221) -> {"Lo","AL"}; +lookup(2222) -> {"Lo","AL"}; +lookup(2223) -> {"Lo","AL"}; +lookup(2224) -> {"Lo","AL"}; +lookup(2225) -> {"Lo","AL"}; +lookup(2226) -> {"Lo","AL"}; +lookup(2227) -> {"Lo","AL"}; +lookup(2228) -> {"Lo","AL"}; +lookup(2230) -> {"Lo","AL"}; +lookup(2231) -> {"Lo","AL"}; +lookup(2232) -> {"Lo","AL"}; +lookup(2233) -> {"Lo","AL"}; +lookup(2234) -> {"Lo","AL"}; +lookup(2235) -> {"Lo","AL"}; +lookup(2236) -> {"Lo","AL"}; +lookup(2237) -> {"Lo","AL"}; +lookup(2238) -> {"Lo","AL"}; +lookup(2239) -> {"Lo","AL"}; +lookup(2240) -> {"Lo","AL"}; +lookup(2241) -> {"Lo","AL"}; +lookup(2242) -> {"Lo","AL"}; +lookup(2243) -> {"Lo","AL"}; +lookup(2244) -> {"Lo","AL"}; 
+lookup(2245) -> {"Lo","AL"}; +lookup(2246) -> {"Lo","AL"}; +lookup(2247) -> {"Lo","AL"}; +lookup(2259) -> {"Mn","NSM"}; +lookup(2260) -> {"Mn","NSM"}; +lookup(2261) -> {"Mn","NSM"}; +lookup(2262) -> {"Mn","NSM"}; +lookup(2263) -> {"Mn","NSM"}; +lookup(2264) -> {"Mn","NSM"}; +lookup(2265) -> {"Mn","NSM"}; +lookup(2266) -> {"Mn","NSM"}; +lookup(2267) -> {"Mn","NSM"}; +lookup(2268) -> {"Mn","NSM"}; +lookup(2269) -> {"Mn","NSM"}; +lookup(2270) -> {"Mn","NSM"}; +lookup(2271) -> {"Mn","NSM"}; +lookup(2272) -> {"Mn","NSM"}; +lookup(2273) -> {"Mn","NSM"}; +lookup(2274) -> {"Cf","AN"}; +lookup(2275) -> {"Mn","NSM"}; +lookup(2276) -> {"Mn","NSM"}; +lookup(2277) -> {"Mn","NSM"}; +lookup(2278) -> {"Mn","NSM"}; +lookup(2279) -> {"Mn","NSM"}; +lookup(2280) -> {"Mn","NSM"}; +lookup(2281) -> {"Mn","NSM"}; +lookup(2282) -> {"Mn","NSM"}; +lookup(2283) -> {"Mn","NSM"}; +lookup(2284) -> {"Mn","NSM"}; +lookup(2285) -> {"Mn","NSM"}; +lookup(2286) -> {"Mn","NSM"}; +lookup(2287) -> {"Mn","NSM"}; +lookup(2288) -> {"Mn","NSM"}; +lookup(2289) -> {"Mn","NSM"}; +lookup(2290) -> {"Mn","NSM"}; +lookup(2291) -> {"Mn","NSM"}; +lookup(2292) -> {"Mn","NSM"}; +lookup(2293) -> {"Mn","NSM"}; +lookup(2294) -> {"Mn","NSM"}; +lookup(2295) -> {"Mn","NSM"}; +lookup(2296) -> {"Mn","NSM"}; +lookup(2297) -> {"Mn","NSM"}; +lookup(2298) -> {"Mn","NSM"}; +lookup(2299) -> {"Mn","NSM"}; +lookup(2300) -> {"Mn","NSM"}; +lookup(2301) -> {"Mn","NSM"}; +lookup(2302) -> {"Mn","NSM"}; +lookup(2303) -> {"Mn","NSM"}; +lookup(2304) -> {"Mn","NSM"}; +lookup(2305) -> {"Mn","NSM"}; +lookup(2306) -> {"Mn","NSM"}; +lookup(2307) -> {"Mc","L"}; +lookup(2308) -> {"Lo","L"}; +lookup(2309) -> {"Lo","L"}; +lookup(2310) -> {"Lo","L"}; +lookup(2311) -> {"Lo","L"}; +lookup(2312) -> {"Lo","L"}; +lookup(2313) -> {"Lo","L"}; +lookup(2314) -> {"Lo","L"}; +lookup(2315) -> {"Lo","L"}; +lookup(2316) -> {"Lo","L"}; +lookup(2317) -> {"Lo","L"}; +lookup(2318) -> {"Lo","L"}; +lookup(2319) -> {"Lo","L"}; +lookup(2320) -> {"Lo","L"}; +lookup(2321) -> 
{"Lo","L"}; +lookup(2322) -> {"Lo","L"}; +lookup(2323) -> {"Lo","L"}; +lookup(2324) -> {"Lo","L"}; +lookup(2325) -> {"Lo","L"}; +lookup(2326) -> {"Lo","L"}; +lookup(2327) -> {"Lo","L"}; +lookup(2328) -> {"Lo","L"}; +lookup(2329) -> {"Lo","L"}; +lookup(2330) -> {"Lo","L"}; +lookup(2331) -> {"Lo","L"}; +lookup(2332) -> {"Lo","L"}; +lookup(2333) -> {"Lo","L"}; +lookup(2334) -> {"Lo","L"}; +lookup(2335) -> {"Lo","L"}; +lookup(2336) -> {"Lo","L"}; +lookup(2337) -> {"Lo","L"}; +lookup(2338) -> {"Lo","L"}; +lookup(2339) -> {"Lo","L"}; +lookup(2340) -> {"Lo","L"}; +lookup(2341) -> {"Lo","L"}; +lookup(2342) -> {"Lo","L"}; +lookup(2343) -> {"Lo","L"}; +lookup(2344) -> {"Lo","L"}; +lookup(2345) -> {"Lo","L"}; +lookup(2346) -> {"Lo","L"}; +lookup(2347) -> {"Lo","L"}; +lookup(2348) -> {"Lo","L"}; +lookup(2349) -> {"Lo","L"}; +lookup(2350) -> {"Lo","L"}; +lookup(2351) -> {"Lo","L"}; +lookup(2352) -> {"Lo","L"}; +lookup(2353) -> {"Lo","L"}; +lookup(2354) -> {"Lo","L"}; +lookup(2355) -> {"Lo","L"}; +lookup(2356) -> {"Lo","L"}; +lookup(2357) -> {"Lo","L"}; +lookup(2358) -> {"Lo","L"}; +lookup(2359) -> {"Lo","L"}; +lookup(2360) -> {"Lo","L"}; +lookup(2361) -> {"Lo","L"}; +lookup(2362) -> {"Mn","NSM"}; +lookup(2363) -> {"Mc","L"}; +lookup(2364) -> {"Mn","NSM"}; +lookup(2365) -> {"Lo","L"}; +lookup(2366) -> {"Mc","L"}; +lookup(2367) -> {"Mc","L"}; +lookup(2368) -> {"Mc","L"}; +lookup(2369) -> {"Mn","NSM"}; +lookup(2370) -> {"Mn","NSM"}; +lookup(2371) -> {"Mn","NSM"}; +lookup(2372) -> {"Mn","NSM"}; +lookup(2373) -> {"Mn","NSM"}; +lookup(2374) -> {"Mn","NSM"}; +lookup(2375) -> {"Mn","NSM"}; +lookup(2376) -> {"Mn","NSM"}; +lookup(2377) -> {"Mc","L"}; +lookup(2378) -> {"Mc","L"}; +lookup(2379) -> {"Mc","L"}; +lookup(2380) -> {"Mc","L"}; +lookup(2381) -> {"Mn","NSM"}; +lookup(2382) -> {"Mc","L"}; +lookup(2383) -> {"Mc","L"}; +lookup(2384) -> {"Lo","L"}; +lookup(2385) -> {"Mn","NSM"}; +lookup(2386) -> {"Mn","NSM"}; +lookup(2387) -> {"Mn","NSM"}; +lookup(2388) -> {"Mn","NSM"}; +lookup(2389) 
-> {"Mn","NSM"}; +lookup(2390) -> {"Mn","NSM"}; +lookup(2391) -> {"Mn","NSM"}; +lookup(2392) -> {"Lo","L"}; +lookup(2393) -> {"Lo","L"}; +lookup(2394) -> {"Lo","L"}; +lookup(2395) -> {"Lo","L"}; +lookup(2396) -> {"Lo","L"}; +lookup(2397) -> {"Lo","L"}; +lookup(2398) -> {"Lo","L"}; +lookup(2399) -> {"Lo","L"}; +lookup(2400) -> {"Lo","L"}; +lookup(2401) -> {"Lo","L"}; +lookup(2402) -> {"Mn","NSM"}; +lookup(2403) -> {"Mn","NSM"}; +lookup(2404) -> {"Po","L"}; +lookup(2405) -> {"Po","L"}; +lookup(2406) -> {"Nd","L"}; +lookup(2407) -> {"Nd","L"}; +lookup(2408) -> {"Nd","L"}; +lookup(2409) -> {"Nd","L"}; +lookup(2410) -> {"Nd","L"}; +lookup(2411) -> {"Nd","L"}; +lookup(2412) -> {"Nd","L"}; +lookup(2413) -> {"Nd","L"}; +lookup(2414) -> {"Nd","L"}; +lookup(2415) -> {"Nd","L"}; +lookup(2416) -> {"Po","L"}; +lookup(2417) -> {"Lm","L"}; +lookup(2418) -> {"Lo","L"}; +lookup(2419) -> {"Lo","L"}; +lookup(2420) -> {"Lo","L"}; +lookup(2421) -> {"Lo","L"}; +lookup(2422) -> {"Lo","L"}; +lookup(2423) -> {"Lo","L"}; +lookup(2424) -> {"Lo","L"}; +lookup(2425) -> {"Lo","L"}; +lookup(2426) -> {"Lo","L"}; +lookup(2427) -> {"Lo","L"}; +lookup(2428) -> {"Lo","L"}; +lookup(2429) -> {"Lo","L"}; +lookup(2430) -> {"Lo","L"}; +lookup(2431) -> {"Lo","L"}; +lookup(2432) -> {"Lo","L"}; +lookup(2433) -> {"Mn","NSM"}; +lookup(2434) -> {"Mc","L"}; +lookup(2435) -> {"Mc","L"}; +lookup(2437) -> {"Lo","L"}; +lookup(2438) -> {"Lo","L"}; +lookup(2439) -> {"Lo","L"}; +lookup(2440) -> {"Lo","L"}; +lookup(2441) -> {"Lo","L"}; +lookup(2442) -> {"Lo","L"}; +lookup(2443) -> {"Lo","L"}; +lookup(2444) -> {"Lo","L"}; +lookup(2447) -> {"Lo","L"}; +lookup(2448) -> {"Lo","L"}; +lookup(2451) -> {"Lo","L"}; +lookup(2452) -> {"Lo","L"}; +lookup(2453) -> {"Lo","L"}; +lookup(2454) -> {"Lo","L"}; +lookup(2455) -> {"Lo","L"}; +lookup(2456) -> {"Lo","L"}; +lookup(2457) -> {"Lo","L"}; +lookup(2458) -> {"Lo","L"}; +lookup(2459) -> {"Lo","L"}; +lookup(2460) -> {"Lo","L"}; +lookup(2461) -> {"Lo","L"}; +lookup(2462) -> {"Lo","L"}; 
+lookup(2463) -> {"Lo","L"}; +lookup(2464) -> {"Lo","L"}; +lookup(2465) -> {"Lo","L"}; +lookup(2466) -> {"Lo","L"}; +lookup(2467) -> {"Lo","L"}; +lookup(2468) -> {"Lo","L"}; +lookup(2469) -> {"Lo","L"}; +lookup(2470) -> {"Lo","L"}; +lookup(2471) -> {"Lo","L"}; +lookup(2472) -> {"Lo","L"}; +lookup(2474) -> {"Lo","L"}; +lookup(2475) -> {"Lo","L"}; +lookup(2476) -> {"Lo","L"}; +lookup(2477) -> {"Lo","L"}; +lookup(2478) -> {"Lo","L"}; +lookup(2479) -> {"Lo","L"}; +lookup(2480) -> {"Lo","L"}; +lookup(2482) -> {"Lo","L"}; +lookup(2486) -> {"Lo","L"}; +lookup(2487) -> {"Lo","L"}; +lookup(2488) -> {"Lo","L"}; +lookup(2489) -> {"Lo","L"}; +lookup(2492) -> {"Mn","NSM"}; +lookup(2493) -> {"Lo","L"}; +lookup(2494) -> {"Mc","L"}; +lookup(2495) -> {"Mc","L"}; +lookup(2496) -> {"Mc","L"}; +lookup(2497) -> {"Mn","NSM"}; +lookup(2498) -> {"Mn","NSM"}; +lookup(2499) -> {"Mn","NSM"}; +lookup(2500) -> {"Mn","NSM"}; +lookup(2503) -> {"Mc","L"}; +lookup(2504) -> {"Mc","L"}; +lookup(2507) -> {"Mc","L"}; +lookup(2508) -> {"Mc","L"}; +lookup(2509) -> {"Mn","NSM"}; +lookup(2510) -> {"Lo","L"}; +lookup(2519) -> {"Mc","L"}; +lookup(2524) -> {"Lo","L"}; +lookup(2525) -> {"Lo","L"}; +lookup(2527) -> {"Lo","L"}; +lookup(2528) -> {"Lo","L"}; +lookup(2529) -> {"Lo","L"}; +lookup(2530) -> {"Mn","NSM"}; +lookup(2531) -> {"Mn","NSM"}; +lookup(2534) -> {"Nd","L"}; +lookup(2535) -> {"Nd","L"}; +lookup(2536) -> {"Nd","L"}; +lookup(2537) -> {"Nd","L"}; +lookup(2538) -> {"Nd","L"}; +lookup(2539) -> {"Nd","L"}; +lookup(2540) -> {"Nd","L"}; +lookup(2541) -> {"Nd","L"}; +lookup(2542) -> {"Nd","L"}; +lookup(2543) -> {"Nd","L"}; +lookup(2544) -> {"Lo","L"}; +lookup(2545) -> {"Lo","L"}; +lookup(2546) -> {"Sc","ET"}; +lookup(2547) -> {"Sc","ET"}; +lookup(2548) -> {"No","L"}; +lookup(2549) -> {"No","L"}; +lookup(2550) -> {"No","L"}; +lookup(2551) -> {"No","L"}; +lookup(2552) -> {"No","L"}; +lookup(2553) -> {"No","L"}; +lookup(2554) -> {"So","L"}; +lookup(2555) -> {"Sc","ET"}; +lookup(2556) -> {"Lo","L"}; 
+lookup(2557) -> {"Po","L"}; +lookup(2558) -> {"Mn","NSM"}; +lookup(2561) -> {"Mn","NSM"}; +lookup(2562) -> {"Mn","NSM"}; +lookup(2563) -> {"Mc","L"}; +lookup(2565) -> {"Lo","L"}; +lookup(2566) -> {"Lo","L"}; +lookup(2567) -> {"Lo","L"}; +lookup(2568) -> {"Lo","L"}; +lookup(2569) -> {"Lo","L"}; +lookup(2570) -> {"Lo","L"}; +lookup(2575) -> {"Lo","L"}; +lookup(2576) -> {"Lo","L"}; +lookup(2579) -> {"Lo","L"}; +lookup(2580) -> {"Lo","L"}; +lookup(2581) -> {"Lo","L"}; +lookup(2582) -> {"Lo","L"}; +lookup(2583) -> {"Lo","L"}; +lookup(2584) -> {"Lo","L"}; +lookup(2585) -> {"Lo","L"}; +lookup(2586) -> {"Lo","L"}; +lookup(2587) -> {"Lo","L"}; +lookup(2588) -> {"Lo","L"}; +lookup(2589) -> {"Lo","L"}; +lookup(2590) -> {"Lo","L"}; +lookup(2591) -> {"Lo","L"}; +lookup(2592) -> {"Lo","L"}; +lookup(2593) -> {"Lo","L"}; +lookup(2594) -> {"Lo","L"}; +lookup(2595) -> {"Lo","L"}; +lookup(2596) -> {"Lo","L"}; +lookup(2597) -> {"Lo","L"}; +lookup(2598) -> {"Lo","L"}; +lookup(2599) -> {"Lo","L"}; +lookup(2600) -> {"Lo","L"}; +lookup(2602) -> {"Lo","L"}; +lookup(2603) -> {"Lo","L"}; +lookup(2604) -> {"Lo","L"}; +lookup(2605) -> {"Lo","L"}; +lookup(2606) -> {"Lo","L"}; +lookup(2607) -> {"Lo","L"}; +lookup(2608) -> {"Lo","L"}; +lookup(2610) -> {"Lo","L"}; +lookup(2611) -> {"Lo","L"}; +lookup(2613) -> {"Lo","L"}; +lookup(2614) -> {"Lo","L"}; +lookup(2616) -> {"Lo","L"}; +lookup(2617) -> {"Lo","L"}; +lookup(2620) -> {"Mn","NSM"}; +lookup(2622) -> {"Mc","L"}; +lookup(2623) -> {"Mc","L"}; +lookup(2624) -> {"Mc","L"}; +lookup(2625) -> {"Mn","NSM"}; +lookup(2626) -> {"Mn","NSM"}; +lookup(2631) -> {"Mn","NSM"}; +lookup(2632) -> {"Mn","NSM"}; +lookup(2635) -> {"Mn","NSM"}; +lookup(2636) -> {"Mn","NSM"}; +lookup(2637) -> {"Mn","NSM"}; +lookup(2641) -> {"Mn","NSM"}; +lookup(2649) -> {"Lo","L"}; +lookup(2650) -> {"Lo","L"}; +lookup(2651) -> {"Lo","L"}; +lookup(2652) -> {"Lo","L"}; +lookup(2654) -> {"Lo","L"}; +lookup(2662) -> {"Nd","L"}; +lookup(2663) -> {"Nd","L"}; +lookup(2664) -> {"Nd","L"}; 
+lookup(2665) -> {"Nd","L"}; +lookup(2666) -> {"Nd","L"}; +lookup(2667) -> {"Nd","L"}; +lookup(2668) -> {"Nd","L"}; +lookup(2669) -> {"Nd","L"}; +lookup(2670) -> {"Nd","L"}; +lookup(2671) -> {"Nd","L"}; +lookup(2672) -> {"Mn","NSM"}; +lookup(2673) -> {"Mn","NSM"}; +lookup(2674) -> {"Lo","L"}; +lookup(2675) -> {"Lo","L"}; +lookup(2676) -> {"Lo","L"}; +lookup(2677) -> {"Mn","NSM"}; +lookup(2678) -> {"Po","L"}; +lookup(2689) -> {"Mn","NSM"}; +lookup(2690) -> {"Mn","NSM"}; +lookup(2691) -> {"Mc","L"}; +lookup(2693) -> {"Lo","L"}; +lookup(2694) -> {"Lo","L"}; +lookup(2695) -> {"Lo","L"}; +lookup(2696) -> {"Lo","L"}; +lookup(2697) -> {"Lo","L"}; +lookup(2698) -> {"Lo","L"}; +lookup(2699) -> {"Lo","L"}; +lookup(2700) -> {"Lo","L"}; +lookup(2701) -> {"Lo","L"}; +lookup(2703) -> {"Lo","L"}; +lookup(2704) -> {"Lo","L"}; +lookup(2705) -> {"Lo","L"}; +lookup(2707) -> {"Lo","L"}; +lookup(2708) -> {"Lo","L"}; +lookup(2709) -> {"Lo","L"}; +lookup(2710) -> {"Lo","L"}; +lookup(2711) -> {"Lo","L"}; +lookup(2712) -> {"Lo","L"}; +lookup(2713) -> {"Lo","L"}; +lookup(2714) -> {"Lo","L"}; +lookup(2715) -> {"Lo","L"}; +lookup(2716) -> {"Lo","L"}; +lookup(2717) -> {"Lo","L"}; +lookup(2718) -> {"Lo","L"}; +lookup(2719) -> {"Lo","L"}; +lookup(2720) -> {"Lo","L"}; +lookup(2721) -> {"Lo","L"}; +lookup(2722) -> {"Lo","L"}; +lookup(2723) -> {"Lo","L"}; +lookup(2724) -> {"Lo","L"}; +lookup(2725) -> {"Lo","L"}; +lookup(2726) -> {"Lo","L"}; +lookup(2727) -> {"Lo","L"}; +lookup(2728) -> {"Lo","L"}; +lookup(2730) -> {"Lo","L"}; +lookup(2731) -> {"Lo","L"}; +lookup(2732) -> {"Lo","L"}; +lookup(2733) -> {"Lo","L"}; +lookup(2734) -> {"Lo","L"}; +lookup(2735) -> {"Lo","L"}; +lookup(2736) -> {"Lo","L"}; +lookup(2738) -> {"Lo","L"}; +lookup(2739) -> {"Lo","L"}; +lookup(2741) -> {"Lo","L"}; +lookup(2742) -> {"Lo","L"}; +lookup(2743) -> {"Lo","L"}; +lookup(2744) -> {"Lo","L"}; +lookup(2745) -> {"Lo","L"}; +lookup(2748) -> {"Mn","NSM"}; +lookup(2749) -> {"Lo","L"}; +lookup(2750) -> {"Mc","L"}; +lookup(2751) 
-> {"Mc","L"}; +lookup(2752) -> {"Mc","L"}; +lookup(2753) -> {"Mn","NSM"}; +lookup(2754) -> {"Mn","NSM"}; +lookup(2755) -> {"Mn","NSM"}; +lookup(2756) -> {"Mn","NSM"}; +lookup(2757) -> {"Mn","NSM"}; +lookup(2759) -> {"Mn","NSM"}; +lookup(2760) -> {"Mn","NSM"}; +lookup(2761) -> {"Mc","L"}; +lookup(2763) -> {"Mc","L"}; +lookup(2764) -> {"Mc","L"}; +lookup(2765) -> {"Mn","NSM"}; +lookup(2768) -> {"Lo","L"}; +lookup(2784) -> {"Lo","L"}; +lookup(2785) -> {"Lo","L"}; +lookup(2786) -> {"Mn","NSM"}; +lookup(2787) -> {"Mn","NSM"}; +lookup(2790) -> {"Nd","L"}; +lookup(2791) -> {"Nd","L"}; +lookup(2792) -> {"Nd","L"}; +lookup(2793) -> {"Nd","L"}; +lookup(2794) -> {"Nd","L"}; +lookup(2795) -> {"Nd","L"}; +lookup(2796) -> {"Nd","L"}; +lookup(2797) -> {"Nd","L"}; +lookup(2798) -> {"Nd","L"}; +lookup(2799) -> {"Nd","L"}; +lookup(2800) -> {"Po","L"}; +lookup(2801) -> {"Sc","ET"}; +lookup(2809) -> {"Lo","L"}; +lookup(2810) -> {"Mn","NSM"}; +lookup(2811) -> {"Mn","NSM"}; +lookup(2812) -> {"Mn","NSM"}; +lookup(2813) -> {"Mn","NSM"}; +lookup(2814) -> {"Mn","NSM"}; +lookup(2815) -> {"Mn","NSM"}; +lookup(2817) -> {"Mn","NSM"}; +lookup(2818) -> {"Mc","L"}; +lookup(2819) -> {"Mc","L"}; +lookup(2821) -> {"Lo","L"}; +lookup(2822) -> {"Lo","L"}; +lookup(2823) -> {"Lo","L"}; +lookup(2824) -> {"Lo","L"}; +lookup(2825) -> {"Lo","L"}; +lookup(2826) -> {"Lo","L"}; +lookup(2827) -> {"Lo","L"}; +lookup(2828) -> {"Lo","L"}; +lookup(2831) -> {"Lo","L"}; +lookup(2832) -> {"Lo","L"}; +lookup(2835) -> {"Lo","L"}; +lookup(2836) -> {"Lo","L"}; +lookup(2837) -> {"Lo","L"}; +lookup(2838) -> {"Lo","L"}; +lookup(2839) -> {"Lo","L"}; +lookup(2840) -> {"Lo","L"}; +lookup(2841) -> {"Lo","L"}; +lookup(2842) -> {"Lo","L"}; +lookup(2843) -> {"Lo","L"}; +lookup(2844) -> {"Lo","L"}; +lookup(2845) -> {"Lo","L"}; +lookup(2846) -> {"Lo","L"}; +lookup(2847) -> {"Lo","L"}; +lookup(2848) -> {"Lo","L"}; +lookup(2849) -> {"Lo","L"}; +lookup(2850) -> {"Lo","L"}; +lookup(2851) -> {"Lo","L"}; +lookup(2852) -> {"Lo","L"}; 
+lookup(2853) -> {"Lo","L"}; +lookup(2854) -> {"Lo","L"}; +lookup(2855) -> {"Lo","L"}; +lookup(2856) -> {"Lo","L"}; +lookup(2858) -> {"Lo","L"}; +lookup(2859) -> {"Lo","L"}; +lookup(2860) -> {"Lo","L"}; +lookup(2861) -> {"Lo","L"}; +lookup(2862) -> {"Lo","L"}; +lookup(2863) -> {"Lo","L"}; +lookup(2864) -> {"Lo","L"}; +lookup(2866) -> {"Lo","L"}; +lookup(2867) -> {"Lo","L"}; +lookup(2869) -> {"Lo","L"}; +lookup(2870) -> {"Lo","L"}; +lookup(2871) -> {"Lo","L"}; +lookup(2872) -> {"Lo","L"}; +lookup(2873) -> {"Lo","L"}; +lookup(2876) -> {"Mn","NSM"}; +lookup(2877) -> {"Lo","L"}; +lookup(2878) -> {"Mc","L"}; +lookup(2879) -> {"Mn","NSM"}; +lookup(2880) -> {"Mc","L"}; +lookup(2881) -> {"Mn","NSM"}; +lookup(2882) -> {"Mn","NSM"}; +lookup(2883) -> {"Mn","NSM"}; +lookup(2884) -> {"Mn","NSM"}; +lookup(2887) -> {"Mc","L"}; +lookup(2888) -> {"Mc","L"}; +lookup(2891) -> {"Mc","L"}; +lookup(2892) -> {"Mc","L"}; +lookup(2893) -> {"Mn","NSM"}; +lookup(2901) -> {"Mn","NSM"}; +lookup(2902) -> {"Mn","NSM"}; +lookup(2903) -> {"Mc","L"}; +lookup(2908) -> {"Lo","L"}; +lookup(2909) -> {"Lo","L"}; +lookup(2911) -> {"Lo","L"}; +lookup(2912) -> {"Lo","L"}; +lookup(2913) -> {"Lo","L"}; +lookup(2914) -> {"Mn","NSM"}; +lookup(2915) -> {"Mn","NSM"}; +lookup(2918) -> {"Nd","L"}; +lookup(2919) -> {"Nd","L"}; +lookup(2920) -> {"Nd","L"}; +lookup(2921) -> {"Nd","L"}; +lookup(2922) -> {"Nd","L"}; +lookup(2923) -> {"Nd","L"}; +lookup(2924) -> {"Nd","L"}; +lookup(2925) -> {"Nd","L"}; +lookup(2926) -> {"Nd","L"}; +lookup(2927) -> {"Nd","L"}; +lookup(2928) -> {"So","L"}; +lookup(2929) -> {"Lo","L"}; +lookup(2930) -> {"No","L"}; +lookup(2931) -> {"No","L"}; +lookup(2932) -> {"No","L"}; +lookup(2933) -> {"No","L"}; +lookup(2934) -> {"No","L"}; +lookup(2935) -> {"No","L"}; +lookup(2946) -> {"Mn","NSM"}; +lookup(2947) -> {"Lo","L"}; +lookup(2949) -> {"Lo","L"}; +lookup(2950) -> {"Lo","L"}; +lookup(2951) -> {"Lo","L"}; +lookup(2952) -> {"Lo","L"}; +lookup(2953) -> {"Lo","L"}; +lookup(2954) -> {"Lo","L"}; 
+lookup(2958) -> {"Lo","L"}; +lookup(2959) -> {"Lo","L"}; +lookup(2960) -> {"Lo","L"}; +lookup(2962) -> {"Lo","L"}; +lookup(2963) -> {"Lo","L"}; +lookup(2964) -> {"Lo","L"}; +lookup(2965) -> {"Lo","L"}; +lookup(2969) -> {"Lo","L"}; +lookup(2970) -> {"Lo","L"}; +lookup(2972) -> {"Lo","L"}; +lookup(2974) -> {"Lo","L"}; +lookup(2975) -> {"Lo","L"}; +lookup(2979) -> {"Lo","L"}; +lookup(2980) -> {"Lo","L"}; +lookup(2984) -> {"Lo","L"}; +lookup(2985) -> {"Lo","L"}; +lookup(2986) -> {"Lo","L"}; +lookup(2990) -> {"Lo","L"}; +lookup(2991) -> {"Lo","L"}; +lookup(2992) -> {"Lo","L"}; +lookup(2993) -> {"Lo","L"}; +lookup(2994) -> {"Lo","L"}; +lookup(2995) -> {"Lo","L"}; +lookup(2996) -> {"Lo","L"}; +lookup(2997) -> {"Lo","L"}; +lookup(2998) -> {"Lo","L"}; +lookup(2999) -> {"Lo","L"}; +lookup(3000) -> {"Lo","L"}; +lookup(3001) -> {"Lo","L"}; +lookup(3006) -> {"Mc","L"}; +lookup(3007) -> {"Mc","L"}; +lookup(3008) -> {"Mn","NSM"}; +lookup(3009) -> {"Mc","L"}; +lookup(3010) -> {"Mc","L"}; +lookup(3014) -> {"Mc","L"}; +lookup(3015) -> {"Mc","L"}; +lookup(3016) -> {"Mc","L"}; +lookup(3018) -> {"Mc","L"}; +lookup(3019) -> {"Mc","L"}; +lookup(3020) -> {"Mc","L"}; +lookup(3021) -> {"Mn","NSM"}; +lookup(3024) -> {"Lo","L"}; +lookup(3031) -> {"Mc","L"}; +lookup(3046) -> {"Nd","L"}; +lookup(3047) -> {"Nd","L"}; +lookup(3048) -> {"Nd","L"}; +lookup(3049) -> {"Nd","L"}; +lookup(3050) -> {"Nd","L"}; +lookup(3051) -> {"Nd","L"}; +lookup(3052) -> {"Nd","L"}; +lookup(3053) -> {"Nd","L"}; +lookup(3054) -> {"Nd","L"}; +lookup(3055) -> {"Nd","L"}; +lookup(3056) -> {"No","L"}; +lookup(3057) -> {"No","L"}; +lookup(3058) -> {"No","L"}; +lookup(3059) -> {"So","ON"}; +lookup(3060) -> {"So","ON"}; +lookup(3061) -> {"So","ON"}; +lookup(3062) -> {"So","ON"}; +lookup(3063) -> {"So","ON"}; +lookup(3064) -> {"So","ON"}; +lookup(3065) -> {"Sc","ET"}; +lookup(3066) -> {"So","ON"}; +lookup(3072) -> {"Mn","NSM"}; +lookup(3073) -> {"Mc","L"}; +lookup(3074) -> {"Mc","L"}; +lookup(3075) -> {"Mc","L"}; +lookup(3076) 
-> {"Mn","NSM"}; +lookup(3077) -> {"Lo","L"}; +lookup(3078) -> {"Lo","L"}; +lookup(3079) -> {"Lo","L"}; +lookup(3080) -> {"Lo","L"}; +lookup(3081) -> {"Lo","L"}; +lookup(3082) -> {"Lo","L"}; +lookup(3083) -> {"Lo","L"}; +lookup(3084) -> {"Lo","L"}; +lookup(3086) -> {"Lo","L"}; +lookup(3087) -> {"Lo","L"}; +lookup(3088) -> {"Lo","L"}; +lookup(3090) -> {"Lo","L"}; +lookup(3091) -> {"Lo","L"}; +lookup(3092) -> {"Lo","L"}; +lookup(3093) -> {"Lo","L"}; +lookup(3094) -> {"Lo","L"}; +lookup(3095) -> {"Lo","L"}; +lookup(3096) -> {"Lo","L"}; +lookup(3097) -> {"Lo","L"}; +lookup(3098) -> {"Lo","L"}; +lookup(3099) -> {"Lo","L"}; +lookup(3100) -> {"Lo","L"}; +lookup(3101) -> {"Lo","L"}; +lookup(3102) -> {"Lo","L"}; +lookup(3103) -> {"Lo","L"}; +lookup(3104) -> {"Lo","L"}; +lookup(3105) -> {"Lo","L"}; +lookup(3106) -> {"Lo","L"}; +lookup(3107) -> {"Lo","L"}; +lookup(3108) -> {"Lo","L"}; +lookup(3109) -> {"Lo","L"}; +lookup(3110) -> {"Lo","L"}; +lookup(3111) -> {"Lo","L"}; +lookup(3112) -> {"Lo","L"}; +lookup(3114) -> {"Lo","L"}; +lookup(3115) -> {"Lo","L"}; +lookup(3116) -> {"Lo","L"}; +lookup(3117) -> {"Lo","L"}; +lookup(3118) -> {"Lo","L"}; +lookup(3119) -> {"Lo","L"}; +lookup(3120) -> {"Lo","L"}; +lookup(3121) -> {"Lo","L"}; +lookup(3122) -> {"Lo","L"}; +lookup(3123) -> {"Lo","L"}; +lookup(3124) -> {"Lo","L"}; +lookup(3125) -> {"Lo","L"}; +lookup(3126) -> {"Lo","L"}; +lookup(3127) -> {"Lo","L"}; +lookup(3128) -> {"Lo","L"}; +lookup(3129) -> {"Lo","L"}; +lookup(3133) -> {"Lo","L"}; +lookup(3134) -> {"Mn","NSM"}; +lookup(3135) -> {"Mn","NSM"}; +lookup(3136) -> {"Mn","NSM"}; +lookup(3137) -> {"Mc","L"}; +lookup(3138) -> {"Mc","L"}; +lookup(3139) -> {"Mc","L"}; +lookup(3140) -> {"Mc","L"}; +lookup(3142) -> {"Mn","NSM"}; +lookup(3143) -> {"Mn","NSM"}; +lookup(3144) -> {"Mn","NSM"}; +lookup(3146) -> {"Mn","NSM"}; +lookup(3147) -> {"Mn","NSM"}; +lookup(3148) -> {"Mn","NSM"}; +lookup(3149) -> {"Mn","NSM"}; +lookup(3157) -> {"Mn","NSM"}; +lookup(3158) -> {"Mn","NSM"}; +lookup(3160) 
-> {"Lo","L"}; +lookup(3161) -> {"Lo","L"}; +lookup(3162) -> {"Lo","L"}; +lookup(3168) -> {"Lo","L"}; +lookup(3169) -> {"Lo","L"}; +lookup(3170) -> {"Mn","NSM"}; +lookup(3171) -> {"Mn","NSM"}; +lookup(3174) -> {"Nd","L"}; +lookup(3175) -> {"Nd","L"}; +lookup(3176) -> {"Nd","L"}; +lookup(3177) -> {"Nd","L"}; +lookup(3178) -> {"Nd","L"}; +lookup(3179) -> {"Nd","L"}; +lookup(3180) -> {"Nd","L"}; +lookup(3181) -> {"Nd","L"}; +lookup(3182) -> {"Nd","L"}; +lookup(3183) -> {"Nd","L"}; +lookup(3191) -> {"Po","L"}; +lookup(3192) -> {"No","ON"}; +lookup(3193) -> {"No","ON"}; +lookup(3194) -> {"No","ON"}; +lookup(3195) -> {"No","ON"}; +lookup(3196) -> {"No","ON"}; +lookup(3197) -> {"No","ON"}; +lookup(3198) -> {"No","ON"}; +lookup(3199) -> {"So","L"}; +lookup(3200) -> {"Lo","L"}; +lookup(3201) -> {"Mn","NSM"}; +lookup(3202) -> {"Mc","L"}; +lookup(3203) -> {"Mc","L"}; +lookup(3204) -> {"Po","L"}; +lookup(3205) -> {"Lo","L"}; +lookup(3206) -> {"Lo","L"}; +lookup(3207) -> {"Lo","L"}; +lookup(3208) -> {"Lo","L"}; +lookup(3209) -> {"Lo","L"}; +lookup(3210) -> {"Lo","L"}; +lookup(3211) -> {"Lo","L"}; +lookup(3212) -> {"Lo","L"}; +lookup(3214) -> {"Lo","L"}; +lookup(3215) -> {"Lo","L"}; +lookup(3216) -> {"Lo","L"}; +lookup(3218) -> {"Lo","L"}; +lookup(3219) -> {"Lo","L"}; +lookup(3220) -> {"Lo","L"}; +lookup(3221) -> {"Lo","L"}; +lookup(3222) -> {"Lo","L"}; +lookup(3223) -> {"Lo","L"}; +lookup(3224) -> {"Lo","L"}; +lookup(3225) -> {"Lo","L"}; +lookup(3226) -> {"Lo","L"}; +lookup(3227) -> {"Lo","L"}; +lookup(3228) -> {"Lo","L"}; +lookup(3229) -> {"Lo","L"}; +lookup(3230) -> {"Lo","L"}; +lookup(3231) -> {"Lo","L"}; +lookup(3232) -> {"Lo","L"}; +lookup(3233) -> {"Lo","L"}; +lookup(3234) -> {"Lo","L"}; +lookup(3235) -> {"Lo","L"}; +lookup(3236) -> {"Lo","L"}; +lookup(3237) -> {"Lo","L"}; +lookup(3238) -> {"Lo","L"}; +lookup(3239) -> {"Lo","L"}; +lookup(3240) -> {"Lo","L"}; +lookup(3242) -> {"Lo","L"}; +lookup(3243) -> {"Lo","L"}; +lookup(3244) -> {"Lo","L"}; +lookup(3245) -> {"Lo","L"}; 
+lookup(3246) -> {"Lo","L"}; +lookup(3247) -> {"Lo","L"}; +lookup(3248) -> {"Lo","L"}; +lookup(3249) -> {"Lo","L"}; +lookup(3250) -> {"Lo","L"}; +lookup(3251) -> {"Lo","L"}; +lookup(3253) -> {"Lo","L"}; +lookup(3254) -> {"Lo","L"}; +lookup(3255) -> {"Lo","L"}; +lookup(3256) -> {"Lo","L"}; +lookup(3257) -> {"Lo","L"}; +lookup(3260) -> {"Mn","NSM"}; +lookup(3261) -> {"Lo","L"}; +lookup(3262) -> {"Mc","L"}; +lookup(3263) -> {"Mn","L"}; +lookup(3264) -> {"Mc","L"}; +lookup(3265) -> {"Mc","L"}; +lookup(3266) -> {"Mc","L"}; +lookup(3267) -> {"Mc","L"}; +lookup(3268) -> {"Mc","L"}; +lookup(3270) -> {"Mn","L"}; +lookup(3271) -> {"Mc","L"}; +lookup(3272) -> {"Mc","L"}; +lookup(3274) -> {"Mc","L"}; +lookup(3275) -> {"Mc","L"}; +lookup(3276) -> {"Mn","NSM"}; +lookup(3277) -> {"Mn","NSM"}; +lookup(3285) -> {"Mc","L"}; +lookup(3286) -> {"Mc","L"}; +lookup(3294) -> {"Lo","L"}; +lookup(3296) -> {"Lo","L"}; +lookup(3297) -> {"Lo","L"}; +lookup(3298) -> {"Mn","NSM"}; +lookup(3299) -> {"Mn","NSM"}; +lookup(3302) -> {"Nd","L"}; +lookup(3303) -> {"Nd","L"}; +lookup(3304) -> {"Nd","L"}; +lookup(3305) -> {"Nd","L"}; +lookup(3306) -> {"Nd","L"}; +lookup(3307) -> {"Nd","L"}; +lookup(3308) -> {"Nd","L"}; +lookup(3309) -> {"Nd","L"}; +lookup(3310) -> {"Nd","L"}; +lookup(3311) -> {"Nd","L"}; +lookup(3313) -> {"Lo","L"}; +lookup(3314) -> {"Lo","L"}; +lookup(3328) -> {"Mn","NSM"}; +lookup(3329) -> {"Mn","NSM"}; +lookup(3330) -> {"Mc","L"}; +lookup(3331) -> {"Mc","L"}; +lookup(3332) -> {"Lo","L"}; +lookup(3333) -> {"Lo","L"}; +lookup(3334) -> {"Lo","L"}; +lookup(3335) -> {"Lo","L"}; +lookup(3336) -> {"Lo","L"}; +lookup(3337) -> {"Lo","L"}; +lookup(3338) -> {"Lo","L"}; +lookup(3339) -> {"Lo","L"}; +lookup(3340) -> {"Lo","L"}; +lookup(3342) -> {"Lo","L"}; +lookup(3343) -> {"Lo","L"}; +lookup(3344) -> {"Lo","L"}; +lookup(3346) -> {"Lo","L"}; +lookup(3347) -> {"Lo","L"}; +lookup(3348) -> {"Lo","L"}; +lookup(3349) -> {"Lo","L"}; +lookup(3350) -> {"Lo","L"}; +lookup(3351) -> {"Lo","L"}; +lookup(3352) 
-> {"Lo","L"}; +lookup(3353) -> {"Lo","L"}; +lookup(3354) -> {"Lo","L"}; +lookup(3355) -> {"Lo","L"}; +lookup(3356) -> {"Lo","L"}; +lookup(3357) -> {"Lo","L"}; +lookup(3358) -> {"Lo","L"}; +lookup(3359) -> {"Lo","L"}; +lookup(3360) -> {"Lo","L"}; +lookup(3361) -> {"Lo","L"}; +lookup(3362) -> {"Lo","L"}; +lookup(3363) -> {"Lo","L"}; +lookup(3364) -> {"Lo","L"}; +lookup(3365) -> {"Lo","L"}; +lookup(3366) -> {"Lo","L"}; +lookup(3367) -> {"Lo","L"}; +lookup(3368) -> {"Lo","L"}; +lookup(3369) -> {"Lo","L"}; +lookup(3370) -> {"Lo","L"}; +lookup(3371) -> {"Lo","L"}; +lookup(3372) -> {"Lo","L"}; +lookup(3373) -> {"Lo","L"}; +lookup(3374) -> {"Lo","L"}; +lookup(3375) -> {"Lo","L"}; +lookup(3376) -> {"Lo","L"}; +lookup(3377) -> {"Lo","L"}; +lookup(3378) -> {"Lo","L"}; +lookup(3379) -> {"Lo","L"}; +lookup(3380) -> {"Lo","L"}; +lookup(3381) -> {"Lo","L"}; +lookup(3382) -> {"Lo","L"}; +lookup(3383) -> {"Lo","L"}; +lookup(3384) -> {"Lo","L"}; +lookup(3385) -> {"Lo","L"}; +lookup(3386) -> {"Lo","L"}; +lookup(3387) -> {"Mn","NSM"}; +lookup(3388) -> {"Mn","NSM"}; +lookup(3389) -> {"Lo","L"}; +lookup(3390) -> {"Mc","L"}; +lookup(3391) -> {"Mc","L"}; +lookup(3392) -> {"Mc","L"}; +lookup(3393) -> {"Mn","NSM"}; +lookup(3394) -> {"Mn","NSM"}; +lookup(3395) -> {"Mn","NSM"}; +lookup(3396) -> {"Mn","NSM"}; +lookup(3398) -> {"Mc","L"}; +lookup(3399) -> {"Mc","L"}; +lookup(3400) -> {"Mc","L"}; +lookup(3402) -> {"Mc","L"}; +lookup(3403) -> {"Mc","L"}; +lookup(3404) -> {"Mc","L"}; +lookup(3405) -> {"Mn","NSM"}; +lookup(3406) -> {"Lo","L"}; +lookup(3407) -> {"So","L"}; +lookup(3412) -> {"Lo","L"}; +lookup(3413) -> {"Lo","L"}; +lookup(3414) -> {"Lo","L"}; +lookup(3415) -> {"Mc","L"}; +lookup(3416) -> {"No","L"}; +lookup(3417) -> {"No","L"}; +lookup(3418) -> {"No","L"}; +lookup(3419) -> {"No","L"}; +lookup(3420) -> {"No","L"}; +lookup(3421) -> {"No","L"}; +lookup(3422) -> {"No","L"}; +lookup(3423) -> {"Lo","L"}; +lookup(3424) -> {"Lo","L"}; +lookup(3425) -> {"Lo","L"}; +lookup(3426) -> 
{"Mn","NSM"}; +lookup(3427) -> {"Mn","NSM"}; +lookup(3430) -> {"Nd","L"}; +lookup(3431) -> {"Nd","L"}; +lookup(3432) -> {"Nd","L"}; +lookup(3433) -> {"Nd","L"}; +lookup(3434) -> {"Nd","L"}; +lookup(3435) -> {"Nd","L"}; +lookup(3436) -> {"Nd","L"}; +lookup(3437) -> {"Nd","L"}; +lookup(3438) -> {"Nd","L"}; +lookup(3439) -> {"Nd","L"}; +lookup(3440) -> {"No","L"}; +lookup(3441) -> {"No","L"}; +lookup(3442) -> {"No","L"}; +lookup(3443) -> {"No","L"}; +lookup(3444) -> {"No","L"}; +lookup(3445) -> {"No","L"}; +lookup(3446) -> {"No","L"}; +lookup(3447) -> {"No","L"}; +lookup(3448) -> {"No","L"}; +lookup(3449) -> {"So","L"}; +lookup(3450) -> {"Lo","L"}; +lookup(3451) -> {"Lo","L"}; +lookup(3452) -> {"Lo","L"}; +lookup(3453) -> {"Lo","L"}; +lookup(3454) -> {"Lo","L"}; +lookup(3455) -> {"Lo","L"}; +lookup(3457) -> {"Mn","NSM"}; +lookup(3458) -> {"Mc","L"}; +lookup(3459) -> {"Mc","L"}; +lookup(3461) -> {"Lo","L"}; +lookup(3462) -> {"Lo","L"}; +lookup(3463) -> {"Lo","L"}; +lookup(3464) -> {"Lo","L"}; +lookup(3465) -> {"Lo","L"}; +lookup(3466) -> {"Lo","L"}; +lookup(3467) -> {"Lo","L"}; +lookup(3468) -> {"Lo","L"}; +lookup(3469) -> {"Lo","L"}; +lookup(3470) -> {"Lo","L"}; +lookup(3471) -> {"Lo","L"}; +lookup(3472) -> {"Lo","L"}; +lookup(3473) -> {"Lo","L"}; +lookup(3474) -> {"Lo","L"}; +lookup(3475) -> {"Lo","L"}; +lookup(3476) -> {"Lo","L"}; +lookup(3477) -> {"Lo","L"}; +lookup(3478) -> {"Lo","L"}; +lookup(3482) -> {"Lo","L"}; +lookup(3483) -> {"Lo","L"}; +lookup(3484) -> {"Lo","L"}; +lookup(3485) -> {"Lo","L"}; +lookup(3486) -> {"Lo","L"}; +lookup(3487) -> {"Lo","L"}; +lookup(3488) -> {"Lo","L"}; +lookup(3489) -> {"Lo","L"}; +lookup(3490) -> {"Lo","L"}; +lookup(3491) -> {"Lo","L"}; +lookup(3492) -> {"Lo","L"}; +lookup(3493) -> {"Lo","L"}; +lookup(3494) -> {"Lo","L"}; +lookup(3495) -> {"Lo","L"}; +lookup(3496) -> {"Lo","L"}; +lookup(3497) -> {"Lo","L"}; +lookup(3498) -> {"Lo","L"}; +lookup(3499) -> {"Lo","L"}; +lookup(3500) -> {"Lo","L"}; +lookup(3501) -> {"Lo","L"}; 
+lookup(3502) -> {"Lo","L"}; +lookup(3503) -> {"Lo","L"}; +lookup(3504) -> {"Lo","L"}; +lookup(3505) -> {"Lo","L"}; +lookup(3507) -> {"Lo","L"}; +lookup(3508) -> {"Lo","L"}; +lookup(3509) -> {"Lo","L"}; +lookup(3510) -> {"Lo","L"}; +lookup(3511) -> {"Lo","L"}; +lookup(3512) -> {"Lo","L"}; +lookup(3513) -> {"Lo","L"}; +lookup(3514) -> {"Lo","L"}; +lookup(3515) -> {"Lo","L"}; +lookup(3517) -> {"Lo","L"}; +lookup(3520) -> {"Lo","L"}; +lookup(3521) -> {"Lo","L"}; +lookup(3522) -> {"Lo","L"}; +lookup(3523) -> {"Lo","L"}; +lookup(3524) -> {"Lo","L"}; +lookup(3525) -> {"Lo","L"}; +lookup(3526) -> {"Lo","L"}; +lookup(3530) -> {"Mn","NSM"}; +lookup(3535) -> {"Mc","L"}; +lookup(3536) -> {"Mc","L"}; +lookup(3537) -> {"Mc","L"}; +lookup(3538) -> {"Mn","NSM"}; +lookup(3539) -> {"Mn","NSM"}; +lookup(3540) -> {"Mn","NSM"}; +lookup(3542) -> {"Mn","NSM"}; +lookup(3544) -> {"Mc","L"}; +lookup(3545) -> {"Mc","L"}; +lookup(3546) -> {"Mc","L"}; +lookup(3547) -> {"Mc","L"}; +lookup(3548) -> {"Mc","L"}; +lookup(3549) -> {"Mc","L"}; +lookup(3550) -> {"Mc","L"}; +lookup(3551) -> {"Mc","L"}; +lookup(3558) -> {"Nd","L"}; +lookup(3559) -> {"Nd","L"}; +lookup(3560) -> {"Nd","L"}; +lookup(3561) -> {"Nd","L"}; +lookup(3562) -> {"Nd","L"}; +lookup(3563) -> {"Nd","L"}; +lookup(3564) -> {"Nd","L"}; +lookup(3565) -> {"Nd","L"}; +lookup(3566) -> {"Nd","L"}; +lookup(3567) -> {"Nd","L"}; +lookup(3570) -> {"Mc","L"}; +lookup(3571) -> {"Mc","L"}; +lookup(3572) -> {"Po","L"}; +lookup(3585) -> {"Lo","L"}; +lookup(3586) -> {"Lo","L"}; +lookup(3587) -> {"Lo","L"}; +lookup(3588) -> {"Lo","L"}; +lookup(3589) -> {"Lo","L"}; +lookup(3590) -> {"Lo","L"}; +lookup(3591) -> {"Lo","L"}; +lookup(3592) -> {"Lo","L"}; +lookup(3593) -> {"Lo","L"}; +lookup(3594) -> {"Lo","L"}; +lookup(3595) -> {"Lo","L"}; +lookup(3596) -> {"Lo","L"}; +lookup(3597) -> {"Lo","L"}; +lookup(3598) -> {"Lo","L"}; +lookup(3599) -> {"Lo","L"}; +lookup(3600) -> {"Lo","L"}; +lookup(3601) -> {"Lo","L"}; +lookup(3602) -> {"Lo","L"}; +lookup(3603) -> 
{"Lo","L"}; +lookup(3604) -> {"Lo","L"}; +lookup(3605) -> {"Lo","L"}; +lookup(3606) -> {"Lo","L"}; +lookup(3607) -> {"Lo","L"}; +lookup(3608) -> {"Lo","L"}; +lookup(3609) -> {"Lo","L"}; +lookup(3610) -> {"Lo","L"}; +lookup(3611) -> {"Lo","L"}; +lookup(3612) -> {"Lo","L"}; +lookup(3613) -> {"Lo","L"}; +lookup(3614) -> {"Lo","L"}; +lookup(3615) -> {"Lo","L"}; +lookup(3616) -> {"Lo","L"}; +lookup(3617) -> {"Lo","L"}; +lookup(3618) -> {"Lo","L"}; +lookup(3619) -> {"Lo","L"}; +lookup(3620) -> {"Lo","L"}; +lookup(3621) -> {"Lo","L"}; +lookup(3622) -> {"Lo","L"}; +lookup(3623) -> {"Lo","L"}; +lookup(3624) -> {"Lo","L"}; +lookup(3625) -> {"Lo","L"}; +lookup(3626) -> {"Lo","L"}; +lookup(3627) -> {"Lo","L"}; +lookup(3628) -> {"Lo","L"}; +lookup(3629) -> {"Lo","L"}; +lookup(3630) -> {"Lo","L"}; +lookup(3631) -> {"Lo","L"}; +lookup(3632) -> {"Lo","L"}; +lookup(3633) -> {"Mn","NSM"}; +lookup(3634) -> {"Lo","L"}; +lookup(3635) -> {"Lo","L"}; +lookup(3636) -> {"Mn","NSM"}; +lookup(3637) -> {"Mn","NSM"}; +lookup(3638) -> {"Mn","NSM"}; +lookup(3639) -> {"Mn","NSM"}; +lookup(3640) -> {"Mn","NSM"}; +lookup(3641) -> {"Mn","NSM"}; +lookup(3642) -> {"Mn","NSM"}; +lookup(3647) -> {"Sc","ET"}; +lookup(3648) -> {"Lo","L"}; +lookup(3649) -> {"Lo","L"}; +lookup(3650) -> {"Lo","L"}; +lookup(3651) -> {"Lo","L"}; +lookup(3652) -> {"Lo","L"}; +lookup(3653) -> {"Lo","L"}; +lookup(3654) -> {"Lm","L"}; +lookup(3655) -> {"Mn","NSM"}; +lookup(3656) -> {"Mn","NSM"}; +lookup(3657) -> {"Mn","NSM"}; +lookup(3658) -> {"Mn","NSM"}; +lookup(3659) -> {"Mn","NSM"}; +lookup(3660) -> {"Mn","NSM"}; +lookup(3661) -> {"Mn","NSM"}; +lookup(3662) -> {"Mn","NSM"}; +lookup(3663) -> {"Po","L"}; +lookup(3664) -> {"Nd","L"}; +lookup(3665) -> {"Nd","L"}; +lookup(3666) -> {"Nd","L"}; +lookup(3667) -> {"Nd","L"}; +lookup(3668) -> {"Nd","L"}; +lookup(3669) -> {"Nd","L"}; +lookup(3670) -> {"Nd","L"}; +lookup(3671) -> {"Nd","L"}; +lookup(3672) -> {"Nd","L"}; +lookup(3673) -> {"Nd","L"}; +lookup(3674) -> {"Po","L"}; 
+lookup(3675) -> {"Po","L"}; +lookup(3713) -> {"Lo","L"}; +lookup(3714) -> {"Lo","L"}; +lookup(3716) -> {"Lo","L"}; +lookup(3718) -> {"Lo","L"}; +lookup(3719) -> {"Lo","L"}; +lookup(3720) -> {"Lo","L"}; +lookup(3721) -> {"Lo","L"}; +lookup(3722) -> {"Lo","L"}; +lookup(3724) -> {"Lo","L"}; +lookup(3725) -> {"Lo","L"}; +lookup(3726) -> {"Lo","L"}; +lookup(3727) -> {"Lo","L"}; +lookup(3728) -> {"Lo","L"}; +lookup(3729) -> {"Lo","L"}; +lookup(3730) -> {"Lo","L"}; +lookup(3731) -> {"Lo","L"}; +lookup(3732) -> {"Lo","L"}; +lookup(3733) -> {"Lo","L"}; +lookup(3734) -> {"Lo","L"}; +lookup(3735) -> {"Lo","L"}; +lookup(3736) -> {"Lo","L"}; +lookup(3737) -> {"Lo","L"}; +lookup(3738) -> {"Lo","L"}; +lookup(3739) -> {"Lo","L"}; +lookup(3740) -> {"Lo","L"}; +lookup(3741) -> {"Lo","L"}; +lookup(3742) -> {"Lo","L"}; +lookup(3743) -> {"Lo","L"}; +lookup(3744) -> {"Lo","L"}; +lookup(3745) -> {"Lo","L"}; +lookup(3746) -> {"Lo","L"}; +lookup(3747) -> {"Lo","L"}; +lookup(3749) -> {"Lo","L"}; +lookup(3751) -> {"Lo","L"}; +lookup(3752) -> {"Lo","L"}; +lookup(3753) -> {"Lo","L"}; +lookup(3754) -> {"Lo","L"}; +lookup(3755) -> {"Lo","L"}; +lookup(3756) -> {"Lo","L"}; +lookup(3757) -> {"Lo","L"}; +lookup(3758) -> {"Lo","L"}; +lookup(3759) -> {"Lo","L"}; +lookup(3760) -> {"Lo","L"}; +lookup(3761) -> {"Mn","NSM"}; +lookup(3762) -> {"Lo","L"}; +lookup(3763) -> {"Lo","L"}; +lookup(3764) -> {"Mn","NSM"}; +lookup(3765) -> {"Mn","NSM"}; +lookup(3766) -> {"Mn","NSM"}; +lookup(3767) -> {"Mn","NSM"}; +lookup(3768) -> {"Mn","NSM"}; +lookup(3769) -> {"Mn","NSM"}; +lookup(3770) -> {"Mn","NSM"}; +lookup(3771) -> {"Mn","NSM"}; +lookup(3772) -> {"Mn","NSM"}; +lookup(3773) -> {"Lo","L"}; +lookup(3776) -> {"Lo","L"}; +lookup(3777) -> {"Lo","L"}; +lookup(3778) -> {"Lo","L"}; +lookup(3779) -> {"Lo","L"}; +lookup(3780) -> {"Lo","L"}; +lookup(3782) -> {"Lm","L"}; +lookup(3784) -> {"Mn","NSM"}; +lookup(3785) -> {"Mn","NSM"}; +lookup(3786) -> {"Mn","NSM"}; +lookup(3787) -> {"Mn","NSM"}; +lookup(3788) -> 
{"Mn","NSM"}; +lookup(3789) -> {"Mn","NSM"}; +lookup(3792) -> {"Nd","L"}; +lookup(3793) -> {"Nd","L"}; +lookup(3794) -> {"Nd","L"}; +lookup(3795) -> {"Nd","L"}; +lookup(3796) -> {"Nd","L"}; +lookup(3797) -> {"Nd","L"}; +lookup(3798) -> {"Nd","L"}; +lookup(3799) -> {"Nd","L"}; +lookup(3800) -> {"Nd","L"}; +lookup(3801) -> {"Nd","L"}; +lookup(3804) -> {"Lo","L"}; +lookup(3805) -> {"Lo","L"}; +lookup(3806) -> {"Lo","L"}; +lookup(3807) -> {"Lo","L"}; +lookup(3840) -> {"Lo","L"}; +lookup(3841) -> {"So","L"}; +lookup(3842) -> {"So","L"}; +lookup(3843) -> {"So","L"}; +lookup(3844) -> {"Po","L"}; +lookup(3845) -> {"Po","L"}; +lookup(3846) -> {"Po","L"}; +lookup(3847) -> {"Po","L"}; +lookup(3848) -> {"Po","L"}; +lookup(3849) -> {"Po","L"}; +lookup(3850) -> {"Po","L"}; +lookup(3851) -> {"Po","L"}; +lookup(3852) -> {"Po","L"}; +lookup(3853) -> {"Po","L"}; +lookup(3854) -> {"Po","L"}; +lookup(3855) -> {"Po","L"}; +lookup(3856) -> {"Po","L"}; +lookup(3857) -> {"Po","L"}; +lookup(3858) -> {"Po","L"}; +lookup(3859) -> {"So","L"}; +lookup(3860) -> {"Po","L"}; +lookup(3861) -> {"So","L"}; +lookup(3862) -> {"So","L"}; +lookup(3863) -> {"So","L"}; +lookup(3864) -> {"Mn","NSM"}; +lookup(3865) -> {"Mn","NSM"}; +lookup(3866) -> {"So","L"}; +lookup(3867) -> {"So","L"}; +lookup(3868) -> {"So","L"}; +lookup(3869) -> {"So","L"}; +lookup(3870) -> {"So","L"}; +lookup(3871) -> {"So","L"}; +lookup(3872) -> {"Nd","L"}; +lookup(3873) -> {"Nd","L"}; +lookup(3874) -> {"Nd","L"}; +lookup(3875) -> {"Nd","L"}; +lookup(3876) -> {"Nd","L"}; +lookup(3877) -> {"Nd","L"}; +lookup(3878) -> {"Nd","L"}; +lookup(3879) -> {"Nd","L"}; +lookup(3880) -> {"Nd","L"}; +lookup(3881) -> {"Nd","L"}; +lookup(3882) -> {"No","L"}; +lookup(3883) -> {"No","L"}; +lookup(3884) -> {"No","L"}; +lookup(3885) -> {"No","L"}; +lookup(3886) -> {"No","L"}; +lookup(3887) -> {"No","L"}; +lookup(3888) -> {"No","L"}; +lookup(3889) -> {"No","L"}; +lookup(3890) -> {"No","L"}; +lookup(3891) -> {"No","L"}; +lookup(3892) -> {"So","L"}; 
+lookup(3893) -> {"Mn","NSM"}; +lookup(3894) -> {"So","L"}; +lookup(3895) -> {"Mn","NSM"}; +lookup(3896) -> {"So","L"}; +lookup(3897) -> {"Mn","NSM"}; +lookup(3898) -> {"Ps","ON"}; +lookup(3899) -> {"Pe","ON"}; +lookup(3900) -> {"Ps","ON"}; +lookup(3901) -> {"Pe","ON"}; +lookup(3902) -> {"Mc","L"}; +lookup(3903) -> {"Mc","L"}; +lookup(3904) -> {"Lo","L"}; +lookup(3905) -> {"Lo","L"}; +lookup(3906) -> {"Lo","L"}; +lookup(3907) -> {"Lo","L"}; +lookup(3908) -> {"Lo","L"}; +lookup(3909) -> {"Lo","L"}; +lookup(3910) -> {"Lo","L"}; +lookup(3911) -> {"Lo","L"}; +lookup(3913) -> {"Lo","L"}; +lookup(3914) -> {"Lo","L"}; +lookup(3915) -> {"Lo","L"}; +lookup(3916) -> {"Lo","L"}; +lookup(3917) -> {"Lo","L"}; +lookup(3918) -> {"Lo","L"}; +lookup(3919) -> {"Lo","L"}; +lookup(3920) -> {"Lo","L"}; +lookup(3921) -> {"Lo","L"}; +lookup(3922) -> {"Lo","L"}; +lookup(3923) -> {"Lo","L"}; +lookup(3924) -> {"Lo","L"}; +lookup(3925) -> {"Lo","L"}; +lookup(3926) -> {"Lo","L"}; +lookup(3927) -> {"Lo","L"}; +lookup(3928) -> {"Lo","L"}; +lookup(3929) -> {"Lo","L"}; +lookup(3930) -> {"Lo","L"}; +lookup(3931) -> {"Lo","L"}; +lookup(3932) -> {"Lo","L"}; +lookup(3933) -> {"Lo","L"}; +lookup(3934) -> {"Lo","L"}; +lookup(3935) -> {"Lo","L"}; +lookup(3936) -> {"Lo","L"}; +lookup(3937) -> {"Lo","L"}; +lookup(3938) -> {"Lo","L"}; +lookup(3939) -> {"Lo","L"}; +lookup(3940) -> {"Lo","L"}; +lookup(3941) -> {"Lo","L"}; +lookup(3942) -> {"Lo","L"}; +lookup(3943) -> {"Lo","L"}; +lookup(3944) -> {"Lo","L"}; +lookup(3945) -> {"Lo","L"}; +lookup(3946) -> {"Lo","L"}; +lookup(3947) -> {"Lo","L"}; +lookup(3948) -> {"Lo","L"}; +lookup(3953) -> {"Mn","NSM"}; +lookup(3954) -> {"Mn","NSM"}; +lookup(3955) -> {"Mn","NSM"}; +lookup(3956) -> {"Mn","NSM"}; +lookup(3957) -> {"Mn","NSM"}; +lookup(3958) -> {"Mn","NSM"}; +lookup(3959) -> {"Mn","NSM"}; +lookup(3960) -> {"Mn","NSM"}; +lookup(3961) -> {"Mn","NSM"}; +lookup(3962) -> {"Mn","NSM"}; +lookup(3963) -> {"Mn","NSM"}; +lookup(3964) -> {"Mn","NSM"}; +lookup(3965) -> 
{"Mn","NSM"}; +lookup(3966) -> {"Mn","NSM"}; +lookup(3967) -> {"Mc","L"}; +lookup(3968) -> {"Mn","NSM"}; +lookup(3969) -> {"Mn","NSM"}; +lookup(3970) -> {"Mn","NSM"}; +lookup(3971) -> {"Mn","NSM"}; +lookup(3972) -> {"Mn","NSM"}; +lookup(3973) -> {"Po","L"}; +lookup(3974) -> {"Mn","NSM"}; +lookup(3975) -> {"Mn","NSM"}; +lookup(3976) -> {"Lo","L"}; +lookup(3977) -> {"Lo","L"}; +lookup(3978) -> {"Lo","L"}; +lookup(3979) -> {"Lo","L"}; +lookup(3980) -> {"Lo","L"}; +lookup(3981) -> {"Mn","NSM"}; +lookup(3982) -> {"Mn","NSM"}; +lookup(3983) -> {"Mn","NSM"}; +lookup(3984) -> {"Mn","NSM"}; +lookup(3985) -> {"Mn","NSM"}; +lookup(3986) -> {"Mn","NSM"}; +lookup(3987) -> {"Mn","NSM"}; +lookup(3988) -> {"Mn","NSM"}; +lookup(3989) -> {"Mn","NSM"}; +lookup(3990) -> {"Mn","NSM"}; +lookup(3991) -> {"Mn","NSM"}; +lookup(3993) -> {"Mn","NSM"}; +lookup(3994) -> {"Mn","NSM"}; +lookup(3995) -> {"Mn","NSM"}; +lookup(3996) -> {"Mn","NSM"}; +lookup(3997) -> {"Mn","NSM"}; +lookup(3998) -> {"Mn","NSM"}; +lookup(3999) -> {"Mn","NSM"}; +lookup(4000) -> {"Mn","NSM"}; +lookup(4001) -> {"Mn","NSM"}; +lookup(4002) -> {"Mn","NSM"}; +lookup(4003) -> {"Mn","NSM"}; +lookup(4004) -> {"Mn","NSM"}; +lookup(4005) -> {"Mn","NSM"}; +lookup(4006) -> {"Mn","NSM"}; +lookup(4007) -> {"Mn","NSM"}; +lookup(4008) -> {"Mn","NSM"}; +lookup(4009) -> {"Mn","NSM"}; +lookup(4010) -> {"Mn","NSM"}; +lookup(4011) -> {"Mn","NSM"}; +lookup(4012) -> {"Mn","NSM"}; +lookup(4013) -> {"Mn","NSM"}; +lookup(4014) -> {"Mn","NSM"}; +lookup(4015) -> {"Mn","NSM"}; +lookup(4016) -> {"Mn","NSM"}; +lookup(4017) -> {"Mn","NSM"}; +lookup(4018) -> {"Mn","NSM"}; +lookup(4019) -> {"Mn","NSM"}; +lookup(4020) -> {"Mn","NSM"}; +lookup(4021) -> {"Mn","NSM"}; +lookup(4022) -> {"Mn","NSM"}; +lookup(4023) -> {"Mn","NSM"}; +lookup(4024) -> {"Mn","NSM"}; +lookup(4025) -> {"Mn","NSM"}; +lookup(4026) -> {"Mn","NSM"}; +lookup(4027) -> {"Mn","NSM"}; +lookup(4028) -> {"Mn","NSM"}; +lookup(4030) -> {"So","L"}; +lookup(4031) -> {"So","L"}; +lookup(4032) -> 
{"So","L"}; +lookup(4033) -> {"So","L"}; +lookup(4034) -> {"So","L"}; +lookup(4035) -> {"So","L"}; +lookup(4036) -> {"So","L"}; +lookup(4037) -> {"So","L"}; +lookup(4038) -> {"Mn","NSM"}; +lookup(4039) -> {"So","L"}; +lookup(4040) -> {"So","L"}; +lookup(4041) -> {"So","L"}; +lookup(4042) -> {"So","L"}; +lookup(4043) -> {"So","L"}; +lookup(4044) -> {"So","L"}; +lookup(4046) -> {"So","L"}; +lookup(4047) -> {"So","L"}; +lookup(4048) -> {"Po","L"}; +lookup(4049) -> {"Po","L"}; +lookup(4050) -> {"Po","L"}; +lookup(4051) -> {"Po","L"}; +lookup(4052) -> {"Po","L"}; +lookup(4053) -> {"So","L"}; +lookup(4054) -> {"So","L"}; +lookup(4055) -> {"So","L"}; +lookup(4056) -> {"So","L"}; +lookup(4057) -> {"Po","L"}; +lookup(4058) -> {"Po","L"}; +lookup(4096) -> {"Lo","L"}; +lookup(4097) -> {"Lo","L"}; +lookup(4098) -> {"Lo","L"}; +lookup(4099) -> {"Lo","L"}; +lookup(4100) -> {"Lo","L"}; +lookup(4101) -> {"Lo","L"}; +lookup(4102) -> {"Lo","L"}; +lookup(4103) -> {"Lo","L"}; +lookup(4104) -> {"Lo","L"}; +lookup(4105) -> {"Lo","L"}; +lookup(4106) -> {"Lo","L"}; +lookup(4107) -> {"Lo","L"}; +lookup(4108) -> {"Lo","L"}; +lookup(4109) -> {"Lo","L"}; +lookup(4110) -> {"Lo","L"}; +lookup(4111) -> {"Lo","L"}; +lookup(4112) -> {"Lo","L"}; +lookup(4113) -> {"Lo","L"}; +lookup(4114) -> {"Lo","L"}; +lookup(4115) -> {"Lo","L"}; +lookup(4116) -> {"Lo","L"}; +lookup(4117) -> {"Lo","L"}; +lookup(4118) -> {"Lo","L"}; +lookup(4119) -> {"Lo","L"}; +lookup(4120) -> {"Lo","L"}; +lookup(4121) -> {"Lo","L"}; +lookup(4122) -> {"Lo","L"}; +lookup(4123) -> {"Lo","L"}; +lookup(4124) -> {"Lo","L"}; +lookup(4125) -> {"Lo","L"}; +lookup(4126) -> {"Lo","L"}; +lookup(4127) -> {"Lo","L"}; +lookup(4128) -> {"Lo","L"}; +lookup(4129) -> {"Lo","L"}; +lookup(4130) -> {"Lo","L"}; +lookup(4131) -> {"Lo","L"}; +lookup(4132) -> {"Lo","L"}; +lookup(4133) -> {"Lo","L"}; +lookup(4134) -> {"Lo","L"}; +lookup(4135) -> {"Lo","L"}; +lookup(4136) -> {"Lo","L"}; +lookup(4137) -> {"Lo","L"}; +lookup(4138) -> {"Lo","L"}; +lookup(4139) 
-> {"Mc","L"}; +lookup(4140) -> {"Mc","L"}; +lookup(4141) -> {"Mn","NSM"}; +lookup(4142) -> {"Mn","NSM"}; +lookup(4143) -> {"Mn","NSM"}; +lookup(4144) -> {"Mn","NSM"}; +lookup(4145) -> {"Mc","L"}; +lookup(4146) -> {"Mn","NSM"}; +lookup(4147) -> {"Mn","NSM"}; +lookup(4148) -> {"Mn","NSM"}; +lookup(4149) -> {"Mn","NSM"}; +lookup(4150) -> {"Mn","NSM"}; +lookup(4151) -> {"Mn","NSM"}; +lookup(4152) -> {"Mc","L"}; +lookup(4153) -> {"Mn","NSM"}; +lookup(4154) -> {"Mn","NSM"}; +lookup(4155) -> {"Mc","L"}; +lookup(4156) -> {"Mc","L"}; +lookup(4157) -> {"Mn","NSM"}; +lookup(4158) -> {"Mn","NSM"}; +lookup(4159) -> {"Lo","L"}; +lookup(4160) -> {"Nd","L"}; +lookup(4161) -> {"Nd","L"}; +lookup(4162) -> {"Nd","L"}; +lookup(4163) -> {"Nd","L"}; +lookup(4164) -> {"Nd","L"}; +lookup(4165) -> {"Nd","L"}; +lookup(4166) -> {"Nd","L"}; +lookup(4167) -> {"Nd","L"}; +lookup(4168) -> {"Nd","L"}; +lookup(4169) -> {"Nd","L"}; +lookup(4170) -> {"Po","L"}; +lookup(4171) -> {"Po","L"}; +lookup(4172) -> {"Po","L"}; +lookup(4173) -> {"Po","L"}; +lookup(4174) -> {"Po","L"}; +lookup(4175) -> {"Po","L"}; +lookup(4176) -> {"Lo","L"}; +lookup(4177) -> {"Lo","L"}; +lookup(4178) -> {"Lo","L"}; +lookup(4179) -> {"Lo","L"}; +lookup(4180) -> {"Lo","L"}; +lookup(4181) -> {"Lo","L"}; +lookup(4182) -> {"Mc","L"}; +lookup(4183) -> {"Mc","L"}; +lookup(4184) -> {"Mn","NSM"}; +lookup(4185) -> {"Mn","NSM"}; +lookup(4186) -> {"Lo","L"}; +lookup(4187) -> {"Lo","L"}; +lookup(4188) -> {"Lo","L"}; +lookup(4189) -> {"Lo","L"}; +lookup(4190) -> {"Mn","NSM"}; +lookup(4191) -> {"Mn","NSM"}; +lookup(4192) -> {"Mn","NSM"}; +lookup(4193) -> {"Lo","L"}; +lookup(4194) -> {"Mc","L"}; +lookup(4195) -> {"Mc","L"}; +lookup(4196) -> {"Mc","L"}; +lookup(4197) -> {"Lo","L"}; +lookup(4198) -> {"Lo","L"}; +lookup(4199) -> {"Mc","L"}; +lookup(4200) -> {"Mc","L"}; +lookup(4201) -> {"Mc","L"}; +lookup(4202) -> {"Mc","L"}; +lookup(4203) -> {"Mc","L"}; +lookup(4204) -> {"Mc","L"}; +lookup(4205) -> {"Mc","L"}; +lookup(4206) -> {"Lo","L"}; 
+lookup(4207) -> {"Lo","L"}; +lookup(4208) -> {"Lo","L"}; +lookup(4209) -> {"Mn","NSM"}; +lookup(4210) -> {"Mn","NSM"}; +lookup(4211) -> {"Mn","NSM"}; +lookup(4212) -> {"Mn","NSM"}; +lookup(4213) -> {"Lo","L"}; +lookup(4214) -> {"Lo","L"}; +lookup(4215) -> {"Lo","L"}; +lookup(4216) -> {"Lo","L"}; +lookup(4217) -> {"Lo","L"}; +lookup(4218) -> {"Lo","L"}; +lookup(4219) -> {"Lo","L"}; +lookup(4220) -> {"Lo","L"}; +lookup(4221) -> {"Lo","L"}; +lookup(4222) -> {"Lo","L"}; +lookup(4223) -> {"Lo","L"}; +lookup(4224) -> {"Lo","L"}; +lookup(4225) -> {"Lo","L"}; +lookup(4226) -> {"Mn","NSM"}; +lookup(4227) -> {"Mc","L"}; +lookup(4228) -> {"Mc","L"}; +lookup(4229) -> {"Mn","NSM"}; +lookup(4230) -> {"Mn","NSM"}; +lookup(4231) -> {"Mc","L"}; +lookup(4232) -> {"Mc","L"}; +lookup(4233) -> {"Mc","L"}; +lookup(4234) -> {"Mc","L"}; +lookup(4235) -> {"Mc","L"}; +lookup(4236) -> {"Mc","L"}; +lookup(4237) -> {"Mn","NSM"}; +lookup(4238) -> {"Lo","L"}; +lookup(4239) -> {"Mc","L"}; +lookup(4240) -> {"Nd","L"}; +lookup(4241) -> {"Nd","L"}; +lookup(4242) -> {"Nd","L"}; +lookup(4243) -> {"Nd","L"}; +lookup(4244) -> {"Nd","L"}; +lookup(4245) -> {"Nd","L"}; +lookup(4246) -> {"Nd","L"}; +lookup(4247) -> {"Nd","L"}; +lookup(4248) -> {"Nd","L"}; +lookup(4249) -> {"Nd","L"}; +lookup(4250) -> {"Mc","L"}; +lookup(4251) -> {"Mc","L"}; +lookup(4252) -> {"Mc","L"}; +lookup(4253) -> {"Mn","NSM"}; +lookup(4254) -> {"So","L"}; +lookup(4255) -> {"So","L"}; +lookup(4256) -> {"Lu","L"}; +lookup(4257) -> {"Lu","L"}; +lookup(4258) -> {"Lu","L"}; +lookup(4259) -> {"Lu","L"}; +lookup(4260) -> {"Lu","L"}; +lookup(4261) -> {"Lu","L"}; +lookup(4262) -> {"Lu","L"}; +lookup(4263) -> {"Lu","L"}; +lookup(4264) -> {"Lu","L"}; +lookup(4265) -> {"Lu","L"}; +lookup(4266) -> {"Lu","L"}; +lookup(4267) -> {"Lu","L"}; +lookup(4268) -> {"Lu","L"}; +lookup(4269) -> {"Lu","L"}; +lookup(4270) -> {"Lu","L"}; +lookup(4271) -> {"Lu","L"}; +lookup(4272) -> {"Lu","L"}; +lookup(4273) -> {"Lu","L"}; +lookup(4274) -> {"Lu","L"}; 
+lookup(4275) -> {"Lu","L"}; +lookup(4276) -> {"Lu","L"}; +lookup(4277) -> {"Lu","L"}; +lookup(4278) -> {"Lu","L"}; +lookup(4279) -> {"Lu","L"}; +lookup(4280) -> {"Lu","L"}; +lookup(4281) -> {"Lu","L"}; +lookup(4282) -> {"Lu","L"}; +lookup(4283) -> {"Lu","L"}; +lookup(4284) -> {"Lu","L"}; +lookup(4285) -> {"Lu","L"}; +lookup(4286) -> {"Lu","L"}; +lookup(4287) -> {"Lu","L"}; +lookup(4288) -> {"Lu","L"}; +lookup(4289) -> {"Lu","L"}; +lookup(4290) -> {"Lu","L"}; +lookup(4291) -> {"Lu","L"}; +lookup(4292) -> {"Lu","L"}; +lookup(4293) -> {"Lu","L"}; +lookup(4295) -> {"Lu","L"}; +lookup(4301) -> {"Lu","L"}; +lookup(4304) -> {"Ll","L"}; +lookup(4305) -> {"Ll","L"}; +lookup(4306) -> {"Ll","L"}; +lookup(4307) -> {"Ll","L"}; +lookup(4308) -> {"Ll","L"}; +lookup(4309) -> {"Ll","L"}; +lookup(4310) -> {"Ll","L"}; +lookup(4311) -> {"Ll","L"}; +lookup(4312) -> {"Ll","L"}; +lookup(4313) -> {"Ll","L"}; +lookup(4314) -> {"Ll","L"}; +lookup(4315) -> {"Ll","L"}; +lookup(4316) -> {"Ll","L"}; +lookup(4317) -> {"Ll","L"}; +lookup(4318) -> {"Ll","L"}; +lookup(4319) -> {"Ll","L"}; +lookup(4320) -> {"Ll","L"}; +lookup(4321) -> {"Ll","L"}; +lookup(4322) -> {"Ll","L"}; +lookup(4323) -> {"Ll","L"}; +lookup(4324) -> {"Ll","L"}; +lookup(4325) -> {"Ll","L"}; +lookup(4326) -> {"Ll","L"}; +lookup(4327) -> {"Ll","L"}; +lookup(4328) -> {"Ll","L"}; +lookup(4329) -> {"Ll","L"}; +lookup(4330) -> {"Ll","L"}; +lookup(4331) -> {"Ll","L"}; +lookup(4332) -> {"Ll","L"}; +lookup(4333) -> {"Ll","L"}; +lookup(4334) -> {"Ll","L"}; +lookup(4335) -> {"Ll","L"}; +lookup(4336) -> {"Ll","L"}; +lookup(4337) -> {"Ll","L"}; +lookup(4338) -> {"Ll","L"}; +lookup(4339) -> {"Ll","L"}; +lookup(4340) -> {"Ll","L"}; +lookup(4341) -> {"Ll","L"}; +lookup(4342) -> {"Ll","L"}; +lookup(4343) -> {"Ll","L"}; +lookup(4344) -> {"Ll","L"}; +lookup(4345) -> {"Ll","L"}; +lookup(4346) -> {"Ll","L"}; +lookup(4347) -> {"Po","L"}; +lookup(4348) -> {"Lm","L"}; +lookup(4349) -> {"Ll","L"}; +lookup(4350) -> {"Ll","L"}; +lookup(4351) -> 
{"Ll","L"}; +lookup(4352) -> {"Lo","L"}; +lookup(4353) -> {"Lo","L"}; +lookup(4354) -> {"Lo","L"}; +lookup(4355) -> {"Lo","L"}; +lookup(4356) -> {"Lo","L"}; +lookup(4357) -> {"Lo","L"}; +lookup(4358) -> {"Lo","L"}; +lookup(4359) -> {"Lo","L"}; +lookup(4360) -> {"Lo","L"}; +lookup(4361) -> {"Lo","L"}; +lookup(4362) -> {"Lo","L"}; +lookup(4363) -> {"Lo","L"}; +lookup(4364) -> {"Lo","L"}; +lookup(4365) -> {"Lo","L"}; +lookup(4366) -> {"Lo","L"}; +lookup(4367) -> {"Lo","L"}; +lookup(4368) -> {"Lo","L"}; +lookup(4369) -> {"Lo","L"}; +lookup(4370) -> {"Lo","L"}; +lookup(4371) -> {"Lo","L"}; +lookup(4372) -> {"Lo","L"}; +lookup(4373) -> {"Lo","L"}; +lookup(4374) -> {"Lo","L"}; +lookup(4375) -> {"Lo","L"}; +lookup(4376) -> {"Lo","L"}; +lookup(4377) -> {"Lo","L"}; +lookup(4378) -> {"Lo","L"}; +lookup(4379) -> {"Lo","L"}; +lookup(4380) -> {"Lo","L"}; +lookup(4381) -> {"Lo","L"}; +lookup(4382) -> {"Lo","L"}; +lookup(4383) -> {"Lo","L"}; +lookup(4384) -> {"Lo","L"}; +lookup(4385) -> {"Lo","L"}; +lookup(4386) -> {"Lo","L"}; +lookup(4387) -> {"Lo","L"}; +lookup(4388) -> {"Lo","L"}; +lookup(4389) -> {"Lo","L"}; +lookup(4390) -> {"Lo","L"}; +lookup(4391) -> {"Lo","L"}; +lookup(4392) -> {"Lo","L"}; +lookup(4393) -> {"Lo","L"}; +lookup(4394) -> {"Lo","L"}; +lookup(4395) -> {"Lo","L"}; +lookup(4396) -> {"Lo","L"}; +lookup(4397) -> {"Lo","L"}; +lookup(4398) -> {"Lo","L"}; +lookup(4399) -> {"Lo","L"}; +lookup(4400) -> {"Lo","L"}; +lookup(4401) -> {"Lo","L"}; +lookup(4402) -> {"Lo","L"}; +lookup(4403) -> {"Lo","L"}; +lookup(4404) -> {"Lo","L"}; +lookup(4405) -> {"Lo","L"}; +lookup(4406) -> {"Lo","L"}; +lookup(4407) -> {"Lo","L"}; +lookup(4408) -> {"Lo","L"}; +lookup(4409) -> {"Lo","L"}; +lookup(4410) -> {"Lo","L"}; +lookup(4411) -> {"Lo","L"}; +lookup(4412) -> {"Lo","L"}; +lookup(4413) -> {"Lo","L"}; +lookup(4414) -> {"Lo","L"}; +lookup(4415) -> {"Lo","L"}; +lookup(4416) -> {"Lo","L"}; +lookup(4417) -> {"Lo","L"}; +lookup(4418) -> {"Lo","L"}; +lookup(4419) -> {"Lo","L"}; +lookup(4420) 
-> {"Lo","L"}; +lookup(4421) -> {"Lo","L"}; +lookup(4422) -> {"Lo","L"}; +lookup(4423) -> {"Lo","L"}; +lookup(4424) -> {"Lo","L"}; +lookup(4425) -> {"Lo","L"}; +lookup(4426) -> {"Lo","L"}; +lookup(4427) -> {"Lo","L"}; +lookup(4428) -> {"Lo","L"}; +lookup(4429) -> {"Lo","L"}; +lookup(4430) -> {"Lo","L"}; +lookup(4431) -> {"Lo","L"}; +lookup(4432) -> {"Lo","L"}; +lookup(4433) -> {"Lo","L"}; +lookup(4434) -> {"Lo","L"}; +lookup(4435) -> {"Lo","L"}; +lookup(4436) -> {"Lo","L"}; +lookup(4437) -> {"Lo","L"}; +lookup(4438) -> {"Lo","L"}; +lookup(4439) -> {"Lo","L"}; +lookup(4440) -> {"Lo","L"}; +lookup(4441) -> {"Lo","L"}; +lookup(4442) -> {"Lo","L"}; +lookup(4443) -> {"Lo","L"}; +lookup(4444) -> {"Lo","L"}; +lookup(4445) -> {"Lo","L"}; +lookup(4446) -> {"Lo","L"}; +lookup(4447) -> {"Lo","L"}; +lookup(4448) -> {"Lo","L"}; +lookup(4449) -> {"Lo","L"}; +lookup(4450) -> {"Lo","L"}; +lookup(4451) -> {"Lo","L"}; +lookup(4452) -> {"Lo","L"}; +lookup(4453) -> {"Lo","L"}; +lookup(4454) -> {"Lo","L"}; +lookup(4455) -> {"Lo","L"}; +lookup(4456) -> {"Lo","L"}; +lookup(4457) -> {"Lo","L"}; +lookup(4458) -> {"Lo","L"}; +lookup(4459) -> {"Lo","L"}; +lookup(4460) -> {"Lo","L"}; +lookup(4461) -> {"Lo","L"}; +lookup(4462) -> {"Lo","L"}; +lookup(4463) -> {"Lo","L"}; +lookup(4464) -> {"Lo","L"}; +lookup(4465) -> {"Lo","L"}; +lookup(4466) -> {"Lo","L"}; +lookup(4467) -> {"Lo","L"}; +lookup(4468) -> {"Lo","L"}; +lookup(4469) -> {"Lo","L"}; +lookup(4470) -> {"Lo","L"}; +lookup(4471) -> {"Lo","L"}; +lookup(4472) -> {"Lo","L"}; +lookup(4473) -> {"Lo","L"}; +lookup(4474) -> {"Lo","L"}; +lookup(4475) -> {"Lo","L"}; +lookup(4476) -> {"Lo","L"}; +lookup(4477) -> {"Lo","L"}; +lookup(4478) -> {"Lo","L"}; +lookup(4479) -> {"Lo","L"}; +lookup(4480) -> {"Lo","L"}; +lookup(4481) -> {"Lo","L"}; +lookup(4482) -> {"Lo","L"}; +lookup(4483) -> {"Lo","L"}; +lookup(4484) -> {"Lo","L"}; +lookup(4485) -> {"Lo","L"}; +lookup(4486) -> {"Lo","L"}; +lookup(4487) -> {"Lo","L"}; +lookup(4488) -> {"Lo","L"}; 
+lookup(4489) -> {"Lo","L"}; +lookup(4490) -> {"Lo","L"}; +lookup(4491) -> {"Lo","L"}; +lookup(4492) -> {"Lo","L"}; +lookup(4493) -> {"Lo","L"}; +lookup(4494) -> {"Lo","L"}; +lookup(4495) -> {"Lo","L"}; +lookup(4496) -> {"Lo","L"}; +lookup(4497) -> {"Lo","L"}; +lookup(4498) -> {"Lo","L"}; +lookup(4499) -> {"Lo","L"}; +lookup(4500) -> {"Lo","L"}; +lookup(4501) -> {"Lo","L"}; +lookup(4502) -> {"Lo","L"}; +lookup(4503) -> {"Lo","L"}; +lookup(4504) -> {"Lo","L"}; +lookup(4505) -> {"Lo","L"}; +lookup(4506) -> {"Lo","L"}; +lookup(4507) -> {"Lo","L"}; +lookup(4508) -> {"Lo","L"}; +lookup(4509) -> {"Lo","L"}; +lookup(4510) -> {"Lo","L"}; +lookup(4511) -> {"Lo","L"}; +lookup(4512) -> {"Lo","L"}; +lookup(4513) -> {"Lo","L"}; +lookup(4514) -> {"Lo","L"}; +lookup(4515) -> {"Lo","L"}; +lookup(4516) -> {"Lo","L"}; +lookup(4517) -> {"Lo","L"}; +lookup(4518) -> {"Lo","L"}; +lookup(4519) -> {"Lo","L"}; +lookup(4520) -> {"Lo","L"}; +lookup(4521) -> {"Lo","L"}; +lookup(4522) -> {"Lo","L"}; +lookup(4523) -> {"Lo","L"}; +lookup(4524) -> {"Lo","L"}; +lookup(4525) -> {"Lo","L"}; +lookup(4526) -> {"Lo","L"}; +lookup(4527) -> {"Lo","L"}; +lookup(4528) -> {"Lo","L"}; +lookup(4529) -> {"Lo","L"}; +lookup(4530) -> {"Lo","L"}; +lookup(4531) -> {"Lo","L"}; +lookup(4532) -> {"Lo","L"}; +lookup(4533) -> {"Lo","L"}; +lookup(4534) -> {"Lo","L"}; +lookup(4535) -> {"Lo","L"}; +lookup(4536) -> {"Lo","L"}; +lookup(4537) -> {"Lo","L"}; +lookup(4538) -> {"Lo","L"}; +lookup(4539) -> {"Lo","L"}; +lookup(4540) -> {"Lo","L"}; +lookup(4541) -> {"Lo","L"}; +lookup(4542) -> {"Lo","L"}; +lookup(4543) -> {"Lo","L"}; +lookup(4544) -> {"Lo","L"}; +lookup(4545) -> {"Lo","L"}; +lookup(4546) -> {"Lo","L"}; +lookup(4547) -> {"Lo","L"}; +lookup(4548) -> {"Lo","L"}; +lookup(4549) -> {"Lo","L"}; +lookup(4550) -> {"Lo","L"}; +lookup(4551) -> {"Lo","L"}; +lookup(4552) -> {"Lo","L"}; +lookup(4553) -> {"Lo","L"}; +lookup(4554) -> {"Lo","L"}; +lookup(4555) -> {"Lo","L"}; +lookup(4556) -> {"Lo","L"}; +lookup(4557) -> 
{"Lo","L"}; +lookup(4558) -> {"Lo","L"}; +lookup(4559) -> {"Lo","L"}; +lookup(4560) -> {"Lo","L"}; +lookup(4561) -> {"Lo","L"}; +lookup(4562) -> {"Lo","L"}; +lookup(4563) -> {"Lo","L"}; +lookup(4564) -> {"Lo","L"}; +lookup(4565) -> {"Lo","L"}; +lookup(4566) -> {"Lo","L"}; +lookup(4567) -> {"Lo","L"}; +lookup(4568) -> {"Lo","L"}; +lookup(4569) -> {"Lo","L"}; +lookup(4570) -> {"Lo","L"}; +lookup(4571) -> {"Lo","L"}; +lookup(4572) -> {"Lo","L"}; +lookup(4573) -> {"Lo","L"}; +lookup(4574) -> {"Lo","L"}; +lookup(4575) -> {"Lo","L"}; +lookup(4576) -> {"Lo","L"}; +lookup(4577) -> {"Lo","L"}; +lookup(4578) -> {"Lo","L"}; +lookup(4579) -> {"Lo","L"}; +lookup(4580) -> {"Lo","L"}; +lookup(4581) -> {"Lo","L"}; +lookup(4582) -> {"Lo","L"}; +lookup(4583) -> {"Lo","L"}; +lookup(4584) -> {"Lo","L"}; +lookup(4585) -> {"Lo","L"}; +lookup(4586) -> {"Lo","L"}; +lookup(4587) -> {"Lo","L"}; +lookup(4588) -> {"Lo","L"}; +lookup(4589) -> {"Lo","L"}; +lookup(4590) -> {"Lo","L"}; +lookup(4591) -> {"Lo","L"}; +lookup(4592) -> {"Lo","L"}; +lookup(4593) -> {"Lo","L"}; +lookup(4594) -> {"Lo","L"}; +lookup(4595) -> {"Lo","L"}; +lookup(4596) -> {"Lo","L"}; +lookup(4597) -> {"Lo","L"}; +lookup(4598) -> {"Lo","L"}; +lookup(4599) -> {"Lo","L"}; +lookup(4600) -> {"Lo","L"}; +lookup(4601) -> {"Lo","L"}; +lookup(4602) -> {"Lo","L"}; +lookup(4603) -> {"Lo","L"}; +lookup(4604) -> {"Lo","L"}; +lookup(4605) -> {"Lo","L"}; +lookup(4606) -> {"Lo","L"}; +lookup(4607) -> {"Lo","L"}; +lookup(4608) -> {"Lo","L"}; +lookup(4609) -> {"Lo","L"}; +lookup(4610) -> {"Lo","L"}; +lookup(4611) -> {"Lo","L"}; +lookup(4612) -> {"Lo","L"}; +lookup(4613) -> {"Lo","L"}; +lookup(4614) -> {"Lo","L"}; +lookup(4615) -> {"Lo","L"}; +lookup(4616) -> {"Lo","L"}; +lookup(4617) -> {"Lo","L"}; +lookup(4618) -> {"Lo","L"}; +lookup(4619) -> {"Lo","L"}; +lookup(4620) -> {"Lo","L"}; +lookup(4621) -> {"Lo","L"}; +lookup(4622) -> {"Lo","L"}; +lookup(4623) -> {"Lo","L"}; +lookup(4624) -> {"Lo","L"}; +lookup(4625) -> {"Lo","L"}; +lookup(4626) 
-> {"Lo","L"}; +lookup(4627) -> {"Lo","L"}; +lookup(4628) -> {"Lo","L"}; +lookup(4629) -> {"Lo","L"}; +lookup(4630) -> {"Lo","L"}; +lookup(4631) -> {"Lo","L"}; +lookup(4632) -> {"Lo","L"}; +lookup(4633) -> {"Lo","L"}; +lookup(4634) -> {"Lo","L"}; +lookup(4635) -> {"Lo","L"}; +lookup(4636) -> {"Lo","L"}; +lookup(4637) -> {"Lo","L"}; +lookup(4638) -> {"Lo","L"}; +lookup(4639) -> {"Lo","L"}; +lookup(4640) -> {"Lo","L"}; +lookup(4641) -> {"Lo","L"}; +lookup(4642) -> {"Lo","L"}; +lookup(4643) -> {"Lo","L"}; +lookup(4644) -> {"Lo","L"}; +lookup(4645) -> {"Lo","L"}; +lookup(4646) -> {"Lo","L"}; +lookup(4647) -> {"Lo","L"}; +lookup(4648) -> {"Lo","L"}; +lookup(4649) -> {"Lo","L"}; +lookup(4650) -> {"Lo","L"}; +lookup(4651) -> {"Lo","L"}; +lookup(4652) -> {"Lo","L"}; +lookup(4653) -> {"Lo","L"}; +lookup(4654) -> {"Lo","L"}; +lookup(4655) -> {"Lo","L"}; +lookup(4656) -> {"Lo","L"}; +lookup(4657) -> {"Lo","L"}; +lookup(4658) -> {"Lo","L"}; +lookup(4659) -> {"Lo","L"}; +lookup(4660) -> {"Lo","L"}; +lookup(4661) -> {"Lo","L"}; +lookup(4662) -> {"Lo","L"}; +lookup(4663) -> {"Lo","L"}; +lookup(4664) -> {"Lo","L"}; +lookup(4665) -> {"Lo","L"}; +lookup(4666) -> {"Lo","L"}; +lookup(4667) -> {"Lo","L"}; +lookup(4668) -> {"Lo","L"}; +lookup(4669) -> {"Lo","L"}; +lookup(4670) -> {"Lo","L"}; +lookup(4671) -> {"Lo","L"}; +lookup(4672) -> {"Lo","L"}; +lookup(4673) -> {"Lo","L"}; +lookup(4674) -> {"Lo","L"}; +lookup(4675) -> {"Lo","L"}; +lookup(4676) -> {"Lo","L"}; +lookup(4677) -> {"Lo","L"}; +lookup(4678) -> {"Lo","L"}; +lookup(4679) -> {"Lo","L"}; +lookup(4680) -> {"Lo","L"}; +lookup(4682) -> {"Lo","L"}; +lookup(4683) -> {"Lo","L"}; +lookup(4684) -> {"Lo","L"}; +lookup(4685) -> {"Lo","L"}; +lookup(4688) -> {"Lo","L"}; +lookup(4689) -> {"Lo","L"}; +lookup(4690) -> {"Lo","L"}; +lookup(4691) -> {"Lo","L"}; +lookup(4692) -> {"Lo","L"}; +lookup(4693) -> {"Lo","L"}; +lookup(4694) -> {"Lo","L"}; +lookup(4696) -> {"Lo","L"}; +lookup(4698) -> {"Lo","L"}; +lookup(4699) -> {"Lo","L"}; 
+lookup(4700) -> {"Lo","L"}; +lookup(4701) -> {"Lo","L"}; +lookup(4704) -> {"Lo","L"}; +lookup(4705) -> {"Lo","L"}; +lookup(4706) -> {"Lo","L"}; +lookup(4707) -> {"Lo","L"}; +lookup(4708) -> {"Lo","L"}; +lookup(4709) -> {"Lo","L"}; +lookup(4710) -> {"Lo","L"}; +lookup(4711) -> {"Lo","L"}; +lookup(4712) -> {"Lo","L"}; +lookup(4713) -> {"Lo","L"}; +lookup(4714) -> {"Lo","L"}; +lookup(4715) -> {"Lo","L"}; +lookup(4716) -> {"Lo","L"}; +lookup(4717) -> {"Lo","L"}; +lookup(4718) -> {"Lo","L"}; +lookup(4719) -> {"Lo","L"}; +lookup(4720) -> {"Lo","L"}; +lookup(4721) -> {"Lo","L"}; +lookup(4722) -> {"Lo","L"}; +lookup(4723) -> {"Lo","L"}; +lookup(4724) -> {"Lo","L"}; +lookup(4725) -> {"Lo","L"}; +lookup(4726) -> {"Lo","L"}; +lookup(4727) -> {"Lo","L"}; +lookup(4728) -> {"Lo","L"}; +lookup(4729) -> {"Lo","L"}; +lookup(4730) -> {"Lo","L"}; +lookup(4731) -> {"Lo","L"}; +lookup(4732) -> {"Lo","L"}; +lookup(4733) -> {"Lo","L"}; +lookup(4734) -> {"Lo","L"}; +lookup(4735) -> {"Lo","L"}; +lookup(4736) -> {"Lo","L"}; +lookup(4737) -> {"Lo","L"}; +lookup(4738) -> {"Lo","L"}; +lookup(4739) -> {"Lo","L"}; +lookup(4740) -> {"Lo","L"}; +lookup(4741) -> {"Lo","L"}; +lookup(4742) -> {"Lo","L"}; +lookup(4743) -> {"Lo","L"}; +lookup(4744) -> {"Lo","L"}; +lookup(4746) -> {"Lo","L"}; +lookup(4747) -> {"Lo","L"}; +lookup(4748) -> {"Lo","L"}; +lookup(4749) -> {"Lo","L"}; +lookup(4752) -> {"Lo","L"}; +lookup(4753) -> {"Lo","L"}; +lookup(4754) -> {"Lo","L"}; +lookup(4755) -> {"Lo","L"}; +lookup(4756) -> {"Lo","L"}; +lookup(4757) -> {"Lo","L"}; +lookup(4758) -> {"Lo","L"}; +lookup(4759) -> {"Lo","L"}; +lookup(4760) -> {"Lo","L"}; +lookup(4761) -> {"Lo","L"}; +lookup(4762) -> {"Lo","L"}; +lookup(4763) -> {"Lo","L"}; +lookup(4764) -> {"Lo","L"}; +lookup(4765) -> {"Lo","L"}; +lookup(4766) -> {"Lo","L"}; +lookup(4767) -> {"Lo","L"}; +lookup(4768) -> {"Lo","L"}; +lookup(4769) -> {"Lo","L"}; +lookup(4770) -> {"Lo","L"}; +lookup(4771) -> {"Lo","L"}; +lookup(4772) -> {"Lo","L"}; +lookup(4773) -> 
{"Lo","L"}; +lookup(4774) -> {"Lo","L"}; +lookup(4775) -> {"Lo","L"}; +lookup(4776) -> {"Lo","L"}; +lookup(4777) -> {"Lo","L"}; +lookup(4778) -> {"Lo","L"}; +lookup(4779) -> {"Lo","L"}; +lookup(4780) -> {"Lo","L"}; +lookup(4781) -> {"Lo","L"}; +lookup(4782) -> {"Lo","L"}; +lookup(4783) -> {"Lo","L"}; +lookup(4784) -> {"Lo","L"}; +lookup(4786) -> {"Lo","L"}; +lookup(4787) -> {"Lo","L"}; +lookup(4788) -> {"Lo","L"}; +lookup(4789) -> {"Lo","L"}; +lookup(4792) -> {"Lo","L"}; +lookup(4793) -> {"Lo","L"}; +lookup(4794) -> {"Lo","L"}; +lookup(4795) -> {"Lo","L"}; +lookup(4796) -> {"Lo","L"}; +lookup(4797) -> {"Lo","L"}; +lookup(4798) -> {"Lo","L"}; +lookup(4800) -> {"Lo","L"}; +lookup(4802) -> {"Lo","L"}; +lookup(4803) -> {"Lo","L"}; +lookup(4804) -> {"Lo","L"}; +lookup(4805) -> {"Lo","L"}; +lookup(4808) -> {"Lo","L"}; +lookup(4809) -> {"Lo","L"}; +lookup(4810) -> {"Lo","L"}; +lookup(4811) -> {"Lo","L"}; +lookup(4812) -> {"Lo","L"}; +lookup(4813) -> {"Lo","L"}; +lookup(4814) -> {"Lo","L"}; +lookup(4815) -> {"Lo","L"}; +lookup(4816) -> {"Lo","L"}; +lookup(4817) -> {"Lo","L"}; +lookup(4818) -> {"Lo","L"}; +lookup(4819) -> {"Lo","L"}; +lookup(4820) -> {"Lo","L"}; +lookup(4821) -> {"Lo","L"}; +lookup(4822) -> {"Lo","L"}; +lookup(4824) -> {"Lo","L"}; +lookup(4825) -> {"Lo","L"}; +lookup(4826) -> {"Lo","L"}; +lookup(4827) -> {"Lo","L"}; +lookup(4828) -> {"Lo","L"}; +lookup(4829) -> {"Lo","L"}; +lookup(4830) -> {"Lo","L"}; +lookup(4831) -> {"Lo","L"}; +lookup(4832) -> {"Lo","L"}; +lookup(4833) -> {"Lo","L"}; +lookup(4834) -> {"Lo","L"}; +lookup(4835) -> {"Lo","L"}; +lookup(4836) -> {"Lo","L"}; +lookup(4837) -> {"Lo","L"}; +lookup(4838) -> {"Lo","L"}; +lookup(4839) -> {"Lo","L"}; +lookup(4840) -> {"Lo","L"}; +lookup(4841) -> {"Lo","L"}; +lookup(4842) -> {"Lo","L"}; +lookup(4843) -> {"Lo","L"}; +lookup(4844) -> {"Lo","L"}; +lookup(4845) -> {"Lo","L"}; +lookup(4846) -> {"Lo","L"}; +lookup(4847) -> {"Lo","L"}; +lookup(4848) -> {"Lo","L"}; +lookup(4849) -> {"Lo","L"}; +lookup(4850) 
-> {"Lo","L"}; +lookup(4851) -> {"Lo","L"}; +lookup(4852) -> {"Lo","L"}; +lookup(4853) -> {"Lo","L"}; +lookup(4854) -> {"Lo","L"}; +lookup(4855) -> {"Lo","L"}; +lookup(4856) -> {"Lo","L"}; +lookup(4857) -> {"Lo","L"}; +lookup(4858) -> {"Lo","L"}; +lookup(4859) -> {"Lo","L"}; +lookup(4860) -> {"Lo","L"}; +lookup(4861) -> {"Lo","L"}; +lookup(4862) -> {"Lo","L"}; +lookup(4863) -> {"Lo","L"}; +lookup(4864) -> {"Lo","L"}; +lookup(4865) -> {"Lo","L"}; +lookup(4866) -> {"Lo","L"}; +lookup(4867) -> {"Lo","L"}; +lookup(4868) -> {"Lo","L"}; +lookup(4869) -> {"Lo","L"}; +lookup(4870) -> {"Lo","L"}; +lookup(4871) -> {"Lo","L"}; +lookup(4872) -> {"Lo","L"}; +lookup(4873) -> {"Lo","L"}; +lookup(4874) -> {"Lo","L"}; +lookup(4875) -> {"Lo","L"}; +lookup(4876) -> {"Lo","L"}; +lookup(4877) -> {"Lo","L"}; +lookup(4878) -> {"Lo","L"}; +lookup(4879) -> {"Lo","L"}; +lookup(4880) -> {"Lo","L"}; +lookup(4882) -> {"Lo","L"}; +lookup(4883) -> {"Lo","L"}; +lookup(4884) -> {"Lo","L"}; +lookup(4885) -> {"Lo","L"}; +lookup(4888) -> {"Lo","L"}; +lookup(4889) -> {"Lo","L"}; +lookup(4890) -> {"Lo","L"}; +lookup(4891) -> {"Lo","L"}; +lookup(4892) -> {"Lo","L"}; +lookup(4893) -> {"Lo","L"}; +lookup(4894) -> {"Lo","L"}; +lookup(4895) -> {"Lo","L"}; +lookup(4896) -> {"Lo","L"}; +lookup(4897) -> {"Lo","L"}; +lookup(4898) -> {"Lo","L"}; +lookup(4899) -> {"Lo","L"}; +lookup(4900) -> {"Lo","L"}; +lookup(4901) -> {"Lo","L"}; +lookup(4902) -> {"Lo","L"}; +lookup(4903) -> {"Lo","L"}; +lookup(4904) -> {"Lo","L"}; +lookup(4905) -> {"Lo","L"}; +lookup(4906) -> {"Lo","L"}; +lookup(4907) -> {"Lo","L"}; +lookup(4908) -> {"Lo","L"}; +lookup(4909) -> {"Lo","L"}; +lookup(4910) -> {"Lo","L"}; +lookup(4911) -> {"Lo","L"}; +lookup(4912) -> {"Lo","L"}; +lookup(4913) -> {"Lo","L"}; +lookup(4914) -> {"Lo","L"}; +lookup(4915) -> {"Lo","L"}; +lookup(4916) -> {"Lo","L"}; +lookup(4917) -> {"Lo","L"}; +lookup(4918) -> {"Lo","L"}; +lookup(4919) -> {"Lo","L"}; +lookup(4920) -> {"Lo","L"}; +lookup(4921) -> {"Lo","L"}; 
+lookup(4922) -> {"Lo","L"}; +lookup(4923) -> {"Lo","L"}; +lookup(4924) -> {"Lo","L"}; +lookup(4925) -> {"Lo","L"}; +lookup(4926) -> {"Lo","L"}; +lookup(4927) -> {"Lo","L"}; +lookup(4928) -> {"Lo","L"}; +lookup(4929) -> {"Lo","L"}; +lookup(4930) -> {"Lo","L"}; +lookup(4931) -> {"Lo","L"}; +lookup(4932) -> {"Lo","L"}; +lookup(4933) -> {"Lo","L"}; +lookup(4934) -> {"Lo","L"}; +lookup(4935) -> {"Lo","L"}; +lookup(4936) -> {"Lo","L"}; +lookup(4937) -> {"Lo","L"}; +lookup(4938) -> {"Lo","L"}; +lookup(4939) -> {"Lo","L"}; +lookup(4940) -> {"Lo","L"}; +lookup(4941) -> {"Lo","L"}; +lookup(4942) -> {"Lo","L"}; +lookup(4943) -> {"Lo","L"}; +lookup(4944) -> {"Lo","L"}; +lookup(4945) -> {"Lo","L"}; +lookup(4946) -> {"Lo","L"}; +lookup(4947) -> {"Lo","L"}; +lookup(4948) -> {"Lo","L"}; +lookup(4949) -> {"Lo","L"}; +lookup(4950) -> {"Lo","L"}; +lookup(4951) -> {"Lo","L"}; +lookup(4952) -> {"Lo","L"}; +lookup(4953) -> {"Lo","L"}; +lookup(4954) -> {"Lo","L"}; +lookup(4957) -> {"Mn","NSM"}; +lookup(4958) -> {"Mn","NSM"}; +lookup(4959) -> {"Mn","NSM"}; +lookup(4960) -> {"Po","L"}; +lookup(4961) -> {"Po","L"}; +lookup(4962) -> {"Po","L"}; +lookup(4963) -> {"Po","L"}; +lookup(4964) -> {"Po","L"}; +lookup(4965) -> {"Po","L"}; +lookup(4966) -> {"Po","L"}; +lookup(4967) -> {"Po","L"}; +lookup(4968) -> {"Po","L"}; +lookup(4969) -> {"No","L"}; +lookup(4970) -> {"No","L"}; +lookup(4971) -> {"No","L"}; +lookup(4972) -> {"No","L"}; +lookup(4973) -> {"No","L"}; +lookup(4974) -> {"No","L"}; +lookup(4975) -> {"No","L"}; +lookup(4976) -> {"No","L"}; +lookup(4977) -> {"No","L"}; +lookup(4978) -> {"No","L"}; +lookup(4979) -> {"No","L"}; +lookup(4980) -> {"No","L"}; +lookup(4981) -> {"No","L"}; +lookup(4982) -> {"No","L"}; +lookup(4983) -> {"No","L"}; +lookup(4984) -> {"No","L"}; +lookup(4985) -> {"No","L"}; +lookup(4986) -> {"No","L"}; +lookup(4987) -> {"No","L"}; +lookup(4988) -> {"No","L"}; +lookup(4992) -> {"Lo","L"}; +lookup(4993) -> {"Lo","L"}; +lookup(4994) -> {"Lo","L"}; +lookup(4995) -> 
{"Lo","L"}; +lookup(4996) -> {"Lo","L"}; +lookup(4997) -> {"Lo","L"}; +lookup(4998) -> {"Lo","L"}; +lookup(4999) -> {"Lo","L"}; +lookup(5000) -> {"Lo","L"}; +lookup(5001) -> {"Lo","L"}; +lookup(5002) -> {"Lo","L"}; +lookup(5003) -> {"Lo","L"}; +lookup(5004) -> {"Lo","L"}; +lookup(5005) -> {"Lo","L"}; +lookup(5006) -> {"Lo","L"}; +lookup(5007) -> {"Lo","L"}; +lookup(5008) -> {"So","ON"}; +lookup(5009) -> {"So","ON"}; +lookup(5010) -> {"So","ON"}; +lookup(5011) -> {"So","ON"}; +lookup(5012) -> {"So","ON"}; +lookup(5013) -> {"So","ON"}; +lookup(5014) -> {"So","ON"}; +lookup(5015) -> {"So","ON"}; +lookup(5016) -> {"So","ON"}; +lookup(5017) -> {"So","ON"}; +lookup(5024) -> {"Lu","L"}; +lookup(5025) -> {"Lu","L"}; +lookup(5026) -> {"Lu","L"}; +lookup(5027) -> {"Lu","L"}; +lookup(5028) -> {"Lu","L"}; +lookup(5029) -> {"Lu","L"}; +lookup(5030) -> {"Lu","L"}; +lookup(5031) -> {"Lu","L"}; +lookup(5032) -> {"Lu","L"}; +lookup(5033) -> {"Lu","L"}; +lookup(5034) -> {"Lu","L"}; +lookup(5035) -> {"Lu","L"}; +lookup(5036) -> {"Lu","L"}; +lookup(5037) -> {"Lu","L"}; +lookup(5038) -> {"Lu","L"}; +lookup(5039) -> {"Lu","L"}; +lookup(5040) -> {"Lu","L"}; +lookup(5041) -> {"Lu","L"}; +lookup(5042) -> {"Lu","L"}; +lookup(5043) -> {"Lu","L"}; +lookup(5044) -> {"Lu","L"}; +lookup(5045) -> {"Lu","L"}; +lookup(5046) -> {"Lu","L"}; +lookup(5047) -> {"Lu","L"}; +lookup(5048) -> {"Lu","L"}; +lookup(5049) -> {"Lu","L"}; +lookup(5050) -> {"Lu","L"}; +lookup(5051) -> {"Lu","L"}; +lookup(5052) -> {"Lu","L"}; +lookup(5053) -> {"Lu","L"}; +lookup(5054) -> {"Lu","L"}; +lookup(5055) -> {"Lu","L"}; +lookup(5056) -> {"Lu","L"}; +lookup(5057) -> {"Lu","L"}; +lookup(5058) -> {"Lu","L"}; +lookup(5059) -> {"Lu","L"}; +lookup(5060) -> {"Lu","L"}; +lookup(5061) -> {"Lu","L"}; +lookup(5062) -> {"Lu","L"}; +lookup(5063) -> {"Lu","L"}; +lookup(5064) -> {"Lu","L"}; +lookup(5065) -> {"Lu","L"}; +lookup(5066) -> {"Lu","L"}; +lookup(5067) -> {"Lu","L"}; +lookup(5068) -> {"Lu","L"}; +lookup(5069) -> {"Lu","L"}; 
+lookup(5070) -> {"Lu","L"}; +lookup(5071) -> {"Lu","L"}; +lookup(5072) -> {"Lu","L"}; +lookup(5073) -> {"Lu","L"}; +lookup(5074) -> {"Lu","L"}; +lookup(5075) -> {"Lu","L"}; +lookup(5076) -> {"Lu","L"}; +lookup(5077) -> {"Lu","L"}; +lookup(5078) -> {"Lu","L"}; +lookup(5079) -> {"Lu","L"}; +lookup(5080) -> {"Lu","L"}; +lookup(5081) -> {"Lu","L"}; +lookup(5082) -> {"Lu","L"}; +lookup(5083) -> {"Lu","L"}; +lookup(5084) -> {"Lu","L"}; +lookup(5085) -> {"Lu","L"}; +lookup(5086) -> {"Lu","L"}; +lookup(5087) -> {"Lu","L"}; +lookup(5088) -> {"Lu","L"}; +lookup(5089) -> {"Lu","L"}; +lookup(5090) -> {"Lu","L"}; +lookup(5091) -> {"Lu","L"}; +lookup(5092) -> {"Lu","L"}; +lookup(5093) -> {"Lu","L"}; +lookup(5094) -> {"Lu","L"}; +lookup(5095) -> {"Lu","L"}; +lookup(5096) -> {"Lu","L"}; +lookup(5097) -> {"Lu","L"}; +lookup(5098) -> {"Lu","L"}; +lookup(5099) -> {"Lu","L"}; +lookup(5100) -> {"Lu","L"}; +lookup(5101) -> {"Lu","L"}; +lookup(5102) -> {"Lu","L"}; +lookup(5103) -> {"Lu","L"}; +lookup(5104) -> {"Lu","L"}; +lookup(5105) -> {"Lu","L"}; +lookup(5106) -> {"Lu","L"}; +lookup(5107) -> {"Lu","L"}; +lookup(5108) -> {"Lu","L"}; +lookup(5109) -> {"Lu","L"}; +lookup(5112) -> {"Ll","L"}; +lookup(5113) -> {"Ll","L"}; +lookup(5114) -> {"Ll","L"}; +lookup(5115) -> {"Ll","L"}; +lookup(5116) -> {"Ll","L"}; +lookup(5117) -> {"Ll","L"}; +lookup(5120) -> {"Pd","ON"}; +lookup(5121) -> {"Lo","L"}; +lookup(5122) -> {"Lo","L"}; +lookup(5123) -> {"Lo","L"}; +lookup(5124) -> {"Lo","L"}; +lookup(5125) -> {"Lo","L"}; +lookup(5126) -> {"Lo","L"}; +lookup(5127) -> {"Lo","L"}; +lookup(5128) -> {"Lo","L"}; +lookup(5129) -> {"Lo","L"}; +lookup(5130) -> {"Lo","L"}; +lookup(5131) -> {"Lo","L"}; +lookup(5132) -> {"Lo","L"}; +lookup(5133) -> {"Lo","L"}; +lookup(5134) -> {"Lo","L"}; +lookup(5135) -> {"Lo","L"}; +lookup(5136) -> {"Lo","L"}; +lookup(5137) -> {"Lo","L"}; +lookup(5138) -> {"Lo","L"}; +lookup(5139) -> {"Lo","L"}; +lookup(5140) -> {"Lo","L"}; +lookup(5141) -> {"Lo","L"}; +lookup(5142) -> 
{"Lo","L"}; +lookup(5143) -> {"Lo","L"}; +lookup(5144) -> {"Lo","L"}; +lookup(5145) -> {"Lo","L"}; +lookup(5146) -> {"Lo","L"}; +lookup(5147) -> {"Lo","L"}; +lookup(5148) -> {"Lo","L"}; +lookup(5149) -> {"Lo","L"}; +lookup(5150) -> {"Lo","L"}; +lookup(5151) -> {"Lo","L"}; +lookup(5152) -> {"Lo","L"}; +lookup(5153) -> {"Lo","L"}; +lookup(5154) -> {"Lo","L"}; +lookup(5155) -> {"Lo","L"}; +lookup(5156) -> {"Lo","L"}; +lookup(5157) -> {"Lo","L"}; +lookup(5158) -> {"Lo","L"}; +lookup(5159) -> {"Lo","L"}; +lookup(5160) -> {"Lo","L"}; +lookup(5161) -> {"Lo","L"}; +lookup(5162) -> {"Lo","L"}; +lookup(5163) -> {"Lo","L"}; +lookup(5164) -> {"Lo","L"}; +lookup(5165) -> {"Lo","L"}; +lookup(5166) -> {"Lo","L"}; +lookup(5167) -> {"Lo","L"}; +lookup(5168) -> {"Lo","L"}; +lookup(5169) -> {"Lo","L"}; +lookup(5170) -> {"Lo","L"}; +lookup(5171) -> {"Lo","L"}; +lookup(5172) -> {"Lo","L"}; +lookup(5173) -> {"Lo","L"}; +lookup(5174) -> {"Lo","L"}; +lookup(5175) -> {"Lo","L"}; +lookup(5176) -> {"Lo","L"}; +lookup(5177) -> {"Lo","L"}; +lookup(5178) -> {"Lo","L"}; +lookup(5179) -> {"Lo","L"}; +lookup(5180) -> {"Lo","L"}; +lookup(5181) -> {"Lo","L"}; +lookup(5182) -> {"Lo","L"}; +lookup(5183) -> {"Lo","L"}; +lookup(5184) -> {"Lo","L"}; +lookup(5185) -> {"Lo","L"}; +lookup(5186) -> {"Lo","L"}; +lookup(5187) -> {"Lo","L"}; +lookup(5188) -> {"Lo","L"}; +lookup(5189) -> {"Lo","L"}; +lookup(5190) -> {"Lo","L"}; +lookup(5191) -> {"Lo","L"}; +lookup(5192) -> {"Lo","L"}; +lookup(5193) -> {"Lo","L"}; +lookup(5194) -> {"Lo","L"}; +lookup(5195) -> {"Lo","L"}; +lookup(5196) -> {"Lo","L"}; +lookup(5197) -> {"Lo","L"}; +lookup(5198) -> {"Lo","L"}; +lookup(5199) -> {"Lo","L"}; +lookup(5200) -> {"Lo","L"}; +lookup(5201) -> {"Lo","L"}; +lookup(5202) -> {"Lo","L"}; +lookup(5203) -> {"Lo","L"}; +lookup(5204) -> {"Lo","L"}; +lookup(5205) -> {"Lo","L"}; +lookup(5206) -> {"Lo","L"}; +lookup(5207) -> {"Lo","L"}; +lookup(5208) -> {"Lo","L"}; +lookup(5209) -> {"Lo","L"}; +lookup(5210) -> {"Lo","L"}; +lookup(5211) 
-> {"Lo","L"}; +lookup(5212) -> {"Lo","L"}; +lookup(5213) -> {"Lo","L"}; +lookup(5214) -> {"Lo","L"}; +lookup(5215) -> {"Lo","L"}; +lookup(5216) -> {"Lo","L"}; +lookup(5217) -> {"Lo","L"}; +lookup(5218) -> {"Lo","L"}; +lookup(5219) -> {"Lo","L"}; +lookup(5220) -> {"Lo","L"}; +lookup(5221) -> {"Lo","L"}; +lookup(5222) -> {"Lo","L"}; +lookup(5223) -> {"Lo","L"}; +lookup(5224) -> {"Lo","L"}; +lookup(5225) -> {"Lo","L"}; +lookup(5226) -> {"Lo","L"}; +lookup(5227) -> {"Lo","L"}; +lookup(5228) -> {"Lo","L"}; +lookup(5229) -> {"Lo","L"}; +lookup(5230) -> {"Lo","L"}; +lookup(5231) -> {"Lo","L"}; +lookup(5232) -> {"Lo","L"}; +lookup(5233) -> {"Lo","L"}; +lookup(5234) -> {"Lo","L"}; +lookup(5235) -> {"Lo","L"}; +lookup(5236) -> {"Lo","L"}; +lookup(5237) -> {"Lo","L"}; +lookup(5238) -> {"Lo","L"}; +lookup(5239) -> {"Lo","L"}; +lookup(5240) -> {"Lo","L"}; +lookup(5241) -> {"Lo","L"}; +lookup(5242) -> {"Lo","L"}; +lookup(5243) -> {"Lo","L"}; +lookup(5244) -> {"Lo","L"}; +lookup(5245) -> {"Lo","L"}; +lookup(5246) -> {"Lo","L"}; +lookup(5247) -> {"Lo","L"}; +lookup(5248) -> {"Lo","L"}; +lookup(5249) -> {"Lo","L"}; +lookup(5250) -> {"Lo","L"}; +lookup(5251) -> {"Lo","L"}; +lookup(5252) -> {"Lo","L"}; +lookup(5253) -> {"Lo","L"}; +lookup(5254) -> {"Lo","L"}; +lookup(5255) -> {"Lo","L"}; +lookup(5256) -> {"Lo","L"}; +lookup(5257) -> {"Lo","L"}; +lookup(5258) -> {"Lo","L"}; +lookup(5259) -> {"Lo","L"}; +lookup(5260) -> {"Lo","L"}; +lookup(5261) -> {"Lo","L"}; +lookup(5262) -> {"Lo","L"}; +lookup(5263) -> {"Lo","L"}; +lookup(5264) -> {"Lo","L"}; +lookup(5265) -> {"Lo","L"}; +lookup(5266) -> {"Lo","L"}; +lookup(5267) -> {"Lo","L"}; +lookup(5268) -> {"Lo","L"}; +lookup(5269) -> {"Lo","L"}; +lookup(5270) -> {"Lo","L"}; +lookup(5271) -> {"Lo","L"}; +lookup(5272) -> {"Lo","L"}; +lookup(5273) -> {"Lo","L"}; +lookup(5274) -> {"Lo","L"}; +lookup(5275) -> {"Lo","L"}; +lookup(5276) -> {"Lo","L"}; +lookup(5277) -> {"Lo","L"}; +lookup(5278) -> {"Lo","L"}; +lookup(5279) -> {"Lo","L"}; 
+lookup(5280) -> {"Lo","L"}; +lookup(5281) -> {"Lo","L"}; +lookup(5282) -> {"Lo","L"}; +lookup(5283) -> {"Lo","L"}; +lookup(5284) -> {"Lo","L"}; +lookup(5285) -> {"Lo","L"}; +lookup(5286) -> {"Lo","L"}; +lookup(5287) -> {"Lo","L"}; +lookup(5288) -> {"Lo","L"}; +lookup(5289) -> {"Lo","L"}; +lookup(5290) -> {"Lo","L"}; +lookup(5291) -> {"Lo","L"}; +lookup(5292) -> {"Lo","L"}; +lookup(5293) -> {"Lo","L"}; +lookup(5294) -> {"Lo","L"}; +lookup(5295) -> {"Lo","L"}; +lookup(5296) -> {"Lo","L"}; +lookup(5297) -> {"Lo","L"}; +lookup(5298) -> {"Lo","L"}; +lookup(5299) -> {"Lo","L"}; +lookup(5300) -> {"Lo","L"}; +lookup(5301) -> {"Lo","L"}; +lookup(5302) -> {"Lo","L"}; +lookup(5303) -> {"Lo","L"}; +lookup(5304) -> {"Lo","L"}; +lookup(5305) -> {"Lo","L"}; +lookup(5306) -> {"Lo","L"}; +lookup(5307) -> {"Lo","L"}; +lookup(5308) -> {"Lo","L"}; +lookup(5309) -> {"Lo","L"}; +lookup(5310) -> {"Lo","L"}; +lookup(5311) -> {"Lo","L"}; +lookup(5312) -> {"Lo","L"}; +lookup(5313) -> {"Lo","L"}; +lookup(5314) -> {"Lo","L"}; +lookup(5315) -> {"Lo","L"}; +lookup(5316) -> {"Lo","L"}; +lookup(5317) -> {"Lo","L"}; +lookup(5318) -> {"Lo","L"}; +lookup(5319) -> {"Lo","L"}; +lookup(5320) -> {"Lo","L"}; +lookup(5321) -> {"Lo","L"}; +lookup(5322) -> {"Lo","L"}; +lookup(5323) -> {"Lo","L"}; +lookup(5324) -> {"Lo","L"}; +lookup(5325) -> {"Lo","L"}; +lookup(5326) -> {"Lo","L"}; +lookup(5327) -> {"Lo","L"}; +lookup(5328) -> {"Lo","L"}; +lookup(5329) -> {"Lo","L"}; +lookup(5330) -> {"Lo","L"}; +lookup(5331) -> {"Lo","L"}; +lookup(5332) -> {"Lo","L"}; +lookup(5333) -> {"Lo","L"}; +lookup(5334) -> {"Lo","L"}; +lookup(5335) -> {"Lo","L"}; +lookup(5336) -> {"Lo","L"}; +lookup(5337) -> {"Lo","L"}; +lookup(5338) -> {"Lo","L"}; +lookup(5339) -> {"Lo","L"}; +lookup(5340) -> {"Lo","L"}; +lookup(5341) -> {"Lo","L"}; +lookup(5342) -> {"Lo","L"}; +lookup(5343) -> {"Lo","L"}; +lookup(5344) -> {"Lo","L"}; +lookup(5345) -> {"Lo","L"}; +lookup(5346) -> {"Lo","L"}; +lookup(5347) -> {"Lo","L"}; +lookup(5348) -> 
{"Lo","L"}; +lookup(5349) -> {"Lo","L"}; +lookup(5350) -> {"Lo","L"}; +lookup(5351) -> {"Lo","L"}; +lookup(5352) -> {"Lo","L"}; +lookup(5353) -> {"Lo","L"}; +lookup(5354) -> {"Lo","L"}; +lookup(5355) -> {"Lo","L"}; +lookup(5356) -> {"Lo","L"}; +lookup(5357) -> {"Lo","L"}; +lookup(5358) -> {"Lo","L"}; +lookup(5359) -> {"Lo","L"}; +lookup(5360) -> {"Lo","L"}; +lookup(5361) -> {"Lo","L"}; +lookup(5362) -> {"Lo","L"}; +lookup(5363) -> {"Lo","L"}; +lookup(5364) -> {"Lo","L"}; +lookup(5365) -> {"Lo","L"}; +lookup(5366) -> {"Lo","L"}; +lookup(5367) -> {"Lo","L"}; +lookup(5368) -> {"Lo","L"}; +lookup(5369) -> {"Lo","L"}; +lookup(5370) -> {"Lo","L"}; +lookup(5371) -> {"Lo","L"}; +lookup(5372) -> {"Lo","L"}; +lookup(5373) -> {"Lo","L"}; +lookup(5374) -> {"Lo","L"}; +lookup(5375) -> {"Lo","L"}; +lookup(5376) -> {"Lo","L"}; +lookup(5377) -> {"Lo","L"}; +lookup(5378) -> {"Lo","L"}; +lookup(5379) -> {"Lo","L"}; +lookup(5380) -> {"Lo","L"}; +lookup(5381) -> {"Lo","L"}; +lookup(5382) -> {"Lo","L"}; +lookup(5383) -> {"Lo","L"}; +lookup(5384) -> {"Lo","L"}; +lookup(5385) -> {"Lo","L"}; +lookup(5386) -> {"Lo","L"}; +lookup(5387) -> {"Lo","L"}; +lookup(5388) -> {"Lo","L"}; +lookup(5389) -> {"Lo","L"}; +lookup(5390) -> {"Lo","L"}; +lookup(5391) -> {"Lo","L"}; +lookup(5392) -> {"Lo","L"}; +lookup(5393) -> {"Lo","L"}; +lookup(5394) -> {"Lo","L"}; +lookup(5395) -> {"Lo","L"}; +lookup(5396) -> {"Lo","L"}; +lookup(5397) -> {"Lo","L"}; +lookup(5398) -> {"Lo","L"}; +lookup(5399) -> {"Lo","L"}; +lookup(5400) -> {"Lo","L"}; +lookup(5401) -> {"Lo","L"}; +lookup(5402) -> {"Lo","L"}; +lookup(5403) -> {"Lo","L"}; +lookup(5404) -> {"Lo","L"}; +lookup(5405) -> {"Lo","L"}; +lookup(5406) -> {"Lo","L"}; +lookup(5407) -> {"Lo","L"}; +lookup(5408) -> {"Lo","L"}; +lookup(5409) -> {"Lo","L"}; +lookup(5410) -> {"Lo","L"}; +lookup(5411) -> {"Lo","L"}; +lookup(5412) -> {"Lo","L"}; +lookup(5413) -> {"Lo","L"}; +lookup(5414) -> {"Lo","L"}; +lookup(5415) -> {"Lo","L"}; +lookup(5416) -> {"Lo","L"}; +lookup(5417) 
-> {"Lo","L"}; +lookup(5418) -> {"Lo","L"}; +lookup(5419) -> {"Lo","L"}; +lookup(5420) -> {"Lo","L"}; +lookup(5421) -> {"Lo","L"}; +lookup(5422) -> {"Lo","L"}; +lookup(5423) -> {"Lo","L"}; +lookup(5424) -> {"Lo","L"}; +lookup(5425) -> {"Lo","L"}; +lookup(5426) -> {"Lo","L"}; +lookup(5427) -> {"Lo","L"}; +lookup(5428) -> {"Lo","L"}; +lookup(5429) -> {"Lo","L"}; +lookup(5430) -> {"Lo","L"}; +lookup(5431) -> {"Lo","L"}; +lookup(5432) -> {"Lo","L"}; +lookup(5433) -> {"Lo","L"}; +lookup(5434) -> {"Lo","L"}; +lookup(5435) -> {"Lo","L"}; +lookup(5436) -> {"Lo","L"}; +lookup(5437) -> {"Lo","L"}; +lookup(5438) -> {"Lo","L"}; +lookup(5439) -> {"Lo","L"}; +lookup(5440) -> {"Lo","L"}; +lookup(5441) -> {"Lo","L"}; +lookup(5442) -> {"Lo","L"}; +lookup(5443) -> {"Lo","L"}; +lookup(5444) -> {"Lo","L"}; +lookup(5445) -> {"Lo","L"}; +lookup(5446) -> {"Lo","L"}; +lookup(5447) -> {"Lo","L"}; +lookup(5448) -> {"Lo","L"}; +lookup(5449) -> {"Lo","L"}; +lookup(5450) -> {"Lo","L"}; +lookup(5451) -> {"Lo","L"}; +lookup(5452) -> {"Lo","L"}; +lookup(5453) -> {"Lo","L"}; +lookup(5454) -> {"Lo","L"}; +lookup(5455) -> {"Lo","L"}; +lookup(5456) -> {"Lo","L"}; +lookup(5457) -> {"Lo","L"}; +lookup(5458) -> {"Lo","L"}; +lookup(5459) -> {"Lo","L"}; +lookup(5460) -> {"Lo","L"}; +lookup(5461) -> {"Lo","L"}; +lookup(5462) -> {"Lo","L"}; +lookup(5463) -> {"Lo","L"}; +lookup(5464) -> {"Lo","L"}; +lookup(5465) -> {"Lo","L"}; +lookup(5466) -> {"Lo","L"}; +lookup(5467) -> {"Lo","L"}; +lookup(5468) -> {"Lo","L"}; +lookup(5469) -> {"Lo","L"}; +lookup(5470) -> {"Lo","L"}; +lookup(5471) -> {"Lo","L"}; +lookup(5472) -> {"Lo","L"}; +lookup(5473) -> {"Lo","L"}; +lookup(5474) -> {"Lo","L"}; +lookup(5475) -> {"Lo","L"}; +lookup(5476) -> {"Lo","L"}; +lookup(5477) -> {"Lo","L"}; +lookup(5478) -> {"Lo","L"}; +lookup(5479) -> {"Lo","L"}; +lookup(5480) -> {"Lo","L"}; +lookup(5481) -> {"Lo","L"}; +lookup(5482) -> {"Lo","L"}; +lookup(5483) -> {"Lo","L"}; +lookup(5484) -> {"Lo","L"}; +lookup(5485) -> {"Lo","L"}; 
+lookup(5486) -> {"Lo","L"}; +lookup(5487) -> {"Lo","L"}; +lookup(5488) -> {"Lo","L"}; +lookup(5489) -> {"Lo","L"}; +lookup(5490) -> {"Lo","L"}; +lookup(5491) -> {"Lo","L"}; +lookup(5492) -> {"Lo","L"}; +lookup(5493) -> {"Lo","L"}; +lookup(5494) -> {"Lo","L"}; +lookup(5495) -> {"Lo","L"}; +lookup(5496) -> {"Lo","L"}; +lookup(5497) -> {"Lo","L"}; +lookup(5498) -> {"Lo","L"}; +lookup(5499) -> {"Lo","L"}; +lookup(5500) -> {"Lo","L"}; +lookup(5501) -> {"Lo","L"}; +lookup(5502) -> {"Lo","L"}; +lookup(5503) -> {"Lo","L"}; +lookup(5504) -> {"Lo","L"}; +lookup(5505) -> {"Lo","L"}; +lookup(5506) -> {"Lo","L"}; +lookup(5507) -> {"Lo","L"}; +lookup(5508) -> {"Lo","L"}; +lookup(5509) -> {"Lo","L"}; +lookup(5510) -> {"Lo","L"}; +lookup(5511) -> {"Lo","L"}; +lookup(5512) -> {"Lo","L"}; +lookup(5513) -> {"Lo","L"}; +lookup(5514) -> {"Lo","L"}; +lookup(5515) -> {"Lo","L"}; +lookup(5516) -> {"Lo","L"}; +lookup(5517) -> {"Lo","L"}; +lookup(5518) -> {"Lo","L"}; +lookup(5519) -> {"Lo","L"}; +lookup(5520) -> {"Lo","L"}; +lookup(5521) -> {"Lo","L"}; +lookup(5522) -> {"Lo","L"}; +lookup(5523) -> {"Lo","L"}; +lookup(5524) -> {"Lo","L"}; +lookup(5525) -> {"Lo","L"}; +lookup(5526) -> {"Lo","L"}; +lookup(5527) -> {"Lo","L"}; +lookup(5528) -> {"Lo","L"}; +lookup(5529) -> {"Lo","L"}; +lookup(5530) -> {"Lo","L"}; +lookup(5531) -> {"Lo","L"}; +lookup(5532) -> {"Lo","L"}; +lookup(5533) -> {"Lo","L"}; +lookup(5534) -> {"Lo","L"}; +lookup(5535) -> {"Lo","L"}; +lookup(5536) -> {"Lo","L"}; +lookup(5537) -> {"Lo","L"}; +lookup(5538) -> {"Lo","L"}; +lookup(5539) -> {"Lo","L"}; +lookup(5540) -> {"Lo","L"}; +lookup(5541) -> {"Lo","L"}; +lookup(5542) -> {"Lo","L"}; +lookup(5543) -> {"Lo","L"}; +lookup(5544) -> {"Lo","L"}; +lookup(5545) -> {"Lo","L"}; +lookup(5546) -> {"Lo","L"}; +lookup(5547) -> {"Lo","L"}; +lookup(5548) -> {"Lo","L"}; +lookup(5549) -> {"Lo","L"}; +lookup(5550) -> {"Lo","L"}; +lookup(5551) -> {"Lo","L"}; +lookup(5552) -> {"Lo","L"}; +lookup(5553) -> {"Lo","L"}; +lookup(5554) -> 
{"Lo","L"}; +lookup(5555) -> {"Lo","L"}; +lookup(5556) -> {"Lo","L"}; +lookup(5557) -> {"Lo","L"}; +lookup(5558) -> {"Lo","L"}; +lookup(5559) -> {"Lo","L"}; +lookup(5560) -> {"Lo","L"}; +lookup(5561) -> {"Lo","L"}; +lookup(5562) -> {"Lo","L"}; +lookup(5563) -> {"Lo","L"}; +lookup(5564) -> {"Lo","L"}; +lookup(5565) -> {"Lo","L"}; +lookup(5566) -> {"Lo","L"}; +lookup(5567) -> {"Lo","L"}; +lookup(5568) -> {"Lo","L"}; +lookup(5569) -> {"Lo","L"}; +lookup(5570) -> {"Lo","L"}; +lookup(5571) -> {"Lo","L"}; +lookup(5572) -> {"Lo","L"}; +lookup(5573) -> {"Lo","L"}; +lookup(5574) -> {"Lo","L"}; +lookup(5575) -> {"Lo","L"}; +lookup(5576) -> {"Lo","L"}; +lookup(5577) -> {"Lo","L"}; +lookup(5578) -> {"Lo","L"}; +lookup(5579) -> {"Lo","L"}; +lookup(5580) -> {"Lo","L"}; +lookup(5581) -> {"Lo","L"}; +lookup(5582) -> {"Lo","L"}; +lookup(5583) -> {"Lo","L"}; +lookup(5584) -> {"Lo","L"}; +lookup(5585) -> {"Lo","L"}; +lookup(5586) -> {"Lo","L"}; +lookup(5587) -> {"Lo","L"}; +lookup(5588) -> {"Lo","L"}; +lookup(5589) -> {"Lo","L"}; +lookup(5590) -> {"Lo","L"}; +lookup(5591) -> {"Lo","L"}; +lookup(5592) -> {"Lo","L"}; +lookup(5593) -> {"Lo","L"}; +lookup(5594) -> {"Lo","L"}; +lookup(5595) -> {"Lo","L"}; +lookup(5596) -> {"Lo","L"}; +lookup(5597) -> {"Lo","L"}; +lookup(5598) -> {"Lo","L"}; +lookup(5599) -> {"Lo","L"}; +lookup(5600) -> {"Lo","L"}; +lookup(5601) -> {"Lo","L"}; +lookup(5602) -> {"Lo","L"}; +lookup(5603) -> {"Lo","L"}; +lookup(5604) -> {"Lo","L"}; +lookup(5605) -> {"Lo","L"}; +lookup(5606) -> {"Lo","L"}; +lookup(5607) -> {"Lo","L"}; +lookup(5608) -> {"Lo","L"}; +lookup(5609) -> {"Lo","L"}; +lookup(5610) -> {"Lo","L"}; +lookup(5611) -> {"Lo","L"}; +lookup(5612) -> {"Lo","L"}; +lookup(5613) -> {"Lo","L"}; +lookup(5614) -> {"Lo","L"}; +lookup(5615) -> {"Lo","L"}; +lookup(5616) -> {"Lo","L"}; +lookup(5617) -> {"Lo","L"}; +lookup(5618) -> {"Lo","L"}; +lookup(5619) -> {"Lo","L"}; +lookup(5620) -> {"Lo","L"}; +lookup(5621) -> {"Lo","L"}; +lookup(5622) -> {"Lo","L"}; +lookup(5623) 
-> {"Lo","L"}; +lookup(5624) -> {"Lo","L"}; +lookup(5625) -> {"Lo","L"}; +lookup(5626) -> {"Lo","L"}; +lookup(5627) -> {"Lo","L"}; +lookup(5628) -> {"Lo","L"}; +lookup(5629) -> {"Lo","L"}; +lookup(5630) -> {"Lo","L"}; +lookup(5631) -> {"Lo","L"}; +lookup(5632) -> {"Lo","L"}; +lookup(5633) -> {"Lo","L"}; +lookup(5634) -> {"Lo","L"}; +lookup(5635) -> {"Lo","L"}; +lookup(5636) -> {"Lo","L"}; +lookup(5637) -> {"Lo","L"}; +lookup(5638) -> {"Lo","L"}; +lookup(5639) -> {"Lo","L"}; +lookup(5640) -> {"Lo","L"}; +lookup(5641) -> {"Lo","L"}; +lookup(5642) -> {"Lo","L"}; +lookup(5643) -> {"Lo","L"}; +lookup(5644) -> {"Lo","L"}; +lookup(5645) -> {"Lo","L"}; +lookup(5646) -> {"Lo","L"}; +lookup(5647) -> {"Lo","L"}; +lookup(5648) -> {"Lo","L"}; +lookup(5649) -> {"Lo","L"}; +lookup(5650) -> {"Lo","L"}; +lookup(5651) -> {"Lo","L"}; +lookup(5652) -> {"Lo","L"}; +lookup(5653) -> {"Lo","L"}; +lookup(5654) -> {"Lo","L"}; +lookup(5655) -> {"Lo","L"}; +lookup(5656) -> {"Lo","L"}; +lookup(5657) -> {"Lo","L"}; +lookup(5658) -> {"Lo","L"}; +lookup(5659) -> {"Lo","L"}; +lookup(5660) -> {"Lo","L"}; +lookup(5661) -> {"Lo","L"}; +lookup(5662) -> {"Lo","L"}; +lookup(5663) -> {"Lo","L"}; +lookup(5664) -> {"Lo","L"}; +lookup(5665) -> {"Lo","L"}; +lookup(5666) -> {"Lo","L"}; +lookup(5667) -> {"Lo","L"}; +lookup(5668) -> {"Lo","L"}; +lookup(5669) -> {"Lo","L"}; +lookup(5670) -> {"Lo","L"}; +lookup(5671) -> {"Lo","L"}; +lookup(5672) -> {"Lo","L"}; +lookup(5673) -> {"Lo","L"}; +lookup(5674) -> {"Lo","L"}; +lookup(5675) -> {"Lo","L"}; +lookup(5676) -> {"Lo","L"}; +lookup(5677) -> {"Lo","L"}; +lookup(5678) -> {"Lo","L"}; +lookup(5679) -> {"Lo","L"}; +lookup(5680) -> {"Lo","L"}; +lookup(5681) -> {"Lo","L"}; +lookup(5682) -> {"Lo","L"}; +lookup(5683) -> {"Lo","L"}; +lookup(5684) -> {"Lo","L"}; +lookup(5685) -> {"Lo","L"}; +lookup(5686) -> {"Lo","L"}; +lookup(5687) -> {"Lo","L"}; +lookup(5688) -> {"Lo","L"}; +lookup(5689) -> {"Lo","L"}; +lookup(5690) -> {"Lo","L"}; +lookup(5691) -> {"Lo","L"}; 
+lookup(5692) -> {"Lo","L"}; +lookup(5693) -> {"Lo","L"}; +lookup(5694) -> {"Lo","L"}; +lookup(5695) -> {"Lo","L"}; +lookup(5696) -> {"Lo","L"}; +lookup(5697) -> {"Lo","L"}; +lookup(5698) -> {"Lo","L"}; +lookup(5699) -> {"Lo","L"}; +lookup(5700) -> {"Lo","L"}; +lookup(5701) -> {"Lo","L"}; +lookup(5702) -> {"Lo","L"}; +lookup(5703) -> {"Lo","L"}; +lookup(5704) -> {"Lo","L"}; +lookup(5705) -> {"Lo","L"}; +lookup(5706) -> {"Lo","L"}; +lookup(5707) -> {"Lo","L"}; +lookup(5708) -> {"Lo","L"}; +lookup(5709) -> {"Lo","L"}; +lookup(5710) -> {"Lo","L"}; +lookup(5711) -> {"Lo","L"}; +lookup(5712) -> {"Lo","L"}; +lookup(5713) -> {"Lo","L"}; +lookup(5714) -> {"Lo","L"}; +lookup(5715) -> {"Lo","L"}; +lookup(5716) -> {"Lo","L"}; +lookup(5717) -> {"Lo","L"}; +lookup(5718) -> {"Lo","L"}; +lookup(5719) -> {"Lo","L"}; +lookup(5720) -> {"Lo","L"}; +lookup(5721) -> {"Lo","L"}; +lookup(5722) -> {"Lo","L"}; +lookup(5723) -> {"Lo","L"}; +lookup(5724) -> {"Lo","L"}; +lookup(5725) -> {"Lo","L"}; +lookup(5726) -> {"Lo","L"}; +lookup(5727) -> {"Lo","L"}; +lookup(5728) -> {"Lo","L"}; +lookup(5729) -> {"Lo","L"}; +lookup(5730) -> {"Lo","L"}; +lookup(5731) -> {"Lo","L"}; +lookup(5732) -> {"Lo","L"}; +lookup(5733) -> {"Lo","L"}; +lookup(5734) -> {"Lo","L"}; +lookup(5735) -> {"Lo","L"}; +lookup(5736) -> {"Lo","L"}; +lookup(5737) -> {"Lo","L"}; +lookup(5738) -> {"Lo","L"}; +lookup(5739) -> {"Lo","L"}; +lookup(5740) -> {"Lo","L"}; +lookup(5741) -> {"So","L"}; +lookup(5742) -> {"Po","L"}; +lookup(5743) -> {"Lo","L"}; +lookup(5744) -> {"Lo","L"}; +lookup(5745) -> {"Lo","L"}; +lookup(5746) -> {"Lo","L"}; +lookup(5747) -> {"Lo","L"}; +lookup(5748) -> {"Lo","L"}; +lookup(5749) -> {"Lo","L"}; +lookup(5750) -> {"Lo","L"}; +lookup(5751) -> {"Lo","L"}; +lookup(5752) -> {"Lo","L"}; +lookup(5753) -> {"Lo","L"}; +lookup(5754) -> {"Lo","L"}; +lookup(5755) -> {"Lo","L"}; +lookup(5756) -> {"Lo","L"}; +lookup(5757) -> {"Lo","L"}; +lookup(5758) -> {"Lo","L"}; +lookup(5759) -> {"Lo","L"}; +lookup(5760) -> 
{"Zs","WS"}; +lookup(5761) -> {"Lo","L"}; +lookup(5762) -> {"Lo","L"}; +lookup(5763) -> {"Lo","L"}; +lookup(5764) -> {"Lo","L"}; +lookup(5765) -> {"Lo","L"}; +lookup(5766) -> {"Lo","L"}; +lookup(5767) -> {"Lo","L"}; +lookup(5768) -> {"Lo","L"}; +lookup(5769) -> {"Lo","L"}; +lookup(5770) -> {"Lo","L"}; +lookup(5771) -> {"Lo","L"}; +lookup(5772) -> {"Lo","L"}; +lookup(5773) -> {"Lo","L"}; +lookup(5774) -> {"Lo","L"}; +lookup(5775) -> {"Lo","L"}; +lookup(5776) -> {"Lo","L"}; +lookup(5777) -> {"Lo","L"}; +lookup(5778) -> {"Lo","L"}; +lookup(5779) -> {"Lo","L"}; +lookup(5780) -> {"Lo","L"}; +lookup(5781) -> {"Lo","L"}; +lookup(5782) -> {"Lo","L"}; +lookup(5783) -> {"Lo","L"}; +lookup(5784) -> {"Lo","L"}; +lookup(5785) -> {"Lo","L"}; +lookup(5786) -> {"Lo","L"}; +lookup(5787) -> {"Ps","ON"}; +lookup(5788) -> {"Pe","ON"}; +lookup(5792) -> {"Lo","L"}; +lookup(5793) -> {"Lo","L"}; +lookup(5794) -> {"Lo","L"}; +lookup(5795) -> {"Lo","L"}; +lookup(5796) -> {"Lo","L"}; +lookup(5797) -> {"Lo","L"}; +lookup(5798) -> {"Lo","L"}; +lookup(5799) -> {"Lo","L"}; +lookup(5800) -> {"Lo","L"}; +lookup(5801) -> {"Lo","L"}; +lookup(5802) -> {"Lo","L"}; +lookup(5803) -> {"Lo","L"}; +lookup(5804) -> {"Lo","L"}; +lookup(5805) -> {"Lo","L"}; +lookup(5806) -> {"Lo","L"}; +lookup(5807) -> {"Lo","L"}; +lookup(5808) -> {"Lo","L"}; +lookup(5809) -> {"Lo","L"}; +lookup(5810) -> {"Lo","L"}; +lookup(5811) -> {"Lo","L"}; +lookup(5812) -> {"Lo","L"}; +lookup(5813) -> {"Lo","L"}; +lookup(5814) -> {"Lo","L"}; +lookup(5815) -> {"Lo","L"}; +lookup(5816) -> {"Lo","L"}; +lookup(5817) -> {"Lo","L"}; +lookup(5818) -> {"Lo","L"}; +lookup(5819) -> {"Lo","L"}; +lookup(5820) -> {"Lo","L"}; +lookup(5821) -> {"Lo","L"}; +lookup(5822) -> {"Lo","L"}; +lookup(5823) -> {"Lo","L"}; +lookup(5824) -> {"Lo","L"}; +lookup(5825) -> {"Lo","L"}; +lookup(5826) -> {"Lo","L"}; +lookup(5827) -> {"Lo","L"}; +lookup(5828) -> {"Lo","L"}; +lookup(5829) -> {"Lo","L"}; +lookup(5830) -> {"Lo","L"}; +lookup(5831) -> {"Lo","L"}; 
+lookup(5832) -> {"Lo","L"}; +lookup(5833) -> {"Lo","L"}; +lookup(5834) -> {"Lo","L"}; +lookup(5835) -> {"Lo","L"}; +lookup(5836) -> {"Lo","L"}; +lookup(5837) -> {"Lo","L"}; +lookup(5838) -> {"Lo","L"}; +lookup(5839) -> {"Lo","L"}; +lookup(5840) -> {"Lo","L"}; +lookup(5841) -> {"Lo","L"}; +lookup(5842) -> {"Lo","L"}; +lookup(5843) -> {"Lo","L"}; +lookup(5844) -> {"Lo","L"}; +lookup(5845) -> {"Lo","L"}; +lookup(5846) -> {"Lo","L"}; +lookup(5847) -> {"Lo","L"}; +lookup(5848) -> {"Lo","L"}; +lookup(5849) -> {"Lo","L"}; +lookup(5850) -> {"Lo","L"}; +lookup(5851) -> {"Lo","L"}; +lookup(5852) -> {"Lo","L"}; +lookup(5853) -> {"Lo","L"}; +lookup(5854) -> {"Lo","L"}; +lookup(5855) -> {"Lo","L"}; +lookup(5856) -> {"Lo","L"}; +lookup(5857) -> {"Lo","L"}; +lookup(5858) -> {"Lo","L"}; +lookup(5859) -> {"Lo","L"}; +lookup(5860) -> {"Lo","L"}; +lookup(5861) -> {"Lo","L"}; +lookup(5862) -> {"Lo","L"}; +lookup(5863) -> {"Lo","L"}; +lookup(5864) -> {"Lo","L"}; +lookup(5865) -> {"Lo","L"}; +lookup(5866) -> {"Lo","L"}; +lookup(5867) -> {"Po","L"}; +lookup(5868) -> {"Po","L"}; +lookup(5869) -> {"Po","L"}; +lookup(5870) -> {"Nl","L"}; +lookup(5871) -> {"Nl","L"}; +lookup(5872) -> {"Nl","L"}; +lookup(5873) -> {"Lo","L"}; +lookup(5874) -> {"Lo","L"}; +lookup(5875) -> {"Lo","L"}; +lookup(5876) -> {"Lo","L"}; +lookup(5877) -> {"Lo","L"}; +lookup(5878) -> {"Lo","L"}; +lookup(5879) -> {"Lo","L"}; +lookup(5880) -> {"Lo","L"}; +lookup(5888) -> {"Lo","L"}; +lookup(5889) -> {"Lo","L"}; +lookup(5890) -> {"Lo","L"}; +lookup(5891) -> {"Lo","L"}; +lookup(5892) -> {"Lo","L"}; +lookup(5893) -> {"Lo","L"}; +lookup(5894) -> {"Lo","L"}; +lookup(5895) -> {"Lo","L"}; +lookup(5896) -> {"Lo","L"}; +lookup(5897) -> {"Lo","L"}; +lookup(5898) -> {"Lo","L"}; +lookup(5899) -> {"Lo","L"}; +lookup(5900) -> {"Lo","L"}; +lookup(5902) -> {"Lo","L"}; +lookup(5903) -> {"Lo","L"}; +lookup(5904) -> {"Lo","L"}; +lookup(5905) -> {"Lo","L"}; +lookup(5906) -> {"Mn","NSM"}; +lookup(5907) -> {"Mn","NSM"}; +lookup(5908) -> 
{"Mn","NSM"}; +lookup(5920) -> {"Lo","L"}; +lookup(5921) -> {"Lo","L"}; +lookup(5922) -> {"Lo","L"}; +lookup(5923) -> {"Lo","L"}; +lookup(5924) -> {"Lo","L"}; +lookup(5925) -> {"Lo","L"}; +lookup(5926) -> {"Lo","L"}; +lookup(5927) -> {"Lo","L"}; +lookup(5928) -> {"Lo","L"}; +lookup(5929) -> {"Lo","L"}; +lookup(5930) -> {"Lo","L"}; +lookup(5931) -> {"Lo","L"}; +lookup(5932) -> {"Lo","L"}; +lookup(5933) -> {"Lo","L"}; +lookup(5934) -> {"Lo","L"}; +lookup(5935) -> {"Lo","L"}; +lookup(5936) -> {"Lo","L"}; +lookup(5937) -> {"Lo","L"}; +lookup(5938) -> {"Mn","NSM"}; +lookup(5939) -> {"Mn","NSM"}; +lookup(5940) -> {"Mn","NSM"}; +lookup(5941) -> {"Po","L"}; +lookup(5942) -> {"Po","L"}; +lookup(5952) -> {"Lo","L"}; +lookup(5953) -> {"Lo","L"}; +lookup(5954) -> {"Lo","L"}; +lookup(5955) -> {"Lo","L"}; +lookup(5956) -> {"Lo","L"}; +lookup(5957) -> {"Lo","L"}; +lookup(5958) -> {"Lo","L"}; +lookup(5959) -> {"Lo","L"}; +lookup(5960) -> {"Lo","L"}; +lookup(5961) -> {"Lo","L"}; +lookup(5962) -> {"Lo","L"}; +lookup(5963) -> {"Lo","L"}; +lookup(5964) -> {"Lo","L"}; +lookup(5965) -> {"Lo","L"}; +lookup(5966) -> {"Lo","L"}; +lookup(5967) -> {"Lo","L"}; +lookup(5968) -> {"Lo","L"}; +lookup(5969) -> {"Lo","L"}; +lookup(5970) -> {"Mn","NSM"}; +lookup(5971) -> {"Mn","NSM"}; +lookup(5984) -> {"Lo","L"}; +lookup(5985) -> {"Lo","L"}; +lookup(5986) -> {"Lo","L"}; +lookup(5987) -> {"Lo","L"}; +lookup(5988) -> {"Lo","L"}; +lookup(5989) -> {"Lo","L"}; +lookup(5990) -> {"Lo","L"}; +lookup(5991) -> {"Lo","L"}; +lookup(5992) -> {"Lo","L"}; +lookup(5993) -> {"Lo","L"}; +lookup(5994) -> {"Lo","L"}; +lookup(5995) -> {"Lo","L"}; +lookup(5996) -> {"Lo","L"}; +lookup(5998) -> {"Lo","L"}; +lookup(5999) -> {"Lo","L"}; +lookup(6000) -> {"Lo","L"}; +lookup(6002) -> {"Mn","NSM"}; +lookup(6003) -> {"Mn","NSM"}; +lookup(6016) -> {"Lo","L"}; +lookup(6017) -> {"Lo","L"}; +lookup(6018) -> {"Lo","L"}; +lookup(6019) -> {"Lo","L"}; +lookup(6020) -> {"Lo","L"}; +lookup(6021) -> {"Lo","L"}; +lookup(6022) -> {"Lo","L"}; 
+lookup(6023) -> {"Lo","L"}; +lookup(6024) -> {"Lo","L"}; +lookup(6025) -> {"Lo","L"}; +lookup(6026) -> {"Lo","L"}; +lookup(6027) -> {"Lo","L"}; +lookup(6028) -> {"Lo","L"}; +lookup(6029) -> {"Lo","L"}; +lookup(6030) -> {"Lo","L"}; +lookup(6031) -> {"Lo","L"}; +lookup(6032) -> {"Lo","L"}; +lookup(6033) -> {"Lo","L"}; +lookup(6034) -> {"Lo","L"}; +lookup(6035) -> {"Lo","L"}; +lookup(6036) -> {"Lo","L"}; +lookup(6037) -> {"Lo","L"}; +lookup(6038) -> {"Lo","L"}; +lookup(6039) -> {"Lo","L"}; +lookup(6040) -> {"Lo","L"}; +lookup(6041) -> {"Lo","L"}; +lookup(6042) -> {"Lo","L"}; +lookup(6043) -> {"Lo","L"}; +lookup(6044) -> {"Lo","L"}; +lookup(6045) -> {"Lo","L"}; +lookup(6046) -> {"Lo","L"}; +lookup(6047) -> {"Lo","L"}; +lookup(6048) -> {"Lo","L"}; +lookup(6049) -> {"Lo","L"}; +lookup(6050) -> {"Lo","L"}; +lookup(6051) -> {"Lo","L"}; +lookup(6052) -> {"Lo","L"}; +lookup(6053) -> {"Lo","L"}; +lookup(6054) -> {"Lo","L"}; +lookup(6055) -> {"Lo","L"}; +lookup(6056) -> {"Lo","L"}; +lookup(6057) -> {"Lo","L"}; +lookup(6058) -> {"Lo","L"}; +lookup(6059) -> {"Lo","L"}; +lookup(6060) -> {"Lo","L"}; +lookup(6061) -> {"Lo","L"}; +lookup(6062) -> {"Lo","L"}; +lookup(6063) -> {"Lo","L"}; +lookup(6064) -> {"Lo","L"}; +lookup(6065) -> {"Lo","L"}; +lookup(6066) -> {"Lo","L"}; +lookup(6067) -> {"Lo","L"}; +lookup(6068) -> {"Mn","NSM"}; +lookup(6069) -> {"Mn","NSM"}; +lookup(6070) -> {"Mc","L"}; +lookup(6071) -> {"Mn","NSM"}; +lookup(6072) -> {"Mn","NSM"}; +lookup(6073) -> {"Mn","NSM"}; +lookup(6074) -> {"Mn","NSM"}; +lookup(6075) -> {"Mn","NSM"}; +lookup(6076) -> {"Mn","NSM"}; +lookup(6077) -> {"Mn","NSM"}; +lookup(6078) -> {"Mc","L"}; +lookup(6079) -> {"Mc","L"}; +lookup(6080) -> {"Mc","L"}; +lookup(6081) -> {"Mc","L"}; +lookup(6082) -> {"Mc","L"}; +lookup(6083) -> {"Mc","L"}; +lookup(6084) -> {"Mc","L"}; +lookup(6085) -> {"Mc","L"}; +lookup(6086) -> {"Mn","NSM"}; +lookup(6087) -> {"Mc","L"}; +lookup(6088) -> {"Mc","L"}; +lookup(6089) -> {"Mn","NSM"}; +lookup(6090) -> {"Mn","NSM"}; 
+lookup(6091) -> {"Mn","NSM"}; +lookup(6092) -> {"Mn","NSM"}; +lookup(6093) -> {"Mn","NSM"}; +lookup(6094) -> {"Mn","NSM"}; +lookup(6095) -> {"Mn","NSM"}; +lookup(6096) -> {"Mn","NSM"}; +lookup(6097) -> {"Mn","NSM"}; +lookup(6098) -> {"Mn","NSM"}; +lookup(6099) -> {"Mn","NSM"}; +lookup(6100) -> {"Po","L"}; +lookup(6101) -> {"Po","L"}; +lookup(6102) -> {"Po","L"}; +lookup(6103) -> {"Lm","L"}; +lookup(6104) -> {"Po","L"}; +lookup(6105) -> {"Po","L"}; +lookup(6106) -> {"Po","L"}; +lookup(6107) -> {"Sc","ET"}; +lookup(6108) -> {"Lo","L"}; +lookup(6109) -> {"Mn","NSM"}; +lookup(6112) -> {"Nd","L"}; +lookup(6113) -> {"Nd","L"}; +lookup(6114) -> {"Nd","L"}; +lookup(6115) -> {"Nd","L"}; +lookup(6116) -> {"Nd","L"}; +lookup(6117) -> {"Nd","L"}; +lookup(6118) -> {"Nd","L"}; +lookup(6119) -> {"Nd","L"}; +lookup(6120) -> {"Nd","L"}; +lookup(6121) -> {"Nd","L"}; +lookup(6128) -> {"No","ON"}; +lookup(6129) -> {"No","ON"}; +lookup(6130) -> {"No","ON"}; +lookup(6131) -> {"No","ON"}; +lookup(6132) -> {"No","ON"}; +lookup(6133) -> {"No","ON"}; +lookup(6134) -> {"No","ON"}; +lookup(6135) -> {"No","ON"}; +lookup(6136) -> {"No","ON"}; +lookup(6137) -> {"No","ON"}; +lookup(6144) -> {"Po","ON"}; +lookup(6145) -> {"Po","ON"}; +lookup(6146) -> {"Po","ON"}; +lookup(6147) -> {"Po","ON"}; +lookup(6148) -> {"Po","ON"}; +lookup(6149) -> {"Po","ON"}; +lookup(6150) -> {"Pd","ON"}; +lookup(6151) -> {"Po","ON"}; +lookup(6152) -> {"Po","ON"}; +lookup(6153) -> {"Po","ON"}; +lookup(6154) -> {"Po","ON"}; +lookup(6155) -> {"Mn","NSM"}; +lookup(6156) -> {"Mn","NSM"}; +lookup(6157) -> {"Mn","NSM"}; +lookup(6158) -> {"Cf","BN"}; +lookup(6160) -> {"Nd","L"}; +lookup(6161) -> {"Nd","L"}; +lookup(6162) -> {"Nd","L"}; +lookup(6163) -> {"Nd","L"}; +lookup(6164) -> {"Nd","L"}; +lookup(6165) -> {"Nd","L"}; +lookup(6166) -> {"Nd","L"}; +lookup(6167) -> {"Nd","L"}; +lookup(6168) -> {"Nd","L"}; +lookup(6169) -> {"Nd","L"}; +lookup(6176) -> {"Lo","L"}; +lookup(6177) -> {"Lo","L"}; +lookup(6178) -> {"Lo","L"}; 
+lookup(6179) -> {"Lo","L"}; +lookup(6180) -> {"Lo","L"}; +lookup(6181) -> {"Lo","L"}; +lookup(6182) -> {"Lo","L"}; +lookup(6183) -> {"Lo","L"}; +lookup(6184) -> {"Lo","L"}; +lookup(6185) -> {"Lo","L"}; +lookup(6186) -> {"Lo","L"}; +lookup(6187) -> {"Lo","L"}; +lookup(6188) -> {"Lo","L"}; +lookup(6189) -> {"Lo","L"}; +lookup(6190) -> {"Lo","L"}; +lookup(6191) -> {"Lo","L"}; +lookup(6192) -> {"Lo","L"}; +lookup(6193) -> {"Lo","L"}; +lookup(6194) -> {"Lo","L"}; +lookup(6195) -> {"Lo","L"}; +lookup(6196) -> {"Lo","L"}; +lookup(6197) -> {"Lo","L"}; +lookup(6198) -> {"Lo","L"}; +lookup(6199) -> {"Lo","L"}; +lookup(6200) -> {"Lo","L"}; +lookup(6201) -> {"Lo","L"}; +lookup(6202) -> {"Lo","L"}; +lookup(6203) -> {"Lo","L"}; +lookup(6204) -> {"Lo","L"}; +lookup(6205) -> {"Lo","L"}; +lookup(6206) -> {"Lo","L"}; +lookup(6207) -> {"Lo","L"}; +lookup(6208) -> {"Lo","L"}; +lookup(6209) -> {"Lo","L"}; +lookup(6210) -> {"Lo","L"}; +lookup(6211) -> {"Lm","L"}; +lookup(6212) -> {"Lo","L"}; +lookup(6213) -> {"Lo","L"}; +lookup(6214) -> {"Lo","L"}; +lookup(6215) -> {"Lo","L"}; +lookup(6216) -> {"Lo","L"}; +lookup(6217) -> {"Lo","L"}; +lookup(6218) -> {"Lo","L"}; +lookup(6219) -> {"Lo","L"}; +lookup(6220) -> {"Lo","L"}; +lookup(6221) -> {"Lo","L"}; +lookup(6222) -> {"Lo","L"}; +lookup(6223) -> {"Lo","L"}; +lookup(6224) -> {"Lo","L"}; +lookup(6225) -> {"Lo","L"}; +lookup(6226) -> {"Lo","L"}; +lookup(6227) -> {"Lo","L"}; +lookup(6228) -> {"Lo","L"}; +lookup(6229) -> {"Lo","L"}; +lookup(6230) -> {"Lo","L"}; +lookup(6231) -> {"Lo","L"}; +lookup(6232) -> {"Lo","L"}; +lookup(6233) -> {"Lo","L"}; +lookup(6234) -> {"Lo","L"}; +lookup(6235) -> {"Lo","L"}; +lookup(6236) -> {"Lo","L"}; +lookup(6237) -> {"Lo","L"}; +lookup(6238) -> {"Lo","L"}; +lookup(6239) -> {"Lo","L"}; +lookup(6240) -> {"Lo","L"}; +lookup(6241) -> {"Lo","L"}; +lookup(6242) -> {"Lo","L"}; +lookup(6243) -> {"Lo","L"}; +lookup(6244) -> {"Lo","L"}; +lookup(6245) -> {"Lo","L"}; +lookup(6246) -> {"Lo","L"}; +lookup(6247) -> 
{"Lo","L"}; +lookup(6248) -> {"Lo","L"}; +lookup(6249) -> {"Lo","L"}; +lookup(6250) -> {"Lo","L"}; +lookup(6251) -> {"Lo","L"}; +lookup(6252) -> {"Lo","L"}; +lookup(6253) -> {"Lo","L"}; +lookup(6254) -> {"Lo","L"}; +lookup(6255) -> {"Lo","L"}; +lookup(6256) -> {"Lo","L"}; +lookup(6257) -> {"Lo","L"}; +lookup(6258) -> {"Lo","L"}; +lookup(6259) -> {"Lo","L"}; +lookup(6260) -> {"Lo","L"}; +lookup(6261) -> {"Lo","L"}; +lookup(6262) -> {"Lo","L"}; +lookup(6263) -> {"Lo","L"}; +lookup(6264) -> {"Lo","L"}; +lookup(6272) -> {"Lo","L"}; +lookup(6273) -> {"Lo","L"}; +lookup(6274) -> {"Lo","L"}; +lookup(6275) -> {"Lo","L"}; +lookup(6276) -> {"Lo","L"}; +lookup(6277) -> {"Mn","NSM"}; +lookup(6278) -> {"Mn","NSM"}; +lookup(6279) -> {"Lo","L"}; +lookup(6280) -> {"Lo","L"}; +lookup(6281) -> {"Lo","L"}; +lookup(6282) -> {"Lo","L"}; +lookup(6283) -> {"Lo","L"}; +lookup(6284) -> {"Lo","L"}; +lookup(6285) -> {"Lo","L"}; +lookup(6286) -> {"Lo","L"}; +lookup(6287) -> {"Lo","L"}; +lookup(6288) -> {"Lo","L"}; +lookup(6289) -> {"Lo","L"}; +lookup(6290) -> {"Lo","L"}; +lookup(6291) -> {"Lo","L"}; +lookup(6292) -> {"Lo","L"}; +lookup(6293) -> {"Lo","L"}; +lookup(6294) -> {"Lo","L"}; +lookup(6295) -> {"Lo","L"}; +lookup(6296) -> {"Lo","L"}; +lookup(6297) -> {"Lo","L"}; +lookup(6298) -> {"Lo","L"}; +lookup(6299) -> {"Lo","L"}; +lookup(6300) -> {"Lo","L"}; +lookup(6301) -> {"Lo","L"}; +lookup(6302) -> {"Lo","L"}; +lookup(6303) -> {"Lo","L"}; +lookup(6304) -> {"Lo","L"}; +lookup(6305) -> {"Lo","L"}; +lookup(6306) -> {"Lo","L"}; +lookup(6307) -> {"Lo","L"}; +lookup(6308) -> {"Lo","L"}; +lookup(6309) -> {"Lo","L"}; +lookup(6310) -> {"Lo","L"}; +lookup(6311) -> {"Lo","L"}; +lookup(6312) -> {"Lo","L"}; +lookup(6313) -> {"Mn","NSM"}; +lookup(6314) -> {"Lo","L"}; +lookup(6320) -> {"Lo","L"}; +lookup(6321) -> {"Lo","L"}; +lookup(6322) -> {"Lo","L"}; +lookup(6323) -> {"Lo","L"}; +lookup(6324) -> {"Lo","L"}; +lookup(6325) -> {"Lo","L"}; +lookup(6326) -> {"Lo","L"}; +lookup(6327) -> {"Lo","L"}; 
+lookup(6328) -> {"Lo","L"}; +lookup(6329) -> {"Lo","L"}; +lookup(6330) -> {"Lo","L"}; +lookup(6331) -> {"Lo","L"}; +lookup(6332) -> {"Lo","L"}; +lookup(6333) -> {"Lo","L"}; +lookup(6334) -> {"Lo","L"}; +lookup(6335) -> {"Lo","L"}; +lookup(6336) -> {"Lo","L"}; +lookup(6337) -> {"Lo","L"}; +lookup(6338) -> {"Lo","L"}; +lookup(6339) -> {"Lo","L"}; +lookup(6340) -> {"Lo","L"}; +lookup(6341) -> {"Lo","L"}; +lookup(6342) -> {"Lo","L"}; +lookup(6343) -> {"Lo","L"}; +lookup(6344) -> {"Lo","L"}; +lookup(6345) -> {"Lo","L"}; +lookup(6346) -> {"Lo","L"}; +lookup(6347) -> {"Lo","L"}; +lookup(6348) -> {"Lo","L"}; +lookup(6349) -> {"Lo","L"}; +lookup(6350) -> {"Lo","L"}; +lookup(6351) -> {"Lo","L"}; +lookup(6352) -> {"Lo","L"}; +lookup(6353) -> {"Lo","L"}; +lookup(6354) -> {"Lo","L"}; +lookup(6355) -> {"Lo","L"}; +lookup(6356) -> {"Lo","L"}; +lookup(6357) -> {"Lo","L"}; +lookup(6358) -> {"Lo","L"}; +lookup(6359) -> {"Lo","L"}; +lookup(6360) -> {"Lo","L"}; +lookup(6361) -> {"Lo","L"}; +lookup(6362) -> {"Lo","L"}; +lookup(6363) -> {"Lo","L"}; +lookup(6364) -> {"Lo","L"}; +lookup(6365) -> {"Lo","L"}; +lookup(6366) -> {"Lo","L"}; +lookup(6367) -> {"Lo","L"}; +lookup(6368) -> {"Lo","L"}; +lookup(6369) -> {"Lo","L"}; +lookup(6370) -> {"Lo","L"}; +lookup(6371) -> {"Lo","L"}; +lookup(6372) -> {"Lo","L"}; +lookup(6373) -> {"Lo","L"}; +lookup(6374) -> {"Lo","L"}; +lookup(6375) -> {"Lo","L"}; +lookup(6376) -> {"Lo","L"}; +lookup(6377) -> {"Lo","L"}; +lookup(6378) -> {"Lo","L"}; +lookup(6379) -> {"Lo","L"}; +lookup(6380) -> {"Lo","L"}; +lookup(6381) -> {"Lo","L"}; +lookup(6382) -> {"Lo","L"}; +lookup(6383) -> {"Lo","L"}; +lookup(6384) -> {"Lo","L"}; +lookup(6385) -> {"Lo","L"}; +lookup(6386) -> {"Lo","L"}; +lookup(6387) -> {"Lo","L"}; +lookup(6388) -> {"Lo","L"}; +lookup(6389) -> {"Lo","L"}; +lookup(6400) -> {"Lo","L"}; +lookup(6401) -> {"Lo","L"}; +lookup(6402) -> {"Lo","L"}; +lookup(6403) -> {"Lo","L"}; +lookup(6404) -> {"Lo","L"}; +lookup(6405) -> {"Lo","L"}; +lookup(6406) -> 
{"Lo","L"}; +lookup(6407) -> {"Lo","L"}; +lookup(6408) -> {"Lo","L"}; +lookup(6409) -> {"Lo","L"}; +lookup(6410) -> {"Lo","L"}; +lookup(6411) -> {"Lo","L"}; +lookup(6412) -> {"Lo","L"}; +lookup(6413) -> {"Lo","L"}; +lookup(6414) -> {"Lo","L"}; +lookup(6415) -> {"Lo","L"}; +lookup(6416) -> {"Lo","L"}; +lookup(6417) -> {"Lo","L"}; +lookup(6418) -> {"Lo","L"}; +lookup(6419) -> {"Lo","L"}; +lookup(6420) -> {"Lo","L"}; +lookup(6421) -> {"Lo","L"}; +lookup(6422) -> {"Lo","L"}; +lookup(6423) -> {"Lo","L"}; +lookup(6424) -> {"Lo","L"}; +lookup(6425) -> {"Lo","L"}; +lookup(6426) -> {"Lo","L"}; +lookup(6427) -> {"Lo","L"}; +lookup(6428) -> {"Lo","L"}; +lookup(6429) -> {"Lo","L"}; +lookup(6430) -> {"Lo","L"}; +lookup(6432) -> {"Mn","NSM"}; +lookup(6433) -> {"Mn","NSM"}; +lookup(6434) -> {"Mn","NSM"}; +lookup(6435) -> {"Mc","L"}; +lookup(6436) -> {"Mc","L"}; +lookup(6437) -> {"Mc","L"}; +lookup(6438) -> {"Mc","L"}; +lookup(6439) -> {"Mn","NSM"}; +lookup(6440) -> {"Mn","NSM"}; +lookup(6441) -> {"Mc","L"}; +lookup(6442) -> {"Mc","L"}; +lookup(6443) -> {"Mc","L"}; +lookup(6448) -> {"Mc","L"}; +lookup(6449) -> {"Mc","L"}; +lookup(6450) -> {"Mn","NSM"}; +lookup(6451) -> {"Mc","L"}; +lookup(6452) -> {"Mc","L"}; +lookup(6453) -> {"Mc","L"}; +lookup(6454) -> {"Mc","L"}; +lookup(6455) -> {"Mc","L"}; +lookup(6456) -> {"Mc","L"}; +lookup(6457) -> {"Mn","NSM"}; +lookup(6458) -> {"Mn","NSM"}; +lookup(6459) -> {"Mn","NSM"}; +lookup(6464) -> {"So","ON"}; +lookup(6468) -> {"Po","ON"}; +lookup(6469) -> {"Po","ON"}; +lookup(6470) -> {"Nd","L"}; +lookup(6471) -> {"Nd","L"}; +lookup(6472) -> {"Nd","L"}; +lookup(6473) -> {"Nd","L"}; +lookup(6474) -> {"Nd","L"}; +lookup(6475) -> {"Nd","L"}; +lookup(6476) -> {"Nd","L"}; +lookup(6477) -> {"Nd","L"}; +lookup(6478) -> {"Nd","L"}; +lookup(6479) -> {"Nd","L"}; +lookup(6480) -> {"Lo","L"}; +lookup(6481) -> {"Lo","L"}; +lookup(6482) -> {"Lo","L"}; +lookup(6483) -> {"Lo","L"}; +lookup(6484) -> {"Lo","L"}; +lookup(6485) -> {"Lo","L"}; +lookup(6486) -> 
{"Lo","L"}; +lookup(6487) -> {"Lo","L"}; +lookup(6488) -> {"Lo","L"}; +lookup(6489) -> {"Lo","L"}; +lookup(6490) -> {"Lo","L"}; +lookup(6491) -> {"Lo","L"}; +lookup(6492) -> {"Lo","L"}; +lookup(6493) -> {"Lo","L"}; +lookup(6494) -> {"Lo","L"}; +lookup(6495) -> {"Lo","L"}; +lookup(6496) -> {"Lo","L"}; +lookup(6497) -> {"Lo","L"}; +lookup(6498) -> {"Lo","L"}; +lookup(6499) -> {"Lo","L"}; +lookup(6500) -> {"Lo","L"}; +lookup(6501) -> {"Lo","L"}; +lookup(6502) -> {"Lo","L"}; +lookup(6503) -> {"Lo","L"}; +lookup(6504) -> {"Lo","L"}; +lookup(6505) -> {"Lo","L"}; +lookup(6506) -> {"Lo","L"}; +lookup(6507) -> {"Lo","L"}; +lookup(6508) -> {"Lo","L"}; +lookup(6509) -> {"Lo","L"}; +lookup(6512) -> {"Lo","L"}; +lookup(6513) -> {"Lo","L"}; +lookup(6514) -> {"Lo","L"}; +lookup(6515) -> {"Lo","L"}; +lookup(6516) -> {"Lo","L"}; +lookup(6528) -> {"Lo","L"}; +lookup(6529) -> {"Lo","L"}; +lookup(6530) -> {"Lo","L"}; +lookup(6531) -> {"Lo","L"}; +lookup(6532) -> {"Lo","L"}; +lookup(6533) -> {"Lo","L"}; +lookup(6534) -> {"Lo","L"}; +lookup(6535) -> {"Lo","L"}; +lookup(6536) -> {"Lo","L"}; +lookup(6537) -> {"Lo","L"}; +lookup(6538) -> {"Lo","L"}; +lookup(6539) -> {"Lo","L"}; +lookup(6540) -> {"Lo","L"}; +lookup(6541) -> {"Lo","L"}; +lookup(6542) -> {"Lo","L"}; +lookup(6543) -> {"Lo","L"}; +lookup(6544) -> {"Lo","L"}; +lookup(6545) -> {"Lo","L"}; +lookup(6546) -> {"Lo","L"}; +lookup(6547) -> {"Lo","L"}; +lookup(6548) -> {"Lo","L"}; +lookup(6549) -> {"Lo","L"}; +lookup(6550) -> {"Lo","L"}; +lookup(6551) -> {"Lo","L"}; +lookup(6552) -> {"Lo","L"}; +lookup(6553) -> {"Lo","L"}; +lookup(6554) -> {"Lo","L"}; +lookup(6555) -> {"Lo","L"}; +lookup(6556) -> {"Lo","L"}; +lookup(6557) -> {"Lo","L"}; +lookup(6558) -> {"Lo","L"}; +lookup(6559) -> {"Lo","L"}; +lookup(6560) -> {"Lo","L"}; +lookup(6561) -> {"Lo","L"}; +lookup(6562) -> {"Lo","L"}; +lookup(6563) -> {"Lo","L"}; +lookup(6564) -> {"Lo","L"}; +lookup(6565) -> {"Lo","L"}; +lookup(6566) -> {"Lo","L"}; +lookup(6567) -> {"Lo","L"}; +lookup(6568) 
-> {"Lo","L"}; +lookup(6569) -> {"Lo","L"}; +lookup(6570) -> {"Lo","L"}; +lookup(6571) -> {"Lo","L"}; +lookup(6576) -> {"Lo","L"}; +lookup(6577) -> {"Lo","L"}; +lookup(6578) -> {"Lo","L"}; +lookup(6579) -> {"Lo","L"}; +lookup(6580) -> {"Lo","L"}; +lookup(6581) -> {"Lo","L"}; +lookup(6582) -> {"Lo","L"}; +lookup(6583) -> {"Lo","L"}; +lookup(6584) -> {"Lo","L"}; +lookup(6585) -> {"Lo","L"}; +lookup(6586) -> {"Lo","L"}; +lookup(6587) -> {"Lo","L"}; +lookup(6588) -> {"Lo","L"}; +lookup(6589) -> {"Lo","L"}; +lookup(6590) -> {"Lo","L"}; +lookup(6591) -> {"Lo","L"}; +lookup(6592) -> {"Lo","L"}; +lookup(6593) -> {"Lo","L"}; +lookup(6594) -> {"Lo","L"}; +lookup(6595) -> {"Lo","L"}; +lookup(6596) -> {"Lo","L"}; +lookup(6597) -> {"Lo","L"}; +lookup(6598) -> {"Lo","L"}; +lookup(6599) -> {"Lo","L"}; +lookup(6600) -> {"Lo","L"}; +lookup(6601) -> {"Lo","L"}; +lookup(6608) -> {"Nd","L"}; +lookup(6609) -> {"Nd","L"}; +lookup(6610) -> {"Nd","L"}; +lookup(6611) -> {"Nd","L"}; +lookup(6612) -> {"Nd","L"}; +lookup(6613) -> {"Nd","L"}; +lookup(6614) -> {"Nd","L"}; +lookup(6615) -> {"Nd","L"}; +lookup(6616) -> {"Nd","L"}; +lookup(6617) -> {"Nd","L"}; +lookup(6618) -> {"No","L"}; +lookup(6622) -> {"So","ON"}; +lookup(6623) -> {"So","ON"}; +lookup(6624) -> {"So","ON"}; +lookup(6625) -> {"So","ON"}; +lookup(6626) -> {"So","ON"}; +lookup(6627) -> {"So","ON"}; +lookup(6628) -> {"So","ON"}; +lookup(6629) -> {"So","ON"}; +lookup(6630) -> {"So","ON"}; +lookup(6631) -> {"So","ON"}; +lookup(6632) -> {"So","ON"}; +lookup(6633) -> {"So","ON"}; +lookup(6634) -> {"So","ON"}; +lookup(6635) -> {"So","ON"}; +lookup(6636) -> {"So","ON"}; +lookup(6637) -> {"So","ON"}; +lookup(6638) -> {"So","ON"}; +lookup(6639) -> {"So","ON"}; +lookup(6640) -> {"So","ON"}; +lookup(6641) -> {"So","ON"}; +lookup(6642) -> {"So","ON"}; +lookup(6643) -> {"So","ON"}; +lookup(6644) -> {"So","ON"}; +lookup(6645) -> {"So","ON"}; +lookup(6646) -> {"So","ON"}; +lookup(6647) -> {"So","ON"}; +lookup(6648) -> {"So","ON"}; +lookup(6649) 
-> {"So","ON"}; +lookup(6650) -> {"So","ON"}; +lookup(6651) -> {"So","ON"}; +lookup(6652) -> {"So","ON"}; +lookup(6653) -> {"So","ON"}; +lookup(6654) -> {"So","ON"}; +lookup(6655) -> {"So","ON"}; +lookup(6656) -> {"Lo","L"}; +lookup(6657) -> {"Lo","L"}; +lookup(6658) -> {"Lo","L"}; +lookup(6659) -> {"Lo","L"}; +lookup(6660) -> {"Lo","L"}; +lookup(6661) -> {"Lo","L"}; +lookup(6662) -> {"Lo","L"}; +lookup(6663) -> {"Lo","L"}; +lookup(6664) -> {"Lo","L"}; +lookup(6665) -> {"Lo","L"}; +lookup(6666) -> {"Lo","L"}; +lookup(6667) -> {"Lo","L"}; +lookup(6668) -> {"Lo","L"}; +lookup(6669) -> {"Lo","L"}; +lookup(6670) -> {"Lo","L"}; +lookup(6671) -> {"Lo","L"}; +lookup(6672) -> {"Lo","L"}; +lookup(6673) -> {"Lo","L"}; +lookup(6674) -> {"Lo","L"}; +lookup(6675) -> {"Lo","L"}; +lookup(6676) -> {"Lo","L"}; +lookup(6677) -> {"Lo","L"}; +lookup(6678) -> {"Lo","L"}; +lookup(6679) -> {"Mn","NSM"}; +lookup(6680) -> {"Mn","NSM"}; +lookup(6681) -> {"Mc","L"}; +lookup(6682) -> {"Mc","L"}; +lookup(6683) -> {"Mn","NSM"}; +lookup(6686) -> {"Po","L"}; +lookup(6687) -> {"Po","L"}; +lookup(6688) -> {"Lo","L"}; +lookup(6689) -> {"Lo","L"}; +lookup(6690) -> {"Lo","L"}; +lookup(6691) -> {"Lo","L"}; +lookup(6692) -> {"Lo","L"}; +lookup(6693) -> {"Lo","L"}; +lookup(6694) -> {"Lo","L"}; +lookup(6695) -> {"Lo","L"}; +lookup(6696) -> {"Lo","L"}; +lookup(6697) -> {"Lo","L"}; +lookup(6698) -> {"Lo","L"}; +lookup(6699) -> {"Lo","L"}; +lookup(6700) -> {"Lo","L"}; +lookup(6701) -> {"Lo","L"}; +lookup(6702) -> {"Lo","L"}; +lookup(6703) -> {"Lo","L"}; +lookup(6704) -> {"Lo","L"}; +lookup(6705) -> {"Lo","L"}; +lookup(6706) -> {"Lo","L"}; +lookup(6707) -> {"Lo","L"}; +lookup(6708) -> {"Lo","L"}; +lookup(6709) -> {"Lo","L"}; +lookup(6710) -> {"Lo","L"}; +lookup(6711) -> {"Lo","L"}; +lookup(6712) -> {"Lo","L"}; +lookup(6713) -> {"Lo","L"}; +lookup(6714) -> {"Lo","L"}; +lookup(6715) -> {"Lo","L"}; +lookup(6716) -> {"Lo","L"}; +lookup(6717) -> {"Lo","L"}; +lookup(6718) -> {"Lo","L"}; +lookup(6719) -> {"Lo","L"}; 
+lookup(6720) -> {"Lo","L"}; +lookup(6721) -> {"Lo","L"}; +lookup(6722) -> {"Lo","L"}; +lookup(6723) -> {"Lo","L"}; +lookup(6724) -> {"Lo","L"}; +lookup(6725) -> {"Lo","L"}; +lookup(6726) -> {"Lo","L"}; +lookup(6727) -> {"Lo","L"}; +lookup(6728) -> {"Lo","L"}; +lookup(6729) -> {"Lo","L"}; +lookup(6730) -> {"Lo","L"}; +lookup(6731) -> {"Lo","L"}; +lookup(6732) -> {"Lo","L"}; +lookup(6733) -> {"Lo","L"}; +lookup(6734) -> {"Lo","L"}; +lookup(6735) -> {"Lo","L"}; +lookup(6736) -> {"Lo","L"}; +lookup(6737) -> {"Lo","L"}; +lookup(6738) -> {"Lo","L"}; +lookup(6739) -> {"Lo","L"}; +lookup(6740) -> {"Lo","L"}; +lookup(6741) -> {"Mc","L"}; +lookup(6742) -> {"Mn","NSM"}; +lookup(6743) -> {"Mc","L"}; +lookup(6744) -> {"Mn","NSM"}; +lookup(6745) -> {"Mn","NSM"}; +lookup(6746) -> {"Mn","NSM"}; +lookup(6747) -> {"Mn","NSM"}; +lookup(6748) -> {"Mn","NSM"}; +lookup(6749) -> {"Mn","NSM"}; +lookup(6750) -> {"Mn","NSM"}; +lookup(6752) -> {"Mn","NSM"}; +lookup(6753) -> {"Mc","L"}; +lookup(6754) -> {"Mn","NSM"}; +lookup(6755) -> {"Mc","L"}; +lookup(6756) -> {"Mc","L"}; +lookup(6757) -> {"Mn","NSM"}; +lookup(6758) -> {"Mn","NSM"}; +lookup(6759) -> {"Mn","NSM"}; +lookup(6760) -> {"Mn","NSM"}; +lookup(6761) -> {"Mn","NSM"}; +lookup(6762) -> {"Mn","NSM"}; +lookup(6763) -> {"Mn","NSM"}; +lookup(6764) -> {"Mn","NSM"}; +lookup(6765) -> {"Mc","L"}; +lookup(6766) -> {"Mc","L"}; +lookup(6767) -> {"Mc","L"}; +lookup(6768) -> {"Mc","L"}; +lookup(6769) -> {"Mc","L"}; +lookup(6770) -> {"Mc","L"}; +lookup(6771) -> {"Mn","NSM"}; +lookup(6772) -> {"Mn","NSM"}; +lookup(6773) -> {"Mn","NSM"}; +lookup(6774) -> {"Mn","NSM"}; +lookup(6775) -> {"Mn","NSM"}; +lookup(6776) -> {"Mn","NSM"}; +lookup(6777) -> {"Mn","NSM"}; +lookup(6778) -> {"Mn","NSM"}; +lookup(6779) -> {"Mn","NSM"}; +lookup(6780) -> {"Mn","NSM"}; +lookup(6783) -> {"Mn","NSM"}; +lookup(6784) -> {"Nd","L"}; +lookup(6785) -> {"Nd","L"}; +lookup(6786) -> {"Nd","L"}; +lookup(6787) -> {"Nd","L"}; +lookup(6788) -> {"Nd","L"}; +lookup(6789) -> 
{"Nd","L"}; +lookup(6790) -> {"Nd","L"}; +lookup(6791) -> {"Nd","L"}; +lookup(6792) -> {"Nd","L"}; +lookup(6793) -> {"Nd","L"}; +lookup(6800) -> {"Nd","L"}; +lookup(6801) -> {"Nd","L"}; +lookup(6802) -> {"Nd","L"}; +lookup(6803) -> {"Nd","L"}; +lookup(6804) -> {"Nd","L"}; +lookup(6805) -> {"Nd","L"}; +lookup(6806) -> {"Nd","L"}; +lookup(6807) -> {"Nd","L"}; +lookup(6808) -> {"Nd","L"}; +lookup(6809) -> {"Nd","L"}; +lookup(6816) -> {"Po","L"}; +lookup(6817) -> {"Po","L"}; +lookup(6818) -> {"Po","L"}; +lookup(6819) -> {"Po","L"}; +lookup(6820) -> {"Po","L"}; +lookup(6821) -> {"Po","L"}; +lookup(6822) -> {"Po","L"}; +lookup(6823) -> {"Lm","L"}; +lookup(6824) -> {"Po","L"}; +lookup(6825) -> {"Po","L"}; +lookup(6826) -> {"Po","L"}; +lookup(6827) -> {"Po","L"}; +lookup(6828) -> {"Po","L"}; +lookup(6829) -> {"Po","L"}; +lookup(6832) -> {"Mn","NSM"}; +lookup(6833) -> {"Mn","NSM"}; +lookup(6834) -> {"Mn","NSM"}; +lookup(6835) -> {"Mn","NSM"}; +lookup(6836) -> {"Mn","NSM"}; +lookup(6837) -> {"Mn","NSM"}; +lookup(6838) -> {"Mn","NSM"}; +lookup(6839) -> {"Mn","NSM"}; +lookup(6840) -> {"Mn","NSM"}; +lookup(6841) -> {"Mn","NSM"}; +lookup(6842) -> {"Mn","NSM"}; +lookup(6843) -> {"Mn","NSM"}; +lookup(6844) -> {"Mn","NSM"}; +lookup(6845) -> {"Mn","NSM"}; +lookup(6846) -> {"Me","NSM"}; +lookup(6847) -> {"Mn","NSM"}; +lookup(6848) -> {"Mn","NSM"}; +lookup(6912) -> {"Mn","NSM"}; +lookup(6913) -> {"Mn","NSM"}; +lookup(6914) -> {"Mn","NSM"}; +lookup(6915) -> {"Mn","NSM"}; +lookup(6916) -> {"Mc","L"}; +lookup(6917) -> {"Lo","L"}; +lookup(6918) -> {"Lo","L"}; +lookup(6919) -> {"Lo","L"}; +lookup(6920) -> {"Lo","L"}; +lookup(6921) -> {"Lo","L"}; +lookup(6922) -> {"Lo","L"}; +lookup(6923) -> {"Lo","L"}; +lookup(6924) -> {"Lo","L"}; +lookup(6925) -> {"Lo","L"}; +lookup(6926) -> {"Lo","L"}; +lookup(6927) -> {"Lo","L"}; +lookup(6928) -> {"Lo","L"}; +lookup(6929) -> {"Lo","L"}; +lookup(6930) -> {"Lo","L"}; +lookup(6931) -> {"Lo","L"}; +lookup(6932) -> {"Lo","L"}; +lookup(6933) -> {"Lo","L"}; 
+lookup(6934) -> {"Lo","L"}; +lookup(6935) -> {"Lo","L"}; +lookup(6936) -> {"Lo","L"}; +lookup(6937) -> {"Lo","L"}; +lookup(6938) -> {"Lo","L"}; +lookup(6939) -> {"Lo","L"}; +lookup(6940) -> {"Lo","L"}; +lookup(6941) -> {"Lo","L"}; +lookup(6942) -> {"Lo","L"}; +lookup(6943) -> {"Lo","L"}; +lookup(6944) -> {"Lo","L"}; +lookup(6945) -> {"Lo","L"}; +lookup(6946) -> {"Lo","L"}; +lookup(6947) -> {"Lo","L"}; +lookup(6948) -> {"Lo","L"}; +lookup(6949) -> {"Lo","L"}; +lookup(6950) -> {"Lo","L"}; +lookup(6951) -> {"Lo","L"}; +lookup(6952) -> {"Lo","L"}; +lookup(6953) -> {"Lo","L"}; +lookup(6954) -> {"Lo","L"}; +lookup(6955) -> {"Lo","L"}; +lookup(6956) -> {"Lo","L"}; +lookup(6957) -> {"Lo","L"}; +lookup(6958) -> {"Lo","L"}; +lookup(6959) -> {"Lo","L"}; +lookup(6960) -> {"Lo","L"}; +lookup(6961) -> {"Lo","L"}; +lookup(6962) -> {"Lo","L"}; +lookup(6963) -> {"Lo","L"}; +lookup(6964) -> {"Mn","NSM"}; +lookup(6965) -> {"Mc","L"}; +lookup(6966) -> {"Mn","NSM"}; +lookup(6967) -> {"Mn","NSM"}; +lookup(6968) -> {"Mn","NSM"}; +lookup(6969) -> {"Mn","NSM"}; +lookup(6970) -> {"Mn","NSM"}; +lookup(6971) -> {"Mc","L"}; +lookup(6972) -> {"Mn","NSM"}; +lookup(6973) -> {"Mc","L"}; +lookup(6974) -> {"Mc","L"}; +lookup(6975) -> {"Mc","L"}; +lookup(6976) -> {"Mc","L"}; +lookup(6977) -> {"Mc","L"}; +lookup(6978) -> {"Mn","NSM"}; +lookup(6979) -> {"Mc","L"}; +lookup(6980) -> {"Mc","L"}; +lookup(6981) -> {"Lo","L"}; +lookup(6982) -> {"Lo","L"}; +lookup(6983) -> {"Lo","L"}; +lookup(6984) -> {"Lo","L"}; +lookup(6985) -> {"Lo","L"}; +lookup(6986) -> {"Lo","L"}; +lookup(6987) -> {"Lo","L"}; +lookup(6992) -> {"Nd","L"}; +lookup(6993) -> {"Nd","L"}; +lookup(6994) -> {"Nd","L"}; +lookup(6995) -> {"Nd","L"}; +lookup(6996) -> {"Nd","L"}; +lookup(6997) -> {"Nd","L"}; +lookup(6998) -> {"Nd","L"}; +lookup(6999) -> {"Nd","L"}; +lookup(7000) -> {"Nd","L"}; +lookup(7001) -> {"Nd","L"}; +lookup(7002) -> {"Po","L"}; +lookup(7003) -> {"Po","L"}; +lookup(7004) -> {"Po","L"}; +lookup(7005) -> {"Po","L"}; 
+lookup(7006) -> {"Po","L"}; +lookup(7007) -> {"Po","L"}; +lookup(7008) -> {"Po","L"}; +lookup(7009) -> {"So","L"}; +lookup(7010) -> {"So","L"}; +lookup(7011) -> {"So","L"}; +lookup(7012) -> {"So","L"}; +lookup(7013) -> {"So","L"}; +lookup(7014) -> {"So","L"}; +lookup(7015) -> {"So","L"}; +lookup(7016) -> {"So","L"}; +lookup(7017) -> {"So","L"}; +lookup(7018) -> {"So","L"}; +lookup(7019) -> {"Mn","NSM"}; +lookup(7020) -> {"Mn","NSM"}; +lookup(7021) -> {"Mn","NSM"}; +lookup(7022) -> {"Mn","NSM"}; +lookup(7023) -> {"Mn","NSM"}; +lookup(7024) -> {"Mn","NSM"}; +lookup(7025) -> {"Mn","NSM"}; +lookup(7026) -> {"Mn","NSM"}; +lookup(7027) -> {"Mn","NSM"}; +lookup(7028) -> {"So","L"}; +lookup(7029) -> {"So","L"}; +lookup(7030) -> {"So","L"}; +lookup(7031) -> {"So","L"}; +lookup(7032) -> {"So","L"}; +lookup(7033) -> {"So","L"}; +lookup(7034) -> {"So","L"}; +lookup(7035) -> {"So","L"}; +lookup(7036) -> {"So","L"}; +lookup(7040) -> {"Mn","NSM"}; +lookup(7041) -> {"Mn","NSM"}; +lookup(7042) -> {"Mc","L"}; +lookup(7043) -> {"Lo","L"}; +lookup(7044) -> {"Lo","L"}; +lookup(7045) -> {"Lo","L"}; +lookup(7046) -> {"Lo","L"}; +lookup(7047) -> {"Lo","L"}; +lookup(7048) -> {"Lo","L"}; +lookup(7049) -> {"Lo","L"}; +lookup(7050) -> {"Lo","L"}; +lookup(7051) -> {"Lo","L"}; +lookup(7052) -> {"Lo","L"}; +lookup(7053) -> {"Lo","L"}; +lookup(7054) -> {"Lo","L"}; +lookup(7055) -> {"Lo","L"}; +lookup(7056) -> {"Lo","L"}; +lookup(7057) -> {"Lo","L"}; +lookup(7058) -> {"Lo","L"}; +lookup(7059) -> {"Lo","L"}; +lookup(7060) -> {"Lo","L"}; +lookup(7061) -> {"Lo","L"}; +lookup(7062) -> {"Lo","L"}; +lookup(7063) -> {"Lo","L"}; +lookup(7064) -> {"Lo","L"}; +lookup(7065) -> {"Lo","L"}; +lookup(7066) -> {"Lo","L"}; +lookup(7067) -> {"Lo","L"}; +lookup(7068) -> {"Lo","L"}; +lookup(7069) -> {"Lo","L"}; +lookup(7070) -> {"Lo","L"}; +lookup(7071) -> {"Lo","L"}; +lookup(7072) -> {"Lo","L"}; +lookup(7073) -> {"Mc","L"}; +lookup(7074) -> {"Mn","NSM"}; +lookup(7075) -> {"Mn","NSM"}; +lookup(7076) -> {"Mn","NSM"}; 
+lookup(7077) -> {"Mn","NSM"}; +lookup(7078) -> {"Mc","L"}; +lookup(7079) -> {"Mc","L"}; +lookup(7080) -> {"Mn","NSM"}; +lookup(7081) -> {"Mn","NSM"}; +lookup(7082) -> {"Mc","L"}; +lookup(7083) -> {"Mn","NSM"}; +lookup(7084) -> {"Mn","NSM"}; +lookup(7085) -> {"Mn","NSM"}; +lookup(7086) -> {"Lo","L"}; +lookup(7087) -> {"Lo","L"}; +lookup(7088) -> {"Nd","L"}; +lookup(7089) -> {"Nd","L"}; +lookup(7090) -> {"Nd","L"}; +lookup(7091) -> {"Nd","L"}; +lookup(7092) -> {"Nd","L"}; +lookup(7093) -> {"Nd","L"}; +lookup(7094) -> {"Nd","L"}; +lookup(7095) -> {"Nd","L"}; +lookup(7096) -> {"Nd","L"}; +lookup(7097) -> {"Nd","L"}; +lookup(7098) -> {"Lo","L"}; +lookup(7099) -> {"Lo","L"}; +lookup(7100) -> {"Lo","L"}; +lookup(7101) -> {"Lo","L"}; +lookup(7102) -> {"Lo","L"}; +lookup(7103) -> {"Lo","L"}; +lookup(7104) -> {"Lo","L"}; +lookup(7105) -> {"Lo","L"}; +lookup(7106) -> {"Lo","L"}; +lookup(7107) -> {"Lo","L"}; +lookup(7108) -> {"Lo","L"}; +lookup(7109) -> {"Lo","L"}; +lookup(7110) -> {"Lo","L"}; +lookup(7111) -> {"Lo","L"}; +lookup(7112) -> {"Lo","L"}; +lookup(7113) -> {"Lo","L"}; +lookup(7114) -> {"Lo","L"}; +lookup(7115) -> {"Lo","L"}; +lookup(7116) -> {"Lo","L"}; +lookup(7117) -> {"Lo","L"}; +lookup(7118) -> {"Lo","L"}; +lookup(7119) -> {"Lo","L"}; +lookup(7120) -> {"Lo","L"}; +lookup(7121) -> {"Lo","L"}; +lookup(7122) -> {"Lo","L"}; +lookup(7123) -> {"Lo","L"}; +lookup(7124) -> {"Lo","L"}; +lookup(7125) -> {"Lo","L"}; +lookup(7126) -> {"Lo","L"}; +lookup(7127) -> {"Lo","L"}; +lookup(7128) -> {"Lo","L"}; +lookup(7129) -> {"Lo","L"}; +lookup(7130) -> {"Lo","L"}; +lookup(7131) -> {"Lo","L"}; +lookup(7132) -> {"Lo","L"}; +lookup(7133) -> {"Lo","L"}; +lookup(7134) -> {"Lo","L"}; +lookup(7135) -> {"Lo","L"}; +lookup(7136) -> {"Lo","L"}; +lookup(7137) -> {"Lo","L"}; +lookup(7138) -> {"Lo","L"}; +lookup(7139) -> {"Lo","L"}; +lookup(7140) -> {"Lo","L"}; +lookup(7141) -> {"Lo","L"}; +lookup(7142) -> {"Mn","NSM"}; +lookup(7143) -> {"Mc","L"}; +lookup(7144) -> {"Mn","NSM"}; 
+lookup(7145) -> {"Mn","NSM"}; +lookup(7146) -> {"Mc","L"}; +lookup(7147) -> {"Mc","L"}; +lookup(7148) -> {"Mc","L"}; +lookup(7149) -> {"Mn","NSM"}; +lookup(7150) -> {"Mc","L"}; +lookup(7151) -> {"Mn","NSM"}; +lookup(7152) -> {"Mn","NSM"}; +lookup(7153) -> {"Mn","NSM"}; +lookup(7154) -> {"Mc","L"}; +lookup(7155) -> {"Mc","L"}; +lookup(7164) -> {"Po","L"}; +lookup(7165) -> {"Po","L"}; +lookup(7166) -> {"Po","L"}; +lookup(7167) -> {"Po","L"}; +lookup(7168) -> {"Lo","L"}; +lookup(7169) -> {"Lo","L"}; +lookup(7170) -> {"Lo","L"}; +lookup(7171) -> {"Lo","L"}; +lookup(7172) -> {"Lo","L"}; +lookup(7173) -> {"Lo","L"}; +lookup(7174) -> {"Lo","L"}; +lookup(7175) -> {"Lo","L"}; +lookup(7176) -> {"Lo","L"}; +lookup(7177) -> {"Lo","L"}; +lookup(7178) -> {"Lo","L"}; +lookup(7179) -> {"Lo","L"}; +lookup(7180) -> {"Lo","L"}; +lookup(7181) -> {"Lo","L"}; +lookup(7182) -> {"Lo","L"}; +lookup(7183) -> {"Lo","L"}; +lookup(7184) -> {"Lo","L"}; +lookup(7185) -> {"Lo","L"}; +lookup(7186) -> {"Lo","L"}; +lookup(7187) -> {"Lo","L"}; +lookup(7188) -> {"Lo","L"}; +lookup(7189) -> {"Lo","L"}; +lookup(7190) -> {"Lo","L"}; +lookup(7191) -> {"Lo","L"}; +lookup(7192) -> {"Lo","L"}; +lookup(7193) -> {"Lo","L"}; +lookup(7194) -> {"Lo","L"}; +lookup(7195) -> {"Lo","L"}; +lookup(7196) -> {"Lo","L"}; +lookup(7197) -> {"Lo","L"}; +lookup(7198) -> {"Lo","L"}; +lookup(7199) -> {"Lo","L"}; +lookup(7200) -> {"Lo","L"}; +lookup(7201) -> {"Lo","L"}; +lookup(7202) -> {"Lo","L"}; +lookup(7203) -> {"Lo","L"}; +lookup(7204) -> {"Mc","L"}; +lookup(7205) -> {"Mc","L"}; +lookup(7206) -> {"Mc","L"}; +lookup(7207) -> {"Mc","L"}; +lookup(7208) -> {"Mc","L"}; +lookup(7209) -> {"Mc","L"}; +lookup(7210) -> {"Mc","L"}; +lookup(7211) -> {"Mc","L"}; +lookup(7212) -> {"Mn","NSM"}; +lookup(7213) -> {"Mn","NSM"}; +lookup(7214) -> {"Mn","NSM"}; +lookup(7215) -> {"Mn","NSM"}; +lookup(7216) -> {"Mn","NSM"}; +lookup(7217) -> {"Mn","NSM"}; +lookup(7218) -> {"Mn","NSM"}; +lookup(7219) -> {"Mn","NSM"}; +lookup(7220) -> {"Mc","L"}; 
+lookup(7221) -> {"Mc","L"}; +lookup(7222) -> {"Mn","NSM"}; +lookup(7223) -> {"Mn","NSM"}; +lookup(7227) -> {"Po","L"}; +lookup(7228) -> {"Po","L"}; +lookup(7229) -> {"Po","L"}; +lookup(7230) -> {"Po","L"}; +lookup(7231) -> {"Po","L"}; +lookup(7232) -> {"Nd","L"}; +lookup(7233) -> {"Nd","L"}; +lookup(7234) -> {"Nd","L"}; +lookup(7235) -> {"Nd","L"}; +lookup(7236) -> {"Nd","L"}; +lookup(7237) -> {"Nd","L"}; +lookup(7238) -> {"Nd","L"}; +lookup(7239) -> {"Nd","L"}; +lookup(7240) -> {"Nd","L"}; +lookup(7241) -> {"Nd","L"}; +lookup(7245) -> {"Lo","L"}; +lookup(7246) -> {"Lo","L"}; +lookup(7247) -> {"Lo","L"}; +lookup(7248) -> {"Nd","L"}; +lookup(7249) -> {"Nd","L"}; +lookup(7250) -> {"Nd","L"}; +lookup(7251) -> {"Nd","L"}; +lookup(7252) -> {"Nd","L"}; +lookup(7253) -> {"Nd","L"}; +lookup(7254) -> {"Nd","L"}; +lookup(7255) -> {"Nd","L"}; +lookup(7256) -> {"Nd","L"}; +lookup(7257) -> {"Nd","L"}; +lookup(7258) -> {"Lo","L"}; +lookup(7259) -> {"Lo","L"}; +lookup(7260) -> {"Lo","L"}; +lookup(7261) -> {"Lo","L"}; +lookup(7262) -> {"Lo","L"}; +lookup(7263) -> {"Lo","L"}; +lookup(7264) -> {"Lo","L"}; +lookup(7265) -> {"Lo","L"}; +lookup(7266) -> {"Lo","L"}; +lookup(7267) -> {"Lo","L"}; +lookup(7268) -> {"Lo","L"}; +lookup(7269) -> {"Lo","L"}; +lookup(7270) -> {"Lo","L"}; +lookup(7271) -> {"Lo","L"}; +lookup(7272) -> {"Lo","L"}; +lookup(7273) -> {"Lo","L"}; +lookup(7274) -> {"Lo","L"}; +lookup(7275) -> {"Lo","L"}; +lookup(7276) -> {"Lo","L"}; +lookup(7277) -> {"Lo","L"}; +lookup(7278) -> {"Lo","L"}; +lookup(7279) -> {"Lo","L"}; +lookup(7280) -> {"Lo","L"}; +lookup(7281) -> {"Lo","L"}; +lookup(7282) -> {"Lo","L"}; +lookup(7283) -> {"Lo","L"}; +lookup(7284) -> {"Lo","L"}; +lookup(7285) -> {"Lo","L"}; +lookup(7286) -> {"Lo","L"}; +lookup(7287) -> {"Lo","L"}; +lookup(7288) -> {"Lm","L"}; +lookup(7289) -> {"Lm","L"}; +lookup(7290) -> {"Lm","L"}; +lookup(7291) -> {"Lm","L"}; +lookup(7292) -> {"Lm","L"}; +lookup(7293) -> {"Lm","L"}; +lookup(7294) -> {"Po","L"}; +lookup(7295) -> 
{"Po","L"}; +lookup(7296) -> {"Ll","L"}; +lookup(7297) -> {"Ll","L"}; +lookup(7298) -> {"Ll","L"}; +lookup(7299) -> {"Ll","L"}; +lookup(7300) -> {"Ll","L"}; +lookup(7301) -> {"Ll","L"}; +lookup(7302) -> {"Ll","L"}; +lookup(7303) -> {"Ll","L"}; +lookup(7304) -> {"Ll","L"}; +lookup(7312) -> {"Lu","L"}; +lookup(7313) -> {"Lu","L"}; +lookup(7314) -> {"Lu","L"}; +lookup(7315) -> {"Lu","L"}; +lookup(7316) -> {"Lu","L"}; +lookup(7317) -> {"Lu","L"}; +lookup(7318) -> {"Lu","L"}; +lookup(7319) -> {"Lu","L"}; +lookup(7320) -> {"Lu","L"}; +lookup(7321) -> {"Lu","L"}; +lookup(7322) -> {"Lu","L"}; +lookup(7323) -> {"Lu","L"}; +lookup(7324) -> {"Lu","L"}; +lookup(7325) -> {"Lu","L"}; +lookup(7326) -> {"Lu","L"}; +lookup(7327) -> {"Lu","L"}; +lookup(7328) -> {"Lu","L"}; +lookup(7329) -> {"Lu","L"}; +lookup(7330) -> {"Lu","L"}; +lookup(7331) -> {"Lu","L"}; +lookup(7332) -> {"Lu","L"}; +lookup(7333) -> {"Lu","L"}; +lookup(7334) -> {"Lu","L"}; +lookup(7335) -> {"Lu","L"}; +lookup(7336) -> {"Lu","L"}; +lookup(7337) -> {"Lu","L"}; +lookup(7338) -> {"Lu","L"}; +lookup(7339) -> {"Lu","L"}; +lookup(7340) -> {"Lu","L"}; +lookup(7341) -> {"Lu","L"}; +lookup(7342) -> {"Lu","L"}; +lookup(7343) -> {"Lu","L"}; +lookup(7344) -> {"Lu","L"}; +lookup(7345) -> {"Lu","L"}; +lookup(7346) -> {"Lu","L"}; +lookup(7347) -> {"Lu","L"}; +lookup(7348) -> {"Lu","L"}; +lookup(7349) -> {"Lu","L"}; +lookup(7350) -> {"Lu","L"}; +lookup(7351) -> {"Lu","L"}; +lookup(7352) -> {"Lu","L"}; +lookup(7353) -> {"Lu","L"}; +lookup(7354) -> {"Lu","L"}; +lookup(7357) -> {"Lu","L"}; +lookup(7358) -> {"Lu","L"}; +lookup(7359) -> {"Lu","L"}; +lookup(7360) -> {"Po","L"}; +lookup(7361) -> {"Po","L"}; +lookup(7362) -> {"Po","L"}; +lookup(7363) -> {"Po","L"}; +lookup(7364) -> {"Po","L"}; +lookup(7365) -> {"Po","L"}; +lookup(7366) -> {"Po","L"}; +lookup(7367) -> {"Po","L"}; +lookup(7376) -> {"Mn","NSM"}; +lookup(7377) -> {"Mn","NSM"}; +lookup(7378) -> {"Mn","NSM"}; +lookup(7379) -> {"Po","L"}; +lookup(7380) -> {"Mn","NSM"}; 
+lookup(7381) -> {"Mn","NSM"}; +lookup(7382) -> {"Mn","NSM"}; +lookup(7383) -> {"Mn","NSM"}; +lookup(7384) -> {"Mn","NSM"}; +lookup(7385) -> {"Mn","NSM"}; +lookup(7386) -> {"Mn","NSM"}; +lookup(7387) -> {"Mn","NSM"}; +lookup(7388) -> {"Mn","NSM"}; +lookup(7389) -> {"Mn","NSM"}; +lookup(7390) -> {"Mn","NSM"}; +lookup(7391) -> {"Mn","NSM"}; +lookup(7392) -> {"Mn","NSM"}; +lookup(7393) -> {"Mc","L"}; +lookup(7394) -> {"Mn","NSM"}; +lookup(7395) -> {"Mn","NSM"}; +lookup(7396) -> {"Mn","NSM"}; +lookup(7397) -> {"Mn","NSM"}; +lookup(7398) -> {"Mn","NSM"}; +lookup(7399) -> {"Mn","NSM"}; +lookup(7400) -> {"Mn","NSM"}; +lookup(7401) -> {"Lo","L"}; +lookup(7402) -> {"Lo","L"}; +lookup(7403) -> {"Lo","L"}; +lookup(7404) -> {"Lo","L"}; +lookup(7405) -> {"Mn","NSM"}; +lookup(7406) -> {"Lo","L"}; +lookup(7407) -> {"Lo","L"}; +lookup(7408) -> {"Lo","L"}; +lookup(7409) -> {"Lo","L"}; +lookup(7410) -> {"Lo","L"}; +lookup(7411) -> {"Lo","L"}; +lookup(7412) -> {"Mn","NSM"}; +lookup(7413) -> {"Lo","L"}; +lookup(7414) -> {"Lo","L"}; +lookup(7415) -> {"Mc","L"}; +lookup(7416) -> {"Mn","NSM"}; +lookup(7417) -> {"Mn","NSM"}; +lookup(7418) -> {"Lo","L"}; +lookup(7424) -> {"Ll","L"}; +lookup(7425) -> {"Ll","L"}; +lookup(7426) -> {"Ll","L"}; +lookup(7427) -> {"Ll","L"}; +lookup(7428) -> {"Ll","L"}; +lookup(7429) -> {"Ll","L"}; +lookup(7430) -> {"Ll","L"}; +lookup(7431) -> {"Ll","L"}; +lookup(7432) -> {"Ll","L"}; +lookup(7433) -> {"Ll","L"}; +lookup(7434) -> {"Ll","L"}; +lookup(7435) -> {"Ll","L"}; +lookup(7436) -> {"Ll","L"}; +lookup(7437) -> {"Ll","L"}; +lookup(7438) -> {"Ll","L"}; +lookup(7439) -> {"Ll","L"}; +lookup(7440) -> {"Ll","L"}; +lookup(7441) -> {"Ll","L"}; +lookup(7442) -> {"Ll","L"}; +lookup(7443) -> {"Ll","L"}; +lookup(7444) -> {"Ll","L"}; +lookup(7445) -> {"Ll","L"}; +lookup(7446) -> {"Ll","L"}; +lookup(7447) -> {"Ll","L"}; +lookup(7448) -> {"Ll","L"}; +lookup(7449) -> {"Ll","L"}; +lookup(7450) -> {"Ll","L"}; +lookup(7451) -> {"Ll","L"}; +lookup(7452) -> {"Ll","L"}; 
+lookup(7453) -> {"Ll","L"}; +lookup(7454) -> {"Ll","L"}; +lookup(7455) -> {"Ll","L"}; +lookup(7456) -> {"Ll","L"}; +lookup(7457) -> {"Ll","L"}; +lookup(7458) -> {"Ll","L"}; +lookup(7459) -> {"Ll","L"}; +lookup(7460) -> {"Ll","L"}; +lookup(7461) -> {"Ll","L"}; +lookup(7462) -> {"Ll","L"}; +lookup(7463) -> {"Ll","L"}; +lookup(7464) -> {"Ll","L"}; +lookup(7465) -> {"Ll","L"}; +lookup(7466) -> {"Ll","L"}; +lookup(7467) -> {"Ll","L"}; +lookup(7468) -> {"Lm","L"}; +lookup(7469) -> {"Lm","L"}; +lookup(7470) -> {"Lm","L"}; +lookup(7471) -> {"Lm","L"}; +lookup(7472) -> {"Lm","L"}; +lookup(7473) -> {"Lm","L"}; +lookup(7474) -> {"Lm","L"}; +lookup(7475) -> {"Lm","L"}; +lookup(7476) -> {"Lm","L"}; +lookup(7477) -> {"Lm","L"}; +lookup(7478) -> {"Lm","L"}; +lookup(7479) -> {"Lm","L"}; +lookup(7480) -> {"Lm","L"}; +lookup(7481) -> {"Lm","L"}; +lookup(7482) -> {"Lm","L"}; +lookup(7483) -> {"Lm","L"}; +lookup(7484) -> {"Lm","L"}; +lookup(7485) -> {"Lm","L"}; +lookup(7486) -> {"Lm","L"}; +lookup(7487) -> {"Lm","L"}; +lookup(7488) -> {"Lm","L"}; +lookup(7489) -> {"Lm","L"}; +lookup(7490) -> {"Lm","L"}; +lookup(7491) -> {"Lm","L"}; +lookup(7492) -> {"Lm","L"}; +lookup(7493) -> {"Lm","L"}; +lookup(7494) -> {"Lm","L"}; +lookup(7495) -> {"Lm","L"}; +lookup(7496) -> {"Lm","L"}; +lookup(7497) -> {"Lm","L"}; +lookup(7498) -> {"Lm","L"}; +lookup(7499) -> {"Lm","L"}; +lookup(7500) -> {"Lm","L"}; +lookup(7501) -> {"Lm","L"}; +lookup(7502) -> {"Lm","L"}; +lookup(7503) -> {"Lm","L"}; +lookup(7504) -> {"Lm","L"}; +lookup(7505) -> {"Lm","L"}; +lookup(7506) -> {"Lm","L"}; +lookup(7507) -> {"Lm","L"}; +lookup(7508) -> {"Lm","L"}; +lookup(7509) -> {"Lm","L"}; +lookup(7510) -> {"Lm","L"}; +lookup(7511) -> {"Lm","L"}; +lookup(7512) -> {"Lm","L"}; +lookup(7513) -> {"Lm","L"}; +lookup(7514) -> {"Lm","L"}; +lookup(7515) -> {"Lm","L"}; +lookup(7516) -> {"Lm","L"}; +lookup(7517) -> {"Lm","L"}; +lookup(7518) -> {"Lm","L"}; +lookup(7519) -> {"Lm","L"}; +lookup(7520) -> {"Lm","L"}; +lookup(7521) -> 
{"Lm","L"}; +lookup(7522) -> {"Lm","L"}; +lookup(7523) -> {"Lm","L"}; +lookup(7524) -> {"Lm","L"}; +lookup(7525) -> {"Lm","L"}; +lookup(7526) -> {"Lm","L"}; +lookup(7527) -> {"Lm","L"}; +lookup(7528) -> {"Lm","L"}; +lookup(7529) -> {"Lm","L"}; +lookup(7530) -> {"Lm","L"}; +lookup(7531) -> {"Ll","L"}; +lookup(7532) -> {"Ll","L"}; +lookup(7533) -> {"Ll","L"}; +lookup(7534) -> {"Ll","L"}; +lookup(7535) -> {"Ll","L"}; +lookup(7536) -> {"Ll","L"}; +lookup(7537) -> {"Ll","L"}; +lookup(7538) -> {"Ll","L"}; +lookup(7539) -> {"Ll","L"}; +lookup(7540) -> {"Ll","L"}; +lookup(7541) -> {"Ll","L"}; +lookup(7542) -> {"Ll","L"}; +lookup(7543) -> {"Ll","L"}; +lookup(7544) -> {"Lm","L"}; +lookup(7545) -> {"Ll","L"}; +lookup(7546) -> {"Ll","L"}; +lookup(7547) -> {"Ll","L"}; +lookup(7548) -> {"Ll","L"}; +lookup(7549) -> {"Ll","L"}; +lookup(7550) -> {"Ll","L"}; +lookup(7551) -> {"Ll","L"}; +lookup(7552) -> {"Ll","L"}; +lookup(7553) -> {"Ll","L"}; +lookup(7554) -> {"Ll","L"}; +lookup(7555) -> {"Ll","L"}; +lookup(7556) -> {"Ll","L"}; +lookup(7557) -> {"Ll","L"}; +lookup(7558) -> {"Ll","L"}; +lookup(7559) -> {"Ll","L"}; +lookup(7560) -> {"Ll","L"}; +lookup(7561) -> {"Ll","L"}; +lookup(7562) -> {"Ll","L"}; +lookup(7563) -> {"Ll","L"}; +lookup(7564) -> {"Ll","L"}; +lookup(7565) -> {"Ll","L"}; +lookup(7566) -> {"Ll","L"}; +lookup(7567) -> {"Ll","L"}; +lookup(7568) -> {"Ll","L"}; +lookup(7569) -> {"Ll","L"}; +lookup(7570) -> {"Ll","L"}; +lookup(7571) -> {"Ll","L"}; +lookup(7572) -> {"Ll","L"}; +lookup(7573) -> {"Ll","L"}; +lookup(7574) -> {"Ll","L"}; +lookup(7575) -> {"Ll","L"}; +lookup(7576) -> {"Ll","L"}; +lookup(7577) -> {"Ll","L"}; +lookup(7578) -> {"Ll","L"}; +lookup(7579) -> {"Lm","L"}; +lookup(7580) -> {"Lm","L"}; +lookup(7581) -> {"Lm","L"}; +lookup(7582) -> {"Lm","L"}; +lookup(7583) -> {"Lm","L"}; +lookup(7584) -> {"Lm","L"}; +lookup(7585) -> {"Lm","L"}; +lookup(7586) -> {"Lm","L"}; +lookup(7587) -> {"Lm","L"}; +lookup(7588) -> {"Lm","L"}; +lookup(7589) -> {"Lm","L"}; +lookup(7590) 
-> {"Lm","L"}; +lookup(7591) -> {"Lm","L"}; +lookup(7592) -> {"Lm","L"}; +lookup(7593) -> {"Lm","L"}; +lookup(7594) -> {"Lm","L"}; +lookup(7595) -> {"Lm","L"}; +lookup(7596) -> {"Lm","L"}; +lookup(7597) -> {"Lm","L"}; +lookup(7598) -> {"Lm","L"}; +lookup(7599) -> {"Lm","L"}; +lookup(7600) -> {"Lm","L"}; +lookup(7601) -> {"Lm","L"}; +lookup(7602) -> {"Lm","L"}; +lookup(7603) -> {"Lm","L"}; +lookup(7604) -> {"Lm","L"}; +lookup(7605) -> {"Lm","L"}; +lookup(7606) -> {"Lm","L"}; +lookup(7607) -> {"Lm","L"}; +lookup(7608) -> {"Lm","L"}; +lookup(7609) -> {"Lm","L"}; +lookup(7610) -> {"Lm","L"}; +lookup(7611) -> {"Lm","L"}; +lookup(7612) -> {"Lm","L"}; +lookup(7613) -> {"Lm","L"}; +lookup(7614) -> {"Lm","L"}; +lookup(7615) -> {"Lm","L"}; +lookup(7616) -> {"Mn","NSM"}; +lookup(7617) -> {"Mn","NSM"}; +lookup(7618) -> {"Mn","NSM"}; +lookup(7619) -> {"Mn","NSM"}; +lookup(7620) -> {"Mn","NSM"}; +lookup(7621) -> {"Mn","NSM"}; +lookup(7622) -> {"Mn","NSM"}; +lookup(7623) -> {"Mn","NSM"}; +lookup(7624) -> {"Mn","NSM"}; +lookup(7625) -> {"Mn","NSM"}; +lookup(7626) -> {"Mn","NSM"}; +lookup(7627) -> {"Mn","NSM"}; +lookup(7628) -> {"Mn","NSM"}; +lookup(7629) -> {"Mn","NSM"}; +lookup(7630) -> {"Mn","NSM"}; +lookup(7631) -> {"Mn","NSM"}; +lookup(7632) -> {"Mn","NSM"}; +lookup(7633) -> {"Mn","NSM"}; +lookup(7634) -> {"Mn","NSM"}; +lookup(7635) -> {"Mn","NSM"}; +lookup(7636) -> {"Mn","NSM"}; +lookup(7637) -> {"Mn","NSM"}; +lookup(7638) -> {"Mn","NSM"}; +lookup(7639) -> {"Mn","NSM"}; +lookup(7640) -> {"Mn","NSM"}; +lookup(7641) -> {"Mn","NSM"}; +lookup(7642) -> {"Mn","NSM"}; +lookup(7643) -> {"Mn","NSM"}; +lookup(7644) -> {"Mn","NSM"}; +lookup(7645) -> {"Mn","NSM"}; +lookup(7646) -> {"Mn","NSM"}; +lookup(7647) -> {"Mn","NSM"}; +lookup(7648) -> {"Mn","NSM"}; +lookup(7649) -> {"Mn","NSM"}; +lookup(7650) -> {"Mn","NSM"}; +lookup(7651) -> {"Mn","NSM"}; +lookup(7652) -> {"Mn","NSM"}; +lookup(7653) -> {"Mn","NSM"}; +lookup(7654) -> {"Mn","NSM"}; +lookup(7655) -> {"Mn","NSM"}; +lookup(7656) -> 
{"Mn","NSM"}; +lookup(7657) -> {"Mn","NSM"}; +lookup(7658) -> {"Mn","NSM"}; +lookup(7659) -> {"Mn","NSM"}; +lookup(7660) -> {"Mn","NSM"}; +lookup(7661) -> {"Mn","NSM"}; +lookup(7662) -> {"Mn","NSM"}; +lookup(7663) -> {"Mn","NSM"}; +lookup(7664) -> {"Mn","NSM"}; +lookup(7665) -> {"Mn","NSM"}; +lookup(7666) -> {"Mn","NSM"}; +lookup(7667) -> {"Mn","NSM"}; +lookup(7668) -> {"Mn","NSM"}; +lookup(7669) -> {"Mn","NSM"}; +lookup(7670) -> {"Mn","NSM"}; +lookup(7671) -> {"Mn","NSM"}; +lookup(7672) -> {"Mn","NSM"}; +lookup(7673) -> {"Mn","NSM"}; +lookup(7675) -> {"Mn","NSM"}; +lookup(7676) -> {"Mn","NSM"}; +lookup(7677) -> {"Mn","NSM"}; +lookup(7678) -> {"Mn","NSM"}; +lookup(7679) -> {"Mn","NSM"}; +lookup(7680) -> {"Lu","L"}; +lookup(7681) -> {"Ll","L"}; +lookup(7682) -> {"Lu","L"}; +lookup(7683) -> {"Ll","L"}; +lookup(7684) -> {"Lu","L"}; +lookup(7685) -> {"Ll","L"}; +lookup(7686) -> {"Lu","L"}; +lookup(7687) -> {"Ll","L"}; +lookup(7688) -> {"Lu","L"}; +lookup(7689) -> {"Ll","L"}; +lookup(7690) -> {"Lu","L"}; +lookup(7691) -> {"Ll","L"}; +lookup(7692) -> {"Lu","L"}; +lookup(7693) -> {"Ll","L"}; +lookup(7694) -> {"Lu","L"}; +lookup(7695) -> {"Ll","L"}; +lookup(7696) -> {"Lu","L"}; +lookup(7697) -> {"Ll","L"}; +lookup(7698) -> {"Lu","L"}; +lookup(7699) -> {"Ll","L"}; +lookup(7700) -> {"Lu","L"}; +lookup(7701) -> {"Ll","L"}; +lookup(7702) -> {"Lu","L"}; +lookup(7703) -> {"Ll","L"}; +lookup(7704) -> {"Lu","L"}; +lookup(7705) -> {"Ll","L"}; +lookup(7706) -> {"Lu","L"}; +lookup(7707) -> {"Ll","L"}; +lookup(7708) -> {"Lu","L"}; +lookup(7709) -> {"Ll","L"}; +lookup(7710) -> {"Lu","L"}; +lookup(7711) -> {"Ll","L"}; +lookup(7712) -> {"Lu","L"}; +lookup(7713) -> {"Ll","L"}; +lookup(7714) -> {"Lu","L"}; +lookup(7715) -> {"Ll","L"}; +lookup(7716) -> {"Lu","L"}; +lookup(7717) -> {"Ll","L"}; +lookup(7718) -> {"Lu","L"}; +lookup(7719) -> {"Ll","L"}; +lookup(7720) -> {"Lu","L"}; +lookup(7721) -> {"Ll","L"}; +lookup(7722) -> {"Lu","L"}; +lookup(7723) -> {"Ll","L"}; +lookup(7724) -> 
{"Lu","L"}; +lookup(7725) -> {"Ll","L"}; +lookup(7726) -> {"Lu","L"}; +lookup(7727) -> {"Ll","L"}; +lookup(7728) -> {"Lu","L"}; +lookup(7729) -> {"Ll","L"}; +lookup(7730) -> {"Lu","L"}; +lookup(7731) -> {"Ll","L"}; +lookup(7732) -> {"Lu","L"}; +lookup(7733) -> {"Ll","L"}; +lookup(7734) -> {"Lu","L"}; +lookup(7735) -> {"Ll","L"}; +lookup(7736) -> {"Lu","L"}; +lookup(7737) -> {"Ll","L"}; +lookup(7738) -> {"Lu","L"}; +lookup(7739) -> {"Ll","L"}; +lookup(7740) -> {"Lu","L"}; +lookup(7741) -> {"Ll","L"}; +lookup(7742) -> {"Lu","L"}; +lookup(7743) -> {"Ll","L"}; +lookup(7744) -> {"Lu","L"}; +lookup(7745) -> {"Ll","L"}; +lookup(7746) -> {"Lu","L"}; +lookup(7747) -> {"Ll","L"}; +lookup(7748) -> {"Lu","L"}; +lookup(7749) -> {"Ll","L"}; +lookup(7750) -> {"Lu","L"}; +lookup(7751) -> {"Ll","L"}; +lookup(7752) -> {"Lu","L"}; +lookup(7753) -> {"Ll","L"}; +lookup(7754) -> {"Lu","L"}; +lookup(7755) -> {"Ll","L"}; +lookup(7756) -> {"Lu","L"}; +lookup(7757) -> {"Ll","L"}; +lookup(7758) -> {"Lu","L"}; +lookup(7759) -> {"Ll","L"}; +lookup(7760) -> {"Lu","L"}; +lookup(7761) -> {"Ll","L"}; +lookup(7762) -> {"Lu","L"}; +lookup(7763) -> {"Ll","L"}; +lookup(7764) -> {"Lu","L"}; +lookup(7765) -> {"Ll","L"}; +lookup(7766) -> {"Lu","L"}; +lookup(7767) -> {"Ll","L"}; +lookup(7768) -> {"Lu","L"}; +lookup(7769) -> {"Ll","L"}; +lookup(7770) -> {"Lu","L"}; +lookup(7771) -> {"Ll","L"}; +lookup(7772) -> {"Lu","L"}; +lookup(7773) -> {"Ll","L"}; +lookup(7774) -> {"Lu","L"}; +lookup(7775) -> {"Ll","L"}; +lookup(7776) -> {"Lu","L"}; +lookup(7777) -> {"Ll","L"}; +lookup(7778) -> {"Lu","L"}; +lookup(7779) -> {"Ll","L"}; +lookup(7780) -> {"Lu","L"}; +lookup(7781) -> {"Ll","L"}; +lookup(7782) -> {"Lu","L"}; +lookup(7783) -> {"Ll","L"}; +lookup(7784) -> {"Lu","L"}; +lookup(7785) -> {"Ll","L"}; +lookup(7786) -> {"Lu","L"}; +lookup(7787) -> {"Ll","L"}; +lookup(7788) -> {"Lu","L"}; +lookup(7789) -> {"Ll","L"}; +lookup(7790) -> {"Lu","L"}; +lookup(7791) -> {"Ll","L"}; +lookup(7792) -> {"Lu","L"}; +lookup(7793) 
-> {"Ll","L"}; +lookup(7794) -> {"Lu","L"}; +lookup(7795) -> {"Ll","L"}; +lookup(7796) -> {"Lu","L"}; +lookup(7797) -> {"Ll","L"}; +lookup(7798) -> {"Lu","L"}; +lookup(7799) -> {"Ll","L"}; +lookup(7800) -> {"Lu","L"}; +lookup(7801) -> {"Ll","L"}; +lookup(7802) -> {"Lu","L"}; +lookup(7803) -> {"Ll","L"}; +lookup(7804) -> {"Lu","L"}; +lookup(7805) -> {"Ll","L"}; +lookup(7806) -> {"Lu","L"}; +lookup(7807) -> {"Ll","L"}; +lookup(7808) -> {"Lu","L"}; +lookup(7809) -> {"Ll","L"}; +lookup(7810) -> {"Lu","L"}; +lookup(7811) -> {"Ll","L"}; +lookup(7812) -> {"Lu","L"}; +lookup(7813) -> {"Ll","L"}; +lookup(7814) -> {"Lu","L"}; +lookup(7815) -> {"Ll","L"}; +lookup(7816) -> {"Lu","L"}; +lookup(7817) -> {"Ll","L"}; +lookup(7818) -> {"Lu","L"}; +lookup(7819) -> {"Ll","L"}; +lookup(7820) -> {"Lu","L"}; +lookup(7821) -> {"Ll","L"}; +lookup(7822) -> {"Lu","L"}; +lookup(7823) -> {"Ll","L"}; +lookup(7824) -> {"Lu","L"}; +lookup(7825) -> {"Ll","L"}; +lookup(7826) -> {"Lu","L"}; +lookup(7827) -> {"Ll","L"}; +lookup(7828) -> {"Lu","L"}; +lookup(7829) -> {"Ll","L"}; +lookup(7830) -> {"Ll","L"}; +lookup(7831) -> {"Ll","L"}; +lookup(7832) -> {"Ll","L"}; +lookup(7833) -> {"Ll","L"}; +lookup(7834) -> {"Ll","L"}; +lookup(7835) -> {"Ll","L"}; +lookup(7836) -> {"Ll","L"}; +lookup(7837) -> {"Ll","L"}; +lookup(7838) -> {"Lu","L"}; +lookup(7839) -> {"Ll","L"}; +lookup(7840) -> {"Lu","L"}; +lookup(7841) -> {"Ll","L"}; +lookup(7842) -> {"Lu","L"}; +lookup(7843) -> {"Ll","L"}; +lookup(7844) -> {"Lu","L"}; +lookup(7845) -> {"Ll","L"}; +lookup(7846) -> {"Lu","L"}; +lookup(7847) -> {"Ll","L"}; +lookup(7848) -> {"Lu","L"}; +lookup(7849) -> {"Ll","L"}; +lookup(7850) -> {"Lu","L"}; +lookup(7851) -> {"Ll","L"}; +lookup(7852) -> {"Lu","L"}; +lookup(7853) -> {"Ll","L"}; +lookup(7854) -> {"Lu","L"}; +lookup(7855) -> {"Ll","L"}; +lookup(7856) -> {"Lu","L"}; +lookup(7857) -> {"Ll","L"}; +lookup(7858) -> {"Lu","L"}; +lookup(7859) -> {"Ll","L"}; +lookup(7860) -> {"Lu","L"}; +lookup(7861) -> {"Ll","L"}; 
+lookup(7862) -> {"Lu","L"}; +lookup(7863) -> {"Ll","L"}; +lookup(7864) -> {"Lu","L"}; +lookup(7865) -> {"Ll","L"}; +lookup(7866) -> {"Lu","L"}; +lookup(7867) -> {"Ll","L"}; +lookup(7868) -> {"Lu","L"}; +lookup(7869) -> {"Ll","L"}; +lookup(7870) -> {"Lu","L"}; +lookup(7871) -> {"Ll","L"}; +lookup(7872) -> {"Lu","L"}; +lookup(7873) -> {"Ll","L"}; +lookup(7874) -> {"Lu","L"}; +lookup(7875) -> {"Ll","L"}; +lookup(7876) -> {"Lu","L"}; +lookup(7877) -> {"Ll","L"}; +lookup(7878) -> {"Lu","L"}; +lookup(7879) -> {"Ll","L"}; +lookup(7880) -> {"Lu","L"}; +lookup(7881) -> {"Ll","L"}; +lookup(7882) -> {"Lu","L"}; +lookup(7883) -> {"Ll","L"}; +lookup(7884) -> {"Lu","L"}; +lookup(7885) -> {"Ll","L"}; +lookup(7886) -> {"Lu","L"}; +lookup(7887) -> {"Ll","L"}; +lookup(7888) -> {"Lu","L"}; +lookup(7889) -> {"Ll","L"}; +lookup(7890) -> {"Lu","L"}; +lookup(7891) -> {"Ll","L"}; +lookup(7892) -> {"Lu","L"}; +lookup(7893) -> {"Ll","L"}; +lookup(7894) -> {"Lu","L"}; +lookup(7895) -> {"Ll","L"}; +lookup(7896) -> {"Lu","L"}; +lookup(7897) -> {"Ll","L"}; +lookup(7898) -> {"Lu","L"}; +lookup(7899) -> {"Ll","L"}; +lookup(7900) -> {"Lu","L"}; +lookup(7901) -> {"Ll","L"}; +lookup(7902) -> {"Lu","L"}; +lookup(7903) -> {"Ll","L"}; +lookup(7904) -> {"Lu","L"}; +lookup(7905) -> {"Ll","L"}; +lookup(7906) -> {"Lu","L"}; +lookup(7907) -> {"Ll","L"}; +lookup(7908) -> {"Lu","L"}; +lookup(7909) -> {"Ll","L"}; +lookup(7910) -> {"Lu","L"}; +lookup(7911) -> {"Ll","L"}; +lookup(7912) -> {"Lu","L"}; +lookup(7913) -> {"Ll","L"}; +lookup(7914) -> {"Lu","L"}; +lookup(7915) -> {"Ll","L"}; +lookup(7916) -> {"Lu","L"}; +lookup(7917) -> {"Ll","L"}; +lookup(7918) -> {"Lu","L"}; +lookup(7919) -> {"Ll","L"}; +lookup(7920) -> {"Lu","L"}; +lookup(7921) -> {"Ll","L"}; +lookup(7922) -> {"Lu","L"}; +lookup(7923) -> {"Ll","L"}; +lookup(7924) -> {"Lu","L"}; +lookup(7925) -> {"Ll","L"}; +lookup(7926) -> {"Lu","L"}; +lookup(7927) -> {"Ll","L"}; +lookup(7928) -> {"Lu","L"}; +lookup(7929) -> {"Ll","L"}; +lookup(7930) -> 
{"Lu","L"}; +lookup(7931) -> {"Ll","L"}; +lookup(7932) -> {"Lu","L"}; +lookup(7933) -> {"Ll","L"}; +lookup(7934) -> {"Lu","L"}; +lookup(7935) -> {"Ll","L"}; +lookup(7936) -> {"Ll","L"}; +lookup(7937) -> {"Ll","L"}; +lookup(7938) -> {"Ll","L"}; +lookup(7939) -> {"Ll","L"}; +lookup(7940) -> {"Ll","L"}; +lookup(7941) -> {"Ll","L"}; +lookup(7942) -> {"Ll","L"}; +lookup(7943) -> {"Ll","L"}; +lookup(7944) -> {"Lu","L"}; +lookup(7945) -> {"Lu","L"}; +lookup(7946) -> {"Lu","L"}; +lookup(7947) -> {"Lu","L"}; +lookup(7948) -> {"Lu","L"}; +lookup(7949) -> {"Lu","L"}; +lookup(7950) -> {"Lu","L"}; +lookup(7951) -> {"Lu","L"}; +lookup(7952) -> {"Ll","L"}; +lookup(7953) -> {"Ll","L"}; +lookup(7954) -> {"Ll","L"}; +lookup(7955) -> {"Ll","L"}; +lookup(7956) -> {"Ll","L"}; +lookup(7957) -> {"Ll","L"}; +lookup(7960) -> {"Lu","L"}; +lookup(7961) -> {"Lu","L"}; +lookup(7962) -> {"Lu","L"}; +lookup(7963) -> {"Lu","L"}; +lookup(7964) -> {"Lu","L"}; +lookup(7965) -> {"Lu","L"}; +lookup(7968) -> {"Ll","L"}; +lookup(7969) -> {"Ll","L"}; +lookup(7970) -> {"Ll","L"}; +lookup(7971) -> {"Ll","L"}; +lookup(7972) -> {"Ll","L"}; +lookup(7973) -> {"Ll","L"}; +lookup(7974) -> {"Ll","L"}; +lookup(7975) -> {"Ll","L"}; +lookup(7976) -> {"Lu","L"}; +lookup(7977) -> {"Lu","L"}; +lookup(7978) -> {"Lu","L"}; +lookup(7979) -> {"Lu","L"}; +lookup(7980) -> {"Lu","L"}; +lookup(7981) -> {"Lu","L"}; +lookup(7982) -> {"Lu","L"}; +lookup(7983) -> {"Lu","L"}; +lookup(7984) -> {"Ll","L"}; +lookup(7985) -> {"Ll","L"}; +lookup(7986) -> {"Ll","L"}; +lookup(7987) -> {"Ll","L"}; +lookup(7988) -> {"Ll","L"}; +lookup(7989) -> {"Ll","L"}; +lookup(7990) -> {"Ll","L"}; +lookup(7991) -> {"Ll","L"}; +lookup(7992) -> {"Lu","L"}; +lookup(7993) -> {"Lu","L"}; +lookup(7994) -> {"Lu","L"}; +lookup(7995) -> {"Lu","L"}; +lookup(7996) -> {"Lu","L"}; +lookup(7997) -> {"Lu","L"}; +lookup(7998) -> {"Lu","L"}; +lookup(7999) -> {"Lu","L"}; +lookup(8000) -> {"Ll","L"}; +lookup(8001) -> {"Ll","L"}; +lookup(8002) -> {"Ll","L"}; +lookup(8003) 
-> {"Ll","L"}; +lookup(8004) -> {"Ll","L"}; +lookup(8005) -> {"Ll","L"}; +lookup(8008) -> {"Lu","L"}; +lookup(8009) -> {"Lu","L"}; +lookup(8010) -> {"Lu","L"}; +lookup(8011) -> {"Lu","L"}; +lookup(8012) -> {"Lu","L"}; +lookup(8013) -> {"Lu","L"}; +lookup(8016) -> {"Ll","L"}; +lookup(8017) -> {"Ll","L"}; +lookup(8018) -> {"Ll","L"}; +lookup(8019) -> {"Ll","L"}; +lookup(8020) -> {"Ll","L"}; +lookup(8021) -> {"Ll","L"}; +lookup(8022) -> {"Ll","L"}; +lookup(8023) -> {"Ll","L"}; +lookup(8025) -> {"Lu","L"}; +lookup(8027) -> {"Lu","L"}; +lookup(8029) -> {"Lu","L"}; +lookup(8031) -> {"Lu","L"}; +lookup(8032) -> {"Ll","L"}; +lookup(8033) -> {"Ll","L"}; +lookup(8034) -> {"Ll","L"}; +lookup(8035) -> {"Ll","L"}; +lookup(8036) -> {"Ll","L"}; +lookup(8037) -> {"Ll","L"}; +lookup(8038) -> {"Ll","L"}; +lookup(8039) -> {"Ll","L"}; +lookup(8040) -> {"Lu","L"}; +lookup(8041) -> {"Lu","L"}; +lookup(8042) -> {"Lu","L"}; +lookup(8043) -> {"Lu","L"}; +lookup(8044) -> {"Lu","L"}; +lookup(8045) -> {"Lu","L"}; +lookup(8046) -> {"Lu","L"}; +lookup(8047) -> {"Lu","L"}; +lookup(8048) -> {"Ll","L"}; +lookup(8049) -> {"Ll","L"}; +lookup(8050) -> {"Ll","L"}; +lookup(8051) -> {"Ll","L"}; +lookup(8052) -> {"Ll","L"}; +lookup(8053) -> {"Ll","L"}; +lookup(8054) -> {"Ll","L"}; +lookup(8055) -> {"Ll","L"}; +lookup(8056) -> {"Ll","L"}; +lookup(8057) -> {"Ll","L"}; +lookup(8058) -> {"Ll","L"}; +lookup(8059) -> {"Ll","L"}; +lookup(8060) -> {"Ll","L"}; +lookup(8061) -> {"Ll","L"}; +lookup(8064) -> {"Ll","L"}; +lookup(8065) -> {"Ll","L"}; +lookup(8066) -> {"Ll","L"}; +lookup(8067) -> {"Ll","L"}; +lookup(8068) -> {"Ll","L"}; +lookup(8069) -> {"Ll","L"}; +lookup(8070) -> {"Ll","L"}; +lookup(8071) -> {"Ll","L"}; +lookup(8072) -> {"Lt","L"}; +lookup(8073) -> {"Lt","L"}; +lookup(8074) -> {"Lt","L"}; +lookup(8075) -> {"Lt","L"}; +lookup(8076) -> {"Lt","L"}; +lookup(8077) -> {"Lt","L"}; +lookup(8078) -> {"Lt","L"}; +lookup(8079) -> {"Lt","L"}; +lookup(8080) -> {"Ll","L"}; +lookup(8081) -> {"Ll","L"}; 
+lookup(8082) -> {"Ll","L"}; +lookup(8083) -> {"Ll","L"}; +lookup(8084) -> {"Ll","L"}; +lookup(8085) -> {"Ll","L"}; +lookup(8086) -> {"Ll","L"}; +lookup(8087) -> {"Ll","L"}; +lookup(8088) -> {"Lt","L"}; +lookup(8089) -> {"Lt","L"}; +lookup(8090) -> {"Lt","L"}; +lookup(8091) -> {"Lt","L"}; +lookup(8092) -> {"Lt","L"}; +lookup(8093) -> {"Lt","L"}; +lookup(8094) -> {"Lt","L"}; +lookup(8095) -> {"Lt","L"}; +lookup(8096) -> {"Ll","L"}; +lookup(8097) -> {"Ll","L"}; +lookup(8098) -> {"Ll","L"}; +lookup(8099) -> {"Ll","L"}; +lookup(8100) -> {"Ll","L"}; +lookup(8101) -> {"Ll","L"}; +lookup(8102) -> {"Ll","L"}; +lookup(8103) -> {"Ll","L"}; +lookup(8104) -> {"Lt","L"}; +lookup(8105) -> {"Lt","L"}; +lookup(8106) -> {"Lt","L"}; +lookup(8107) -> {"Lt","L"}; +lookup(8108) -> {"Lt","L"}; +lookup(8109) -> {"Lt","L"}; +lookup(8110) -> {"Lt","L"}; +lookup(8111) -> {"Lt","L"}; +lookup(8112) -> {"Ll","L"}; +lookup(8113) -> {"Ll","L"}; +lookup(8114) -> {"Ll","L"}; +lookup(8115) -> {"Ll","L"}; +lookup(8116) -> {"Ll","L"}; +lookup(8118) -> {"Ll","L"}; +lookup(8119) -> {"Ll","L"}; +lookup(8120) -> {"Lu","L"}; +lookup(8121) -> {"Lu","L"}; +lookup(8122) -> {"Lu","L"}; +lookup(8123) -> {"Lu","L"}; +lookup(8124) -> {"Lt","L"}; +lookup(8125) -> {"Sk","ON"}; +lookup(8126) -> {"Ll","L"}; +lookup(8127) -> {"Sk","ON"}; +lookup(8128) -> {"Sk","ON"}; +lookup(8129) -> {"Sk","ON"}; +lookup(8130) -> {"Ll","L"}; +lookup(8131) -> {"Ll","L"}; +lookup(8132) -> {"Ll","L"}; +lookup(8134) -> {"Ll","L"}; +lookup(8135) -> {"Ll","L"}; +lookup(8136) -> {"Lu","L"}; +lookup(8137) -> {"Lu","L"}; +lookup(8138) -> {"Lu","L"}; +lookup(8139) -> {"Lu","L"}; +lookup(8140) -> {"Lt","L"}; +lookup(8141) -> {"Sk","ON"}; +lookup(8142) -> {"Sk","ON"}; +lookup(8143) -> {"Sk","ON"}; +lookup(8144) -> {"Ll","L"}; +lookup(8145) -> {"Ll","L"}; +lookup(8146) -> {"Ll","L"}; +lookup(8147) -> {"Ll","L"}; +lookup(8150) -> {"Ll","L"}; +lookup(8151) -> {"Ll","L"}; +lookup(8152) -> {"Lu","L"}; +lookup(8153) -> {"Lu","L"}; +lookup(8154) -> 
{"Lu","L"}; +lookup(8155) -> {"Lu","L"}; +lookup(8157) -> {"Sk","ON"}; +lookup(8158) -> {"Sk","ON"}; +lookup(8159) -> {"Sk","ON"}; +lookup(8160) -> {"Ll","L"}; +lookup(8161) -> {"Ll","L"}; +lookup(8162) -> {"Ll","L"}; +lookup(8163) -> {"Ll","L"}; +lookup(8164) -> {"Ll","L"}; +lookup(8165) -> {"Ll","L"}; +lookup(8166) -> {"Ll","L"}; +lookup(8167) -> {"Ll","L"}; +lookup(8168) -> {"Lu","L"}; +lookup(8169) -> {"Lu","L"}; +lookup(8170) -> {"Lu","L"}; +lookup(8171) -> {"Lu","L"}; +lookup(8172) -> {"Lu","L"}; +lookup(8173) -> {"Sk","ON"}; +lookup(8174) -> {"Sk","ON"}; +lookup(8175) -> {"Sk","ON"}; +lookup(8178) -> {"Ll","L"}; +lookup(8179) -> {"Ll","L"}; +lookup(8180) -> {"Ll","L"}; +lookup(8182) -> {"Ll","L"}; +lookup(8183) -> {"Ll","L"}; +lookup(8184) -> {"Lu","L"}; +lookup(8185) -> {"Lu","L"}; +lookup(8186) -> {"Lu","L"}; +lookup(8187) -> {"Lu","L"}; +lookup(8188) -> {"Lt","L"}; +lookup(8189) -> {"Sk","ON"}; +lookup(8190) -> {"Sk","ON"}; +lookup(8192) -> {"Zs","WS"}; +lookup(8193) -> {"Zs","WS"}; +lookup(8194) -> {"Zs","WS"}; +lookup(8195) -> {"Zs","WS"}; +lookup(8196) -> {"Zs","WS"}; +lookup(8197) -> {"Zs","WS"}; +lookup(8198) -> {"Zs","WS"}; +lookup(8199) -> {"Zs","WS"}; +lookup(8200) -> {"Zs","WS"}; +lookup(8201) -> {"Zs","WS"}; +lookup(8202) -> {"Zs","WS"}; +lookup(8203) -> {"Cf","BN"}; +lookup(8204) -> {"Cf","BN"}; +lookup(8205) -> {"Cf","BN"}; +lookup(8206) -> {"Cf","L"}; +lookup(8207) -> {"Cf","R"}; +lookup(8208) -> {"Pd","ON"}; +lookup(8209) -> {"Pd","ON"}; +lookup(8210) -> {"Pd","ON"}; +lookup(8211) -> {"Pd","ON"}; +lookup(8212) -> {"Pd","ON"}; +lookup(8213) -> {"Pd","ON"}; +lookup(8214) -> {"Po","ON"}; +lookup(8215) -> {"Po","ON"}; +lookup(8216) -> {"Pi","ON"}; +lookup(8217) -> {"Pf","ON"}; +lookup(8218) -> {"Ps","ON"}; +lookup(8219) -> {"Pi","ON"}; +lookup(8220) -> {"Pi","ON"}; +lookup(8221) -> {"Pf","ON"}; +lookup(8222) -> {"Ps","ON"}; +lookup(8223) -> {"Pi","ON"}; +lookup(8224) -> {"Po","ON"}; +lookup(8225) -> {"Po","ON"}; +lookup(8226) -> {"Po","ON"}; 
+lookup(8227) -> {"Po","ON"}; +lookup(8228) -> {"Po","ON"}; +lookup(8229) -> {"Po","ON"}; +lookup(8230) -> {"Po","ON"}; +lookup(8231) -> {"Po","ON"}; +lookup(8232) -> {"Zl","WS"}; +lookup(8233) -> {"Zp","B"}; +lookup(8234) -> {"Cf","LRE"}; +lookup(8235) -> {"Cf","RLE"}; +lookup(8236) -> {"Cf","PDF"}; +lookup(8237) -> {"Cf","LRO"}; +lookup(8238) -> {"Cf","RLO"}; +lookup(8239) -> {"Zs","CS"}; +lookup(8240) -> {"Po","ET"}; +lookup(8241) -> {"Po","ET"}; +lookup(8242) -> {"Po","ET"}; +lookup(8243) -> {"Po","ET"}; +lookup(8244) -> {"Po","ET"}; +lookup(8245) -> {"Po","ON"}; +lookup(8246) -> {"Po","ON"}; +lookup(8247) -> {"Po","ON"}; +lookup(8248) -> {"Po","ON"}; +lookup(8249) -> {"Pi","ON"}; +lookup(8250) -> {"Pf","ON"}; +lookup(8251) -> {"Po","ON"}; +lookup(8252) -> {"Po","ON"}; +lookup(8253) -> {"Po","ON"}; +lookup(8254) -> {"Po","ON"}; +lookup(8255) -> {"Pc","ON"}; +lookup(8256) -> {"Pc","ON"}; +lookup(8257) -> {"Po","ON"}; +lookup(8258) -> {"Po","ON"}; +lookup(8259) -> {"Po","ON"}; +lookup(8260) -> {"Sm","CS"}; +lookup(8261) -> {"Ps","ON"}; +lookup(8262) -> {"Pe","ON"}; +lookup(8263) -> {"Po","ON"}; +lookup(8264) -> {"Po","ON"}; +lookup(8265) -> {"Po","ON"}; +lookup(8266) -> {"Po","ON"}; +lookup(8267) -> {"Po","ON"}; +lookup(8268) -> {"Po","ON"}; +lookup(8269) -> {"Po","ON"}; +lookup(8270) -> {"Po","ON"}; +lookup(8271) -> {"Po","ON"}; +lookup(8272) -> {"Po","ON"}; +lookup(8273) -> {"Po","ON"}; +lookup(8274) -> {"Sm","ON"}; +lookup(8275) -> {"Po","ON"}; +lookup(8276) -> {"Pc","ON"}; +lookup(8277) -> {"Po","ON"}; +lookup(8278) -> {"Po","ON"}; +lookup(8279) -> {"Po","ON"}; +lookup(8280) -> {"Po","ON"}; +lookup(8281) -> {"Po","ON"}; +lookup(8282) -> {"Po","ON"}; +lookup(8283) -> {"Po","ON"}; +lookup(8284) -> {"Po","ON"}; +lookup(8285) -> {"Po","ON"}; +lookup(8286) -> {"Po","ON"}; +lookup(8287) -> {"Zs","WS"}; +lookup(8288) -> {"Cf","BN"}; +lookup(8289) -> {"Cf","BN"}; +lookup(8290) -> {"Cf","BN"}; +lookup(8291) -> {"Cf","BN"}; +lookup(8292) -> {"Cf","BN"}; +lookup(8294) 
-> {"Cf","LRI"}; +lookup(8295) -> {"Cf","RLI"}; +lookup(8296) -> {"Cf","FSI"}; +lookup(8297) -> {"Cf","PDI"}; +lookup(8298) -> {"Cf","BN"}; +lookup(8299) -> {"Cf","BN"}; +lookup(8300) -> {"Cf","BN"}; +lookup(8301) -> {"Cf","BN"}; +lookup(8302) -> {"Cf","BN"}; +lookup(8303) -> {"Cf","BN"}; +lookup(8304) -> {"No","EN"}; +lookup(8305) -> {"Lm","L"}; +lookup(8308) -> {"No","EN"}; +lookup(8309) -> {"No","EN"}; +lookup(8310) -> {"No","EN"}; +lookup(8311) -> {"No","EN"}; +lookup(8312) -> {"No","EN"}; +lookup(8313) -> {"No","EN"}; +lookup(8314) -> {"Sm","ES"}; +lookup(8315) -> {"Sm","ES"}; +lookup(8316) -> {"Sm","ON"}; +lookup(8317) -> {"Ps","ON"}; +lookup(8318) -> {"Pe","ON"}; +lookup(8319) -> {"Lm","L"}; +lookup(8320) -> {"No","EN"}; +lookup(8321) -> {"No","EN"}; +lookup(8322) -> {"No","EN"}; +lookup(8323) -> {"No","EN"}; +lookup(8324) -> {"No","EN"}; +lookup(8325) -> {"No","EN"}; +lookup(8326) -> {"No","EN"}; +lookup(8327) -> {"No","EN"}; +lookup(8328) -> {"No","EN"}; +lookup(8329) -> {"No","EN"}; +lookup(8330) -> {"Sm","ES"}; +lookup(8331) -> {"Sm","ES"}; +lookup(8332) -> {"Sm","ON"}; +lookup(8333) -> {"Ps","ON"}; +lookup(8334) -> {"Pe","ON"}; +lookup(8336) -> {"Lm","L"}; +lookup(8337) -> {"Lm","L"}; +lookup(8338) -> {"Lm","L"}; +lookup(8339) -> {"Lm","L"}; +lookup(8340) -> {"Lm","L"}; +lookup(8341) -> {"Lm","L"}; +lookup(8342) -> {"Lm","L"}; +lookup(8343) -> {"Lm","L"}; +lookup(8344) -> {"Lm","L"}; +lookup(8345) -> {"Lm","L"}; +lookup(8346) -> {"Lm","L"}; +lookup(8347) -> {"Lm","L"}; +lookup(8348) -> {"Lm","L"}; +lookup(8352) -> {"Sc","ET"}; +lookup(8353) -> {"Sc","ET"}; +lookup(8354) -> {"Sc","ET"}; +lookup(8355) -> {"Sc","ET"}; +lookup(8356) -> {"Sc","ET"}; +lookup(8357) -> {"Sc","ET"}; +lookup(8358) -> {"Sc","ET"}; +lookup(8359) -> {"Sc","ET"}; +lookup(8360) -> {"Sc","ET"}; +lookup(8361) -> {"Sc","ET"}; +lookup(8362) -> {"Sc","ET"}; +lookup(8363) -> {"Sc","ET"}; +lookup(8364) -> {"Sc","ET"}; +lookup(8365) -> {"Sc","ET"}; +lookup(8366) -> {"Sc","ET"}; +lookup(8367) 
-> {"Sc","ET"}; +lookup(8368) -> {"Sc","ET"}; +lookup(8369) -> {"Sc","ET"}; +lookup(8370) -> {"Sc","ET"}; +lookup(8371) -> {"Sc","ET"}; +lookup(8372) -> {"Sc","ET"}; +lookup(8373) -> {"Sc","ET"}; +lookup(8374) -> {"Sc","ET"}; +lookup(8375) -> {"Sc","ET"}; +lookup(8376) -> {"Sc","ET"}; +lookup(8377) -> {"Sc","ET"}; +lookup(8378) -> {"Sc","ET"}; +lookup(8379) -> {"Sc","ET"}; +lookup(8380) -> {"Sc","ET"}; +lookup(8381) -> {"Sc","ET"}; +lookup(8382) -> {"Sc","ET"}; +lookup(8383) -> {"Sc","ET"}; +lookup(8400) -> {"Mn","NSM"}; +lookup(8401) -> {"Mn","NSM"}; +lookup(8402) -> {"Mn","NSM"}; +lookup(8403) -> {"Mn","NSM"}; +lookup(8404) -> {"Mn","NSM"}; +lookup(8405) -> {"Mn","NSM"}; +lookup(8406) -> {"Mn","NSM"}; +lookup(8407) -> {"Mn","NSM"}; +lookup(8408) -> {"Mn","NSM"}; +lookup(8409) -> {"Mn","NSM"}; +lookup(8410) -> {"Mn","NSM"}; +lookup(8411) -> {"Mn","NSM"}; +lookup(8412) -> {"Mn","NSM"}; +lookup(8413) -> {"Me","NSM"}; +lookup(8414) -> {"Me","NSM"}; +lookup(8415) -> {"Me","NSM"}; +lookup(8416) -> {"Me","NSM"}; +lookup(8417) -> {"Mn","NSM"}; +lookup(8418) -> {"Me","NSM"}; +lookup(8419) -> {"Me","NSM"}; +lookup(8420) -> {"Me","NSM"}; +lookup(8421) -> {"Mn","NSM"}; +lookup(8422) -> {"Mn","NSM"}; +lookup(8423) -> {"Mn","NSM"}; +lookup(8424) -> {"Mn","NSM"}; +lookup(8425) -> {"Mn","NSM"}; +lookup(8426) -> {"Mn","NSM"}; +lookup(8427) -> {"Mn","NSM"}; +lookup(8428) -> {"Mn","NSM"}; +lookup(8429) -> {"Mn","NSM"}; +lookup(8430) -> {"Mn","NSM"}; +lookup(8431) -> {"Mn","NSM"}; +lookup(8432) -> {"Mn","NSM"}; +lookup(8448) -> {"So","ON"}; +lookup(8449) -> {"So","ON"}; +lookup(8450) -> {"Lu","L"}; +lookup(8451) -> {"So","ON"}; +lookup(8452) -> {"So","ON"}; +lookup(8453) -> {"So","ON"}; +lookup(8454) -> {"So","ON"}; +lookup(8455) -> {"Lu","L"}; +lookup(8456) -> {"So","ON"}; +lookup(8457) -> {"So","ON"}; +lookup(8458) -> {"Ll","L"}; +lookup(8459) -> {"Lu","L"}; +lookup(8460) -> {"Lu","L"}; +lookup(8461) -> {"Lu","L"}; +lookup(8462) -> {"Ll","L"}; +lookup(8463) -> {"Ll","L"}; 
+lookup(8464) -> {"Lu","L"}; +lookup(8465) -> {"Lu","L"}; +lookup(8466) -> {"Lu","L"}; +lookup(8467) -> {"Ll","L"}; +lookup(8468) -> {"So","ON"}; +lookup(8469) -> {"Lu","L"}; +lookup(8470) -> {"So","ON"}; +lookup(8471) -> {"So","ON"}; +lookup(8472) -> {"Sm","ON"}; +lookup(8473) -> {"Lu","L"}; +lookup(8474) -> {"Lu","L"}; +lookup(8475) -> {"Lu","L"}; +lookup(8476) -> {"Lu","L"}; +lookup(8477) -> {"Lu","L"}; +lookup(8478) -> {"So","ON"}; +lookup(8479) -> {"So","ON"}; +lookup(8480) -> {"So","ON"}; +lookup(8481) -> {"So","ON"}; +lookup(8482) -> {"So","ON"}; +lookup(8483) -> {"So","ON"}; +lookup(8484) -> {"Lu","L"}; +lookup(8485) -> {"So","ON"}; +lookup(8486) -> {"Lu","L"}; +lookup(8487) -> {"So","ON"}; +lookup(8488) -> {"Lu","L"}; +lookup(8489) -> {"So","ON"}; +lookup(8490) -> {"Lu","L"}; +lookup(8491) -> {"Lu","L"}; +lookup(8492) -> {"Lu","L"}; +lookup(8493) -> {"Lu","L"}; +lookup(8494) -> {"So","ET"}; +lookup(8495) -> {"Ll","L"}; +lookup(8496) -> {"Lu","L"}; +lookup(8497) -> {"Lu","L"}; +lookup(8498) -> {"Lu","L"}; +lookup(8499) -> {"Lu","L"}; +lookup(8500) -> {"Ll","L"}; +lookup(8501) -> {"Lo","L"}; +lookup(8502) -> {"Lo","L"}; +lookup(8503) -> {"Lo","L"}; +lookup(8504) -> {"Lo","L"}; +lookup(8505) -> {"Ll","L"}; +lookup(8506) -> {"So","ON"}; +lookup(8507) -> {"So","ON"}; +lookup(8508) -> {"Ll","L"}; +lookup(8509) -> {"Ll","L"}; +lookup(8510) -> {"Lu","L"}; +lookup(8511) -> {"Lu","L"}; +lookup(8512) -> {"Sm","ON"}; +lookup(8513) -> {"Sm","ON"}; +lookup(8514) -> {"Sm","ON"}; +lookup(8515) -> {"Sm","ON"}; +lookup(8516) -> {"Sm","ON"}; +lookup(8517) -> {"Lu","L"}; +lookup(8518) -> {"Ll","L"}; +lookup(8519) -> {"Ll","L"}; +lookup(8520) -> {"Ll","L"}; +lookup(8521) -> {"Ll","L"}; +lookup(8522) -> {"So","ON"}; +lookup(8523) -> {"Sm","ON"}; +lookup(8524) -> {"So","ON"}; +lookup(8525) -> {"So","ON"}; +lookup(8526) -> {"Ll","L"}; +lookup(8527) -> {"So","L"}; +lookup(8528) -> {"No","ON"}; +lookup(8529) -> {"No","ON"}; +lookup(8530) -> {"No","ON"}; +lookup(8531) -> 
{"No","ON"}; +lookup(8532) -> {"No","ON"}; +lookup(8533) -> {"No","ON"}; +lookup(8534) -> {"No","ON"}; +lookup(8535) -> {"No","ON"}; +lookup(8536) -> {"No","ON"}; +lookup(8537) -> {"No","ON"}; +lookup(8538) -> {"No","ON"}; +lookup(8539) -> {"No","ON"}; +lookup(8540) -> {"No","ON"}; +lookup(8541) -> {"No","ON"}; +lookup(8542) -> {"No","ON"}; +lookup(8543) -> {"No","ON"}; +lookup(8544) -> {"Nl","L"}; +lookup(8545) -> {"Nl","L"}; +lookup(8546) -> {"Nl","L"}; +lookup(8547) -> {"Nl","L"}; +lookup(8548) -> {"Nl","L"}; +lookup(8549) -> {"Nl","L"}; +lookup(8550) -> {"Nl","L"}; +lookup(8551) -> {"Nl","L"}; +lookup(8552) -> {"Nl","L"}; +lookup(8553) -> {"Nl","L"}; +lookup(8554) -> {"Nl","L"}; +lookup(8555) -> {"Nl","L"}; +lookup(8556) -> {"Nl","L"}; +lookup(8557) -> {"Nl","L"}; +lookup(8558) -> {"Nl","L"}; +lookup(8559) -> {"Nl","L"}; +lookup(8560) -> {"Nl","L"}; +lookup(8561) -> {"Nl","L"}; +lookup(8562) -> {"Nl","L"}; +lookup(8563) -> {"Nl","L"}; +lookup(8564) -> {"Nl","L"}; +lookup(8565) -> {"Nl","L"}; +lookup(8566) -> {"Nl","L"}; +lookup(8567) -> {"Nl","L"}; +lookup(8568) -> {"Nl","L"}; +lookup(8569) -> {"Nl","L"}; +lookup(8570) -> {"Nl","L"}; +lookup(8571) -> {"Nl","L"}; +lookup(8572) -> {"Nl","L"}; +lookup(8573) -> {"Nl","L"}; +lookup(8574) -> {"Nl","L"}; +lookup(8575) -> {"Nl","L"}; +lookup(8576) -> {"Nl","L"}; +lookup(8577) -> {"Nl","L"}; +lookup(8578) -> {"Nl","L"}; +lookup(8579) -> {"Lu","L"}; +lookup(8580) -> {"Ll","L"}; +lookup(8581) -> {"Nl","L"}; +lookup(8582) -> {"Nl","L"}; +lookup(8583) -> {"Nl","L"}; +lookup(8584) -> {"Nl","L"}; +lookup(8585) -> {"No","ON"}; +lookup(8586) -> {"So","ON"}; +lookup(8587) -> {"So","ON"}; +lookup(8592) -> {"Sm","ON"}; +lookup(8593) -> {"Sm","ON"}; +lookup(8594) -> {"Sm","ON"}; +lookup(8595) -> {"Sm","ON"}; +lookup(8596) -> {"Sm","ON"}; +lookup(8597) -> {"So","ON"}; +lookup(8598) -> {"So","ON"}; +lookup(8599) -> {"So","ON"}; +lookup(8600) -> {"So","ON"}; +lookup(8601) -> {"So","ON"}; +lookup(8602) -> {"Sm","ON"}; +lookup(8603) -> 
{"Sm","ON"}; +lookup(8604) -> {"So","ON"}; +lookup(8605) -> {"So","ON"}; +lookup(8606) -> {"So","ON"}; +lookup(8607) -> {"So","ON"}; +lookup(8608) -> {"Sm","ON"}; +lookup(8609) -> {"So","ON"}; +lookup(8610) -> {"So","ON"}; +lookup(8611) -> {"Sm","ON"}; +lookup(8612) -> {"So","ON"}; +lookup(8613) -> {"So","ON"}; +lookup(8614) -> {"Sm","ON"}; +lookup(8615) -> {"So","ON"}; +lookup(8616) -> {"So","ON"}; +lookup(8617) -> {"So","ON"}; +lookup(8618) -> {"So","ON"}; +lookup(8619) -> {"So","ON"}; +lookup(8620) -> {"So","ON"}; +lookup(8621) -> {"So","ON"}; +lookup(8622) -> {"Sm","ON"}; +lookup(8623) -> {"So","ON"}; +lookup(8624) -> {"So","ON"}; +lookup(8625) -> {"So","ON"}; +lookup(8626) -> {"So","ON"}; +lookup(8627) -> {"So","ON"}; +lookup(8628) -> {"So","ON"}; +lookup(8629) -> {"So","ON"}; +lookup(8630) -> {"So","ON"}; +lookup(8631) -> {"So","ON"}; +lookup(8632) -> {"So","ON"}; +lookup(8633) -> {"So","ON"}; +lookup(8634) -> {"So","ON"}; +lookup(8635) -> {"So","ON"}; +lookup(8636) -> {"So","ON"}; +lookup(8637) -> {"So","ON"}; +lookup(8638) -> {"So","ON"}; +lookup(8639) -> {"So","ON"}; +lookup(8640) -> {"So","ON"}; +lookup(8641) -> {"So","ON"}; +lookup(8642) -> {"So","ON"}; +lookup(8643) -> {"So","ON"}; +lookup(8644) -> {"So","ON"}; +lookup(8645) -> {"So","ON"}; +lookup(8646) -> {"So","ON"}; +lookup(8647) -> {"So","ON"}; +lookup(8648) -> {"So","ON"}; +lookup(8649) -> {"So","ON"}; +lookup(8650) -> {"So","ON"}; +lookup(8651) -> {"So","ON"}; +lookup(8652) -> {"So","ON"}; +lookup(8653) -> {"So","ON"}; +lookup(8654) -> {"Sm","ON"}; +lookup(8655) -> {"Sm","ON"}; +lookup(8656) -> {"So","ON"}; +lookup(8657) -> {"So","ON"}; +lookup(8658) -> {"Sm","ON"}; +lookup(8659) -> {"So","ON"}; +lookup(8660) -> {"Sm","ON"}; +lookup(8661) -> {"So","ON"}; +lookup(8662) -> {"So","ON"}; +lookup(8663) -> {"So","ON"}; +lookup(8664) -> {"So","ON"}; +lookup(8665) -> {"So","ON"}; +lookup(8666) -> {"So","ON"}; +lookup(8667) -> {"So","ON"}; +lookup(8668) -> {"So","ON"}; +lookup(8669) -> {"So","ON"}; 
+lookup(8670) -> {"So","ON"}; +lookup(8671) -> {"So","ON"}; +lookup(8672) -> {"So","ON"}; +lookup(8673) -> {"So","ON"}; +lookup(8674) -> {"So","ON"}; +lookup(8675) -> {"So","ON"}; +lookup(8676) -> {"So","ON"}; +lookup(8677) -> {"So","ON"}; +lookup(8678) -> {"So","ON"}; +lookup(8679) -> {"So","ON"}; +lookup(8680) -> {"So","ON"}; +lookup(8681) -> {"So","ON"}; +lookup(8682) -> {"So","ON"}; +lookup(8683) -> {"So","ON"}; +lookup(8684) -> {"So","ON"}; +lookup(8685) -> {"So","ON"}; +lookup(8686) -> {"So","ON"}; +lookup(8687) -> {"So","ON"}; +lookup(8688) -> {"So","ON"}; +lookup(8689) -> {"So","ON"}; +lookup(8690) -> {"So","ON"}; +lookup(8691) -> {"So","ON"}; +lookup(8692) -> {"Sm","ON"}; +lookup(8693) -> {"Sm","ON"}; +lookup(8694) -> {"Sm","ON"}; +lookup(8695) -> {"Sm","ON"}; +lookup(8696) -> {"Sm","ON"}; +lookup(8697) -> {"Sm","ON"}; +lookup(8698) -> {"Sm","ON"}; +lookup(8699) -> {"Sm","ON"}; +lookup(8700) -> {"Sm","ON"}; +lookup(8701) -> {"Sm","ON"}; +lookup(8702) -> {"Sm","ON"}; +lookup(8703) -> {"Sm","ON"}; +lookup(8704) -> {"Sm","ON"}; +lookup(8705) -> {"Sm","ON"}; +lookup(8706) -> {"Sm","ON"}; +lookup(8707) -> {"Sm","ON"}; +lookup(8708) -> {"Sm","ON"}; +lookup(8709) -> {"Sm","ON"}; +lookup(8710) -> {"Sm","ON"}; +lookup(8711) -> {"Sm","ON"}; +lookup(8712) -> {"Sm","ON"}; +lookup(8713) -> {"Sm","ON"}; +lookup(8714) -> {"Sm","ON"}; +lookup(8715) -> {"Sm","ON"}; +lookup(8716) -> {"Sm","ON"}; +lookup(8717) -> {"Sm","ON"}; +lookup(8718) -> {"Sm","ON"}; +lookup(8719) -> {"Sm","ON"}; +lookup(8720) -> {"Sm","ON"}; +lookup(8721) -> {"Sm","ON"}; +lookup(8722) -> {"Sm","ES"}; +lookup(8723) -> {"Sm","ET"}; +lookup(8724) -> {"Sm","ON"}; +lookup(8725) -> {"Sm","ON"}; +lookup(8726) -> {"Sm","ON"}; +lookup(8727) -> {"Sm","ON"}; +lookup(8728) -> {"Sm","ON"}; +lookup(8729) -> {"Sm","ON"}; +lookup(8730) -> {"Sm","ON"}; +lookup(8731) -> {"Sm","ON"}; +lookup(8732) -> {"Sm","ON"}; +lookup(8733) -> {"Sm","ON"}; +lookup(8734) -> {"Sm","ON"}; +lookup(8735) -> {"Sm","ON"}; +lookup(8736) -> 
{"Sm","ON"}; +lookup(8737) -> {"Sm","ON"}; +lookup(8738) -> {"Sm","ON"}; +lookup(8739) -> {"Sm","ON"}; +lookup(8740) -> {"Sm","ON"}; +lookup(8741) -> {"Sm","ON"}; +lookup(8742) -> {"Sm","ON"}; +lookup(8743) -> {"Sm","ON"}; +lookup(8744) -> {"Sm","ON"}; +lookup(8745) -> {"Sm","ON"}; +lookup(8746) -> {"Sm","ON"}; +lookup(8747) -> {"Sm","ON"}; +lookup(8748) -> {"Sm","ON"}; +lookup(8749) -> {"Sm","ON"}; +lookup(8750) -> {"Sm","ON"}; +lookup(8751) -> {"Sm","ON"}; +lookup(8752) -> {"Sm","ON"}; +lookup(8753) -> {"Sm","ON"}; +lookup(8754) -> {"Sm","ON"}; +lookup(8755) -> {"Sm","ON"}; +lookup(8756) -> {"Sm","ON"}; +lookup(8757) -> {"Sm","ON"}; +lookup(8758) -> {"Sm","ON"}; +lookup(8759) -> {"Sm","ON"}; +lookup(8760) -> {"Sm","ON"}; +lookup(8761) -> {"Sm","ON"}; +lookup(8762) -> {"Sm","ON"}; +lookup(8763) -> {"Sm","ON"}; +lookup(8764) -> {"Sm","ON"}; +lookup(8765) -> {"Sm","ON"}; +lookup(8766) -> {"Sm","ON"}; +lookup(8767) -> {"Sm","ON"}; +lookup(8768) -> {"Sm","ON"}; +lookup(8769) -> {"Sm","ON"}; +lookup(8770) -> {"Sm","ON"}; +lookup(8771) -> {"Sm","ON"}; +lookup(8772) -> {"Sm","ON"}; +lookup(8773) -> {"Sm","ON"}; +lookup(8774) -> {"Sm","ON"}; +lookup(8775) -> {"Sm","ON"}; +lookup(8776) -> {"Sm","ON"}; +lookup(8777) -> {"Sm","ON"}; +lookup(8778) -> {"Sm","ON"}; +lookup(8779) -> {"Sm","ON"}; +lookup(8780) -> {"Sm","ON"}; +lookup(8781) -> {"Sm","ON"}; +lookup(8782) -> {"Sm","ON"}; +lookup(8783) -> {"Sm","ON"}; +lookup(8784) -> {"Sm","ON"}; +lookup(8785) -> {"Sm","ON"}; +lookup(8786) -> {"Sm","ON"}; +lookup(8787) -> {"Sm","ON"}; +lookup(8788) -> {"Sm","ON"}; +lookup(8789) -> {"Sm","ON"}; +lookup(8790) -> {"Sm","ON"}; +lookup(8791) -> {"Sm","ON"}; +lookup(8792) -> {"Sm","ON"}; +lookup(8793) -> {"Sm","ON"}; +lookup(8794) -> {"Sm","ON"}; +lookup(8795) -> {"Sm","ON"}; +lookup(8796) -> {"Sm","ON"}; +lookup(8797) -> {"Sm","ON"}; +lookup(8798) -> {"Sm","ON"}; +lookup(8799) -> {"Sm","ON"}; +lookup(8800) -> {"Sm","ON"}; +lookup(8801) -> {"Sm","ON"}; +lookup(8802) -> {"Sm","ON"}; 
+lookup(8803) -> {"Sm","ON"}; +lookup(8804) -> {"Sm","ON"}; +lookup(8805) -> {"Sm","ON"}; +lookup(8806) -> {"Sm","ON"}; +lookup(8807) -> {"Sm","ON"}; +lookup(8808) -> {"Sm","ON"}; +lookup(8809) -> {"Sm","ON"}; +lookup(8810) -> {"Sm","ON"}; +lookup(8811) -> {"Sm","ON"}; +lookup(8812) -> {"Sm","ON"}; +lookup(8813) -> {"Sm","ON"}; +lookup(8814) -> {"Sm","ON"}; +lookup(8815) -> {"Sm","ON"}; +lookup(8816) -> {"Sm","ON"}; +lookup(8817) -> {"Sm","ON"}; +lookup(8818) -> {"Sm","ON"}; +lookup(8819) -> {"Sm","ON"}; +lookup(8820) -> {"Sm","ON"}; +lookup(8821) -> {"Sm","ON"}; +lookup(8822) -> {"Sm","ON"}; +lookup(8823) -> {"Sm","ON"}; +lookup(8824) -> {"Sm","ON"}; +lookup(8825) -> {"Sm","ON"}; +lookup(8826) -> {"Sm","ON"}; +lookup(8827) -> {"Sm","ON"}; +lookup(8828) -> {"Sm","ON"}; +lookup(8829) -> {"Sm","ON"}; +lookup(8830) -> {"Sm","ON"}; +lookup(8831) -> {"Sm","ON"}; +lookup(8832) -> {"Sm","ON"}; +lookup(8833) -> {"Sm","ON"}; +lookup(8834) -> {"Sm","ON"}; +lookup(8835) -> {"Sm","ON"}; +lookup(8836) -> {"Sm","ON"}; +lookup(8837) -> {"Sm","ON"}; +lookup(8838) -> {"Sm","ON"}; +lookup(8839) -> {"Sm","ON"}; +lookup(8840) -> {"Sm","ON"}; +lookup(8841) -> {"Sm","ON"}; +lookup(8842) -> {"Sm","ON"}; +lookup(8843) -> {"Sm","ON"}; +lookup(8844) -> {"Sm","ON"}; +lookup(8845) -> {"Sm","ON"}; +lookup(8846) -> {"Sm","ON"}; +lookup(8847) -> {"Sm","ON"}; +lookup(8848) -> {"Sm","ON"}; +lookup(8849) -> {"Sm","ON"}; +lookup(8850) -> {"Sm","ON"}; +lookup(8851) -> {"Sm","ON"}; +lookup(8852) -> {"Sm","ON"}; +lookup(8853) -> {"Sm","ON"}; +lookup(8854) -> {"Sm","ON"}; +lookup(8855) -> {"Sm","ON"}; +lookup(8856) -> {"Sm","ON"}; +lookup(8857) -> {"Sm","ON"}; +lookup(8858) -> {"Sm","ON"}; +lookup(8859) -> {"Sm","ON"}; +lookup(8860) -> {"Sm","ON"}; +lookup(8861) -> {"Sm","ON"}; +lookup(8862) -> {"Sm","ON"}; +lookup(8863) -> {"Sm","ON"}; +lookup(8864) -> {"Sm","ON"}; +lookup(8865) -> {"Sm","ON"}; +lookup(8866) -> {"Sm","ON"}; +lookup(8867) -> {"Sm","ON"}; +lookup(8868) -> {"Sm","ON"}; +lookup(8869) -> 
{"Sm","ON"}; +lookup(8870) -> {"Sm","ON"}; +lookup(8871) -> {"Sm","ON"}; +lookup(8872) -> {"Sm","ON"}; +lookup(8873) -> {"Sm","ON"}; +lookup(8874) -> {"Sm","ON"}; +lookup(8875) -> {"Sm","ON"}; +lookup(8876) -> {"Sm","ON"}; +lookup(8877) -> {"Sm","ON"}; +lookup(8878) -> {"Sm","ON"}; +lookup(8879) -> {"Sm","ON"}; +lookup(8880) -> {"Sm","ON"}; +lookup(8881) -> {"Sm","ON"}; +lookup(8882) -> {"Sm","ON"}; +lookup(8883) -> {"Sm","ON"}; +lookup(8884) -> {"Sm","ON"}; +lookup(8885) -> {"Sm","ON"}; +lookup(8886) -> {"Sm","ON"}; +lookup(8887) -> {"Sm","ON"}; +lookup(8888) -> {"Sm","ON"}; +lookup(8889) -> {"Sm","ON"}; +lookup(8890) -> {"Sm","ON"}; +lookup(8891) -> {"Sm","ON"}; +lookup(8892) -> {"Sm","ON"}; +lookup(8893) -> {"Sm","ON"}; +lookup(8894) -> {"Sm","ON"}; +lookup(8895) -> {"Sm","ON"}; +lookup(8896) -> {"Sm","ON"}; +lookup(8897) -> {"Sm","ON"}; +lookup(8898) -> {"Sm","ON"}; +lookup(8899) -> {"Sm","ON"}; +lookup(8900) -> {"Sm","ON"}; +lookup(8901) -> {"Sm","ON"}; +lookup(8902) -> {"Sm","ON"}; +lookup(8903) -> {"Sm","ON"}; +lookup(8904) -> {"Sm","ON"}; +lookup(8905) -> {"Sm","ON"}; +lookup(8906) -> {"Sm","ON"}; +lookup(8907) -> {"Sm","ON"}; +lookup(8908) -> {"Sm","ON"}; +lookup(8909) -> {"Sm","ON"}; +lookup(8910) -> {"Sm","ON"}; +lookup(8911) -> {"Sm","ON"}; +lookup(8912) -> {"Sm","ON"}; +lookup(8913) -> {"Sm","ON"}; +lookup(8914) -> {"Sm","ON"}; +lookup(8915) -> {"Sm","ON"}; +lookup(8916) -> {"Sm","ON"}; +lookup(8917) -> {"Sm","ON"}; +lookup(8918) -> {"Sm","ON"}; +lookup(8919) -> {"Sm","ON"}; +lookup(8920) -> {"Sm","ON"}; +lookup(8921) -> {"Sm","ON"}; +lookup(8922) -> {"Sm","ON"}; +lookup(8923) -> {"Sm","ON"}; +lookup(8924) -> {"Sm","ON"}; +lookup(8925) -> {"Sm","ON"}; +lookup(8926) -> {"Sm","ON"}; +lookup(8927) -> {"Sm","ON"}; +lookup(8928) -> {"Sm","ON"}; +lookup(8929) -> {"Sm","ON"}; +lookup(8930) -> {"Sm","ON"}; +lookup(8931) -> {"Sm","ON"}; +lookup(8932) -> {"Sm","ON"}; +lookup(8933) -> {"Sm","ON"}; +lookup(8934) -> {"Sm","ON"}; +lookup(8935) -> {"Sm","ON"}; 
+lookup(8936) -> {"Sm","ON"}; +lookup(8937) -> {"Sm","ON"}; +lookup(8938) -> {"Sm","ON"}; +lookup(8939) -> {"Sm","ON"}; +lookup(8940) -> {"Sm","ON"}; +lookup(8941) -> {"Sm","ON"}; +lookup(8942) -> {"Sm","ON"}; +lookup(8943) -> {"Sm","ON"}; +lookup(8944) -> {"Sm","ON"}; +lookup(8945) -> {"Sm","ON"}; +lookup(8946) -> {"Sm","ON"}; +lookup(8947) -> {"Sm","ON"}; +lookup(8948) -> {"Sm","ON"}; +lookup(8949) -> {"Sm","ON"}; +lookup(8950) -> {"Sm","ON"}; +lookup(8951) -> {"Sm","ON"}; +lookup(8952) -> {"Sm","ON"}; +lookup(8953) -> {"Sm","ON"}; +lookup(8954) -> {"Sm","ON"}; +lookup(8955) -> {"Sm","ON"}; +lookup(8956) -> {"Sm","ON"}; +lookup(8957) -> {"Sm","ON"}; +lookup(8958) -> {"Sm","ON"}; +lookup(8959) -> {"Sm","ON"}; +lookup(8960) -> {"So","ON"}; +lookup(8961) -> {"So","ON"}; +lookup(8962) -> {"So","ON"}; +lookup(8963) -> {"So","ON"}; +lookup(8964) -> {"So","ON"}; +lookup(8965) -> {"So","ON"}; +lookup(8966) -> {"So","ON"}; +lookup(8967) -> {"So","ON"}; +lookup(8968) -> {"Ps","ON"}; +lookup(8969) -> {"Pe","ON"}; +lookup(8970) -> {"Ps","ON"}; +lookup(8971) -> {"Pe","ON"}; +lookup(8972) -> {"So","ON"}; +lookup(8973) -> {"So","ON"}; +lookup(8974) -> {"So","ON"}; +lookup(8975) -> {"So","ON"}; +lookup(8976) -> {"So","ON"}; +lookup(8977) -> {"So","ON"}; +lookup(8978) -> {"So","ON"}; +lookup(8979) -> {"So","ON"}; +lookup(8980) -> {"So","ON"}; +lookup(8981) -> {"So","ON"}; +lookup(8982) -> {"So","ON"}; +lookup(8983) -> {"So","ON"}; +lookup(8984) -> {"So","ON"}; +lookup(8985) -> {"So","ON"}; +lookup(8986) -> {"So","ON"}; +lookup(8987) -> {"So","ON"}; +lookup(8988) -> {"So","ON"}; +lookup(8989) -> {"So","ON"}; +lookup(8990) -> {"So","ON"}; +lookup(8991) -> {"So","ON"}; +lookup(8992) -> {"Sm","ON"}; +lookup(8993) -> {"Sm","ON"}; +lookup(8994) -> {"So","ON"}; +lookup(8995) -> {"So","ON"}; +lookup(8996) -> {"So","ON"}; +lookup(8997) -> {"So","ON"}; +lookup(8998) -> {"So","ON"}; +lookup(8999) -> {"So","ON"}; +lookup(9000) -> {"So","ON"}; +lookup(9001) -> {"Ps","ON"}; +lookup(9002) -> 
{"Pe","ON"}; +lookup(9003) -> {"So","ON"}; +lookup(9004) -> {"So","ON"}; +lookup(9005) -> {"So","ON"}; +lookup(9006) -> {"So","ON"}; +lookup(9007) -> {"So","ON"}; +lookup(9008) -> {"So","ON"}; +lookup(9009) -> {"So","ON"}; +lookup(9010) -> {"So","ON"}; +lookup(9011) -> {"So","ON"}; +lookup(9012) -> {"So","ON"}; +lookup(9013) -> {"So","ON"}; +lookup(9014) -> {"So","L"}; +lookup(9015) -> {"So","L"}; +lookup(9016) -> {"So","L"}; +lookup(9017) -> {"So","L"}; +lookup(9018) -> {"So","L"}; +lookup(9019) -> {"So","L"}; +lookup(9020) -> {"So","L"}; +lookup(9021) -> {"So","L"}; +lookup(9022) -> {"So","L"}; +lookup(9023) -> {"So","L"}; +lookup(9024) -> {"So","L"}; +lookup(9025) -> {"So","L"}; +lookup(9026) -> {"So","L"}; +lookup(9027) -> {"So","L"}; +lookup(9028) -> {"So","L"}; +lookup(9029) -> {"So","L"}; +lookup(9030) -> {"So","L"}; +lookup(9031) -> {"So","L"}; +lookup(9032) -> {"So","L"}; +lookup(9033) -> {"So","L"}; +lookup(9034) -> {"So","L"}; +lookup(9035) -> {"So","L"}; +lookup(9036) -> {"So","L"}; +lookup(9037) -> {"So","L"}; +lookup(9038) -> {"So","L"}; +lookup(9039) -> {"So","L"}; +lookup(9040) -> {"So","L"}; +lookup(9041) -> {"So","L"}; +lookup(9042) -> {"So","L"}; +lookup(9043) -> {"So","L"}; +lookup(9044) -> {"So","L"}; +lookup(9045) -> {"So","L"}; +lookup(9046) -> {"So","L"}; +lookup(9047) -> {"So","L"}; +lookup(9048) -> {"So","L"}; +lookup(9049) -> {"So","L"}; +lookup(9050) -> {"So","L"}; +lookup(9051) -> {"So","L"}; +lookup(9052) -> {"So","L"}; +lookup(9053) -> {"So","L"}; +lookup(9054) -> {"So","L"}; +lookup(9055) -> {"So","L"}; +lookup(9056) -> {"So","L"}; +lookup(9057) -> {"So","L"}; +lookup(9058) -> {"So","L"}; +lookup(9059) -> {"So","L"}; +lookup(9060) -> {"So","L"}; +lookup(9061) -> {"So","L"}; +lookup(9062) -> {"So","L"}; +lookup(9063) -> {"So","L"}; +lookup(9064) -> {"So","L"}; +lookup(9065) -> {"So","L"}; +lookup(9066) -> {"So","L"}; +lookup(9067) -> {"So","L"}; +lookup(9068) -> {"So","L"}; +lookup(9069) -> {"So","L"}; +lookup(9070) -> {"So","L"}; 
+lookup(9071) -> {"So","L"}; +lookup(9072) -> {"So","L"}; +lookup(9073) -> {"So","L"}; +lookup(9074) -> {"So","L"}; +lookup(9075) -> {"So","L"}; +lookup(9076) -> {"So","L"}; +lookup(9077) -> {"So","L"}; +lookup(9078) -> {"So","L"}; +lookup(9079) -> {"So","L"}; +lookup(9080) -> {"So","L"}; +lookup(9081) -> {"So","L"}; +lookup(9082) -> {"So","L"}; +lookup(9083) -> {"So","ON"}; +lookup(9084) -> {"Sm","ON"}; +lookup(9085) -> {"So","ON"}; +lookup(9086) -> {"So","ON"}; +lookup(9087) -> {"So","ON"}; +lookup(9088) -> {"So","ON"}; +lookup(9089) -> {"So","ON"}; +lookup(9090) -> {"So","ON"}; +lookup(9091) -> {"So","ON"}; +lookup(9092) -> {"So","ON"}; +lookup(9093) -> {"So","ON"}; +lookup(9094) -> {"So","ON"}; +lookup(9095) -> {"So","ON"}; +lookup(9096) -> {"So","ON"}; +lookup(9097) -> {"So","ON"}; +lookup(9098) -> {"So","ON"}; +lookup(9099) -> {"So","ON"}; +lookup(9100) -> {"So","ON"}; +lookup(9101) -> {"So","ON"}; +lookup(9102) -> {"So","ON"}; +lookup(9103) -> {"So","ON"}; +lookup(9104) -> {"So","ON"}; +lookup(9105) -> {"So","ON"}; +lookup(9106) -> {"So","ON"}; +lookup(9107) -> {"So","ON"}; +lookup(9108) -> {"So","ON"}; +lookup(9109) -> {"So","L"}; +lookup(9110) -> {"So","ON"}; +lookup(9111) -> {"So","ON"}; +lookup(9112) -> {"So","ON"}; +lookup(9113) -> {"So","ON"}; +lookup(9114) -> {"So","ON"}; +lookup(9115) -> {"Sm","ON"}; +lookup(9116) -> {"Sm","ON"}; +lookup(9117) -> {"Sm","ON"}; +lookup(9118) -> {"Sm","ON"}; +lookup(9119) -> {"Sm","ON"}; +lookup(9120) -> {"Sm","ON"}; +lookup(9121) -> {"Sm","ON"}; +lookup(9122) -> {"Sm","ON"}; +lookup(9123) -> {"Sm","ON"}; +lookup(9124) -> {"Sm","ON"}; +lookup(9125) -> {"Sm","ON"}; +lookup(9126) -> {"Sm","ON"}; +lookup(9127) -> {"Sm","ON"}; +lookup(9128) -> {"Sm","ON"}; +lookup(9129) -> {"Sm","ON"}; +lookup(9130) -> {"Sm","ON"}; +lookup(9131) -> {"Sm","ON"}; +lookup(9132) -> {"Sm","ON"}; +lookup(9133) -> {"Sm","ON"}; +lookup(9134) -> {"Sm","ON"}; +lookup(9135) -> {"Sm","ON"}; +lookup(9136) -> {"Sm","ON"}; +lookup(9137) -> {"Sm","ON"}; 
+lookup(9138) -> {"Sm","ON"}; +lookup(9139) -> {"Sm","ON"}; +lookup(9140) -> {"So","ON"}; +lookup(9141) -> {"So","ON"}; +lookup(9142) -> {"So","ON"}; +lookup(9143) -> {"So","ON"}; +lookup(9144) -> {"So","ON"}; +lookup(9145) -> {"So","ON"}; +lookup(9146) -> {"So","ON"}; +lookup(9147) -> {"So","ON"}; +lookup(9148) -> {"So","ON"}; +lookup(9149) -> {"So","ON"}; +lookup(9150) -> {"So","ON"}; +lookup(9151) -> {"So","ON"}; +lookup(9152) -> {"So","ON"}; +lookup(9153) -> {"So","ON"}; +lookup(9154) -> {"So","ON"}; +lookup(9155) -> {"So","ON"}; +lookup(9156) -> {"So","ON"}; +lookup(9157) -> {"So","ON"}; +lookup(9158) -> {"So","ON"}; +lookup(9159) -> {"So","ON"}; +lookup(9160) -> {"So","ON"}; +lookup(9161) -> {"So","ON"}; +lookup(9162) -> {"So","ON"}; +lookup(9163) -> {"So","ON"}; +lookup(9164) -> {"So","ON"}; +lookup(9165) -> {"So","ON"}; +lookup(9166) -> {"So","ON"}; +lookup(9167) -> {"So","ON"}; +lookup(9168) -> {"So","ON"}; +lookup(9169) -> {"So","ON"}; +lookup(9170) -> {"So","ON"}; +lookup(9171) -> {"So","ON"}; +lookup(9172) -> {"So","ON"}; +lookup(9173) -> {"So","ON"}; +lookup(9174) -> {"So","ON"}; +lookup(9175) -> {"So","ON"}; +lookup(9176) -> {"So","ON"}; +lookup(9177) -> {"So","ON"}; +lookup(9178) -> {"So","ON"}; +lookup(9179) -> {"So","ON"}; +lookup(9180) -> {"Sm","ON"}; +lookup(9181) -> {"Sm","ON"}; +lookup(9182) -> {"Sm","ON"}; +lookup(9183) -> {"Sm","ON"}; +lookup(9184) -> {"Sm","ON"}; +lookup(9185) -> {"Sm","ON"}; +lookup(9186) -> {"So","ON"}; +lookup(9187) -> {"So","ON"}; +lookup(9188) -> {"So","ON"}; +lookup(9189) -> {"So","ON"}; +lookup(9190) -> {"So","ON"}; +lookup(9191) -> {"So","ON"}; +lookup(9192) -> {"So","ON"}; +lookup(9193) -> {"So","ON"}; +lookup(9194) -> {"So","ON"}; +lookup(9195) -> {"So","ON"}; +lookup(9196) -> {"So","ON"}; +lookup(9197) -> {"So","ON"}; +lookup(9198) -> {"So","ON"}; +lookup(9199) -> {"So","ON"}; +lookup(9200) -> {"So","ON"}; +lookup(9201) -> {"So","ON"}; +lookup(9202) -> {"So","ON"}; +lookup(9203) -> {"So","ON"}; +lookup(9204) -> 
{"So","ON"}; +lookup(9205) -> {"So","ON"}; +lookup(9206) -> {"So","ON"}; +lookup(9207) -> {"So","ON"}; +lookup(9208) -> {"So","ON"}; +lookup(9209) -> {"So","ON"}; +lookup(9210) -> {"So","ON"}; +lookup(9211) -> {"So","ON"}; +lookup(9212) -> {"So","ON"}; +lookup(9213) -> {"So","ON"}; +lookup(9214) -> {"So","ON"}; +lookup(9215) -> {"So","ON"}; +lookup(9216) -> {"So","ON"}; +lookup(9217) -> {"So","ON"}; +lookup(9218) -> {"So","ON"}; +lookup(9219) -> {"So","ON"}; +lookup(9220) -> {"So","ON"}; +lookup(9221) -> {"So","ON"}; +lookup(9222) -> {"So","ON"}; +lookup(9223) -> {"So","ON"}; +lookup(9224) -> {"So","ON"}; +lookup(9225) -> {"So","ON"}; +lookup(9226) -> {"So","ON"}; +lookup(9227) -> {"So","ON"}; +lookup(9228) -> {"So","ON"}; +lookup(9229) -> {"So","ON"}; +lookup(9230) -> {"So","ON"}; +lookup(9231) -> {"So","ON"}; +lookup(9232) -> {"So","ON"}; +lookup(9233) -> {"So","ON"}; +lookup(9234) -> {"So","ON"}; +lookup(9235) -> {"So","ON"}; +lookup(9236) -> {"So","ON"}; +lookup(9237) -> {"So","ON"}; +lookup(9238) -> {"So","ON"}; +lookup(9239) -> {"So","ON"}; +lookup(9240) -> {"So","ON"}; +lookup(9241) -> {"So","ON"}; +lookup(9242) -> {"So","ON"}; +lookup(9243) -> {"So","ON"}; +lookup(9244) -> {"So","ON"}; +lookup(9245) -> {"So","ON"}; +lookup(9246) -> {"So","ON"}; +lookup(9247) -> {"So","ON"}; +lookup(9248) -> {"So","ON"}; +lookup(9249) -> {"So","ON"}; +lookup(9250) -> {"So","ON"}; +lookup(9251) -> {"So","ON"}; +lookup(9252) -> {"So","ON"}; +lookup(9253) -> {"So","ON"}; +lookup(9254) -> {"So","ON"}; +lookup(9280) -> {"So","ON"}; +lookup(9281) -> {"So","ON"}; +lookup(9282) -> {"So","ON"}; +lookup(9283) -> {"So","ON"}; +lookup(9284) -> {"So","ON"}; +lookup(9285) -> {"So","ON"}; +lookup(9286) -> {"So","ON"}; +lookup(9287) -> {"So","ON"}; +lookup(9288) -> {"So","ON"}; +lookup(9289) -> {"So","ON"}; +lookup(9290) -> {"So","ON"}; +lookup(9312) -> {"No","ON"}; +lookup(9313) -> {"No","ON"}; +lookup(9314) -> {"No","ON"}; +lookup(9315) -> {"No","ON"}; +lookup(9316) -> {"No","ON"}; 
+lookup(9317) -> {"No","ON"}; +lookup(9318) -> {"No","ON"}; +lookup(9319) -> {"No","ON"}; +lookup(9320) -> {"No","ON"}; +lookup(9321) -> {"No","ON"}; +lookup(9322) -> {"No","ON"}; +lookup(9323) -> {"No","ON"}; +lookup(9324) -> {"No","ON"}; +lookup(9325) -> {"No","ON"}; +lookup(9326) -> {"No","ON"}; +lookup(9327) -> {"No","ON"}; +lookup(9328) -> {"No","ON"}; +lookup(9329) -> {"No","ON"}; +lookup(9330) -> {"No","ON"}; +lookup(9331) -> {"No","ON"}; +lookup(9332) -> {"No","ON"}; +lookup(9333) -> {"No","ON"}; +lookup(9334) -> {"No","ON"}; +lookup(9335) -> {"No","ON"}; +lookup(9336) -> {"No","ON"}; +lookup(9337) -> {"No","ON"}; +lookup(9338) -> {"No","ON"}; +lookup(9339) -> {"No","ON"}; +lookup(9340) -> {"No","ON"}; +lookup(9341) -> {"No","ON"}; +lookup(9342) -> {"No","ON"}; +lookup(9343) -> {"No","ON"}; +lookup(9344) -> {"No","ON"}; +lookup(9345) -> {"No","ON"}; +lookup(9346) -> {"No","ON"}; +lookup(9347) -> {"No","ON"}; +lookup(9348) -> {"No","ON"}; +lookup(9349) -> {"No","ON"}; +lookup(9350) -> {"No","ON"}; +lookup(9351) -> {"No","ON"}; +lookup(9352) -> {"No","EN"}; +lookup(9353) -> {"No","EN"}; +lookup(9354) -> {"No","EN"}; +lookup(9355) -> {"No","EN"}; +lookup(9356) -> {"No","EN"}; +lookup(9357) -> {"No","EN"}; +lookup(9358) -> {"No","EN"}; +lookup(9359) -> {"No","EN"}; +lookup(9360) -> {"No","EN"}; +lookup(9361) -> {"No","EN"}; +lookup(9362) -> {"No","EN"}; +lookup(9363) -> {"No","EN"}; +lookup(9364) -> {"No","EN"}; +lookup(9365) -> {"No","EN"}; +lookup(9366) -> {"No","EN"}; +lookup(9367) -> {"No","EN"}; +lookup(9368) -> {"No","EN"}; +lookup(9369) -> {"No","EN"}; +lookup(9370) -> {"No","EN"}; +lookup(9371) -> {"No","EN"}; +lookup(9372) -> {"So","L"}; +lookup(9373) -> {"So","L"}; +lookup(9374) -> {"So","L"}; +lookup(9375) -> {"So","L"}; +lookup(9376) -> {"So","L"}; +lookup(9377) -> {"So","L"}; +lookup(9378) -> {"So","L"}; +lookup(9379) -> {"So","L"}; +lookup(9380) -> {"So","L"}; +lookup(9381) -> {"So","L"}; +lookup(9382) -> {"So","L"}; +lookup(9383) -> {"So","L"}; 
+lookup(9384) -> {"So","L"}; +lookup(9385) -> {"So","L"}; +lookup(9386) -> {"So","L"}; +lookup(9387) -> {"So","L"}; +lookup(9388) -> {"So","L"}; +lookup(9389) -> {"So","L"}; +lookup(9390) -> {"So","L"}; +lookup(9391) -> {"So","L"}; +lookup(9392) -> {"So","L"}; +lookup(9393) -> {"So","L"}; +lookup(9394) -> {"So","L"}; +lookup(9395) -> {"So","L"}; +lookup(9396) -> {"So","L"}; +lookup(9397) -> {"So","L"}; +lookup(9398) -> {"So","L"}; +lookup(9399) -> {"So","L"}; +lookup(9400) -> {"So","L"}; +lookup(9401) -> {"So","L"}; +lookup(9402) -> {"So","L"}; +lookup(9403) -> {"So","L"}; +lookup(9404) -> {"So","L"}; +lookup(9405) -> {"So","L"}; +lookup(9406) -> {"So","L"}; +lookup(9407) -> {"So","L"}; +lookup(9408) -> {"So","L"}; +lookup(9409) -> {"So","L"}; +lookup(9410) -> {"So","L"}; +lookup(9411) -> {"So","L"}; +lookup(9412) -> {"So","L"}; +lookup(9413) -> {"So","L"}; +lookup(9414) -> {"So","L"}; +lookup(9415) -> {"So","L"}; +lookup(9416) -> {"So","L"}; +lookup(9417) -> {"So","L"}; +lookup(9418) -> {"So","L"}; +lookup(9419) -> {"So","L"}; +lookup(9420) -> {"So","L"}; +lookup(9421) -> {"So","L"}; +lookup(9422) -> {"So","L"}; +lookup(9423) -> {"So","L"}; +lookup(9424) -> {"So","L"}; +lookup(9425) -> {"So","L"}; +lookup(9426) -> {"So","L"}; +lookup(9427) -> {"So","L"}; +lookup(9428) -> {"So","L"}; +lookup(9429) -> {"So","L"}; +lookup(9430) -> {"So","L"}; +lookup(9431) -> {"So","L"}; +lookup(9432) -> {"So","L"}; +lookup(9433) -> {"So","L"}; +lookup(9434) -> {"So","L"}; +lookup(9435) -> {"So","L"}; +lookup(9436) -> {"So","L"}; +lookup(9437) -> {"So","L"}; +lookup(9438) -> {"So","L"}; +lookup(9439) -> {"So","L"}; +lookup(9440) -> {"So","L"}; +lookup(9441) -> {"So","L"}; +lookup(9442) -> {"So","L"}; +lookup(9443) -> {"So","L"}; +lookup(9444) -> {"So","L"}; +lookup(9445) -> {"So","L"}; +lookup(9446) -> {"So","L"}; +lookup(9447) -> {"So","L"}; +lookup(9448) -> {"So","L"}; +lookup(9449) -> {"So","L"}; +lookup(9450) -> {"No","ON"}; +lookup(9451) -> {"No","ON"}; +lookup(9452) -> 
{"No","ON"}; +lookup(9453) -> {"No","ON"}; +lookup(9454) -> {"No","ON"}; +lookup(9455) -> {"No","ON"}; +lookup(9456) -> {"No","ON"}; +lookup(9457) -> {"No","ON"}; +lookup(9458) -> {"No","ON"}; +lookup(9459) -> {"No","ON"}; +lookup(9460) -> {"No","ON"}; +lookup(9461) -> {"No","ON"}; +lookup(9462) -> {"No","ON"}; +lookup(9463) -> {"No","ON"}; +lookup(9464) -> {"No","ON"}; +lookup(9465) -> {"No","ON"}; +lookup(9466) -> {"No","ON"}; +lookup(9467) -> {"No","ON"}; +lookup(9468) -> {"No","ON"}; +lookup(9469) -> {"No","ON"}; +lookup(9470) -> {"No","ON"}; +lookup(9471) -> {"No","ON"}; +lookup(9472) -> {"So","ON"}; +lookup(9473) -> {"So","ON"}; +lookup(9474) -> {"So","ON"}; +lookup(9475) -> {"So","ON"}; +lookup(9476) -> {"So","ON"}; +lookup(9477) -> {"So","ON"}; +lookup(9478) -> {"So","ON"}; +lookup(9479) -> {"So","ON"}; +lookup(9480) -> {"So","ON"}; +lookup(9481) -> {"So","ON"}; +lookup(9482) -> {"So","ON"}; +lookup(9483) -> {"So","ON"}; +lookup(9484) -> {"So","ON"}; +lookup(9485) -> {"So","ON"}; +lookup(9486) -> {"So","ON"}; +lookup(9487) -> {"So","ON"}; +lookup(9488) -> {"So","ON"}; +lookup(9489) -> {"So","ON"}; +lookup(9490) -> {"So","ON"}; +lookup(9491) -> {"So","ON"}; +lookup(9492) -> {"So","ON"}; +lookup(9493) -> {"So","ON"}; +lookup(9494) -> {"So","ON"}; +lookup(9495) -> {"So","ON"}; +lookup(9496) -> {"So","ON"}; +lookup(9497) -> {"So","ON"}; +lookup(9498) -> {"So","ON"}; +lookup(9499) -> {"So","ON"}; +lookup(9500) -> {"So","ON"}; +lookup(9501) -> {"So","ON"}; +lookup(9502) -> {"So","ON"}; +lookup(9503) -> {"So","ON"}; +lookup(9504) -> {"So","ON"}; +lookup(9505) -> {"So","ON"}; +lookup(9506) -> {"So","ON"}; +lookup(9507) -> {"So","ON"}; +lookup(9508) -> {"So","ON"}; +lookup(9509) -> {"So","ON"}; +lookup(9510) -> {"So","ON"}; +lookup(9511) -> {"So","ON"}; +lookup(9512) -> {"So","ON"}; +lookup(9513) -> {"So","ON"}; +lookup(9514) -> {"So","ON"}; +lookup(9515) -> {"So","ON"}; +lookup(9516) -> {"So","ON"}; +lookup(9517) -> {"So","ON"}; +lookup(9518) -> {"So","ON"}; 
+lookup(9519) -> {"So","ON"}; +lookup(9520) -> {"So","ON"}; +lookup(9521) -> {"So","ON"}; +lookup(9522) -> {"So","ON"}; +lookup(9523) -> {"So","ON"}; +lookup(9524) -> {"So","ON"}; +lookup(9525) -> {"So","ON"}; +lookup(9526) -> {"So","ON"}; +lookup(9527) -> {"So","ON"}; +lookup(9528) -> {"So","ON"}; +lookup(9529) -> {"So","ON"}; +lookup(9530) -> {"So","ON"}; +lookup(9531) -> {"So","ON"}; +lookup(9532) -> {"So","ON"}; +lookup(9533) -> {"So","ON"}; +lookup(9534) -> {"So","ON"}; +lookup(9535) -> {"So","ON"}; +lookup(9536) -> {"So","ON"}; +lookup(9537) -> {"So","ON"}; +lookup(9538) -> {"So","ON"}; +lookup(9539) -> {"So","ON"}; +lookup(9540) -> {"So","ON"}; +lookup(9541) -> {"So","ON"}; +lookup(9542) -> {"So","ON"}; +lookup(9543) -> {"So","ON"}; +lookup(9544) -> {"So","ON"}; +lookup(9545) -> {"So","ON"}; +lookup(9546) -> {"So","ON"}; +lookup(9547) -> {"So","ON"}; +lookup(9548) -> {"So","ON"}; +lookup(9549) -> {"So","ON"}; +lookup(9550) -> {"So","ON"}; +lookup(9551) -> {"So","ON"}; +lookup(9552) -> {"So","ON"}; +lookup(9553) -> {"So","ON"}; +lookup(9554) -> {"So","ON"}; +lookup(9555) -> {"So","ON"}; +lookup(9556) -> {"So","ON"}; +lookup(9557) -> {"So","ON"}; +lookup(9558) -> {"So","ON"}; +lookup(9559) -> {"So","ON"}; +lookup(9560) -> {"So","ON"}; +lookup(9561) -> {"So","ON"}; +lookup(9562) -> {"So","ON"}; +lookup(9563) -> {"So","ON"}; +lookup(9564) -> {"So","ON"}; +lookup(9565) -> {"So","ON"}; +lookup(9566) -> {"So","ON"}; +lookup(9567) -> {"So","ON"}; +lookup(9568) -> {"So","ON"}; +lookup(9569) -> {"So","ON"}; +lookup(9570) -> {"So","ON"}; +lookup(9571) -> {"So","ON"}; +lookup(9572) -> {"So","ON"}; +lookup(9573) -> {"So","ON"}; +lookup(9574) -> {"So","ON"}; +lookup(9575) -> {"So","ON"}; +lookup(9576) -> {"So","ON"}; +lookup(9577) -> {"So","ON"}; +lookup(9578) -> {"So","ON"}; +lookup(9579) -> {"So","ON"}; +lookup(9580) -> {"So","ON"}; +lookup(9581) -> {"So","ON"}; +lookup(9582) -> {"So","ON"}; +lookup(9583) -> {"So","ON"}; +lookup(9584) -> {"So","ON"}; +lookup(9585) -> 
{"So","ON"}; +lookup(9586) -> {"So","ON"}; +lookup(9587) -> {"So","ON"}; +lookup(9588) -> {"So","ON"}; +lookup(9589) -> {"So","ON"}; +lookup(9590) -> {"So","ON"}; +lookup(9591) -> {"So","ON"}; +lookup(9592) -> {"So","ON"}; +lookup(9593) -> {"So","ON"}; +lookup(9594) -> {"So","ON"}; +lookup(9595) -> {"So","ON"}; +lookup(9596) -> {"So","ON"}; +lookup(9597) -> {"So","ON"}; +lookup(9598) -> {"So","ON"}; +lookup(9599) -> {"So","ON"}; +lookup(9600) -> {"So","ON"}; +lookup(9601) -> {"So","ON"}; +lookup(9602) -> {"So","ON"}; +lookup(9603) -> {"So","ON"}; +lookup(9604) -> {"So","ON"}; +lookup(9605) -> {"So","ON"}; +lookup(9606) -> {"So","ON"}; +lookup(9607) -> {"So","ON"}; +lookup(9608) -> {"So","ON"}; +lookup(9609) -> {"So","ON"}; +lookup(9610) -> {"So","ON"}; +lookup(9611) -> {"So","ON"}; +lookup(9612) -> {"So","ON"}; +lookup(9613) -> {"So","ON"}; +lookup(9614) -> {"So","ON"}; +lookup(9615) -> {"So","ON"}; +lookup(9616) -> {"So","ON"}; +lookup(9617) -> {"So","ON"}; +lookup(9618) -> {"So","ON"}; +lookup(9619) -> {"So","ON"}; +lookup(9620) -> {"So","ON"}; +lookup(9621) -> {"So","ON"}; +lookup(9622) -> {"So","ON"}; +lookup(9623) -> {"So","ON"}; +lookup(9624) -> {"So","ON"}; +lookup(9625) -> {"So","ON"}; +lookup(9626) -> {"So","ON"}; +lookup(9627) -> {"So","ON"}; +lookup(9628) -> {"So","ON"}; +lookup(9629) -> {"So","ON"}; +lookup(9630) -> {"So","ON"}; +lookup(9631) -> {"So","ON"}; +lookup(9632) -> {"So","ON"}; +lookup(9633) -> {"So","ON"}; +lookup(9634) -> {"So","ON"}; +lookup(9635) -> {"So","ON"}; +lookup(9636) -> {"So","ON"}; +lookup(9637) -> {"So","ON"}; +lookup(9638) -> {"So","ON"}; +lookup(9639) -> {"So","ON"}; +lookup(9640) -> {"So","ON"}; +lookup(9641) -> {"So","ON"}; +lookup(9642) -> {"So","ON"}; +lookup(9643) -> {"So","ON"}; +lookup(9644) -> {"So","ON"}; +lookup(9645) -> {"So","ON"}; +lookup(9646) -> {"So","ON"}; +lookup(9647) -> {"So","ON"}; +lookup(9648) -> {"So","ON"}; +lookup(9649) -> {"So","ON"}; +lookup(9650) -> {"So","ON"}; +lookup(9651) -> {"So","ON"}; 
+lookup(9652) -> {"So","ON"}; +lookup(9653) -> {"So","ON"}; +lookup(9654) -> {"So","ON"}; +lookup(9655) -> {"Sm","ON"}; +lookup(9656) -> {"So","ON"}; +lookup(9657) -> {"So","ON"}; +lookup(9658) -> {"So","ON"}; +lookup(9659) -> {"So","ON"}; +lookup(9660) -> {"So","ON"}; +lookup(9661) -> {"So","ON"}; +lookup(9662) -> {"So","ON"}; +lookup(9663) -> {"So","ON"}; +lookup(9664) -> {"So","ON"}; +lookup(9665) -> {"Sm","ON"}; +lookup(9666) -> {"So","ON"}; +lookup(9667) -> {"So","ON"}; +lookup(9668) -> {"So","ON"}; +lookup(9669) -> {"So","ON"}; +lookup(9670) -> {"So","ON"}; +lookup(9671) -> {"So","ON"}; +lookup(9672) -> {"So","ON"}; +lookup(9673) -> {"So","ON"}; +lookup(9674) -> {"So","ON"}; +lookup(9675) -> {"So","ON"}; +lookup(9676) -> {"So","ON"}; +lookup(9677) -> {"So","ON"}; +lookup(9678) -> {"So","ON"}; +lookup(9679) -> {"So","ON"}; +lookup(9680) -> {"So","ON"}; +lookup(9681) -> {"So","ON"}; +lookup(9682) -> {"So","ON"}; +lookup(9683) -> {"So","ON"}; +lookup(9684) -> {"So","ON"}; +lookup(9685) -> {"So","ON"}; +lookup(9686) -> {"So","ON"}; +lookup(9687) -> {"So","ON"}; +lookup(9688) -> {"So","ON"}; +lookup(9689) -> {"So","ON"}; +lookup(9690) -> {"So","ON"}; +lookup(9691) -> {"So","ON"}; +lookup(9692) -> {"So","ON"}; +lookup(9693) -> {"So","ON"}; +lookup(9694) -> {"So","ON"}; +lookup(9695) -> {"So","ON"}; +lookup(9696) -> {"So","ON"}; +lookup(9697) -> {"So","ON"}; +lookup(9698) -> {"So","ON"}; +lookup(9699) -> {"So","ON"}; +lookup(9700) -> {"So","ON"}; +lookup(9701) -> {"So","ON"}; +lookup(9702) -> {"So","ON"}; +lookup(9703) -> {"So","ON"}; +lookup(9704) -> {"So","ON"}; +lookup(9705) -> {"So","ON"}; +lookup(9706) -> {"So","ON"}; +lookup(9707) -> {"So","ON"}; +lookup(9708) -> {"So","ON"}; +lookup(9709) -> {"So","ON"}; +lookup(9710) -> {"So","ON"}; +lookup(9711) -> {"So","ON"}; +lookup(9712) -> {"So","ON"}; +lookup(9713) -> {"So","ON"}; +lookup(9714) -> {"So","ON"}; +lookup(9715) -> {"So","ON"}; +lookup(9716) -> {"So","ON"}; +lookup(9717) -> {"So","ON"}; +lookup(9718) -> 
{"So","ON"}; +lookup(9719) -> {"So","ON"}; +lookup(9720) -> {"Sm","ON"}; +lookup(9721) -> {"Sm","ON"}; +lookup(9722) -> {"Sm","ON"}; +lookup(9723) -> {"Sm","ON"}; +lookup(9724) -> {"Sm","ON"}; +lookup(9725) -> {"Sm","ON"}; +lookup(9726) -> {"Sm","ON"}; +lookup(9727) -> {"Sm","ON"}; +lookup(9728) -> {"So","ON"}; +lookup(9729) -> {"So","ON"}; +lookup(9730) -> {"So","ON"}; +lookup(9731) -> {"So","ON"}; +lookup(9732) -> {"So","ON"}; +lookup(9733) -> {"So","ON"}; +lookup(9734) -> {"So","ON"}; +lookup(9735) -> {"So","ON"}; +lookup(9736) -> {"So","ON"}; +lookup(9737) -> {"So","ON"}; +lookup(9738) -> {"So","ON"}; +lookup(9739) -> {"So","ON"}; +lookup(9740) -> {"So","ON"}; +lookup(9741) -> {"So","ON"}; +lookup(9742) -> {"So","ON"}; +lookup(9743) -> {"So","ON"}; +lookup(9744) -> {"So","ON"}; +lookup(9745) -> {"So","ON"}; +lookup(9746) -> {"So","ON"}; +lookup(9747) -> {"So","ON"}; +lookup(9748) -> {"So","ON"}; +lookup(9749) -> {"So","ON"}; +lookup(9750) -> {"So","ON"}; +lookup(9751) -> {"So","ON"}; +lookup(9752) -> {"So","ON"}; +lookup(9753) -> {"So","ON"}; +lookup(9754) -> {"So","ON"}; +lookup(9755) -> {"So","ON"}; +lookup(9756) -> {"So","ON"}; +lookup(9757) -> {"So","ON"}; +lookup(9758) -> {"So","ON"}; +lookup(9759) -> {"So","ON"}; +lookup(9760) -> {"So","ON"}; +lookup(9761) -> {"So","ON"}; +lookup(9762) -> {"So","ON"}; +lookup(9763) -> {"So","ON"}; +lookup(9764) -> {"So","ON"}; +lookup(9765) -> {"So","ON"}; +lookup(9766) -> {"So","ON"}; +lookup(9767) -> {"So","ON"}; +lookup(9768) -> {"So","ON"}; +lookup(9769) -> {"So","ON"}; +lookup(9770) -> {"So","ON"}; +lookup(9771) -> {"So","ON"}; +lookup(9772) -> {"So","ON"}; +lookup(9773) -> {"So","ON"}; +lookup(9774) -> {"So","ON"}; +lookup(9775) -> {"So","ON"}; +lookup(9776) -> {"So","ON"}; +lookup(9777) -> {"So","ON"}; +lookup(9778) -> {"So","ON"}; +lookup(9779) -> {"So","ON"}; +lookup(9780) -> {"So","ON"}; +lookup(9781) -> {"So","ON"}; +lookup(9782) -> {"So","ON"}; +lookup(9783) -> {"So","ON"}; +lookup(9784) -> {"So","ON"}; 
+lookup(9785) -> {"So","ON"}; +lookup(9786) -> {"So","ON"}; +lookup(9787) -> {"So","ON"}; +lookup(9788) -> {"So","ON"}; +lookup(9789) -> {"So","ON"}; +lookup(9790) -> {"So","ON"}; +lookup(9791) -> {"So","ON"}; +lookup(9792) -> {"So","ON"}; +lookup(9793) -> {"So","ON"}; +lookup(9794) -> {"So","ON"}; +lookup(9795) -> {"So","ON"}; +lookup(9796) -> {"So","ON"}; +lookup(9797) -> {"So","ON"}; +lookup(9798) -> {"So","ON"}; +lookup(9799) -> {"So","ON"}; +lookup(9800) -> {"So","ON"}; +lookup(9801) -> {"So","ON"}; +lookup(9802) -> {"So","ON"}; +lookup(9803) -> {"So","ON"}; +lookup(9804) -> {"So","ON"}; +lookup(9805) -> {"So","ON"}; +lookup(9806) -> {"So","ON"}; +lookup(9807) -> {"So","ON"}; +lookup(9808) -> {"So","ON"}; +lookup(9809) -> {"So","ON"}; +lookup(9810) -> {"So","ON"}; +lookup(9811) -> {"So","ON"}; +lookup(9812) -> {"So","ON"}; +lookup(9813) -> {"So","ON"}; +lookup(9814) -> {"So","ON"}; +lookup(9815) -> {"So","ON"}; +lookup(9816) -> {"So","ON"}; +lookup(9817) -> {"So","ON"}; +lookup(9818) -> {"So","ON"}; +lookup(9819) -> {"So","ON"}; +lookup(9820) -> {"So","ON"}; +lookup(9821) -> {"So","ON"}; +lookup(9822) -> {"So","ON"}; +lookup(9823) -> {"So","ON"}; +lookup(9824) -> {"So","ON"}; +lookup(9825) -> {"So","ON"}; +lookup(9826) -> {"So","ON"}; +lookup(9827) -> {"So","ON"}; +lookup(9828) -> {"So","ON"}; +lookup(9829) -> {"So","ON"}; +lookup(9830) -> {"So","ON"}; +lookup(9831) -> {"So","ON"}; +lookup(9832) -> {"So","ON"}; +lookup(9833) -> {"So","ON"}; +lookup(9834) -> {"So","ON"}; +lookup(9835) -> {"So","ON"}; +lookup(9836) -> {"So","ON"}; +lookup(9837) -> {"So","ON"}; +lookup(9838) -> {"So","ON"}; +lookup(9839) -> {"Sm","ON"}; +lookup(9840) -> {"So","ON"}; +lookup(9841) -> {"So","ON"}; +lookup(9842) -> {"So","ON"}; +lookup(9843) -> {"So","ON"}; +lookup(9844) -> {"So","ON"}; +lookup(9845) -> {"So","ON"}; +lookup(9846) -> {"So","ON"}; +lookup(9847) -> {"So","ON"}; +lookup(9848) -> {"So","ON"}; +lookup(9849) -> {"So","ON"}; +lookup(9850) -> {"So","ON"}; +lookup(9851) -> 
{"So","ON"}; +lookup(9852) -> {"So","ON"}; +lookup(9853) -> {"So","ON"}; +lookup(9854) -> {"So","ON"}; +lookup(9855) -> {"So","ON"}; +lookup(9856) -> {"So","ON"}; +lookup(9857) -> {"So","ON"}; +lookup(9858) -> {"So","ON"}; +lookup(9859) -> {"So","ON"}; +lookup(9860) -> {"So","ON"}; +lookup(9861) -> {"So","ON"}; +lookup(9862) -> {"So","ON"}; +lookup(9863) -> {"So","ON"}; +lookup(9864) -> {"So","ON"}; +lookup(9865) -> {"So","ON"}; +lookup(9866) -> {"So","ON"}; +lookup(9867) -> {"So","ON"}; +lookup(9868) -> {"So","ON"}; +lookup(9869) -> {"So","ON"}; +lookup(9870) -> {"So","ON"}; +lookup(9871) -> {"So","ON"}; +lookup(9872) -> {"So","ON"}; +lookup(9873) -> {"So","ON"}; +lookup(9874) -> {"So","ON"}; +lookup(9875) -> {"So","ON"}; +lookup(9876) -> {"So","ON"}; +lookup(9877) -> {"So","ON"}; +lookup(9878) -> {"So","ON"}; +lookup(9879) -> {"So","ON"}; +lookup(9880) -> {"So","ON"}; +lookup(9881) -> {"So","ON"}; +lookup(9882) -> {"So","ON"}; +lookup(9883) -> {"So","ON"}; +lookup(9884) -> {"So","ON"}; +lookup(9885) -> {"So","ON"}; +lookup(9886) -> {"So","ON"}; +lookup(9887) -> {"So","ON"}; +lookup(9888) -> {"So","ON"}; +lookup(9889) -> {"So","ON"}; +lookup(9890) -> {"So","ON"}; +lookup(9891) -> {"So","ON"}; +lookup(9892) -> {"So","ON"}; +lookup(9893) -> {"So","ON"}; +lookup(9894) -> {"So","ON"}; +lookup(9895) -> {"So","ON"}; +lookup(9896) -> {"So","ON"}; +lookup(9897) -> {"So","ON"}; +lookup(9898) -> {"So","ON"}; +lookup(9899) -> {"So","ON"}; +lookup(9900) -> {"So","L"}; +lookup(9901) -> {"So","ON"}; +lookup(9902) -> {"So","ON"}; +lookup(9903) -> {"So","ON"}; +lookup(9904) -> {"So","ON"}; +lookup(9905) -> {"So","ON"}; +lookup(9906) -> {"So","ON"}; +lookup(9907) -> {"So","ON"}; +lookup(9908) -> {"So","ON"}; +lookup(9909) -> {"So","ON"}; +lookup(9910) -> {"So","ON"}; +lookup(9911) -> {"So","ON"}; +lookup(9912) -> {"So","ON"}; +lookup(9913) -> {"So","ON"}; +lookup(9914) -> {"So","ON"}; +lookup(9915) -> {"So","ON"}; +lookup(9916) -> {"So","ON"}; +lookup(9917) -> {"So","ON"}; 
+lookup(9918) -> {"So","ON"}; +lookup(9919) -> {"So","ON"}; +lookup(9920) -> {"So","ON"}; +lookup(9921) -> {"So","ON"}; +lookup(9922) -> {"So","ON"}; +lookup(9923) -> {"So","ON"}; +lookup(9924) -> {"So","ON"}; +lookup(9925) -> {"So","ON"}; +lookup(9926) -> {"So","ON"}; +lookup(9927) -> {"So","ON"}; +lookup(9928) -> {"So","ON"}; +lookup(9929) -> {"So","ON"}; +lookup(9930) -> {"So","ON"}; +lookup(9931) -> {"So","ON"}; +lookup(9932) -> {"So","ON"}; +lookup(9933) -> {"So","ON"}; +lookup(9934) -> {"So","ON"}; +lookup(9935) -> {"So","ON"}; +lookup(9936) -> {"So","ON"}; +lookup(9937) -> {"So","ON"}; +lookup(9938) -> {"So","ON"}; +lookup(9939) -> {"So","ON"}; +lookup(9940) -> {"So","ON"}; +lookup(9941) -> {"So","ON"}; +lookup(9942) -> {"So","ON"}; +lookup(9943) -> {"So","ON"}; +lookup(9944) -> {"So","ON"}; +lookup(9945) -> {"So","ON"}; +lookup(9946) -> {"So","ON"}; +lookup(9947) -> {"So","ON"}; +lookup(9948) -> {"So","ON"}; +lookup(9949) -> {"So","ON"}; +lookup(9950) -> {"So","ON"}; +lookup(9951) -> {"So","ON"}; +lookup(9952) -> {"So","ON"}; +lookup(9953) -> {"So","ON"}; +lookup(9954) -> {"So","ON"}; +lookup(9955) -> {"So","ON"}; +lookup(9956) -> {"So","ON"}; +lookup(9957) -> {"So","ON"}; +lookup(9958) -> {"So","ON"}; +lookup(9959) -> {"So","ON"}; +lookup(9960) -> {"So","ON"}; +lookup(9961) -> {"So","ON"}; +lookup(9962) -> {"So","ON"}; +lookup(9963) -> {"So","ON"}; +lookup(9964) -> {"So","ON"}; +lookup(9965) -> {"So","ON"}; +lookup(9966) -> {"So","ON"}; +lookup(9967) -> {"So","ON"}; +lookup(9968) -> {"So","ON"}; +lookup(9969) -> {"So","ON"}; +lookup(9970) -> {"So","ON"}; +lookup(9971) -> {"So","ON"}; +lookup(9972) -> {"So","ON"}; +lookup(9973) -> {"So","ON"}; +lookup(9974) -> {"So","ON"}; +lookup(9975) -> {"So","ON"}; +lookup(9976) -> {"So","ON"}; +lookup(9977) -> {"So","ON"}; +lookup(9978) -> {"So","ON"}; +lookup(9979) -> {"So","ON"}; +lookup(9980) -> {"So","ON"}; +lookup(9981) -> {"So","ON"}; +lookup(9982) -> {"So","ON"}; +lookup(9983) -> {"So","ON"}; +lookup(9984) -> 
{"So","ON"}; +lookup(9985) -> {"So","ON"}; +lookup(9986) -> {"So","ON"}; +lookup(9987) -> {"So","ON"}; +lookup(9988) -> {"So","ON"}; +lookup(9989) -> {"So","ON"}; +lookup(9990) -> {"So","ON"}; +lookup(9991) -> {"So","ON"}; +lookup(9992) -> {"So","ON"}; +lookup(9993) -> {"So","ON"}; +lookup(9994) -> {"So","ON"}; +lookup(9995) -> {"So","ON"}; +lookup(9996) -> {"So","ON"}; +lookup(9997) -> {"So","ON"}; +lookup(9998) -> {"So","ON"}; +lookup(9999) -> {"So","ON"}; +lookup(10000) -> {"So","ON"}; +lookup(10001) -> {"So","ON"}; +lookup(10002) -> {"So","ON"}; +lookup(10003) -> {"So","ON"}; +lookup(10004) -> {"So","ON"}; +lookup(10005) -> {"So","ON"}; +lookup(10006) -> {"So","ON"}; +lookup(10007) -> {"So","ON"}; +lookup(10008) -> {"So","ON"}; +lookup(10009) -> {"So","ON"}; +lookup(10010) -> {"So","ON"}; +lookup(10011) -> {"So","ON"}; +lookup(10012) -> {"So","ON"}; +lookup(10013) -> {"So","ON"}; +lookup(10014) -> {"So","ON"}; +lookup(10015) -> {"So","ON"}; +lookup(10016) -> {"So","ON"}; +lookup(10017) -> {"So","ON"}; +lookup(10018) -> {"So","ON"}; +lookup(10019) -> {"So","ON"}; +lookup(10020) -> {"So","ON"}; +lookup(10021) -> {"So","ON"}; +lookup(10022) -> {"So","ON"}; +lookup(10023) -> {"So","ON"}; +lookup(10024) -> {"So","ON"}; +lookup(10025) -> {"So","ON"}; +lookup(10026) -> {"So","ON"}; +lookup(10027) -> {"So","ON"}; +lookup(10028) -> {"So","ON"}; +lookup(10029) -> {"So","ON"}; +lookup(10030) -> {"So","ON"}; +lookup(10031) -> {"So","ON"}; +lookup(10032) -> {"So","ON"}; +lookup(10033) -> {"So","ON"}; +lookup(10034) -> {"So","ON"}; +lookup(10035) -> {"So","ON"}; +lookup(10036) -> {"So","ON"}; +lookup(10037) -> {"So","ON"}; +lookup(10038) -> {"So","ON"}; +lookup(10039) -> {"So","ON"}; +lookup(10040) -> {"So","ON"}; +lookup(10041) -> {"So","ON"}; +lookup(10042) -> {"So","ON"}; +lookup(10043) -> {"So","ON"}; +lookup(10044) -> {"So","ON"}; +lookup(10045) -> {"So","ON"}; +lookup(10046) -> {"So","ON"}; +lookup(10047) -> {"So","ON"}; +lookup(10048) -> {"So","ON"}; +lookup(10049) -> 
{"So","ON"}; +lookup(10050) -> {"So","ON"}; +lookup(10051) -> {"So","ON"}; +lookup(10052) -> {"So","ON"}; +lookup(10053) -> {"So","ON"}; +lookup(10054) -> {"So","ON"}; +lookup(10055) -> {"So","ON"}; +lookup(10056) -> {"So","ON"}; +lookup(10057) -> {"So","ON"}; +lookup(10058) -> {"So","ON"}; +lookup(10059) -> {"So","ON"}; +lookup(10060) -> {"So","ON"}; +lookup(10061) -> {"So","ON"}; +lookup(10062) -> {"So","ON"}; +lookup(10063) -> {"So","ON"}; +lookup(10064) -> {"So","ON"}; +lookup(10065) -> {"So","ON"}; +lookup(10066) -> {"So","ON"}; +lookup(10067) -> {"So","ON"}; +lookup(10068) -> {"So","ON"}; +lookup(10069) -> {"So","ON"}; +lookup(10070) -> {"So","ON"}; +lookup(10071) -> {"So","ON"}; +lookup(10072) -> {"So","ON"}; +lookup(10073) -> {"So","ON"}; +lookup(10074) -> {"So","ON"}; +lookup(10075) -> {"So","ON"}; +lookup(10076) -> {"So","ON"}; +lookup(10077) -> {"So","ON"}; +lookup(10078) -> {"So","ON"}; +lookup(10079) -> {"So","ON"}; +lookup(10080) -> {"So","ON"}; +lookup(10081) -> {"So","ON"}; +lookup(10082) -> {"So","ON"}; +lookup(10083) -> {"So","ON"}; +lookup(10084) -> {"So","ON"}; +lookup(10085) -> {"So","ON"}; +lookup(10086) -> {"So","ON"}; +lookup(10087) -> {"So","ON"}; +lookup(10088) -> {"Ps","ON"}; +lookup(10089) -> {"Pe","ON"}; +lookup(10090) -> {"Ps","ON"}; +lookup(10091) -> {"Pe","ON"}; +lookup(10092) -> {"Ps","ON"}; +lookup(10093) -> {"Pe","ON"}; +lookup(10094) -> {"Ps","ON"}; +lookup(10095) -> {"Pe","ON"}; +lookup(10096) -> {"Ps","ON"}; +lookup(10097) -> {"Pe","ON"}; +lookup(10098) -> {"Ps","ON"}; +lookup(10099) -> {"Pe","ON"}; +lookup(10100) -> {"Ps","ON"}; +lookup(10101) -> {"Pe","ON"}; +lookup(10102) -> {"No","ON"}; +lookup(10103) -> {"No","ON"}; +lookup(10104) -> {"No","ON"}; +lookup(10105) -> {"No","ON"}; +lookup(10106) -> {"No","ON"}; +lookup(10107) -> {"No","ON"}; +lookup(10108) -> {"No","ON"}; +lookup(10109) -> {"No","ON"}; +lookup(10110) -> {"No","ON"}; +lookup(10111) -> {"No","ON"}; +lookup(10112) -> {"No","ON"}; +lookup(10113) -> {"No","ON"}; 
+lookup(10114) -> {"No","ON"}; +lookup(10115) -> {"No","ON"}; +lookup(10116) -> {"No","ON"}; +lookup(10117) -> {"No","ON"}; +lookup(10118) -> {"No","ON"}; +lookup(10119) -> {"No","ON"}; +lookup(10120) -> {"No","ON"}; +lookup(10121) -> {"No","ON"}; +lookup(10122) -> {"No","ON"}; +lookup(10123) -> {"No","ON"}; +lookup(10124) -> {"No","ON"}; +lookup(10125) -> {"No","ON"}; +lookup(10126) -> {"No","ON"}; +lookup(10127) -> {"No","ON"}; +lookup(10128) -> {"No","ON"}; +lookup(10129) -> {"No","ON"}; +lookup(10130) -> {"No","ON"}; +lookup(10131) -> {"No","ON"}; +lookup(10132) -> {"So","ON"}; +lookup(10133) -> {"So","ON"}; +lookup(10134) -> {"So","ON"}; +lookup(10135) -> {"So","ON"}; +lookup(10136) -> {"So","ON"}; +lookup(10137) -> {"So","ON"}; +lookup(10138) -> {"So","ON"}; +lookup(10139) -> {"So","ON"}; +lookup(10140) -> {"So","ON"}; +lookup(10141) -> {"So","ON"}; +lookup(10142) -> {"So","ON"}; +lookup(10143) -> {"So","ON"}; +lookup(10144) -> {"So","ON"}; +lookup(10145) -> {"So","ON"}; +lookup(10146) -> {"So","ON"}; +lookup(10147) -> {"So","ON"}; +lookup(10148) -> {"So","ON"}; +lookup(10149) -> {"So","ON"}; +lookup(10150) -> {"So","ON"}; +lookup(10151) -> {"So","ON"}; +lookup(10152) -> {"So","ON"}; +lookup(10153) -> {"So","ON"}; +lookup(10154) -> {"So","ON"}; +lookup(10155) -> {"So","ON"}; +lookup(10156) -> {"So","ON"}; +lookup(10157) -> {"So","ON"}; +lookup(10158) -> {"So","ON"}; +lookup(10159) -> {"So","ON"}; +lookup(10160) -> {"So","ON"}; +lookup(10161) -> {"So","ON"}; +lookup(10162) -> {"So","ON"}; +lookup(10163) -> {"So","ON"}; +lookup(10164) -> {"So","ON"}; +lookup(10165) -> {"So","ON"}; +lookup(10166) -> {"So","ON"}; +lookup(10167) -> {"So","ON"}; +lookup(10168) -> {"So","ON"}; +lookup(10169) -> {"So","ON"}; +lookup(10170) -> {"So","ON"}; +lookup(10171) -> {"So","ON"}; +lookup(10172) -> {"So","ON"}; +lookup(10173) -> {"So","ON"}; +lookup(10174) -> {"So","ON"}; +lookup(10175) -> {"So","ON"}; +lookup(10176) -> {"Sm","ON"}; +lookup(10177) -> {"Sm","ON"}; +lookup(10178) 
-> {"Sm","ON"}; +lookup(10179) -> {"Sm","ON"}; +lookup(10180) -> {"Sm","ON"}; +lookup(10181) -> {"Ps","ON"}; +lookup(10182) -> {"Pe","ON"}; +lookup(10183) -> {"Sm","ON"}; +lookup(10184) -> {"Sm","ON"}; +lookup(10185) -> {"Sm","ON"}; +lookup(10186) -> {"Sm","ON"}; +lookup(10187) -> {"Sm","ON"}; +lookup(10188) -> {"Sm","ON"}; +lookup(10189) -> {"Sm","ON"}; +lookup(10190) -> {"Sm","ON"}; +lookup(10191) -> {"Sm","ON"}; +lookup(10192) -> {"Sm","ON"}; +lookup(10193) -> {"Sm","ON"}; +lookup(10194) -> {"Sm","ON"}; +lookup(10195) -> {"Sm","ON"}; +lookup(10196) -> {"Sm","ON"}; +lookup(10197) -> {"Sm","ON"}; +lookup(10198) -> {"Sm","ON"}; +lookup(10199) -> {"Sm","ON"}; +lookup(10200) -> {"Sm","ON"}; +lookup(10201) -> {"Sm","ON"}; +lookup(10202) -> {"Sm","ON"}; +lookup(10203) -> {"Sm","ON"}; +lookup(10204) -> {"Sm","ON"}; +lookup(10205) -> {"Sm","ON"}; +lookup(10206) -> {"Sm","ON"}; +lookup(10207) -> {"Sm","ON"}; +lookup(10208) -> {"Sm","ON"}; +lookup(10209) -> {"Sm","ON"}; +lookup(10210) -> {"Sm","ON"}; +lookup(10211) -> {"Sm","ON"}; +lookup(10212) -> {"Sm","ON"}; +lookup(10213) -> {"Sm","ON"}; +lookup(10214) -> {"Ps","ON"}; +lookup(10215) -> {"Pe","ON"}; +lookup(10216) -> {"Ps","ON"}; +lookup(10217) -> {"Pe","ON"}; +lookup(10218) -> {"Ps","ON"}; +lookup(10219) -> {"Pe","ON"}; +lookup(10220) -> {"Ps","ON"}; +lookup(10221) -> {"Pe","ON"}; +lookup(10222) -> {"Ps","ON"}; +lookup(10223) -> {"Pe","ON"}; +lookup(10224) -> {"Sm","ON"}; +lookup(10225) -> {"Sm","ON"}; +lookup(10226) -> {"Sm","ON"}; +lookup(10227) -> {"Sm","ON"}; +lookup(10228) -> {"Sm","ON"}; +lookup(10229) -> {"Sm","ON"}; +lookup(10230) -> {"Sm","ON"}; +lookup(10231) -> {"Sm","ON"}; +lookup(10232) -> {"Sm","ON"}; +lookup(10233) -> {"Sm","ON"}; +lookup(10234) -> {"Sm","ON"}; +lookup(10235) -> {"Sm","ON"}; +lookup(10236) -> {"Sm","ON"}; +lookup(10237) -> {"Sm","ON"}; +lookup(10238) -> {"Sm","ON"}; +lookup(10239) -> {"Sm","ON"}; +lookup(10240) -> {"So","L"}; +lookup(10241) -> {"So","L"}; +lookup(10242) -> {"So","L"}; 
+lookup(10243) -> {"So","L"}; +lookup(10244) -> {"So","L"}; +lookup(10245) -> {"So","L"}; +lookup(10246) -> {"So","L"}; +lookup(10247) -> {"So","L"}; +lookup(10248) -> {"So","L"}; +lookup(10249) -> {"So","L"}; +lookup(10250) -> {"So","L"}; +lookup(10251) -> {"So","L"}; +lookup(10252) -> {"So","L"}; +lookup(10253) -> {"So","L"}; +lookup(10254) -> {"So","L"}; +lookup(10255) -> {"So","L"}; +lookup(10256) -> {"So","L"}; +lookup(10257) -> {"So","L"}; +lookup(10258) -> {"So","L"}; +lookup(10259) -> {"So","L"}; +lookup(10260) -> {"So","L"}; +lookup(10261) -> {"So","L"}; +lookup(10262) -> {"So","L"}; +lookup(10263) -> {"So","L"}; +lookup(10264) -> {"So","L"}; +lookup(10265) -> {"So","L"}; +lookup(10266) -> {"So","L"}; +lookup(10267) -> {"So","L"}; +lookup(10268) -> {"So","L"}; +lookup(10269) -> {"So","L"}; +lookup(10270) -> {"So","L"}; +lookup(10271) -> {"So","L"}; +lookup(10272) -> {"So","L"}; +lookup(10273) -> {"So","L"}; +lookup(10274) -> {"So","L"}; +lookup(10275) -> {"So","L"}; +lookup(10276) -> {"So","L"}; +lookup(10277) -> {"So","L"}; +lookup(10278) -> {"So","L"}; +lookup(10279) -> {"So","L"}; +lookup(10280) -> {"So","L"}; +lookup(10281) -> {"So","L"}; +lookup(10282) -> {"So","L"}; +lookup(10283) -> {"So","L"}; +lookup(10284) -> {"So","L"}; +lookup(10285) -> {"So","L"}; +lookup(10286) -> {"So","L"}; +lookup(10287) -> {"So","L"}; +lookup(10288) -> {"So","L"}; +lookup(10289) -> {"So","L"}; +lookup(10290) -> {"So","L"}; +lookup(10291) -> {"So","L"}; +lookup(10292) -> {"So","L"}; +lookup(10293) -> {"So","L"}; +lookup(10294) -> {"So","L"}; +lookup(10295) -> {"So","L"}; +lookup(10296) -> {"So","L"}; +lookup(10297) -> {"So","L"}; +lookup(10298) -> {"So","L"}; +lookup(10299) -> {"So","L"}; +lookup(10300) -> {"So","L"}; +lookup(10301) -> {"So","L"}; +lookup(10302) -> {"So","L"}; +lookup(10303) -> {"So","L"}; +lookup(10304) -> {"So","L"}; +lookup(10305) -> {"So","L"}; +lookup(10306) -> {"So","L"}; +lookup(10307) -> {"So","L"}; +lookup(10308) -> {"So","L"}; +lookup(10309) -> 
{"So","L"}; +lookup(10310) -> {"So","L"}; +lookup(10311) -> {"So","L"}; +lookup(10312) -> {"So","L"}; +lookup(10313) -> {"So","L"}; +lookup(10314) -> {"So","L"}; +lookup(10315) -> {"So","L"}; +lookup(10316) -> {"So","L"}; +lookup(10317) -> {"So","L"}; +lookup(10318) -> {"So","L"}; +lookup(10319) -> {"So","L"}; +lookup(10320) -> {"So","L"}; +lookup(10321) -> {"So","L"}; +lookup(10322) -> {"So","L"}; +lookup(10323) -> {"So","L"}; +lookup(10324) -> {"So","L"}; +lookup(10325) -> {"So","L"}; +lookup(10326) -> {"So","L"}; +lookup(10327) -> {"So","L"}; +lookup(10328) -> {"So","L"}; +lookup(10329) -> {"So","L"}; +lookup(10330) -> {"So","L"}; +lookup(10331) -> {"So","L"}; +lookup(10332) -> {"So","L"}; +lookup(10333) -> {"So","L"}; +lookup(10334) -> {"So","L"}; +lookup(10335) -> {"So","L"}; +lookup(10336) -> {"So","L"}; +lookup(10337) -> {"So","L"}; +lookup(10338) -> {"So","L"}; +lookup(10339) -> {"So","L"}; +lookup(10340) -> {"So","L"}; +lookup(10341) -> {"So","L"}; +lookup(10342) -> {"So","L"}; +lookup(10343) -> {"So","L"}; +lookup(10344) -> {"So","L"}; +lookup(10345) -> {"So","L"}; +lookup(10346) -> {"So","L"}; +lookup(10347) -> {"So","L"}; +lookup(10348) -> {"So","L"}; +lookup(10349) -> {"So","L"}; +lookup(10350) -> {"So","L"}; +lookup(10351) -> {"So","L"}; +lookup(10352) -> {"So","L"}; +lookup(10353) -> {"So","L"}; +lookup(10354) -> {"So","L"}; +lookup(10355) -> {"So","L"}; +lookup(10356) -> {"So","L"}; +lookup(10357) -> {"So","L"}; +lookup(10358) -> {"So","L"}; +lookup(10359) -> {"So","L"}; +lookup(10360) -> {"So","L"}; +lookup(10361) -> {"So","L"}; +lookup(10362) -> {"So","L"}; +lookup(10363) -> {"So","L"}; +lookup(10364) -> {"So","L"}; +lookup(10365) -> {"So","L"}; +lookup(10366) -> {"So","L"}; +lookup(10367) -> {"So","L"}; +lookup(10368) -> {"So","L"}; +lookup(10369) -> {"So","L"}; +lookup(10370) -> {"So","L"}; +lookup(10371) -> {"So","L"}; +lookup(10372) -> {"So","L"}; +lookup(10373) -> {"So","L"}; +lookup(10374) -> {"So","L"}; +lookup(10375) -> {"So","L"}; 
+lookup(10376) -> {"So","L"}; +lookup(10377) -> {"So","L"}; +lookup(10378) -> {"So","L"}; +lookup(10379) -> {"So","L"}; +lookup(10380) -> {"So","L"}; +lookup(10381) -> {"So","L"}; +lookup(10382) -> {"So","L"}; +lookup(10383) -> {"So","L"}; +lookup(10384) -> {"So","L"}; +lookup(10385) -> {"So","L"}; +lookup(10386) -> {"So","L"}; +lookup(10387) -> {"So","L"}; +lookup(10388) -> {"So","L"}; +lookup(10389) -> {"So","L"}; +lookup(10390) -> {"So","L"}; +lookup(10391) -> {"So","L"}; +lookup(10392) -> {"So","L"}; +lookup(10393) -> {"So","L"}; +lookup(10394) -> {"So","L"}; +lookup(10395) -> {"So","L"}; +lookup(10396) -> {"So","L"}; +lookup(10397) -> {"So","L"}; +lookup(10398) -> {"So","L"}; +lookup(10399) -> {"So","L"}; +lookup(10400) -> {"So","L"}; +lookup(10401) -> {"So","L"}; +lookup(10402) -> {"So","L"}; +lookup(10403) -> {"So","L"}; +lookup(10404) -> {"So","L"}; +lookup(10405) -> {"So","L"}; +lookup(10406) -> {"So","L"}; +lookup(10407) -> {"So","L"}; +lookup(10408) -> {"So","L"}; +lookup(10409) -> {"So","L"}; +lookup(10410) -> {"So","L"}; +lookup(10411) -> {"So","L"}; +lookup(10412) -> {"So","L"}; +lookup(10413) -> {"So","L"}; +lookup(10414) -> {"So","L"}; +lookup(10415) -> {"So","L"}; +lookup(10416) -> {"So","L"}; +lookup(10417) -> {"So","L"}; +lookup(10418) -> {"So","L"}; +lookup(10419) -> {"So","L"}; +lookup(10420) -> {"So","L"}; +lookup(10421) -> {"So","L"}; +lookup(10422) -> {"So","L"}; +lookup(10423) -> {"So","L"}; +lookup(10424) -> {"So","L"}; +lookup(10425) -> {"So","L"}; +lookup(10426) -> {"So","L"}; +lookup(10427) -> {"So","L"}; +lookup(10428) -> {"So","L"}; +lookup(10429) -> {"So","L"}; +lookup(10430) -> {"So","L"}; +lookup(10431) -> {"So","L"}; +lookup(10432) -> {"So","L"}; +lookup(10433) -> {"So","L"}; +lookup(10434) -> {"So","L"}; +lookup(10435) -> {"So","L"}; +lookup(10436) -> {"So","L"}; +lookup(10437) -> {"So","L"}; +lookup(10438) -> {"So","L"}; +lookup(10439) -> {"So","L"}; +lookup(10440) -> {"So","L"}; +lookup(10441) -> {"So","L"}; +lookup(10442) -> 
{"So","L"}; +lookup(10443) -> {"So","L"}; +lookup(10444) -> {"So","L"}; +lookup(10445) -> {"So","L"}; +lookup(10446) -> {"So","L"}; +lookup(10447) -> {"So","L"}; +lookup(10448) -> {"So","L"}; +lookup(10449) -> {"So","L"}; +lookup(10450) -> {"So","L"}; +lookup(10451) -> {"So","L"}; +lookup(10452) -> {"So","L"}; +lookup(10453) -> {"So","L"}; +lookup(10454) -> {"So","L"}; +lookup(10455) -> {"So","L"}; +lookup(10456) -> {"So","L"}; +lookup(10457) -> {"So","L"}; +lookup(10458) -> {"So","L"}; +lookup(10459) -> {"So","L"}; +lookup(10460) -> {"So","L"}; +lookup(10461) -> {"So","L"}; +lookup(10462) -> {"So","L"}; +lookup(10463) -> {"So","L"}; +lookup(10464) -> {"So","L"}; +lookup(10465) -> {"So","L"}; +lookup(10466) -> {"So","L"}; +lookup(10467) -> {"So","L"}; +lookup(10468) -> {"So","L"}; +lookup(10469) -> {"So","L"}; +lookup(10470) -> {"So","L"}; +lookup(10471) -> {"So","L"}; +lookup(10472) -> {"So","L"}; +lookup(10473) -> {"So","L"}; +lookup(10474) -> {"So","L"}; +lookup(10475) -> {"So","L"}; +lookup(10476) -> {"So","L"}; +lookup(10477) -> {"So","L"}; +lookup(10478) -> {"So","L"}; +lookup(10479) -> {"So","L"}; +lookup(10480) -> {"So","L"}; +lookup(10481) -> {"So","L"}; +lookup(10482) -> {"So","L"}; +lookup(10483) -> {"So","L"}; +lookup(10484) -> {"So","L"}; +lookup(10485) -> {"So","L"}; +lookup(10486) -> {"So","L"}; +lookup(10487) -> {"So","L"}; +lookup(10488) -> {"So","L"}; +lookup(10489) -> {"So","L"}; +lookup(10490) -> {"So","L"}; +lookup(10491) -> {"So","L"}; +lookup(10492) -> {"So","L"}; +lookup(10493) -> {"So","L"}; +lookup(10494) -> {"So","L"}; +lookup(10495) -> {"So","L"}; +lookup(10496) -> {"Sm","ON"}; +lookup(10497) -> {"Sm","ON"}; +lookup(10498) -> {"Sm","ON"}; +lookup(10499) -> {"Sm","ON"}; +lookup(10500) -> {"Sm","ON"}; +lookup(10501) -> {"Sm","ON"}; +lookup(10502) -> {"Sm","ON"}; +lookup(10503) -> {"Sm","ON"}; +lookup(10504) -> {"Sm","ON"}; +lookup(10505) -> {"Sm","ON"}; +lookup(10506) -> {"Sm","ON"}; +lookup(10507) -> {"Sm","ON"}; +lookup(10508) -> 
{"Sm","ON"}; +lookup(10509) -> {"Sm","ON"}; +lookup(10510) -> {"Sm","ON"}; +lookup(10511) -> {"Sm","ON"}; +lookup(10512) -> {"Sm","ON"}; +lookup(10513) -> {"Sm","ON"}; +lookup(10514) -> {"Sm","ON"}; +lookup(10515) -> {"Sm","ON"}; +lookup(10516) -> {"Sm","ON"}; +lookup(10517) -> {"Sm","ON"}; +lookup(10518) -> {"Sm","ON"}; +lookup(10519) -> {"Sm","ON"}; +lookup(10520) -> {"Sm","ON"}; +lookup(10521) -> {"Sm","ON"}; +lookup(10522) -> {"Sm","ON"}; +lookup(10523) -> {"Sm","ON"}; +lookup(10524) -> {"Sm","ON"}; +lookup(10525) -> {"Sm","ON"}; +lookup(10526) -> {"Sm","ON"}; +lookup(10527) -> {"Sm","ON"}; +lookup(10528) -> {"Sm","ON"}; +lookup(10529) -> {"Sm","ON"}; +lookup(10530) -> {"Sm","ON"}; +lookup(10531) -> {"Sm","ON"}; +lookup(10532) -> {"Sm","ON"}; +lookup(10533) -> {"Sm","ON"}; +lookup(10534) -> {"Sm","ON"}; +lookup(10535) -> {"Sm","ON"}; +lookup(10536) -> {"Sm","ON"}; +lookup(10537) -> {"Sm","ON"}; +lookup(10538) -> {"Sm","ON"}; +lookup(10539) -> {"Sm","ON"}; +lookup(10540) -> {"Sm","ON"}; +lookup(10541) -> {"Sm","ON"}; +lookup(10542) -> {"Sm","ON"}; +lookup(10543) -> {"Sm","ON"}; +lookup(10544) -> {"Sm","ON"}; +lookup(10545) -> {"Sm","ON"}; +lookup(10546) -> {"Sm","ON"}; +lookup(10547) -> {"Sm","ON"}; +lookup(10548) -> {"Sm","ON"}; +lookup(10549) -> {"Sm","ON"}; +lookup(10550) -> {"Sm","ON"}; +lookup(10551) -> {"Sm","ON"}; +lookup(10552) -> {"Sm","ON"}; +lookup(10553) -> {"Sm","ON"}; +lookup(10554) -> {"Sm","ON"}; +lookup(10555) -> {"Sm","ON"}; +lookup(10556) -> {"Sm","ON"}; +lookup(10557) -> {"Sm","ON"}; +lookup(10558) -> {"Sm","ON"}; +lookup(10559) -> {"Sm","ON"}; +lookup(10560) -> {"Sm","ON"}; +lookup(10561) -> {"Sm","ON"}; +lookup(10562) -> {"Sm","ON"}; +lookup(10563) -> {"Sm","ON"}; +lookup(10564) -> {"Sm","ON"}; +lookup(10565) -> {"Sm","ON"}; +lookup(10566) -> {"Sm","ON"}; +lookup(10567) -> {"Sm","ON"}; +lookup(10568) -> {"Sm","ON"}; +lookup(10569) -> {"Sm","ON"}; +lookup(10570) -> {"Sm","ON"}; +lookup(10571) -> {"Sm","ON"}; +lookup(10572) -> {"Sm","ON"}; 
+lookup(10573) -> {"Sm","ON"}; +lookup(10574) -> {"Sm","ON"}; +lookup(10575) -> {"Sm","ON"}; +lookup(10576) -> {"Sm","ON"}; +lookup(10577) -> {"Sm","ON"}; +lookup(10578) -> {"Sm","ON"}; +lookup(10579) -> {"Sm","ON"}; +lookup(10580) -> {"Sm","ON"}; +lookup(10581) -> {"Sm","ON"}; +lookup(10582) -> {"Sm","ON"}; +lookup(10583) -> {"Sm","ON"}; +lookup(10584) -> {"Sm","ON"}; +lookup(10585) -> {"Sm","ON"}; +lookup(10586) -> {"Sm","ON"}; +lookup(10587) -> {"Sm","ON"}; +lookup(10588) -> {"Sm","ON"}; +lookup(10589) -> {"Sm","ON"}; +lookup(10590) -> {"Sm","ON"}; +lookup(10591) -> {"Sm","ON"}; +lookup(10592) -> {"Sm","ON"}; +lookup(10593) -> {"Sm","ON"}; +lookup(10594) -> {"Sm","ON"}; +lookup(10595) -> {"Sm","ON"}; +lookup(10596) -> {"Sm","ON"}; +lookup(10597) -> {"Sm","ON"}; +lookup(10598) -> {"Sm","ON"}; +lookup(10599) -> {"Sm","ON"}; +lookup(10600) -> {"Sm","ON"}; +lookup(10601) -> {"Sm","ON"}; +lookup(10602) -> {"Sm","ON"}; +lookup(10603) -> {"Sm","ON"}; +lookup(10604) -> {"Sm","ON"}; +lookup(10605) -> {"Sm","ON"}; +lookup(10606) -> {"Sm","ON"}; +lookup(10607) -> {"Sm","ON"}; +lookup(10608) -> {"Sm","ON"}; +lookup(10609) -> {"Sm","ON"}; +lookup(10610) -> {"Sm","ON"}; +lookup(10611) -> {"Sm","ON"}; +lookup(10612) -> {"Sm","ON"}; +lookup(10613) -> {"Sm","ON"}; +lookup(10614) -> {"Sm","ON"}; +lookup(10615) -> {"Sm","ON"}; +lookup(10616) -> {"Sm","ON"}; +lookup(10617) -> {"Sm","ON"}; +lookup(10618) -> {"Sm","ON"}; +lookup(10619) -> {"Sm","ON"}; +lookup(10620) -> {"Sm","ON"}; +lookup(10621) -> {"Sm","ON"}; +lookup(10622) -> {"Sm","ON"}; +lookup(10623) -> {"Sm","ON"}; +lookup(10624) -> {"Sm","ON"}; +lookup(10625) -> {"Sm","ON"}; +lookup(10626) -> {"Sm","ON"}; +lookup(10627) -> {"Ps","ON"}; +lookup(10628) -> {"Pe","ON"}; +lookup(10629) -> {"Ps","ON"}; +lookup(10630) -> {"Pe","ON"}; +lookup(10631) -> {"Ps","ON"}; +lookup(10632) -> {"Pe","ON"}; +lookup(10633) -> {"Ps","ON"}; +lookup(10634) -> {"Pe","ON"}; +lookup(10635) -> {"Ps","ON"}; +lookup(10636) -> {"Pe","ON"}; +lookup(10637) 
-> {"Ps","ON"}; +lookup(10638) -> {"Pe","ON"}; +lookup(10639) -> {"Ps","ON"}; +lookup(10640) -> {"Pe","ON"}; +lookup(10641) -> {"Ps","ON"}; +lookup(10642) -> {"Pe","ON"}; +lookup(10643) -> {"Ps","ON"}; +lookup(10644) -> {"Pe","ON"}; +lookup(10645) -> {"Ps","ON"}; +lookup(10646) -> {"Pe","ON"}; +lookup(10647) -> {"Ps","ON"}; +lookup(10648) -> {"Pe","ON"}; +lookup(10649) -> {"Sm","ON"}; +lookup(10650) -> {"Sm","ON"}; +lookup(10651) -> {"Sm","ON"}; +lookup(10652) -> {"Sm","ON"}; +lookup(10653) -> {"Sm","ON"}; +lookup(10654) -> {"Sm","ON"}; +lookup(10655) -> {"Sm","ON"}; +lookup(10656) -> {"Sm","ON"}; +lookup(10657) -> {"Sm","ON"}; +lookup(10658) -> {"Sm","ON"}; +lookup(10659) -> {"Sm","ON"}; +lookup(10660) -> {"Sm","ON"}; +lookup(10661) -> {"Sm","ON"}; +lookup(10662) -> {"Sm","ON"}; +lookup(10663) -> {"Sm","ON"}; +lookup(10664) -> {"Sm","ON"}; +lookup(10665) -> {"Sm","ON"}; +lookup(10666) -> {"Sm","ON"}; +lookup(10667) -> {"Sm","ON"}; +lookup(10668) -> {"Sm","ON"}; +lookup(10669) -> {"Sm","ON"}; +lookup(10670) -> {"Sm","ON"}; +lookup(10671) -> {"Sm","ON"}; +lookup(10672) -> {"Sm","ON"}; +lookup(10673) -> {"Sm","ON"}; +lookup(10674) -> {"Sm","ON"}; +lookup(10675) -> {"Sm","ON"}; +lookup(10676) -> {"Sm","ON"}; +lookup(10677) -> {"Sm","ON"}; +lookup(10678) -> {"Sm","ON"}; +lookup(10679) -> {"Sm","ON"}; +lookup(10680) -> {"Sm","ON"}; +lookup(10681) -> {"Sm","ON"}; +lookup(10682) -> {"Sm","ON"}; +lookup(10683) -> {"Sm","ON"}; +lookup(10684) -> {"Sm","ON"}; +lookup(10685) -> {"Sm","ON"}; +lookup(10686) -> {"Sm","ON"}; +lookup(10687) -> {"Sm","ON"}; +lookup(10688) -> {"Sm","ON"}; +lookup(10689) -> {"Sm","ON"}; +lookup(10690) -> {"Sm","ON"}; +lookup(10691) -> {"Sm","ON"}; +lookup(10692) -> {"Sm","ON"}; +lookup(10693) -> {"Sm","ON"}; +lookup(10694) -> {"Sm","ON"}; +lookup(10695) -> {"Sm","ON"}; +lookup(10696) -> {"Sm","ON"}; +lookup(10697) -> {"Sm","ON"}; +lookup(10698) -> {"Sm","ON"}; +lookup(10699) -> {"Sm","ON"}; +lookup(10700) -> {"Sm","ON"}; +lookup(10701) -> {"Sm","ON"}; 
+lookup(10702) -> {"Sm","ON"}; +lookup(10703) -> {"Sm","ON"}; +lookup(10704) -> {"Sm","ON"}; +lookup(10705) -> {"Sm","ON"}; +lookup(10706) -> {"Sm","ON"}; +lookup(10707) -> {"Sm","ON"}; +lookup(10708) -> {"Sm","ON"}; +lookup(10709) -> {"Sm","ON"}; +lookup(10710) -> {"Sm","ON"}; +lookup(10711) -> {"Sm","ON"}; +lookup(10712) -> {"Ps","ON"}; +lookup(10713) -> {"Pe","ON"}; +lookup(10714) -> {"Ps","ON"}; +lookup(10715) -> {"Pe","ON"}; +lookup(10716) -> {"Sm","ON"}; +lookup(10717) -> {"Sm","ON"}; +lookup(10718) -> {"Sm","ON"}; +lookup(10719) -> {"Sm","ON"}; +lookup(10720) -> {"Sm","ON"}; +lookup(10721) -> {"Sm","ON"}; +lookup(10722) -> {"Sm","ON"}; +lookup(10723) -> {"Sm","ON"}; +lookup(10724) -> {"Sm","ON"}; +lookup(10725) -> {"Sm","ON"}; +lookup(10726) -> {"Sm","ON"}; +lookup(10727) -> {"Sm","ON"}; +lookup(10728) -> {"Sm","ON"}; +lookup(10729) -> {"Sm","ON"}; +lookup(10730) -> {"Sm","ON"}; +lookup(10731) -> {"Sm","ON"}; +lookup(10732) -> {"Sm","ON"}; +lookup(10733) -> {"Sm","ON"}; +lookup(10734) -> {"Sm","ON"}; +lookup(10735) -> {"Sm","ON"}; +lookup(10736) -> {"Sm","ON"}; +lookup(10737) -> {"Sm","ON"}; +lookup(10738) -> {"Sm","ON"}; +lookup(10739) -> {"Sm","ON"}; +lookup(10740) -> {"Sm","ON"}; +lookup(10741) -> {"Sm","ON"}; +lookup(10742) -> {"Sm","ON"}; +lookup(10743) -> {"Sm","ON"}; +lookup(10744) -> {"Sm","ON"}; +lookup(10745) -> {"Sm","ON"}; +lookup(10746) -> {"Sm","ON"}; +lookup(10747) -> {"Sm","ON"}; +lookup(10748) -> {"Ps","ON"}; +lookup(10749) -> {"Pe","ON"}; +lookup(10750) -> {"Sm","ON"}; +lookup(10751) -> {"Sm","ON"}; +lookup(10752) -> {"Sm","ON"}; +lookup(10753) -> {"Sm","ON"}; +lookup(10754) -> {"Sm","ON"}; +lookup(10755) -> {"Sm","ON"}; +lookup(10756) -> {"Sm","ON"}; +lookup(10757) -> {"Sm","ON"}; +lookup(10758) -> {"Sm","ON"}; +lookup(10759) -> {"Sm","ON"}; +lookup(10760) -> {"Sm","ON"}; +lookup(10761) -> {"Sm","ON"}; +lookup(10762) -> {"Sm","ON"}; +lookup(10763) -> {"Sm","ON"}; +lookup(10764) -> {"Sm","ON"}; +lookup(10765) -> {"Sm","ON"}; +lookup(10766) 
-> {"Sm","ON"}; +lookup(10767) -> {"Sm","ON"}; +lookup(10768) -> {"Sm","ON"}; +lookup(10769) -> {"Sm","ON"}; +lookup(10770) -> {"Sm","ON"}; +lookup(10771) -> {"Sm","ON"}; +lookup(10772) -> {"Sm","ON"}; +lookup(10773) -> {"Sm","ON"}; +lookup(10774) -> {"Sm","ON"}; +lookup(10775) -> {"Sm","ON"}; +lookup(10776) -> {"Sm","ON"}; +lookup(10777) -> {"Sm","ON"}; +lookup(10778) -> {"Sm","ON"}; +lookup(10779) -> {"Sm","ON"}; +lookup(10780) -> {"Sm","ON"}; +lookup(10781) -> {"Sm","ON"}; +lookup(10782) -> {"Sm","ON"}; +lookup(10783) -> {"Sm","ON"}; +lookup(10784) -> {"Sm","ON"}; +lookup(10785) -> {"Sm","ON"}; +lookup(10786) -> {"Sm","ON"}; +lookup(10787) -> {"Sm","ON"}; +lookup(10788) -> {"Sm","ON"}; +lookup(10789) -> {"Sm","ON"}; +lookup(10790) -> {"Sm","ON"}; +lookup(10791) -> {"Sm","ON"}; +lookup(10792) -> {"Sm","ON"}; +lookup(10793) -> {"Sm","ON"}; +lookup(10794) -> {"Sm","ON"}; +lookup(10795) -> {"Sm","ON"}; +lookup(10796) -> {"Sm","ON"}; +lookup(10797) -> {"Sm","ON"}; +lookup(10798) -> {"Sm","ON"}; +lookup(10799) -> {"Sm","ON"}; +lookup(10800) -> {"Sm","ON"}; +lookup(10801) -> {"Sm","ON"}; +lookup(10802) -> {"Sm","ON"}; +lookup(10803) -> {"Sm","ON"}; +lookup(10804) -> {"Sm","ON"}; +lookup(10805) -> {"Sm","ON"}; +lookup(10806) -> {"Sm","ON"}; +lookup(10807) -> {"Sm","ON"}; +lookup(10808) -> {"Sm","ON"}; +lookup(10809) -> {"Sm","ON"}; +lookup(10810) -> {"Sm","ON"}; +lookup(10811) -> {"Sm","ON"}; +lookup(10812) -> {"Sm","ON"}; +lookup(10813) -> {"Sm","ON"}; +lookup(10814) -> {"Sm","ON"}; +lookup(10815) -> {"Sm","ON"}; +lookup(10816) -> {"Sm","ON"}; +lookup(10817) -> {"Sm","ON"}; +lookup(10818) -> {"Sm","ON"}; +lookup(10819) -> {"Sm","ON"}; +lookup(10820) -> {"Sm","ON"}; +lookup(10821) -> {"Sm","ON"}; +lookup(10822) -> {"Sm","ON"}; +lookup(10823) -> {"Sm","ON"}; +lookup(10824) -> {"Sm","ON"}; +lookup(10825) -> {"Sm","ON"}; +lookup(10826) -> {"Sm","ON"}; +lookup(10827) -> {"Sm","ON"}; +lookup(10828) -> {"Sm","ON"}; +lookup(10829) -> {"Sm","ON"}; +lookup(10830) -> {"Sm","ON"}; 
+lookup(10831) -> {"Sm","ON"}; +lookup(10832) -> {"Sm","ON"}; +lookup(10833) -> {"Sm","ON"}; +lookup(10834) -> {"Sm","ON"}; +lookup(10835) -> {"Sm","ON"}; +lookup(10836) -> {"Sm","ON"}; +lookup(10837) -> {"Sm","ON"}; +lookup(10838) -> {"Sm","ON"}; +lookup(10839) -> {"Sm","ON"}; +lookup(10840) -> {"Sm","ON"}; +lookup(10841) -> {"Sm","ON"}; +lookup(10842) -> {"Sm","ON"}; +lookup(10843) -> {"Sm","ON"}; +lookup(10844) -> {"Sm","ON"}; +lookup(10845) -> {"Sm","ON"}; +lookup(10846) -> {"Sm","ON"}; +lookup(10847) -> {"Sm","ON"}; +lookup(10848) -> {"Sm","ON"}; +lookup(10849) -> {"Sm","ON"}; +lookup(10850) -> {"Sm","ON"}; +lookup(10851) -> {"Sm","ON"}; +lookup(10852) -> {"Sm","ON"}; +lookup(10853) -> {"Sm","ON"}; +lookup(10854) -> {"Sm","ON"}; +lookup(10855) -> {"Sm","ON"}; +lookup(10856) -> {"Sm","ON"}; +lookup(10857) -> {"Sm","ON"}; +lookup(10858) -> {"Sm","ON"}; +lookup(10859) -> {"Sm","ON"}; +lookup(10860) -> {"Sm","ON"}; +lookup(10861) -> {"Sm","ON"}; +lookup(10862) -> {"Sm","ON"}; +lookup(10863) -> {"Sm","ON"}; +lookup(10864) -> {"Sm","ON"}; +lookup(10865) -> {"Sm","ON"}; +lookup(10866) -> {"Sm","ON"}; +lookup(10867) -> {"Sm","ON"}; +lookup(10868) -> {"Sm","ON"}; +lookup(10869) -> {"Sm","ON"}; +lookup(10870) -> {"Sm","ON"}; +lookup(10871) -> {"Sm","ON"}; +lookup(10872) -> {"Sm","ON"}; +lookup(10873) -> {"Sm","ON"}; +lookup(10874) -> {"Sm","ON"}; +lookup(10875) -> {"Sm","ON"}; +lookup(10876) -> {"Sm","ON"}; +lookup(10877) -> {"Sm","ON"}; +lookup(10878) -> {"Sm","ON"}; +lookup(10879) -> {"Sm","ON"}; +lookup(10880) -> {"Sm","ON"}; +lookup(10881) -> {"Sm","ON"}; +lookup(10882) -> {"Sm","ON"}; +lookup(10883) -> {"Sm","ON"}; +lookup(10884) -> {"Sm","ON"}; +lookup(10885) -> {"Sm","ON"}; +lookup(10886) -> {"Sm","ON"}; +lookup(10887) -> {"Sm","ON"}; +lookup(10888) -> {"Sm","ON"}; +lookup(10889) -> {"Sm","ON"}; +lookup(10890) -> {"Sm","ON"}; +lookup(10891) -> {"Sm","ON"}; +lookup(10892) -> {"Sm","ON"}; +lookup(10893) -> {"Sm","ON"}; +lookup(10894) -> {"Sm","ON"}; +lookup(10895) 
-> {"Sm","ON"}; +lookup(10896) -> {"Sm","ON"}; +lookup(10897) -> {"Sm","ON"}; +lookup(10898) -> {"Sm","ON"}; +lookup(10899) -> {"Sm","ON"}; +lookup(10900) -> {"Sm","ON"}; +lookup(10901) -> {"Sm","ON"}; +lookup(10902) -> {"Sm","ON"}; +lookup(10903) -> {"Sm","ON"}; +lookup(10904) -> {"Sm","ON"}; +lookup(10905) -> {"Sm","ON"}; +lookup(10906) -> {"Sm","ON"}; +lookup(10907) -> {"Sm","ON"}; +lookup(10908) -> {"Sm","ON"}; +lookup(10909) -> {"Sm","ON"}; +lookup(10910) -> {"Sm","ON"}; +lookup(10911) -> {"Sm","ON"}; +lookup(10912) -> {"Sm","ON"}; +lookup(10913) -> {"Sm","ON"}; +lookup(10914) -> {"Sm","ON"}; +lookup(10915) -> {"Sm","ON"}; +lookup(10916) -> {"Sm","ON"}; +lookup(10917) -> {"Sm","ON"}; +lookup(10918) -> {"Sm","ON"}; +lookup(10919) -> {"Sm","ON"}; +lookup(10920) -> {"Sm","ON"}; +lookup(10921) -> {"Sm","ON"}; +lookup(10922) -> {"Sm","ON"}; +lookup(10923) -> {"Sm","ON"}; +lookup(10924) -> {"Sm","ON"}; +lookup(10925) -> {"Sm","ON"}; +lookup(10926) -> {"Sm","ON"}; +lookup(10927) -> {"Sm","ON"}; +lookup(10928) -> {"Sm","ON"}; +lookup(10929) -> {"Sm","ON"}; +lookup(10930) -> {"Sm","ON"}; +lookup(10931) -> {"Sm","ON"}; +lookup(10932) -> {"Sm","ON"}; +lookup(10933) -> {"Sm","ON"}; +lookup(10934) -> {"Sm","ON"}; +lookup(10935) -> {"Sm","ON"}; +lookup(10936) -> {"Sm","ON"}; +lookup(10937) -> {"Sm","ON"}; +lookup(10938) -> {"Sm","ON"}; +lookup(10939) -> {"Sm","ON"}; +lookup(10940) -> {"Sm","ON"}; +lookup(10941) -> {"Sm","ON"}; +lookup(10942) -> {"Sm","ON"}; +lookup(10943) -> {"Sm","ON"}; +lookup(10944) -> {"Sm","ON"}; +lookup(10945) -> {"Sm","ON"}; +lookup(10946) -> {"Sm","ON"}; +lookup(10947) -> {"Sm","ON"}; +lookup(10948) -> {"Sm","ON"}; +lookup(10949) -> {"Sm","ON"}; +lookup(10950) -> {"Sm","ON"}; +lookup(10951) -> {"Sm","ON"}; +lookup(10952) -> {"Sm","ON"}; +lookup(10953) -> {"Sm","ON"}; +lookup(10954) -> {"Sm","ON"}; +lookup(10955) -> {"Sm","ON"}; +lookup(10956) -> {"Sm","ON"}; +lookup(10957) -> {"Sm","ON"}; +lookup(10958) -> {"Sm","ON"}; +lookup(10959) -> {"Sm","ON"}; 
+lookup(10960) -> {"Sm","ON"}; +lookup(10961) -> {"Sm","ON"}; +lookup(10962) -> {"Sm","ON"}; +lookup(10963) -> {"Sm","ON"}; +lookup(10964) -> {"Sm","ON"}; +lookup(10965) -> {"Sm","ON"}; +lookup(10966) -> {"Sm","ON"}; +lookup(10967) -> {"Sm","ON"}; +lookup(10968) -> {"Sm","ON"}; +lookup(10969) -> {"Sm","ON"}; +lookup(10970) -> {"Sm","ON"}; +lookup(10971) -> {"Sm","ON"}; +lookup(10972) -> {"Sm","ON"}; +lookup(10973) -> {"Sm","ON"}; +lookup(10974) -> {"Sm","ON"}; +lookup(10975) -> {"Sm","ON"}; +lookup(10976) -> {"Sm","ON"}; +lookup(10977) -> {"Sm","ON"}; +lookup(10978) -> {"Sm","ON"}; +lookup(10979) -> {"Sm","ON"}; +lookup(10980) -> {"Sm","ON"}; +lookup(10981) -> {"Sm","ON"}; +lookup(10982) -> {"Sm","ON"}; +lookup(10983) -> {"Sm","ON"}; +lookup(10984) -> {"Sm","ON"}; +lookup(10985) -> {"Sm","ON"}; +lookup(10986) -> {"Sm","ON"}; +lookup(10987) -> {"Sm","ON"}; +lookup(10988) -> {"Sm","ON"}; +lookup(10989) -> {"Sm","ON"}; +lookup(10990) -> {"Sm","ON"}; +lookup(10991) -> {"Sm","ON"}; +lookup(10992) -> {"Sm","ON"}; +lookup(10993) -> {"Sm","ON"}; +lookup(10994) -> {"Sm","ON"}; +lookup(10995) -> {"Sm","ON"}; +lookup(10996) -> {"Sm","ON"}; +lookup(10997) -> {"Sm","ON"}; +lookup(10998) -> {"Sm","ON"}; +lookup(10999) -> {"Sm","ON"}; +lookup(11000) -> {"Sm","ON"}; +lookup(11001) -> {"Sm","ON"}; +lookup(11002) -> {"Sm","ON"}; +lookup(11003) -> {"Sm","ON"}; +lookup(11004) -> {"Sm","ON"}; +lookup(11005) -> {"Sm","ON"}; +lookup(11006) -> {"Sm","ON"}; +lookup(11007) -> {"Sm","ON"}; +lookup(11008) -> {"So","ON"}; +lookup(11009) -> {"So","ON"}; +lookup(11010) -> {"So","ON"}; +lookup(11011) -> {"So","ON"}; +lookup(11012) -> {"So","ON"}; +lookup(11013) -> {"So","ON"}; +lookup(11014) -> {"So","ON"}; +lookup(11015) -> {"So","ON"}; +lookup(11016) -> {"So","ON"}; +lookup(11017) -> {"So","ON"}; +lookup(11018) -> {"So","ON"}; +lookup(11019) -> {"So","ON"}; +lookup(11020) -> {"So","ON"}; +lookup(11021) -> {"So","ON"}; +lookup(11022) -> {"So","ON"}; +lookup(11023) -> {"So","ON"}; +lookup(11024) 
-> {"So","ON"}; +lookup(11025) -> {"So","ON"}; +lookup(11026) -> {"So","ON"}; +lookup(11027) -> {"So","ON"}; +lookup(11028) -> {"So","ON"}; +lookup(11029) -> {"So","ON"}; +lookup(11030) -> {"So","ON"}; +lookup(11031) -> {"So","ON"}; +lookup(11032) -> {"So","ON"}; +lookup(11033) -> {"So","ON"}; +lookup(11034) -> {"So","ON"}; +lookup(11035) -> {"So","ON"}; +lookup(11036) -> {"So","ON"}; +lookup(11037) -> {"So","ON"}; +lookup(11038) -> {"So","ON"}; +lookup(11039) -> {"So","ON"}; +lookup(11040) -> {"So","ON"}; +lookup(11041) -> {"So","ON"}; +lookup(11042) -> {"So","ON"}; +lookup(11043) -> {"So","ON"}; +lookup(11044) -> {"So","ON"}; +lookup(11045) -> {"So","ON"}; +lookup(11046) -> {"So","ON"}; +lookup(11047) -> {"So","ON"}; +lookup(11048) -> {"So","ON"}; +lookup(11049) -> {"So","ON"}; +lookup(11050) -> {"So","ON"}; +lookup(11051) -> {"So","ON"}; +lookup(11052) -> {"So","ON"}; +lookup(11053) -> {"So","ON"}; +lookup(11054) -> {"So","ON"}; +lookup(11055) -> {"So","ON"}; +lookup(11056) -> {"Sm","ON"}; +lookup(11057) -> {"Sm","ON"}; +lookup(11058) -> {"Sm","ON"}; +lookup(11059) -> {"Sm","ON"}; +lookup(11060) -> {"Sm","ON"}; +lookup(11061) -> {"Sm","ON"}; +lookup(11062) -> {"Sm","ON"}; +lookup(11063) -> {"Sm","ON"}; +lookup(11064) -> {"Sm","ON"}; +lookup(11065) -> {"Sm","ON"}; +lookup(11066) -> {"Sm","ON"}; +lookup(11067) -> {"Sm","ON"}; +lookup(11068) -> {"Sm","ON"}; +lookup(11069) -> {"Sm","ON"}; +lookup(11070) -> {"Sm","ON"}; +lookup(11071) -> {"Sm","ON"}; +lookup(11072) -> {"Sm","ON"}; +lookup(11073) -> {"Sm","ON"}; +lookup(11074) -> {"Sm","ON"}; +lookup(11075) -> {"Sm","ON"}; +lookup(11076) -> {"Sm","ON"}; +lookup(11077) -> {"So","ON"}; +lookup(11078) -> {"So","ON"}; +lookup(11079) -> {"Sm","ON"}; +lookup(11080) -> {"Sm","ON"}; +lookup(11081) -> {"Sm","ON"}; +lookup(11082) -> {"Sm","ON"}; +lookup(11083) -> {"Sm","ON"}; +lookup(11084) -> {"Sm","ON"}; +lookup(11085) -> {"So","ON"}; +lookup(11086) -> {"So","ON"}; +lookup(11087) -> {"So","ON"}; +lookup(11088) -> {"So","ON"}; 
+lookup(11089) -> {"So","ON"}; +lookup(11090) -> {"So","ON"}; +lookup(11091) -> {"So","ON"}; +lookup(11092) -> {"So","ON"}; +lookup(11093) -> {"So","ON"}; +lookup(11094) -> {"So","ON"}; +lookup(11095) -> {"So","ON"}; +lookup(11096) -> {"So","ON"}; +lookup(11097) -> {"So","ON"}; +lookup(11098) -> {"So","ON"}; +lookup(11099) -> {"So","ON"}; +lookup(11100) -> {"So","ON"}; +lookup(11101) -> {"So","ON"}; +lookup(11102) -> {"So","ON"}; +lookup(11103) -> {"So","ON"}; +lookup(11104) -> {"So","ON"}; +lookup(11105) -> {"So","ON"}; +lookup(11106) -> {"So","ON"}; +lookup(11107) -> {"So","ON"}; +lookup(11108) -> {"So","ON"}; +lookup(11109) -> {"So","ON"}; +lookup(11110) -> {"So","ON"}; +lookup(11111) -> {"So","ON"}; +lookup(11112) -> {"So","ON"}; +lookup(11113) -> {"So","ON"}; +lookup(11114) -> {"So","ON"}; +lookup(11115) -> {"So","ON"}; +lookup(11116) -> {"So","ON"}; +lookup(11117) -> {"So","ON"}; +lookup(11118) -> {"So","ON"}; +lookup(11119) -> {"So","ON"}; +lookup(11120) -> {"So","ON"}; +lookup(11121) -> {"So","ON"}; +lookup(11122) -> {"So","ON"}; +lookup(11123) -> {"So","ON"}; +lookup(11126) -> {"So","ON"}; +lookup(11127) -> {"So","ON"}; +lookup(11128) -> {"So","ON"}; +lookup(11129) -> {"So","ON"}; +lookup(11130) -> {"So","ON"}; +lookup(11131) -> {"So","ON"}; +lookup(11132) -> {"So","ON"}; +lookup(11133) -> {"So","ON"}; +lookup(11134) -> {"So","ON"}; +lookup(11135) -> {"So","ON"}; +lookup(11136) -> {"So","ON"}; +lookup(11137) -> {"So","ON"}; +lookup(11138) -> {"So","ON"}; +lookup(11139) -> {"So","ON"}; +lookup(11140) -> {"So","ON"}; +lookup(11141) -> {"So","ON"}; +lookup(11142) -> {"So","ON"}; +lookup(11143) -> {"So","ON"}; +lookup(11144) -> {"So","ON"}; +lookup(11145) -> {"So","ON"}; +lookup(11146) -> {"So","ON"}; +lookup(11147) -> {"So","ON"}; +lookup(11148) -> {"So","ON"}; +lookup(11149) -> {"So","ON"}; +lookup(11150) -> {"So","ON"}; +lookup(11151) -> {"So","ON"}; +lookup(11152) -> {"So","ON"}; +lookup(11153) -> {"So","ON"}; +lookup(11154) -> {"So","ON"}; +lookup(11155) 
-> {"So","ON"}; +lookup(11156) -> {"So","ON"}; +lookup(11157) -> {"So","ON"}; +lookup(11159) -> {"So","ON"}; +lookup(11160) -> {"So","ON"}; +lookup(11161) -> {"So","ON"}; +lookup(11162) -> {"So","ON"}; +lookup(11163) -> {"So","ON"}; +lookup(11164) -> {"So","ON"}; +lookup(11165) -> {"So","ON"}; +lookup(11166) -> {"So","ON"}; +lookup(11167) -> {"So","ON"}; +lookup(11168) -> {"So","ON"}; +lookup(11169) -> {"So","ON"}; +lookup(11170) -> {"So","ON"}; +lookup(11171) -> {"So","ON"}; +lookup(11172) -> {"So","ON"}; +lookup(11173) -> {"So","ON"}; +lookup(11174) -> {"So","ON"}; +lookup(11175) -> {"So","ON"}; +lookup(11176) -> {"So","ON"}; +lookup(11177) -> {"So","ON"}; +lookup(11178) -> {"So","ON"}; +lookup(11179) -> {"So","ON"}; +lookup(11180) -> {"So","ON"}; +lookup(11181) -> {"So","ON"}; +lookup(11182) -> {"So","ON"}; +lookup(11183) -> {"So","ON"}; +lookup(11184) -> {"So","ON"}; +lookup(11185) -> {"So","ON"}; +lookup(11186) -> {"So","ON"}; +lookup(11187) -> {"So","ON"}; +lookup(11188) -> {"So","ON"}; +lookup(11189) -> {"So","ON"}; +lookup(11190) -> {"So","ON"}; +lookup(11191) -> {"So","ON"}; +lookup(11192) -> {"So","ON"}; +lookup(11193) -> {"So","ON"}; +lookup(11194) -> {"So","ON"}; +lookup(11195) -> {"So","ON"}; +lookup(11196) -> {"So","ON"}; +lookup(11197) -> {"So","ON"}; +lookup(11198) -> {"So","ON"}; +lookup(11199) -> {"So","ON"}; +lookup(11200) -> {"So","ON"}; +lookup(11201) -> {"So","ON"}; +lookup(11202) -> {"So","ON"}; +lookup(11203) -> {"So","ON"}; +lookup(11204) -> {"So","ON"}; +lookup(11205) -> {"So","ON"}; +lookup(11206) -> {"So","ON"}; +lookup(11207) -> {"So","ON"}; +lookup(11208) -> {"So","ON"}; +lookup(11209) -> {"So","ON"}; +lookup(11210) -> {"So","ON"}; +lookup(11211) -> {"So","ON"}; +lookup(11212) -> {"So","ON"}; +lookup(11213) -> {"So","ON"}; +lookup(11214) -> {"So","ON"}; +lookup(11215) -> {"So","ON"}; +lookup(11216) -> {"So","ON"}; +lookup(11217) -> {"So","ON"}; +lookup(11218) -> {"So","ON"}; +lookup(11219) -> {"So","ON"}; +lookup(11220) -> {"So","ON"}; 
+lookup(11221) -> {"So","ON"}; +lookup(11222) -> {"So","ON"}; +lookup(11223) -> {"So","ON"}; +lookup(11224) -> {"So","ON"}; +lookup(11225) -> {"So","ON"}; +lookup(11226) -> {"So","ON"}; +lookup(11227) -> {"So","ON"}; +lookup(11228) -> {"So","ON"}; +lookup(11229) -> {"So","ON"}; +lookup(11230) -> {"So","ON"}; +lookup(11231) -> {"So","ON"}; +lookup(11232) -> {"So","ON"}; +lookup(11233) -> {"So","ON"}; +lookup(11234) -> {"So","ON"}; +lookup(11235) -> {"So","ON"}; +lookup(11236) -> {"So","ON"}; +lookup(11237) -> {"So","ON"}; +lookup(11238) -> {"So","ON"}; +lookup(11239) -> {"So","ON"}; +lookup(11240) -> {"So","ON"}; +lookup(11241) -> {"So","ON"}; +lookup(11242) -> {"So","ON"}; +lookup(11243) -> {"So","ON"}; +lookup(11244) -> {"So","ON"}; +lookup(11245) -> {"So","ON"}; +lookup(11246) -> {"So","ON"}; +lookup(11247) -> {"So","ON"}; +lookup(11248) -> {"So","ON"}; +lookup(11249) -> {"So","ON"}; +lookup(11250) -> {"So","ON"}; +lookup(11251) -> {"So","ON"}; +lookup(11252) -> {"So","ON"}; +lookup(11253) -> {"So","ON"}; +lookup(11254) -> {"So","ON"}; +lookup(11255) -> {"So","ON"}; +lookup(11256) -> {"So","ON"}; +lookup(11257) -> {"So","ON"}; +lookup(11258) -> {"So","ON"}; +lookup(11259) -> {"So","ON"}; +lookup(11260) -> {"So","ON"}; +lookup(11261) -> {"So","ON"}; +lookup(11262) -> {"So","ON"}; +lookup(11263) -> {"So","ON"}; +lookup(11264) -> {"Lu","L"}; +lookup(11265) -> {"Lu","L"}; +lookup(11266) -> {"Lu","L"}; +lookup(11267) -> {"Lu","L"}; +lookup(11268) -> {"Lu","L"}; +lookup(11269) -> {"Lu","L"}; +lookup(11270) -> {"Lu","L"}; +lookup(11271) -> {"Lu","L"}; +lookup(11272) -> {"Lu","L"}; +lookup(11273) -> {"Lu","L"}; +lookup(11274) -> {"Lu","L"}; +lookup(11275) -> {"Lu","L"}; +lookup(11276) -> {"Lu","L"}; +lookup(11277) -> {"Lu","L"}; +lookup(11278) -> {"Lu","L"}; +lookup(11279) -> {"Lu","L"}; +lookup(11280) -> {"Lu","L"}; +lookup(11281) -> {"Lu","L"}; +lookup(11282) -> {"Lu","L"}; +lookup(11283) -> {"Lu","L"}; +lookup(11284) -> {"Lu","L"}; +lookup(11285) -> {"Lu","L"}; 
+lookup(11286) -> {"Lu","L"}; +lookup(11287) -> {"Lu","L"}; +lookup(11288) -> {"Lu","L"}; +lookup(11289) -> {"Lu","L"}; +lookup(11290) -> {"Lu","L"}; +lookup(11291) -> {"Lu","L"}; +lookup(11292) -> {"Lu","L"}; +lookup(11293) -> {"Lu","L"}; +lookup(11294) -> {"Lu","L"}; +lookup(11295) -> {"Lu","L"}; +lookup(11296) -> {"Lu","L"}; +lookup(11297) -> {"Lu","L"}; +lookup(11298) -> {"Lu","L"}; +lookup(11299) -> {"Lu","L"}; +lookup(11300) -> {"Lu","L"}; +lookup(11301) -> {"Lu","L"}; +lookup(11302) -> {"Lu","L"}; +lookup(11303) -> {"Lu","L"}; +lookup(11304) -> {"Lu","L"}; +lookup(11305) -> {"Lu","L"}; +lookup(11306) -> {"Lu","L"}; +lookup(11307) -> {"Lu","L"}; +lookup(11308) -> {"Lu","L"}; +lookup(11309) -> {"Lu","L"}; +lookup(11310) -> {"Lu","L"}; +lookup(11312) -> {"Ll","L"}; +lookup(11313) -> {"Ll","L"}; +lookup(11314) -> {"Ll","L"}; +lookup(11315) -> {"Ll","L"}; +lookup(11316) -> {"Ll","L"}; +lookup(11317) -> {"Ll","L"}; +lookup(11318) -> {"Ll","L"}; +lookup(11319) -> {"Ll","L"}; +lookup(11320) -> {"Ll","L"}; +lookup(11321) -> {"Ll","L"}; +lookup(11322) -> {"Ll","L"}; +lookup(11323) -> {"Ll","L"}; +lookup(11324) -> {"Ll","L"}; +lookup(11325) -> {"Ll","L"}; +lookup(11326) -> {"Ll","L"}; +lookup(11327) -> {"Ll","L"}; +lookup(11328) -> {"Ll","L"}; +lookup(11329) -> {"Ll","L"}; +lookup(11330) -> {"Ll","L"}; +lookup(11331) -> {"Ll","L"}; +lookup(11332) -> {"Ll","L"}; +lookup(11333) -> {"Ll","L"}; +lookup(11334) -> {"Ll","L"}; +lookup(11335) -> {"Ll","L"}; +lookup(11336) -> {"Ll","L"}; +lookup(11337) -> {"Ll","L"}; +lookup(11338) -> {"Ll","L"}; +lookup(11339) -> {"Ll","L"}; +lookup(11340) -> {"Ll","L"}; +lookup(11341) -> {"Ll","L"}; +lookup(11342) -> {"Ll","L"}; +lookup(11343) -> {"Ll","L"}; +lookup(11344) -> {"Ll","L"}; +lookup(11345) -> {"Ll","L"}; +lookup(11346) -> {"Ll","L"}; +lookup(11347) -> {"Ll","L"}; +lookup(11348) -> {"Ll","L"}; +lookup(11349) -> {"Ll","L"}; +lookup(11350) -> {"Ll","L"}; +lookup(11351) -> {"Ll","L"}; +lookup(11352) -> {"Ll","L"}; +lookup(11353) -> 
{"Ll","L"}; +lookup(11354) -> {"Ll","L"}; +lookup(11355) -> {"Ll","L"}; +lookup(11356) -> {"Ll","L"}; +lookup(11357) -> {"Ll","L"}; +lookup(11358) -> {"Ll","L"}; +lookup(11360) -> {"Lu","L"}; +lookup(11361) -> {"Ll","L"}; +lookup(11362) -> {"Lu","L"}; +lookup(11363) -> {"Lu","L"}; +lookup(11364) -> {"Lu","L"}; +lookup(11365) -> {"Ll","L"}; +lookup(11366) -> {"Ll","L"}; +lookup(11367) -> {"Lu","L"}; +lookup(11368) -> {"Ll","L"}; +lookup(11369) -> {"Lu","L"}; +lookup(11370) -> {"Ll","L"}; +lookup(11371) -> {"Lu","L"}; +lookup(11372) -> {"Ll","L"}; +lookup(11373) -> {"Lu","L"}; +lookup(11374) -> {"Lu","L"}; +lookup(11375) -> {"Lu","L"}; +lookup(11376) -> {"Lu","L"}; +lookup(11377) -> {"Ll","L"}; +lookup(11378) -> {"Lu","L"}; +lookup(11379) -> {"Ll","L"}; +lookup(11380) -> {"Ll","L"}; +lookup(11381) -> {"Lu","L"}; +lookup(11382) -> {"Ll","L"}; +lookup(11383) -> {"Ll","L"}; +lookup(11384) -> {"Ll","L"}; +lookup(11385) -> {"Ll","L"}; +lookup(11386) -> {"Ll","L"}; +lookup(11387) -> {"Ll","L"}; +lookup(11388) -> {"Lm","L"}; +lookup(11389) -> {"Lm","L"}; +lookup(11390) -> {"Lu","L"}; +lookup(11391) -> {"Lu","L"}; +lookup(11392) -> {"Lu","L"}; +lookup(11393) -> {"Ll","L"}; +lookup(11394) -> {"Lu","L"}; +lookup(11395) -> {"Ll","L"}; +lookup(11396) -> {"Lu","L"}; +lookup(11397) -> {"Ll","L"}; +lookup(11398) -> {"Lu","L"}; +lookup(11399) -> {"Ll","L"}; +lookup(11400) -> {"Lu","L"}; +lookup(11401) -> {"Ll","L"}; +lookup(11402) -> {"Lu","L"}; +lookup(11403) -> {"Ll","L"}; +lookup(11404) -> {"Lu","L"}; +lookup(11405) -> {"Ll","L"}; +lookup(11406) -> {"Lu","L"}; +lookup(11407) -> {"Ll","L"}; +lookup(11408) -> {"Lu","L"}; +lookup(11409) -> {"Ll","L"}; +lookup(11410) -> {"Lu","L"}; +lookup(11411) -> {"Ll","L"}; +lookup(11412) -> {"Lu","L"}; +lookup(11413) -> {"Ll","L"}; +lookup(11414) -> {"Lu","L"}; +lookup(11415) -> {"Ll","L"}; +lookup(11416) -> {"Lu","L"}; +lookup(11417) -> {"Ll","L"}; +lookup(11418) -> {"Lu","L"}; +lookup(11419) -> {"Ll","L"}; +lookup(11420) -> {"Lu","L"}; 
+lookup(11421) -> {"Ll","L"}; +lookup(11422) -> {"Lu","L"}; +lookup(11423) -> {"Ll","L"}; +lookup(11424) -> {"Lu","L"}; +lookup(11425) -> {"Ll","L"}; +lookup(11426) -> {"Lu","L"}; +lookup(11427) -> {"Ll","L"}; +lookup(11428) -> {"Lu","L"}; +lookup(11429) -> {"Ll","L"}; +lookup(11430) -> {"Lu","L"}; +lookup(11431) -> {"Ll","L"}; +lookup(11432) -> {"Lu","L"}; +lookup(11433) -> {"Ll","L"}; +lookup(11434) -> {"Lu","L"}; +lookup(11435) -> {"Ll","L"}; +lookup(11436) -> {"Lu","L"}; +lookup(11437) -> {"Ll","L"}; +lookup(11438) -> {"Lu","L"}; +lookup(11439) -> {"Ll","L"}; +lookup(11440) -> {"Lu","L"}; +lookup(11441) -> {"Ll","L"}; +lookup(11442) -> {"Lu","L"}; +lookup(11443) -> {"Ll","L"}; +lookup(11444) -> {"Lu","L"}; +lookup(11445) -> {"Ll","L"}; +lookup(11446) -> {"Lu","L"}; +lookup(11447) -> {"Ll","L"}; +lookup(11448) -> {"Lu","L"}; +lookup(11449) -> {"Ll","L"}; +lookup(11450) -> {"Lu","L"}; +lookup(11451) -> {"Ll","L"}; +lookup(11452) -> {"Lu","L"}; +lookup(11453) -> {"Ll","L"}; +lookup(11454) -> {"Lu","L"}; +lookup(11455) -> {"Ll","L"}; +lookup(11456) -> {"Lu","L"}; +lookup(11457) -> {"Ll","L"}; +lookup(11458) -> {"Lu","L"}; +lookup(11459) -> {"Ll","L"}; +lookup(11460) -> {"Lu","L"}; +lookup(11461) -> {"Ll","L"}; +lookup(11462) -> {"Lu","L"}; +lookup(11463) -> {"Ll","L"}; +lookup(11464) -> {"Lu","L"}; +lookup(11465) -> {"Ll","L"}; +lookup(11466) -> {"Lu","L"}; +lookup(11467) -> {"Ll","L"}; +lookup(11468) -> {"Lu","L"}; +lookup(11469) -> {"Ll","L"}; +lookup(11470) -> {"Lu","L"}; +lookup(11471) -> {"Ll","L"}; +lookup(11472) -> {"Lu","L"}; +lookup(11473) -> {"Ll","L"}; +lookup(11474) -> {"Lu","L"}; +lookup(11475) -> {"Ll","L"}; +lookup(11476) -> {"Lu","L"}; +lookup(11477) -> {"Ll","L"}; +lookup(11478) -> {"Lu","L"}; +lookup(11479) -> {"Ll","L"}; +lookup(11480) -> {"Lu","L"}; +lookup(11481) -> {"Ll","L"}; +lookup(11482) -> {"Lu","L"}; +lookup(11483) -> {"Ll","L"}; +lookup(11484) -> {"Lu","L"}; +lookup(11485) -> {"Ll","L"}; +lookup(11486) -> {"Lu","L"}; +lookup(11487) -> 
{"Ll","L"}; +lookup(11488) -> {"Lu","L"}; +lookup(11489) -> {"Ll","L"}; +lookup(11490) -> {"Lu","L"}; +lookup(11491) -> {"Ll","L"}; +lookup(11492) -> {"Ll","L"}; +lookup(11493) -> {"So","ON"}; +lookup(11494) -> {"So","ON"}; +lookup(11495) -> {"So","ON"}; +lookup(11496) -> {"So","ON"}; +lookup(11497) -> {"So","ON"}; +lookup(11498) -> {"So","ON"}; +lookup(11499) -> {"Lu","L"}; +lookup(11500) -> {"Ll","L"}; +lookup(11501) -> {"Lu","L"}; +lookup(11502) -> {"Ll","L"}; +lookup(11503) -> {"Mn","NSM"}; +lookup(11504) -> {"Mn","NSM"}; +lookup(11505) -> {"Mn","NSM"}; +lookup(11506) -> {"Lu","L"}; +lookup(11507) -> {"Ll","L"}; +lookup(11513) -> {"Po","ON"}; +lookup(11514) -> {"Po","ON"}; +lookup(11515) -> {"Po","ON"}; +lookup(11516) -> {"Po","ON"}; +lookup(11517) -> {"No","ON"}; +lookup(11518) -> {"Po","ON"}; +lookup(11519) -> {"Po","ON"}; +lookup(11520) -> {"Ll","L"}; +lookup(11521) -> {"Ll","L"}; +lookup(11522) -> {"Ll","L"}; +lookup(11523) -> {"Ll","L"}; +lookup(11524) -> {"Ll","L"}; +lookup(11525) -> {"Ll","L"}; +lookup(11526) -> {"Ll","L"}; +lookup(11527) -> {"Ll","L"}; +lookup(11528) -> {"Ll","L"}; +lookup(11529) -> {"Ll","L"}; +lookup(11530) -> {"Ll","L"}; +lookup(11531) -> {"Ll","L"}; +lookup(11532) -> {"Ll","L"}; +lookup(11533) -> {"Ll","L"}; +lookup(11534) -> {"Ll","L"}; +lookup(11535) -> {"Ll","L"}; +lookup(11536) -> {"Ll","L"}; +lookup(11537) -> {"Ll","L"}; +lookup(11538) -> {"Ll","L"}; +lookup(11539) -> {"Ll","L"}; +lookup(11540) -> {"Ll","L"}; +lookup(11541) -> {"Ll","L"}; +lookup(11542) -> {"Ll","L"}; +lookup(11543) -> {"Ll","L"}; +lookup(11544) -> {"Ll","L"}; +lookup(11545) -> {"Ll","L"}; +lookup(11546) -> {"Ll","L"}; +lookup(11547) -> {"Ll","L"}; +lookup(11548) -> {"Ll","L"}; +lookup(11549) -> {"Ll","L"}; +lookup(11550) -> {"Ll","L"}; +lookup(11551) -> {"Ll","L"}; +lookup(11552) -> {"Ll","L"}; +lookup(11553) -> {"Ll","L"}; +lookup(11554) -> {"Ll","L"}; +lookup(11555) -> {"Ll","L"}; +lookup(11556) -> {"Ll","L"}; +lookup(11557) -> {"Ll","L"}; +lookup(11559) -> 
{"Ll","L"}; +lookup(11565) -> {"Ll","L"}; +lookup(11568) -> {"Lo","L"}; +lookup(11569) -> {"Lo","L"}; +lookup(11570) -> {"Lo","L"}; +lookup(11571) -> {"Lo","L"}; +lookup(11572) -> {"Lo","L"}; +lookup(11573) -> {"Lo","L"}; +lookup(11574) -> {"Lo","L"}; +lookup(11575) -> {"Lo","L"}; +lookup(11576) -> {"Lo","L"}; +lookup(11577) -> {"Lo","L"}; +lookup(11578) -> {"Lo","L"}; +lookup(11579) -> {"Lo","L"}; +lookup(11580) -> {"Lo","L"}; +lookup(11581) -> {"Lo","L"}; +lookup(11582) -> {"Lo","L"}; +lookup(11583) -> {"Lo","L"}; +lookup(11584) -> {"Lo","L"}; +lookup(11585) -> {"Lo","L"}; +lookup(11586) -> {"Lo","L"}; +lookup(11587) -> {"Lo","L"}; +lookup(11588) -> {"Lo","L"}; +lookup(11589) -> {"Lo","L"}; +lookup(11590) -> {"Lo","L"}; +lookup(11591) -> {"Lo","L"}; +lookup(11592) -> {"Lo","L"}; +lookup(11593) -> {"Lo","L"}; +lookup(11594) -> {"Lo","L"}; +lookup(11595) -> {"Lo","L"}; +lookup(11596) -> {"Lo","L"}; +lookup(11597) -> {"Lo","L"}; +lookup(11598) -> {"Lo","L"}; +lookup(11599) -> {"Lo","L"}; +lookup(11600) -> {"Lo","L"}; +lookup(11601) -> {"Lo","L"}; +lookup(11602) -> {"Lo","L"}; +lookup(11603) -> {"Lo","L"}; +lookup(11604) -> {"Lo","L"}; +lookup(11605) -> {"Lo","L"}; +lookup(11606) -> {"Lo","L"}; +lookup(11607) -> {"Lo","L"}; +lookup(11608) -> {"Lo","L"}; +lookup(11609) -> {"Lo","L"}; +lookup(11610) -> {"Lo","L"}; +lookup(11611) -> {"Lo","L"}; +lookup(11612) -> {"Lo","L"}; +lookup(11613) -> {"Lo","L"}; +lookup(11614) -> {"Lo","L"}; +lookup(11615) -> {"Lo","L"}; +lookup(11616) -> {"Lo","L"}; +lookup(11617) -> {"Lo","L"}; +lookup(11618) -> {"Lo","L"}; +lookup(11619) -> {"Lo","L"}; +lookup(11620) -> {"Lo","L"}; +lookup(11621) -> {"Lo","L"}; +lookup(11622) -> {"Lo","L"}; +lookup(11623) -> {"Lo","L"}; +lookup(11631) -> {"Lm","L"}; +lookup(11632) -> {"Po","L"}; +lookup(11647) -> {"Mn","NSM"}; +lookup(11648) -> {"Lo","L"}; +lookup(11649) -> {"Lo","L"}; +lookup(11650) -> {"Lo","L"}; +lookup(11651) -> {"Lo","L"}; +lookup(11652) -> {"Lo","L"}; +lookup(11653) -> {"Lo","L"}; 
+lookup(11654) -> {"Lo","L"}; +lookup(11655) -> {"Lo","L"}; +lookup(11656) -> {"Lo","L"}; +lookup(11657) -> {"Lo","L"}; +lookup(11658) -> {"Lo","L"}; +lookup(11659) -> {"Lo","L"}; +lookup(11660) -> {"Lo","L"}; +lookup(11661) -> {"Lo","L"}; +lookup(11662) -> {"Lo","L"}; +lookup(11663) -> {"Lo","L"}; +lookup(11664) -> {"Lo","L"}; +lookup(11665) -> {"Lo","L"}; +lookup(11666) -> {"Lo","L"}; +lookup(11667) -> {"Lo","L"}; +lookup(11668) -> {"Lo","L"}; +lookup(11669) -> {"Lo","L"}; +lookup(11670) -> {"Lo","L"}; +lookup(11680) -> {"Lo","L"}; +lookup(11681) -> {"Lo","L"}; +lookup(11682) -> {"Lo","L"}; +lookup(11683) -> {"Lo","L"}; +lookup(11684) -> {"Lo","L"}; +lookup(11685) -> {"Lo","L"}; +lookup(11686) -> {"Lo","L"}; +lookup(11688) -> {"Lo","L"}; +lookup(11689) -> {"Lo","L"}; +lookup(11690) -> {"Lo","L"}; +lookup(11691) -> {"Lo","L"}; +lookup(11692) -> {"Lo","L"}; +lookup(11693) -> {"Lo","L"}; +lookup(11694) -> {"Lo","L"}; +lookup(11696) -> {"Lo","L"}; +lookup(11697) -> {"Lo","L"}; +lookup(11698) -> {"Lo","L"}; +lookup(11699) -> {"Lo","L"}; +lookup(11700) -> {"Lo","L"}; +lookup(11701) -> {"Lo","L"}; +lookup(11702) -> {"Lo","L"}; +lookup(11704) -> {"Lo","L"}; +lookup(11705) -> {"Lo","L"}; +lookup(11706) -> {"Lo","L"}; +lookup(11707) -> {"Lo","L"}; +lookup(11708) -> {"Lo","L"}; +lookup(11709) -> {"Lo","L"}; +lookup(11710) -> {"Lo","L"}; +lookup(11712) -> {"Lo","L"}; +lookup(11713) -> {"Lo","L"}; +lookup(11714) -> {"Lo","L"}; +lookup(11715) -> {"Lo","L"}; +lookup(11716) -> {"Lo","L"}; +lookup(11717) -> {"Lo","L"}; +lookup(11718) -> {"Lo","L"}; +lookup(11720) -> {"Lo","L"}; +lookup(11721) -> {"Lo","L"}; +lookup(11722) -> {"Lo","L"}; +lookup(11723) -> {"Lo","L"}; +lookup(11724) -> {"Lo","L"}; +lookup(11725) -> {"Lo","L"}; +lookup(11726) -> {"Lo","L"}; +lookup(11728) -> {"Lo","L"}; +lookup(11729) -> {"Lo","L"}; +lookup(11730) -> {"Lo","L"}; +lookup(11731) -> {"Lo","L"}; +lookup(11732) -> {"Lo","L"}; +lookup(11733) -> {"Lo","L"}; +lookup(11734) -> {"Lo","L"}; +lookup(11736) -> 
{"Lo","L"}; +lookup(11737) -> {"Lo","L"}; +lookup(11738) -> {"Lo","L"}; +lookup(11739) -> {"Lo","L"}; +lookup(11740) -> {"Lo","L"}; +lookup(11741) -> {"Lo","L"}; +lookup(11742) -> {"Lo","L"}; +lookup(11744) -> {"Mn","NSM"}; +lookup(11745) -> {"Mn","NSM"}; +lookup(11746) -> {"Mn","NSM"}; +lookup(11747) -> {"Mn","NSM"}; +lookup(11748) -> {"Mn","NSM"}; +lookup(11749) -> {"Mn","NSM"}; +lookup(11750) -> {"Mn","NSM"}; +lookup(11751) -> {"Mn","NSM"}; +lookup(11752) -> {"Mn","NSM"}; +lookup(11753) -> {"Mn","NSM"}; +lookup(11754) -> {"Mn","NSM"}; +lookup(11755) -> {"Mn","NSM"}; +lookup(11756) -> {"Mn","NSM"}; +lookup(11757) -> {"Mn","NSM"}; +lookup(11758) -> {"Mn","NSM"}; +lookup(11759) -> {"Mn","NSM"}; +lookup(11760) -> {"Mn","NSM"}; +lookup(11761) -> {"Mn","NSM"}; +lookup(11762) -> {"Mn","NSM"}; +lookup(11763) -> {"Mn","NSM"}; +lookup(11764) -> {"Mn","NSM"}; +lookup(11765) -> {"Mn","NSM"}; +lookup(11766) -> {"Mn","NSM"}; +lookup(11767) -> {"Mn","NSM"}; +lookup(11768) -> {"Mn","NSM"}; +lookup(11769) -> {"Mn","NSM"}; +lookup(11770) -> {"Mn","NSM"}; +lookup(11771) -> {"Mn","NSM"}; +lookup(11772) -> {"Mn","NSM"}; +lookup(11773) -> {"Mn","NSM"}; +lookup(11774) -> {"Mn","NSM"}; +lookup(11775) -> {"Mn","NSM"}; +lookup(11776) -> {"Po","ON"}; +lookup(11777) -> {"Po","ON"}; +lookup(11778) -> {"Pi","ON"}; +lookup(11779) -> {"Pf","ON"}; +lookup(11780) -> {"Pi","ON"}; +lookup(11781) -> {"Pf","ON"}; +lookup(11782) -> {"Po","ON"}; +lookup(11783) -> {"Po","ON"}; +lookup(11784) -> {"Po","ON"}; +lookup(11785) -> {"Pi","ON"}; +lookup(11786) -> {"Pf","ON"}; +lookup(11787) -> {"Po","ON"}; +lookup(11788) -> {"Pi","ON"}; +lookup(11789) -> {"Pf","ON"}; +lookup(11790) -> {"Po","ON"}; +lookup(11791) -> {"Po","ON"}; +lookup(11792) -> {"Po","ON"}; +lookup(11793) -> {"Po","ON"}; +lookup(11794) -> {"Po","ON"}; +lookup(11795) -> {"Po","ON"}; +lookup(11796) -> {"Po","ON"}; +lookup(11797) -> {"Po","ON"}; +lookup(11798) -> {"Po","ON"}; +lookup(11799) -> {"Pd","ON"}; +lookup(11800) -> {"Po","ON"}; 
+lookup(11801) -> {"Po","ON"}; +lookup(11802) -> {"Pd","ON"}; +lookup(11803) -> {"Po","ON"}; +lookup(11804) -> {"Pi","ON"}; +lookup(11805) -> {"Pf","ON"}; +lookup(11806) -> {"Po","ON"}; +lookup(11807) -> {"Po","ON"}; +lookup(11808) -> {"Pi","ON"}; +lookup(11809) -> {"Pf","ON"}; +lookup(11810) -> {"Ps","ON"}; +lookup(11811) -> {"Pe","ON"}; +lookup(11812) -> {"Ps","ON"}; +lookup(11813) -> {"Pe","ON"}; +lookup(11814) -> {"Ps","ON"}; +lookup(11815) -> {"Pe","ON"}; +lookup(11816) -> {"Ps","ON"}; +lookup(11817) -> {"Pe","ON"}; +lookup(11818) -> {"Po","ON"}; +lookup(11819) -> {"Po","ON"}; +lookup(11820) -> {"Po","ON"}; +lookup(11821) -> {"Po","ON"}; +lookup(11822) -> {"Po","ON"}; +lookup(11823) -> {"Lm","ON"}; +lookup(11824) -> {"Po","ON"}; +lookup(11825) -> {"Po","ON"}; +lookup(11826) -> {"Po","ON"}; +lookup(11827) -> {"Po","ON"}; +lookup(11828) -> {"Po","ON"}; +lookup(11829) -> {"Po","ON"}; +lookup(11830) -> {"Po","ON"}; +lookup(11831) -> {"Po","ON"}; +lookup(11832) -> {"Po","ON"}; +lookup(11833) -> {"Po","ON"}; +lookup(11834) -> {"Pd","ON"}; +lookup(11835) -> {"Pd","ON"}; +lookup(11836) -> {"Po","ON"}; +lookup(11837) -> {"Po","ON"}; +lookup(11838) -> {"Po","ON"}; +lookup(11839) -> {"Po","ON"}; +lookup(11840) -> {"Pd","ON"}; +lookup(11841) -> {"Po","ON"}; +lookup(11842) -> {"Ps","ON"}; +lookup(11843) -> {"Po","ON"}; +lookup(11844) -> {"Po","ON"}; +lookup(11845) -> {"Po","ON"}; +lookup(11846) -> {"Po","ON"}; +lookup(11847) -> {"Po","ON"}; +lookup(11848) -> {"Po","ON"}; +lookup(11849) -> {"Po","ON"}; +lookup(11850) -> {"Po","ON"}; +lookup(11851) -> {"Po","ON"}; +lookup(11852) -> {"Po","ON"}; +lookup(11853) -> {"Po","ON"}; +lookup(11854) -> {"Po","ON"}; +lookup(11855) -> {"Po","ON"}; +lookup(11856) -> {"So","ON"}; +lookup(11857) -> {"So","ON"}; +lookup(11858) -> {"Po","ON"}; +lookup(11904) -> {"So","ON"}; +lookup(11905) -> {"So","ON"}; +lookup(11906) -> {"So","ON"}; +lookup(11907) -> {"So","ON"}; +lookup(11908) -> {"So","ON"}; +lookup(11909) -> {"So","ON"}; +lookup(11910) 
-> {"So","ON"}; +lookup(11911) -> {"So","ON"}; +lookup(11912) -> {"So","ON"}; +lookup(11913) -> {"So","ON"}; +lookup(11914) -> {"So","ON"}; +lookup(11915) -> {"So","ON"}; +lookup(11916) -> {"So","ON"}; +lookup(11917) -> {"So","ON"}; +lookup(11918) -> {"So","ON"}; +lookup(11919) -> {"So","ON"}; +lookup(11920) -> {"So","ON"}; +lookup(11921) -> {"So","ON"}; +lookup(11922) -> {"So","ON"}; +lookup(11923) -> {"So","ON"}; +lookup(11924) -> {"So","ON"}; +lookup(11925) -> {"So","ON"}; +lookup(11926) -> {"So","ON"}; +lookup(11927) -> {"So","ON"}; +lookup(11928) -> {"So","ON"}; +lookup(11929) -> {"So","ON"}; +lookup(11931) -> {"So","ON"}; +lookup(11932) -> {"So","ON"}; +lookup(11933) -> {"So","ON"}; +lookup(11934) -> {"So","ON"}; +lookup(11935) -> {"So","ON"}; +lookup(11936) -> {"So","ON"}; +lookup(11937) -> {"So","ON"}; +lookup(11938) -> {"So","ON"}; +lookup(11939) -> {"So","ON"}; +lookup(11940) -> {"So","ON"}; +lookup(11941) -> {"So","ON"}; +lookup(11942) -> {"So","ON"}; +lookup(11943) -> {"So","ON"}; +lookup(11944) -> {"So","ON"}; +lookup(11945) -> {"So","ON"}; +lookup(11946) -> {"So","ON"}; +lookup(11947) -> {"So","ON"}; +lookup(11948) -> {"So","ON"}; +lookup(11949) -> {"So","ON"}; +lookup(11950) -> {"So","ON"}; +lookup(11951) -> {"So","ON"}; +lookup(11952) -> {"So","ON"}; +lookup(11953) -> {"So","ON"}; +lookup(11954) -> {"So","ON"}; +lookup(11955) -> {"So","ON"}; +lookup(11956) -> {"So","ON"}; +lookup(11957) -> {"So","ON"}; +lookup(11958) -> {"So","ON"}; +lookup(11959) -> {"So","ON"}; +lookup(11960) -> {"So","ON"}; +lookup(11961) -> {"So","ON"}; +lookup(11962) -> {"So","ON"}; +lookup(11963) -> {"So","ON"}; +lookup(11964) -> {"So","ON"}; +lookup(11965) -> {"So","ON"}; +lookup(11966) -> {"So","ON"}; +lookup(11967) -> {"So","ON"}; +lookup(11968) -> {"So","ON"}; +lookup(11969) -> {"So","ON"}; +lookup(11970) -> {"So","ON"}; +lookup(11971) -> {"So","ON"}; +lookup(11972) -> {"So","ON"}; +lookup(11973) -> {"So","ON"}; +lookup(11974) -> {"So","ON"}; +lookup(11975) -> {"So","ON"}; 
+lookup(11976) -> {"So","ON"}; +lookup(11977) -> {"So","ON"}; +lookup(11978) -> {"So","ON"}; +lookup(11979) -> {"So","ON"}; +lookup(11980) -> {"So","ON"}; +lookup(11981) -> {"So","ON"}; +lookup(11982) -> {"So","ON"}; +lookup(11983) -> {"So","ON"}; +lookup(11984) -> {"So","ON"}; +lookup(11985) -> {"So","ON"}; +lookup(11986) -> {"So","ON"}; +lookup(11987) -> {"So","ON"}; +lookup(11988) -> {"So","ON"}; +lookup(11989) -> {"So","ON"}; +lookup(11990) -> {"So","ON"}; +lookup(11991) -> {"So","ON"}; +lookup(11992) -> {"So","ON"}; +lookup(11993) -> {"So","ON"}; +lookup(11994) -> {"So","ON"}; +lookup(11995) -> {"So","ON"}; +lookup(11996) -> {"So","ON"}; +lookup(11997) -> {"So","ON"}; +lookup(11998) -> {"So","ON"}; +lookup(11999) -> {"So","ON"}; +lookup(12000) -> {"So","ON"}; +lookup(12001) -> {"So","ON"}; +lookup(12002) -> {"So","ON"}; +lookup(12003) -> {"So","ON"}; +lookup(12004) -> {"So","ON"}; +lookup(12005) -> {"So","ON"}; +lookup(12006) -> {"So","ON"}; +lookup(12007) -> {"So","ON"}; +lookup(12008) -> {"So","ON"}; +lookup(12009) -> {"So","ON"}; +lookup(12010) -> {"So","ON"}; +lookup(12011) -> {"So","ON"}; +lookup(12012) -> {"So","ON"}; +lookup(12013) -> {"So","ON"}; +lookup(12014) -> {"So","ON"}; +lookup(12015) -> {"So","ON"}; +lookup(12016) -> {"So","ON"}; +lookup(12017) -> {"So","ON"}; +lookup(12018) -> {"So","ON"}; +lookup(12019) -> {"So","ON"}; +lookup(12032) -> {"So","ON"}; +lookup(12033) -> {"So","ON"}; +lookup(12034) -> {"So","ON"}; +lookup(12035) -> {"So","ON"}; +lookup(12036) -> {"So","ON"}; +lookup(12037) -> {"So","ON"}; +lookup(12038) -> {"So","ON"}; +lookup(12039) -> {"So","ON"}; +lookup(12040) -> {"So","ON"}; +lookup(12041) -> {"So","ON"}; +lookup(12042) -> {"So","ON"}; +lookup(12043) -> {"So","ON"}; +lookup(12044) -> {"So","ON"}; +lookup(12045) -> {"So","ON"}; +lookup(12046) -> {"So","ON"}; +lookup(12047) -> {"So","ON"}; +lookup(12048) -> {"So","ON"}; +lookup(12049) -> {"So","ON"}; +lookup(12050) -> {"So","ON"}; +lookup(12051) -> {"So","ON"}; +lookup(12052) 
-> {"So","ON"}; +lookup(12053) -> {"So","ON"}; +lookup(12054) -> {"So","ON"}; +lookup(12055) -> {"So","ON"}; +lookup(12056) -> {"So","ON"}; +lookup(12057) -> {"So","ON"}; +lookup(12058) -> {"So","ON"}; +lookup(12059) -> {"So","ON"}; +lookup(12060) -> {"So","ON"}; +lookup(12061) -> {"So","ON"}; +lookup(12062) -> {"So","ON"}; +lookup(12063) -> {"So","ON"}; +lookup(12064) -> {"So","ON"}; +lookup(12065) -> {"So","ON"}; +lookup(12066) -> {"So","ON"}; +lookup(12067) -> {"So","ON"}; +lookup(12068) -> {"So","ON"}; +lookup(12069) -> {"So","ON"}; +lookup(12070) -> {"So","ON"}; +lookup(12071) -> {"So","ON"}; +lookup(12072) -> {"So","ON"}; +lookup(12073) -> {"So","ON"}; +lookup(12074) -> {"So","ON"}; +lookup(12075) -> {"So","ON"}; +lookup(12076) -> {"So","ON"}; +lookup(12077) -> {"So","ON"}; +lookup(12078) -> {"So","ON"}; +lookup(12079) -> {"So","ON"}; +lookup(12080) -> {"So","ON"}; +lookup(12081) -> {"So","ON"}; +lookup(12082) -> {"So","ON"}; +lookup(12083) -> {"So","ON"}; +lookup(12084) -> {"So","ON"}; +lookup(12085) -> {"So","ON"}; +lookup(12086) -> {"So","ON"}; +lookup(12087) -> {"So","ON"}; +lookup(12088) -> {"So","ON"}; +lookup(12089) -> {"So","ON"}; +lookup(12090) -> {"So","ON"}; +lookup(12091) -> {"So","ON"}; +lookup(12092) -> {"So","ON"}; +lookup(12093) -> {"So","ON"}; +lookup(12094) -> {"So","ON"}; +lookup(12095) -> {"So","ON"}; +lookup(12096) -> {"So","ON"}; +lookup(12097) -> {"So","ON"}; +lookup(12098) -> {"So","ON"}; +lookup(12099) -> {"So","ON"}; +lookup(12100) -> {"So","ON"}; +lookup(12101) -> {"So","ON"}; +lookup(12102) -> {"So","ON"}; +lookup(12103) -> {"So","ON"}; +lookup(12104) -> {"So","ON"}; +lookup(12105) -> {"So","ON"}; +lookup(12106) -> {"So","ON"}; +lookup(12107) -> {"So","ON"}; +lookup(12108) -> {"So","ON"}; +lookup(12109) -> {"So","ON"}; +lookup(12110) -> {"So","ON"}; +lookup(12111) -> {"So","ON"}; +lookup(12112) -> {"So","ON"}; +lookup(12113) -> {"So","ON"}; +lookup(12114) -> {"So","ON"}; +lookup(12115) -> {"So","ON"}; +lookup(12116) -> {"So","ON"}; 
+lookup(12117) -> {"So","ON"}; +lookup(12118) -> {"So","ON"}; +lookup(12119) -> {"So","ON"}; +lookup(12120) -> {"So","ON"}; +lookup(12121) -> {"So","ON"}; +lookup(12122) -> {"So","ON"}; +lookup(12123) -> {"So","ON"}; +lookup(12124) -> {"So","ON"}; +lookup(12125) -> {"So","ON"}; +lookup(12126) -> {"So","ON"}; +lookup(12127) -> {"So","ON"}; +lookup(12128) -> {"So","ON"}; +lookup(12129) -> {"So","ON"}; +lookup(12130) -> {"So","ON"}; +lookup(12131) -> {"So","ON"}; +lookup(12132) -> {"So","ON"}; +lookup(12133) -> {"So","ON"}; +lookup(12134) -> {"So","ON"}; +lookup(12135) -> {"So","ON"}; +lookup(12136) -> {"So","ON"}; +lookup(12137) -> {"So","ON"}; +lookup(12138) -> {"So","ON"}; +lookup(12139) -> {"So","ON"}; +lookup(12140) -> {"So","ON"}; +lookup(12141) -> {"So","ON"}; +lookup(12142) -> {"So","ON"}; +lookup(12143) -> {"So","ON"}; +lookup(12144) -> {"So","ON"}; +lookup(12145) -> {"So","ON"}; +lookup(12146) -> {"So","ON"}; +lookup(12147) -> {"So","ON"}; +lookup(12148) -> {"So","ON"}; +lookup(12149) -> {"So","ON"}; +lookup(12150) -> {"So","ON"}; +lookup(12151) -> {"So","ON"}; +lookup(12152) -> {"So","ON"}; +lookup(12153) -> {"So","ON"}; +lookup(12154) -> {"So","ON"}; +lookup(12155) -> {"So","ON"}; +lookup(12156) -> {"So","ON"}; +lookup(12157) -> {"So","ON"}; +lookup(12158) -> {"So","ON"}; +lookup(12159) -> {"So","ON"}; +lookup(12160) -> {"So","ON"}; +lookup(12161) -> {"So","ON"}; +lookup(12162) -> {"So","ON"}; +lookup(12163) -> {"So","ON"}; +lookup(12164) -> {"So","ON"}; +lookup(12165) -> {"So","ON"}; +lookup(12166) -> {"So","ON"}; +lookup(12167) -> {"So","ON"}; +lookup(12168) -> {"So","ON"}; +lookup(12169) -> {"So","ON"}; +lookup(12170) -> {"So","ON"}; +lookup(12171) -> {"So","ON"}; +lookup(12172) -> {"So","ON"}; +lookup(12173) -> {"So","ON"}; +lookup(12174) -> {"So","ON"}; +lookup(12175) -> {"So","ON"}; +lookup(12176) -> {"So","ON"}; +lookup(12177) -> {"So","ON"}; +lookup(12178) -> {"So","ON"}; +lookup(12179) -> {"So","ON"}; +lookup(12180) -> {"So","ON"}; +lookup(12181) 
-> {"So","ON"}; +lookup(12182) -> {"So","ON"}; +lookup(12183) -> {"So","ON"}; +lookup(12184) -> {"So","ON"}; +lookup(12185) -> {"So","ON"}; +lookup(12186) -> {"So","ON"}; +lookup(12187) -> {"So","ON"}; +lookup(12188) -> {"So","ON"}; +lookup(12189) -> {"So","ON"}; +lookup(12190) -> {"So","ON"}; +lookup(12191) -> {"So","ON"}; +lookup(12192) -> {"So","ON"}; +lookup(12193) -> {"So","ON"}; +lookup(12194) -> {"So","ON"}; +lookup(12195) -> {"So","ON"}; +lookup(12196) -> {"So","ON"}; +lookup(12197) -> {"So","ON"}; +lookup(12198) -> {"So","ON"}; +lookup(12199) -> {"So","ON"}; +lookup(12200) -> {"So","ON"}; +lookup(12201) -> {"So","ON"}; +lookup(12202) -> {"So","ON"}; +lookup(12203) -> {"So","ON"}; +lookup(12204) -> {"So","ON"}; +lookup(12205) -> {"So","ON"}; +lookup(12206) -> {"So","ON"}; +lookup(12207) -> {"So","ON"}; +lookup(12208) -> {"So","ON"}; +lookup(12209) -> {"So","ON"}; +lookup(12210) -> {"So","ON"}; +lookup(12211) -> {"So","ON"}; +lookup(12212) -> {"So","ON"}; +lookup(12213) -> {"So","ON"}; +lookup(12214) -> {"So","ON"}; +lookup(12215) -> {"So","ON"}; +lookup(12216) -> {"So","ON"}; +lookup(12217) -> {"So","ON"}; +lookup(12218) -> {"So","ON"}; +lookup(12219) -> {"So","ON"}; +lookup(12220) -> {"So","ON"}; +lookup(12221) -> {"So","ON"}; +lookup(12222) -> {"So","ON"}; +lookup(12223) -> {"So","ON"}; +lookup(12224) -> {"So","ON"}; +lookup(12225) -> {"So","ON"}; +lookup(12226) -> {"So","ON"}; +lookup(12227) -> {"So","ON"}; +lookup(12228) -> {"So","ON"}; +lookup(12229) -> {"So","ON"}; +lookup(12230) -> {"So","ON"}; +lookup(12231) -> {"So","ON"}; +lookup(12232) -> {"So","ON"}; +lookup(12233) -> {"So","ON"}; +lookup(12234) -> {"So","ON"}; +lookup(12235) -> {"So","ON"}; +lookup(12236) -> {"So","ON"}; +lookup(12237) -> {"So","ON"}; +lookup(12238) -> {"So","ON"}; +lookup(12239) -> {"So","ON"}; +lookup(12240) -> {"So","ON"}; +lookup(12241) -> {"So","ON"}; +lookup(12242) -> {"So","ON"}; +lookup(12243) -> {"So","ON"}; +lookup(12244) -> {"So","ON"}; +lookup(12245) -> {"So","ON"}; 
+lookup(12272) -> {"So","ON"}; +lookup(12273) -> {"So","ON"}; +lookup(12274) -> {"So","ON"}; +lookup(12275) -> {"So","ON"}; +lookup(12276) -> {"So","ON"}; +lookup(12277) -> {"So","ON"}; +lookup(12278) -> {"So","ON"}; +lookup(12279) -> {"So","ON"}; +lookup(12280) -> {"So","ON"}; +lookup(12281) -> {"So","ON"}; +lookup(12282) -> {"So","ON"}; +lookup(12283) -> {"So","ON"}; +lookup(12288) -> {"Zs","WS"}; +lookup(12289) -> {"Po","ON"}; +lookup(12290) -> {"Po","ON"}; +lookup(12291) -> {"Po","ON"}; +lookup(12292) -> {"So","ON"}; +lookup(12293) -> {"Lm","L"}; +lookup(12294) -> {"Lo","L"}; +lookup(12295) -> {"Nl","L"}; +lookup(12296) -> {"Ps","ON"}; +lookup(12297) -> {"Pe","ON"}; +lookup(12298) -> {"Ps","ON"}; +lookup(12299) -> {"Pe","ON"}; +lookup(12300) -> {"Ps","ON"}; +lookup(12301) -> {"Pe","ON"}; +lookup(12302) -> {"Ps","ON"}; +lookup(12303) -> {"Pe","ON"}; +lookup(12304) -> {"Ps","ON"}; +lookup(12305) -> {"Pe","ON"}; +lookup(12306) -> {"So","ON"}; +lookup(12307) -> {"So","ON"}; +lookup(12308) -> {"Ps","ON"}; +lookup(12309) -> {"Pe","ON"}; +lookup(12310) -> {"Ps","ON"}; +lookup(12311) -> {"Pe","ON"}; +lookup(12312) -> {"Ps","ON"}; +lookup(12313) -> {"Pe","ON"}; +lookup(12314) -> {"Ps","ON"}; +lookup(12315) -> {"Pe","ON"}; +lookup(12316) -> {"Pd","ON"}; +lookup(12317) -> {"Ps","ON"}; +lookup(12318) -> {"Pe","ON"}; +lookup(12319) -> {"Pe","ON"}; +lookup(12320) -> {"So","ON"}; +lookup(12321) -> {"Nl","L"}; +lookup(12322) -> {"Nl","L"}; +lookup(12323) -> {"Nl","L"}; +lookup(12324) -> {"Nl","L"}; +lookup(12325) -> {"Nl","L"}; +lookup(12326) -> {"Nl","L"}; +lookup(12327) -> {"Nl","L"}; +lookup(12328) -> {"Nl","L"}; +lookup(12329) -> {"Nl","L"}; +lookup(12330) -> {"Mn","NSM"}; +lookup(12331) -> {"Mn","NSM"}; +lookup(12332) -> {"Mn","NSM"}; +lookup(12333) -> {"Mn","NSM"}; +lookup(12334) -> {"Mc","L"}; +lookup(12335) -> {"Mc","L"}; +lookup(12336) -> {"Pd","ON"}; +lookup(12337) -> {"Lm","L"}; +lookup(12338) -> {"Lm","L"}; +lookup(12339) -> {"Lm","L"}; +lookup(12340) -> 
{"Lm","L"}; +lookup(12341) -> {"Lm","L"}; +lookup(12342) -> {"So","ON"}; +lookup(12343) -> {"So","ON"}; +lookup(12344) -> {"Nl","L"}; +lookup(12345) -> {"Nl","L"}; +lookup(12346) -> {"Nl","L"}; +lookup(12347) -> {"Lm","L"}; +lookup(12348) -> {"Lo","L"}; +lookup(12349) -> {"Po","ON"}; +lookup(12350) -> {"So","ON"}; +lookup(12351) -> {"So","ON"}; +lookup(12353) -> {"Lo","L"}; +lookup(12354) -> {"Lo","L"}; +lookup(12355) -> {"Lo","L"}; +lookup(12356) -> {"Lo","L"}; +lookup(12357) -> {"Lo","L"}; +lookup(12358) -> {"Lo","L"}; +lookup(12359) -> {"Lo","L"}; +lookup(12360) -> {"Lo","L"}; +lookup(12361) -> {"Lo","L"}; +lookup(12362) -> {"Lo","L"}; +lookup(12363) -> {"Lo","L"}; +lookup(12364) -> {"Lo","L"}; +lookup(12365) -> {"Lo","L"}; +lookup(12366) -> {"Lo","L"}; +lookup(12367) -> {"Lo","L"}; +lookup(12368) -> {"Lo","L"}; +lookup(12369) -> {"Lo","L"}; +lookup(12370) -> {"Lo","L"}; +lookup(12371) -> {"Lo","L"}; +lookup(12372) -> {"Lo","L"}; +lookup(12373) -> {"Lo","L"}; +lookup(12374) -> {"Lo","L"}; +lookup(12375) -> {"Lo","L"}; +lookup(12376) -> {"Lo","L"}; +lookup(12377) -> {"Lo","L"}; +lookup(12378) -> {"Lo","L"}; +lookup(12379) -> {"Lo","L"}; +lookup(12380) -> {"Lo","L"}; +lookup(12381) -> {"Lo","L"}; +lookup(12382) -> {"Lo","L"}; +lookup(12383) -> {"Lo","L"}; +lookup(12384) -> {"Lo","L"}; +lookup(12385) -> {"Lo","L"}; +lookup(12386) -> {"Lo","L"}; +lookup(12387) -> {"Lo","L"}; +lookup(12388) -> {"Lo","L"}; +lookup(12389) -> {"Lo","L"}; +lookup(12390) -> {"Lo","L"}; +lookup(12391) -> {"Lo","L"}; +lookup(12392) -> {"Lo","L"}; +lookup(12393) -> {"Lo","L"}; +lookup(12394) -> {"Lo","L"}; +lookup(12395) -> {"Lo","L"}; +lookup(12396) -> {"Lo","L"}; +lookup(12397) -> {"Lo","L"}; +lookup(12398) -> {"Lo","L"}; +lookup(12399) -> {"Lo","L"}; +lookup(12400) -> {"Lo","L"}; +lookup(12401) -> {"Lo","L"}; +lookup(12402) -> {"Lo","L"}; +lookup(12403) -> {"Lo","L"}; +lookup(12404) -> {"Lo","L"}; +lookup(12405) -> {"Lo","L"}; +lookup(12406) -> {"Lo","L"}; +lookup(12407) -> {"Lo","L"}; 
+lookup(12408) -> {"Lo","L"}; +lookup(12409) -> {"Lo","L"}; +lookup(12410) -> {"Lo","L"}; +lookup(12411) -> {"Lo","L"}; +lookup(12412) -> {"Lo","L"}; +lookup(12413) -> {"Lo","L"}; +lookup(12414) -> {"Lo","L"}; +lookup(12415) -> {"Lo","L"}; +lookup(12416) -> {"Lo","L"}; +lookup(12417) -> {"Lo","L"}; +lookup(12418) -> {"Lo","L"}; +lookup(12419) -> {"Lo","L"}; +lookup(12420) -> {"Lo","L"}; +lookup(12421) -> {"Lo","L"}; +lookup(12422) -> {"Lo","L"}; +lookup(12423) -> {"Lo","L"}; +lookup(12424) -> {"Lo","L"}; +lookup(12425) -> {"Lo","L"}; +lookup(12426) -> {"Lo","L"}; +lookup(12427) -> {"Lo","L"}; +lookup(12428) -> {"Lo","L"}; +lookup(12429) -> {"Lo","L"}; +lookup(12430) -> {"Lo","L"}; +lookup(12431) -> {"Lo","L"}; +lookup(12432) -> {"Lo","L"}; +lookup(12433) -> {"Lo","L"}; +lookup(12434) -> {"Lo","L"}; +lookup(12435) -> {"Lo","L"}; +lookup(12436) -> {"Lo","L"}; +lookup(12437) -> {"Lo","L"}; +lookup(12438) -> {"Lo","L"}; +lookup(12441) -> {"Mn","NSM"}; +lookup(12442) -> {"Mn","NSM"}; +lookup(12443) -> {"Sk","ON"}; +lookup(12444) -> {"Sk","ON"}; +lookup(12445) -> {"Lm","L"}; +lookup(12446) -> {"Lm","L"}; +lookup(12447) -> {"Lo","L"}; +lookup(12448) -> {"Pd","ON"}; +lookup(12449) -> {"Lo","L"}; +lookup(12450) -> {"Lo","L"}; +lookup(12451) -> {"Lo","L"}; +lookup(12452) -> {"Lo","L"}; +lookup(12453) -> {"Lo","L"}; +lookup(12454) -> {"Lo","L"}; +lookup(12455) -> {"Lo","L"}; +lookup(12456) -> {"Lo","L"}; +lookup(12457) -> {"Lo","L"}; +lookup(12458) -> {"Lo","L"}; +lookup(12459) -> {"Lo","L"}; +lookup(12460) -> {"Lo","L"}; +lookup(12461) -> {"Lo","L"}; +lookup(12462) -> {"Lo","L"}; +lookup(12463) -> {"Lo","L"}; +lookup(12464) -> {"Lo","L"}; +lookup(12465) -> {"Lo","L"}; +lookup(12466) -> {"Lo","L"}; +lookup(12467) -> {"Lo","L"}; +lookup(12468) -> {"Lo","L"}; +lookup(12469) -> {"Lo","L"}; +lookup(12470) -> {"Lo","L"}; +lookup(12471) -> {"Lo","L"}; +lookup(12472) -> {"Lo","L"}; +lookup(12473) -> {"Lo","L"}; +lookup(12474) -> {"Lo","L"}; +lookup(12475) -> {"Lo","L"}; 
+lookup(12476) -> {"Lo","L"}; +lookup(12477) -> {"Lo","L"}; +lookup(12478) -> {"Lo","L"}; +lookup(12479) -> {"Lo","L"}; +lookup(12480) -> {"Lo","L"}; +lookup(12481) -> {"Lo","L"}; +lookup(12482) -> {"Lo","L"}; +lookup(12483) -> {"Lo","L"}; +lookup(12484) -> {"Lo","L"}; +lookup(12485) -> {"Lo","L"}; +lookup(12486) -> {"Lo","L"}; +lookup(12487) -> {"Lo","L"}; +lookup(12488) -> {"Lo","L"}; +lookup(12489) -> {"Lo","L"}; +lookup(12490) -> {"Lo","L"}; +lookup(12491) -> {"Lo","L"}; +lookup(12492) -> {"Lo","L"}; +lookup(12493) -> {"Lo","L"}; +lookup(12494) -> {"Lo","L"}; +lookup(12495) -> {"Lo","L"}; +lookup(12496) -> {"Lo","L"}; +lookup(12497) -> {"Lo","L"}; +lookup(12498) -> {"Lo","L"}; +lookup(12499) -> {"Lo","L"}; +lookup(12500) -> {"Lo","L"}; +lookup(12501) -> {"Lo","L"}; +lookup(12502) -> {"Lo","L"}; +lookup(12503) -> {"Lo","L"}; +lookup(12504) -> {"Lo","L"}; +lookup(12505) -> {"Lo","L"}; +lookup(12506) -> {"Lo","L"}; +lookup(12507) -> {"Lo","L"}; +lookup(12508) -> {"Lo","L"}; +lookup(12509) -> {"Lo","L"}; +lookup(12510) -> {"Lo","L"}; +lookup(12511) -> {"Lo","L"}; +lookup(12512) -> {"Lo","L"}; +lookup(12513) -> {"Lo","L"}; +lookup(12514) -> {"Lo","L"}; +lookup(12515) -> {"Lo","L"}; +lookup(12516) -> {"Lo","L"}; +lookup(12517) -> {"Lo","L"}; +lookup(12518) -> {"Lo","L"}; +lookup(12519) -> {"Lo","L"}; +lookup(12520) -> {"Lo","L"}; +lookup(12521) -> {"Lo","L"}; +lookup(12522) -> {"Lo","L"}; +lookup(12523) -> {"Lo","L"}; +lookup(12524) -> {"Lo","L"}; +lookup(12525) -> {"Lo","L"}; +lookup(12526) -> {"Lo","L"}; +lookup(12527) -> {"Lo","L"}; +lookup(12528) -> {"Lo","L"}; +lookup(12529) -> {"Lo","L"}; +lookup(12530) -> {"Lo","L"}; +lookup(12531) -> {"Lo","L"}; +lookup(12532) -> {"Lo","L"}; +lookup(12533) -> {"Lo","L"}; +lookup(12534) -> {"Lo","L"}; +lookup(12535) -> {"Lo","L"}; +lookup(12536) -> {"Lo","L"}; +lookup(12537) -> {"Lo","L"}; +lookup(12538) -> {"Lo","L"}; +lookup(12539) -> {"Po","ON"}; +lookup(12540) -> {"Lm","L"}; +lookup(12541) -> {"Lm","L"}; +lookup(12542) -> 
{"Lm","L"}; +lookup(12543) -> {"Lo","L"}; +lookup(12549) -> {"Lo","L"}; +lookup(12550) -> {"Lo","L"}; +lookup(12551) -> {"Lo","L"}; +lookup(12552) -> {"Lo","L"}; +lookup(12553) -> {"Lo","L"}; +lookup(12554) -> {"Lo","L"}; +lookup(12555) -> {"Lo","L"}; +lookup(12556) -> {"Lo","L"}; +lookup(12557) -> {"Lo","L"}; +lookup(12558) -> {"Lo","L"}; +lookup(12559) -> {"Lo","L"}; +lookup(12560) -> {"Lo","L"}; +lookup(12561) -> {"Lo","L"}; +lookup(12562) -> {"Lo","L"}; +lookup(12563) -> {"Lo","L"}; +lookup(12564) -> {"Lo","L"}; +lookup(12565) -> {"Lo","L"}; +lookup(12566) -> {"Lo","L"}; +lookup(12567) -> {"Lo","L"}; +lookup(12568) -> {"Lo","L"}; +lookup(12569) -> {"Lo","L"}; +lookup(12570) -> {"Lo","L"}; +lookup(12571) -> {"Lo","L"}; +lookup(12572) -> {"Lo","L"}; +lookup(12573) -> {"Lo","L"}; +lookup(12574) -> {"Lo","L"}; +lookup(12575) -> {"Lo","L"}; +lookup(12576) -> {"Lo","L"}; +lookup(12577) -> {"Lo","L"}; +lookup(12578) -> {"Lo","L"}; +lookup(12579) -> {"Lo","L"}; +lookup(12580) -> {"Lo","L"}; +lookup(12581) -> {"Lo","L"}; +lookup(12582) -> {"Lo","L"}; +lookup(12583) -> {"Lo","L"}; +lookup(12584) -> {"Lo","L"}; +lookup(12585) -> {"Lo","L"}; +lookup(12586) -> {"Lo","L"}; +lookup(12587) -> {"Lo","L"}; +lookup(12588) -> {"Lo","L"}; +lookup(12589) -> {"Lo","L"}; +lookup(12590) -> {"Lo","L"}; +lookup(12591) -> {"Lo","L"}; +lookup(12593) -> {"Lo","L"}; +lookup(12594) -> {"Lo","L"}; +lookup(12595) -> {"Lo","L"}; +lookup(12596) -> {"Lo","L"}; +lookup(12597) -> {"Lo","L"}; +lookup(12598) -> {"Lo","L"}; +lookup(12599) -> {"Lo","L"}; +lookup(12600) -> {"Lo","L"}; +lookup(12601) -> {"Lo","L"}; +lookup(12602) -> {"Lo","L"}; +lookup(12603) -> {"Lo","L"}; +lookup(12604) -> {"Lo","L"}; +lookup(12605) -> {"Lo","L"}; +lookup(12606) -> {"Lo","L"}; +lookup(12607) -> {"Lo","L"}; +lookup(12608) -> {"Lo","L"}; +lookup(12609) -> {"Lo","L"}; +lookup(12610) -> {"Lo","L"}; +lookup(12611) -> {"Lo","L"}; +lookup(12612) -> {"Lo","L"}; +lookup(12613) -> {"Lo","L"}; +lookup(12614) -> {"Lo","L"}; 
+lookup(12615) -> {"Lo","L"}; +lookup(12616) -> {"Lo","L"}; +lookup(12617) -> {"Lo","L"}; +lookup(12618) -> {"Lo","L"}; +lookup(12619) -> {"Lo","L"}; +lookup(12620) -> {"Lo","L"}; +lookup(12621) -> {"Lo","L"}; +lookup(12622) -> {"Lo","L"}; +lookup(12623) -> {"Lo","L"}; +lookup(12624) -> {"Lo","L"}; +lookup(12625) -> {"Lo","L"}; +lookup(12626) -> {"Lo","L"}; +lookup(12627) -> {"Lo","L"}; +lookup(12628) -> {"Lo","L"}; +lookup(12629) -> {"Lo","L"}; +lookup(12630) -> {"Lo","L"}; +lookup(12631) -> {"Lo","L"}; +lookup(12632) -> {"Lo","L"}; +lookup(12633) -> {"Lo","L"}; +lookup(12634) -> {"Lo","L"}; +lookup(12635) -> {"Lo","L"}; +lookup(12636) -> {"Lo","L"}; +lookup(12637) -> {"Lo","L"}; +lookup(12638) -> {"Lo","L"}; +lookup(12639) -> {"Lo","L"}; +lookup(12640) -> {"Lo","L"}; +lookup(12641) -> {"Lo","L"}; +lookup(12642) -> {"Lo","L"}; +lookup(12643) -> {"Lo","L"}; +lookup(12644) -> {"Lo","L"}; +lookup(12645) -> {"Lo","L"}; +lookup(12646) -> {"Lo","L"}; +lookup(12647) -> {"Lo","L"}; +lookup(12648) -> {"Lo","L"}; +lookup(12649) -> {"Lo","L"}; +lookup(12650) -> {"Lo","L"}; +lookup(12651) -> {"Lo","L"}; +lookup(12652) -> {"Lo","L"}; +lookup(12653) -> {"Lo","L"}; +lookup(12654) -> {"Lo","L"}; +lookup(12655) -> {"Lo","L"}; +lookup(12656) -> {"Lo","L"}; +lookup(12657) -> {"Lo","L"}; +lookup(12658) -> {"Lo","L"}; +lookup(12659) -> {"Lo","L"}; +lookup(12660) -> {"Lo","L"}; +lookup(12661) -> {"Lo","L"}; +lookup(12662) -> {"Lo","L"}; +lookup(12663) -> {"Lo","L"}; +lookup(12664) -> {"Lo","L"}; +lookup(12665) -> {"Lo","L"}; +lookup(12666) -> {"Lo","L"}; +lookup(12667) -> {"Lo","L"}; +lookup(12668) -> {"Lo","L"}; +lookup(12669) -> {"Lo","L"}; +lookup(12670) -> {"Lo","L"}; +lookup(12671) -> {"Lo","L"}; +lookup(12672) -> {"Lo","L"}; +lookup(12673) -> {"Lo","L"}; +lookup(12674) -> {"Lo","L"}; +lookup(12675) -> {"Lo","L"}; +lookup(12676) -> {"Lo","L"}; +lookup(12677) -> {"Lo","L"}; +lookup(12678) -> {"Lo","L"}; +lookup(12679) -> {"Lo","L"}; +lookup(12680) -> {"Lo","L"}; +lookup(12681) -> 
{"Lo","L"}; +lookup(12682) -> {"Lo","L"}; +lookup(12683) -> {"Lo","L"}; +lookup(12684) -> {"Lo","L"}; +lookup(12685) -> {"Lo","L"}; +lookup(12686) -> {"Lo","L"}; +lookup(12688) -> {"So","L"}; +lookup(12689) -> {"So","L"}; +lookup(12690) -> {"No","L"}; +lookup(12691) -> {"No","L"}; +lookup(12692) -> {"No","L"}; +lookup(12693) -> {"No","L"}; +lookup(12694) -> {"So","L"}; +lookup(12695) -> {"So","L"}; +lookup(12696) -> {"So","L"}; +lookup(12697) -> {"So","L"}; +lookup(12698) -> {"So","L"}; +lookup(12699) -> {"So","L"}; +lookup(12700) -> {"So","L"}; +lookup(12701) -> {"So","L"}; +lookup(12702) -> {"So","L"}; +lookup(12703) -> {"So","L"}; +lookup(12704) -> {"Lo","L"}; +lookup(12705) -> {"Lo","L"}; +lookup(12706) -> {"Lo","L"}; +lookup(12707) -> {"Lo","L"}; +lookup(12708) -> {"Lo","L"}; +lookup(12709) -> {"Lo","L"}; +lookup(12710) -> {"Lo","L"}; +lookup(12711) -> {"Lo","L"}; +lookup(12712) -> {"Lo","L"}; +lookup(12713) -> {"Lo","L"}; +lookup(12714) -> {"Lo","L"}; +lookup(12715) -> {"Lo","L"}; +lookup(12716) -> {"Lo","L"}; +lookup(12717) -> {"Lo","L"}; +lookup(12718) -> {"Lo","L"}; +lookup(12719) -> {"Lo","L"}; +lookup(12720) -> {"Lo","L"}; +lookup(12721) -> {"Lo","L"}; +lookup(12722) -> {"Lo","L"}; +lookup(12723) -> {"Lo","L"}; +lookup(12724) -> {"Lo","L"}; +lookup(12725) -> {"Lo","L"}; +lookup(12726) -> {"Lo","L"}; +lookup(12727) -> {"Lo","L"}; +lookup(12728) -> {"Lo","L"}; +lookup(12729) -> {"Lo","L"}; +lookup(12730) -> {"Lo","L"}; +lookup(12731) -> {"Lo","L"}; +lookup(12732) -> {"Lo","L"}; +lookup(12733) -> {"Lo","L"}; +lookup(12734) -> {"Lo","L"}; +lookup(12735) -> {"Lo","L"}; +lookup(12736) -> {"So","ON"}; +lookup(12737) -> {"So","ON"}; +lookup(12738) -> {"So","ON"}; +lookup(12739) -> {"So","ON"}; +lookup(12740) -> {"So","ON"}; +lookup(12741) -> {"So","ON"}; +lookup(12742) -> {"So","ON"}; +lookup(12743) -> {"So","ON"}; +lookup(12744) -> {"So","ON"}; +lookup(12745) -> {"So","ON"}; +lookup(12746) -> {"So","ON"}; +lookup(12747) -> {"So","ON"}; +lookup(12748) -> 
{"So","ON"}; +lookup(12749) -> {"So","ON"}; +lookup(12750) -> {"So","ON"}; +lookup(12751) -> {"So","ON"}; +lookup(12752) -> {"So","ON"}; +lookup(12753) -> {"So","ON"}; +lookup(12754) -> {"So","ON"}; +lookup(12755) -> {"So","ON"}; +lookup(12756) -> {"So","ON"}; +lookup(12757) -> {"So","ON"}; +lookup(12758) -> {"So","ON"}; +lookup(12759) -> {"So","ON"}; +lookup(12760) -> {"So","ON"}; +lookup(12761) -> {"So","ON"}; +lookup(12762) -> {"So","ON"}; +lookup(12763) -> {"So","ON"}; +lookup(12764) -> {"So","ON"}; +lookup(12765) -> {"So","ON"}; +lookup(12766) -> {"So","ON"}; +lookup(12767) -> {"So","ON"}; +lookup(12768) -> {"So","ON"}; +lookup(12769) -> {"So","ON"}; +lookup(12770) -> {"So","ON"}; +lookup(12771) -> {"So","ON"}; +lookup(12784) -> {"Lo","L"}; +lookup(12785) -> {"Lo","L"}; +lookup(12786) -> {"Lo","L"}; +lookup(12787) -> {"Lo","L"}; +lookup(12788) -> {"Lo","L"}; +lookup(12789) -> {"Lo","L"}; +lookup(12790) -> {"Lo","L"}; +lookup(12791) -> {"Lo","L"}; +lookup(12792) -> {"Lo","L"}; +lookup(12793) -> {"Lo","L"}; +lookup(12794) -> {"Lo","L"}; +lookup(12795) -> {"Lo","L"}; +lookup(12796) -> {"Lo","L"}; +lookup(12797) -> {"Lo","L"}; +lookup(12798) -> {"Lo","L"}; +lookup(12799) -> {"Lo","L"}; +lookup(12800) -> {"So","L"}; +lookup(12801) -> {"So","L"}; +lookup(12802) -> {"So","L"}; +lookup(12803) -> {"So","L"}; +lookup(12804) -> {"So","L"}; +lookup(12805) -> {"So","L"}; +lookup(12806) -> {"So","L"}; +lookup(12807) -> {"So","L"}; +lookup(12808) -> {"So","L"}; +lookup(12809) -> {"So","L"}; +lookup(12810) -> {"So","L"}; +lookup(12811) -> {"So","L"}; +lookup(12812) -> {"So","L"}; +lookup(12813) -> {"So","L"}; +lookup(12814) -> {"So","L"}; +lookup(12815) -> {"So","L"}; +lookup(12816) -> {"So","L"}; +lookup(12817) -> {"So","L"}; +lookup(12818) -> {"So","L"}; +lookup(12819) -> {"So","L"}; +lookup(12820) -> {"So","L"}; +lookup(12821) -> {"So","L"}; +lookup(12822) -> {"So","L"}; +lookup(12823) -> {"So","L"}; +lookup(12824) -> {"So","L"}; +lookup(12825) -> {"So","L"}; 
+lookup(12826) -> {"So","L"}; +lookup(12827) -> {"So","L"}; +lookup(12828) -> {"So","L"}; +lookup(12829) -> {"So","ON"}; +lookup(12830) -> {"So","ON"}; +lookup(12832) -> {"No","L"}; +lookup(12833) -> {"No","L"}; +lookup(12834) -> {"No","L"}; +lookup(12835) -> {"No","L"}; +lookup(12836) -> {"No","L"}; +lookup(12837) -> {"No","L"}; +lookup(12838) -> {"No","L"}; +lookup(12839) -> {"No","L"}; +lookup(12840) -> {"No","L"}; +lookup(12841) -> {"No","L"}; +lookup(12842) -> {"So","L"}; +lookup(12843) -> {"So","L"}; +lookup(12844) -> {"So","L"}; +lookup(12845) -> {"So","L"}; +lookup(12846) -> {"So","L"}; +lookup(12847) -> {"So","L"}; +lookup(12848) -> {"So","L"}; +lookup(12849) -> {"So","L"}; +lookup(12850) -> {"So","L"}; +lookup(12851) -> {"So","L"}; +lookup(12852) -> {"So","L"}; +lookup(12853) -> {"So","L"}; +lookup(12854) -> {"So","L"}; +lookup(12855) -> {"So","L"}; +lookup(12856) -> {"So","L"}; +lookup(12857) -> {"So","L"}; +lookup(12858) -> {"So","L"}; +lookup(12859) -> {"So","L"}; +lookup(12860) -> {"So","L"}; +lookup(12861) -> {"So","L"}; +lookup(12862) -> {"So","L"}; +lookup(12863) -> {"So","L"}; +lookup(12864) -> {"So","L"}; +lookup(12865) -> {"So","L"}; +lookup(12866) -> {"So","L"}; +lookup(12867) -> {"So","L"}; +lookup(12868) -> {"So","L"}; +lookup(12869) -> {"So","L"}; +lookup(12870) -> {"So","L"}; +lookup(12871) -> {"So","L"}; +lookup(12872) -> {"No","L"}; +lookup(12873) -> {"No","L"}; +lookup(12874) -> {"No","L"}; +lookup(12875) -> {"No","L"}; +lookup(12876) -> {"No","L"}; +lookup(12877) -> {"No","L"}; +lookup(12878) -> {"No","L"}; +lookup(12879) -> {"No","L"}; +lookup(12880) -> {"So","ON"}; +lookup(12881) -> {"No","ON"}; +lookup(12882) -> {"No","ON"}; +lookup(12883) -> {"No","ON"}; +lookup(12884) -> {"No","ON"}; +lookup(12885) -> {"No","ON"}; +lookup(12886) -> {"No","ON"}; +lookup(12887) -> {"No","ON"}; +lookup(12888) -> {"No","ON"}; +lookup(12889) -> {"No","ON"}; +lookup(12890) -> {"No","ON"}; +lookup(12891) -> {"No","ON"}; +lookup(12892) -> {"No","ON"}; 
+lookup(12893) -> {"No","ON"}; +lookup(12894) -> {"No","ON"}; +lookup(12895) -> {"No","ON"}; +lookup(12896) -> {"So","L"}; +lookup(12897) -> {"So","L"}; +lookup(12898) -> {"So","L"}; +lookup(12899) -> {"So","L"}; +lookup(12900) -> {"So","L"}; +lookup(12901) -> {"So","L"}; +lookup(12902) -> {"So","L"}; +lookup(12903) -> {"So","L"}; +lookup(12904) -> {"So","L"}; +lookup(12905) -> {"So","L"}; +lookup(12906) -> {"So","L"}; +lookup(12907) -> {"So","L"}; +lookup(12908) -> {"So","L"}; +lookup(12909) -> {"So","L"}; +lookup(12910) -> {"So","L"}; +lookup(12911) -> {"So","L"}; +lookup(12912) -> {"So","L"}; +lookup(12913) -> {"So","L"}; +lookup(12914) -> {"So","L"}; +lookup(12915) -> {"So","L"}; +lookup(12916) -> {"So","L"}; +lookup(12917) -> {"So","L"}; +lookup(12918) -> {"So","L"}; +lookup(12919) -> {"So","L"}; +lookup(12920) -> {"So","L"}; +lookup(12921) -> {"So","L"}; +lookup(12922) -> {"So","L"}; +lookup(12923) -> {"So","L"}; +lookup(12924) -> {"So","ON"}; +lookup(12925) -> {"So","ON"}; +lookup(12926) -> {"So","ON"}; +lookup(12927) -> {"So","L"}; +lookup(12928) -> {"No","L"}; +lookup(12929) -> {"No","L"}; +lookup(12930) -> {"No","L"}; +lookup(12931) -> {"No","L"}; +lookup(12932) -> {"No","L"}; +lookup(12933) -> {"No","L"}; +lookup(12934) -> {"No","L"}; +lookup(12935) -> {"No","L"}; +lookup(12936) -> {"No","L"}; +lookup(12937) -> {"No","L"}; +lookup(12938) -> {"So","L"}; +lookup(12939) -> {"So","L"}; +lookup(12940) -> {"So","L"}; +lookup(12941) -> {"So","L"}; +lookup(12942) -> {"So","L"}; +lookup(12943) -> {"So","L"}; +lookup(12944) -> {"So","L"}; +lookup(12945) -> {"So","L"}; +lookup(12946) -> {"So","L"}; +lookup(12947) -> {"So","L"}; +lookup(12948) -> {"So","L"}; +lookup(12949) -> {"So","L"}; +lookup(12950) -> {"So","L"}; +lookup(12951) -> {"So","L"}; +lookup(12952) -> {"So","L"}; +lookup(12953) -> {"So","L"}; +lookup(12954) -> {"So","L"}; +lookup(12955) -> {"So","L"}; +lookup(12956) -> {"So","L"}; +lookup(12957) -> {"So","L"}; +lookup(12958) -> {"So","L"}; 
+lookup(12959) -> {"So","L"}; +lookup(12960) -> {"So","L"}; +lookup(12961) -> {"So","L"}; +lookup(12962) -> {"So","L"}; +lookup(12963) -> {"So","L"}; +lookup(12964) -> {"So","L"}; +lookup(12965) -> {"So","L"}; +lookup(12966) -> {"So","L"}; +lookup(12967) -> {"So","L"}; +lookup(12968) -> {"So","L"}; +lookup(12969) -> {"So","L"}; +lookup(12970) -> {"So","L"}; +lookup(12971) -> {"So","L"}; +lookup(12972) -> {"So","L"}; +lookup(12973) -> {"So","L"}; +lookup(12974) -> {"So","L"}; +lookup(12975) -> {"So","L"}; +lookup(12976) -> {"So","L"}; +lookup(12977) -> {"No","ON"}; +lookup(12978) -> {"No","ON"}; +lookup(12979) -> {"No","ON"}; +lookup(12980) -> {"No","ON"}; +lookup(12981) -> {"No","ON"}; +lookup(12982) -> {"No","ON"}; +lookup(12983) -> {"No","ON"}; +lookup(12984) -> {"No","ON"}; +lookup(12985) -> {"No","ON"}; +lookup(12986) -> {"No","ON"}; +lookup(12987) -> {"No","ON"}; +lookup(12988) -> {"No","ON"}; +lookup(12989) -> {"No","ON"}; +lookup(12990) -> {"No","ON"}; +lookup(12991) -> {"No","ON"}; +lookup(12992) -> {"So","L"}; +lookup(12993) -> {"So","L"}; +lookup(12994) -> {"So","L"}; +lookup(12995) -> {"So","L"}; +lookup(12996) -> {"So","L"}; +lookup(12997) -> {"So","L"}; +lookup(12998) -> {"So","L"}; +lookup(12999) -> {"So","L"}; +lookup(13000) -> {"So","L"}; +lookup(13001) -> {"So","L"}; +lookup(13002) -> {"So","L"}; +lookup(13003) -> {"So","L"}; +lookup(13004) -> {"So","ON"}; +lookup(13005) -> {"So","ON"}; +lookup(13006) -> {"So","ON"}; +lookup(13007) -> {"So","ON"}; +lookup(13008) -> {"So","L"}; +lookup(13009) -> {"So","L"}; +lookup(13010) -> {"So","L"}; +lookup(13011) -> {"So","L"}; +lookup(13012) -> {"So","L"}; +lookup(13013) -> {"So","L"}; +lookup(13014) -> {"So","L"}; +lookup(13015) -> {"So","L"}; +lookup(13016) -> {"So","L"}; +lookup(13017) -> {"So","L"}; +lookup(13018) -> {"So","L"}; +lookup(13019) -> {"So","L"}; +lookup(13020) -> {"So","L"}; +lookup(13021) -> {"So","L"}; +lookup(13022) -> {"So","L"}; +lookup(13023) -> {"So","L"}; +lookup(13024) -> {"So","L"}; 
+lookup(13025) -> {"So","L"}; +lookup(13026) -> {"So","L"}; +lookup(13027) -> {"So","L"}; +lookup(13028) -> {"So","L"}; +lookup(13029) -> {"So","L"}; +lookup(13030) -> {"So","L"}; +lookup(13031) -> {"So","L"}; +lookup(13032) -> {"So","L"}; +lookup(13033) -> {"So","L"}; +lookup(13034) -> {"So","L"}; +lookup(13035) -> {"So","L"}; +lookup(13036) -> {"So","L"}; +lookup(13037) -> {"So","L"}; +lookup(13038) -> {"So","L"}; +lookup(13039) -> {"So","L"}; +lookup(13040) -> {"So","L"}; +lookup(13041) -> {"So","L"}; +lookup(13042) -> {"So","L"}; +lookup(13043) -> {"So","L"}; +lookup(13044) -> {"So","L"}; +lookup(13045) -> {"So","L"}; +lookup(13046) -> {"So","L"}; +lookup(13047) -> {"So","L"}; +lookup(13048) -> {"So","L"}; +lookup(13049) -> {"So","L"}; +lookup(13050) -> {"So","L"}; +lookup(13051) -> {"So","L"}; +lookup(13052) -> {"So","L"}; +lookup(13053) -> {"So","L"}; +lookup(13054) -> {"So","L"}; +lookup(13055) -> {"So","L"}; +lookup(13056) -> {"So","L"}; +lookup(13057) -> {"So","L"}; +lookup(13058) -> {"So","L"}; +lookup(13059) -> {"So","L"}; +lookup(13060) -> {"So","L"}; +lookup(13061) -> {"So","L"}; +lookup(13062) -> {"So","L"}; +lookup(13063) -> {"So","L"}; +lookup(13064) -> {"So","L"}; +lookup(13065) -> {"So","L"}; +lookup(13066) -> {"So","L"}; +lookup(13067) -> {"So","L"}; +lookup(13068) -> {"So","L"}; +lookup(13069) -> {"So","L"}; +lookup(13070) -> {"So","L"}; +lookup(13071) -> {"So","L"}; +lookup(13072) -> {"So","L"}; +lookup(13073) -> {"So","L"}; +lookup(13074) -> {"So","L"}; +lookup(13075) -> {"So","L"}; +lookup(13076) -> {"So","L"}; +lookup(13077) -> {"So","L"}; +lookup(13078) -> {"So","L"}; +lookup(13079) -> {"So","L"}; +lookup(13080) -> {"So","L"}; +lookup(13081) -> {"So","L"}; +lookup(13082) -> {"So","L"}; +lookup(13083) -> {"So","L"}; +lookup(13084) -> {"So","L"}; +lookup(13085) -> {"So","L"}; +lookup(13086) -> {"So","L"}; +lookup(13087) -> {"So","L"}; +lookup(13088) -> {"So","L"}; +lookup(13089) -> {"So","L"}; +lookup(13090) -> {"So","L"}; +lookup(13091) -> 
{"So","L"}; +lookup(13092) -> {"So","L"}; +lookup(13093) -> {"So","L"}; +lookup(13094) -> {"So","L"}; +lookup(13095) -> {"So","L"}; +lookup(13096) -> {"So","L"}; +lookup(13097) -> {"So","L"}; +lookup(13098) -> {"So","L"}; +lookup(13099) -> {"So","L"}; +lookup(13100) -> {"So","L"}; +lookup(13101) -> {"So","L"}; +lookup(13102) -> {"So","L"}; +lookup(13103) -> {"So","L"}; +lookup(13104) -> {"So","L"}; +lookup(13105) -> {"So","L"}; +lookup(13106) -> {"So","L"}; +lookup(13107) -> {"So","L"}; +lookup(13108) -> {"So","L"}; +lookup(13109) -> {"So","L"}; +lookup(13110) -> {"So","L"}; +lookup(13111) -> {"So","L"}; +lookup(13112) -> {"So","L"}; +lookup(13113) -> {"So","L"}; +lookup(13114) -> {"So","L"}; +lookup(13115) -> {"So","L"}; +lookup(13116) -> {"So","L"}; +lookup(13117) -> {"So","L"}; +lookup(13118) -> {"So","L"}; +lookup(13119) -> {"So","L"}; +lookup(13120) -> {"So","L"}; +lookup(13121) -> {"So","L"}; +lookup(13122) -> {"So","L"}; +lookup(13123) -> {"So","L"}; +lookup(13124) -> {"So","L"}; +lookup(13125) -> {"So","L"}; +lookup(13126) -> {"So","L"}; +lookup(13127) -> {"So","L"}; +lookup(13128) -> {"So","L"}; +lookup(13129) -> {"So","L"}; +lookup(13130) -> {"So","L"}; +lookup(13131) -> {"So","L"}; +lookup(13132) -> {"So","L"}; +lookup(13133) -> {"So","L"}; +lookup(13134) -> {"So","L"}; +lookup(13135) -> {"So","L"}; +lookup(13136) -> {"So","L"}; +lookup(13137) -> {"So","L"}; +lookup(13138) -> {"So","L"}; +lookup(13139) -> {"So","L"}; +lookup(13140) -> {"So","L"}; +lookup(13141) -> {"So","L"}; +lookup(13142) -> {"So","L"}; +lookup(13143) -> {"So","L"}; +lookup(13144) -> {"So","L"}; +lookup(13145) -> {"So","L"}; +lookup(13146) -> {"So","L"}; +lookup(13147) -> {"So","L"}; +lookup(13148) -> {"So","L"}; +lookup(13149) -> {"So","L"}; +lookup(13150) -> {"So","L"}; +lookup(13151) -> {"So","L"}; +lookup(13152) -> {"So","L"}; +lookup(13153) -> {"So","L"}; +lookup(13154) -> {"So","L"}; +lookup(13155) -> {"So","L"}; +lookup(13156) -> {"So","L"}; +lookup(13157) -> {"So","L"}; 
+lookup(13158) -> {"So","L"}; +lookup(13159) -> {"So","L"}; +lookup(13160) -> {"So","L"}; +lookup(13161) -> {"So","L"}; +lookup(13162) -> {"So","L"}; +lookup(13163) -> {"So","L"}; +lookup(13164) -> {"So","L"}; +lookup(13165) -> {"So","L"}; +lookup(13166) -> {"So","L"}; +lookup(13167) -> {"So","L"}; +lookup(13168) -> {"So","L"}; +lookup(13169) -> {"So","L"}; +lookup(13170) -> {"So","L"}; +lookup(13171) -> {"So","L"}; +lookup(13172) -> {"So","L"}; +lookup(13173) -> {"So","L"}; +lookup(13174) -> {"So","L"}; +lookup(13175) -> {"So","ON"}; +lookup(13176) -> {"So","ON"}; +lookup(13177) -> {"So","ON"}; +lookup(13178) -> {"So","ON"}; +lookup(13179) -> {"So","L"}; +lookup(13180) -> {"So","L"}; +lookup(13181) -> {"So","L"}; +lookup(13182) -> {"So","L"}; +lookup(13183) -> {"So","L"}; +lookup(13184) -> {"So","L"}; +lookup(13185) -> {"So","L"}; +lookup(13186) -> {"So","L"}; +lookup(13187) -> {"So","L"}; +lookup(13188) -> {"So","L"}; +lookup(13189) -> {"So","L"}; +lookup(13190) -> {"So","L"}; +lookup(13191) -> {"So","L"}; +lookup(13192) -> {"So","L"}; +lookup(13193) -> {"So","L"}; +lookup(13194) -> {"So","L"}; +lookup(13195) -> {"So","L"}; +lookup(13196) -> {"So","L"}; +lookup(13197) -> {"So","L"}; +lookup(13198) -> {"So","L"}; +lookup(13199) -> {"So","L"}; +lookup(13200) -> {"So","L"}; +lookup(13201) -> {"So","L"}; +lookup(13202) -> {"So","L"}; +lookup(13203) -> {"So","L"}; +lookup(13204) -> {"So","L"}; +lookup(13205) -> {"So","L"}; +lookup(13206) -> {"So","L"}; +lookup(13207) -> {"So","L"}; +lookup(13208) -> {"So","L"}; +lookup(13209) -> {"So","L"}; +lookup(13210) -> {"So","L"}; +lookup(13211) -> {"So","L"}; +lookup(13212) -> {"So","L"}; +lookup(13213) -> {"So","L"}; +lookup(13214) -> {"So","L"}; +lookup(13215) -> {"So","L"}; +lookup(13216) -> {"So","L"}; +lookup(13217) -> {"So","L"}; +lookup(13218) -> {"So","L"}; +lookup(13219) -> {"So","L"}; +lookup(13220) -> {"So","L"}; +lookup(13221) -> {"So","L"}; +lookup(13222) -> {"So","L"}; +lookup(13223) -> {"So","L"}; +lookup(13224) 
-> {"So","L"}; +lookup(13225) -> {"So","L"}; +lookup(13226) -> {"So","L"}; +lookup(13227) -> {"So","L"}; +lookup(13228) -> {"So","L"}; +lookup(13229) -> {"So","L"}; +lookup(13230) -> {"So","L"}; +lookup(13231) -> {"So","L"}; +lookup(13232) -> {"So","L"}; +lookup(13233) -> {"So","L"}; +lookup(13234) -> {"So","L"}; +lookup(13235) -> {"So","L"}; +lookup(13236) -> {"So","L"}; +lookup(13237) -> {"So","L"}; +lookup(13238) -> {"So","L"}; +lookup(13239) -> {"So","L"}; +lookup(13240) -> {"So","L"}; +lookup(13241) -> {"So","L"}; +lookup(13242) -> {"So","L"}; +lookup(13243) -> {"So","L"}; +lookup(13244) -> {"So","L"}; +lookup(13245) -> {"So","L"}; +lookup(13246) -> {"So","L"}; +lookup(13247) -> {"So","L"}; +lookup(13248) -> {"So","L"}; +lookup(13249) -> {"So","L"}; +lookup(13250) -> {"So","L"}; +lookup(13251) -> {"So","L"}; +lookup(13252) -> {"So","L"}; +lookup(13253) -> {"So","L"}; +lookup(13254) -> {"So","L"}; +lookup(13255) -> {"So","L"}; +lookup(13256) -> {"So","L"}; +lookup(13257) -> {"So","L"}; +lookup(13258) -> {"So","L"}; +lookup(13259) -> {"So","L"}; +lookup(13260) -> {"So","L"}; +lookup(13261) -> {"So","L"}; +lookup(13262) -> {"So","L"}; +lookup(13263) -> {"So","L"}; +lookup(13264) -> {"So","L"}; +lookup(13265) -> {"So","L"}; +lookup(13266) -> {"So","L"}; +lookup(13267) -> {"So","L"}; +lookup(13268) -> {"So","L"}; +lookup(13269) -> {"So","L"}; +lookup(13270) -> {"So","L"}; +lookup(13271) -> {"So","L"}; +lookup(13272) -> {"So","L"}; +lookup(13273) -> {"So","L"}; +lookup(13274) -> {"So","L"}; +lookup(13275) -> {"So","L"}; +lookup(13276) -> {"So","L"}; +lookup(13277) -> {"So","L"}; +lookup(13278) -> {"So","ON"}; +lookup(13279) -> {"So","ON"}; +lookup(13280) -> {"So","L"}; +lookup(13281) -> {"So","L"}; +lookup(13282) -> {"So","L"}; +lookup(13283) -> {"So","L"}; +lookup(13284) -> {"So","L"}; +lookup(13285) -> {"So","L"}; +lookup(13286) -> {"So","L"}; +lookup(13287) -> {"So","L"}; +lookup(13288) -> {"So","L"}; +lookup(13289) -> {"So","L"}; +lookup(13290) -> {"So","L"}; 
+lookup(13291) -> {"So","L"}; +lookup(13292) -> {"So","L"}; +lookup(13293) -> {"So","L"}; +lookup(13294) -> {"So","L"}; +lookup(13295) -> {"So","L"}; +lookup(13296) -> {"So","L"}; +lookup(13297) -> {"So","L"}; +lookup(13298) -> {"So","L"}; +lookup(13299) -> {"So","L"}; +lookup(13300) -> {"So","L"}; +lookup(13301) -> {"So","L"}; +lookup(13302) -> {"So","L"}; +lookup(13303) -> {"So","L"}; +lookup(13304) -> {"So","L"}; +lookup(13305) -> {"So","L"}; +lookup(13306) -> {"So","L"}; +lookup(13307) -> {"So","L"}; +lookup(13308) -> {"So","L"}; +lookup(13309) -> {"So","L"}; +lookup(13310) -> {"So","L"}; +lookup(13311) -> {"So","ON"}; +lookup(13312) -> {"Lo","L"}; +lookup(19903) -> {"Lo","L"}; +lookup(19904) -> {"So","ON"}; +lookup(19905) -> {"So","ON"}; +lookup(19906) -> {"So","ON"}; +lookup(19907) -> {"So","ON"}; +lookup(19908) -> {"So","ON"}; +lookup(19909) -> {"So","ON"}; +lookup(19910) -> {"So","ON"}; +lookup(19911) -> {"So","ON"}; +lookup(19912) -> {"So","ON"}; +lookup(19913) -> {"So","ON"}; +lookup(19914) -> {"So","ON"}; +lookup(19915) -> {"So","ON"}; +lookup(19916) -> {"So","ON"}; +lookup(19917) -> {"So","ON"}; +lookup(19918) -> {"So","ON"}; +lookup(19919) -> {"So","ON"}; +lookup(19920) -> {"So","ON"}; +lookup(19921) -> {"So","ON"}; +lookup(19922) -> {"So","ON"}; +lookup(19923) -> {"So","ON"}; +lookup(19924) -> {"So","ON"}; +lookup(19925) -> {"So","ON"}; +lookup(19926) -> {"So","ON"}; +lookup(19927) -> {"So","ON"}; +lookup(19928) -> {"So","ON"}; +lookup(19929) -> {"So","ON"}; +lookup(19930) -> {"So","ON"}; +lookup(19931) -> {"So","ON"}; +lookup(19932) -> {"So","ON"}; +lookup(19933) -> {"So","ON"}; +lookup(19934) -> {"So","ON"}; +lookup(19935) -> {"So","ON"}; +lookup(19936) -> {"So","ON"}; +lookup(19937) -> {"So","ON"}; +lookup(19938) -> {"So","ON"}; +lookup(19939) -> {"So","ON"}; +lookup(19940) -> {"So","ON"}; +lookup(19941) -> {"So","ON"}; +lookup(19942) -> {"So","ON"}; +lookup(19943) -> {"So","ON"}; +lookup(19944) -> {"So","ON"}; +lookup(19945) -> {"So","ON"}; 
+lookup(19946) -> {"So","ON"}; +lookup(19947) -> {"So","ON"}; +lookup(19948) -> {"So","ON"}; +lookup(19949) -> {"So","ON"}; +lookup(19950) -> {"So","ON"}; +lookup(19951) -> {"So","ON"}; +lookup(19952) -> {"So","ON"}; +lookup(19953) -> {"So","ON"}; +lookup(19954) -> {"So","ON"}; +lookup(19955) -> {"So","ON"}; +lookup(19956) -> {"So","ON"}; +lookup(19957) -> {"So","ON"}; +lookup(19958) -> {"So","ON"}; +lookup(19959) -> {"So","ON"}; +lookup(19960) -> {"So","ON"}; +lookup(19961) -> {"So","ON"}; +lookup(19962) -> {"So","ON"}; +lookup(19963) -> {"So","ON"}; +lookup(19964) -> {"So","ON"}; +lookup(19965) -> {"So","ON"}; +lookup(19966) -> {"So","ON"}; +lookup(19967) -> {"So","ON"}; +lookup(19968) -> {"Lo","L"}; +lookup(40956) -> {"Lo","L"}; +lookup(40960) -> {"Lo","L"}; +lookup(40961) -> {"Lo","L"}; +lookup(40962) -> {"Lo","L"}; +lookup(40963) -> {"Lo","L"}; +lookup(40964) -> {"Lo","L"}; +lookup(40965) -> {"Lo","L"}; +lookup(40966) -> {"Lo","L"}; +lookup(40967) -> {"Lo","L"}; +lookup(40968) -> {"Lo","L"}; +lookup(40969) -> {"Lo","L"}; +lookup(40970) -> {"Lo","L"}; +lookup(40971) -> {"Lo","L"}; +lookup(40972) -> {"Lo","L"}; +lookup(40973) -> {"Lo","L"}; +lookup(40974) -> {"Lo","L"}; +lookup(40975) -> {"Lo","L"}; +lookup(40976) -> {"Lo","L"}; +lookup(40977) -> {"Lo","L"}; +lookup(40978) -> {"Lo","L"}; +lookup(40979) -> {"Lo","L"}; +lookup(40980) -> {"Lo","L"}; +lookup(40981) -> {"Lm","L"}; +lookup(40982) -> {"Lo","L"}; +lookup(40983) -> {"Lo","L"}; +lookup(40984) -> {"Lo","L"}; +lookup(40985) -> {"Lo","L"}; +lookup(40986) -> {"Lo","L"}; +lookup(40987) -> {"Lo","L"}; +lookup(40988) -> {"Lo","L"}; +lookup(40989) -> {"Lo","L"}; +lookup(40990) -> {"Lo","L"}; +lookup(40991) -> {"Lo","L"}; +lookup(40992) -> {"Lo","L"}; +lookup(40993) -> {"Lo","L"}; +lookup(40994) -> {"Lo","L"}; +lookup(40995) -> {"Lo","L"}; +lookup(40996) -> {"Lo","L"}; +lookup(40997) -> {"Lo","L"}; +lookup(40998) -> {"Lo","L"}; +lookup(40999) -> {"Lo","L"}; +lookup(41000) -> {"Lo","L"}; +lookup(41001) -> 
{"Lo","L"}; +lookup(41002) -> {"Lo","L"}; +lookup(41003) -> {"Lo","L"}; +lookup(41004) -> {"Lo","L"}; +lookup(41005) -> {"Lo","L"}; +lookup(41006) -> {"Lo","L"}; +lookup(41007) -> {"Lo","L"}; +lookup(41008) -> {"Lo","L"}; +lookup(41009) -> {"Lo","L"}; +lookup(41010) -> {"Lo","L"}; +lookup(41011) -> {"Lo","L"}; +lookup(41012) -> {"Lo","L"}; +lookup(41013) -> {"Lo","L"}; +lookup(41014) -> {"Lo","L"}; +lookup(41015) -> {"Lo","L"}; +lookup(41016) -> {"Lo","L"}; +lookup(41017) -> {"Lo","L"}; +lookup(41018) -> {"Lo","L"}; +lookup(41019) -> {"Lo","L"}; +lookup(41020) -> {"Lo","L"}; +lookup(41021) -> {"Lo","L"}; +lookup(41022) -> {"Lo","L"}; +lookup(41023) -> {"Lo","L"}; +lookup(41024) -> {"Lo","L"}; +lookup(41025) -> {"Lo","L"}; +lookup(41026) -> {"Lo","L"}; +lookup(41027) -> {"Lo","L"}; +lookup(41028) -> {"Lo","L"}; +lookup(41029) -> {"Lo","L"}; +lookup(41030) -> {"Lo","L"}; +lookup(41031) -> {"Lo","L"}; +lookup(41032) -> {"Lo","L"}; +lookup(41033) -> {"Lo","L"}; +lookup(41034) -> {"Lo","L"}; +lookup(41035) -> {"Lo","L"}; +lookup(41036) -> {"Lo","L"}; +lookup(41037) -> {"Lo","L"}; +lookup(41038) -> {"Lo","L"}; +lookup(41039) -> {"Lo","L"}; +lookup(41040) -> {"Lo","L"}; +lookup(41041) -> {"Lo","L"}; +lookup(41042) -> {"Lo","L"}; +lookup(41043) -> {"Lo","L"}; +lookup(41044) -> {"Lo","L"}; +lookup(41045) -> {"Lo","L"}; +lookup(41046) -> {"Lo","L"}; +lookup(41047) -> {"Lo","L"}; +lookup(41048) -> {"Lo","L"}; +lookup(41049) -> {"Lo","L"}; +lookup(41050) -> {"Lo","L"}; +lookup(41051) -> {"Lo","L"}; +lookup(41052) -> {"Lo","L"}; +lookup(41053) -> {"Lo","L"}; +lookup(41054) -> {"Lo","L"}; +lookup(41055) -> {"Lo","L"}; +lookup(41056) -> {"Lo","L"}; +lookup(41057) -> {"Lo","L"}; +lookup(41058) -> {"Lo","L"}; +lookup(41059) -> {"Lo","L"}; +lookup(41060) -> {"Lo","L"}; +lookup(41061) -> {"Lo","L"}; +lookup(41062) -> {"Lo","L"}; +lookup(41063) -> {"Lo","L"}; +lookup(41064) -> {"Lo","L"}; +lookup(41065) -> {"Lo","L"}; +lookup(41066) -> {"Lo","L"}; +lookup(41067) -> {"Lo","L"}; 
+lookup(41068) -> {"Lo","L"}; +lookup(41069) -> {"Lo","L"}; +lookup(41070) -> {"Lo","L"}; +lookup(41071) -> {"Lo","L"}; +lookup(41072) -> {"Lo","L"}; +lookup(41073) -> {"Lo","L"}; +lookup(41074) -> {"Lo","L"}; +lookup(41075) -> {"Lo","L"}; +lookup(41076) -> {"Lo","L"}; +lookup(41077) -> {"Lo","L"}; +lookup(41078) -> {"Lo","L"}; +lookup(41079) -> {"Lo","L"}; +lookup(41080) -> {"Lo","L"}; +lookup(41081) -> {"Lo","L"}; +lookup(41082) -> {"Lo","L"}; +lookup(41083) -> {"Lo","L"}; +lookup(41084) -> {"Lo","L"}; +lookup(41085) -> {"Lo","L"}; +lookup(41086) -> {"Lo","L"}; +lookup(41087) -> {"Lo","L"}; +lookup(41088) -> {"Lo","L"}; +lookup(41089) -> {"Lo","L"}; +lookup(41090) -> {"Lo","L"}; +lookup(41091) -> {"Lo","L"}; +lookup(41092) -> {"Lo","L"}; +lookup(41093) -> {"Lo","L"}; +lookup(41094) -> {"Lo","L"}; +lookup(41095) -> {"Lo","L"}; +lookup(41096) -> {"Lo","L"}; +lookup(41097) -> {"Lo","L"}; +lookup(41098) -> {"Lo","L"}; +lookup(41099) -> {"Lo","L"}; +lookup(41100) -> {"Lo","L"}; +lookup(41101) -> {"Lo","L"}; +lookup(41102) -> {"Lo","L"}; +lookup(41103) -> {"Lo","L"}; +lookup(41104) -> {"Lo","L"}; +lookup(41105) -> {"Lo","L"}; +lookup(41106) -> {"Lo","L"}; +lookup(41107) -> {"Lo","L"}; +lookup(41108) -> {"Lo","L"}; +lookup(41109) -> {"Lo","L"}; +lookup(41110) -> {"Lo","L"}; +lookup(41111) -> {"Lo","L"}; +lookup(41112) -> {"Lo","L"}; +lookup(41113) -> {"Lo","L"}; +lookup(41114) -> {"Lo","L"}; +lookup(41115) -> {"Lo","L"}; +lookup(41116) -> {"Lo","L"}; +lookup(41117) -> {"Lo","L"}; +lookup(41118) -> {"Lo","L"}; +lookup(41119) -> {"Lo","L"}; +lookup(41120) -> {"Lo","L"}; +lookup(41121) -> {"Lo","L"}; +lookup(41122) -> {"Lo","L"}; +lookup(41123) -> {"Lo","L"}; +lookup(41124) -> {"Lo","L"}; +lookup(41125) -> {"Lo","L"}; +lookup(41126) -> {"Lo","L"}; +lookup(41127) -> {"Lo","L"}; +lookup(41128) -> {"Lo","L"}; +lookup(41129) -> {"Lo","L"}; +lookup(41130) -> {"Lo","L"}; +lookup(41131) -> {"Lo","L"}; +lookup(41132) -> {"Lo","L"}; +lookup(41133) -> {"Lo","L"}; +lookup(41134) -> 
{"Lo","L"}; +lookup(41135) -> {"Lo","L"}; +lookup(41136) -> {"Lo","L"}; +lookup(41137) -> {"Lo","L"}; +lookup(41138) -> {"Lo","L"}; +lookup(41139) -> {"Lo","L"}; +lookup(41140) -> {"Lo","L"}; +lookup(41141) -> {"Lo","L"}; +lookup(41142) -> {"Lo","L"}; +lookup(41143) -> {"Lo","L"}; +lookup(41144) -> {"Lo","L"}; +lookup(41145) -> {"Lo","L"}; +lookup(41146) -> {"Lo","L"}; +lookup(41147) -> {"Lo","L"}; +lookup(41148) -> {"Lo","L"}; +lookup(41149) -> {"Lo","L"}; +lookup(41150) -> {"Lo","L"}; +lookup(41151) -> {"Lo","L"}; +lookup(41152) -> {"Lo","L"}; +lookup(41153) -> {"Lo","L"}; +lookup(41154) -> {"Lo","L"}; +lookup(41155) -> {"Lo","L"}; +lookup(41156) -> {"Lo","L"}; +lookup(41157) -> {"Lo","L"}; +lookup(41158) -> {"Lo","L"}; +lookup(41159) -> {"Lo","L"}; +lookup(41160) -> {"Lo","L"}; +lookup(41161) -> {"Lo","L"}; +lookup(41162) -> {"Lo","L"}; +lookup(41163) -> {"Lo","L"}; +lookup(41164) -> {"Lo","L"}; +lookup(41165) -> {"Lo","L"}; +lookup(41166) -> {"Lo","L"}; +lookup(41167) -> {"Lo","L"}; +lookup(41168) -> {"Lo","L"}; +lookup(41169) -> {"Lo","L"}; +lookup(41170) -> {"Lo","L"}; +lookup(41171) -> {"Lo","L"}; +lookup(41172) -> {"Lo","L"}; +lookup(41173) -> {"Lo","L"}; +lookup(41174) -> {"Lo","L"}; +lookup(41175) -> {"Lo","L"}; +lookup(41176) -> {"Lo","L"}; +lookup(41177) -> {"Lo","L"}; +lookup(41178) -> {"Lo","L"}; +lookup(41179) -> {"Lo","L"}; +lookup(41180) -> {"Lo","L"}; +lookup(41181) -> {"Lo","L"}; +lookup(41182) -> {"Lo","L"}; +lookup(41183) -> {"Lo","L"}; +lookup(41184) -> {"Lo","L"}; +lookup(41185) -> {"Lo","L"}; +lookup(41186) -> {"Lo","L"}; +lookup(41187) -> {"Lo","L"}; +lookup(41188) -> {"Lo","L"}; +lookup(41189) -> {"Lo","L"}; +lookup(41190) -> {"Lo","L"}; +lookup(41191) -> {"Lo","L"}; +lookup(41192) -> {"Lo","L"}; +lookup(41193) -> {"Lo","L"}; +lookup(41194) -> {"Lo","L"}; +lookup(41195) -> {"Lo","L"}; +lookup(41196) -> {"Lo","L"}; +lookup(41197) -> {"Lo","L"}; +lookup(41198) -> {"Lo","L"}; +lookup(41199) -> {"Lo","L"}; +lookup(41200) -> {"Lo","L"}; 
+lookup(41201) -> {"Lo","L"}; +lookup(41202) -> {"Lo","L"}; +lookup(41203) -> {"Lo","L"}; +lookup(41204) -> {"Lo","L"}; +lookup(41205) -> {"Lo","L"}; +lookup(41206) -> {"Lo","L"}; +lookup(41207) -> {"Lo","L"}; +lookup(41208) -> {"Lo","L"}; +lookup(41209) -> {"Lo","L"}; +lookup(41210) -> {"Lo","L"}; +lookup(41211) -> {"Lo","L"}; +lookup(41212) -> {"Lo","L"}; +lookup(41213) -> {"Lo","L"}; +lookup(41214) -> {"Lo","L"}; +lookup(41215) -> {"Lo","L"}; +lookup(41216) -> {"Lo","L"}; +lookup(41217) -> {"Lo","L"}; +lookup(41218) -> {"Lo","L"}; +lookup(41219) -> {"Lo","L"}; +lookup(41220) -> {"Lo","L"}; +lookup(41221) -> {"Lo","L"}; +lookup(41222) -> {"Lo","L"}; +lookup(41223) -> {"Lo","L"}; +lookup(41224) -> {"Lo","L"}; +lookup(41225) -> {"Lo","L"}; +lookup(41226) -> {"Lo","L"}; +lookup(41227) -> {"Lo","L"}; +lookup(41228) -> {"Lo","L"}; +lookup(41229) -> {"Lo","L"}; +lookup(41230) -> {"Lo","L"}; +lookup(41231) -> {"Lo","L"}; +lookup(41232) -> {"Lo","L"}; +lookup(41233) -> {"Lo","L"}; +lookup(41234) -> {"Lo","L"}; +lookup(41235) -> {"Lo","L"}; +lookup(41236) -> {"Lo","L"}; +lookup(41237) -> {"Lo","L"}; +lookup(41238) -> {"Lo","L"}; +lookup(41239) -> {"Lo","L"}; +lookup(41240) -> {"Lo","L"}; +lookup(41241) -> {"Lo","L"}; +lookup(41242) -> {"Lo","L"}; +lookup(41243) -> {"Lo","L"}; +lookup(41244) -> {"Lo","L"}; +lookup(41245) -> {"Lo","L"}; +lookup(41246) -> {"Lo","L"}; +lookup(41247) -> {"Lo","L"}; +lookup(41248) -> {"Lo","L"}; +lookup(41249) -> {"Lo","L"}; +lookup(41250) -> {"Lo","L"}; +lookup(41251) -> {"Lo","L"}; +lookup(41252) -> {"Lo","L"}; +lookup(41253) -> {"Lo","L"}; +lookup(41254) -> {"Lo","L"}; +lookup(41255) -> {"Lo","L"}; +lookup(41256) -> {"Lo","L"}; +lookup(41257) -> {"Lo","L"}; +lookup(41258) -> {"Lo","L"}; +lookup(41259) -> {"Lo","L"}; +lookup(41260) -> {"Lo","L"}; +lookup(41261) -> {"Lo","L"}; +lookup(41262) -> {"Lo","L"}; +lookup(41263) -> {"Lo","L"}; +lookup(41264) -> {"Lo","L"}; +lookup(41265) -> {"Lo","L"}; +lookup(41266) -> {"Lo","L"}; +lookup(41267) -> 
{"Lo","L"}; +lookup(41268) -> {"Lo","L"}; +lookup(41269) -> {"Lo","L"}; +lookup(41270) -> {"Lo","L"}; +lookup(41271) -> {"Lo","L"}; +lookup(41272) -> {"Lo","L"}; +lookup(41273) -> {"Lo","L"}; +lookup(41274) -> {"Lo","L"}; +lookup(41275) -> {"Lo","L"}; +lookup(41276) -> {"Lo","L"}; +lookup(41277) -> {"Lo","L"}; +lookup(41278) -> {"Lo","L"}; +lookup(41279) -> {"Lo","L"}; +lookup(41280) -> {"Lo","L"}; +lookup(41281) -> {"Lo","L"}; +lookup(41282) -> {"Lo","L"}; +lookup(41283) -> {"Lo","L"}; +lookup(41284) -> {"Lo","L"}; +lookup(41285) -> {"Lo","L"}; +lookup(41286) -> {"Lo","L"}; +lookup(41287) -> {"Lo","L"}; +lookup(41288) -> {"Lo","L"}; +lookup(41289) -> {"Lo","L"}; +lookup(41290) -> {"Lo","L"}; +lookup(41291) -> {"Lo","L"}; +lookup(41292) -> {"Lo","L"}; +lookup(41293) -> {"Lo","L"}; +lookup(41294) -> {"Lo","L"}; +lookup(41295) -> {"Lo","L"}; +lookup(41296) -> {"Lo","L"}; +lookup(41297) -> {"Lo","L"}; +lookup(41298) -> {"Lo","L"}; +lookup(41299) -> {"Lo","L"}; +lookup(41300) -> {"Lo","L"}; +lookup(41301) -> {"Lo","L"}; +lookup(41302) -> {"Lo","L"}; +lookup(41303) -> {"Lo","L"}; +lookup(41304) -> {"Lo","L"}; +lookup(41305) -> {"Lo","L"}; +lookup(41306) -> {"Lo","L"}; +lookup(41307) -> {"Lo","L"}; +lookup(41308) -> {"Lo","L"}; +lookup(41309) -> {"Lo","L"}; +lookup(41310) -> {"Lo","L"}; +lookup(41311) -> {"Lo","L"}; +lookup(41312) -> {"Lo","L"}; +lookup(41313) -> {"Lo","L"}; +lookup(41314) -> {"Lo","L"}; +lookup(41315) -> {"Lo","L"}; +lookup(41316) -> {"Lo","L"}; +lookup(41317) -> {"Lo","L"}; +lookup(41318) -> {"Lo","L"}; +lookup(41319) -> {"Lo","L"}; +lookup(41320) -> {"Lo","L"}; +lookup(41321) -> {"Lo","L"}; +lookup(41322) -> {"Lo","L"}; +lookup(41323) -> {"Lo","L"}; +lookup(41324) -> {"Lo","L"}; +lookup(41325) -> {"Lo","L"}; +lookup(41326) -> {"Lo","L"}; +lookup(41327) -> {"Lo","L"}; +lookup(41328) -> {"Lo","L"}; +lookup(41329) -> {"Lo","L"}; +lookup(41330) -> {"Lo","L"}; +lookup(41331) -> {"Lo","L"}; +lookup(41332) -> {"Lo","L"}; +lookup(41333) -> {"Lo","L"}; 
+lookup(41334) -> {"Lo","L"}; +lookup(41335) -> {"Lo","L"}; +lookup(41336) -> {"Lo","L"}; +lookup(41337) -> {"Lo","L"}; +lookup(41338) -> {"Lo","L"}; +lookup(41339) -> {"Lo","L"}; +lookup(41340) -> {"Lo","L"}; +lookup(41341) -> {"Lo","L"}; +lookup(41342) -> {"Lo","L"}; +lookup(41343) -> {"Lo","L"}; +lookup(41344) -> {"Lo","L"}; +lookup(41345) -> {"Lo","L"}; +lookup(41346) -> {"Lo","L"}; +lookup(41347) -> {"Lo","L"}; +lookup(41348) -> {"Lo","L"}; +lookup(41349) -> {"Lo","L"}; +lookup(41350) -> {"Lo","L"}; +lookup(41351) -> {"Lo","L"}; +lookup(41352) -> {"Lo","L"}; +lookup(41353) -> {"Lo","L"}; +lookup(41354) -> {"Lo","L"}; +lookup(41355) -> {"Lo","L"}; +lookup(41356) -> {"Lo","L"}; +lookup(41357) -> {"Lo","L"}; +lookup(41358) -> {"Lo","L"}; +lookup(41359) -> {"Lo","L"}; +lookup(41360) -> {"Lo","L"}; +lookup(41361) -> {"Lo","L"}; +lookup(41362) -> {"Lo","L"}; +lookup(41363) -> {"Lo","L"}; +lookup(41364) -> {"Lo","L"}; +lookup(41365) -> {"Lo","L"}; +lookup(41366) -> {"Lo","L"}; +lookup(41367) -> {"Lo","L"}; +lookup(41368) -> {"Lo","L"}; +lookup(41369) -> {"Lo","L"}; +lookup(41370) -> {"Lo","L"}; +lookup(41371) -> {"Lo","L"}; +lookup(41372) -> {"Lo","L"}; +lookup(41373) -> {"Lo","L"}; +lookup(41374) -> {"Lo","L"}; +lookup(41375) -> {"Lo","L"}; +lookup(41376) -> {"Lo","L"}; +lookup(41377) -> {"Lo","L"}; +lookup(41378) -> {"Lo","L"}; +lookup(41379) -> {"Lo","L"}; +lookup(41380) -> {"Lo","L"}; +lookup(41381) -> {"Lo","L"}; +lookup(41382) -> {"Lo","L"}; +lookup(41383) -> {"Lo","L"}; +lookup(41384) -> {"Lo","L"}; +lookup(41385) -> {"Lo","L"}; +lookup(41386) -> {"Lo","L"}; +lookup(41387) -> {"Lo","L"}; +lookup(41388) -> {"Lo","L"}; +lookup(41389) -> {"Lo","L"}; +lookup(41390) -> {"Lo","L"}; +lookup(41391) -> {"Lo","L"}; +lookup(41392) -> {"Lo","L"}; +lookup(41393) -> {"Lo","L"}; +lookup(41394) -> {"Lo","L"}; +lookup(41395) -> {"Lo","L"}; +lookup(41396) -> {"Lo","L"}; +lookup(41397) -> {"Lo","L"}; +lookup(41398) -> {"Lo","L"}; +lookup(41399) -> {"Lo","L"}; +lookup(41400) -> 
{"Lo","L"}; +lookup(41401) -> {"Lo","L"}; +lookup(41402) -> {"Lo","L"}; +lookup(41403) -> {"Lo","L"}; +lookup(41404) -> {"Lo","L"}; +lookup(41405) -> {"Lo","L"}; +lookup(41406) -> {"Lo","L"}; +lookup(41407) -> {"Lo","L"}; +lookup(41408) -> {"Lo","L"}; +lookup(41409) -> {"Lo","L"}; +lookup(41410) -> {"Lo","L"}; +lookup(41411) -> {"Lo","L"}; +lookup(41412) -> {"Lo","L"}; +lookup(41413) -> {"Lo","L"}; +lookup(41414) -> {"Lo","L"}; +lookup(41415) -> {"Lo","L"}; +lookup(41416) -> {"Lo","L"}; +lookup(41417) -> {"Lo","L"}; +lookup(41418) -> {"Lo","L"}; +lookup(41419) -> {"Lo","L"}; +lookup(41420) -> {"Lo","L"}; +lookup(41421) -> {"Lo","L"}; +lookup(41422) -> {"Lo","L"}; +lookup(41423) -> {"Lo","L"}; +lookup(41424) -> {"Lo","L"}; +lookup(41425) -> {"Lo","L"}; +lookup(41426) -> {"Lo","L"}; +lookup(41427) -> {"Lo","L"}; +lookup(41428) -> {"Lo","L"}; +lookup(41429) -> {"Lo","L"}; +lookup(41430) -> {"Lo","L"}; +lookup(41431) -> {"Lo","L"}; +lookup(41432) -> {"Lo","L"}; +lookup(41433) -> {"Lo","L"}; +lookup(41434) -> {"Lo","L"}; +lookup(41435) -> {"Lo","L"}; +lookup(41436) -> {"Lo","L"}; +lookup(41437) -> {"Lo","L"}; +lookup(41438) -> {"Lo","L"}; +lookup(41439) -> {"Lo","L"}; +lookup(41440) -> {"Lo","L"}; +lookup(41441) -> {"Lo","L"}; +lookup(41442) -> {"Lo","L"}; +lookup(41443) -> {"Lo","L"}; +lookup(41444) -> {"Lo","L"}; +lookup(41445) -> {"Lo","L"}; +lookup(41446) -> {"Lo","L"}; +lookup(41447) -> {"Lo","L"}; +lookup(41448) -> {"Lo","L"}; +lookup(41449) -> {"Lo","L"}; +lookup(41450) -> {"Lo","L"}; +lookup(41451) -> {"Lo","L"}; +lookup(41452) -> {"Lo","L"}; +lookup(41453) -> {"Lo","L"}; +lookup(41454) -> {"Lo","L"}; +lookup(41455) -> {"Lo","L"}; +lookup(41456) -> {"Lo","L"}; +lookup(41457) -> {"Lo","L"}; +lookup(41458) -> {"Lo","L"}; +lookup(41459) -> {"Lo","L"}; +lookup(41460) -> {"Lo","L"}; +lookup(41461) -> {"Lo","L"}; +lookup(41462) -> {"Lo","L"}; +lookup(41463) -> {"Lo","L"}; +lookup(41464) -> {"Lo","L"}; +lookup(41465) -> {"Lo","L"}; +lookup(41466) -> {"Lo","L"}; 
+lookup(41467) -> {"Lo","L"}; +lookup(41468) -> {"Lo","L"}; +lookup(41469) -> {"Lo","L"}; +lookup(41470) -> {"Lo","L"}; +lookup(41471) -> {"Lo","L"}; +lookup(41472) -> {"Lo","L"}; +lookup(41473) -> {"Lo","L"}; +lookup(41474) -> {"Lo","L"}; +lookup(41475) -> {"Lo","L"}; +lookup(41476) -> {"Lo","L"}; +lookup(41477) -> {"Lo","L"}; +lookup(41478) -> {"Lo","L"}; +lookup(41479) -> {"Lo","L"}; +lookup(41480) -> {"Lo","L"}; +lookup(41481) -> {"Lo","L"}; +lookup(41482) -> {"Lo","L"}; +lookup(41483) -> {"Lo","L"}; +lookup(41484) -> {"Lo","L"}; +lookup(41485) -> {"Lo","L"}; +lookup(41486) -> {"Lo","L"}; +lookup(41487) -> {"Lo","L"}; +lookup(41488) -> {"Lo","L"}; +lookup(41489) -> {"Lo","L"}; +lookup(41490) -> {"Lo","L"}; +lookup(41491) -> {"Lo","L"}; +lookup(41492) -> {"Lo","L"}; +lookup(41493) -> {"Lo","L"}; +lookup(41494) -> {"Lo","L"}; +lookup(41495) -> {"Lo","L"}; +lookup(41496) -> {"Lo","L"}; +lookup(41497) -> {"Lo","L"}; +lookup(41498) -> {"Lo","L"}; +lookup(41499) -> {"Lo","L"}; +lookup(41500) -> {"Lo","L"}; +lookup(41501) -> {"Lo","L"}; +lookup(41502) -> {"Lo","L"}; +lookup(41503) -> {"Lo","L"}; +lookup(41504) -> {"Lo","L"}; +lookup(41505) -> {"Lo","L"}; +lookup(41506) -> {"Lo","L"}; +lookup(41507) -> {"Lo","L"}; +lookup(41508) -> {"Lo","L"}; +lookup(41509) -> {"Lo","L"}; +lookup(41510) -> {"Lo","L"}; +lookup(41511) -> {"Lo","L"}; +lookup(41512) -> {"Lo","L"}; +lookup(41513) -> {"Lo","L"}; +lookup(41514) -> {"Lo","L"}; +lookup(41515) -> {"Lo","L"}; +lookup(41516) -> {"Lo","L"}; +lookup(41517) -> {"Lo","L"}; +lookup(41518) -> {"Lo","L"}; +lookup(41519) -> {"Lo","L"}; +lookup(41520) -> {"Lo","L"}; +lookup(41521) -> {"Lo","L"}; +lookup(41522) -> {"Lo","L"}; +lookup(41523) -> {"Lo","L"}; +lookup(41524) -> {"Lo","L"}; +lookup(41525) -> {"Lo","L"}; +lookup(41526) -> {"Lo","L"}; +lookup(41527) -> {"Lo","L"}; +lookup(41528) -> {"Lo","L"}; +lookup(41529) -> {"Lo","L"}; +lookup(41530) -> {"Lo","L"}; +lookup(41531) -> {"Lo","L"}; +lookup(41532) -> {"Lo","L"}; +lookup(41533) -> 
{"Lo","L"}; +lookup(41534) -> {"Lo","L"}; +lookup(41535) -> {"Lo","L"}; +lookup(41536) -> {"Lo","L"}; +lookup(41537) -> {"Lo","L"}; +lookup(41538) -> {"Lo","L"}; +lookup(41539) -> {"Lo","L"}; +lookup(41540) -> {"Lo","L"}; +lookup(41541) -> {"Lo","L"}; +lookup(41542) -> {"Lo","L"}; +lookup(41543) -> {"Lo","L"}; +lookup(41544) -> {"Lo","L"}; +lookup(41545) -> {"Lo","L"}; +lookup(41546) -> {"Lo","L"}; +lookup(41547) -> {"Lo","L"}; +lookup(41548) -> {"Lo","L"}; +lookup(41549) -> {"Lo","L"}; +lookup(41550) -> {"Lo","L"}; +lookup(41551) -> {"Lo","L"}; +lookup(41552) -> {"Lo","L"}; +lookup(41553) -> {"Lo","L"}; +lookup(41554) -> {"Lo","L"}; +lookup(41555) -> {"Lo","L"}; +lookup(41556) -> {"Lo","L"}; +lookup(41557) -> {"Lo","L"}; +lookup(41558) -> {"Lo","L"}; +lookup(41559) -> {"Lo","L"}; +lookup(41560) -> {"Lo","L"}; +lookup(41561) -> {"Lo","L"}; +lookup(41562) -> {"Lo","L"}; +lookup(41563) -> {"Lo","L"}; +lookup(41564) -> {"Lo","L"}; +lookup(41565) -> {"Lo","L"}; +lookup(41566) -> {"Lo","L"}; +lookup(41567) -> {"Lo","L"}; +lookup(41568) -> {"Lo","L"}; +lookup(41569) -> {"Lo","L"}; +lookup(41570) -> {"Lo","L"}; +lookup(41571) -> {"Lo","L"}; +lookup(41572) -> {"Lo","L"}; +lookup(41573) -> {"Lo","L"}; +lookup(41574) -> {"Lo","L"}; +lookup(41575) -> {"Lo","L"}; +lookup(41576) -> {"Lo","L"}; +lookup(41577) -> {"Lo","L"}; +lookup(41578) -> {"Lo","L"}; +lookup(41579) -> {"Lo","L"}; +lookup(41580) -> {"Lo","L"}; +lookup(41581) -> {"Lo","L"}; +lookup(41582) -> {"Lo","L"}; +lookup(41583) -> {"Lo","L"}; +lookup(41584) -> {"Lo","L"}; +lookup(41585) -> {"Lo","L"}; +lookup(41586) -> {"Lo","L"}; +lookup(41587) -> {"Lo","L"}; +lookup(41588) -> {"Lo","L"}; +lookup(41589) -> {"Lo","L"}; +lookup(41590) -> {"Lo","L"}; +lookup(41591) -> {"Lo","L"}; +lookup(41592) -> {"Lo","L"}; +lookup(41593) -> {"Lo","L"}; +lookup(41594) -> {"Lo","L"}; +lookup(41595) -> {"Lo","L"}; +lookup(41596) -> {"Lo","L"}; +lookup(41597) -> {"Lo","L"}; +lookup(41598) -> {"Lo","L"}; +lookup(41599) -> {"Lo","L"}; 
+lookup(41600) -> {"Lo","L"}; +lookup(41601) -> {"Lo","L"}; +lookup(41602) -> {"Lo","L"}; +lookup(41603) -> {"Lo","L"}; +lookup(41604) -> {"Lo","L"}; +lookup(41605) -> {"Lo","L"}; +lookup(41606) -> {"Lo","L"}; +lookup(41607) -> {"Lo","L"}; +lookup(41608) -> {"Lo","L"}; +lookup(41609) -> {"Lo","L"}; +lookup(41610) -> {"Lo","L"}; +lookup(41611) -> {"Lo","L"}; +lookup(41612) -> {"Lo","L"}; +lookup(41613) -> {"Lo","L"}; +lookup(41614) -> {"Lo","L"}; +lookup(41615) -> {"Lo","L"}; +lookup(41616) -> {"Lo","L"}; +lookup(41617) -> {"Lo","L"}; +lookup(41618) -> {"Lo","L"}; +lookup(41619) -> {"Lo","L"}; +lookup(41620) -> {"Lo","L"}; +lookup(41621) -> {"Lo","L"}; +lookup(41622) -> {"Lo","L"}; +lookup(41623) -> {"Lo","L"}; +lookup(41624) -> {"Lo","L"}; +lookup(41625) -> {"Lo","L"}; +lookup(41626) -> {"Lo","L"}; +lookup(41627) -> {"Lo","L"}; +lookup(41628) -> {"Lo","L"}; +lookup(41629) -> {"Lo","L"}; +lookup(41630) -> {"Lo","L"}; +lookup(41631) -> {"Lo","L"}; +lookup(41632) -> {"Lo","L"}; +lookup(41633) -> {"Lo","L"}; +lookup(41634) -> {"Lo","L"}; +lookup(41635) -> {"Lo","L"}; +lookup(41636) -> {"Lo","L"}; +lookup(41637) -> {"Lo","L"}; +lookup(41638) -> {"Lo","L"}; +lookup(41639) -> {"Lo","L"}; +lookup(41640) -> {"Lo","L"}; +lookup(41641) -> {"Lo","L"}; +lookup(41642) -> {"Lo","L"}; +lookup(41643) -> {"Lo","L"}; +lookup(41644) -> {"Lo","L"}; +lookup(41645) -> {"Lo","L"}; +lookup(41646) -> {"Lo","L"}; +lookup(41647) -> {"Lo","L"}; +lookup(41648) -> {"Lo","L"}; +lookup(41649) -> {"Lo","L"}; +lookup(41650) -> {"Lo","L"}; +lookup(41651) -> {"Lo","L"}; +lookup(41652) -> {"Lo","L"}; +lookup(41653) -> {"Lo","L"}; +lookup(41654) -> {"Lo","L"}; +lookup(41655) -> {"Lo","L"}; +lookup(41656) -> {"Lo","L"}; +lookup(41657) -> {"Lo","L"}; +lookup(41658) -> {"Lo","L"}; +lookup(41659) -> {"Lo","L"}; +lookup(41660) -> {"Lo","L"}; +lookup(41661) -> {"Lo","L"}; +lookup(41662) -> {"Lo","L"}; +lookup(41663) -> {"Lo","L"}; +lookup(41664) -> {"Lo","L"}; +lookup(41665) -> {"Lo","L"}; +lookup(41666) -> 
{"Lo","L"}; +lookup(41667) -> {"Lo","L"}; +lookup(41668) -> {"Lo","L"}; +lookup(41669) -> {"Lo","L"}; +lookup(41670) -> {"Lo","L"}; +lookup(41671) -> {"Lo","L"}; +lookup(41672) -> {"Lo","L"}; +lookup(41673) -> {"Lo","L"}; +lookup(41674) -> {"Lo","L"}; +lookup(41675) -> {"Lo","L"}; +lookup(41676) -> {"Lo","L"}; +lookup(41677) -> {"Lo","L"}; +lookup(41678) -> {"Lo","L"}; +lookup(41679) -> {"Lo","L"}; +lookup(41680) -> {"Lo","L"}; +lookup(41681) -> {"Lo","L"}; +lookup(41682) -> {"Lo","L"}; +lookup(41683) -> {"Lo","L"}; +lookup(41684) -> {"Lo","L"}; +lookup(41685) -> {"Lo","L"}; +lookup(41686) -> {"Lo","L"}; +lookup(41687) -> {"Lo","L"}; +lookup(41688) -> {"Lo","L"}; +lookup(41689) -> {"Lo","L"}; +lookup(41690) -> {"Lo","L"}; +lookup(41691) -> {"Lo","L"}; +lookup(41692) -> {"Lo","L"}; +lookup(41693) -> {"Lo","L"}; +lookup(41694) -> {"Lo","L"}; +lookup(41695) -> {"Lo","L"}; +lookup(41696) -> {"Lo","L"}; +lookup(41697) -> {"Lo","L"}; +lookup(41698) -> {"Lo","L"}; +lookup(41699) -> {"Lo","L"}; +lookup(41700) -> {"Lo","L"}; +lookup(41701) -> {"Lo","L"}; +lookup(41702) -> {"Lo","L"}; +lookup(41703) -> {"Lo","L"}; +lookup(41704) -> {"Lo","L"}; +lookup(41705) -> {"Lo","L"}; +lookup(41706) -> {"Lo","L"}; +lookup(41707) -> {"Lo","L"}; +lookup(41708) -> {"Lo","L"}; +lookup(41709) -> {"Lo","L"}; +lookup(41710) -> {"Lo","L"}; +lookup(41711) -> {"Lo","L"}; +lookup(41712) -> {"Lo","L"}; +lookup(41713) -> {"Lo","L"}; +lookup(41714) -> {"Lo","L"}; +lookup(41715) -> {"Lo","L"}; +lookup(41716) -> {"Lo","L"}; +lookup(41717) -> {"Lo","L"}; +lookup(41718) -> {"Lo","L"}; +lookup(41719) -> {"Lo","L"}; +lookup(41720) -> {"Lo","L"}; +lookup(41721) -> {"Lo","L"}; +lookup(41722) -> {"Lo","L"}; +lookup(41723) -> {"Lo","L"}; +lookup(41724) -> {"Lo","L"}; +lookup(41725) -> {"Lo","L"}; +lookup(41726) -> {"Lo","L"}; +lookup(41727) -> {"Lo","L"}; +lookup(41728) -> {"Lo","L"}; +lookup(41729) -> {"Lo","L"}; +lookup(41730) -> {"Lo","L"}; +lookup(41731) -> {"Lo","L"}; +lookup(41732) -> {"Lo","L"}; 
+lookup(41733) -> {"Lo","L"}; +lookup(41734) -> {"Lo","L"}; +lookup(41735) -> {"Lo","L"}; +lookup(41736) -> {"Lo","L"}; +lookup(41737) -> {"Lo","L"}; +lookup(41738) -> {"Lo","L"}; +lookup(41739) -> {"Lo","L"}; +lookup(41740) -> {"Lo","L"}; +lookup(41741) -> {"Lo","L"}; +lookup(41742) -> {"Lo","L"}; +lookup(41743) -> {"Lo","L"}; +lookup(41744) -> {"Lo","L"}; +lookup(41745) -> {"Lo","L"}; +lookup(41746) -> {"Lo","L"}; +lookup(41747) -> {"Lo","L"}; +lookup(41748) -> {"Lo","L"}; +lookup(41749) -> {"Lo","L"}; +lookup(41750) -> {"Lo","L"}; +lookup(41751) -> {"Lo","L"}; +lookup(41752) -> {"Lo","L"}; +lookup(41753) -> {"Lo","L"}; +lookup(41754) -> {"Lo","L"}; +lookup(41755) -> {"Lo","L"}; +lookup(41756) -> {"Lo","L"}; +lookup(41757) -> {"Lo","L"}; +lookup(41758) -> {"Lo","L"}; +lookup(41759) -> {"Lo","L"}; +lookup(41760) -> {"Lo","L"}; +lookup(41761) -> {"Lo","L"}; +lookup(41762) -> {"Lo","L"}; +lookup(41763) -> {"Lo","L"}; +lookup(41764) -> {"Lo","L"}; +lookup(41765) -> {"Lo","L"}; +lookup(41766) -> {"Lo","L"}; +lookup(41767) -> {"Lo","L"}; +lookup(41768) -> {"Lo","L"}; +lookup(41769) -> {"Lo","L"}; +lookup(41770) -> {"Lo","L"}; +lookup(41771) -> {"Lo","L"}; +lookup(41772) -> {"Lo","L"}; +lookup(41773) -> {"Lo","L"}; +lookup(41774) -> {"Lo","L"}; +lookup(41775) -> {"Lo","L"}; +lookup(41776) -> {"Lo","L"}; +lookup(41777) -> {"Lo","L"}; +lookup(41778) -> {"Lo","L"}; +lookup(41779) -> {"Lo","L"}; +lookup(41780) -> {"Lo","L"}; +lookup(41781) -> {"Lo","L"}; +lookup(41782) -> {"Lo","L"}; +lookup(41783) -> {"Lo","L"}; +lookup(41784) -> {"Lo","L"}; +lookup(41785) -> {"Lo","L"}; +lookup(41786) -> {"Lo","L"}; +lookup(41787) -> {"Lo","L"}; +lookup(41788) -> {"Lo","L"}; +lookup(41789) -> {"Lo","L"}; +lookup(41790) -> {"Lo","L"}; +lookup(41791) -> {"Lo","L"}; +lookup(41792) -> {"Lo","L"}; +lookup(41793) -> {"Lo","L"}; +lookup(41794) -> {"Lo","L"}; +lookup(41795) -> {"Lo","L"}; +lookup(41796) -> {"Lo","L"}; +lookup(41797) -> {"Lo","L"}; +lookup(41798) -> {"Lo","L"}; +lookup(41799) -> 
{"Lo","L"}; +lookup(41800) -> {"Lo","L"}; +lookup(41801) -> {"Lo","L"}; +lookup(41802) -> {"Lo","L"}; +lookup(41803) -> {"Lo","L"}; +lookup(41804) -> {"Lo","L"}; +lookup(41805) -> {"Lo","L"}; +lookup(41806) -> {"Lo","L"}; +lookup(41807) -> {"Lo","L"}; +lookup(41808) -> {"Lo","L"}; +lookup(41809) -> {"Lo","L"}; +lookup(41810) -> {"Lo","L"}; +lookup(41811) -> {"Lo","L"}; +lookup(41812) -> {"Lo","L"}; +lookup(41813) -> {"Lo","L"}; +lookup(41814) -> {"Lo","L"}; +lookup(41815) -> {"Lo","L"}; +lookup(41816) -> {"Lo","L"}; +lookup(41817) -> {"Lo","L"}; +lookup(41818) -> {"Lo","L"}; +lookup(41819) -> {"Lo","L"}; +lookup(41820) -> {"Lo","L"}; +lookup(41821) -> {"Lo","L"}; +lookup(41822) -> {"Lo","L"}; +lookup(41823) -> {"Lo","L"}; +lookup(41824) -> {"Lo","L"}; +lookup(41825) -> {"Lo","L"}; +lookup(41826) -> {"Lo","L"}; +lookup(41827) -> {"Lo","L"}; +lookup(41828) -> {"Lo","L"}; +lookup(41829) -> {"Lo","L"}; +lookup(41830) -> {"Lo","L"}; +lookup(41831) -> {"Lo","L"}; +lookup(41832) -> {"Lo","L"}; +lookup(41833) -> {"Lo","L"}; +lookup(41834) -> {"Lo","L"}; +lookup(41835) -> {"Lo","L"}; +lookup(41836) -> {"Lo","L"}; +lookup(41837) -> {"Lo","L"}; +lookup(41838) -> {"Lo","L"}; +lookup(41839) -> {"Lo","L"}; +lookup(41840) -> {"Lo","L"}; +lookup(41841) -> {"Lo","L"}; +lookup(41842) -> {"Lo","L"}; +lookup(41843) -> {"Lo","L"}; +lookup(41844) -> {"Lo","L"}; +lookup(41845) -> {"Lo","L"}; +lookup(41846) -> {"Lo","L"}; +lookup(41847) -> {"Lo","L"}; +lookup(41848) -> {"Lo","L"}; +lookup(41849) -> {"Lo","L"}; +lookup(41850) -> {"Lo","L"}; +lookup(41851) -> {"Lo","L"}; +lookup(41852) -> {"Lo","L"}; +lookup(41853) -> {"Lo","L"}; +lookup(41854) -> {"Lo","L"}; +lookup(41855) -> {"Lo","L"}; +lookup(41856) -> {"Lo","L"}; +lookup(41857) -> {"Lo","L"}; +lookup(41858) -> {"Lo","L"}; +lookup(41859) -> {"Lo","L"}; +lookup(41860) -> {"Lo","L"}; +lookup(41861) -> {"Lo","L"}; +lookup(41862) -> {"Lo","L"}; +lookup(41863) -> {"Lo","L"}; +lookup(41864) -> {"Lo","L"}; +lookup(41865) -> {"Lo","L"}; 
+lookup(41866) -> {"Lo","L"}; +lookup(41867) -> {"Lo","L"}; +lookup(41868) -> {"Lo","L"}; +lookup(41869) -> {"Lo","L"}; +lookup(41870) -> {"Lo","L"}; +lookup(41871) -> {"Lo","L"}; +lookup(41872) -> {"Lo","L"}; +lookup(41873) -> {"Lo","L"}; +lookup(41874) -> {"Lo","L"}; +lookup(41875) -> {"Lo","L"}; +lookup(41876) -> {"Lo","L"}; +lookup(41877) -> {"Lo","L"}; +lookup(41878) -> {"Lo","L"}; +lookup(41879) -> {"Lo","L"}; +lookup(41880) -> {"Lo","L"}; +lookup(41881) -> {"Lo","L"}; +lookup(41882) -> {"Lo","L"}; +lookup(41883) -> {"Lo","L"}; +lookup(41884) -> {"Lo","L"}; +lookup(41885) -> {"Lo","L"}; +lookup(41886) -> {"Lo","L"}; +lookup(41887) -> {"Lo","L"}; +lookup(41888) -> {"Lo","L"}; +lookup(41889) -> {"Lo","L"}; +lookup(41890) -> {"Lo","L"}; +lookup(41891) -> {"Lo","L"}; +lookup(41892) -> {"Lo","L"}; +lookup(41893) -> {"Lo","L"}; +lookup(41894) -> {"Lo","L"}; +lookup(41895) -> {"Lo","L"}; +lookup(41896) -> {"Lo","L"}; +lookup(41897) -> {"Lo","L"}; +lookup(41898) -> {"Lo","L"}; +lookup(41899) -> {"Lo","L"}; +lookup(41900) -> {"Lo","L"}; +lookup(41901) -> {"Lo","L"}; +lookup(41902) -> {"Lo","L"}; +lookup(41903) -> {"Lo","L"}; +lookup(41904) -> {"Lo","L"}; +lookup(41905) -> {"Lo","L"}; +lookup(41906) -> {"Lo","L"}; +lookup(41907) -> {"Lo","L"}; +lookup(41908) -> {"Lo","L"}; +lookup(41909) -> {"Lo","L"}; +lookup(41910) -> {"Lo","L"}; +lookup(41911) -> {"Lo","L"}; +lookup(41912) -> {"Lo","L"}; +lookup(41913) -> {"Lo","L"}; +lookup(41914) -> {"Lo","L"}; +lookup(41915) -> {"Lo","L"}; +lookup(41916) -> {"Lo","L"}; +lookup(41917) -> {"Lo","L"}; +lookup(41918) -> {"Lo","L"}; +lookup(41919) -> {"Lo","L"}; +lookup(41920) -> {"Lo","L"}; +lookup(41921) -> {"Lo","L"}; +lookup(41922) -> {"Lo","L"}; +lookup(41923) -> {"Lo","L"}; +lookup(41924) -> {"Lo","L"}; +lookup(41925) -> {"Lo","L"}; +lookup(41926) -> {"Lo","L"}; +lookup(41927) -> {"Lo","L"}; +lookup(41928) -> {"Lo","L"}; +lookup(41929) -> {"Lo","L"}; +lookup(41930) -> {"Lo","L"}; +lookup(41931) -> {"Lo","L"}; +lookup(41932) -> 
{"Lo","L"}; +lookup(41933) -> {"Lo","L"}; +lookup(41934) -> {"Lo","L"}; +lookup(41935) -> {"Lo","L"}; +lookup(41936) -> {"Lo","L"}; +lookup(41937) -> {"Lo","L"}; +lookup(41938) -> {"Lo","L"}; +lookup(41939) -> {"Lo","L"}; +lookup(41940) -> {"Lo","L"}; +lookup(41941) -> {"Lo","L"}; +lookup(41942) -> {"Lo","L"}; +lookup(41943) -> {"Lo","L"}; +lookup(41944) -> {"Lo","L"}; +lookup(41945) -> {"Lo","L"}; +lookup(41946) -> {"Lo","L"}; +lookup(41947) -> {"Lo","L"}; +lookup(41948) -> {"Lo","L"}; +lookup(41949) -> {"Lo","L"}; +lookup(41950) -> {"Lo","L"}; +lookup(41951) -> {"Lo","L"}; +lookup(41952) -> {"Lo","L"}; +lookup(41953) -> {"Lo","L"}; +lookup(41954) -> {"Lo","L"}; +lookup(41955) -> {"Lo","L"}; +lookup(41956) -> {"Lo","L"}; +lookup(41957) -> {"Lo","L"}; +lookup(41958) -> {"Lo","L"}; +lookup(41959) -> {"Lo","L"}; +lookup(41960) -> {"Lo","L"}; +lookup(41961) -> {"Lo","L"}; +lookup(41962) -> {"Lo","L"}; +lookup(41963) -> {"Lo","L"}; +lookup(41964) -> {"Lo","L"}; +lookup(41965) -> {"Lo","L"}; +lookup(41966) -> {"Lo","L"}; +lookup(41967) -> {"Lo","L"}; +lookup(41968) -> {"Lo","L"}; +lookup(41969) -> {"Lo","L"}; +lookup(41970) -> {"Lo","L"}; +lookup(41971) -> {"Lo","L"}; +lookup(41972) -> {"Lo","L"}; +lookup(41973) -> {"Lo","L"}; +lookup(41974) -> {"Lo","L"}; +lookup(41975) -> {"Lo","L"}; +lookup(41976) -> {"Lo","L"}; +lookup(41977) -> {"Lo","L"}; +lookup(41978) -> {"Lo","L"}; +lookup(41979) -> {"Lo","L"}; +lookup(41980) -> {"Lo","L"}; +lookup(41981) -> {"Lo","L"}; +lookup(41982) -> {"Lo","L"}; +lookup(41983) -> {"Lo","L"}; +lookup(41984) -> {"Lo","L"}; +lookup(41985) -> {"Lo","L"}; +lookup(41986) -> {"Lo","L"}; +lookup(41987) -> {"Lo","L"}; +lookup(41988) -> {"Lo","L"}; +lookup(41989) -> {"Lo","L"}; +lookup(41990) -> {"Lo","L"}; +lookup(41991) -> {"Lo","L"}; +lookup(41992) -> {"Lo","L"}; +lookup(41993) -> {"Lo","L"}; +lookup(41994) -> {"Lo","L"}; +lookup(41995) -> {"Lo","L"}; +lookup(41996) -> {"Lo","L"}; +lookup(41997) -> {"Lo","L"}; +lookup(41998) -> {"Lo","L"}; 
+lookup(41999) -> {"Lo","L"}; +lookup(42000) -> {"Lo","L"}; +lookup(42001) -> {"Lo","L"}; +lookup(42002) -> {"Lo","L"}; +lookup(42003) -> {"Lo","L"}; +lookup(42004) -> {"Lo","L"}; +lookup(42005) -> {"Lo","L"}; +lookup(42006) -> {"Lo","L"}; +lookup(42007) -> {"Lo","L"}; +lookup(42008) -> {"Lo","L"}; +lookup(42009) -> {"Lo","L"}; +lookup(42010) -> {"Lo","L"}; +lookup(42011) -> {"Lo","L"}; +lookup(42012) -> {"Lo","L"}; +lookup(42013) -> {"Lo","L"}; +lookup(42014) -> {"Lo","L"}; +lookup(42015) -> {"Lo","L"}; +lookup(42016) -> {"Lo","L"}; +lookup(42017) -> {"Lo","L"}; +lookup(42018) -> {"Lo","L"}; +lookup(42019) -> {"Lo","L"}; +lookup(42020) -> {"Lo","L"}; +lookup(42021) -> {"Lo","L"}; +lookup(42022) -> {"Lo","L"}; +lookup(42023) -> {"Lo","L"}; +lookup(42024) -> {"Lo","L"}; +lookup(42025) -> {"Lo","L"}; +lookup(42026) -> {"Lo","L"}; +lookup(42027) -> {"Lo","L"}; +lookup(42028) -> {"Lo","L"}; +lookup(42029) -> {"Lo","L"}; +lookup(42030) -> {"Lo","L"}; +lookup(42031) -> {"Lo","L"}; +lookup(42032) -> {"Lo","L"}; +lookup(42033) -> {"Lo","L"}; +lookup(42034) -> {"Lo","L"}; +lookup(42035) -> {"Lo","L"}; +lookup(42036) -> {"Lo","L"}; +lookup(42037) -> {"Lo","L"}; +lookup(42038) -> {"Lo","L"}; +lookup(42039) -> {"Lo","L"}; +lookup(42040) -> {"Lo","L"}; +lookup(42041) -> {"Lo","L"}; +lookup(42042) -> {"Lo","L"}; +lookup(42043) -> {"Lo","L"}; +lookup(42044) -> {"Lo","L"}; +lookup(42045) -> {"Lo","L"}; +lookup(42046) -> {"Lo","L"}; +lookup(42047) -> {"Lo","L"}; +lookup(42048) -> {"Lo","L"}; +lookup(42049) -> {"Lo","L"}; +lookup(42050) -> {"Lo","L"}; +lookup(42051) -> {"Lo","L"}; +lookup(42052) -> {"Lo","L"}; +lookup(42053) -> {"Lo","L"}; +lookup(42054) -> {"Lo","L"}; +lookup(42055) -> {"Lo","L"}; +lookup(42056) -> {"Lo","L"}; +lookup(42057) -> {"Lo","L"}; +lookup(42058) -> {"Lo","L"}; +lookup(42059) -> {"Lo","L"}; +lookup(42060) -> {"Lo","L"}; +lookup(42061) -> {"Lo","L"}; +lookup(42062) -> {"Lo","L"}; +lookup(42063) -> {"Lo","L"}; +lookup(42064) -> {"Lo","L"}; +lookup(42065) -> 
{"Lo","L"}; +lookup(42066) -> {"Lo","L"}; +lookup(42067) -> {"Lo","L"}; +lookup(42068) -> {"Lo","L"}; +lookup(42069) -> {"Lo","L"}; +lookup(42070) -> {"Lo","L"}; +lookup(42071) -> {"Lo","L"}; +lookup(42072) -> {"Lo","L"}; +lookup(42073) -> {"Lo","L"}; +lookup(42074) -> {"Lo","L"}; +lookup(42075) -> {"Lo","L"}; +lookup(42076) -> {"Lo","L"}; +lookup(42077) -> {"Lo","L"}; +lookup(42078) -> {"Lo","L"}; +lookup(42079) -> {"Lo","L"}; +lookup(42080) -> {"Lo","L"}; +lookup(42081) -> {"Lo","L"}; +lookup(42082) -> {"Lo","L"}; +lookup(42083) -> {"Lo","L"}; +lookup(42084) -> {"Lo","L"}; +lookup(42085) -> {"Lo","L"}; +lookup(42086) -> {"Lo","L"}; +lookup(42087) -> {"Lo","L"}; +lookup(42088) -> {"Lo","L"}; +lookup(42089) -> {"Lo","L"}; +lookup(42090) -> {"Lo","L"}; +lookup(42091) -> {"Lo","L"}; +lookup(42092) -> {"Lo","L"}; +lookup(42093) -> {"Lo","L"}; +lookup(42094) -> {"Lo","L"}; +lookup(42095) -> {"Lo","L"}; +lookup(42096) -> {"Lo","L"}; +lookup(42097) -> {"Lo","L"}; +lookup(42098) -> {"Lo","L"}; +lookup(42099) -> {"Lo","L"}; +lookup(42100) -> {"Lo","L"}; +lookup(42101) -> {"Lo","L"}; +lookup(42102) -> {"Lo","L"}; +lookup(42103) -> {"Lo","L"}; +lookup(42104) -> {"Lo","L"}; +lookup(42105) -> {"Lo","L"}; +lookup(42106) -> {"Lo","L"}; +lookup(42107) -> {"Lo","L"}; +lookup(42108) -> {"Lo","L"}; +lookup(42109) -> {"Lo","L"}; +lookup(42110) -> {"Lo","L"}; +lookup(42111) -> {"Lo","L"}; +lookup(42112) -> {"Lo","L"}; +lookup(42113) -> {"Lo","L"}; +lookup(42114) -> {"Lo","L"}; +lookup(42115) -> {"Lo","L"}; +lookup(42116) -> {"Lo","L"}; +lookup(42117) -> {"Lo","L"}; +lookup(42118) -> {"Lo","L"}; +lookup(42119) -> {"Lo","L"}; +lookup(42120) -> {"Lo","L"}; +lookup(42121) -> {"Lo","L"}; +lookup(42122) -> {"Lo","L"}; +lookup(42123) -> {"Lo","L"}; +lookup(42124) -> {"Lo","L"}; +lookup(42128) -> {"So","ON"}; +lookup(42129) -> {"So","ON"}; +lookup(42130) -> {"So","ON"}; +lookup(42131) -> {"So","ON"}; +lookup(42132) -> {"So","ON"}; +lookup(42133) -> {"So","ON"}; +lookup(42134) -> {"So","ON"}; 
+lookup(42135) -> {"So","ON"}; +lookup(42136) -> {"So","ON"}; +lookup(42137) -> {"So","ON"}; +lookup(42138) -> {"So","ON"}; +lookup(42139) -> {"So","ON"}; +lookup(42140) -> {"So","ON"}; +lookup(42141) -> {"So","ON"}; +lookup(42142) -> {"So","ON"}; +lookup(42143) -> {"So","ON"}; +lookup(42144) -> {"So","ON"}; +lookup(42145) -> {"So","ON"}; +lookup(42146) -> {"So","ON"}; +lookup(42147) -> {"So","ON"}; +lookup(42148) -> {"So","ON"}; +lookup(42149) -> {"So","ON"}; +lookup(42150) -> {"So","ON"}; +lookup(42151) -> {"So","ON"}; +lookup(42152) -> {"So","ON"}; +lookup(42153) -> {"So","ON"}; +lookup(42154) -> {"So","ON"}; +lookup(42155) -> {"So","ON"}; +lookup(42156) -> {"So","ON"}; +lookup(42157) -> {"So","ON"}; +lookup(42158) -> {"So","ON"}; +lookup(42159) -> {"So","ON"}; +lookup(42160) -> {"So","ON"}; +lookup(42161) -> {"So","ON"}; +lookup(42162) -> {"So","ON"}; +lookup(42163) -> {"So","ON"}; +lookup(42164) -> {"So","ON"}; +lookup(42165) -> {"So","ON"}; +lookup(42166) -> {"So","ON"}; +lookup(42167) -> {"So","ON"}; +lookup(42168) -> {"So","ON"}; +lookup(42169) -> {"So","ON"}; +lookup(42170) -> {"So","ON"}; +lookup(42171) -> {"So","ON"}; +lookup(42172) -> {"So","ON"}; +lookup(42173) -> {"So","ON"}; +lookup(42174) -> {"So","ON"}; +lookup(42175) -> {"So","ON"}; +lookup(42176) -> {"So","ON"}; +lookup(42177) -> {"So","ON"}; +lookup(42178) -> {"So","ON"}; +lookup(42179) -> {"So","ON"}; +lookup(42180) -> {"So","ON"}; +lookup(42181) -> {"So","ON"}; +lookup(42182) -> {"So","ON"}; +lookup(42192) -> {"Lo","L"}; +lookup(42193) -> {"Lo","L"}; +lookup(42194) -> {"Lo","L"}; +lookup(42195) -> {"Lo","L"}; +lookup(42196) -> {"Lo","L"}; +lookup(42197) -> {"Lo","L"}; +lookup(42198) -> {"Lo","L"}; +lookup(42199) -> {"Lo","L"}; +lookup(42200) -> {"Lo","L"}; +lookup(42201) -> {"Lo","L"}; +lookup(42202) -> {"Lo","L"}; +lookup(42203) -> {"Lo","L"}; +lookup(42204) -> {"Lo","L"}; +lookup(42205) -> {"Lo","L"}; +lookup(42206) -> {"Lo","L"}; +lookup(42207) -> {"Lo","L"}; +lookup(42208) -> {"Lo","L"}; 
+lookup(42209) -> {"Lo","L"}; +lookup(42210) -> {"Lo","L"}; +lookup(42211) -> {"Lo","L"}; +lookup(42212) -> {"Lo","L"}; +lookup(42213) -> {"Lo","L"}; +lookup(42214) -> {"Lo","L"}; +lookup(42215) -> {"Lo","L"}; +lookup(42216) -> {"Lo","L"}; +lookup(42217) -> {"Lo","L"}; +lookup(42218) -> {"Lo","L"}; +lookup(42219) -> {"Lo","L"}; +lookup(42220) -> {"Lo","L"}; +lookup(42221) -> {"Lo","L"}; +lookup(42222) -> {"Lo","L"}; +lookup(42223) -> {"Lo","L"}; +lookup(42224) -> {"Lo","L"}; +lookup(42225) -> {"Lo","L"}; +lookup(42226) -> {"Lo","L"}; +lookup(42227) -> {"Lo","L"}; +lookup(42228) -> {"Lo","L"}; +lookup(42229) -> {"Lo","L"}; +lookup(42230) -> {"Lo","L"}; +lookup(42231) -> {"Lo","L"}; +lookup(42232) -> {"Lm","L"}; +lookup(42233) -> {"Lm","L"}; +lookup(42234) -> {"Lm","L"}; +lookup(42235) -> {"Lm","L"}; +lookup(42236) -> {"Lm","L"}; +lookup(42237) -> {"Lm","L"}; +lookup(42238) -> {"Po","L"}; +lookup(42239) -> {"Po","L"}; +lookup(42240) -> {"Lo","L"}; +lookup(42241) -> {"Lo","L"}; +lookup(42242) -> {"Lo","L"}; +lookup(42243) -> {"Lo","L"}; +lookup(42244) -> {"Lo","L"}; +lookup(42245) -> {"Lo","L"}; +lookup(42246) -> {"Lo","L"}; +lookup(42247) -> {"Lo","L"}; +lookup(42248) -> {"Lo","L"}; +lookup(42249) -> {"Lo","L"}; +lookup(42250) -> {"Lo","L"}; +lookup(42251) -> {"Lo","L"}; +lookup(42252) -> {"Lo","L"}; +lookup(42253) -> {"Lo","L"}; +lookup(42254) -> {"Lo","L"}; +lookup(42255) -> {"Lo","L"}; +lookup(42256) -> {"Lo","L"}; +lookup(42257) -> {"Lo","L"}; +lookup(42258) -> {"Lo","L"}; +lookup(42259) -> {"Lo","L"}; +lookup(42260) -> {"Lo","L"}; +lookup(42261) -> {"Lo","L"}; +lookup(42262) -> {"Lo","L"}; +lookup(42263) -> {"Lo","L"}; +lookup(42264) -> {"Lo","L"}; +lookup(42265) -> {"Lo","L"}; +lookup(42266) -> {"Lo","L"}; +lookup(42267) -> {"Lo","L"}; +lookup(42268) -> {"Lo","L"}; +lookup(42269) -> {"Lo","L"}; +lookup(42270) -> {"Lo","L"}; +lookup(42271) -> {"Lo","L"}; +lookup(42272) -> {"Lo","L"}; +lookup(42273) -> {"Lo","L"}; +lookup(42274) -> {"Lo","L"}; +lookup(42275) -> 
{"Lo","L"}; +lookup(42276) -> {"Lo","L"}; +lookup(42277) -> {"Lo","L"}; +lookup(42278) -> {"Lo","L"}; +lookup(42279) -> {"Lo","L"}; +lookup(42280) -> {"Lo","L"}; +lookup(42281) -> {"Lo","L"}; +lookup(42282) -> {"Lo","L"}; +lookup(42283) -> {"Lo","L"}; +lookup(42284) -> {"Lo","L"}; +lookup(42285) -> {"Lo","L"}; +lookup(42286) -> {"Lo","L"}; +lookup(42287) -> {"Lo","L"}; +lookup(42288) -> {"Lo","L"}; +lookup(42289) -> {"Lo","L"}; +lookup(42290) -> {"Lo","L"}; +lookup(42291) -> {"Lo","L"}; +lookup(42292) -> {"Lo","L"}; +lookup(42293) -> {"Lo","L"}; +lookup(42294) -> {"Lo","L"}; +lookup(42295) -> {"Lo","L"}; +lookup(42296) -> {"Lo","L"}; +lookup(42297) -> {"Lo","L"}; +lookup(42298) -> {"Lo","L"}; +lookup(42299) -> {"Lo","L"}; +lookup(42300) -> {"Lo","L"}; +lookup(42301) -> {"Lo","L"}; +lookup(42302) -> {"Lo","L"}; +lookup(42303) -> {"Lo","L"}; +lookup(42304) -> {"Lo","L"}; +lookup(42305) -> {"Lo","L"}; +lookup(42306) -> {"Lo","L"}; +lookup(42307) -> {"Lo","L"}; +lookup(42308) -> {"Lo","L"}; +lookup(42309) -> {"Lo","L"}; +lookup(42310) -> {"Lo","L"}; +lookup(42311) -> {"Lo","L"}; +lookup(42312) -> {"Lo","L"}; +lookup(42313) -> {"Lo","L"}; +lookup(42314) -> {"Lo","L"}; +lookup(42315) -> {"Lo","L"}; +lookup(42316) -> {"Lo","L"}; +lookup(42317) -> {"Lo","L"}; +lookup(42318) -> {"Lo","L"}; +lookup(42319) -> {"Lo","L"}; +lookup(42320) -> {"Lo","L"}; +lookup(42321) -> {"Lo","L"}; +lookup(42322) -> {"Lo","L"}; +lookup(42323) -> {"Lo","L"}; +lookup(42324) -> {"Lo","L"}; +lookup(42325) -> {"Lo","L"}; +lookup(42326) -> {"Lo","L"}; +lookup(42327) -> {"Lo","L"}; +lookup(42328) -> {"Lo","L"}; +lookup(42329) -> {"Lo","L"}; +lookup(42330) -> {"Lo","L"}; +lookup(42331) -> {"Lo","L"}; +lookup(42332) -> {"Lo","L"}; +lookup(42333) -> {"Lo","L"}; +lookup(42334) -> {"Lo","L"}; +lookup(42335) -> {"Lo","L"}; +lookup(42336) -> {"Lo","L"}; +lookup(42337) -> {"Lo","L"}; +lookup(42338) -> {"Lo","L"}; +lookup(42339) -> {"Lo","L"}; +lookup(42340) -> {"Lo","L"}; +lookup(42341) -> {"Lo","L"}; 
+lookup(42342) -> {"Lo","L"}; +lookup(42343) -> {"Lo","L"}; +lookup(42344) -> {"Lo","L"}; +lookup(42345) -> {"Lo","L"}; +lookup(42346) -> {"Lo","L"}; +lookup(42347) -> {"Lo","L"}; +lookup(42348) -> {"Lo","L"}; +lookup(42349) -> {"Lo","L"}; +lookup(42350) -> {"Lo","L"}; +lookup(42351) -> {"Lo","L"}; +lookup(42352) -> {"Lo","L"}; +lookup(42353) -> {"Lo","L"}; +lookup(42354) -> {"Lo","L"}; +lookup(42355) -> {"Lo","L"}; +lookup(42356) -> {"Lo","L"}; +lookup(42357) -> {"Lo","L"}; +lookup(42358) -> {"Lo","L"}; +lookup(42359) -> {"Lo","L"}; +lookup(42360) -> {"Lo","L"}; +lookup(42361) -> {"Lo","L"}; +lookup(42362) -> {"Lo","L"}; +lookup(42363) -> {"Lo","L"}; +lookup(42364) -> {"Lo","L"}; +lookup(42365) -> {"Lo","L"}; +lookup(42366) -> {"Lo","L"}; +lookup(42367) -> {"Lo","L"}; +lookup(42368) -> {"Lo","L"}; +lookup(42369) -> {"Lo","L"}; +lookup(42370) -> {"Lo","L"}; +lookup(42371) -> {"Lo","L"}; +lookup(42372) -> {"Lo","L"}; +lookup(42373) -> {"Lo","L"}; +lookup(42374) -> {"Lo","L"}; +lookup(42375) -> {"Lo","L"}; +lookup(42376) -> {"Lo","L"}; +lookup(42377) -> {"Lo","L"}; +lookup(42378) -> {"Lo","L"}; +lookup(42379) -> {"Lo","L"}; +lookup(42380) -> {"Lo","L"}; +lookup(42381) -> {"Lo","L"}; +lookup(42382) -> {"Lo","L"}; +lookup(42383) -> {"Lo","L"}; +lookup(42384) -> {"Lo","L"}; +lookup(42385) -> {"Lo","L"}; +lookup(42386) -> {"Lo","L"}; +lookup(42387) -> {"Lo","L"}; +lookup(42388) -> {"Lo","L"}; +lookup(42389) -> {"Lo","L"}; +lookup(42390) -> {"Lo","L"}; +lookup(42391) -> {"Lo","L"}; +lookup(42392) -> {"Lo","L"}; +lookup(42393) -> {"Lo","L"}; +lookup(42394) -> {"Lo","L"}; +lookup(42395) -> {"Lo","L"}; +lookup(42396) -> {"Lo","L"}; +lookup(42397) -> {"Lo","L"}; +lookup(42398) -> {"Lo","L"}; +lookup(42399) -> {"Lo","L"}; +lookup(42400) -> {"Lo","L"}; +lookup(42401) -> {"Lo","L"}; +lookup(42402) -> {"Lo","L"}; +lookup(42403) -> {"Lo","L"}; +lookup(42404) -> {"Lo","L"}; +lookup(42405) -> {"Lo","L"}; +lookup(42406) -> {"Lo","L"}; +lookup(42407) -> {"Lo","L"}; +lookup(42408) -> 
{"Lo","L"}; +lookup(42409) -> {"Lo","L"}; +lookup(42410) -> {"Lo","L"}; +lookup(42411) -> {"Lo","L"}; +lookup(42412) -> {"Lo","L"}; +lookup(42413) -> {"Lo","L"}; +lookup(42414) -> {"Lo","L"}; +lookup(42415) -> {"Lo","L"}; +lookup(42416) -> {"Lo","L"}; +lookup(42417) -> {"Lo","L"}; +lookup(42418) -> {"Lo","L"}; +lookup(42419) -> {"Lo","L"}; +lookup(42420) -> {"Lo","L"}; +lookup(42421) -> {"Lo","L"}; +lookup(42422) -> {"Lo","L"}; +lookup(42423) -> {"Lo","L"}; +lookup(42424) -> {"Lo","L"}; +lookup(42425) -> {"Lo","L"}; +lookup(42426) -> {"Lo","L"}; +lookup(42427) -> {"Lo","L"}; +lookup(42428) -> {"Lo","L"}; +lookup(42429) -> {"Lo","L"}; +lookup(42430) -> {"Lo","L"}; +lookup(42431) -> {"Lo","L"}; +lookup(42432) -> {"Lo","L"}; +lookup(42433) -> {"Lo","L"}; +lookup(42434) -> {"Lo","L"}; +lookup(42435) -> {"Lo","L"}; +lookup(42436) -> {"Lo","L"}; +lookup(42437) -> {"Lo","L"}; +lookup(42438) -> {"Lo","L"}; +lookup(42439) -> {"Lo","L"}; +lookup(42440) -> {"Lo","L"}; +lookup(42441) -> {"Lo","L"}; +lookup(42442) -> {"Lo","L"}; +lookup(42443) -> {"Lo","L"}; +lookup(42444) -> {"Lo","L"}; +lookup(42445) -> {"Lo","L"}; +lookup(42446) -> {"Lo","L"}; +lookup(42447) -> {"Lo","L"}; +lookup(42448) -> {"Lo","L"}; +lookup(42449) -> {"Lo","L"}; +lookup(42450) -> {"Lo","L"}; +lookup(42451) -> {"Lo","L"}; +lookup(42452) -> {"Lo","L"}; +lookup(42453) -> {"Lo","L"}; +lookup(42454) -> {"Lo","L"}; +lookup(42455) -> {"Lo","L"}; +lookup(42456) -> {"Lo","L"}; +lookup(42457) -> {"Lo","L"}; +lookup(42458) -> {"Lo","L"}; +lookup(42459) -> {"Lo","L"}; +lookup(42460) -> {"Lo","L"}; +lookup(42461) -> {"Lo","L"}; +lookup(42462) -> {"Lo","L"}; +lookup(42463) -> {"Lo","L"}; +lookup(42464) -> {"Lo","L"}; +lookup(42465) -> {"Lo","L"}; +lookup(42466) -> {"Lo","L"}; +lookup(42467) -> {"Lo","L"}; +lookup(42468) -> {"Lo","L"}; +lookup(42469) -> {"Lo","L"}; +lookup(42470) -> {"Lo","L"}; +lookup(42471) -> {"Lo","L"}; +lookup(42472) -> {"Lo","L"}; +lookup(42473) -> {"Lo","L"}; +lookup(42474) -> {"Lo","L"}; 
+lookup(42475) -> {"Lo","L"}; +lookup(42476) -> {"Lo","L"}; +lookup(42477) -> {"Lo","L"}; +lookup(42478) -> {"Lo","L"}; +lookup(42479) -> {"Lo","L"}; +lookup(42480) -> {"Lo","L"}; +lookup(42481) -> {"Lo","L"}; +lookup(42482) -> {"Lo","L"}; +lookup(42483) -> {"Lo","L"}; +lookup(42484) -> {"Lo","L"}; +lookup(42485) -> {"Lo","L"}; +lookup(42486) -> {"Lo","L"}; +lookup(42487) -> {"Lo","L"}; +lookup(42488) -> {"Lo","L"}; +lookup(42489) -> {"Lo","L"}; +lookup(42490) -> {"Lo","L"}; +lookup(42491) -> {"Lo","L"}; +lookup(42492) -> {"Lo","L"}; +lookup(42493) -> {"Lo","L"}; +lookup(42494) -> {"Lo","L"}; +lookup(42495) -> {"Lo","L"}; +lookup(42496) -> {"Lo","L"}; +lookup(42497) -> {"Lo","L"}; +lookup(42498) -> {"Lo","L"}; +lookup(42499) -> {"Lo","L"}; +lookup(42500) -> {"Lo","L"}; +lookup(42501) -> {"Lo","L"}; +lookup(42502) -> {"Lo","L"}; +lookup(42503) -> {"Lo","L"}; +lookup(42504) -> {"Lo","L"}; +lookup(42505) -> {"Lo","L"}; +lookup(42506) -> {"Lo","L"}; +lookup(42507) -> {"Lo","L"}; +lookup(42508) -> {"Lm","L"}; +lookup(42509) -> {"Po","ON"}; +lookup(42510) -> {"Po","ON"}; +lookup(42511) -> {"Po","ON"}; +lookup(42512) -> {"Lo","L"}; +lookup(42513) -> {"Lo","L"}; +lookup(42514) -> {"Lo","L"}; +lookup(42515) -> {"Lo","L"}; +lookup(42516) -> {"Lo","L"}; +lookup(42517) -> {"Lo","L"}; +lookup(42518) -> {"Lo","L"}; +lookup(42519) -> {"Lo","L"}; +lookup(42520) -> {"Lo","L"}; +lookup(42521) -> {"Lo","L"}; +lookup(42522) -> {"Lo","L"}; +lookup(42523) -> {"Lo","L"}; +lookup(42524) -> {"Lo","L"}; +lookup(42525) -> {"Lo","L"}; +lookup(42526) -> {"Lo","L"}; +lookup(42527) -> {"Lo","L"}; +lookup(42528) -> {"Nd","L"}; +lookup(42529) -> {"Nd","L"}; +lookup(42530) -> {"Nd","L"}; +lookup(42531) -> {"Nd","L"}; +lookup(42532) -> {"Nd","L"}; +lookup(42533) -> {"Nd","L"}; +lookup(42534) -> {"Nd","L"}; +lookup(42535) -> {"Nd","L"}; +lookup(42536) -> {"Nd","L"}; +lookup(42537) -> {"Nd","L"}; +lookup(42538) -> {"Lo","L"}; +lookup(42539) -> {"Lo","L"}; +lookup(42560) -> {"Lu","L"}; +lookup(42561) 
-> {"Ll","L"}; +lookup(42562) -> {"Lu","L"}; +lookup(42563) -> {"Ll","L"}; +lookup(42564) -> {"Lu","L"}; +lookup(42565) -> {"Ll","L"}; +lookup(42566) -> {"Lu","L"}; +lookup(42567) -> {"Ll","L"}; +lookup(42568) -> {"Lu","L"}; +lookup(42569) -> {"Ll","L"}; +lookup(42570) -> {"Lu","L"}; +lookup(42571) -> {"Ll","L"}; +lookup(42572) -> {"Lu","L"}; +lookup(42573) -> {"Ll","L"}; +lookup(42574) -> {"Lu","L"}; +lookup(42575) -> {"Ll","L"}; +lookup(42576) -> {"Lu","L"}; +lookup(42577) -> {"Ll","L"}; +lookup(42578) -> {"Lu","L"}; +lookup(42579) -> {"Ll","L"}; +lookup(42580) -> {"Lu","L"}; +lookup(42581) -> {"Ll","L"}; +lookup(42582) -> {"Lu","L"}; +lookup(42583) -> {"Ll","L"}; +lookup(42584) -> {"Lu","L"}; +lookup(42585) -> {"Ll","L"}; +lookup(42586) -> {"Lu","L"}; +lookup(42587) -> {"Ll","L"}; +lookup(42588) -> {"Lu","L"}; +lookup(42589) -> {"Ll","L"}; +lookup(42590) -> {"Lu","L"}; +lookup(42591) -> {"Ll","L"}; +lookup(42592) -> {"Lu","L"}; +lookup(42593) -> {"Ll","L"}; +lookup(42594) -> {"Lu","L"}; +lookup(42595) -> {"Ll","L"}; +lookup(42596) -> {"Lu","L"}; +lookup(42597) -> {"Ll","L"}; +lookup(42598) -> {"Lu","L"}; +lookup(42599) -> {"Ll","L"}; +lookup(42600) -> {"Lu","L"}; +lookup(42601) -> {"Ll","L"}; +lookup(42602) -> {"Lu","L"}; +lookup(42603) -> {"Ll","L"}; +lookup(42604) -> {"Lu","L"}; +lookup(42605) -> {"Ll","L"}; +lookup(42606) -> {"Lo","L"}; +lookup(42607) -> {"Mn","NSM"}; +lookup(42608) -> {"Me","NSM"}; +lookup(42609) -> {"Me","NSM"}; +lookup(42610) -> {"Me","NSM"}; +lookup(42611) -> {"Po","ON"}; +lookup(42612) -> {"Mn","NSM"}; +lookup(42613) -> {"Mn","NSM"}; +lookup(42614) -> {"Mn","NSM"}; +lookup(42615) -> {"Mn","NSM"}; +lookup(42616) -> {"Mn","NSM"}; +lookup(42617) -> {"Mn","NSM"}; +lookup(42618) -> {"Mn","NSM"}; +lookup(42619) -> {"Mn","NSM"}; +lookup(42620) -> {"Mn","NSM"}; +lookup(42621) -> {"Mn","NSM"}; +lookup(42622) -> {"Po","ON"}; +lookup(42623) -> {"Lm","ON"}; +lookup(42624) -> {"Lu","L"}; +lookup(42625) -> {"Ll","L"}; +lookup(42626) -> {"Lu","L"}; 
+lookup(42627) -> {"Ll","L"}; +lookup(42628) -> {"Lu","L"}; +lookup(42629) -> {"Ll","L"}; +lookup(42630) -> {"Lu","L"}; +lookup(42631) -> {"Ll","L"}; +lookup(42632) -> {"Lu","L"}; +lookup(42633) -> {"Ll","L"}; +lookup(42634) -> {"Lu","L"}; +lookup(42635) -> {"Ll","L"}; +lookup(42636) -> {"Lu","L"}; +lookup(42637) -> {"Ll","L"}; +lookup(42638) -> {"Lu","L"}; +lookup(42639) -> {"Ll","L"}; +lookup(42640) -> {"Lu","L"}; +lookup(42641) -> {"Ll","L"}; +lookup(42642) -> {"Lu","L"}; +lookup(42643) -> {"Ll","L"}; +lookup(42644) -> {"Lu","L"}; +lookup(42645) -> {"Ll","L"}; +lookup(42646) -> {"Lu","L"}; +lookup(42647) -> {"Ll","L"}; +lookup(42648) -> {"Lu","L"}; +lookup(42649) -> {"Ll","L"}; +lookup(42650) -> {"Lu","L"}; +lookup(42651) -> {"Ll","L"}; +lookup(42652) -> {"Lm","L"}; +lookup(42653) -> {"Lm","L"}; +lookup(42654) -> {"Mn","NSM"}; +lookup(42655) -> {"Mn","NSM"}; +lookup(42656) -> {"Lo","L"}; +lookup(42657) -> {"Lo","L"}; +lookup(42658) -> {"Lo","L"}; +lookup(42659) -> {"Lo","L"}; +lookup(42660) -> {"Lo","L"}; +lookup(42661) -> {"Lo","L"}; +lookup(42662) -> {"Lo","L"}; +lookup(42663) -> {"Lo","L"}; +lookup(42664) -> {"Lo","L"}; +lookup(42665) -> {"Lo","L"}; +lookup(42666) -> {"Lo","L"}; +lookup(42667) -> {"Lo","L"}; +lookup(42668) -> {"Lo","L"}; +lookup(42669) -> {"Lo","L"}; +lookup(42670) -> {"Lo","L"}; +lookup(42671) -> {"Lo","L"}; +lookup(42672) -> {"Lo","L"}; +lookup(42673) -> {"Lo","L"}; +lookup(42674) -> {"Lo","L"}; +lookup(42675) -> {"Lo","L"}; +lookup(42676) -> {"Lo","L"}; +lookup(42677) -> {"Lo","L"}; +lookup(42678) -> {"Lo","L"}; +lookup(42679) -> {"Lo","L"}; +lookup(42680) -> {"Lo","L"}; +lookup(42681) -> {"Lo","L"}; +lookup(42682) -> {"Lo","L"}; +lookup(42683) -> {"Lo","L"}; +lookup(42684) -> {"Lo","L"}; +lookup(42685) -> {"Lo","L"}; +lookup(42686) -> {"Lo","L"}; +lookup(42687) -> {"Lo","L"}; +lookup(42688) -> {"Lo","L"}; +lookup(42689) -> {"Lo","L"}; +lookup(42690) -> {"Lo","L"}; +lookup(42691) -> {"Lo","L"}; +lookup(42692) -> {"Lo","L"}; +lookup(42693) 
-> {"Lo","L"}; +lookup(42694) -> {"Lo","L"}; +lookup(42695) -> {"Lo","L"}; +lookup(42696) -> {"Lo","L"}; +lookup(42697) -> {"Lo","L"}; +lookup(42698) -> {"Lo","L"}; +lookup(42699) -> {"Lo","L"}; +lookup(42700) -> {"Lo","L"}; +lookup(42701) -> {"Lo","L"}; +lookup(42702) -> {"Lo","L"}; +lookup(42703) -> {"Lo","L"}; +lookup(42704) -> {"Lo","L"}; +lookup(42705) -> {"Lo","L"}; +lookup(42706) -> {"Lo","L"}; +lookup(42707) -> {"Lo","L"}; +lookup(42708) -> {"Lo","L"}; +lookup(42709) -> {"Lo","L"}; +lookup(42710) -> {"Lo","L"}; +lookup(42711) -> {"Lo","L"}; +lookup(42712) -> {"Lo","L"}; +lookup(42713) -> {"Lo","L"}; +lookup(42714) -> {"Lo","L"}; +lookup(42715) -> {"Lo","L"}; +lookup(42716) -> {"Lo","L"}; +lookup(42717) -> {"Lo","L"}; +lookup(42718) -> {"Lo","L"}; +lookup(42719) -> {"Lo","L"}; +lookup(42720) -> {"Lo","L"}; +lookup(42721) -> {"Lo","L"}; +lookup(42722) -> {"Lo","L"}; +lookup(42723) -> {"Lo","L"}; +lookup(42724) -> {"Lo","L"}; +lookup(42725) -> {"Lo","L"}; +lookup(42726) -> {"Nl","L"}; +lookup(42727) -> {"Nl","L"}; +lookup(42728) -> {"Nl","L"}; +lookup(42729) -> {"Nl","L"}; +lookup(42730) -> {"Nl","L"}; +lookup(42731) -> {"Nl","L"}; +lookup(42732) -> {"Nl","L"}; +lookup(42733) -> {"Nl","L"}; +lookup(42734) -> {"Nl","L"}; +lookup(42735) -> {"Nl","L"}; +lookup(42736) -> {"Mn","NSM"}; +lookup(42737) -> {"Mn","NSM"}; +lookup(42738) -> {"Po","L"}; +lookup(42739) -> {"Po","L"}; +lookup(42740) -> {"Po","L"}; +lookup(42741) -> {"Po","L"}; +lookup(42742) -> {"Po","L"}; +lookup(42743) -> {"Po","L"}; +lookup(42752) -> {"Sk","ON"}; +lookup(42753) -> {"Sk","ON"}; +lookup(42754) -> {"Sk","ON"}; +lookup(42755) -> {"Sk","ON"}; +lookup(42756) -> {"Sk","ON"}; +lookup(42757) -> {"Sk","ON"}; +lookup(42758) -> {"Sk","ON"}; +lookup(42759) -> {"Sk","ON"}; +lookup(42760) -> {"Sk","ON"}; +lookup(42761) -> {"Sk","ON"}; +lookup(42762) -> {"Sk","ON"}; +lookup(42763) -> {"Sk","ON"}; +lookup(42764) -> {"Sk","ON"}; +lookup(42765) -> {"Sk","ON"}; +lookup(42766) -> {"Sk","ON"}; +lookup(42767) 
-> {"Sk","ON"}; +lookup(42768) -> {"Sk","ON"}; +lookup(42769) -> {"Sk","ON"}; +lookup(42770) -> {"Sk","ON"}; +lookup(42771) -> {"Sk","ON"}; +lookup(42772) -> {"Sk","ON"}; +lookup(42773) -> {"Sk","ON"}; +lookup(42774) -> {"Sk","ON"}; +lookup(42775) -> {"Lm","ON"}; +lookup(42776) -> {"Lm","ON"}; +lookup(42777) -> {"Lm","ON"}; +lookup(42778) -> {"Lm","ON"}; +lookup(42779) -> {"Lm","ON"}; +lookup(42780) -> {"Lm","ON"}; +lookup(42781) -> {"Lm","ON"}; +lookup(42782) -> {"Lm","ON"}; +lookup(42783) -> {"Lm","ON"}; +lookup(42784) -> {"Sk","ON"}; +lookup(42785) -> {"Sk","ON"}; +lookup(42786) -> {"Lu","L"}; +lookup(42787) -> {"Ll","L"}; +lookup(42788) -> {"Lu","L"}; +lookup(42789) -> {"Ll","L"}; +lookup(42790) -> {"Lu","L"}; +lookup(42791) -> {"Ll","L"}; +lookup(42792) -> {"Lu","L"}; +lookup(42793) -> {"Ll","L"}; +lookup(42794) -> {"Lu","L"}; +lookup(42795) -> {"Ll","L"}; +lookup(42796) -> {"Lu","L"}; +lookup(42797) -> {"Ll","L"}; +lookup(42798) -> {"Lu","L"}; +lookup(42799) -> {"Ll","L"}; +lookup(42800) -> {"Ll","L"}; +lookup(42801) -> {"Ll","L"}; +lookup(42802) -> {"Lu","L"}; +lookup(42803) -> {"Ll","L"}; +lookup(42804) -> {"Lu","L"}; +lookup(42805) -> {"Ll","L"}; +lookup(42806) -> {"Lu","L"}; +lookup(42807) -> {"Ll","L"}; +lookup(42808) -> {"Lu","L"}; +lookup(42809) -> {"Ll","L"}; +lookup(42810) -> {"Lu","L"}; +lookup(42811) -> {"Ll","L"}; +lookup(42812) -> {"Lu","L"}; +lookup(42813) -> {"Ll","L"}; +lookup(42814) -> {"Lu","L"}; +lookup(42815) -> {"Ll","L"}; +lookup(42816) -> {"Lu","L"}; +lookup(42817) -> {"Ll","L"}; +lookup(42818) -> {"Lu","L"}; +lookup(42819) -> {"Ll","L"}; +lookup(42820) -> {"Lu","L"}; +lookup(42821) -> {"Ll","L"}; +lookup(42822) -> {"Lu","L"}; +lookup(42823) -> {"Ll","L"}; +lookup(42824) -> {"Lu","L"}; +lookup(42825) -> {"Ll","L"}; +lookup(42826) -> {"Lu","L"}; +lookup(42827) -> {"Ll","L"}; +lookup(42828) -> {"Lu","L"}; +lookup(42829) -> {"Ll","L"}; +lookup(42830) -> {"Lu","L"}; +lookup(42831) -> {"Ll","L"}; +lookup(42832) -> {"Lu","L"}; +lookup(42833) 
-> {"Ll","L"}; +lookup(42834) -> {"Lu","L"}; +lookup(42835) -> {"Ll","L"}; +lookup(42836) -> {"Lu","L"}; +lookup(42837) -> {"Ll","L"}; +lookup(42838) -> {"Lu","L"}; +lookup(42839) -> {"Ll","L"}; +lookup(42840) -> {"Lu","L"}; +lookup(42841) -> {"Ll","L"}; +lookup(42842) -> {"Lu","L"}; +lookup(42843) -> {"Ll","L"}; +lookup(42844) -> {"Lu","L"}; +lookup(42845) -> {"Ll","L"}; +lookup(42846) -> {"Lu","L"}; +lookup(42847) -> {"Ll","L"}; +lookup(42848) -> {"Lu","L"}; +lookup(42849) -> {"Ll","L"}; +lookup(42850) -> {"Lu","L"}; +lookup(42851) -> {"Ll","L"}; +lookup(42852) -> {"Lu","L"}; +lookup(42853) -> {"Ll","L"}; +lookup(42854) -> {"Lu","L"}; +lookup(42855) -> {"Ll","L"}; +lookup(42856) -> {"Lu","L"}; +lookup(42857) -> {"Ll","L"}; +lookup(42858) -> {"Lu","L"}; +lookup(42859) -> {"Ll","L"}; +lookup(42860) -> {"Lu","L"}; +lookup(42861) -> {"Ll","L"}; +lookup(42862) -> {"Lu","L"}; +lookup(42863) -> {"Ll","L"}; +lookup(42864) -> {"Lm","L"}; +lookup(42865) -> {"Ll","L"}; +lookup(42866) -> {"Ll","L"}; +lookup(42867) -> {"Ll","L"}; +lookup(42868) -> {"Ll","L"}; +lookup(42869) -> {"Ll","L"}; +lookup(42870) -> {"Ll","L"}; +lookup(42871) -> {"Ll","L"}; +lookup(42872) -> {"Ll","L"}; +lookup(42873) -> {"Lu","L"}; +lookup(42874) -> {"Ll","L"}; +lookup(42875) -> {"Lu","L"}; +lookup(42876) -> {"Ll","L"}; +lookup(42877) -> {"Lu","L"}; +lookup(42878) -> {"Lu","L"}; +lookup(42879) -> {"Ll","L"}; +lookup(42880) -> {"Lu","L"}; +lookup(42881) -> {"Ll","L"}; +lookup(42882) -> {"Lu","L"}; +lookup(42883) -> {"Ll","L"}; +lookup(42884) -> {"Lu","L"}; +lookup(42885) -> {"Ll","L"}; +lookup(42886) -> {"Lu","L"}; +lookup(42887) -> {"Ll","L"}; +lookup(42888) -> {"Lm","ON"}; +lookup(42889) -> {"Sk","L"}; +lookup(42890) -> {"Sk","L"}; +lookup(42891) -> {"Lu","L"}; +lookup(42892) -> {"Ll","L"}; +lookup(42893) -> {"Lu","L"}; +lookup(42894) -> {"Ll","L"}; +lookup(42895) -> {"Lo","L"}; +lookup(42896) -> {"Lu","L"}; +lookup(42897) -> {"Ll","L"}; +lookup(42898) -> {"Lu","L"}; +lookup(42899) -> {"Ll","L"}; 
+lookup(42900) -> {"Ll","L"}; +lookup(42901) -> {"Ll","L"}; +lookup(42902) -> {"Lu","L"}; +lookup(42903) -> {"Ll","L"}; +lookup(42904) -> {"Lu","L"}; +lookup(42905) -> {"Ll","L"}; +lookup(42906) -> {"Lu","L"}; +lookup(42907) -> {"Ll","L"}; +lookup(42908) -> {"Lu","L"}; +lookup(42909) -> {"Ll","L"}; +lookup(42910) -> {"Lu","L"}; +lookup(42911) -> {"Ll","L"}; +lookup(42912) -> {"Lu","L"}; +lookup(42913) -> {"Ll","L"}; +lookup(42914) -> {"Lu","L"}; +lookup(42915) -> {"Ll","L"}; +lookup(42916) -> {"Lu","L"}; +lookup(42917) -> {"Ll","L"}; +lookup(42918) -> {"Lu","L"}; +lookup(42919) -> {"Ll","L"}; +lookup(42920) -> {"Lu","L"}; +lookup(42921) -> {"Ll","L"}; +lookup(42922) -> {"Lu","L"}; +lookup(42923) -> {"Lu","L"}; +lookup(42924) -> {"Lu","L"}; +lookup(42925) -> {"Lu","L"}; +lookup(42926) -> {"Lu","L"}; +lookup(42927) -> {"Ll","L"}; +lookup(42928) -> {"Lu","L"}; +lookup(42929) -> {"Lu","L"}; +lookup(42930) -> {"Lu","L"}; +lookup(42931) -> {"Lu","L"}; +lookup(42932) -> {"Lu","L"}; +lookup(42933) -> {"Ll","L"}; +lookup(42934) -> {"Lu","L"}; +lookup(42935) -> {"Ll","L"}; +lookup(42936) -> {"Lu","L"}; +lookup(42937) -> {"Ll","L"}; +lookup(42938) -> {"Lu","L"}; +lookup(42939) -> {"Ll","L"}; +lookup(42940) -> {"Lu","L"}; +lookup(42941) -> {"Ll","L"}; +lookup(42942) -> {"Lu","L"}; +lookup(42943) -> {"Ll","L"}; +lookup(42946) -> {"Lu","L"}; +lookup(42947) -> {"Ll","L"}; +lookup(42948) -> {"Lu","L"}; +lookup(42949) -> {"Lu","L"}; +lookup(42950) -> {"Lu","L"}; +lookup(42951) -> {"Lu","L"}; +lookup(42952) -> {"Ll","L"}; +lookup(42953) -> {"Lu","L"}; +lookup(42954) -> {"Ll","L"}; +lookup(42997) -> {"Lu","L"}; +lookup(42998) -> {"Ll","L"}; +lookup(42999) -> {"Lo","L"}; +lookup(43000) -> {"Lm","L"}; +lookup(43001) -> {"Lm","L"}; +lookup(43002) -> {"Ll","L"}; +lookup(43003) -> {"Lo","L"}; +lookup(43004) -> {"Lo","L"}; +lookup(43005) -> {"Lo","L"}; +lookup(43006) -> {"Lo","L"}; +lookup(43007) -> {"Lo","L"}; +lookup(43008) -> {"Lo","L"}; +lookup(43009) -> {"Lo","L"}; +lookup(43010) -> 
{"Mn","NSM"}; +lookup(43011) -> {"Lo","L"}; +lookup(43012) -> {"Lo","L"}; +lookup(43013) -> {"Lo","L"}; +lookup(43014) -> {"Mn","NSM"}; +lookup(43015) -> {"Lo","L"}; +lookup(43016) -> {"Lo","L"}; +lookup(43017) -> {"Lo","L"}; +lookup(43018) -> {"Lo","L"}; +lookup(43019) -> {"Mn","NSM"}; +lookup(43020) -> {"Lo","L"}; +lookup(43021) -> {"Lo","L"}; +lookup(43022) -> {"Lo","L"}; +lookup(43023) -> {"Lo","L"}; +lookup(43024) -> {"Lo","L"}; +lookup(43025) -> {"Lo","L"}; +lookup(43026) -> {"Lo","L"}; +lookup(43027) -> {"Lo","L"}; +lookup(43028) -> {"Lo","L"}; +lookup(43029) -> {"Lo","L"}; +lookup(43030) -> {"Lo","L"}; +lookup(43031) -> {"Lo","L"}; +lookup(43032) -> {"Lo","L"}; +lookup(43033) -> {"Lo","L"}; +lookup(43034) -> {"Lo","L"}; +lookup(43035) -> {"Lo","L"}; +lookup(43036) -> {"Lo","L"}; +lookup(43037) -> {"Lo","L"}; +lookup(43038) -> {"Lo","L"}; +lookup(43039) -> {"Lo","L"}; +lookup(43040) -> {"Lo","L"}; +lookup(43041) -> {"Lo","L"}; +lookup(43042) -> {"Lo","L"}; +lookup(43043) -> {"Mc","L"}; +lookup(43044) -> {"Mc","L"}; +lookup(43045) -> {"Mn","NSM"}; +lookup(43046) -> {"Mn","NSM"}; +lookup(43047) -> {"Mc","L"}; +lookup(43048) -> {"So","ON"}; +lookup(43049) -> {"So","ON"}; +lookup(43050) -> {"So","ON"}; +lookup(43051) -> {"So","ON"}; +lookup(43052) -> {"Mn","NSM"}; +lookup(43056) -> {"No","L"}; +lookup(43057) -> {"No","L"}; +lookup(43058) -> {"No","L"}; +lookup(43059) -> {"No","L"}; +lookup(43060) -> {"No","L"}; +lookup(43061) -> {"No","L"}; +lookup(43062) -> {"So","L"}; +lookup(43063) -> {"So","L"}; +lookup(43064) -> {"Sc","ET"}; +lookup(43065) -> {"So","ET"}; +lookup(43072) -> {"Lo","L"}; +lookup(43073) -> {"Lo","L"}; +lookup(43074) -> {"Lo","L"}; +lookup(43075) -> {"Lo","L"}; +lookup(43076) -> {"Lo","L"}; +lookup(43077) -> {"Lo","L"}; +lookup(43078) -> {"Lo","L"}; +lookup(43079) -> {"Lo","L"}; +lookup(43080) -> {"Lo","L"}; +lookup(43081) -> {"Lo","L"}; +lookup(43082) -> {"Lo","L"}; +lookup(43083) -> {"Lo","L"}; +lookup(43084) -> {"Lo","L"}; +lookup(43085) -> 
{"Lo","L"}; +lookup(43086) -> {"Lo","L"}; +lookup(43087) -> {"Lo","L"}; +lookup(43088) -> {"Lo","L"}; +lookup(43089) -> {"Lo","L"}; +lookup(43090) -> {"Lo","L"}; +lookup(43091) -> {"Lo","L"}; +lookup(43092) -> {"Lo","L"}; +lookup(43093) -> {"Lo","L"}; +lookup(43094) -> {"Lo","L"}; +lookup(43095) -> {"Lo","L"}; +lookup(43096) -> {"Lo","L"}; +lookup(43097) -> {"Lo","L"}; +lookup(43098) -> {"Lo","L"}; +lookup(43099) -> {"Lo","L"}; +lookup(43100) -> {"Lo","L"}; +lookup(43101) -> {"Lo","L"}; +lookup(43102) -> {"Lo","L"}; +lookup(43103) -> {"Lo","L"}; +lookup(43104) -> {"Lo","L"}; +lookup(43105) -> {"Lo","L"}; +lookup(43106) -> {"Lo","L"}; +lookup(43107) -> {"Lo","L"}; +lookup(43108) -> {"Lo","L"}; +lookup(43109) -> {"Lo","L"}; +lookup(43110) -> {"Lo","L"}; +lookup(43111) -> {"Lo","L"}; +lookup(43112) -> {"Lo","L"}; +lookup(43113) -> {"Lo","L"}; +lookup(43114) -> {"Lo","L"}; +lookup(43115) -> {"Lo","L"}; +lookup(43116) -> {"Lo","L"}; +lookup(43117) -> {"Lo","L"}; +lookup(43118) -> {"Lo","L"}; +lookup(43119) -> {"Lo","L"}; +lookup(43120) -> {"Lo","L"}; +lookup(43121) -> {"Lo","L"}; +lookup(43122) -> {"Lo","L"}; +lookup(43123) -> {"Lo","L"}; +lookup(43124) -> {"Po","ON"}; +lookup(43125) -> {"Po","ON"}; +lookup(43126) -> {"Po","ON"}; +lookup(43127) -> {"Po","ON"}; +lookup(43136) -> {"Mc","L"}; +lookup(43137) -> {"Mc","L"}; +lookup(43138) -> {"Lo","L"}; +lookup(43139) -> {"Lo","L"}; +lookup(43140) -> {"Lo","L"}; +lookup(43141) -> {"Lo","L"}; +lookup(43142) -> {"Lo","L"}; +lookup(43143) -> {"Lo","L"}; +lookup(43144) -> {"Lo","L"}; +lookup(43145) -> {"Lo","L"}; +lookup(43146) -> {"Lo","L"}; +lookup(43147) -> {"Lo","L"}; +lookup(43148) -> {"Lo","L"}; +lookup(43149) -> {"Lo","L"}; +lookup(43150) -> {"Lo","L"}; +lookup(43151) -> {"Lo","L"}; +lookup(43152) -> {"Lo","L"}; +lookup(43153) -> {"Lo","L"}; +lookup(43154) -> {"Lo","L"}; +lookup(43155) -> {"Lo","L"}; +lookup(43156) -> {"Lo","L"}; +lookup(43157) -> {"Lo","L"}; +lookup(43158) -> {"Lo","L"}; +lookup(43159) -> {"Lo","L"}; 
+lookup(43160) -> {"Lo","L"}; +lookup(43161) -> {"Lo","L"}; +lookup(43162) -> {"Lo","L"}; +lookup(43163) -> {"Lo","L"}; +lookup(43164) -> {"Lo","L"}; +lookup(43165) -> {"Lo","L"}; +lookup(43166) -> {"Lo","L"}; +lookup(43167) -> {"Lo","L"}; +lookup(43168) -> {"Lo","L"}; +lookup(43169) -> {"Lo","L"}; +lookup(43170) -> {"Lo","L"}; +lookup(43171) -> {"Lo","L"}; +lookup(43172) -> {"Lo","L"}; +lookup(43173) -> {"Lo","L"}; +lookup(43174) -> {"Lo","L"}; +lookup(43175) -> {"Lo","L"}; +lookup(43176) -> {"Lo","L"}; +lookup(43177) -> {"Lo","L"}; +lookup(43178) -> {"Lo","L"}; +lookup(43179) -> {"Lo","L"}; +lookup(43180) -> {"Lo","L"}; +lookup(43181) -> {"Lo","L"}; +lookup(43182) -> {"Lo","L"}; +lookup(43183) -> {"Lo","L"}; +lookup(43184) -> {"Lo","L"}; +lookup(43185) -> {"Lo","L"}; +lookup(43186) -> {"Lo","L"}; +lookup(43187) -> {"Lo","L"}; +lookup(43188) -> {"Mc","L"}; +lookup(43189) -> {"Mc","L"}; +lookup(43190) -> {"Mc","L"}; +lookup(43191) -> {"Mc","L"}; +lookup(43192) -> {"Mc","L"}; +lookup(43193) -> {"Mc","L"}; +lookup(43194) -> {"Mc","L"}; +lookup(43195) -> {"Mc","L"}; +lookup(43196) -> {"Mc","L"}; +lookup(43197) -> {"Mc","L"}; +lookup(43198) -> {"Mc","L"}; +lookup(43199) -> {"Mc","L"}; +lookup(43200) -> {"Mc","L"}; +lookup(43201) -> {"Mc","L"}; +lookup(43202) -> {"Mc","L"}; +lookup(43203) -> {"Mc","L"}; +lookup(43204) -> {"Mn","NSM"}; +lookup(43205) -> {"Mn","NSM"}; +lookup(43214) -> {"Po","L"}; +lookup(43215) -> {"Po","L"}; +lookup(43216) -> {"Nd","L"}; +lookup(43217) -> {"Nd","L"}; +lookup(43218) -> {"Nd","L"}; +lookup(43219) -> {"Nd","L"}; +lookup(43220) -> {"Nd","L"}; +lookup(43221) -> {"Nd","L"}; +lookup(43222) -> {"Nd","L"}; +lookup(43223) -> {"Nd","L"}; +lookup(43224) -> {"Nd","L"}; +lookup(43225) -> {"Nd","L"}; +lookup(43232) -> {"Mn","NSM"}; +lookup(43233) -> {"Mn","NSM"}; +lookup(43234) -> {"Mn","NSM"}; +lookup(43235) -> {"Mn","NSM"}; +lookup(43236) -> {"Mn","NSM"}; +lookup(43237) -> {"Mn","NSM"}; +lookup(43238) -> {"Mn","NSM"}; +lookup(43239) -> {"Mn","NSM"}; 
+lookup(43240) -> {"Mn","NSM"}; +lookup(43241) -> {"Mn","NSM"}; +lookup(43242) -> {"Mn","NSM"}; +lookup(43243) -> {"Mn","NSM"}; +lookup(43244) -> {"Mn","NSM"}; +lookup(43245) -> {"Mn","NSM"}; +lookup(43246) -> {"Mn","NSM"}; +lookup(43247) -> {"Mn","NSM"}; +lookup(43248) -> {"Mn","NSM"}; +lookup(43249) -> {"Mn","NSM"}; +lookup(43250) -> {"Lo","L"}; +lookup(43251) -> {"Lo","L"}; +lookup(43252) -> {"Lo","L"}; +lookup(43253) -> {"Lo","L"}; +lookup(43254) -> {"Lo","L"}; +lookup(43255) -> {"Lo","L"}; +lookup(43256) -> {"Po","L"}; +lookup(43257) -> {"Po","L"}; +lookup(43258) -> {"Po","L"}; +lookup(43259) -> {"Lo","L"}; +lookup(43260) -> {"Po","L"}; +lookup(43261) -> {"Lo","L"}; +lookup(43262) -> {"Lo","L"}; +lookup(43263) -> {"Mn","NSM"}; +lookup(43264) -> {"Nd","L"}; +lookup(43265) -> {"Nd","L"}; +lookup(43266) -> {"Nd","L"}; +lookup(43267) -> {"Nd","L"}; +lookup(43268) -> {"Nd","L"}; +lookup(43269) -> {"Nd","L"}; +lookup(43270) -> {"Nd","L"}; +lookup(43271) -> {"Nd","L"}; +lookup(43272) -> {"Nd","L"}; +lookup(43273) -> {"Nd","L"}; +lookup(43274) -> {"Lo","L"}; +lookup(43275) -> {"Lo","L"}; +lookup(43276) -> {"Lo","L"}; +lookup(43277) -> {"Lo","L"}; +lookup(43278) -> {"Lo","L"}; +lookup(43279) -> {"Lo","L"}; +lookup(43280) -> {"Lo","L"}; +lookup(43281) -> {"Lo","L"}; +lookup(43282) -> {"Lo","L"}; +lookup(43283) -> {"Lo","L"}; +lookup(43284) -> {"Lo","L"}; +lookup(43285) -> {"Lo","L"}; +lookup(43286) -> {"Lo","L"}; +lookup(43287) -> {"Lo","L"}; +lookup(43288) -> {"Lo","L"}; +lookup(43289) -> {"Lo","L"}; +lookup(43290) -> {"Lo","L"}; +lookup(43291) -> {"Lo","L"}; +lookup(43292) -> {"Lo","L"}; +lookup(43293) -> {"Lo","L"}; +lookup(43294) -> {"Lo","L"}; +lookup(43295) -> {"Lo","L"}; +lookup(43296) -> {"Lo","L"}; +lookup(43297) -> {"Lo","L"}; +lookup(43298) -> {"Lo","L"}; +lookup(43299) -> {"Lo","L"}; +lookup(43300) -> {"Lo","L"}; +lookup(43301) -> {"Lo","L"}; +lookup(43302) -> {"Mn","NSM"}; +lookup(43303) -> {"Mn","NSM"}; +lookup(43304) -> {"Mn","NSM"}; +lookup(43305) -> 
{"Mn","NSM"}; +lookup(43306) -> {"Mn","NSM"}; +lookup(43307) -> {"Mn","NSM"}; +lookup(43308) -> {"Mn","NSM"}; +lookup(43309) -> {"Mn","NSM"}; +lookup(43310) -> {"Po","L"}; +lookup(43311) -> {"Po","L"}; +lookup(43312) -> {"Lo","L"}; +lookup(43313) -> {"Lo","L"}; +lookup(43314) -> {"Lo","L"}; +lookup(43315) -> {"Lo","L"}; +lookup(43316) -> {"Lo","L"}; +lookup(43317) -> {"Lo","L"}; +lookup(43318) -> {"Lo","L"}; +lookup(43319) -> {"Lo","L"}; +lookup(43320) -> {"Lo","L"}; +lookup(43321) -> {"Lo","L"}; +lookup(43322) -> {"Lo","L"}; +lookup(43323) -> {"Lo","L"}; +lookup(43324) -> {"Lo","L"}; +lookup(43325) -> {"Lo","L"}; +lookup(43326) -> {"Lo","L"}; +lookup(43327) -> {"Lo","L"}; +lookup(43328) -> {"Lo","L"}; +lookup(43329) -> {"Lo","L"}; +lookup(43330) -> {"Lo","L"}; +lookup(43331) -> {"Lo","L"}; +lookup(43332) -> {"Lo","L"}; +lookup(43333) -> {"Lo","L"}; +lookup(43334) -> {"Lo","L"}; +lookup(43335) -> {"Mn","NSM"}; +lookup(43336) -> {"Mn","NSM"}; +lookup(43337) -> {"Mn","NSM"}; +lookup(43338) -> {"Mn","NSM"}; +lookup(43339) -> {"Mn","NSM"}; +lookup(43340) -> {"Mn","NSM"}; +lookup(43341) -> {"Mn","NSM"}; +lookup(43342) -> {"Mn","NSM"}; +lookup(43343) -> {"Mn","NSM"}; +lookup(43344) -> {"Mn","NSM"}; +lookup(43345) -> {"Mn","NSM"}; +lookup(43346) -> {"Mc","L"}; +lookup(43347) -> {"Mc","L"}; +lookup(43359) -> {"Po","L"}; +lookup(43360) -> {"Lo","L"}; +lookup(43361) -> {"Lo","L"}; +lookup(43362) -> {"Lo","L"}; +lookup(43363) -> {"Lo","L"}; +lookup(43364) -> {"Lo","L"}; +lookup(43365) -> {"Lo","L"}; +lookup(43366) -> {"Lo","L"}; +lookup(43367) -> {"Lo","L"}; +lookup(43368) -> {"Lo","L"}; +lookup(43369) -> {"Lo","L"}; +lookup(43370) -> {"Lo","L"}; +lookup(43371) -> {"Lo","L"}; +lookup(43372) -> {"Lo","L"}; +lookup(43373) -> {"Lo","L"}; +lookup(43374) -> {"Lo","L"}; +lookup(43375) -> {"Lo","L"}; +lookup(43376) -> {"Lo","L"}; +lookup(43377) -> {"Lo","L"}; +lookup(43378) -> {"Lo","L"}; +lookup(43379) -> {"Lo","L"}; +lookup(43380) -> {"Lo","L"}; +lookup(43381) -> {"Lo","L"}; 
+lookup(43382) -> {"Lo","L"}; +lookup(43383) -> {"Lo","L"}; +lookup(43384) -> {"Lo","L"}; +lookup(43385) -> {"Lo","L"}; +lookup(43386) -> {"Lo","L"}; +lookup(43387) -> {"Lo","L"}; +lookup(43388) -> {"Lo","L"}; +lookup(43392) -> {"Mn","NSM"}; +lookup(43393) -> {"Mn","NSM"}; +lookup(43394) -> {"Mn","NSM"}; +lookup(43395) -> {"Mc","L"}; +lookup(43396) -> {"Lo","L"}; +lookup(43397) -> {"Lo","L"}; +lookup(43398) -> {"Lo","L"}; +lookup(43399) -> {"Lo","L"}; +lookup(43400) -> {"Lo","L"}; +lookup(43401) -> {"Lo","L"}; +lookup(43402) -> {"Lo","L"}; +lookup(43403) -> {"Lo","L"}; +lookup(43404) -> {"Lo","L"}; +lookup(43405) -> {"Lo","L"}; +lookup(43406) -> {"Lo","L"}; +lookup(43407) -> {"Lo","L"}; +lookup(43408) -> {"Lo","L"}; +lookup(43409) -> {"Lo","L"}; +lookup(43410) -> {"Lo","L"}; +lookup(43411) -> {"Lo","L"}; +lookup(43412) -> {"Lo","L"}; +lookup(43413) -> {"Lo","L"}; +lookup(43414) -> {"Lo","L"}; +lookup(43415) -> {"Lo","L"}; +lookup(43416) -> {"Lo","L"}; +lookup(43417) -> {"Lo","L"}; +lookup(43418) -> {"Lo","L"}; +lookup(43419) -> {"Lo","L"}; +lookup(43420) -> {"Lo","L"}; +lookup(43421) -> {"Lo","L"}; +lookup(43422) -> {"Lo","L"}; +lookup(43423) -> {"Lo","L"}; +lookup(43424) -> {"Lo","L"}; +lookup(43425) -> {"Lo","L"}; +lookup(43426) -> {"Lo","L"}; +lookup(43427) -> {"Lo","L"}; +lookup(43428) -> {"Lo","L"}; +lookup(43429) -> {"Lo","L"}; +lookup(43430) -> {"Lo","L"}; +lookup(43431) -> {"Lo","L"}; +lookup(43432) -> {"Lo","L"}; +lookup(43433) -> {"Lo","L"}; +lookup(43434) -> {"Lo","L"}; +lookup(43435) -> {"Lo","L"}; +lookup(43436) -> {"Lo","L"}; +lookup(43437) -> {"Lo","L"}; +lookup(43438) -> {"Lo","L"}; +lookup(43439) -> {"Lo","L"}; +lookup(43440) -> {"Lo","L"}; +lookup(43441) -> {"Lo","L"}; +lookup(43442) -> {"Lo","L"}; +lookup(43443) -> {"Mn","NSM"}; +lookup(43444) -> {"Mc","L"}; +lookup(43445) -> {"Mc","L"}; +lookup(43446) -> {"Mn","NSM"}; +lookup(43447) -> {"Mn","NSM"}; +lookup(43448) -> {"Mn","NSM"}; +lookup(43449) -> {"Mn","NSM"}; +lookup(43450) -> {"Mc","L"}; 
+lookup(43451) -> {"Mc","L"}; +lookup(43452) -> {"Mn","NSM"}; +lookup(43453) -> {"Mn","NSM"}; +lookup(43454) -> {"Mc","L"}; +lookup(43455) -> {"Mc","L"}; +lookup(43456) -> {"Mc","L"}; +lookup(43457) -> {"Po","L"}; +lookup(43458) -> {"Po","L"}; +lookup(43459) -> {"Po","L"}; +lookup(43460) -> {"Po","L"}; +lookup(43461) -> {"Po","L"}; +lookup(43462) -> {"Po","L"}; +lookup(43463) -> {"Po","L"}; +lookup(43464) -> {"Po","L"}; +lookup(43465) -> {"Po","L"}; +lookup(43466) -> {"Po","L"}; +lookup(43467) -> {"Po","L"}; +lookup(43468) -> {"Po","L"}; +lookup(43469) -> {"Po","L"}; +lookup(43471) -> {"Lm","L"}; +lookup(43472) -> {"Nd","L"}; +lookup(43473) -> {"Nd","L"}; +lookup(43474) -> {"Nd","L"}; +lookup(43475) -> {"Nd","L"}; +lookup(43476) -> {"Nd","L"}; +lookup(43477) -> {"Nd","L"}; +lookup(43478) -> {"Nd","L"}; +lookup(43479) -> {"Nd","L"}; +lookup(43480) -> {"Nd","L"}; +lookup(43481) -> {"Nd","L"}; +lookup(43486) -> {"Po","L"}; +lookup(43487) -> {"Po","L"}; +lookup(43488) -> {"Lo","L"}; +lookup(43489) -> {"Lo","L"}; +lookup(43490) -> {"Lo","L"}; +lookup(43491) -> {"Lo","L"}; +lookup(43492) -> {"Lo","L"}; +lookup(43493) -> {"Mn","NSM"}; +lookup(43494) -> {"Lm","L"}; +lookup(43495) -> {"Lo","L"}; +lookup(43496) -> {"Lo","L"}; +lookup(43497) -> {"Lo","L"}; +lookup(43498) -> {"Lo","L"}; +lookup(43499) -> {"Lo","L"}; +lookup(43500) -> {"Lo","L"}; +lookup(43501) -> {"Lo","L"}; +lookup(43502) -> {"Lo","L"}; +lookup(43503) -> {"Lo","L"}; +lookup(43504) -> {"Nd","L"}; +lookup(43505) -> {"Nd","L"}; +lookup(43506) -> {"Nd","L"}; +lookup(43507) -> {"Nd","L"}; +lookup(43508) -> {"Nd","L"}; +lookup(43509) -> {"Nd","L"}; +lookup(43510) -> {"Nd","L"}; +lookup(43511) -> {"Nd","L"}; +lookup(43512) -> {"Nd","L"}; +lookup(43513) -> {"Nd","L"}; +lookup(43514) -> {"Lo","L"}; +lookup(43515) -> {"Lo","L"}; +lookup(43516) -> {"Lo","L"}; +lookup(43517) -> {"Lo","L"}; +lookup(43518) -> {"Lo","L"}; +lookup(43520) -> {"Lo","L"}; +lookup(43521) -> {"Lo","L"}; +lookup(43522) -> {"Lo","L"}; 
+lookup(43523) -> {"Lo","L"}; +lookup(43524) -> {"Lo","L"}; +lookup(43525) -> {"Lo","L"}; +lookup(43526) -> {"Lo","L"}; +lookup(43527) -> {"Lo","L"}; +lookup(43528) -> {"Lo","L"}; +lookup(43529) -> {"Lo","L"}; +lookup(43530) -> {"Lo","L"}; +lookup(43531) -> {"Lo","L"}; +lookup(43532) -> {"Lo","L"}; +lookup(43533) -> {"Lo","L"}; +lookup(43534) -> {"Lo","L"}; +lookup(43535) -> {"Lo","L"}; +lookup(43536) -> {"Lo","L"}; +lookup(43537) -> {"Lo","L"}; +lookup(43538) -> {"Lo","L"}; +lookup(43539) -> {"Lo","L"}; +lookup(43540) -> {"Lo","L"}; +lookup(43541) -> {"Lo","L"}; +lookup(43542) -> {"Lo","L"}; +lookup(43543) -> {"Lo","L"}; +lookup(43544) -> {"Lo","L"}; +lookup(43545) -> {"Lo","L"}; +lookup(43546) -> {"Lo","L"}; +lookup(43547) -> {"Lo","L"}; +lookup(43548) -> {"Lo","L"}; +lookup(43549) -> {"Lo","L"}; +lookup(43550) -> {"Lo","L"}; +lookup(43551) -> {"Lo","L"}; +lookup(43552) -> {"Lo","L"}; +lookup(43553) -> {"Lo","L"}; +lookup(43554) -> {"Lo","L"}; +lookup(43555) -> {"Lo","L"}; +lookup(43556) -> {"Lo","L"}; +lookup(43557) -> {"Lo","L"}; +lookup(43558) -> {"Lo","L"}; +lookup(43559) -> {"Lo","L"}; +lookup(43560) -> {"Lo","L"}; +lookup(43561) -> {"Mn","NSM"}; +lookup(43562) -> {"Mn","NSM"}; +lookup(43563) -> {"Mn","NSM"}; +lookup(43564) -> {"Mn","NSM"}; +lookup(43565) -> {"Mn","NSM"}; +lookup(43566) -> {"Mn","NSM"}; +lookup(43567) -> {"Mc","L"}; +lookup(43568) -> {"Mc","L"}; +lookup(43569) -> {"Mn","NSM"}; +lookup(43570) -> {"Mn","NSM"}; +lookup(43571) -> {"Mc","L"}; +lookup(43572) -> {"Mc","L"}; +lookup(43573) -> {"Mn","NSM"}; +lookup(43574) -> {"Mn","NSM"}; +lookup(43584) -> {"Lo","L"}; +lookup(43585) -> {"Lo","L"}; +lookup(43586) -> {"Lo","L"}; +lookup(43587) -> {"Mn","NSM"}; +lookup(43588) -> {"Lo","L"}; +lookup(43589) -> {"Lo","L"}; +lookup(43590) -> {"Lo","L"}; +lookup(43591) -> {"Lo","L"}; +lookup(43592) -> {"Lo","L"}; +lookup(43593) -> {"Lo","L"}; +lookup(43594) -> {"Lo","L"}; +lookup(43595) -> {"Lo","L"}; +lookup(43596) -> {"Mn","NSM"}; +lookup(43597) -> 
{"Mc","L"}; +lookup(43600) -> {"Nd","L"}; +lookup(43601) -> {"Nd","L"}; +lookup(43602) -> {"Nd","L"}; +lookup(43603) -> {"Nd","L"}; +lookup(43604) -> {"Nd","L"}; +lookup(43605) -> {"Nd","L"}; +lookup(43606) -> {"Nd","L"}; +lookup(43607) -> {"Nd","L"}; +lookup(43608) -> {"Nd","L"}; +lookup(43609) -> {"Nd","L"}; +lookup(43612) -> {"Po","L"}; +lookup(43613) -> {"Po","L"}; +lookup(43614) -> {"Po","L"}; +lookup(43615) -> {"Po","L"}; +lookup(43616) -> {"Lo","L"}; +lookup(43617) -> {"Lo","L"}; +lookup(43618) -> {"Lo","L"}; +lookup(43619) -> {"Lo","L"}; +lookup(43620) -> {"Lo","L"}; +lookup(43621) -> {"Lo","L"}; +lookup(43622) -> {"Lo","L"}; +lookup(43623) -> {"Lo","L"}; +lookup(43624) -> {"Lo","L"}; +lookup(43625) -> {"Lo","L"}; +lookup(43626) -> {"Lo","L"}; +lookup(43627) -> {"Lo","L"}; +lookup(43628) -> {"Lo","L"}; +lookup(43629) -> {"Lo","L"}; +lookup(43630) -> {"Lo","L"}; +lookup(43631) -> {"Lo","L"}; +lookup(43632) -> {"Lm","L"}; +lookup(43633) -> {"Lo","L"}; +lookup(43634) -> {"Lo","L"}; +lookup(43635) -> {"Lo","L"}; +lookup(43636) -> {"Lo","L"}; +lookup(43637) -> {"Lo","L"}; +lookup(43638) -> {"Lo","L"}; +lookup(43639) -> {"So","L"}; +lookup(43640) -> {"So","L"}; +lookup(43641) -> {"So","L"}; +lookup(43642) -> {"Lo","L"}; +lookup(43643) -> {"Mc","L"}; +lookup(43644) -> {"Mn","NSM"}; +lookup(43645) -> {"Mc","L"}; +lookup(43646) -> {"Lo","L"}; +lookup(43647) -> {"Lo","L"}; +lookup(43648) -> {"Lo","L"}; +lookup(43649) -> {"Lo","L"}; +lookup(43650) -> {"Lo","L"}; +lookup(43651) -> {"Lo","L"}; +lookup(43652) -> {"Lo","L"}; +lookup(43653) -> {"Lo","L"}; +lookup(43654) -> {"Lo","L"}; +lookup(43655) -> {"Lo","L"}; +lookup(43656) -> {"Lo","L"}; +lookup(43657) -> {"Lo","L"}; +lookup(43658) -> {"Lo","L"}; +lookup(43659) -> {"Lo","L"}; +lookup(43660) -> {"Lo","L"}; +lookup(43661) -> {"Lo","L"}; +lookup(43662) -> {"Lo","L"}; +lookup(43663) -> {"Lo","L"}; +lookup(43664) -> {"Lo","L"}; +lookup(43665) -> {"Lo","L"}; +lookup(43666) -> {"Lo","L"}; +lookup(43667) -> {"Lo","L"}; 
+lookup(43668) -> {"Lo","L"}; +lookup(43669) -> {"Lo","L"}; +lookup(43670) -> {"Lo","L"}; +lookup(43671) -> {"Lo","L"}; +lookup(43672) -> {"Lo","L"}; +lookup(43673) -> {"Lo","L"}; +lookup(43674) -> {"Lo","L"}; +lookup(43675) -> {"Lo","L"}; +lookup(43676) -> {"Lo","L"}; +lookup(43677) -> {"Lo","L"}; +lookup(43678) -> {"Lo","L"}; +lookup(43679) -> {"Lo","L"}; +lookup(43680) -> {"Lo","L"}; +lookup(43681) -> {"Lo","L"}; +lookup(43682) -> {"Lo","L"}; +lookup(43683) -> {"Lo","L"}; +lookup(43684) -> {"Lo","L"}; +lookup(43685) -> {"Lo","L"}; +lookup(43686) -> {"Lo","L"}; +lookup(43687) -> {"Lo","L"}; +lookup(43688) -> {"Lo","L"}; +lookup(43689) -> {"Lo","L"}; +lookup(43690) -> {"Lo","L"}; +lookup(43691) -> {"Lo","L"}; +lookup(43692) -> {"Lo","L"}; +lookup(43693) -> {"Lo","L"}; +lookup(43694) -> {"Lo","L"}; +lookup(43695) -> {"Lo","L"}; +lookup(43696) -> {"Mn","NSM"}; +lookup(43697) -> {"Lo","L"}; +lookup(43698) -> {"Mn","NSM"}; +lookup(43699) -> {"Mn","NSM"}; +lookup(43700) -> {"Mn","NSM"}; +lookup(43701) -> {"Lo","L"}; +lookup(43702) -> {"Lo","L"}; +lookup(43703) -> {"Mn","NSM"}; +lookup(43704) -> {"Mn","NSM"}; +lookup(43705) -> {"Lo","L"}; +lookup(43706) -> {"Lo","L"}; +lookup(43707) -> {"Lo","L"}; +lookup(43708) -> {"Lo","L"}; +lookup(43709) -> {"Lo","L"}; +lookup(43710) -> {"Mn","NSM"}; +lookup(43711) -> {"Mn","NSM"}; +lookup(43712) -> {"Lo","L"}; +lookup(43713) -> {"Mn","NSM"}; +lookup(43714) -> {"Lo","L"}; +lookup(43739) -> {"Lo","L"}; +lookup(43740) -> {"Lo","L"}; +lookup(43741) -> {"Lm","L"}; +lookup(43742) -> {"Po","L"}; +lookup(43743) -> {"Po","L"}; +lookup(43744) -> {"Lo","L"}; +lookup(43745) -> {"Lo","L"}; +lookup(43746) -> {"Lo","L"}; +lookup(43747) -> {"Lo","L"}; +lookup(43748) -> {"Lo","L"}; +lookup(43749) -> {"Lo","L"}; +lookup(43750) -> {"Lo","L"}; +lookup(43751) -> {"Lo","L"}; +lookup(43752) -> {"Lo","L"}; +lookup(43753) -> {"Lo","L"}; +lookup(43754) -> {"Lo","L"}; +lookup(43755) -> {"Mc","L"}; +lookup(43756) -> {"Mn","NSM"}; +lookup(43757) -> 
{"Mn","NSM"}; +lookup(43758) -> {"Mc","L"}; +lookup(43759) -> {"Mc","L"}; +lookup(43760) -> {"Po","L"}; +lookup(43761) -> {"Po","L"}; +lookup(43762) -> {"Lo","L"}; +lookup(43763) -> {"Lm","L"}; +lookup(43764) -> {"Lm","L"}; +lookup(43765) -> {"Mc","L"}; +lookup(43766) -> {"Mn","NSM"}; +lookup(43777) -> {"Lo","L"}; +lookup(43778) -> {"Lo","L"}; +lookup(43779) -> {"Lo","L"}; +lookup(43780) -> {"Lo","L"}; +lookup(43781) -> {"Lo","L"}; +lookup(43782) -> {"Lo","L"}; +lookup(43785) -> {"Lo","L"}; +lookup(43786) -> {"Lo","L"}; +lookup(43787) -> {"Lo","L"}; +lookup(43788) -> {"Lo","L"}; +lookup(43789) -> {"Lo","L"}; +lookup(43790) -> {"Lo","L"}; +lookup(43793) -> {"Lo","L"}; +lookup(43794) -> {"Lo","L"}; +lookup(43795) -> {"Lo","L"}; +lookup(43796) -> {"Lo","L"}; +lookup(43797) -> {"Lo","L"}; +lookup(43798) -> {"Lo","L"}; +lookup(43808) -> {"Lo","L"}; +lookup(43809) -> {"Lo","L"}; +lookup(43810) -> {"Lo","L"}; +lookup(43811) -> {"Lo","L"}; +lookup(43812) -> {"Lo","L"}; +lookup(43813) -> {"Lo","L"}; +lookup(43814) -> {"Lo","L"}; +lookup(43816) -> {"Lo","L"}; +lookup(43817) -> {"Lo","L"}; +lookup(43818) -> {"Lo","L"}; +lookup(43819) -> {"Lo","L"}; +lookup(43820) -> {"Lo","L"}; +lookup(43821) -> {"Lo","L"}; +lookup(43822) -> {"Lo","L"}; +lookup(43824) -> {"Ll","L"}; +lookup(43825) -> {"Ll","L"}; +lookup(43826) -> {"Ll","L"}; +lookup(43827) -> {"Ll","L"}; +lookup(43828) -> {"Ll","L"}; +lookup(43829) -> {"Ll","L"}; +lookup(43830) -> {"Ll","L"}; +lookup(43831) -> {"Ll","L"}; +lookup(43832) -> {"Ll","L"}; +lookup(43833) -> {"Ll","L"}; +lookup(43834) -> {"Ll","L"}; +lookup(43835) -> {"Ll","L"}; +lookup(43836) -> {"Ll","L"}; +lookup(43837) -> {"Ll","L"}; +lookup(43838) -> {"Ll","L"}; +lookup(43839) -> {"Ll","L"}; +lookup(43840) -> {"Ll","L"}; +lookup(43841) -> {"Ll","L"}; +lookup(43842) -> {"Ll","L"}; +lookup(43843) -> {"Ll","L"}; +lookup(43844) -> {"Ll","L"}; +lookup(43845) -> {"Ll","L"}; +lookup(43846) -> {"Ll","L"}; +lookup(43847) -> {"Ll","L"}; +lookup(43848) -> {"Ll","L"}; 
+lookup(43849) -> {"Ll","L"}; +lookup(43850) -> {"Ll","L"}; +lookup(43851) -> {"Ll","L"}; +lookup(43852) -> {"Ll","L"}; +lookup(43853) -> {"Ll","L"}; +lookup(43854) -> {"Ll","L"}; +lookup(43855) -> {"Ll","L"}; +lookup(43856) -> {"Ll","L"}; +lookup(43857) -> {"Ll","L"}; +lookup(43858) -> {"Ll","L"}; +lookup(43859) -> {"Ll","L"}; +lookup(43860) -> {"Ll","L"}; +lookup(43861) -> {"Ll","L"}; +lookup(43862) -> {"Ll","L"}; +lookup(43863) -> {"Ll","L"}; +lookup(43864) -> {"Ll","L"}; +lookup(43865) -> {"Ll","L"}; +lookup(43866) -> {"Ll","L"}; +lookup(43867) -> {"Sk","L"}; +lookup(43868) -> {"Lm","L"}; +lookup(43869) -> {"Lm","L"}; +lookup(43870) -> {"Lm","L"}; +lookup(43871) -> {"Lm","L"}; +lookup(43872) -> {"Ll","L"}; +lookup(43873) -> {"Ll","L"}; +lookup(43874) -> {"Ll","L"}; +lookup(43875) -> {"Ll","L"}; +lookup(43876) -> {"Ll","L"}; +lookup(43877) -> {"Ll","L"}; +lookup(43878) -> {"Ll","L"}; +lookup(43879) -> {"Ll","L"}; +lookup(43880) -> {"Ll","L"}; +lookup(43881) -> {"Lm","L"}; +lookup(43882) -> {"Sk","ON"}; +lookup(43883) -> {"Sk","ON"}; +lookup(43888) -> {"Ll","L"}; +lookup(43889) -> {"Ll","L"}; +lookup(43890) -> {"Ll","L"}; +lookup(43891) -> {"Ll","L"}; +lookup(43892) -> {"Ll","L"}; +lookup(43893) -> {"Ll","L"}; +lookup(43894) -> {"Ll","L"}; +lookup(43895) -> {"Ll","L"}; +lookup(43896) -> {"Ll","L"}; +lookup(43897) -> {"Ll","L"}; +lookup(43898) -> {"Ll","L"}; +lookup(43899) -> {"Ll","L"}; +lookup(43900) -> {"Ll","L"}; +lookup(43901) -> {"Ll","L"}; +lookup(43902) -> {"Ll","L"}; +lookup(43903) -> {"Ll","L"}; +lookup(43904) -> {"Ll","L"}; +lookup(43905) -> {"Ll","L"}; +lookup(43906) -> {"Ll","L"}; +lookup(43907) -> {"Ll","L"}; +lookup(43908) -> {"Ll","L"}; +lookup(43909) -> {"Ll","L"}; +lookup(43910) -> {"Ll","L"}; +lookup(43911) -> {"Ll","L"}; +lookup(43912) -> {"Ll","L"}; +lookup(43913) -> {"Ll","L"}; +lookup(43914) -> {"Ll","L"}; +lookup(43915) -> {"Ll","L"}; +lookup(43916) -> {"Ll","L"}; +lookup(43917) -> {"Ll","L"}; +lookup(43918) -> {"Ll","L"}; +lookup(43919) -> 
{"Ll","L"}; +lookup(43920) -> {"Ll","L"}; +lookup(43921) -> {"Ll","L"}; +lookup(43922) -> {"Ll","L"}; +lookup(43923) -> {"Ll","L"}; +lookup(43924) -> {"Ll","L"}; +lookup(43925) -> {"Ll","L"}; +lookup(43926) -> {"Ll","L"}; +lookup(43927) -> {"Ll","L"}; +lookup(43928) -> {"Ll","L"}; +lookup(43929) -> {"Ll","L"}; +lookup(43930) -> {"Ll","L"}; +lookup(43931) -> {"Ll","L"}; +lookup(43932) -> {"Ll","L"}; +lookup(43933) -> {"Ll","L"}; +lookup(43934) -> {"Ll","L"}; +lookup(43935) -> {"Ll","L"}; +lookup(43936) -> {"Ll","L"}; +lookup(43937) -> {"Ll","L"}; +lookup(43938) -> {"Ll","L"}; +lookup(43939) -> {"Ll","L"}; +lookup(43940) -> {"Ll","L"}; +lookup(43941) -> {"Ll","L"}; +lookup(43942) -> {"Ll","L"}; +lookup(43943) -> {"Ll","L"}; +lookup(43944) -> {"Ll","L"}; +lookup(43945) -> {"Ll","L"}; +lookup(43946) -> {"Ll","L"}; +lookup(43947) -> {"Ll","L"}; +lookup(43948) -> {"Ll","L"}; +lookup(43949) -> {"Ll","L"}; +lookup(43950) -> {"Ll","L"}; +lookup(43951) -> {"Ll","L"}; +lookup(43952) -> {"Ll","L"}; +lookup(43953) -> {"Ll","L"}; +lookup(43954) -> {"Ll","L"}; +lookup(43955) -> {"Ll","L"}; +lookup(43956) -> {"Ll","L"}; +lookup(43957) -> {"Ll","L"}; +lookup(43958) -> {"Ll","L"}; +lookup(43959) -> {"Ll","L"}; +lookup(43960) -> {"Ll","L"}; +lookup(43961) -> {"Ll","L"}; +lookup(43962) -> {"Ll","L"}; +lookup(43963) -> {"Ll","L"}; +lookup(43964) -> {"Ll","L"}; +lookup(43965) -> {"Ll","L"}; +lookup(43966) -> {"Ll","L"}; +lookup(43967) -> {"Ll","L"}; +lookup(43968) -> {"Lo","L"}; +lookup(43969) -> {"Lo","L"}; +lookup(43970) -> {"Lo","L"}; +lookup(43971) -> {"Lo","L"}; +lookup(43972) -> {"Lo","L"}; +lookup(43973) -> {"Lo","L"}; +lookup(43974) -> {"Lo","L"}; +lookup(43975) -> {"Lo","L"}; +lookup(43976) -> {"Lo","L"}; +lookup(43977) -> {"Lo","L"}; +lookup(43978) -> {"Lo","L"}; +lookup(43979) -> {"Lo","L"}; +lookup(43980) -> {"Lo","L"}; +lookup(43981) -> {"Lo","L"}; +lookup(43982) -> {"Lo","L"}; +lookup(43983) -> {"Lo","L"}; +lookup(43984) -> {"Lo","L"}; +lookup(43985) -> {"Lo","L"}; 
+lookup(43986) -> {"Lo","L"}; +lookup(43987) -> {"Lo","L"}; +lookup(43988) -> {"Lo","L"}; +lookup(43989) -> {"Lo","L"}; +lookup(43990) -> {"Lo","L"}; +lookup(43991) -> {"Lo","L"}; +lookup(43992) -> {"Lo","L"}; +lookup(43993) -> {"Lo","L"}; +lookup(43994) -> {"Lo","L"}; +lookup(43995) -> {"Lo","L"}; +lookup(43996) -> {"Lo","L"}; +lookup(43997) -> {"Lo","L"}; +lookup(43998) -> {"Lo","L"}; +lookup(43999) -> {"Lo","L"}; +lookup(44000) -> {"Lo","L"}; +lookup(44001) -> {"Lo","L"}; +lookup(44002) -> {"Lo","L"}; +lookup(44003) -> {"Mc","L"}; +lookup(44004) -> {"Mc","L"}; +lookup(44005) -> {"Mn","NSM"}; +lookup(44006) -> {"Mc","L"}; +lookup(44007) -> {"Mc","L"}; +lookup(44008) -> {"Mn","NSM"}; +lookup(44009) -> {"Mc","L"}; +lookup(44010) -> {"Mc","L"}; +lookup(44011) -> {"Po","L"}; +lookup(44012) -> {"Mc","L"}; +lookup(44013) -> {"Mn","NSM"}; +lookup(44016) -> {"Nd","L"}; +lookup(44017) -> {"Nd","L"}; +lookup(44018) -> {"Nd","L"}; +lookup(44019) -> {"Nd","L"}; +lookup(44020) -> {"Nd","L"}; +lookup(44021) -> {"Nd","L"}; +lookup(44022) -> {"Nd","L"}; +lookup(44023) -> {"Nd","L"}; +lookup(44024) -> {"Nd","L"}; +lookup(44025) -> {"Nd","L"}; +lookup(44032) -> {"Lo","L"}; +lookup(55203) -> {"Lo","L"}; +lookup(55216) -> {"Lo","L"}; +lookup(55217) -> {"Lo","L"}; +lookup(55218) -> {"Lo","L"}; +lookup(55219) -> {"Lo","L"}; +lookup(55220) -> {"Lo","L"}; +lookup(55221) -> {"Lo","L"}; +lookup(55222) -> {"Lo","L"}; +lookup(55223) -> {"Lo","L"}; +lookup(55224) -> {"Lo","L"}; +lookup(55225) -> {"Lo","L"}; +lookup(55226) -> {"Lo","L"}; +lookup(55227) -> {"Lo","L"}; +lookup(55228) -> {"Lo","L"}; +lookup(55229) -> {"Lo","L"}; +lookup(55230) -> {"Lo","L"}; +lookup(55231) -> {"Lo","L"}; +lookup(55232) -> {"Lo","L"}; +lookup(55233) -> {"Lo","L"}; +lookup(55234) -> {"Lo","L"}; +lookup(55235) -> {"Lo","L"}; +lookup(55236) -> {"Lo","L"}; +lookup(55237) -> {"Lo","L"}; +lookup(55238) -> {"Lo","L"}; +lookup(55243) -> {"Lo","L"}; +lookup(55244) -> {"Lo","L"}; +lookup(55245) -> {"Lo","L"}; 
+lookup(55246) -> {"Lo","L"}; +lookup(55247) -> {"Lo","L"}; +lookup(55248) -> {"Lo","L"}; +lookup(55249) -> {"Lo","L"}; +lookup(55250) -> {"Lo","L"}; +lookup(55251) -> {"Lo","L"}; +lookup(55252) -> {"Lo","L"}; +lookup(55253) -> {"Lo","L"}; +lookup(55254) -> {"Lo","L"}; +lookup(55255) -> {"Lo","L"}; +lookup(55256) -> {"Lo","L"}; +lookup(55257) -> {"Lo","L"}; +lookup(55258) -> {"Lo","L"}; +lookup(55259) -> {"Lo","L"}; +lookup(55260) -> {"Lo","L"}; +lookup(55261) -> {"Lo","L"}; +lookup(55262) -> {"Lo","L"}; +lookup(55263) -> {"Lo","L"}; +lookup(55264) -> {"Lo","L"}; +lookup(55265) -> {"Lo","L"}; +lookup(55266) -> {"Lo","L"}; +lookup(55267) -> {"Lo","L"}; +lookup(55268) -> {"Lo","L"}; +lookup(55269) -> {"Lo","L"}; +lookup(55270) -> {"Lo","L"}; +lookup(55271) -> {"Lo","L"}; +lookup(55272) -> {"Lo","L"}; +lookup(55273) -> {"Lo","L"}; +lookup(55274) -> {"Lo","L"}; +lookup(55275) -> {"Lo","L"}; +lookup(55276) -> {"Lo","L"}; +lookup(55277) -> {"Lo","L"}; +lookup(55278) -> {"Lo","L"}; +lookup(55279) -> {"Lo","L"}; +lookup(55280) -> {"Lo","L"}; +lookup(55281) -> {"Lo","L"}; +lookup(55282) -> {"Lo","L"}; +lookup(55283) -> {"Lo","L"}; +lookup(55284) -> {"Lo","L"}; +lookup(55285) -> {"Lo","L"}; +lookup(55286) -> {"Lo","L"}; +lookup(55287) -> {"Lo","L"}; +lookup(55288) -> {"Lo","L"}; +lookup(55289) -> {"Lo","L"}; +lookup(55290) -> {"Lo","L"}; +lookup(55291) -> {"Lo","L"}; +lookup(55296) -> {"Cs","L"}; +lookup(56191) -> {"Cs","L"}; +lookup(56192) -> {"Cs","L"}; +lookup(56319) -> {"Cs","L"}; +lookup(56320) -> {"Cs","L"}; +lookup(57343) -> {"Cs","L"}; +lookup(57344) -> {"Co","L"}; +lookup(63743) -> {"Co","L"}; +lookup(63744) -> {"Lo","L"}; +lookup(63745) -> {"Lo","L"}; +lookup(63746) -> {"Lo","L"}; +lookup(63747) -> {"Lo","L"}; +lookup(63748) -> {"Lo","L"}; +lookup(63749) -> {"Lo","L"}; +lookup(63750) -> {"Lo","L"}; +lookup(63751) -> {"Lo","L"}; +lookup(63752) -> {"Lo","L"}; +lookup(63753) -> {"Lo","L"}; +lookup(63754) -> {"Lo","L"}; +lookup(63755) -> {"Lo","L"}; +lookup(63756) -> 
{"Lo","L"}; +lookup(63757) -> {"Lo","L"}; +lookup(63758) -> {"Lo","L"}; +lookup(63759) -> {"Lo","L"}; +lookup(63760) -> {"Lo","L"}; +lookup(63761) -> {"Lo","L"}; +lookup(63762) -> {"Lo","L"}; +lookup(63763) -> {"Lo","L"}; +lookup(63764) -> {"Lo","L"}; +lookup(63765) -> {"Lo","L"}; +lookup(63766) -> {"Lo","L"}; +lookup(63767) -> {"Lo","L"}; +lookup(63768) -> {"Lo","L"}; +lookup(63769) -> {"Lo","L"}; +lookup(63770) -> {"Lo","L"}; +lookup(63771) -> {"Lo","L"}; +lookup(63772) -> {"Lo","L"}; +lookup(63773) -> {"Lo","L"}; +lookup(63774) -> {"Lo","L"}; +lookup(63775) -> {"Lo","L"}; +lookup(63776) -> {"Lo","L"}; +lookup(63777) -> {"Lo","L"}; +lookup(63778) -> {"Lo","L"}; +lookup(63779) -> {"Lo","L"}; +lookup(63780) -> {"Lo","L"}; +lookup(63781) -> {"Lo","L"}; +lookup(63782) -> {"Lo","L"}; +lookup(63783) -> {"Lo","L"}; +lookup(63784) -> {"Lo","L"}; +lookup(63785) -> {"Lo","L"}; +lookup(63786) -> {"Lo","L"}; +lookup(63787) -> {"Lo","L"}; +lookup(63788) -> {"Lo","L"}; +lookup(63789) -> {"Lo","L"}; +lookup(63790) -> {"Lo","L"}; +lookup(63791) -> {"Lo","L"}; +lookup(63792) -> {"Lo","L"}; +lookup(63793) -> {"Lo","L"}; +lookup(63794) -> {"Lo","L"}; +lookup(63795) -> {"Lo","L"}; +lookup(63796) -> {"Lo","L"}; +lookup(63797) -> {"Lo","L"}; +lookup(63798) -> {"Lo","L"}; +lookup(63799) -> {"Lo","L"}; +lookup(63800) -> {"Lo","L"}; +lookup(63801) -> {"Lo","L"}; +lookup(63802) -> {"Lo","L"}; +lookup(63803) -> {"Lo","L"}; +lookup(63804) -> {"Lo","L"}; +lookup(63805) -> {"Lo","L"}; +lookup(63806) -> {"Lo","L"}; +lookup(63807) -> {"Lo","L"}; +lookup(63808) -> {"Lo","L"}; +lookup(63809) -> {"Lo","L"}; +lookup(63810) -> {"Lo","L"}; +lookup(63811) -> {"Lo","L"}; +lookup(63812) -> {"Lo","L"}; +lookup(63813) -> {"Lo","L"}; +lookup(63814) -> {"Lo","L"}; +lookup(63815) -> {"Lo","L"}; +lookup(63816) -> {"Lo","L"}; +lookup(63817) -> {"Lo","L"}; +lookup(63818) -> {"Lo","L"}; +lookup(63819) -> {"Lo","L"}; +lookup(63820) -> {"Lo","L"}; +lookup(63821) -> {"Lo","L"}; +lookup(63822) -> {"Lo","L"}; 
+lookup(63823) -> {"Lo","L"}; +lookup(63824) -> {"Lo","L"}; +lookup(63825) -> {"Lo","L"}; +lookup(63826) -> {"Lo","L"}; +lookup(63827) -> {"Lo","L"}; +lookup(63828) -> {"Lo","L"}; +lookup(63829) -> {"Lo","L"}; +lookup(63830) -> {"Lo","L"}; +lookup(63831) -> {"Lo","L"}; +lookup(63832) -> {"Lo","L"}; +lookup(63833) -> {"Lo","L"}; +lookup(63834) -> {"Lo","L"}; +lookup(63835) -> {"Lo","L"}; +lookup(63836) -> {"Lo","L"}; +lookup(63837) -> {"Lo","L"}; +lookup(63838) -> {"Lo","L"}; +lookup(63839) -> {"Lo","L"}; +lookup(63840) -> {"Lo","L"}; +lookup(63841) -> {"Lo","L"}; +lookup(63842) -> {"Lo","L"}; +lookup(63843) -> {"Lo","L"}; +lookup(63844) -> {"Lo","L"}; +lookup(63845) -> {"Lo","L"}; +lookup(63846) -> {"Lo","L"}; +lookup(63847) -> {"Lo","L"}; +lookup(63848) -> {"Lo","L"}; +lookup(63849) -> {"Lo","L"}; +lookup(63850) -> {"Lo","L"}; +lookup(63851) -> {"Lo","L"}; +lookup(63852) -> {"Lo","L"}; +lookup(63853) -> {"Lo","L"}; +lookup(63854) -> {"Lo","L"}; +lookup(63855) -> {"Lo","L"}; +lookup(63856) -> {"Lo","L"}; +lookup(63857) -> {"Lo","L"}; +lookup(63858) -> {"Lo","L"}; +lookup(63859) -> {"Lo","L"}; +lookup(63860) -> {"Lo","L"}; +lookup(63861) -> {"Lo","L"}; +lookup(63862) -> {"Lo","L"}; +lookup(63863) -> {"Lo","L"}; +lookup(63864) -> {"Lo","L"}; +lookup(63865) -> {"Lo","L"}; +lookup(63866) -> {"Lo","L"}; +lookup(63867) -> {"Lo","L"}; +lookup(63868) -> {"Lo","L"}; +lookup(63869) -> {"Lo","L"}; +lookup(63870) -> {"Lo","L"}; +lookup(63871) -> {"Lo","L"}; +lookup(63872) -> {"Lo","L"}; +lookup(63873) -> {"Lo","L"}; +lookup(63874) -> {"Lo","L"}; +lookup(63875) -> {"Lo","L"}; +lookup(63876) -> {"Lo","L"}; +lookup(63877) -> {"Lo","L"}; +lookup(63878) -> {"Lo","L"}; +lookup(63879) -> {"Lo","L"}; +lookup(63880) -> {"Lo","L"}; +lookup(63881) -> {"Lo","L"}; +lookup(63882) -> {"Lo","L"}; +lookup(63883) -> {"Lo","L"}; +lookup(63884) -> {"Lo","L"}; +lookup(63885) -> {"Lo","L"}; +lookup(63886) -> {"Lo","L"}; +lookup(63887) -> {"Lo","L"}; +lookup(63888) -> {"Lo","L"}; +lookup(63889) -> 
{"Lo","L"}; +lookup(63890) -> {"Lo","L"}; +lookup(63891) -> {"Lo","L"}; +lookup(63892) -> {"Lo","L"}; +lookup(63893) -> {"Lo","L"}; +lookup(63894) -> {"Lo","L"}; +lookup(63895) -> {"Lo","L"}; +lookup(63896) -> {"Lo","L"}; +lookup(63897) -> {"Lo","L"}; +lookup(63898) -> {"Lo","L"}; +lookup(63899) -> {"Lo","L"}; +lookup(63900) -> {"Lo","L"}; +lookup(63901) -> {"Lo","L"}; +lookup(63902) -> {"Lo","L"}; +lookup(63903) -> {"Lo","L"}; +lookup(63904) -> {"Lo","L"}; +lookup(63905) -> {"Lo","L"}; +lookup(63906) -> {"Lo","L"}; +lookup(63907) -> {"Lo","L"}; +lookup(63908) -> {"Lo","L"}; +lookup(63909) -> {"Lo","L"}; +lookup(63910) -> {"Lo","L"}; +lookup(63911) -> {"Lo","L"}; +lookup(63912) -> {"Lo","L"}; +lookup(63913) -> {"Lo","L"}; +lookup(63914) -> {"Lo","L"}; +lookup(63915) -> {"Lo","L"}; +lookup(63916) -> {"Lo","L"}; +lookup(63917) -> {"Lo","L"}; +lookup(63918) -> {"Lo","L"}; +lookup(63919) -> {"Lo","L"}; +lookup(63920) -> {"Lo","L"}; +lookup(63921) -> {"Lo","L"}; +lookup(63922) -> {"Lo","L"}; +lookup(63923) -> {"Lo","L"}; +lookup(63924) -> {"Lo","L"}; +lookup(63925) -> {"Lo","L"}; +lookup(63926) -> {"Lo","L"}; +lookup(63927) -> {"Lo","L"}; +lookup(63928) -> {"Lo","L"}; +lookup(63929) -> {"Lo","L"}; +lookup(63930) -> {"Lo","L"}; +lookup(63931) -> {"Lo","L"}; +lookup(63932) -> {"Lo","L"}; +lookup(63933) -> {"Lo","L"}; +lookup(63934) -> {"Lo","L"}; +lookup(63935) -> {"Lo","L"}; +lookup(63936) -> {"Lo","L"}; +lookup(63937) -> {"Lo","L"}; +lookup(63938) -> {"Lo","L"}; +lookup(63939) -> {"Lo","L"}; +lookup(63940) -> {"Lo","L"}; +lookup(63941) -> {"Lo","L"}; +lookup(63942) -> {"Lo","L"}; +lookup(63943) -> {"Lo","L"}; +lookup(63944) -> {"Lo","L"}; +lookup(63945) -> {"Lo","L"}; +lookup(63946) -> {"Lo","L"}; +lookup(63947) -> {"Lo","L"}; +lookup(63948) -> {"Lo","L"}; +lookup(63949) -> {"Lo","L"}; +lookup(63950) -> {"Lo","L"}; +lookup(63951) -> {"Lo","L"}; +lookup(63952) -> {"Lo","L"}; +lookup(63953) -> {"Lo","L"}; +lookup(63954) -> {"Lo","L"}; +lookup(63955) -> {"Lo","L"}; 
+lookup(63956) -> {"Lo","L"}; +lookup(63957) -> {"Lo","L"}; +lookup(63958) -> {"Lo","L"}; +lookup(63959) -> {"Lo","L"}; +lookup(63960) -> {"Lo","L"}; +lookup(63961) -> {"Lo","L"}; +lookup(63962) -> {"Lo","L"}; +lookup(63963) -> {"Lo","L"}; +lookup(63964) -> {"Lo","L"}; +lookup(63965) -> {"Lo","L"}; +lookup(63966) -> {"Lo","L"}; +lookup(63967) -> {"Lo","L"}; +lookup(63968) -> {"Lo","L"}; +lookup(63969) -> {"Lo","L"}; +lookup(63970) -> {"Lo","L"}; +lookup(63971) -> {"Lo","L"}; +lookup(63972) -> {"Lo","L"}; +lookup(63973) -> {"Lo","L"}; +lookup(63974) -> {"Lo","L"}; +lookup(63975) -> {"Lo","L"}; +lookup(63976) -> {"Lo","L"}; +lookup(63977) -> {"Lo","L"}; +lookup(63978) -> {"Lo","L"}; +lookup(63979) -> {"Lo","L"}; +lookup(63980) -> {"Lo","L"}; +lookup(63981) -> {"Lo","L"}; +lookup(63982) -> {"Lo","L"}; +lookup(63983) -> {"Lo","L"}; +lookup(63984) -> {"Lo","L"}; +lookup(63985) -> {"Lo","L"}; +lookup(63986) -> {"Lo","L"}; +lookup(63987) -> {"Lo","L"}; +lookup(63988) -> {"Lo","L"}; +lookup(63989) -> {"Lo","L"}; +lookup(63990) -> {"Lo","L"}; +lookup(63991) -> {"Lo","L"}; +lookup(63992) -> {"Lo","L"}; +lookup(63993) -> {"Lo","L"}; +lookup(63994) -> {"Lo","L"}; +lookup(63995) -> {"Lo","L"}; +lookup(63996) -> {"Lo","L"}; +lookup(63997) -> {"Lo","L"}; +lookup(63998) -> {"Lo","L"}; +lookup(63999) -> {"Lo","L"}; +lookup(64000) -> {"Lo","L"}; +lookup(64001) -> {"Lo","L"}; +lookup(64002) -> {"Lo","L"}; +lookup(64003) -> {"Lo","L"}; +lookup(64004) -> {"Lo","L"}; +lookup(64005) -> {"Lo","L"}; +lookup(64006) -> {"Lo","L"}; +lookup(64007) -> {"Lo","L"}; +lookup(64008) -> {"Lo","L"}; +lookup(64009) -> {"Lo","L"}; +lookup(64010) -> {"Lo","L"}; +lookup(64011) -> {"Lo","L"}; +lookup(64012) -> {"Lo","L"}; +lookup(64013) -> {"Lo","L"}; +lookup(64014) -> {"Lo","L"}; +lookup(64015) -> {"Lo","L"}; +lookup(64016) -> {"Lo","L"}; +lookup(64017) -> {"Lo","L"}; +lookup(64018) -> {"Lo","L"}; +lookup(64019) -> {"Lo","L"}; +lookup(64020) -> {"Lo","L"}; +lookup(64021) -> {"Lo","L"}; +lookup(64022) -> 
{"Lo","L"}; +lookup(64023) -> {"Lo","L"}; +lookup(64024) -> {"Lo","L"}; +lookup(64025) -> {"Lo","L"}; +lookup(64026) -> {"Lo","L"}; +lookup(64027) -> {"Lo","L"}; +lookup(64028) -> {"Lo","L"}; +lookup(64029) -> {"Lo","L"}; +lookup(64030) -> {"Lo","L"}; +lookup(64031) -> {"Lo","L"}; +lookup(64032) -> {"Lo","L"}; +lookup(64033) -> {"Lo","L"}; +lookup(64034) -> {"Lo","L"}; +lookup(64035) -> {"Lo","L"}; +lookup(64036) -> {"Lo","L"}; +lookup(64037) -> {"Lo","L"}; +lookup(64038) -> {"Lo","L"}; +lookup(64039) -> {"Lo","L"}; +lookup(64040) -> {"Lo","L"}; +lookup(64041) -> {"Lo","L"}; +lookup(64042) -> {"Lo","L"}; +lookup(64043) -> {"Lo","L"}; +lookup(64044) -> {"Lo","L"}; +lookup(64045) -> {"Lo","L"}; +lookup(64046) -> {"Lo","L"}; +lookup(64047) -> {"Lo","L"}; +lookup(64048) -> {"Lo","L"}; +lookup(64049) -> {"Lo","L"}; +lookup(64050) -> {"Lo","L"}; +lookup(64051) -> {"Lo","L"}; +lookup(64052) -> {"Lo","L"}; +lookup(64053) -> {"Lo","L"}; +lookup(64054) -> {"Lo","L"}; +lookup(64055) -> {"Lo","L"}; +lookup(64056) -> {"Lo","L"}; +lookup(64057) -> {"Lo","L"}; +lookup(64058) -> {"Lo","L"}; +lookup(64059) -> {"Lo","L"}; +lookup(64060) -> {"Lo","L"}; +lookup(64061) -> {"Lo","L"}; +lookup(64062) -> {"Lo","L"}; +lookup(64063) -> {"Lo","L"}; +lookup(64064) -> {"Lo","L"}; +lookup(64065) -> {"Lo","L"}; +lookup(64066) -> {"Lo","L"}; +lookup(64067) -> {"Lo","L"}; +lookup(64068) -> {"Lo","L"}; +lookup(64069) -> {"Lo","L"}; +lookup(64070) -> {"Lo","L"}; +lookup(64071) -> {"Lo","L"}; +lookup(64072) -> {"Lo","L"}; +lookup(64073) -> {"Lo","L"}; +lookup(64074) -> {"Lo","L"}; +lookup(64075) -> {"Lo","L"}; +lookup(64076) -> {"Lo","L"}; +lookup(64077) -> {"Lo","L"}; +lookup(64078) -> {"Lo","L"}; +lookup(64079) -> {"Lo","L"}; +lookup(64080) -> {"Lo","L"}; +lookup(64081) -> {"Lo","L"}; +lookup(64082) -> {"Lo","L"}; +lookup(64083) -> {"Lo","L"}; +lookup(64084) -> {"Lo","L"}; +lookup(64085) -> {"Lo","L"}; +lookup(64086) -> {"Lo","L"}; +lookup(64087) -> {"Lo","L"}; +lookup(64088) -> {"Lo","L"}; 
+lookup(64089) -> {"Lo","L"}; +lookup(64090) -> {"Lo","L"}; +lookup(64091) -> {"Lo","L"}; +lookup(64092) -> {"Lo","L"}; +lookup(64093) -> {"Lo","L"}; +lookup(64094) -> {"Lo","L"}; +lookup(64095) -> {"Lo","L"}; +lookup(64096) -> {"Lo","L"}; +lookup(64097) -> {"Lo","L"}; +lookup(64098) -> {"Lo","L"}; +lookup(64099) -> {"Lo","L"}; +lookup(64100) -> {"Lo","L"}; +lookup(64101) -> {"Lo","L"}; +lookup(64102) -> {"Lo","L"}; +lookup(64103) -> {"Lo","L"}; +lookup(64104) -> {"Lo","L"}; +lookup(64105) -> {"Lo","L"}; +lookup(64106) -> {"Lo","L"}; +lookup(64107) -> {"Lo","L"}; +lookup(64108) -> {"Lo","L"}; +lookup(64109) -> {"Lo","L"}; +lookup(64112) -> {"Lo","L"}; +lookup(64113) -> {"Lo","L"}; +lookup(64114) -> {"Lo","L"}; +lookup(64115) -> {"Lo","L"}; +lookup(64116) -> {"Lo","L"}; +lookup(64117) -> {"Lo","L"}; +lookup(64118) -> {"Lo","L"}; +lookup(64119) -> {"Lo","L"}; +lookup(64120) -> {"Lo","L"}; +lookup(64121) -> {"Lo","L"}; +lookup(64122) -> {"Lo","L"}; +lookup(64123) -> {"Lo","L"}; +lookup(64124) -> {"Lo","L"}; +lookup(64125) -> {"Lo","L"}; +lookup(64126) -> {"Lo","L"}; +lookup(64127) -> {"Lo","L"}; +lookup(64128) -> {"Lo","L"}; +lookup(64129) -> {"Lo","L"}; +lookup(64130) -> {"Lo","L"}; +lookup(64131) -> {"Lo","L"}; +lookup(64132) -> {"Lo","L"}; +lookup(64133) -> {"Lo","L"}; +lookup(64134) -> {"Lo","L"}; +lookup(64135) -> {"Lo","L"}; +lookup(64136) -> {"Lo","L"}; +lookup(64137) -> {"Lo","L"}; +lookup(64138) -> {"Lo","L"}; +lookup(64139) -> {"Lo","L"}; +lookup(64140) -> {"Lo","L"}; +lookup(64141) -> {"Lo","L"}; +lookup(64142) -> {"Lo","L"}; +lookup(64143) -> {"Lo","L"}; +lookup(64144) -> {"Lo","L"}; +lookup(64145) -> {"Lo","L"}; +lookup(64146) -> {"Lo","L"}; +lookup(64147) -> {"Lo","L"}; +lookup(64148) -> {"Lo","L"}; +lookup(64149) -> {"Lo","L"}; +lookup(64150) -> {"Lo","L"}; +lookup(64151) -> {"Lo","L"}; +lookup(64152) -> {"Lo","L"}; +lookup(64153) -> {"Lo","L"}; +lookup(64154) -> {"Lo","L"}; +lookup(64155) -> {"Lo","L"}; +lookup(64156) -> {"Lo","L"}; +lookup(64157) -> 
{"Lo","L"}; +lookup(64158) -> {"Lo","L"}; +lookup(64159) -> {"Lo","L"}; +lookup(64160) -> {"Lo","L"}; +lookup(64161) -> {"Lo","L"}; +lookup(64162) -> {"Lo","L"}; +lookup(64163) -> {"Lo","L"}; +lookup(64164) -> {"Lo","L"}; +lookup(64165) -> {"Lo","L"}; +lookup(64166) -> {"Lo","L"}; +lookup(64167) -> {"Lo","L"}; +lookup(64168) -> {"Lo","L"}; +lookup(64169) -> {"Lo","L"}; +lookup(64170) -> {"Lo","L"}; +lookup(64171) -> {"Lo","L"}; +lookup(64172) -> {"Lo","L"}; +lookup(64173) -> {"Lo","L"}; +lookup(64174) -> {"Lo","L"}; +lookup(64175) -> {"Lo","L"}; +lookup(64176) -> {"Lo","L"}; +lookup(64177) -> {"Lo","L"}; +lookup(64178) -> {"Lo","L"}; +lookup(64179) -> {"Lo","L"}; +lookup(64180) -> {"Lo","L"}; +lookup(64181) -> {"Lo","L"}; +lookup(64182) -> {"Lo","L"}; +lookup(64183) -> {"Lo","L"}; +lookup(64184) -> {"Lo","L"}; +lookup(64185) -> {"Lo","L"}; +lookup(64186) -> {"Lo","L"}; +lookup(64187) -> {"Lo","L"}; +lookup(64188) -> {"Lo","L"}; +lookup(64189) -> {"Lo","L"}; +lookup(64190) -> {"Lo","L"}; +lookup(64191) -> {"Lo","L"}; +lookup(64192) -> {"Lo","L"}; +lookup(64193) -> {"Lo","L"}; +lookup(64194) -> {"Lo","L"}; +lookup(64195) -> {"Lo","L"}; +lookup(64196) -> {"Lo","L"}; +lookup(64197) -> {"Lo","L"}; +lookup(64198) -> {"Lo","L"}; +lookup(64199) -> {"Lo","L"}; +lookup(64200) -> {"Lo","L"}; +lookup(64201) -> {"Lo","L"}; +lookup(64202) -> {"Lo","L"}; +lookup(64203) -> {"Lo","L"}; +lookup(64204) -> {"Lo","L"}; +lookup(64205) -> {"Lo","L"}; +lookup(64206) -> {"Lo","L"}; +lookup(64207) -> {"Lo","L"}; +lookup(64208) -> {"Lo","L"}; +lookup(64209) -> {"Lo","L"}; +lookup(64210) -> {"Lo","L"}; +lookup(64211) -> {"Lo","L"}; +lookup(64212) -> {"Lo","L"}; +lookup(64213) -> {"Lo","L"}; +lookup(64214) -> {"Lo","L"}; +lookup(64215) -> {"Lo","L"}; +lookup(64216) -> {"Lo","L"}; +lookup(64217) -> {"Lo","L"}; +lookup(64256) -> {"Ll","L"}; +lookup(64257) -> {"Ll","L"}; +lookup(64258) -> {"Ll","L"}; +lookup(64259) -> {"Ll","L"}; +lookup(64260) -> {"Ll","L"}; +lookup(64261) -> {"Ll","L"}; 
+lookup(64262) -> {"Ll","L"}; +lookup(64275) -> {"Ll","L"}; +lookup(64276) -> {"Ll","L"}; +lookup(64277) -> {"Ll","L"}; +lookup(64278) -> {"Ll","L"}; +lookup(64279) -> {"Ll","L"}; +lookup(64285) -> {"Lo","R"}; +lookup(64286) -> {"Mn","NSM"}; +lookup(64287) -> {"Lo","R"}; +lookup(64288) -> {"Lo","R"}; +lookup(64289) -> {"Lo","R"}; +lookup(64290) -> {"Lo","R"}; +lookup(64291) -> {"Lo","R"}; +lookup(64292) -> {"Lo","R"}; +lookup(64293) -> {"Lo","R"}; +lookup(64294) -> {"Lo","R"}; +lookup(64295) -> {"Lo","R"}; +lookup(64296) -> {"Lo","R"}; +lookup(64297) -> {"Sm","ES"}; +lookup(64298) -> {"Lo","R"}; +lookup(64299) -> {"Lo","R"}; +lookup(64300) -> {"Lo","R"}; +lookup(64301) -> {"Lo","R"}; +lookup(64302) -> {"Lo","R"}; +lookup(64303) -> {"Lo","R"}; +lookup(64304) -> {"Lo","R"}; +lookup(64305) -> {"Lo","R"}; +lookup(64306) -> {"Lo","R"}; +lookup(64307) -> {"Lo","R"}; +lookup(64308) -> {"Lo","R"}; +lookup(64309) -> {"Lo","R"}; +lookup(64310) -> {"Lo","R"}; +lookup(64312) -> {"Lo","R"}; +lookup(64313) -> {"Lo","R"}; +lookup(64314) -> {"Lo","R"}; +lookup(64315) -> {"Lo","R"}; +lookup(64316) -> {"Lo","R"}; +lookup(64318) -> {"Lo","R"}; +lookup(64320) -> {"Lo","R"}; +lookup(64321) -> {"Lo","R"}; +lookup(64323) -> {"Lo","R"}; +lookup(64324) -> {"Lo","R"}; +lookup(64326) -> {"Lo","R"}; +lookup(64327) -> {"Lo","R"}; +lookup(64328) -> {"Lo","R"}; +lookup(64329) -> {"Lo","R"}; +lookup(64330) -> {"Lo","R"}; +lookup(64331) -> {"Lo","R"}; +lookup(64332) -> {"Lo","R"}; +lookup(64333) -> {"Lo","R"}; +lookup(64334) -> {"Lo","R"}; +lookup(64335) -> {"Lo","R"}; +lookup(64336) -> {"Lo","AL"}; +lookup(64337) -> {"Lo","AL"}; +lookup(64338) -> {"Lo","AL"}; +lookup(64339) -> {"Lo","AL"}; +lookup(64340) -> {"Lo","AL"}; +lookup(64341) -> {"Lo","AL"}; +lookup(64342) -> {"Lo","AL"}; +lookup(64343) -> {"Lo","AL"}; +lookup(64344) -> {"Lo","AL"}; +lookup(64345) -> {"Lo","AL"}; +lookup(64346) -> {"Lo","AL"}; +lookup(64347) -> {"Lo","AL"}; +lookup(64348) -> {"Lo","AL"}; +lookup(64349) -> {"Lo","AL"}; 
+lookup(64350) -> {"Lo","AL"}; +lookup(64351) -> {"Lo","AL"}; +lookup(64352) -> {"Lo","AL"}; +lookup(64353) -> {"Lo","AL"}; +lookup(64354) -> {"Lo","AL"}; +lookup(64355) -> {"Lo","AL"}; +lookup(64356) -> {"Lo","AL"}; +lookup(64357) -> {"Lo","AL"}; +lookup(64358) -> {"Lo","AL"}; +lookup(64359) -> {"Lo","AL"}; +lookup(64360) -> {"Lo","AL"}; +lookup(64361) -> {"Lo","AL"}; +lookup(64362) -> {"Lo","AL"}; +lookup(64363) -> {"Lo","AL"}; +lookup(64364) -> {"Lo","AL"}; +lookup(64365) -> {"Lo","AL"}; +lookup(64366) -> {"Lo","AL"}; +lookup(64367) -> {"Lo","AL"}; +lookup(64368) -> {"Lo","AL"}; +lookup(64369) -> {"Lo","AL"}; +lookup(64370) -> {"Lo","AL"}; +lookup(64371) -> {"Lo","AL"}; +lookup(64372) -> {"Lo","AL"}; +lookup(64373) -> {"Lo","AL"}; +lookup(64374) -> {"Lo","AL"}; +lookup(64375) -> {"Lo","AL"}; +lookup(64376) -> {"Lo","AL"}; +lookup(64377) -> {"Lo","AL"}; +lookup(64378) -> {"Lo","AL"}; +lookup(64379) -> {"Lo","AL"}; +lookup(64380) -> {"Lo","AL"}; +lookup(64381) -> {"Lo","AL"}; +lookup(64382) -> {"Lo","AL"}; +lookup(64383) -> {"Lo","AL"}; +lookup(64384) -> {"Lo","AL"}; +lookup(64385) -> {"Lo","AL"}; +lookup(64386) -> {"Lo","AL"}; +lookup(64387) -> {"Lo","AL"}; +lookup(64388) -> {"Lo","AL"}; +lookup(64389) -> {"Lo","AL"}; +lookup(64390) -> {"Lo","AL"}; +lookup(64391) -> {"Lo","AL"}; +lookup(64392) -> {"Lo","AL"}; +lookup(64393) -> {"Lo","AL"}; +lookup(64394) -> {"Lo","AL"}; +lookup(64395) -> {"Lo","AL"}; +lookup(64396) -> {"Lo","AL"}; +lookup(64397) -> {"Lo","AL"}; +lookup(64398) -> {"Lo","AL"}; +lookup(64399) -> {"Lo","AL"}; +lookup(64400) -> {"Lo","AL"}; +lookup(64401) -> {"Lo","AL"}; +lookup(64402) -> {"Lo","AL"}; +lookup(64403) -> {"Lo","AL"}; +lookup(64404) -> {"Lo","AL"}; +lookup(64405) -> {"Lo","AL"}; +lookup(64406) -> {"Lo","AL"}; +lookup(64407) -> {"Lo","AL"}; +lookup(64408) -> {"Lo","AL"}; +lookup(64409) -> {"Lo","AL"}; +lookup(64410) -> {"Lo","AL"}; +lookup(64411) -> {"Lo","AL"}; +lookup(64412) -> {"Lo","AL"}; +lookup(64413) -> {"Lo","AL"}; +lookup(64414) 
-> {"Lo","AL"}; +lookup(64415) -> {"Lo","AL"}; +lookup(64416) -> {"Lo","AL"}; +lookup(64417) -> {"Lo","AL"}; +lookup(64418) -> {"Lo","AL"}; +lookup(64419) -> {"Lo","AL"}; +lookup(64420) -> {"Lo","AL"}; +lookup(64421) -> {"Lo","AL"}; +lookup(64422) -> {"Lo","AL"}; +lookup(64423) -> {"Lo","AL"}; +lookup(64424) -> {"Lo","AL"}; +lookup(64425) -> {"Lo","AL"}; +lookup(64426) -> {"Lo","AL"}; +lookup(64427) -> {"Lo","AL"}; +lookup(64428) -> {"Lo","AL"}; +lookup(64429) -> {"Lo","AL"}; +lookup(64430) -> {"Lo","AL"}; +lookup(64431) -> {"Lo","AL"}; +lookup(64432) -> {"Lo","AL"}; +lookup(64433) -> {"Lo","AL"}; +lookup(64434) -> {"Sk","AL"}; +lookup(64435) -> {"Sk","AL"}; +lookup(64436) -> {"Sk","AL"}; +lookup(64437) -> {"Sk","AL"}; +lookup(64438) -> {"Sk","AL"}; +lookup(64439) -> {"Sk","AL"}; +lookup(64440) -> {"Sk","AL"}; +lookup(64441) -> {"Sk","AL"}; +lookup(64442) -> {"Sk","AL"}; +lookup(64443) -> {"Sk","AL"}; +lookup(64444) -> {"Sk","AL"}; +lookup(64445) -> {"Sk","AL"}; +lookup(64446) -> {"Sk","AL"}; +lookup(64447) -> {"Sk","AL"}; +lookup(64448) -> {"Sk","AL"}; +lookup(64449) -> {"Sk","AL"}; +lookup(64467) -> {"Lo","AL"}; +lookup(64468) -> {"Lo","AL"}; +lookup(64469) -> {"Lo","AL"}; +lookup(64470) -> {"Lo","AL"}; +lookup(64471) -> {"Lo","AL"}; +lookup(64472) -> {"Lo","AL"}; +lookup(64473) -> {"Lo","AL"}; +lookup(64474) -> {"Lo","AL"}; +lookup(64475) -> {"Lo","AL"}; +lookup(64476) -> {"Lo","AL"}; +lookup(64477) -> {"Lo","AL"}; +lookup(64478) -> {"Lo","AL"}; +lookup(64479) -> {"Lo","AL"}; +lookup(64480) -> {"Lo","AL"}; +lookup(64481) -> {"Lo","AL"}; +lookup(64482) -> {"Lo","AL"}; +lookup(64483) -> {"Lo","AL"}; +lookup(64484) -> {"Lo","AL"}; +lookup(64485) -> {"Lo","AL"}; +lookup(64486) -> {"Lo","AL"}; +lookup(64487) -> {"Lo","AL"}; +lookup(64488) -> {"Lo","AL"}; +lookup(64489) -> {"Lo","AL"}; +lookup(64490) -> {"Lo","AL"}; +lookup(64491) -> {"Lo","AL"}; +lookup(64492) -> {"Lo","AL"}; +lookup(64493) -> {"Lo","AL"}; +lookup(64494) -> {"Lo","AL"}; +lookup(64495) -> {"Lo","AL"}; 
+lookup(64496) -> {"Lo","AL"}; +lookup(64497) -> {"Lo","AL"}; +lookup(64498) -> {"Lo","AL"}; +lookup(64499) -> {"Lo","AL"}; +lookup(64500) -> {"Lo","AL"}; +lookup(64501) -> {"Lo","AL"}; +lookup(64502) -> {"Lo","AL"}; +lookup(64503) -> {"Lo","AL"}; +lookup(64504) -> {"Lo","AL"}; +lookup(64505) -> {"Lo","AL"}; +lookup(64506) -> {"Lo","AL"}; +lookup(64507) -> {"Lo","AL"}; +lookup(64508) -> {"Lo","AL"}; +lookup(64509) -> {"Lo","AL"}; +lookup(64510) -> {"Lo","AL"}; +lookup(64511) -> {"Lo","AL"}; +lookup(64512) -> {"Lo","AL"}; +lookup(64513) -> {"Lo","AL"}; +lookup(64514) -> {"Lo","AL"}; +lookup(64515) -> {"Lo","AL"}; +lookup(64516) -> {"Lo","AL"}; +lookup(64517) -> {"Lo","AL"}; +lookup(64518) -> {"Lo","AL"}; +lookup(64519) -> {"Lo","AL"}; +lookup(64520) -> {"Lo","AL"}; +lookup(64521) -> {"Lo","AL"}; +lookup(64522) -> {"Lo","AL"}; +lookup(64523) -> {"Lo","AL"}; +lookup(64524) -> {"Lo","AL"}; +lookup(64525) -> {"Lo","AL"}; +lookup(64526) -> {"Lo","AL"}; +lookup(64527) -> {"Lo","AL"}; +lookup(64528) -> {"Lo","AL"}; +lookup(64529) -> {"Lo","AL"}; +lookup(64530) -> {"Lo","AL"}; +lookup(64531) -> {"Lo","AL"}; +lookup(64532) -> {"Lo","AL"}; +lookup(64533) -> {"Lo","AL"}; +lookup(64534) -> {"Lo","AL"}; +lookup(64535) -> {"Lo","AL"}; +lookup(64536) -> {"Lo","AL"}; +lookup(64537) -> {"Lo","AL"}; +lookup(64538) -> {"Lo","AL"}; +lookup(64539) -> {"Lo","AL"}; +lookup(64540) -> {"Lo","AL"}; +lookup(64541) -> {"Lo","AL"}; +lookup(64542) -> {"Lo","AL"}; +lookup(64543) -> {"Lo","AL"}; +lookup(64544) -> {"Lo","AL"}; +lookup(64545) -> {"Lo","AL"}; +lookup(64546) -> {"Lo","AL"}; +lookup(64547) -> {"Lo","AL"}; +lookup(64548) -> {"Lo","AL"}; +lookup(64549) -> {"Lo","AL"}; +lookup(64550) -> {"Lo","AL"}; +lookup(64551) -> {"Lo","AL"}; +lookup(64552) -> {"Lo","AL"}; +lookup(64553) -> {"Lo","AL"}; +lookup(64554) -> {"Lo","AL"}; +lookup(64555) -> {"Lo","AL"}; +lookup(64556) -> {"Lo","AL"}; +lookup(64557) -> {"Lo","AL"}; +lookup(64558) -> {"Lo","AL"}; +lookup(64559) -> {"Lo","AL"}; +lookup(64560) 
-> {"Lo","AL"}; +lookup(64561) -> {"Lo","AL"}; +lookup(64562) -> {"Lo","AL"}; +lookup(64563) -> {"Lo","AL"}; +lookup(64564) -> {"Lo","AL"}; +lookup(64565) -> {"Lo","AL"}; +lookup(64566) -> {"Lo","AL"}; +lookup(64567) -> {"Lo","AL"}; +lookup(64568) -> {"Lo","AL"}; +lookup(64569) -> {"Lo","AL"}; +lookup(64570) -> {"Lo","AL"}; +lookup(64571) -> {"Lo","AL"}; +lookup(64572) -> {"Lo","AL"}; +lookup(64573) -> {"Lo","AL"}; +lookup(64574) -> {"Lo","AL"}; +lookup(64575) -> {"Lo","AL"}; +lookup(64576) -> {"Lo","AL"}; +lookup(64577) -> {"Lo","AL"}; +lookup(64578) -> {"Lo","AL"}; +lookup(64579) -> {"Lo","AL"}; +lookup(64580) -> {"Lo","AL"}; +lookup(64581) -> {"Lo","AL"}; +lookup(64582) -> {"Lo","AL"}; +lookup(64583) -> {"Lo","AL"}; +lookup(64584) -> {"Lo","AL"}; +lookup(64585) -> {"Lo","AL"}; +lookup(64586) -> {"Lo","AL"}; +lookup(64587) -> {"Lo","AL"}; +lookup(64588) -> {"Lo","AL"}; +lookup(64589) -> {"Lo","AL"}; +lookup(64590) -> {"Lo","AL"}; +lookup(64591) -> {"Lo","AL"}; +lookup(64592) -> {"Lo","AL"}; +lookup(64593) -> {"Lo","AL"}; +lookup(64594) -> {"Lo","AL"}; +lookup(64595) -> {"Lo","AL"}; +lookup(64596) -> {"Lo","AL"}; +lookup(64597) -> {"Lo","AL"}; +lookup(64598) -> {"Lo","AL"}; +lookup(64599) -> {"Lo","AL"}; +lookup(64600) -> {"Lo","AL"}; +lookup(64601) -> {"Lo","AL"}; +lookup(64602) -> {"Lo","AL"}; +lookup(64603) -> {"Lo","AL"}; +lookup(64604) -> {"Lo","AL"}; +lookup(64605) -> {"Lo","AL"}; +lookup(64606) -> {"Lo","AL"}; +lookup(64607) -> {"Lo","AL"}; +lookup(64608) -> {"Lo","AL"}; +lookup(64609) -> {"Lo","AL"}; +lookup(64610) -> {"Lo","AL"}; +lookup(64611) -> {"Lo","AL"}; +lookup(64612) -> {"Lo","AL"}; +lookup(64613) -> {"Lo","AL"}; +lookup(64614) -> {"Lo","AL"}; +lookup(64615) -> {"Lo","AL"}; +lookup(64616) -> {"Lo","AL"}; +lookup(64617) -> {"Lo","AL"}; +lookup(64618) -> {"Lo","AL"}; +lookup(64619) -> {"Lo","AL"}; +lookup(64620) -> {"Lo","AL"}; +lookup(64621) -> {"Lo","AL"}; +lookup(64622) -> {"Lo","AL"}; +lookup(64623) -> {"Lo","AL"}; +lookup(64624) -> {"Lo","AL"}; 
+lookup(64625) -> {"Lo","AL"}; +lookup(64626) -> {"Lo","AL"}; +lookup(64627) -> {"Lo","AL"}; +lookup(64628) -> {"Lo","AL"}; +lookup(64629) -> {"Lo","AL"}; +lookup(64630) -> {"Lo","AL"}; +lookup(64631) -> {"Lo","AL"}; +lookup(64632) -> {"Lo","AL"}; +lookup(64633) -> {"Lo","AL"}; +lookup(64634) -> {"Lo","AL"}; +lookup(64635) -> {"Lo","AL"}; +lookup(64636) -> {"Lo","AL"}; +lookup(64637) -> {"Lo","AL"}; +lookup(64638) -> {"Lo","AL"}; +lookup(64639) -> {"Lo","AL"}; +lookup(64640) -> {"Lo","AL"}; +lookup(64641) -> {"Lo","AL"}; +lookup(64642) -> {"Lo","AL"}; +lookup(64643) -> {"Lo","AL"}; +lookup(64644) -> {"Lo","AL"}; +lookup(64645) -> {"Lo","AL"}; +lookup(64646) -> {"Lo","AL"}; +lookup(64647) -> {"Lo","AL"}; +lookup(64648) -> {"Lo","AL"}; +lookup(64649) -> {"Lo","AL"}; +lookup(64650) -> {"Lo","AL"}; +lookup(64651) -> {"Lo","AL"}; +lookup(64652) -> {"Lo","AL"}; +lookup(64653) -> {"Lo","AL"}; +lookup(64654) -> {"Lo","AL"}; +lookup(64655) -> {"Lo","AL"}; +lookup(64656) -> {"Lo","AL"}; +lookup(64657) -> {"Lo","AL"}; +lookup(64658) -> {"Lo","AL"}; +lookup(64659) -> {"Lo","AL"}; +lookup(64660) -> {"Lo","AL"}; +lookup(64661) -> {"Lo","AL"}; +lookup(64662) -> {"Lo","AL"}; +lookup(64663) -> {"Lo","AL"}; +lookup(64664) -> {"Lo","AL"}; +lookup(64665) -> {"Lo","AL"}; +lookup(64666) -> {"Lo","AL"}; +lookup(64667) -> {"Lo","AL"}; +lookup(64668) -> {"Lo","AL"}; +lookup(64669) -> {"Lo","AL"}; +lookup(64670) -> {"Lo","AL"}; +lookup(64671) -> {"Lo","AL"}; +lookup(64672) -> {"Lo","AL"}; +lookup(64673) -> {"Lo","AL"}; +lookup(64674) -> {"Lo","AL"}; +lookup(64675) -> {"Lo","AL"}; +lookup(64676) -> {"Lo","AL"}; +lookup(64677) -> {"Lo","AL"}; +lookup(64678) -> {"Lo","AL"}; +lookup(64679) -> {"Lo","AL"}; +lookup(64680) -> {"Lo","AL"}; +lookup(64681) -> {"Lo","AL"}; +lookup(64682) -> {"Lo","AL"}; +lookup(64683) -> {"Lo","AL"}; +lookup(64684) -> {"Lo","AL"}; +lookup(64685) -> {"Lo","AL"}; +lookup(64686) -> {"Lo","AL"}; +lookup(64687) -> {"Lo","AL"}; +lookup(64688) -> {"Lo","AL"}; +lookup(64689) 
-> {"Lo","AL"}; +lookup(64690) -> {"Lo","AL"}; +lookup(64691) -> {"Lo","AL"}; +lookup(64692) -> {"Lo","AL"}; +lookup(64693) -> {"Lo","AL"}; +lookup(64694) -> {"Lo","AL"}; +lookup(64695) -> {"Lo","AL"}; +lookup(64696) -> {"Lo","AL"}; +lookup(64697) -> {"Lo","AL"}; +lookup(64698) -> {"Lo","AL"}; +lookup(64699) -> {"Lo","AL"}; +lookup(64700) -> {"Lo","AL"}; +lookup(64701) -> {"Lo","AL"}; +lookup(64702) -> {"Lo","AL"}; +lookup(64703) -> {"Lo","AL"}; +lookup(64704) -> {"Lo","AL"}; +lookup(64705) -> {"Lo","AL"}; +lookup(64706) -> {"Lo","AL"}; +lookup(64707) -> {"Lo","AL"}; +lookup(64708) -> {"Lo","AL"}; +lookup(64709) -> {"Lo","AL"}; +lookup(64710) -> {"Lo","AL"}; +lookup(64711) -> {"Lo","AL"}; +lookup(64712) -> {"Lo","AL"}; +lookup(64713) -> {"Lo","AL"}; +lookup(64714) -> {"Lo","AL"}; +lookup(64715) -> {"Lo","AL"}; +lookup(64716) -> {"Lo","AL"}; +lookup(64717) -> {"Lo","AL"}; +lookup(64718) -> {"Lo","AL"}; +lookup(64719) -> {"Lo","AL"}; +lookup(64720) -> {"Lo","AL"}; +lookup(64721) -> {"Lo","AL"}; +lookup(64722) -> {"Lo","AL"}; +lookup(64723) -> {"Lo","AL"}; +lookup(64724) -> {"Lo","AL"}; +lookup(64725) -> {"Lo","AL"}; +lookup(64726) -> {"Lo","AL"}; +lookup(64727) -> {"Lo","AL"}; +lookup(64728) -> {"Lo","AL"}; +lookup(64729) -> {"Lo","AL"}; +lookup(64730) -> {"Lo","AL"}; +lookup(64731) -> {"Lo","AL"}; +lookup(64732) -> {"Lo","AL"}; +lookup(64733) -> {"Lo","AL"}; +lookup(64734) -> {"Lo","AL"}; +lookup(64735) -> {"Lo","AL"}; +lookup(64736) -> {"Lo","AL"}; +lookup(64737) -> {"Lo","AL"}; +lookup(64738) -> {"Lo","AL"}; +lookup(64739) -> {"Lo","AL"}; +lookup(64740) -> {"Lo","AL"}; +lookup(64741) -> {"Lo","AL"}; +lookup(64742) -> {"Lo","AL"}; +lookup(64743) -> {"Lo","AL"}; +lookup(64744) -> {"Lo","AL"}; +lookup(64745) -> {"Lo","AL"}; +lookup(64746) -> {"Lo","AL"}; +lookup(64747) -> {"Lo","AL"}; +lookup(64748) -> {"Lo","AL"}; +lookup(64749) -> {"Lo","AL"}; +lookup(64750) -> {"Lo","AL"}; +lookup(64751) -> {"Lo","AL"}; +lookup(64752) -> {"Lo","AL"}; +lookup(64753) -> {"Lo","AL"}; 
+lookup(64754) -> {"Lo","AL"}; +lookup(64755) -> {"Lo","AL"}; +lookup(64756) -> {"Lo","AL"}; +lookup(64757) -> {"Lo","AL"}; +lookup(64758) -> {"Lo","AL"}; +lookup(64759) -> {"Lo","AL"}; +lookup(64760) -> {"Lo","AL"}; +lookup(64761) -> {"Lo","AL"}; +lookup(64762) -> {"Lo","AL"}; +lookup(64763) -> {"Lo","AL"}; +lookup(64764) -> {"Lo","AL"}; +lookup(64765) -> {"Lo","AL"}; +lookup(64766) -> {"Lo","AL"}; +lookup(64767) -> {"Lo","AL"}; +lookup(64768) -> {"Lo","AL"}; +lookup(64769) -> {"Lo","AL"}; +lookup(64770) -> {"Lo","AL"}; +lookup(64771) -> {"Lo","AL"}; +lookup(64772) -> {"Lo","AL"}; +lookup(64773) -> {"Lo","AL"}; +lookup(64774) -> {"Lo","AL"}; +lookup(64775) -> {"Lo","AL"}; +lookup(64776) -> {"Lo","AL"}; +lookup(64777) -> {"Lo","AL"}; +lookup(64778) -> {"Lo","AL"}; +lookup(64779) -> {"Lo","AL"}; +lookup(64780) -> {"Lo","AL"}; +lookup(64781) -> {"Lo","AL"}; +lookup(64782) -> {"Lo","AL"}; +lookup(64783) -> {"Lo","AL"}; +lookup(64784) -> {"Lo","AL"}; +lookup(64785) -> {"Lo","AL"}; +lookup(64786) -> {"Lo","AL"}; +lookup(64787) -> {"Lo","AL"}; +lookup(64788) -> {"Lo","AL"}; +lookup(64789) -> {"Lo","AL"}; +lookup(64790) -> {"Lo","AL"}; +lookup(64791) -> {"Lo","AL"}; +lookup(64792) -> {"Lo","AL"}; +lookup(64793) -> {"Lo","AL"}; +lookup(64794) -> {"Lo","AL"}; +lookup(64795) -> {"Lo","AL"}; +lookup(64796) -> {"Lo","AL"}; +lookup(64797) -> {"Lo","AL"}; +lookup(64798) -> {"Lo","AL"}; +lookup(64799) -> {"Lo","AL"}; +lookup(64800) -> {"Lo","AL"}; +lookup(64801) -> {"Lo","AL"}; +lookup(64802) -> {"Lo","AL"}; +lookup(64803) -> {"Lo","AL"}; +lookup(64804) -> {"Lo","AL"}; +lookup(64805) -> {"Lo","AL"}; +lookup(64806) -> {"Lo","AL"}; +lookup(64807) -> {"Lo","AL"}; +lookup(64808) -> {"Lo","AL"}; +lookup(64809) -> {"Lo","AL"}; +lookup(64810) -> {"Lo","AL"}; +lookup(64811) -> {"Lo","AL"}; +lookup(64812) -> {"Lo","AL"}; +lookup(64813) -> {"Lo","AL"}; +lookup(64814) -> {"Lo","AL"}; +lookup(64815) -> {"Lo","AL"}; +lookup(64816) -> {"Lo","AL"}; +lookup(64817) -> {"Lo","AL"}; +lookup(64818) 
-> {"Lo","AL"}; +lookup(64819) -> {"Lo","AL"}; +lookup(64820) -> {"Lo","AL"}; +lookup(64821) -> {"Lo","AL"}; +lookup(64822) -> {"Lo","AL"}; +lookup(64823) -> {"Lo","AL"}; +lookup(64824) -> {"Lo","AL"}; +lookup(64825) -> {"Lo","AL"}; +lookup(64826) -> {"Lo","AL"}; +lookup(64827) -> {"Lo","AL"}; +lookup(64828) -> {"Lo","AL"}; +lookup(64829) -> {"Lo","AL"}; +lookup(64830) -> {"Pe","ON"}; +lookup(64831) -> {"Ps","ON"}; +lookup(64848) -> {"Lo","AL"}; +lookup(64849) -> {"Lo","AL"}; +lookup(64850) -> {"Lo","AL"}; +lookup(64851) -> {"Lo","AL"}; +lookup(64852) -> {"Lo","AL"}; +lookup(64853) -> {"Lo","AL"}; +lookup(64854) -> {"Lo","AL"}; +lookup(64855) -> {"Lo","AL"}; +lookup(64856) -> {"Lo","AL"}; +lookup(64857) -> {"Lo","AL"}; +lookup(64858) -> {"Lo","AL"}; +lookup(64859) -> {"Lo","AL"}; +lookup(64860) -> {"Lo","AL"}; +lookup(64861) -> {"Lo","AL"}; +lookup(64862) -> {"Lo","AL"}; +lookup(64863) -> {"Lo","AL"}; +lookup(64864) -> {"Lo","AL"}; +lookup(64865) -> {"Lo","AL"}; +lookup(64866) -> {"Lo","AL"}; +lookup(64867) -> {"Lo","AL"}; +lookup(64868) -> {"Lo","AL"}; +lookup(64869) -> {"Lo","AL"}; +lookup(64870) -> {"Lo","AL"}; +lookup(64871) -> {"Lo","AL"}; +lookup(64872) -> {"Lo","AL"}; +lookup(64873) -> {"Lo","AL"}; +lookup(64874) -> {"Lo","AL"}; +lookup(64875) -> {"Lo","AL"}; +lookup(64876) -> {"Lo","AL"}; +lookup(64877) -> {"Lo","AL"}; +lookup(64878) -> {"Lo","AL"}; +lookup(64879) -> {"Lo","AL"}; +lookup(64880) -> {"Lo","AL"}; +lookup(64881) -> {"Lo","AL"}; +lookup(64882) -> {"Lo","AL"}; +lookup(64883) -> {"Lo","AL"}; +lookup(64884) -> {"Lo","AL"}; +lookup(64885) -> {"Lo","AL"}; +lookup(64886) -> {"Lo","AL"}; +lookup(64887) -> {"Lo","AL"}; +lookup(64888) -> {"Lo","AL"}; +lookup(64889) -> {"Lo","AL"}; +lookup(64890) -> {"Lo","AL"}; +lookup(64891) -> {"Lo","AL"}; +lookup(64892) -> {"Lo","AL"}; +lookup(64893) -> {"Lo","AL"}; +lookup(64894) -> {"Lo","AL"}; +lookup(64895) -> {"Lo","AL"}; +lookup(64896) -> {"Lo","AL"}; +lookup(64897) -> {"Lo","AL"}; +lookup(64898) -> {"Lo","AL"}; 
+lookup(64899) -> {"Lo","AL"}; +lookup(64900) -> {"Lo","AL"}; +lookup(64901) -> {"Lo","AL"}; +lookup(64902) -> {"Lo","AL"}; +lookup(64903) -> {"Lo","AL"}; +lookup(64904) -> {"Lo","AL"}; +lookup(64905) -> {"Lo","AL"}; +lookup(64906) -> {"Lo","AL"}; +lookup(64907) -> {"Lo","AL"}; +lookup(64908) -> {"Lo","AL"}; +lookup(64909) -> {"Lo","AL"}; +lookup(64910) -> {"Lo","AL"}; +lookup(64911) -> {"Lo","AL"}; +lookup(64914) -> {"Lo","AL"}; +lookup(64915) -> {"Lo","AL"}; +lookup(64916) -> {"Lo","AL"}; +lookup(64917) -> {"Lo","AL"}; +lookup(64918) -> {"Lo","AL"}; +lookup(64919) -> {"Lo","AL"}; +lookup(64920) -> {"Lo","AL"}; +lookup(64921) -> {"Lo","AL"}; +lookup(64922) -> {"Lo","AL"}; +lookup(64923) -> {"Lo","AL"}; +lookup(64924) -> {"Lo","AL"}; +lookup(64925) -> {"Lo","AL"}; +lookup(64926) -> {"Lo","AL"}; +lookup(64927) -> {"Lo","AL"}; +lookup(64928) -> {"Lo","AL"}; +lookup(64929) -> {"Lo","AL"}; +lookup(64930) -> {"Lo","AL"}; +lookup(64931) -> {"Lo","AL"}; +lookup(64932) -> {"Lo","AL"}; +lookup(64933) -> {"Lo","AL"}; +lookup(64934) -> {"Lo","AL"}; +lookup(64935) -> {"Lo","AL"}; +lookup(64936) -> {"Lo","AL"}; +lookup(64937) -> {"Lo","AL"}; +lookup(64938) -> {"Lo","AL"}; +lookup(64939) -> {"Lo","AL"}; +lookup(64940) -> {"Lo","AL"}; +lookup(64941) -> {"Lo","AL"}; +lookup(64942) -> {"Lo","AL"}; +lookup(64943) -> {"Lo","AL"}; +lookup(64944) -> {"Lo","AL"}; +lookup(64945) -> {"Lo","AL"}; +lookup(64946) -> {"Lo","AL"}; +lookup(64947) -> {"Lo","AL"}; +lookup(64948) -> {"Lo","AL"}; +lookup(64949) -> {"Lo","AL"}; +lookup(64950) -> {"Lo","AL"}; +lookup(64951) -> {"Lo","AL"}; +lookup(64952) -> {"Lo","AL"}; +lookup(64953) -> {"Lo","AL"}; +lookup(64954) -> {"Lo","AL"}; +lookup(64955) -> {"Lo","AL"}; +lookup(64956) -> {"Lo","AL"}; +lookup(64957) -> {"Lo","AL"}; +lookup(64958) -> {"Lo","AL"}; +lookup(64959) -> {"Lo","AL"}; +lookup(64960) -> {"Lo","AL"}; +lookup(64961) -> {"Lo","AL"}; +lookup(64962) -> {"Lo","AL"}; +lookup(64963) -> {"Lo","AL"}; +lookup(64964) -> {"Lo","AL"}; +lookup(64965) 
-> {"Lo","AL"}; +lookup(64966) -> {"Lo","AL"}; +lookup(64967) -> {"Lo","AL"}; +lookup(65008) -> {"Lo","AL"}; +lookup(65009) -> {"Lo","AL"}; +lookup(65010) -> {"Lo","AL"}; +lookup(65011) -> {"Lo","AL"}; +lookup(65012) -> {"Lo","AL"}; +lookup(65013) -> {"Lo","AL"}; +lookup(65014) -> {"Lo","AL"}; +lookup(65015) -> {"Lo","AL"}; +lookup(65016) -> {"Lo","AL"}; +lookup(65017) -> {"Lo","AL"}; +lookup(65018) -> {"Lo","AL"}; +lookup(65019) -> {"Lo","AL"}; +lookup(65020) -> {"Sc","AL"}; +lookup(65021) -> {"So","ON"}; +lookup(65024) -> {"Mn","NSM"}; +lookup(65025) -> {"Mn","NSM"}; +lookup(65026) -> {"Mn","NSM"}; +lookup(65027) -> {"Mn","NSM"}; +lookup(65028) -> {"Mn","NSM"}; +lookup(65029) -> {"Mn","NSM"}; +lookup(65030) -> {"Mn","NSM"}; +lookup(65031) -> {"Mn","NSM"}; +lookup(65032) -> {"Mn","NSM"}; +lookup(65033) -> {"Mn","NSM"}; +lookup(65034) -> {"Mn","NSM"}; +lookup(65035) -> {"Mn","NSM"}; +lookup(65036) -> {"Mn","NSM"}; +lookup(65037) -> {"Mn","NSM"}; +lookup(65038) -> {"Mn","NSM"}; +lookup(65039) -> {"Mn","NSM"}; +lookup(65040) -> {"Po","ON"}; +lookup(65041) -> {"Po","ON"}; +lookup(65042) -> {"Po","ON"}; +lookup(65043) -> {"Po","ON"}; +lookup(65044) -> {"Po","ON"}; +lookup(65045) -> {"Po","ON"}; +lookup(65046) -> {"Po","ON"}; +lookup(65047) -> {"Ps","ON"}; +lookup(65048) -> {"Pe","ON"}; +lookup(65049) -> {"Po","ON"}; +lookup(65056) -> {"Mn","NSM"}; +lookup(65057) -> {"Mn","NSM"}; +lookup(65058) -> {"Mn","NSM"}; +lookup(65059) -> {"Mn","NSM"}; +lookup(65060) -> {"Mn","NSM"}; +lookup(65061) -> {"Mn","NSM"}; +lookup(65062) -> {"Mn","NSM"}; +lookup(65063) -> {"Mn","NSM"}; +lookup(65064) -> {"Mn","NSM"}; +lookup(65065) -> {"Mn","NSM"}; +lookup(65066) -> {"Mn","NSM"}; +lookup(65067) -> {"Mn","NSM"}; +lookup(65068) -> {"Mn","NSM"}; +lookup(65069) -> {"Mn","NSM"}; +lookup(65070) -> {"Mn","NSM"}; +lookup(65071) -> {"Mn","NSM"}; +lookup(65072) -> {"Po","ON"}; +lookup(65073) -> {"Pd","ON"}; +lookup(65074) -> {"Pd","ON"}; +lookup(65075) -> {"Pc","ON"}; +lookup(65076) -> 
{"Pc","ON"}; +lookup(65077) -> {"Ps","ON"}; +lookup(65078) -> {"Pe","ON"}; +lookup(65079) -> {"Ps","ON"}; +lookup(65080) -> {"Pe","ON"}; +lookup(65081) -> {"Ps","ON"}; +lookup(65082) -> {"Pe","ON"}; +lookup(65083) -> {"Ps","ON"}; +lookup(65084) -> {"Pe","ON"}; +lookup(65085) -> {"Ps","ON"}; +lookup(65086) -> {"Pe","ON"}; +lookup(65087) -> {"Ps","ON"}; +lookup(65088) -> {"Pe","ON"}; +lookup(65089) -> {"Ps","ON"}; +lookup(65090) -> {"Pe","ON"}; +lookup(65091) -> {"Ps","ON"}; +lookup(65092) -> {"Pe","ON"}; +lookup(65093) -> {"Po","ON"}; +lookup(65094) -> {"Po","ON"}; +lookup(65095) -> {"Ps","ON"}; +lookup(65096) -> {"Pe","ON"}; +lookup(65097) -> {"Po","ON"}; +lookup(65098) -> {"Po","ON"}; +lookup(65099) -> {"Po","ON"}; +lookup(65100) -> {"Po","ON"}; +lookup(65101) -> {"Pc","ON"}; +lookup(65102) -> {"Pc","ON"}; +lookup(65103) -> {"Pc","ON"}; +lookup(65104) -> {"Po","CS"}; +lookup(65105) -> {"Po","ON"}; +lookup(65106) -> {"Po","CS"}; +lookup(65108) -> {"Po","ON"}; +lookup(65109) -> {"Po","CS"}; +lookup(65110) -> {"Po","ON"}; +lookup(65111) -> {"Po","ON"}; +lookup(65112) -> {"Pd","ON"}; +lookup(65113) -> {"Ps","ON"}; +lookup(65114) -> {"Pe","ON"}; +lookup(65115) -> {"Ps","ON"}; +lookup(65116) -> {"Pe","ON"}; +lookup(65117) -> {"Ps","ON"}; +lookup(65118) -> {"Pe","ON"}; +lookup(65119) -> {"Po","ET"}; +lookup(65120) -> {"Po","ON"}; +lookup(65121) -> {"Po","ON"}; +lookup(65122) -> {"Sm","ES"}; +lookup(65123) -> {"Pd","ES"}; +lookup(65124) -> {"Sm","ON"}; +lookup(65125) -> {"Sm","ON"}; +lookup(65126) -> {"Sm","ON"}; +lookup(65128) -> {"Po","ON"}; +lookup(65129) -> {"Sc","ET"}; +lookup(65130) -> {"Po","ET"}; +lookup(65131) -> {"Po","ON"}; +lookup(65136) -> {"Lo","AL"}; +lookup(65137) -> {"Lo","AL"}; +lookup(65138) -> {"Lo","AL"}; +lookup(65139) -> {"Lo","AL"}; +lookup(65140) -> {"Lo","AL"}; +lookup(65142) -> {"Lo","AL"}; +lookup(65143) -> {"Lo","AL"}; +lookup(65144) -> {"Lo","AL"}; +lookup(65145) -> {"Lo","AL"}; +lookup(65146) -> {"Lo","AL"}; +lookup(65147) -> {"Lo","AL"}; 
+lookup(65148) -> {"Lo","AL"}; +lookup(65149) -> {"Lo","AL"}; +lookup(65150) -> {"Lo","AL"}; +lookup(65151) -> {"Lo","AL"}; +lookup(65152) -> {"Lo","AL"}; +lookup(65153) -> {"Lo","AL"}; +lookup(65154) -> {"Lo","AL"}; +lookup(65155) -> {"Lo","AL"}; +lookup(65156) -> {"Lo","AL"}; +lookup(65157) -> {"Lo","AL"}; +lookup(65158) -> {"Lo","AL"}; +lookup(65159) -> {"Lo","AL"}; +lookup(65160) -> {"Lo","AL"}; +lookup(65161) -> {"Lo","AL"}; +lookup(65162) -> {"Lo","AL"}; +lookup(65163) -> {"Lo","AL"}; +lookup(65164) -> {"Lo","AL"}; +lookup(65165) -> {"Lo","AL"}; +lookup(65166) -> {"Lo","AL"}; +lookup(65167) -> {"Lo","AL"}; +lookup(65168) -> {"Lo","AL"}; +lookup(65169) -> {"Lo","AL"}; +lookup(65170) -> {"Lo","AL"}; +lookup(65171) -> {"Lo","AL"}; +lookup(65172) -> {"Lo","AL"}; +lookup(65173) -> {"Lo","AL"}; +lookup(65174) -> {"Lo","AL"}; +lookup(65175) -> {"Lo","AL"}; +lookup(65176) -> {"Lo","AL"}; +lookup(65177) -> {"Lo","AL"}; +lookup(65178) -> {"Lo","AL"}; +lookup(65179) -> {"Lo","AL"}; +lookup(65180) -> {"Lo","AL"}; +lookup(65181) -> {"Lo","AL"}; +lookup(65182) -> {"Lo","AL"}; +lookup(65183) -> {"Lo","AL"}; +lookup(65184) -> {"Lo","AL"}; +lookup(65185) -> {"Lo","AL"}; +lookup(65186) -> {"Lo","AL"}; +lookup(65187) -> {"Lo","AL"}; +lookup(65188) -> {"Lo","AL"}; +lookup(65189) -> {"Lo","AL"}; +lookup(65190) -> {"Lo","AL"}; +lookup(65191) -> {"Lo","AL"}; +lookup(65192) -> {"Lo","AL"}; +lookup(65193) -> {"Lo","AL"}; +lookup(65194) -> {"Lo","AL"}; +lookup(65195) -> {"Lo","AL"}; +lookup(65196) -> {"Lo","AL"}; +lookup(65197) -> {"Lo","AL"}; +lookup(65198) -> {"Lo","AL"}; +lookup(65199) -> {"Lo","AL"}; +lookup(65200) -> {"Lo","AL"}; +lookup(65201) -> {"Lo","AL"}; +lookup(65202) -> {"Lo","AL"}; +lookup(65203) -> {"Lo","AL"}; +lookup(65204) -> {"Lo","AL"}; +lookup(65205) -> {"Lo","AL"}; +lookup(65206) -> {"Lo","AL"}; +lookup(65207) -> {"Lo","AL"}; +lookup(65208) -> {"Lo","AL"}; +lookup(65209) -> {"Lo","AL"}; +lookup(65210) -> {"Lo","AL"}; +lookup(65211) -> {"Lo","AL"}; +lookup(65212) 
-> {"Lo","AL"}; +lookup(65213) -> {"Lo","AL"}; +lookup(65214) -> {"Lo","AL"}; +lookup(65215) -> {"Lo","AL"}; +lookup(65216) -> {"Lo","AL"}; +lookup(65217) -> {"Lo","AL"}; +lookup(65218) -> {"Lo","AL"}; +lookup(65219) -> {"Lo","AL"}; +lookup(65220) -> {"Lo","AL"}; +lookup(65221) -> {"Lo","AL"}; +lookup(65222) -> {"Lo","AL"}; +lookup(65223) -> {"Lo","AL"}; +lookup(65224) -> {"Lo","AL"}; +lookup(65225) -> {"Lo","AL"}; +lookup(65226) -> {"Lo","AL"}; +lookup(65227) -> {"Lo","AL"}; +lookup(65228) -> {"Lo","AL"}; +lookup(65229) -> {"Lo","AL"}; +lookup(65230) -> {"Lo","AL"}; +lookup(65231) -> {"Lo","AL"}; +lookup(65232) -> {"Lo","AL"}; +lookup(65233) -> {"Lo","AL"}; +lookup(65234) -> {"Lo","AL"}; +lookup(65235) -> {"Lo","AL"}; +lookup(65236) -> {"Lo","AL"}; +lookup(65237) -> {"Lo","AL"}; +lookup(65238) -> {"Lo","AL"}; +lookup(65239) -> {"Lo","AL"}; +lookup(65240) -> {"Lo","AL"}; +lookup(65241) -> {"Lo","AL"}; +lookup(65242) -> {"Lo","AL"}; +lookup(65243) -> {"Lo","AL"}; +lookup(65244) -> {"Lo","AL"}; +lookup(65245) -> {"Lo","AL"}; +lookup(65246) -> {"Lo","AL"}; +lookup(65247) -> {"Lo","AL"}; +lookup(65248) -> {"Lo","AL"}; +lookup(65249) -> {"Lo","AL"}; +lookup(65250) -> {"Lo","AL"}; +lookup(65251) -> {"Lo","AL"}; +lookup(65252) -> {"Lo","AL"}; +lookup(65253) -> {"Lo","AL"}; +lookup(65254) -> {"Lo","AL"}; +lookup(65255) -> {"Lo","AL"}; +lookup(65256) -> {"Lo","AL"}; +lookup(65257) -> {"Lo","AL"}; +lookup(65258) -> {"Lo","AL"}; +lookup(65259) -> {"Lo","AL"}; +lookup(65260) -> {"Lo","AL"}; +lookup(65261) -> {"Lo","AL"}; +lookup(65262) -> {"Lo","AL"}; +lookup(65263) -> {"Lo","AL"}; +lookup(65264) -> {"Lo","AL"}; +lookup(65265) -> {"Lo","AL"}; +lookup(65266) -> {"Lo","AL"}; +lookup(65267) -> {"Lo","AL"}; +lookup(65268) -> {"Lo","AL"}; +lookup(65269) -> {"Lo","AL"}; +lookup(65270) -> {"Lo","AL"}; +lookup(65271) -> {"Lo","AL"}; +lookup(65272) -> {"Lo","AL"}; +lookup(65273) -> {"Lo","AL"}; +lookup(65274) -> {"Lo","AL"}; +lookup(65275) -> {"Lo","AL"}; +lookup(65276) -> {"Lo","AL"}; 
+lookup(65279) -> {"Cf","BN"}; +lookup(65281) -> {"Po","ON"}; +lookup(65282) -> {"Po","ON"}; +lookup(65283) -> {"Po","ET"}; +lookup(65284) -> {"Sc","ET"}; +lookup(65285) -> {"Po","ET"}; +lookup(65286) -> {"Po","ON"}; +lookup(65287) -> {"Po","ON"}; +lookup(65288) -> {"Ps","ON"}; +lookup(65289) -> {"Pe","ON"}; +lookup(65290) -> {"Po","ON"}; +lookup(65291) -> {"Sm","ES"}; +lookup(65292) -> {"Po","CS"}; +lookup(65293) -> {"Pd","ES"}; +lookup(65294) -> {"Po","CS"}; +lookup(65295) -> {"Po","CS"}; +lookup(65296) -> {"Nd","EN"}; +lookup(65297) -> {"Nd","EN"}; +lookup(65298) -> {"Nd","EN"}; +lookup(65299) -> {"Nd","EN"}; +lookup(65300) -> {"Nd","EN"}; +lookup(65301) -> {"Nd","EN"}; +lookup(65302) -> {"Nd","EN"}; +lookup(65303) -> {"Nd","EN"}; +lookup(65304) -> {"Nd","EN"}; +lookup(65305) -> {"Nd","EN"}; +lookup(65306) -> {"Po","CS"}; +lookup(65307) -> {"Po","ON"}; +lookup(65308) -> {"Sm","ON"}; +lookup(65309) -> {"Sm","ON"}; +lookup(65310) -> {"Sm","ON"}; +lookup(65311) -> {"Po","ON"}; +lookup(65312) -> {"Po","ON"}; +lookup(65313) -> {"Lu","L"}; +lookup(65314) -> {"Lu","L"}; +lookup(65315) -> {"Lu","L"}; +lookup(65316) -> {"Lu","L"}; +lookup(65317) -> {"Lu","L"}; +lookup(65318) -> {"Lu","L"}; +lookup(65319) -> {"Lu","L"}; +lookup(65320) -> {"Lu","L"}; +lookup(65321) -> {"Lu","L"}; +lookup(65322) -> {"Lu","L"}; +lookup(65323) -> {"Lu","L"}; +lookup(65324) -> {"Lu","L"}; +lookup(65325) -> {"Lu","L"}; +lookup(65326) -> {"Lu","L"}; +lookup(65327) -> {"Lu","L"}; +lookup(65328) -> {"Lu","L"}; +lookup(65329) -> {"Lu","L"}; +lookup(65330) -> {"Lu","L"}; +lookup(65331) -> {"Lu","L"}; +lookup(65332) -> {"Lu","L"}; +lookup(65333) -> {"Lu","L"}; +lookup(65334) -> {"Lu","L"}; +lookup(65335) -> {"Lu","L"}; +lookup(65336) -> {"Lu","L"}; +lookup(65337) -> {"Lu","L"}; +lookup(65338) -> {"Lu","L"}; +lookup(65339) -> {"Ps","ON"}; +lookup(65340) -> {"Po","ON"}; +lookup(65341) -> {"Pe","ON"}; +lookup(65342) -> {"Sk","ON"}; +lookup(65343) -> {"Pc","ON"}; +lookup(65344) -> {"Sk","ON"}; 
+lookup(65345) -> {"Ll","L"}; +lookup(65346) -> {"Ll","L"}; +lookup(65347) -> {"Ll","L"}; +lookup(65348) -> {"Ll","L"}; +lookup(65349) -> {"Ll","L"}; +lookup(65350) -> {"Ll","L"}; +lookup(65351) -> {"Ll","L"}; +lookup(65352) -> {"Ll","L"}; +lookup(65353) -> {"Ll","L"}; +lookup(65354) -> {"Ll","L"}; +lookup(65355) -> {"Ll","L"}; +lookup(65356) -> {"Ll","L"}; +lookup(65357) -> {"Ll","L"}; +lookup(65358) -> {"Ll","L"}; +lookup(65359) -> {"Ll","L"}; +lookup(65360) -> {"Ll","L"}; +lookup(65361) -> {"Ll","L"}; +lookup(65362) -> {"Ll","L"}; +lookup(65363) -> {"Ll","L"}; +lookup(65364) -> {"Ll","L"}; +lookup(65365) -> {"Ll","L"}; +lookup(65366) -> {"Ll","L"}; +lookup(65367) -> {"Ll","L"}; +lookup(65368) -> {"Ll","L"}; +lookup(65369) -> {"Ll","L"}; +lookup(65370) -> {"Ll","L"}; +lookup(65371) -> {"Ps","ON"}; +lookup(65372) -> {"Sm","ON"}; +lookup(65373) -> {"Pe","ON"}; +lookup(65374) -> {"Sm","ON"}; +lookup(65375) -> {"Ps","ON"}; +lookup(65376) -> {"Pe","ON"}; +lookup(65377) -> {"Po","ON"}; +lookup(65378) -> {"Ps","ON"}; +lookup(65379) -> {"Pe","ON"}; +lookup(65380) -> {"Po","ON"}; +lookup(65381) -> {"Po","ON"}; +lookup(65382) -> {"Lo","L"}; +lookup(65383) -> {"Lo","L"}; +lookup(65384) -> {"Lo","L"}; +lookup(65385) -> {"Lo","L"}; +lookup(65386) -> {"Lo","L"}; +lookup(65387) -> {"Lo","L"}; +lookup(65388) -> {"Lo","L"}; +lookup(65389) -> {"Lo","L"}; +lookup(65390) -> {"Lo","L"}; +lookup(65391) -> {"Lo","L"}; +lookup(65392) -> {"Lm","L"}; +lookup(65393) -> {"Lo","L"}; +lookup(65394) -> {"Lo","L"}; +lookup(65395) -> {"Lo","L"}; +lookup(65396) -> {"Lo","L"}; +lookup(65397) -> {"Lo","L"}; +lookup(65398) -> {"Lo","L"}; +lookup(65399) -> {"Lo","L"}; +lookup(65400) -> {"Lo","L"}; +lookup(65401) -> {"Lo","L"}; +lookup(65402) -> {"Lo","L"}; +lookup(65403) -> {"Lo","L"}; +lookup(65404) -> {"Lo","L"}; +lookup(65405) -> {"Lo","L"}; +lookup(65406) -> {"Lo","L"}; +lookup(65407) -> {"Lo","L"}; +lookup(65408) -> {"Lo","L"}; +lookup(65409) -> {"Lo","L"}; +lookup(65410) -> {"Lo","L"}; 
+lookup(65411) -> {"Lo","L"}; +lookup(65412) -> {"Lo","L"}; +lookup(65413) -> {"Lo","L"}; +lookup(65414) -> {"Lo","L"}; +lookup(65415) -> {"Lo","L"}; +lookup(65416) -> {"Lo","L"}; +lookup(65417) -> {"Lo","L"}; +lookup(65418) -> {"Lo","L"}; +lookup(65419) -> {"Lo","L"}; +lookup(65420) -> {"Lo","L"}; +lookup(65421) -> {"Lo","L"}; +lookup(65422) -> {"Lo","L"}; +lookup(65423) -> {"Lo","L"}; +lookup(65424) -> {"Lo","L"}; +lookup(65425) -> {"Lo","L"}; +lookup(65426) -> {"Lo","L"}; +lookup(65427) -> {"Lo","L"}; +lookup(65428) -> {"Lo","L"}; +lookup(65429) -> {"Lo","L"}; +lookup(65430) -> {"Lo","L"}; +lookup(65431) -> {"Lo","L"}; +lookup(65432) -> {"Lo","L"}; +lookup(65433) -> {"Lo","L"}; +lookup(65434) -> {"Lo","L"}; +lookup(65435) -> {"Lo","L"}; +lookup(65436) -> {"Lo","L"}; +lookup(65437) -> {"Lo","L"}; +lookup(65438) -> {"Lm","L"}; +lookup(65439) -> {"Lm","L"}; +lookup(65440) -> {"Lo","L"}; +lookup(65441) -> {"Lo","L"}; +lookup(65442) -> {"Lo","L"}; +lookup(65443) -> {"Lo","L"}; +lookup(65444) -> {"Lo","L"}; +lookup(65445) -> {"Lo","L"}; +lookup(65446) -> {"Lo","L"}; +lookup(65447) -> {"Lo","L"}; +lookup(65448) -> {"Lo","L"}; +lookup(65449) -> {"Lo","L"}; +lookup(65450) -> {"Lo","L"}; +lookup(65451) -> {"Lo","L"}; +lookup(65452) -> {"Lo","L"}; +lookup(65453) -> {"Lo","L"}; +lookup(65454) -> {"Lo","L"}; +lookup(65455) -> {"Lo","L"}; +lookup(65456) -> {"Lo","L"}; +lookup(65457) -> {"Lo","L"}; +lookup(65458) -> {"Lo","L"}; +lookup(65459) -> {"Lo","L"}; +lookup(65460) -> {"Lo","L"}; +lookup(65461) -> {"Lo","L"}; +lookup(65462) -> {"Lo","L"}; +lookup(65463) -> {"Lo","L"}; +lookup(65464) -> {"Lo","L"}; +lookup(65465) -> {"Lo","L"}; +lookup(65466) -> {"Lo","L"}; +lookup(65467) -> {"Lo","L"}; +lookup(65468) -> {"Lo","L"}; +lookup(65469) -> {"Lo","L"}; +lookup(65470) -> {"Lo","L"}; +lookup(65474) -> {"Lo","L"}; +lookup(65475) -> {"Lo","L"}; +lookup(65476) -> {"Lo","L"}; +lookup(65477) -> {"Lo","L"}; +lookup(65478) -> {"Lo","L"}; +lookup(65479) -> {"Lo","L"}; +lookup(65482) -> 
{"Lo","L"}; +lookup(65483) -> {"Lo","L"}; +lookup(65484) -> {"Lo","L"}; +lookup(65485) -> {"Lo","L"}; +lookup(65486) -> {"Lo","L"}; +lookup(65487) -> {"Lo","L"}; +lookup(65490) -> {"Lo","L"}; +lookup(65491) -> {"Lo","L"}; +lookup(65492) -> {"Lo","L"}; +lookup(65493) -> {"Lo","L"}; +lookup(65494) -> {"Lo","L"}; +lookup(65495) -> {"Lo","L"}; +lookup(65498) -> {"Lo","L"}; +lookup(65499) -> {"Lo","L"}; +lookup(65500) -> {"Lo","L"}; +lookup(65504) -> {"Sc","ET"}; +lookup(65505) -> {"Sc","ET"}; +lookup(65506) -> {"Sm","ON"}; +lookup(65507) -> {"Sk","ON"}; +lookup(65508) -> {"So","ON"}; +lookup(65509) -> {"Sc","ET"}; +lookup(65510) -> {"Sc","ET"}; +lookup(65512) -> {"So","ON"}; +lookup(65513) -> {"Sm","ON"}; +lookup(65514) -> {"Sm","ON"}; +lookup(65515) -> {"Sm","ON"}; +lookup(65516) -> {"Sm","ON"}; +lookup(65517) -> {"So","ON"}; +lookup(65518) -> {"So","ON"}; +lookup(65529) -> {"Cf","ON"}; +lookup(65530) -> {"Cf","ON"}; +lookup(65531) -> {"Cf","ON"}; +lookup(65532) -> {"So","ON"}; +lookup(65533) -> {"So","ON"}; +lookup(65536) -> {"Lo","L"}; +lookup(65537) -> {"Lo","L"}; +lookup(65538) -> {"Lo","L"}; +lookup(65539) -> {"Lo","L"}; +lookup(65540) -> {"Lo","L"}; +lookup(65541) -> {"Lo","L"}; +lookup(65542) -> {"Lo","L"}; +lookup(65543) -> {"Lo","L"}; +lookup(65544) -> {"Lo","L"}; +lookup(65545) -> {"Lo","L"}; +lookup(65546) -> {"Lo","L"}; +lookup(65547) -> {"Lo","L"}; +lookup(65549) -> {"Lo","L"}; +lookup(65550) -> {"Lo","L"}; +lookup(65551) -> {"Lo","L"}; +lookup(65552) -> {"Lo","L"}; +lookup(65553) -> {"Lo","L"}; +lookup(65554) -> {"Lo","L"}; +lookup(65555) -> {"Lo","L"}; +lookup(65556) -> {"Lo","L"}; +lookup(65557) -> {"Lo","L"}; +lookup(65558) -> {"Lo","L"}; +lookup(65559) -> {"Lo","L"}; +lookup(65560) -> {"Lo","L"}; +lookup(65561) -> {"Lo","L"}; +lookup(65562) -> {"Lo","L"}; +lookup(65563) -> {"Lo","L"}; +lookup(65564) -> {"Lo","L"}; +lookup(65565) -> {"Lo","L"}; +lookup(65566) -> {"Lo","L"}; +lookup(65567) -> {"Lo","L"}; +lookup(65568) -> {"Lo","L"}; +lookup(65569) -> 
{"Lo","L"}; +lookup(65570) -> {"Lo","L"}; +lookup(65571) -> {"Lo","L"}; +lookup(65572) -> {"Lo","L"}; +lookup(65573) -> {"Lo","L"}; +lookup(65574) -> {"Lo","L"}; +lookup(65576) -> {"Lo","L"}; +lookup(65577) -> {"Lo","L"}; +lookup(65578) -> {"Lo","L"}; +lookup(65579) -> {"Lo","L"}; +lookup(65580) -> {"Lo","L"}; +lookup(65581) -> {"Lo","L"}; +lookup(65582) -> {"Lo","L"}; +lookup(65583) -> {"Lo","L"}; +lookup(65584) -> {"Lo","L"}; +lookup(65585) -> {"Lo","L"}; +lookup(65586) -> {"Lo","L"}; +lookup(65587) -> {"Lo","L"}; +lookup(65588) -> {"Lo","L"}; +lookup(65589) -> {"Lo","L"}; +lookup(65590) -> {"Lo","L"}; +lookup(65591) -> {"Lo","L"}; +lookup(65592) -> {"Lo","L"}; +lookup(65593) -> {"Lo","L"}; +lookup(65594) -> {"Lo","L"}; +lookup(65596) -> {"Lo","L"}; +lookup(65597) -> {"Lo","L"}; +lookup(65599) -> {"Lo","L"}; +lookup(65600) -> {"Lo","L"}; +lookup(65601) -> {"Lo","L"}; +lookup(65602) -> {"Lo","L"}; +lookup(65603) -> {"Lo","L"}; +lookup(65604) -> {"Lo","L"}; +lookup(65605) -> {"Lo","L"}; +lookup(65606) -> {"Lo","L"}; +lookup(65607) -> {"Lo","L"}; +lookup(65608) -> {"Lo","L"}; +lookup(65609) -> {"Lo","L"}; +lookup(65610) -> {"Lo","L"}; +lookup(65611) -> {"Lo","L"}; +lookup(65612) -> {"Lo","L"}; +lookup(65613) -> {"Lo","L"}; +lookup(65616) -> {"Lo","L"}; +lookup(65617) -> {"Lo","L"}; +lookup(65618) -> {"Lo","L"}; +lookup(65619) -> {"Lo","L"}; +lookup(65620) -> {"Lo","L"}; +lookup(65621) -> {"Lo","L"}; +lookup(65622) -> {"Lo","L"}; +lookup(65623) -> {"Lo","L"}; +lookup(65624) -> {"Lo","L"}; +lookup(65625) -> {"Lo","L"}; +lookup(65626) -> {"Lo","L"}; +lookup(65627) -> {"Lo","L"}; +lookup(65628) -> {"Lo","L"}; +lookup(65629) -> {"Lo","L"}; +lookup(65664) -> {"Lo","L"}; +lookup(65665) -> {"Lo","L"}; +lookup(65666) -> {"Lo","L"}; +lookup(65667) -> {"Lo","L"}; +lookup(65668) -> {"Lo","L"}; +lookup(65669) -> {"Lo","L"}; +lookup(65670) -> {"Lo","L"}; +lookup(65671) -> {"Lo","L"}; +lookup(65672) -> {"Lo","L"}; +lookup(65673) -> {"Lo","L"}; +lookup(65674) -> {"Lo","L"}; 
+lookup(65675) -> {"Lo","L"}; +lookup(65676) -> {"Lo","L"}; +lookup(65677) -> {"Lo","L"}; +lookup(65678) -> {"Lo","L"}; +lookup(65679) -> {"Lo","L"}; +lookup(65680) -> {"Lo","L"}; +lookup(65681) -> {"Lo","L"}; +lookup(65682) -> {"Lo","L"}; +lookup(65683) -> {"Lo","L"}; +lookup(65684) -> {"Lo","L"}; +lookup(65685) -> {"Lo","L"}; +lookup(65686) -> {"Lo","L"}; +lookup(65687) -> {"Lo","L"}; +lookup(65688) -> {"Lo","L"}; +lookup(65689) -> {"Lo","L"}; +lookup(65690) -> {"Lo","L"}; +lookup(65691) -> {"Lo","L"}; +lookup(65692) -> {"Lo","L"}; +lookup(65693) -> {"Lo","L"}; +lookup(65694) -> {"Lo","L"}; +lookup(65695) -> {"Lo","L"}; +lookup(65696) -> {"Lo","L"}; +lookup(65697) -> {"Lo","L"}; +lookup(65698) -> {"Lo","L"}; +lookup(65699) -> {"Lo","L"}; +lookup(65700) -> {"Lo","L"}; +lookup(65701) -> {"Lo","L"}; +lookup(65702) -> {"Lo","L"}; +lookup(65703) -> {"Lo","L"}; +lookup(65704) -> {"Lo","L"}; +lookup(65705) -> {"Lo","L"}; +lookup(65706) -> {"Lo","L"}; +lookup(65707) -> {"Lo","L"}; +lookup(65708) -> {"Lo","L"}; +lookup(65709) -> {"Lo","L"}; +lookup(65710) -> {"Lo","L"}; +lookup(65711) -> {"Lo","L"}; +lookup(65712) -> {"Lo","L"}; +lookup(65713) -> {"Lo","L"}; +lookup(65714) -> {"Lo","L"}; +lookup(65715) -> {"Lo","L"}; +lookup(65716) -> {"Lo","L"}; +lookup(65717) -> {"Lo","L"}; +lookup(65718) -> {"Lo","L"}; +lookup(65719) -> {"Lo","L"}; +lookup(65720) -> {"Lo","L"}; +lookup(65721) -> {"Lo","L"}; +lookup(65722) -> {"Lo","L"}; +lookup(65723) -> {"Lo","L"}; +lookup(65724) -> {"Lo","L"}; +lookup(65725) -> {"Lo","L"}; +lookup(65726) -> {"Lo","L"}; +lookup(65727) -> {"Lo","L"}; +lookup(65728) -> {"Lo","L"}; +lookup(65729) -> {"Lo","L"}; +lookup(65730) -> {"Lo","L"}; +lookup(65731) -> {"Lo","L"}; +lookup(65732) -> {"Lo","L"}; +lookup(65733) -> {"Lo","L"}; +lookup(65734) -> {"Lo","L"}; +lookup(65735) -> {"Lo","L"}; +lookup(65736) -> {"Lo","L"}; +lookup(65737) -> {"Lo","L"}; +lookup(65738) -> {"Lo","L"}; +lookup(65739) -> {"Lo","L"}; +lookup(65740) -> {"Lo","L"}; +lookup(65741) -> 
{"Lo","L"}; +lookup(65742) -> {"Lo","L"}; +lookup(65743) -> {"Lo","L"}; +lookup(65744) -> {"Lo","L"}; +lookup(65745) -> {"Lo","L"}; +lookup(65746) -> {"Lo","L"}; +lookup(65747) -> {"Lo","L"}; +lookup(65748) -> {"Lo","L"}; +lookup(65749) -> {"Lo","L"}; +lookup(65750) -> {"Lo","L"}; +lookup(65751) -> {"Lo","L"}; +lookup(65752) -> {"Lo","L"}; +lookup(65753) -> {"Lo","L"}; +lookup(65754) -> {"Lo","L"}; +lookup(65755) -> {"Lo","L"}; +lookup(65756) -> {"Lo","L"}; +lookup(65757) -> {"Lo","L"}; +lookup(65758) -> {"Lo","L"}; +lookup(65759) -> {"Lo","L"}; +lookup(65760) -> {"Lo","L"}; +lookup(65761) -> {"Lo","L"}; +lookup(65762) -> {"Lo","L"}; +lookup(65763) -> {"Lo","L"}; +lookup(65764) -> {"Lo","L"}; +lookup(65765) -> {"Lo","L"}; +lookup(65766) -> {"Lo","L"}; +lookup(65767) -> {"Lo","L"}; +lookup(65768) -> {"Lo","L"}; +lookup(65769) -> {"Lo","L"}; +lookup(65770) -> {"Lo","L"}; +lookup(65771) -> {"Lo","L"}; +lookup(65772) -> {"Lo","L"}; +lookup(65773) -> {"Lo","L"}; +lookup(65774) -> {"Lo","L"}; +lookup(65775) -> {"Lo","L"}; +lookup(65776) -> {"Lo","L"}; +lookup(65777) -> {"Lo","L"}; +lookup(65778) -> {"Lo","L"}; +lookup(65779) -> {"Lo","L"}; +lookup(65780) -> {"Lo","L"}; +lookup(65781) -> {"Lo","L"}; +lookup(65782) -> {"Lo","L"}; +lookup(65783) -> {"Lo","L"}; +lookup(65784) -> {"Lo","L"}; +lookup(65785) -> {"Lo","L"}; +lookup(65786) -> {"Lo","L"}; +lookup(65792) -> {"Po","L"}; +lookup(65793) -> {"Po","ON"}; +lookup(65794) -> {"Po","L"}; +lookup(65799) -> {"No","L"}; +lookup(65800) -> {"No","L"}; +lookup(65801) -> {"No","L"}; +lookup(65802) -> {"No","L"}; +lookup(65803) -> {"No","L"}; +lookup(65804) -> {"No","L"}; +lookup(65805) -> {"No","L"}; +lookup(65806) -> {"No","L"}; +lookup(65807) -> {"No","L"}; +lookup(65808) -> {"No","L"}; +lookup(65809) -> {"No","L"}; +lookup(65810) -> {"No","L"}; +lookup(65811) -> {"No","L"}; +lookup(65812) -> {"No","L"}; +lookup(65813) -> {"No","L"}; +lookup(65814) -> {"No","L"}; +lookup(65815) -> {"No","L"}; +lookup(65816) -> {"No","L"}; 
+lookup(65817) -> {"No","L"}; +lookup(65818) -> {"No","L"}; +lookup(65819) -> {"No","L"}; +lookup(65820) -> {"No","L"}; +lookup(65821) -> {"No","L"}; +lookup(65822) -> {"No","L"}; +lookup(65823) -> {"No","L"}; +lookup(65824) -> {"No","L"}; +lookup(65825) -> {"No","L"}; +lookup(65826) -> {"No","L"}; +lookup(65827) -> {"No","L"}; +lookup(65828) -> {"No","L"}; +lookup(65829) -> {"No","L"}; +lookup(65830) -> {"No","L"}; +lookup(65831) -> {"No","L"}; +lookup(65832) -> {"No","L"}; +lookup(65833) -> {"No","L"}; +lookup(65834) -> {"No","L"}; +lookup(65835) -> {"No","L"}; +lookup(65836) -> {"No","L"}; +lookup(65837) -> {"No","L"}; +lookup(65838) -> {"No","L"}; +lookup(65839) -> {"No","L"}; +lookup(65840) -> {"No","L"}; +lookup(65841) -> {"No","L"}; +lookup(65842) -> {"No","L"}; +lookup(65843) -> {"No","L"}; +lookup(65847) -> {"So","L"}; +lookup(65848) -> {"So","L"}; +lookup(65849) -> {"So","L"}; +lookup(65850) -> {"So","L"}; +lookup(65851) -> {"So","L"}; +lookup(65852) -> {"So","L"}; +lookup(65853) -> {"So","L"}; +lookup(65854) -> {"So","L"}; +lookup(65855) -> {"So","L"}; +lookup(65856) -> {"Nl","ON"}; +lookup(65857) -> {"Nl","ON"}; +lookup(65858) -> {"Nl","ON"}; +lookup(65859) -> {"Nl","ON"}; +lookup(65860) -> {"Nl","ON"}; +lookup(65861) -> {"Nl","ON"}; +lookup(65862) -> {"Nl","ON"}; +lookup(65863) -> {"Nl","ON"}; +lookup(65864) -> {"Nl","ON"}; +lookup(65865) -> {"Nl","ON"}; +lookup(65866) -> {"Nl","ON"}; +lookup(65867) -> {"Nl","ON"}; +lookup(65868) -> {"Nl","ON"}; +lookup(65869) -> {"Nl","ON"}; +lookup(65870) -> {"Nl","ON"}; +lookup(65871) -> {"Nl","ON"}; +lookup(65872) -> {"Nl","ON"}; +lookup(65873) -> {"Nl","ON"}; +lookup(65874) -> {"Nl","ON"}; +lookup(65875) -> {"Nl","ON"}; +lookup(65876) -> {"Nl","ON"}; +lookup(65877) -> {"Nl","ON"}; +lookup(65878) -> {"Nl","ON"}; +lookup(65879) -> {"Nl","ON"}; +lookup(65880) -> {"Nl","ON"}; +lookup(65881) -> {"Nl","ON"}; +lookup(65882) -> {"Nl","ON"}; +lookup(65883) -> {"Nl","ON"}; +lookup(65884) -> {"Nl","ON"}; +lookup(65885) -> 
{"Nl","ON"}; +lookup(65886) -> {"Nl","ON"}; +lookup(65887) -> {"Nl","ON"}; +lookup(65888) -> {"Nl","ON"}; +lookup(65889) -> {"Nl","ON"}; +lookup(65890) -> {"Nl","ON"}; +lookup(65891) -> {"Nl","ON"}; +lookup(65892) -> {"Nl","ON"}; +lookup(65893) -> {"Nl","ON"}; +lookup(65894) -> {"Nl","ON"}; +lookup(65895) -> {"Nl","ON"}; +lookup(65896) -> {"Nl","ON"}; +lookup(65897) -> {"Nl","ON"}; +lookup(65898) -> {"Nl","ON"}; +lookup(65899) -> {"Nl","ON"}; +lookup(65900) -> {"Nl","ON"}; +lookup(65901) -> {"Nl","ON"}; +lookup(65902) -> {"Nl","ON"}; +lookup(65903) -> {"Nl","ON"}; +lookup(65904) -> {"Nl","ON"}; +lookup(65905) -> {"Nl","ON"}; +lookup(65906) -> {"Nl","ON"}; +lookup(65907) -> {"Nl","ON"}; +lookup(65908) -> {"Nl","ON"}; +lookup(65909) -> {"No","ON"}; +lookup(65910) -> {"No","ON"}; +lookup(65911) -> {"No","ON"}; +lookup(65912) -> {"No","ON"}; +lookup(65913) -> {"So","ON"}; +lookup(65914) -> {"So","ON"}; +lookup(65915) -> {"So","ON"}; +lookup(65916) -> {"So","ON"}; +lookup(65917) -> {"So","ON"}; +lookup(65918) -> {"So","ON"}; +lookup(65919) -> {"So","ON"}; +lookup(65920) -> {"So","ON"}; +lookup(65921) -> {"So","ON"}; +lookup(65922) -> {"So","ON"}; +lookup(65923) -> {"So","ON"}; +lookup(65924) -> {"So","ON"}; +lookup(65925) -> {"So","ON"}; +lookup(65926) -> {"So","ON"}; +lookup(65927) -> {"So","ON"}; +lookup(65928) -> {"So","ON"}; +lookup(65929) -> {"So","ON"}; +lookup(65930) -> {"No","ON"}; +lookup(65931) -> {"No","ON"}; +lookup(65932) -> {"So","ON"}; +lookup(65933) -> {"So","L"}; +lookup(65934) -> {"So","L"}; +lookup(65936) -> {"So","ON"}; +lookup(65937) -> {"So","ON"}; +lookup(65938) -> {"So","ON"}; +lookup(65939) -> {"So","ON"}; +lookup(65940) -> {"So","ON"}; +lookup(65941) -> {"So","ON"}; +lookup(65942) -> {"So","ON"}; +lookup(65943) -> {"So","ON"}; +lookup(65944) -> {"So","ON"}; +lookup(65945) -> {"So","ON"}; +lookup(65946) -> {"So","ON"}; +lookup(65947) -> {"So","ON"}; +lookup(65948) -> {"So","ON"}; +lookup(65952) -> {"So","ON"}; +lookup(66000) -> {"So","L"}; 
+lookup(66001) -> {"So","L"}; +lookup(66002) -> {"So","L"}; +lookup(66003) -> {"So","L"}; +lookup(66004) -> {"So","L"}; +lookup(66005) -> {"So","L"}; +lookup(66006) -> {"So","L"}; +lookup(66007) -> {"So","L"}; +lookup(66008) -> {"So","L"}; +lookup(66009) -> {"So","L"}; +lookup(66010) -> {"So","L"}; +lookup(66011) -> {"So","L"}; +lookup(66012) -> {"So","L"}; +lookup(66013) -> {"So","L"}; +lookup(66014) -> {"So","L"}; +lookup(66015) -> {"So","L"}; +lookup(66016) -> {"So","L"}; +lookup(66017) -> {"So","L"}; +lookup(66018) -> {"So","L"}; +lookup(66019) -> {"So","L"}; +lookup(66020) -> {"So","L"}; +lookup(66021) -> {"So","L"}; +lookup(66022) -> {"So","L"}; +lookup(66023) -> {"So","L"}; +lookup(66024) -> {"So","L"}; +lookup(66025) -> {"So","L"}; +lookup(66026) -> {"So","L"}; +lookup(66027) -> {"So","L"}; +lookup(66028) -> {"So","L"}; +lookup(66029) -> {"So","L"}; +lookup(66030) -> {"So","L"}; +lookup(66031) -> {"So","L"}; +lookup(66032) -> {"So","L"}; +lookup(66033) -> {"So","L"}; +lookup(66034) -> {"So","L"}; +lookup(66035) -> {"So","L"}; +lookup(66036) -> {"So","L"}; +lookup(66037) -> {"So","L"}; +lookup(66038) -> {"So","L"}; +lookup(66039) -> {"So","L"}; +lookup(66040) -> {"So","L"}; +lookup(66041) -> {"So","L"}; +lookup(66042) -> {"So","L"}; +lookup(66043) -> {"So","L"}; +lookup(66044) -> {"So","L"}; +lookup(66045) -> {"Mn","NSM"}; +lookup(66176) -> {"Lo","L"}; +lookup(66177) -> {"Lo","L"}; +lookup(66178) -> {"Lo","L"}; +lookup(66179) -> {"Lo","L"}; +lookup(66180) -> {"Lo","L"}; +lookup(66181) -> {"Lo","L"}; +lookup(66182) -> {"Lo","L"}; +lookup(66183) -> {"Lo","L"}; +lookup(66184) -> {"Lo","L"}; +lookup(66185) -> {"Lo","L"}; +lookup(66186) -> {"Lo","L"}; +lookup(66187) -> {"Lo","L"}; +lookup(66188) -> {"Lo","L"}; +lookup(66189) -> {"Lo","L"}; +lookup(66190) -> {"Lo","L"}; +lookup(66191) -> {"Lo","L"}; +lookup(66192) -> {"Lo","L"}; +lookup(66193) -> {"Lo","L"}; +lookup(66194) -> {"Lo","L"}; +lookup(66195) -> {"Lo","L"}; +lookup(66196) -> {"Lo","L"}; +lookup(66197) -> 
{"Lo","L"}; +lookup(66198) -> {"Lo","L"}; +lookup(66199) -> {"Lo","L"}; +lookup(66200) -> {"Lo","L"}; +lookup(66201) -> {"Lo","L"}; +lookup(66202) -> {"Lo","L"}; +lookup(66203) -> {"Lo","L"}; +lookup(66204) -> {"Lo","L"}; +lookup(66208) -> {"Lo","L"}; +lookup(66209) -> {"Lo","L"}; +lookup(66210) -> {"Lo","L"}; +lookup(66211) -> {"Lo","L"}; +lookup(66212) -> {"Lo","L"}; +lookup(66213) -> {"Lo","L"}; +lookup(66214) -> {"Lo","L"}; +lookup(66215) -> {"Lo","L"}; +lookup(66216) -> {"Lo","L"}; +lookup(66217) -> {"Lo","L"}; +lookup(66218) -> {"Lo","L"}; +lookup(66219) -> {"Lo","L"}; +lookup(66220) -> {"Lo","L"}; +lookup(66221) -> {"Lo","L"}; +lookup(66222) -> {"Lo","L"}; +lookup(66223) -> {"Lo","L"}; +lookup(66224) -> {"Lo","L"}; +lookup(66225) -> {"Lo","L"}; +lookup(66226) -> {"Lo","L"}; +lookup(66227) -> {"Lo","L"}; +lookup(66228) -> {"Lo","L"}; +lookup(66229) -> {"Lo","L"}; +lookup(66230) -> {"Lo","L"}; +lookup(66231) -> {"Lo","L"}; +lookup(66232) -> {"Lo","L"}; +lookup(66233) -> {"Lo","L"}; +lookup(66234) -> {"Lo","L"}; +lookup(66235) -> {"Lo","L"}; +lookup(66236) -> {"Lo","L"}; +lookup(66237) -> {"Lo","L"}; +lookup(66238) -> {"Lo","L"}; +lookup(66239) -> {"Lo","L"}; +lookup(66240) -> {"Lo","L"}; +lookup(66241) -> {"Lo","L"}; +lookup(66242) -> {"Lo","L"}; +lookup(66243) -> {"Lo","L"}; +lookup(66244) -> {"Lo","L"}; +lookup(66245) -> {"Lo","L"}; +lookup(66246) -> {"Lo","L"}; +lookup(66247) -> {"Lo","L"}; +lookup(66248) -> {"Lo","L"}; +lookup(66249) -> {"Lo","L"}; +lookup(66250) -> {"Lo","L"}; +lookup(66251) -> {"Lo","L"}; +lookup(66252) -> {"Lo","L"}; +lookup(66253) -> {"Lo","L"}; +lookup(66254) -> {"Lo","L"}; +lookup(66255) -> {"Lo","L"}; +lookup(66256) -> {"Lo","L"}; +lookup(66272) -> {"Mn","NSM"}; +lookup(66273) -> {"No","EN"}; +lookup(66274) -> {"No","EN"}; +lookup(66275) -> {"No","EN"}; +lookup(66276) -> {"No","EN"}; +lookup(66277) -> {"No","EN"}; +lookup(66278) -> {"No","EN"}; +lookup(66279) -> {"No","EN"}; +lookup(66280) -> {"No","EN"}; +lookup(66281) -> 
{"No","EN"}; +lookup(66282) -> {"No","EN"}; +lookup(66283) -> {"No","EN"}; +lookup(66284) -> {"No","EN"}; +lookup(66285) -> {"No","EN"}; +lookup(66286) -> {"No","EN"}; +lookup(66287) -> {"No","EN"}; +lookup(66288) -> {"No","EN"}; +lookup(66289) -> {"No","EN"}; +lookup(66290) -> {"No","EN"}; +lookup(66291) -> {"No","EN"}; +lookup(66292) -> {"No","EN"}; +lookup(66293) -> {"No","EN"}; +lookup(66294) -> {"No","EN"}; +lookup(66295) -> {"No","EN"}; +lookup(66296) -> {"No","EN"}; +lookup(66297) -> {"No","EN"}; +lookup(66298) -> {"No","EN"}; +lookup(66299) -> {"No","EN"}; +lookup(66304) -> {"Lo","L"}; +lookup(66305) -> {"Lo","L"}; +lookup(66306) -> {"Lo","L"}; +lookup(66307) -> {"Lo","L"}; +lookup(66308) -> {"Lo","L"}; +lookup(66309) -> {"Lo","L"}; +lookup(66310) -> {"Lo","L"}; +lookup(66311) -> {"Lo","L"}; +lookup(66312) -> {"Lo","L"}; +lookup(66313) -> {"Lo","L"}; +lookup(66314) -> {"Lo","L"}; +lookup(66315) -> {"Lo","L"}; +lookup(66316) -> {"Lo","L"}; +lookup(66317) -> {"Lo","L"}; +lookup(66318) -> {"Lo","L"}; +lookup(66319) -> {"Lo","L"}; +lookup(66320) -> {"Lo","L"}; +lookup(66321) -> {"Lo","L"}; +lookup(66322) -> {"Lo","L"}; +lookup(66323) -> {"Lo","L"}; +lookup(66324) -> {"Lo","L"}; +lookup(66325) -> {"Lo","L"}; +lookup(66326) -> {"Lo","L"}; +lookup(66327) -> {"Lo","L"}; +lookup(66328) -> {"Lo","L"}; +lookup(66329) -> {"Lo","L"}; +lookup(66330) -> {"Lo","L"}; +lookup(66331) -> {"Lo","L"}; +lookup(66332) -> {"Lo","L"}; +lookup(66333) -> {"Lo","L"}; +lookup(66334) -> {"Lo","L"}; +lookup(66335) -> {"Lo","L"}; +lookup(66336) -> {"No","L"}; +lookup(66337) -> {"No","L"}; +lookup(66338) -> {"No","L"}; +lookup(66339) -> {"No","L"}; +lookup(66349) -> {"Lo","L"}; +lookup(66350) -> {"Lo","L"}; +lookup(66351) -> {"Lo","L"}; +lookup(66352) -> {"Lo","L"}; +lookup(66353) -> {"Lo","L"}; +lookup(66354) -> {"Lo","L"}; +lookup(66355) -> {"Lo","L"}; +lookup(66356) -> {"Lo","L"}; +lookup(66357) -> {"Lo","L"}; +lookup(66358) -> {"Lo","L"}; +lookup(66359) -> {"Lo","L"}; +lookup(66360) -> 
{"Lo","L"}; +lookup(66361) -> {"Lo","L"}; +lookup(66362) -> {"Lo","L"}; +lookup(66363) -> {"Lo","L"}; +lookup(66364) -> {"Lo","L"}; +lookup(66365) -> {"Lo","L"}; +lookup(66366) -> {"Lo","L"}; +lookup(66367) -> {"Lo","L"}; +lookup(66368) -> {"Lo","L"}; +lookup(66369) -> {"Nl","L"}; +lookup(66370) -> {"Lo","L"}; +lookup(66371) -> {"Lo","L"}; +lookup(66372) -> {"Lo","L"}; +lookup(66373) -> {"Lo","L"}; +lookup(66374) -> {"Lo","L"}; +lookup(66375) -> {"Lo","L"}; +lookup(66376) -> {"Lo","L"}; +lookup(66377) -> {"Lo","L"}; +lookup(66378) -> {"Nl","L"}; +lookup(66384) -> {"Lo","L"}; +lookup(66385) -> {"Lo","L"}; +lookup(66386) -> {"Lo","L"}; +lookup(66387) -> {"Lo","L"}; +lookup(66388) -> {"Lo","L"}; +lookup(66389) -> {"Lo","L"}; +lookup(66390) -> {"Lo","L"}; +lookup(66391) -> {"Lo","L"}; +lookup(66392) -> {"Lo","L"}; +lookup(66393) -> {"Lo","L"}; +lookup(66394) -> {"Lo","L"}; +lookup(66395) -> {"Lo","L"}; +lookup(66396) -> {"Lo","L"}; +lookup(66397) -> {"Lo","L"}; +lookup(66398) -> {"Lo","L"}; +lookup(66399) -> {"Lo","L"}; +lookup(66400) -> {"Lo","L"}; +lookup(66401) -> {"Lo","L"}; +lookup(66402) -> {"Lo","L"}; +lookup(66403) -> {"Lo","L"}; +lookup(66404) -> {"Lo","L"}; +lookup(66405) -> {"Lo","L"}; +lookup(66406) -> {"Lo","L"}; +lookup(66407) -> {"Lo","L"}; +lookup(66408) -> {"Lo","L"}; +lookup(66409) -> {"Lo","L"}; +lookup(66410) -> {"Lo","L"}; +lookup(66411) -> {"Lo","L"}; +lookup(66412) -> {"Lo","L"}; +lookup(66413) -> {"Lo","L"}; +lookup(66414) -> {"Lo","L"}; +lookup(66415) -> {"Lo","L"}; +lookup(66416) -> {"Lo","L"}; +lookup(66417) -> {"Lo","L"}; +lookup(66418) -> {"Lo","L"}; +lookup(66419) -> {"Lo","L"}; +lookup(66420) -> {"Lo","L"}; +lookup(66421) -> {"Lo","L"}; +lookup(66422) -> {"Mn","NSM"}; +lookup(66423) -> {"Mn","NSM"}; +lookup(66424) -> {"Mn","NSM"}; +lookup(66425) -> {"Mn","NSM"}; +lookup(66426) -> {"Mn","NSM"}; +lookup(66432) -> {"Lo","L"}; +lookup(66433) -> {"Lo","L"}; +lookup(66434) -> {"Lo","L"}; +lookup(66435) -> {"Lo","L"}; +lookup(66436) -> 
{"Lo","L"}; +lookup(66437) -> {"Lo","L"}; +lookup(66438) -> {"Lo","L"}; +lookup(66439) -> {"Lo","L"}; +lookup(66440) -> {"Lo","L"}; +lookup(66441) -> {"Lo","L"}; +lookup(66442) -> {"Lo","L"}; +lookup(66443) -> {"Lo","L"}; +lookup(66444) -> {"Lo","L"}; +lookup(66445) -> {"Lo","L"}; +lookup(66446) -> {"Lo","L"}; +lookup(66447) -> {"Lo","L"}; +lookup(66448) -> {"Lo","L"}; +lookup(66449) -> {"Lo","L"}; +lookup(66450) -> {"Lo","L"}; +lookup(66451) -> {"Lo","L"}; +lookup(66452) -> {"Lo","L"}; +lookup(66453) -> {"Lo","L"}; +lookup(66454) -> {"Lo","L"}; +lookup(66455) -> {"Lo","L"}; +lookup(66456) -> {"Lo","L"}; +lookup(66457) -> {"Lo","L"}; +lookup(66458) -> {"Lo","L"}; +lookup(66459) -> {"Lo","L"}; +lookup(66460) -> {"Lo","L"}; +lookup(66461) -> {"Lo","L"}; +lookup(66463) -> {"Po","L"}; +lookup(66464) -> {"Lo","L"}; +lookup(66465) -> {"Lo","L"}; +lookup(66466) -> {"Lo","L"}; +lookup(66467) -> {"Lo","L"}; +lookup(66468) -> {"Lo","L"}; +lookup(66469) -> {"Lo","L"}; +lookup(66470) -> {"Lo","L"}; +lookup(66471) -> {"Lo","L"}; +lookup(66472) -> {"Lo","L"}; +lookup(66473) -> {"Lo","L"}; +lookup(66474) -> {"Lo","L"}; +lookup(66475) -> {"Lo","L"}; +lookup(66476) -> {"Lo","L"}; +lookup(66477) -> {"Lo","L"}; +lookup(66478) -> {"Lo","L"}; +lookup(66479) -> {"Lo","L"}; +lookup(66480) -> {"Lo","L"}; +lookup(66481) -> {"Lo","L"}; +lookup(66482) -> {"Lo","L"}; +lookup(66483) -> {"Lo","L"}; +lookup(66484) -> {"Lo","L"}; +lookup(66485) -> {"Lo","L"}; +lookup(66486) -> {"Lo","L"}; +lookup(66487) -> {"Lo","L"}; +lookup(66488) -> {"Lo","L"}; +lookup(66489) -> {"Lo","L"}; +lookup(66490) -> {"Lo","L"}; +lookup(66491) -> {"Lo","L"}; +lookup(66492) -> {"Lo","L"}; +lookup(66493) -> {"Lo","L"}; +lookup(66494) -> {"Lo","L"}; +lookup(66495) -> {"Lo","L"}; +lookup(66496) -> {"Lo","L"}; +lookup(66497) -> {"Lo","L"}; +lookup(66498) -> {"Lo","L"}; +lookup(66499) -> {"Lo","L"}; +lookup(66504) -> {"Lo","L"}; +lookup(66505) -> {"Lo","L"}; +lookup(66506) -> {"Lo","L"}; +lookup(66507) -> {"Lo","L"}; 
+lookup(66508) -> {"Lo","L"}; +lookup(66509) -> {"Lo","L"}; +lookup(66510) -> {"Lo","L"}; +lookup(66511) -> {"Lo","L"}; +lookup(66512) -> {"Po","L"}; +lookup(66513) -> {"Nl","L"}; +lookup(66514) -> {"Nl","L"}; +lookup(66515) -> {"Nl","L"}; +lookup(66516) -> {"Nl","L"}; +lookup(66517) -> {"Nl","L"}; +lookup(66560) -> {"Lu","L"}; +lookup(66561) -> {"Lu","L"}; +lookup(66562) -> {"Lu","L"}; +lookup(66563) -> {"Lu","L"}; +lookup(66564) -> {"Lu","L"}; +lookup(66565) -> {"Lu","L"}; +lookup(66566) -> {"Lu","L"}; +lookup(66567) -> {"Lu","L"}; +lookup(66568) -> {"Lu","L"}; +lookup(66569) -> {"Lu","L"}; +lookup(66570) -> {"Lu","L"}; +lookup(66571) -> {"Lu","L"}; +lookup(66572) -> {"Lu","L"}; +lookup(66573) -> {"Lu","L"}; +lookup(66574) -> {"Lu","L"}; +lookup(66575) -> {"Lu","L"}; +lookup(66576) -> {"Lu","L"}; +lookup(66577) -> {"Lu","L"}; +lookup(66578) -> {"Lu","L"}; +lookup(66579) -> {"Lu","L"}; +lookup(66580) -> {"Lu","L"}; +lookup(66581) -> {"Lu","L"}; +lookup(66582) -> {"Lu","L"}; +lookup(66583) -> {"Lu","L"}; +lookup(66584) -> {"Lu","L"}; +lookup(66585) -> {"Lu","L"}; +lookup(66586) -> {"Lu","L"}; +lookup(66587) -> {"Lu","L"}; +lookup(66588) -> {"Lu","L"}; +lookup(66589) -> {"Lu","L"}; +lookup(66590) -> {"Lu","L"}; +lookup(66591) -> {"Lu","L"}; +lookup(66592) -> {"Lu","L"}; +lookup(66593) -> {"Lu","L"}; +lookup(66594) -> {"Lu","L"}; +lookup(66595) -> {"Lu","L"}; +lookup(66596) -> {"Lu","L"}; +lookup(66597) -> {"Lu","L"}; +lookup(66598) -> {"Lu","L"}; +lookup(66599) -> {"Lu","L"}; +lookup(66600) -> {"Ll","L"}; +lookup(66601) -> {"Ll","L"}; +lookup(66602) -> {"Ll","L"}; +lookup(66603) -> {"Ll","L"}; +lookup(66604) -> {"Ll","L"}; +lookup(66605) -> {"Ll","L"}; +lookup(66606) -> {"Ll","L"}; +lookup(66607) -> {"Ll","L"}; +lookup(66608) -> {"Ll","L"}; +lookup(66609) -> {"Ll","L"}; +lookup(66610) -> {"Ll","L"}; +lookup(66611) -> {"Ll","L"}; +lookup(66612) -> {"Ll","L"}; +lookup(66613) -> {"Ll","L"}; +lookup(66614) -> {"Ll","L"}; +lookup(66615) -> {"Ll","L"}; +lookup(66616) -> 
{"Ll","L"}; +lookup(66617) -> {"Ll","L"}; +lookup(66618) -> {"Ll","L"}; +lookup(66619) -> {"Ll","L"}; +lookup(66620) -> {"Ll","L"}; +lookup(66621) -> {"Ll","L"}; +lookup(66622) -> {"Ll","L"}; +lookup(66623) -> {"Ll","L"}; +lookup(66624) -> {"Ll","L"}; +lookup(66625) -> {"Ll","L"}; +lookup(66626) -> {"Ll","L"}; +lookup(66627) -> {"Ll","L"}; +lookup(66628) -> {"Ll","L"}; +lookup(66629) -> {"Ll","L"}; +lookup(66630) -> {"Ll","L"}; +lookup(66631) -> {"Ll","L"}; +lookup(66632) -> {"Ll","L"}; +lookup(66633) -> {"Ll","L"}; +lookup(66634) -> {"Ll","L"}; +lookup(66635) -> {"Ll","L"}; +lookup(66636) -> {"Ll","L"}; +lookup(66637) -> {"Ll","L"}; +lookup(66638) -> {"Ll","L"}; +lookup(66639) -> {"Ll","L"}; +lookup(66640) -> {"Lo","L"}; +lookup(66641) -> {"Lo","L"}; +lookup(66642) -> {"Lo","L"}; +lookup(66643) -> {"Lo","L"}; +lookup(66644) -> {"Lo","L"}; +lookup(66645) -> {"Lo","L"}; +lookup(66646) -> {"Lo","L"}; +lookup(66647) -> {"Lo","L"}; +lookup(66648) -> {"Lo","L"}; +lookup(66649) -> {"Lo","L"}; +lookup(66650) -> {"Lo","L"}; +lookup(66651) -> {"Lo","L"}; +lookup(66652) -> {"Lo","L"}; +lookup(66653) -> {"Lo","L"}; +lookup(66654) -> {"Lo","L"}; +lookup(66655) -> {"Lo","L"}; +lookup(66656) -> {"Lo","L"}; +lookup(66657) -> {"Lo","L"}; +lookup(66658) -> {"Lo","L"}; +lookup(66659) -> {"Lo","L"}; +lookup(66660) -> {"Lo","L"}; +lookup(66661) -> {"Lo","L"}; +lookup(66662) -> {"Lo","L"}; +lookup(66663) -> {"Lo","L"}; +lookup(66664) -> {"Lo","L"}; +lookup(66665) -> {"Lo","L"}; +lookup(66666) -> {"Lo","L"}; +lookup(66667) -> {"Lo","L"}; +lookup(66668) -> {"Lo","L"}; +lookup(66669) -> {"Lo","L"}; +lookup(66670) -> {"Lo","L"}; +lookup(66671) -> {"Lo","L"}; +lookup(66672) -> {"Lo","L"}; +lookup(66673) -> {"Lo","L"}; +lookup(66674) -> {"Lo","L"}; +lookup(66675) -> {"Lo","L"}; +lookup(66676) -> {"Lo","L"}; +lookup(66677) -> {"Lo","L"}; +lookup(66678) -> {"Lo","L"}; +lookup(66679) -> {"Lo","L"}; +lookup(66680) -> {"Lo","L"}; +lookup(66681) -> {"Lo","L"}; +lookup(66682) -> {"Lo","L"}; 
+lookup(66683) -> {"Lo","L"}; +lookup(66684) -> {"Lo","L"}; +lookup(66685) -> {"Lo","L"}; +lookup(66686) -> {"Lo","L"}; +lookup(66687) -> {"Lo","L"}; +lookup(66688) -> {"Lo","L"}; +lookup(66689) -> {"Lo","L"}; +lookup(66690) -> {"Lo","L"}; +lookup(66691) -> {"Lo","L"}; +lookup(66692) -> {"Lo","L"}; +lookup(66693) -> {"Lo","L"}; +lookup(66694) -> {"Lo","L"}; +lookup(66695) -> {"Lo","L"}; +lookup(66696) -> {"Lo","L"}; +lookup(66697) -> {"Lo","L"}; +lookup(66698) -> {"Lo","L"}; +lookup(66699) -> {"Lo","L"}; +lookup(66700) -> {"Lo","L"}; +lookup(66701) -> {"Lo","L"}; +lookup(66702) -> {"Lo","L"}; +lookup(66703) -> {"Lo","L"}; +lookup(66704) -> {"Lo","L"}; +lookup(66705) -> {"Lo","L"}; +lookup(66706) -> {"Lo","L"}; +lookup(66707) -> {"Lo","L"}; +lookup(66708) -> {"Lo","L"}; +lookup(66709) -> {"Lo","L"}; +lookup(66710) -> {"Lo","L"}; +lookup(66711) -> {"Lo","L"}; +lookup(66712) -> {"Lo","L"}; +lookup(66713) -> {"Lo","L"}; +lookup(66714) -> {"Lo","L"}; +lookup(66715) -> {"Lo","L"}; +lookup(66716) -> {"Lo","L"}; +lookup(66717) -> {"Lo","L"}; +lookup(66720) -> {"Nd","L"}; +lookup(66721) -> {"Nd","L"}; +lookup(66722) -> {"Nd","L"}; +lookup(66723) -> {"Nd","L"}; +lookup(66724) -> {"Nd","L"}; +lookup(66725) -> {"Nd","L"}; +lookup(66726) -> {"Nd","L"}; +lookup(66727) -> {"Nd","L"}; +lookup(66728) -> {"Nd","L"}; +lookup(66729) -> {"Nd","L"}; +lookup(66736) -> {"Lu","L"}; +lookup(66737) -> {"Lu","L"}; +lookup(66738) -> {"Lu","L"}; +lookup(66739) -> {"Lu","L"}; +lookup(66740) -> {"Lu","L"}; +lookup(66741) -> {"Lu","L"}; +lookup(66742) -> {"Lu","L"}; +lookup(66743) -> {"Lu","L"}; +lookup(66744) -> {"Lu","L"}; +lookup(66745) -> {"Lu","L"}; +lookup(66746) -> {"Lu","L"}; +lookup(66747) -> {"Lu","L"}; +lookup(66748) -> {"Lu","L"}; +lookup(66749) -> {"Lu","L"}; +lookup(66750) -> {"Lu","L"}; +lookup(66751) -> {"Lu","L"}; +lookup(66752) -> {"Lu","L"}; +lookup(66753) -> {"Lu","L"}; +lookup(66754) -> {"Lu","L"}; +lookup(66755) -> {"Lu","L"}; +lookup(66756) -> {"Lu","L"}; +lookup(66757) -> 
{"Lu","L"}; +lookup(66758) -> {"Lu","L"}; +lookup(66759) -> {"Lu","L"}; +lookup(66760) -> {"Lu","L"}; +lookup(66761) -> {"Lu","L"}; +lookup(66762) -> {"Lu","L"}; +lookup(66763) -> {"Lu","L"}; +lookup(66764) -> {"Lu","L"}; +lookup(66765) -> {"Lu","L"}; +lookup(66766) -> {"Lu","L"}; +lookup(66767) -> {"Lu","L"}; +lookup(66768) -> {"Lu","L"}; +lookup(66769) -> {"Lu","L"}; +lookup(66770) -> {"Lu","L"}; +lookup(66771) -> {"Lu","L"}; +lookup(66776) -> {"Ll","L"}; +lookup(66777) -> {"Ll","L"}; +lookup(66778) -> {"Ll","L"}; +lookup(66779) -> {"Ll","L"}; +lookup(66780) -> {"Ll","L"}; +lookup(66781) -> {"Ll","L"}; +lookup(66782) -> {"Ll","L"}; +lookup(66783) -> {"Ll","L"}; +lookup(66784) -> {"Ll","L"}; +lookup(66785) -> {"Ll","L"}; +lookup(66786) -> {"Ll","L"}; +lookup(66787) -> {"Ll","L"}; +lookup(66788) -> {"Ll","L"}; +lookup(66789) -> {"Ll","L"}; +lookup(66790) -> {"Ll","L"}; +lookup(66791) -> {"Ll","L"}; +lookup(66792) -> {"Ll","L"}; +lookup(66793) -> {"Ll","L"}; +lookup(66794) -> {"Ll","L"}; +lookup(66795) -> {"Ll","L"}; +lookup(66796) -> {"Ll","L"}; +lookup(66797) -> {"Ll","L"}; +lookup(66798) -> {"Ll","L"}; +lookup(66799) -> {"Ll","L"}; +lookup(66800) -> {"Ll","L"}; +lookup(66801) -> {"Ll","L"}; +lookup(66802) -> {"Ll","L"}; +lookup(66803) -> {"Ll","L"}; +lookup(66804) -> {"Ll","L"}; +lookup(66805) -> {"Ll","L"}; +lookup(66806) -> {"Ll","L"}; +lookup(66807) -> {"Ll","L"}; +lookup(66808) -> {"Ll","L"}; +lookup(66809) -> {"Ll","L"}; +lookup(66810) -> {"Ll","L"}; +lookup(66811) -> {"Ll","L"}; +lookup(66816) -> {"Lo","L"}; +lookup(66817) -> {"Lo","L"}; +lookup(66818) -> {"Lo","L"}; +lookup(66819) -> {"Lo","L"}; +lookup(66820) -> {"Lo","L"}; +lookup(66821) -> {"Lo","L"}; +lookup(66822) -> {"Lo","L"}; +lookup(66823) -> {"Lo","L"}; +lookup(66824) -> {"Lo","L"}; +lookup(66825) -> {"Lo","L"}; +lookup(66826) -> {"Lo","L"}; +lookup(66827) -> {"Lo","L"}; +lookup(66828) -> {"Lo","L"}; +lookup(66829) -> {"Lo","L"}; +lookup(66830) -> {"Lo","L"}; +lookup(66831) -> {"Lo","L"}; 
+lookup(66832) -> {"Lo","L"}; +lookup(66833) -> {"Lo","L"}; +lookup(66834) -> {"Lo","L"}; +lookup(66835) -> {"Lo","L"}; +lookup(66836) -> {"Lo","L"}; +lookup(66837) -> {"Lo","L"}; +lookup(66838) -> {"Lo","L"}; +lookup(66839) -> {"Lo","L"}; +lookup(66840) -> {"Lo","L"}; +lookup(66841) -> {"Lo","L"}; +lookup(66842) -> {"Lo","L"}; +lookup(66843) -> {"Lo","L"}; +lookup(66844) -> {"Lo","L"}; +lookup(66845) -> {"Lo","L"}; +lookup(66846) -> {"Lo","L"}; +lookup(66847) -> {"Lo","L"}; +lookup(66848) -> {"Lo","L"}; +lookup(66849) -> {"Lo","L"}; +lookup(66850) -> {"Lo","L"}; +lookup(66851) -> {"Lo","L"}; +lookup(66852) -> {"Lo","L"}; +lookup(66853) -> {"Lo","L"}; +lookup(66854) -> {"Lo","L"}; +lookup(66855) -> {"Lo","L"}; +lookup(66864) -> {"Lo","L"}; +lookup(66865) -> {"Lo","L"}; +lookup(66866) -> {"Lo","L"}; +lookup(66867) -> {"Lo","L"}; +lookup(66868) -> {"Lo","L"}; +lookup(66869) -> {"Lo","L"}; +lookup(66870) -> {"Lo","L"}; +lookup(66871) -> {"Lo","L"}; +lookup(66872) -> {"Lo","L"}; +lookup(66873) -> {"Lo","L"}; +lookup(66874) -> {"Lo","L"}; +lookup(66875) -> {"Lo","L"}; +lookup(66876) -> {"Lo","L"}; +lookup(66877) -> {"Lo","L"}; +lookup(66878) -> {"Lo","L"}; +lookup(66879) -> {"Lo","L"}; +lookup(66880) -> {"Lo","L"}; +lookup(66881) -> {"Lo","L"}; +lookup(66882) -> {"Lo","L"}; +lookup(66883) -> {"Lo","L"}; +lookup(66884) -> {"Lo","L"}; +lookup(66885) -> {"Lo","L"}; +lookup(66886) -> {"Lo","L"}; +lookup(66887) -> {"Lo","L"}; +lookup(66888) -> {"Lo","L"}; +lookup(66889) -> {"Lo","L"}; +lookup(66890) -> {"Lo","L"}; +lookup(66891) -> {"Lo","L"}; +lookup(66892) -> {"Lo","L"}; +lookup(66893) -> {"Lo","L"}; +lookup(66894) -> {"Lo","L"}; +lookup(66895) -> {"Lo","L"}; +lookup(66896) -> {"Lo","L"}; +lookup(66897) -> {"Lo","L"}; +lookup(66898) -> {"Lo","L"}; +lookup(66899) -> {"Lo","L"}; +lookup(66900) -> {"Lo","L"}; +lookup(66901) -> {"Lo","L"}; +lookup(66902) -> {"Lo","L"}; +lookup(66903) -> {"Lo","L"}; +lookup(66904) -> {"Lo","L"}; +lookup(66905) -> {"Lo","L"}; +lookup(66906) -> 
{"Lo","L"}; +lookup(66907) -> {"Lo","L"}; +lookup(66908) -> {"Lo","L"}; +lookup(66909) -> {"Lo","L"}; +lookup(66910) -> {"Lo","L"}; +lookup(66911) -> {"Lo","L"}; +lookup(66912) -> {"Lo","L"}; +lookup(66913) -> {"Lo","L"}; +lookup(66914) -> {"Lo","L"}; +lookup(66915) -> {"Lo","L"}; +lookup(66927) -> {"Po","L"}; +lookup(67072) -> {"Lo","L"}; +lookup(67073) -> {"Lo","L"}; +lookup(67074) -> {"Lo","L"}; +lookup(67075) -> {"Lo","L"}; +lookup(67076) -> {"Lo","L"}; +lookup(67077) -> {"Lo","L"}; +lookup(67078) -> {"Lo","L"}; +lookup(67079) -> {"Lo","L"}; +lookup(67080) -> {"Lo","L"}; +lookup(67081) -> {"Lo","L"}; +lookup(67082) -> {"Lo","L"}; +lookup(67083) -> {"Lo","L"}; +lookup(67084) -> {"Lo","L"}; +lookup(67085) -> {"Lo","L"}; +lookup(67086) -> {"Lo","L"}; +lookup(67087) -> {"Lo","L"}; +lookup(67088) -> {"Lo","L"}; +lookup(67089) -> {"Lo","L"}; +lookup(67090) -> {"Lo","L"}; +lookup(67091) -> {"Lo","L"}; +lookup(67092) -> {"Lo","L"}; +lookup(67093) -> {"Lo","L"}; +lookup(67094) -> {"Lo","L"}; +lookup(67095) -> {"Lo","L"}; +lookup(67096) -> {"Lo","L"}; +lookup(67097) -> {"Lo","L"}; +lookup(67098) -> {"Lo","L"}; +lookup(67099) -> {"Lo","L"}; +lookup(67100) -> {"Lo","L"}; +lookup(67101) -> {"Lo","L"}; +lookup(67102) -> {"Lo","L"}; +lookup(67103) -> {"Lo","L"}; +lookup(67104) -> {"Lo","L"}; +lookup(67105) -> {"Lo","L"}; +lookup(67106) -> {"Lo","L"}; +lookup(67107) -> {"Lo","L"}; +lookup(67108) -> {"Lo","L"}; +lookup(67109) -> {"Lo","L"}; +lookup(67110) -> {"Lo","L"}; +lookup(67111) -> {"Lo","L"}; +lookup(67112) -> {"Lo","L"}; +lookup(67113) -> {"Lo","L"}; +lookup(67114) -> {"Lo","L"}; +lookup(67115) -> {"Lo","L"}; +lookup(67116) -> {"Lo","L"}; +lookup(67117) -> {"Lo","L"}; +lookup(67118) -> {"Lo","L"}; +lookup(67119) -> {"Lo","L"}; +lookup(67120) -> {"Lo","L"}; +lookup(67121) -> {"Lo","L"}; +lookup(67122) -> {"Lo","L"}; +lookup(67123) -> {"Lo","L"}; +lookup(67124) -> {"Lo","L"}; +lookup(67125) -> {"Lo","L"}; +lookup(67126) -> {"Lo","L"}; +lookup(67127) -> {"Lo","L"}; 
+lookup(67128) -> {"Lo","L"}; +lookup(67129) -> {"Lo","L"}; +lookup(67130) -> {"Lo","L"}; +lookup(67131) -> {"Lo","L"}; +lookup(67132) -> {"Lo","L"}; +lookup(67133) -> {"Lo","L"}; +lookup(67134) -> {"Lo","L"}; +lookup(67135) -> {"Lo","L"}; +lookup(67136) -> {"Lo","L"}; +lookup(67137) -> {"Lo","L"}; +lookup(67138) -> {"Lo","L"}; +lookup(67139) -> {"Lo","L"}; +lookup(67140) -> {"Lo","L"}; +lookup(67141) -> {"Lo","L"}; +lookup(67142) -> {"Lo","L"}; +lookup(67143) -> {"Lo","L"}; +lookup(67144) -> {"Lo","L"}; +lookup(67145) -> {"Lo","L"}; +lookup(67146) -> {"Lo","L"}; +lookup(67147) -> {"Lo","L"}; +lookup(67148) -> {"Lo","L"}; +lookup(67149) -> {"Lo","L"}; +lookup(67150) -> {"Lo","L"}; +lookup(67151) -> {"Lo","L"}; +lookup(67152) -> {"Lo","L"}; +lookup(67153) -> {"Lo","L"}; +lookup(67154) -> {"Lo","L"}; +lookup(67155) -> {"Lo","L"}; +lookup(67156) -> {"Lo","L"}; +lookup(67157) -> {"Lo","L"}; +lookup(67158) -> {"Lo","L"}; +lookup(67159) -> {"Lo","L"}; +lookup(67160) -> {"Lo","L"}; +lookup(67161) -> {"Lo","L"}; +lookup(67162) -> {"Lo","L"}; +lookup(67163) -> {"Lo","L"}; +lookup(67164) -> {"Lo","L"}; +lookup(67165) -> {"Lo","L"}; +lookup(67166) -> {"Lo","L"}; +lookup(67167) -> {"Lo","L"}; +lookup(67168) -> {"Lo","L"}; +lookup(67169) -> {"Lo","L"}; +lookup(67170) -> {"Lo","L"}; +lookup(67171) -> {"Lo","L"}; +lookup(67172) -> {"Lo","L"}; +lookup(67173) -> {"Lo","L"}; +lookup(67174) -> {"Lo","L"}; +lookup(67175) -> {"Lo","L"}; +lookup(67176) -> {"Lo","L"}; +lookup(67177) -> {"Lo","L"}; +lookup(67178) -> {"Lo","L"}; +lookup(67179) -> {"Lo","L"}; +lookup(67180) -> {"Lo","L"}; +lookup(67181) -> {"Lo","L"}; +lookup(67182) -> {"Lo","L"}; +lookup(67183) -> {"Lo","L"}; +lookup(67184) -> {"Lo","L"}; +lookup(67185) -> {"Lo","L"}; +lookup(67186) -> {"Lo","L"}; +lookup(67187) -> {"Lo","L"}; +lookup(67188) -> {"Lo","L"}; +lookup(67189) -> {"Lo","L"}; +lookup(67190) -> {"Lo","L"}; +lookup(67191) -> {"Lo","L"}; +lookup(67192) -> {"Lo","L"}; +lookup(67193) -> {"Lo","L"}; +lookup(67194) -> 
{"Lo","L"}; +lookup(67195) -> {"Lo","L"}; +lookup(67196) -> {"Lo","L"}; +lookup(67197) -> {"Lo","L"}; +lookup(67198) -> {"Lo","L"}; +lookup(67199) -> {"Lo","L"}; +lookup(67200) -> {"Lo","L"}; +lookup(67201) -> {"Lo","L"}; +lookup(67202) -> {"Lo","L"}; +lookup(67203) -> {"Lo","L"}; +lookup(67204) -> {"Lo","L"}; +lookup(67205) -> {"Lo","L"}; +lookup(67206) -> {"Lo","L"}; +lookup(67207) -> {"Lo","L"}; +lookup(67208) -> {"Lo","L"}; +lookup(67209) -> {"Lo","L"}; +lookup(67210) -> {"Lo","L"}; +lookup(67211) -> {"Lo","L"}; +lookup(67212) -> {"Lo","L"}; +lookup(67213) -> {"Lo","L"}; +lookup(67214) -> {"Lo","L"}; +lookup(67215) -> {"Lo","L"}; +lookup(67216) -> {"Lo","L"}; +lookup(67217) -> {"Lo","L"}; +lookup(67218) -> {"Lo","L"}; +lookup(67219) -> {"Lo","L"}; +lookup(67220) -> {"Lo","L"}; +lookup(67221) -> {"Lo","L"}; +lookup(67222) -> {"Lo","L"}; +lookup(67223) -> {"Lo","L"}; +lookup(67224) -> {"Lo","L"}; +lookup(67225) -> {"Lo","L"}; +lookup(67226) -> {"Lo","L"}; +lookup(67227) -> {"Lo","L"}; +lookup(67228) -> {"Lo","L"}; +lookup(67229) -> {"Lo","L"}; +lookup(67230) -> {"Lo","L"}; +lookup(67231) -> {"Lo","L"}; +lookup(67232) -> {"Lo","L"}; +lookup(67233) -> {"Lo","L"}; +lookup(67234) -> {"Lo","L"}; +lookup(67235) -> {"Lo","L"}; +lookup(67236) -> {"Lo","L"}; +lookup(67237) -> {"Lo","L"}; +lookup(67238) -> {"Lo","L"}; +lookup(67239) -> {"Lo","L"}; +lookup(67240) -> {"Lo","L"}; +lookup(67241) -> {"Lo","L"}; +lookup(67242) -> {"Lo","L"}; +lookup(67243) -> {"Lo","L"}; +lookup(67244) -> {"Lo","L"}; +lookup(67245) -> {"Lo","L"}; +lookup(67246) -> {"Lo","L"}; +lookup(67247) -> {"Lo","L"}; +lookup(67248) -> {"Lo","L"}; +lookup(67249) -> {"Lo","L"}; +lookup(67250) -> {"Lo","L"}; +lookup(67251) -> {"Lo","L"}; +lookup(67252) -> {"Lo","L"}; +lookup(67253) -> {"Lo","L"}; +lookup(67254) -> {"Lo","L"}; +lookup(67255) -> {"Lo","L"}; +lookup(67256) -> {"Lo","L"}; +lookup(67257) -> {"Lo","L"}; +lookup(67258) -> {"Lo","L"}; +lookup(67259) -> {"Lo","L"}; +lookup(67260) -> {"Lo","L"}; 
+lookup(67261) -> {"Lo","L"}; +lookup(67262) -> {"Lo","L"}; +lookup(67263) -> {"Lo","L"}; +lookup(67264) -> {"Lo","L"}; +lookup(67265) -> {"Lo","L"}; +lookup(67266) -> {"Lo","L"}; +lookup(67267) -> {"Lo","L"}; +lookup(67268) -> {"Lo","L"}; +lookup(67269) -> {"Lo","L"}; +lookup(67270) -> {"Lo","L"}; +lookup(67271) -> {"Lo","L"}; +lookup(67272) -> {"Lo","L"}; +lookup(67273) -> {"Lo","L"}; +lookup(67274) -> {"Lo","L"}; +lookup(67275) -> {"Lo","L"}; +lookup(67276) -> {"Lo","L"}; +lookup(67277) -> {"Lo","L"}; +lookup(67278) -> {"Lo","L"}; +lookup(67279) -> {"Lo","L"}; +lookup(67280) -> {"Lo","L"}; +lookup(67281) -> {"Lo","L"}; +lookup(67282) -> {"Lo","L"}; +lookup(67283) -> {"Lo","L"}; +lookup(67284) -> {"Lo","L"}; +lookup(67285) -> {"Lo","L"}; +lookup(67286) -> {"Lo","L"}; +lookup(67287) -> {"Lo","L"}; +lookup(67288) -> {"Lo","L"}; +lookup(67289) -> {"Lo","L"}; +lookup(67290) -> {"Lo","L"}; +lookup(67291) -> {"Lo","L"}; +lookup(67292) -> {"Lo","L"}; +lookup(67293) -> {"Lo","L"}; +lookup(67294) -> {"Lo","L"}; +lookup(67295) -> {"Lo","L"}; +lookup(67296) -> {"Lo","L"}; +lookup(67297) -> {"Lo","L"}; +lookup(67298) -> {"Lo","L"}; +lookup(67299) -> {"Lo","L"}; +lookup(67300) -> {"Lo","L"}; +lookup(67301) -> {"Lo","L"}; +lookup(67302) -> {"Lo","L"}; +lookup(67303) -> {"Lo","L"}; +lookup(67304) -> {"Lo","L"}; +lookup(67305) -> {"Lo","L"}; +lookup(67306) -> {"Lo","L"}; +lookup(67307) -> {"Lo","L"}; +lookup(67308) -> {"Lo","L"}; +lookup(67309) -> {"Lo","L"}; +lookup(67310) -> {"Lo","L"}; +lookup(67311) -> {"Lo","L"}; +lookup(67312) -> {"Lo","L"}; +lookup(67313) -> {"Lo","L"}; +lookup(67314) -> {"Lo","L"}; +lookup(67315) -> {"Lo","L"}; +lookup(67316) -> {"Lo","L"}; +lookup(67317) -> {"Lo","L"}; +lookup(67318) -> {"Lo","L"}; +lookup(67319) -> {"Lo","L"}; +lookup(67320) -> {"Lo","L"}; +lookup(67321) -> {"Lo","L"}; +lookup(67322) -> {"Lo","L"}; +lookup(67323) -> {"Lo","L"}; +lookup(67324) -> {"Lo","L"}; +lookup(67325) -> {"Lo","L"}; +lookup(67326) -> {"Lo","L"}; +lookup(67327) -> 
{"Lo","L"}; +lookup(67328) -> {"Lo","L"}; +lookup(67329) -> {"Lo","L"}; +lookup(67330) -> {"Lo","L"}; +lookup(67331) -> {"Lo","L"}; +lookup(67332) -> {"Lo","L"}; +lookup(67333) -> {"Lo","L"}; +lookup(67334) -> {"Lo","L"}; +lookup(67335) -> {"Lo","L"}; +lookup(67336) -> {"Lo","L"}; +lookup(67337) -> {"Lo","L"}; +lookup(67338) -> {"Lo","L"}; +lookup(67339) -> {"Lo","L"}; +lookup(67340) -> {"Lo","L"}; +lookup(67341) -> {"Lo","L"}; +lookup(67342) -> {"Lo","L"}; +lookup(67343) -> {"Lo","L"}; +lookup(67344) -> {"Lo","L"}; +lookup(67345) -> {"Lo","L"}; +lookup(67346) -> {"Lo","L"}; +lookup(67347) -> {"Lo","L"}; +lookup(67348) -> {"Lo","L"}; +lookup(67349) -> {"Lo","L"}; +lookup(67350) -> {"Lo","L"}; +lookup(67351) -> {"Lo","L"}; +lookup(67352) -> {"Lo","L"}; +lookup(67353) -> {"Lo","L"}; +lookup(67354) -> {"Lo","L"}; +lookup(67355) -> {"Lo","L"}; +lookup(67356) -> {"Lo","L"}; +lookup(67357) -> {"Lo","L"}; +lookup(67358) -> {"Lo","L"}; +lookup(67359) -> {"Lo","L"}; +lookup(67360) -> {"Lo","L"}; +lookup(67361) -> {"Lo","L"}; +lookup(67362) -> {"Lo","L"}; +lookup(67363) -> {"Lo","L"}; +lookup(67364) -> {"Lo","L"}; +lookup(67365) -> {"Lo","L"}; +lookup(67366) -> {"Lo","L"}; +lookup(67367) -> {"Lo","L"}; +lookup(67368) -> {"Lo","L"}; +lookup(67369) -> {"Lo","L"}; +lookup(67370) -> {"Lo","L"}; +lookup(67371) -> {"Lo","L"}; +lookup(67372) -> {"Lo","L"}; +lookup(67373) -> {"Lo","L"}; +lookup(67374) -> {"Lo","L"}; +lookup(67375) -> {"Lo","L"}; +lookup(67376) -> {"Lo","L"}; +lookup(67377) -> {"Lo","L"}; +lookup(67378) -> {"Lo","L"}; +lookup(67379) -> {"Lo","L"}; +lookup(67380) -> {"Lo","L"}; +lookup(67381) -> {"Lo","L"}; +lookup(67382) -> {"Lo","L"}; +lookup(67392) -> {"Lo","L"}; +lookup(67393) -> {"Lo","L"}; +lookup(67394) -> {"Lo","L"}; +lookup(67395) -> {"Lo","L"}; +lookup(67396) -> {"Lo","L"}; +lookup(67397) -> {"Lo","L"}; +lookup(67398) -> {"Lo","L"}; +lookup(67399) -> {"Lo","L"}; +lookup(67400) -> {"Lo","L"}; +lookup(67401) -> {"Lo","L"}; +lookup(67402) -> {"Lo","L"}; 
+lookup(67403) -> {"Lo","L"}; +lookup(67404) -> {"Lo","L"}; +lookup(67405) -> {"Lo","L"}; +lookup(67406) -> {"Lo","L"}; +lookup(67407) -> {"Lo","L"}; +lookup(67408) -> {"Lo","L"}; +lookup(67409) -> {"Lo","L"}; +lookup(67410) -> {"Lo","L"}; +lookup(67411) -> {"Lo","L"}; +lookup(67412) -> {"Lo","L"}; +lookup(67413) -> {"Lo","L"}; +lookup(67424) -> {"Lo","L"}; +lookup(67425) -> {"Lo","L"}; +lookup(67426) -> {"Lo","L"}; +lookup(67427) -> {"Lo","L"}; +lookup(67428) -> {"Lo","L"}; +lookup(67429) -> {"Lo","L"}; +lookup(67430) -> {"Lo","L"}; +lookup(67431) -> {"Lo","L"}; +lookup(67584) -> {"Lo","R"}; +lookup(67585) -> {"Lo","R"}; +lookup(67586) -> {"Lo","R"}; +lookup(67587) -> {"Lo","R"}; +lookup(67588) -> {"Lo","R"}; +lookup(67589) -> {"Lo","R"}; +lookup(67592) -> {"Lo","R"}; +lookup(67594) -> {"Lo","R"}; +lookup(67595) -> {"Lo","R"}; +lookup(67596) -> {"Lo","R"}; +lookup(67597) -> {"Lo","R"}; +lookup(67598) -> {"Lo","R"}; +lookup(67599) -> {"Lo","R"}; +lookup(67600) -> {"Lo","R"}; +lookup(67601) -> {"Lo","R"}; +lookup(67602) -> {"Lo","R"}; +lookup(67603) -> {"Lo","R"}; +lookup(67604) -> {"Lo","R"}; +lookup(67605) -> {"Lo","R"}; +lookup(67606) -> {"Lo","R"}; +lookup(67607) -> {"Lo","R"}; +lookup(67608) -> {"Lo","R"}; +lookup(67609) -> {"Lo","R"}; +lookup(67610) -> {"Lo","R"}; +lookup(67611) -> {"Lo","R"}; +lookup(67612) -> {"Lo","R"}; +lookup(67613) -> {"Lo","R"}; +lookup(67614) -> {"Lo","R"}; +lookup(67615) -> {"Lo","R"}; +lookup(67616) -> {"Lo","R"}; +lookup(67617) -> {"Lo","R"}; +lookup(67618) -> {"Lo","R"}; +lookup(67619) -> {"Lo","R"}; +lookup(67620) -> {"Lo","R"}; +lookup(67621) -> {"Lo","R"}; +lookup(67622) -> {"Lo","R"}; +lookup(67623) -> {"Lo","R"}; +lookup(67624) -> {"Lo","R"}; +lookup(67625) -> {"Lo","R"}; +lookup(67626) -> {"Lo","R"}; +lookup(67627) -> {"Lo","R"}; +lookup(67628) -> {"Lo","R"}; +lookup(67629) -> {"Lo","R"}; +lookup(67630) -> {"Lo","R"}; +lookup(67631) -> {"Lo","R"}; +lookup(67632) -> {"Lo","R"}; +lookup(67633) -> {"Lo","R"}; +lookup(67634) -> 
{"Lo","R"}; +lookup(67635) -> {"Lo","R"}; +lookup(67636) -> {"Lo","R"}; +lookup(67637) -> {"Lo","R"}; +lookup(67639) -> {"Lo","R"}; +lookup(67640) -> {"Lo","R"}; +lookup(67644) -> {"Lo","R"}; +lookup(67647) -> {"Lo","R"}; +lookup(67648) -> {"Lo","R"}; +lookup(67649) -> {"Lo","R"}; +lookup(67650) -> {"Lo","R"}; +lookup(67651) -> {"Lo","R"}; +lookup(67652) -> {"Lo","R"}; +lookup(67653) -> {"Lo","R"}; +lookup(67654) -> {"Lo","R"}; +lookup(67655) -> {"Lo","R"}; +lookup(67656) -> {"Lo","R"}; +lookup(67657) -> {"Lo","R"}; +lookup(67658) -> {"Lo","R"}; +lookup(67659) -> {"Lo","R"}; +lookup(67660) -> {"Lo","R"}; +lookup(67661) -> {"Lo","R"}; +lookup(67662) -> {"Lo","R"}; +lookup(67663) -> {"Lo","R"}; +lookup(67664) -> {"Lo","R"}; +lookup(67665) -> {"Lo","R"}; +lookup(67666) -> {"Lo","R"}; +lookup(67667) -> {"Lo","R"}; +lookup(67668) -> {"Lo","R"}; +lookup(67669) -> {"Lo","R"}; +lookup(67671) -> {"Po","R"}; +lookup(67672) -> {"No","R"}; +lookup(67673) -> {"No","R"}; +lookup(67674) -> {"No","R"}; +lookup(67675) -> {"No","R"}; +lookup(67676) -> {"No","R"}; +lookup(67677) -> {"No","R"}; +lookup(67678) -> {"No","R"}; +lookup(67679) -> {"No","R"}; +lookup(67680) -> {"Lo","R"}; +lookup(67681) -> {"Lo","R"}; +lookup(67682) -> {"Lo","R"}; +lookup(67683) -> {"Lo","R"}; +lookup(67684) -> {"Lo","R"}; +lookup(67685) -> {"Lo","R"}; +lookup(67686) -> {"Lo","R"}; +lookup(67687) -> {"Lo","R"}; +lookup(67688) -> {"Lo","R"}; +lookup(67689) -> {"Lo","R"}; +lookup(67690) -> {"Lo","R"}; +lookup(67691) -> {"Lo","R"}; +lookup(67692) -> {"Lo","R"}; +lookup(67693) -> {"Lo","R"}; +lookup(67694) -> {"Lo","R"}; +lookup(67695) -> {"Lo","R"}; +lookup(67696) -> {"Lo","R"}; +lookup(67697) -> {"Lo","R"}; +lookup(67698) -> {"Lo","R"}; +lookup(67699) -> {"Lo","R"}; +lookup(67700) -> {"Lo","R"}; +lookup(67701) -> {"Lo","R"}; +lookup(67702) -> {"Lo","R"}; +lookup(67703) -> {"So","R"}; +lookup(67704) -> {"So","R"}; +lookup(67705) -> {"No","R"}; +lookup(67706) -> {"No","R"}; +lookup(67707) -> {"No","R"}; 
+lookup(67708) -> {"No","R"}; +lookup(67709) -> {"No","R"}; +lookup(67710) -> {"No","R"}; +lookup(67711) -> {"No","R"}; +lookup(67712) -> {"Lo","R"}; +lookup(67713) -> {"Lo","R"}; +lookup(67714) -> {"Lo","R"}; +lookup(67715) -> {"Lo","R"}; +lookup(67716) -> {"Lo","R"}; +lookup(67717) -> {"Lo","R"}; +lookup(67718) -> {"Lo","R"}; +lookup(67719) -> {"Lo","R"}; +lookup(67720) -> {"Lo","R"}; +lookup(67721) -> {"Lo","R"}; +lookup(67722) -> {"Lo","R"}; +lookup(67723) -> {"Lo","R"}; +lookup(67724) -> {"Lo","R"}; +lookup(67725) -> {"Lo","R"}; +lookup(67726) -> {"Lo","R"}; +lookup(67727) -> {"Lo","R"}; +lookup(67728) -> {"Lo","R"}; +lookup(67729) -> {"Lo","R"}; +lookup(67730) -> {"Lo","R"}; +lookup(67731) -> {"Lo","R"}; +lookup(67732) -> {"Lo","R"}; +lookup(67733) -> {"Lo","R"}; +lookup(67734) -> {"Lo","R"}; +lookup(67735) -> {"Lo","R"}; +lookup(67736) -> {"Lo","R"}; +lookup(67737) -> {"Lo","R"}; +lookup(67738) -> {"Lo","R"}; +lookup(67739) -> {"Lo","R"}; +lookup(67740) -> {"Lo","R"}; +lookup(67741) -> {"Lo","R"}; +lookup(67742) -> {"Lo","R"}; +lookup(67751) -> {"No","R"}; +lookup(67752) -> {"No","R"}; +lookup(67753) -> {"No","R"}; +lookup(67754) -> {"No","R"}; +lookup(67755) -> {"No","R"}; +lookup(67756) -> {"No","R"}; +lookup(67757) -> {"No","R"}; +lookup(67758) -> {"No","R"}; +lookup(67759) -> {"No","R"}; +lookup(67808) -> {"Lo","R"}; +lookup(67809) -> {"Lo","R"}; +lookup(67810) -> {"Lo","R"}; +lookup(67811) -> {"Lo","R"}; +lookup(67812) -> {"Lo","R"}; +lookup(67813) -> {"Lo","R"}; +lookup(67814) -> {"Lo","R"}; +lookup(67815) -> {"Lo","R"}; +lookup(67816) -> {"Lo","R"}; +lookup(67817) -> {"Lo","R"}; +lookup(67818) -> {"Lo","R"}; +lookup(67819) -> {"Lo","R"}; +lookup(67820) -> {"Lo","R"}; +lookup(67821) -> {"Lo","R"}; +lookup(67822) -> {"Lo","R"}; +lookup(67823) -> {"Lo","R"}; +lookup(67824) -> {"Lo","R"}; +lookup(67825) -> {"Lo","R"}; +lookup(67826) -> {"Lo","R"}; +lookup(67828) -> {"Lo","R"}; +lookup(67829) -> {"Lo","R"}; +lookup(67835) -> {"No","R"}; +lookup(67836) -> 
{"No","R"}; +lookup(67837) -> {"No","R"}; +lookup(67838) -> {"No","R"}; +lookup(67839) -> {"No","R"}; +lookup(67840) -> {"Lo","R"}; +lookup(67841) -> {"Lo","R"}; +lookup(67842) -> {"Lo","R"}; +lookup(67843) -> {"Lo","R"}; +lookup(67844) -> {"Lo","R"}; +lookup(67845) -> {"Lo","R"}; +lookup(67846) -> {"Lo","R"}; +lookup(67847) -> {"Lo","R"}; +lookup(67848) -> {"Lo","R"}; +lookup(67849) -> {"Lo","R"}; +lookup(67850) -> {"Lo","R"}; +lookup(67851) -> {"Lo","R"}; +lookup(67852) -> {"Lo","R"}; +lookup(67853) -> {"Lo","R"}; +lookup(67854) -> {"Lo","R"}; +lookup(67855) -> {"Lo","R"}; +lookup(67856) -> {"Lo","R"}; +lookup(67857) -> {"Lo","R"}; +lookup(67858) -> {"Lo","R"}; +lookup(67859) -> {"Lo","R"}; +lookup(67860) -> {"Lo","R"}; +lookup(67861) -> {"Lo","R"}; +lookup(67862) -> {"No","R"}; +lookup(67863) -> {"No","R"}; +lookup(67864) -> {"No","R"}; +lookup(67865) -> {"No","R"}; +lookup(67866) -> {"No","R"}; +lookup(67867) -> {"No","R"}; +lookup(67871) -> {"Po","ON"}; +lookup(67872) -> {"Lo","R"}; +lookup(67873) -> {"Lo","R"}; +lookup(67874) -> {"Lo","R"}; +lookup(67875) -> {"Lo","R"}; +lookup(67876) -> {"Lo","R"}; +lookup(67877) -> {"Lo","R"}; +lookup(67878) -> {"Lo","R"}; +lookup(67879) -> {"Lo","R"}; +lookup(67880) -> {"Lo","R"}; +lookup(67881) -> {"Lo","R"}; +lookup(67882) -> {"Lo","R"}; +lookup(67883) -> {"Lo","R"}; +lookup(67884) -> {"Lo","R"}; +lookup(67885) -> {"Lo","R"}; +lookup(67886) -> {"Lo","R"}; +lookup(67887) -> {"Lo","R"}; +lookup(67888) -> {"Lo","R"}; +lookup(67889) -> {"Lo","R"}; +lookup(67890) -> {"Lo","R"}; +lookup(67891) -> {"Lo","R"}; +lookup(67892) -> {"Lo","R"}; +lookup(67893) -> {"Lo","R"}; +lookup(67894) -> {"Lo","R"}; +lookup(67895) -> {"Lo","R"}; +lookup(67896) -> {"Lo","R"}; +lookup(67897) -> {"Lo","R"}; +lookup(67903) -> {"Po","R"}; +lookup(67968) -> {"Lo","R"}; +lookup(67969) -> {"Lo","R"}; +lookup(67970) -> {"Lo","R"}; +lookup(67971) -> {"Lo","R"}; +lookup(67972) -> {"Lo","R"}; +lookup(67973) -> {"Lo","R"}; +lookup(67974) -> {"Lo","R"}; 
+lookup(67975) -> {"Lo","R"}; +lookup(67976) -> {"Lo","R"}; +lookup(67977) -> {"Lo","R"}; +lookup(67978) -> {"Lo","R"}; +lookup(67979) -> {"Lo","R"}; +lookup(67980) -> {"Lo","R"}; +lookup(67981) -> {"Lo","R"}; +lookup(67982) -> {"Lo","R"}; +lookup(67983) -> {"Lo","R"}; +lookup(67984) -> {"Lo","R"}; +lookup(67985) -> {"Lo","R"}; +lookup(67986) -> {"Lo","R"}; +lookup(67987) -> {"Lo","R"}; +lookup(67988) -> {"Lo","R"}; +lookup(67989) -> {"Lo","R"}; +lookup(67990) -> {"Lo","R"}; +lookup(67991) -> {"Lo","R"}; +lookup(67992) -> {"Lo","R"}; +lookup(67993) -> {"Lo","R"}; +lookup(67994) -> {"Lo","R"}; +lookup(67995) -> {"Lo","R"}; +lookup(67996) -> {"Lo","R"}; +lookup(67997) -> {"Lo","R"}; +lookup(67998) -> {"Lo","R"}; +lookup(67999) -> {"Lo","R"}; +lookup(68000) -> {"Lo","R"}; +lookup(68001) -> {"Lo","R"}; +lookup(68002) -> {"Lo","R"}; +lookup(68003) -> {"Lo","R"}; +lookup(68004) -> {"Lo","R"}; +lookup(68005) -> {"Lo","R"}; +lookup(68006) -> {"Lo","R"}; +lookup(68007) -> {"Lo","R"}; +lookup(68008) -> {"Lo","R"}; +lookup(68009) -> {"Lo","R"}; +lookup(68010) -> {"Lo","R"}; +lookup(68011) -> {"Lo","R"}; +lookup(68012) -> {"Lo","R"}; +lookup(68013) -> {"Lo","R"}; +lookup(68014) -> {"Lo","R"}; +lookup(68015) -> {"Lo","R"}; +lookup(68016) -> {"Lo","R"}; +lookup(68017) -> {"Lo","R"}; +lookup(68018) -> {"Lo","R"}; +lookup(68019) -> {"Lo","R"}; +lookup(68020) -> {"Lo","R"}; +lookup(68021) -> {"Lo","R"}; +lookup(68022) -> {"Lo","R"}; +lookup(68023) -> {"Lo","R"}; +lookup(68028) -> {"No","R"}; +lookup(68029) -> {"No","R"}; +lookup(68030) -> {"Lo","R"}; +lookup(68031) -> {"Lo","R"}; +lookup(68032) -> {"No","R"}; +lookup(68033) -> {"No","R"}; +lookup(68034) -> {"No","R"}; +lookup(68035) -> {"No","R"}; +lookup(68036) -> {"No","R"}; +lookup(68037) -> {"No","R"}; +lookup(68038) -> {"No","R"}; +lookup(68039) -> {"No","R"}; +lookup(68040) -> {"No","R"}; +lookup(68041) -> {"No","R"}; +lookup(68042) -> {"No","R"}; +lookup(68043) -> {"No","R"}; +lookup(68044) -> {"No","R"}; +lookup(68045) -> 
{"No","R"}; +lookup(68046) -> {"No","R"}; +lookup(68047) -> {"No","R"}; +lookup(68050) -> {"No","R"}; +lookup(68051) -> {"No","R"}; +lookup(68052) -> {"No","R"}; +lookup(68053) -> {"No","R"}; +lookup(68054) -> {"No","R"}; +lookup(68055) -> {"No","R"}; +lookup(68056) -> {"No","R"}; +lookup(68057) -> {"No","R"}; +lookup(68058) -> {"No","R"}; +lookup(68059) -> {"No","R"}; +lookup(68060) -> {"No","R"}; +lookup(68061) -> {"No","R"}; +lookup(68062) -> {"No","R"}; +lookup(68063) -> {"No","R"}; +lookup(68064) -> {"No","R"}; +lookup(68065) -> {"No","R"}; +lookup(68066) -> {"No","R"}; +lookup(68067) -> {"No","R"}; +lookup(68068) -> {"No","R"}; +lookup(68069) -> {"No","R"}; +lookup(68070) -> {"No","R"}; +lookup(68071) -> {"No","R"}; +lookup(68072) -> {"No","R"}; +lookup(68073) -> {"No","R"}; +lookup(68074) -> {"No","R"}; +lookup(68075) -> {"No","R"}; +lookup(68076) -> {"No","R"}; +lookup(68077) -> {"No","R"}; +lookup(68078) -> {"No","R"}; +lookup(68079) -> {"No","R"}; +lookup(68080) -> {"No","R"}; +lookup(68081) -> {"No","R"}; +lookup(68082) -> {"No","R"}; +lookup(68083) -> {"No","R"}; +lookup(68084) -> {"No","R"}; +lookup(68085) -> {"No","R"}; +lookup(68086) -> {"No","R"}; +lookup(68087) -> {"No","R"}; +lookup(68088) -> {"No","R"}; +lookup(68089) -> {"No","R"}; +lookup(68090) -> {"No","R"}; +lookup(68091) -> {"No","R"}; +lookup(68092) -> {"No","R"}; +lookup(68093) -> {"No","R"}; +lookup(68094) -> {"No","R"}; +lookup(68095) -> {"No","R"}; +lookup(68096) -> {"Lo","R"}; +lookup(68097) -> {"Mn","NSM"}; +lookup(68098) -> {"Mn","NSM"}; +lookup(68099) -> {"Mn","NSM"}; +lookup(68101) -> {"Mn","NSM"}; +lookup(68102) -> {"Mn","NSM"}; +lookup(68108) -> {"Mn","NSM"}; +lookup(68109) -> {"Mn","NSM"}; +lookup(68110) -> {"Mn","NSM"}; +lookup(68111) -> {"Mn","NSM"}; +lookup(68112) -> {"Lo","R"}; +lookup(68113) -> {"Lo","R"}; +lookup(68114) -> {"Lo","R"}; +lookup(68115) -> {"Lo","R"}; +lookup(68117) -> {"Lo","R"}; +lookup(68118) -> {"Lo","R"}; +lookup(68119) -> {"Lo","R"}; +lookup(68121) -> 
{"Lo","R"}; +lookup(68122) -> {"Lo","R"}; +lookup(68123) -> {"Lo","R"}; +lookup(68124) -> {"Lo","R"}; +lookup(68125) -> {"Lo","R"}; +lookup(68126) -> {"Lo","R"}; +lookup(68127) -> {"Lo","R"}; +lookup(68128) -> {"Lo","R"}; +lookup(68129) -> {"Lo","R"}; +lookup(68130) -> {"Lo","R"}; +lookup(68131) -> {"Lo","R"}; +lookup(68132) -> {"Lo","R"}; +lookup(68133) -> {"Lo","R"}; +lookup(68134) -> {"Lo","R"}; +lookup(68135) -> {"Lo","R"}; +lookup(68136) -> {"Lo","R"}; +lookup(68137) -> {"Lo","R"}; +lookup(68138) -> {"Lo","R"}; +lookup(68139) -> {"Lo","R"}; +lookup(68140) -> {"Lo","R"}; +lookup(68141) -> {"Lo","R"}; +lookup(68142) -> {"Lo","R"}; +lookup(68143) -> {"Lo","R"}; +lookup(68144) -> {"Lo","R"}; +lookup(68145) -> {"Lo","R"}; +lookup(68146) -> {"Lo","R"}; +lookup(68147) -> {"Lo","R"}; +lookup(68148) -> {"Lo","R"}; +lookup(68149) -> {"Lo","R"}; +lookup(68152) -> {"Mn","NSM"}; +lookup(68153) -> {"Mn","NSM"}; +lookup(68154) -> {"Mn","NSM"}; +lookup(68159) -> {"Mn","NSM"}; +lookup(68160) -> {"No","R"}; +lookup(68161) -> {"No","R"}; +lookup(68162) -> {"No","R"}; +lookup(68163) -> {"No","R"}; +lookup(68164) -> {"No","R"}; +lookup(68165) -> {"No","R"}; +lookup(68166) -> {"No","R"}; +lookup(68167) -> {"No","R"}; +lookup(68168) -> {"No","R"}; +lookup(68176) -> {"Po","R"}; +lookup(68177) -> {"Po","R"}; +lookup(68178) -> {"Po","R"}; +lookup(68179) -> {"Po","R"}; +lookup(68180) -> {"Po","R"}; +lookup(68181) -> {"Po","R"}; +lookup(68182) -> {"Po","R"}; +lookup(68183) -> {"Po","R"}; +lookup(68184) -> {"Po","R"}; +lookup(68192) -> {"Lo","R"}; +lookup(68193) -> {"Lo","R"}; +lookup(68194) -> {"Lo","R"}; +lookup(68195) -> {"Lo","R"}; +lookup(68196) -> {"Lo","R"}; +lookup(68197) -> {"Lo","R"}; +lookup(68198) -> {"Lo","R"}; +lookup(68199) -> {"Lo","R"}; +lookup(68200) -> {"Lo","R"}; +lookup(68201) -> {"Lo","R"}; +lookup(68202) -> {"Lo","R"}; +lookup(68203) -> {"Lo","R"}; +lookup(68204) -> {"Lo","R"}; +lookup(68205) -> {"Lo","R"}; +lookup(68206) -> {"Lo","R"}; +lookup(68207) -> {"Lo","R"}; 
+lookup(68208) -> {"Lo","R"}; +lookup(68209) -> {"Lo","R"}; +lookup(68210) -> {"Lo","R"}; +lookup(68211) -> {"Lo","R"}; +lookup(68212) -> {"Lo","R"}; +lookup(68213) -> {"Lo","R"}; +lookup(68214) -> {"Lo","R"}; +lookup(68215) -> {"Lo","R"}; +lookup(68216) -> {"Lo","R"}; +lookup(68217) -> {"Lo","R"}; +lookup(68218) -> {"Lo","R"}; +lookup(68219) -> {"Lo","R"}; +lookup(68220) -> {"Lo","R"}; +lookup(68221) -> {"No","R"}; +lookup(68222) -> {"No","R"}; +lookup(68223) -> {"Po","R"}; +lookup(68224) -> {"Lo","R"}; +lookup(68225) -> {"Lo","R"}; +lookup(68226) -> {"Lo","R"}; +lookup(68227) -> {"Lo","R"}; +lookup(68228) -> {"Lo","R"}; +lookup(68229) -> {"Lo","R"}; +lookup(68230) -> {"Lo","R"}; +lookup(68231) -> {"Lo","R"}; +lookup(68232) -> {"Lo","R"}; +lookup(68233) -> {"Lo","R"}; +lookup(68234) -> {"Lo","R"}; +lookup(68235) -> {"Lo","R"}; +lookup(68236) -> {"Lo","R"}; +lookup(68237) -> {"Lo","R"}; +lookup(68238) -> {"Lo","R"}; +lookup(68239) -> {"Lo","R"}; +lookup(68240) -> {"Lo","R"}; +lookup(68241) -> {"Lo","R"}; +lookup(68242) -> {"Lo","R"}; +lookup(68243) -> {"Lo","R"}; +lookup(68244) -> {"Lo","R"}; +lookup(68245) -> {"Lo","R"}; +lookup(68246) -> {"Lo","R"}; +lookup(68247) -> {"Lo","R"}; +lookup(68248) -> {"Lo","R"}; +lookup(68249) -> {"Lo","R"}; +lookup(68250) -> {"Lo","R"}; +lookup(68251) -> {"Lo","R"}; +lookup(68252) -> {"Lo","R"}; +lookup(68253) -> {"No","R"}; +lookup(68254) -> {"No","R"}; +lookup(68255) -> {"No","R"}; +lookup(68288) -> {"Lo","R"}; +lookup(68289) -> {"Lo","R"}; +lookup(68290) -> {"Lo","R"}; +lookup(68291) -> {"Lo","R"}; +lookup(68292) -> {"Lo","R"}; +lookup(68293) -> {"Lo","R"}; +lookup(68294) -> {"Lo","R"}; +lookup(68295) -> {"Lo","R"}; +lookup(68296) -> {"So","R"}; +lookup(68297) -> {"Lo","R"}; +lookup(68298) -> {"Lo","R"}; +lookup(68299) -> {"Lo","R"}; +lookup(68300) -> {"Lo","R"}; +lookup(68301) -> {"Lo","R"}; +lookup(68302) -> {"Lo","R"}; +lookup(68303) -> {"Lo","R"}; +lookup(68304) -> {"Lo","R"}; +lookup(68305) -> {"Lo","R"}; +lookup(68306) -> 
{"Lo","R"}; +lookup(68307) -> {"Lo","R"}; +lookup(68308) -> {"Lo","R"}; +lookup(68309) -> {"Lo","R"}; +lookup(68310) -> {"Lo","R"}; +lookup(68311) -> {"Lo","R"}; +lookup(68312) -> {"Lo","R"}; +lookup(68313) -> {"Lo","R"}; +lookup(68314) -> {"Lo","R"}; +lookup(68315) -> {"Lo","R"}; +lookup(68316) -> {"Lo","R"}; +lookup(68317) -> {"Lo","R"}; +lookup(68318) -> {"Lo","R"}; +lookup(68319) -> {"Lo","R"}; +lookup(68320) -> {"Lo","R"}; +lookup(68321) -> {"Lo","R"}; +lookup(68322) -> {"Lo","R"}; +lookup(68323) -> {"Lo","R"}; +lookup(68324) -> {"Lo","R"}; +lookup(68325) -> {"Mn","NSM"}; +lookup(68326) -> {"Mn","NSM"}; +lookup(68331) -> {"No","R"}; +lookup(68332) -> {"No","R"}; +lookup(68333) -> {"No","R"}; +lookup(68334) -> {"No","R"}; +lookup(68335) -> {"No","R"}; +lookup(68336) -> {"Po","R"}; +lookup(68337) -> {"Po","R"}; +lookup(68338) -> {"Po","R"}; +lookup(68339) -> {"Po","R"}; +lookup(68340) -> {"Po","R"}; +lookup(68341) -> {"Po","R"}; +lookup(68342) -> {"Po","R"}; +lookup(68352) -> {"Lo","R"}; +lookup(68353) -> {"Lo","R"}; +lookup(68354) -> {"Lo","R"}; +lookup(68355) -> {"Lo","R"}; +lookup(68356) -> {"Lo","R"}; +lookup(68357) -> {"Lo","R"}; +lookup(68358) -> {"Lo","R"}; +lookup(68359) -> {"Lo","R"}; +lookup(68360) -> {"Lo","R"}; +lookup(68361) -> {"Lo","R"}; +lookup(68362) -> {"Lo","R"}; +lookup(68363) -> {"Lo","R"}; +lookup(68364) -> {"Lo","R"}; +lookup(68365) -> {"Lo","R"}; +lookup(68366) -> {"Lo","R"}; +lookup(68367) -> {"Lo","R"}; +lookup(68368) -> {"Lo","R"}; +lookup(68369) -> {"Lo","R"}; +lookup(68370) -> {"Lo","R"}; +lookup(68371) -> {"Lo","R"}; +lookup(68372) -> {"Lo","R"}; +lookup(68373) -> {"Lo","R"}; +lookup(68374) -> {"Lo","R"}; +lookup(68375) -> {"Lo","R"}; +lookup(68376) -> {"Lo","R"}; +lookup(68377) -> {"Lo","R"}; +lookup(68378) -> {"Lo","R"}; +lookup(68379) -> {"Lo","R"}; +lookup(68380) -> {"Lo","R"}; +lookup(68381) -> {"Lo","R"}; +lookup(68382) -> {"Lo","R"}; +lookup(68383) -> {"Lo","R"}; +lookup(68384) -> {"Lo","R"}; +lookup(68385) -> {"Lo","R"}; 
+lookup(68386) -> {"Lo","R"}; +lookup(68387) -> {"Lo","R"}; +lookup(68388) -> {"Lo","R"}; +lookup(68389) -> {"Lo","R"}; +lookup(68390) -> {"Lo","R"}; +lookup(68391) -> {"Lo","R"}; +lookup(68392) -> {"Lo","R"}; +lookup(68393) -> {"Lo","R"}; +lookup(68394) -> {"Lo","R"}; +lookup(68395) -> {"Lo","R"}; +lookup(68396) -> {"Lo","R"}; +lookup(68397) -> {"Lo","R"}; +lookup(68398) -> {"Lo","R"}; +lookup(68399) -> {"Lo","R"}; +lookup(68400) -> {"Lo","R"}; +lookup(68401) -> {"Lo","R"}; +lookup(68402) -> {"Lo","R"}; +lookup(68403) -> {"Lo","R"}; +lookup(68404) -> {"Lo","R"}; +lookup(68405) -> {"Lo","R"}; +lookup(68409) -> {"Po","ON"}; +lookup(68410) -> {"Po","ON"}; +lookup(68411) -> {"Po","ON"}; +lookup(68412) -> {"Po","ON"}; +lookup(68413) -> {"Po","ON"}; +lookup(68414) -> {"Po","ON"}; +lookup(68415) -> {"Po","ON"}; +lookup(68416) -> {"Lo","R"}; +lookup(68417) -> {"Lo","R"}; +lookup(68418) -> {"Lo","R"}; +lookup(68419) -> {"Lo","R"}; +lookup(68420) -> {"Lo","R"}; +lookup(68421) -> {"Lo","R"}; +lookup(68422) -> {"Lo","R"}; +lookup(68423) -> {"Lo","R"}; +lookup(68424) -> {"Lo","R"}; +lookup(68425) -> {"Lo","R"}; +lookup(68426) -> {"Lo","R"}; +lookup(68427) -> {"Lo","R"}; +lookup(68428) -> {"Lo","R"}; +lookup(68429) -> {"Lo","R"}; +lookup(68430) -> {"Lo","R"}; +lookup(68431) -> {"Lo","R"}; +lookup(68432) -> {"Lo","R"}; +lookup(68433) -> {"Lo","R"}; +lookup(68434) -> {"Lo","R"}; +lookup(68435) -> {"Lo","R"}; +lookup(68436) -> {"Lo","R"}; +lookup(68437) -> {"Lo","R"}; +lookup(68440) -> {"No","R"}; +lookup(68441) -> {"No","R"}; +lookup(68442) -> {"No","R"}; +lookup(68443) -> {"No","R"}; +lookup(68444) -> {"No","R"}; +lookup(68445) -> {"No","R"}; +lookup(68446) -> {"No","R"}; +lookup(68447) -> {"No","R"}; +lookup(68448) -> {"Lo","R"}; +lookup(68449) -> {"Lo","R"}; +lookup(68450) -> {"Lo","R"}; +lookup(68451) -> {"Lo","R"}; +lookup(68452) -> {"Lo","R"}; +lookup(68453) -> {"Lo","R"}; +lookup(68454) -> {"Lo","R"}; +lookup(68455) -> {"Lo","R"}; +lookup(68456) -> {"Lo","R"}; 
+lookup(68457) -> {"Lo","R"}; +lookup(68458) -> {"Lo","R"}; +lookup(68459) -> {"Lo","R"}; +lookup(68460) -> {"Lo","R"}; +lookup(68461) -> {"Lo","R"}; +lookup(68462) -> {"Lo","R"}; +lookup(68463) -> {"Lo","R"}; +lookup(68464) -> {"Lo","R"}; +lookup(68465) -> {"Lo","R"}; +lookup(68466) -> {"Lo","R"}; +lookup(68472) -> {"No","R"}; +lookup(68473) -> {"No","R"}; +lookup(68474) -> {"No","R"}; +lookup(68475) -> {"No","R"}; +lookup(68476) -> {"No","R"}; +lookup(68477) -> {"No","R"}; +lookup(68478) -> {"No","R"}; +lookup(68479) -> {"No","R"}; +lookup(68480) -> {"Lo","R"}; +lookup(68481) -> {"Lo","R"}; +lookup(68482) -> {"Lo","R"}; +lookup(68483) -> {"Lo","R"}; +lookup(68484) -> {"Lo","R"}; +lookup(68485) -> {"Lo","R"}; +lookup(68486) -> {"Lo","R"}; +lookup(68487) -> {"Lo","R"}; +lookup(68488) -> {"Lo","R"}; +lookup(68489) -> {"Lo","R"}; +lookup(68490) -> {"Lo","R"}; +lookup(68491) -> {"Lo","R"}; +lookup(68492) -> {"Lo","R"}; +lookup(68493) -> {"Lo","R"}; +lookup(68494) -> {"Lo","R"}; +lookup(68495) -> {"Lo","R"}; +lookup(68496) -> {"Lo","R"}; +lookup(68497) -> {"Lo","R"}; +lookup(68505) -> {"Po","R"}; +lookup(68506) -> {"Po","R"}; +lookup(68507) -> {"Po","R"}; +lookup(68508) -> {"Po","R"}; +lookup(68521) -> {"No","R"}; +lookup(68522) -> {"No","R"}; +lookup(68523) -> {"No","R"}; +lookup(68524) -> {"No","R"}; +lookup(68525) -> {"No","R"}; +lookup(68526) -> {"No","R"}; +lookup(68527) -> {"No","R"}; +lookup(68608) -> {"Lo","R"}; +lookup(68609) -> {"Lo","R"}; +lookup(68610) -> {"Lo","R"}; +lookup(68611) -> {"Lo","R"}; +lookup(68612) -> {"Lo","R"}; +lookup(68613) -> {"Lo","R"}; +lookup(68614) -> {"Lo","R"}; +lookup(68615) -> {"Lo","R"}; +lookup(68616) -> {"Lo","R"}; +lookup(68617) -> {"Lo","R"}; +lookup(68618) -> {"Lo","R"}; +lookup(68619) -> {"Lo","R"}; +lookup(68620) -> {"Lo","R"}; +lookup(68621) -> {"Lo","R"}; +lookup(68622) -> {"Lo","R"}; +lookup(68623) -> {"Lo","R"}; +lookup(68624) -> {"Lo","R"}; +lookup(68625) -> {"Lo","R"}; +lookup(68626) -> {"Lo","R"}; +lookup(68627) -> 
{"Lo","R"}; +lookup(68628) -> {"Lo","R"}; +lookup(68629) -> {"Lo","R"}; +lookup(68630) -> {"Lo","R"}; +lookup(68631) -> {"Lo","R"}; +lookup(68632) -> {"Lo","R"}; +lookup(68633) -> {"Lo","R"}; +lookup(68634) -> {"Lo","R"}; +lookup(68635) -> {"Lo","R"}; +lookup(68636) -> {"Lo","R"}; +lookup(68637) -> {"Lo","R"}; +lookup(68638) -> {"Lo","R"}; +lookup(68639) -> {"Lo","R"}; +lookup(68640) -> {"Lo","R"}; +lookup(68641) -> {"Lo","R"}; +lookup(68642) -> {"Lo","R"}; +lookup(68643) -> {"Lo","R"}; +lookup(68644) -> {"Lo","R"}; +lookup(68645) -> {"Lo","R"}; +lookup(68646) -> {"Lo","R"}; +lookup(68647) -> {"Lo","R"}; +lookup(68648) -> {"Lo","R"}; +lookup(68649) -> {"Lo","R"}; +lookup(68650) -> {"Lo","R"}; +lookup(68651) -> {"Lo","R"}; +lookup(68652) -> {"Lo","R"}; +lookup(68653) -> {"Lo","R"}; +lookup(68654) -> {"Lo","R"}; +lookup(68655) -> {"Lo","R"}; +lookup(68656) -> {"Lo","R"}; +lookup(68657) -> {"Lo","R"}; +lookup(68658) -> {"Lo","R"}; +lookup(68659) -> {"Lo","R"}; +lookup(68660) -> {"Lo","R"}; +lookup(68661) -> {"Lo","R"}; +lookup(68662) -> {"Lo","R"}; +lookup(68663) -> {"Lo","R"}; +lookup(68664) -> {"Lo","R"}; +lookup(68665) -> {"Lo","R"}; +lookup(68666) -> {"Lo","R"}; +lookup(68667) -> {"Lo","R"}; +lookup(68668) -> {"Lo","R"}; +lookup(68669) -> {"Lo","R"}; +lookup(68670) -> {"Lo","R"}; +lookup(68671) -> {"Lo","R"}; +lookup(68672) -> {"Lo","R"}; +lookup(68673) -> {"Lo","R"}; +lookup(68674) -> {"Lo","R"}; +lookup(68675) -> {"Lo","R"}; +lookup(68676) -> {"Lo","R"}; +lookup(68677) -> {"Lo","R"}; +lookup(68678) -> {"Lo","R"}; +lookup(68679) -> {"Lo","R"}; +lookup(68680) -> {"Lo","R"}; +lookup(68736) -> {"Lu","R"}; +lookup(68737) -> {"Lu","R"}; +lookup(68738) -> {"Lu","R"}; +lookup(68739) -> {"Lu","R"}; +lookup(68740) -> {"Lu","R"}; +lookup(68741) -> {"Lu","R"}; +lookup(68742) -> {"Lu","R"}; +lookup(68743) -> {"Lu","R"}; +lookup(68744) -> {"Lu","R"}; +lookup(68745) -> {"Lu","R"}; +lookup(68746) -> {"Lu","R"}; +lookup(68747) -> {"Lu","R"}; +lookup(68748) -> {"Lu","R"}; 
+lookup(68749) -> {"Lu","R"}; +lookup(68750) -> {"Lu","R"}; +lookup(68751) -> {"Lu","R"}; +lookup(68752) -> {"Lu","R"}; +lookup(68753) -> {"Lu","R"}; +lookup(68754) -> {"Lu","R"}; +lookup(68755) -> {"Lu","R"}; +lookup(68756) -> {"Lu","R"}; +lookup(68757) -> {"Lu","R"}; +lookup(68758) -> {"Lu","R"}; +lookup(68759) -> {"Lu","R"}; +lookup(68760) -> {"Lu","R"}; +lookup(68761) -> {"Lu","R"}; +lookup(68762) -> {"Lu","R"}; +lookup(68763) -> {"Lu","R"}; +lookup(68764) -> {"Lu","R"}; +lookup(68765) -> {"Lu","R"}; +lookup(68766) -> {"Lu","R"}; +lookup(68767) -> {"Lu","R"}; +lookup(68768) -> {"Lu","R"}; +lookup(68769) -> {"Lu","R"}; +lookup(68770) -> {"Lu","R"}; +lookup(68771) -> {"Lu","R"}; +lookup(68772) -> {"Lu","R"}; +lookup(68773) -> {"Lu","R"}; +lookup(68774) -> {"Lu","R"}; +lookup(68775) -> {"Lu","R"}; +lookup(68776) -> {"Lu","R"}; +lookup(68777) -> {"Lu","R"}; +lookup(68778) -> {"Lu","R"}; +lookup(68779) -> {"Lu","R"}; +lookup(68780) -> {"Lu","R"}; +lookup(68781) -> {"Lu","R"}; +lookup(68782) -> {"Lu","R"}; +lookup(68783) -> {"Lu","R"}; +lookup(68784) -> {"Lu","R"}; +lookup(68785) -> {"Lu","R"}; +lookup(68786) -> {"Lu","R"}; +lookup(68800) -> {"Ll","R"}; +lookup(68801) -> {"Ll","R"}; +lookup(68802) -> {"Ll","R"}; +lookup(68803) -> {"Ll","R"}; +lookup(68804) -> {"Ll","R"}; +lookup(68805) -> {"Ll","R"}; +lookup(68806) -> {"Ll","R"}; +lookup(68807) -> {"Ll","R"}; +lookup(68808) -> {"Ll","R"}; +lookup(68809) -> {"Ll","R"}; +lookup(68810) -> {"Ll","R"}; +lookup(68811) -> {"Ll","R"}; +lookup(68812) -> {"Ll","R"}; +lookup(68813) -> {"Ll","R"}; +lookup(68814) -> {"Ll","R"}; +lookup(68815) -> {"Ll","R"}; +lookup(68816) -> {"Ll","R"}; +lookup(68817) -> {"Ll","R"}; +lookup(68818) -> {"Ll","R"}; +lookup(68819) -> {"Ll","R"}; +lookup(68820) -> {"Ll","R"}; +lookup(68821) -> {"Ll","R"}; +lookup(68822) -> {"Ll","R"}; +lookup(68823) -> {"Ll","R"}; +lookup(68824) -> {"Ll","R"}; +lookup(68825) -> {"Ll","R"}; +lookup(68826) -> {"Ll","R"}; +lookup(68827) -> {"Ll","R"}; +lookup(68828) -> 
{"Ll","R"}; +lookup(68829) -> {"Ll","R"}; +lookup(68830) -> {"Ll","R"}; +lookup(68831) -> {"Ll","R"}; +lookup(68832) -> {"Ll","R"}; +lookup(68833) -> {"Ll","R"}; +lookup(68834) -> {"Ll","R"}; +lookup(68835) -> {"Ll","R"}; +lookup(68836) -> {"Ll","R"}; +lookup(68837) -> {"Ll","R"}; +lookup(68838) -> {"Ll","R"}; +lookup(68839) -> {"Ll","R"}; +lookup(68840) -> {"Ll","R"}; +lookup(68841) -> {"Ll","R"}; +lookup(68842) -> {"Ll","R"}; +lookup(68843) -> {"Ll","R"}; +lookup(68844) -> {"Ll","R"}; +lookup(68845) -> {"Ll","R"}; +lookup(68846) -> {"Ll","R"}; +lookup(68847) -> {"Ll","R"}; +lookup(68848) -> {"Ll","R"}; +lookup(68849) -> {"Ll","R"}; +lookup(68850) -> {"Ll","R"}; +lookup(68858) -> {"No","R"}; +lookup(68859) -> {"No","R"}; +lookup(68860) -> {"No","R"}; +lookup(68861) -> {"No","R"}; +lookup(68862) -> {"No","R"}; +lookup(68863) -> {"No","R"}; +lookup(68864) -> {"Lo","AL"}; +lookup(68865) -> {"Lo","AL"}; +lookup(68866) -> {"Lo","AL"}; +lookup(68867) -> {"Lo","AL"}; +lookup(68868) -> {"Lo","AL"}; +lookup(68869) -> {"Lo","AL"}; +lookup(68870) -> {"Lo","AL"}; +lookup(68871) -> {"Lo","AL"}; +lookup(68872) -> {"Lo","AL"}; +lookup(68873) -> {"Lo","AL"}; +lookup(68874) -> {"Lo","AL"}; +lookup(68875) -> {"Lo","AL"}; +lookup(68876) -> {"Lo","AL"}; +lookup(68877) -> {"Lo","AL"}; +lookup(68878) -> {"Lo","AL"}; +lookup(68879) -> {"Lo","AL"}; +lookup(68880) -> {"Lo","AL"}; +lookup(68881) -> {"Lo","AL"}; +lookup(68882) -> {"Lo","AL"}; +lookup(68883) -> {"Lo","AL"}; +lookup(68884) -> {"Lo","AL"}; +lookup(68885) -> {"Lo","AL"}; +lookup(68886) -> {"Lo","AL"}; +lookup(68887) -> {"Lo","AL"}; +lookup(68888) -> {"Lo","AL"}; +lookup(68889) -> {"Lo","AL"}; +lookup(68890) -> {"Lo","AL"}; +lookup(68891) -> {"Lo","AL"}; +lookup(68892) -> {"Lo","AL"}; +lookup(68893) -> {"Lo","AL"}; +lookup(68894) -> {"Lo","AL"}; +lookup(68895) -> {"Lo","AL"}; +lookup(68896) -> {"Lo","AL"}; +lookup(68897) -> {"Lo","AL"}; +lookup(68898) -> {"Lo","AL"}; +lookup(68899) -> {"Lo","AL"}; +lookup(68900) -> {"Mn","NSM"}; 
+lookup(68901) -> {"Mn","NSM"}; +lookup(68902) -> {"Mn","NSM"}; +lookup(68903) -> {"Mn","NSM"}; +lookup(68912) -> {"Nd","AN"}; +lookup(68913) -> {"Nd","AN"}; +lookup(68914) -> {"Nd","AN"}; +lookup(68915) -> {"Nd","AN"}; +lookup(68916) -> {"Nd","AN"}; +lookup(68917) -> {"Nd","AN"}; +lookup(68918) -> {"Nd","AN"}; +lookup(68919) -> {"Nd","AN"}; +lookup(68920) -> {"Nd","AN"}; +lookup(68921) -> {"Nd","AN"}; +lookup(69216) -> {"No","AN"}; +lookup(69217) -> {"No","AN"}; +lookup(69218) -> {"No","AN"}; +lookup(69219) -> {"No","AN"}; +lookup(69220) -> {"No","AN"}; +lookup(69221) -> {"No","AN"}; +lookup(69222) -> {"No","AN"}; +lookup(69223) -> {"No","AN"}; +lookup(69224) -> {"No","AN"}; +lookup(69225) -> {"No","AN"}; +lookup(69226) -> {"No","AN"}; +lookup(69227) -> {"No","AN"}; +lookup(69228) -> {"No","AN"}; +lookup(69229) -> {"No","AN"}; +lookup(69230) -> {"No","AN"}; +lookup(69231) -> {"No","AN"}; +lookup(69232) -> {"No","AN"}; +lookup(69233) -> {"No","AN"}; +lookup(69234) -> {"No","AN"}; +lookup(69235) -> {"No","AN"}; +lookup(69236) -> {"No","AN"}; +lookup(69237) -> {"No","AN"}; +lookup(69238) -> {"No","AN"}; +lookup(69239) -> {"No","AN"}; +lookup(69240) -> {"No","AN"}; +lookup(69241) -> {"No","AN"}; +lookup(69242) -> {"No","AN"}; +lookup(69243) -> {"No","AN"}; +lookup(69244) -> {"No","AN"}; +lookup(69245) -> {"No","AN"}; +lookup(69246) -> {"No","AN"}; +lookup(69248) -> {"Lo","R"}; +lookup(69249) -> {"Lo","R"}; +lookup(69250) -> {"Lo","R"}; +lookup(69251) -> {"Lo","R"}; +lookup(69252) -> {"Lo","R"}; +lookup(69253) -> {"Lo","R"}; +lookup(69254) -> {"Lo","R"}; +lookup(69255) -> {"Lo","R"}; +lookup(69256) -> {"Lo","R"}; +lookup(69257) -> {"Lo","R"}; +lookup(69258) -> {"Lo","R"}; +lookup(69259) -> {"Lo","R"}; +lookup(69260) -> {"Lo","R"}; +lookup(69261) -> {"Lo","R"}; +lookup(69262) -> {"Lo","R"}; +lookup(69263) -> {"Lo","R"}; +lookup(69264) -> {"Lo","R"}; +lookup(69265) -> {"Lo","R"}; +lookup(69266) -> {"Lo","R"}; +lookup(69267) -> {"Lo","R"}; +lookup(69268) -> {"Lo","R"}; 
+lookup(69269) -> {"Lo","R"}; +lookup(69270) -> {"Lo","R"}; +lookup(69271) -> {"Lo","R"}; +lookup(69272) -> {"Lo","R"}; +lookup(69273) -> {"Lo","R"}; +lookup(69274) -> {"Lo","R"}; +lookup(69275) -> {"Lo","R"}; +lookup(69276) -> {"Lo","R"}; +lookup(69277) -> {"Lo","R"}; +lookup(69278) -> {"Lo","R"}; +lookup(69279) -> {"Lo","R"}; +lookup(69280) -> {"Lo","R"}; +lookup(69281) -> {"Lo","R"}; +lookup(69282) -> {"Lo","R"}; +lookup(69283) -> {"Lo","R"}; +lookup(69284) -> {"Lo","R"}; +lookup(69285) -> {"Lo","R"}; +lookup(69286) -> {"Lo","R"}; +lookup(69287) -> {"Lo","R"}; +lookup(69288) -> {"Lo","R"}; +lookup(69289) -> {"Lo","R"}; +lookup(69291) -> {"Mn","NSM"}; +lookup(69292) -> {"Mn","NSM"}; +lookup(69293) -> {"Pd","R"}; +lookup(69296) -> {"Lo","R"}; +lookup(69297) -> {"Lo","R"}; +lookup(69376) -> {"Lo","R"}; +lookup(69377) -> {"Lo","R"}; +lookup(69378) -> {"Lo","R"}; +lookup(69379) -> {"Lo","R"}; +lookup(69380) -> {"Lo","R"}; +lookup(69381) -> {"Lo","R"}; +lookup(69382) -> {"Lo","R"}; +lookup(69383) -> {"Lo","R"}; +lookup(69384) -> {"Lo","R"}; +lookup(69385) -> {"Lo","R"}; +lookup(69386) -> {"Lo","R"}; +lookup(69387) -> {"Lo","R"}; +lookup(69388) -> {"Lo","R"}; +lookup(69389) -> {"Lo","R"}; +lookup(69390) -> {"Lo","R"}; +lookup(69391) -> {"Lo","R"}; +lookup(69392) -> {"Lo","R"}; +lookup(69393) -> {"Lo","R"}; +lookup(69394) -> {"Lo","R"}; +lookup(69395) -> {"Lo","R"}; +lookup(69396) -> {"Lo","R"}; +lookup(69397) -> {"Lo","R"}; +lookup(69398) -> {"Lo","R"}; +lookup(69399) -> {"Lo","R"}; +lookup(69400) -> {"Lo","R"}; +lookup(69401) -> {"Lo","R"}; +lookup(69402) -> {"Lo","R"}; +lookup(69403) -> {"Lo","R"}; +lookup(69404) -> {"Lo","R"}; +lookup(69405) -> {"No","R"}; +lookup(69406) -> {"No","R"}; +lookup(69407) -> {"No","R"}; +lookup(69408) -> {"No","R"}; +lookup(69409) -> {"No","R"}; +lookup(69410) -> {"No","R"}; +lookup(69411) -> {"No","R"}; +lookup(69412) -> {"No","R"}; +lookup(69413) -> {"No","R"}; +lookup(69414) -> {"No","R"}; +lookup(69415) -> {"Lo","R"}; +lookup(69424) 
-> {"Lo","AL"}; +lookup(69425) -> {"Lo","AL"}; +lookup(69426) -> {"Lo","AL"}; +lookup(69427) -> {"Lo","AL"}; +lookup(69428) -> {"Lo","AL"}; +lookup(69429) -> {"Lo","AL"}; +lookup(69430) -> {"Lo","AL"}; +lookup(69431) -> {"Lo","AL"}; +lookup(69432) -> {"Lo","AL"}; +lookup(69433) -> {"Lo","AL"}; +lookup(69434) -> {"Lo","AL"}; +lookup(69435) -> {"Lo","AL"}; +lookup(69436) -> {"Lo","AL"}; +lookup(69437) -> {"Lo","AL"}; +lookup(69438) -> {"Lo","AL"}; +lookup(69439) -> {"Lo","AL"}; +lookup(69440) -> {"Lo","AL"}; +lookup(69441) -> {"Lo","AL"}; +lookup(69442) -> {"Lo","AL"}; +lookup(69443) -> {"Lo","AL"}; +lookup(69444) -> {"Lo","AL"}; +lookup(69445) -> {"Lo","AL"}; +lookup(69446) -> {"Mn","NSM"}; +lookup(69447) -> {"Mn","NSM"}; +lookup(69448) -> {"Mn","NSM"}; +lookup(69449) -> {"Mn","NSM"}; +lookup(69450) -> {"Mn","NSM"}; +lookup(69451) -> {"Mn","NSM"}; +lookup(69452) -> {"Mn","NSM"}; +lookup(69453) -> {"Mn","NSM"}; +lookup(69454) -> {"Mn","NSM"}; +lookup(69455) -> {"Mn","NSM"}; +lookup(69456) -> {"Mn","NSM"}; +lookup(69457) -> {"No","AL"}; +lookup(69458) -> {"No","AL"}; +lookup(69459) -> {"No","AL"}; +lookup(69460) -> {"No","AL"}; +lookup(69461) -> {"Po","AL"}; +lookup(69462) -> {"Po","AL"}; +lookup(69463) -> {"Po","AL"}; +lookup(69464) -> {"Po","AL"}; +lookup(69465) -> {"Po","AL"}; +lookup(69552) -> {"Lo","R"}; +lookup(69553) -> {"Lo","R"}; +lookup(69554) -> {"Lo","R"}; +lookup(69555) -> {"Lo","R"}; +lookup(69556) -> {"Lo","R"}; +lookup(69557) -> {"Lo","R"}; +lookup(69558) -> {"Lo","R"}; +lookup(69559) -> {"Lo","R"}; +lookup(69560) -> {"Lo","R"}; +lookup(69561) -> {"Lo","R"}; +lookup(69562) -> {"Lo","R"}; +lookup(69563) -> {"Lo","R"}; +lookup(69564) -> {"Lo","R"}; +lookup(69565) -> {"Lo","R"}; +lookup(69566) -> {"Lo","R"}; +lookup(69567) -> {"Lo","R"}; +lookup(69568) -> {"Lo","R"}; +lookup(69569) -> {"Lo","R"}; +lookup(69570) -> {"Lo","R"}; +lookup(69571) -> {"Lo","R"}; +lookup(69572) -> {"Lo","R"}; +lookup(69573) -> {"No","R"}; +lookup(69574) -> {"No","R"}; 
+lookup(69575) -> {"No","R"}; +lookup(69576) -> {"No","R"}; +lookup(69577) -> {"No","R"}; +lookup(69578) -> {"No","R"}; +lookup(69579) -> {"No","R"}; +lookup(69600) -> {"Lo","R"}; +lookup(69601) -> {"Lo","R"}; +lookup(69602) -> {"Lo","R"}; +lookup(69603) -> {"Lo","R"}; +lookup(69604) -> {"Lo","R"}; +lookup(69605) -> {"Lo","R"}; +lookup(69606) -> {"Lo","R"}; +lookup(69607) -> {"Lo","R"}; +lookup(69608) -> {"Lo","R"}; +lookup(69609) -> {"Lo","R"}; +lookup(69610) -> {"Lo","R"}; +lookup(69611) -> {"Lo","R"}; +lookup(69612) -> {"Lo","R"}; +lookup(69613) -> {"Lo","R"}; +lookup(69614) -> {"Lo","R"}; +lookup(69615) -> {"Lo","R"}; +lookup(69616) -> {"Lo","R"}; +lookup(69617) -> {"Lo","R"}; +lookup(69618) -> {"Lo","R"}; +lookup(69619) -> {"Lo","R"}; +lookup(69620) -> {"Lo","R"}; +lookup(69621) -> {"Lo","R"}; +lookup(69622) -> {"Lo","R"}; +lookup(69632) -> {"Mc","L"}; +lookup(69633) -> {"Mn","NSM"}; +lookup(69634) -> {"Mc","L"}; +lookup(69635) -> {"Lo","L"}; +lookup(69636) -> {"Lo","L"}; +lookup(69637) -> {"Lo","L"}; +lookup(69638) -> {"Lo","L"}; +lookup(69639) -> {"Lo","L"}; +lookup(69640) -> {"Lo","L"}; +lookup(69641) -> {"Lo","L"}; +lookup(69642) -> {"Lo","L"}; +lookup(69643) -> {"Lo","L"}; +lookup(69644) -> {"Lo","L"}; +lookup(69645) -> {"Lo","L"}; +lookup(69646) -> {"Lo","L"}; +lookup(69647) -> {"Lo","L"}; +lookup(69648) -> {"Lo","L"}; +lookup(69649) -> {"Lo","L"}; +lookup(69650) -> {"Lo","L"}; +lookup(69651) -> {"Lo","L"}; +lookup(69652) -> {"Lo","L"}; +lookup(69653) -> {"Lo","L"}; +lookup(69654) -> {"Lo","L"}; +lookup(69655) -> {"Lo","L"}; +lookup(69656) -> {"Lo","L"}; +lookup(69657) -> {"Lo","L"}; +lookup(69658) -> {"Lo","L"}; +lookup(69659) -> {"Lo","L"}; +lookup(69660) -> {"Lo","L"}; +lookup(69661) -> {"Lo","L"}; +lookup(69662) -> {"Lo","L"}; +lookup(69663) -> {"Lo","L"}; +lookup(69664) -> {"Lo","L"}; +lookup(69665) -> {"Lo","L"}; +lookup(69666) -> {"Lo","L"}; +lookup(69667) -> {"Lo","L"}; +lookup(69668) -> {"Lo","L"}; +lookup(69669) -> {"Lo","L"}; +lookup(69670) -> 
{"Lo","L"}; +lookup(69671) -> {"Lo","L"}; +lookup(69672) -> {"Lo","L"}; +lookup(69673) -> {"Lo","L"}; +lookup(69674) -> {"Lo","L"}; +lookup(69675) -> {"Lo","L"}; +lookup(69676) -> {"Lo","L"}; +lookup(69677) -> {"Lo","L"}; +lookup(69678) -> {"Lo","L"}; +lookup(69679) -> {"Lo","L"}; +lookup(69680) -> {"Lo","L"}; +lookup(69681) -> {"Lo","L"}; +lookup(69682) -> {"Lo","L"}; +lookup(69683) -> {"Lo","L"}; +lookup(69684) -> {"Lo","L"}; +lookup(69685) -> {"Lo","L"}; +lookup(69686) -> {"Lo","L"}; +lookup(69687) -> {"Lo","L"}; +lookup(69688) -> {"Mn","NSM"}; +lookup(69689) -> {"Mn","NSM"}; +lookup(69690) -> {"Mn","NSM"}; +lookup(69691) -> {"Mn","NSM"}; +lookup(69692) -> {"Mn","NSM"}; +lookup(69693) -> {"Mn","NSM"}; +lookup(69694) -> {"Mn","NSM"}; +lookup(69695) -> {"Mn","NSM"}; +lookup(69696) -> {"Mn","NSM"}; +lookup(69697) -> {"Mn","NSM"}; +lookup(69698) -> {"Mn","NSM"}; +lookup(69699) -> {"Mn","NSM"}; +lookup(69700) -> {"Mn","NSM"}; +lookup(69701) -> {"Mn","NSM"}; +lookup(69702) -> {"Mn","NSM"}; +lookup(69703) -> {"Po","L"}; +lookup(69704) -> {"Po","L"}; +lookup(69705) -> {"Po","L"}; +lookup(69706) -> {"Po","L"}; +lookup(69707) -> {"Po","L"}; +lookup(69708) -> {"Po","L"}; +lookup(69709) -> {"Po","L"}; +lookup(69714) -> {"No","ON"}; +lookup(69715) -> {"No","ON"}; +lookup(69716) -> {"No","ON"}; +lookup(69717) -> {"No","ON"}; +lookup(69718) -> {"No","ON"}; +lookup(69719) -> {"No","ON"}; +lookup(69720) -> {"No","ON"}; +lookup(69721) -> {"No","ON"}; +lookup(69722) -> {"No","ON"}; +lookup(69723) -> {"No","ON"}; +lookup(69724) -> {"No","ON"}; +lookup(69725) -> {"No","ON"}; +lookup(69726) -> {"No","ON"}; +lookup(69727) -> {"No","ON"}; +lookup(69728) -> {"No","ON"}; +lookup(69729) -> {"No","ON"}; +lookup(69730) -> {"No","ON"}; +lookup(69731) -> {"No","ON"}; +lookup(69732) -> {"No","ON"}; +lookup(69733) -> {"No","ON"}; +lookup(69734) -> {"Nd","L"}; +lookup(69735) -> {"Nd","L"}; +lookup(69736) -> {"Nd","L"}; +lookup(69737) -> {"Nd","L"}; +lookup(69738) -> {"Nd","L"}; +lookup(69739) -> 
{"Nd","L"}; +lookup(69740) -> {"Nd","L"}; +lookup(69741) -> {"Nd","L"}; +lookup(69742) -> {"Nd","L"}; +lookup(69743) -> {"Nd","L"}; +lookup(69759) -> {"Mn","NSM"}; +lookup(69760) -> {"Mn","NSM"}; +lookup(69761) -> {"Mn","NSM"}; +lookup(69762) -> {"Mc","L"}; +lookup(69763) -> {"Lo","L"}; +lookup(69764) -> {"Lo","L"}; +lookup(69765) -> {"Lo","L"}; +lookup(69766) -> {"Lo","L"}; +lookup(69767) -> {"Lo","L"}; +lookup(69768) -> {"Lo","L"}; +lookup(69769) -> {"Lo","L"}; +lookup(69770) -> {"Lo","L"}; +lookup(69771) -> {"Lo","L"}; +lookup(69772) -> {"Lo","L"}; +lookup(69773) -> {"Lo","L"}; +lookup(69774) -> {"Lo","L"}; +lookup(69775) -> {"Lo","L"}; +lookup(69776) -> {"Lo","L"}; +lookup(69777) -> {"Lo","L"}; +lookup(69778) -> {"Lo","L"}; +lookup(69779) -> {"Lo","L"}; +lookup(69780) -> {"Lo","L"}; +lookup(69781) -> {"Lo","L"}; +lookup(69782) -> {"Lo","L"}; +lookup(69783) -> {"Lo","L"}; +lookup(69784) -> {"Lo","L"}; +lookup(69785) -> {"Lo","L"}; +lookup(69786) -> {"Lo","L"}; +lookup(69787) -> {"Lo","L"}; +lookup(69788) -> {"Lo","L"}; +lookup(69789) -> {"Lo","L"}; +lookup(69790) -> {"Lo","L"}; +lookup(69791) -> {"Lo","L"}; +lookup(69792) -> {"Lo","L"}; +lookup(69793) -> {"Lo","L"}; +lookup(69794) -> {"Lo","L"}; +lookup(69795) -> {"Lo","L"}; +lookup(69796) -> {"Lo","L"}; +lookup(69797) -> {"Lo","L"}; +lookup(69798) -> {"Lo","L"}; +lookup(69799) -> {"Lo","L"}; +lookup(69800) -> {"Lo","L"}; +lookup(69801) -> {"Lo","L"}; +lookup(69802) -> {"Lo","L"}; +lookup(69803) -> {"Lo","L"}; +lookup(69804) -> {"Lo","L"}; +lookup(69805) -> {"Lo","L"}; +lookup(69806) -> {"Lo","L"}; +lookup(69807) -> {"Lo","L"}; +lookup(69808) -> {"Mc","L"}; +lookup(69809) -> {"Mc","L"}; +lookup(69810) -> {"Mc","L"}; +lookup(69811) -> {"Mn","NSM"}; +lookup(69812) -> {"Mn","NSM"}; +lookup(69813) -> {"Mn","NSM"}; +lookup(69814) -> {"Mn","NSM"}; +lookup(69815) -> {"Mc","L"}; +lookup(69816) -> {"Mc","L"}; +lookup(69817) -> {"Mn","NSM"}; +lookup(69818) -> {"Mn","NSM"}; +lookup(69819) -> {"Po","L"}; +lookup(69820) -> 
{"Po","L"}; +lookup(69821) -> {"Cf","L"}; +lookup(69822) -> {"Po","L"}; +lookup(69823) -> {"Po","L"}; +lookup(69824) -> {"Po","L"}; +lookup(69825) -> {"Po","L"}; +lookup(69837) -> {"Cf","L"}; +lookup(69840) -> {"Lo","L"}; +lookup(69841) -> {"Lo","L"}; +lookup(69842) -> {"Lo","L"}; +lookup(69843) -> {"Lo","L"}; +lookup(69844) -> {"Lo","L"}; +lookup(69845) -> {"Lo","L"}; +lookup(69846) -> {"Lo","L"}; +lookup(69847) -> {"Lo","L"}; +lookup(69848) -> {"Lo","L"}; +lookup(69849) -> {"Lo","L"}; +lookup(69850) -> {"Lo","L"}; +lookup(69851) -> {"Lo","L"}; +lookup(69852) -> {"Lo","L"}; +lookup(69853) -> {"Lo","L"}; +lookup(69854) -> {"Lo","L"}; +lookup(69855) -> {"Lo","L"}; +lookup(69856) -> {"Lo","L"}; +lookup(69857) -> {"Lo","L"}; +lookup(69858) -> {"Lo","L"}; +lookup(69859) -> {"Lo","L"}; +lookup(69860) -> {"Lo","L"}; +lookup(69861) -> {"Lo","L"}; +lookup(69862) -> {"Lo","L"}; +lookup(69863) -> {"Lo","L"}; +lookup(69864) -> {"Lo","L"}; +lookup(69872) -> {"Nd","L"}; +lookup(69873) -> {"Nd","L"}; +lookup(69874) -> {"Nd","L"}; +lookup(69875) -> {"Nd","L"}; +lookup(69876) -> {"Nd","L"}; +lookup(69877) -> {"Nd","L"}; +lookup(69878) -> {"Nd","L"}; +lookup(69879) -> {"Nd","L"}; +lookup(69880) -> {"Nd","L"}; +lookup(69881) -> {"Nd","L"}; +lookup(69888) -> {"Mn","NSM"}; +lookup(69889) -> {"Mn","NSM"}; +lookup(69890) -> {"Mn","NSM"}; +lookup(69891) -> {"Lo","L"}; +lookup(69892) -> {"Lo","L"}; +lookup(69893) -> {"Lo","L"}; +lookup(69894) -> {"Lo","L"}; +lookup(69895) -> {"Lo","L"}; +lookup(69896) -> {"Lo","L"}; +lookup(69897) -> {"Lo","L"}; +lookup(69898) -> {"Lo","L"}; +lookup(69899) -> {"Lo","L"}; +lookup(69900) -> {"Lo","L"}; +lookup(69901) -> {"Lo","L"}; +lookup(69902) -> {"Lo","L"}; +lookup(69903) -> {"Lo","L"}; +lookup(69904) -> {"Lo","L"}; +lookup(69905) -> {"Lo","L"}; +lookup(69906) -> {"Lo","L"}; +lookup(69907) -> {"Lo","L"}; +lookup(69908) -> {"Lo","L"}; +lookup(69909) -> {"Lo","L"}; +lookup(69910) -> {"Lo","L"}; +lookup(69911) -> {"Lo","L"}; +lookup(69912) -> {"Lo","L"}; 
+lookup(69913) -> {"Lo","L"}; +lookup(69914) -> {"Lo","L"}; +lookup(69915) -> {"Lo","L"}; +lookup(69916) -> {"Lo","L"}; +lookup(69917) -> {"Lo","L"}; +lookup(69918) -> {"Lo","L"}; +lookup(69919) -> {"Lo","L"}; +lookup(69920) -> {"Lo","L"}; +lookup(69921) -> {"Lo","L"}; +lookup(69922) -> {"Lo","L"}; +lookup(69923) -> {"Lo","L"}; +lookup(69924) -> {"Lo","L"}; +lookup(69925) -> {"Lo","L"}; +lookup(69926) -> {"Lo","L"}; +lookup(69927) -> {"Mn","NSM"}; +lookup(69928) -> {"Mn","NSM"}; +lookup(69929) -> {"Mn","NSM"}; +lookup(69930) -> {"Mn","NSM"}; +lookup(69931) -> {"Mn","NSM"}; +lookup(69932) -> {"Mc","L"}; +lookup(69933) -> {"Mn","NSM"}; +lookup(69934) -> {"Mn","NSM"}; +lookup(69935) -> {"Mn","NSM"}; +lookup(69936) -> {"Mn","NSM"}; +lookup(69937) -> {"Mn","NSM"}; +lookup(69938) -> {"Mn","NSM"}; +lookup(69939) -> {"Mn","NSM"}; +lookup(69940) -> {"Mn","NSM"}; +lookup(69942) -> {"Nd","L"}; +lookup(69943) -> {"Nd","L"}; +lookup(69944) -> {"Nd","L"}; +lookup(69945) -> {"Nd","L"}; +lookup(69946) -> {"Nd","L"}; +lookup(69947) -> {"Nd","L"}; +lookup(69948) -> {"Nd","L"}; +lookup(69949) -> {"Nd","L"}; +lookup(69950) -> {"Nd","L"}; +lookup(69951) -> {"Nd","L"}; +lookup(69952) -> {"Po","L"}; +lookup(69953) -> {"Po","L"}; +lookup(69954) -> {"Po","L"}; +lookup(69955) -> {"Po","L"}; +lookup(69956) -> {"Lo","L"}; +lookup(69957) -> {"Mc","L"}; +lookup(69958) -> {"Mc","L"}; +lookup(69959) -> {"Lo","L"}; +lookup(69968) -> {"Lo","L"}; +lookup(69969) -> {"Lo","L"}; +lookup(69970) -> {"Lo","L"}; +lookup(69971) -> {"Lo","L"}; +lookup(69972) -> {"Lo","L"}; +lookup(69973) -> {"Lo","L"}; +lookup(69974) -> {"Lo","L"}; +lookup(69975) -> {"Lo","L"}; +lookup(69976) -> {"Lo","L"}; +lookup(69977) -> {"Lo","L"}; +lookup(69978) -> {"Lo","L"}; +lookup(69979) -> {"Lo","L"}; +lookup(69980) -> {"Lo","L"}; +lookup(69981) -> {"Lo","L"}; +lookup(69982) -> {"Lo","L"}; +lookup(69983) -> {"Lo","L"}; +lookup(69984) -> {"Lo","L"}; +lookup(69985) -> {"Lo","L"}; +lookup(69986) -> {"Lo","L"}; +lookup(69987) -> 
{"Lo","L"}; +lookup(69988) -> {"Lo","L"}; +lookup(69989) -> {"Lo","L"}; +lookup(69990) -> {"Lo","L"}; +lookup(69991) -> {"Lo","L"}; +lookup(69992) -> {"Lo","L"}; +lookup(69993) -> {"Lo","L"}; +lookup(69994) -> {"Lo","L"}; +lookup(69995) -> {"Lo","L"}; +lookup(69996) -> {"Lo","L"}; +lookup(69997) -> {"Lo","L"}; +lookup(69998) -> {"Lo","L"}; +lookup(69999) -> {"Lo","L"}; +lookup(70000) -> {"Lo","L"}; +lookup(70001) -> {"Lo","L"}; +lookup(70002) -> {"Lo","L"}; +lookup(70003) -> {"Mn","NSM"}; +lookup(70004) -> {"Po","L"}; +lookup(70005) -> {"Po","L"}; +lookup(70006) -> {"Lo","L"}; +lookup(70016) -> {"Mn","NSM"}; +lookup(70017) -> {"Mn","NSM"}; +lookup(70018) -> {"Mc","L"}; +lookup(70019) -> {"Lo","L"}; +lookup(70020) -> {"Lo","L"}; +lookup(70021) -> {"Lo","L"}; +lookup(70022) -> {"Lo","L"}; +lookup(70023) -> {"Lo","L"}; +lookup(70024) -> {"Lo","L"}; +lookup(70025) -> {"Lo","L"}; +lookup(70026) -> {"Lo","L"}; +lookup(70027) -> {"Lo","L"}; +lookup(70028) -> {"Lo","L"}; +lookup(70029) -> {"Lo","L"}; +lookup(70030) -> {"Lo","L"}; +lookup(70031) -> {"Lo","L"}; +lookup(70032) -> {"Lo","L"}; +lookup(70033) -> {"Lo","L"}; +lookup(70034) -> {"Lo","L"}; +lookup(70035) -> {"Lo","L"}; +lookup(70036) -> {"Lo","L"}; +lookup(70037) -> {"Lo","L"}; +lookup(70038) -> {"Lo","L"}; +lookup(70039) -> {"Lo","L"}; +lookup(70040) -> {"Lo","L"}; +lookup(70041) -> {"Lo","L"}; +lookup(70042) -> {"Lo","L"}; +lookup(70043) -> {"Lo","L"}; +lookup(70044) -> {"Lo","L"}; +lookup(70045) -> {"Lo","L"}; +lookup(70046) -> {"Lo","L"}; +lookup(70047) -> {"Lo","L"}; +lookup(70048) -> {"Lo","L"}; +lookup(70049) -> {"Lo","L"}; +lookup(70050) -> {"Lo","L"}; +lookup(70051) -> {"Lo","L"}; +lookup(70052) -> {"Lo","L"}; +lookup(70053) -> {"Lo","L"}; +lookup(70054) -> {"Lo","L"}; +lookup(70055) -> {"Lo","L"}; +lookup(70056) -> {"Lo","L"}; +lookup(70057) -> {"Lo","L"}; +lookup(70058) -> {"Lo","L"}; +lookup(70059) -> {"Lo","L"}; +lookup(70060) -> {"Lo","L"}; +lookup(70061) -> {"Lo","L"}; +lookup(70062) -> {"Lo","L"}; 
+lookup(70063) -> {"Lo","L"}; +lookup(70064) -> {"Lo","L"}; +lookup(70065) -> {"Lo","L"}; +lookup(70066) -> {"Lo","L"}; +lookup(70067) -> {"Mc","L"}; +lookup(70068) -> {"Mc","L"}; +lookup(70069) -> {"Mc","L"}; +lookup(70070) -> {"Mn","NSM"}; +lookup(70071) -> {"Mn","NSM"}; +lookup(70072) -> {"Mn","NSM"}; +lookup(70073) -> {"Mn","NSM"}; +lookup(70074) -> {"Mn","NSM"}; +lookup(70075) -> {"Mn","NSM"}; +lookup(70076) -> {"Mn","NSM"}; +lookup(70077) -> {"Mn","NSM"}; +lookup(70078) -> {"Mn","NSM"}; +lookup(70079) -> {"Mc","L"}; +lookup(70080) -> {"Mc","L"}; +lookup(70081) -> {"Lo","L"}; +lookup(70082) -> {"Lo","L"}; +lookup(70083) -> {"Lo","L"}; +lookup(70084) -> {"Lo","L"}; +lookup(70085) -> {"Po","L"}; +lookup(70086) -> {"Po","L"}; +lookup(70087) -> {"Po","L"}; +lookup(70088) -> {"Po","L"}; +lookup(70089) -> {"Mn","NSM"}; +lookup(70090) -> {"Mn","NSM"}; +lookup(70091) -> {"Mn","NSM"}; +lookup(70092) -> {"Mn","NSM"}; +lookup(70093) -> {"Po","L"}; +lookup(70094) -> {"Mc","L"}; +lookup(70095) -> {"Mn","NSM"}; +lookup(70096) -> {"Nd","L"}; +lookup(70097) -> {"Nd","L"}; +lookup(70098) -> {"Nd","L"}; +lookup(70099) -> {"Nd","L"}; +lookup(70100) -> {"Nd","L"}; +lookup(70101) -> {"Nd","L"}; +lookup(70102) -> {"Nd","L"}; +lookup(70103) -> {"Nd","L"}; +lookup(70104) -> {"Nd","L"}; +lookup(70105) -> {"Nd","L"}; +lookup(70106) -> {"Lo","L"}; +lookup(70107) -> {"Po","L"}; +lookup(70108) -> {"Lo","L"}; +lookup(70109) -> {"Po","L"}; +lookup(70110) -> {"Po","L"}; +lookup(70111) -> {"Po","L"}; +lookup(70113) -> {"No","L"}; +lookup(70114) -> {"No","L"}; +lookup(70115) -> {"No","L"}; +lookup(70116) -> {"No","L"}; +lookup(70117) -> {"No","L"}; +lookup(70118) -> {"No","L"}; +lookup(70119) -> {"No","L"}; +lookup(70120) -> {"No","L"}; +lookup(70121) -> {"No","L"}; +lookup(70122) -> {"No","L"}; +lookup(70123) -> {"No","L"}; +lookup(70124) -> {"No","L"}; +lookup(70125) -> {"No","L"}; +lookup(70126) -> {"No","L"}; +lookup(70127) -> {"No","L"}; +lookup(70128) -> {"No","L"}; +lookup(70129) -> 
{"No","L"}; +lookup(70130) -> {"No","L"}; +lookup(70131) -> {"No","L"}; +lookup(70132) -> {"No","L"}; +lookup(70144) -> {"Lo","L"}; +lookup(70145) -> {"Lo","L"}; +lookup(70146) -> {"Lo","L"}; +lookup(70147) -> {"Lo","L"}; +lookup(70148) -> {"Lo","L"}; +lookup(70149) -> {"Lo","L"}; +lookup(70150) -> {"Lo","L"}; +lookup(70151) -> {"Lo","L"}; +lookup(70152) -> {"Lo","L"}; +lookup(70153) -> {"Lo","L"}; +lookup(70154) -> {"Lo","L"}; +lookup(70155) -> {"Lo","L"}; +lookup(70156) -> {"Lo","L"}; +lookup(70157) -> {"Lo","L"}; +lookup(70158) -> {"Lo","L"}; +lookup(70159) -> {"Lo","L"}; +lookup(70160) -> {"Lo","L"}; +lookup(70161) -> {"Lo","L"}; +lookup(70163) -> {"Lo","L"}; +lookup(70164) -> {"Lo","L"}; +lookup(70165) -> {"Lo","L"}; +lookup(70166) -> {"Lo","L"}; +lookup(70167) -> {"Lo","L"}; +lookup(70168) -> {"Lo","L"}; +lookup(70169) -> {"Lo","L"}; +lookup(70170) -> {"Lo","L"}; +lookup(70171) -> {"Lo","L"}; +lookup(70172) -> {"Lo","L"}; +lookup(70173) -> {"Lo","L"}; +lookup(70174) -> {"Lo","L"}; +lookup(70175) -> {"Lo","L"}; +lookup(70176) -> {"Lo","L"}; +lookup(70177) -> {"Lo","L"}; +lookup(70178) -> {"Lo","L"}; +lookup(70179) -> {"Lo","L"}; +lookup(70180) -> {"Lo","L"}; +lookup(70181) -> {"Lo","L"}; +lookup(70182) -> {"Lo","L"}; +lookup(70183) -> {"Lo","L"}; +lookup(70184) -> {"Lo","L"}; +lookup(70185) -> {"Lo","L"}; +lookup(70186) -> {"Lo","L"}; +lookup(70187) -> {"Lo","L"}; +lookup(70188) -> {"Mc","L"}; +lookup(70189) -> {"Mc","L"}; +lookup(70190) -> {"Mc","L"}; +lookup(70191) -> {"Mn","NSM"}; +lookup(70192) -> {"Mn","NSM"}; +lookup(70193) -> {"Mn","NSM"}; +lookup(70194) -> {"Mc","L"}; +lookup(70195) -> {"Mc","L"}; +lookup(70196) -> {"Mn","NSM"}; +lookup(70197) -> {"Mc","L"}; +lookup(70198) -> {"Mn","NSM"}; +lookup(70199) -> {"Mn","NSM"}; +lookup(70200) -> {"Po","L"}; +lookup(70201) -> {"Po","L"}; +lookup(70202) -> {"Po","L"}; +lookup(70203) -> {"Po","L"}; +lookup(70204) -> {"Po","L"}; +lookup(70205) -> {"Po","L"}; +lookup(70206) -> {"Mn","NSM"}; +lookup(70272) -> 
{"Lo","L"}; +lookup(70273) -> {"Lo","L"}; +lookup(70274) -> {"Lo","L"}; +lookup(70275) -> {"Lo","L"}; +lookup(70276) -> {"Lo","L"}; +lookup(70277) -> {"Lo","L"}; +lookup(70278) -> {"Lo","L"}; +lookup(70280) -> {"Lo","L"}; +lookup(70282) -> {"Lo","L"}; +lookup(70283) -> {"Lo","L"}; +lookup(70284) -> {"Lo","L"}; +lookup(70285) -> {"Lo","L"}; +lookup(70287) -> {"Lo","L"}; +lookup(70288) -> {"Lo","L"}; +lookup(70289) -> {"Lo","L"}; +lookup(70290) -> {"Lo","L"}; +lookup(70291) -> {"Lo","L"}; +lookup(70292) -> {"Lo","L"}; +lookup(70293) -> {"Lo","L"}; +lookup(70294) -> {"Lo","L"}; +lookup(70295) -> {"Lo","L"}; +lookup(70296) -> {"Lo","L"}; +lookup(70297) -> {"Lo","L"}; +lookup(70298) -> {"Lo","L"}; +lookup(70299) -> {"Lo","L"}; +lookup(70300) -> {"Lo","L"}; +lookup(70301) -> {"Lo","L"}; +lookup(70303) -> {"Lo","L"}; +lookup(70304) -> {"Lo","L"}; +lookup(70305) -> {"Lo","L"}; +lookup(70306) -> {"Lo","L"}; +lookup(70307) -> {"Lo","L"}; +lookup(70308) -> {"Lo","L"}; +lookup(70309) -> {"Lo","L"}; +lookup(70310) -> {"Lo","L"}; +lookup(70311) -> {"Lo","L"}; +lookup(70312) -> {"Lo","L"}; +lookup(70313) -> {"Po","L"}; +lookup(70320) -> {"Lo","L"}; +lookup(70321) -> {"Lo","L"}; +lookup(70322) -> {"Lo","L"}; +lookup(70323) -> {"Lo","L"}; +lookup(70324) -> {"Lo","L"}; +lookup(70325) -> {"Lo","L"}; +lookup(70326) -> {"Lo","L"}; +lookup(70327) -> {"Lo","L"}; +lookup(70328) -> {"Lo","L"}; +lookup(70329) -> {"Lo","L"}; +lookup(70330) -> {"Lo","L"}; +lookup(70331) -> {"Lo","L"}; +lookup(70332) -> {"Lo","L"}; +lookup(70333) -> {"Lo","L"}; +lookup(70334) -> {"Lo","L"}; +lookup(70335) -> {"Lo","L"}; +lookup(70336) -> {"Lo","L"}; +lookup(70337) -> {"Lo","L"}; +lookup(70338) -> {"Lo","L"}; +lookup(70339) -> {"Lo","L"}; +lookup(70340) -> {"Lo","L"}; +lookup(70341) -> {"Lo","L"}; +lookup(70342) -> {"Lo","L"}; +lookup(70343) -> {"Lo","L"}; +lookup(70344) -> {"Lo","L"}; +lookup(70345) -> {"Lo","L"}; +lookup(70346) -> {"Lo","L"}; +lookup(70347) -> {"Lo","L"}; +lookup(70348) -> {"Lo","L"}; 
+lookup(70349) -> {"Lo","L"}; +lookup(70350) -> {"Lo","L"}; +lookup(70351) -> {"Lo","L"}; +lookup(70352) -> {"Lo","L"}; +lookup(70353) -> {"Lo","L"}; +lookup(70354) -> {"Lo","L"}; +lookup(70355) -> {"Lo","L"}; +lookup(70356) -> {"Lo","L"}; +lookup(70357) -> {"Lo","L"}; +lookup(70358) -> {"Lo","L"}; +lookup(70359) -> {"Lo","L"}; +lookup(70360) -> {"Lo","L"}; +lookup(70361) -> {"Lo","L"}; +lookup(70362) -> {"Lo","L"}; +lookup(70363) -> {"Lo","L"}; +lookup(70364) -> {"Lo","L"}; +lookup(70365) -> {"Lo","L"}; +lookup(70366) -> {"Lo","L"}; +lookup(70367) -> {"Mn","NSM"}; +lookup(70368) -> {"Mc","L"}; +lookup(70369) -> {"Mc","L"}; +lookup(70370) -> {"Mc","L"}; +lookup(70371) -> {"Mn","NSM"}; +lookup(70372) -> {"Mn","NSM"}; +lookup(70373) -> {"Mn","NSM"}; +lookup(70374) -> {"Mn","NSM"}; +lookup(70375) -> {"Mn","NSM"}; +lookup(70376) -> {"Mn","NSM"}; +lookup(70377) -> {"Mn","NSM"}; +lookup(70378) -> {"Mn","NSM"}; +lookup(70384) -> {"Nd","L"}; +lookup(70385) -> {"Nd","L"}; +lookup(70386) -> {"Nd","L"}; +lookup(70387) -> {"Nd","L"}; +lookup(70388) -> {"Nd","L"}; +lookup(70389) -> {"Nd","L"}; +lookup(70390) -> {"Nd","L"}; +lookup(70391) -> {"Nd","L"}; +lookup(70392) -> {"Nd","L"}; +lookup(70393) -> {"Nd","L"}; +lookup(70400) -> {"Mn","NSM"}; +lookup(70401) -> {"Mn","NSM"}; +lookup(70402) -> {"Mc","L"}; +lookup(70403) -> {"Mc","L"}; +lookup(70405) -> {"Lo","L"}; +lookup(70406) -> {"Lo","L"}; +lookup(70407) -> {"Lo","L"}; +lookup(70408) -> {"Lo","L"}; +lookup(70409) -> {"Lo","L"}; +lookup(70410) -> {"Lo","L"}; +lookup(70411) -> {"Lo","L"}; +lookup(70412) -> {"Lo","L"}; +lookup(70415) -> {"Lo","L"}; +lookup(70416) -> {"Lo","L"}; +lookup(70419) -> {"Lo","L"}; +lookup(70420) -> {"Lo","L"}; +lookup(70421) -> {"Lo","L"}; +lookup(70422) -> {"Lo","L"}; +lookup(70423) -> {"Lo","L"}; +lookup(70424) -> {"Lo","L"}; +lookup(70425) -> {"Lo","L"}; +lookup(70426) -> {"Lo","L"}; +lookup(70427) -> {"Lo","L"}; +lookup(70428) -> {"Lo","L"}; +lookup(70429) -> {"Lo","L"}; +lookup(70430) -> 
{"Lo","L"}; +lookup(70431) -> {"Lo","L"}; +lookup(70432) -> {"Lo","L"}; +lookup(70433) -> {"Lo","L"}; +lookup(70434) -> {"Lo","L"}; +lookup(70435) -> {"Lo","L"}; +lookup(70436) -> {"Lo","L"}; +lookup(70437) -> {"Lo","L"}; +lookup(70438) -> {"Lo","L"}; +lookup(70439) -> {"Lo","L"}; +lookup(70440) -> {"Lo","L"}; +lookup(70442) -> {"Lo","L"}; +lookup(70443) -> {"Lo","L"}; +lookup(70444) -> {"Lo","L"}; +lookup(70445) -> {"Lo","L"}; +lookup(70446) -> {"Lo","L"}; +lookup(70447) -> {"Lo","L"}; +lookup(70448) -> {"Lo","L"}; +lookup(70450) -> {"Lo","L"}; +lookup(70451) -> {"Lo","L"}; +lookup(70453) -> {"Lo","L"}; +lookup(70454) -> {"Lo","L"}; +lookup(70455) -> {"Lo","L"}; +lookup(70456) -> {"Lo","L"}; +lookup(70457) -> {"Lo","L"}; +lookup(70459) -> {"Mn","NSM"}; +lookup(70460) -> {"Mn","NSM"}; +lookup(70461) -> {"Lo","L"}; +lookup(70462) -> {"Mc","L"}; +lookup(70463) -> {"Mc","L"}; +lookup(70464) -> {"Mn","NSM"}; +lookup(70465) -> {"Mc","L"}; +lookup(70466) -> {"Mc","L"}; +lookup(70467) -> {"Mc","L"}; +lookup(70468) -> {"Mc","L"}; +lookup(70471) -> {"Mc","L"}; +lookup(70472) -> {"Mc","L"}; +lookup(70475) -> {"Mc","L"}; +lookup(70476) -> {"Mc","L"}; +lookup(70477) -> {"Mc","L"}; +lookup(70480) -> {"Lo","L"}; +lookup(70487) -> {"Mc","L"}; +lookup(70493) -> {"Lo","L"}; +lookup(70494) -> {"Lo","L"}; +lookup(70495) -> {"Lo","L"}; +lookup(70496) -> {"Lo","L"}; +lookup(70497) -> {"Lo","L"}; +lookup(70498) -> {"Mc","L"}; +lookup(70499) -> {"Mc","L"}; +lookup(70502) -> {"Mn","NSM"}; +lookup(70503) -> {"Mn","NSM"}; +lookup(70504) -> {"Mn","NSM"}; +lookup(70505) -> {"Mn","NSM"}; +lookup(70506) -> {"Mn","NSM"}; +lookup(70507) -> {"Mn","NSM"}; +lookup(70508) -> {"Mn","NSM"}; +lookup(70512) -> {"Mn","NSM"}; +lookup(70513) -> {"Mn","NSM"}; +lookup(70514) -> {"Mn","NSM"}; +lookup(70515) -> {"Mn","NSM"}; +lookup(70516) -> {"Mn","NSM"}; +lookup(70656) -> {"Lo","L"}; +lookup(70657) -> {"Lo","L"}; +lookup(70658) -> {"Lo","L"}; +lookup(70659) -> {"Lo","L"}; +lookup(70660) -> {"Lo","L"}; 
+lookup(70661) -> {"Lo","L"}; +lookup(70662) -> {"Lo","L"}; +lookup(70663) -> {"Lo","L"}; +lookup(70664) -> {"Lo","L"}; +lookup(70665) -> {"Lo","L"}; +lookup(70666) -> {"Lo","L"}; +lookup(70667) -> {"Lo","L"}; +lookup(70668) -> {"Lo","L"}; +lookup(70669) -> {"Lo","L"}; +lookup(70670) -> {"Lo","L"}; +lookup(70671) -> {"Lo","L"}; +lookup(70672) -> {"Lo","L"}; +lookup(70673) -> {"Lo","L"}; +lookup(70674) -> {"Lo","L"}; +lookup(70675) -> {"Lo","L"}; +lookup(70676) -> {"Lo","L"}; +lookup(70677) -> {"Lo","L"}; +lookup(70678) -> {"Lo","L"}; +lookup(70679) -> {"Lo","L"}; +lookup(70680) -> {"Lo","L"}; +lookup(70681) -> {"Lo","L"}; +lookup(70682) -> {"Lo","L"}; +lookup(70683) -> {"Lo","L"}; +lookup(70684) -> {"Lo","L"}; +lookup(70685) -> {"Lo","L"}; +lookup(70686) -> {"Lo","L"}; +lookup(70687) -> {"Lo","L"}; +lookup(70688) -> {"Lo","L"}; +lookup(70689) -> {"Lo","L"}; +lookup(70690) -> {"Lo","L"}; +lookup(70691) -> {"Lo","L"}; +lookup(70692) -> {"Lo","L"}; +lookup(70693) -> {"Lo","L"}; +lookup(70694) -> {"Lo","L"}; +lookup(70695) -> {"Lo","L"}; +lookup(70696) -> {"Lo","L"}; +lookup(70697) -> {"Lo","L"}; +lookup(70698) -> {"Lo","L"}; +lookup(70699) -> {"Lo","L"}; +lookup(70700) -> {"Lo","L"}; +lookup(70701) -> {"Lo","L"}; +lookup(70702) -> {"Lo","L"}; +lookup(70703) -> {"Lo","L"}; +lookup(70704) -> {"Lo","L"}; +lookup(70705) -> {"Lo","L"}; +lookup(70706) -> {"Lo","L"}; +lookup(70707) -> {"Lo","L"}; +lookup(70708) -> {"Lo","L"}; +lookup(70709) -> {"Mc","L"}; +lookup(70710) -> {"Mc","L"}; +lookup(70711) -> {"Mc","L"}; +lookup(70712) -> {"Mn","NSM"}; +lookup(70713) -> {"Mn","NSM"}; +lookup(70714) -> {"Mn","NSM"}; +lookup(70715) -> {"Mn","NSM"}; +lookup(70716) -> {"Mn","NSM"}; +lookup(70717) -> {"Mn","NSM"}; +lookup(70718) -> {"Mn","NSM"}; +lookup(70719) -> {"Mn","NSM"}; +lookup(70720) -> {"Mc","L"}; +lookup(70721) -> {"Mc","L"}; +lookup(70722) -> {"Mn","NSM"}; +lookup(70723) -> {"Mn","NSM"}; +lookup(70724) -> {"Mn","NSM"}; +lookup(70725) -> {"Mc","L"}; +lookup(70726) -> 
{"Mn","NSM"}; +lookup(70727) -> {"Lo","L"}; +lookup(70728) -> {"Lo","L"}; +lookup(70729) -> {"Lo","L"}; +lookup(70730) -> {"Lo","L"}; +lookup(70731) -> {"Po","L"}; +lookup(70732) -> {"Po","L"}; +lookup(70733) -> {"Po","L"}; +lookup(70734) -> {"Po","L"}; +lookup(70735) -> {"Po","L"}; +lookup(70736) -> {"Nd","L"}; +lookup(70737) -> {"Nd","L"}; +lookup(70738) -> {"Nd","L"}; +lookup(70739) -> {"Nd","L"}; +lookup(70740) -> {"Nd","L"}; +lookup(70741) -> {"Nd","L"}; +lookup(70742) -> {"Nd","L"}; +lookup(70743) -> {"Nd","L"}; +lookup(70744) -> {"Nd","L"}; +lookup(70745) -> {"Nd","L"}; +lookup(70746) -> {"Po","L"}; +lookup(70747) -> {"Po","L"}; +lookup(70749) -> {"Po","L"}; +lookup(70750) -> {"Mn","NSM"}; +lookup(70751) -> {"Lo","L"}; +lookup(70752) -> {"Lo","L"}; +lookup(70753) -> {"Lo","L"}; +lookup(70784) -> {"Lo","L"}; +lookup(70785) -> {"Lo","L"}; +lookup(70786) -> {"Lo","L"}; +lookup(70787) -> {"Lo","L"}; +lookup(70788) -> {"Lo","L"}; +lookup(70789) -> {"Lo","L"}; +lookup(70790) -> {"Lo","L"}; +lookup(70791) -> {"Lo","L"}; +lookup(70792) -> {"Lo","L"}; +lookup(70793) -> {"Lo","L"}; +lookup(70794) -> {"Lo","L"}; +lookup(70795) -> {"Lo","L"}; +lookup(70796) -> {"Lo","L"}; +lookup(70797) -> {"Lo","L"}; +lookup(70798) -> {"Lo","L"}; +lookup(70799) -> {"Lo","L"}; +lookup(70800) -> {"Lo","L"}; +lookup(70801) -> {"Lo","L"}; +lookup(70802) -> {"Lo","L"}; +lookup(70803) -> {"Lo","L"}; +lookup(70804) -> {"Lo","L"}; +lookup(70805) -> {"Lo","L"}; +lookup(70806) -> {"Lo","L"}; +lookup(70807) -> {"Lo","L"}; +lookup(70808) -> {"Lo","L"}; +lookup(70809) -> {"Lo","L"}; +lookup(70810) -> {"Lo","L"}; +lookup(70811) -> {"Lo","L"}; +lookup(70812) -> {"Lo","L"}; +lookup(70813) -> {"Lo","L"}; +lookup(70814) -> {"Lo","L"}; +lookup(70815) -> {"Lo","L"}; +lookup(70816) -> {"Lo","L"}; +lookup(70817) -> {"Lo","L"}; +lookup(70818) -> {"Lo","L"}; +lookup(70819) -> {"Lo","L"}; +lookup(70820) -> {"Lo","L"}; +lookup(70821) -> {"Lo","L"}; +lookup(70822) -> {"Lo","L"}; +lookup(70823) -> {"Lo","L"}; 
+lookup(70824) -> {"Lo","L"}; +lookup(70825) -> {"Lo","L"}; +lookup(70826) -> {"Lo","L"}; +lookup(70827) -> {"Lo","L"}; +lookup(70828) -> {"Lo","L"}; +lookup(70829) -> {"Lo","L"}; +lookup(70830) -> {"Lo","L"}; +lookup(70831) -> {"Lo","L"}; +lookup(70832) -> {"Mc","L"}; +lookup(70833) -> {"Mc","L"}; +lookup(70834) -> {"Mc","L"}; +lookup(70835) -> {"Mn","NSM"}; +lookup(70836) -> {"Mn","NSM"}; +lookup(70837) -> {"Mn","NSM"}; +lookup(70838) -> {"Mn","NSM"}; +lookup(70839) -> {"Mn","NSM"}; +lookup(70840) -> {"Mn","NSM"}; +lookup(70841) -> {"Mc","L"}; +lookup(70842) -> {"Mn","NSM"}; +lookup(70843) -> {"Mc","L"}; +lookup(70844) -> {"Mc","L"}; +lookup(70845) -> {"Mc","L"}; +lookup(70846) -> {"Mc","L"}; +lookup(70847) -> {"Mn","NSM"}; +lookup(70848) -> {"Mn","NSM"}; +lookup(70849) -> {"Mc","L"}; +lookup(70850) -> {"Mn","NSM"}; +lookup(70851) -> {"Mn","NSM"}; +lookup(70852) -> {"Lo","L"}; +lookup(70853) -> {"Lo","L"}; +lookup(70854) -> {"Po","L"}; +lookup(70855) -> {"Lo","L"}; +lookup(70864) -> {"Nd","L"}; +lookup(70865) -> {"Nd","L"}; +lookup(70866) -> {"Nd","L"}; +lookup(70867) -> {"Nd","L"}; +lookup(70868) -> {"Nd","L"}; +lookup(70869) -> {"Nd","L"}; +lookup(70870) -> {"Nd","L"}; +lookup(70871) -> {"Nd","L"}; +lookup(70872) -> {"Nd","L"}; +lookup(70873) -> {"Nd","L"}; +lookup(71040) -> {"Lo","L"}; +lookup(71041) -> {"Lo","L"}; +lookup(71042) -> {"Lo","L"}; +lookup(71043) -> {"Lo","L"}; +lookup(71044) -> {"Lo","L"}; +lookup(71045) -> {"Lo","L"}; +lookup(71046) -> {"Lo","L"}; +lookup(71047) -> {"Lo","L"}; +lookup(71048) -> {"Lo","L"}; +lookup(71049) -> {"Lo","L"}; +lookup(71050) -> {"Lo","L"}; +lookup(71051) -> {"Lo","L"}; +lookup(71052) -> {"Lo","L"}; +lookup(71053) -> {"Lo","L"}; +lookup(71054) -> {"Lo","L"}; +lookup(71055) -> {"Lo","L"}; +lookup(71056) -> {"Lo","L"}; +lookup(71057) -> {"Lo","L"}; +lookup(71058) -> {"Lo","L"}; +lookup(71059) -> {"Lo","L"}; +lookup(71060) -> {"Lo","L"}; +lookup(71061) -> {"Lo","L"}; +lookup(71062) -> {"Lo","L"}; +lookup(71063) -> 
{"Lo","L"}; +lookup(71064) -> {"Lo","L"}; +lookup(71065) -> {"Lo","L"}; +lookup(71066) -> {"Lo","L"}; +lookup(71067) -> {"Lo","L"}; +lookup(71068) -> {"Lo","L"}; +lookup(71069) -> {"Lo","L"}; +lookup(71070) -> {"Lo","L"}; +lookup(71071) -> {"Lo","L"}; +lookup(71072) -> {"Lo","L"}; +lookup(71073) -> {"Lo","L"}; +lookup(71074) -> {"Lo","L"}; +lookup(71075) -> {"Lo","L"}; +lookup(71076) -> {"Lo","L"}; +lookup(71077) -> {"Lo","L"}; +lookup(71078) -> {"Lo","L"}; +lookup(71079) -> {"Lo","L"}; +lookup(71080) -> {"Lo","L"}; +lookup(71081) -> {"Lo","L"}; +lookup(71082) -> {"Lo","L"}; +lookup(71083) -> {"Lo","L"}; +lookup(71084) -> {"Lo","L"}; +lookup(71085) -> {"Lo","L"}; +lookup(71086) -> {"Lo","L"}; +lookup(71087) -> {"Mc","L"}; +lookup(71088) -> {"Mc","L"}; +lookup(71089) -> {"Mc","L"}; +lookup(71090) -> {"Mn","NSM"}; +lookup(71091) -> {"Mn","NSM"}; +lookup(71092) -> {"Mn","NSM"}; +lookup(71093) -> {"Mn","NSM"}; +lookup(71096) -> {"Mc","L"}; +lookup(71097) -> {"Mc","L"}; +lookup(71098) -> {"Mc","L"}; +lookup(71099) -> {"Mc","L"}; +lookup(71100) -> {"Mn","NSM"}; +lookup(71101) -> {"Mn","NSM"}; +lookup(71102) -> {"Mc","L"}; +lookup(71103) -> {"Mn","NSM"}; +lookup(71104) -> {"Mn","NSM"}; +lookup(71105) -> {"Po","L"}; +lookup(71106) -> {"Po","L"}; +lookup(71107) -> {"Po","L"}; +lookup(71108) -> {"Po","L"}; +lookup(71109) -> {"Po","L"}; +lookup(71110) -> {"Po","L"}; +lookup(71111) -> {"Po","L"}; +lookup(71112) -> {"Po","L"}; +lookup(71113) -> {"Po","L"}; +lookup(71114) -> {"Po","L"}; +lookup(71115) -> {"Po","L"}; +lookup(71116) -> {"Po","L"}; +lookup(71117) -> {"Po","L"}; +lookup(71118) -> {"Po","L"}; +lookup(71119) -> {"Po","L"}; +lookup(71120) -> {"Po","L"}; +lookup(71121) -> {"Po","L"}; +lookup(71122) -> {"Po","L"}; +lookup(71123) -> {"Po","L"}; +lookup(71124) -> {"Po","L"}; +lookup(71125) -> {"Po","L"}; +lookup(71126) -> {"Po","L"}; +lookup(71127) -> {"Po","L"}; +lookup(71128) -> {"Lo","L"}; +lookup(71129) -> {"Lo","L"}; +lookup(71130) -> {"Lo","L"}; +lookup(71131) -> 
{"Lo","L"}; +lookup(71132) -> {"Mn","NSM"}; +lookup(71133) -> {"Mn","NSM"}; +lookup(71168) -> {"Lo","L"}; +lookup(71169) -> {"Lo","L"}; +lookup(71170) -> {"Lo","L"}; +lookup(71171) -> {"Lo","L"}; +lookup(71172) -> {"Lo","L"}; +lookup(71173) -> {"Lo","L"}; +lookup(71174) -> {"Lo","L"}; +lookup(71175) -> {"Lo","L"}; +lookup(71176) -> {"Lo","L"}; +lookup(71177) -> {"Lo","L"}; +lookup(71178) -> {"Lo","L"}; +lookup(71179) -> {"Lo","L"}; +lookup(71180) -> {"Lo","L"}; +lookup(71181) -> {"Lo","L"}; +lookup(71182) -> {"Lo","L"}; +lookup(71183) -> {"Lo","L"}; +lookup(71184) -> {"Lo","L"}; +lookup(71185) -> {"Lo","L"}; +lookup(71186) -> {"Lo","L"}; +lookup(71187) -> {"Lo","L"}; +lookup(71188) -> {"Lo","L"}; +lookup(71189) -> {"Lo","L"}; +lookup(71190) -> {"Lo","L"}; +lookup(71191) -> {"Lo","L"}; +lookup(71192) -> {"Lo","L"}; +lookup(71193) -> {"Lo","L"}; +lookup(71194) -> {"Lo","L"}; +lookup(71195) -> {"Lo","L"}; +lookup(71196) -> {"Lo","L"}; +lookup(71197) -> {"Lo","L"}; +lookup(71198) -> {"Lo","L"}; +lookup(71199) -> {"Lo","L"}; +lookup(71200) -> {"Lo","L"}; +lookup(71201) -> {"Lo","L"}; +lookup(71202) -> {"Lo","L"}; +lookup(71203) -> {"Lo","L"}; +lookup(71204) -> {"Lo","L"}; +lookup(71205) -> {"Lo","L"}; +lookup(71206) -> {"Lo","L"}; +lookup(71207) -> {"Lo","L"}; +lookup(71208) -> {"Lo","L"}; +lookup(71209) -> {"Lo","L"}; +lookup(71210) -> {"Lo","L"}; +lookup(71211) -> {"Lo","L"}; +lookup(71212) -> {"Lo","L"}; +lookup(71213) -> {"Lo","L"}; +lookup(71214) -> {"Lo","L"}; +lookup(71215) -> {"Lo","L"}; +lookup(71216) -> {"Mc","L"}; +lookup(71217) -> {"Mc","L"}; +lookup(71218) -> {"Mc","L"}; +lookup(71219) -> {"Mn","NSM"}; +lookup(71220) -> {"Mn","NSM"}; +lookup(71221) -> {"Mn","NSM"}; +lookup(71222) -> {"Mn","NSM"}; +lookup(71223) -> {"Mn","NSM"}; +lookup(71224) -> {"Mn","NSM"}; +lookup(71225) -> {"Mn","NSM"}; +lookup(71226) -> {"Mn","NSM"}; +lookup(71227) -> {"Mc","L"}; +lookup(71228) -> {"Mc","L"}; +lookup(71229) -> {"Mn","NSM"}; +lookup(71230) -> {"Mc","L"}; +lookup(71231) 
-> {"Mn","NSM"}; +lookup(71232) -> {"Mn","NSM"}; +lookup(71233) -> {"Po","L"}; +lookup(71234) -> {"Po","L"}; +lookup(71235) -> {"Po","L"}; +lookup(71236) -> {"Lo","L"}; +lookup(71248) -> {"Nd","L"}; +lookup(71249) -> {"Nd","L"}; +lookup(71250) -> {"Nd","L"}; +lookup(71251) -> {"Nd","L"}; +lookup(71252) -> {"Nd","L"}; +lookup(71253) -> {"Nd","L"}; +lookup(71254) -> {"Nd","L"}; +lookup(71255) -> {"Nd","L"}; +lookup(71256) -> {"Nd","L"}; +lookup(71257) -> {"Nd","L"}; +lookup(71264) -> {"Po","ON"}; +lookup(71265) -> {"Po","ON"}; +lookup(71266) -> {"Po","ON"}; +lookup(71267) -> {"Po","ON"}; +lookup(71268) -> {"Po","ON"}; +lookup(71269) -> {"Po","ON"}; +lookup(71270) -> {"Po","ON"}; +lookup(71271) -> {"Po","ON"}; +lookup(71272) -> {"Po","ON"}; +lookup(71273) -> {"Po","ON"}; +lookup(71274) -> {"Po","ON"}; +lookup(71275) -> {"Po","ON"}; +lookup(71276) -> {"Po","ON"}; +lookup(71296) -> {"Lo","L"}; +lookup(71297) -> {"Lo","L"}; +lookup(71298) -> {"Lo","L"}; +lookup(71299) -> {"Lo","L"}; +lookup(71300) -> {"Lo","L"}; +lookup(71301) -> {"Lo","L"}; +lookup(71302) -> {"Lo","L"}; +lookup(71303) -> {"Lo","L"}; +lookup(71304) -> {"Lo","L"}; +lookup(71305) -> {"Lo","L"}; +lookup(71306) -> {"Lo","L"}; +lookup(71307) -> {"Lo","L"}; +lookup(71308) -> {"Lo","L"}; +lookup(71309) -> {"Lo","L"}; +lookup(71310) -> {"Lo","L"}; +lookup(71311) -> {"Lo","L"}; +lookup(71312) -> {"Lo","L"}; +lookup(71313) -> {"Lo","L"}; +lookup(71314) -> {"Lo","L"}; +lookup(71315) -> {"Lo","L"}; +lookup(71316) -> {"Lo","L"}; +lookup(71317) -> {"Lo","L"}; +lookup(71318) -> {"Lo","L"}; +lookup(71319) -> {"Lo","L"}; +lookup(71320) -> {"Lo","L"}; +lookup(71321) -> {"Lo","L"}; +lookup(71322) -> {"Lo","L"}; +lookup(71323) -> {"Lo","L"}; +lookup(71324) -> {"Lo","L"}; +lookup(71325) -> {"Lo","L"}; +lookup(71326) -> {"Lo","L"}; +lookup(71327) -> {"Lo","L"}; +lookup(71328) -> {"Lo","L"}; +lookup(71329) -> {"Lo","L"}; +lookup(71330) -> {"Lo","L"}; +lookup(71331) -> {"Lo","L"}; +lookup(71332) -> {"Lo","L"}; +lookup(71333) -> 
{"Lo","L"}; +lookup(71334) -> {"Lo","L"}; +lookup(71335) -> {"Lo","L"}; +lookup(71336) -> {"Lo","L"}; +lookup(71337) -> {"Lo","L"}; +lookup(71338) -> {"Lo","L"}; +lookup(71339) -> {"Mn","NSM"}; +lookup(71340) -> {"Mc","L"}; +lookup(71341) -> {"Mn","NSM"}; +lookup(71342) -> {"Mc","L"}; +lookup(71343) -> {"Mc","L"}; +lookup(71344) -> {"Mn","NSM"}; +lookup(71345) -> {"Mn","NSM"}; +lookup(71346) -> {"Mn","NSM"}; +lookup(71347) -> {"Mn","NSM"}; +lookup(71348) -> {"Mn","NSM"}; +lookup(71349) -> {"Mn","NSM"}; +lookup(71350) -> {"Mc","L"}; +lookup(71351) -> {"Mn","NSM"}; +lookup(71352) -> {"Lo","L"}; +lookup(71360) -> {"Nd","L"}; +lookup(71361) -> {"Nd","L"}; +lookup(71362) -> {"Nd","L"}; +lookup(71363) -> {"Nd","L"}; +lookup(71364) -> {"Nd","L"}; +lookup(71365) -> {"Nd","L"}; +lookup(71366) -> {"Nd","L"}; +lookup(71367) -> {"Nd","L"}; +lookup(71368) -> {"Nd","L"}; +lookup(71369) -> {"Nd","L"}; +lookup(71424) -> {"Lo","L"}; +lookup(71425) -> {"Lo","L"}; +lookup(71426) -> {"Lo","L"}; +lookup(71427) -> {"Lo","L"}; +lookup(71428) -> {"Lo","L"}; +lookup(71429) -> {"Lo","L"}; +lookup(71430) -> {"Lo","L"}; +lookup(71431) -> {"Lo","L"}; +lookup(71432) -> {"Lo","L"}; +lookup(71433) -> {"Lo","L"}; +lookup(71434) -> {"Lo","L"}; +lookup(71435) -> {"Lo","L"}; +lookup(71436) -> {"Lo","L"}; +lookup(71437) -> {"Lo","L"}; +lookup(71438) -> {"Lo","L"}; +lookup(71439) -> {"Lo","L"}; +lookup(71440) -> {"Lo","L"}; +lookup(71441) -> {"Lo","L"}; +lookup(71442) -> {"Lo","L"}; +lookup(71443) -> {"Lo","L"}; +lookup(71444) -> {"Lo","L"}; +lookup(71445) -> {"Lo","L"}; +lookup(71446) -> {"Lo","L"}; +lookup(71447) -> {"Lo","L"}; +lookup(71448) -> {"Lo","L"}; +lookup(71449) -> {"Lo","L"}; +lookup(71450) -> {"Lo","L"}; +lookup(71453) -> {"Mn","NSM"}; +lookup(71454) -> {"Mn","NSM"}; +lookup(71455) -> {"Mn","NSM"}; +lookup(71456) -> {"Mc","L"}; +lookup(71457) -> {"Mc","L"}; +lookup(71458) -> {"Mn","NSM"}; +lookup(71459) -> {"Mn","NSM"}; +lookup(71460) -> {"Mn","NSM"}; +lookup(71461) -> {"Mn","NSM"}; 
+lookup(71462) -> {"Mc","L"}; +lookup(71463) -> {"Mn","NSM"}; +lookup(71464) -> {"Mn","NSM"}; +lookup(71465) -> {"Mn","NSM"}; +lookup(71466) -> {"Mn","NSM"}; +lookup(71467) -> {"Mn","NSM"}; +lookup(71472) -> {"Nd","L"}; +lookup(71473) -> {"Nd","L"}; +lookup(71474) -> {"Nd","L"}; +lookup(71475) -> {"Nd","L"}; +lookup(71476) -> {"Nd","L"}; +lookup(71477) -> {"Nd","L"}; +lookup(71478) -> {"Nd","L"}; +lookup(71479) -> {"Nd","L"}; +lookup(71480) -> {"Nd","L"}; +lookup(71481) -> {"Nd","L"}; +lookup(71482) -> {"No","L"}; +lookup(71483) -> {"No","L"}; +lookup(71484) -> {"Po","L"}; +lookup(71485) -> {"Po","L"}; +lookup(71486) -> {"Po","L"}; +lookup(71487) -> {"So","L"}; +lookup(71680) -> {"Lo","L"}; +lookup(71681) -> {"Lo","L"}; +lookup(71682) -> {"Lo","L"}; +lookup(71683) -> {"Lo","L"}; +lookup(71684) -> {"Lo","L"}; +lookup(71685) -> {"Lo","L"}; +lookup(71686) -> {"Lo","L"}; +lookup(71687) -> {"Lo","L"}; +lookup(71688) -> {"Lo","L"}; +lookup(71689) -> {"Lo","L"}; +lookup(71690) -> {"Lo","L"}; +lookup(71691) -> {"Lo","L"}; +lookup(71692) -> {"Lo","L"}; +lookup(71693) -> {"Lo","L"}; +lookup(71694) -> {"Lo","L"}; +lookup(71695) -> {"Lo","L"}; +lookup(71696) -> {"Lo","L"}; +lookup(71697) -> {"Lo","L"}; +lookup(71698) -> {"Lo","L"}; +lookup(71699) -> {"Lo","L"}; +lookup(71700) -> {"Lo","L"}; +lookup(71701) -> {"Lo","L"}; +lookup(71702) -> {"Lo","L"}; +lookup(71703) -> {"Lo","L"}; +lookup(71704) -> {"Lo","L"}; +lookup(71705) -> {"Lo","L"}; +lookup(71706) -> {"Lo","L"}; +lookup(71707) -> {"Lo","L"}; +lookup(71708) -> {"Lo","L"}; +lookup(71709) -> {"Lo","L"}; +lookup(71710) -> {"Lo","L"}; +lookup(71711) -> {"Lo","L"}; +lookup(71712) -> {"Lo","L"}; +lookup(71713) -> {"Lo","L"}; +lookup(71714) -> {"Lo","L"}; +lookup(71715) -> {"Lo","L"}; +lookup(71716) -> {"Lo","L"}; +lookup(71717) -> {"Lo","L"}; +lookup(71718) -> {"Lo","L"}; +lookup(71719) -> {"Lo","L"}; +lookup(71720) -> {"Lo","L"}; +lookup(71721) -> {"Lo","L"}; +lookup(71722) -> {"Lo","L"}; +lookup(71723) -> {"Lo","L"}; 
+lookup(71724) -> {"Mc","L"}; +lookup(71725) -> {"Mc","L"}; +lookup(71726) -> {"Mc","L"}; +lookup(71727) -> {"Mn","NSM"}; +lookup(71728) -> {"Mn","NSM"}; +lookup(71729) -> {"Mn","NSM"}; +lookup(71730) -> {"Mn","NSM"}; +lookup(71731) -> {"Mn","NSM"}; +lookup(71732) -> {"Mn","NSM"}; +lookup(71733) -> {"Mn","NSM"}; +lookup(71734) -> {"Mn","NSM"}; +lookup(71735) -> {"Mn","NSM"}; +lookup(71736) -> {"Mc","L"}; +lookup(71737) -> {"Mn","NSM"}; +lookup(71738) -> {"Mn","NSM"}; +lookup(71739) -> {"Po","L"}; +lookup(71840) -> {"Lu","L"}; +lookup(71841) -> {"Lu","L"}; +lookup(71842) -> {"Lu","L"}; +lookup(71843) -> {"Lu","L"}; +lookup(71844) -> {"Lu","L"}; +lookup(71845) -> {"Lu","L"}; +lookup(71846) -> {"Lu","L"}; +lookup(71847) -> {"Lu","L"}; +lookup(71848) -> {"Lu","L"}; +lookup(71849) -> {"Lu","L"}; +lookup(71850) -> {"Lu","L"}; +lookup(71851) -> {"Lu","L"}; +lookup(71852) -> {"Lu","L"}; +lookup(71853) -> {"Lu","L"}; +lookup(71854) -> {"Lu","L"}; +lookup(71855) -> {"Lu","L"}; +lookup(71856) -> {"Lu","L"}; +lookup(71857) -> {"Lu","L"}; +lookup(71858) -> {"Lu","L"}; +lookup(71859) -> {"Lu","L"}; +lookup(71860) -> {"Lu","L"}; +lookup(71861) -> {"Lu","L"}; +lookup(71862) -> {"Lu","L"}; +lookup(71863) -> {"Lu","L"}; +lookup(71864) -> {"Lu","L"}; +lookup(71865) -> {"Lu","L"}; +lookup(71866) -> {"Lu","L"}; +lookup(71867) -> {"Lu","L"}; +lookup(71868) -> {"Lu","L"}; +lookup(71869) -> {"Lu","L"}; +lookup(71870) -> {"Lu","L"}; +lookup(71871) -> {"Lu","L"}; +lookup(71872) -> {"Ll","L"}; +lookup(71873) -> {"Ll","L"}; +lookup(71874) -> {"Ll","L"}; +lookup(71875) -> {"Ll","L"}; +lookup(71876) -> {"Ll","L"}; +lookup(71877) -> {"Ll","L"}; +lookup(71878) -> {"Ll","L"}; +lookup(71879) -> {"Ll","L"}; +lookup(71880) -> {"Ll","L"}; +lookup(71881) -> {"Ll","L"}; +lookup(71882) -> {"Ll","L"}; +lookup(71883) -> {"Ll","L"}; +lookup(71884) -> {"Ll","L"}; +lookup(71885) -> {"Ll","L"}; +lookup(71886) -> {"Ll","L"}; +lookup(71887) -> {"Ll","L"}; +lookup(71888) -> {"Ll","L"}; +lookup(71889) -> 
{"Ll","L"}; +lookup(71890) -> {"Ll","L"}; +lookup(71891) -> {"Ll","L"}; +lookup(71892) -> {"Ll","L"}; +lookup(71893) -> {"Ll","L"}; +lookup(71894) -> {"Ll","L"}; +lookup(71895) -> {"Ll","L"}; +lookup(71896) -> {"Ll","L"}; +lookup(71897) -> {"Ll","L"}; +lookup(71898) -> {"Ll","L"}; +lookup(71899) -> {"Ll","L"}; +lookup(71900) -> {"Ll","L"}; +lookup(71901) -> {"Ll","L"}; +lookup(71902) -> {"Ll","L"}; +lookup(71903) -> {"Ll","L"}; +lookup(71904) -> {"Nd","L"}; +lookup(71905) -> {"Nd","L"}; +lookup(71906) -> {"Nd","L"}; +lookup(71907) -> {"Nd","L"}; +lookup(71908) -> {"Nd","L"}; +lookup(71909) -> {"Nd","L"}; +lookup(71910) -> {"Nd","L"}; +lookup(71911) -> {"Nd","L"}; +lookup(71912) -> {"Nd","L"}; +lookup(71913) -> {"Nd","L"}; +lookup(71914) -> {"No","L"}; +lookup(71915) -> {"No","L"}; +lookup(71916) -> {"No","L"}; +lookup(71917) -> {"No","L"}; +lookup(71918) -> {"No","L"}; +lookup(71919) -> {"No","L"}; +lookup(71920) -> {"No","L"}; +lookup(71921) -> {"No","L"}; +lookup(71922) -> {"No","L"}; +lookup(71935) -> {"Lo","L"}; +lookup(71936) -> {"Lo","L"}; +lookup(71937) -> {"Lo","L"}; +lookup(71938) -> {"Lo","L"}; +lookup(71939) -> {"Lo","L"}; +lookup(71940) -> {"Lo","L"}; +lookup(71941) -> {"Lo","L"}; +lookup(71942) -> {"Lo","L"}; +lookup(71945) -> {"Lo","L"}; +lookup(71948) -> {"Lo","L"}; +lookup(71949) -> {"Lo","L"}; +lookup(71950) -> {"Lo","L"}; +lookup(71951) -> {"Lo","L"}; +lookup(71952) -> {"Lo","L"}; +lookup(71953) -> {"Lo","L"}; +lookup(71954) -> {"Lo","L"}; +lookup(71955) -> {"Lo","L"}; +lookup(71957) -> {"Lo","L"}; +lookup(71958) -> {"Lo","L"}; +lookup(71960) -> {"Lo","L"}; +lookup(71961) -> {"Lo","L"}; +lookup(71962) -> {"Lo","L"}; +lookup(71963) -> {"Lo","L"}; +lookup(71964) -> {"Lo","L"}; +lookup(71965) -> {"Lo","L"}; +lookup(71966) -> {"Lo","L"}; +lookup(71967) -> {"Lo","L"}; +lookup(71968) -> {"Lo","L"}; +lookup(71969) -> {"Lo","L"}; +lookup(71970) -> {"Lo","L"}; +lookup(71971) -> {"Lo","L"}; +lookup(71972) -> {"Lo","L"}; +lookup(71973) -> {"Lo","L"}; 
+lookup(71974) -> {"Lo","L"}; +lookup(71975) -> {"Lo","L"}; +lookup(71976) -> {"Lo","L"}; +lookup(71977) -> {"Lo","L"}; +lookup(71978) -> {"Lo","L"}; +lookup(71979) -> {"Lo","L"}; +lookup(71980) -> {"Lo","L"}; +lookup(71981) -> {"Lo","L"}; +lookup(71982) -> {"Lo","L"}; +lookup(71983) -> {"Lo","L"}; +lookup(71984) -> {"Mc","L"}; +lookup(71985) -> {"Mc","L"}; +lookup(71986) -> {"Mc","L"}; +lookup(71987) -> {"Mc","L"}; +lookup(71988) -> {"Mc","L"}; +lookup(71989) -> {"Mc","L"}; +lookup(71991) -> {"Mc","L"}; +lookup(71992) -> {"Mc","L"}; +lookup(71995) -> {"Mn","NSM"}; +lookup(71996) -> {"Mn","NSM"}; +lookup(71997) -> {"Mc","L"}; +lookup(71998) -> {"Mn","NSM"}; +lookup(71999) -> {"Lo","L"}; +lookup(72000) -> {"Mc","L"}; +lookup(72001) -> {"Lo","L"}; +lookup(72002) -> {"Mc","L"}; +lookup(72003) -> {"Mn","NSM"}; +lookup(72004) -> {"Po","L"}; +lookup(72005) -> {"Po","L"}; +lookup(72006) -> {"Po","L"}; +lookup(72016) -> {"Nd","L"}; +lookup(72017) -> {"Nd","L"}; +lookup(72018) -> {"Nd","L"}; +lookup(72019) -> {"Nd","L"}; +lookup(72020) -> {"Nd","L"}; +lookup(72021) -> {"Nd","L"}; +lookup(72022) -> {"Nd","L"}; +lookup(72023) -> {"Nd","L"}; +lookup(72024) -> {"Nd","L"}; +lookup(72025) -> {"Nd","L"}; +lookup(72096) -> {"Lo","L"}; +lookup(72097) -> {"Lo","L"}; +lookup(72098) -> {"Lo","L"}; +lookup(72099) -> {"Lo","L"}; +lookup(72100) -> {"Lo","L"}; +lookup(72101) -> {"Lo","L"}; +lookup(72102) -> {"Lo","L"}; +lookup(72103) -> {"Lo","L"}; +lookup(72106) -> {"Lo","L"}; +lookup(72107) -> {"Lo","L"}; +lookup(72108) -> {"Lo","L"}; +lookup(72109) -> {"Lo","L"}; +lookup(72110) -> {"Lo","L"}; +lookup(72111) -> {"Lo","L"}; +lookup(72112) -> {"Lo","L"}; +lookup(72113) -> {"Lo","L"}; +lookup(72114) -> {"Lo","L"}; +lookup(72115) -> {"Lo","L"}; +lookup(72116) -> {"Lo","L"}; +lookup(72117) -> {"Lo","L"}; +lookup(72118) -> {"Lo","L"}; +lookup(72119) -> {"Lo","L"}; +lookup(72120) -> {"Lo","L"}; +lookup(72121) -> {"Lo","L"}; +lookup(72122) -> {"Lo","L"}; +lookup(72123) -> {"Lo","L"}; 
+lookup(72124) -> {"Lo","L"}; +lookup(72125) -> {"Lo","L"}; +lookup(72126) -> {"Lo","L"}; +lookup(72127) -> {"Lo","L"}; +lookup(72128) -> {"Lo","L"}; +lookup(72129) -> {"Lo","L"}; +lookup(72130) -> {"Lo","L"}; +lookup(72131) -> {"Lo","L"}; +lookup(72132) -> {"Lo","L"}; +lookup(72133) -> {"Lo","L"}; +lookup(72134) -> {"Lo","L"}; +lookup(72135) -> {"Lo","L"}; +lookup(72136) -> {"Lo","L"}; +lookup(72137) -> {"Lo","L"}; +lookup(72138) -> {"Lo","L"}; +lookup(72139) -> {"Lo","L"}; +lookup(72140) -> {"Lo","L"}; +lookup(72141) -> {"Lo","L"}; +lookup(72142) -> {"Lo","L"}; +lookup(72143) -> {"Lo","L"}; +lookup(72144) -> {"Lo","L"}; +lookup(72145) -> {"Mc","L"}; +lookup(72146) -> {"Mc","L"}; +lookup(72147) -> {"Mc","L"}; +lookup(72148) -> {"Mn","NSM"}; +lookup(72149) -> {"Mn","NSM"}; +lookup(72150) -> {"Mn","NSM"}; +lookup(72151) -> {"Mn","NSM"}; +lookup(72154) -> {"Mn","NSM"}; +lookup(72155) -> {"Mn","NSM"}; +lookup(72156) -> {"Mc","L"}; +lookup(72157) -> {"Mc","L"}; +lookup(72158) -> {"Mc","L"}; +lookup(72159) -> {"Mc","L"}; +lookup(72160) -> {"Mn","NSM"}; +lookup(72161) -> {"Lo","L"}; +lookup(72162) -> {"Po","L"}; +lookup(72163) -> {"Lo","L"}; +lookup(72164) -> {"Mc","L"}; +lookup(72192) -> {"Lo","L"}; +lookup(72193) -> {"Mn","NSM"}; +lookup(72194) -> {"Mn","NSM"}; +lookup(72195) -> {"Mn","NSM"}; +lookup(72196) -> {"Mn","NSM"}; +lookup(72197) -> {"Mn","NSM"}; +lookup(72198) -> {"Mn","NSM"}; +lookup(72199) -> {"Mn","L"}; +lookup(72200) -> {"Mn","L"}; +lookup(72201) -> {"Mn","NSM"}; +lookup(72202) -> {"Mn","NSM"}; +lookup(72203) -> {"Lo","L"}; +lookup(72204) -> {"Lo","L"}; +lookup(72205) -> {"Lo","L"}; +lookup(72206) -> {"Lo","L"}; +lookup(72207) -> {"Lo","L"}; +lookup(72208) -> {"Lo","L"}; +lookup(72209) -> {"Lo","L"}; +lookup(72210) -> {"Lo","L"}; +lookup(72211) -> {"Lo","L"}; +lookup(72212) -> {"Lo","L"}; +lookup(72213) -> {"Lo","L"}; +lookup(72214) -> {"Lo","L"}; +lookup(72215) -> {"Lo","L"}; +lookup(72216) -> {"Lo","L"}; +lookup(72217) -> {"Lo","L"}; +lookup(72218) -> 
{"Lo","L"}; +lookup(72219) -> {"Lo","L"}; +lookup(72220) -> {"Lo","L"}; +lookup(72221) -> {"Lo","L"}; +lookup(72222) -> {"Lo","L"}; +lookup(72223) -> {"Lo","L"}; +lookup(72224) -> {"Lo","L"}; +lookup(72225) -> {"Lo","L"}; +lookup(72226) -> {"Lo","L"}; +lookup(72227) -> {"Lo","L"}; +lookup(72228) -> {"Lo","L"}; +lookup(72229) -> {"Lo","L"}; +lookup(72230) -> {"Lo","L"}; +lookup(72231) -> {"Lo","L"}; +lookup(72232) -> {"Lo","L"}; +lookup(72233) -> {"Lo","L"}; +lookup(72234) -> {"Lo","L"}; +lookup(72235) -> {"Lo","L"}; +lookup(72236) -> {"Lo","L"}; +lookup(72237) -> {"Lo","L"}; +lookup(72238) -> {"Lo","L"}; +lookup(72239) -> {"Lo","L"}; +lookup(72240) -> {"Lo","L"}; +lookup(72241) -> {"Lo","L"}; +lookup(72242) -> {"Lo","L"}; +lookup(72243) -> {"Mn","NSM"}; +lookup(72244) -> {"Mn","NSM"}; +lookup(72245) -> {"Mn","NSM"}; +lookup(72246) -> {"Mn","NSM"}; +lookup(72247) -> {"Mn","NSM"}; +lookup(72248) -> {"Mn","NSM"}; +lookup(72249) -> {"Mc","L"}; +lookup(72250) -> {"Lo","L"}; +lookup(72251) -> {"Mn","NSM"}; +lookup(72252) -> {"Mn","NSM"}; +lookup(72253) -> {"Mn","NSM"}; +lookup(72254) -> {"Mn","NSM"}; +lookup(72255) -> {"Po","L"}; +lookup(72256) -> {"Po","L"}; +lookup(72257) -> {"Po","L"}; +lookup(72258) -> {"Po","L"}; +lookup(72259) -> {"Po","L"}; +lookup(72260) -> {"Po","L"}; +lookup(72261) -> {"Po","L"}; +lookup(72262) -> {"Po","L"}; +lookup(72263) -> {"Mn","NSM"}; +lookup(72272) -> {"Lo","L"}; +lookup(72273) -> {"Mn","NSM"}; +lookup(72274) -> {"Mn","NSM"}; +lookup(72275) -> {"Mn","NSM"}; +lookup(72276) -> {"Mn","NSM"}; +lookup(72277) -> {"Mn","NSM"}; +lookup(72278) -> {"Mn","NSM"}; +lookup(72279) -> {"Mc","L"}; +lookup(72280) -> {"Mc","L"}; +lookup(72281) -> {"Mn","NSM"}; +lookup(72282) -> {"Mn","NSM"}; +lookup(72283) -> {"Mn","NSM"}; +lookup(72284) -> {"Lo","L"}; +lookup(72285) -> {"Lo","L"}; +lookup(72286) -> {"Lo","L"}; +lookup(72287) -> {"Lo","L"}; +lookup(72288) -> {"Lo","L"}; +lookup(72289) -> {"Lo","L"}; +lookup(72290) -> {"Lo","L"}; +lookup(72291) -> 
{"Lo","L"}; +lookup(72292) -> {"Lo","L"}; +lookup(72293) -> {"Lo","L"}; +lookup(72294) -> {"Lo","L"}; +lookup(72295) -> {"Lo","L"}; +lookup(72296) -> {"Lo","L"}; +lookup(72297) -> {"Lo","L"}; +lookup(72298) -> {"Lo","L"}; +lookup(72299) -> {"Lo","L"}; +lookup(72300) -> {"Lo","L"}; +lookup(72301) -> {"Lo","L"}; +lookup(72302) -> {"Lo","L"}; +lookup(72303) -> {"Lo","L"}; +lookup(72304) -> {"Lo","L"}; +lookup(72305) -> {"Lo","L"}; +lookup(72306) -> {"Lo","L"}; +lookup(72307) -> {"Lo","L"}; +lookup(72308) -> {"Lo","L"}; +lookup(72309) -> {"Lo","L"}; +lookup(72310) -> {"Lo","L"}; +lookup(72311) -> {"Lo","L"}; +lookup(72312) -> {"Lo","L"}; +lookup(72313) -> {"Lo","L"}; +lookup(72314) -> {"Lo","L"}; +lookup(72315) -> {"Lo","L"}; +lookup(72316) -> {"Lo","L"}; +lookup(72317) -> {"Lo","L"}; +lookup(72318) -> {"Lo","L"}; +lookup(72319) -> {"Lo","L"}; +lookup(72320) -> {"Lo","L"}; +lookup(72321) -> {"Lo","L"}; +lookup(72322) -> {"Lo","L"}; +lookup(72323) -> {"Lo","L"}; +lookup(72324) -> {"Lo","L"}; +lookup(72325) -> {"Lo","L"}; +lookup(72326) -> {"Lo","L"}; +lookup(72327) -> {"Lo","L"}; +lookup(72328) -> {"Lo","L"}; +lookup(72329) -> {"Lo","L"}; +lookup(72330) -> {"Mn","NSM"}; +lookup(72331) -> {"Mn","NSM"}; +lookup(72332) -> {"Mn","NSM"}; +lookup(72333) -> {"Mn","NSM"}; +lookup(72334) -> {"Mn","NSM"}; +lookup(72335) -> {"Mn","NSM"}; +lookup(72336) -> {"Mn","NSM"}; +lookup(72337) -> {"Mn","NSM"}; +lookup(72338) -> {"Mn","NSM"}; +lookup(72339) -> {"Mn","NSM"}; +lookup(72340) -> {"Mn","NSM"}; +lookup(72341) -> {"Mn","NSM"}; +lookup(72342) -> {"Mn","NSM"}; +lookup(72343) -> {"Mc","L"}; +lookup(72344) -> {"Mn","NSM"}; +lookup(72345) -> {"Mn","NSM"}; +lookup(72346) -> {"Po","L"}; +lookup(72347) -> {"Po","L"}; +lookup(72348) -> {"Po","L"}; +lookup(72349) -> {"Lo","L"}; +lookup(72350) -> {"Po","L"}; +lookup(72351) -> {"Po","L"}; +lookup(72352) -> {"Po","L"}; +lookup(72353) -> {"Po","L"}; +lookup(72354) -> {"Po","L"}; +lookup(72384) -> {"Lo","L"}; +lookup(72385) -> {"Lo","L"}; 
+lookup(72386) -> {"Lo","L"}; +lookup(72387) -> {"Lo","L"}; +lookup(72388) -> {"Lo","L"}; +lookup(72389) -> {"Lo","L"}; +lookup(72390) -> {"Lo","L"}; +lookup(72391) -> {"Lo","L"}; +lookup(72392) -> {"Lo","L"}; +lookup(72393) -> {"Lo","L"}; +lookup(72394) -> {"Lo","L"}; +lookup(72395) -> {"Lo","L"}; +lookup(72396) -> {"Lo","L"}; +lookup(72397) -> {"Lo","L"}; +lookup(72398) -> {"Lo","L"}; +lookup(72399) -> {"Lo","L"}; +lookup(72400) -> {"Lo","L"}; +lookup(72401) -> {"Lo","L"}; +lookup(72402) -> {"Lo","L"}; +lookup(72403) -> {"Lo","L"}; +lookup(72404) -> {"Lo","L"}; +lookup(72405) -> {"Lo","L"}; +lookup(72406) -> {"Lo","L"}; +lookup(72407) -> {"Lo","L"}; +lookup(72408) -> {"Lo","L"}; +lookup(72409) -> {"Lo","L"}; +lookup(72410) -> {"Lo","L"}; +lookup(72411) -> {"Lo","L"}; +lookup(72412) -> {"Lo","L"}; +lookup(72413) -> {"Lo","L"}; +lookup(72414) -> {"Lo","L"}; +lookup(72415) -> {"Lo","L"}; +lookup(72416) -> {"Lo","L"}; +lookup(72417) -> {"Lo","L"}; +lookup(72418) -> {"Lo","L"}; +lookup(72419) -> {"Lo","L"}; +lookup(72420) -> {"Lo","L"}; +lookup(72421) -> {"Lo","L"}; +lookup(72422) -> {"Lo","L"}; +lookup(72423) -> {"Lo","L"}; +lookup(72424) -> {"Lo","L"}; +lookup(72425) -> {"Lo","L"}; +lookup(72426) -> {"Lo","L"}; +lookup(72427) -> {"Lo","L"}; +lookup(72428) -> {"Lo","L"}; +lookup(72429) -> {"Lo","L"}; +lookup(72430) -> {"Lo","L"}; +lookup(72431) -> {"Lo","L"}; +lookup(72432) -> {"Lo","L"}; +lookup(72433) -> {"Lo","L"}; +lookup(72434) -> {"Lo","L"}; +lookup(72435) -> {"Lo","L"}; +lookup(72436) -> {"Lo","L"}; +lookup(72437) -> {"Lo","L"}; +lookup(72438) -> {"Lo","L"}; +lookup(72439) -> {"Lo","L"}; +lookup(72440) -> {"Lo","L"}; +lookup(72704) -> {"Lo","L"}; +lookup(72705) -> {"Lo","L"}; +lookup(72706) -> {"Lo","L"}; +lookup(72707) -> {"Lo","L"}; +lookup(72708) -> {"Lo","L"}; +lookup(72709) -> {"Lo","L"}; +lookup(72710) -> {"Lo","L"}; +lookup(72711) -> {"Lo","L"}; +lookup(72712) -> {"Lo","L"}; +lookup(72714) -> {"Lo","L"}; +lookup(72715) -> {"Lo","L"}; +lookup(72716) -> 
{"Lo","L"}; +lookup(72717) -> {"Lo","L"}; +lookup(72718) -> {"Lo","L"}; +lookup(72719) -> {"Lo","L"}; +lookup(72720) -> {"Lo","L"}; +lookup(72721) -> {"Lo","L"}; +lookup(72722) -> {"Lo","L"}; +lookup(72723) -> {"Lo","L"}; +lookup(72724) -> {"Lo","L"}; +lookup(72725) -> {"Lo","L"}; +lookup(72726) -> {"Lo","L"}; +lookup(72727) -> {"Lo","L"}; +lookup(72728) -> {"Lo","L"}; +lookup(72729) -> {"Lo","L"}; +lookup(72730) -> {"Lo","L"}; +lookup(72731) -> {"Lo","L"}; +lookup(72732) -> {"Lo","L"}; +lookup(72733) -> {"Lo","L"}; +lookup(72734) -> {"Lo","L"}; +lookup(72735) -> {"Lo","L"}; +lookup(72736) -> {"Lo","L"}; +lookup(72737) -> {"Lo","L"}; +lookup(72738) -> {"Lo","L"}; +lookup(72739) -> {"Lo","L"}; +lookup(72740) -> {"Lo","L"}; +lookup(72741) -> {"Lo","L"}; +lookup(72742) -> {"Lo","L"}; +lookup(72743) -> {"Lo","L"}; +lookup(72744) -> {"Lo","L"}; +lookup(72745) -> {"Lo","L"}; +lookup(72746) -> {"Lo","L"}; +lookup(72747) -> {"Lo","L"}; +lookup(72748) -> {"Lo","L"}; +lookup(72749) -> {"Lo","L"}; +lookup(72750) -> {"Lo","L"}; +lookup(72751) -> {"Mc","L"}; +lookup(72752) -> {"Mn","NSM"}; +lookup(72753) -> {"Mn","NSM"}; +lookup(72754) -> {"Mn","NSM"}; +lookup(72755) -> {"Mn","NSM"}; +lookup(72756) -> {"Mn","NSM"}; +lookup(72757) -> {"Mn","NSM"}; +lookup(72758) -> {"Mn","NSM"}; +lookup(72760) -> {"Mn","NSM"}; +lookup(72761) -> {"Mn","NSM"}; +lookup(72762) -> {"Mn","NSM"}; +lookup(72763) -> {"Mn","NSM"}; +lookup(72764) -> {"Mn","NSM"}; +lookup(72765) -> {"Mn","NSM"}; +lookup(72766) -> {"Mc","L"}; +lookup(72767) -> {"Mn","L"}; +lookup(72768) -> {"Lo","L"}; +lookup(72769) -> {"Po","L"}; +lookup(72770) -> {"Po","L"}; +lookup(72771) -> {"Po","L"}; +lookup(72772) -> {"Po","L"}; +lookup(72773) -> {"Po","L"}; +lookup(72784) -> {"Nd","L"}; +lookup(72785) -> {"Nd","L"}; +lookup(72786) -> {"Nd","L"}; +lookup(72787) -> {"Nd","L"}; +lookup(72788) -> {"Nd","L"}; +lookup(72789) -> {"Nd","L"}; +lookup(72790) -> {"Nd","L"}; +lookup(72791) -> {"Nd","L"}; +lookup(72792) -> {"Nd","L"}; 
+lookup(72793) -> {"Nd","L"}; +lookup(72794) -> {"No","L"}; +lookup(72795) -> {"No","L"}; +lookup(72796) -> {"No","L"}; +lookup(72797) -> {"No","L"}; +lookup(72798) -> {"No","L"}; +lookup(72799) -> {"No","L"}; +lookup(72800) -> {"No","L"}; +lookup(72801) -> {"No","L"}; +lookup(72802) -> {"No","L"}; +lookup(72803) -> {"No","L"}; +lookup(72804) -> {"No","L"}; +lookup(72805) -> {"No","L"}; +lookup(72806) -> {"No","L"}; +lookup(72807) -> {"No","L"}; +lookup(72808) -> {"No","L"}; +lookup(72809) -> {"No","L"}; +lookup(72810) -> {"No","L"}; +lookup(72811) -> {"No","L"}; +lookup(72812) -> {"No","L"}; +lookup(72816) -> {"Po","L"}; +lookup(72817) -> {"Po","L"}; +lookup(72818) -> {"Lo","L"}; +lookup(72819) -> {"Lo","L"}; +lookup(72820) -> {"Lo","L"}; +lookup(72821) -> {"Lo","L"}; +lookup(72822) -> {"Lo","L"}; +lookup(72823) -> {"Lo","L"}; +lookup(72824) -> {"Lo","L"}; +lookup(72825) -> {"Lo","L"}; +lookup(72826) -> {"Lo","L"}; +lookup(72827) -> {"Lo","L"}; +lookup(72828) -> {"Lo","L"}; +lookup(72829) -> {"Lo","L"}; +lookup(72830) -> {"Lo","L"}; +lookup(72831) -> {"Lo","L"}; +lookup(72832) -> {"Lo","L"}; +lookup(72833) -> {"Lo","L"}; +lookup(72834) -> {"Lo","L"}; +lookup(72835) -> {"Lo","L"}; +lookup(72836) -> {"Lo","L"}; +lookup(72837) -> {"Lo","L"}; +lookup(72838) -> {"Lo","L"}; +lookup(72839) -> {"Lo","L"}; +lookup(72840) -> {"Lo","L"}; +lookup(72841) -> {"Lo","L"}; +lookup(72842) -> {"Lo","L"}; +lookup(72843) -> {"Lo","L"}; +lookup(72844) -> {"Lo","L"}; +lookup(72845) -> {"Lo","L"}; +lookup(72846) -> {"Lo","L"}; +lookup(72847) -> {"Lo","L"}; +lookup(72850) -> {"Mn","NSM"}; +lookup(72851) -> {"Mn","NSM"}; +lookup(72852) -> {"Mn","NSM"}; +lookup(72853) -> {"Mn","NSM"}; +lookup(72854) -> {"Mn","NSM"}; +lookup(72855) -> {"Mn","NSM"}; +lookup(72856) -> {"Mn","NSM"}; +lookup(72857) -> {"Mn","NSM"}; +lookup(72858) -> {"Mn","NSM"}; +lookup(72859) -> {"Mn","NSM"}; +lookup(72860) -> {"Mn","NSM"}; +lookup(72861) -> {"Mn","NSM"}; +lookup(72862) -> {"Mn","NSM"}; +lookup(72863) -> 
{"Mn","NSM"}; +lookup(72864) -> {"Mn","NSM"}; +lookup(72865) -> {"Mn","NSM"}; +lookup(72866) -> {"Mn","NSM"}; +lookup(72867) -> {"Mn","NSM"}; +lookup(72868) -> {"Mn","NSM"}; +lookup(72869) -> {"Mn","NSM"}; +lookup(72870) -> {"Mn","NSM"}; +lookup(72871) -> {"Mn","NSM"}; +lookup(72873) -> {"Mc","L"}; +lookup(72874) -> {"Mn","NSM"}; +lookup(72875) -> {"Mn","NSM"}; +lookup(72876) -> {"Mn","NSM"}; +lookup(72877) -> {"Mn","NSM"}; +lookup(72878) -> {"Mn","NSM"}; +lookup(72879) -> {"Mn","NSM"}; +lookup(72880) -> {"Mn","NSM"}; +lookup(72881) -> {"Mc","L"}; +lookup(72882) -> {"Mn","NSM"}; +lookup(72883) -> {"Mn","NSM"}; +lookup(72884) -> {"Mc","L"}; +lookup(72885) -> {"Mn","NSM"}; +lookup(72886) -> {"Mn","NSM"}; +lookup(72960) -> {"Lo","L"}; +lookup(72961) -> {"Lo","L"}; +lookup(72962) -> {"Lo","L"}; +lookup(72963) -> {"Lo","L"}; +lookup(72964) -> {"Lo","L"}; +lookup(72965) -> {"Lo","L"}; +lookup(72966) -> {"Lo","L"}; +lookup(72968) -> {"Lo","L"}; +lookup(72969) -> {"Lo","L"}; +lookup(72971) -> {"Lo","L"}; +lookup(72972) -> {"Lo","L"}; +lookup(72973) -> {"Lo","L"}; +lookup(72974) -> {"Lo","L"}; +lookup(72975) -> {"Lo","L"}; +lookup(72976) -> {"Lo","L"}; +lookup(72977) -> {"Lo","L"}; +lookup(72978) -> {"Lo","L"}; +lookup(72979) -> {"Lo","L"}; +lookup(72980) -> {"Lo","L"}; +lookup(72981) -> {"Lo","L"}; +lookup(72982) -> {"Lo","L"}; +lookup(72983) -> {"Lo","L"}; +lookup(72984) -> {"Lo","L"}; +lookup(72985) -> {"Lo","L"}; +lookup(72986) -> {"Lo","L"}; +lookup(72987) -> {"Lo","L"}; +lookup(72988) -> {"Lo","L"}; +lookup(72989) -> {"Lo","L"}; +lookup(72990) -> {"Lo","L"}; +lookup(72991) -> {"Lo","L"}; +lookup(72992) -> {"Lo","L"}; +lookup(72993) -> {"Lo","L"}; +lookup(72994) -> {"Lo","L"}; +lookup(72995) -> {"Lo","L"}; +lookup(72996) -> {"Lo","L"}; +lookup(72997) -> {"Lo","L"}; +lookup(72998) -> {"Lo","L"}; +lookup(72999) -> {"Lo","L"}; +lookup(73000) -> {"Lo","L"}; +lookup(73001) -> {"Lo","L"}; +lookup(73002) -> {"Lo","L"}; +lookup(73003) -> {"Lo","L"}; +lookup(73004) -> 
{"Lo","L"}; +lookup(73005) -> {"Lo","L"}; +lookup(73006) -> {"Lo","L"}; +lookup(73007) -> {"Lo","L"}; +lookup(73008) -> {"Lo","L"}; +lookup(73009) -> {"Mn","NSM"}; +lookup(73010) -> {"Mn","NSM"}; +lookup(73011) -> {"Mn","NSM"}; +lookup(73012) -> {"Mn","NSM"}; +lookup(73013) -> {"Mn","NSM"}; +lookup(73014) -> {"Mn","NSM"}; +lookup(73018) -> {"Mn","NSM"}; +lookup(73020) -> {"Mn","NSM"}; +lookup(73021) -> {"Mn","NSM"}; +lookup(73023) -> {"Mn","NSM"}; +lookup(73024) -> {"Mn","NSM"}; +lookup(73025) -> {"Mn","NSM"}; +lookup(73026) -> {"Mn","NSM"}; +lookup(73027) -> {"Mn","NSM"}; +lookup(73028) -> {"Mn","NSM"}; +lookup(73029) -> {"Mn","NSM"}; +lookup(73030) -> {"Lo","L"}; +lookup(73031) -> {"Mn","NSM"}; +lookup(73040) -> {"Nd","L"}; +lookup(73041) -> {"Nd","L"}; +lookup(73042) -> {"Nd","L"}; +lookup(73043) -> {"Nd","L"}; +lookup(73044) -> {"Nd","L"}; +lookup(73045) -> {"Nd","L"}; +lookup(73046) -> {"Nd","L"}; +lookup(73047) -> {"Nd","L"}; +lookup(73048) -> {"Nd","L"}; +lookup(73049) -> {"Nd","L"}; +lookup(73056) -> {"Lo","L"}; +lookup(73057) -> {"Lo","L"}; +lookup(73058) -> {"Lo","L"}; +lookup(73059) -> {"Lo","L"}; +lookup(73060) -> {"Lo","L"}; +lookup(73061) -> {"Lo","L"}; +lookup(73063) -> {"Lo","L"}; +lookup(73064) -> {"Lo","L"}; +lookup(73066) -> {"Lo","L"}; +lookup(73067) -> {"Lo","L"}; +lookup(73068) -> {"Lo","L"}; +lookup(73069) -> {"Lo","L"}; +lookup(73070) -> {"Lo","L"}; +lookup(73071) -> {"Lo","L"}; +lookup(73072) -> {"Lo","L"}; +lookup(73073) -> {"Lo","L"}; +lookup(73074) -> {"Lo","L"}; +lookup(73075) -> {"Lo","L"}; +lookup(73076) -> {"Lo","L"}; +lookup(73077) -> {"Lo","L"}; +lookup(73078) -> {"Lo","L"}; +lookup(73079) -> {"Lo","L"}; +lookup(73080) -> {"Lo","L"}; +lookup(73081) -> {"Lo","L"}; +lookup(73082) -> {"Lo","L"}; +lookup(73083) -> {"Lo","L"}; +lookup(73084) -> {"Lo","L"}; +lookup(73085) -> {"Lo","L"}; +lookup(73086) -> {"Lo","L"}; +lookup(73087) -> {"Lo","L"}; +lookup(73088) -> {"Lo","L"}; +lookup(73089) -> {"Lo","L"}; +lookup(73090) -> {"Lo","L"}; 
+lookup(73091) -> {"Lo","L"}; +lookup(73092) -> {"Lo","L"}; +lookup(73093) -> {"Lo","L"}; +lookup(73094) -> {"Lo","L"}; +lookup(73095) -> {"Lo","L"}; +lookup(73096) -> {"Lo","L"}; +lookup(73097) -> {"Lo","L"}; +lookup(73098) -> {"Mc","L"}; +lookup(73099) -> {"Mc","L"}; +lookup(73100) -> {"Mc","L"}; +lookup(73101) -> {"Mc","L"}; +lookup(73102) -> {"Mc","L"}; +lookup(73104) -> {"Mn","NSM"}; +lookup(73105) -> {"Mn","NSM"}; +lookup(73107) -> {"Mc","L"}; +lookup(73108) -> {"Mc","L"}; +lookup(73109) -> {"Mn","NSM"}; +lookup(73110) -> {"Mc","L"}; +lookup(73111) -> {"Mn","NSM"}; +lookup(73112) -> {"Lo","L"}; +lookup(73120) -> {"Nd","L"}; +lookup(73121) -> {"Nd","L"}; +lookup(73122) -> {"Nd","L"}; +lookup(73123) -> {"Nd","L"}; +lookup(73124) -> {"Nd","L"}; +lookup(73125) -> {"Nd","L"}; +lookup(73126) -> {"Nd","L"}; +lookup(73127) -> {"Nd","L"}; +lookup(73128) -> {"Nd","L"}; +lookup(73129) -> {"Nd","L"}; +lookup(73440) -> {"Lo","L"}; +lookup(73441) -> {"Lo","L"}; +lookup(73442) -> {"Lo","L"}; +lookup(73443) -> {"Lo","L"}; +lookup(73444) -> {"Lo","L"}; +lookup(73445) -> {"Lo","L"}; +lookup(73446) -> {"Lo","L"}; +lookup(73447) -> {"Lo","L"}; +lookup(73448) -> {"Lo","L"}; +lookup(73449) -> {"Lo","L"}; +lookup(73450) -> {"Lo","L"}; +lookup(73451) -> {"Lo","L"}; +lookup(73452) -> {"Lo","L"}; +lookup(73453) -> {"Lo","L"}; +lookup(73454) -> {"Lo","L"}; +lookup(73455) -> {"Lo","L"}; +lookup(73456) -> {"Lo","L"}; +lookup(73457) -> {"Lo","L"}; +lookup(73458) -> {"Lo","L"}; +lookup(73459) -> {"Mn","NSM"}; +lookup(73460) -> {"Mn","NSM"}; +lookup(73461) -> {"Mc","L"}; +lookup(73462) -> {"Mc","L"}; +lookup(73463) -> {"Po","L"}; +lookup(73464) -> {"Po","L"}; +lookup(73648) -> {"Lo","L"}; +lookup(73664) -> {"No","L"}; +lookup(73665) -> {"No","L"}; +lookup(73666) -> {"No","L"}; +lookup(73667) -> {"No","L"}; +lookup(73668) -> {"No","L"}; +lookup(73669) -> {"No","L"}; +lookup(73670) -> {"No","L"}; +lookup(73671) -> {"No","L"}; +lookup(73672) -> {"No","L"}; +lookup(73673) -> {"No","L"}; 
+lookup(73674) -> {"No","L"}; +lookup(73675) -> {"No","L"}; +lookup(73676) -> {"No","L"}; +lookup(73677) -> {"No","L"}; +lookup(73678) -> {"No","L"}; +lookup(73679) -> {"No","L"}; +lookup(73680) -> {"No","L"}; +lookup(73681) -> {"No","L"}; +lookup(73682) -> {"No","L"}; +lookup(73683) -> {"No","L"}; +lookup(73684) -> {"No","L"}; +lookup(73685) -> {"So","ON"}; +lookup(73686) -> {"So","ON"}; +lookup(73687) -> {"So","ON"}; +lookup(73688) -> {"So","ON"}; +lookup(73689) -> {"So","ON"}; +lookup(73690) -> {"So","ON"}; +lookup(73691) -> {"So","ON"}; +lookup(73692) -> {"So","ON"}; +lookup(73693) -> {"Sc","ET"}; +lookup(73694) -> {"Sc","ET"}; +lookup(73695) -> {"Sc","ET"}; +lookup(73696) -> {"Sc","ET"}; +lookup(73697) -> {"So","ON"}; +lookup(73698) -> {"So","ON"}; +lookup(73699) -> {"So","ON"}; +lookup(73700) -> {"So","ON"}; +lookup(73701) -> {"So","ON"}; +lookup(73702) -> {"So","ON"}; +lookup(73703) -> {"So","ON"}; +lookup(73704) -> {"So","ON"}; +lookup(73705) -> {"So","ON"}; +lookup(73706) -> {"So","ON"}; +lookup(73707) -> {"So","ON"}; +lookup(73708) -> {"So","ON"}; +lookup(73709) -> {"So","ON"}; +lookup(73710) -> {"So","ON"}; +lookup(73711) -> {"So","ON"}; +lookup(73712) -> {"So","ON"}; +lookup(73713) -> {"So","ON"}; +lookup(73727) -> {"Po","L"}; +lookup(73728) -> {"Lo","L"}; +lookup(73729) -> {"Lo","L"}; +lookup(73730) -> {"Lo","L"}; +lookup(73731) -> {"Lo","L"}; +lookup(73732) -> {"Lo","L"}; +lookup(73733) -> {"Lo","L"}; +lookup(73734) -> {"Lo","L"}; +lookup(73735) -> {"Lo","L"}; +lookup(73736) -> {"Lo","L"}; +lookup(73737) -> {"Lo","L"}; +lookup(73738) -> {"Lo","L"}; +lookup(73739) -> {"Lo","L"}; +lookup(73740) -> {"Lo","L"}; +lookup(73741) -> {"Lo","L"}; +lookup(73742) -> {"Lo","L"}; +lookup(73743) -> {"Lo","L"}; +lookup(73744) -> {"Lo","L"}; +lookup(73745) -> {"Lo","L"}; +lookup(73746) -> {"Lo","L"}; +lookup(73747) -> {"Lo","L"}; +lookup(73748) -> {"Lo","L"}; +lookup(73749) -> {"Lo","L"}; +lookup(73750) -> {"Lo","L"}; +lookup(73751) -> {"Lo","L"}; +lookup(73752) -> 
{"Lo","L"}; +lookup(73753) -> {"Lo","L"}; +lookup(73754) -> {"Lo","L"}; +lookup(73755) -> {"Lo","L"}; +lookup(73756) -> {"Lo","L"}; +lookup(73757) -> {"Lo","L"}; +lookup(73758) -> {"Lo","L"}; +lookup(73759) -> {"Lo","L"}; +lookup(73760) -> {"Lo","L"}; +lookup(73761) -> {"Lo","L"}; +lookup(73762) -> {"Lo","L"}; +lookup(73763) -> {"Lo","L"}; +lookup(73764) -> {"Lo","L"}; +lookup(73765) -> {"Lo","L"}; +lookup(73766) -> {"Lo","L"}; +lookup(73767) -> {"Lo","L"}; +lookup(73768) -> {"Lo","L"}; +lookup(73769) -> {"Lo","L"}; +lookup(73770) -> {"Lo","L"}; +lookup(73771) -> {"Lo","L"}; +lookup(73772) -> {"Lo","L"}; +lookup(73773) -> {"Lo","L"}; +lookup(73774) -> {"Lo","L"}; +lookup(73775) -> {"Lo","L"}; +lookup(73776) -> {"Lo","L"}; +lookup(73777) -> {"Lo","L"}; +lookup(73778) -> {"Lo","L"}; +lookup(73779) -> {"Lo","L"}; +lookup(73780) -> {"Lo","L"}; +lookup(73781) -> {"Lo","L"}; +lookup(73782) -> {"Lo","L"}; +lookup(73783) -> {"Lo","L"}; +lookup(73784) -> {"Lo","L"}; +lookup(73785) -> {"Lo","L"}; +lookup(73786) -> {"Lo","L"}; +lookup(73787) -> {"Lo","L"}; +lookup(73788) -> {"Lo","L"}; +lookup(73789) -> {"Lo","L"}; +lookup(73790) -> {"Lo","L"}; +lookup(73791) -> {"Lo","L"}; +lookup(73792) -> {"Lo","L"}; +lookup(73793) -> {"Lo","L"}; +lookup(73794) -> {"Lo","L"}; +lookup(73795) -> {"Lo","L"}; +lookup(73796) -> {"Lo","L"}; +lookup(73797) -> {"Lo","L"}; +lookup(73798) -> {"Lo","L"}; +lookup(73799) -> {"Lo","L"}; +lookup(73800) -> {"Lo","L"}; +lookup(73801) -> {"Lo","L"}; +lookup(73802) -> {"Lo","L"}; +lookup(73803) -> {"Lo","L"}; +lookup(73804) -> {"Lo","L"}; +lookup(73805) -> {"Lo","L"}; +lookup(73806) -> {"Lo","L"}; +lookup(73807) -> {"Lo","L"}; +lookup(73808) -> {"Lo","L"}; +lookup(73809) -> {"Lo","L"}; +lookup(73810) -> {"Lo","L"}; +lookup(73811) -> {"Lo","L"}; +lookup(73812) -> {"Lo","L"}; +lookup(73813) -> {"Lo","L"}; +lookup(73814) -> {"Lo","L"}; +lookup(73815) -> {"Lo","L"}; +lookup(73816) -> {"Lo","L"}; +lookup(73817) -> {"Lo","L"}; +lookup(73818) -> {"Lo","L"}; 
+lookup(73819) -> {"Lo","L"}; +lookup(73820) -> {"Lo","L"}; +lookup(73821) -> {"Lo","L"}; +lookup(73822) -> {"Lo","L"}; +lookup(73823) -> {"Lo","L"}; +lookup(73824) -> {"Lo","L"}; +lookup(73825) -> {"Lo","L"}; +lookup(73826) -> {"Lo","L"}; +lookup(73827) -> {"Lo","L"}; +lookup(73828) -> {"Lo","L"}; +lookup(73829) -> {"Lo","L"}; +lookup(73830) -> {"Lo","L"}; +lookup(73831) -> {"Lo","L"}; +lookup(73832) -> {"Lo","L"}; +lookup(73833) -> {"Lo","L"}; +lookup(73834) -> {"Lo","L"}; +lookup(73835) -> {"Lo","L"}; +lookup(73836) -> {"Lo","L"}; +lookup(73837) -> {"Lo","L"}; +lookup(73838) -> {"Lo","L"}; +lookup(73839) -> {"Lo","L"}; +lookup(73840) -> {"Lo","L"}; +lookup(73841) -> {"Lo","L"}; +lookup(73842) -> {"Lo","L"}; +lookup(73843) -> {"Lo","L"}; +lookup(73844) -> {"Lo","L"}; +lookup(73845) -> {"Lo","L"}; +lookup(73846) -> {"Lo","L"}; +lookup(73847) -> {"Lo","L"}; +lookup(73848) -> {"Lo","L"}; +lookup(73849) -> {"Lo","L"}; +lookup(73850) -> {"Lo","L"}; +lookup(73851) -> {"Lo","L"}; +lookup(73852) -> {"Lo","L"}; +lookup(73853) -> {"Lo","L"}; +lookup(73854) -> {"Lo","L"}; +lookup(73855) -> {"Lo","L"}; +lookup(73856) -> {"Lo","L"}; +lookup(73857) -> {"Lo","L"}; +lookup(73858) -> {"Lo","L"}; +lookup(73859) -> {"Lo","L"}; +lookup(73860) -> {"Lo","L"}; +lookup(73861) -> {"Lo","L"}; +lookup(73862) -> {"Lo","L"}; +lookup(73863) -> {"Lo","L"}; +lookup(73864) -> {"Lo","L"}; +lookup(73865) -> {"Lo","L"}; +lookup(73866) -> {"Lo","L"}; +lookup(73867) -> {"Lo","L"}; +lookup(73868) -> {"Lo","L"}; +lookup(73869) -> {"Lo","L"}; +lookup(73870) -> {"Lo","L"}; +lookup(73871) -> {"Lo","L"}; +lookup(73872) -> {"Lo","L"}; +lookup(73873) -> {"Lo","L"}; +lookup(73874) -> {"Lo","L"}; +lookup(73875) -> {"Lo","L"}; +lookup(73876) -> {"Lo","L"}; +lookup(73877) -> {"Lo","L"}; +lookup(73878) -> {"Lo","L"}; +lookup(73879) -> {"Lo","L"}; +lookup(73880) -> {"Lo","L"}; +lookup(73881) -> {"Lo","L"}; +lookup(73882) -> {"Lo","L"}; +lookup(73883) -> {"Lo","L"}; +lookup(73884) -> {"Lo","L"}; +lookup(73885) -> 
{"Lo","L"}; +lookup(73886) -> {"Lo","L"}; +lookup(73887) -> {"Lo","L"}; +lookup(73888) -> {"Lo","L"}; +lookup(73889) -> {"Lo","L"}; +lookup(73890) -> {"Lo","L"}; +lookup(73891) -> {"Lo","L"}; +lookup(73892) -> {"Lo","L"}; +lookup(73893) -> {"Lo","L"}; +lookup(73894) -> {"Lo","L"}; +lookup(73895) -> {"Lo","L"}; +lookup(73896) -> {"Lo","L"}; +lookup(73897) -> {"Lo","L"}; +lookup(73898) -> {"Lo","L"}; +lookup(73899) -> {"Lo","L"}; +lookup(73900) -> {"Lo","L"}; +lookup(73901) -> {"Lo","L"}; +lookup(73902) -> {"Lo","L"}; +lookup(73903) -> {"Lo","L"}; +lookup(73904) -> {"Lo","L"}; +lookup(73905) -> {"Lo","L"}; +lookup(73906) -> {"Lo","L"}; +lookup(73907) -> {"Lo","L"}; +lookup(73908) -> {"Lo","L"}; +lookup(73909) -> {"Lo","L"}; +lookup(73910) -> {"Lo","L"}; +lookup(73911) -> {"Lo","L"}; +lookup(73912) -> {"Lo","L"}; +lookup(73913) -> {"Lo","L"}; +lookup(73914) -> {"Lo","L"}; +lookup(73915) -> {"Lo","L"}; +lookup(73916) -> {"Lo","L"}; +lookup(73917) -> {"Lo","L"}; +lookup(73918) -> {"Lo","L"}; +lookup(73919) -> {"Lo","L"}; +lookup(73920) -> {"Lo","L"}; +lookup(73921) -> {"Lo","L"}; +lookup(73922) -> {"Lo","L"}; +lookup(73923) -> {"Lo","L"}; +lookup(73924) -> {"Lo","L"}; +lookup(73925) -> {"Lo","L"}; +lookup(73926) -> {"Lo","L"}; +lookup(73927) -> {"Lo","L"}; +lookup(73928) -> {"Lo","L"}; +lookup(73929) -> {"Lo","L"}; +lookup(73930) -> {"Lo","L"}; +lookup(73931) -> {"Lo","L"}; +lookup(73932) -> {"Lo","L"}; +lookup(73933) -> {"Lo","L"}; +lookup(73934) -> {"Lo","L"}; +lookup(73935) -> {"Lo","L"}; +lookup(73936) -> {"Lo","L"}; +lookup(73937) -> {"Lo","L"}; +lookup(73938) -> {"Lo","L"}; +lookup(73939) -> {"Lo","L"}; +lookup(73940) -> {"Lo","L"}; +lookup(73941) -> {"Lo","L"}; +lookup(73942) -> {"Lo","L"}; +lookup(73943) -> {"Lo","L"}; +lookup(73944) -> {"Lo","L"}; +lookup(73945) -> {"Lo","L"}; +lookup(73946) -> {"Lo","L"}; +lookup(73947) -> {"Lo","L"}; +lookup(73948) -> {"Lo","L"}; +lookup(73949) -> {"Lo","L"}; +lookup(73950) -> {"Lo","L"}; +lookup(73951) -> {"Lo","L"}; 
+lookup(73952) -> {"Lo","L"}; +lookup(73953) -> {"Lo","L"}; +lookup(73954) -> {"Lo","L"}; +lookup(73955) -> {"Lo","L"}; +lookup(73956) -> {"Lo","L"}; +lookup(73957) -> {"Lo","L"}; +lookup(73958) -> {"Lo","L"}; +lookup(73959) -> {"Lo","L"}; +lookup(73960) -> {"Lo","L"}; +lookup(73961) -> {"Lo","L"}; +lookup(73962) -> {"Lo","L"}; +lookup(73963) -> {"Lo","L"}; +lookup(73964) -> {"Lo","L"}; +lookup(73965) -> {"Lo","L"}; +lookup(73966) -> {"Lo","L"}; +lookup(73967) -> {"Lo","L"}; +lookup(73968) -> {"Lo","L"}; +lookup(73969) -> {"Lo","L"}; +lookup(73970) -> {"Lo","L"}; +lookup(73971) -> {"Lo","L"}; +lookup(73972) -> {"Lo","L"}; +lookup(73973) -> {"Lo","L"}; +lookup(73974) -> {"Lo","L"}; +lookup(73975) -> {"Lo","L"}; +lookup(73976) -> {"Lo","L"}; +lookup(73977) -> {"Lo","L"}; +lookup(73978) -> {"Lo","L"}; +lookup(73979) -> {"Lo","L"}; +lookup(73980) -> {"Lo","L"}; +lookup(73981) -> {"Lo","L"}; +lookup(73982) -> {"Lo","L"}; +lookup(73983) -> {"Lo","L"}; +lookup(73984) -> {"Lo","L"}; +lookup(73985) -> {"Lo","L"}; +lookup(73986) -> {"Lo","L"}; +lookup(73987) -> {"Lo","L"}; +lookup(73988) -> {"Lo","L"}; +lookup(73989) -> {"Lo","L"}; +lookup(73990) -> {"Lo","L"}; +lookup(73991) -> {"Lo","L"}; +lookup(73992) -> {"Lo","L"}; +lookup(73993) -> {"Lo","L"}; +lookup(73994) -> {"Lo","L"}; +lookup(73995) -> {"Lo","L"}; +lookup(73996) -> {"Lo","L"}; +lookup(73997) -> {"Lo","L"}; +lookup(73998) -> {"Lo","L"}; +lookup(73999) -> {"Lo","L"}; +lookup(74000) -> {"Lo","L"}; +lookup(74001) -> {"Lo","L"}; +lookup(74002) -> {"Lo","L"}; +lookup(74003) -> {"Lo","L"}; +lookup(74004) -> {"Lo","L"}; +lookup(74005) -> {"Lo","L"}; +lookup(74006) -> {"Lo","L"}; +lookup(74007) -> {"Lo","L"}; +lookup(74008) -> {"Lo","L"}; +lookup(74009) -> {"Lo","L"}; +lookup(74010) -> {"Lo","L"}; +lookup(74011) -> {"Lo","L"}; +lookup(74012) -> {"Lo","L"}; +lookup(74013) -> {"Lo","L"}; +lookup(74014) -> {"Lo","L"}; +lookup(74015) -> {"Lo","L"}; +lookup(74016) -> {"Lo","L"}; +lookup(74017) -> {"Lo","L"}; +lookup(74018) -> 
{"Lo","L"}; +lookup(74019) -> {"Lo","L"}; +lookup(74020) -> {"Lo","L"}; +lookup(74021) -> {"Lo","L"}; +lookup(74022) -> {"Lo","L"}; +lookup(74023) -> {"Lo","L"}; +lookup(74024) -> {"Lo","L"}; +lookup(74025) -> {"Lo","L"}; +lookup(74026) -> {"Lo","L"}; +lookup(74027) -> {"Lo","L"}; +lookup(74028) -> {"Lo","L"}; +lookup(74029) -> {"Lo","L"}; +lookup(74030) -> {"Lo","L"}; +lookup(74031) -> {"Lo","L"}; +lookup(74032) -> {"Lo","L"}; +lookup(74033) -> {"Lo","L"}; +lookup(74034) -> {"Lo","L"}; +lookup(74035) -> {"Lo","L"}; +lookup(74036) -> {"Lo","L"}; +lookup(74037) -> {"Lo","L"}; +lookup(74038) -> {"Lo","L"}; +lookup(74039) -> {"Lo","L"}; +lookup(74040) -> {"Lo","L"}; +lookup(74041) -> {"Lo","L"}; +lookup(74042) -> {"Lo","L"}; +lookup(74043) -> {"Lo","L"}; +lookup(74044) -> {"Lo","L"}; +lookup(74045) -> {"Lo","L"}; +lookup(74046) -> {"Lo","L"}; +lookup(74047) -> {"Lo","L"}; +lookup(74048) -> {"Lo","L"}; +lookup(74049) -> {"Lo","L"}; +lookup(74050) -> {"Lo","L"}; +lookup(74051) -> {"Lo","L"}; +lookup(74052) -> {"Lo","L"}; +lookup(74053) -> {"Lo","L"}; +lookup(74054) -> {"Lo","L"}; +lookup(74055) -> {"Lo","L"}; +lookup(74056) -> {"Lo","L"}; +lookup(74057) -> {"Lo","L"}; +lookup(74058) -> {"Lo","L"}; +lookup(74059) -> {"Lo","L"}; +lookup(74060) -> {"Lo","L"}; +lookup(74061) -> {"Lo","L"}; +lookup(74062) -> {"Lo","L"}; +lookup(74063) -> {"Lo","L"}; +lookup(74064) -> {"Lo","L"}; +lookup(74065) -> {"Lo","L"}; +lookup(74066) -> {"Lo","L"}; +lookup(74067) -> {"Lo","L"}; +lookup(74068) -> {"Lo","L"}; +lookup(74069) -> {"Lo","L"}; +lookup(74070) -> {"Lo","L"}; +lookup(74071) -> {"Lo","L"}; +lookup(74072) -> {"Lo","L"}; +lookup(74073) -> {"Lo","L"}; +lookup(74074) -> {"Lo","L"}; +lookup(74075) -> {"Lo","L"}; +lookup(74076) -> {"Lo","L"}; +lookup(74077) -> {"Lo","L"}; +lookup(74078) -> {"Lo","L"}; +lookup(74079) -> {"Lo","L"}; +lookup(74080) -> {"Lo","L"}; +lookup(74081) -> {"Lo","L"}; +lookup(74082) -> {"Lo","L"}; +lookup(74083) -> {"Lo","L"}; +lookup(74084) -> {"Lo","L"}; 
+lookup(74085) -> {"Lo","L"}; +lookup(74086) -> {"Lo","L"}; +lookup(74087) -> {"Lo","L"}; +lookup(74088) -> {"Lo","L"}; +lookup(74089) -> {"Lo","L"}; +lookup(74090) -> {"Lo","L"}; +lookup(74091) -> {"Lo","L"}; +lookup(74092) -> {"Lo","L"}; +lookup(74093) -> {"Lo","L"}; +lookup(74094) -> {"Lo","L"}; +lookup(74095) -> {"Lo","L"}; +lookup(74096) -> {"Lo","L"}; +lookup(74097) -> {"Lo","L"}; +lookup(74098) -> {"Lo","L"}; +lookup(74099) -> {"Lo","L"}; +lookup(74100) -> {"Lo","L"}; +lookup(74101) -> {"Lo","L"}; +lookup(74102) -> {"Lo","L"}; +lookup(74103) -> {"Lo","L"}; +lookup(74104) -> {"Lo","L"}; +lookup(74105) -> {"Lo","L"}; +lookup(74106) -> {"Lo","L"}; +lookup(74107) -> {"Lo","L"}; +lookup(74108) -> {"Lo","L"}; +lookup(74109) -> {"Lo","L"}; +lookup(74110) -> {"Lo","L"}; +lookup(74111) -> {"Lo","L"}; +lookup(74112) -> {"Lo","L"}; +lookup(74113) -> {"Lo","L"}; +lookup(74114) -> {"Lo","L"}; +lookup(74115) -> {"Lo","L"}; +lookup(74116) -> {"Lo","L"}; +lookup(74117) -> {"Lo","L"}; +lookup(74118) -> {"Lo","L"}; +lookup(74119) -> {"Lo","L"}; +lookup(74120) -> {"Lo","L"}; +lookup(74121) -> {"Lo","L"}; +lookup(74122) -> {"Lo","L"}; +lookup(74123) -> {"Lo","L"}; +lookup(74124) -> {"Lo","L"}; +lookup(74125) -> {"Lo","L"}; +lookup(74126) -> {"Lo","L"}; +lookup(74127) -> {"Lo","L"}; +lookup(74128) -> {"Lo","L"}; +lookup(74129) -> {"Lo","L"}; +lookup(74130) -> {"Lo","L"}; +lookup(74131) -> {"Lo","L"}; +lookup(74132) -> {"Lo","L"}; +lookup(74133) -> {"Lo","L"}; +lookup(74134) -> {"Lo","L"}; +lookup(74135) -> {"Lo","L"}; +lookup(74136) -> {"Lo","L"}; +lookup(74137) -> {"Lo","L"}; +lookup(74138) -> {"Lo","L"}; +lookup(74139) -> {"Lo","L"}; +lookup(74140) -> {"Lo","L"}; +lookup(74141) -> {"Lo","L"}; +lookup(74142) -> {"Lo","L"}; +lookup(74143) -> {"Lo","L"}; +lookup(74144) -> {"Lo","L"}; +lookup(74145) -> {"Lo","L"}; +lookup(74146) -> {"Lo","L"}; +lookup(74147) -> {"Lo","L"}; +lookup(74148) -> {"Lo","L"}; +lookup(74149) -> {"Lo","L"}; +lookup(74150) -> {"Lo","L"}; +lookup(74151) -> 
{"Lo","L"}; +lookup(74152) -> {"Lo","L"}; +lookup(74153) -> {"Lo","L"}; +lookup(74154) -> {"Lo","L"}; +lookup(74155) -> {"Lo","L"}; +lookup(74156) -> {"Lo","L"}; +lookup(74157) -> {"Lo","L"}; +lookup(74158) -> {"Lo","L"}; +lookup(74159) -> {"Lo","L"}; +lookup(74160) -> {"Lo","L"}; +lookup(74161) -> {"Lo","L"}; +lookup(74162) -> {"Lo","L"}; +lookup(74163) -> {"Lo","L"}; +lookup(74164) -> {"Lo","L"}; +lookup(74165) -> {"Lo","L"}; +lookup(74166) -> {"Lo","L"}; +lookup(74167) -> {"Lo","L"}; +lookup(74168) -> {"Lo","L"}; +lookup(74169) -> {"Lo","L"}; +lookup(74170) -> {"Lo","L"}; +lookup(74171) -> {"Lo","L"}; +lookup(74172) -> {"Lo","L"}; +lookup(74173) -> {"Lo","L"}; +lookup(74174) -> {"Lo","L"}; +lookup(74175) -> {"Lo","L"}; +lookup(74176) -> {"Lo","L"}; +lookup(74177) -> {"Lo","L"}; +lookup(74178) -> {"Lo","L"}; +lookup(74179) -> {"Lo","L"}; +lookup(74180) -> {"Lo","L"}; +lookup(74181) -> {"Lo","L"}; +lookup(74182) -> {"Lo","L"}; +lookup(74183) -> {"Lo","L"}; +lookup(74184) -> {"Lo","L"}; +lookup(74185) -> {"Lo","L"}; +lookup(74186) -> {"Lo","L"}; +lookup(74187) -> {"Lo","L"}; +lookup(74188) -> {"Lo","L"}; +lookup(74189) -> {"Lo","L"}; +lookup(74190) -> {"Lo","L"}; +lookup(74191) -> {"Lo","L"}; +lookup(74192) -> {"Lo","L"}; +lookup(74193) -> {"Lo","L"}; +lookup(74194) -> {"Lo","L"}; +lookup(74195) -> {"Lo","L"}; +lookup(74196) -> {"Lo","L"}; +lookup(74197) -> {"Lo","L"}; +lookup(74198) -> {"Lo","L"}; +lookup(74199) -> {"Lo","L"}; +lookup(74200) -> {"Lo","L"}; +lookup(74201) -> {"Lo","L"}; +lookup(74202) -> {"Lo","L"}; +lookup(74203) -> {"Lo","L"}; +lookup(74204) -> {"Lo","L"}; +lookup(74205) -> {"Lo","L"}; +lookup(74206) -> {"Lo","L"}; +lookup(74207) -> {"Lo","L"}; +lookup(74208) -> {"Lo","L"}; +lookup(74209) -> {"Lo","L"}; +lookup(74210) -> {"Lo","L"}; +lookup(74211) -> {"Lo","L"}; +lookup(74212) -> {"Lo","L"}; +lookup(74213) -> {"Lo","L"}; +lookup(74214) -> {"Lo","L"}; +lookup(74215) -> {"Lo","L"}; +lookup(74216) -> {"Lo","L"}; +lookup(74217) -> {"Lo","L"}; 
+lookup(74218) -> {"Lo","L"}; +lookup(74219) -> {"Lo","L"}; +lookup(74220) -> {"Lo","L"}; +lookup(74221) -> {"Lo","L"}; +lookup(74222) -> {"Lo","L"}; +lookup(74223) -> {"Lo","L"}; +lookup(74224) -> {"Lo","L"}; +lookup(74225) -> {"Lo","L"}; +lookup(74226) -> {"Lo","L"}; +lookup(74227) -> {"Lo","L"}; +lookup(74228) -> {"Lo","L"}; +lookup(74229) -> {"Lo","L"}; +lookup(74230) -> {"Lo","L"}; +lookup(74231) -> {"Lo","L"}; +lookup(74232) -> {"Lo","L"}; +lookup(74233) -> {"Lo","L"}; +lookup(74234) -> {"Lo","L"}; +lookup(74235) -> {"Lo","L"}; +lookup(74236) -> {"Lo","L"}; +lookup(74237) -> {"Lo","L"}; +lookup(74238) -> {"Lo","L"}; +lookup(74239) -> {"Lo","L"}; +lookup(74240) -> {"Lo","L"}; +lookup(74241) -> {"Lo","L"}; +lookup(74242) -> {"Lo","L"}; +lookup(74243) -> {"Lo","L"}; +lookup(74244) -> {"Lo","L"}; +lookup(74245) -> {"Lo","L"}; +lookup(74246) -> {"Lo","L"}; +lookup(74247) -> {"Lo","L"}; +lookup(74248) -> {"Lo","L"}; +lookup(74249) -> {"Lo","L"}; +lookup(74250) -> {"Lo","L"}; +lookup(74251) -> {"Lo","L"}; +lookup(74252) -> {"Lo","L"}; +lookup(74253) -> {"Lo","L"}; +lookup(74254) -> {"Lo","L"}; +lookup(74255) -> {"Lo","L"}; +lookup(74256) -> {"Lo","L"}; +lookup(74257) -> {"Lo","L"}; +lookup(74258) -> {"Lo","L"}; +lookup(74259) -> {"Lo","L"}; +lookup(74260) -> {"Lo","L"}; +lookup(74261) -> {"Lo","L"}; +lookup(74262) -> {"Lo","L"}; +lookup(74263) -> {"Lo","L"}; +lookup(74264) -> {"Lo","L"}; +lookup(74265) -> {"Lo","L"}; +lookup(74266) -> {"Lo","L"}; +lookup(74267) -> {"Lo","L"}; +lookup(74268) -> {"Lo","L"}; +lookup(74269) -> {"Lo","L"}; +lookup(74270) -> {"Lo","L"}; +lookup(74271) -> {"Lo","L"}; +lookup(74272) -> {"Lo","L"}; +lookup(74273) -> {"Lo","L"}; +lookup(74274) -> {"Lo","L"}; +lookup(74275) -> {"Lo","L"}; +lookup(74276) -> {"Lo","L"}; +lookup(74277) -> {"Lo","L"}; +lookup(74278) -> {"Lo","L"}; +lookup(74279) -> {"Lo","L"}; +lookup(74280) -> {"Lo","L"}; +lookup(74281) -> {"Lo","L"}; +lookup(74282) -> {"Lo","L"}; +lookup(74283) -> {"Lo","L"}; +lookup(74284) -> 
{"Lo","L"}; +lookup(74285) -> {"Lo","L"}; +lookup(74286) -> {"Lo","L"}; +lookup(74287) -> {"Lo","L"}; +lookup(74288) -> {"Lo","L"}; +lookup(74289) -> {"Lo","L"}; +lookup(74290) -> {"Lo","L"}; +lookup(74291) -> {"Lo","L"}; +lookup(74292) -> {"Lo","L"}; +lookup(74293) -> {"Lo","L"}; +lookup(74294) -> {"Lo","L"}; +lookup(74295) -> {"Lo","L"}; +lookup(74296) -> {"Lo","L"}; +lookup(74297) -> {"Lo","L"}; +lookup(74298) -> {"Lo","L"}; +lookup(74299) -> {"Lo","L"}; +lookup(74300) -> {"Lo","L"}; +lookup(74301) -> {"Lo","L"}; +lookup(74302) -> {"Lo","L"}; +lookup(74303) -> {"Lo","L"}; +lookup(74304) -> {"Lo","L"}; +lookup(74305) -> {"Lo","L"}; +lookup(74306) -> {"Lo","L"}; +lookup(74307) -> {"Lo","L"}; +lookup(74308) -> {"Lo","L"}; +lookup(74309) -> {"Lo","L"}; +lookup(74310) -> {"Lo","L"}; +lookup(74311) -> {"Lo","L"}; +lookup(74312) -> {"Lo","L"}; +lookup(74313) -> {"Lo","L"}; +lookup(74314) -> {"Lo","L"}; +lookup(74315) -> {"Lo","L"}; +lookup(74316) -> {"Lo","L"}; +lookup(74317) -> {"Lo","L"}; +lookup(74318) -> {"Lo","L"}; +lookup(74319) -> {"Lo","L"}; +lookup(74320) -> {"Lo","L"}; +lookup(74321) -> {"Lo","L"}; +lookup(74322) -> {"Lo","L"}; +lookup(74323) -> {"Lo","L"}; +lookup(74324) -> {"Lo","L"}; +lookup(74325) -> {"Lo","L"}; +lookup(74326) -> {"Lo","L"}; +lookup(74327) -> {"Lo","L"}; +lookup(74328) -> {"Lo","L"}; +lookup(74329) -> {"Lo","L"}; +lookup(74330) -> {"Lo","L"}; +lookup(74331) -> {"Lo","L"}; +lookup(74332) -> {"Lo","L"}; +lookup(74333) -> {"Lo","L"}; +lookup(74334) -> {"Lo","L"}; +lookup(74335) -> {"Lo","L"}; +lookup(74336) -> {"Lo","L"}; +lookup(74337) -> {"Lo","L"}; +lookup(74338) -> {"Lo","L"}; +lookup(74339) -> {"Lo","L"}; +lookup(74340) -> {"Lo","L"}; +lookup(74341) -> {"Lo","L"}; +lookup(74342) -> {"Lo","L"}; +lookup(74343) -> {"Lo","L"}; +lookup(74344) -> {"Lo","L"}; +lookup(74345) -> {"Lo","L"}; +lookup(74346) -> {"Lo","L"}; +lookup(74347) -> {"Lo","L"}; +lookup(74348) -> {"Lo","L"}; +lookup(74349) -> {"Lo","L"}; +lookup(74350) -> {"Lo","L"}; 
+lookup(74351) -> {"Lo","L"}; +lookup(74352) -> {"Lo","L"}; +lookup(74353) -> {"Lo","L"}; +lookup(74354) -> {"Lo","L"}; +lookup(74355) -> {"Lo","L"}; +lookup(74356) -> {"Lo","L"}; +lookup(74357) -> {"Lo","L"}; +lookup(74358) -> {"Lo","L"}; +lookup(74359) -> {"Lo","L"}; +lookup(74360) -> {"Lo","L"}; +lookup(74361) -> {"Lo","L"}; +lookup(74362) -> {"Lo","L"}; +lookup(74363) -> {"Lo","L"}; +lookup(74364) -> {"Lo","L"}; +lookup(74365) -> {"Lo","L"}; +lookup(74366) -> {"Lo","L"}; +lookup(74367) -> {"Lo","L"}; +lookup(74368) -> {"Lo","L"}; +lookup(74369) -> {"Lo","L"}; +lookup(74370) -> {"Lo","L"}; +lookup(74371) -> {"Lo","L"}; +lookup(74372) -> {"Lo","L"}; +lookup(74373) -> {"Lo","L"}; +lookup(74374) -> {"Lo","L"}; +lookup(74375) -> {"Lo","L"}; +lookup(74376) -> {"Lo","L"}; +lookup(74377) -> {"Lo","L"}; +lookup(74378) -> {"Lo","L"}; +lookup(74379) -> {"Lo","L"}; +lookup(74380) -> {"Lo","L"}; +lookup(74381) -> {"Lo","L"}; +lookup(74382) -> {"Lo","L"}; +lookup(74383) -> {"Lo","L"}; +lookup(74384) -> {"Lo","L"}; +lookup(74385) -> {"Lo","L"}; +lookup(74386) -> {"Lo","L"}; +lookup(74387) -> {"Lo","L"}; +lookup(74388) -> {"Lo","L"}; +lookup(74389) -> {"Lo","L"}; +lookup(74390) -> {"Lo","L"}; +lookup(74391) -> {"Lo","L"}; +lookup(74392) -> {"Lo","L"}; +lookup(74393) -> {"Lo","L"}; +lookup(74394) -> {"Lo","L"}; +lookup(74395) -> {"Lo","L"}; +lookup(74396) -> {"Lo","L"}; +lookup(74397) -> {"Lo","L"}; +lookup(74398) -> {"Lo","L"}; +lookup(74399) -> {"Lo","L"}; +lookup(74400) -> {"Lo","L"}; +lookup(74401) -> {"Lo","L"}; +lookup(74402) -> {"Lo","L"}; +lookup(74403) -> {"Lo","L"}; +lookup(74404) -> {"Lo","L"}; +lookup(74405) -> {"Lo","L"}; +lookup(74406) -> {"Lo","L"}; +lookup(74407) -> {"Lo","L"}; +lookup(74408) -> {"Lo","L"}; +lookup(74409) -> {"Lo","L"}; +lookup(74410) -> {"Lo","L"}; +lookup(74411) -> {"Lo","L"}; +lookup(74412) -> {"Lo","L"}; +lookup(74413) -> {"Lo","L"}; +lookup(74414) -> {"Lo","L"}; +lookup(74415) -> {"Lo","L"}; +lookup(74416) -> {"Lo","L"}; +lookup(74417) -> 
{"Lo","L"}; +lookup(74418) -> {"Lo","L"}; +lookup(74419) -> {"Lo","L"}; +lookup(74420) -> {"Lo","L"}; +lookup(74421) -> {"Lo","L"}; +lookup(74422) -> {"Lo","L"}; +lookup(74423) -> {"Lo","L"}; +lookup(74424) -> {"Lo","L"}; +lookup(74425) -> {"Lo","L"}; +lookup(74426) -> {"Lo","L"}; +lookup(74427) -> {"Lo","L"}; +lookup(74428) -> {"Lo","L"}; +lookup(74429) -> {"Lo","L"}; +lookup(74430) -> {"Lo","L"}; +lookup(74431) -> {"Lo","L"}; +lookup(74432) -> {"Lo","L"}; +lookup(74433) -> {"Lo","L"}; +lookup(74434) -> {"Lo","L"}; +lookup(74435) -> {"Lo","L"}; +lookup(74436) -> {"Lo","L"}; +lookup(74437) -> {"Lo","L"}; +lookup(74438) -> {"Lo","L"}; +lookup(74439) -> {"Lo","L"}; +lookup(74440) -> {"Lo","L"}; +lookup(74441) -> {"Lo","L"}; +lookup(74442) -> {"Lo","L"}; +lookup(74443) -> {"Lo","L"}; +lookup(74444) -> {"Lo","L"}; +lookup(74445) -> {"Lo","L"}; +lookup(74446) -> {"Lo","L"}; +lookup(74447) -> {"Lo","L"}; +lookup(74448) -> {"Lo","L"}; +lookup(74449) -> {"Lo","L"}; +lookup(74450) -> {"Lo","L"}; +lookup(74451) -> {"Lo","L"}; +lookup(74452) -> {"Lo","L"}; +lookup(74453) -> {"Lo","L"}; +lookup(74454) -> {"Lo","L"}; +lookup(74455) -> {"Lo","L"}; +lookup(74456) -> {"Lo","L"}; +lookup(74457) -> {"Lo","L"}; +lookup(74458) -> {"Lo","L"}; +lookup(74459) -> {"Lo","L"}; +lookup(74460) -> {"Lo","L"}; +lookup(74461) -> {"Lo","L"}; +lookup(74462) -> {"Lo","L"}; +lookup(74463) -> {"Lo","L"}; +lookup(74464) -> {"Lo","L"}; +lookup(74465) -> {"Lo","L"}; +lookup(74466) -> {"Lo","L"}; +lookup(74467) -> {"Lo","L"}; +lookup(74468) -> {"Lo","L"}; +lookup(74469) -> {"Lo","L"}; +lookup(74470) -> {"Lo","L"}; +lookup(74471) -> {"Lo","L"}; +lookup(74472) -> {"Lo","L"}; +lookup(74473) -> {"Lo","L"}; +lookup(74474) -> {"Lo","L"}; +lookup(74475) -> {"Lo","L"}; +lookup(74476) -> {"Lo","L"}; +lookup(74477) -> {"Lo","L"}; +lookup(74478) -> {"Lo","L"}; +lookup(74479) -> {"Lo","L"}; +lookup(74480) -> {"Lo","L"}; +lookup(74481) -> {"Lo","L"}; +lookup(74482) -> {"Lo","L"}; +lookup(74483) -> {"Lo","L"}; 
+lookup(74484) -> {"Lo","L"}; +lookup(74485) -> {"Lo","L"}; +lookup(74486) -> {"Lo","L"}; +lookup(74487) -> {"Lo","L"}; +lookup(74488) -> {"Lo","L"}; +lookup(74489) -> {"Lo","L"}; +lookup(74490) -> {"Lo","L"}; +lookup(74491) -> {"Lo","L"}; +lookup(74492) -> {"Lo","L"}; +lookup(74493) -> {"Lo","L"}; +lookup(74494) -> {"Lo","L"}; +lookup(74495) -> {"Lo","L"}; +lookup(74496) -> {"Lo","L"}; +lookup(74497) -> {"Lo","L"}; +lookup(74498) -> {"Lo","L"}; +lookup(74499) -> {"Lo","L"}; +lookup(74500) -> {"Lo","L"}; +lookup(74501) -> {"Lo","L"}; +lookup(74502) -> {"Lo","L"}; +lookup(74503) -> {"Lo","L"}; +lookup(74504) -> {"Lo","L"}; +lookup(74505) -> {"Lo","L"}; +lookup(74506) -> {"Lo","L"}; +lookup(74507) -> {"Lo","L"}; +lookup(74508) -> {"Lo","L"}; +lookup(74509) -> {"Lo","L"}; +lookup(74510) -> {"Lo","L"}; +lookup(74511) -> {"Lo","L"}; +lookup(74512) -> {"Lo","L"}; +lookup(74513) -> {"Lo","L"}; +lookup(74514) -> {"Lo","L"}; +lookup(74515) -> {"Lo","L"}; +lookup(74516) -> {"Lo","L"}; +lookup(74517) -> {"Lo","L"}; +lookup(74518) -> {"Lo","L"}; +lookup(74519) -> {"Lo","L"}; +lookup(74520) -> {"Lo","L"}; +lookup(74521) -> {"Lo","L"}; +lookup(74522) -> {"Lo","L"}; +lookup(74523) -> {"Lo","L"}; +lookup(74524) -> {"Lo","L"}; +lookup(74525) -> {"Lo","L"}; +lookup(74526) -> {"Lo","L"}; +lookup(74527) -> {"Lo","L"}; +lookup(74528) -> {"Lo","L"}; +lookup(74529) -> {"Lo","L"}; +lookup(74530) -> {"Lo","L"}; +lookup(74531) -> {"Lo","L"}; +lookup(74532) -> {"Lo","L"}; +lookup(74533) -> {"Lo","L"}; +lookup(74534) -> {"Lo","L"}; +lookup(74535) -> {"Lo","L"}; +lookup(74536) -> {"Lo","L"}; +lookup(74537) -> {"Lo","L"}; +lookup(74538) -> {"Lo","L"}; +lookup(74539) -> {"Lo","L"}; +lookup(74540) -> {"Lo","L"}; +lookup(74541) -> {"Lo","L"}; +lookup(74542) -> {"Lo","L"}; +lookup(74543) -> {"Lo","L"}; +lookup(74544) -> {"Lo","L"}; +lookup(74545) -> {"Lo","L"}; +lookup(74546) -> {"Lo","L"}; +lookup(74547) -> {"Lo","L"}; +lookup(74548) -> {"Lo","L"}; +lookup(74549) -> {"Lo","L"}; +lookup(74550) -> 
{"Lo","L"}; +lookup(74551) -> {"Lo","L"}; +lookup(74552) -> {"Lo","L"}; +lookup(74553) -> {"Lo","L"}; +lookup(74554) -> {"Lo","L"}; +lookup(74555) -> {"Lo","L"}; +lookup(74556) -> {"Lo","L"}; +lookup(74557) -> {"Lo","L"}; +lookup(74558) -> {"Lo","L"}; +lookup(74559) -> {"Lo","L"}; +lookup(74560) -> {"Lo","L"}; +lookup(74561) -> {"Lo","L"}; +lookup(74562) -> {"Lo","L"}; +lookup(74563) -> {"Lo","L"}; +lookup(74564) -> {"Lo","L"}; +lookup(74565) -> {"Lo","L"}; +lookup(74566) -> {"Lo","L"}; +lookup(74567) -> {"Lo","L"}; +lookup(74568) -> {"Lo","L"}; +lookup(74569) -> {"Lo","L"}; +lookup(74570) -> {"Lo","L"}; +lookup(74571) -> {"Lo","L"}; +lookup(74572) -> {"Lo","L"}; +lookup(74573) -> {"Lo","L"}; +lookup(74574) -> {"Lo","L"}; +lookup(74575) -> {"Lo","L"}; +lookup(74576) -> {"Lo","L"}; +lookup(74577) -> {"Lo","L"}; +lookup(74578) -> {"Lo","L"}; +lookup(74579) -> {"Lo","L"}; +lookup(74580) -> {"Lo","L"}; +lookup(74581) -> {"Lo","L"}; +lookup(74582) -> {"Lo","L"}; +lookup(74583) -> {"Lo","L"}; +lookup(74584) -> {"Lo","L"}; +lookup(74585) -> {"Lo","L"}; +lookup(74586) -> {"Lo","L"}; +lookup(74587) -> {"Lo","L"}; +lookup(74588) -> {"Lo","L"}; +lookup(74589) -> {"Lo","L"}; +lookup(74590) -> {"Lo","L"}; +lookup(74591) -> {"Lo","L"}; +lookup(74592) -> {"Lo","L"}; +lookup(74593) -> {"Lo","L"}; +lookup(74594) -> {"Lo","L"}; +lookup(74595) -> {"Lo","L"}; +lookup(74596) -> {"Lo","L"}; +lookup(74597) -> {"Lo","L"}; +lookup(74598) -> {"Lo","L"}; +lookup(74599) -> {"Lo","L"}; +lookup(74600) -> {"Lo","L"}; +lookup(74601) -> {"Lo","L"}; +lookup(74602) -> {"Lo","L"}; +lookup(74603) -> {"Lo","L"}; +lookup(74604) -> {"Lo","L"}; +lookup(74605) -> {"Lo","L"}; +lookup(74606) -> {"Lo","L"}; +lookup(74607) -> {"Lo","L"}; +lookup(74608) -> {"Lo","L"}; +lookup(74609) -> {"Lo","L"}; +lookup(74610) -> {"Lo","L"}; +lookup(74611) -> {"Lo","L"}; +lookup(74612) -> {"Lo","L"}; +lookup(74613) -> {"Lo","L"}; +lookup(74614) -> {"Lo","L"}; +lookup(74615) -> {"Lo","L"}; +lookup(74616) -> {"Lo","L"}; 
+lookup(74617) -> {"Lo","L"}; +lookup(74618) -> {"Lo","L"}; +lookup(74619) -> {"Lo","L"}; +lookup(74620) -> {"Lo","L"}; +lookup(74621) -> {"Lo","L"}; +lookup(74622) -> {"Lo","L"}; +lookup(74623) -> {"Lo","L"}; +lookup(74624) -> {"Lo","L"}; +lookup(74625) -> {"Lo","L"}; +lookup(74626) -> {"Lo","L"}; +lookup(74627) -> {"Lo","L"}; +lookup(74628) -> {"Lo","L"}; +lookup(74629) -> {"Lo","L"}; +lookup(74630) -> {"Lo","L"}; +lookup(74631) -> {"Lo","L"}; +lookup(74632) -> {"Lo","L"}; +lookup(74633) -> {"Lo","L"}; +lookup(74634) -> {"Lo","L"}; +lookup(74635) -> {"Lo","L"}; +lookup(74636) -> {"Lo","L"}; +lookup(74637) -> {"Lo","L"}; +lookup(74638) -> {"Lo","L"}; +lookup(74639) -> {"Lo","L"}; +lookup(74640) -> {"Lo","L"}; +lookup(74641) -> {"Lo","L"}; +lookup(74642) -> {"Lo","L"}; +lookup(74643) -> {"Lo","L"}; +lookup(74644) -> {"Lo","L"}; +lookup(74645) -> {"Lo","L"}; +lookup(74646) -> {"Lo","L"}; +lookup(74647) -> {"Lo","L"}; +lookup(74648) -> {"Lo","L"}; +lookup(74649) -> {"Lo","L"}; +lookup(74752) -> {"Nl","L"}; +lookup(74753) -> {"Nl","L"}; +lookup(74754) -> {"Nl","L"}; +lookup(74755) -> {"Nl","L"}; +lookup(74756) -> {"Nl","L"}; +lookup(74757) -> {"Nl","L"}; +lookup(74758) -> {"Nl","L"}; +lookup(74759) -> {"Nl","L"}; +lookup(74760) -> {"Nl","L"}; +lookup(74761) -> {"Nl","L"}; +lookup(74762) -> {"Nl","L"}; +lookup(74763) -> {"Nl","L"}; +lookup(74764) -> {"Nl","L"}; +lookup(74765) -> {"Nl","L"}; +lookup(74766) -> {"Nl","L"}; +lookup(74767) -> {"Nl","L"}; +lookup(74768) -> {"Nl","L"}; +lookup(74769) -> {"Nl","L"}; +lookup(74770) -> {"Nl","L"}; +lookup(74771) -> {"Nl","L"}; +lookup(74772) -> {"Nl","L"}; +lookup(74773) -> {"Nl","L"}; +lookup(74774) -> {"Nl","L"}; +lookup(74775) -> {"Nl","L"}; +lookup(74776) -> {"Nl","L"}; +lookup(74777) -> {"Nl","L"}; +lookup(74778) -> {"Nl","L"}; +lookup(74779) -> {"Nl","L"}; +lookup(74780) -> {"Nl","L"}; +lookup(74781) -> {"Nl","L"}; +lookup(74782) -> {"Nl","L"}; +lookup(74783) -> {"Nl","L"}; +lookup(74784) -> {"Nl","L"}; +lookup(74785) -> 
{"Nl","L"}; +lookup(74786) -> {"Nl","L"}; +lookup(74787) -> {"Nl","L"}; +lookup(74788) -> {"Nl","L"}; +lookup(74789) -> {"Nl","L"}; +lookup(74790) -> {"Nl","L"}; +lookup(74791) -> {"Nl","L"}; +lookup(74792) -> {"Nl","L"}; +lookup(74793) -> {"Nl","L"}; +lookup(74794) -> {"Nl","L"}; +lookup(74795) -> {"Nl","L"}; +lookup(74796) -> {"Nl","L"}; +lookup(74797) -> {"Nl","L"}; +lookup(74798) -> {"Nl","L"}; +lookup(74799) -> {"Nl","L"}; +lookup(74800) -> {"Nl","L"}; +lookup(74801) -> {"Nl","L"}; +lookup(74802) -> {"Nl","L"}; +lookup(74803) -> {"Nl","L"}; +lookup(74804) -> {"Nl","L"}; +lookup(74805) -> {"Nl","L"}; +lookup(74806) -> {"Nl","L"}; +lookup(74807) -> {"Nl","L"}; +lookup(74808) -> {"Nl","L"}; +lookup(74809) -> {"Nl","L"}; +lookup(74810) -> {"Nl","L"}; +lookup(74811) -> {"Nl","L"}; +lookup(74812) -> {"Nl","L"}; +lookup(74813) -> {"Nl","L"}; +lookup(74814) -> {"Nl","L"}; +lookup(74815) -> {"Nl","L"}; +lookup(74816) -> {"Nl","L"}; +lookup(74817) -> {"Nl","L"}; +lookup(74818) -> {"Nl","L"}; +lookup(74819) -> {"Nl","L"}; +lookup(74820) -> {"Nl","L"}; +lookup(74821) -> {"Nl","L"}; +lookup(74822) -> {"Nl","L"}; +lookup(74823) -> {"Nl","L"}; +lookup(74824) -> {"Nl","L"}; +lookup(74825) -> {"Nl","L"}; +lookup(74826) -> {"Nl","L"}; +lookup(74827) -> {"Nl","L"}; +lookup(74828) -> {"Nl","L"}; +lookup(74829) -> {"Nl","L"}; +lookup(74830) -> {"Nl","L"}; +lookup(74831) -> {"Nl","L"}; +lookup(74832) -> {"Nl","L"}; +lookup(74833) -> {"Nl","L"}; +lookup(74834) -> {"Nl","L"}; +lookup(74835) -> {"Nl","L"}; +lookup(74836) -> {"Nl","L"}; +lookup(74837) -> {"Nl","L"}; +lookup(74838) -> {"Nl","L"}; +lookup(74839) -> {"Nl","L"}; +lookup(74840) -> {"Nl","L"}; +lookup(74841) -> {"Nl","L"}; +lookup(74842) -> {"Nl","L"}; +lookup(74843) -> {"Nl","L"}; +lookup(74844) -> {"Nl","L"}; +lookup(74845) -> {"Nl","L"}; +lookup(74846) -> {"Nl","L"}; +lookup(74847) -> {"Nl","L"}; +lookup(74848) -> {"Nl","L"}; +lookup(74849) -> {"Nl","L"}; +lookup(74850) -> {"Nl","L"}; +lookup(74851) -> {"Nl","L"}; 
+lookup(74852) -> {"Nl","L"}; +lookup(74853) -> {"Nl","L"}; +lookup(74854) -> {"Nl","L"}; +lookup(74855) -> {"Nl","L"}; +lookup(74856) -> {"Nl","L"}; +lookup(74857) -> {"Nl","L"}; +lookup(74858) -> {"Nl","L"}; +lookup(74859) -> {"Nl","L"}; +lookup(74860) -> {"Nl","L"}; +lookup(74861) -> {"Nl","L"}; +lookup(74862) -> {"Nl","L"}; +lookup(74864) -> {"Po","L"}; +lookup(74865) -> {"Po","L"}; +lookup(74866) -> {"Po","L"}; +lookup(74867) -> {"Po","L"}; +lookup(74868) -> {"Po","L"}; +lookup(74880) -> {"Lo","L"}; +lookup(74881) -> {"Lo","L"}; +lookup(74882) -> {"Lo","L"}; +lookup(74883) -> {"Lo","L"}; +lookup(74884) -> {"Lo","L"}; +lookup(74885) -> {"Lo","L"}; +lookup(74886) -> {"Lo","L"}; +lookup(74887) -> {"Lo","L"}; +lookup(74888) -> {"Lo","L"}; +lookup(74889) -> {"Lo","L"}; +lookup(74890) -> {"Lo","L"}; +lookup(74891) -> {"Lo","L"}; +lookup(74892) -> {"Lo","L"}; +lookup(74893) -> {"Lo","L"}; +lookup(74894) -> {"Lo","L"}; +lookup(74895) -> {"Lo","L"}; +lookup(74896) -> {"Lo","L"}; +lookup(74897) -> {"Lo","L"}; +lookup(74898) -> {"Lo","L"}; +lookup(74899) -> {"Lo","L"}; +lookup(74900) -> {"Lo","L"}; +lookup(74901) -> {"Lo","L"}; +lookup(74902) -> {"Lo","L"}; +lookup(74903) -> {"Lo","L"}; +lookup(74904) -> {"Lo","L"}; +lookup(74905) -> {"Lo","L"}; +lookup(74906) -> {"Lo","L"}; +lookup(74907) -> {"Lo","L"}; +lookup(74908) -> {"Lo","L"}; +lookup(74909) -> {"Lo","L"}; +lookup(74910) -> {"Lo","L"}; +lookup(74911) -> {"Lo","L"}; +lookup(74912) -> {"Lo","L"}; +lookup(74913) -> {"Lo","L"}; +lookup(74914) -> {"Lo","L"}; +lookup(74915) -> {"Lo","L"}; +lookup(74916) -> {"Lo","L"}; +lookup(74917) -> {"Lo","L"}; +lookup(74918) -> {"Lo","L"}; +lookup(74919) -> {"Lo","L"}; +lookup(74920) -> {"Lo","L"}; +lookup(74921) -> {"Lo","L"}; +lookup(74922) -> {"Lo","L"}; +lookup(74923) -> {"Lo","L"}; +lookup(74924) -> {"Lo","L"}; +lookup(74925) -> {"Lo","L"}; +lookup(74926) -> {"Lo","L"}; +lookup(74927) -> {"Lo","L"}; +lookup(74928) -> {"Lo","L"}; +lookup(74929) -> {"Lo","L"}; +lookup(74930) -> 
{"Lo","L"}; +lookup(74931) -> {"Lo","L"}; +lookup(74932) -> {"Lo","L"}; +lookup(74933) -> {"Lo","L"}; +lookup(74934) -> {"Lo","L"}; +lookup(74935) -> {"Lo","L"}; +lookup(74936) -> {"Lo","L"}; +lookup(74937) -> {"Lo","L"}; +lookup(74938) -> {"Lo","L"}; +lookup(74939) -> {"Lo","L"}; +lookup(74940) -> {"Lo","L"}; +lookup(74941) -> {"Lo","L"}; +lookup(74942) -> {"Lo","L"}; +lookup(74943) -> {"Lo","L"}; +lookup(74944) -> {"Lo","L"}; +lookup(74945) -> {"Lo","L"}; +lookup(74946) -> {"Lo","L"}; +lookup(74947) -> {"Lo","L"}; +lookup(74948) -> {"Lo","L"}; +lookup(74949) -> {"Lo","L"}; +lookup(74950) -> {"Lo","L"}; +lookup(74951) -> {"Lo","L"}; +lookup(74952) -> {"Lo","L"}; +lookup(74953) -> {"Lo","L"}; +lookup(74954) -> {"Lo","L"}; +lookup(74955) -> {"Lo","L"}; +lookup(74956) -> {"Lo","L"}; +lookup(74957) -> {"Lo","L"}; +lookup(74958) -> {"Lo","L"}; +lookup(74959) -> {"Lo","L"}; +lookup(74960) -> {"Lo","L"}; +lookup(74961) -> {"Lo","L"}; +lookup(74962) -> {"Lo","L"}; +lookup(74963) -> {"Lo","L"}; +lookup(74964) -> {"Lo","L"}; +lookup(74965) -> {"Lo","L"}; +lookup(74966) -> {"Lo","L"}; +lookup(74967) -> {"Lo","L"}; +lookup(74968) -> {"Lo","L"}; +lookup(74969) -> {"Lo","L"}; +lookup(74970) -> {"Lo","L"}; +lookup(74971) -> {"Lo","L"}; +lookup(74972) -> {"Lo","L"}; +lookup(74973) -> {"Lo","L"}; +lookup(74974) -> {"Lo","L"}; +lookup(74975) -> {"Lo","L"}; +lookup(74976) -> {"Lo","L"}; +lookup(74977) -> {"Lo","L"}; +lookup(74978) -> {"Lo","L"}; +lookup(74979) -> {"Lo","L"}; +lookup(74980) -> {"Lo","L"}; +lookup(74981) -> {"Lo","L"}; +lookup(74982) -> {"Lo","L"}; +lookup(74983) -> {"Lo","L"}; +lookup(74984) -> {"Lo","L"}; +lookup(74985) -> {"Lo","L"}; +lookup(74986) -> {"Lo","L"}; +lookup(74987) -> {"Lo","L"}; +lookup(74988) -> {"Lo","L"}; +lookup(74989) -> {"Lo","L"}; +lookup(74990) -> {"Lo","L"}; +lookup(74991) -> {"Lo","L"}; +lookup(74992) -> {"Lo","L"}; +lookup(74993) -> {"Lo","L"}; +lookup(74994) -> {"Lo","L"}; +lookup(74995) -> {"Lo","L"}; +lookup(74996) -> {"Lo","L"}; 
+lookup(74997) -> {"Lo","L"}; +lookup(74998) -> {"Lo","L"}; +lookup(74999) -> {"Lo","L"}; +lookup(75000) -> {"Lo","L"}; +lookup(75001) -> {"Lo","L"}; +lookup(75002) -> {"Lo","L"}; +lookup(75003) -> {"Lo","L"}; +lookup(75004) -> {"Lo","L"}; +lookup(75005) -> {"Lo","L"}; +lookup(75006) -> {"Lo","L"}; +lookup(75007) -> {"Lo","L"}; +lookup(75008) -> {"Lo","L"}; +lookup(75009) -> {"Lo","L"}; +lookup(75010) -> {"Lo","L"}; +lookup(75011) -> {"Lo","L"}; +lookup(75012) -> {"Lo","L"}; +lookup(75013) -> {"Lo","L"}; +lookup(75014) -> {"Lo","L"}; +lookup(75015) -> {"Lo","L"}; +lookup(75016) -> {"Lo","L"}; +lookup(75017) -> {"Lo","L"}; +lookup(75018) -> {"Lo","L"}; +lookup(75019) -> {"Lo","L"}; +lookup(75020) -> {"Lo","L"}; +lookup(75021) -> {"Lo","L"}; +lookup(75022) -> {"Lo","L"}; +lookup(75023) -> {"Lo","L"}; +lookup(75024) -> {"Lo","L"}; +lookup(75025) -> {"Lo","L"}; +lookup(75026) -> {"Lo","L"}; +lookup(75027) -> {"Lo","L"}; +lookup(75028) -> {"Lo","L"}; +lookup(75029) -> {"Lo","L"}; +lookup(75030) -> {"Lo","L"}; +lookup(75031) -> {"Lo","L"}; +lookup(75032) -> {"Lo","L"}; +lookup(75033) -> {"Lo","L"}; +lookup(75034) -> {"Lo","L"}; +lookup(75035) -> {"Lo","L"}; +lookup(75036) -> {"Lo","L"}; +lookup(75037) -> {"Lo","L"}; +lookup(75038) -> {"Lo","L"}; +lookup(75039) -> {"Lo","L"}; +lookup(75040) -> {"Lo","L"}; +lookup(75041) -> {"Lo","L"}; +lookup(75042) -> {"Lo","L"}; +lookup(75043) -> {"Lo","L"}; +lookup(75044) -> {"Lo","L"}; +lookup(75045) -> {"Lo","L"}; +lookup(75046) -> {"Lo","L"}; +lookup(75047) -> {"Lo","L"}; +lookup(75048) -> {"Lo","L"}; +lookup(75049) -> {"Lo","L"}; +lookup(75050) -> {"Lo","L"}; +lookup(75051) -> {"Lo","L"}; +lookup(75052) -> {"Lo","L"}; +lookup(75053) -> {"Lo","L"}; +lookup(75054) -> {"Lo","L"}; +lookup(75055) -> {"Lo","L"}; +lookup(75056) -> {"Lo","L"}; +lookup(75057) -> {"Lo","L"}; +lookup(75058) -> {"Lo","L"}; +lookup(75059) -> {"Lo","L"}; +lookup(75060) -> {"Lo","L"}; +lookup(75061) -> {"Lo","L"}; +lookup(75062) -> {"Lo","L"}; +lookup(75063) -> 
{"Lo","L"}; +lookup(75064) -> {"Lo","L"}; +lookup(75065) -> {"Lo","L"}; +lookup(75066) -> {"Lo","L"}; +lookup(75067) -> {"Lo","L"}; +lookup(75068) -> {"Lo","L"}; +lookup(75069) -> {"Lo","L"}; +lookup(75070) -> {"Lo","L"}; +lookup(75071) -> {"Lo","L"}; +lookup(75072) -> {"Lo","L"}; +lookup(75073) -> {"Lo","L"}; +lookup(75074) -> {"Lo","L"}; +lookup(75075) -> {"Lo","L"}; +lookup(77824) -> {"Lo","L"}; +lookup(77825) -> {"Lo","L"}; +lookup(77826) -> {"Lo","L"}; +lookup(77827) -> {"Lo","L"}; +lookup(77828) -> {"Lo","L"}; +lookup(77829) -> {"Lo","L"}; +lookup(77830) -> {"Lo","L"}; +lookup(77831) -> {"Lo","L"}; +lookup(77832) -> {"Lo","L"}; +lookup(77833) -> {"Lo","L"}; +lookup(77834) -> {"Lo","L"}; +lookup(77835) -> {"Lo","L"}; +lookup(77836) -> {"Lo","L"}; +lookup(77837) -> {"Lo","L"}; +lookup(77838) -> {"Lo","L"}; +lookup(77839) -> {"Lo","L"}; +lookup(77840) -> {"Lo","L"}; +lookup(77841) -> {"Lo","L"}; +lookup(77842) -> {"Lo","L"}; +lookup(77843) -> {"Lo","L"}; +lookup(77844) -> {"Lo","L"}; +lookup(77845) -> {"Lo","L"}; +lookup(77846) -> {"Lo","L"}; +lookup(77847) -> {"Lo","L"}; +lookup(77848) -> {"Lo","L"}; +lookup(77849) -> {"Lo","L"}; +lookup(77850) -> {"Lo","L"}; +lookup(77851) -> {"Lo","L"}; +lookup(77852) -> {"Lo","L"}; +lookup(77853) -> {"Lo","L"}; +lookup(77854) -> {"Lo","L"}; +lookup(77855) -> {"Lo","L"}; +lookup(77856) -> {"Lo","L"}; +lookup(77857) -> {"Lo","L"}; +lookup(77858) -> {"Lo","L"}; +lookup(77859) -> {"Lo","L"}; +lookup(77860) -> {"Lo","L"}; +lookup(77861) -> {"Lo","L"}; +lookup(77862) -> {"Lo","L"}; +lookup(77863) -> {"Lo","L"}; +lookup(77864) -> {"Lo","L"}; +lookup(77865) -> {"Lo","L"}; +lookup(77866) -> {"Lo","L"}; +lookup(77867) -> {"Lo","L"}; +lookup(77868) -> {"Lo","L"}; +lookup(77869) -> {"Lo","L"}; +lookup(77870) -> {"Lo","L"}; +lookup(77871) -> {"Lo","L"}; +lookup(77872) -> {"Lo","L"}; +lookup(77873) -> {"Lo","L"}; +lookup(77874) -> {"Lo","L"}; +lookup(77875) -> {"Lo","L"}; +lookup(77876) -> {"Lo","L"}; +lookup(77877) -> {"Lo","L"}; 
+lookup(77878) -> {"Lo","L"}; +lookup(77879) -> {"Lo","L"}; +lookup(77880) -> {"Lo","L"}; +lookup(77881) -> {"Lo","L"}; +lookup(77882) -> {"Lo","L"}; +lookup(77883) -> {"Lo","L"}; +lookup(77884) -> {"Lo","L"}; +lookup(77885) -> {"Lo","L"}; +lookup(77886) -> {"Lo","L"}; +lookup(77887) -> {"Lo","L"}; +lookup(77888) -> {"Lo","L"}; +lookup(77889) -> {"Lo","L"}; +lookup(77890) -> {"Lo","L"}; +lookup(77891) -> {"Lo","L"}; +lookup(77892) -> {"Lo","L"}; +lookup(77893) -> {"Lo","L"}; +lookup(77894) -> {"Lo","L"}; +lookup(77895) -> {"Lo","L"}; +lookup(77896) -> {"Lo","L"}; +lookup(77897) -> {"Lo","L"}; +lookup(77898) -> {"Lo","L"}; +lookup(77899) -> {"Lo","L"}; +lookup(77900) -> {"Lo","L"}; +lookup(77901) -> {"Lo","L"}; +lookup(77902) -> {"Lo","L"}; +lookup(77903) -> {"Lo","L"}; +lookup(77904) -> {"Lo","L"}; +lookup(77905) -> {"Lo","L"}; +lookup(77906) -> {"Lo","L"}; +lookup(77907) -> {"Lo","L"}; +lookup(77908) -> {"Lo","L"}; +lookup(77909) -> {"Lo","L"}; +lookup(77910) -> {"Lo","L"}; +lookup(77911) -> {"Lo","L"}; +lookup(77912) -> {"Lo","L"}; +lookup(77913) -> {"Lo","L"}; +lookup(77914) -> {"Lo","L"}; +lookup(77915) -> {"Lo","L"}; +lookup(77916) -> {"Lo","L"}; +lookup(77917) -> {"Lo","L"}; +lookup(77918) -> {"Lo","L"}; +lookup(77919) -> {"Lo","L"}; +lookup(77920) -> {"Lo","L"}; +lookup(77921) -> {"Lo","L"}; +lookup(77922) -> {"Lo","L"}; +lookup(77923) -> {"Lo","L"}; +lookup(77924) -> {"Lo","L"}; +lookup(77925) -> {"Lo","L"}; +lookup(77926) -> {"Lo","L"}; +lookup(77927) -> {"Lo","L"}; +lookup(77928) -> {"Lo","L"}; +lookup(77929) -> {"Lo","L"}; +lookup(77930) -> {"Lo","L"}; +lookup(77931) -> {"Lo","L"}; +lookup(77932) -> {"Lo","L"}; +lookup(77933) -> {"Lo","L"}; +lookup(77934) -> {"Lo","L"}; +lookup(77935) -> {"Lo","L"}; +lookup(77936) -> {"Lo","L"}; +lookup(77937) -> {"Lo","L"}; +lookup(77938) -> {"Lo","L"}; +lookup(77939) -> {"Lo","L"}; +lookup(77940) -> {"Lo","L"}; +lookup(77941) -> {"Lo","L"}; +lookup(77942) -> {"Lo","L"}; +lookup(77943) -> {"Lo","L"}; +lookup(77944) -> 
{"Lo","L"}; +lookup(77945) -> {"Lo","L"}; +lookup(77946) -> {"Lo","L"}; +lookup(77947) -> {"Lo","L"}; +lookup(77948) -> {"Lo","L"}; +lookup(77949) -> {"Lo","L"}; +lookup(77950) -> {"Lo","L"}; +lookup(77951) -> {"Lo","L"}; +lookup(77952) -> {"Lo","L"}; +lookup(77953) -> {"Lo","L"}; +lookup(77954) -> {"Lo","L"}; +lookup(77955) -> {"Lo","L"}; +lookup(77956) -> {"Lo","L"}; +lookup(77957) -> {"Lo","L"}; +lookup(77958) -> {"Lo","L"}; +lookup(77959) -> {"Lo","L"}; +lookup(77960) -> {"Lo","L"}; +lookup(77961) -> {"Lo","L"}; +lookup(77962) -> {"Lo","L"}; +lookup(77963) -> {"Lo","L"}; +lookup(77964) -> {"Lo","L"}; +lookup(77965) -> {"Lo","L"}; +lookup(77966) -> {"Lo","L"}; +lookup(77967) -> {"Lo","L"}; +lookup(77968) -> {"Lo","L"}; +lookup(77969) -> {"Lo","L"}; +lookup(77970) -> {"Lo","L"}; +lookup(77971) -> {"Lo","L"}; +lookup(77972) -> {"Lo","L"}; +lookup(77973) -> {"Lo","L"}; +lookup(77974) -> {"Lo","L"}; +lookup(77975) -> {"Lo","L"}; +lookup(77976) -> {"Lo","L"}; +lookup(77977) -> {"Lo","L"}; +lookup(77978) -> {"Lo","L"}; +lookup(77979) -> {"Lo","L"}; +lookup(77980) -> {"Lo","L"}; +lookup(77981) -> {"Lo","L"}; +lookup(77982) -> {"Lo","L"}; +lookup(77983) -> {"Lo","L"}; +lookup(77984) -> {"Lo","L"}; +lookup(77985) -> {"Lo","L"}; +lookup(77986) -> {"Lo","L"}; +lookup(77987) -> {"Lo","L"}; +lookup(77988) -> {"Lo","L"}; +lookup(77989) -> {"Lo","L"}; +lookup(77990) -> {"Lo","L"}; +lookup(77991) -> {"Lo","L"}; +lookup(77992) -> {"Lo","L"}; +lookup(77993) -> {"Lo","L"}; +lookup(77994) -> {"Lo","L"}; +lookup(77995) -> {"Lo","L"}; +lookup(77996) -> {"Lo","L"}; +lookup(77997) -> {"Lo","L"}; +lookup(77998) -> {"Lo","L"}; +lookup(77999) -> {"Lo","L"}; +lookup(78000) -> {"Lo","L"}; +lookup(78001) -> {"Lo","L"}; +lookup(78002) -> {"Lo","L"}; +lookup(78003) -> {"Lo","L"}; +lookup(78004) -> {"Lo","L"}; +lookup(78005) -> {"Lo","L"}; +lookup(78006) -> {"Lo","L"}; +lookup(78007) -> {"Lo","L"}; +lookup(78008) -> {"Lo","L"}; +lookup(78009) -> {"Lo","L"}; +lookup(78010) -> {"Lo","L"}; 
+lookup(78011) -> {"Lo","L"}; +lookup(78012) -> {"Lo","L"}; +lookup(78013) -> {"Lo","L"}; +lookup(78014) -> {"Lo","L"}; +lookup(78015) -> {"Lo","L"}; +lookup(78016) -> {"Lo","L"}; +lookup(78017) -> {"Lo","L"}; +lookup(78018) -> {"Lo","L"}; +lookup(78019) -> {"Lo","L"}; +lookup(78020) -> {"Lo","L"}; +lookup(78021) -> {"Lo","L"}; +lookup(78022) -> {"Lo","L"}; +lookup(78023) -> {"Lo","L"}; +lookup(78024) -> {"Lo","L"}; +lookup(78025) -> {"Lo","L"}; +lookup(78026) -> {"Lo","L"}; +lookup(78027) -> {"Lo","L"}; +lookup(78028) -> {"Lo","L"}; +lookup(78029) -> {"Lo","L"}; +lookup(78030) -> {"Lo","L"}; +lookup(78031) -> {"Lo","L"}; +lookup(78032) -> {"Lo","L"}; +lookup(78033) -> {"Lo","L"}; +lookup(78034) -> {"Lo","L"}; +lookup(78035) -> {"Lo","L"}; +lookup(78036) -> {"Lo","L"}; +lookup(78037) -> {"Lo","L"}; +lookup(78038) -> {"Lo","L"}; +lookup(78039) -> {"Lo","L"}; +lookup(78040) -> {"Lo","L"}; +lookup(78041) -> {"Lo","L"}; +lookup(78042) -> {"Lo","L"}; +lookup(78043) -> {"Lo","L"}; +lookup(78044) -> {"Lo","L"}; +lookup(78045) -> {"Lo","L"}; +lookup(78046) -> {"Lo","L"}; +lookup(78047) -> {"Lo","L"}; +lookup(78048) -> {"Lo","L"}; +lookup(78049) -> {"Lo","L"}; +lookup(78050) -> {"Lo","L"}; +lookup(78051) -> {"Lo","L"}; +lookup(78052) -> {"Lo","L"}; +lookup(78053) -> {"Lo","L"}; +lookup(78054) -> {"Lo","L"}; +lookup(78055) -> {"Lo","L"}; +lookup(78056) -> {"Lo","L"}; +lookup(78057) -> {"Lo","L"}; +lookup(78058) -> {"Lo","L"}; +lookup(78059) -> {"Lo","L"}; +lookup(78060) -> {"Lo","L"}; +lookup(78061) -> {"Lo","L"}; +lookup(78062) -> {"Lo","L"}; +lookup(78063) -> {"Lo","L"}; +lookup(78064) -> {"Lo","L"}; +lookup(78065) -> {"Lo","L"}; +lookup(78066) -> {"Lo","L"}; +lookup(78067) -> {"Lo","L"}; +lookup(78068) -> {"Lo","L"}; +lookup(78069) -> {"Lo","L"}; +lookup(78070) -> {"Lo","L"}; +lookup(78071) -> {"Lo","L"}; +lookup(78072) -> {"Lo","L"}; +lookup(78073) -> {"Lo","L"}; +lookup(78074) -> {"Lo","L"}; +lookup(78075) -> {"Lo","L"}; +lookup(78076) -> {"Lo","L"}; +lookup(78077) -> 
{"Lo","L"}; +lookup(78078) -> {"Lo","L"}; +lookup(78079) -> {"Lo","L"}; +lookup(78080) -> {"Lo","L"}; +lookup(78081) -> {"Lo","L"}; +lookup(78082) -> {"Lo","L"}; +lookup(78083) -> {"Lo","L"}; +lookup(78084) -> {"Lo","L"}; +lookup(78085) -> {"Lo","L"}; +lookup(78086) -> {"Lo","L"}; +lookup(78087) -> {"Lo","L"}; +lookup(78088) -> {"Lo","L"}; +lookup(78089) -> {"Lo","L"}; +lookup(78090) -> {"Lo","L"}; +lookup(78091) -> {"Lo","L"}; +lookup(78092) -> {"Lo","L"}; +lookup(78093) -> {"Lo","L"}; +lookup(78094) -> {"Lo","L"}; +lookup(78095) -> {"Lo","L"}; +lookup(78096) -> {"Lo","L"}; +lookup(78097) -> {"Lo","L"}; +lookup(78098) -> {"Lo","L"}; +lookup(78099) -> {"Lo","L"}; +lookup(78100) -> {"Lo","L"}; +lookup(78101) -> {"Lo","L"}; +lookup(78102) -> {"Lo","L"}; +lookup(78103) -> {"Lo","L"}; +lookup(78104) -> {"Lo","L"}; +lookup(78105) -> {"Lo","L"}; +lookup(78106) -> {"Lo","L"}; +lookup(78107) -> {"Lo","L"}; +lookup(78108) -> {"Lo","L"}; +lookup(78109) -> {"Lo","L"}; +lookup(78110) -> {"Lo","L"}; +lookup(78111) -> {"Lo","L"}; +lookup(78112) -> {"Lo","L"}; +lookup(78113) -> {"Lo","L"}; +lookup(78114) -> {"Lo","L"}; +lookup(78115) -> {"Lo","L"}; +lookup(78116) -> {"Lo","L"}; +lookup(78117) -> {"Lo","L"}; +lookup(78118) -> {"Lo","L"}; +lookup(78119) -> {"Lo","L"}; +lookup(78120) -> {"Lo","L"}; +lookup(78121) -> {"Lo","L"}; +lookup(78122) -> {"Lo","L"}; +lookup(78123) -> {"Lo","L"}; +lookup(78124) -> {"Lo","L"}; +lookup(78125) -> {"Lo","L"}; +lookup(78126) -> {"Lo","L"}; +lookup(78127) -> {"Lo","L"}; +lookup(78128) -> {"Lo","L"}; +lookup(78129) -> {"Lo","L"}; +lookup(78130) -> {"Lo","L"}; +lookup(78131) -> {"Lo","L"}; +lookup(78132) -> {"Lo","L"}; +lookup(78133) -> {"Lo","L"}; +lookup(78134) -> {"Lo","L"}; +lookup(78135) -> {"Lo","L"}; +lookup(78136) -> {"Lo","L"}; +lookup(78137) -> {"Lo","L"}; +lookup(78138) -> {"Lo","L"}; +lookup(78139) -> {"Lo","L"}; +lookup(78140) -> {"Lo","L"}; +lookup(78141) -> {"Lo","L"}; +lookup(78142) -> {"Lo","L"}; +lookup(78143) -> {"Lo","L"}; 
+lookup(78144) -> {"Lo","L"}; +lookup(78145) -> {"Lo","L"}; +lookup(78146) -> {"Lo","L"}; +lookup(78147) -> {"Lo","L"}; +lookup(78148) -> {"Lo","L"}; +lookup(78149) -> {"Lo","L"}; +lookup(78150) -> {"Lo","L"}; +lookup(78151) -> {"Lo","L"}; +lookup(78152) -> {"Lo","L"}; +lookup(78153) -> {"Lo","L"}; +lookup(78154) -> {"Lo","L"}; +lookup(78155) -> {"Lo","L"}; +lookup(78156) -> {"Lo","L"}; +lookup(78157) -> {"Lo","L"}; +lookup(78158) -> {"Lo","L"}; +lookup(78159) -> {"Lo","L"}; +lookup(78160) -> {"Lo","L"}; +lookup(78161) -> {"Lo","L"}; +lookup(78162) -> {"Lo","L"}; +lookup(78163) -> {"Lo","L"}; +lookup(78164) -> {"Lo","L"}; +lookup(78165) -> {"Lo","L"}; +lookup(78166) -> {"Lo","L"}; +lookup(78167) -> {"Lo","L"}; +lookup(78168) -> {"Lo","L"}; +lookup(78169) -> {"Lo","L"}; +lookup(78170) -> {"Lo","L"}; +lookup(78171) -> {"Lo","L"}; +lookup(78172) -> {"Lo","L"}; +lookup(78173) -> {"Lo","L"}; +lookup(78174) -> {"Lo","L"}; +lookup(78175) -> {"Lo","L"}; +lookup(78176) -> {"Lo","L"}; +lookup(78177) -> {"Lo","L"}; +lookup(78178) -> {"Lo","L"}; +lookup(78179) -> {"Lo","L"}; +lookup(78180) -> {"Lo","L"}; +lookup(78181) -> {"Lo","L"}; +lookup(78182) -> {"Lo","L"}; +lookup(78183) -> {"Lo","L"}; +lookup(78184) -> {"Lo","L"}; +lookup(78185) -> {"Lo","L"}; +lookup(78186) -> {"Lo","L"}; +lookup(78187) -> {"Lo","L"}; +lookup(78188) -> {"Lo","L"}; +lookup(78189) -> {"Lo","L"}; +lookup(78190) -> {"Lo","L"}; +lookup(78191) -> {"Lo","L"}; +lookup(78192) -> {"Lo","L"}; +lookup(78193) -> {"Lo","L"}; +lookup(78194) -> {"Lo","L"}; +lookup(78195) -> {"Lo","L"}; +lookup(78196) -> {"Lo","L"}; +lookup(78197) -> {"Lo","L"}; +lookup(78198) -> {"Lo","L"}; +lookup(78199) -> {"Lo","L"}; +lookup(78200) -> {"Lo","L"}; +lookup(78201) -> {"Lo","L"}; +lookup(78202) -> {"Lo","L"}; +lookup(78203) -> {"Lo","L"}; +lookup(78204) -> {"Lo","L"}; +lookup(78205) -> {"Lo","L"}; +lookup(78206) -> {"Lo","L"}; +lookup(78207) -> {"Lo","L"}; +lookup(78208) -> {"Lo","L"}; +lookup(78209) -> {"Lo","L"}; +lookup(78210) -> 
{"Lo","L"}; +lookup(78211) -> {"Lo","L"}; +lookup(78212) -> {"Lo","L"}; +lookup(78213) -> {"Lo","L"}; +lookup(78214) -> {"Lo","L"}; +lookup(78215) -> {"Lo","L"}; +lookup(78216) -> {"Lo","L"}; +lookup(78217) -> {"Lo","L"}; +lookup(78218) -> {"Lo","L"}; +lookup(78219) -> {"Lo","L"}; +lookup(78220) -> {"Lo","L"}; +lookup(78221) -> {"Lo","L"}; +lookup(78222) -> {"Lo","L"}; +lookup(78223) -> {"Lo","L"}; +lookup(78224) -> {"Lo","L"}; +lookup(78225) -> {"Lo","L"}; +lookup(78226) -> {"Lo","L"}; +lookup(78227) -> {"Lo","L"}; +lookup(78228) -> {"Lo","L"}; +lookup(78229) -> {"Lo","L"}; +lookup(78230) -> {"Lo","L"}; +lookup(78231) -> {"Lo","L"}; +lookup(78232) -> {"Lo","L"}; +lookup(78233) -> {"Lo","L"}; +lookup(78234) -> {"Lo","L"}; +lookup(78235) -> {"Lo","L"}; +lookup(78236) -> {"Lo","L"}; +lookup(78237) -> {"Lo","L"}; +lookup(78238) -> {"Lo","L"}; +lookup(78239) -> {"Lo","L"}; +lookup(78240) -> {"Lo","L"}; +lookup(78241) -> {"Lo","L"}; +lookup(78242) -> {"Lo","L"}; +lookup(78243) -> {"Lo","L"}; +lookup(78244) -> {"Lo","L"}; +lookup(78245) -> {"Lo","L"}; +lookup(78246) -> {"Lo","L"}; +lookup(78247) -> {"Lo","L"}; +lookup(78248) -> {"Lo","L"}; +lookup(78249) -> {"Lo","L"}; +lookup(78250) -> {"Lo","L"}; +lookup(78251) -> {"Lo","L"}; +lookup(78252) -> {"Lo","L"}; +lookup(78253) -> {"Lo","L"}; +lookup(78254) -> {"Lo","L"}; +lookup(78255) -> {"Lo","L"}; +lookup(78256) -> {"Lo","L"}; +lookup(78257) -> {"Lo","L"}; +lookup(78258) -> {"Lo","L"}; +lookup(78259) -> {"Lo","L"}; +lookup(78260) -> {"Lo","L"}; +lookup(78261) -> {"Lo","L"}; +lookup(78262) -> {"Lo","L"}; +lookup(78263) -> {"Lo","L"}; +lookup(78264) -> {"Lo","L"}; +lookup(78265) -> {"Lo","L"}; +lookup(78266) -> {"Lo","L"}; +lookup(78267) -> {"Lo","L"}; +lookup(78268) -> {"Lo","L"}; +lookup(78269) -> {"Lo","L"}; +lookup(78270) -> {"Lo","L"}; +lookup(78271) -> {"Lo","L"}; +lookup(78272) -> {"Lo","L"}; +lookup(78273) -> {"Lo","L"}; +lookup(78274) -> {"Lo","L"}; +lookup(78275) -> {"Lo","L"}; +lookup(78276) -> {"Lo","L"}; 
+lookup(78277) -> {"Lo","L"}; +lookup(78278) -> {"Lo","L"}; +lookup(78279) -> {"Lo","L"}; +lookup(78280) -> {"Lo","L"}; +lookup(78281) -> {"Lo","L"}; +lookup(78282) -> {"Lo","L"}; +lookup(78283) -> {"Lo","L"}; +lookup(78284) -> {"Lo","L"}; +lookup(78285) -> {"Lo","L"}; +lookup(78286) -> {"Lo","L"}; +lookup(78287) -> {"Lo","L"}; +lookup(78288) -> {"Lo","L"}; +lookup(78289) -> {"Lo","L"}; +lookup(78290) -> {"Lo","L"}; +lookup(78291) -> {"Lo","L"}; +lookup(78292) -> {"Lo","L"}; +lookup(78293) -> {"Lo","L"}; +lookup(78294) -> {"Lo","L"}; +lookup(78295) -> {"Lo","L"}; +lookup(78296) -> {"Lo","L"}; +lookup(78297) -> {"Lo","L"}; +lookup(78298) -> {"Lo","L"}; +lookup(78299) -> {"Lo","L"}; +lookup(78300) -> {"Lo","L"}; +lookup(78301) -> {"Lo","L"}; +lookup(78302) -> {"Lo","L"}; +lookup(78303) -> {"Lo","L"}; +lookup(78304) -> {"Lo","L"}; +lookup(78305) -> {"Lo","L"}; +lookup(78306) -> {"Lo","L"}; +lookup(78307) -> {"Lo","L"}; +lookup(78308) -> {"Lo","L"}; +lookup(78309) -> {"Lo","L"}; +lookup(78310) -> {"Lo","L"}; +lookup(78311) -> {"Lo","L"}; +lookup(78312) -> {"Lo","L"}; +lookup(78313) -> {"Lo","L"}; +lookup(78314) -> {"Lo","L"}; +lookup(78315) -> {"Lo","L"}; +lookup(78316) -> {"Lo","L"}; +lookup(78317) -> {"Lo","L"}; +lookup(78318) -> {"Lo","L"}; +lookup(78319) -> {"Lo","L"}; +lookup(78320) -> {"Lo","L"}; +lookup(78321) -> {"Lo","L"}; +lookup(78322) -> {"Lo","L"}; +lookup(78323) -> {"Lo","L"}; +lookup(78324) -> {"Lo","L"}; +lookup(78325) -> {"Lo","L"}; +lookup(78326) -> {"Lo","L"}; +lookup(78327) -> {"Lo","L"}; +lookup(78328) -> {"Lo","L"}; +lookup(78329) -> {"Lo","L"}; +lookup(78330) -> {"Lo","L"}; +lookup(78331) -> {"Lo","L"}; +lookup(78332) -> {"Lo","L"}; +lookup(78333) -> {"Lo","L"}; +lookup(78334) -> {"Lo","L"}; +lookup(78335) -> {"Lo","L"}; +lookup(78336) -> {"Lo","L"}; +lookup(78337) -> {"Lo","L"}; +lookup(78338) -> {"Lo","L"}; +lookup(78339) -> {"Lo","L"}; +lookup(78340) -> {"Lo","L"}; +lookup(78341) -> {"Lo","L"}; +lookup(78342) -> {"Lo","L"}; +lookup(78343) -> 
{"Lo","L"}; +lookup(78344) -> {"Lo","L"}; +lookup(78345) -> {"Lo","L"}; +lookup(78346) -> {"Lo","L"}; +lookup(78347) -> {"Lo","L"}; +lookup(78348) -> {"Lo","L"}; +lookup(78349) -> {"Lo","L"}; +lookup(78350) -> {"Lo","L"}; +lookup(78351) -> {"Lo","L"}; +lookup(78352) -> {"Lo","L"}; +lookup(78353) -> {"Lo","L"}; +lookup(78354) -> {"Lo","L"}; +lookup(78355) -> {"Lo","L"}; +lookup(78356) -> {"Lo","L"}; +lookup(78357) -> {"Lo","L"}; +lookup(78358) -> {"Lo","L"}; +lookup(78359) -> {"Lo","L"}; +lookup(78360) -> {"Lo","L"}; +lookup(78361) -> {"Lo","L"}; +lookup(78362) -> {"Lo","L"}; +lookup(78363) -> {"Lo","L"}; +lookup(78364) -> {"Lo","L"}; +lookup(78365) -> {"Lo","L"}; +lookup(78366) -> {"Lo","L"}; +lookup(78367) -> {"Lo","L"}; +lookup(78368) -> {"Lo","L"}; +lookup(78369) -> {"Lo","L"}; +lookup(78370) -> {"Lo","L"}; +lookup(78371) -> {"Lo","L"}; +lookup(78372) -> {"Lo","L"}; +lookup(78373) -> {"Lo","L"}; +lookup(78374) -> {"Lo","L"}; +lookup(78375) -> {"Lo","L"}; +lookup(78376) -> {"Lo","L"}; +lookup(78377) -> {"Lo","L"}; +lookup(78378) -> {"Lo","L"}; +lookup(78379) -> {"Lo","L"}; +lookup(78380) -> {"Lo","L"}; +lookup(78381) -> {"Lo","L"}; +lookup(78382) -> {"Lo","L"}; +lookup(78383) -> {"Lo","L"}; +lookup(78384) -> {"Lo","L"}; +lookup(78385) -> {"Lo","L"}; +lookup(78386) -> {"Lo","L"}; +lookup(78387) -> {"Lo","L"}; +lookup(78388) -> {"Lo","L"}; +lookup(78389) -> {"Lo","L"}; +lookup(78390) -> {"Lo","L"}; +lookup(78391) -> {"Lo","L"}; +lookup(78392) -> {"Lo","L"}; +lookup(78393) -> {"Lo","L"}; +lookup(78394) -> {"Lo","L"}; +lookup(78395) -> {"Lo","L"}; +lookup(78396) -> {"Lo","L"}; +lookup(78397) -> {"Lo","L"}; +lookup(78398) -> {"Lo","L"}; +lookup(78399) -> {"Lo","L"}; +lookup(78400) -> {"Lo","L"}; +lookup(78401) -> {"Lo","L"}; +lookup(78402) -> {"Lo","L"}; +lookup(78403) -> {"Lo","L"}; +lookup(78404) -> {"Lo","L"}; +lookup(78405) -> {"Lo","L"}; +lookup(78406) -> {"Lo","L"}; +lookup(78407) -> {"Lo","L"}; +lookup(78408) -> {"Lo","L"}; +lookup(78409) -> {"Lo","L"}; 
+lookup(78410) -> {"Lo","L"}; +lookup(78411) -> {"Lo","L"}; +lookup(78412) -> {"Lo","L"}; +lookup(78413) -> {"Lo","L"}; +lookup(78414) -> {"Lo","L"}; +lookup(78415) -> {"Lo","L"}; +lookup(78416) -> {"Lo","L"}; +lookup(78417) -> {"Lo","L"}; +lookup(78418) -> {"Lo","L"}; +lookup(78419) -> {"Lo","L"}; +lookup(78420) -> {"Lo","L"}; +lookup(78421) -> {"Lo","L"}; +lookup(78422) -> {"Lo","L"}; +lookup(78423) -> {"Lo","L"}; +lookup(78424) -> {"Lo","L"}; +lookup(78425) -> {"Lo","L"}; +lookup(78426) -> {"Lo","L"}; +lookup(78427) -> {"Lo","L"}; +lookup(78428) -> {"Lo","L"}; +lookup(78429) -> {"Lo","L"}; +lookup(78430) -> {"Lo","L"}; +lookup(78431) -> {"Lo","L"}; +lookup(78432) -> {"Lo","L"}; +lookup(78433) -> {"Lo","L"}; +lookup(78434) -> {"Lo","L"}; +lookup(78435) -> {"Lo","L"}; +lookup(78436) -> {"Lo","L"}; +lookup(78437) -> {"Lo","L"}; +lookup(78438) -> {"Lo","L"}; +lookup(78439) -> {"Lo","L"}; +lookup(78440) -> {"Lo","L"}; +lookup(78441) -> {"Lo","L"}; +lookup(78442) -> {"Lo","L"}; +lookup(78443) -> {"Lo","L"}; +lookup(78444) -> {"Lo","L"}; +lookup(78445) -> {"Lo","L"}; +lookup(78446) -> {"Lo","L"}; +lookup(78447) -> {"Lo","L"}; +lookup(78448) -> {"Lo","L"}; +lookup(78449) -> {"Lo","L"}; +lookup(78450) -> {"Lo","L"}; +lookup(78451) -> {"Lo","L"}; +lookup(78452) -> {"Lo","L"}; +lookup(78453) -> {"Lo","L"}; +lookup(78454) -> {"Lo","L"}; +lookup(78455) -> {"Lo","L"}; +lookup(78456) -> {"Lo","L"}; +lookup(78457) -> {"Lo","L"}; +lookup(78458) -> {"Lo","L"}; +lookup(78459) -> {"Lo","L"}; +lookup(78460) -> {"Lo","L"}; +lookup(78461) -> {"Lo","L"}; +lookup(78462) -> {"Lo","L"}; +lookup(78463) -> {"Lo","L"}; +lookup(78464) -> {"Lo","L"}; +lookup(78465) -> {"Lo","L"}; +lookup(78466) -> {"Lo","L"}; +lookup(78467) -> {"Lo","L"}; +lookup(78468) -> {"Lo","L"}; +lookup(78469) -> {"Lo","L"}; +lookup(78470) -> {"Lo","L"}; +lookup(78471) -> {"Lo","L"}; +lookup(78472) -> {"Lo","L"}; +lookup(78473) -> {"Lo","L"}; +lookup(78474) -> {"Lo","L"}; +lookup(78475) -> {"Lo","L"}; +lookup(78476) -> 
{"Lo","L"}; +lookup(78477) -> {"Lo","L"}; +lookup(78478) -> {"Lo","L"}; +lookup(78479) -> {"Lo","L"}; +lookup(78480) -> {"Lo","L"}; +lookup(78481) -> {"Lo","L"}; +lookup(78482) -> {"Lo","L"}; +lookup(78483) -> {"Lo","L"}; +lookup(78484) -> {"Lo","L"}; +lookup(78485) -> {"Lo","L"}; +lookup(78486) -> {"Lo","L"}; +lookup(78487) -> {"Lo","L"}; +lookup(78488) -> {"Lo","L"}; +lookup(78489) -> {"Lo","L"}; +lookup(78490) -> {"Lo","L"}; +lookup(78491) -> {"Lo","L"}; +lookup(78492) -> {"Lo","L"}; +lookup(78493) -> {"Lo","L"}; +lookup(78494) -> {"Lo","L"}; +lookup(78495) -> {"Lo","L"}; +lookup(78496) -> {"Lo","L"}; +lookup(78497) -> {"Lo","L"}; +lookup(78498) -> {"Lo","L"}; +lookup(78499) -> {"Lo","L"}; +lookup(78500) -> {"Lo","L"}; +lookup(78501) -> {"Lo","L"}; +lookup(78502) -> {"Lo","L"}; +lookup(78503) -> {"Lo","L"}; +lookup(78504) -> {"Lo","L"}; +lookup(78505) -> {"Lo","L"}; +lookup(78506) -> {"Lo","L"}; +lookup(78507) -> {"Lo","L"}; +lookup(78508) -> {"Lo","L"}; +lookup(78509) -> {"Lo","L"}; +lookup(78510) -> {"Lo","L"}; +lookup(78511) -> {"Lo","L"}; +lookup(78512) -> {"Lo","L"}; +lookup(78513) -> {"Lo","L"}; +lookup(78514) -> {"Lo","L"}; +lookup(78515) -> {"Lo","L"}; +lookup(78516) -> {"Lo","L"}; +lookup(78517) -> {"Lo","L"}; +lookup(78518) -> {"Lo","L"}; +lookup(78519) -> {"Lo","L"}; +lookup(78520) -> {"Lo","L"}; +lookup(78521) -> {"Lo","L"}; +lookup(78522) -> {"Lo","L"}; +lookup(78523) -> {"Lo","L"}; +lookup(78524) -> {"Lo","L"}; +lookup(78525) -> {"Lo","L"}; +lookup(78526) -> {"Lo","L"}; +lookup(78527) -> {"Lo","L"}; +lookup(78528) -> {"Lo","L"}; +lookup(78529) -> {"Lo","L"}; +lookup(78530) -> {"Lo","L"}; +lookup(78531) -> {"Lo","L"}; +lookup(78532) -> {"Lo","L"}; +lookup(78533) -> {"Lo","L"}; +lookup(78534) -> {"Lo","L"}; +lookup(78535) -> {"Lo","L"}; +lookup(78536) -> {"Lo","L"}; +lookup(78537) -> {"Lo","L"}; +lookup(78538) -> {"Lo","L"}; +lookup(78539) -> {"Lo","L"}; +lookup(78540) -> {"Lo","L"}; +lookup(78541) -> {"Lo","L"}; +lookup(78542) -> {"Lo","L"}; 
+lookup(78543) -> {"Lo","L"}; +lookup(78544) -> {"Lo","L"}; +lookup(78545) -> {"Lo","L"}; +lookup(78546) -> {"Lo","L"}; +lookup(78547) -> {"Lo","L"}; +lookup(78548) -> {"Lo","L"}; +lookup(78549) -> {"Lo","L"}; +lookup(78550) -> {"Lo","L"}; +lookup(78551) -> {"Lo","L"}; +lookup(78552) -> {"Lo","L"}; +lookup(78553) -> {"Lo","L"}; +lookup(78554) -> {"Lo","L"}; +lookup(78555) -> {"Lo","L"}; +lookup(78556) -> {"Lo","L"}; +lookup(78557) -> {"Lo","L"}; +lookup(78558) -> {"Lo","L"}; +lookup(78559) -> {"Lo","L"}; +lookup(78560) -> {"Lo","L"}; +lookup(78561) -> {"Lo","L"}; +lookup(78562) -> {"Lo","L"}; +lookup(78563) -> {"Lo","L"}; +lookup(78564) -> {"Lo","L"}; +lookup(78565) -> {"Lo","L"}; +lookup(78566) -> {"Lo","L"}; +lookup(78567) -> {"Lo","L"}; +lookup(78568) -> {"Lo","L"}; +lookup(78569) -> {"Lo","L"}; +lookup(78570) -> {"Lo","L"}; +lookup(78571) -> {"Lo","L"}; +lookup(78572) -> {"Lo","L"}; +lookup(78573) -> {"Lo","L"}; +lookup(78574) -> {"Lo","L"}; +lookup(78575) -> {"Lo","L"}; +lookup(78576) -> {"Lo","L"}; +lookup(78577) -> {"Lo","L"}; +lookup(78578) -> {"Lo","L"}; +lookup(78579) -> {"Lo","L"}; +lookup(78580) -> {"Lo","L"}; +lookup(78581) -> {"Lo","L"}; +lookup(78582) -> {"Lo","L"}; +lookup(78583) -> {"Lo","L"}; +lookup(78584) -> {"Lo","L"}; +lookup(78585) -> {"Lo","L"}; +lookup(78586) -> {"Lo","L"}; +lookup(78587) -> {"Lo","L"}; +lookup(78588) -> {"Lo","L"}; +lookup(78589) -> {"Lo","L"}; +lookup(78590) -> {"Lo","L"}; +lookup(78591) -> {"Lo","L"}; +lookup(78592) -> {"Lo","L"}; +lookup(78593) -> {"Lo","L"}; +lookup(78594) -> {"Lo","L"}; +lookup(78595) -> {"Lo","L"}; +lookup(78596) -> {"Lo","L"}; +lookup(78597) -> {"Lo","L"}; +lookup(78598) -> {"Lo","L"}; +lookup(78599) -> {"Lo","L"}; +lookup(78600) -> {"Lo","L"}; +lookup(78601) -> {"Lo","L"}; +lookup(78602) -> {"Lo","L"}; +lookup(78603) -> {"Lo","L"}; +lookup(78604) -> {"Lo","L"}; +lookup(78605) -> {"Lo","L"}; +lookup(78606) -> {"Lo","L"}; +lookup(78607) -> {"Lo","L"}; +lookup(78608) -> {"Lo","L"}; +lookup(78609) -> 
{"Lo","L"}; +lookup(78610) -> {"Lo","L"}; +lookup(78611) -> {"Lo","L"}; +lookup(78612) -> {"Lo","L"}; +lookup(78613) -> {"Lo","L"}; +lookup(78614) -> {"Lo","L"}; +lookup(78615) -> {"Lo","L"}; +lookup(78616) -> {"Lo","L"}; +lookup(78617) -> {"Lo","L"}; +lookup(78618) -> {"Lo","L"}; +lookup(78619) -> {"Lo","L"}; +lookup(78620) -> {"Lo","L"}; +lookup(78621) -> {"Lo","L"}; +lookup(78622) -> {"Lo","L"}; +lookup(78623) -> {"Lo","L"}; +lookup(78624) -> {"Lo","L"}; +lookup(78625) -> {"Lo","L"}; +lookup(78626) -> {"Lo","L"}; +lookup(78627) -> {"Lo","L"}; +lookup(78628) -> {"Lo","L"}; +lookup(78629) -> {"Lo","L"}; +lookup(78630) -> {"Lo","L"}; +lookup(78631) -> {"Lo","L"}; +lookup(78632) -> {"Lo","L"}; +lookup(78633) -> {"Lo","L"}; +lookup(78634) -> {"Lo","L"}; +lookup(78635) -> {"Lo","L"}; +lookup(78636) -> {"Lo","L"}; +lookup(78637) -> {"Lo","L"}; +lookup(78638) -> {"Lo","L"}; +lookup(78639) -> {"Lo","L"}; +lookup(78640) -> {"Lo","L"}; +lookup(78641) -> {"Lo","L"}; +lookup(78642) -> {"Lo","L"}; +lookup(78643) -> {"Lo","L"}; +lookup(78644) -> {"Lo","L"}; +lookup(78645) -> {"Lo","L"}; +lookup(78646) -> {"Lo","L"}; +lookup(78647) -> {"Lo","L"}; +lookup(78648) -> {"Lo","L"}; +lookup(78649) -> {"Lo","L"}; +lookup(78650) -> {"Lo","L"}; +lookup(78651) -> {"Lo","L"}; +lookup(78652) -> {"Lo","L"}; +lookup(78653) -> {"Lo","L"}; +lookup(78654) -> {"Lo","L"}; +lookup(78655) -> {"Lo","L"}; +lookup(78656) -> {"Lo","L"}; +lookup(78657) -> {"Lo","L"}; +lookup(78658) -> {"Lo","L"}; +lookup(78659) -> {"Lo","L"}; +lookup(78660) -> {"Lo","L"}; +lookup(78661) -> {"Lo","L"}; +lookup(78662) -> {"Lo","L"}; +lookup(78663) -> {"Lo","L"}; +lookup(78664) -> {"Lo","L"}; +lookup(78665) -> {"Lo","L"}; +lookup(78666) -> {"Lo","L"}; +lookup(78667) -> {"Lo","L"}; +lookup(78668) -> {"Lo","L"}; +lookup(78669) -> {"Lo","L"}; +lookup(78670) -> {"Lo","L"}; +lookup(78671) -> {"Lo","L"}; +lookup(78672) -> {"Lo","L"}; +lookup(78673) -> {"Lo","L"}; +lookup(78674) -> {"Lo","L"}; +lookup(78675) -> {"Lo","L"}; 
+lookup(78676) -> {"Lo","L"}; +lookup(78677) -> {"Lo","L"}; +lookup(78678) -> {"Lo","L"}; +lookup(78679) -> {"Lo","L"}; +lookup(78680) -> {"Lo","L"}; +lookup(78681) -> {"Lo","L"}; +lookup(78682) -> {"Lo","L"}; +lookup(78683) -> {"Lo","L"}; +lookup(78684) -> {"Lo","L"}; +lookup(78685) -> {"Lo","L"}; +lookup(78686) -> {"Lo","L"}; +lookup(78687) -> {"Lo","L"}; +lookup(78688) -> {"Lo","L"}; +lookup(78689) -> {"Lo","L"}; +lookup(78690) -> {"Lo","L"}; +lookup(78691) -> {"Lo","L"}; +lookup(78692) -> {"Lo","L"}; +lookup(78693) -> {"Lo","L"}; +lookup(78694) -> {"Lo","L"}; +lookup(78695) -> {"Lo","L"}; +lookup(78696) -> {"Lo","L"}; +lookup(78697) -> {"Lo","L"}; +lookup(78698) -> {"Lo","L"}; +lookup(78699) -> {"Lo","L"}; +lookup(78700) -> {"Lo","L"}; +lookup(78701) -> {"Lo","L"}; +lookup(78702) -> {"Lo","L"}; +lookup(78703) -> {"Lo","L"}; +lookup(78704) -> {"Lo","L"}; +lookup(78705) -> {"Lo","L"}; +lookup(78706) -> {"Lo","L"}; +lookup(78707) -> {"Lo","L"}; +lookup(78708) -> {"Lo","L"}; +lookup(78709) -> {"Lo","L"}; +lookup(78710) -> {"Lo","L"}; +lookup(78711) -> {"Lo","L"}; +lookup(78712) -> {"Lo","L"}; +lookup(78713) -> {"Lo","L"}; +lookup(78714) -> {"Lo","L"}; +lookup(78715) -> {"Lo","L"}; +lookup(78716) -> {"Lo","L"}; +lookup(78717) -> {"Lo","L"}; +lookup(78718) -> {"Lo","L"}; +lookup(78719) -> {"Lo","L"}; +lookup(78720) -> {"Lo","L"}; +lookup(78721) -> {"Lo","L"}; +lookup(78722) -> {"Lo","L"}; +lookup(78723) -> {"Lo","L"}; +lookup(78724) -> {"Lo","L"}; +lookup(78725) -> {"Lo","L"}; +lookup(78726) -> {"Lo","L"}; +lookup(78727) -> {"Lo","L"}; +lookup(78728) -> {"Lo","L"}; +lookup(78729) -> {"Lo","L"}; +lookup(78730) -> {"Lo","L"}; +lookup(78731) -> {"Lo","L"}; +lookup(78732) -> {"Lo","L"}; +lookup(78733) -> {"Lo","L"}; +lookup(78734) -> {"Lo","L"}; +lookup(78735) -> {"Lo","L"}; +lookup(78736) -> {"Lo","L"}; +lookup(78737) -> {"Lo","L"}; +lookup(78738) -> {"Lo","L"}; +lookup(78739) -> {"Lo","L"}; +lookup(78740) -> {"Lo","L"}; +lookup(78741) -> {"Lo","L"}; +lookup(78742) -> 
{"Lo","L"}; +lookup(78743) -> {"Lo","L"}; +lookup(78744) -> {"Lo","L"}; +lookup(78745) -> {"Lo","L"}; +lookup(78746) -> {"Lo","L"}; +lookup(78747) -> {"Lo","L"}; +lookup(78748) -> {"Lo","L"}; +lookup(78749) -> {"Lo","L"}; +lookup(78750) -> {"Lo","L"}; +lookup(78751) -> {"Lo","L"}; +lookup(78752) -> {"Lo","L"}; +lookup(78753) -> {"Lo","L"}; +lookup(78754) -> {"Lo","L"}; +lookup(78755) -> {"Lo","L"}; +lookup(78756) -> {"Lo","L"}; +lookup(78757) -> {"Lo","L"}; +lookup(78758) -> {"Lo","L"}; +lookup(78759) -> {"Lo","L"}; +lookup(78760) -> {"Lo","L"}; +lookup(78761) -> {"Lo","L"}; +lookup(78762) -> {"Lo","L"}; +lookup(78763) -> {"Lo","L"}; +lookup(78764) -> {"Lo","L"}; +lookup(78765) -> {"Lo","L"}; +lookup(78766) -> {"Lo","L"}; +lookup(78767) -> {"Lo","L"}; +lookup(78768) -> {"Lo","L"}; +lookup(78769) -> {"Lo","L"}; +lookup(78770) -> {"Lo","L"}; +lookup(78771) -> {"Lo","L"}; +lookup(78772) -> {"Lo","L"}; +lookup(78773) -> {"Lo","L"}; +lookup(78774) -> {"Lo","L"}; +lookup(78775) -> {"Lo","L"}; +lookup(78776) -> {"Lo","L"}; +lookup(78777) -> {"Lo","L"}; +lookup(78778) -> {"Lo","L"}; +lookup(78779) -> {"Lo","L"}; +lookup(78780) -> {"Lo","L"}; +lookup(78781) -> {"Lo","L"}; +lookup(78782) -> {"Lo","L"}; +lookup(78783) -> {"Lo","L"}; +lookup(78784) -> {"Lo","L"}; +lookup(78785) -> {"Lo","L"}; +lookup(78786) -> {"Lo","L"}; +lookup(78787) -> {"Lo","L"}; +lookup(78788) -> {"Lo","L"}; +lookup(78789) -> {"Lo","L"}; +lookup(78790) -> {"Lo","L"}; +lookup(78791) -> {"Lo","L"}; +lookup(78792) -> {"Lo","L"}; +lookup(78793) -> {"Lo","L"}; +lookup(78794) -> {"Lo","L"}; +lookup(78795) -> {"Lo","L"}; +lookup(78796) -> {"Lo","L"}; +lookup(78797) -> {"Lo","L"}; +lookup(78798) -> {"Lo","L"}; +lookup(78799) -> {"Lo","L"}; +lookup(78800) -> {"Lo","L"}; +lookup(78801) -> {"Lo","L"}; +lookup(78802) -> {"Lo","L"}; +lookup(78803) -> {"Lo","L"}; +lookup(78804) -> {"Lo","L"}; +lookup(78805) -> {"Lo","L"}; +lookup(78806) -> {"Lo","L"}; +lookup(78807) -> {"Lo","L"}; +lookup(78808) -> {"Lo","L"}; 
+lookup(78809) -> {"Lo","L"}; +lookup(78810) -> {"Lo","L"}; +lookup(78811) -> {"Lo","L"}; +lookup(78812) -> {"Lo","L"}; +lookup(78813) -> {"Lo","L"}; +lookup(78814) -> {"Lo","L"}; +lookup(78815) -> {"Lo","L"}; +lookup(78816) -> {"Lo","L"}; +lookup(78817) -> {"Lo","L"}; +lookup(78818) -> {"Lo","L"}; +lookup(78819) -> {"Lo","L"}; +lookup(78820) -> {"Lo","L"}; +lookup(78821) -> {"Lo","L"}; +lookup(78822) -> {"Lo","L"}; +lookup(78823) -> {"Lo","L"}; +lookup(78824) -> {"Lo","L"}; +lookup(78825) -> {"Lo","L"}; +lookup(78826) -> {"Lo","L"}; +lookup(78827) -> {"Lo","L"}; +lookup(78828) -> {"Lo","L"}; +lookup(78829) -> {"Lo","L"}; +lookup(78830) -> {"Lo","L"}; +lookup(78831) -> {"Lo","L"}; +lookup(78832) -> {"Lo","L"}; +lookup(78833) -> {"Lo","L"}; +lookup(78834) -> {"Lo","L"}; +lookup(78835) -> {"Lo","L"}; +lookup(78836) -> {"Lo","L"}; +lookup(78837) -> {"Lo","L"}; +lookup(78838) -> {"Lo","L"}; +lookup(78839) -> {"Lo","L"}; +lookup(78840) -> {"Lo","L"}; +lookup(78841) -> {"Lo","L"}; +lookup(78842) -> {"Lo","L"}; +lookup(78843) -> {"Lo","L"}; +lookup(78844) -> {"Lo","L"}; +lookup(78845) -> {"Lo","L"}; +lookup(78846) -> {"Lo","L"}; +lookup(78847) -> {"Lo","L"}; +lookup(78848) -> {"Lo","L"}; +lookup(78849) -> {"Lo","L"}; +lookup(78850) -> {"Lo","L"}; +lookup(78851) -> {"Lo","L"}; +lookup(78852) -> {"Lo","L"}; +lookup(78853) -> {"Lo","L"}; +lookup(78854) -> {"Lo","L"}; +lookup(78855) -> {"Lo","L"}; +lookup(78856) -> {"Lo","L"}; +lookup(78857) -> {"Lo","L"}; +lookup(78858) -> {"Lo","L"}; +lookup(78859) -> {"Lo","L"}; +lookup(78860) -> {"Lo","L"}; +lookup(78861) -> {"Lo","L"}; +lookup(78862) -> {"Lo","L"}; +lookup(78863) -> {"Lo","L"}; +lookup(78864) -> {"Lo","L"}; +lookup(78865) -> {"Lo","L"}; +lookup(78866) -> {"Lo","L"}; +lookup(78867) -> {"Lo","L"}; +lookup(78868) -> {"Lo","L"}; +lookup(78869) -> {"Lo","L"}; +lookup(78870) -> {"Lo","L"}; +lookup(78871) -> {"Lo","L"}; +lookup(78872) -> {"Lo","L"}; +lookup(78873) -> {"Lo","L"}; +lookup(78874) -> {"Lo","L"}; +lookup(78875) -> 
{"Lo","L"}; +lookup(78876) -> {"Lo","L"}; +lookup(78877) -> {"Lo","L"}; +lookup(78878) -> {"Lo","L"}; +lookup(78879) -> {"Lo","L"}; +lookup(78880) -> {"Lo","L"}; +lookup(78881) -> {"Lo","L"}; +lookup(78882) -> {"Lo","L"}; +lookup(78883) -> {"Lo","L"}; +lookup(78884) -> {"Lo","L"}; +lookup(78885) -> {"Lo","L"}; +lookup(78886) -> {"Lo","L"}; +lookup(78887) -> {"Lo","L"}; +lookup(78888) -> {"Lo","L"}; +lookup(78889) -> {"Lo","L"}; +lookup(78890) -> {"Lo","L"}; +lookup(78891) -> {"Lo","L"}; +lookup(78892) -> {"Lo","L"}; +lookup(78893) -> {"Lo","L"}; +lookup(78894) -> {"Lo","L"}; +lookup(78896) -> {"Cf","L"}; +lookup(78897) -> {"Cf","L"}; +lookup(78898) -> {"Cf","L"}; +lookup(78899) -> {"Cf","L"}; +lookup(78900) -> {"Cf","L"}; +lookup(78901) -> {"Cf","L"}; +lookup(78902) -> {"Cf","L"}; +lookup(78903) -> {"Cf","L"}; +lookup(78904) -> {"Cf","L"}; +lookup(82944) -> {"Lo","L"}; +lookup(82945) -> {"Lo","L"}; +lookup(82946) -> {"Lo","L"}; +lookup(82947) -> {"Lo","L"}; +lookup(82948) -> {"Lo","L"}; +lookup(82949) -> {"Lo","L"}; +lookup(82950) -> {"Lo","L"}; +lookup(82951) -> {"Lo","L"}; +lookup(82952) -> {"Lo","L"}; +lookup(82953) -> {"Lo","L"}; +lookup(82954) -> {"Lo","L"}; +lookup(82955) -> {"Lo","L"}; +lookup(82956) -> {"Lo","L"}; +lookup(82957) -> {"Lo","L"}; +lookup(82958) -> {"Lo","L"}; +lookup(82959) -> {"Lo","L"}; +lookup(82960) -> {"Lo","L"}; +lookup(82961) -> {"Lo","L"}; +lookup(82962) -> {"Lo","L"}; +lookup(82963) -> {"Lo","L"}; +lookup(82964) -> {"Lo","L"}; +lookup(82965) -> {"Lo","L"}; +lookup(82966) -> {"Lo","L"}; +lookup(82967) -> {"Lo","L"}; +lookup(82968) -> {"Lo","L"}; +lookup(82969) -> {"Lo","L"}; +lookup(82970) -> {"Lo","L"}; +lookup(82971) -> {"Lo","L"}; +lookup(82972) -> {"Lo","L"}; +lookup(82973) -> {"Lo","L"}; +lookup(82974) -> {"Lo","L"}; +lookup(82975) -> {"Lo","L"}; +lookup(82976) -> {"Lo","L"}; +lookup(82977) -> {"Lo","L"}; +lookup(82978) -> {"Lo","L"}; +lookup(82979) -> {"Lo","L"}; +lookup(82980) -> {"Lo","L"}; +lookup(82981) -> {"Lo","L"}; 
+lookup(82982) -> {"Lo","L"}; +lookup(82983) -> {"Lo","L"}; +lookup(82984) -> {"Lo","L"}; +lookup(82985) -> {"Lo","L"}; +lookup(82986) -> {"Lo","L"}; +lookup(82987) -> {"Lo","L"}; +lookup(82988) -> {"Lo","L"}; +lookup(82989) -> {"Lo","L"}; +lookup(82990) -> {"Lo","L"}; +lookup(82991) -> {"Lo","L"}; +lookup(82992) -> {"Lo","L"}; +lookup(82993) -> {"Lo","L"}; +lookup(82994) -> {"Lo","L"}; +lookup(82995) -> {"Lo","L"}; +lookup(82996) -> {"Lo","L"}; +lookup(82997) -> {"Lo","L"}; +lookup(82998) -> {"Lo","L"}; +lookup(82999) -> {"Lo","L"}; +lookup(83000) -> {"Lo","L"}; +lookup(83001) -> {"Lo","L"}; +lookup(83002) -> {"Lo","L"}; +lookup(83003) -> {"Lo","L"}; +lookup(83004) -> {"Lo","L"}; +lookup(83005) -> {"Lo","L"}; +lookup(83006) -> {"Lo","L"}; +lookup(83007) -> {"Lo","L"}; +lookup(83008) -> {"Lo","L"}; +lookup(83009) -> {"Lo","L"}; +lookup(83010) -> {"Lo","L"}; +lookup(83011) -> {"Lo","L"}; +lookup(83012) -> {"Lo","L"}; +lookup(83013) -> {"Lo","L"}; +lookup(83014) -> {"Lo","L"}; +lookup(83015) -> {"Lo","L"}; +lookup(83016) -> {"Lo","L"}; +lookup(83017) -> {"Lo","L"}; +lookup(83018) -> {"Lo","L"}; +lookup(83019) -> {"Lo","L"}; +lookup(83020) -> {"Lo","L"}; +lookup(83021) -> {"Lo","L"}; +lookup(83022) -> {"Lo","L"}; +lookup(83023) -> {"Lo","L"}; +lookup(83024) -> {"Lo","L"}; +lookup(83025) -> {"Lo","L"}; +lookup(83026) -> {"Lo","L"}; +lookup(83027) -> {"Lo","L"}; +lookup(83028) -> {"Lo","L"}; +lookup(83029) -> {"Lo","L"}; +lookup(83030) -> {"Lo","L"}; +lookup(83031) -> {"Lo","L"}; +lookup(83032) -> {"Lo","L"}; +lookup(83033) -> {"Lo","L"}; +lookup(83034) -> {"Lo","L"}; +lookup(83035) -> {"Lo","L"}; +lookup(83036) -> {"Lo","L"}; +lookup(83037) -> {"Lo","L"}; +lookup(83038) -> {"Lo","L"}; +lookup(83039) -> {"Lo","L"}; +lookup(83040) -> {"Lo","L"}; +lookup(83041) -> {"Lo","L"}; +lookup(83042) -> {"Lo","L"}; +lookup(83043) -> {"Lo","L"}; +lookup(83044) -> {"Lo","L"}; +lookup(83045) -> {"Lo","L"}; +lookup(83046) -> {"Lo","L"}; +lookup(83047) -> {"Lo","L"}; +lookup(83048) -> 
{"Lo","L"}; +lookup(83049) -> {"Lo","L"}; +lookup(83050) -> {"Lo","L"}; +lookup(83051) -> {"Lo","L"}; +lookup(83052) -> {"Lo","L"}; +lookup(83053) -> {"Lo","L"}; +lookup(83054) -> {"Lo","L"}; +lookup(83055) -> {"Lo","L"}; +lookup(83056) -> {"Lo","L"}; +lookup(83057) -> {"Lo","L"}; +lookup(83058) -> {"Lo","L"}; +lookup(83059) -> {"Lo","L"}; +lookup(83060) -> {"Lo","L"}; +lookup(83061) -> {"Lo","L"}; +lookup(83062) -> {"Lo","L"}; +lookup(83063) -> {"Lo","L"}; +lookup(83064) -> {"Lo","L"}; +lookup(83065) -> {"Lo","L"}; +lookup(83066) -> {"Lo","L"}; +lookup(83067) -> {"Lo","L"}; +lookup(83068) -> {"Lo","L"}; +lookup(83069) -> {"Lo","L"}; +lookup(83070) -> {"Lo","L"}; +lookup(83071) -> {"Lo","L"}; +lookup(83072) -> {"Lo","L"}; +lookup(83073) -> {"Lo","L"}; +lookup(83074) -> {"Lo","L"}; +lookup(83075) -> {"Lo","L"}; +lookup(83076) -> {"Lo","L"}; +lookup(83077) -> {"Lo","L"}; +lookup(83078) -> {"Lo","L"}; +lookup(83079) -> {"Lo","L"}; +lookup(83080) -> {"Lo","L"}; +lookup(83081) -> {"Lo","L"}; +lookup(83082) -> {"Lo","L"}; +lookup(83083) -> {"Lo","L"}; +lookup(83084) -> {"Lo","L"}; +lookup(83085) -> {"Lo","L"}; +lookup(83086) -> {"Lo","L"}; +lookup(83087) -> {"Lo","L"}; +lookup(83088) -> {"Lo","L"}; +lookup(83089) -> {"Lo","L"}; +lookup(83090) -> {"Lo","L"}; +lookup(83091) -> {"Lo","L"}; +lookup(83092) -> {"Lo","L"}; +lookup(83093) -> {"Lo","L"}; +lookup(83094) -> {"Lo","L"}; +lookup(83095) -> {"Lo","L"}; +lookup(83096) -> {"Lo","L"}; +lookup(83097) -> {"Lo","L"}; +lookup(83098) -> {"Lo","L"}; +lookup(83099) -> {"Lo","L"}; +lookup(83100) -> {"Lo","L"}; +lookup(83101) -> {"Lo","L"}; +lookup(83102) -> {"Lo","L"}; +lookup(83103) -> {"Lo","L"}; +lookup(83104) -> {"Lo","L"}; +lookup(83105) -> {"Lo","L"}; +lookup(83106) -> {"Lo","L"}; +lookup(83107) -> {"Lo","L"}; +lookup(83108) -> {"Lo","L"}; +lookup(83109) -> {"Lo","L"}; +lookup(83110) -> {"Lo","L"}; +lookup(83111) -> {"Lo","L"}; +lookup(83112) -> {"Lo","L"}; +lookup(83113) -> {"Lo","L"}; +lookup(83114) -> {"Lo","L"}; 
+lookup(83115) -> {"Lo","L"}; +lookup(83116) -> {"Lo","L"}; +lookup(83117) -> {"Lo","L"}; +lookup(83118) -> {"Lo","L"}; +lookup(83119) -> {"Lo","L"}; +lookup(83120) -> {"Lo","L"}; +lookup(83121) -> {"Lo","L"}; +lookup(83122) -> {"Lo","L"}; +lookup(83123) -> {"Lo","L"}; +lookup(83124) -> {"Lo","L"}; +lookup(83125) -> {"Lo","L"}; +lookup(83126) -> {"Lo","L"}; +lookup(83127) -> {"Lo","L"}; +lookup(83128) -> {"Lo","L"}; +lookup(83129) -> {"Lo","L"}; +lookup(83130) -> {"Lo","L"}; +lookup(83131) -> {"Lo","L"}; +lookup(83132) -> {"Lo","L"}; +lookup(83133) -> {"Lo","L"}; +lookup(83134) -> {"Lo","L"}; +lookup(83135) -> {"Lo","L"}; +lookup(83136) -> {"Lo","L"}; +lookup(83137) -> {"Lo","L"}; +lookup(83138) -> {"Lo","L"}; +lookup(83139) -> {"Lo","L"}; +lookup(83140) -> {"Lo","L"}; +lookup(83141) -> {"Lo","L"}; +lookup(83142) -> {"Lo","L"}; +lookup(83143) -> {"Lo","L"}; +lookup(83144) -> {"Lo","L"}; +lookup(83145) -> {"Lo","L"}; +lookup(83146) -> {"Lo","L"}; +lookup(83147) -> {"Lo","L"}; +lookup(83148) -> {"Lo","L"}; +lookup(83149) -> {"Lo","L"}; +lookup(83150) -> {"Lo","L"}; +lookup(83151) -> {"Lo","L"}; +lookup(83152) -> {"Lo","L"}; +lookup(83153) -> {"Lo","L"}; +lookup(83154) -> {"Lo","L"}; +lookup(83155) -> {"Lo","L"}; +lookup(83156) -> {"Lo","L"}; +lookup(83157) -> {"Lo","L"}; +lookup(83158) -> {"Lo","L"}; +lookup(83159) -> {"Lo","L"}; +lookup(83160) -> {"Lo","L"}; +lookup(83161) -> {"Lo","L"}; +lookup(83162) -> {"Lo","L"}; +lookup(83163) -> {"Lo","L"}; +lookup(83164) -> {"Lo","L"}; +lookup(83165) -> {"Lo","L"}; +lookup(83166) -> {"Lo","L"}; +lookup(83167) -> {"Lo","L"}; +lookup(83168) -> {"Lo","L"}; +lookup(83169) -> {"Lo","L"}; +lookup(83170) -> {"Lo","L"}; +lookup(83171) -> {"Lo","L"}; +lookup(83172) -> {"Lo","L"}; +lookup(83173) -> {"Lo","L"}; +lookup(83174) -> {"Lo","L"}; +lookup(83175) -> {"Lo","L"}; +lookup(83176) -> {"Lo","L"}; +lookup(83177) -> {"Lo","L"}; +lookup(83178) -> {"Lo","L"}; +lookup(83179) -> {"Lo","L"}; +lookup(83180) -> {"Lo","L"}; +lookup(83181) -> 
{"Lo","L"}; +lookup(83182) -> {"Lo","L"}; +lookup(83183) -> {"Lo","L"}; +lookup(83184) -> {"Lo","L"}; +lookup(83185) -> {"Lo","L"}; +lookup(83186) -> {"Lo","L"}; +lookup(83187) -> {"Lo","L"}; +lookup(83188) -> {"Lo","L"}; +lookup(83189) -> {"Lo","L"}; +lookup(83190) -> {"Lo","L"}; +lookup(83191) -> {"Lo","L"}; +lookup(83192) -> {"Lo","L"}; +lookup(83193) -> {"Lo","L"}; +lookup(83194) -> {"Lo","L"}; +lookup(83195) -> {"Lo","L"}; +lookup(83196) -> {"Lo","L"}; +lookup(83197) -> {"Lo","L"}; +lookup(83198) -> {"Lo","L"}; +lookup(83199) -> {"Lo","L"}; +lookup(83200) -> {"Lo","L"}; +lookup(83201) -> {"Lo","L"}; +lookup(83202) -> {"Lo","L"}; +lookup(83203) -> {"Lo","L"}; +lookup(83204) -> {"Lo","L"}; +lookup(83205) -> {"Lo","L"}; +lookup(83206) -> {"Lo","L"}; +lookup(83207) -> {"Lo","L"}; +lookup(83208) -> {"Lo","L"}; +lookup(83209) -> {"Lo","L"}; +lookup(83210) -> {"Lo","L"}; +lookup(83211) -> {"Lo","L"}; +lookup(83212) -> {"Lo","L"}; +lookup(83213) -> {"Lo","L"}; +lookup(83214) -> {"Lo","L"}; +lookup(83215) -> {"Lo","L"}; +lookup(83216) -> {"Lo","L"}; +lookup(83217) -> {"Lo","L"}; +lookup(83218) -> {"Lo","L"}; +lookup(83219) -> {"Lo","L"}; +lookup(83220) -> {"Lo","L"}; +lookup(83221) -> {"Lo","L"}; +lookup(83222) -> {"Lo","L"}; +lookup(83223) -> {"Lo","L"}; +lookup(83224) -> {"Lo","L"}; +lookup(83225) -> {"Lo","L"}; +lookup(83226) -> {"Lo","L"}; +lookup(83227) -> {"Lo","L"}; +lookup(83228) -> {"Lo","L"}; +lookup(83229) -> {"Lo","L"}; +lookup(83230) -> {"Lo","L"}; +lookup(83231) -> {"Lo","L"}; +lookup(83232) -> {"Lo","L"}; +lookup(83233) -> {"Lo","L"}; +lookup(83234) -> {"Lo","L"}; +lookup(83235) -> {"Lo","L"}; +lookup(83236) -> {"Lo","L"}; +lookup(83237) -> {"Lo","L"}; +lookup(83238) -> {"Lo","L"}; +lookup(83239) -> {"Lo","L"}; +lookup(83240) -> {"Lo","L"}; +lookup(83241) -> {"Lo","L"}; +lookup(83242) -> {"Lo","L"}; +lookup(83243) -> {"Lo","L"}; +lookup(83244) -> {"Lo","L"}; +lookup(83245) -> {"Lo","L"}; +lookup(83246) -> {"Lo","L"}; +lookup(83247) -> {"Lo","L"}; 
+lookup(83248) -> {"Lo","L"}; +lookup(83249) -> {"Lo","L"}; +lookup(83250) -> {"Lo","L"}; +lookup(83251) -> {"Lo","L"}; +lookup(83252) -> {"Lo","L"}; +lookup(83253) -> {"Lo","L"}; +lookup(83254) -> {"Lo","L"}; +lookup(83255) -> {"Lo","L"}; +lookup(83256) -> {"Lo","L"}; +lookup(83257) -> {"Lo","L"}; +lookup(83258) -> {"Lo","L"}; +lookup(83259) -> {"Lo","L"}; +lookup(83260) -> {"Lo","L"}; +lookup(83261) -> {"Lo","L"}; +lookup(83262) -> {"Lo","L"}; +lookup(83263) -> {"Lo","L"}; +lookup(83264) -> {"Lo","L"}; +lookup(83265) -> {"Lo","L"}; +lookup(83266) -> {"Lo","L"}; +lookup(83267) -> {"Lo","L"}; +lookup(83268) -> {"Lo","L"}; +lookup(83269) -> {"Lo","L"}; +lookup(83270) -> {"Lo","L"}; +lookup(83271) -> {"Lo","L"}; +lookup(83272) -> {"Lo","L"}; +lookup(83273) -> {"Lo","L"}; +lookup(83274) -> {"Lo","L"}; +lookup(83275) -> {"Lo","L"}; +lookup(83276) -> {"Lo","L"}; +lookup(83277) -> {"Lo","L"}; +lookup(83278) -> {"Lo","L"}; +lookup(83279) -> {"Lo","L"}; +lookup(83280) -> {"Lo","L"}; +lookup(83281) -> {"Lo","L"}; +lookup(83282) -> {"Lo","L"}; +lookup(83283) -> {"Lo","L"}; +lookup(83284) -> {"Lo","L"}; +lookup(83285) -> {"Lo","L"}; +lookup(83286) -> {"Lo","L"}; +lookup(83287) -> {"Lo","L"}; +lookup(83288) -> {"Lo","L"}; +lookup(83289) -> {"Lo","L"}; +lookup(83290) -> {"Lo","L"}; +lookup(83291) -> {"Lo","L"}; +lookup(83292) -> {"Lo","L"}; +lookup(83293) -> {"Lo","L"}; +lookup(83294) -> {"Lo","L"}; +lookup(83295) -> {"Lo","L"}; +lookup(83296) -> {"Lo","L"}; +lookup(83297) -> {"Lo","L"}; +lookup(83298) -> {"Lo","L"}; +lookup(83299) -> {"Lo","L"}; +lookup(83300) -> {"Lo","L"}; +lookup(83301) -> {"Lo","L"}; +lookup(83302) -> {"Lo","L"}; +lookup(83303) -> {"Lo","L"}; +lookup(83304) -> {"Lo","L"}; +lookup(83305) -> {"Lo","L"}; +lookup(83306) -> {"Lo","L"}; +lookup(83307) -> {"Lo","L"}; +lookup(83308) -> {"Lo","L"}; +lookup(83309) -> {"Lo","L"}; +lookup(83310) -> {"Lo","L"}; +lookup(83311) -> {"Lo","L"}; +lookup(83312) -> {"Lo","L"}; +lookup(83313) -> {"Lo","L"}; +lookup(83314) -> 
{"Lo","L"}; +lookup(83315) -> {"Lo","L"}; +lookup(83316) -> {"Lo","L"}; +lookup(83317) -> {"Lo","L"}; +lookup(83318) -> {"Lo","L"}; +lookup(83319) -> {"Lo","L"}; +lookup(83320) -> {"Lo","L"}; +lookup(83321) -> {"Lo","L"}; +lookup(83322) -> {"Lo","L"}; +lookup(83323) -> {"Lo","L"}; +lookup(83324) -> {"Lo","L"}; +lookup(83325) -> {"Lo","L"}; +lookup(83326) -> {"Lo","L"}; +lookup(83327) -> {"Lo","L"}; +lookup(83328) -> {"Lo","L"}; +lookup(83329) -> {"Lo","L"}; +lookup(83330) -> {"Lo","L"}; +lookup(83331) -> {"Lo","L"}; +lookup(83332) -> {"Lo","L"}; +lookup(83333) -> {"Lo","L"}; +lookup(83334) -> {"Lo","L"}; +lookup(83335) -> {"Lo","L"}; +lookup(83336) -> {"Lo","L"}; +lookup(83337) -> {"Lo","L"}; +lookup(83338) -> {"Lo","L"}; +lookup(83339) -> {"Lo","L"}; +lookup(83340) -> {"Lo","L"}; +lookup(83341) -> {"Lo","L"}; +lookup(83342) -> {"Lo","L"}; +lookup(83343) -> {"Lo","L"}; +lookup(83344) -> {"Lo","L"}; +lookup(83345) -> {"Lo","L"}; +lookup(83346) -> {"Lo","L"}; +lookup(83347) -> {"Lo","L"}; +lookup(83348) -> {"Lo","L"}; +lookup(83349) -> {"Lo","L"}; +lookup(83350) -> {"Lo","L"}; +lookup(83351) -> {"Lo","L"}; +lookup(83352) -> {"Lo","L"}; +lookup(83353) -> {"Lo","L"}; +lookup(83354) -> {"Lo","L"}; +lookup(83355) -> {"Lo","L"}; +lookup(83356) -> {"Lo","L"}; +lookup(83357) -> {"Lo","L"}; +lookup(83358) -> {"Lo","L"}; +lookup(83359) -> {"Lo","L"}; +lookup(83360) -> {"Lo","L"}; +lookup(83361) -> {"Lo","L"}; +lookup(83362) -> {"Lo","L"}; +lookup(83363) -> {"Lo","L"}; +lookup(83364) -> {"Lo","L"}; +lookup(83365) -> {"Lo","L"}; +lookup(83366) -> {"Lo","L"}; +lookup(83367) -> {"Lo","L"}; +lookup(83368) -> {"Lo","L"}; +lookup(83369) -> {"Lo","L"}; +lookup(83370) -> {"Lo","L"}; +lookup(83371) -> {"Lo","L"}; +lookup(83372) -> {"Lo","L"}; +lookup(83373) -> {"Lo","L"}; +lookup(83374) -> {"Lo","L"}; +lookup(83375) -> {"Lo","L"}; +lookup(83376) -> {"Lo","L"}; +lookup(83377) -> {"Lo","L"}; +lookup(83378) -> {"Lo","L"}; +lookup(83379) -> {"Lo","L"}; +lookup(83380) -> {"Lo","L"}; 
+lookup(83381) -> {"Lo","L"}; +lookup(83382) -> {"Lo","L"}; +lookup(83383) -> {"Lo","L"}; +lookup(83384) -> {"Lo","L"}; +lookup(83385) -> {"Lo","L"}; +lookup(83386) -> {"Lo","L"}; +lookup(83387) -> {"Lo","L"}; +lookup(83388) -> {"Lo","L"}; +lookup(83389) -> {"Lo","L"}; +lookup(83390) -> {"Lo","L"}; +lookup(83391) -> {"Lo","L"}; +lookup(83392) -> {"Lo","L"}; +lookup(83393) -> {"Lo","L"}; +lookup(83394) -> {"Lo","L"}; +lookup(83395) -> {"Lo","L"}; +lookup(83396) -> {"Lo","L"}; +lookup(83397) -> {"Lo","L"}; +lookup(83398) -> {"Lo","L"}; +lookup(83399) -> {"Lo","L"}; +lookup(83400) -> {"Lo","L"}; +lookup(83401) -> {"Lo","L"}; +lookup(83402) -> {"Lo","L"}; +lookup(83403) -> {"Lo","L"}; +lookup(83404) -> {"Lo","L"}; +lookup(83405) -> {"Lo","L"}; +lookup(83406) -> {"Lo","L"}; +lookup(83407) -> {"Lo","L"}; +lookup(83408) -> {"Lo","L"}; +lookup(83409) -> {"Lo","L"}; +lookup(83410) -> {"Lo","L"}; +lookup(83411) -> {"Lo","L"}; +lookup(83412) -> {"Lo","L"}; +lookup(83413) -> {"Lo","L"}; +lookup(83414) -> {"Lo","L"}; +lookup(83415) -> {"Lo","L"}; +lookup(83416) -> {"Lo","L"}; +lookup(83417) -> {"Lo","L"}; +lookup(83418) -> {"Lo","L"}; +lookup(83419) -> {"Lo","L"}; +lookup(83420) -> {"Lo","L"}; +lookup(83421) -> {"Lo","L"}; +lookup(83422) -> {"Lo","L"}; +lookup(83423) -> {"Lo","L"}; +lookup(83424) -> {"Lo","L"}; +lookup(83425) -> {"Lo","L"}; +lookup(83426) -> {"Lo","L"}; +lookup(83427) -> {"Lo","L"}; +lookup(83428) -> {"Lo","L"}; +lookup(83429) -> {"Lo","L"}; +lookup(83430) -> {"Lo","L"}; +lookup(83431) -> {"Lo","L"}; +lookup(83432) -> {"Lo","L"}; +lookup(83433) -> {"Lo","L"}; +lookup(83434) -> {"Lo","L"}; +lookup(83435) -> {"Lo","L"}; +lookup(83436) -> {"Lo","L"}; +lookup(83437) -> {"Lo","L"}; +lookup(83438) -> {"Lo","L"}; +lookup(83439) -> {"Lo","L"}; +lookup(83440) -> {"Lo","L"}; +lookup(83441) -> {"Lo","L"}; +lookup(83442) -> {"Lo","L"}; +lookup(83443) -> {"Lo","L"}; +lookup(83444) -> {"Lo","L"}; +lookup(83445) -> {"Lo","L"}; +lookup(83446) -> {"Lo","L"}; +lookup(83447) -> 
{"Lo","L"}; +lookup(83448) -> {"Lo","L"}; +lookup(83449) -> {"Lo","L"}; +lookup(83450) -> {"Lo","L"}; +lookup(83451) -> {"Lo","L"}; +lookup(83452) -> {"Lo","L"}; +lookup(83453) -> {"Lo","L"}; +lookup(83454) -> {"Lo","L"}; +lookup(83455) -> {"Lo","L"}; +lookup(83456) -> {"Lo","L"}; +lookup(83457) -> {"Lo","L"}; +lookup(83458) -> {"Lo","L"}; +lookup(83459) -> {"Lo","L"}; +lookup(83460) -> {"Lo","L"}; +lookup(83461) -> {"Lo","L"}; +lookup(83462) -> {"Lo","L"}; +lookup(83463) -> {"Lo","L"}; +lookup(83464) -> {"Lo","L"}; +lookup(83465) -> {"Lo","L"}; +lookup(83466) -> {"Lo","L"}; +lookup(83467) -> {"Lo","L"}; +lookup(83468) -> {"Lo","L"}; +lookup(83469) -> {"Lo","L"}; +lookup(83470) -> {"Lo","L"}; +lookup(83471) -> {"Lo","L"}; +lookup(83472) -> {"Lo","L"}; +lookup(83473) -> {"Lo","L"}; +lookup(83474) -> {"Lo","L"}; +lookup(83475) -> {"Lo","L"}; +lookup(83476) -> {"Lo","L"}; +lookup(83477) -> {"Lo","L"}; +lookup(83478) -> {"Lo","L"}; +lookup(83479) -> {"Lo","L"}; +lookup(83480) -> {"Lo","L"}; +lookup(83481) -> {"Lo","L"}; +lookup(83482) -> {"Lo","L"}; +lookup(83483) -> {"Lo","L"}; +lookup(83484) -> {"Lo","L"}; +lookup(83485) -> {"Lo","L"}; +lookup(83486) -> {"Lo","L"}; +lookup(83487) -> {"Lo","L"}; +lookup(83488) -> {"Lo","L"}; +lookup(83489) -> {"Lo","L"}; +lookup(83490) -> {"Lo","L"}; +lookup(83491) -> {"Lo","L"}; +lookup(83492) -> {"Lo","L"}; +lookup(83493) -> {"Lo","L"}; +lookup(83494) -> {"Lo","L"}; +lookup(83495) -> {"Lo","L"}; +lookup(83496) -> {"Lo","L"}; +lookup(83497) -> {"Lo","L"}; +lookup(83498) -> {"Lo","L"}; +lookup(83499) -> {"Lo","L"}; +lookup(83500) -> {"Lo","L"}; +lookup(83501) -> {"Lo","L"}; +lookup(83502) -> {"Lo","L"}; +lookup(83503) -> {"Lo","L"}; +lookup(83504) -> {"Lo","L"}; +lookup(83505) -> {"Lo","L"}; +lookup(83506) -> {"Lo","L"}; +lookup(83507) -> {"Lo","L"}; +lookup(83508) -> {"Lo","L"}; +lookup(83509) -> {"Lo","L"}; +lookup(83510) -> {"Lo","L"}; +lookup(83511) -> {"Lo","L"}; +lookup(83512) -> {"Lo","L"}; +lookup(83513) -> {"Lo","L"}; 
+lookup(83514) -> {"Lo","L"}; +lookup(83515) -> {"Lo","L"}; +lookup(83516) -> {"Lo","L"}; +lookup(83517) -> {"Lo","L"}; +lookup(83518) -> {"Lo","L"}; +lookup(83519) -> {"Lo","L"}; +lookup(83520) -> {"Lo","L"}; +lookup(83521) -> {"Lo","L"}; +lookup(83522) -> {"Lo","L"}; +lookup(83523) -> {"Lo","L"}; +lookup(83524) -> {"Lo","L"}; +lookup(83525) -> {"Lo","L"}; +lookup(83526) -> {"Lo","L"}; +lookup(92160) -> {"Lo","L"}; +lookup(92161) -> {"Lo","L"}; +lookup(92162) -> {"Lo","L"}; +lookup(92163) -> {"Lo","L"}; +lookup(92164) -> {"Lo","L"}; +lookup(92165) -> {"Lo","L"}; +lookup(92166) -> {"Lo","L"}; +lookup(92167) -> {"Lo","L"}; +lookup(92168) -> {"Lo","L"}; +lookup(92169) -> {"Lo","L"}; +lookup(92170) -> {"Lo","L"}; +lookup(92171) -> {"Lo","L"}; +lookup(92172) -> {"Lo","L"}; +lookup(92173) -> {"Lo","L"}; +lookup(92174) -> {"Lo","L"}; +lookup(92175) -> {"Lo","L"}; +lookup(92176) -> {"Lo","L"}; +lookup(92177) -> {"Lo","L"}; +lookup(92178) -> {"Lo","L"}; +lookup(92179) -> {"Lo","L"}; +lookup(92180) -> {"Lo","L"}; +lookup(92181) -> {"Lo","L"}; +lookup(92182) -> {"Lo","L"}; +lookup(92183) -> {"Lo","L"}; +lookup(92184) -> {"Lo","L"}; +lookup(92185) -> {"Lo","L"}; +lookup(92186) -> {"Lo","L"}; +lookup(92187) -> {"Lo","L"}; +lookup(92188) -> {"Lo","L"}; +lookup(92189) -> {"Lo","L"}; +lookup(92190) -> {"Lo","L"}; +lookup(92191) -> {"Lo","L"}; +lookup(92192) -> {"Lo","L"}; +lookup(92193) -> {"Lo","L"}; +lookup(92194) -> {"Lo","L"}; +lookup(92195) -> {"Lo","L"}; +lookup(92196) -> {"Lo","L"}; +lookup(92197) -> {"Lo","L"}; +lookup(92198) -> {"Lo","L"}; +lookup(92199) -> {"Lo","L"}; +lookup(92200) -> {"Lo","L"}; +lookup(92201) -> {"Lo","L"}; +lookup(92202) -> {"Lo","L"}; +lookup(92203) -> {"Lo","L"}; +lookup(92204) -> {"Lo","L"}; +lookup(92205) -> {"Lo","L"}; +lookup(92206) -> {"Lo","L"}; +lookup(92207) -> {"Lo","L"}; +lookup(92208) -> {"Lo","L"}; +lookup(92209) -> {"Lo","L"}; +lookup(92210) -> {"Lo","L"}; +lookup(92211) -> {"Lo","L"}; +lookup(92212) -> {"Lo","L"}; +lookup(92213) -> 
{"Lo","L"}; +lookup(92214) -> {"Lo","L"}; +lookup(92215) -> {"Lo","L"}; +lookup(92216) -> {"Lo","L"}; +lookup(92217) -> {"Lo","L"}; +lookup(92218) -> {"Lo","L"}; +lookup(92219) -> {"Lo","L"}; +lookup(92220) -> {"Lo","L"}; +lookup(92221) -> {"Lo","L"}; +lookup(92222) -> {"Lo","L"}; +lookup(92223) -> {"Lo","L"}; +lookup(92224) -> {"Lo","L"}; +lookup(92225) -> {"Lo","L"}; +lookup(92226) -> {"Lo","L"}; +lookup(92227) -> {"Lo","L"}; +lookup(92228) -> {"Lo","L"}; +lookup(92229) -> {"Lo","L"}; +lookup(92230) -> {"Lo","L"}; +lookup(92231) -> {"Lo","L"}; +lookup(92232) -> {"Lo","L"}; +lookup(92233) -> {"Lo","L"}; +lookup(92234) -> {"Lo","L"}; +lookup(92235) -> {"Lo","L"}; +lookup(92236) -> {"Lo","L"}; +lookup(92237) -> {"Lo","L"}; +lookup(92238) -> {"Lo","L"}; +lookup(92239) -> {"Lo","L"}; +lookup(92240) -> {"Lo","L"}; +lookup(92241) -> {"Lo","L"}; +lookup(92242) -> {"Lo","L"}; +lookup(92243) -> {"Lo","L"}; +lookup(92244) -> {"Lo","L"}; +lookup(92245) -> {"Lo","L"}; +lookup(92246) -> {"Lo","L"}; +lookup(92247) -> {"Lo","L"}; +lookup(92248) -> {"Lo","L"}; +lookup(92249) -> {"Lo","L"}; +lookup(92250) -> {"Lo","L"}; +lookup(92251) -> {"Lo","L"}; +lookup(92252) -> {"Lo","L"}; +lookup(92253) -> {"Lo","L"}; +lookup(92254) -> {"Lo","L"}; +lookup(92255) -> {"Lo","L"}; +lookup(92256) -> {"Lo","L"}; +lookup(92257) -> {"Lo","L"}; +lookup(92258) -> {"Lo","L"}; +lookup(92259) -> {"Lo","L"}; +lookup(92260) -> {"Lo","L"}; +lookup(92261) -> {"Lo","L"}; +lookup(92262) -> {"Lo","L"}; +lookup(92263) -> {"Lo","L"}; +lookup(92264) -> {"Lo","L"}; +lookup(92265) -> {"Lo","L"}; +lookup(92266) -> {"Lo","L"}; +lookup(92267) -> {"Lo","L"}; +lookup(92268) -> {"Lo","L"}; +lookup(92269) -> {"Lo","L"}; +lookup(92270) -> {"Lo","L"}; +lookup(92271) -> {"Lo","L"}; +lookup(92272) -> {"Lo","L"}; +lookup(92273) -> {"Lo","L"}; +lookup(92274) -> {"Lo","L"}; +lookup(92275) -> {"Lo","L"}; +lookup(92276) -> {"Lo","L"}; +lookup(92277) -> {"Lo","L"}; +lookup(92278) -> {"Lo","L"}; +lookup(92279) -> {"Lo","L"}; 
+lookup(92280) -> {"Lo","L"}; +lookup(92281) -> {"Lo","L"}; +lookup(92282) -> {"Lo","L"}; +lookup(92283) -> {"Lo","L"}; +lookup(92284) -> {"Lo","L"}; +lookup(92285) -> {"Lo","L"}; +lookup(92286) -> {"Lo","L"}; +lookup(92287) -> {"Lo","L"}; +lookup(92288) -> {"Lo","L"}; +lookup(92289) -> {"Lo","L"}; +lookup(92290) -> {"Lo","L"}; +lookup(92291) -> {"Lo","L"}; +lookup(92292) -> {"Lo","L"}; +lookup(92293) -> {"Lo","L"}; +lookup(92294) -> {"Lo","L"}; +lookup(92295) -> {"Lo","L"}; +lookup(92296) -> {"Lo","L"}; +lookup(92297) -> {"Lo","L"}; +lookup(92298) -> {"Lo","L"}; +lookup(92299) -> {"Lo","L"}; +lookup(92300) -> {"Lo","L"}; +lookup(92301) -> {"Lo","L"}; +lookup(92302) -> {"Lo","L"}; +lookup(92303) -> {"Lo","L"}; +lookup(92304) -> {"Lo","L"}; +lookup(92305) -> {"Lo","L"}; +lookup(92306) -> {"Lo","L"}; +lookup(92307) -> {"Lo","L"}; +lookup(92308) -> {"Lo","L"}; +lookup(92309) -> {"Lo","L"}; +lookup(92310) -> {"Lo","L"}; +lookup(92311) -> {"Lo","L"}; +lookup(92312) -> {"Lo","L"}; +lookup(92313) -> {"Lo","L"}; +lookup(92314) -> {"Lo","L"}; +lookup(92315) -> {"Lo","L"}; +lookup(92316) -> {"Lo","L"}; +lookup(92317) -> {"Lo","L"}; +lookup(92318) -> {"Lo","L"}; +lookup(92319) -> {"Lo","L"}; +lookup(92320) -> {"Lo","L"}; +lookup(92321) -> {"Lo","L"}; +lookup(92322) -> {"Lo","L"}; +lookup(92323) -> {"Lo","L"}; +lookup(92324) -> {"Lo","L"}; +lookup(92325) -> {"Lo","L"}; +lookup(92326) -> {"Lo","L"}; +lookup(92327) -> {"Lo","L"}; +lookup(92328) -> {"Lo","L"}; +lookup(92329) -> {"Lo","L"}; +lookup(92330) -> {"Lo","L"}; +lookup(92331) -> {"Lo","L"}; +lookup(92332) -> {"Lo","L"}; +lookup(92333) -> {"Lo","L"}; +lookup(92334) -> {"Lo","L"}; +lookup(92335) -> {"Lo","L"}; +lookup(92336) -> {"Lo","L"}; +lookup(92337) -> {"Lo","L"}; +lookup(92338) -> {"Lo","L"}; +lookup(92339) -> {"Lo","L"}; +lookup(92340) -> {"Lo","L"}; +lookup(92341) -> {"Lo","L"}; +lookup(92342) -> {"Lo","L"}; +lookup(92343) -> {"Lo","L"}; +lookup(92344) -> {"Lo","L"}; +lookup(92345) -> {"Lo","L"}; +lookup(92346) -> 
{"Lo","L"}; +lookup(92347) -> {"Lo","L"}; +lookup(92348) -> {"Lo","L"}; +lookup(92349) -> {"Lo","L"}; +lookup(92350) -> {"Lo","L"}; +lookup(92351) -> {"Lo","L"}; +lookup(92352) -> {"Lo","L"}; +lookup(92353) -> {"Lo","L"}; +lookup(92354) -> {"Lo","L"}; +lookup(92355) -> {"Lo","L"}; +lookup(92356) -> {"Lo","L"}; +lookup(92357) -> {"Lo","L"}; +lookup(92358) -> {"Lo","L"}; +lookup(92359) -> {"Lo","L"}; +lookup(92360) -> {"Lo","L"}; +lookup(92361) -> {"Lo","L"}; +lookup(92362) -> {"Lo","L"}; +lookup(92363) -> {"Lo","L"}; +lookup(92364) -> {"Lo","L"}; +lookup(92365) -> {"Lo","L"}; +lookup(92366) -> {"Lo","L"}; +lookup(92367) -> {"Lo","L"}; +lookup(92368) -> {"Lo","L"}; +lookup(92369) -> {"Lo","L"}; +lookup(92370) -> {"Lo","L"}; +lookup(92371) -> {"Lo","L"}; +lookup(92372) -> {"Lo","L"}; +lookup(92373) -> {"Lo","L"}; +lookup(92374) -> {"Lo","L"}; +lookup(92375) -> {"Lo","L"}; +lookup(92376) -> {"Lo","L"}; +lookup(92377) -> {"Lo","L"}; +lookup(92378) -> {"Lo","L"}; +lookup(92379) -> {"Lo","L"}; +lookup(92380) -> {"Lo","L"}; +lookup(92381) -> {"Lo","L"}; +lookup(92382) -> {"Lo","L"}; +lookup(92383) -> {"Lo","L"}; +lookup(92384) -> {"Lo","L"}; +lookup(92385) -> {"Lo","L"}; +lookup(92386) -> {"Lo","L"}; +lookup(92387) -> {"Lo","L"}; +lookup(92388) -> {"Lo","L"}; +lookup(92389) -> {"Lo","L"}; +lookup(92390) -> {"Lo","L"}; +lookup(92391) -> {"Lo","L"}; +lookup(92392) -> {"Lo","L"}; +lookup(92393) -> {"Lo","L"}; +lookup(92394) -> {"Lo","L"}; +lookup(92395) -> {"Lo","L"}; +lookup(92396) -> {"Lo","L"}; +lookup(92397) -> {"Lo","L"}; +lookup(92398) -> {"Lo","L"}; +lookup(92399) -> {"Lo","L"}; +lookup(92400) -> {"Lo","L"}; +lookup(92401) -> {"Lo","L"}; +lookup(92402) -> {"Lo","L"}; +lookup(92403) -> {"Lo","L"}; +lookup(92404) -> {"Lo","L"}; +lookup(92405) -> {"Lo","L"}; +lookup(92406) -> {"Lo","L"}; +lookup(92407) -> {"Lo","L"}; +lookup(92408) -> {"Lo","L"}; +lookup(92409) -> {"Lo","L"}; +lookup(92410) -> {"Lo","L"}; +lookup(92411) -> {"Lo","L"}; +lookup(92412) -> {"Lo","L"}; 
+lookup(92413) -> {"Lo","L"}; +lookup(92414) -> {"Lo","L"}; +lookup(92415) -> {"Lo","L"}; +lookup(92416) -> {"Lo","L"}; +lookup(92417) -> {"Lo","L"}; +lookup(92418) -> {"Lo","L"}; +lookup(92419) -> {"Lo","L"}; +lookup(92420) -> {"Lo","L"}; +lookup(92421) -> {"Lo","L"}; +lookup(92422) -> {"Lo","L"}; +lookup(92423) -> {"Lo","L"}; +lookup(92424) -> {"Lo","L"}; +lookup(92425) -> {"Lo","L"}; +lookup(92426) -> {"Lo","L"}; +lookup(92427) -> {"Lo","L"}; +lookup(92428) -> {"Lo","L"}; +lookup(92429) -> {"Lo","L"}; +lookup(92430) -> {"Lo","L"}; +lookup(92431) -> {"Lo","L"}; +lookup(92432) -> {"Lo","L"}; +lookup(92433) -> {"Lo","L"}; +lookup(92434) -> {"Lo","L"}; +lookup(92435) -> {"Lo","L"}; +lookup(92436) -> {"Lo","L"}; +lookup(92437) -> {"Lo","L"}; +lookup(92438) -> {"Lo","L"}; +lookup(92439) -> {"Lo","L"}; +lookup(92440) -> {"Lo","L"}; +lookup(92441) -> {"Lo","L"}; +lookup(92442) -> {"Lo","L"}; +lookup(92443) -> {"Lo","L"}; +lookup(92444) -> {"Lo","L"}; +lookup(92445) -> {"Lo","L"}; +lookup(92446) -> {"Lo","L"}; +lookup(92447) -> {"Lo","L"}; +lookup(92448) -> {"Lo","L"}; +lookup(92449) -> {"Lo","L"}; +lookup(92450) -> {"Lo","L"}; +lookup(92451) -> {"Lo","L"}; +lookup(92452) -> {"Lo","L"}; +lookup(92453) -> {"Lo","L"}; +lookup(92454) -> {"Lo","L"}; +lookup(92455) -> {"Lo","L"}; +lookup(92456) -> {"Lo","L"}; +lookup(92457) -> {"Lo","L"}; +lookup(92458) -> {"Lo","L"}; +lookup(92459) -> {"Lo","L"}; +lookup(92460) -> {"Lo","L"}; +lookup(92461) -> {"Lo","L"}; +lookup(92462) -> {"Lo","L"}; +lookup(92463) -> {"Lo","L"}; +lookup(92464) -> {"Lo","L"}; +lookup(92465) -> {"Lo","L"}; +lookup(92466) -> {"Lo","L"}; +lookup(92467) -> {"Lo","L"}; +lookup(92468) -> {"Lo","L"}; +lookup(92469) -> {"Lo","L"}; +lookup(92470) -> {"Lo","L"}; +lookup(92471) -> {"Lo","L"}; +lookup(92472) -> {"Lo","L"}; +lookup(92473) -> {"Lo","L"}; +lookup(92474) -> {"Lo","L"}; +lookup(92475) -> {"Lo","L"}; +lookup(92476) -> {"Lo","L"}; +lookup(92477) -> {"Lo","L"}; +lookup(92478) -> {"Lo","L"}; +lookup(92479) -> 
{"Lo","L"}; +lookup(92480) -> {"Lo","L"}; +lookup(92481) -> {"Lo","L"}; +lookup(92482) -> {"Lo","L"}; +lookup(92483) -> {"Lo","L"}; +lookup(92484) -> {"Lo","L"}; +lookup(92485) -> {"Lo","L"}; +lookup(92486) -> {"Lo","L"}; +lookup(92487) -> {"Lo","L"}; +lookup(92488) -> {"Lo","L"}; +lookup(92489) -> {"Lo","L"}; +lookup(92490) -> {"Lo","L"}; +lookup(92491) -> {"Lo","L"}; +lookup(92492) -> {"Lo","L"}; +lookup(92493) -> {"Lo","L"}; +lookup(92494) -> {"Lo","L"}; +lookup(92495) -> {"Lo","L"}; +lookup(92496) -> {"Lo","L"}; +lookup(92497) -> {"Lo","L"}; +lookup(92498) -> {"Lo","L"}; +lookup(92499) -> {"Lo","L"}; +lookup(92500) -> {"Lo","L"}; +lookup(92501) -> {"Lo","L"}; +lookup(92502) -> {"Lo","L"}; +lookup(92503) -> {"Lo","L"}; +lookup(92504) -> {"Lo","L"}; +lookup(92505) -> {"Lo","L"}; +lookup(92506) -> {"Lo","L"}; +lookup(92507) -> {"Lo","L"}; +lookup(92508) -> {"Lo","L"}; +lookup(92509) -> {"Lo","L"}; +lookup(92510) -> {"Lo","L"}; +lookup(92511) -> {"Lo","L"}; +lookup(92512) -> {"Lo","L"}; +lookup(92513) -> {"Lo","L"}; +lookup(92514) -> {"Lo","L"}; +lookup(92515) -> {"Lo","L"}; +lookup(92516) -> {"Lo","L"}; +lookup(92517) -> {"Lo","L"}; +lookup(92518) -> {"Lo","L"}; +lookup(92519) -> {"Lo","L"}; +lookup(92520) -> {"Lo","L"}; +lookup(92521) -> {"Lo","L"}; +lookup(92522) -> {"Lo","L"}; +lookup(92523) -> {"Lo","L"}; +lookup(92524) -> {"Lo","L"}; +lookup(92525) -> {"Lo","L"}; +lookup(92526) -> {"Lo","L"}; +lookup(92527) -> {"Lo","L"}; +lookup(92528) -> {"Lo","L"}; +lookup(92529) -> {"Lo","L"}; +lookup(92530) -> {"Lo","L"}; +lookup(92531) -> {"Lo","L"}; +lookup(92532) -> {"Lo","L"}; +lookup(92533) -> {"Lo","L"}; +lookup(92534) -> {"Lo","L"}; +lookup(92535) -> {"Lo","L"}; +lookup(92536) -> {"Lo","L"}; +lookup(92537) -> {"Lo","L"}; +lookup(92538) -> {"Lo","L"}; +lookup(92539) -> {"Lo","L"}; +lookup(92540) -> {"Lo","L"}; +lookup(92541) -> {"Lo","L"}; +lookup(92542) -> {"Lo","L"}; +lookup(92543) -> {"Lo","L"}; +lookup(92544) -> {"Lo","L"}; +lookup(92545) -> {"Lo","L"}; 
+lookup(92546) -> {"Lo","L"}; +lookup(92547) -> {"Lo","L"}; +lookup(92548) -> {"Lo","L"}; +lookup(92549) -> {"Lo","L"}; +lookup(92550) -> {"Lo","L"}; +lookup(92551) -> {"Lo","L"}; +lookup(92552) -> {"Lo","L"}; +lookup(92553) -> {"Lo","L"}; +lookup(92554) -> {"Lo","L"}; +lookup(92555) -> {"Lo","L"}; +lookup(92556) -> {"Lo","L"}; +lookup(92557) -> {"Lo","L"}; +lookup(92558) -> {"Lo","L"}; +lookup(92559) -> {"Lo","L"}; +lookup(92560) -> {"Lo","L"}; +lookup(92561) -> {"Lo","L"}; +lookup(92562) -> {"Lo","L"}; +lookup(92563) -> {"Lo","L"}; +lookup(92564) -> {"Lo","L"}; +lookup(92565) -> {"Lo","L"}; +lookup(92566) -> {"Lo","L"}; +lookup(92567) -> {"Lo","L"}; +lookup(92568) -> {"Lo","L"}; +lookup(92569) -> {"Lo","L"}; +lookup(92570) -> {"Lo","L"}; +lookup(92571) -> {"Lo","L"}; +lookup(92572) -> {"Lo","L"}; +lookup(92573) -> {"Lo","L"}; +lookup(92574) -> {"Lo","L"}; +lookup(92575) -> {"Lo","L"}; +lookup(92576) -> {"Lo","L"}; +lookup(92577) -> {"Lo","L"}; +lookup(92578) -> {"Lo","L"}; +lookup(92579) -> {"Lo","L"}; +lookup(92580) -> {"Lo","L"}; +lookup(92581) -> {"Lo","L"}; +lookup(92582) -> {"Lo","L"}; +lookup(92583) -> {"Lo","L"}; +lookup(92584) -> {"Lo","L"}; +lookup(92585) -> {"Lo","L"}; +lookup(92586) -> {"Lo","L"}; +lookup(92587) -> {"Lo","L"}; +lookup(92588) -> {"Lo","L"}; +lookup(92589) -> {"Lo","L"}; +lookup(92590) -> {"Lo","L"}; +lookup(92591) -> {"Lo","L"}; +lookup(92592) -> {"Lo","L"}; +lookup(92593) -> {"Lo","L"}; +lookup(92594) -> {"Lo","L"}; +lookup(92595) -> {"Lo","L"}; +lookup(92596) -> {"Lo","L"}; +lookup(92597) -> {"Lo","L"}; +lookup(92598) -> {"Lo","L"}; +lookup(92599) -> {"Lo","L"}; +lookup(92600) -> {"Lo","L"}; +lookup(92601) -> {"Lo","L"}; +lookup(92602) -> {"Lo","L"}; +lookup(92603) -> {"Lo","L"}; +lookup(92604) -> {"Lo","L"}; +lookup(92605) -> {"Lo","L"}; +lookup(92606) -> {"Lo","L"}; +lookup(92607) -> {"Lo","L"}; +lookup(92608) -> {"Lo","L"}; +lookup(92609) -> {"Lo","L"}; +lookup(92610) -> {"Lo","L"}; +lookup(92611) -> {"Lo","L"}; +lookup(92612) -> 
{"Lo","L"}; +lookup(92613) -> {"Lo","L"}; +lookup(92614) -> {"Lo","L"}; +lookup(92615) -> {"Lo","L"}; +lookup(92616) -> {"Lo","L"}; +lookup(92617) -> {"Lo","L"}; +lookup(92618) -> {"Lo","L"}; +lookup(92619) -> {"Lo","L"}; +lookup(92620) -> {"Lo","L"}; +lookup(92621) -> {"Lo","L"}; +lookup(92622) -> {"Lo","L"}; +lookup(92623) -> {"Lo","L"}; +lookup(92624) -> {"Lo","L"}; +lookup(92625) -> {"Lo","L"}; +lookup(92626) -> {"Lo","L"}; +lookup(92627) -> {"Lo","L"}; +lookup(92628) -> {"Lo","L"}; +lookup(92629) -> {"Lo","L"}; +lookup(92630) -> {"Lo","L"}; +lookup(92631) -> {"Lo","L"}; +lookup(92632) -> {"Lo","L"}; +lookup(92633) -> {"Lo","L"}; +lookup(92634) -> {"Lo","L"}; +lookup(92635) -> {"Lo","L"}; +lookup(92636) -> {"Lo","L"}; +lookup(92637) -> {"Lo","L"}; +lookup(92638) -> {"Lo","L"}; +lookup(92639) -> {"Lo","L"}; +lookup(92640) -> {"Lo","L"}; +lookup(92641) -> {"Lo","L"}; +lookup(92642) -> {"Lo","L"}; +lookup(92643) -> {"Lo","L"}; +lookup(92644) -> {"Lo","L"}; +lookup(92645) -> {"Lo","L"}; +lookup(92646) -> {"Lo","L"}; +lookup(92647) -> {"Lo","L"}; +lookup(92648) -> {"Lo","L"}; +lookup(92649) -> {"Lo","L"}; +lookup(92650) -> {"Lo","L"}; +lookup(92651) -> {"Lo","L"}; +lookup(92652) -> {"Lo","L"}; +lookup(92653) -> {"Lo","L"}; +lookup(92654) -> {"Lo","L"}; +lookup(92655) -> {"Lo","L"}; +lookup(92656) -> {"Lo","L"}; +lookup(92657) -> {"Lo","L"}; +lookup(92658) -> {"Lo","L"}; +lookup(92659) -> {"Lo","L"}; +lookup(92660) -> {"Lo","L"}; +lookup(92661) -> {"Lo","L"}; +lookup(92662) -> {"Lo","L"}; +lookup(92663) -> {"Lo","L"}; +lookup(92664) -> {"Lo","L"}; +lookup(92665) -> {"Lo","L"}; +lookup(92666) -> {"Lo","L"}; +lookup(92667) -> {"Lo","L"}; +lookup(92668) -> {"Lo","L"}; +lookup(92669) -> {"Lo","L"}; +lookup(92670) -> {"Lo","L"}; +lookup(92671) -> {"Lo","L"}; +lookup(92672) -> {"Lo","L"}; +lookup(92673) -> {"Lo","L"}; +lookup(92674) -> {"Lo","L"}; +lookup(92675) -> {"Lo","L"}; +lookup(92676) -> {"Lo","L"}; +lookup(92677) -> {"Lo","L"}; +lookup(92678) -> {"Lo","L"}; 
+lookup(92679) -> {"Lo","L"}; +lookup(92680) -> {"Lo","L"}; +lookup(92681) -> {"Lo","L"}; +lookup(92682) -> {"Lo","L"}; +lookup(92683) -> {"Lo","L"}; +lookup(92684) -> {"Lo","L"}; +lookup(92685) -> {"Lo","L"}; +lookup(92686) -> {"Lo","L"}; +lookup(92687) -> {"Lo","L"}; +lookup(92688) -> {"Lo","L"}; +lookup(92689) -> {"Lo","L"}; +lookup(92690) -> {"Lo","L"}; +lookup(92691) -> {"Lo","L"}; +lookup(92692) -> {"Lo","L"}; +lookup(92693) -> {"Lo","L"}; +lookup(92694) -> {"Lo","L"}; +lookup(92695) -> {"Lo","L"}; +lookup(92696) -> {"Lo","L"}; +lookup(92697) -> {"Lo","L"}; +lookup(92698) -> {"Lo","L"}; +lookup(92699) -> {"Lo","L"}; +lookup(92700) -> {"Lo","L"}; +lookup(92701) -> {"Lo","L"}; +lookup(92702) -> {"Lo","L"}; +lookup(92703) -> {"Lo","L"}; +lookup(92704) -> {"Lo","L"}; +lookup(92705) -> {"Lo","L"}; +lookup(92706) -> {"Lo","L"}; +lookup(92707) -> {"Lo","L"}; +lookup(92708) -> {"Lo","L"}; +lookup(92709) -> {"Lo","L"}; +lookup(92710) -> {"Lo","L"}; +lookup(92711) -> {"Lo","L"}; +lookup(92712) -> {"Lo","L"}; +lookup(92713) -> {"Lo","L"}; +lookup(92714) -> {"Lo","L"}; +lookup(92715) -> {"Lo","L"}; +lookup(92716) -> {"Lo","L"}; +lookup(92717) -> {"Lo","L"}; +lookup(92718) -> {"Lo","L"}; +lookup(92719) -> {"Lo","L"}; +lookup(92720) -> {"Lo","L"}; +lookup(92721) -> {"Lo","L"}; +lookup(92722) -> {"Lo","L"}; +lookup(92723) -> {"Lo","L"}; +lookup(92724) -> {"Lo","L"}; +lookup(92725) -> {"Lo","L"}; +lookup(92726) -> {"Lo","L"}; +lookup(92727) -> {"Lo","L"}; +lookup(92728) -> {"Lo","L"}; +lookup(92736) -> {"Lo","L"}; +lookup(92737) -> {"Lo","L"}; +lookup(92738) -> {"Lo","L"}; +lookup(92739) -> {"Lo","L"}; +lookup(92740) -> {"Lo","L"}; +lookup(92741) -> {"Lo","L"}; +lookup(92742) -> {"Lo","L"}; +lookup(92743) -> {"Lo","L"}; +lookup(92744) -> {"Lo","L"}; +lookup(92745) -> {"Lo","L"}; +lookup(92746) -> {"Lo","L"}; +lookup(92747) -> {"Lo","L"}; +lookup(92748) -> {"Lo","L"}; +lookup(92749) -> {"Lo","L"}; +lookup(92750) -> {"Lo","L"}; +lookup(92751) -> {"Lo","L"}; +lookup(92752) -> 
{"Lo","L"}; +lookup(92753) -> {"Lo","L"}; +lookup(92754) -> {"Lo","L"}; +lookup(92755) -> {"Lo","L"}; +lookup(92756) -> {"Lo","L"}; +lookup(92757) -> {"Lo","L"}; +lookup(92758) -> {"Lo","L"}; +lookup(92759) -> {"Lo","L"}; +lookup(92760) -> {"Lo","L"}; +lookup(92761) -> {"Lo","L"}; +lookup(92762) -> {"Lo","L"}; +lookup(92763) -> {"Lo","L"}; +lookup(92764) -> {"Lo","L"}; +lookup(92765) -> {"Lo","L"}; +lookup(92766) -> {"Lo","L"}; +lookup(92768) -> {"Nd","L"}; +lookup(92769) -> {"Nd","L"}; +lookup(92770) -> {"Nd","L"}; +lookup(92771) -> {"Nd","L"}; +lookup(92772) -> {"Nd","L"}; +lookup(92773) -> {"Nd","L"}; +lookup(92774) -> {"Nd","L"}; +lookup(92775) -> {"Nd","L"}; +lookup(92776) -> {"Nd","L"}; +lookup(92777) -> {"Nd","L"}; +lookup(92782) -> {"Po","L"}; +lookup(92783) -> {"Po","L"}; +lookup(92880) -> {"Lo","L"}; +lookup(92881) -> {"Lo","L"}; +lookup(92882) -> {"Lo","L"}; +lookup(92883) -> {"Lo","L"}; +lookup(92884) -> {"Lo","L"}; +lookup(92885) -> {"Lo","L"}; +lookup(92886) -> {"Lo","L"}; +lookup(92887) -> {"Lo","L"}; +lookup(92888) -> {"Lo","L"}; +lookup(92889) -> {"Lo","L"}; +lookup(92890) -> {"Lo","L"}; +lookup(92891) -> {"Lo","L"}; +lookup(92892) -> {"Lo","L"}; +lookup(92893) -> {"Lo","L"}; +lookup(92894) -> {"Lo","L"}; +lookup(92895) -> {"Lo","L"}; +lookup(92896) -> {"Lo","L"}; +lookup(92897) -> {"Lo","L"}; +lookup(92898) -> {"Lo","L"}; +lookup(92899) -> {"Lo","L"}; +lookup(92900) -> {"Lo","L"}; +lookup(92901) -> {"Lo","L"}; +lookup(92902) -> {"Lo","L"}; +lookup(92903) -> {"Lo","L"}; +lookup(92904) -> {"Lo","L"}; +lookup(92905) -> {"Lo","L"}; +lookup(92906) -> {"Lo","L"}; +lookup(92907) -> {"Lo","L"}; +lookup(92908) -> {"Lo","L"}; +lookup(92909) -> {"Lo","L"}; +lookup(92912) -> {"Mn","NSM"}; +lookup(92913) -> {"Mn","NSM"}; +lookup(92914) -> {"Mn","NSM"}; +lookup(92915) -> {"Mn","NSM"}; +lookup(92916) -> {"Mn","NSM"}; +lookup(92917) -> {"Po","L"}; +lookup(92928) -> {"Lo","L"}; +lookup(92929) -> {"Lo","L"}; +lookup(92930) -> {"Lo","L"}; +lookup(92931) -> 
{"Lo","L"}; +lookup(92932) -> {"Lo","L"}; +lookup(92933) -> {"Lo","L"}; +lookup(92934) -> {"Lo","L"}; +lookup(92935) -> {"Lo","L"}; +lookup(92936) -> {"Lo","L"}; +lookup(92937) -> {"Lo","L"}; +lookup(92938) -> {"Lo","L"}; +lookup(92939) -> {"Lo","L"}; +lookup(92940) -> {"Lo","L"}; +lookup(92941) -> {"Lo","L"}; +lookup(92942) -> {"Lo","L"}; +lookup(92943) -> {"Lo","L"}; +lookup(92944) -> {"Lo","L"}; +lookup(92945) -> {"Lo","L"}; +lookup(92946) -> {"Lo","L"}; +lookup(92947) -> {"Lo","L"}; +lookup(92948) -> {"Lo","L"}; +lookup(92949) -> {"Lo","L"}; +lookup(92950) -> {"Lo","L"}; +lookup(92951) -> {"Lo","L"}; +lookup(92952) -> {"Lo","L"}; +lookup(92953) -> {"Lo","L"}; +lookup(92954) -> {"Lo","L"}; +lookup(92955) -> {"Lo","L"}; +lookup(92956) -> {"Lo","L"}; +lookup(92957) -> {"Lo","L"}; +lookup(92958) -> {"Lo","L"}; +lookup(92959) -> {"Lo","L"}; +lookup(92960) -> {"Lo","L"}; +lookup(92961) -> {"Lo","L"}; +lookup(92962) -> {"Lo","L"}; +lookup(92963) -> {"Lo","L"}; +lookup(92964) -> {"Lo","L"}; +lookup(92965) -> {"Lo","L"}; +lookup(92966) -> {"Lo","L"}; +lookup(92967) -> {"Lo","L"}; +lookup(92968) -> {"Lo","L"}; +lookup(92969) -> {"Lo","L"}; +lookup(92970) -> {"Lo","L"}; +lookup(92971) -> {"Lo","L"}; +lookup(92972) -> {"Lo","L"}; +lookup(92973) -> {"Lo","L"}; +lookup(92974) -> {"Lo","L"}; +lookup(92975) -> {"Lo","L"}; +lookup(92976) -> {"Mn","NSM"}; +lookup(92977) -> {"Mn","NSM"}; +lookup(92978) -> {"Mn","NSM"}; +lookup(92979) -> {"Mn","NSM"}; +lookup(92980) -> {"Mn","NSM"}; +lookup(92981) -> {"Mn","NSM"}; +lookup(92982) -> {"Mn","NSM"}; +lookup(92983) -> {"Po","L"}; +lookup(92984) -> {"Po","L"}; +lookup(92985) -> {"Po","L"}; +lookup(92986) -> {"Po","L"}; +lookup(92987) -> {"Po","L"}; +lookup(92988) -> {"So","L"}; +lookup(92989) -> {"So","L"}; +lookup(92990) -> {"So","L"}; +lookup(92991) -> {"So","L"}; +lookup(92992) -> {"Lm","L"}; +lookup(92993) -> {"Lm","L"}; +lookup(92994) -> {"Lm","L"}; +lookup(92995) -> {"Lm","L"}; +lookup(92996) -> {"Po","L"}; +lookup(92997) -> 
{"So","L"}; +lookup(93008) -> {"Nd","L"}; +lookup(93009) -> {"Nd","L"}; +lookup(93010) -> {"Nd","L"}; +lookup(93011) -> {"Nd","L"}; +lookup(93012) -> {"Nd","L"}; +lookup(93013) -> {"Nd","L"}; +lookup(93014) -> {"Nd","L"}; +lookup(93015) -> {"Nd","L"}; +lookup(93016) -> {"Nd","L"}; +lookup(93017) -> {"Nd","L"}; +lookup(93019) -> {"No","L"}; +lookup(93020) -> {"No","L"}; +lookup(93021) -> {"No","L"}; +lookup(93022) -> {"No","L"}; +lookup(93023) -> {"No","L"}; +lookup(93024) -> {"No","L"}; +lookup(93025) -> {"No","L"}; +lookup(93027) -> {"Lo","L"}; +lookup(93028) -> {"Lo","L"}; +lookup(93029) -> {"Lo","L"}; +lookup(93030) -> {"Lo","L"}; +lookup(93031) -> {"Lo","L"}; +lookup(93032) -> {"Lo","L"}; +lookup(93033) -> {"Lo","L"}; +lookup(93034) -> {"Lo","L"}; +lookup(93035) -> {"Lo","L"}; +lookup(93036) -> {"Lo","L"}; +lookup(93037) -> {"Lo","L"}; +lookup(93038) -> {"Lo","L"}; +lookup(93039) -> {"Lo","L"}; +lookup(93040) -> {"Lo","L"}; +lookup(93041) -> {"Lo","L"}; +lookup(93042) -> {"Lo","L"}; +lookup(93043) -> {"Lo","L"}; +lookup(93044) -> {"Lo","L"}; +lookup(93045) -> {"Lo","L"}; +lookup(93046) -> {"Lo","L"}; +lookup(93047) -> {"Lo","L"}; +lookup(93053) -> {"Lo","L"}; +lookup(93054) -> {"Lo","L"}; +lookup(93055) -> {"Lo","L"}; +lookup(93056) -> {"Lo","L"}; +lookup(93057) -> {"Lo","L"}; +lookup(93058) -> {"Lo","L"}; +lookup(93059) -> {"Lo","L"}; +lookup(93060) -> {"Lo","L"}; +lookup(93061) -> {"Lo","L"}; +lookup(93062) -> {"Lo","L"}; +lookup(93063) -> {"Lo","L"}; +lookup(93064) -> {"Lo","L"}; +lookup(93065) -> {"Lo","L"}; +lookup(93066) -> {"Lo","L"}; +lookup(93067) -> {"Lo","L"}; +lookup(93068) -> {"Lo","L"}; +lookup(93069) -> {"Lo","L"}; +lookup(93070) -> {"Lo","L"}; +lookup(93071) -> {"Lo","L"}; +lookup(93760) -> {"Lu","L"}; +lookup(93761) -> {"Lu","L"}; +lookup(93762) -> {"Lu","L"}; +lookup(93763) -> {"Lu","L"}; +lookup(93764) -> {"Lu","L"}; +lookup(93765) -> {"Lu","L"}; +lookup(93766) -> {"Lu","L"}; +lookup(93767) -> {"Lu","L"}; +lookup(93768) -> {"Lu","L"}; 
+lookup(93769) -> {"Lu","L"}; +lookup(93770) -> {"Lu","L"}; +lookup(93771) -> {"Lu","L"}; +lookup(93772) -> {"Lu","L"}; +lookup(93773) -> {"Lu","L"}; +lookup(93774) -> {"Lu","L"}; +lookup(93775) -> {"Lu","L"}; +lookup(93776) -> {"Lu","L"}; +lookup(93777) -> {"Lu","L"}; +lookup(93778) -> {"Lu","L"}; +lookup(93779) -> {"Lu","L"}; +lookup(93780) -> {"Lu","L"}; +lookup(93781) -> {"Lu","L"}; +lookup(93782) -> {"Lu","L"}; +lookup(93783) -> {"Lu","L"}; +lookup(93784) -> {"Lu","L"}; +lookup(93785) -> {"Lu","L"}; +lookup(93786) -> {"Lu","L"}; +lookup(93787) -> {"Lu","L"}; +lookup(93788) -> {"Lu","L"}; +lookup(93789) -> {"Lu","L"}; +lookup(93790) -> {"Lu","L"}; +lookup(93791) -> {"Lu","L"}; +lookup(93792) -> {"Ll","L"}; +lookup(93793) -> {"Ll","L"}; +lookup(93794) -> {"Ll","L"}; +lookup(93795) -> {"Ll","L"}; +lookup(93796) -> {"Ll","L"}; +lookup(93797) -> {"Ll","L"}; +lookup(93798) -> {"Ll","L"}; +lookup(93799) -> {"Ll","L"}; +lookup(93800) -> {"Ll","L"}; +lookup(93801) -> {"Ll","L"}; +lookup(93802) -> {"Ll","L"}; +lookup(93803) -> {"Ll","L"}; +lookup(93804) -> {"Ll","L"}; +lookup(93805) -> {"Ll","L"}; +lookup(93806) -> {"Ll","L"}; +lookup(93807) -> {"Ll","L"}; +lookup(93808) -> {"Ll","L"}; +lookup(93809) -> {"Ll","L"}; +lookup(93810) -> {"Ll","L"}; +lookup(93811) -> {"Ll","L"}; +lookup(93812) -> {"Ll","L"}; +lookup(93813) -> {"Ll","L"}; +lookup(93814) -> {"Ll","L"}; +lookup(93815) -> {"Ll","L"}; +lookup(93816) -> {"Ll","L"}; +lookup(93817) -> {"Ll","L"}; +lookup(93818) -> {"Ll","L"}; +lookup(93819) -> {"Ll","L"}; +lookup(93820) -> {"Ll","L"}; +lookup(93821) -> {"Ll","L"}; +lookup(93822) -> {"Ll","L"}; +lookup(93823) -> {"Ll","L"}; +lookup(93824) -> {"No","L"}; +lookup(93825) -> {"No","L"}; +lookup(93826) -> {"No","L"}; +lookup(93827) -> {"No","L"}; +lookup(93828) -> {"No","L"}; +lookup(93829) -> {"No","L"}; +lookup(93830) -> {"No","L"}; +lookup(93831) -> {"No","L"}; +lookup(93832) -> {"No","L"}; +lookup(93833) -> {"No","L"}; +lookup(93834) -> {"No","L"}; +lookup(93835) -> 
{"No","L"}; +lookup(93836) -> {"No","L"}; +lookup(93837) -> {"No","L"}; +lookup(93838) -> {"No","L"}; +lookup(93839) -> {"No","L"}; +lookup(93840) -> {"No","L"}; +lookup(93841) -> {"No","L"}; +lookup(93842) -> {"No","L"}; +lookup(93843) -> {"No","L"}; +lookup(93844) -> {"No","L"}; +lookup(93845) -> {"No","L"}; +lookup(93846) -> {"No","L"}; +lookup(93847) -> {"Po","L"}; +lookup(93848) -> {"Po","L"}; +lookup(93849) -> {"Po","L"}; +lookup(93850) -> {"Po","L"}; +lookup(93952) -> {"Lo","L"}; +lookup(93953) -> {"Lo","L"}; +lookup(93954) -> {"Lo","L"}; +lookup(93955) -> {"Lo","L"}; +lookup(93956) -> {"Lo","L"}; +lookup(93957) -> {"Lo","L"}; +lookup(93958) -> {"Lo","L"}; +lookup(93959) -> {"Lo","L"}; +lookup(93960) -> {"Lo","L"}; +lookup(93961) -> {"Lo","L"}; +lookup(93962) -> {"Lo","L"}; +lookup(93963) -> {"Lo","L"}; +lookup(93964) -> {"Lo","L"}; +lookup(93965) -> {"Lo","L"}; +lookup(93966) -> {"Lo","L"}; +lookup(93967) -> {"Lo","L"}; +lookup(93968) -> {"Lo","L"}; +lookup(93969) -> {"Lo","L"}; +lookup(93970) -> {"Lo","L"}; +lookup(93971) -> {"Lo","L"}; +lookup(93972) -> {"Lo","L"}; +lookup(93973) -> {"Lo","L"}; +lookup(93974) -> {"Lo","L"}; +lookup(93975) -> {"Lo","L"}; +lookup(93976) -> {"Lo","L"}; +lookup(93977) -> {"Lo","L"}; +lookup(93978) -> {"Lo","L"}; +lookup(93979) -> {"Lo","L"}; +lookup(93980) -> {"Lo","L"}; +lookup(93981) -> {"Lo","L"}; +lookup(93982) -> {"Lo","L"}; +lookup(93983) -> {"Lo","L"}; +lookup(93984) -> {"Lo","L"}; +lookup(93985) -> {"Lo","L"}; +lookup(93986) -> {"Lo","L"}; +lookup(93987) -> {"Lo","L"}; +lookup(93988) -> {"Lo","L"}; +lookup(93989) -> {"Lo","L"}; +lookup(93990) -> {"Lo","L"}; +lookup(93991) -> {"Lo","L"}; +lookup(93992) -> {"Lo","L"}; +lookup(93993) -> {"Lo","L"}; +lookup(93994) -> {"Lo","L"}; +lookup(93995) -> {"Lo","L"}; +lookup(93996) -> {"Lo","L"}; +lookup(93997) -> {"Lo","L"}; +lookup(93998) -> {"Lo","L"}; +lookup(93999) -> {"Lo","L"}; +lookup(94000) -> {"Lo","L"}; +lookup(94001) -> {"Lo","L"}; +lookup(94002) -> {"Lo","L"}; 
+lookup(94003) -> {"Lo","L"}; +lookup(94004) -> {"Lo","L"}; +lookup(94005) -> {"Lo","L"}; +lookup(94006) -> {"Lo","L"}; +lookup(94007) -> {"Lo","L"}; +lookup(94008) -> {"Lo","L"}; +lookup(94009) -> {"Lo","L"}; +lookup(94010) -> {"Lo","L"}; +lookup(94011) -> {"Lo","L"}; +lookup(94012) -> {"Lo","L"}; +lookup(94013) -> {"Lo","L"}; +lookup(94014) -> {"Lo","L"}; +lookup(94015) -> {"Lo","L"}; +lookup(94016) -> {"Lo","L"}; +lookup(94017) -> {"Lo","L"}; +lookup(94018) -> {"Lo","L"}; +lookup(94019) -> {"Lo","L"}; +lookup(94020) -> {"Lo","L"}; +lookup(94021) -> {"Lo","L"}; +lookup(94022) -> {"Lo","L"}; +lookup(94023) -> {"Lo","L"}; +lookup(94024) -> {"Lo","L"}; +lookup(94025) -> {"Lo","L"}; +lookup(94026) -> {"Lo","L"}; +lookup(94031) -> {"Mn","NSM"}; +lookup(94032) -> {"Lo","L"}; +lookup(94033) -> {"Mc","L"}; +lookup(94034) -> {"Mc","L"}; +lookup(94035) -> {"Mc","L"}; +lookup(94036) -> {"Mc","L"}; +lookup(94037) -> {"Mc","L"}; +lookup(94038) -> {"Mc","L"}; +lookup(94039) -> {"Mc","L"}; +lookup(94040) -> {"Mc","L"}; +lookup(94041) -> {"Mc","L"}; +lookup(94042) -> {"Mc","L"}; +lookup(94043) -> {"Mc","L"}; +lookup(94044) -> {"Mc","L"}; +lookup(94045) -> {"Mc","L"}; +lookup(94046) -> {"Mc","L"}; +lookup(94047) -> {"Mc","L"}; +lookup(94048) -> {"Mc","L"}; +lookup(94049) -> {"Mc","L"}; +lookup(94050) -> {"Mc","L"}; +lookup(94051) -> {"Mc","L"}; +lookup(94052) -> {"Mc","L"}; +lookup(94053) -> {"Mc","L"}; +lookup(94054) -> {"Mc","L"}; +lookup(94055) -> {"Mc","L"}; +lookup(94056) -> {"Mc","L"}; +lookup(94057) -> {"Mc","L"}; +lookup(94058) -> {"Mc","L"}; +lookup(94059) -> {"Mc","L"}; +lookup(94060) -> {"Mc","L"}; +lookup(94061) -> {"Mc","L"}; +lookup(94062) -> {"Mc","L"}; +lookup(94063) -> {"Mc","L"}; +lookup(94064) -> {"Mc","L"}; +lookup(94065) -> {"Mc","L"}; +lookup(94066) -> {"Mc","L"}; +lookup(94067) -> {"Mc","L"}; +lookup(94068) -> {"Mc","L"}; +lookup(94069) -> {"Mc","L"}; +lookup(94070) -> {"Mc","L"}; +lookup(94071) -> {"Mc","L"}; +lookup(94072) -> {"Mc","L"}; +lookup(94073) -> 
{"Mc","L"}; +lookup(94074) -> {"Mc","L"}; +lookup(94075) -> {"Mc","L"}; +lookup(94076) -> {"Mc","L"}; +lookup(94077) -> {"Mc","L"}; +lookup(94078) -> {"Mc","L"}; +lookup(94079) -> {"Mc","L"}; +lookup(94080) -> {"Mc","L"}; +lookup(94081) -> {"Mc","L"}; +lookup(94082) -> {"Mc","L"}; +lookup(94083) -> {"Mc","L"}; +lookup(94084) -> {"Mc","L"}; +lookup(94085) -> {"Mc","L"}; +lookup(94086) -> {"Mc","L"}; +lookup(94087) -> {"Mc","L"}; +lookup(94095) -> {"Mn","NSM"}; +lookup(94096) -> {"Mn","NSM"}; +lookup(94097) -> {"Mn","NSM"}; +lookup(94098) -> {"Mn","NSM"}; +lookup(94099) -> {"Lm","L"}; +lookup(94100) -> {"Lm","L"}; +lookup(94101) -> {"Lm","L"}; +lookup(94102) -> {"Lm","L"}; +lookup(94103) -> {"Lm","L"}; +lookup(94104) -> {"Lm","L"}; +lookup(94105) -> {"Lm","L"}; +lookup(94106) -> {"Lm","L"}; +lookup(94107) -> {"Lm","L"}; +lookup(94108) -> {"Lm","L"}; +lookup(94109) -> {"Lm","L"}; +lookup(94110) -> {"Lm","L"}; +lookup(94111) -> {"Lm","L"}; +lookup(94176) -> {"Lm","L"}; +lookup(94177) -> {"Lm","L"}; +lookup(94178) -> {"Po","ON"}; +lookup(94179) -> {"Lm","L"}; +lookup(94180) -> {"Mn","NSM"}; +lookup(94192) -> {"Mc","L"}; +lookup(94193) -> {"Mc","L"}; +lookup(94208) -> {"Lo","L"}; +lookup(100343) -> {"Lo","L"}; +lookup(100352) -> {"Lo","L"}; +lookup(100353) -> {"Lo","L"}; +lookup(100354) -> {"Lo","L"}; +lookup(100355) -> {"Lo","L"}; +lookup(100356) -> {"Lo","L"}; +lookup(100357) -> {"Lo","L"}; +lookup(100358) -> {"Lo","L"}; +lookup(100359) -> {"Lo","L"}; +lookup(100360) -> {"Lo","L"}; +lookup(100361) -> {"Lo","L"}; +lookup(100362) -> {"Lo","L"}; +lookup(100363) -> {"Lo","L"}; +lookup(100364) -> {"Lo","L"}; +lookup(100365) -> {"Lo","L"}; +lookup(100366) -> {"Lo","L"}; +lookup(100367) -> {"Lo","L"}; +lookup(100368) -> {"Lo","L"}; +lookup(100369) -> {"Lo","L"}; +lookup(100370) -> {"Lo","L"}; +lookup(100371) -> {"Lo","L"}; +lookup(100372) -> {"Lo","L"}; +lookup(100373) -> {"Lo","L"}; +lookup(100374) -> {"Lo","L"}; +lookup(100375) -> {"Lo","L"}; +lookup(100376) -> {"Lo","L"}; 
+lookup(100377) -> {"Lo","L"}; +lookup(100378) -> {"Lo","L"}; +lookup(100379) -> {"Lo","L"}; +lookup(100380) -> {"Lo","L"}; +lookup(100381) -> {"Lo","L"}; +lookup(100382) -> {"Lo","L"}; +lookup(100383) -> {"Lo","L"}; +lookup(100384) -> {"Lo","L"}; +lookup(100385) -> {"Lo","L"}; +lookup(100386) -> {"Lo","L"}; +lookup(100387) -> {"Lo","L"}; +lookup(100388) -> {"Lo","L"}; +lookup(100389) -> {"Lo","L"}; +lookup(100390) -> {"Lo","L"}; +lookup(100391) -> {"Lo","L"}; +lookup(100392) -> {"Lo","L"}; +lookup(100393) -> {"Lo","L"}; +lookup(100394) -> {"Lo","L"}; +lookup(100395) -> {"Lo","L"}; +lookup(100396) -> {"Lo","L"}; +lookup(100397) -> {"Lo","L"}; +lookup(100398) -> {"Lo","L"}; +lookup(100399) -> {"Lo","L"}; +lookup(100400) -> {"Lo","L"}; +lookup(100401) -> {"Lo","L"}; +lookup(100402) -> {"Lo","L"}; +lookup(100403) -> {"Lo","L"}; +lookup(100404) -> {"Lo","L"}; +lookup(100405) -> {"Lo","L"}; +lookup(100406) -> {"Lo","L"}; +lookup(100407) -> {"Lo","L"}; +lookup(100408) -> {"Lo","L"}; +lookup(100409) -> {"Lo","L"}; +lookup(100410) -> {"Lo","L"}; +lookup(100411) -> {"Lo","L"}; +lookup(100412) -> {"Lo","L"}; +lookup(100413) -> {"Lo","L"}; +lookup(100414) -> {"Lo","L"}; +lookup(100415) -> {"Lo","L"}; +lookup(100416) -> {"Lo","L"}; +lookup(100417) -> {"Lo","L"}; +lookup(100418) -> {"Lo","L"}; +lookup(100419) -> {"Lo","L"}; +lookup(100420) -> {"Lo","L"}; +lookup(100421) -> {"Lo","L"}; +lookup(100422) -> {"Lo","L"}; +lookup(100423) -> {"Lo","L"}; +lookup(100424) -> {"Lo","L"}; +lookup(100425) -> {"Lo","L"}; +lookup(100426) -> {"Lo","L"}; +lookup(100427) -> {"Lo","L"}; +lookup(100428) -> {"Lo","L"}; +lookup(100429) -> {"Lo","L"}; +lookup(100430) -> {"Lo","L"}; +lookup(100431) -> {"Lo","L"}; +lookup(100432) -> {"Lo","L"}; +lookup(100433) -> {"Lo","L"}; +lookup(100434) -> {"Lo","L"}; +lookup(100435) -> {"Lo","L"}; +lookup(100436) -> {"Lo","L"}; +lookup(100437) -> {"Lo","L"}; +lookup(100438) -> {"Lo","L"}; +lookup(100439) -> {"Lo","L"}; +lookup(100440) -> {"Lo","L"}; +lookup(100441) 
-> {"Lo","L"}; +lookup(100442) -> {"Lo","L"}; +lookup(100443) -> {"Lo","L"}; +lookup(100444) -> {"Lo","L"}; +lookup(100445) -> {"Lo","L"}; +lookup(100446) -> {"Lo","L"}; +lookup(100447) -> {"Lo","L"}; +lookup(100448) -> {"Lo","L"}; +lookup(100449) -> {"Lo","L"}; +lookup(100450) -> {"Lo","L"}; +lookup(100451) -> {"Lo","L"}; +lookup(100452) -> {"Lo","L"}; +lookup(100453) -> {"Lo","L"}; +lookup(100454) -> {"Lo","L"}; +lookup(100455) -> {"Lo","L"}; +lookup(100456) -> {"Lo","L"}; +lookup(100457) -> {"Lo","L"}; +lookup(100458) -> {"Lo","L"}; +lookup(100459) -> {"Lo","L"}; +lookup(100460) -> {"Lo","L"}; +lookup(100461) -> {"Lo","L"}; +lookup(100462) -> {"Lo","L"}; +lookup(100463) -> {"Lo","L"}; +lookup(100464) -> {"Lo","L"}; +lookup(100465) -> {"Lo","L"}; +lookup(100466) -> {"Lo","L"}; +lookup(100467) -> {"Lo","L"}; +lookup(100468) -> {"Lo","L"}; +lookup(100469) -> {"Lo","L"}; +lookup(100470) -> {"Lo","L"}; +lookup(100471) -> {"Lo","L"}; +lookup(100472) -> {"Lo","L"}; +lookup(100473) -> {"Lo","L"}; +lookup(100474) -> {"Lo","L"}; +lookup(100475) -> {"Lo","L"}; +lookup(100476) -> {"Lo","L"}; +lookup(100477) -> {"Lo","L"}; +lookup(100478) -> {"Lo","L"}; +lookup(100479) -> {"Lo","L"}; +lookup(100480) -> {"Lo","L"}; +lookup(100481) -> {"Lo","L"}; +lookup(100482) -> {"Lo","L"}; +lookup(100483) -> {"Lo","L"}; +lookup(100484) -> {"Lo","L"}; +lookup(100485) -> {"Lo","L"}; +lookup(100486) -> {"Lo","L"}; +lookup(100487) -> {"Lo","L"}; +lookup(100488) -> {"Lo","L"}; +lookup(100489) -> {"Lo","L"}; +lookup(100490) -> {"Lo","L"}; +lookup(100491) -> {"Lo","L"}; +lookup(100492) -> {"Lo","L"}; +lookup(100493) -> {"Lo","L"}; +lookup(100494) -> {"Lo","L"}; +lookup(100495) -> {"Lo","L"}; +lookup(100496) -> {"Lo","L"}; +lookup(100497) -> {"Lo","L"}; +lookup(100498) -> {"Lo","L"}; +lookup(100499) -> {"Lo","L"}; +lookup(100500) -> {"Lo","L"}; +lookup(100501) -> {"Lo","L"}; +lookup(100502) -> {"Lo","L"}; +lookup(100503) -> {"Lo","L"}; +lookup(100504) -> {"Lo","L"}; +lookup(100505) -> {"Lo","L"}; 
+lookup(100506) -> {"Lo","L"}; +lookup(100507) -> {"Lo","L"}; +lookup(100508) -> {"Lo","L"}; +lookup(100509) -> {"Lo","L"}; +lookup(100510) -> {"Lo","L"}; +lookup(100511) -> {"Lo","L"}; +lookup(100512) -> {"Lo","L"}; +lookup(100513) -> {"Lo","L"}; +lookup(100514) -> {"Lo","L"}; +lookup(100515) -> {"Lo","L"}; +lookup(100516) -> {"Lo","L"}; +lookup(100517) -> {"Lo","L"}; +lookup(100518) -> {"Lo","L"}; +lookup(100519) -> {"Lo","L"}; +lookup(100520) -> {"Lo","L"}; +lookup(100521) -> {"Lo","L"}; +lookup(100522) -> {"Lo","L"}; +lookup(100523) -> {"Lo","L"}; +lookup(100524) -> {"Lo","L"}; +lookup(100525) -> {"Lo","L"}; +lookup(100526) -> {"Lo","L"}; +lookup(100527) -> {"Lo","L"}; +lookup(100528) -> {"Lo","L"}; +lookup(100529) -> {"Lo","L"}; +lookup(100530) -> {"Lo","L"}; +lookup(100531) -> {"Lo","L"}; +lookup(100532) -> {"Lo","L"}; +lookup(100533) -> {"Lo","L"}; +lookup(100534) -> {"Lo","L"}; +lookup(100535) -> {"Lo","L"}; +lookup(100536) -> {"Lo","L"}; +lookup(100537) -> {"Lo","L"}; +lookup(100538) -> {"Lo","L"}; +lookup(100539) -> {"Lo","L"}; +lookup(100540) -> {"Lo","L"}; +lookup(100541) -> {"Lo","L"}; +lookup(100542) -> {"Lo","L"}; +lookup(100543) -> {"Lo","L"}; +lookup(100544) -> {"Lo","L"}; +lookup(100545) -> {"Lo","L"}; +lookup(100546) -> {"Lo","L"}; +lookup(100547) -> {"Lo","L"}; +lookup(100548) -> {"Lo","L"}; +lookup(100549) -> {"Lo","L"}; +lookup(100550) -> {"Lo","L"}; +lookup(100551) -> {"Lo","L"}; +lookup(100552) -> {"Lo","L"}; +lookup(100553) -> {"Lo","L"}; +lookup(100554) -> {"Lo","L"}; +lookup(100555) -> {"Lo","L"}; +lookup(100556) -> {"Lo","L"}; +lookup(100557) -> {"Lo","L"}; +lookup(100558) -> {"Lo","L"}; +lookup(100559) -> {"Lo","L"}; +lookup(100560) -> {"Lo","L"}; +lookup(100561) -> {"Lo","L"}; +lookup(100562) -> {"Lo","L"}; +lookup(100563) -> {"Lo","L"}; +lookup(100564) -> {"Lo","L"}; +lookup(100565) -> {"Lo","L"}; +lookup(100566) -> {"Lo","L"}; +lookup(100567) -> {"Lo","L"}; +lookup(100568) -> {"Lo","L"}; +lookup(100569) -> {"Lo","L"}; +lookup(100570) 
-> {"Lo","L"}; +lookup(100571) -> {"Lo","L"}; +lookup(100572) -> {"Lo","L"}; +lookup(100573) -> {"Lo","L"}; +lookup(100574) -> {"Lo","L"}; +lookup(100575) -> {"Lo","L"}; +lookup(100576) -> {"Lo","L"}; +lookup(100577) -> {"Lo","L"}; +lookup(100578) -> {"Lo","L"}; +lookup(100579) -> {"Lo","L"}; +lookup(100580) -> {"Lo","L"}; +lookup(100581) -> {"Lo","L"}; +lookup(100582) -> {"Lo","L"}; +lookup(100583) -> {"Lo","L"}; +lookup(100584) -> {"Lo","L"}; +lookup(100585) -> {"Lo","L"}; +lookup(100586) -> {"Lo","L"}; +lookup(100587) -> {"Lo","L"}; +lookup(100588) -> {"Lo","L"}; +lookup(100589) -> {"Lo","L"}; +lookup(100590) -> {"Lo","L"}; +lookup(100591) -> {"Lo","L"}; +lookup(100592) -> {"Lo","L"}; +lookup(100593) -> {"Lo","L"}; +lookup(100594) -> {"Lo","L"}; +lookup(100595) -> {"Lo","L"}; +lookup(100596) -> {"Lo","L"}; +lookup(100597) -> {"Lo","L"}; +lookup(100598) -> {"Lo","L"}; +lookup(100599) -> {"Lo","L"}; +lookup(100600) -> {"Lo","L"}; +lookup(100601) -> {"Lo","L"}; +lookup(100602) -> {"Lo","L"}; +lookup(100603) -> {"Lo","L"}; +lookup(100604) -> {"Lo","L"}; +lookup(100605) -> {"Lo","L"}; +lookup(100606) -> {"Lo","L"}; +lookup(100607) -> {"Lo","L"}; +lookup(100608) -> {"Lo","L"}; +lookup(100609) -> {"Lo","L"}; +lookup(100610) -> {"Lo","L"}; +lookup(100611) -> {"Lo","L"}; +lookup(100612) -> {"Lo","L"}; +lookup(100613) -> {"Lo","L"}; +lookup(100614) -> {"Lo","L"}; +lookup(100615) -> {"Lo","L"}; +lookup(100616) -> {"Lo","L"}; +lookup(100617) -> {"Lo","L"}; +lookup(100618) -> {"Lo","L"}; +lookup(100619) -> {"Lo","L"}; +lookup(100620) -> {"Lo","L"}; +lookup(100621) -> {"Lo","L"}; +lookup(100622) -> {"Lo","L"}; +lookup(100623) -> {"Lo","L"}; +lookup(100624) -> {"Lo","L"}; +lookup(100625) -> {"Lo","L"}; +lookup(100626) -> {"Lo","L"}; +lookup(100627) -> {"Lo","L"}; +lookup(100628) -> {"Lo","L"}; +lookup(100629) -> {"Lo","L"}; +lookup(100630) -> {"Lo","L"}; +lookup(100631) -> {"Lo","L"}; +lookup(100632) -> {"Lo","L"}; +lookup(100633) -> {"Lo","L"}; +lookup(100634) -> {"Lo","L"}; 
+lookup(100635) -> {"Lo","L"}; +lookup(100636) -> {"Lo","L"}; +lookup(100637) -> {"Lo","L"}; +lookup(100638) -> {"Lo","L"}; +lookup(100639) -> {"Lo","L"}; +lookup(100640) -> {"Lo","L"}; +lookup(100641) -> {"Lo","L"}; +lookup(100642) -> {"Lo","L"}; +lookup(100643) -> {"Lo","L"}; +lookup(100644) -> {"Lo","L"}; +lookup(100645) -> {"Lo","L"}; +lookup(100646) -> {"Lo","L"}; +lookup(100647) -> {"Lo","L"}; +lookup(100648) -> {"Lo","L"}; +lookup(100649) -> {"Lo","L"}; +lookup(100650) -> {"Lo","L"}; +lookup(100651) -> {"Lo","L"}; +lookup(100652) -> {"Lo","L"}; +lookup(100653) -> {"Lo","L"}; +lookup(100654) -> {"Lo","L"}; +lookup(100655) -> {"Lo","L"}; +lookup(100656) -> {"Lo","L"}; +lookup(100657) -> {"Lo","L"}; +lookup(100658) -> {"Lo","L"}; +lookup(100659) -> {"Lo","L"}; +lookup(100660) -> {"Lo","L"}; +lookup(100661) -> {"Lo","L"}; +lookup(100662) -> {"Lo","L"}; +lookup(100663) -> {"Lo","L"}; +lookup(100664) -> {"Lo","L"}; +lookup(100665) -> {"Lo","L"}; +lookup(100666) -> {"Lo","L"}; +lookup(100667) -> {"Lo","L"}; +lookup(100668) -> {"Lo","L"}; +lookup(100669) -> {"Lo","L"}; +lookup(100670) -> {"Lo","L"}; +lookup(100671) -> {"Lo","L"}; +lookup(100672) -> {"Lo","L"}; +lookup(100673) -> {"Lo","L"}; +lookup(100674) -> {"Lo","L"}; +lookup(100675) -> {"Lo","L"}; +lookup(100676) -> {"Lo","L"}; +lookup(100677) -> {"Lo","L"}; +lookup(100678) -> {"Lo","L"}; +lookup(100679) -> {"Lo","L"}; +lookup(100680) -> {"Lo","L"}; +lookup(100681) -> {"Lo","L"}; +lookup(100682) -> {"Lo","L"}; +lookup(100683) -> {"Lo","L"}; +lookup(100684) -> {"Lo","L"}; +lookup(100685) -> {"Lo","L"}; +lookup(100686) -> {"Lo","L"}; +lookup(100687) -> {"Lo","L"}; +lookup(100688) -> {"Lo","L"}; +lookup(100689) -> {"Lo","L"}; +lookup(100690) -> {"Lo","L"}; +lookup(100691) -> {"Lo","L"}; +lookup(100692) -> {"Lo","L"}; +lookup(100693) -> {"Lo","L"}; +lookup(100694) -> {"Lo","L"}; +lookup(100695) -> {"Lo","L"}; +lookup(100696) -> {"Lo","L"}; +lookup(100697) -> {"Lo","L"}; +lookup(100698) -> {"Lo","L"}; +lookup(100699) 
-> {"Lo","L"}; +lookup(100700) -> {"Lo","L"}; +lookup(100701) -> {"Lo","L"}; +lookup(100702) -> {"Lo","L"}; +lookup(100703) -> {"Lo","L"}; +lookup(100704) -> {"Lo","L"}; +lookup(100705) -> {"Lo","L"}; +lookup(100706) -> {"Lo","L"}; +lookup(100707) -> {"Lo","L"}; +lookup(100708) -> {"Lo","L"}; +lookup(100709) -> {"Lo","L"}; +lookup(100710) -> {"Lo","L"}; +lookup(100711) -> {"Lo","L"}; +lookup(100712) -> {"Lo","L"}; +lookup(100713) -> {"Lo","L"}; +lookup(100714) -> {"Lo","L"}; +lookup(100715) -> {"Lo","L"}; +lookup(100716) -> {"Lo","L"}; +lookup(100717) -> {"Lo","L"}; +lookup(100718) -> {"Lo","L"}; +lookup(100719) -> {"Lo","L"}; +lookup(100720) -> {"Lo","L"}; +lookup(100721) -> {"Lo","L"}; +lookup(100722) -> {"Lo","L"}; +lookup(100723) -> {"Lo","L"}; +lookup(100724) -> {"Lo","L"}; +lookup(100725) -> {"Lo","L"}; +lookup(100726) -> {"Lo","L"}; +lookup(100727) -> {"Lo","L"}; +lookup(100728) -> {"Lo","L"}; +lookup(100729) -> {"Lo","L"}; +lookup(100730) -> {"Lo","L"}; +lookup(100731) -> {"Lo","L"}; +lookup(100732) -> {"Lo","L"}; +lookup(100733) -> {"Lo","L"}; +lookup(100734) -> {"Lo","L"}; +lookup(100735) -> {"Lo","L"}; +lookup(100736) -> {"Lo","L"}; +lookup(100737) -> {"Lo","L"}; +lookup(100738) -> {"Lo","L"}; +lookup(100739) -> {"Lo","L"}; +lookup(100740) -> {"Lo","L"}; +lookup(100741) -> {"Lo","L"}; +lookup(100742) -> {"Lo","L"}; +lookup(100743) -> {"Lo","L"}; +lookup(100744) -> {"Lo","L"}; +lookup(100745) -> {"Lo","L"}; +lookup(100746) -> {"Lo","L"}; +lookup(100747) -> {"Lo","L"}; +lookup(100748) -> {"Lo","L"}; +lookup(100749) -> {"Lo","L"}; +lookup(100750) -> {"Lo","L"}; +lookup(100751) -> {"Lo","L"}; +lookup(100752) -> {"Lo","L"}; +lookup(100753) -> {"Lo","L"}; +lookup(100754) -> {"Lo","L"}; +lookup(100755) -> {"Lo","L"}; +lookup(100756) -> {"Lo","L"}; +lookup(100757) -> {"Lo","L"}; +lookup(100758) -> {"Lo","L"}; +lookup(100759) -> {"Lo","L"}; +lookup(100760) -> {"Lo","L"}; +lookup(100761) -> {"Lo","L"}; +lookup(100762) -> {"Lo","L"}; +lookup(100763) -> {"Lo","L"}; 
+lookup(100764) -> {"Lo","L"}; +lookup(100765) -> {"Lo","L"}; +lookup(100766) -> {"Lo","L"}; +lookup(100767) -> {"Lo","L"}; +lookup(100768) -> {"Lo","L"}; +lookup(100769) -> {"Lo","L"}; +lookup(100770) -> {"Lo","L"}; +lookup(100771) -> {"Lo","L"}; +lookup(100772) -> {"Lo","L"}; +lookup(100773) -> {"Lo","L"}; +lookup(100774) -> {"Lo","L"}; +lookup(100775) -> {"Lo","L"}; +lookup(100776) -> {"Lo","L"}; +lookup(100777) -> {"Lo","L"}; +lookup(100778) -> {"Lo","L"}; +lookup(100779) -> {"Lo","L"}; +lookup(100780) -> {"Lo","L"}; +lookup(100781) -> {"Lo","L"}; +lookup(100782) -> {"Lo","L"}; +lookup(100783) -> {"Lo","L"}; +lookup(100784) -> {"Lo","L"}; +lookup(100785) -> {"Lo","L"}; +lookup(100786) -> {"Lo","L"}; +lookup(100787) -> {"Lo","L"}; +lookup(100788) -> {"Lo","L"}; +lookup(100789) -> {"Lo","L"}; +lookup(100790) -> {"Lo","L"}; +lookup(100791) -> {"Lo","L"}; +lookup(100792) -> {"Lo","L"}; +lookup(100793) -> {"Lo","L"}; +lookup(100794) -> {"Lo","L"}; +lookup(100795) -> {"Lo","L"}; +lookup(100796) -> {"Lo","L"}; +lookup(100797) -> {"Lo","L"}; +lookup(100798) -> {"Lo","L"}; +lookup(100799) -> {"Lo","L"}; +lookup(100800) -> {"Lo","L"}; +lookup(100801) -> {"Lo","L"}; +lookup(100802) -> {"Lo","L"}; +lookup(100803) -> {"Lo","L"}; +lookup(100804) -> {"Lo","L"}; +lookup(100805) -> {"Lo","L"}; +lookup(100806) -> {"Lo","L"}; +lookup(100807) -> {"Lo","L"}; +lookup(100808) -> {"Lo","L"}; +lookup(100809) -> {"Lo","L"}; +lookup(100810) -> {"Lo","L"}; +lookup(100811) -> {"Lo","L"}; +lookup(100812) -> {"Lo","L"}; +lookup(100813) -> {"Lo","L"}; +lookup(100814) -> {"Lo","L"}; +lookup(100815) -> {"Lo","L"}; +lookup(100816) -> {"Lo","L"}; +lookup(100817) -> {"Lo","L"}; +lookup(100818) -> {"Lo","L"}; +lookup(100819) -> {"Lo","L"}; +lookup(100820) -> {"Lo","L"}; +lookup(100821) -> {"Lo","L"}; +lookup(100822) -> {"Lo","L"}; +lookup(100823) -> {"Lo","L"}; +lookup(100824) -> {"Lo","L"}; +lookup(100825) -> {"Lo","L"}; +lookup(100826) -> {"Lo","L"}; +lookup(100827) -> {"Lo","L"}; +lookup(100828) 
-> {"Lo","L"}; +lookup(100829) -> {"Lo","L"}; +lookup(100830) -> {"Lo","L"}; +lookup(100831) -> {"Lo","L"}; +lookup(100832) -> {"Lo","L"}; +lookup(100833) -> {"Lo","L"}; +lookup(100834) -> {"Lo","L"}; +lookup(100835) -> {"Lo","L"}; +lookup(100836) -> {"Lo","L"}; +lookup(100837) -> {"Lo","L"}; +lookup(100838) -> {"Lo","L"}; +lookup(100839) -> {"Lo","L"}; +lookup(100840) -> {"Lo","L"}; +lookup(100841) -> {"Lo","L"}; +lookup(100842) -> {"Lo","L"}; +lookup(100843) -> {"Lo","L"}; +lookup(100844) -> {"Lo","L"}; +lookup(100845) -> {"Lo","L"}; +lookup(100846) -> {"Lo","L"}; +lookup(100847) -> {"Lo","L"}; +lookup(100848) -> {"Lo","L"}; +lookup(100849) -> {"Lo","L"}; +lookup(100850) -> {"Lo","L"}; +lookup(100851) -> {"Lo","L"}; +lookup(100852) -> {"Lo","L"}; +lookup(100853) -> {"Lo","L"}; +lookup(100854) -> {"Lo","L"}; +lookup(100855) -> {"Lo","L"}; +lookup(100856) -> {"Lo","L"}; +lookup(100857) -> {"Lo","L"}; +lookup(100858) -> {"Lo","L"}; +lookup(100859) -> {"Lo","L"}; +lookup(100860) -> {"Lo","L"}; +lookup(100861) -> {"Lo","L"}; +lookup(100862) -> {"Lo","L"}; +lookup(100863) -> {"Lo","L"}; +lookup(100864) -> {"Lo","L"}; +lookup(100865) -> {"Lo","L"}; +lookup(100866) -> {"Lo","L"}; +lookup(100867) -> {"Lo","L"}; +lookup(100868) -> {"Lo","L"}; +lookup(100869) -> {"Lo","L"}; +lookup(100870) -> {"Lo","L"}; +lookup(100871) -> {"Lo","L"}; +lookup(100872) -> {"Lo","L"}; +lookup(100873) -> {"Lo","L"}; +lookup(100874) -> {"Lo","L"}; +lookup(100875) -> {"Lo","L"}; +lookup(100876) -> {"Lo","L"}; +lookup(100877) -> {"Lo","L"}; +lookup(100878) -> {"Lo","L"}; +lookup(100879) -> {"Lo","L"}; +lookup(100880) -> {"Lo","L"}; +lookup(100881) -> {"Lo","L"}; +lookup(100882) -> {"Lo","L"}; +lookup(100883) -> {"Lo","L"}; +lookup(100884) -> {"Lo","L"}; +lookup(100885) -> {"Lo","L"}; +lookup(100886) -> {"Lo","L"}; +lookup(100887) -> {"Lo","L"}; +lookup(100888) -> {"Lo","L"}; +lookup(100889) -> {"Lo","L"}; +lookup(100890) -> {"Lo","L"}; +lookup(100891) -> {"Lo","L"}; +lookup(100892) -> {"Lo","L"}; 
+lookup(100893) -> {"Lo","L"}; +lookup(100894) -> {"Lo","L"}; +lookup(100895) -> {"Lo","L"}; +lookup(100896) -> {"Lo","L"}; +lookup(100897) -> {"Lo","L"}; +lookup(100898) -> {"Lo","L"}; +lookup(100899) -> {"Lo","L"}; +lookup(100900) -> {"Lo","L"}; +lookup(100901) -> {"Lo","L"}; +lookup(100902) -> {"Lo","L"}; +lookup(100903) -> {"Lo","L"}; +lookup(100904) -> {"Lo","L"}; +lookup(100905) -> {"Lo","L"}; +lookup(100906) -> {"Lo","L"}; +lookup(100907) -> {"Lo","L"}; +lookup(100908) -> {"Lo","L"}; +lookup(100909) -> {"Lo","L"}; +lookup(100910) -> {"Lo","L"}; +lookup(100911) -> {"Lo","L"}; +lookup(100912) -> {"Lo","L"}; +lookup(100913) -> {"Lo","L"}; +lookup(100914) -> {"Lo","L"}; +lookup(100915) -> {"Lo","L"}; +lookup(100916) -> {"Lo","L"}; +lookup(100917) -> {"Lo","L"}; +lookup(100918) -> {"Lo","L"}; +lookup(100919) -> {"Lo","L"}; +lookup(100920) -> {"Lo","L"}; +lookup(100921) -> {"Lo","L"}; +lookup(100922) -> {"Lo","L"}; +lookup(100923) -> {"Lo","L"}; +lookup(100924) -> {"Lo","L"}; +lookup(100925) -> {"Lo","L"}; +lookup(100926) -> {"Lo","L"}; +lookup(100927) -> {"Lo","L"}; +lookup(100928) -> {"Lo","L"}; +lookup(100929) -> {"Lo","L"}; +lookup(100930) -> {"Lo","L"}; +lookup(100931) -> {"Lo","L"}; +lookup(100932) -> {"Lo","L"}; +lookup(100933) -> {"Lo","L"}; +lookup(100934) -> {"Lo","L"}; +lookup(100935) -> {"Lo","L"}; +lookup(100936) -> {"Lo","L"}; +lookup(100937) -> {"Lo","L"}; +lookup(100938) -> {"Lo","L"}; +lookup(100939) -> {"Lo","L"}; +lookup(100940) -> {"Lo","L"}; +lookup(100941) -> {"Lo","L"}; +lookup(100942) -> {"Lo","L"}; +lookup(100943) -> {"Lo","L"}; +lookup(100944) -> {"Lo","L"}; +lookup(100945) -> {"Lo","L"}; +lookup(100946) -> {"Lo","L"}; +lookup(100947) -> {"Lo","L"}; +lookup(100948) -> {"Lo","L"}; +lookup(100949) -> {"Lo","L"}; +lookup(100950) -> {"Lo","L"}; +lookup(100951) -> {"Lo","L"}; +lookup(100952) -> {"Lo","L"}; +lookup(100953) -> {"Lo","L"}; +lookup(100954) -> {"Lo","L"}; +lookup(100955) -> {"Lo","L"}; +lookup(100956) -> {"Lo","L"}; +lookup(100957) 
-> {"Lo","L"}; +lookup(100958) -> {"Lo","L"}; +lookup(100959) -> {"Lo","L"}; +lookup(100960) -> {"Lo","L"}; +lookup(100961) -> {"Lo","L"}; +lookup(100962) -> {"Lo","L"}; +lookup(100963) -> {"Lo","L"}; +lookup(100964) -> {"Lo","L"}; +lookup(100965) -> {"Lo","L"}; +lookup(100966) -> {"Lo","L"}; +lookup(100967) -> {"Lo","L"}; +lookup(100968) -> {"Lo","L"}; +lookup(100969) -> {"Lo","L"}; +lookup(100970) -> {"Lo","L"}; +lookup(100971) -> {"Lo","L"}; +lookup(100972) -> {"Lo","L"}; +lookup(100973) -> {"Lo","L"}; +lookup(100974) -> {"Lo","L"}; +lookup(100975) -> {"Lo","L"}; +lookup(100976) -> {"Lo","L"}; +lookup(100977) -> {"Lo","L"}; +lookup(100978) -> {"Lo","L"}; +lookup(100979) -> {"Lo","L"}; +lookup(100980) -> {"Lo","L"}; +lookup(100981) -> {"Lo","L"}; +lookup(100982) -> {"Lo","L"}; +lookup(100983) -> {"Lo","L"}; +lookup(100984) -> {"Lo","L"}; +lookup(100985) -> {"Lo","L"}; +lookup(100986) -> {"Lo","L"}; +lookup(100987) -> {"Lo","L"}; +lookup(100988) -> {"Lo","L"}; +lookup(100989) -> {"Lo","L"}; +lookup(100990) -> {"Lo","L"}; +lookup(100991) -> {"Lo","L"}; +lookup(100992) -> {"Lo","L"}; +lookup(100993) -> {"Lo","L"}; +lookup(100994) -> {"Lo","L"}; +lookup(100995) -> {"Lo","L"}; +lookup(100996) -> {"Lo","L"}; +lookup(100997) -> {"Lo","L"}; +lookup(100998) -> {"Lo","L"}; +lookup(100999) -> {"Lo","L"}; +lookup(101000) -> {"Lo","L"}; +lookup(101001) -> {"Lo","L"}; +lookup(101002) -> {"Lo","L"}; +lookup(101003) -> {"Lo","L"}; +lookup(101004) -> {"Lo","L"}; +lookup(101005) -> {"Lo","L"}; +lookup(101006) -> {"Lo","L"}; +lookup(101007) -> {"Lo","L"}; +lookup(101008) -> {"Lo","L"}; +lookup(101009) -> {"Lo","L"}; +lookup(101010) -> {"Lo","L"}; +lookup(101011) -> {"Lo","L"}; +lookup(101012) -> {"Lo","L"}; +lookup(101013) -> {"Lo","L"}; +lookup(101014) -> {"Lo","L"}; +lookup(101015) -> {"Lo","L"}; +lookup(101016) -> {"Lo","L"}; +lookup(101017) -> {"Lo","L"}; +lookup(101018) -> {"Lo","L"}; +lookup(101019) -> {"Lo","L"}; +lookup(101020) -> {"Lo","L"}; +lookup(101021) -> {"Lo","L"}; 
+lookup(101022) -> {"Lo","L"}; +lookup(101023) -> {"Lo","L"}; +lookup(101024) -> {"Lo","L"}; +lookup(101025) -> {"Lo","L"}; +lookup(101026) -> {"Lo","L"}; +lookup(101027) -> {"Lo","L"}; +lookup(101028) -> {"Lo","L"}; +lookup(101029) -> {"Lo","L"}; +lookup(101030) -> {"Lo","L"}; +lookup(101031) -> {"Lo","L"}; +lookup(101032) -> {"Lo","L"}; +lookup(101033) -> {"Lo","L"}; +lookup(101034) -> {"Lo","L"}; +lookup(101035) -> {"Lo","L"}; +lookup(101036) -> {"Lo","L"}; +lookup(101037) -> {"Lo","L"}; +lookup(101038) -> {"Lo","L"}; +lookup(101039) -> {"Lo","L"}; +lookup(101040) -> {"Lo","L"}; +lookup(101041) -> {"Lo","L"}; +lookup(101042) -> {"Lo","L"}; +lookup(101043) -> {"Lo","L"}; +lookup(101044) -> {"Lo","L"}; +lookup(101045) -> {"Lo","L"}; +lookup(101046) -> {"Lo","L"}; +lookup(101047) -> {"Lo","L"}; +lookup(101048) -> {"Lo","L"}; +lookup(101049) -> {"Lo","L"}; +lookup(101050) -> {"Lo","L"}; +lookup(101051) -> {"Lo","L"}; +lookup(101052) -> {"Lo","L"}; +lookup(101053) -> {"Lo","L"}; +lookup(101054) -> {"Lo","L"}; +lookup(101055) -> {"Lo","L"}; +lookup(101056) -> {"Lo","L"}; +lookup(101057) -> {"Lo","L"}; +lookup(101058) -> {"Lo","L"}; +lookup(101059) -> {"Lo","L"}; +lookup(101060) -> {"Lo","L"}; +lookup(101061) -> {"Lo","L"}; +lookup(101062) -> {"Lo","L"}; +lookup(101063) -> {"Lo","L"}; +lookup(101064) -> {"Lo","L"}; +lookup(101065) -> {"Lo","L"}; +lookup(101066) -> {"Lo","L"}; +lookup(101067) -> {"Lo","L"}; +lookup(101068) -> {"Lo","L"}; +lookup(101069) -> {"Lo","L"}; +lookup(101070) -> {"Lo","L"}; +lookup(101071) -> {"Lo","L"}; +lookup(101072) -> {"Lo","L"}; +lookup(101073) -> {"Lo","L"}; +lookup(101074) -> {"Lo","L"}; +lookup(101075) -> {"Lo","L"}; +lookup(101076) -> {"Lo","L"}; +lookup(101077) -> {"Lo","L"}; +lookup(101078) -> {"Lo","L"}; +lookup(101079) -> {"Lo","L"}; +lookup(101080) -> {"Lo","L"}; +lookup(101081) -> {"Lo","L"}; +lookup(101082) -> {"Lo","L"}; +lookup(101083) -> {"Lo","L"}; +lookup(101084) -> {"Lo","L"}; +lookup(101085) -> {"Lo","L"}; +lookup(101086) 
-> {"Lo","L"}; +lookup(101087) -> {"Lo","L"}; +lookup(101088) -> {"Lo","L"}; +lookup(101089) -> {"Lo","L"}; +lookup(101090) -> {"Lo","L"}; +lookup(101091) -> {"Lo","L"}; +lookup(101092) -> {"Lo","L"}; +lookup(101093) -> {"Lo","L"}; +lookup(101094) -> {"Lo","L"}; +lookup(101095) -> {"Lo","L"}; +lookup(101096) -> {"Lo","L"}; +lookup(101097) -> {"Lo","L"}; +lookup(101098) -> {"Lo","L"}; +lookup(101099) -> {"Lo","L"}; +lookup(101100) -> {"Lo","L"}; +lookup(101101) -> {"Lo","L"}; +lookup(101102) -> {"Lo","L"}; +lookup(101103) -> {"Lo","L"}; +lookup(101104) -> {"Lo","L"}; +lookup(101105) -> {"Lo","L"}; +lookup(101106) -> {"Lo","L"}; +lookup(101107) -> {"Lo","L"}; +lookup(101108) -> {"Lo","L"}; +lookup(101109) -> {"Lo","L"}; +lookup(101110) -> {"Lo","L"}; +lookup(101111) -> {"Lo","L"}; +lookup(101112) -> {"Lo","L"}; +lookup(101113) -> {"Lo","L"}; +lookup(101114) -> {"Lo","L"}; +lookup(101115) -> {"Lo","L"}; +lookup(101116) -> {"Lo","L"}; +lookup(101117) -> {"Lo","L"}; +lookup(101118) -> {"Lo","L"}; +lookup(101119) -> {"Lo","L"}; +lookup(101120) -> {"Lo","L"}; +lookup(101121) -> {"Lo","L"}; +lookup(101122) -> {"Lo","L"}; +lookup(101123) -> {"Lo","L"}; +lookup(101124) -> {"Lo","L"}; +lookup(101125) -> {"Lo","L"}; +lookup(101126) -> {"Lo","L"}; +lookup(101127) -> {"Lo","L"}; +lookup(101128) -> {"Lo","L"}; +lookup(101129) -> {"Lo","L"}; +lookup(101130) -> {"Lo","L"}; +lookup(101131) -> {"Lo","L"}; +lookup(101132) -> {"Lo","L"}; +lookup(101133) -> {"Lo","L"}; +lookup(101134) -> {"Lo","L"}; +lookup(101135) -> {"Lo","L"}; +lookup(101136) -> {"Lo","L"}; +lookup(101137) -> {"Lo","L"}; +lookup(101138) -> {"Lo","L"}; +lookup(101139) -> {"Lo","L"}; +lookup(101140) -> {"Lo","L"}; +lookup(101141) -> {"Lo","L"}; +lookup(101142) -> {"Lo","L"}; +lookup(101143) -> {"Lo","L"}; +lookup(101144) -> {"Lo","L"}; +lookup(101145) -> {"Lo","L"}; +lookup(101146) -> {"Lo","L"}; +lookup(101147) -> {"Lo","L"}; +lookup(101148) -> {"Lo","L"}; +lookup(101149) -> {"Lo","L"}; +lookup(101150) -> {"Lo","L"}; 
+lookup(101151) -> {"Lo","L"}; +lookup(101152) -> {"Lo","L"}; +lookup(101153) -> {"Lo","L"}; +lookup(101154) -> {"Lo","L"}; +lookup(101155) -> {"Lo","L"}; +lookup(101156) -> {"Lo","L"}; +lookup(101157) -> {"Lo","L"}; +lookup(101158) -> {"Lo","L"}; +lookup(101159) -> {"Lo","L"}; +lookup(101160) -> {"Lo","L"}; +lookup(101161) -> {"Lo","L"}; +lookup(101162) -> {"Lo","L"}; +lookup(101163) -> {"Lo","L"}; +lookup(101164) -> {"Lo","L"}; +lookup(101165) -> {"Lo","L"}; +lookup(101166) -> {"Lo","L"}; +lookup(101167) -> {"Lo","L"}; +lookup(101168) -> {"Lo","L"}; +lookup(101169) -> {"Lo","L"}; +lookup(101170) -> {"Lo","L"}; +lookup(101171) -> {"Lo","L"}; +lookup(101172) -> {"Lo","L"}; +lookup(101173) -> {"Lo","L"}; +lookup(101174) -> {"Lo","L"}; +lookup(101175) -> {"Lo","L"}; +lookup(101176) -> {"Lo","L"}; +lookup(101177) -> {"Lo","L"}; +lookup(101178) -> {"Lo","L"}; +lookup(101179) -> {"Lo","L"}; +lookup(101180) -> {"Lo","L"}; +lookup(101181) -> {"Lo","L"}; +lookup(101182) -> {"Lo","L"}; +lookup(101183) -> {"Lo","L"}; +lookup(101184) -> {"Lo","L"}; +lookup(101185) -> {"Lo","L"}; +lookup(101186) -> {"Lo","L"}; +lookup(101187) -> {"Lo","L"}; +lookup(101188) -> {"Lo","L"}; +lookup(101189) -> {"Lo","L"}; +lookup(101190) -> {"Lo","L"}; +lookup(101191) -> {"Lo","L"}; +lookup(101192) -> {"Lo","L"}; +lookup(101193) -> {"Lo","L"}; +lookup(101194) -> {"Lo","L"}; +lookup(101195) -> {"Lo","L"}; +lookup(101196) -> {"Lo","L"}; +lookup(101197) -> {"Lo","L"}; +lookup(101198) -> {"Lo","L"}; +lookup(101199) -> {"Lo","L"}; +lookup(101200) -> {"Lo","L"}; +lookup(101201) -> {"Lo","L"}; +lookup(101202) -> {"Lo","L"}; +lookup(101203) -> {"Lo","L"}; +lookup(101204) -> {"Lo","L"}; +lookup(101205) -> {"Lo","L"}; +lookup(101206) -> {"Lo","L"}; +lookup(101207) -> {"Lo","L"}; +lookup(101208) -> {"Lo","L"}; +lookup(101209) -> {"Lo","L"}; +lookup(101210) -> {"Lo","L"}; +lookup(101211) -> {"Lo","L"}; +lookup(101212) -> {"Lo","L"}; +lookup(101213) -> {"Lo","L"}; +lookup(101214) -> {"Lo","L"}; +lookup(101215) 
-> {"Lo","L"}; +lookup(101216) -> {"Lo","L"}; +lookup(101217) -> {"Lo","L"}; +lookup(101218) -> {"Lo","L"}; +lookup(101219) -> {"Lo","L"}; +lookup(101220) -> {"Lo","L"}; +lookup(101221) -> {"Lo","L"}; +lookup(101222) -> {"Lo","L"}; +lookup(101223) -> {"Lo","L"}; +lookup(101224) -> {"Lo","L"}; +lookup(101225) -> {"Lo","L"}; +lookup(101226) -> {"Lo","L"}; +lookup(101227) -> {"Lo","L"}; +lookup(101228) -> {"Lo","L"}; +lookup(101229) -> {"Lo","L"}; +lookup(101230) -> {"Lo","L"}; +lookup(101231) -> {"Lo","L"}; +lookup(101232) -> {"Lo","L"}; +lookup(101233) -> {"Lo","L"}; +lookup(101234) -> {"Lo","L"}; +lookup(101235) -> {"Lo","L"}; +lookup(101236) -> {"Lo","L"}; +lookup(101237) -> {"Lo","L"}; +lookup(101238) -> {"Lo","L"}; +lookup(101239) -> {"Lo","L"}; +lookup(101240) -> {"Lo","L"}; +lookup(101241) -> {"Lo","L"}; +lookup(101242) -> {"Lo","L"}; +lookup(101243) -> {"Lo","L"}; +lookup(101244) -> {"Lo","L"}; +lookup(101245) -> {"Lo","L"}; +lookup(101246) -> {"Lo","L"}; +lookup(101247) -> {"Lo","L"}; +lookup(101248) -> {"Lo","L"}; +lookup(101249) -> {"Lo","L"}; +lookup(101250) -> {"Lo","L"}; +lookup(101251) -> {"Lo","L"}; +lookup(101252) -> {"Lo","L"}; +lookup(101253) -> {"Lo","L"}; +lookup(101254) -> {"Lo","L"}; +lookup(101255) -> {"Lo","L"}; +lookup(101256) -> {"Lo","L"}; +lookup(101257) -> {"Lo","L"}; +lookup(101258) -> {"Lo","L"}; +lookup(101259) -> {"Lo","L"}; +lookup(101260) -> {"Lo","L"}; +lookup(101261) -> {"Lo","L"}; +lookup(101262) -> {"Lo","L"}; +lookup(101263) -> {"Lo","L"}; +lookup(101264) -> {"Lo","L"}; +lookup(101265) -> {"Lo","L"}; +lookup(101266) -> {"Lo","L"}; +lookup(101267) -> {"Lo","L"}; +lookup(101268) -> {"Lo","L"}; +lookup(101269) -> {"Lo","L"}; +lookup(101270) -> {"Lo","L"}; +lookup(101271) -> {"Lo","L"}; +lookup(101272) -> {"Lo","L"}; +lookup(101273) -> {"Lo","L"}; +lookup(101274) -> {"Lo","L"}; +lookup(101275) -> {"Lo","L"}; +lookup(101276) -> {"Lo","L"}; +lookup(101277) -> {"Lo","L"}; +lookup(101278) -> {"Lo","L"}; +lookup(101279) -> {"Lo","L"}; 
+lookup(101280) -> {"Lo","L"}; +lookup(101281) -> {"Lo","L"}; +lookup(101282) -> {"Lo","L"}; +lookup(101283) -> {"Lo","L"}; +lookup(101284) -> {"Lo","L"}; +lookup(101285) -> {"Lo","L"}; +lookup(101286) -> {"Lo","L"}; +lookup(101287) -> {"Lo","L"}; +lookup(101288) -> {"Lo","L"}; +lookup(101289) -> {"Lo","L"}; +lookup(101290) -> {"Lo","L"}; +lookup(101291) -> {"Lo","L"}; +lookup(101292) -> {"Lo","L"}; +lookup(101293) -> {"Lo","L"}; +lookup(101294) -> {"Lo","L"}; +lookup(101295) -> {"Lo","L"}; +lookup(101296) -> {"Lo","L"}; +lookup(101297) -> {"Lo","L"}; +lookup(101298) -> {"Lo","L"}; +lookup(101299) -> {"Lo","L"}; +lookup(101300) -> {"Lo","L"}; +lookup(101301) -> {"Lo","L"}; +lookup(101302) -> {"Lo","L"}; +lookup(101303) -> {"Lo","L"}; +lookup(101304) -> {"Lo","L"}; +lookup(101305) -> {"Lo","L"}; +lookup(101306) -> {"Lo","L"}; +lookup(101307) -> {"Lo","L"}; +lookup(101308) -> {"Lo","L"}; +lookup(101309) -> {"Lo","L"}; +lookup(101310) -> {"Lo","L"}; +lookup(101311) -> {"Lo","L"}; +lookup(101312) -> {"Lo","L"}; +lookup(101313) -> {"Lo","L"}; +lookup(101314) -> {"Lo","L"}; +lookup(101315) -> {"Lo","L"}; +lookup(101316) -> {"Lo","L"}; +lookup(101317) -> {"Lo","L"}; +lookup(101318) -> {"Lo","L"}; +lookup(101319) -> {"Lo","L"}; +lookup(101320) -> {"Lo","L"}; +lookup(101321) -> {"Lo","L"}; +lookup(101322) -> {"Lo","L"}; +lookup(101323) -> {"Lo","L"}; +lookup(101324) -> {"Lo","L"}; +lookup(101325) -> {"Lo","L"}; +lookup(101326) -> {"Lo","L"}; +lookup(101327) -> {"Lo","L"}; +lookup(101328) -> {"Lo","L"}; +lookup(101329) -> {"Lo","L"}; +lookup(101330) -> {"Lo","L"}; +lookup(101331) -> {"Lo","L"}; +lookup(101332) -> {"Lo","L"}; +lookup(101333) -> {"Lo","L"}; +lookup(101334) -> {"Lo","L"}; +lookup(101335) -> {"Lo","L"}; +lookup(101336) -> {"Lo","L"}; +lookup(101337) -> {"Lo","L"}; +lookup(101338) -> {"Lo","L"}; +lookup(101339) -> {"Lo","L"}; +lookup(101340) -> {"Lo","L"}; +lookup(101341) -> {"Lo","L"}; +lookup(101342) -> {"Lo","L"}; +lookup(101343) -> {"Lo","L"}; +lookup(101344) 
-> {"Lo","L"}; +lookup(101345) -> {"Lo","L"}; +lookup(101346) -> {"Lo","L"}; +lookup(101347) -> {"Lo","L"}; +lookup(101348) -> {"Lo","L"}; +lookup(101349) -> {"Lo","L"}; +lookup(101350) -> {"Lo","L"}; +lookup(101351) -> {"Lo","L"}; +lookup(101352) -> {"Lo","L"}; +lookup(101353) -> {"Lo","L"}; +lookup(101354) -> {"Lo","L"}; +lookup(101355) -> {"Lo","L"}; +lookup(101356) -> {"Lo","L"}; +lookup(101357) -> {"Lo","L"}; +lookup(101358) -> {"Lo","L"}; +lookup(101359) -> {"Lo","L"}; +lookup(101360) -> {"Lo","L"}; +lookup(101361) -> {"Lo","L"}; +lookup(101362) -> {"Lo","L"}; +lookup(101363) -> {"Lo","L"}; +lookup(101364) -> {"Lo","L"}; +lookup(101365) -> {"Lo","L"}; +lookup(101366) -> {"Lo","L"}; +lookup(101367) -> {"Lo","L"}; +lookup(101368) -> {"Lo","L"}; +lookup(101369) -> {"Lo","L"}; +lookup(101370) -> {"Lo","L"}; +lookup(101371) -> {"Lo","L"}; +lookup(101372) -> {"Lo","L"}; +lookup(101373) -> {"Lo","L"}; +lookup(101374) -> {"Lo","L"}; +lookup(101375) -> {"Lo","L"}; +lookup(101376) -> {"Lo","L"}; +lookup(101377) -> {"Lo","L"}; +lookup(101378) -> {"Lo","L"}; +lookup(101379) -> {"Lo","L"}; +lookup(101380) -> {"Lo","L"}; +lookup(101381) -> {"Lo","L"}; +lookup(101382) -> {"Lo","L"}; +lookup(101383) -> {"Lo","L"}; +lookup(101384) -> {"Lo","L"}; +lookup(101385) -> {"Lo","L"}; +lookup(101386) -> {"Lo","L"}; +lookup(101387) -> {"Lo","L"}; +lookup(101388) -> {"Lo","L"}; +lookup(101389) -> {"Lo","L"}; +lookup(101390) -> {"Lo","L"}; +lookup(101391) -> {"Lo","L"}; +lookup(101392) -> {"Lo","L"}; +lookup(101393) -> {"Lo","L"}; +lookup(101394) -> {"Lo","L"}; +lookup(101395) -> {"Lo","L"}; +lookup(101396) -> {"Lo","L"}; +lookup(101397) -> {"Lo","L"}; +lookup(101398) -> {"Lo","L"}; +lookup(101399) -> {"Lo","L"}; +lookup(101400) -> {"Lo","L"}; +lookup(101401) -> {"Lo","L"}; +lookup(101402) -> {"Lo","L"}; +lookup(101403) -> {"Lo","L"}; +lookup(101404) -> {"Lo","L"}; +lookup(101405) -> {"Lo","L"}; +lookup(101406) -> {"Lo","L"}; +lookup(101407) -> {"Lo","L"}; +lookup(101408) -> {"Lo","L"}; 
+lookup(101409) -> {"Lo","L"}; +lookup(101410) -> {"Lo","L"}; +lookup(101411) -> {"Lo","L"}; +lookup(101412) -> {"Lo","L"}; +lookup(101413) -> {"Lo","L"}; +lookup(101414) -> {"Lo","L"}; +lookup(101415) -> {"Lo","L"}; +lookup(101416) -> {"Lo","L"}; +lookup(101417) -> {"Lo","L"}; +lookup(101418) -> {"Lo","L"}; +lookup(101419) -> {"Lo","L"}; +lookup(101420) -> {"Lo","L"}; +lookup(101421) -> {"Lo","L"}; +lookup(101422) -> {"Lo","L"}; +lookup(101423) -> {"Lo","L"}; +lookup(101424) -> {"Lo","L"}; +lookup(101425) -> {"Lo","L"}; +lookup(101426) -> {"Lo","L"}; +lookup(101427) -> {"Lo","L"}; +lookup(101428) -> {"Lo","L"}; +lookup(101429) -> {"Lo","L"}; +lookup(101430) -> {"Lo","L"}; +lookup(101431) -> {"Lo","L"}; +lookup(101432) -> {"Lo","L"}; +lookup(101433) -> {"Lo","L"}; +lookup(101434) -> {"Lo","L"}; +lookup(101435) -> {"Lo","L"}; +lookup(101436) -> {"Lo","L"}; +lookup(101437) -> {"Lo","L"}; +lookup(101438) -> {"Lo","L"}; +lookup(101439) -> {"Lo","L"}; +lookup(101440) -> {"Lo","L"}; +lookup(101441) -> {"Lo","L"}; +lookup(101442) -> {"Lo","L"}; +lookup(101443) -> {"Lo","L"}; +lookup(101444) -> {"Lo","L"}; +lookup(101445) -> {"Lo","L"}; +lookup(101446) -> {"Lo","L"}; +lookup(101447) -> {"Lo","L"}; +lookup(101448) -> {"Lo","L"}; +lookup(101449) -> {"Lo","L"}; +lookup(101450) -> {"Lo","L"}; +lookup(101451) -> {"Lo","L"}; +lookup(101452) -> {"Lo","L"}; +lookup(101453) -> {"Lo","L"}; +lookup(101454) -> {"Lo","L"}; +lookup(101455) -> {"Lo","L"}; +lookup(101456) -> {"Lo","L"}; +lookup(101457) -> {"Lo","L"}; +lookup(101458) -> {"Lo","L"}; +lookup(101459) -> {"Lo","L"}; +lookup(101460) -> {"Lo","L"}; +lookup(101461) -> {"Lo","L"}; +lookup(101462) -> {"Lo","L"}; +lookup(101463) -> {"Lo","L"}; +lookup(101464) -> {"Lo","L"}; +lookup(101465) -> {"Lo","L"}; +lookup(101466) -> {"Lo","L"}; +lookup(101467) -> {"Lo","L"}; +lookup(101468) -> {"Lo","L"}; +lookup(101469) -> {"Lo","L"}; +lookup(101470) -> {"Lo","L"}; +lookup(101471) -> {"Lo","L"}; +lookup(101472) -> {"Lo","L"}; +lookup(101473) 
-> {"Lo","L"}; +lookup(101474) -> {"Lo","L"}; +lookup(101475) -> {"Lo","L"}; +lookup(101476) -> {"Lo","L"}; +lookup(101477) -> {"Lo","L"}; +lookup(101478) -> {"Lo","L"}; +lookup(101479) -> {"Lo","L"}; +lookup(101480) -> {"Lo","L"}; +lookup(101481) -> {"Lo","L"}; +lookup(101482) -> {"Lo","L"}; +lookup(101483) -> {"Lo","L"}; +lookup(101484) -> {"Lo","L"}; +lookup(101485) -> {"Lo","L"}; +lookup(101486) -> {"Lo","L"}; +lookup(101487) -> {"Lo","L"}; +lookup(101488) -> {"Lo","L"}; +lookup(101489) -> {"Lo","L"}; +lookup(101490) -> {"Lo","L"}; +lookup(101491) -> {"Lo","L"}; +lookup(101492) -> {"Lo","L"}; +lookup(101493) -> {"Lo","L"}; +lookup(101494) -> {"Lo","L"}; +lookup(101495) -> {"Lo","L"}; +lookup(101496) -> {"Lo","L"}; +lookup(101497) -> {"Lo","L"}; +lookup(101498) -> {"Lo","L"}; +lookup(101499) -> {"Lo","L"}; +lookup(101500) -> {"Lo","L"}; +lookup(101501) -> {"Lo","L"}; +lookup(101502) -> {"Lo","L"}; +lookup(101503) -> {"Lo","L"}; +lookup(101504) -> {"Lo","L"}; +lookup(101505) -> {"Lo","L"}; +lookup(101506) -> {"Lo","L"}; +lookup(101507) -> {"Lo","L"}; +lookup(101508) -> {"Lo","L"}; +lookup(101509) -> {"Lo","L"}; +lookup(101510) -> {"Lo","L"}; +lookup(101511) -> {"Lo","L"}; +lookup(101512) -> {"Lo","L"}; +lookup(101513) -> {"Lo","L"}; +lookup(101514) -> {"Lo","L"}; +lookup(101515) -> {"Lo","L"}; +lookup(101516) -> {"Lo","L"}; +lookup(101517) -> {"Lo","L"}; +lookup(101518) -> {"Lo","L"}; +lookup(101519) -> {"Lo","L"}; +lookup(101520) -> {"Lo","L"}; +lookup(101521) -> {"Lo","L"}; +lookup(101522) -> {"Lo","L"}; +lookup(101523) -> {"Lo","L"}; +lookup(101524) -> {"Lo","L"}; +lookup(101525) -> {"Lo","L"}; +lookup(101526) -> {"Lo","L"}; +lookup(101527) -> {"Lo","L"}; +lookup(101528) -> {"Lo","L"}; +lookup(101529) -> {"Lo","L"}; +lookup(101530) -> {"Lo","L"}; +lookup(101531) -> {"Lo","L"}; +lookup(101532) -> {"Lo","L"}; +lookup(101533) -> {"Lo","L"}; +lookup(101534) -> {"Lo","L"}; +lookup(101535) -> {"Lo","L"}; +lookup(101536) -> {"Lo","L"}; +lookup(101537) -> {"Lo","L"}; 
+lookup(101538) -> {"Lo","L"}; +lookup(101539) -> {"Lo","L"}; +lookup(101540) -> {"Lo","L"}; +lookup(101541) -> {"Lo","L"}; +lookup(101542) -> {"Lo","L"}; +lookup(101543) -> {"Lo","L"}; +lookup(101544) -> {"Lo","L"}; +lookup(101545) -> {"Lo","L"}; +lookup(101546) -> {"Lo","L"}; +lookup(101547) -> {"Lo","L"}; +lookup(101548) -> {"Lo","L"}; +lookup(101549) -> {"Lo","L"}; +lookup(101550) -> {"Lo","L"}; +lookup(101551) -> {"Lo","L"}; +lookup(101552) -> {"Lo","L"}; +lookup(101553) -> {"Lo","L"}; +lookup(101554) -> {"Lo","L"}; +lookup(101555) -> {"Lo","L"}; +lookup(101556) -> {"Lo","L"}; +lookup(101557) -> {"Lo","L"}; +lookup(101558) -> {"Lo","L"}; +lookup(101559) -> {"Lo","L"}; +lookup(101560) -> {"Lo","L"}; +lookup(101561) -> {"Lo","L"}; +lookup(101562) -> {"Lo","L"}; +lookup(101563) -> {"Lo","L"}; +lookup(101564) -> {"Lo","L"}; +lookup(101565) -> {"Lo","L"}; +lookup(101566) -> {"Lo","L"}; +lookup(101567) -> {"Lo","L"}; +lookup(101568) -> {"Lo","L"}; +lookup(101569) -> {"Lo","L"}; +lookup(101570) -> {"Lo","L"}; +lookup(101571) -> {"Lo","L"}; +lookup(101572) -> {"Lo","L"}; +lookup(101573) -> {"Lo","L"}; +lookup(101574) -> {"Lo","L"}; +lookup(101575) -> {"Lo","L"}; +lookup(101576) -> {"Lo","L"}; +lookup(101577) -> {"Lo","L"}; +lookup(101578) -> {"Lo","L"}; +lookup(101579) -> {"Lo","L"}; +lookup(101580) -> {"Lo","L"}; +lookup(101581) -> {"Lo","L"}; +lookup(101582) -> {"Lo","L"}; +lookup(101583) -> {"Lo","L"}; +lookup(101584) -> {"Lo","L"}; +lookup(101585) -> {"Lo","L"}; +lookup(101586) -> {"Lo","L"}; +lookup(101587) -> {"Lo","L"}; +lookup(101588) -> {"Lo","L"}; +lookup(101589) -> {"Lo","L"}; +lookup(101632) -> {"Lo","L"}; +lookup(101640) -> {"Lo","L"}; +lookup(110592) -> {"Lo","L"}; +lookup(110593) -> {"Lo","L"}; +lookup(110594) -> {"Lo","L"}; +lookup(110595) -> {"Lo","L"}; +lookup(110596) -> {"Lo","L"}; +lookup(110597) -> {"Lo","L"}; +lookup(110598) -> {"Lo","L"}; +lookup(110599) -> {"Lo","L"}; +lookup(110600) -> {"Lo","L"}; +lookup(110601) -> {"Lo","L"}; +lookup(110602) 
-> {"Lo","L"}; +lookup(110603) -> {"Lo","L"}; +lookup(110604) -> {"Lo","L"}; +lookup(110605) -> {"Lo","L"}; +lookup(110606) -> {"Lo","L"}; +lookup(110607) -> {"Lo","L"}; +lookup(110608) -> {"Lo","L"}; +lookup(110609) -> {"Lo","L"}; +lookup(110610) -> {"Lo","L"}; +lookup(110611) -> {"Lo","L"}; +lookup(110612) -> {"Lo","L"}; +lookup(110613) -> {"Lo","L"}; +lookup(110614) -> {"Lo","L"}; +lookup(110615) -> {"Lo","L"}; +lookup(110616) -> {"Lo","L"}; +lookup(110617) -> {"Lo","L"}; +lookup(110618) -> {"Lo","L"}; +lookup(110619) -> {"Lo","L"}; +lookup(110620) -> {"Lo","L"}; +lookup(110621) -> {"Lo","L"}; +lookup(110622) -> {"Lo","L"}; +lookup(110623) -> {"Lo","L"}; +lookup(110624) -> {"Lo","L"}; +lookup(110625) -> {"Lo","L"}; +lookup(110626) -> {"Lo","L"}; +lookup(110627) -> {"Lo","L"}; +lookup(110628) -> {"Lo","L"}; +lookup(110629) -> {"Lo","L"}; +lookup(110630) -> {"Lo","L"}; +lookup(110631) -> {"Lo","L"}; +lookup(110632) -> {"Lo","L"}; +lookup(110633) -> {"Lo","L"}; +lookup(110634) -> {"Lo","L"}; +lookup(110635) -> {"Lo","L"}; +lookup(110636) -> {"Lo","L"}; +lookup(110637) -> {"Lo","L"}; +lookup(110638) -> {"Lo","L"}; +lookup(110639) -> {"Lo","L"}; +lookup(110640) -> {"Lo","L"}; +lookup(110641) -> {"Lo","L"}; +lookup(110642) -> {"Lo","L"}; +lookup(110643) -> {"Lo","L"}; +lookup(110644) -> {"Lo","L"}; +lookup(110645) -> {"Lo","L"}; +lookup(110646) -> {"Lo","L"}; +lookup(110647) -> {"Lo","L"}; +lookup(110648) -> {"Lo","L"}; +lookup(110649) -> {"Lo","L"}; +lookup(110650) -> {"Lo","L"}; +lookup(110651) -> {"Lo","L"}; +lookup(110652) -> {"Lo","L"}; +lookup(110653) -> {"Lo","L"}; +lookup(110654) -> {"Lo","L"}; +lookup(110655) -> {"Lo","L"}; +lookup(110656) -> {"Lo","L"}; +lookup(110657) -> {"Lo","L"}; +lookup(110658) -> {"Lo","L"}; +lookup(110659) -> {"Lo","L"}; +lookup(110660) -> {"Lo","L"}; +lookup(110661) -> {"Lo","L"}; +lookup(110662) -> {"Lo","L"}; +lookup(110663) -> {"Lo","L"}; +lookup(110664) -> {"Lo","L"}; +lookup(110665) -> {"Lo","L"}; +lookup(110666) -> {"Lo","L"}; 
+lookup(110667) -> {"Lo","L"}; +lookup(110668) -> {"Lo","L"}; +lookup(110669) -> {"Lo","L"}; +lookup(110670) -> {"Lo","L"}; +lookup(110671) -> {"Lo","L"}; +lookup(110672) -> {"Lo","L"}; +lookup(110673) -> {"Lo","L"}; +lookup(110674) -> {"Lo","L"}; +lookup(110675) -> {"Lo","L"}; +lookup(110676) -> {"Lo","L"}; +lookup(110677) -> {"Lo","L"}; +lookup(110678) -> {"Lo","L"}; +lookup(110679) -> {"Lo","L"}; +lookup(110680) -> {"Lo","L"}; +lookup(110681) -> {"Lo","L"}; +lookup(110682) -> {"Lo","L"}; +lookup(110683) -> {"Lo","L"}; +lookup(110684) -> {"Lo","L"}; +lookup(110685) -> {"Lo","L"}; +lookup(110686) -> {"Lo","L"}; +lookup(110687) -> {"Lo","L"}; +lookup(110688) -> {"Lo","L"}; +lookup(110689) -> {"Lo","L"}; +lookup(110690) -> {"Lo","L"}; +lookup(110691) -> {"Lo","L"}; +lookup(110692) -> {"Lo","L"}; +lookup(110693) -> {"Lo","L"}; +lookup(110694) -> {"Lo","L"}; +lookup(110695) -> {"Lo","L"}; +lookup(110696) -> {"Lo","L"}; +lookup(110697) -> {"Lo","L"}; +lookup(110698) -> {"Lo","L"}; +lookup(110699) -> {"Lo","L"}; +lookup(110700) -> {"Lo","L"}; +lookup(110701) -> {"Lo","L"}; +lookup(110702) -> {"Lo","L"}; +lookup(110703) -> {"Lo","L"}; +lookup(110704) -> {"Lo","L"}; +lookup(110705) -> {"Lo","L"}; +lookup(110706) -> {"Lo","L"}; +lookup(110707) -> {"Lo","L"}; +lookup(110708) -> {"Lo","L"}; +lookup(110709) -> {"Lo","L"}; +lookup(110710) -> {"Lo","L"}; +lookup(110711) -> {"Lo","L"}; +lookup(110712) -> {"Lo","L"}; +lookup(110713) -> {"Lo","L"}; +lookup(110714) -> {"Lo","L"}; +lookup(110715) -> {"Lo","L"}; +lookup(110716) -> {"Lo","L"}; +lookup(110717) -> {"Lo","L"}; +lookup(110718) -> {"Lo","L"}; +lookup(110719) -> {"Lo","L"}; +lookup(110720) -> {"Lo","L"}; +lookup(110721) -> {"Lo","L"}; +lookup(110722) -> {"Lo","L"}; +lookup(110723) -> {"Lo","L"}; +lookup(110724) -> {"Lo","L"}; +lookup(110725) -> {"Lo","L"}; +lookup(110726) -> {"Lo","L"}; +lookup(110727) -> {"Lo","L"}; +lookup(110728) -> {"Lo","L"}; +lookup(110729) -> {"Lo","L"}; +lookup(110730) -> {"Lo","L"}; +lookup(110731) 
-> {"Lo","L"}; +lookup(110732) -> {"Lo","L"}; +lookup(110733) -> {"Lo","L"}; +lookup(110734) -> {"Lo","L"}; +lookup(110735) -> {"Lo","L"}; +lookup(110736) -> {"Lo","L"}; +lookup(110737) -> {"Lo","L"}; +lookup(110738) -> {"Lo","L"}; +lookup(110739) -> {"Lo","L"}; +lookup(110740) -> {"Lo","L"}; +lookup(110741) -> {"Lo","L"}; +lookup(110742) -> {"Lo","L"}; +lookup(110743) -> {"Lo","L"}; +lookup(110744) -> {"Lo","L"}; +lookup(110745) -> {"Lo","L"}; +lookup(110746) -> {"Lo","L"}; +lookup(110747) -> {"Lo","L"}; +lookup(110748) -> {"Lo","L"}; +lookup(110749) -> {"Lo","L"}; +lookup(110750) -> {"Lo","L"}; +lookup(110751) -> {"Lo","L"}; +lookup(110752) -> {"Lo","L"}; +lookup(110753) -> {"Lo","L"}; +lookup(110754) -> {"Lo","L"}; +lookup(110755) -> {"Lo","L"}; +lookup(110756) -> {"Lo","L"}; +lookup(110757) -> {"Lo","L"}; +lookup(110758) -> {"Lo","L"}; +lookup(110759) -> {"Lo","L"}; +lookup(110760) -> {"Lo","L"}; +lookup(110761) -> {"Lo","L"}; +lookup(110762) -> {"Lo","L"}; +lookup(110763) -> {"Lo","L"}; +lookup(110764) -> {"Lo","L"}; +lookup(110765) -> {"Lo","L"}; +lookup(110766) -> {"Lo","L"}; +lookup(110767) -> {"Lo","L"}; +lookup(110768) -> {"Lo","L"}; +lookup(110769) -> {"Lo","L"}; +lookup(110770) -> {"Lo","L"}; +lookup(110771) -> {"Lo","L"}; +lookup(110772) -> {"Lo","L"}; +lookup(110773) -> {"Lo","L"}; +lookup(110774) -> {"Lo","L"}; +lookup(110775) -> {"Lo","L"}; +lookup(110776) -> {"Lo","L"}; +lookup(110777) -> {"Lo","L"}; +lookup(110778) -> {"Lo","L"}; +lookup(110779) -> {"Lo","L"}; +lookup(110780) -> {"Lo","L"}; +lookup(110781) -> {"Lo","L"}; +lookup(110782) -> {"Lo","L"}; +lookup(110783) -> {"Lo","L"}; +lookup(110784) -> {"Lo","L"}; +lookup(110785) -> {"Lo","L"}; +lookup(110786) -> {"Lo","L"}; +lookup(110787) -> {"Lo","L"}; +lookup(110788) -> {"Lo","L"}; +lookup(110789) -> {"Lo","L"}; +lookup(110790) -> {"Lo","L"}; +lookup(110791) -> {"Lo","L"}; +lookup(110792) -> {"Lo","L"}; +lookup(110793) -> {"Lo","L"}; +lookup(110794) -> {"Lo","L"}; +lookup(110795) -> {"Lo","L"}; 
+lookup(110796) -> {"Lo","L"}; +lookup(110797) -> {"Lo","L"}; +lookup(110798) -> {"Lo","L"}; +lookup(110799) -> {"Lo","L"}; +lookup(110800) -> {"Lo","L"}; +lookup(110801) -> {"Lo","L"}; +lookup(110802) -> {"Lo","L"}; +lookup(110803) -> {"Lo","L"}; +lookup(110804) -> {"Lo","L"}; +lookup(110805) -> {"Lo","L"}; +lookup(110806) -> {"Lo","L"}; +lookup(110807) -> {"Lo","L"}; +lookup(110808) -> {"Lo","L"}; +lookup(110809) -> {"Lo","L"}; +lookup(110810) -> {"Lo","L"}; +lookup(110811) -> {"Lo","L"}; +lookup(110812) -> {"Lo","L"}; +lookup(110813) -> {"Lo","L"}; +lookup(110814) -> {"Lo","L"}; +lookup(110815) -> {"Lo","L"}; +lookup(110816) -> {"Lo","L"}; +lookup(110817) -> {"Lo","L"}; +lookup(110818) -> {"Lo","L"}; +lookup(110819) -> {"Lo","L"}; +lookup(110820) -> {"Lo","L"}; +lookup(110821) -> {"Lo","L"}; +lookup(110822) -> {"Lo","L"}; +lookup(110823) -> {"Lo","L"}; +lookup(110824) -> {"Lo","L"}; +lookup(110825) -> {"Lo","L"}; +lookup(110826) -> {"Lo","L"}; +lookup(110827) -> {"Lo","L"}; +lookup(110828) -> {"Lo","L"}; +lookup(110829) -> {"Lo","L"}; +lookup(110830) -> {"Lo","L"}; +lookup(110831) -> {"Lo","L"}; +lookup(110832) -> {"Lo","L"}; +lookup(110833) -> {"Lo","L"}; +lookup(110834) -> {"Lo","L"}; +lookup(110835) -> {"Lo","L"}; +lookup(110836) -> {"Lo","L"}; +lookup(110837) -> {"Lo","L"}; +lookup(110838) -> {"Lo","L"}; +lookup(110839) -> {"Lo","L"}; +lookup(110840) -> {"Lo","L"}; +lookup(110841) -> {"Lo","L"}; +lookup(110842) -> {"Lo","L"}; +lookup(110843) -> {"Lo","L"}; +lookup(110844) -> {"Lo","L"}; +lookup(110845) -> {"Lo","L"}; +lookup(110846) -> {"Lo","L"}; +lookup(110847) -> {"Lo","L"}; +lookup(110848) -> {"Lo","L"}; +lookup(110849) -> {"Lo","L"}; +lookup(110850) -> {"Lo","L"}; +lookup(110851) -> {"Lo","L"}; +lookup(110852) -> {"Lo","L"}; +lookup(110853) -> {"Lo","L"}; +lookup(110854) -> {"Lo","L"}; +lookup(110855) -> {"Lo","L"}; +lookup(110856) -> {"Lo","L"}; +lookup(110857) -> {"Lo","L"}; +lookup(110858) -> {"Lo","L"}; +lookup(110859) -> {"Lo","L"}; +lookup(110860) 
-> {"Lo","L"}; +lookup(110861) -> {"Lo","L"}; +lookup(110862) -> {"Lo","L"}; +lookup(110863) -> {"Lo","L"}; +lookup(110864) -> {"Lo","L"}; +lookup(110865) -> {"Lo","L"}; +lookup(110866) -> {"Lo","L"}; +lookup(110867) -> {"Lo","L"}; +lookup(110868) -> {"Lo","L"}; +lookup(110869) -> {"Lo","L"}; +lookup(110870) -> {"Lo","L"}; +lookup(110871) -> {"Lo","L"}; +lookup(110872) -> {"Lo","L"}; +lookup(110873) -> {"Lo","L"}; +lookup(110874) -> {"Lo","L"}; +lookup(110875) -> {"Lo","L"}; +lookup(110876) -> {"Lo","L"}; +lookup(110877) -> {"Lo","L"}; +lookup(110878) -> {"Lo","L"}; +lookup(110928) -> {"Lo","L"}; +lookup(110929) -> {"Lo","L"}; +lookup(110930) -> {"Lo","L"}; +lookup(110948) -> {"Lo","L"}; +lookup(110949) -> {"Lo","L"}; +lookup(110950) -> {"Lo","L"}; +lookup(110951) -> {"Lo","L"}; +lookup(110960) -> {"Lo","L"}; +lookup(110961) -> {"Lo","L"}; +lookup(110962) -> {"Lo","L"}; +lookup(110963) -> {"Lo","L"}; +lookup(110964) -> {"Lo","L"}; +lookup(110965) -> {"Lo","L"}; +lookup(110966) -> {"Lo","L"}; +lookup(110967) -> {"Lo","L"}; +lookup(110968) -> {"Lo","L"}; +lookup(110969) -> {"Lo","L"}; +lookup(110970) -> {"Lo","L"}; +lookup(110971) -> {"Lo","L"}; +lookup(110972) -> {"Lo","L"}; +lookup(110973) -> {"Lo","L"}; +lookup(110974) -> {"Lo","L"}; +lookup(110975) -> {"Lo","L"}; +lookup(110976) -> {"Lo","L"}; +lookup(110977) -> {"Lo","L"}; +lookup(110978) -> {"Lo","L"}; +lookup(110979) -> {"Lo","L"}; +lookup(110980) -> {"Lo","L"}; +lookup(110981) -> {"Lo","L"}; +lookup(110982) -> {"Lo","L"}; +lookup(110983) -> {"Lo","L"}; +lookup(110984) -> {"Lo","L"}; +lookup(110985) -> {"Lo","L"}; +lookup(110986) -> {"Lo","L"}; +lookup(110987) -> {"Lo","L"}; +lookup(110988) -> {"Lo","L"}; +lookup(110989) -> {"Lo","L"}; +lookup(110990) -> {"Lo","L"}; +lookup(110991) -> {"Lo","L"}; +lookup(110992) -> {"Lo","L"}; +lookup(110993) -> {"Lo","L"}; +lookup(110994) -> {"Lo","L"}; +lookup(110995) -> {"Lo","L"}; +lookup(110996) -> {"Lo","L"}; +lookup(110997) -> {"Lo","L"}; +lookup(110998) -> {"Lo","L"}; 
+lookup(110999) -> {"Lo","L"}; +lookup(111000) -> {"Lo","L"}; +lookup(111001) -> {"Lo","L"}; +lookup(111002) -> {"Lo","L"}; +lookup(111003) -> {"Lo","L"}; +lookup(111004) -> {"Lo","L"}; +lookup(111005) -> {"Lo","L"}; +lookup(111006) -> {"Lo","L"}; +lookup(111007) -> {"Lo","L"}; +lookup(111008) -> {"Lo","L"}; +lookup(111009) -> {"Lo","L"}; +lookup(111010) -> {"Lo","L"}; +lookup(111011) -> {"Lo","L"}; +lookup(111012) -> {"Lo","L"}; +lookup(111013) -> {"Lo","L"}; +lookup(111014) -> {"Lo","L"}; +lookup(111015) -> {"Lo","L"}; +lookup(111016) -> {"Lo","L"}; +lookup(111017) -> {"Lo","L"}; +lookup(111018) -> {"Lo","L"}; +lookup(111019) -> {"Lo","L"}; +lookup(111020) -> {"Lo","L"}; +lookup(111021) -> {"Lo","L"}; +lookup(111022) -> {"Lo","L"}; +lookup(111023) -> {"Lo","L"}; +lookup(111024) -> {"Lo","L"}; +lookup(111025) -> {"Lo","L"}; +lookup(111026) -> {"Lo","L"}; +lookup(111027) -> {"Lo","L"}; +lookup(111028) -> {"Lo","L"}; +lookup(111029) -> {"Lo","L"}; +lookup(111030) -> {"Lo","L"}; +lookup(111031) -> {"Lo","L"}; +lookup(111032) -> {"Lo","L"}; +lookup(111033) -> {"Lo","L"}; +lookup(111034) -> {"Lo","L"}; +lookup(111035) -> {"Lo","L"}; +lookup(111036) -> {"Lo","L"}; +lookup(111037) -> {"Lo","L"}; +lookup(111038) -> {"Lo","L"}; +lookup(111039) -> {"Lo","L"}; +lookup(111040) -> {"Lo","L"}; +lookup(111041) -> {"Lo","L"}; +lookup(111042) -> {"Lo","L"}; +lookup(111043) -> {"Lo","L"}; +lookup(111044) -> {"Lo","L"}; +lookup(111045) -> {"Lo","L"}; +lookup(111046) -> {"Lo","L"}; +lookup(111047) -> {"Lo","L"}; +lookup(111048) -> {"Lo","L"}; +lookup(111049) -> {"Lo","L"}; +lookup(111050) -> {"Lo","L"}; +lookup(111051) -> {"Lo","L"}; +lookup(111052) -> {"Lo","L"}; +lookup(111053) -> {"Lo","L"}; +lookup(111054) -> {"Lo","L"}; +lookup(111055) -> {"Lo","L"}; +lookup(111056) -> {"Lo","L"}; +lookup(111057) -> {"Lo","L"}; +lookup(111058) -> {"Lo","L"}; +lookup(111059) -> {"Lo","L"}; +lookup(111060) -> {"Lo","L"}; +lookup(111061) -> {"Lo","L"}; +lookup(111062) -> {"Lo","L"}; +lookup(111063) 
-> {"Lo","L"}; +lookup(111064) -> {"Lo","L"}; +lookup(111065) -> {"Lo","L"}; +lookup(111066) -> {"Lo","L"}; +lookup(111067) -> {"Lo","L"}; +lookup(111068) -> {"Lo","L"}; +lookup(111069) -> {"Lo","L"}; +lookup(111070) -> {"Lo","L"}; +lookup(111071) -> {"Lo","L"}; +lookup(111072) -> {"Lo","L"}; +lookup(111073) -> {"Lo","L"}; +lookup(111074) -> {"Lo","L"}; +lookup(111075) -> {"Lo","L"}; +lookup(111076) -> {"Lo","L"}; +lookup(111077) -> {"Lo","L"}; +lookup(111078) -> {"Lo","L"}; +lookup(111079) -> {"Lo","L"}; +lookup(111080) -> {"Lo","L"}; +lookup(111081) -> {"Lo","L"}; +lookup(111082) -> {"Lo","L"}; +lookup(111083) -> {"Lo","L"}; +lookup(111084) -> {"Lo","L"}; +lookup(111085) -> {"Lo","L"}; +lookup(111086) -> {"Lo","L"}; +lookup(111087) -> {"Lo","L"}; +lookup(111088) -> {"Lo","L"}; +lookup(111089) -> {"Lo","L"}; +lookup(111090) -> {"Lo","L"}; +lookup(111091) -> {"Lo","L"}; +lookup(111092) -> {"Lo","L"}; +lookup(111093) -> {"Lo","L"}; +lookup(111094) -> {"Lo","L"}; +lookup(111095) -> {"Lo","L"}; +lookup(111096) -> {"Lo","L"}; +lookup(111097) -> {"Lo","L"}; +lookup(111098) -> {"Lo","L"}; +lookup(111099) -> {"Lo","L"}; +lookup(111100) -> {"Lo","L"}; +lookup(111101) -> {"Lo","L"}; +lookup(111102) -> {"Lo","L"}; +lookup(111103) -> {"Lo","L"}; +lookup(111104) -> {"Lo","L"}; +lookup(111105) -> {"Lo","L"}; +lookup(111106) -> {"Lo","L"}; +lookup(111107) -> {"Lo","L"}; +lookup(111108) -> {"Lo","L"}; +lookup(111109) -> {"Lo","L"}; +lookup(111110) -> {"Lo","L"}; +lookup(111111) -> {"Lo","L"}; +lookup(111112) -> {"Lo","L"}; +lookup(111113) -> {"Lo","L"}; +lookup(111114) -> {"Lo","L"}; +lookup(111115) -> {"Lo","L"}; +lookup(111116) -> {"Lo","L"}; +lookup(111117) -> {"Lo","L"}; +lookup(111118) -> {"Lo","L"}; +lookup(111119) -> {"Lo","L"}; +lookup(111120) -> {"Lo","L"}; +lookup(111121) -> {"Lo","L"}; +lookup(111122) -> {"Lo","L"}; +lookup(111123) -> {"Lo","L"}; +lookup(111124) -> {"Lo","L"}; +lookup(111125) -> {"Lo","L"}; +lookup(111126) -> {"Lo","L"}; +lookup(111127) -> {"Lo","L"}; 
+lookup(111128) -> {"Lo","L"}; +lookup(111129) -> {"Lo","L"}; +lookup(111130) -> {"Lo","L"}; +lookup(111131) -> {"Lo","L"}; +lookup(111132) -> {"Lo","L"}; +lookup(111133) -> {"Lo","L"}; +lookup(111134) -> {"Lo","L"}; +lookup(111135) -> {"Lo","L"}; +lookup(111136) -> {"Lo","L"}; +lookup(111137) -> {"Lo","L"}; +lookup(111138) -> {"Lo","L"}; +lookup(111139) -> {"Lo","L"}; +lookup(111140) -> {"Lo","L"}; +lookup(111141) -> {"Lo","L"}; +lookup(111142) -> {"Lo","L"}; +lookup(111143) -> {"Lo","L"}; +lookup(111144) -> {"Lo","L"}; +lookup(111145) -> {"Lo","L"}; +lookup(111146) -> {"Lo","L"}; +lookup(111147) -> {"Lo","L"}; +lookup(111148) -> {"Lo","L"}; +lookup(111149) -> {"Lo","L"}; +lookup(111150) -> {"Lo","L"}; +lookup(111151) -> {"Lo","L"}; +lookup(111152) -> {"Lo","L"}; +lookup(111153) -> {"Lo","L"}; +lookup(111154) -> {"Lo","L"}; +lookup(111155) -> {"Lo","L"}; +lookup(111156) -> {"Lo","L"}; +lookup(111157) -> {"Lo","L"}; +lookup(111158) -> {"Lo","L"}; +lookup(111159) -> {"Lo","L"}; +lookup(111160) -> {"Lo","L"}; +lookup(111161) -> {"Lo","L"}; +lookup(111162) -> {"Lo","L"}; +lookup(111163) -> {"Lo","L"}; +lookup(111164) -> {"Lo","L"}; +lookup(111165) -> {"Lo","L"}; +lookup(111166) -> {"Lo","L"}; +lookup(111167) -> {"Lo","L"}; +lookup(111168) -> {"Lo","L"}; +lookup(111169) -> {"Lo","L"}; +lookup(111170) -> {"Lo","L"}; +lookup(111171) -> {"Lo","L"}; +lookup(111172) -> {"Lo","L"}; +lookup(111173) -> {"Lo","L"}; +lookup(111174) -> {"Lo","L"}; +lookup(111175) -> {"Lo","L"}; +lookup(111176) -> {"Lo","L"}; +lookup(111177) -> {"Lo","L"}; +lookup(111178) -> {"Lo","L"}; +lookup(111179) -> {"Lo","L"}; +lookup(111180) -> {"Lo","L"}; +lookup(111181) -> {"Lo","L"}; +lookup(111182) -> {"Lo","L"}; +lookup(111183) -> {"Lo","L"}; +lookup(111184) -> {"Lo","L"}; +lookup(111185) -> {"Lo","L"}; +lookup(111186) -> {"Lo","L"}; +lookup(111187) -> {"Lo","L"}; +lookup(111188) -> {"Lo","L"}; +lookup(111189) -> {"Lo","L"}; +lookup(111190) -> {"Lo","L"}; +lookup(111191) -> {"Lo","L"}; +lookup(111192) 
-> {"Lo","L"}; +lookup(111193) -> {"Lo","L"}; +lookup(111194) -> {"Lo","L"}; +lookup(111195) -> {"Lo","L"}; +lookup(111196) -> {"Lo","L"}; +lookup(111197) -> {"Lo","L"}; +lookup(111198) -> {"Lo","L"}; +lookup(111199) -> {"Lo","L"}; +lookup(111200) -> {"Lo","L"}; +lookup(111201) -> {"Lo","L"}; +lookup(111202) -> {"Lo","L"}; +lookup(111203) -> {"Lo","L"}; +lookup(111204) -> {"Lo","L"}; +lookup(111205) -> {"Lo","L"}; +lookup(111206) -> {"Lo","L"}; +lookup(111207) -> {"Lo","L"}; +lookup(111208) -> {"Lo","L"}; +lookup(111209) -> {"Lo","L"}; +lookup(111210) -> {"Lo","L"}; +lookup(111211) -> {"Lo","L"}; +lookup(111212) -> {"Lo","L"}; +lookup(111213) -> {"Lo","L"}; +lookup(111214) -> {"Lo","L"}; +lookup(111215) -> {"Lo","L"}; +lookup(111216) -> {"Lo","L"}; +lookup(111217) -> {"Lo","L"}; +lookup(111218) -> {"Lo","L"}; +lookup(111219) -> {"Lo","L"}; +lookup(111220) -> {"Lo","L"}; +lookup(111221) -> {"Lo","L"}; +lookup(111222) -> {"Lo","L"}; +lookup(111223) -> {"Lo","L"}; +lookup(111224) -> {"Lo","L"}; +lookup(111225) -> {"Lo","L"}; +lookup(111226) -> {"Lo","L"}; +lookup(111227) -> {"Lo","L"}; +lookup(111228) -> {"Lo","L"}; +lookup(111229) -> {"Lo","L"}; +lookup(111230) -> {"Lo","L"}; +lookup(111231) -> {"Lo","L"}; +lookup(111232) -> {"Lo","L"}; +lookup(111233) -> {"Lo","L"}; +lookup(111234) -> {"Lo","L"}; +lookup(111235) -> {"Lo","L"}; +lookup(111236) -> {"Lo","L"}; +lookup(111237) -> {"Lo","L"}; +lookup(111238) -> {"Lo","L"}; +lookup(111239) -> {"Lo","L"}; +lookup(111240) -> {"Lo","L"}; +lookup(111241) -> {"Lo","L"}; +lookup(111242) -> {"Lo","L"}; +lookup(111243) -> {"Lo","L"}; +lookup(111244) -> {"Lo","L"}; +lookup(111245) -> {"Lo","L"}; +lookup(111246) -> {"Lo","L"}; +lookup(111247) -> {"Lo","L"}; +lookup(111248) -> {"Lo","L"}; +lookup(111249) -> {"Lo","L"}; +lookup(111250) -> {"Lo","L"}; +lookup(111251) -> {"Lo","L"}; +lookup(111252) -> {"Lo","L"}; +lookup(111253) -> {"Lo","L"}; +lookup(111254) -> {"Lo","L"}; +lookup(111255) -> {"Lo","L"}; +lookup(111256) -> {"Lo","L"}; 
+lookup(111257) -> {"Lo","L"}; +lookup(111258) -> {"Lo","L"}; +lookup(111259) -> {"Lo","L"}; +lookup(111260) -> {"Lo","L"}; +lookup(111261) -> {"Lo","L"}; +lookup(111262) -> {"Lo","L"}; +lookup(111263) -> {"Lo","L"}; +lookup(111264) -> {"Lo","L"}; +lookup(111265) -> {"Lo","L"}; +lookup(111266) -> {"Lo","L"}; +lookup(111267) -> {"Lo","L"}; +lookup(111268) -> {"Lo","L"}; +lookup(111269) -> {"Lo","L"}; +lookup(111270) -> {"Lo","L"}; +lookup(111271) -> {"Lo","L"}; +lookup(111272) -> {"Lo","L"}; +lookup(111273) -> {"Lo","L"}; +lookup(111274) -> {"Lo","L"}; +lookup(111275) -> {"Lo","L"}; +lookup(111276) -> {"Lo","L"}; +lookup(111277) -> {"Lo","L"}; +lookup(111278) -> {"Lo","L"}; +lookup(111279) -> {"Lo","L"}; +lookup(111280) -> {"Lo","L"}; +lookup(111281) -> {"Lo","L"}; +lookup(111282) -> {"Lo","L"}; +lookup(111283) -> {"Lo","L"}; +lookup(111284) -> {"Lo","L"}; +lookup(111285) -> {"Lo","L"}; +lookup(111286) -> {"Lo","L"}; +lookup(111287) -> {"Lo","L"}; +lookup(111288) -> {"Lo","L"}; +lookup(111289) -> {"Lo","L"}; +lookup(111290) -> {"Lo","L"}; +lookup(111291) -> {"Lo","L"}; +lookup(111292) -> {"Lo","L"}; +lookup(111293) -> {"Lo","L"}; +lookup(111294) -> {"Lo","L"}; +lookup(111295) -> {"Lo","L"}; +lookup(111296) -> {"Lo","L"}; +lookup(111297) -> {"Lo","L"}; +lookup(111298) -> {"Lo","L"}; +lookup(111299) -> {"Lo","L"}; +lookup(111300) -> {"Lo","L"}; +lookup(111301) -> {"Lo","L"}; +lookup(111302) -> {"Lo","L"}; +lookup(111303) -> {"Lo","L"}; +lookup(111304) -> {"Lo","L"}; +lookup(111305) -> {"Lo","L"}; +lookup(111306) -> {"Lo","L"}; +lookup(111307) -> {"Lo","L"}; +lookup(111308) -> {"Lo","L"}; +lookup(111309) -> {"Lo","L"}; +lookup(111310) -> {"Lo","L"}; +lookup(111311) -> {"Lo","L"}; +lookup(111312) -> {"Lo","L"}; +lookup(111313) -> {"Lo","L"}; +lookup(111314) -> {"Lo","L"}; +lookup(111315) -> {"Lo","L"}; +lookup(111316) -> {"Lo","L"}; +lookup(111317) -> {"Lo","L"}; +lookup(111318) -> {"Lo","L"}; +lookup(111319) -> {"Lo","L"}; +lookup(111320) -> {"Lo","L"}; +lookup(111321) 
-> {"Lo","L"}; +lookup(111322) -> {"Lo","L"}; +lookup(111323) -> {"Lo","L"}; +lookup(111324) -> {"Lo","L"}; +lookup(111325) -> {"Lo","L"}; +lookup(111326) -> {"Lo","L"}; +lookup(111327) -> {"Lo","L"}; +lookup(111328) -> {"Lo","L"}; +lookup(111329) -> {"Lo","L"}; +lookup(111330) -> {"Lo","L"}; +lookup(111331) -> {"Lo","L"}; +lookup(111332) -> {"Lo","L"}; +lookup(111333) -> {"Lo","L"}; +lookup(111334) -> {"Lo","L"}; +lookup(111335) -> {"Lo","L"}; +lookup(111336) -> {"Lo","L"}; +lookup(111337) -> {"Lo","L"}; +lookup(111338) -> {"Lo","L"}; +lookup(111339) -> {"Lo","L"}; +lookup(111340) -> {"Lo","L"}; +lookup(111341) -> {"Lo","L"}; +lookup(111342) -> {"Lo","L"}; +lookup(111343) -> {"Lo","L"}; +lookup(111344) -> {"Lo","L"}; +lookup(111345) -> {"Lo","L"}; +lookup(111346) -> {"Lo","L"}; +lookup(111347) -> {"Lo","L"}; +lookup(111348) -> {"Lo","L"}; +lookup(111349) -> {"Lo","L"}; +lookup(111350) -> {"Lo","L"}; +lookup(111351) -> {"Lo","L"}; +lookup(111352) -> {"Lo","L"}; +lookup(111353) -> {"Lo","L"}; +lookup(111354) -> {"Lo","L"}; +lookup(111355) -> {"Lo","L"}; +lookup(113664) -> {"Lo","L"}; +lookup(113665) -> {"Lo","L"}; +lookup(113666) -> {"Lo","L"}; +lookup(113667) -> {"Lo","L"}; +lookup(113668) -> {"Lo","L"}; +lookup(113669) -> {"Lo","L"}; +lookup(113670) -> {"Lo","L"}; +lookup(113671) -> {"Lo","L"}; +lookup(113672) -> {"Lo","L"}; +lookup(113673) -> {"Lo","L"}; +lookup(113674) -> {"Lo","L"}; +lookup(113675) -> {"Lo","L"}; +lookup(113676) -> {"Lo","L"}; +lookup(113677) -> {"Lo","L"}; +lookup(113678) -> {"Lo","L"}; +lookup(113679) -> {"Lo","L"}; +lookup(113680) -> {"Lo","L"}; +lookup(113681) -> {"Lo","L"}; +lookup(113682) -> {"Lo","L"}; +lookup(113683) -> {"Lo","L"}; +lookup(113684) -> {"Lo","L"}; +lookup(113685) -> {"Lo","L"}; +lookup(113686) -> {"Lo","L"}; +lookup(113687) -> {"Lo","L"}; +lookup(113688) -> {"Lo","L"}; +lookup(113689) -> {"Lo","L"}; +lookup(113690) -> {"Lo","L"}; +lookup(113691) -> {"Lo","L"}; +lookup(113692) -> {"Lo","L"}; +lookup(113693) -> {"Lo","L"}; 
+lookup(113694) -> {"Lo","L"}; +lookup(113695) -> {"Lo","L"}; +lookup(113696) -> {"Lo","L"}; +lookup(113697) -> {"Lo","L"}; +lookup(113698) -> {"Lo","L"}; +lookup(113699) -> {"Lo","L"}; +lookup(113700) -> {"Lo","L"}; +lookup(113701) -> {"Lo","L"}; +lookup(113702) -> {"Lo","L"}; +lookup(113703) -> {"Lo","L"}; +lookup(113704) -> {"Lo","L"}; +lookup(113705) -> {"Lo","L"}; +lookup(113706) -> {"Lo","L"}; +lookup(113707) -> {"Lo","L"}; +lookup(113708) -> {"Lo","L"}; +lookup(113709) -> {"Lo","L"}; +lookup(113710) -> {"Lo","L"}; +lookup(113711) -> {"Lo","L"}; +lookup(113712) -> {"Lo","L"}; +lookup(113713) -> {"Lo","L"}; +lookup(113714) -> {"Lo","L"}; +lookup(113715) -> {"Lo","L"}; +lookup(113716) -> {"Lo","L"}; +lookup(113717) -> {"Lo","L"}; +lookup(113718) -> {"Lo","L"}; +lookup(113719) -> {"Lo","L"}; +lookup(113720) -> {"Lo","L"}; +lookup(113721) -> {"Lo","L"}; +lookup(113722) -> {"Lo","L"}; +lookup(113723) -> {"Lo","L"}; +lookup(113724) -> {"Lo","L"}; +lookup(113725) -> {"Lo","L"}; +lookup(113726) -> {"Lo","L"}; +lookup(113727) -> {"Lo","L"}; +lookup(113728) -> {"Lo","L"}; +lookup(113729) -> {"Lo","L"}; +lookup(113730) -> {"Lo","L"}; +lookup(113731) -> {"Lo","L"}; +lookup(113732) -> {"Lo","L"}; +lookup(113733) -> {"Lo","L"}; +lookup(113734) -> {"Lo","L"}; +lookup(113735) -> {"Lo","L"}; +lookup(113736) -> {"Lo","L"}; +lookup(113737) -> {"Lo","L"}; +lookup(113738) -> {"Lo","L"}; +lookup(113739) -> {"Lo","L"}; +lookup(113740) -> {"Lo","L"}; +lookup(113741) -> {"Lo","L"}; +lookup(113742) -> {"Lo","L"}; +lookup(113743) -> {"Lo","L"}; +lookup(113744) -> {"Lo","L"}; +lookup(113745) -> {"Lo","L"}; +lookup(113746) -> {"Lo","L"}; +lookup(113747) -> {"Lo","L"}; +lookup(113748) -> {"Lo","L"}; +lookup(113749) -> {"Lo","L"}; +lookup(113750) -> {"Lo","L"}; +lookup(113751) -> {"Lo","L"}; +lookup(113752) -> {"Lo","L"}; +lookup(113753) -> {"Lo","L"}; +lookup(113754) -> {"Lo","L"}; +lookup(113755) -> {"Lo","L"}; +lookup(113756) -> {"Lo","L"}; +lookup(113757) -> {"Lo","L"}; +lookup(113758) 
-> {"Lo","L"}; +lookup(113759) -> {"Lo","L"}; +lookup(113760) -> {"Lo","L"}; +lookup(113761) -> {"Lo","L"}; +lookup(113762) -> {"Lo","L"}; +lookup(113763) -> {"Lo","L"}; +lookup(113764) -> {"Lo","L"}; +lookup(113765) -> {"Lo","L"}; +lookup(113766) -> {"Lo","L"}; +lookup(113767) -> {"Lo","L"}; +lookup(113768) -> {"Lo","L"}; +lookup(113769) -> {"Lo","L"}; +lookup(113770) -> {"Lo","L"}; +lookup(113776) -> {"Lo","L"}; +lookup(113777) -> {"Lo","L"}; +lookup(113778) -> {"Lo","L"}; +lookup(113779) -> {"Lo","L"}; +lookup(113780) -> {"Lo","L"}; +lookup(113781) -> {"Lo","L"}; +lookup(113782) -> {"Lo","L"}; +lookup(113783) -> {"Lo","L"}; +lookup(113784) -> {"Lo","L"}; +lookup(113785) -> {"Lo","L"}; +lookup(113786) -> {"Lo","L"}; +lookup(113787) -> {"Lo","L"}; +lookup(113788) -> {"Lo","L"}; +lookup(113792) -> {"Lo","L"}; +lookup(113793) -> {"Lo","L"}; +lookup(113794) -> {"Lo","L"}; +lookup(113795) -> {"Lo","L"}; +lookup(113796) -> {"Lo","L"}; +lookup(113797) -> {"Lo","L"}; +lookup(113798) -> {"Lo","L"}; +lookup(113799) -> {"Lo","L"}; +lookup(113800) -> {"Lo","L"}; +lookup(113808) -> {"Lo","L"}; +lookup(113809) -> {"Lo","L"}; +lookup(113810) -> {"Lo","L"}; +lookup(113811) -> {"Lo","L"}; +lookup(113812) -> {"Lo","L"}; +lookup(113813) -> {"Lo","L"}; +lookup(113814) -> {"Lo","L"}; +lookup(113815) -> {"Lo","L"}; +lookup(113816) -> {"Lo","L"}; +lookup(113817) -> {"Lo","L"}; +lookup(113820) -> {"So","L"}; +lookup(113821) -> {"Mn","NSM"}; +lookup(113822) -> {"Mn","NSM"}; +lookup(113823) -> {"Po","L"}; +lookup(113824) -> {"Cf","BN"}; +lookup(113825) -> {"Cf","BN"}; +lookup(113826) -> {"Cf","BN"}; +lookup(113827) -> {"Cf","BN"}; +lookup(118784) -> {"So","L"}; +lookup(118785) -> {"So","L"}; +lookup(118786) -> {"So","L"}; +lookup(118787) -> {"So","L"}; +lookup(118788) -> {"So","L"}; +lookup(118789) -> {"So","L"}; +lookup(118790) -> {"So","L"}; +lookup(118791) -> {"So","L"}; +lookup(118792) -> {"So","L"}; +lookup(118793) -> {"So","L"}; +lookup(118794) -> {"So","L"}; +lookup(118795) -> 
{"So","L"}; +lookup(118796) -> {"So","L"}; +lookup(118797) -> {"So","L"}; +lookup(118798) -> {"So","L"}; +lookup(118799) -> {"So","L"}; +lookup(118800) -> {"So","L"}; +lookup(118801) -> {"So","L"}; +lookup(118802) -> {"So","L"}; +lookup(118803) -> {"So","L"}; +lookup(118804) -> {"So","L"}; +lookup(118805) -> {"So","L"}; +lookup(118806) -> {"So","L"}; +lookup(118807) -> {"So","L"}; +lookup(118808) -> {"So","L"}; +lookup(118809) -> {"So","L"}; +lookup(118810) -> {"So","L"}; +lookup(118811) -> {"So","L"}; +lookup(118812) -> {"So","L"}; +lookup(118813) -> {"So","L"}; +lookup(118814) -> {"So","L"}; +lookup(118815) -> {"So","L"}; +lookup(118816) -> {"So","L"}; +lookup(118817) -> {"So","L"}; +lookup(118818) -> {"So","L"}; +lookup(118819) -> {"So","L"}; +lookup(118820) -> {"So","L"}; +lookup(118821) -> {"So","L"}; +lookup(118822) -> {"So","L"}; +lookup(118823) -> {"So","L"}; +lookup(118824) -> {"So","L"}; +lookup(118825) -> {"So","L"}; +lookup(118826) -> {"So","L"}; +lookup(118827) -> {"So","L"}; +lookup(118828) -> {"So","L"}; +lookup(118829) -> {"So","L"}; +lookup(118830) -> {"So","L"}; +lookup(118831) -> {"So","L"}; +lookup(118832) -> {"So","L"}; +lookup(118833) -> {"So","L"}; +lookup(118834) -> {"So","L"}; +lookup(118835) -> {"So","L"}; +lookup(118836) -> {"So","L"}; +lookup(118837) -> {"So","L"}; +lookup(118838) -> {"So","L"}; +lookup(118839) -> {"So","L"}; +lookup(118840) -> {"So","L"}; +lookup(118841) -> {"So","L"}; +lookup(118842) -> {"So","L"}; +lookup(118843) -> {"So","L"}; +lookup(118844) -> {"So","L"}; +lookup(118845) -> {"So","L"}; +lookup(118846) -> {"So","L"}; +lookup(118847) -> {"So","L"}; +lookup(118848) -> {"So","L"}; +lookup(118849) -> {"So","L"}; +lookup(118850) -> {"So","L"}; +lookup(118851) -> {"So","L"}; +lookup(118852) -> {"So","L"}; +lookup(118853) -> {"So","L"}; +lookup(118854) -> {"So","L"}; +lookup(118855) -> {"So","L"}; +lookup(118856) -> {"So","L"}; +lookup(118857) -> {"So","L"}; +lookup(118858) -> {"So","L"}; +lookup(118859) -> {"So","L"}; 
+lookup(118860) -> {"So","L"}; +lookup(118861) -> {"So","L"}; +lookup(118862) -> {"So","L"}; +lookup(118863) -> {"So","L"}; +lookup(118864) -> {"So","L"}; +lookup(118865) -> {"So","L"}; +lookup(118866) -> {"So","L"}; +lookup(118867) -> {"So","L"}; +lookup(118868) -> {"So","L"}; +lookup(118869) -> {"So","L"}; +lookup(118870) -> {"So","L"}; +lookup(118871) -> {"So","L"}; +lookup(118872) -> {"So","L"}; +lookup(118873) -> {"So","L"}; +lookup(118874) -> {"So","L"}; +lookup(118875) -> {"So","L"}; +lookup(118876) -> {"So","L"}; +lookup(118877) -> {"So","L"}; +lookup(118878) -> {"So","L"}; +lookup(118879) -> {"So","L"}; +lookup(118880) -> {"So","L"}; +lookup(118881) -> {"So","L"}; +lookup(118882) -> {"So","L"}; +lookup(118883) -> {"So","L"}; +lookup(118884) -> {"So","L"}; +lookup(118885) -> {"So","L"}; +lookup(118886) -> {"So","L"}; +lookup(118887) -> {"So","L"}; +lookup(118888) -> {"So","L"}; +lookup(118889) -> {"So","L"}; +lookup(118890) -> {"So","L"}; +lookup(118891) -> {"So","L"}; +lookup(118892) -> {"So","L"}; +lookup(118893) -> {"So","L"}; +lookup(118894) -> {"So","L"}; +lookup(118895) -> {"So","L"}; +lookup(118896) -> {"So","L"}; +lookup(118897) -> {"So","L"}; +lookup(118898) -> {"So","L"}; +lookup(118899) -> {"So","L"}; +lookup(118900) -> {"So","L"}; +lookup(118901) -> {"So","L"}; +lookup(118902) -> {"So","L"}; +lookup(118903) -> {"So","L"}; +lookup(118904) -> {"So","L"}; +lookup(118905) -> {"So","L"}; +lookup(118906) -> {"So","L"}; +lookup(118907) -> {"So","L"}; +lookup(118908) -> {"So","L"}; +lookup(118909) -> {"So","L"}; +lookup(118910) -> {"So","L"}; +lookup(118911) -> {"So","L"}; +lookup(118912) -> {"So","L"}; +lookup(118913) -> {"So","L"}; +lookup(118914) -> {"So","L"}; +lookup(118915) -> {"So","L"}; +lookup(118916) -> {"So","L"}; +lookup(118917) -> {"So","L"}; +lookup(118918) -> {"So","L"}; +lookup(118919) -> {"So","L"}; +lookup(118920) -> {"So","L"}; +lookup(118921) -> {"So","L"}; +lookup(118922) -> {"So","L"}; +lookup(118923) -> {"So","L"}; +lookup(118924) 
-> {"So","L"}; +lookup(118925) -> {"So","L"}; +lookup(118926) -> {"So","L"}; +lookup(118927) -> {"So","L"}; +lookup(118928) -> {"So","L"}; +lookup(118929) -> {"So","L"}; +lookup(118930) -> {"So","L"}; +lookup(118931) -> {"So","L"}; +lookup(118932) -> {"So","L"}; +lookup(118933) -> {"So","L"}; +lookup(118934) -> {"So","L"}; +lookup(118935) -> {"So","L"}; +lookup(118936) -> {"So","L"}; +lookup(118937) -> {"So","L"}; +lookup(118938) -> {"So","L"}; +lookup(118939) -> {"So","L"}; +lookup(118940) -> {"So","L"}; +lookup(118941) -> {"So","L"}; +lookup(118942) -> {"So","L"}; +lookup(118943) -> {"So","L"}; +lookup(118944) -> {"So","L"}; +lookup(118945) -> {"So","L"}; +lookup(118946) -> {"So","L"}; +lookup(118947) -> {"So","L"}; +lookup(118948) -> {"So","L"}; +lookup(118949) -> {"So","L"}; +lookup(118950) -> {"So","L"}; +lookup(118951) -> {"So","L"}; +lookup(118952) -> {"So","L"}; +lookup(118953) -> {"So","L"}; +lookup(118954) -> {"So","L"}; +lookup(118955) -> {"So","L"}; +lookup(118956) -> {"So","L"}; +lookup(118957) -> {"So","L"}; +lookup(118958) -> {"So","L"}; +lookup(118959) -> {"So","L"}; +lookup(118960) -> {"So","L"}; +lookup(118961) -> {"So","L"}; +lookup(118962) -> {"So","L"}; +lookup(118963) -> {"So","L"}; +lookup(118964) -> {"So","L"}; +lookup(118965) -> {"So","L"}; +lookup(118966) -> {"So","L"}; +lookup(118967) -> {"So","L"}; +lookup(118968) -> {"So","L"}; +lookup(118969) -> {"So","L"}; +lookup(118970) -> {"So","L"}; +lookup(118971) -> {"So","L"}; +lookup(118972) -> {"So","L"}; +lookup(118973) -> {"So","L"}; +lookup(118974) -> {"So","L"}; +lookup(118975) -> {"So","L"}; +lookup(118976) -> {"So","L"}; +lookup(118977) -> {"So","L"}; +lookup(118978) -> {"So","L"}; +lookup(118979) -> {"So","L"}; +lookup(118980) -> {"So","L"}; +lookup(118981) -> {"So","L"}; +lookup(118982) -> {"So","L"}; +lookup(118983) -> {"So","L"}; +lookup(118984) -> {"So","L"}; +lookup(118985) -> {"So","L"}; +lookup(118986) -> {"So","L"}; +lookup(118987) -> {"So","L"}; +lookup(118988) -> {"So","L"}; 
+lookup(118989) -> {"So","L"}; +lookup(118990) -> {"So","L"}; +lookup(118991) -> {"So","L"}; +lookup(118992) -> {"So","L"}; +lookup(118993) -> {"So","L"}; +lookup(118994) -> {"So","L"}; +lookup(118995) -> {"So","L"}; +lookup(118996) -> {"So","L"}; +lookup(118997) -> {"So","L"}; +lookup(118998) -> {"So","L"}; +lookup(118999) -> {"So","L"}; +lookup(119000) -> {"So","L"}; +lookup(119001) -> {"So","L"}; +lookup(119002) -> {"So","L"}; +lookup(119003) -> {"So","L"}; +lookup(119004) -> {"So","L"}; +lookup(119005) -> {"So","L"}; +lookup(119006) -> {"So","L"}; +lookup(119007) -> {"So","L"}; +lookup(119008) -> {"So","L"}; +lookup(119009) -> {"So","L"}; +lookup(119010) -> {"So","L"}; +lookup(119011) -> {"So","L"}; +lookup(119012) -> {"So","L"}; +lookup(119013) -> {"So","L"}; +lookup(119014) -> {"So","L"}; +lookup(119015) -> {"So","L"}; +lookup(119016) -> {"So","L"}; +lookup(119017) -> {"So","L"}; +lookup(119018) -> {"So","L"}; +lookup(119019) -> {"So","L"}; +lookup(119020) -> {"So","L"}; +lookup(119021) -> {"So","L"}; +lookup(119022) -> {"So","L"}; +lookup(119023) -> {"So","L"}; +lookup(119024) -> {"So","L"}; +lookup(119025) -> {"So","L"}; +lookup(119026) -> {"So","L"}; +lookup(119027) -> {"So","L"}; +lookup(119028) -> {"So","L"}; +lookup(119029) -> {"So","L"}; +lookup(119040) -> {"So","L"}; +lookup(119041) -> {"So","L"}; +lookup(119042) -> {"So","L"}; +lookup(119043) -> {"So","L"}; +lookup(119044) -> {"So","L"}; +lookup(119045) -> {"So","L"}; +lookup(119046) -> {"So","L"}; +lookup(119047) -> {"So","L"}; +lookup(119048) -> {"So","L"}; +lookup(119049) -> {"So","L"}; +lookup(119050) -> {"So","L"}; +lookup(119051) -> {"So","L"}; +lookup(119052) -> {"So","L"}; +lookup(119053) -> {"So","L"}; +lookup(119054) -> {"So","L"}; +lookup(119055) -> {"So","L"}; +lookup(119056) -> {"So","L"}; +lookup(119057) -> {"So","L"}; +lookup(119058) -> {"So","L"}; +lookup(119059) -> {"So","L"}; +lookup(119060) -> {"So","L"}; +lookup(119061) -> {"So","L"}; +lookup(119062) -> {"So","L"}; +lookup(119063) 
-> {"So","L"}; +lookup(119064) -> {"So","L"}; +lookup(119065) -> {"So","L"}; +lookup(119066) -> {"So","L"}; +lookup(119067) -> {"So","L"}; +lookup(119068) -> {"So","L"}; +lookup(119069) -> {"So","L"}; +lookup(119070) -> {"So","L"}; +lookup(119071) -> {"So","L"}; +lookup(119072) -> {"So","L"}; +lookup(119073) -> {"So","L"}; +lookup(119074) -> {"So","L"}; +lookup(119075) -> {"So","L"}; +lookup(119076) -> {"So","L"}; +lookup(119077) -> {"So","L"}; +lookup(119078) -> {"So","L"}; +lookup(119081) -> {"So","L"}; +lookup(119082) -> {"So","L"}; +lookup(119083) -> {"So","L"}; +lookup(119084) -> {"So","L"}; +lookup(119085) -> {"So","L"}; +lookup(119086) -> {"So","L"}; +lookup(119087) -> {"So","L"}; +lookup(119088) -> {"So","L"}; +lookup(119089) -> {"So","L"}; +lookup(119090) -> {"So","L"}; +lookup(119091) -> {"So","L"}; +lookup(119092) -> {"So","L"}; +lookup(119093) -> {"So","L"}; +lookup(119094) -> {"So","L"}; +lookup(119095) -> {"So","L"}; +lookup(119096) -> {"So","L"}; +lookup(119097) -> {"So","L"}; +lookup(119098) -> {"So","L"}; +lookup(119099) -> {"So","L"}; +lookup(119100) -> {"So","L"}; +lookup(119101) -> {"So","L"}; +lookup(119102) -> {"So","L"}; +lookup(119103) -> {"So","L"}; +lookup(119104) -> {"So","L"}; +lookup(119105) -> {"So","L"}; +lookup(119106) -> {"So","L"}; +lookup(119107) -> {"So","L"}; +lookup(119108) -> {"So","L"}; +lookup(119109) -> {"So","L"}; +lookup(119110) -> {"So","L"}; +lookup(119111) -> {"So","L"}; +lookup(119112) -> {"So","L"}; +lookup(119113) -> {"So","L"}; +lookup(119114) -> {"So","L"}; +lookup(119115) -> {"So","L"}; +lookup(119116) -> {"So","L"}; +lookup(119117) -> {"So","L"}; +lookup(119118) -> {"So","L"}; +lookup(119119) -> {"So","L"}; +lookup(119120) -> {"So","L"}; +lookup(119121) -> {"So","L"}; +lookup(119122) -> {"So","L"}; +lookup(119123) -> {"So","L"}; +lookup(119124) -> {"So","L"}; +lookup(119125) -> {"So","L"}; +lookup(119126) -> {"So","L"}; +lookup(119127) -> {"So","L"}; +lookup(119128) -> {"So","L"}; +lookup(119129) -> {"So","L"}; 
+lookup(119130) -> {"So","L"}; +lookup(119131) -> {"So","L"}; +lookup(119132) -> {"So","L"}; +lookup(119133) -> {"So","L"}; +lookup(119134) -> {"So","L"}; +lookup(119135) -> {"So","L"}; +lookup(119136) -> {"So","L"}; +lookup(119137) -> {"So","L"}; +lookup(119138) -> {"So","L"}; +lookup(119139) -> {"So","L"}; +lookup(119140) -> {"So","L"}; +lookup(119141) -> {"Mc","L"}; +lookup(119142) -> {"Mc","L"}; +lookup(119143) -> {"Mn","NSM"}; +lookup(119144) -> {"Mn","NSM"}; +lookup(119145) -> {"Mn","NSM"}; +lookup(119146) -> {"So","L"}; +lookup(119147) -> {"So","L"}; +lookup(119148) -> {"So","L"}; +lookup(119149) -> {"Mc","L"}; +lookup(119150) -> {"Mc","L"}; +lookup(119151) -> {"Mc","L"}; +lookup(119152) -> {"Mc","L"}; +lookup(119153) -> {"Mc","L"}; +lookup(119154) -> {"Mc","L"}; +lookup(119155) -> {"Cf","BN"}; +lookup(119156) -> {"Cf","BN"}; +lookup(119157) -> {"Cf","BN"}; +lookup(119158) -> {"Cf","BN"}; +lookup(119159) -> {"Cf","BN"}; +lookup(119160) -> {"Cf","BN"}; +lookup(119161) -> {"Cf","BN"}; +lookup(119162) -> {"Cf","BN"}; +lookup(119163) -> {"Mn","NSM"}; +lookup(119164) -> {"Mn","NSM"}; +lookup(119165) -> {"Mn","NSM"}; +lookup(119166) -> {"Mn","NSM"}; +lookup(119167) -> {"Mn","NSM"}; +lookup(119168) -> {"Mn","NSM"}; +lookup(119169) -> {"Mn","NSM"}; +lookup(119170) -> {"Mn","NSM"}; +lookup(119171) -> {"So","L"}; +lookup(119172) -> {"So","L"}; +lookup(119173) -> {"Mn","NSM"}; +lookup(119174) -> {"Mn","NSM"}; +lookup(119175) -> {"Mn","NSM"}; +lookup(119176) -> {"Mn","NSM"}; +lookup(119177) -> {"Mn","NSM"}; +lookup(119178) -> {"Mn","NSM"}; +lookup(119179) -> {"Mn","NSM"}; +lookup(119180) -> {"So","L"}; +lookup(119181) -> {"So","L"}; +lookup(119182) -> {"So","L"}; +lookup(119183) -> {"So","L"}; +lookup(119184) -> {"So","L"}; +lookup(119185) -> {"So","L"}; +lookup(119186) -> {"So","L"}; +lookup(119187) -> {"So","L"}; +lookup(119188) -> {"So","L"}; +lookup(119189) -> {"So","L"}; +lookup(119190) -> {"So","L"}; +lookup(119191) -> {"So","L"}; +lookup(119192) -> {"So","L"}; 
+lookup(119193) -> {"So","L"}; +lookup(119194) -> {"So","L"}; +lookup(119195) -> {"So","L"}; +lookup(119196) -> {"So","L"}; +lookup(119197) -> {"So","L"}; +lookup(119198) -> {"So","L"}; +lookup(119199) -> {"So","L"}; +lookup(119200) -> {"So","L"}; +lookup(119201) -> {"So","L"}; +lookup(119202) -> {"So","L"}; +lookup(119203) -> {"So","L"}; +lookup(119204) -> {"So","L"}; +lookup(119205) -> {"So","L"}; +lookup(119206) -> {"So","L"}; +lookup(119207) -> {"So","L"}; +lookup(119208) -> {"So","L"}; +lookup(119209) -> {"So","L"}; +lookup(119210) -> {"Mn","NSM"}; +lookup(119211) -> {"Mn","NSM"}; +lookup(119212) -> {"Mn","NSM"}; +lookup(119213) -> {"Mn","NSM"}; +lookup(119214) -> {"So","L"}; +lookup(119215) -> {"So","L"}; +lookup(119216) -> {"So","L"}; +lookup(119217) -> {"So","L"}; +lookup(119218) -> {"So","L"}; +lookup(119219) -> {"So","L"}; +lookup(119220) -> {"So","L"}; +lookup(119221) -> {"So","L"}; +lookup(119222) -> {"So","L"}; +lookup(119223) -> {"So","L"}; +lookup(119224) -> {"So","L"}; +lookup(119225) -> {"So","L"}; +lookup(119226) -> {"So","L"}; +lookup(119227) -> {"So","L"}; +lookup(119228) -> {"So","L"}; +lookup(119229) -> {"So","L"}; +lookup(119230) -> {"So","L"}; +lookup(119231) -> {"So","L"}; +lookup(119232) -> {"So","L"}; +lookup(119233) -> {"So","L"}; +lookup(119234) -> {"So","L"}; +lookup(119235) -> {"So","L"}; +lookup(119236) -> {"So","L"}; +lookup(119237) -> {"So","L"}; +lookup(119238) -> {"So","L"}; +lookup(119239) -> {"So","L"}; +lookup(119240) -> {"So","L"}; +lookup(119241) -> {"So","L"}; +lookup(119242) -> {"So","L"}; +lookup(119243) -> {"So","L"}; +lookup(119244) -> {"So","L"}; +lookup(119245) -> {"So","L"}; +lookup(119246) -> {"So","L"}; +lookup(119247) -> {"So","L"}; +lookup(119248) -> {"So","L"}; +lookup(119249) -> {"So","L"}; +lookup(119250) -> {"So","L"}; +lookup(119251) -> {"So","L"}; +lookup(119252) -> {"So","L"}; +lookup(119253) -> {"So","L"}; +lookup(119254) -> {"So","L"}; +lookup(119255) -> {"So","L"}; +lookup(119256) -> {"So","L"}; 
+lookup(119257) -> {"So","L"}; +lookup(119258) -> {"So","L"}; +lookup(119259) -> {"So","L"}; +lookup(119260) -> {"So","L"}; +lookup(119261) -> {"So","L"}; +lookup(119262) -> {"So","L"}; +lookup(119263) -> {"So","L"}; +lookup(119264) -> {"So","L"}; +lookup(119265) -> {"So","L"}; +lookup(119266) -> {"So","L"}; +lookup(119267) -> {"So","L"}; +lookup(119268) -> {"So","L"}; +lookup(119269) -> {"So","L"}; +lookup(119270) -> {"So","L"}; +lookup(119271) -> {"So","L"}; +lookup(119272) -> {"So","L"}; +lookup(119296) -> {"So","ON"}; +lookup(119297) -> {"So","ON"}; +lookup(119298) -> {"So","ON"}; +lookup(119299) -> {"So","ON"}; +lookup(119300) -> {"So","ON"}; +lookup(119301) -> {"So","ON"}; +lookup(119302) -> {"So","ON"}; +lookup(119303) -> {"So","ON"}; +lookup(119304) -> {"So","ON"}; +lookup(119305) -> {"So","ON"}; +lookup(119306) -> {"So","ON"}; +lookup(119307) -> {"So","ON"}; +lookup(119308) -> {"So","ON"}; +lookup(119309) -> {"So","ON"}; +lookup(119310) -> {"So","ON"}; +lookup(119311) -> {"So","ON"}; +lookup(119312) -> {"So","ON"}; +lookup(119313) -> {"So","ON"}; +lookup(119314) -> {"So","ON"}; +lookup(119315) -> {"So","ON"}; +lookup(119316) -> {"So","ON"}; +lookup(119317) -> {"So","ON"}; +lookup(119318) -> {"So","ON"}; +lookup(119319) -> {"So","ON"}; +lookup(119320) -> {"So","ON"}; +lookup(119321) -> {"So","ON"}; +lookup(119322) -> {"So","ON"}; +lookup(119323) -> {"So","ON"}; +lookup(119324) -> {"So","ON"}; +lookup(119325) -> {"So","ON"}; +lookup(119326) -> {"So","ON"}; +lookup(119327) -> {"So","ON"}; +lookup(119328) -> {"So","ON"}; +lookup(119329) -> {"So","ON"}; +lookup(119330) -> {"So","ON"}; +lookup(119331) -> {"So","ON"}; +lookup(119332) -> {"So","ON"}; +lookup(119333) -> {"So","ON"}; +lookup(119334) -> {"So","ON"}; +lookup(119335) -> {"So","ON"}; +lookup(119336) -> {"So","ON"}; +lookup(119337) -> {"So","ON"}; +lookup(119338) -> {"So","ON"}; +lookup(119339) -> {"So","ON"}; +lookup(119340) -> {"So","ON"}; +lookup(119341) -> {"So","ON"}; +lookup(119342) -> {"So","ON"}; 
+lookup(119343) -> {"So","ON"}; +lookup(119344) -> {"So","ON"}; +lookup(119345) -> {"So","ON"}; +lookup(119346) -> {"So","ON"}; +lookup(119347) -> {"So","ON"}; +lookup(119348) -> {"So","ON"}; +lookup(119349) -> {"So","ON"}; +lookup(119350) -> {"So","ON"}; +lookup(119351) -> {"So","ON"}; +lookup(119352) -> {"So","ON"}; +lookup(119353) -> {"So","ON"}; +lookup(119354) -> {"So","ON"}; +lookup(119355) -> {"So","ON"}; +lookup(119356) -> {"So","ON"}; +lookup(119357) -> {"So","ON"}; +lookup(119358) -> {"So","ON"}; +lookup(119359) -> {"So","ON"}; +lookup(119360) -> {"So","ON"}; +lookup(119361) -> {"So","ON"}; +lookup(119362) -> {"Mn","NSM"}; +lookup(119363) -> {"Mn","NSM"}; +lookup(119364) -> {"Mn","NSM"}; +lookup(119365) -> {"So","ON"}; +lookup(119520) -> {"No","L"}; +lookup(119521) -> {"No","L"}; +lookup(119522) -> {"No","L"}; +lookup(119523) -> {"No","L"}; +lookup(119524) -> {"No","L"}; +lookup(119525) -> {"No","L"}; +lookup(119526) -> {"No","L"}; +lookup(119527) -> {"No","L"}; +lookup(119528) -> {"No","L"}; +lookup(119529) -> {"No","L"}; +lookup(119530) -> {"No","L"}; +lookup(119531) -> {"No","L"}; +lookup(119532) -> {"No","L"}; +lookup(119533) -> {"No","L"}; +lookup(119534) -> {"No","L"}; +lookup(119535) -> {"No","L"}; +lookup(119536) -> {"No","L"}; +lookup(119537) -> {"No","L"}; +lookup(119538) -> {"No","L"}; +lookup(119539) -> {"No","L"}; +lookup(119552) -> {"So","ON"}; +lookup(119553) -> {"So","ON"}; +lookup(119554) -> {"So","ON"}; +lookup(119555) -> {"So","ON"}; +lookup(119556) -> {"So","ON"}; +lookup(119557) -> {"So","ON"}; +lookup(119558) -> {"So","ON"}; +lookup(119559) -> {"So","ON"}; +lookup(119560) -> {"So","ON"}; +lookup(119561) -> {"So","ON"}; +lookup(119562) -> {"So","ON"}; +lookup(119563) -> {"So","ON"}; +lookup(119564) -> {"So","ON"}; +lookup(119565) -> {"So","ON"}; +lookup(119566) -> {"So","ON"}; +lookup(119567) -> {"So","ON"}; +lookup(119568) -> {"So","ON"}; +lookup(119569) -> {"So","ON"}; +lookup(119570) -> {"So","ON"}; +lookup(119571) -> {"So","ON"}; 
+lookup(119572) -> {"So","ON"}; +lookup(119573) -> {"So","ON"}; +lookup(119574) -> {"So","ON"}; +lookup(119575) -> {"So","ON"}; +lookup(119576) -> {"So","ON"}; +lookup(119577) -> {"So","ON"}; +lookup(119578) -> {"So","ON"}; +lookup(119579) -> {"So","ON"}; +lookup(119580) -> {"So","ON"}; +lookup(119581) -> {"So","ON"}; +lookup(119582) -> {"So","ON"}; +lookup(119583) -> {"So","ON"}; +lookup(119584) -> {"So","ON"}; +lookup(119585) -> {"So","ON"}; +lookup(119586) -> {"So","ON"}; +lookup(119587) -> {"So","ON"}; +lookup(119588) -> {"So","ON"}; +lookup(119589) -> {"So","ON"}; +lookup(119590) -> {"So","ON"}; +lookup(119591) -> {"So","ON"}; +lookup(119592) -> {"So","ON"}; +lookup(119593) -> {"So","ON"}; +lookup(119594) -> {"So","ON"}; +lookup(119595) -> {"So","ON"}; +lookup(119596) -> {"So","ON"}; +lookup(119597) -> {"So","ON"}; +lookup(119598) -> {"So","ON"}; +lookup(119599) -> {"So","ON"}; +lookup(119600) -> {"So","ON"}; +lookup(119601) -> {"So","ON"}; +lookup(119602) -> {"So","ON"}; +lookup(119603) -> {"So","ON"}; +lookup(119604) -> {"So","ON"}; +lookup(119605) -> {"So","ON"}; +lookup(119606) -> {"So","ON"}; +lookup(119607) -> {"So","ON"}; +lookup(119608) -> {"So","ON"}; +lookup(119609) -> {"So","ON"}; +lookup(119610) -> {"So","ON"}; +lookup(119611) -> {"So","ON"}; +lookup(119612) -> {"So","ON"}; +lookup(119613) -> {"So","ON"}; +lookup(119614) -> {"So","ON"}; +lookup(119615) -> {"So","ON"}; +lookup(119616) -> {"So","ON"}; +lookup(119617) -> {"So","ON"}; +lookup(119618) -> {"So","ON"}; +lookup(119619) -> {"So","ON"}; +lookup(119620) -> {"So","ON"}; +lookup(119621) -> {"So","ON"}; +lookup(119622) -> {"So","ON"}; +lookup(119623) -> {"So","ON"}; +lookup(119624) -> {"So","ON"}; +lookup(119625) -> {"So","ON"}; +lookup(119626) -> {"So","ON"}; +lookup(119627) -> {"So","ON"}; +lookup(119628) -> {"So","ON"}; +lookup(119629) -> {"So","ON"}; +lookup(119630) -> {"So","ON"}; +lookup(119631) -> {"So","ON"}; +lookup(119632) -> {"So","ON"}; +lookup(119633) -> {"So","ON"}; +lookup(119634) 
-> {"So","ON"}; +lookup(119635) -> {"So","ON"}; +lookup(119636) -> {"So","ON"}; +lookup(119637) -> {"So","ON"}; +lookup(119638) -> {"So","ON"}; +lookup(119648) -> {"No","L"}; +lookup(119649) -> {"No","L"}; +lookup(119650) -> {"No","L"}; +lookup(119651) -> {"No","L"}; +lookup(119652) -> {"No","L"}; +lookup(119653) -> {"No","L"}; +lookup(119654) -> {"No","L"}; +lookup(119655) -> {"No","L"}; +lookup(119656) -> {"No","L"}; +lookup(119657) -> {"No","L"}; +lookup(119658) -> {"No","L"}; +lookup(119659) -> {"No","L"}; +lookup(119660) -> {"No","L"}; +lookup(119661) -> {"No","L"}; +lookup(119662) -> {"No","L"}; +lookup(119663) -> {"No","L"}; +lookup(119664) -> {"No","L"}; +lookup(119665) -> {"No","L"}; +lookup(119666) -> {"No","L"}; +lookup(119667) -> {"No","L"}; +lookup(119668) -> {"No","L"}; +lookup(119669) -> {"No","L"}; +lookup(119670) -> {"No","L"}; +lookup(119671) -> {"No","L"}; +lookup(119672) -> {"No","L"}; +lookup(119808) -> {"Lu","L"}; +lookup(119809) -> {"Lu","L"}; +lookup(119810) -> {"Lu","L"}; +lookup(119811) -> {"Lu","L"}; +lookup(119812) -> {"Lu","L"}; +lookup(119813) -> {"Lu","L"}; +lookup(119814) -> {"Lu","L"}; +lookup(119815) -> {"Lu","L"}; +lookup(119816) -> {"Lu","L"}; +lookup(119817) -> {"Lu","L"}; +lookup(119818) -> {"Lu","L"}; +lookup(119819) -> {"Lu","L"}; +lookup(119820) -> {"Lu","L"}; +lookup(119821) -> {"Lu","L"}; +lookup(119822) -> {"Lu","L"}; +lookup(119823) -> {"Lu","L"}; +lookup(119824) -> {"Lu","L"}; +lookup(119825) -> {"Lu","L"}; +lookup(119826) -> {"Lu","L"}; +lookup(119827) -> {"Lu","L"}; +lookup(119828) -> {"Lu","L"}; +lookup(119829) -> {"Lu","L"}; +lookup(119830) -> {"Lu","L"}; +lookup(119831) -> {"Lu","L"}; +lookup(119832) -> {"Lu","L"}; +lookup(119833) -> {"Lu","L"}; +lookup(119834) -> {"Ll","L"}; +lookup(119835) -> {"Ll","L"}; +lookup(119836) -> {"Ll","L"}; +lookup(119837) -> {"Ll","L"}; +lookup(119838) -> {"Ll","L"}; +lookup(119839) -> {"Ll","L"}; +lookup(119840) -> {"Ll","L"}; +lookup(119841) -> {"Ll","L"}; +lookup(119842) -> 
{"Ll","L"}; +lookup(119843) -> {"Ll","L"}; +lookup(119844) -> {"Ll","L"}; +lookup(119845) -> {"Ll","L"}; +lookup(119846) -> {"Ll","L"}; +lookup(119847) -> {"Ll","L"}; +lookup(119848) -> {"Ll","L"}; +lookup(119849) -> {"Ll","L"}; +lookup(119850) -> {"Ll","L"}; +lookup(119851) -> {"Ll","L"}; +lookup(119852) -> {"Ll","L"}; +lookup(119853) -> {"Ll","L"}; +lookup(119854) -> {"Ll","L"}; +lookup(119855) -> {"Ll","L"}; +lookup(119856) -> {"Ll","L"}; +lookup(119857) -> {"Ll","L"}; +lookup(119858) -> {"Ll","L"}; +lookup(119859) -> {"Ll","L"}; +lookup(119860) -> {"Lu","L"}; +lookup(119861) -> {"Lu","L"}; +lookup(119862) -> {"Lu","L"}; +lookup(119863) -> {"Lu","L"}; +lookup(119864) -> {"Lu","L"}; +lookup(119865) -> {"Lu","L"}; +lookup(119866) -> {"Lu","L"}; +lookup(119867) -> {"Lu","L"}; +lookup(119868) -> {"Lu","L"}; +lookup(119869) -> {"Lu","L"}; +lookup(119870) -> {"Lu","L"}; +lookup(119871) -> {"Lu","L"}; +lookup(119872) -> {"Lu","L"}; +lookup(119873) -> {"Lu","L"}; +lookup(119874) -> {"Lu","L"}; +lookup(119875) -> {"Lu","L"}; +lookup(119876) -> {"Lu","L"}; +lookup(119877) -> {"Lu","L"}; +lookup(119878) -> {"Lu","L"}; +lookup(119879) -> {"Lu","L"}; +lookup(119880) -> {"Lu","L"}; +lookup(119881) -> {"Lu","L"}; +lookup(119882) -> {"Lu","L"}; +lookup(119883) -> {"Lu","L"}; +lookup(119884) -> {"Lu","L"}; +lookup(119885) -> {"Lu","L"}; +lookup(119886) -> {"Ll","L"}; +lookup(119887) -> {"Ll","L"}; +lookup(119888) -> {"Ll","L"}; +lookup(119889) -> {"Ll","L"}; +lookup(119890) -> {"Ll","L"}; +lookup(119891) -> {"Ll","L"}; +lookup(119892) -> {"Ll","L"}; +lookup(119894) -> {"Ll","L"}; +lookup(119895) -> {"Ll","L"}; +lookup(119896) -> {"Ll","L"}; +lookup(119897) -> {"Ll","L"}; +lookup(119898) -> {"Ll","L"}; +lookup(119899) -> {"Ll","L"}; +lookup(119900) -> {"Ll","L"}; +lookup(119901) -> {"Ll","L"}; +lookup(119902) -> {"Ll","L"}; +lookup(119903) -> {"Ll","L"}; +lookup(119904) -> {"Ll","L"}; +lookup(119905) -> {"Ll","L"}; +lookup(119906) -> {"Ll","L"}; +lookup(119907) -> {"Ll","L"}; 
+lookup(119908) -> {"Ll","L"}; +lookup(119909) -> {"Ll","L"}; +lookup(119910) -> {"Ll","L"}; +lookup(119911) -> {"Ll","L"}; +lookup(119912) -> {"Lu","L"}; +lookup(119913) -> {"Lu","L"}; +lookup(119914) -> {"Lu","L"}; +lookup(119915) -> {"Lu","L"}; +lookup(119916) -> {"Lu","L"}; +lookup(119917) -> {"Lu","L"}; +lookup(119918) -> {"Lu","L"}; +lookup(119919) -> {"Lu","L"}; +lookup(119920) -> {"Lu","L"}; +lookup(119921) -> {"Lu","L"}; +lookup(119922) -> {"Lu","L"}; +lookup(119923) -> {"Lu","L"}; +lookup(119924) -> {"Lu","L"}; +lookup(119925) -> {"Lu","L"}; +lookup(119926) -> {"Lu","L"}; +lookup(119927) -> {"Lu","L"}; +lookup(119928) -> {"Lu","L"}; +lookup(119929) -> {"Lu","L"}; +lookup(119930) -> {"Lu","L"}; +lookup(119931) -> {"Lu","L"}; +lookup(119932) -> {"Lu","L"}; +lookup(119933) -> {"Lu","L"}; +lookup(119934) -> {"Lu","L"}; +lookup(119935) -> {"Lu","L"}; +lookup(119936) -> {"Lu","L"}; +lookup(119937) -> {"Lu","L"}; +lookup(119938) -> {"Ll","L"}; +lookup(119939) -> {"Ll","L"}; +lookup(119940) -> {"Ll","L"}; +lookup(119941) -> {"Ll","L"}; +lookup(119942) -> {"Ll","L"}; +lookup(119943) -> {"Ll","L"}; +lookup(119944) -> {"Ll","L"}; +lookup(119945) -> {"Ll","L"}; +lookup(119946) -> {"Ll","L"}; +lookup(119947) -> {"Ll","L"}; +lookup(119948) -> {"Ll","L"}; +lookup(119949) -> {"Ll","L"}; +lookup(119950) -> {"Ll","L"}; +lookup(119951) -> {"Ll","L"}; +lookup(119952) -> {"Ll","L"}; +lookup(119953) -> {"Ll","L"}; +lookup(119954) -> {"Ll","L"}; +lookup(119955) -> {"Ll","L"}; +lookup(119956) -> {"Ll","L"}; +lookup(119957) -> {"Ll","L"}; +lookup(119958) -> {"Ll","L"}; +lookup(119959) -> {"Ll","L"}; +lookup(119960) -> {"Ll","L"}; +lookup(119961) -> {"Ll","L"}; +lookup(119962) -> {"Ll","L"}; +lookup(119963) -> {"Ll","L"}; +lookup(119964) -> {"Lu","L"}; +lookup(119966) -> {"Lu","L"}; +lookup(119967) -> {"Lu","L"}; +lookup(119970) -> {"Lu","L"}; +lookup(119973) -> {"Lu","L"}; +lookup(119974) -> {"Lu","L"}; +lookup(119977) -> {"Lu","L"}; +lookup(119978) -> {"Lu","L"}; +lookup(119979) 
-> {"Lu","L"}; +lookup(119980) -> {"Lu","L"}; +lookup(119982) -> {"Lu","L"}; +lookup(119983) -> {"Lu","L"}; +lookup(119984) -> {"Lu","L"}; +lookup(119985) -> {"Lu","L"}; +lookup(119986) -> {"Lu","L"}; +lookup(119987) -> {"Lu","L"}; +lookup(119988) -> {"Lu","L"}; +lookup(119989) -> {"Lu","L"}; +lookup(119990) -> {"Ll","L"}; +lookup(119991) -> {"Ll","L"}; +lookup(119992) -> {"Ll","L"}; +lookup(119993) -> {"Ll","L"}; +lookup(119995) -> {"Ll","L"}; +lookup(119997) -> {"Ll","L"}; +lookup(119998) -> {"Ll","L"}; +lookup(119999) -> {"Ll","L"}; +lookup(120000) -> {"Ll","L"}; +lookup(120001) -> {"Ll","L"}; +lookup(120002) -> {"Ll","L"}; +lookup(120003) -> {"Ll","L"}; +lookup(120005) -> {"Ll","L"}; +lookup(120006) -> {"Ll","L"}; +lookup(120007) -> {"Ll","L"}; +lookup(120008) -> {"Ll","L"}; +lookup(120009) -> {"Ll","L"}; +lookup(120010) -> {"Ll","L"}; +lookup(120011) -> {"Ll","L"}; +lookup(120012) -> {"Ll","L"}; +lookup(120013) -> {"Ll","L"}; +lookup(120014) -> {"Ll","L"}; +lookup(120015) -> {"Ll","L"}; +lookup(120016) -> {"Lu","L"}; +lookup(120017) -> {"Lu","L"}; +lookup(120018) -> {"Lu","L"}; +lookup(120019) -> {"Lu","L"}; +lookup(120020) -> {"Lu","L"}; +lookup(120021) -> {"Lu","L"}; +lookup(120022) -> {"Lu","L"}; +lookup(120023) -> {"Lu","L"}; +lookup(120024) -> {"Lu","L"}; +lookup(120025) -> {"Lu","L"}; +lookup(120026) -> {"Lu","L"}; +lookup(120027) -> {"Lu","L"}; +lookup(120028) -> {"Lu","L"}; +lookup(120029) -> {"Lu","L"}; +lookup(120030) -> {"Lu","L"}; +lookup(120031) -> {"Lu","L"}; +lookup(120032) -> {"Lu","L"}; +lookup(120033) -> {"Lu","L"}; +lookup(120034) -> {"Lu","L"}; +lookup(120035) -> {"Lu","L"}; +lookup(120036) -> {"Lu","L"}; +lookup(120037) -> {"Lu","L"}; +lookup(120038) -> {"Lu","L"}; +lookup(120039) -> {"Lu","L"}; +lookup(120040) -> {"Lu","L"}; +lookup(120041) -> {"Lu","L"}; +lookup(120042) -> {"Ll","L"}; +lookup(120043) -> {"Ll","L"}; +lookup(120044) -> {"Ll","L"}; +lookup(120045) -> {"Ll","L"}; +lookup(120046) -> {"Ll","L"}; +lookup(120047) -> {"Ll","L"}; 
+lookup(120048) -> {"Ll","L"}; +lookup(120049) -> {"Ll","L"}; +lookup(120050) -> {"Ll","L"}; +lookup(120051) -> {"Ll","L"}; +lookup(120052) -> {"Ll","L"}; +lookup(120053) -> {"Ll","L"}; +lookup(120054) -> {"Ll","L"}; +lookup(120055) -> {"Ll","L"}; +lookup(120056) -> {"Ll","L"}; +lookup(120057) -> {"Ll","L"}; +lookup(120058) -> {"Ll","L"}; +lookup(120059) -> {"Ll","L"}; +lookup(120060) -> {"Ll","L"}; +lookup(120061) -> {"Ll","L"}; +lookup(120062) -> {"Ll","L"}; +lookup(120063) -> {"Ll","L"}; +lookup(120064) -> {"Ll","L"}; +lookup(120065) -> {"Ll","L"}; +lookup(120066) -> {"Ll","L"}; +lookup(120067) -> {"Ll","L"}; +lookup(120068) -> {"Lu","L"}; +lookup(120069) -> {"Lu","L"}; +lookup(120071) -> {"Lu","L"}; +lookup(120072) -> {"Lu","L"}; +lookup(120073) -> {"Lu","L"}; +lookup(120074) -> {"Lu","L"}; +lookup(120077) -> {"Lu","L"}; +lookup(120078) -> {"Lu","L"}; +lookup(120079) -> {"Lu","L"}; +lookup(120080) -> {"Lu","L"}; +lookup(120081) -> {"Lu","L"}; +lookup(120082) -> {"Lu","L"}; +lookup(120083) -> {"Lu","L"}; +lookup(120084) -> {"Lu","L"}; +lookup(120086) -> {"Lu","L"}; +lookup(120087) -> {"Lu","L"}; +lookup(120088) -> {"Lu","L"}; +lookup(120089) -> {"Lu","L"}; +lookup(120090) -> {"Lu","L"}; +lookup(120091) -> {"Lu","L"}; +lookup(120092) -> {"Lu","L"}; +lookup(120094) -> {"Ll","L"}; +lookup(120095) -> {"Ll","L"}; +lookup(120096) -> {"Ll","L"}; +lookup(120097) -> {"Ll","L"}; +lookup(120098) -> {"Ll","L"}; +lookup(120099) -> {"Ll","L"}; +lookup(120100) -> {"Ll","L"}; +lookup(120101) -> {"Ll","L"}; +lookup(120102) -> {"Ll","L"}; +lookup(120103) -> {"Ll","L"}; +lookup(120104) -> {"Ll","L"}; +lookup(120105) -> {"Ll","L"}; +lookup(120106) -> {"Ll","L"}; +lookup(120107) -> {"Ll","L"}; +lookup(120108) -> {"Ll","L"}; +lookup(120109) -> {"Ll","L"}; +lookup(120110) -> {"Ll","L"}; +lookup(120111) -> {"Ll","L"}; +lookup(120112) -> {"Ll","L"}; +lookup(120113) -> {"Ll","L"}; +lookup(120114) -> {"Ll","L"}; +lookup(120115) -> {"Ll","L"}; +lookup(120116) -> {"Ll","L"}; +lookup(120117) 
-> {"Ll","L"}; +lookup(120118) -> {"Ll","L"}; +lookup(120119) -> {"Ll","L"}; +lookup(120120) -> {"Lu","L"}; +lookup(120121) -> {"Lu","L"}; +lookup(120123) -> {"Lu","L"}; +lookup(120124) -> {"Lu","L"}; +lookup(120125) -> {"Lu","L"}; +lookup(120126) -> {"Lu","L"}; +lookup(120128) -> {"Lu","L"}; +lookup(120129) -> {"Lu","L"}; +lookup(120130) -> {"Lu","L"}; +lookup(120131) -> {"Lu","L"}; +lookup(120132) -> {"Lu","L"}; +lookup(120134) -> {"Lu","L"}; +lookup(120138) -> {"Lu","L"}; +lookup(120139) -> {"Lu","L"}; +lookup(120140) -> {"Lu","L"}; +lookup(120141) -> {"Lu","L"}; +lookup(120142) -> {"Lu","L"}; +lookup(120143) -> {"Lu","L"}; +lookup(120144) -> {"Lu","L"}; +lookup(120146) -> {"Ll","L"}; +lookup(120147) -> {"Ll","L"}; +lookup(120148) -> {"Ll","L"}; +lookup(120149) -> {"Ll","L"}; +lookup(120150) -> {"Ll","L"}; +lookup(120151) -> {"Ll","L"}; +lookup(120152) -> {"Ll","L"}; +lookup(120153) -> {"Ll","L"}; +lookup(120154) -> {"Ll","L"}; +lookup(120155) -> {"Ll","L"}; +lookup(120156) -> {"Ll","L"}; +lookup(120157) -> {"Ll","L"}; +lookup(120158) -> {"Ll","L"}; +lookup(120159) -> {"Ll","L"}; +lookup(120160) -> {"Ll","L"}; +lookup(120161) -> {"Ll","L"}; +lookup(120162) -> {"Ll","L"}; +lookup(120163) -> {"Ll","L"}; +lookup(120164) -> {"Ll","L"}; +lookup(120165) -> {"Ll","L"}; +lookup(120166) -> {"Ll","L"}; +lookup(120167) -> {"Ll","L"}; +lookup(120168) -> {"Ll","L"}; +lookup(120169) -> {"Ll","L"}; +lookup(120170) -> {"Ll","L"}; +lookup(120171) -> {"Ll","L"}; +lookup(120172) -> {"Lu","L"}; +lookup(120173) -> {"Lu","L"}; +lookup(120174) -> {"Lu","L"}; +lookup(120175) -> {"Lu","L"}; +lookup(120176) -> {"Lu","L"}; +lookup(120177) -> {"Lu","L"}; +lookup(120178) -> {"Lu","L"}; +lookup(120179) -> {"Lu","L"}; +lookup(120180) -> {"Lu","L"}; +lookup(120181) -> {"Lu","L"}; +lookup(120182) -> {"Lu","L"}; +lookup(120183) -> {"Lu","L"}; +lookup(120184) -> {"Lu","L"}; +lookup(120185) -> {"Lu","L"}; +lookup(120186) -> {"Lu","L"}; +lookup(120187) -> {"Lu","L"}; +lookup(120188) -> {"Lu","L"}; 
+lookup(120189) -> {"Lu","L"}; +lookup(120190) -> {"Lu","L"}; +lookup(120191) -> {"Lu","L"}; +lookup(120192) -> {"Lu","L"}; +lookup(120193) -> {"Lu","L"}; +lookup(120194) -> {"Lu","L"}; +lookup(120195) -> {"Lu","L"}; +lookup(120196) -> {"Lu","L"}; +lookup(120197) -> {"Lu","L"}; +lookup(120198) -> {"Ll","L"}; +lookup(120199) -> {"Ll","L"}; +lookup(120200) -> {"Ll","L"}; +lookup(120201) -> {"Ll","L"}; +lookup(120202) -> {"Ll","L"}; +lookup(120203) -> {"Ll","L"}; +lookup(120204) -> {"Ll","L"}; +lookup(120205) -> {"Ll","L"}; +lookup(120206) -> {"Ll","L"}; +lookup(120207) -> {"Ll","L"}; +lookup(120208) -> {"Ll","L"}; +lookup(120209) -> {"Ll","L"}; +lookup(120210) -> {"Ll","L"}; +lookup(120211) -> {"Ll","L"}; +lookup(120212) -> {"Ll","L"}; +lookup(120213) -> {"Ll","L"}; +lookup(120214) -> {"Ll","L"}; +lookup(120215) -> {"Ll","L"}; +lookup(120216) -> {"Ll","L"}; +lookup(120217) -> {"Ll","L"}; +lookup(120218) -> {"Ll","L"}; +lookup(120219) -> {"Ll","L"}; +lookup(120220) -> {"Ll","L"}; +lookup(120221) -> {"Ll","L"}; +lookup(120222) -> {"Ll","L"}; +lookup(120223) -> {"Ll","L"}; +lookup(120224) -> {"Lu","L"}; +lookup(120225) -> {"Lu","L"}; +lookup(120226) -> {"Lu","L"}; +lookup(120227) -> {"Lu","L"}; +lookup(120228) -> {"Lu","L"}; +lookup(120229) -> {"Lu","L"}; +lookup(120230) -> {"Lu","L"}; +lookup(120231) -> {"Lu","L"}; +lookup(120232) -> {"Lu","L"}; +lookup(120233) -> {"Lu","L"}; +lookup(120234) -> {"Lu","L"}; +lookup(120235) -> {"Lu","L"}; +lookup(120236) -> {"Lu","L"}; +lookup(120237) -> {"Lu","L"}; +lookup(120238) -> {"Lu","L"}; +lookup(120239) -> {"Lu","L"}; +lookup(120240) -> {"Lu","L"}; +lookup(120241) -> {"Lu","L"}; +lookup(120242) -> {"Lu","L"}; +lookup(120243) -> {"Lu","L"}; +lookup(120244) -> {"Lu","L"}; +lookup(120245) -> {"Lu","L"}; +lookup(120246) -> {"Lu","L"}; +lookup(120247) -> {"Lu","L"}; +lookup(120248) -> {"Lu","L"}; +lookup(120249) -> {"Lu","L"}; +lookup(120250) -> {"Ll","L"}; +lookup(120251) -> {"Ll","L"}; +lookup(120252) -> {"Ll","L"}; +lookup(120253) 
-> {"Ll","L"}; +lookup(120254) -> {"Ll","L"}; +lookup(120255) -> {"Ll","L"}; +lookup(120256) -> {"Ll","L"}; +lookup(120257) -> {"Ll","L"}; +lookup(120258) -> {"Ll","L"}; +lookup(120259) -> {"Ll","L"}; +lookup(120260) -> {"Ll","L"}; +lookup(120261) -> {"Ll","L"}; +lookup(120262) -> {"Ll","L"}; +lookup(120263) -> {"Ll","L"}; +lookup(120264) -> {"Ll","L"}; +lookup(120265) -> {"Ll","L"}; +lookup(120266) -> {"Ll","L"}; +lookup(120267) -> {"Ll","L"}; +lookup(120268) -> {"Ll","L"}; +lookup(120269) -> {"Ll","L"}; +lookup(120270) -> {"Ll","L"}; +lookup(120271) -> {"Ll","L"}; +lookup(120272) -> {"Ll","L"}; +lookup(120273) -> {"Ll","L"}; +lookup(120274) -> {"Ll","L"}; +lookup(120275) -> {"Ll","L"}; +lookup(120276) -> {"Lu","L"}; +lookup(120277) -> {"Lu","L"}; +lookup(120278) -> {"Lu","L"}; +lookup(120279) -> {"Lu","L"}; +lookup(120280) -> {"Lu","L"}; +lookup(120281) -> {"Lu","L"}; +lookup(120282) -> {"Lu","L"}; +lookup(120283) -> {"Lu","L"}; +lookup(120284) -> {"Lu","L"}; +lookup(120285) -> {"Lu","L"}; +lookup(120286) -> {"Lu","L"}; +lookup(120287) -> {"Lu","L"}; +lookup(120288) -> {"Lu","L"}; +lookup(120289) -> {"Lu","L"}; +lookup(120290) -> {"Lu","L"}; +lookup(120291) -> {"Lu","L"}; +lookup(120292) -> {"Lu","L"}; +lookup(120293) -> {"Lu","L"}; +lookup(120294) -> {"Lu","L"}; +lookup(120295) -> {"Lu","L"}; +lookup(120296) -> {"Lu","L"}; +lookup(120297) -> {"Lu","L"}; +lookup(120298) -> {"Lu","L"}; +lookup(120299) -> {"Lu","L"}; +lookup(120300) -> {"Lu","L"}; +lookup(120301) -> {"Lu","L"}; +lookup(120302) -> {"Ll","L"}; +lookup(120303) -> {"Ll","L"}; +lookup(120304) -> {"Ll","L"}; +lookup(120305) -> {"Ll","L"}; +lookup(120306) -> {"Ll","L"}; +lookup(120307) -> {"Ll","L"}; +lookup(120308) -> {"Ll","L"}; +lookup(120309) -> {"Ll","L"}; +lookup(120310) -> {"Ll","L"}; +lookup(120311) -> {"Ll","L"}; +lookup(120312) -> {"Ll","L"}; +lookup(120313) -> {"Ll","L"}; +lookup(120314) -> {"Ll","L"}; +lookup(120315) -> {"Ll","L"}; +lookup(120316) -> {"Ll","L"}; +lookup(120317) -> {"Ll","L"}; 
+lookup(120318) -> {"Ll","L"}; +lookup(120319) -> {"Ll","L"}; +lookup(120320) -> {"Ll","L"}; +lookup(120321) -> {"Ll","L"}; +lookup(120322) -> {"Ll","L"}; +lookup(120323) -> {"Ll","L"}; +lookup(120324) -> {"Ll","L"}; +lookup(120325) -> {"Ll","L"}; +lookup(120326) -> {"Ll","L"}; +lookup(120327) -> {"Ll","L"}; +lookup(120328) -> {"Lu","L"}; +lookup(120329) -> {"Lu","L"}; +lookup(120330) -> {"Lu","L"}; +lookup(120331) -> {"Lu","L"}; +lookup(120332) -> {"Lu","L"}; +lookup(120333) -> {"Lu","L"}; +lookup(120334) -> {"Lu","L"}; +lookup(120335) -> {"Lu","L"}; +lookup(120336) -> {"Lu","L"}; +lookup(120337) -> {"Lu","L"}; +lookup(120338) -> {"Lu","L"}; +lookup(120339) -> {"Lu","L"}; +lookup(120340) -> {"Lu","L"}; +lookup(120341) -> {"Lu","L"}; +lookup(120342) -> {"Lu","L"}; +lookup(120343) -> {"Lu","L"}; +lookup(120344) -> {"Lu","L"}; +lookup(120345) -> {"Lu","L"}; +lookup(120346) -> {"Lu","L"}; +lookup(120347) -> {"Lu","L"}; +lookup(120348) -> {"Lu","L"}; +lookup(120349) -> {"Lu","L"}; +lookup(120350) -> {"Lu","L"}; +lookup(120351) -> {"Lu","L"}; +lookup(120352) -> {"Lu","L"}; +lookup(120353) -> {"Lu","L"}; +lookup(120354) -> {"Ll","L"}; +lookup(120355) -> {"Ll","L"}; +lookup(120356) -> {"Ll","L"}; +lookup(120357) -> {"Ll","L"}; +lookup(120358) -> {"Ll","L"}; +lookup(120359) -> {"Ll","L"}; +lookup(120360) -> {"Ll","L"}; +lookup(120361) -> {"Ll","L"}; +lookup(120362) -> {"Ll","L"}; +lookup(120363) -> {"Ll","L"}; +lookup(120364) -> {"Ll","L"}; +lookup(120365) -> {"Ll","L"}; +lookup(120366) -> {"Ll","L"}; +lookup(120367) -> {"Ll","L"}; +lookup(120368) -> {"Ll","L"}; +lookup(120369) -> {"Ll","L"}; +lookup(120370) -> {"Ll","L"}; +lookup(120371) -> {"Ll","L"}; +lookup(120372) -> {"Ll","L"}; +lookup(120373) -> {"Ll","L"}; +lookup(120374) -> {"Ll","L"}; +lookup(120375) -> {"Ll","L"}; +lookup(120376) -> {"Ll","L"}; +lookup(120377) -> {"Ll","L"}; +lookup(120378) -> {"Ll","L"}; +lookup(120379) -> {"Ll","L"}; +lookup(120380) -> {"Lu","L"}; +lookup(120381) -> {"Lu","L"}; +lookup(120382) 
-> {"Lu","L"}; +lookup(120383) -> {"Lu","L"}; +lookup(120384) -> {"Lu","L"}; +lookup(120385) -> {"Lu","L"}; +lookup(120386) -> {"Lu","L"}; +lookup(120387) -> {"Lu","L"}; +lookup(120388) -> {"Lu","L"}; +lookup(120389) -> {"Lu","L"}; +lookup(120390) -> {"Lu","L"}; +lookup(120391) -> {"Lu","L"}; +lookup(120392) -> {"Lu","L"}; +lookup(120393) -> {"Lu","L"}; +lookup(120394) -> {"Lu","L"}; +lookup(120395) -> {"Lu","L"}; +lookup(120396) -> {"Lu","L"}; +lookup(120397) -> {"Lu","L"}; +lookup(120398) -> {"Lu","L"}; +lookup(120399) -> {"Lu","L"}; +lookup(120400) -> {"Lu","L"}; +lookup(120401) -> {"Lu","L"}; +lookup(120402) -> {"Lu","L"}; +lookup(120403) -> {"Lu","L"}; +lookup(120404) -> {"Lu","L"}; +lookup(120405) -> {"Lu","L"}; +lookup(120406) -> {"Ll","L"}; +lookup(120407) -> {"Ll","L"}; +lookup(120408) -> {"Ll","L"}; +lookup(120409) -> {"Ll","L"}; +lookup(120410) -> {"Ll","L"}; +lookup(120411) -> {"Ll","L"}; +lookup(120412) -> {"Ll","L"}; +lookup(120413) -> {"Ll","L"}; +lookup(120414) -> {"Ll","L"}; +lookup(120415) -> {"Ll","L"}; +lookup(120416) -> {"Ll","L"}; +lookup(120417) -> {"Ll","L"}; +lookup(120418) -> {"Ll","L"}; +lookup(120419) -> {"Ll","L"}; +lookup(120420) -> {"Ll","L"}; +lookup(120421) -> {"Ll","L"}; +lookup(120422) -> {"Ll","L"}; +lookup(120423) -> {"Ll","L"}; +lookup(120424) -> {"Ll","L"}; +lookup(120425) -> {"Ll","L"}; +lookup(120426) -> {"Ll","L"}; +lookup(120427) -> {"Ll","L"}; +lookup(120428) -> {"Ll","L"}; +lookup(120429) -> {"Ll","L"}; +lookup(120430) -> {"Ll","L"}; +lookup(120431) -> {"Ll","L"}; +lookup(120432) -> {"Lu","L"}; +lookup(120433) -> {"Lu","L"}; +lookup(120434) -> {"Lu","L"}; +lookup(120435) -> {"Lu","L"}; +lookup(120436) -> {"Lu","L"}; +lookup(120437) -> {"Lu","L"}; +lookup(120438) -> {"Lu","L"}; +lookup(120439) -> {"Lu","L"}; +lookup(120440) -> {"Lu","L"}; +lookup(120441) -> {"Lu","L"}; +lookup(120442) -> {"Lu","L"}; +lookup(120443) -> {"Lu","L"}; +lookup(120444) -> {"Lu","L"}; +lookup(120445) -> {"Lu","L"}; +lookup(120446) -> {"Lu","L"}; 
+lookup(120447) -> {"Lu","L"}; +lookup(120448) -> {"Lu","L"}; +lookup(120449) -> {"Lu","L"}; +lookup(120450) -> {"Lu","L"}; +lookup(120451) -> {"Lu","L"}; +lookup(120452) -> {"Lu","L"}; +lookup(120453) -> {"Lu","L"}; +lookup(120454) -> {"Lu","L"}; +lookup(120455) -> {"Lu","L"}; +lookup(120456) -> {"Lu","L"}; +lookup(120457) -> {"Lu","L"}; +lookup(120458) -> {"Ll","L"}; +lookup(120459) -> {"Ll","L"}; +lookup(120460) -> {"Ll","L"}; +lookup(120461) -> {"Ll","L"}; +lookup(120462) -> {"Ll","L"}; +lookup(120463) -> {"Ll","L"}; +lookup(120464) -> {"Ll","L"}; +lookup(120465) -> {"Ll","L"}; +lookup(120466) -> {"Ll","L"}; +lookup(120467) -> {"Ll","L"}; +lookup(120468) -> {"Ll","L"}; +lookup(120469) -> {"Ll","L"}; +lookup(120470) -> {"Ll","L"}; +lookup(120471) -> {"Ll","L"}; +lookup(120472) -> {"Ll","L"}; +lookup(120473) -> {"Ll","L"}; +lookup(120474) -> {"Ll","L"}; +lookup(120475) -> {"Ll","L"}; +lookup(120476) -> {"Ll","L"}; +lookup(120477) -> {"Ll","L"}; +lookup(120478) -> {"Ll","L"}; +lookup(120479) -> {"Ll","L"}; +lookup(120480) -> {"Ll","L"}; +lookup(120481) -> {"Ll","L"}; +lookup(120482) -> {"Ll","L"}; +lookup(120483) -> {"Ll","L"}; +lookup(120484) -> {"Ll","L"}; +lookup(120485) -> {"Ll","L"}; +lookup(120488) -> {"Lu","L"}; +lookup(120489) -> {"Lu","L"}; +lookup(120490) -> {"Lu","L"}; +lookup(120491) -> {"Lu","L"}; +lookup(120492) -> {"Lu","L"}; +lookup(120493) -> {"Lu","L"}; +lookup(120494) -> {"Lu","L"}; +lookup(120495) -> {"Lu","L"}; +lookup(120496) -> {"Lu","L"}; +lookup(120497) -> {"Lu","L"}; +lookup(120498) -> {"Lu","L"}; +lookup(120499) -> {"Lu","L"}; +lookup(120500) -> {"Lu","L"}; +lookup(120501) -> {"Lu","L"}; +lookup(120502) -> {"Lu","L"}; +lookup(120503) -> {"Lu","L"}; +lookup(120504) -> {"Lu","L"}; +lookup(120505) -> {"Lu","L"}; +lookup(120506) -> {"Lu","L"}; +lookup(120507) -> {"Lu","L"}; +lookup(120508) -> {"Lu","L"}; +lookup(120509) -> {"Lu","L"}; +lookup(120510) -> {"Lu","L"}; +lookup(120511) -> {"Lu","L"}; +lookup(120512) -> {"Lu","L"}; +lookup(120513) 
-> {"Sm","L"}; +lookup(120514) -> {"Ll","L"}; +lookup(120515) -> {"Ll","L"}; +lookup(120516) -> {"Ll","L"}; +lookup(120517) -> {"Ll","L"}; +lookup(120518) -> {"Ll","L"}; +lookup(120519) -> {"Ll","L"}; +lookup(120520) -> {"Ll","L"}; +lookup(120521) -> {"Ll","L"}; +lookup(120522) -> {"Ll","L"}; +lookup(120523) -> {"Ll","L"}; +lookup(120524) -> {"Ll","L"}; +lookup(120525) -> {"Ll","L"}; +lookup(120526) -> {"Ll","L"}; +lookup(120527) -> {"Ll","L"}; +lookup(120528) -> {"Ll","L"}; +lookup(120529) -> {"Ll","L"}; +lookup(120530) -> {"Ll","L"}; +lookup(120531) -> {"Ll","L"}; +lookup(120532) -> {"Ll","L"}; +lookup(120533) -> {"Ll","L"}; +lookup(120534) -> {"Ll","L"}; +lookup(120535) -> {"Ll","L"}; +lookup(120536) -> {"Ll","L"}; +lookup(120537) -> {"Ll","L"}; +lookup(120538) -> {"Ll","L"}; +lookup(120539) -> {"Sm","ON"}; +lookup(120540) -> {"Ll","L"}; +lookup(120541) -> {"Ll","L"}; +lookup(120542) -> {"Ll","L"}; +lookup(120543) -> {"Ll","L"}; +lookup(120544) -> {"Ll","L"}; +lookup(120545) -> {"Ll","L"}; +lookup(120546) -> {"Lu","L"}; +lookup(120547) -> {"Lu","L"}; +lookup(120548) -> {"Lu","L"}; +lookup(120549) -> {"Lu","L"}; +lookup(120550) -> {"Lu","L"}; +lookup(120551) -> {"Lu","L"}; +lookup(120552) -> {"Lu","L"}; +lookup(120553) -> {"Lu","L"}; +lookup(120554) -> {"Lu","L"}; +lookup(120555) -> {"Lu","L"}; +lookup(120556) -> {"Lu","L"}; +lookup(120557) -> {"Lu","L"}; +lookup(120558) -> {"Lu","L"}; +lookup(120559) -> {"Lu","L"}; +lookup(120560) -> {"Lu","L"}; +lookup(120561) -> {"Lu","L"}; +lookup(120562) -> {"Lu","L"}; +lookup(120563) -> {"Lu","L"}; +lookup(120564) -> {"Lu","L"}; +lookup(120565) -> {"Lu","L"}; +lookup(120566) -> {"Lu","L"}; +lookup(120567) -> {"Lu","L"}; +lookup(120568) -> {"Lu","L"}; +lookup(120569) -> {"Lu","L"}; +lookup(120570) -> {"Lu","L"}; +lookup(120571) -> {"Sm","L"}; +lookup(120572) -> {"Ll","L"}; +lookup(120573) -> {"Ll","L"}; +lookup(120574) -> {"Ll","L"}; +lookup(120575) -> {"Ll","L"}; +lookup(120576) -> {"Ll","L"}; +lookup(120577) -> {"Ll","L"}; 
+lookup(120578) -> {"Ll","L"}; +lookup(120579) -> {"Ll","L"}; +lookup(120580) -> {"Ll","L"}; +lookup(120581) -> {"Ll","L"}; +lookup(120582) -> {"Ll","L"}; +lookup(120583) -> {"Ll","L"}; +lookup(120584) -> {"Ll","L"}; +lookup(120585) -> {"Ll","L"}; +lookup(120586) -> {"Ll","L"}; +lookup(120587) -> {"Ll","L"}; +lookup(120588) -> {"Ll","L"}; +lookup(120589) -> {"Ll","L"}; +lookup(120590) -> {"Ll","L"}; +lookup(120591) -> {"Ll","L"}; +lookup(120592) -> {"Ll","L"}; +lookup(120593) -> {"Ll","L"}; +lookup(120594) -> {"Ll","L"}; +lookup(120595) -> {"Ll","L"}; +lookup(120596) -> {"Ll","L"}; +lookup(120597) -> {"Sm","ON"}; +lookup(120598) -> {"Ll","L"}; +lookup(120599) -> {"Ll","L"}; +lookup(120600) -> {"Ll","L"}; +lookup(120601) -> {"Ll","L"}; +lookup(120602) -> {"Ll","L"}; +lookup(120603) -> {"Ll","L"}; +lookup(120604) -> {"Lu","L"}; +lookup(120605) -> {"Lu","L"}; +lookup(120606) -> {"Lu","L"}; +lookup(120607) -> {"Lu","L"}; +lookup(120608) -> {"Lu","L"}; +lookup(120609) -> {"Lu","L"}; +lookup(120610) -> {"Lu","L"}; +lookup(120611) -> {"Lu","L"}; +lookup(120612) -> {"Lu","L"}; +lookup(120613) -> {"Lu","L"}; +lookup(120614) -> {"Lu","L"}; +lookup(120615) -> {"Lu","L"}; +lookup(120616) -> {"Lu","L"}; +lookup(120617) -> {"Lu","L"}; +lookup(120618) -> {"Lu","L"}; +lookup(120619) -> {"Lu","L"}; +lookup(120620) -> {"Lu","L"}; +lookup(120621) -> {"Lu","L"}; +lookup(120622) -> {"Lu","L"}; +lookup(120623) -> {"Lu","L"}; +lookup(120624) -> {"Lu","L"}; +lookup(120625) -> {"Lu","L"}; +lookup(120626) -> {"Lu","L"}; +lookup(120627) -> {"Lu","L"}; +lookup(120628) -> {"Lu","L"}; +lookup(120629) -> {"Sm","L"}; +lookup(120630) -> {"Ll","L"}; +lookup(120631) -> {"Ll","L"}; +lookup(120632) -> {"Ll","L"}; +lookup(120633) -> {"Ll","L"}; +lookup(120634) -> {"Ll","L"}; +lookup(120635) -> {"Ll","L"}; +lookup(120636) -> {"Ll","L"}; +lookup(120637) -> {"Ll","L"}; +lookup(120638) -> {"Ll","L"}; +lookup(120639) -> {"Ll","L"}; +lookup(120640) -> {"Ll","L"}; +lookup(120641) -> {"Ll","L"}; 
+lookup(120642) -> {"Ll","L"}; +lookup(120643) -> {"Ll","L"}; +lookup(120644) -> {"Ll","L"}; +lookup(120645) -> {"Ll","L"}; +lookup(120646) -> {"Ll","L"}; +lookup(120647) -> {"Ll","L"}; +lookup(120648) -> {"Ll","L"}; +lookup(120649) -> {"Ll","L"}; +lookup(120650) -> {"Ll","L"}; +lookup(120651) -> {"Ll","L"}; +lookup(120652) -> {"Ll","L"}; +lookup(120653) -> {"Ll","L"}; +lookup(120654) -> {"Ll","L"}; +lookup(120655) -> {"Sm","ON"}; +lookup(120656) -> {"Ll","L"}; +lookup(120657) -> {"Ll","L"}; +lookup(120658) -> {"Ll","L"}; +lookup(120659) -> {"Ll","L"}; +lookup(120660) -> {"Ll","L"}; +lookup(120661) -> {"Ll","L"}; +lookup(120662) -> {"Lu","L"}; +lookup(120663) -> {"Lu","L"}; +lookup(120664) -> {"Lu","L"}; +lookup(120665) -> {"Lu","L"}; +lookup(120666) -> {"Lu","L"}; +lookup(120667) -> {"Lu","L"}; +lookup(120668) -> {"Lu","L"}; +lookup(120669) -> {"Lu","L"}; +lookup(120670) -> {"Lu","L"}; +lookup(120671) -> {"Lu","L"}; +lookup(120672) -> {"Lu","L"}; +lookup(120673) -> {"Lu","L"}; +lookup(120674) -> {"Lu","L"}; +lookup(120675) -> {"Lu","L"}; +lookup(120676) -> {"Lu","L"}; +lookup(120677) -> {"Lu","L"}; +lookup(120678) -> {"Lu","L"}; +lookup(120679) -> {"Lu","L"}; +lookup(120680) -> {"Lu","L"}; +lookup(120681) -> {"Lu","L"}; +lookup(120682) -> {"Lu","L"}; +lookup(120683) -> {"Lu","L"}; +lookup(120684) -> {"Lu","L"}; +lookup(120685) -> {"Lu","L"}; +lookup(120686) -> {"Lu","L"}; +lookup(120687) -> {"Sm","L"}; +lookup(120688) -> {"Ll","L"}; +lookup(120689) -> {"Ll","L"}; +lookup(120690) -> {"Ll","L"}; +lookup(120691) -> {"Ll","L"}; +lookup(120692) -> {"Ll","L"}; +lookup(120693) -> {"Ll","L"}; +lookup(120694) -> {"Ll","L"}; +lookup(120695) -> {"Ll","L"}; +lookup(120696) -> {"Ll","L"}; +lookup(120697) -> {"Ll","L"}; +lookup(120698) -> {"Ll","L"}; +lookup(120699) -> {"Ll","L"}; +lookup(120700) -> {"Ll","L"}; +lookup(120701) -> {"Ll","L"}; +lookup(120702) -> {"Ll","L"}; +lookup(120703) -> {"Ll","L"}; +lookup(120704) -> {"Ll","L"}; +lookup(120705) -> {"Ll","L"}; 
+lookup(120706) -> {"Ll","L"}; +lookup(120707) -> {"Ll","L"}; +lookup(120708) -> {"Ll","L"}; +lookup(120709) -> {"Ll","L"}; +lookup(120710) -> {"Ll","L"}; +lookup(120711) -> {"Ll","L"}; +lookup(120712) -> {"Ll","L"}; +lookup(120713) -> {"Sm","ON"}; +lookup(120714) -> {"Ll","L"}; +lookup(120715) -> {"Ll","L"}; +lookup(120716) -> {"Ll","L"}; +lookup(120717) -> {"Ll","L"}; +lookup(120718) -> {"Ll","L"}; +lookup(120719) -> {"Ll","L"}; +lookup(120720) -> {"Lu","L"}; +lookup(120721) -> {"Lu","L"}; +lookup(120722) -> {"Lu","L"}; +lookup(120723) -> {"Lu","L"}; +lookup(120724) -> {"Lu","L"}; +lookup(120725) -> {"Lu","L"}; +lookup(120726) -> {"Lu","L"}; +lookup(120727) -> {"Lu","L"}; +lookup(120728) -> {"Lu","L"}; +lookup(120729) -> {"Lu","L"}; +lookup(120730) -> {"Lu","L"}; +lookup(120731) -> {"Lu","L"}; +lookup(120732) -> {"Lu","L"}; +lookup(120733) -> {"Lu","L"}; +lookup(120734) -> {"Lu","L"}; +lookup(120735) -> {"Lu","L"}; +lookup(120736) -> {"Lu","L"}; +lookup(120737) -> {"Lu","L"}; +lookup(120738) -> {"Lu","L"}; +lookup(120739) -> {"Lu","L"}; +lookup(120740) -> {"Lu","L"}; +lookup(120741) -> {"Lu","L"}; +lookup(120742) -> {"Lu","L"}; +lookup(120743) -> {"Lu","L"}; +lookup(120744) -> {"Lu","L"}; +lookup(120745) -> {"Sm","L"}; +lookup(120746) -> {"Ll","L"}; +lookup(120747) -> {"Ll","L"}; +lookup(120748) -> {"Ll","L"}; +lookup(120749) -> {"Ll","L"}; +lookup(120750) -> {"Ll","L"}; +lookup(120751) -> {"Ll","L"}; +lookup(120752) -> {"Ll","L"}; +lookup(120753) -> {"Ll","L"}; +lookup(120754) -> {"Ll","L"}; +lookup(120755) -> {"Ll","L"}; +lookup(120756) -> {"Ll","L"}; +lookup(120757) -> {"Ll","L"}; +lookup(120758) -> {"Ll","L"}; +lookup(120759) -> {"Ll","L"}; +lookup(120760) -> {"Ll","L"}; +lookup(120761) -> {"Ll","L"}; +lookup(120762) -> {"Ll","L"}; +lookup(120763) -> {"Ll","L"}; +lookup(120764) -> {"Ll","L"}; +lookup(120765) -> {"Ll","L"}; +lookup(120766) -> {"Ll","L"}; +lookup(120767) -> {"Ll","L"}; +lookup(120768) -> {"Ll","L"}; +lookup(120769) -> {"Ll","L"}; 
+lookup(120770) -> {"Ll","L"}; +lookup(120771) -> {"Sm","ON"}; +lookup(120772) -> {"Ll","L"}; +lookup(120773) -> {"Ll","L"}; +lookup(120774) -> {"Ll","L"}; +lookup(120775) -> {"Ll","L"}; +lookup(120776) -> {"Ll","L"}; +lookup(120777) -> {"Ll","L"}; +lookup(120778) -> {"Lu","L"}; +lookup(120779) -> {"Ll","L"}; +lookup(120782) -> {"Nd","EN"}; +lookup(120783) -> {"Nd","EN"}; +lookup(120784) -> {"Nd","EN"}; +lookup(120785) -> {"Nd","EN"}; +lookup(120786) -> {"Nd","EN"}; +lookup(120787) -> {"Nd","EN"}; +lookup(120788) -> {"Nd","EN"}; +lookup(120789) -> {"Nd","EN"}; +lookup(120790) -> {"Nd","EN"}; +lookup(120791) -> {"Nd","EN"}; +lookup(120792) -> {"Nd","EN"}; +lookup(120793) -> {"Nd","EN"}; +lookup(120794) -> {"Nd","EN"}; +lookup(120795) -> {"Nd","EN"}; +lookup(120796) -> {"Nd","EN"}; +lookup(120797) -> {"Nd","EN"}; +lookup(120798) -> {"Nd","EN"}; +lookup(120799) -> {"Nd","EN"}; +lookup(120800) -> {"Nd","EN"}; +lookup(120801) -> {"Nd","EN"}; +lookup(120802) -> {"Nd","EN"}; +lookup(120803) -> {"Nd","EN"}; +lookup(120804) -> {"Nd","EN"}; +lookup(120805) -> {"Nd","EN"}; +lookup(120806) -> {"Nd","EN"}; +lookup(120807) -> {"Nd","EN"}; +lookup(120808) -> {"Nd","EN"}; +lookup(120809) -> {"Nd","EN"}; +lookup(120810) -> {"Nd","EN"}; +lookup(120811) -> {"Nd","EN"}; +lookup(120812) -> {"Nd","EN"}; +lookup(120813) -> {"Nd","EN"}; +lookup(120814) -> {"Nd","EN"}; +lookup(120815) -> {"Nd","EN"}; +lookup(120816) -> {"Nd","EN"}; +lookup(120817) -> {"Nd","EN"}; +lookup(120818) -> {"Nd","EN"}; +lookup(120819) -> {"Nd","EN"}; +lookup(120820) -> {"Nd","EN"}; +lookup(120821) -> {"Nd","EN"}; +lookup(120822) -> {"Nd","EN"}; +lookup(120823) -> {"Nd","EN"}; +lookup(120824) -> {"Nd","EN"}; +lookup(120825) -> {"Nd","EN"}; +lookup(120826) -> {"Nd","EN"}; +lookup(120827) -> {"Nd","EN"}; +lookup(120828) -> {"Nd","EN"}; +lookup(120829) -> {"Nd","EN"}; +lookup(120830) -> {"Nd","EN"}; +lookup(120831) -> {"Nd","EN"}; +lookup(120832) -> {"So","L"}; +lookup(120833) -> {"So","L"}; +lookup(120834) -> 
{"So","L"}; +lookup(120835) -> {"So","L"}; +lookup(120836) -> {"So","L"}; +lookup(120837) -> {"So","L"}; +lookup(120838) -> {"So","L"}; +lookup(120839) -> {"So","L"}; +lookup(120840) -> {"So","L"}; +lookup(120841) -> {"So","L"}; +lookup(120842) -> {"So","L"}; +lookup(120843) -> {"So","L"}; +lookup(120844) -> {"So","L"}; +lookup(120845) -> {"So","L"}; +lookup(120846) -> {"So","L"}; +lookup(120847) -> {"So","L"}; +lookup(120848) -> {"So","L"}; +lookup(120849) -> {"So","L"}; +lookup(120850) -> {"So","L"}; +lookup(120851) -> {"So","L"}; +lookup(120852) -> {"So","L"}; +lookup(120853) -> {"So","L"}; +lookup(120854) -> {"So","L"}; +lookup(120855) -> {"So","L"}; +lookup(120856) -> {"So","L"}; +lookup(120857) -> {"So","L"}; +lookup(120858) -> {"So","L"}; +lookup(120859) -> {"So","L"}; +lookup(120860) -> {"So","L"}; +lookup(120861) -> {"So","L"}; +lookup(120862) -> {"So","L"}; +lookup(120863) -> {"So","L"}; +lookup(120864) -> {"So","L"}; +lookup(120865) -> {"So","L"}; +lookup(120866) -> {"So","L"}; +lookup(120867) -> {"So","L"}; +lookup(120868) -> {"So","L"}; +lookup(120869) -> {"So","L"}; +lookup(120870) -> {"So","L"}; +lookup(120871) -> {"So","L"}; +lookup(120872) -> {"So","L"}; +lookup(120873) -> {"So","L"}; +lookup(120874) -> {"So","L"}; +lookup(120875) -> {"So","L"}; +lookup(120876) -> {"So","L"}; +lookup(120877) -> {"So","L"}; +lookup(120878) -> {"So","L"}; +lookup(120879) -> {"So","L"}; +lookup(120880) -> {"So","L"}; +lookup(120881) -> {"So","L"}; +lookup(120882) -> {"So","L"}; +lookup(120883) -> {"So","L"}; +lookup(120884) -> {"So","L"}; +lookup(120885) -> {"So","L"}; +lookup(120886) -> {"So","L"}; +lookup(120887) -> {"So","L"}; +lookup(120888) -> {"So","L"}; +lookup(120889) -> {"So","L"}; +lookup(120890) -> {"So","L"}; +lookup(120891) -> {"So","L"}; +lookup(120892) -> {"So","L"}; +lookup(120893) -> {"So","L"}; +lookup(120894) -> {"So","L"}; +lookup(120895) -> {"So","L"}; +lookup(120896) -> {"So","L"}; +lookup(120897) -> {"So","L"}; +lookup(120898) -> {"So","L"}; 
+lookup(120899) -> {"So","L"}; +lookup(120900) -> {"So","L"}; +lookup(120901) -> {"So","L"}; +lookup(120902) -> {"So","L"}; +lookup(120903) -> {"So","L"}; +lookup(120904) -> {"So","L"}; +lookup(120905) -> {"So","L"}; +lookup(120906) -> {"So","L"}; +lookup(120907) -> {"So","L"}; +lookup(120908) -> {"So","L"}; +lookup(120909) -> {"So","L"}; +lookup(120910) -> {"So","L"}; +lookup(120911) -> {"So","L"}; +lookup(120912) -> {"So","L"}; +lookup(120913) -> {"So","L"}; +lookup(120914) -> {"So","L"}; +lookup(120915) -> {"So","L"}; +lookup(120916) -> {"So","L"}; +lookup(120917) -> {"So","L"}; +lookup(120918) -> {"So","L"}; +lookup(120919) -> {"So","L"}; +lookup(120920) -> {"So","L"}; +lookup(120921) -> {"So","L"}; +lookup(120922) -> {"So","L"}; +lookup(120923) -> {"So","L"}; +lookup(120924) -> {"So","L"}; +lookup(120925) -> {"So","L"}; +lookup(120926) -> {"So","L"}; +lookup(120927) -> {"So","L"}; +lookup(120928) -> {"So","L"}; +lookup(120929) -> {"So","L"}; +lookup(120930) -> {"So","L"}; +lookup(120931) -> {"So","L"}; +lookup(120932) -> {"So","L"}; +lookup(120933) -> {"So","L"}; +lookup(120934) -> {"So","L"}; +lookup(120935) -> {"So","L"}; +lookup(120936) -> {"So","L"}; +lookup(120937) -> {"So","L"}; +lookup(120938) -> {"So","L"}; +lookup(120939) -> {"So","L"}; +lookup(120940) -> {"So","L"}; +lookup(120941) -> {"So","L"}; +lookup(120942) -> {"So","L"}; +lookup(120943) -> {"So","L"}; +lookup(120944) -> {"So","L"}; +lookup(120945) -> {"So","L"}; +lookup(120946) -> {"So","L"}; +lookup(120947) -> {"So","L"}; +lookup(120948) -> {"So","L"}; +lookup(120949) -> {"So","L"}; +lookup(120950) -> {"So","L"}; +lookup(120951) -> {"So","L"}; +lookup(120952) -> {"So","L"}; +lookup(120953) -> {"So","L"}; +lookup(120954) -> {"So","L"}; +lookup(120955) -> {"So","L"}; +lookup(120956) -> {"So","L"}; +lookup(120957) -> {"So","L"}; +lookup(120958) -> {"So","L"}; +lookup(120959) -> {"So","L"}; +lookup(120960) -> {"So","L"}; +lookup(120961) -> {"So","L"}; +lookup(120962) -> {"So","L"}; +lookup(120963) 
-> {"So","L"}; +lookup(120964) -> {"So","L"}; +lookup(120965) -> {"So","L"}; +lookup(120966) -> {"So","L"}; +lookup(120967) -> {"So","L"}; +lookup(120968) -> {"So","L"}; +lookup(120969) -> {"So","L"}; +lookup(120970) -> {"So","L"}; +lookup(120971) -> {"So","L"}; +lookup(120972) -> {"So","L"}; +lookup(120973) -> {"So","L"}; +lookup(120974) -> {"So","L"}; +lookup(120975) -> {"So","L"}; +lookup(120976) -> {"So","L"}; +lookup(120977) -> {"So","L"}; +lookup(120978) -> {"So","L"}; +lookup(120979) -> {"So","L"}; +lookup(120980) -> {"So","L"}; +lookup(120981) -> {"So","L"}; +lookup(120982) -> {"So","L"}; +lookup(120983) -> {"So","L"}; +lookup(120984) -> {"So","L"}; +lookup(120985) -> {"So","L"}; +lookup(120986) -> {"So","L"}; +lookup(120987) -> {"So","L"}; +lookup(120988) -> {"So","L"}; +lookup(120989) -> {"So","L"}; +lookup(120990) -> {"So","L"}; +lookup(120991) -> {"So","L"}; +lookup(120992) -> {"So","L"}; +lookup(120993) -> {"So","L"}; +lookup(120994) -> {"So","L"}; +lookup(120995) -> {"So","L"}; +lookup(120996) -> {"So","L"}; +lookup(120997) -> {"So","L"}; +lookup(120998) -> {"So","L"}; +lookup(120999) -> {"So","L"}; +lookup(121000) -> {"So","L"}; +lookup(121001) -> {"So","L"}; +lookup(121002) -> {"So","L"}; +lookup(121003) -> {"So","L"}; +lookup(121004) -> {"So","L"}; +lookup(121005) -> {"So","L"}; +lookup(121006) -> {"So","L"}; +lookup(121007) -> {"So","L"}; +lookup(121008) -> {"So","L"}; +lookup(121009) -> {"So","L"}; +lookup(121010) -> {"So","L"}; +lookup(121011) -> {"So","L"}; +lookup(121012) -> {"So","L"}; +lookup(121013) -> {"So","L"}; +lookup(121014) -> {"So","L"}; +lookup(121015) -> {"So","L"}; +lookup(121016) -> {"So","L"}; +lookup(121017) -> {"So","L"}; +lookup(121018) -> {"So","L"}; +lookup(121019) -> {"So","L"}; +lookup(121020) -> {"So","L"}; +lookup(121021) -> {"So","L"}; +lookup(121022) -> {"So","L"}; +lookup(121023) -> {"So","L"}; +lookup(121024) -> {"So","L"}; +lookup(121025) -> {"So","L"}; +lookup(121026) -> {"So","L"}; +lookup(121027) -> {"So","L"}; 
+lookup(121028) -> {"So","L"}; +lookup(121029) -> {"So","L"}; +lookup(121030) -> {"So","L"}; +lookup(121031) -> {"So","L"}; +lookup(121032) -> {"So","L"}; +lookup(121033) -> {"So","L"}; +lookup(121034) -> {"So","L"}; +lookup(121035) -> {"So","L"}; +lookup(121036) -> {"So","L"}; +lookup(121037) -> {"So","L"}; +lookup(121038) -> {"So","L"}; +lookup(121039) -> {"So","L"}; +lookup(121040) -> {"So","L"}; +lookup(121041) -> {"So","L"}; +lookup(121042) -> {"So","L"}; +lookup(121043) -> {"So","L"}; +lookup(121044) -> {"So","L"}; +lookup(121045) -> {"So","L"}; +lookup(121046) -> {"So","L"}; +lookup(121047) -> {"So","L"}; +lookup(121048) -> {"So","L"}; +lookup(121049) -> {"So","L"}; +lookup(121050) -> {"So","L"}; +lookup(121051) -> {"So","L"}; +lookup(121052) -> {"So","L"}; +lookup(121053) -> {"So","L"}; +lookup(121054) -> {"So","L"}; +lookup(121055) -> {"So","L"}; +lookup(121056) -> {"So","L"}; +lookup(121057) -> {"So","L"}; +lookup(121058) -> {"So","L"}; +lookup(121059) -> {"So","L"}; +lookup(121060) -> {"So","L"}; +lookup(121061) -> {"So","L"}; +lookup(121062) -> {"So","L"}; +lookup(121063) -> {"So","L"}; +lookup(121064) -> {"So","L"}; +lookup(121065) -> {"So","L"}; +lookup(121066) -> {"So","L"}; +lookup(121067) -> {"So","L"}; +lookup(121068) -> {"So","L"}; +lookup(121069) -> {"So","L"}; +lookup(121070) -> {"So","L"}; +lookup(121071) -> {"So","L"}; +lookup(121072) -> {"So","L"}; +lookup(121073) -> {"So","L"}; +lookup(121074) -> {"So","L"}; +lookup(121075) -> {"So","L"}; +lookup(121076) -> {"So","L"}; +lookup(121077) -> {"So","L"}; +lookup(121078) -> {"So","L"}; +lookup(121079) -> {"So","L"}; +lookup(121080) -> {"So","L"}; +lookup(121081) -> {"So","L"}; +lookup(121082) -> {"So","L"}; +lookup(121083) -> {"So","L"}; +lookup(121084) -> {"So","L"}; +lookup(121085) -> {"So","L"}; +lookup(121086) -> {"So","L"}; +lookup(121087) -> {"So","L"}; +lookup(121088) -> {"So","L"}; +lookup(121089) -> {"So","L"}; +lookup(121090) -> {"So","L"}; +lookup(121091) -> {"So","L"}; +lookup(121092) 
-> {"So","L"}; +lookup(121093) -> {"So","L"}; +lookup(121094) -> {"So","L"}; +lookup(121095) -> {"So","L"}; +lookup(121096) -> {"So","L"}; +lookup(121097) -> {"So","L"}; +lookup(121098) -> {"So","L"}; +lookup(121099) -> {"So","L"}; +lookup(121100) -> {"So","L"}; +lookup(121101) -> {"So","L"}; +lookup(121102) -> {"So","L"}; +lookup(121103) -> {"So","L"}; +lookup(121104) -> {"So","L"}; +lookup(121105) -> {"So","L"}; +lookup(121106) -> {"So","L"}; +lookup(121107) -> {"So","L"}; +lookup(121108) -> {"So","L"}; +lookup(121109) -> {"So","L"}; +lookup(121110) -> {"So","L"}; +lookup(121111) -> {"So","L"}; +lookup(121112) -> {"So","L"}; +lookup(121113) -> {"So","L"}; +lookup(121114) -> {"So","L"}; +lookup(121115) -> {"So","L"}; +lookup(121116) -> {"So","L"}; +lookup(121117) -> {"So","L"}; +lookup(121118) -> {"So","L"}; +lookup(121119) -> {"So","L"}; +lookup(121120) -> {"So","L"}; +lookup(121121) -> {"So","L"}; +lookup(121122) -> {"So","L"}; +lookup(121123) -> {"So","L"}; +lookup(121124) -> {"So","L"}; +lookup(121125) -> {"So","L"}; +lookup(121126) -> {"So","L"}; +lookup(121127) -> {"So","L"}; +lookup(121128) -> {"So","L"}; +lookup(121129) -> {"So","L"}; +lookup(121130) -> {"So","L"}; +lookup(121131) -> {"So","L"}; +lookup(121132) -> {"So","L"}; +lookup(121133) -> {"So","L"}; +lookup(121134) -> {"So","L"}; +lookup(121135) -> {"So","L"}; +lookup(121136) -> {"So","L"}; +lookup(121137) -> {"So","L"}; +lookup(121138) -> {"So","L"}; +lookup(121139) -> {"So","L"}; +lookup(121140) -> {"So","L"}; +lookup(121141) -> {"So","L"}; +lookup(121142) -> {"So","L"}; +lookup(121143) -> {"So","L"}; +lookup(121144) -> {"So","L"}; +lookup(121145) -> {"So","L"}; +lookup(121146) -> {"So","L"}; +lookup(121147) -> {"So","L"}; +lookup(121148) -> {"So","L"}; +lookup(121149) -> {"So","L"}; +lookup(121150) -> {"So","L"}; +lookup(121151) -> {"So","L"}; +lookup(121152) -> {"So","L"}; +lookup(121153) -> {"So","L"}; +lookup(121154) -> {"So","L"}; +lookup(121155) -> {"So","L"}; +lookup(121156) -> {"So","L"}; 
+lookup(121157) -> {"So","L"}; +lookup(121158) -> {"So","L"}; +lookup(121159) -> {"So","L"}; +lookup(121160) -> {"So","L"}; +lookup(121161) -> {"So","L"}; +lookup(121162) -> {"So","L"}; +lookup(121163) -> {"So","L"}; +lookup(121164) -> {"So","L"}; +lookup(121165) -> {"So","L"}; +lookup(121166) -> {"So","L"}; +lookup(121167) -> {"So","L"}; +lookup(121168) -> {"So","L"}; +lookup(121169) -> {"So","L"}; +lookup(121170) -> {"So","L"}; +lookup(121171) -> {"So","L"}; +lookup(121172) -> {"So","L"}; +lookup(121173) -> {"So","L"}; +lookup(121174) -> {"So","L"}; +lookup(121175) -> {"So","L"}; +lookup(121176) -> {"So","L"}; +lookup(121177) -> {"So","L"}; +lookup(121178) -> {"So","L"}; +lookup(121179) -> {"So","L"}; +lookup(121180) -> {"So","L"}; +lookup(121181) -> {"So","L"}; +lookup(121182) -> {"So","L"}; +lookup(121183) -> {"So","L"}; +lookup(121184) -> {"So","L"}; +lookup(121185) -> {"So","L"}; +lookup(121186) -> {"So","L"}; +lookup(121187) -> {"So","L"}; +lookup(121188) -> {"So","L"}; +lookup(121189) -> {"So","L"}; +lookup(121190) -> {"So","L"}; +lookup(121191) -> {"So","L"}; +lookup(121192) -> {"So","L"}; +lookup(121193) -> {"So","L"}; +lookup(121194) -> {"So","L"}; +lookup(121195) -> {"So","L"}; +lookup(121196) -> {"So","L"}; +lookup(121197) -> {"So","L"}; +lookup(121198) -> {"So","L"}; +lookup(121199) -> {"So","L"}; +lookup(121200) -> {"So","L"}; +lookup(121201) -> {"So","L"}; +lookup(121202) -> {"So","L"}; +lookup(121203) -> {"So","L"}; +lookup(121204) -> {"So","L"}; +lookup(121205) -> {"So","L"}; +lookup(121206) -> {"So","L"}; +lookup(121207) -> {"So","L"}; +lookup(121208) -> {"So","L"}; +lookup(121209) -> {"So","L"}; +lookup(121210) -> {"So","L"}; +lookup(121211) -> {"So","L"}; +lookup(121212) -> {"So","L"}; +lookup(121213) -> {"So","L"}; +lookup(121214) -> {"So","L"}; +lookup(121215) -> {"So","L"}; +lookup(121216) -> {"So","L"}; +lookup(121217) -> {"So","L"}; +lookup(121218) -> {"So","L"}; +lookup(121219) -> {"So","L"}; +lookup(121220) -> {"So","L"}; +lookup(121221) 
-> {"So","L"}; +lookup(121222) -> {"So","L"}; +lookup(121223) -> {"So","L"}; +lookup(121224) -> {"So","L"}; +lookup(121225) -> {"So","L"}; +lookup(121226) -> {"So","L"}; +lookup(121227) -> {"So","L"}; +lookup(121228) -> {"So","L"}; +lookup(121229) -> {"So","L"}; +lookup(121230) -> {"So","L"}; +lookup(121231) -> {"So","L"}; +lookup(121232) -> {"So","L"}; +lookup(121233) -> {"So","L"}; +lookup(121234) -> {"So","L"}; +lookup(121235) -> {"So","L"}; +lookup(121236) -> {"So","L"}; +lookup(121237) -> {"So","L"}; +lookup(121238) -> {"So","L"}; +lookup(121239) -> {"So","L"}; +lookup(121240) -> {"So","L"}; +lookup(121241) -> {"So","L"}; +lookup(121242) -> {"So","L"}; +lookup(121243) -> {"So","L"}; +lookup(121244) -> {"So","L"}; +lookup(121245) -> {"So","L"}; +lookup(121246) -> {"So","L"}; +lookup(121247) -> {"So","L"}; +lookup(121248) -> {"So","L"}; +lookup(121249) -> {"So","L"}; +lookup(121250) -> {"So","L"}; +lookup(121251) -> {"So","L"}; +lookup(121252) -> {"So","L"}; +lookup(121253) -> {"So","L"}; +lookup(121254) -> {"So","L"}; +lookup(121255) -> {"So","L"}; +lookup(121256) -> {"So","L"}; +lookup(121257) -> {"So","L"}; +lookup(121258) -> {"So","L"}; +lookup(121259) -> {"So","L"}; +lookup(121260) -> {"So","L"}; +lookup(121261) -> {"So","L"}; +lookup(121262) -> {"So","L"}; +lookup(121263) -> {"So","L"}; +lookup(121264) -> {"So","L"}; +lookup(121265) -> {"So","L"}; +lookup(121266) -> {"So","L"}; +lookup(121267) -> {"So","L"}; +lookup(121268) -> {"So","L"}; +lookup(121269) -> {"So","L"}; +lookup(121270) -> {"So","L"}; +lookup(121271) -> {"So","L"}; +lookup(121272) -> {"So","L"}; +lookup(121273) -> {"So","L"}; +lookup(121274) -> {"So","L"}; +lookup(121275) -> {"So","L"}; +lookup(121276) -> {"So","L"}; +lookup(121277) -> {"So","L"}; +lookup(121278) -> {"So","L"}; +lookup(121279) -> {"So","L"}; +lookup(121280) -> {"So","L"}; +lookup(121281) -> {"So","L"}; +lookup(121282) -> {"So","L"}; +lookup(121283) -> {"So","L"}; +lookup(121284) -> {"So","L"}; +lookup(121285) -> {"So","L"}; 
+lookup(121286) -> {"So","L"}; +lookup(121287) -> {"So","L"}; +lookup(121288) -> {"So","L"}; +lookup(121289) -> {"So","L"}; +lookup(121290) -> {"So","L"}; +lookup(121291) -> {"So","L"}; +lookup(121292) -> {"So","L"}; +lookup(121293) -> {"So","L"}; +lookup(121294) -> {"So","L"}; +lookup(121295) -> {"So","L"}; +lookup(121296) -> {"So","L"}; +lookup(121297) -> {"So","L"}; +lookup(121298) -> {"So","L"}; +lookup(121299) -> {"So","L"}; +lookup(121300) -> {"So","L"}; +lookup(121301) -> {"So","L"}; +lookup(121302) -> {"So","L"}; +lookup(121303) -> {"So","L"}; +lookup(121304) -> {"So","L"}; +lookup(121305) -> {"So","L"}; +lookup(121306) -> {"So","L"}; +lookup(121307) -> {"So","L"}; +lookup(121308) -> {"So","L"}; +lookup(121309) -> {"So","L"}; +lookup(121310) -> {"So","L"}; +lookup(121311) -> {"So","L"}; +lookup(121312) -> {"So","L"}; +lookup(121313) -> {"So","L"}; +lookup(121314) -> {"So","L"}; +lookup(121315) -> {"So","L"}; +lookup(121316) -> {"So","L"}; +lookup(121317) -> {"So","L"}; +lookup(121318) -> {"So","L"}; +lookup(121319) -> {"So","L"}; +lookup(121320) -> {"So","L"}; +lookup(121321) -> {"So","L"}; +lookup(121322) -> {"So","L"}; +lookup(121323) -> {"So","L"}; +lookup(121324) -> {"So","L"}; +lookup(121325) -> {"So","L"}; +lookup(121326) -> {"So","L"}; +lookup(121327) -> {"So","L"}; +lookup(121328) -> {"So","L"}; +lookup(121329) -> {"So","L"}; +lookup(121330) -> {"So","L"}; +lookup(121331) -> {"So","L"}; +lookup(121332) -> {"So","L"}; +lookup(121333) -> {"So","L"}; +lookup(121334) -> {"So","L"}; +lookup(121335) -> {"So","L"}; +lookup(121336) -> {"So","L"}; +lookup(121337) -> {"So","L"}; +lookup(121338) -> {"So","L"}; +lookup(121339) -> {"So","L"}; +lookup(121340) -> {"So","L"}; +lookup(121341) -> {"So","L"}; +lookup(121342) -> {"So","L"}; +lookup(121343) -> {"So","L"}; +lookup(121344) -> {"Mn","NSM"}; +lookup(121345) -> {"Mn","NSM"}; +lookup(121346) -> {"Mn","NSM"}; +lookup(121347) -> {"Mn","NSM"}; +lookup(121348) -> {"Mn","NSM"}; +lookup(121349) -> {"Mn","NSM"}; 
+lookup(121350) -> {"Mn","NSM"}; +lookup(121351) -> {"Mn","NSM"}; +lookup(121352) -> {"Mn","NSM"}; +lookup(121353) -> {"Mn","NSM"}; +lookup(121354) -> {"Mn","NSM"}; +lookup(121355) -> {"Mn","NSM"}; +lookup(121356) -> {"Mn","NSM"}; +lookup(121357) -> {"Mn","NSM"}; +lookup(121358) -> {"Mn","NSM"}; +lookup(121359) -> {"Mn","NSM"}; +lookup(121360) -> {"Mn","NSM"}; +lookup(121361) -> {"Mn","NSM"}; +lookup(121362) -> {"Mn","NSM"}; +lookup(121363) -> {"Mn","NSM"}; +lookup(121364) -> {"Mn","NSM"}; +lookup(121365) -> {"Mn","NSM"}; +lookup(121366) -> {"Mn","NSM"}; +lookup(121367) -> {"Mn","NSM"}; +lookup(121368) -> {"Mn","NSM"}; +lookup(121369) -> {"Mn","NSM"}; +lookup(121370) -> {"Mn","NSM"}; +lookup(121371) -> {"Mn","NSM"}; +lookup(121372) -> {"Mn","NSM"}; +lookup(121373) -> {"Mn","NSM"}; +lookup(121374) -> {"Mn","NSM"}; +lookup(121375) -> {"Mn","NSM"}; +lookup(121376) -> {"Mn","NSM"}; +lookup(121377) -> {"Mn","NSM"}; +lookup(121378) -> {"Mn","NSM"}; +lookup(121379) -> {"Mn","NSM"}; +lookup(121380) -> {"Mn","NSM"}; +lookup(121381) -> {"Mn","NSM"}; +lookup(121382) -> {"Mn","NSM"}; +lookup(121383) -> {"Mn","NSM"}; +lookup(121384) -> {"Mn","NSM"}; +lookup(121385) -> {"Mn","NSM"}; +lookup(121386) -> {"Mn","NSM"}; +lookup(121387) -> {"Mn","NSM"}; +lookup(121388) -> {"Mn","NSM"}; +lookup(121389) -> {"Mn","NSM"}; +lookup(121390) -> {"Mn","NSM"}; +lookup(121391) -> {"Mn","NSM"}; +lookup(121392) -> {"Mn","NSM"}; +lookup(121393) -> {"Mn","NSM"}; +lookup(121394) -> {"Mn","NSM"}; +lookup(121395) -> {"Mn","NSM"}; +lookup(121396) -> {"Mn","NSM"}; +lookup(121397) -> {"Mn","NSM"}; +lookup(121398) -> {"Mn","NSM"}; +lookup(121399) -> {"So","L"}; +lookup(121400) -> {"So","L"}; +lookup(121401) -> {"So","L"}; +lookup(121402) -> {"So","L"}; +lookup(121403) -> {"Mn","NSM"}; +lookup(121404) -> {"Mn","NSM"}; +lookup(121405) -> {"Mn","NSM"}; +lookup(121406) -> {"Mn","NSM"}; +lookup(121407) -> {"Mn","NSM"}; +lookup(121408) -> {"Mn","NSM"}; +lookup(121409) -> {"Mn","NSM"}; +lookup(121410) -> 
{"Mn","NSM"}; +lookup(121411) -> {"Mn","NSM"}; +lookup(121412) -> {"Mn","NSM"}; +lookup(121413) -> {"Mn","NSM"}; +lookup(121414) -> {"Mn","NSM"}; +lookup(121415) -> {"Mn","NSM"}; +lookup(121416) -> {"Mn","NSM"}; +lookup(121417) -> {"Mn","NSM"}; +lookup(121418) -> {"Mn","NSM"}; +lookup(121419) -> {"Mn","NSM"}; +lookup(121420) -> {"Mn","NSM"}; +lookup(121421) -> {"Mn","NSM"}; +lookup(121422) -> {"Mn","NSM"}; +lookup(121423) -> {"Mn","NSM"}; +lookup(121424) -> {"Mn","NSM"}; +lookup(121425) -> {"Mn","NSM"}; +lookup(121426) -> {"Mn","NSM"}; +lookup(121427) -> {"Mn","NSM"}; +lookup(121428) -> {"Mn","NSM"}; +lookup(121429) -> {"Mn","NSM"}; +lookup(121430) -> {"Mn","NSM"}; +lookup(121431) -> {"Mn","NSM"}; +lookup(121432) -> {"Mn","NSM"}; +lookup(121433) -> {"Mn","NSM"}; +lookup(121434) -> {"Mn","NSM"}; +lookup(121435) -> {"Mn","NSM"}; +lookup(121436) -> {"Mn","NSM"}; +lookup(121437) -> {"Mn","NSM"}; +lookup(121438) -> {"Mn","NSM"}; +lookup(121439) -> {"Mn","NSM"}; +lookup(121440) -> {"Mn","NSM"}; +lookup(121441) -> {"Mn","NSM"}; +lookup(121442) -> {"Mn","NSM"}; +lookup(121443) -> {"Mn","NSM"}; +lookup(121444) -> {"Mn","NSM"}; +lookup(121445) -> {"Mn","NSM"}; +lookup(121446) -> {"Mn","NSM"}; +lookup(121447) -> {"Mn","NSM"}; +lookup(121448) -> {"Mn","NSM"}; +lookup(121449) -> {"Mn","NSM"}; +lookup(121450) -> {"Mn","NSM"}; +lookup(121451) -> {"Mn","NSM"}; +lookup(121452) -> {"Mn","NSM"}; +lookup(121453) -> {"So","L"}; +lookup(121454) -> {"So","L"}; +lookup(121455) -> {"So","L"}; +lookup(121456) -> {"So","L"}; +lookup(121457) -> {"So","L"}; +lookup(121458) -> {"So","L"}; +lookup(121459) -> {"So","L"}; +lookup(121460) -> {"So","L"}; +lookup(121461) -> {"Mn","NSM"}; +lookup(121462) -> {"So","L"}; +lookup(121463) -> {"So","L"}; +lookup(121464) -> {"So","L"}; +lookup(121465) -> {"So","L"}; +lookup(121466) -> {"So","L"}; +lookup(121467) -> {"So","L"}; +lookup(121468) -> {"So","L"}; +lookup(121469) -> {"So","L"}; +lookup(121470) -> {"So","L"}; +lookup(121471) -> {"So","L"}; 
+lookup(121472) -> {"So","L"}; +lookup(121473) -> {"So","L"}; +lookup(121474) -> {"So","L"}; +lookup(121475) -> {"So","L"}; +lookup(121476) -> {"Mn","NSM"}; +lookup(121477) -> {"So","L"}; +lookup(121478) -> {"So","L"}; +lookup(121479) -> {"Po","L"}; +lookup(121480) -> {"Po","L"}; +lookup(121481) -> {"Po","L"}; +lookup(121482) -> {"Po","L"}; +lookup(121483) -> {"Po","L"}; +lookup(121499) -> {"Mn","NSM"}; +lookup(121500) -> {"Mn","NSM"}; +lookup(121501) -> {"Mn","NSM"}; +lookup(121502) -> {"Mn","NSM"}; +lookup(121503) -> {"Mn","NSM"}; +lookup(121505) -> {"Mn","NSM"}; +lookup(121506) -> {"Mn","NSM"}; +lookup(121507) -> {"Mn","NSM"}; +lookup(121508) -> {"Mn","NSM"}; +lookup(121509) -> {"Mn","NSM"}; +lookup(121510) -> {"Mn","NSM"}; +lookup(121511) -> {"Mn","NSM"}; +lookup(121512) -> {"Mn","NSM"}; +lookup(121513) -> {"Mn","NSM"}; +lookup(121514) -> {"Mn","NSM"}; +lookup(121515) -> {"Mn","NSM"}; +lookup(121516) -> {"Mn","NSM"}; +lookup(121517) -> {"Mn","NSM"}; +lookup(121518) -> {"Mn","NSM"}; +lookup(121519) -> {"Mn","NSM"}; +lookup(122880) -> {"Mn","NSM"}; +lookup(122881) -> {"Mn","NSM"}; +lookup(122882) -> {"Mn","NSM"}; +lookup(122883) -> {"Mn","NSM"}; +lookup(122884) -> {"Mn","NSM"}; +lookup(122885) -> {"Mn","NSM"}; +lookup(122886) -> {"Mn","NSM"}; +lookup(122888) -> {"Mn","NSM"}; +lookup(122889) -> {"Mn","NSM"}; +lookup(122890) -> {"Mn","NSM"}; +lookup(122891) -> {"Mn","NSM"}; +lookup(122892) -> {"Mn","NSM"}; +lookup(122893) -> {"Mn","NSM"}; +lookup(122894) -> {"Mn","NSM"}; +lookup(122895) -> {"Mn","NSM"}; +lookup(122896) -> {"Mn","NSM"}; +lookup(122897) -> {"Mn","NSM"}; +lookup(122898) -> {"Mn","NSM"}; +lookup(122899) -> {"Mn","NSM"}; +lookup(122900) -> {"Mn","NSM"}; +lookup(122901) -> {"Mn","NSM"}; +lookup(122902) -> {"Mn","NSM"}; +lookup(122903) -> {"Mn","NSM"}; +lookup(122904) -> {"Mn","NSM"}; +lookup(122907) -> {"Mn","NSM"}; +lookup(122908) -> {"Mn","NSM"}; +lookup(122909) -> {"Mn","NSM"}; +lookup(122910) -> {"Mn","NSM"}; +lookup(122911) -> {"Mn","NSM"}; 
+lookup(122912) -> {"Mn","NSM"}; +lookup(122913) -> {"Mn","NSM"}; +lookup(122915) -> {"Mn","NSM"}; +lookup(122916) -> {"Mn","NSM"}; +lookup(122918) -> {"Mn","NSM"}; +lookup(122919) -> {"Mn","NSM"}; +lookup(122920) -> {"Mn","NSM"}; +lookup(122921) -> {"Mn","NSM"}; +lookup(122922) -> {"Mn","NSM"}; +lookup(123136) -> {"Lo","L"}; +lookup(123137) -> {"Lo","L"}; +lookup(123138) -> {"Lo","L"}; +lookup(123139) -> {"Lo","L"}; +lookup(123140) -> {"Lo","L"}; +lookup(123141) -> {"Lo","L"}; +lookup(123142) -> {"Lo","L"}; +lookup(123143) -> {"Lo","L"}; +lookup(123144) -> {"Lo","L"}; +lookup(123145) -> {"Lo","L"}; +lookup(123146) -> {"Lo","L"}; +lookup(123147) -> {"Lo","L"}; +lookup(123148) -> {"Lo","L"}; +lookup(123149) -> {"Lo","L"}; +lookup(123150) -> {"Lo","L"}; +lookup(123151) -> {"Lo","L"}; +lookup(123152) -> {"Lo","L"}; +lookup(123153) -> {"Lo","L"}; +lookup(123154) -> {"Lo","L"}; +lookup(123155) -> {"Lo","L"}; +lookup(123156) -> {"Lo","L"}; +lookup(123157) -> {"Lo","L"}; +lookup(123158) -> {"Lo","L"}; +lookup(123159) -> {"Lo","L"}; +lookup(123160) -> {"Lo","L"}; +lookup(123161) -> {"Lo","L"}; +lookup(123162) -> {"Lo","L"}; +lookup(123163) -> {"Lo","L"}; +lookup(123164) -> {"Lo","L"}; +lookup(123165) -> {"Lo","L"}; +lookup(123166) -> {"Lo","L"}; +lookup(123167) -> {"Lo","L"}; +lookup(123168) -> {"Lo","L"}; +lookup(123169) -> {"Lo","L"}; +lookup(123170) -> {"Lo","L"}; +lookup(123171) -> {"Lo","L"}; +lookup(123172) -> {"Lo","L"}; +lookup(123173) -> {"Lo","L"}; +lookup(123174) -> {"Lo","L"}; +lookup(123175) -> {"Lo","L"}; +lookup(123176) -> {"Lo","L"}; +lookup(123177) -> {"Lo","L"}; +lookup(123178) -> {"Lo","L"}; +lookup(123179) -> {"Lo","L"}; +lookup(123180) -> {"Lo","L"}; +lookup(123184) -> {"Mn","NSM"}; +lookup(123185) -> {"Mn","NSM"}; +lookup(123186) -> {"Mn","NSM"}; +lookup(123187) -> {"Mn","NSM"}; +lookup(123188) -> {"Mn","NSM"}; +lookup(123189) -> {"Mn","NSM"}; +lookup(123190) -> {"Mn","NSM"}; +lookup(123191) -> {"Lm","L"}; +lookup(123192) -> {"Lm","L"}; 
+lookup(123193) -> {"Lm","L"}; +lookup(123194) -> {"Lm","L"}; +lookup(123195) -> {"Lm","L"}; +lookup(123196) -> {"Lm","L"}; +lookup(123197) -> {"Lm","L"}; +lookup(123200) -> {"Nd","L"}; +lookup(123201) -> {"Nd","L"}; +lookup(123202) -> {"Nd","L"}; +lookup(123203) -> {"Nd","L"}; +lookup(123204) -> {"Nd","L"}; +lookup(123205) -> {"Nd","L"}; +lookup(123206) -> {"Nd","L"}; +lookup(123207) -> {"Nd","L"}; +lookup(123208) -> {"Nd","L"}; +lookup(123209) -> {"Nd","L"}; +lookup(123214) -> {"Lo","L"}; +lookup(123215) -> {"So","L"}; +lookup(123584) -> {"Lo","L"}; +lookup(123585) -> {"Lo","L"}; +lookup(123586) -> {"Lo","L"}; +lookup(123587) -> {"Lo","L"}; +lookup(123588) -> {"Lo","L"}; +lookup(123589) -> {"Lo","L"}; +lookup(123590) -> {"Lo","L"}; +lookup(123591) -> {"Lo","L"}; +lookup(123592) -> {"Lo","L"}; +lookup(123593) -> {"Lo","L"}; +lookup(123594) -> {"Lo","L"}; +lookup(123595) -> {"Lo","L"}; +lookup(123596) -> {"Lo","L"}; +lookup(123597) -> {"Lo","L"}; +lookup(123598) -> {"Lo","L"}; +lookup(123599) -> {"Lo","L"}; +lookup(123600) -> {"Lo","L"}; +lookup(123601) -> {"Lo","L"}; +lookup(123602) -> {"Lo","L"}; +lookup(123603) -> {"Lo","L"}; +lookup(123604) -> {"Lo","L"}; +lookup(123605) -> {"Lo","L"}; +lookup(123606) -> {"Lo","L"}; +lookup(123607) -> {"Lo","L"}; +lookup(123608) -> {"Lo","L"}; +lookup(123609) -> {"Lo","L"}; +lookup(123610) -> {"Lo","L"}; +lookup(123611) -> {"Lo","L"}; +lookup(123612) -> {"Lo","L"}; +lookup(123613) -> {"Lo","L"}; +lookup(123614) -> {"Lo","L"}; +lookup(123615) -> {"Lo","L"}; +lookup(123616) -> {"Lo","L"}; +lookup(123617) -> {"Lo","L"}; +lookup(123618) -> {"Lo","L"}; +lookup(123619) -> {"Lo","L"}; +lookup(123620) -> {"Lo","L"}; +lookup(123621) -> {"Lo","L"}; +lookup(123622) -> {"Lo","L"}; +lookup(123623) -> {"Lo","L"}; +lookup(123624) -> {"Lo","L"}; +lookup(123625) -> {"Lo","L"}; +lookup(123626) -> {"Lo","L"}; +lookup(123627) -> {"Lo","L"}; +lookup(123628) -> {"Mn","NSM"}; +lookup(123629) -> {"Mn","NSM"}; +lookup(123630) -> {"Mn","NSM"}; 
+lookup(123631) -> {"Mn","NSM"}; +lookup(123632) -> {"Nd","L"}; +lookup(123633) -> {"Nd","L"}; +lookup(123634) -> {"Nd","L"}; +lookup(123635) -> {"Nd","L"}; +lookup(123636) -> {"Nd","L"}; +lookup(123637) -> {"Nd","L"}; +lookup(123638) -> {"Nd","L"}; +lookup(123639) -> {"Nd","L"}; +lookup(123640) -> {"Nd","L"}; +lookup(123641) -> {"Nd","L"}; +lookup(123647) -> {"Sc","ET"}; +lookup(124928) -> {"Lo","R"}; +lookup(124929) -> {"Lo","R"}; +lookup(124930) -> {"Lo","R"}; +lookup(124931) -> {"Lo","R"}; +lookup(124932) -> {"Lo","R"}; +lookup(124933) -> {"Lo","R"}; +lookup(124934) -> {"Lo","R"}; +lookup(124935) -> {"Lo","R"}; +lookup(124936) -> {"Lo","R"}; +lookup(124937) -> {"Lo","R"}; +lookup(124938) -> {"Lo","R"}; +lookup(124939) -> {"Lo","R"}; +lookup(124940) -> {"Lo","R"}; +lookup(124941) -> {"Lo","R"}; +lookup(124942) -> {"Lo","R"}; +lookup(124943) -> {"Lo","R"}; +lookup(124944) -> {"Lo","R"}; +lookup(124945) -> {"Lo","R"}; +lookup(124946) -> {"Lo","R"}; +lookup(124947) -> {"Lo","R"}; +lookup(124948) -> {"Lo","R"}; +lookup(124949) -> {"Lo","R"}; +lookup(124950) -> {"Lo","R"}; +lookup(124951) -> {"Lo","R"}; +lookup(124952) -> {"Lo","R"}; +lookup(124953) -> {"Lo","R"}; +lookup(124954) -> {"Lo","R"}; +lookup(124955) -> {"Lo","R"}; +lookup(124956) -> {"Lo","R"}; +lookup(124957) -> {"Lo","R"}; +lookup(124958) -> {"Lo","R"}; +lookup(124959) -> {"Lo","R"}; +lookup(124960) -> {"Lo","R"}; +lookup(124961) -> {"Lo","R"}; +lookup(124962) -> {"Lo","R"}; +lookup(124963) -> {"Lo","R"}; +lookup(124964) -> {"Lo","R"}; +lookup(124965) -> {"Lo","R"}; +lookup(124966) -> {"Lo","R"}; +lookup(124967) -> {"Lo","R"}; +lookup(124968) -> {"Lo","R"}; +lookup(124969) -> {"Lo","R"}; +lookup(124970) -> {"Lo","R"}; +lookup(124971) -> {"Lo","R"}; +lookup(124972) -> {"Lo","R"}; +lookup(124973) -> {"Lo","R"}; +lookup(124974) -> {"Lo","R"}; +lookup(124975) -> {"Lo","R"}; +lookup(124976) -> {"Lo","R"}; +lookup(124977) -> {"Lo","R"}; +lookup(124978) -> {"Lo","R"}; +lookup(124979) -> {"Lo","R"}; 
+lookup(124980) -> {"Lo","R"}; +lookup(124981) -> {"Lo","R"}; +lookup(124982) -> {"Lo","R"}; +lookup(124983) -> {"Lo","R"}; +lookup(124984) -> {"Lo","R"}; +lookup(124985) -> {"Lo","R"}; +lookup(124986) -> {"Lo","R"}; +lookup(124987) -> {"Lo","R"}; +lookup(124988) -> {"Lo","R"}; +lookup(124989) -> {"Lo","R"}; +lookup(124990) -> {"Lo","R"}; +lookup(124991) -> {"Lo","R"}; +lookup(124992) -> {"Lo","R"}; +lookup(124993) -> {"Lo","R"}; +lookup(124994) -> {"Lo","R"}; +lookup(124995) -> {"Lo","R"}; +lookup(124996) -> {"Lo","R"}; +lookup(124997) -> {"Lo","R"}; +lookup(124998) -> {"Lo","R"}; +lookup(124999) -> {"Lo","R"}; +lookup(125000) -> {"Lo","R"}; +lookup(125001) -> {"Lo","R"}; +lookup(125002) -> {"Lo","R"}; +lookup(125003) -> {"Lo","R"}; +lookup(125004) -> {"Lo","R"}; +lookup(125005) -> {"Lo","R"}; +lookup(125006) -> {"Lo","R"}; +lookup(125007) -> {"Lo","R"}; +lookup(125008) -> {"Lo","R"}; +lookup(125009) -> {"Lo","R"}; +lookup(125010) -> {"Lo","R"}; +lookup(125011) -> {"Lo","R"}; +lookup(125012) -> {"Lo","R"}; +lookup(125013) -> {"Lo","R"}; +lookup(125014) -> {"Lo","R"}; +lookup(125015) -> {"Lo","R"}; +lookup(125016) -> {"Lo","R"}; +lookup(125017) -> {"Lo","R"}; +lookup(125018) -> {"Lo","R"}; +lookup(125019) -> {"Lo","R"}; +lookup(125020) -> {"Lo","R"}; +lookup(125021) -> {"Lo","R"}; +lookup(125022) -> {"Lo","R"}; +lookup(125023) -> {"Lo","R"}; +lookup(125024) -> {"Lo","R"}; +lookup(125025) -> {"Lo","R"}; +lookup(125026) -> {"Lo","R"}; +lookup(125027) -> {"Lo","R"}; +lookup(125028) -> {"Lo","R"}; +lookup(125029) -> {"Lo","R"}; +lookup(125030) -> {"Lo","R"}; +lookup(125031) -> {"Lo","R"}; +lookup(125032) -> {"Lo","R"}; +lookup(125033) -> {"Lo","R"}; +lookup(125034) -> {"Lo","R"}; +lookup(125035) -> {"Lo","R"}; +lookup(125036) -> {"Lo","R"}; +lookup(125037) -> {"Lo","R"}; +lookup(125038) -> {"Lo","R"}; +lookup(125039) -> {"Lo","R"}; +lookup(125040) -> {"Lo","R"}; +lookup(125041) -> {"Lo","R"}; +lookup(125042) -> {"Lo","R"}; +lookup(125043) -> {"Lo","R"}; +lookup(125044) 
-> {"Lo","R"}; +lookup(125045) -> {"Lo","R"}; +lookup(125046) -> {"Lo","R"}; +lookup(125047) -> {"Lo","R"}; +lookup(125048) -> {"Lo","R"}; +lookup(125049) -> {"Lo","R"}; +lookup(125050) -> {"Lo","R"}; +lookup(125051) -> {"Lo","R"}; +lookup(125052) -> {"Lo","R"}; +lookup(125053) -> {"Lo","R"}; +lookup(125054) -> {"Lo","R"}; +lookup(125055) -> {"Lo","R"}; +lookup(125056) -> {"Lo","R"}; +lookup(125057) -> {"Lo","R"}; +lookup(125058) -> {"Lo","R"}; +lookup(125059) -> {"Lo","R"}; +lookup(125060) -> {"Lo","R"}; +lookup(125061) -> {"Lo","R"}; +lookup(125062) -> {"Lo","R"}; +lookup(125063) -> {"Lo","R"}; +lookup(125064) -> {"Lo","R"}; +lookup(125065) -> {"Lo","R"}; +lookup(125066) -> {"Lo","R"}; +lookup(125067) -> {"Lo","R"}; +lookup(125068) -> {"Lo","R"}; +lookup(125069) -> {"Lo","R"}; +lookup(125070) -> {"Lo","R"}; +lookup(125071) -> {"Lo","R"}; +lookup(125072) -> {"Lo","R"}; +lookup(125073) -> {"Lo","R"}; +lookup(125074) -> {"Lo","R"}; +lookup(125075) -> {"Lo","R"}; +lookup(125076) -> {"Lo","R"}; +lookup(125077) -> {"Lo","R"}; +lookup(125078) -> {"Lo","R"}; +lookup(125079) -> {"Lo","R"}; +lookup(125080) -> {"Lo","R"}; +lookup(125081) -> {"Lo","R"}; +lookup(125082) -> {"Lo","R"}; +lookup(125083) -> {"Lo","R"}; +lookup(125084) -> {"Lo","R"}; +lookup(125085) -> {"Lo","R"}; +lookup(125086) -> {"Lo","R"}; +lookup(125087) -> {"Lo","R"}; +lookup(125088) -> {"Lo","R"}; +lookup(125089) -> {"Lo","R"}; +lookup(125090) -> {"Lo","R"}; +lookup(125091) -> {"Lo","R"}; +lookup(125092) -> {"Lo","R"}; +lookup(125093) -> {"Lo","R"}; +lookup(125094) -> {"Lo","R"}; +lookup(125095) -> {"Lo","R"}; +lookup(125096) -> {"Lo","R"}; +lookup(125097) -> {"Lo","R"}; +lookup(125098) -> {"Lo","R"}; +lookup(125099) -> {"Lo","R"}; +lookup(125100) -> {"Lo","R"}; +lookup(125101) -> {"Lo","R"}; +lookup(125102) -> {"Lo","R"}; +lookup(125103) -> {"Lo","R"}; +lookup(125104) -> {"Lo","R"}; +lookup(125105) -> {"Lo","R"}; +lookup(125106) -> {"Lo","R"}; +lookup(125107) -> {"Lo","R"}; +lookup(125108) -> {"Lo","R"}; 
+lookup(125109) -> {"Lo","R"}; +lookup(125110) -> {"Lo","R"}; +lookup(125111) -> {"Lo","R"}; +lookup(125112) -> {"Lo","R"}; +lookup(125113) -> {"Lo","R"}; +lookup(125114) -> {"Lo","R"}; +lookup(125115) -> {"Lo","R"}; +lookup(125116) -> {"Lo","R"}; +lookup(125117) -> {"Lo","R"}; +lookup(125118) -> {"Lo","R"}; +lookup(125119) -> {"Lo","R"}; +lookup(125120) -> {"Lo","R"}; +lookup(125121) -> {"Lo","R"}; +lookup(125122) -> {"Lo","R"}; +lookup(125123) -> {"Lo","R"}; +lookup(125124) -> {"Lo","R"}; +lookup(125127) -> {"No","R"}; +lookup(125128) -> {"No","R"}; +lookup(125129) -> {"No","R"}; +lookup(125130) -> {"No","R"}; +lookup(125131) -> {"No","R"}; +lookup(125132) -> {"No","R"}; +lookup(125133) -> {"No","R"}; +lookup(125134) -> {"No","R"}; +lookup(125135) -> {"No","R"}; +lookup(125136) -> {"Mn","NSM"}; +lookup(125137) -> {"Mn","NSM"}; +lookup(125138) -> {"Mn","NSM"}; +lookup(125139) -> {"Mn","NSM"}; +lookup(125140) -> {"Mn","NSM"}; +lookup(125141) -> {"Mn","NSM"}; +lookup(125142) -> {"Mn","NSM"}; +lookup(125184) -> {"Lu","R"}; +lookup(125185) -> {"Lu","R"}; +lookup(125186) -> {"Lu","R"}; +lookup(125187) -> {"Lu","R"}; +lookup(125188) -> {"Lu","R"}; +lookup(125189) -> {"Lu","R"}; +lookup(125190) -> {"Lu","R"}; +lookup(125191) -> {"Lu","R"}; +lookup(125192) -> {"Lu","R"}; +lookup(125193) -> {"Lu","R"}; +lookup(125194) -> {"Lu","R"}; +lookup(125195) -> {"Lu","R"}; +lookup(125196) -> {"Lu","R"}; +lookup(125197) -> {"Lu","R"}; +lookup(125198) -> {"Lu","R"}; +lookup(125199) -> {"Lu","R"}; +lookup(125200) -> {"Lu","R"}; +lookup(125201) -> {"Lu","R"}; +lookup(125202) -> {"Lu","R"}; +lookup(125203) -> {"Lu","R"}; +lookup(125204) -> {"Lu","R"}; +lookup(125205) -> {"Lu","R"}; +lookup(125206) -> {"Lu","R"}; +lookup(125207) -> {"Lu","R"}; +lookup(125208) -> {"Lu","R"}; +lookup(125209) -> {"Lu","R"}; +lookup(125210) -> {"Lu","R"}; +lookup(125211) -> {"Lu","R"}; +lookup(125212) -> {"Lu","R"}; +lookup(125213) -> {"Lu","R"}; +lookup(125214) -> {"Lu","R"}; +lookup(125215) -> {"Lu","R"}; 
+lookup(125216) -> {"Lu","R"}; +lookup(125217) -> {"Lu","R"}; +lookup(125218) -> {"Ll","R"}; +lookup(125219) -> {"Ll","R"}; +lookup(125220) -> {"Ll","R"}; +lookup(125221) -> {"Ll","R"}; +lookup(125222) -> {"Ll","R"}; +lookup(125223) -> {"Ll","R"}; +lookup(125224) -> {"Ll","R"}; +lookup(125225) -> {"Ll","R"}; +lookup(125226) -> {"Ll","R"}; +lookup(125227) -> {"Ll","R"}; +lookup(125228) -> {"Ll","R"}; +lookup(125229) -> {"Ll","R"}; +lookup(125230) -> {"Ll","R"}; +lookup(125231) -> {"Ll","R"}; +lookup(125232) -> {"Ll","R"}; +lookup(125233) -> {"Ll","R"}; +lookup(125234) -> {"Ll","R"}; +lookup(125235) -> {"Ll","R"}; +lookup(125236) -> {"Ll","R"}; +lookup(125237) -> {"Ll","R"}; +lookup(125238) -> {"Ll","R"}; +lookup(125239) -> {"Ll","R"}; +lookup(125240) -> {"Ll","R"}; +lookup(125241) -> {"Ll","R"}; +lookup(125242) -> {"Ll","R"}; +lookup(125243) -> {"Ll","R"}; +lookup(125244) -> {"Ll","R"}; +lookup(125245) -> {"Ll","R"}; +lookup(125246) -> {"Ll","R"}; +lookup(125247) -> {"Ll","R"}; +lookup(125248) -> {"Ll","R"}; +lookup(125249) -> {"Ll","R"}; +lookup(125250) -> {"Ll","R"}; +lookup(125251) -> {"Ll","R"}; +lookup(125252) -> {"Mn","NSM"}; +lookup(125253) -> {"Mn","NSM"}; +lookup(125254) -> {"Mn","NSM"}; +lookup(125255) -> {"Mn","NSM"}; +lookup(125256) -> {"Mn","NSM"}; +lookup(125257) -> {"Mn","NSM"}; +lookup(125258) -> {"Mn","NSM"}; +lookup(125259) -> {"Lm","R"}; +lookup(125264) -> {"Nd","R"}; +lookup(125265) -> {"Nd","R"}; +lookup(125266) -> {"Nd","R"}; +lookup(125267) -> {"Nd","R"}; +lookup(125268) -> {"Nd","R"}; +lookup(125269) -> {"Nd","R"}; +lookup(125270) -> {"Nd","R"}; +lookup(125271) -> {"Nd","R"}; +lookup(125272) -> {"Nd","R"}; +lookup(125273) -> {"Nd","R"}; +lookup(125278) -> {"Po","R"}; +lookup(125279) -> {"Po","R"}; +lookup(126065) -> {"No","AL"}; +lookup(126066) -> {"No","AL"}; +lookup(126067) -> {"No","AL"}; +lookup(126068) -> {"No","AL"}; +lookup(126069) -> {"No","AL"}; +lookup(126070) -> {"No","AL"}; +lookup(126071) -> {"No","AL"}; +lookup(126072) -> 
{"No","AL"}; +lookup(126073) -> {"No","AL"}; +lookup(126074) -> {"No","AL"}; +lookup(126075) -> {"No","AL"}; +lookup(126076) -> {"No","AL"}; +lookup(126077) -> {"No","AL"}; +lookup(126078) -> {"No","AL"}; +lookup(126079) -> {"No","AL"}; +lookup(126080) -> {"No","AL"}; +lookup(126081) -> {"No","AL"}; +lookup(126082) -> {"No","AL"}; +lookup(126083) -> {"No","AL"}; +lookup(126084) -> {"No","AL"}; +lookup(126085) -> {"No","AL"}; +lookup(126086) -> {"No","AL"}; +lookup(126087) -> {"No","AL"}; +lookup(126088) -> {"No","AL"}; +lookup(126089) -> {"No","AL"}; +lookup(126090) -> {"No","AL"}; +lookup(126091) -> {"No","AL"}; +lookup(126092) -> {"No","AL"}; +lookup(126093) -> {"No","AL"}; +lookup(126094) -> {"No","AL"}; +lookup(126095) -> {"No","AL"}; +lookup(126096) -> {"No","AL"}; +lookup(126097) -> {"No","AL"}; +lookup(126098) -> {"No","AL"}; +lookup(126099) -> {"No","AL"}; +lookup(126100) -> {"No","AL"}; +lookup(126101) -> {"No","AL"}; +lookup(126102) -> {"No","AL"}; +lookup(126103) -> {"No","AL"}; +lookup(126104) -> {"No","AL"}; +lookup(126105) -> {"No","AL"}; +lookup(126106) -> {"No","AL"}; +lookup(126107) -> {"No","AL"}; +lookup(126108) -> {"No","AL"}; +lookup(126109) -> {"No","AL"}; +lookup(126110) -> {"No","AL"}; +lookup(126111) -> {"No","AL"}; +lookup(126112) -> {"No","AL"}; +lookup(126113) -> {"No","AL"}; +lookup(126114) -> {"No","AL"}; +lookup(126115) -> {"No","AL"}; +lookup(126116) -> {"No","AL"}; +lookup(126117) -> {"No","AL"}; +lookup(126118) -> {"No","AL"}; +lookup(126119) -> {"No","AL"}; +lookup(126120) -> {"No","AL"}; +lookup(126121) -> {"No","AL"}; +lookup(126122) -> {"No","AL"}; +lookup(126123) -> {"No","AL"}; +lookup(126124) -> {"So","AL"}; +lookup(126125) -> {"No","AL"}; +lookup(126126) -> {"No","AL"}; +lookup(126127) -> {"No","AL"}; +lookup(126128) -> {"Sc","AL"}; +lookup(126129) -> {"No","AL"}; +lookup(126130) -> {"No","AL"}; +lookup(126131) -> {"No","AL"}; +lookup(126132) -> {"No","AL"}; +lookup(126209) -> {"No","AL"}; +lookup(126210) -> {"No","AL"}; 
+lookup(126211) -> {"No","AL"}; +lookup(126212) -> {"No","AL"}; +lookup(126213) -> {"No","AL"}; +lookup(126214) -> {"No","AL"}; +lookup(126215) -> {"No","AL"}; +lookup(126216) -> {"No","AL"}; +lookup(126217) -> {"No","AL"}; +lookup(126218) -> {"No","AL"}; +lookup(126219) -> {"No","AL"}; +lookup(126220) -> {"No","AL"}; +lookup(126221) -> {"No","AL"}; +lookup(126222) -> {"No","AL"}; +lookup(126223) -> {"No","AL"}; +lookup(126224) -> {"No","AL"}; +lookup(126225) -> {"No","AL"}; +lookup(126226) -> {"No","AL"}; +lookup(126227) -> {"No","AL"}; +lookup(126228) -> {"No","AL"}; +lookup(126229) -> {"No","AL"}; +lookup(126230) -> {"No","AL"}; +lookup(126231) -> {"No","AL"}; +lookup(126232) -> {"No","AL"}; +lookup(126233) -> {"No","AL"}; +lookup(126234) -> {"No","AL"}; +lookup(126235) -> {"No","AL"}; +lookup(126236) -> {"No","AL"}; +lookup(126237) -> {"No","AL"}; +lookup(126238) -> {"No","AL"}; +lookup(126239) -> {"No","AL"}; +lookup(126240) -> {"No","AL"}; +lookup(126241) -> {"No","AL"}; +lookup(126242) -> {"No","AL"}; +lookup(126243) -> {"No","AL"}; +lookup(126244) -> {"No","AL"}; +lookup(126245) -> {"No","AL"}; +lookup(126246) -> {"No","AL"}; +lookup(126247) -> {"No","AL"}; +lookup(126248) -> {"No","AL"}; +lookup(126249) -> {"No","AL"}; +lookup(126250) -> {"No","AL"}; +lookup(126251) -> {"No","AL"}; +lookup(126252) -> {"No","AL"}; +lookup(126253) -> {"No","AL"}; +lookup(126254) -> {"So","AL"}; +lookup(126255) -> {"No","AL"}; +lookup(126256) -> {"No","AL"}; +lookup(126257) -> {"No","AL"}; +lookup(126258) -> {"No","AL"}; +lookup(126259) -> {"No","AL"}; +lookup(126260) -> {"No","AL"}; +lookup(126261) -> {"No","AL"}; +lookup(126262) -> {"No","AL"}; +lookup(126263) -> {"No","AL"}; +lookup(126264) -> {"No","AL"}; +lookup(126265) -> {"No","AL"}; +lookup(126266) -> {"No","AL"}; +lookup(126267) -> {"No","AL"}; +lookup(126268) -> {"No","AL"}; +lookup(126269) -> {"No","AL"}; +lookup(126464) -> {"Lo","AL"}; +lookup(126465) -> {"Lo","AL"}; +lookup(126466) -> {"Lo","AL"}; +lookup(126467) 
-> {"Lo","AL"}; +lookup(126469) -> {"Lo","AL"}; +lookup(126470) -> {"Lo","AL"}; +lookup(126471) -> {"Lo","AL"}; +lookup(126472) -> {"Lo","AL"}; +lookup(126473) -> {"Lo","AL"}; +lookup(126474) -> {"Lo","AL"}; +lookup(126475) -> {"Lo","AL"}; +lookup(126476) -> {"Lo","AL"}; +lookup(126477) -> {"Lo","AL"}; +lookup(126478) -> {"Lo","AL"}; +lookup(126479) -> {"Lo","AL"}; +lookup(126480) -> {"Lo","AL"}; +lookup(126481) -> {"Lo","AL"}; +lookup(126482) -> {"Lo","AL"}; +lookup(126483) -> {"Lo","AL"}; +lookup(126484) -> {"Lo","AL"}; +lookup(126485) -> {"Lo","AL"}; +lookup(126486) -> {"Lo","AL"}; +lookup(126487) -> {"Lo","AL"}; +lookup(126488) -> {"Lo","AL"}; +lookup(126489) -> {"Lo","AL"}; +lookup(126490) -> {"Lo","AL"}; +lookup(126491) -> {"Lo","AL"}; +lookup(126492) -> {"Lo","AL"}; +lookup(126493) -> {"Lo","AL"}; +lookup(126494) -> {"Lo","AL"}; +lookup(126495) -> {"Lo","AL"}; +lookup(126497) -> {"Lo","AL"}; +lookup(126498) -> {"Lo","AL"}; +lookup(126500) -> {"Lo","AL"}; +lookup(126503) -> {"Lo","AL"}; +lookup(126505) -> {"Lo","AL"}; +lookup(126506) -> {"Lo","AL"}; +lookup(126507) -> {"Lo","AL"}; +lookup(126508) -> {"Lo","AL"}; +lookup(126509) -> {"Lo","AL"}; +lookup(126510) -> {"Lo","AL"}; +lookup(126511) -> {"Lo","AL"}; +lookup(126512) -> {"Lo","AL"}; +lookup(126513) -> {"Lo","AL"}; +lookup(126514) -> {"Lo","AL"}; +lookup(126516) -> {"Lo","AL"}; +lookup(126517) -> {"Lo","AL"}; +lookup(126518) -> {"Lo","AL"}; +lookup(126519) -> {"Lo","AL"}; +lookup(126521) -> {"Lo","AL"}; +lookup(126523) -> {"Lo","AL"}; +lookup(126530) -> {"Lo","AL"}; +lookup(126535) -> {"Lo","AL"}; +lookup(126537) -> {"Lo","AL"}; +lookup(126539) -> {"Lo","AL"}; +lookup(126541) -> {"Lo","AL"}; +lookup(126542) -> {"Lo","AL"}; +lookup(126543) -> {"Lo","AL"}; +lookup(126545) -> {"Lo","AL"}; +lookup(126546) -> {"Lo","AL"}; +lookup(126548) -> {"Lo","AL"}; +lookup(126551) -> {"Lo","AL"}; +lookup(126553) -> {"Lo","AL"}; +lookup(126555) -> {"Lo","AL"}; +lookup(126557) -> {"Lo","AL"}; +lookup(126559) -> {"Lo","AL"}; 
+lookup(126561) -> {"Lo","AL"}; +lookup(126562) -> {"Lo","AL"}; +lookup(126564) -> {"Lo","AL"}; +lookup(126567) -> {"Lo","AL"}; +lookup(126568) -> {"Lo","AL"}; +lookup(126569) -> {"Lo","AL"}; +lookup(126570) -> {"Lo","AL"}; +lookup(126572) -> {"Lo","AL"}; +lookup(126573) -> {"Lo","AL"}; +lookup(126574) -> {"Lo","AL"}; +lookup(126575) -> {"Lo","AL"}; +lookup(126576) -> {"Lo","AL"}; +lookup(126577) -> {"Lo","AL"}; +lookup(126578) -> {"Lo","AL"}; +lookup(126580) -> {"Lo","AL"}; +lookup(126581) -> {"Lo","AL"}; +lookup(126582) -> {"Lo","AL"}; +lookup(126583) -> {"Lo","AL"}; +lookup(126585) -> {"Lo","AL"}; +lookup(126586) -> {"Lo","AL"}; +lookup(126587) -> {"Lo","AL"}; +lookup(126588) -> {"Lo","AL"}; +lookup(126590) -> {"Lo","AL"}; +lookup(126592) -> {"Lo","AL"}; +lookup(126593) -> {"Lo","AL"}; +lookup(126594) -> {"Lo","AL"}; +lookup(126595) -> {"Lo","AL"}; +lookup(126596) -> {"Lo","AL"}; +lookup(126597) -> {"Lo","AL"}; +lookup(126598) -> {"Lo","AL"}; +lookup(126599) -> {"Lo","AL"}; +lookup(126600) -> {"Lo","AL"}; +lookup(126601) -> {"Lo","AL"}; +lookup(126603) -> {"Lo","AL"}; +lookup(126604) -> {"Lo","AL"}; +lookup(126605) -> {"Lo","AL"}; +lookup(126606) -> {"Lo","AL"}; +lookup(126607) -> {"Lo","AL"}; +lookup(126608) -> {"Lo","AL"}; +lookup(126609) -> {"Lo","AL"}; +lookup(126610) -> {"Lo","AL"}; +lookup(126611) -> {"Lo","AL"}; +lookup(126612) -> {"Lo","AL"}; +lookup(126613) -> {"Lo","AL"}; +lookup(126614) -> {"Lo","AL"}; +lookup(126615) -> {"Lo","AL"}; +lookup(126616) -> {"Lo","AL"}; +lookup(126617) -> {"Lo","AL"}; +lookup(126618) -> {"Lo","AL"}; +lookup(126619) -> {"Lo","AL"}; +lookup(126625) -> {"Lo","AL"}; +lookup(126626) -> {"Lo","AL"}; +lookup(126627) -> {"Lo","AL"}; +lookup(126629) -> {"Lo","AL"}; +lookup(126630) -> {"Lo","AL"}; +lookup(126631) -> {"Lo","AL"}; +lookup(126632) -> {"Lo","AL"}; +lookup(126633) -> {"Lo","AL"}; +lookup(126635) -> {"Lo","AL"}; +lookup(126636) -> {"Lo","AL"}; +lookup(126637) -> {"Lo","AL"}; +lookup(126638) -> {"Lo","AL"}; +lookup(126639) 
-> {"Lo","AL"}; +lookup(126640) -> {"Lo","AL"}; +lookup(126641) -> {"Lo","AL"}; +lookup(126642) -> {"Lo","AL"}; +lookup(126643) -> {"Lo","AL"}; +lookup(126644) -> {"Lo","AL"}; +lookup(126645) -> {"Lo","AL"}; +lookup(126646) -> {"Lo","AL"}; +lookup(126647) -> {"Lo","AL"}; +lookup(126648) -> {"Lo","AL"}; +lookup(126649) -> {"Lo","AL"}; +lookup(126650) -> {"Lo","AL"}; +lookup(126651) -> {"Lo","AL"}; +lookup(126704) -> {"Sm","ON"}; +lookup(126705) -> {"Sm","ON"}; +lookup(126976) -> {"So","ON"}; +lookup(126977) -> {"So","ON"}; +lookup(126978) -> {"So","ON"}; +lookup(126979) -> {"So","ON"}; +lookup(126980) -> {"So","ON"}; +lookup(126981) -> {"So","ON"}; +lookup(126982) -> {"So","ON"}; +lookup(126983) -> {"So","ON"}; +lookup(126984) -> {"So","ON"}; +lookup(126985) -> {"So","ON"}; +lookup(126986) -> {"So","ON"}; +lookup(126987) -> {"So","ON"}; +lookup(126988) -> {"So","ON"}; +lookup(126989) -> {"So","ON"}; +lookup(126990) -> {"So","ON"}; +lookup(126991) -> {"So","ON"}; +lookup(126992) -> {"So","ON"}; +lookup(126993) -> {"So","ON"}; +lookup(126994) -> {"So","ON"}; +lookup(126995) -> {"So","ON"}; +lookup(126996) -> {"So","ON"}; +lookup(126997) -> {"So","ON"}; +lookup(126998) -> {"So","ON"}; +lookup(126999) -> {"So","ON"}; +lookup(127000) -> {"So","ON"}; +lookup(127001) -> {"So","ON"}; +lookup(127002) -> {"So","ON"}; +lookup(127003) -> {"So","ON"}; +lookup(127004) -> {"So","ON"}; +lookup(127005) -> {"So","ON"}; +lookup(127006) -> {"So","ON"}; +lookup(127007) -> {"So","ON"}; +lookup(127008) -> {"So","ON"}; +lookup(127009) -> {"So","ON"}; +lookup(127010) -> {"So","ON"}; +lookup(127011) -> {"So","ON"}; +lookup(127012) -> {"So","ON"}; +lookup(127013) -> {"So","ON"}; +lookup(127014) -> {"So","ON"}; +lookup(127015) -> {"So","ON"}; +lookup(127016) -> {"So","ON"}; +lookup(127017) -> {"So","ON"}; +lookup(127018) -> {"So","ON"}; +lookup(127019) -> {"So","ON"}; +lookup(127024) -> {"So","ON"}; +lookup(127025) -> {"So","ON"}; +lookup(127026) -> {"So","ON"}; +lookup(127027) -> {"So","ON"}; 
+lookup(127028) -> {"So","ON"}; +lookup(127029) -> {"So","ON"}; +lookup(127030) -> {"So","ON"}; +lookup(127031) -> {"So","ON"}; +lookup(127032) -> {"So","ON"}; +lookup(127033) -> {"So","ON"}; +lookup(127034) -> {"So","ON"}; +lookup(127035) -> {"So","ON"}; +lookup(127036) -> {"So","ON"}; +lookup(127037) -> {"So","ON"}; +lookup(127038) -> {"So","ON"}; +lookup(127039) -> {"So","ON"}; +lookup(127040) -> {"So","ON"}; +lookup(127041) -> {"So","ON"}; +lookup(127042) -> {"So","ON"}; +lookup(127043) -> {"So","ON"}; +lookup(127044) -> {"So","ON"}; +lookup(127045) -> {"So","ON"}; +lookup(127046) -> {"So","ON"}; +lookup(127047) -> {"So","ON"}; +lookup(127048) -> {"So","ON"}; +lookup(127049) -> {"So","ON"}; +lookup(127050) -> {"So","ON"}; +lookup(127051) -> {"So","ON"}; +lookup(127052) -> {"So","ON"}; +lookup(127053) -> {"So","ON"}; +lookup(127054) -> {"So","ON"}; +lookup(127055) -> {"So","ON"}; +lookup(127056) -> {"So","ON"}; +lookup(127057) -> {"So","ON"}; +lookup(127058) -> {"So","ON"}; +lookup(127059) -> {"So","ON"}; +lookup(127060) -> {"So","ON"}; +lookup(127061) -> {"So","ON"}; +lookup(127062) -> {"So","ON"}; +lookup(127063) -> {"So","ON"}; +lookup(127064) -> {"So","ON"}; +lookup(127065) -> {"So","ON"}; +lookup(127066) -> {"So","ON"}; +lookup(127067) -> {"So","ON"}; +lookup(127068) -> {"So","ON"}; +lookup(127069) -> {"So","ON"}; +lookup(127070) -> {"So","ON"}; +lookup(127071) -> {"So","ON"}; +lookup(127072) -> {"So","ON"}; +lookup(127073) -> {"So","ON"}; +lookup(127074) -> {"So","ON"}; +lookup(127075) -> {"So","ON"}; +lookup(127076) -> {"So","ON"}; +lookup(127077) -> {"So","ON"}; +lookup(127078) -> {"So","ON"}; +lookup(127079) -> {"So","ON"}; +lookup(127080) -> {"So","ON"}; +lookup(127081) -> {"So","ON"}; +lookup(127082) -> {"So","ON"}; +lookup(127083) -> {"So","ON"}; +lookup(127084) -> {"So","ON"}; +lookup(127085) -> {"So","ON"}; +lookup(127086) -> {"So","ON"}; +lookup(127087) -> {"So","ON"}; +lookup(127088) -> {"So","ON"}; +lookup(127089) -> {"So","ON"}; +lookup(127090) 
-> {"So","ON"}; +lookup(127091) -> {"So","ON"}; +lookup(127092) -> {"So","ON"}; +lookup(127093) -> {"So","ON"}; +lookup(127094) -> {"So","ON"}; +lookup(127095) -> {"So","ON"}; +lookup(127096) -> {"So","ON"}; +lookup(127097) -> {"So","ON"}; +lookup(127098) -> {"So","ON"}; +lookup(127099) -> {"So","ON"}; +lookup(127100) -> {"So","ON"}; +lookup(127101) -> {"So","ON"}; +lookup(127102) -> {"So","ON"}; +lookup(127103) -> {"So","ON"}; +lookup(127104) -> {"So","ON"}; +lookup(127105) -> {"So","ON"}; +lookup(127106) -> {"So","ON"}; +lookup(127107) -> {"So","ON"}; +lookup(127108) -> {"So","ON"}; +lookup(127109) -> {"So","ON"}; +lookup(127110) -> {"So","ON"}; +lookup(127111) -> {"So","ON"}; +lookup(127112) -> {"So","ON"}; +lookup(127113) -> {"So","ON"}; +lookup(127114) -> {"So","ON"}; +lookup(127115) -> {"So","ON"}; +lookup(127116) -> {"So","ON"}; +lookup(127117) -> {"So","ON"}; +lookup(127118) -> {"So","ON"}; +lookup(127119) -> {"So","ON"}; +lookup(127120) -> {"So","ON"}; +lookup(127121) -> {"So","ON"}; +lookup(127122) -> {"So","ON"}; +lookup(127123) -> {"So","ON"}; +lookup(127136) -> {"So","ON"}; +lookup(127137) -> {"So","ON"}; +lookup(127138) -> {"So","ON"}; +lookup(127139) -> {"So","ON"}; +lookup(127140) -> {"So","ON"}; +lookup(127141) -> {"So","ON"}; +lookup(127142) -> {"So","ON"}; +lookup(127143) -> {"So","ON"}; +lookup(127144) -> {"So","ON"}; +lookup(127145) -> {"So","ON"}; +lookup(127146) -> {"So","ON"}; +lookup(127147) -> {"So","ON"}; +lookup(127148) -> {"So","ON"}; +lookup(127149) -> {"So","ON"}; +lookup(127150) -> {"So","ON"}; +lookup(127153) -> {"So","ON"}; +lookup(127154) -> {"So","ON"}; +lookup(127155) -> {"So","ON"}; +lookup(127156) -> {"So","ON"}; +lookup(127157) -> {"So","ON"}; +lookup(127158) -> {"So","ON"}; +lookup(127159) -> {"So","ON"}; +lookup(127160) -> {"So","ON"}; +lookup(127161) -> {"So","ON"}; +lookup(127162) -> {"So","ON"}; +lookup(127163) -> {"So","ON"}; +lookup(127164) -> {"So","ON"}; +lookup(127165) -> {"So","ON"}; +lookup(127166) -> {"So","ON"}; 
+lookup(127167) -> {"So","ON"}; +lookup(127169) -> {"So","ON"}; +lookup(127170) -> {"So","ON"}; +lookup(127171) -> {"So","ON"}; +lookup(127172) -> {"So","ON"}; +lookup(127173) -> {"So","ON"}; +lookup(127174) -> {"So","ON"}; +lookup(127175) -> {"So","ON"}; +lookup(127176) -> {"So","ON"}; +lookup(127177) -> {"So","ON"}; +lookup(127178) -> {"So","ON"}; +lookup(127179) -> {"So","ON"}; +lookup(127180) -> {"So","ON"}; +lookup(127181) -> {"So","ON"}; +lookup(127182) -> {"So","ON"}; +lookup(127183) -> {"So","ON"}; +lookup(127185) -> {"So","ON"}; +lookup(127186) -> {"So","ON"}; +lookup(127187) -> {"So","ON"}; +lookup(127188) -> {"So","ON"}; +lookup(127189) -> {"So","ON"}; +lookup(127190) -> {"So","ON"}; +lookup(127191) -> {"So","ON"}; +lookup(127192) -> {"So","ON"}; +lookup(127193) -> {"So","ON"}; +lookup(127194) -> {"So","ON"}; +lookup(127195) -> {"So","ON"}; +lookup(127196) -> {"So","ON"}; +lookup(127197) -> {"So","ON"}; +lookup(127198) -> {"So","ON"}; +lookup(127199) -> {"So","ON"}; +lookup(127200) -> {"So","ON"}; +lookup(127201) -> {"So","ON"}; +lookup(127202) -> {"So","ON"}; +lookup(127203) -> {"So","ON"}; +lookup(127204) -> {"So","ON"}; +lookup(127205) -> {"So","ON"}; +lookup(127206) -> {"So","ON"}; +lookup(127207) -> {"So","ON"}; +lookup(127208) -> {"So","ON"}; +lookup(127209) -> {"So","ON"}; +lookup(127210) -> {"So","ON"}; +lookup(127211) -> {"So","ON"}; +lookup(127212) -> {"So","ON"}; +lookup(127213) -> {"So","ON"}; +lookup(127214) -> {"So","ON"}; +lookup(127215) -> {"So","ON"}; +lookup(127216) -> {"So","ON"}; +lookup(127217) -> {"So","ON"}; +lookup(127218) -> {"So","ON"}; +lookup(127219) -> {"So","ON"}; +lookup(127220) -> {"So","ON"}; +lookup(127221) -> {"So","ON"}; +lookup(127232) -> {"No","EN"}; +lookup(127233) -> {"No","EN"}; +lookup(127234) -> {"No","EN"}; +lookup(127235) -> {"No","EN"}; +lookup(127236) -> {"No","EN"}; +lookup(127237) -> {"No","EN"}; +lookup(127238) -> {"No","EN"}; +lookup(127239) -> {"No","EN"}; +lookup(127240) -> {"No","EN"}; +lookup(127241) 
-> {"No","EN"}; +lookup(127242) -> {"No","EN"}; +lookup(127243) -> {"No","ON"}; +lookup(127244) -> {"No","ON"}; +lookup(127245) -> {"So","ON"}; +lookup(127246) -> {"So","ON"}; +lookup(127247) -> {"So","ON"}; +lookup(127248) -> {"So","L"}; +lookup(127249) -> {"So","L"}; +lookup(127250) -> {"So","L"}; +lookup(127251) -> {"So","L"}; +lookup(127252) -> {"So","L"}; +lookup(127253) -> {"So","L"}; +lookup(127254) -> {"So","L"}; +lookup(127255) -> {"So","L"}; +lookup(127256) -> {"So","L"}; +lookup(127257) -> {"So","L"}; +lookup(127258) -> {"So","L"}; +lookup(127259) -> {"So","L"}; +lookup(127260) -> {"So","L"}; +lookup(127261) -> {"So","L"}; +lookup(127262) -> {"So","L"}; +lookup(127263) -> {"So","L"}; +lookup(127264) -> {"So","L"}; +lookup(127265) -> {"So","L"}; +lookup(127266) -> {"So","L"}; +lookup(127267) -> {"So","L"}; +lookup(127268) -> {"So","L"}; +lookup(127269) -> {"So","L"}; +lookup(127270) -> {"So","L"}; +lookup(127271) -> {"So","L"}; +lookup(127272) -> {"So","L"}; +lookup(127273) -> {"So","L"}; +lookup(127274) -> {"So","L"}; +lookup(127275) -> {"So","L"}; +lookup(127276) -> {"So","L"}; +lookup(127277) -> {"So","L"}; +lookup(127278) -> {"So","L"}; +lookup(127279) -> {"So","ON"}; +lookup(127280) -> {"So","L"}; +lookup(127281) -> {"So","L"}; +lookup(127282) -> {"So","L"}; +lookup(127283) -> {"So","L"}; +lookup(127284) -> {"So","L"}; +lookup(127285) -> {"So","L"}; +lookup(127286) -> {"So","L"}; +lookup(127287) -> {"So","L"}; +lookup(127288) -> {"So","L"}; +lookup(127289) -> {"So","L"}; +lookup(127290) -> {"So","L"}; +lookup(127291) -> {"So","L"}; +lookup(127292) -> {"So","L"}; +lookup(127293) -> {"So","L"}; +lookup(127294) -> {"So","L"}; +lookup(127295) -> {"So","L"}; +lookup(127296) -> {"So","L"}; +lookup(127297) -> {"So","L"}; +lookup(127298) -> {"So","L"}; +lookup(127299) -> {"So","L"}; +lookup(127300) -> {"So","L"}; +lookup(127301) -> {"So","L"}; +lookup(127302) -> {"So","L"}; +lookup(127303) -> {"So","L"}; +lookup(127304) -> {"So","L"}; +lookup(127305) -> 
{"So","L"}; +lookup(127306) -> {"So","L"}; +lookup(127307) -> {"So","L"}; +lookup(127308) -> {"So","L"}; +lookup(127309) -> {"So","L"}; +lookup(127310) -> {"So","L"}; +lookup(127311) -> {"So","L"}; +lookup(127312) -> {"So","L"}; +lookup(127313) -> {"So","L"}; +lookup(127314) -> {"So","L"}; +lookup(127315) -> {"So","L"}; +lookup(127316) -> {"So","L"}; +lookup(127317) -> {"So","L"}; +lookup(127318) -> {"So","L"}; +lookup(127319) -> {"So","L"}; +lookup(127320) -> {"So","L"}; +lookup(127321) -> {"So","L"}; +lookup(127322) -> {"So","L"}; +lookup(127323) -> {"So","L"}; +lookup(127324) -> {"So","L"}; +lookup(127325) -> {"So","L"}; +lookup(127326) -> {"So","L"}; +lookup(127327) -> {"So","L"}; +lookup(127328) -> {"So","L"}; +lookup(127329) -> {"So","L"}; +lookup(127330) -> {"So","L"}; +lookup(127331) -> {"So","L"}; +lookup(127332) -> {"So","L"}; +lookup(127333) -> {"So","L"}; +lookup(127334) -> {"So","L"}; +lookup(127335) -> {"So","L"}; +lookup(127336) -> {"So","L"}; +lookup(127337) -> {"So","L"}; +lookup(127338) -> {"So","ON"}; +lookup(127339) -> {"So","ON"}; +lookup(127340) -> {"So","ON"}; +lookup(127341) -> {"So","ON"}; +lookup(127342) -> {"So","ON"}; +lookup(127343) -> {"So","ON"}; +lookup(127344) -> {"So","L"}; +lookup(127345) -> {"So","L"}; +lookup(127346) -> {"So","L"}; +lookup(127347) -> {"So","L"}; +lookup(127348) -> {"So","L"}; +lookup(127349) -> {"So","L"}; +lookup(127350) -> {"So","L"}; +lookup(127351) -> {"So","L"}; +lookup(127352) -> {"So","L"}; +lookup(127353) -> {"So","L"}; +lookup(127354) -> {"So","L"}; +lookup(127355) -> {"So","L"}; +lookup(127356) -> {"So","L"}; +lookup(127357) -> {"So","L"}; +lookup(127358) -> {"So","L"}; +lookup(127359) -> {"So","L"}; +lookup(127360) -> {"So","L"}; +lookup(127361) -> {"So","L"}; +lookup(127362) -> {"So","L"}; +lookup(127363) -> {"So","L"}; +lookup(127364) -> {"So","L"}; +lookup(127365) -> {"So","L"}; +lookup(127366) -> {"So","L"}; +lookup(127367) -> {"So","L"}; +lookup(127368) -> {"So","L"}; +lookup(127369) -> 
{"So","L"}; +lookup(127370) -> {"So","L"}; +lookup(127371) -> {"So","L"}; +lookup(127372) -> {"So","L"}; +lookup(127373) -> {"So","L"}; +lookup(127374) -> {"So","L"}; +lookup(127375) -> {"So","L"}; +lookup(127376) -> {"So","L"}; +lookup(127377) -> {"So","L"}; +lookup(127378) -> {"So","L"}; +lookup(127379) -> {"So","L"}; +lookup(127380) -> {"So","L"}; +lookup(127381) -> {"So","L"}; +lookup(127382) -> {"So","L"}; +lookup(127383) -> {"So","L"}; +lookup(127384) -> {"So","L"}; +lookup(127385) -> {"So","L"}; +lookup(127386) -> {"So","L"}; +lookup(127387) -> {"So","L"}; +lookup(127388) -> {"So","L"}; +lookup(127389) -> {"So","L"}; +lookup(127390) -> {"So","L"}; +lookup(127391) -> {"So","L"}; +lookup(127392) -> {"So","L"}; +lookup(127393) -> {"So","L"}; +lookup(127394) -> {"So","L"}; +lookup(127395) -> {"So","L"}; +lookup(127396) -> {"So","L"}; +lookup(127397) -> {"So","L"}; +lookup(127398) -> {"So","L"}; +lookup(127399) -> {"So","L"}; +lookup(127400) -> {"So","L"}; +lookup(127401) -> {"So","L"}; +lookup(127402) -> {"So","L"}; +lookup(127403) -> {"So","L"}; +lookup(127404) -> {"So","L"}; +lookup(127405) -> {"So","ON"}; +lookup(127462) -> {"So","L"}; +lookup(127463) -> {"So","L"}; +lookup(127464) -> {"So","L"}; +lookup(127465) -> {"So","L"}; +lookup(127466) -> {"So","L"}; +lookup(127467) -> {"So","L"}; +lookup(127468) -> {"So","L"}; +lookup(127469) -> {"So","L"}; +lookup(127470) -> {"So","L"}; +lookup(127471) -> {"So","L"}; +lookup(127472) -> {"So","L"}; +lookup(127473) -> {"So","L"}; +lookup(127474) -> {"So","L"}; +lookup(127475) -> {"So","L"}; +lookup(127476) -> {"So","L"}; +lookup(127477) -> {"So","L"}; +lookup(127478) -> {"So","L"}; +lookup(127479) -> {"So","L"}; +lookup(127480) -> {"So","L"}; +lookup(127481) -> {"So","L"}; +lookup(127482) -> {"So","L"}; +lookup(127483) -> {"So","L"}; +lookup(127484) -> {"So","L"}; +lookup(127485) -> {"So","L"}; +lookup(127486) -> {"So","L"}; +lookup(127487) -> {"So","L"}; +lookup(127488) -> {"So","L"}; +lookup(127489) -> {"So","L"}; 
+lookup(127490) -> {"So","L"}; +lookup(127504) -> {"So","L"}; +lookup(127505) -> {"So","L"}; +lookup(127506) -> {"So","L"}; +lookup(127507) -> {"So","L"}; +lookup(127508) -> {"So","L"}; +lookup(127509) -> {"So","L"}; +lookup(127510) -> {"So","L"}; +lookup(127511) -> {"So","L"}; +lookup(127512) -> {"So","L"}; +lookup(127513) -> {"So","L"}; +lookup(127514) -> {"So","L"}; +lookup(127515) -> {"So","L"}; +lookup(127516) -> {"So","L"}; +lookup(127517) -> {"So","L"}; +lookup(127518) -> {"So","L"}; +lookup(127519) -> {"So","L"}; +lookup(127520) -> {"So","L"}; +lookup(127521) -> {"So","L"}; +lookup(127522) -> {"So","L"}; +lookup(127523) -> {"So","L"}; +lookup(127524) -> {"So","L"}; +lookup(127525) -> {"So","L"}; +lookup(127526) -> {"So","L"}; +lookup(127527) -> {"So","L"}; +lookup(127528) -> {"So","L"}; +lookup(127529) -> {"So","L"}; +lookup(127530) -> {"So","L"}; +lookup(127531) -> {"So","L"}; +lookup(127532) -> {"So","L"}; +lookup(127533) -> {"So","L"}; +lookup(127534) -> {"So","L"}; +lookup(127535) -> {"So","L"}; +lookup(127536) -> {"So","L"}; +lookup(127537) -> {"So","L"}; +lookup(127538) -> {"So","L"}; +lookup(127539) -> {"So","L"}; +lookup(127540) -> {"So","L"}; +lookup(127541) -> {"So","L"}; +lookup(127542) -> {"So","L"}; +lookup(127543) -> {"So","L"}; +lookup(127544) -> {"So","L"}; +lookup(127545) -> {"So","L"}; +lookup(127546) -> {"So","L"}; +lookup(127547) -> {"So","L"}; +lookup(127552) -> {"So","L"}; +lookup(127553) -> {"So","L"}; +lookup(127554) -> {"So","L"}; +lookup(127555) -> {"So","L"}; +lookup(127556) -> {"So","L"}; +lookup(127557) -> {"So","L"}; +lookup(127558) -> {"So","L"}; +lookup(127559) -> {"So","L"}; +lookup(127560) -> {"So","L"}; +lookup(127568) -> {"So","L"}; +lookup(127569) -> {"So","L"}; +lookup(127584) -> {"So","ON"}; +lookup(127585) -> {"So","ON"}; +lookup(127586) -> {"So","ON"}; +lookup(127587) -> {"So","ON"}; +lookup(127588) -> {"So","ON"}; +lookup(127589) -> {"So","ON"}; +lookup(127744) -> {"So","ON"}; +lookup(127745) -> {"So","ON"}; 
+lookup(127746) -> {"So","ON"}; +lookup(127747) -> {"So","ON"}; +lookup(127748) -> {"So","ON"}; +lookup(127749) -> {"So","ON"}; +lookup(127750) -> {"So","ON"}; +lookup(127751) -> {"So","ON"}; +lookup(127752) -> {"So","ON"}; +lookup(127753) -> {"So","ON"}; +lookup(127754) -> {"So","ON"}; +lookup(127755) -> {"So","ON"}; +lookup(127756) -> {"So","ON"}; +lookup(127757) -> {"So","ON"}; +lookup(127758) -> {"So","ON"}; +lookup(127759) -> {"So","ON"}; +lookup(127760) -> {"So","ON"}; +lookup(127761) -> {"So","ON"}; +lookup(127762) -> {"So","ON"}; +lookup(127763) -> {"So","ON"}; +lookup(127764) -> {"So","ON"}; +lookup(127765) -> {"So","ON"}; +lookup(127766) -> {"So","ON"}; +lookup(127767) -> {"So","ON"}; +lookup(127768) -> {"So","ON"}; +lookup(127769) -> {"So","ON"}; +lookup(127770) -> {"So","ON"}; +lookup(127771) -> {"So","ON"}; +lookup(127772) -> {"So","ON"}; +lookup(127773) -> {"So","ON"}; +lookup(127774) -> {"So","ON"}; +lookup(127775) -> {"So","ON"}; +lookup(127776) -> {"So","ON"}; +lookup(127777) -> {"So","ON"}; +lookup(127778) -> {"So","ON"}; +lookup(127779) -> {"So","ON"}; +lookup(127780) -> {"So","ON"}; +lookup(127781) -> {"So","ON"}; +lookup(127782) -> {"So","ON"}; +lookup(127783) -> {"So","ON"}; +lookup(127784) -> {"So","ON"}; +lookup(127785) -> {"So","ON"}; +lookup(127786) -> {"So","ON"}; +lookup(127787) -> {"So","ON"}; +lookup(127788) -> {"So","ON"}; +lookup(127789) -> {"So","ON"}; +lookup(127790) -> {"So","ON"}; +lookup(127791) -> {"So","ON"}; +lookup(127792) -> {"So","ON"}; +lookup(127793) -> {"So","ON"}; +lookup(127794) -> {"So","ON"}; +lookup(127795) -> {"So","ON"}; +lookup(127796) -> {"So","ON"}; +lookup(127797) -> {"So","ON"}; +lookup(127798) -> {"So","ON"}; +lookup(127799) -> {"So","ON"}; +lookup(127800) -> {"So","ON"}; +lookup(127801) -> {"So","ON"}; +lookup(127802) -> {"So","ON"}; +lookup(127803) -> {"So","ON"}; +lookup(127804) -> {"So","ON"}; +lookup(127805) -> {"So","ON"}; +lookup(127806) -> {"So","ON"}; +lookup(127807) -> {"So","ON"}; +lookup(127808) 
-> {"So","ON"}; +lookup(127809) -> {"So","ON"}; +lookup(127810) -> {"So","ON"}; +lookup(127811) -> {"So","ON"}; +lookup(127812) -> {"So","ON"}; +lookup(127813) -> {"So","ON"}; +lookup(127814) -> {"So","ON"}; +lookup(127815) -> {"So","ON"}; +lookup(127816) -> {"So","ON"}; +lookup(127817) -> {"So","ON"}; +lookup(127818) -> {"So","ON"}; +lookup(127819) -> {"So","ON"}; +lookup(127820) -> {"So","ON"}; +lookup(127821) -> {"So","ON"}; +lookup(127822) -> {"So","ON"}; +lookup(127823) -> {"So","ON"}; +lookup(127824) -> {"So","ON"}; +lookup(127825) -> {"So","ON"}; +lookup(127826) -> {"So","ON"}; +lookup(127827) -> {"So","ON"}; +lookup(127828) -> {"So","ON"}; +lookup(127829) -> {"So","ON"}; +lookup(127830) -> {"So","ON"}; +lookup(127831) -> {"So","ON"}; +lookup(127832) -> {"So","ON"}; +lookup(127833) -> {"So","ON"}; +lookup(127834) -> {"So","ON"}; +lookup(127835) -> {"So","ON"}; +lookup(127836) -> {"So","ON"}; +lookup(127837) -> {"So","ON"}; +lookup(127838) -> {"So","ON"}; +lookup(127839) -> {"So","ON"}; +lookup(127840) -> {"So","ON"}; +lookup(127841) -> {"So","ON"}; +lookup(127842) -> {"So","ON"}; +lookup(127843) -> {"So","ON"}; +lookup(127844) -> {"So","ON"}; +lookup(127845) -> {"So","ON"}; +lookup(127846) -> {"So","ON"}; +lookup(127847) -> {"So","ON"}; +lookup(127848) -> {"So","ON"}; +lookup(127849) -> {"So","ON"}; +lookup(127850) -> {"So","ON"}; +lookup(127851) -> {"So","ON"}; +lookup(127852) -> {"So","ON"}; +lookup(127853) -> {"So","ON"}; +lookup(127854) -> {"So","ON"}; +lookup(127855) -> {"So","ON"}; +lookup(127856) -> {"So","ON"}; +lookup(127857) -> {"So","ON"}; +lookup(127858) -> {"So","ON"}; +lookup(127859) -> {"So","ON"}; +lookup(127860) -> {"So","ON"}; +lookup(127861) -> {"So","ON"}; +lookup(127862) -> {"So","ON"}; +lookup(127863) -> {"So","ON"}; +lookup(127864) -> {"So","ON"}; +lookup(127865) -> {"So","ON"}; +lookup(127866) -> {"So","ON"}; +lookup(127867) -> {"So","ON"}; +lookup(127868) -> {"So","ON"}; +lookup(127869) -> {"So","ON"}; +lookup(127870) -> {"So","ON"}; 
+lookup(127871) -> {"So","ON"}; +lookup(127872) -> {"So","ON"}; +lookup(127873) -> {"So","ON"}; +lookup(127874) -> {"So","ON"}; +lookup(127875) -> {"So","ON"}; +lookup(127876) -> {"So","ON"}; +lookup(127877) -> {"So","ON"}; +lookup(127878) -> {"So","ON"}; +lookup(127879) -> {"So","ON"}; +lookup(127880) -> {"So","ON"}; +lookup(127881) -> {"So","ON"}; +lookup(127882) -> {"So","ON"}; +lookup(127883) -> {"So","ON"}; +lookup(127884) -> {"So","ON"}; +lookup(127885) -> {"So","ON"}; +lookup(127886) -> {"So","ON"}; +lookup(127887) -> {"So","ON"}; +lookup(127888) -> {"So","ON"}; +lookup(127889) -> {"So","ON"}; +lookup(127890) -> {"So","ON"}; +lookup(127891) -> {"So","ON"}; +lookup(127892) -> {"So","ON"}; +lookup(127893) -> {"So","ON"}; +lookup(127894) -> {"So","ON"}; +lookup(127895) -> {"So","ON"}; +lookup(127896) -> {"So","ON"}; +lookup(127897) -> {"So","ON"}; +lookup(127898) -> {"So","ON"}; +lookup(127899) -> {"So","ON"}; +lookup(127900) -> {"So","ON"}; +lookup(127901) -> {"So","ON"}; +lookup(127902) -> {"So","ON"}; +lookup(127903) -> {"So","ON"}; +lookup(127904) -> {"So","ON"}; +lookup(127905) -> {"So","ON"}; +lookup(127906) -> {"So","ON"}; +lookup(127907) -> {"So","ON"}; +lookup(127908) -> {"So","ON"}; +lookup(127909) -> {"So","ON"}; +lookup(127910) -> {"So","ON"}; +lookup(127911) -> {"So","ON"}; +lookup(127912) -> {"So","ON"}; +lookup(127913) -> {"So","ON"}; +lookup(127914) -> {"So","ON"}; +lookup(127915) -> {"So","ON"}; +lookup(127916) -> {"So","ON"}; +lookup(127917) -> {"So","ON"}; +lookup(127918) -> {"So","ON"}; +lookup(127919) -> {"So","ON"}; +lookup(127920) -> {"So","ON"}; +lookup(127921) -> {"So","ON"}; +lookup(127922) -> {"So","ON"}; +lookup(127923) -> {"So","ON"}; +lookup(127924) -> {"So","ON"}; +lookup(127925) -> {"So","ON"}; +lookup(127926) -> {"So","ON"}; +lookup(127927) -> {"So","ON"}; +lookup(127928) -> {"So","ON"}; +lookup(127929) -> {"So","ON"}; +lookup(127930) -> {"So","ON"}; +lookup(127931) -> {"So","ON"}; +lookup(127932) -> {"So","ON"}; +lookup(127933) 
-> {"So","ON"}; +lookup(127934) -> {"So","ON"}; +lookup(127935) -> {"So","ON"}; +lookup(127936) -> {"So","ON"}; +lookup(127937) -> {"So","ON"}; +lookup(127938) -> {"So","ON"}; +lookup(127939) -> {"So","ON"}; +lookup(127940) -> {"So","ON"}; +lookup(127941) -> {"So","ON"}; +lookup(127942) -> {"So","ON"}; +lookup(127943) -> {"So","ON"}; +lookup(127944) -> {"So","ON"}; +lookup(127945) -> {"So","ON"}; +lookup(127946) -> {"So","ON"}; +lookup(127947) -> {"So","ON"}; +lookup(127948) -> {"So","ON"}; +lookup(127949) -> {"So","ON"}; +lookup(127950) -> {"So","ON"}; +lookup(127951) -> {"So","ON"}; +lookup(127952) -> {"So","ON"}; +lookup(127953) -> {"So","ON"}; +lookup(127954) -> {"So","ON"}; +lookup(127955) -> {"So","ON"}; +lookup(127956) -> {"So","ON"}; +lookup(127957) -> {"So","ON"}; +lookup(127958) -> {"So","ON"}; +lookup(127959) -> {"So","ON"}; +lookup(127960) -> {"So","ON"}; +lookup(127961) -> {"So","ON"}; +lookup(127962) -> {"So","ON"}; +lookup(127963) -> {"So","ON"}; +lookup(127964) -> {"So","ON"}; +lookup(127965) -> {"So","ON"}; +lookup(127966) -> {"So","ON"}; +lookup(127967) -> {"So","ON"}; +lookup(127968) -> {"So","ON"}; +lookup(127969) -> {"So","ON"}; +lookup(127970) -> {"So","ON"}; +lookup(127971) -> {"So","ON"}; +lookup(127972) -> {"So","ON"}; +lookup(127973) -> {"So","ON"}; +lookup(127974) -> {"So","ON"}; +lookup(127975) -> {"So","ON"}; +lookup(127976) -> {"So","ON"}; +lookup(127977) -> {"So","ON"}; +lookup(127978) -> {"So","ON"}; +lookup(127979) -> {"So","ON"}; +lookup(127980) -> {"So","ON"}; +lookup(127981) -> {"So","ON"}; +lookup(127982) -> {"So","ON"}; +lookup(127983) -> {"So","ON"}; +lookup(127984) -> {"So","ON"}; +lookup(127985) -> {"So","ON"}; +lookup(127986) -> {"So","ON"}; +lookup(127987) -> {"So","ON"}; +lookup(127988) -> {"So","ON"}; +lookup(127989) -> {"So","ON"}; +lookup(127990) -> {"So","ON"}; +lookup(127991) -> {"So","ON"}; +lookup(127992) -> {"So","ON"}; +lookup(127993) -> {"So","ON"}; +lookup(127994) -> {"So","ON"}; +lookup(127995) -> {"Sk","ON"}; 
+lookup(127996) -> {"Sk","ON"}; +lookup(127997) -> {"Sk","ON"}; +lookup(127998) -> {"Sk","ON"}; +lookup(127999) -> {"Sk","ON"}; +lookup(128000) -> {"So","ON"}; +lookup(128001) -> {"So","ON"}; +lookup(128002) -> {"So","ON"}; +lookup(128003) -> {"So","ON"}; +lookup(128004) -> {"So","ON"}; +lookup(128005) -> {"So","ON"}; +lookup(128006) -> {"So","ON"}; +lookup(128007) -> {"So","ON"}; +lookup(128008) -> {"So","ON"}; +lookup(128009) -> {"So","ON"}; +lookup(128010) -> {"So","ON"}; +lookup(128011) -> {"So","ON"}; +lookup(128012) -> {"So","ON"}; +lookup(128013) -> {"So","ON"}; +lookup(128014) -> {"So","ON"}; +lookup(128015) -> {"So","ON"}; +lookup(128016) -> {"So","ON"}; +lookup(128017) -> {"So","ON"}; +lookup(128018) -> {"So","ON"}; +lookup(128019) -> {"So","ON"}; +lookup(128020) -> {"So","ON"}; +lookup(128021) -> {"So","ON"}; +lookup(128022) -> {"So","ON"}; +lookup(128023) -> {"So","ON"}; +lookup(128024) -> {"So","ON"}; +lookup(128025) -> {"So","ON"}; +lookup(128026) -> {"So","ON"}; +lookup(128027) -> {"So","ON"}; +lookup(128028) -> {"So","ON"}; +lookup(128029) -> {"So","ON"}; +lookup(128030) -> {"So","ON"}; +lookup(128031) -> {"So","ON"}; +lookup(128032) -> {"So","ON"}; +lookup(128033) -> {"So","ON"}; +lookup(128034) -> {"So","ON"}; +lookup(128035) -> {"So","ON"}; +lookup(128036) -> {"So","ON"}; +lookup(128037) -> {"So","ON"}; +lookup(128038) -> {"So","ON"}; +lookup(128039) -> {"So","ON"}; +lookup(128040) -> {"So","ON"}; +lookup(128041) -> {"So","ON"}; +lookup(128042) -> {"So","ON"}; +lookup(128043) -> {"So","ON"}; +lookup(128044) -> {"So","ON"}; +lookup(128045) -> {"So","ON"}; +lookup(128046) -> {"So","ON"}; +lookup(128047) -> {"So","ON"}; +lookup(128048) -> {"So","ON"}; +lookup(128049) -> {"So","ON"}; +lookup(128050) -> {"So","ON"}; +lookup(128051) -> {"So","ON"}; +lookup(128052) -> {"So","ON"}; +lookup(128053) -> {"So","ON"}; +lookup(128054) -> {"So","ON"}; +lookup(128055) -> {"So","ON"}; +lookup(128056) -> {"So","ON"}; +lookup(128057) -> {"So","ON"}; +lookup(128058) 
-> {"So","ON"}; +lookup(128059) -> {"So","ON"}; +lookup(128060) -> {"So","ON"}; +lookup(128061) -> {"So","ON"}; +lookup(128062) -> {"So","ON"}; +lookup(128063) -> {"So","ON"}; +lookup(128064) -> {"So","ON"}; +lookup(128065) -> {"So","ON"}; +lookup(128066) -> {"So","ON"}; +lookup(128067) -> {"So","ON"}; +lookup(128068) -> {"So","ON"}; +lookup(128069) -> {"So","ON"}; +lookup(128070) -> {"So","ON"}; +lookup(128071) -> {"So","ON"}; +lookup(128072) -> {"So","ON"}; +lookup(128073) -> {"So","ON"}; +lookup(128074) -> {"So","ON"}; +lookup(128075) -> {"So","ON"}; +lookup(128076) -> {"So","ON"}; +lookup(128077) -> {"So","ON"}; +lookup(128078) -> {"So","ON"}; +lookup(128079) -> {"So","ON"}; +lookup(128080) -> {"So","ON"}; +lookup(128081) -> {"So","ON"}; +lookup(128082) -> {"So","ON"}; +lookup(128083) -> {"So","ON"}; +lookup(128084) -> {"So","ON"}; +lookup(128085) -> {"So","ON"}; +lookup(128086) -> {"So","ON"}; +lookup(128087) -> {"So","ON"}; +lookup(128088) -> {"So","ON"}; +lookup(128089) -> {"So","ON"}; +lookup(128090) -> {"So","ON"}; +lookup(128091) -> {"So","ON"}; +lookup(128092) -> {"So","ON"}; +lookup(128093) -> {"So","ON"}; +lookup(128094) -> {"So","ON"}; +lookup(128095) -> {"So","ON"}; +lookup(128096) -> {"So","ON"}; +lookup(128097) -> {"So","ON"}; +lookup(128098) -> {"So","ON"}; +lookup(128099) -> {"So","ON"}; +lookup(128100) -> {"So","ON"}; +lookup(128101) -> {"So","ON"}; +lookup(128102) -> {"So","ON"}; +lookup(128103) -> {"So","ON"}; +lookup(128104) -> {"So","ON"}; +lookup(128105) -> {"So","ON"}; +lookup(128106) -> {"So","ON"}; +lookup(128107) -> {"So","ON"}; +lookup(128108) -> {"So","ON"}; +lookup(128109) -> {"So","ON"}; +lookup(128110) -> {"So","ON"}; +lookup(128111) -> {"So","ON"}; +lookup(128112) -> {"So","ON"}; +lookup(128113) -> {"So","ON"}; +lookup(128114) -> {"So","ON"}; +lookup(128115) -> {"So","ON"}; +lookup(128116) -> {"So","ON"}; +lookup(128117) -> {"So","ON"}; +lookup(128118) -> {"So","ON"}; +lookup(128119) -> {"So","ON"}; +lookup(128120) -> {"So","ON"}; 
+lookup(128121) -> {"So","ON"}; +lookup(128122) -> {"So","ON"}; +lookup(128123) -> {"So","ON"}; +lookup(128124) -> {"So","ON"}; +lookup(128125) -> {"So","ON"}; +lookup(128126) -> {"So","ON"}; +lookup(128127) -> {"So","ON"}; +lookup(128128) -> {"So","ON"}; +lookup(128129) -> {"So","ON"}; +lookup(128130) -> {"So","ON"}; +lookup(128131) -> {"So","ON"}; +lookup(128132) -> {"So","ON"}; +lookup(128133) -> {"So","ON"}; +lookup(128134) -> {"So","ON"}; +lookup(128135) -> {"So","ON"}; +lookup(128136) -> {"So","ON"}; +lookup(128137) -> {"So","ON"}; +lookup(128138) -> {"So","ON"}; +lookup(128139) -> {"So","ON"}; +lookup(128140) -> {"So","ON"}; +lookup(128141) -> {"So","ON"}; +lookup(128142) -> {"So","ON"}; +lookup(128143) -> {"So","ON"}; +lookup(128144) -> {"So","ON"}; +lookup(128145) -> {"So","ON"}; +lookup(128146) -> {"So","ON"}; +lookup(128147) -> {"So","ON"}; +lookup(128148) -> {"So","ON"}; +lookup(128149) -> {"So","ON"}; +lookup(128150) -> {"So","ON"}; +lookup(128151) -> {"So","ON"}; +lookup(128152) -> {"So","ON"}; +lookup(128153) -> {"So","ON"}; +lookup(128154) -> {"So","ON"}; +lookup(128155) -> {"So","ON"}; +lookup(128156) -> {"So","ON"}; +lookup(128157) -> {"So","ON"}; +lookup(128158) -> {"So","ON"}; +lookup(128159) -> {"So","ON"}; +lookup(128160) -> {"So","ON"}; +lookup(128161) -> {"So","ON"}; +lookup(128162) -> {"So","ON"}; +lookup(128163) -> {"So","ON"}; +lookup(128164) -> {"So","ON"}; +lookup(128165) -> {"So","ON"}; +lookup(128166) -> {"So","ON"}; +lookup(128167) -> {"So","ON"}; +lookup(128168) -> {"So","ON"}; +lookup(128169) -> {"So","ON"}; +lookup(128170) -> {"So","ON"}; +lookup(128171) -> {"So","ON"}; +lookup(128172) -> {"So","ON"}; +lookup(128173) -> {"So","ON"}; +lookup(128174) -> {"So","ON"}; +lookup(128175) -> {"So","ON"}; +lookup(128176) -> {"So","ON"}; +lookup(128177) -> {"So","ON"}; +lookup(128178) -> {"So","ON"}; +lookup(128179) -> {"So","ON"}; +lookup(128180) -> {"So","ON"}; +lookup(128181) -> {"So","ON"}; +lookup(128182) -> {"So","ON"}; +lookup(128183) 
-> {"So","ON"}; +lookup(128184) -> {"So","ON"}; +lookup(128185) -> {"So","ON"}; +lookup(128186) -> {"So","ON"}; +lookup(128187) -> {"So","ON"}; +lookup(128188) -> {"So","ON"}; +lookup(128189) -> {"So","ON"}; +lookup(128190) -> {"So","ON"}; +lookup(128191) -> {"So","ON"}; +lookup(128192) -> {"So","ON"}; +lookup(128193) -> {"So","ON"}; +lookup(128194) -> {"So","ON"}; +lookup(128195) -> {"So","ON"}; +lookup(128196) -> {"So","ON"}; +lookup(128197) -> {"So","ON"}; +lookup(128198) -> {"So","ON"}; +lookup(128199) -> {"So","ON"}; +lookup(128200) -> {"So","ON"}; +lookup(128201) -> {"So","ON"}; +lookup(128202) -> {"So","ON"}; +lookup(128203) -> {"So","ON"}; +lookup(128204) -> {"So","ON"}; +lookup(128205) -> {"So","ON"}; +lookup(128206) -> {"So","ON"}; +lookup(128207) -> {"So","ON"}; +lookup(128208) -> {"So","ON"}; +lookup(128209) -> {"So","ON"}; +lookup(128210) -> {"So","ON"}; +lookup(128211) -> {"So","ON"}; +lookup(128212) -> {"So","ON"}; +lookup(128213) -> {"So","ON"}; +lookup(128214) -> {"So","ON"}; +lookup(128215) -> {"So","ON"}; +lookup(128216) -> {"So","ON"}; +lookup(128217) -> {"So","ON"}; +lookup(128218) -> {"So","ON"}; +lookup(128219) -> {"So","ON"}; +lookup(128220) -> {"So","ON"}; +lookup(128221) -> {"So","ON"}; +lookup(128222) -> {"So","ON"}; +lookup(128223) -> {"So","ON"}; +lookup(128224) -> {"So","ON"}; +lookup(128225) -> {"So","ON"}; +lookup(128226) -> {"So","ON"}; +lookup(128227) -> {"So","ON"}; +lookup(128228) -> {"So","ON"}; +lookup(128229) -> {"So","ON"}; +lookup(128230) -> {"So","ON"}; +lookup(128231) -> {"So","ON"}; +lookup(128232) -> {"So","ON"}; +lookup(128233) -> {"So","ON"}; +lookup(128234) -> {"So","ON"}; +lookup(128235) -> {"So","ON"}; +lookup(128236) -> {"So","ON"}; +lookup(128237) -> {"So","ON"}; +lookup(128238) -> {"So","ON"}; +lookup(128239) -> {"So","ON"}; +lookup(128240) -> {"So","ON"}; +lookup(128241) -> {"So","ON"}; +lookup(128242) -> {"So","ON"}; +lookup(128243) -> {"So","ON"}; +lookup(128244) -> {"So","ON"}; +lookup(128245) -> {"So","ON"}; 
+lookup(128246) -> {"So","ON"}; +lookup(128247) -> {"So","ON"}; +lookup(128248) -> {"So","ON"}; +lookup(128249) -> {"So","ON"}; +lookup(128250) -> {"So","ON"}; +lookup(128251) -> {"So","ON"}; +lookup(128252) -> {"So","ON"}; +lookup(128253) -> {"So","ON"}; +lookup(128254) -> {"So","ON"}; +lookup(128255) -> {"So","ON"}; +lookup(128256) -> {"So","ON"}; +lookup(128257) -> {"So","ON"}; +lookup(128258) -> {"So","ON"}; +lookup(128259) -> {"So","ON"}; +lookup(128260) -> {"So","ON"}; +lookup(128261) -> {"So","ON"}; +lookup(128262) -> {"So","ON"}; +lookup(128263) -> {"So","ON"}; +lookup(128264) -> {"So","ON"}; +lookup(128265) -> {"So","ON"}; +lookup(128266) -> {"So","ON"}; +lookup(128267) -> {"So","ON"}; +lookup(128268) -> {"So","ON"}; +lookup(128269) -> {"So","ON"}; +lookup(128270) -> {"So","ON"}; +lookup(128271) -> {"So","ON"}; +lookup(128272) -> {"So","ON"}; +lookup(128273) -> {"So","ON"}; +lookup(128274) -> {"So","ON"}; +lookup(128275) -> {"So","ON"}; +lookup(128276) -> {"So","ON"}; +lookup(128277) -> {"So","ON"}; +lookup(128278) -> {"So","ON"}; +lookup(128279) -> {"So","ON"}; +lookup(128280) -> {"So","ON"}; +lookup(128281) -> {"So","ON"}; +lookup(128282) -> {"So","ON"}; +lookup(128283) -> {"So","ON"}; +lookup(128284) -> {"So","ON"}; +lookup(128285) -> {"So","ON"}; +lookup(128286) -> {"So","ON"}; +lookup(128287) -> {"So","ON"}; +lookup(128288) -> {"So","ON"}; +lookup(128289) -> {"So","ON"}; +lookup(128290) -> {"So","ON"}; +lookup(128291) -> {"So","ON"}; +lookup(128292) -> {"So","ON"}; +lookup(128293) -> {"So","ON"}; +lookup(128294) -> {"So","ON"}; +lookup(128295) -> {"So","ON"}; +lookup(128296) -> {"So","ON"}; +lookup(128297) -> {"So","ON"}; +lookup(128298) -> {"So","ON"}; +lookup(128299) -> {"So","ON"}; +lookup(128300) -> {"So","ON"}; +lookup(128301) -> {"So","ON"}; +lookup(128302) -> {"So","ON"}; +lookup(128303) -> {"So","ON"}; +lookup(128304) -> {"So","ON"}; +lookup(128305) -> {"So","ON"}; +lookup(128306) -> {"So","ON"}; +lookup(128307) -> {"So","ON"}; +lookup(128308) 
-> {"So","ON"}; +lookup(128309) -> {"So","ON"}; +lookup(128310) -> {"So","ON"}; +lookup(128311) -> {"So","ON"}; +lookup(128312) -> {"So","ON"}; +lookup(128313) -> {"So","ON"}; +lookup(128314) -> {"So","ON"}; +lookup(128315) -> {"So","ON"}; +lookup(128316) -> {"So","ON"}; +lookup(128317) -> {"So","ON"}; +lookup(128318) -> {"So","ON"}; +lookup(128319) -> {"So","ON"}; +lookup(128320) -> {"So","ON"}; +lookup(128321) -> {"So","ON"}; +lookup(128322) -> {"So","ON"}; +lookup(128323) -> {"So","ON"}; +lookup(128324) -> {"So","ON"}; +lookup(128325) -> {"So","ON"}; +lookup(128326) -> {"So","ON"}; +lookup(128327) -> {"So","ON"}; +lookup(128328) -> {"So","ON"}; +lookup(128329) -> {"So","ON"}; +lookup(128330) -> {"So","ON"}; +lookup(128331) -> {"So","ON"}; +lookup(128332) -> {"So","ON"}; +lookup(128333) -> {"So","ON"}; +lookup(128334) -> {"So","ON"}; +lookup(128335) -> {"So","ON"}; +lookup(128336) -> {"So","ON"}; +lookup(128337) -> {"So","ON"}; +lookup(128338) -> {"So","ON"}; +lookup(128339) -> {"So","ON"}; +lookup(128340) -> {"So","ON"}; +lookup(128341) -> {"So","ON"}; +lookup(128342) -> {"So","ON"}; +lookup(128343) -> {"So","ON"}; +lookup(128344) -> {"So","ON"}; +lookup(128345) -> {"So","ON"}; +lookup(128346) -> {"So","ON"}; +lookup(128347) -> {"So","ON"}; +lookup(128348) -> {"So","ON"}; +lookup(128349) -> {"So","ON"}; +lookup(128350) -> {"So","ON"}; +lookup(128351) -> {"So","ON"}; +lookup(128352) -> {"So","ON"}; +lookup(128353) -> {"So","ON"}; +lookup(128354) -> {"So","ON"}; +lookup(128355) -> {"So","ON"}; +lookup(128356) -> {"So","ON"}; +lookup(128357) -> {"So","ON"}; +lookup(128358) -> {"So","ON"}; +lookup(128359) -> {"So","ON"}; +lookup(128360) -> {"So","ON"}; +lookup(128361) -> {"So","ON"}; +lookup(128362) -> {"So","ON"}; +lookup(128363) -> {"So","ON"}; +lookup(128364) -> {"So","ON"}; +lookup(128365) -> {"So","ON"}; +lookup(128366) -> {"So","ON"}; +lookup(128367) -> {"So","ON"}; +lookup(128368) -> {"So","ON"}; +lookup(128369) -> {"So","ON"}; +lookup(128370) -> {"So","ON"}; 
+lookup(128371) -> {"So","ON"}; +lookup(128372) -> {"So","ON"}; +lookup(128373) -> {"So","ON"}; +lookup(128374) -> {"So","ON"}; +lookup(128375) -> {"So","ON"}; +lookup(128376) -> {"So","ON"}; +lookup(128377) -> {"So","ON"}; +lookup(128378) -> {"So","ON"}; +lookup(128379) -> {"So","ON"}; +lookup(128380) -> {"So","ON"}; +lookup(128381) -> {"So","ON"}; +lookup(128382) -> {"So","ON"}; +lookup(128383) -> {"So","ON"}; +lookup(128384) -> {"So","ON"}; +lookup(128385) -> {"So","ON"}; +lookup(128386) -> {"So","ON"}; +lookup(128387) -> {"So","ON"}; +lookup(128388) -> {"So","ON"}; +lookup(128389) -> {"So","ON"}; +lookup(128390) -> {"So","ON"}; +lookup(128391) -> {"So","ON"}; +lookup(128392) -> {"So","ON"}; +lookup(128393) -> {"So","ON"}; +lookup(128394) -> {"So","ON"}; +lookup(128395) -> {"So","ON"}; +lookup(128396) -> {"So","ON"}; +lookup(128397) -> {"So","ON"}; +lookup(128398) -> {"So","ON"}; +lookup(128399) -> {"So","ON"}; +lookup(128400) -> {"So","ON"}; +lookup(128401) -> {"So","ON"}; +lookup(128402) -> {"So","ON"}; +lookup(128403) -> {"So","ON"}; +lookup(128404) -> {"So","ON"}; +lookup(128405) -> {"So","ON"}; +lookup(128406) -> {"So","ON"}; +lookup(128407) -> {"So","ON"}; +lookup(128408) -> {"So","ON"}; +lookup(128409) -> {"So","ON"}; +lookup(128410) -> {"So","ON"}; +lookup(128411) -> {"So","ON"}; +lookup(128412) -> {"So","ON"}; +lookup(128413) -> {"So","ON"}; +lookup(128414) -> {"So","ON"}; +lookup(128415) -> {"So","ON"}; +lookup(128416) -> {"So","ON"}; +lookup(128417) -> {"So","ON"}; +lookup(128418) -> {"So","ON"}; +lookup(128419) -> {"So","ON"}; +lookup(128420) -> {"So","ON"}; +lookup(128421) -> {"So","ON"}; +lookup(128422) -> {"So","ON"}; +lookup(128423) -> {"So","ON"}; +lookup(128424) -> {"So","ON"}; +lookup(128425) -> {"So","ON"}; +lookup(128426) -> {"So","ON"}; +lookup(128427) -> {"So","ON"}; +lookup(128428) -> {"So","ON"}; +lookup(128429) -> {"So","ON"}; +lookup(128430) -> {"So","ON"}; +lookup(128431) -> {"So","ON"}; +lookup(128432) -> {"So","ON"}; +lookup(128433) 
-> {"So","ON"}; +lookup(128434) -> {"So","ON"}; +lookup(128435) -> {"So","ON"}; +lookup(128436) -> {"So","ON"}; +lookup(128437) -> {"So","ON"}; +lookup(128438) -> {"So","ON"}; +lookup(128439) -> {"So","ON"}; +lookup(128440) -> {"So","ON"}; +lookup(128441) -> {"So","ON"}; +lookup(128442) -> {"So","ON"}; +lookup(128443) -> {"So","ON"}; +lookup(128444) -> {"So","ON"}; +lookup(128445) -> {"So","ON"}; +lookup(128446) -> {"So","ON"}; +lookup(128447) -> {"So","ON"}; +lookup(128448) -> {"So","ON"}; +lookup(128449) -> {"So","ON"}; +lookup(128450) -> {"So","ON"}; +lookup(128451) -> {"So","ON"}; +lookup(128452) -> {"So","ON"}; +lookup(128453) -> {"So","ON"}; +lookup(128454) -> {"So","ON"}; +lookup(128455) -> {"So","ON"}; +lookup(128456) -> {"So","ON"}; +lookup(128457) -> {"So","ON"}; +lookup(128458) -> {"So","ON"}; +lookup(128459) -> {"So","ON"}; +lookup(128460) -> {"So","ON"}; +lookup(128461) -> {"So","ON"}; +lookup(128462) -> {"So","ON"}; +lookup(128463) -> {"So","ON"}; +lookup(128464) -> {"So","ON"}; +lookup(128465) -> {"So","ON"}; +lookup(128466) -> {"So","ON"}; +lookup(128467) -> {"So","ON"}; +lookup(128468) -> {"So","ON"}; +lookup(128469) -> {"So","ON"}; +lookup(128470) -> {"So","ON"}; +lookup(128471) -> {"So","ON"}; +lookup(128472) -> {"So","ON"}; +lookup(128473) -> {"So","ON"}; +lookup(128474) -> {"So","ON"}; +lookup(128475) -> {"So","ON"}; +lookup(128476) -> {"So","ON"}; +lookup(128477) -> {"So","ON"}; +lookup(128478) -> {"So","ON"}; +lookup(128479) -> {"So","ON"}; +lookup(128480) -> {"So","ON"}; +lookup(128481) -> {"So","ON"}; +lookup(128482) -> {"So","ON"}; +lookup(128483) -> {"So","ON"}; +lookup(128484) -> {"So","ON"}; +lookup(128485) -> {"So","ON"}; +lookup(128486) -> {"So","ON"}; +lookup(128487) -> {"So","ON"}; +lookup(128488) -> {"So","ON"}; +lookup(128489) -> {"So","ON"}; +lookup(128490) -> {"So","ON"}; +lookup(128491) -> {"So","ON"}; +lookup(128492) -> {"So","ON"}; +lookup(128493) -> {"So","ON"}; +lookup(128494) -> {"So","ON"}; +lookup(128495) -> {"So","ON"}; 
+lookup(128496) -> {"So","ON"}; +lookup(128497) -> {"So","ON"}; +lookup(128498) -> {"So","ON"}; +lookup(128499) -> {"So","ON"}; +lookup(128500) -> {"So","ON"}; +lookup(128501) -> {"So","ON"}; +lookup(128502) -> {"So","ON"}; +lookup(128503) -> {"So","ON"}; +lookup(128504) -> {"So","ON"}; +lookup(128505) -> {"So","ON"}; +lookup(128506) -> {"So","ON"}; +lookup(128507) -> {"So","ON"}; +lookup(128508) -> {"So","ON"}; +lookup(128509) -> {"So","ON"}; +lookup(128510) -> {"So","ON"}; +lookup(128511) -> {"So","ON"}; +lookup(128512) -> {"So","ON"}; +lookup(128513) -> {"So","ON"}; +lookup(128514) -> {"So","ON"}; +lookup(128515) -> {"So","ON"}; +lookup(128516) -> {"So","ON"}; +lookup(128517) -> {"So","ON"}; +lookup(128518) -> {"So","ON"}; +lookup(128519) -> {"So","ON"}; +lookup(128520) -> {"So","ON"}; +lookup(128521) -> {"So","ON"}; +lookup(128522) -> {"So","ON"}; +lookup(128523) -> {"So","ON"}; +lookup(128524) -> {"So","ON"}; +lookup(128525) -> {"So","ON"}; +lookup(128526) -> {"So","ON"}; +lookup(128527) -> {"So","ON"}; +lookup(128528) -> {"So","ON"}; +lookup(128529) -> {"So","ON"}; +lookup(128530) -> {"So","ON"}; +lookup(128531) -> {"So","ON"}; +lookup(128532) -> {"So","ON"}; +lookup(128533) -> {"So","ON"}; +lookup(128534) -> {"So","ON"}; +lookup(128535) -> {"So","ON"}; +lookup(128536) -> {"So","ON"}; +lookup(128537) -> {"So","ON"}; +lookup(128538) -> {"So","ON"}; +lookup(128539) -> {"So","ON"}; +lookup(128540) -> {"So","ON"}; +lookup(128541) -> {"So","ON"}; +lookup(128542) -> {"So","ON"}; +lookup(128543) -> {"So","ON"}; +lookup(128544) -> {"So","ON"}; +lookup(128545) -> {"So","ON"}; +lookup(128546) -> {"So","ON"}; +lookup(128547) -> {"So","ON"}; +lookup(128548) -> {"So","ON"}; +lookup(128549) -> {"So","ON"}; +lookup(128550) -> {"So","ON"}; +lookup(128551) -> {"So","ON"}; +lookup(128552) -> {"So","ON"}; +lookup(128553) -> {"So","ON"}; +lookup(128554) -> {"So","ON"}; +lookup(128555) -> {"So","ON"}; +lookup(128556) -> {"So","ON"}; +lookup(128557) -> {"So","ON"}; +lookup(128558) 
-> {"So","ON"}; +lookup(128559) -> {"So","ON"}; +lookup(128560) -> {"So","ON"}; +lookup(128561) -> {"So","ON"}; +lookup(128562) -> {"So","ON"}; +lookup(128563) -> {"So","ON"}; +lookup(128564) -> {"So","ON"}; +lookup(128565) -> {"So","ON"}; +lookup(128566) -> {"So","ON"}; +lookup(128567) -> {"So","ON"}; +lookup(128568) -> {"So","ON"}; +lookup(128569) -> {"So","ON"}; +lookup(128570) -> {"So","ON"}; +lookup(128571) -> {"So","ON"}; +lookup(128572) -> {"So","ON"}; +lookup(128573) -> {"So","ON"}; +lookup(128574) -> {"So","ON"}; +lookup(128575) -> {"So","ON"}; +lookup(128576) -> {"So","ON"}; +lookup(128577) -> {"So","ON"}; +lookup(128578) -> {"So","ON"}; +lookup(128579) -> {"So","ON"}; +lookup(128580) -> {"So","ON"}; +lookup(128581) -> {"So","ON"}; +lookup(128582) -> {"So","ON"}; +lookup(128583) -> {"So","ON"}; +lookup(128584) -> {"So","ON"}; +lookup(128585) -> {"So","ON"}; +lookup(128586) -> {"So","ON"}; +lookup(128587) -> {"So","ON"}; +lookup(128588) -> {"So","ON"}; +lookup(128589) -> {"So","ON"}; +lookup(128590) -> {"So","ON"}; +lookup(128591) -> {"So","ON"}; +lookup(128592) -> {"So","ON"}; +lookup(128593) -> {"So","ON"}; +lookup(128594) -> {"So","ON"}; +lookup(128595) -> {"So","ON"}; +lookup(128596) -> {"So","ON"}; +lookup(128597) -> {"So","ON"}; +lookup(128598) -> {"So","ON"}; +lookup(128599) -> {"So","ON"}; +lookup(128600) -> {"So","ON"}; +lookup(128601) -> {"So","ON"}; +lookup(128602) -> {"So","ON"}; +lookup(128603) -> {"So","ON"}; +lookup(128604) -> {"So","ON"}; +lookup(128605) -> {"So","ON"}; +lookup(128606) -> {"So","ON"}; +lookup(128607) -> {"So","ON"}; +lookup(128608) -> {"So","ON"}; +lookup(128609) -> {"So","ON"}; +lookup(128610) -> {"So","ON"}; +lookup(128611) -> {"So","ON"}; +lookup(128612) -> {"So","ON"}; +lookup(128613) -> {"So","ON"}; +lookup(128614) -> {"So","ON"}; +lookup(128615) -> {"So","ON"}; +lookup(128616) -> {"So","ON"}; +lookup(128617) -> {"So","ON"}; +lookup(128618) -> {"So","ON"}; +lookup(128619) -> {"So","ON"}; +lookup(128620) -> {"So","ON"}; 
+lookup(128621) -> {"So","ON"}; +lookup(128622) -> {"So","ON"}; +lookup(128623) -> {"So","ON"}; +lookup(128624) -> {"So","ON"}; +lookup(128625) -> {"So","ON"}; +lookup(128626) -> {"So","ON"}; +lookup(128627) -> {"So","ON"}; +lookup(128628) -> {"So","ON"}; +lookup(128629) -> {"So","ON"}; +lookup(128630) -> {"So","ON"}; +lookup(128631) -> {"So","ON"}; +lookup(128632) -> {"So","ON"}; +lookup(128633) -> {"So","ON"}; +lookup(128634) -> {"So","ON"}; +lookup(128635) -> {"So","ON"}; +lookup(128636) -> {"So","ON"}; +lookup(128637) -> {"So","ON"}; +lookup(128638) -> {"So","ON"}; +lookup(128639) -> {"So","ON"}; +lookup(128640) -> {"So","ON"}; +lookup(128641) -> {"So","ON"}; +lookup(128642) -> {"So","ON"}; +lookup(128643) -> {"So","ON"}; +lookup(128644) -> {"So","ON"}; +lookup(128645) -> {"So","ON"}; +lookup(128646) -> {"So","ON"}; +lookup(128647) -> {"So","ON"}; +lookup(128648) -> {"So","ON"}; +lookup(128649) -> {"So","ON"}; +lookup(128650) -> {"So","ON"}; +lookup(128651) -> {"So","ON"}; +lookup(128652) -> {"So","ON"}; +lookup(128653) -> {"So","ON"}; +lookup(128654) -> {"So","ON"}; +lookup(128655) -> {"So","ON"}; +lookup(128656) -> {"So","ON"}; +lookup(128657) -> {"So","ON"}; +lookup(128658) -> {"So","ON"}; +lookup(128659) -> {"So","ON"}; +lookup(128660) -> {"So","ON"}; +lookup(128661) -> {"So","ON"}; +lookup(128662) -> {"So","ON"}; +lookup(128663) -> {"So","ON"}; +lookup(128664) -> {"So","ON"}; +lookup(128665) -> {"So","ON"}; +lookup(128666) -> {"So","ON"}; +lookup(128667) -> {"So","ON"}; +lookup(128668) -> {"So","ON"}; +lookup(128669) -> {"So","ON"}; +lookup(128670) -> {"So","ON"}; +lookup(128671) -> {"So","ON"}; +lookup(128672) -> {"So","ON"}; +lookup(128673) -> {"So","ON"}; +lookup(128674) -> {"So","ON"}; +lookup(128675) -> {"So","ON"}; +lookup(128676) -> {"So","ON"}; +lookup(128677) -> {"So","ON"}; +lookup(128678) -> {"So","ON"}; +lookup(128679) -> {"So","ON"}; +lookup(128680) -> {"So","ON"}; +lookup(128681) -> {"So","ON"}; +lookup(128682) -> {"So","ON"}; +lookup(128683) 
-> {"So","ON"}; +lookup(128684) -> {"So","ON"}; +lookup(128685) -> {"So","ON"}; +lookup(128686) -> {"So","ON"}; +lookup(128687) -> {"So","ON"}; +lookup(128688) -> {"So","ON"}; +lookup(128689) -> {"So","ON"}; +lookup(128690) -> {"So","ON"}; +lookup(128691) -> {"So","ON"}; +lookup(128692) -> {"So","ON"}; +lookup(128693) -> {"So","ON"}; +lookup(128694) -> {"So","ON"}; +lookup(128695) -> {"So","ON"}; +lookup(128696) -> {"So","ON"}; +lookup(128697) -> {"So","ON"}; +lookup(128698) -> {"So","ON"}; +lookup(128699) -> {"So","ON"}; +lookup(128700) -> {"So","ON"}; +lookup(128701) -> {"So","ON"}; +lookup(128702) -> {"So","ON"}; +lookup(128703) -> {"So","ON"}; +lookup(128704) -> {"So","ON"}; +lookup(128705) -> {"So","ON"}; +lookup(128706) -> {"So","ON"}; +lookup(128707) -> {"So","ON"}; +lookup(128708) -> {"So","ON"}; +lookup(128709) -> {"So","ON"}; +lookup(128710) -> {"So","ON"}; +lookup(128711) -> {"So","ON"}; +lookup(128712) -> {"So","ON"}; +lookup(128713) -> {"So","ON"}; +lookup(128714) -> {"So","ON"}; +lookup(128715) -> {"So","ON"}; +lookup(128716) -> {"So","ON"}; +lookup(128717) -> {"So","ON"}; +lookup(128718) -> {"So","ON"}; +lookup(128719) -> {"So","ON"}; +lookup(128720) -> {"So","ON"}; +lookup(128721) -> {"So","ON"}; +lookup(128722) -> {"So","ON"}; +lookup(128723) -> {"So","ON"}; +lookup(128724) -> {"So","ON"}; +lookup(128725) -> {"So","ON"}; +lookup(128726) -> {"So","ON"}; +lookup(128727) -> {"So","ON"}; +lookup(128736) -> {"So","ON"}; +lookup(128737) -> {"So","ON"}; +lookup(128738) -> {"So","ON"}; +lookup(128739) -> {"So","ON"}; +lookup(128740) -> {"So","ON"}; +lookup(128741) -> {"So","ON"}; +lookup(128742) -> {"So","ON"}; +lookup(128743) -> {"So","ON"}; +lookup(128744) -> {"So","ON"}; +lookup(128745) -> {"So","ON"}; +lookup(128746) -> {"So","ON"}; +lookup(128747) -> {"So","ON"}; +lookup(128748) -> {"So","ON"}; +lookup(128752) -> {"So","ON"}; +lookup(128753) -> {"So","ON"}; +lookup(128754) -> {"So","ON"}; +lookup(128755) -> {"So","ON"}; +lookup(128756) -> {"So","ON"}; 
+lookup(128757) -> {"So","ON"}; +lookup(128758) -> {"So","ON"}; +lookup(128759) -> {"So","ON"}; +lookup(128760) -> {"So","ON"}; +lookup(128761) -> {"So","ON"}; +lookup(128762) -> {"So","ON"}; +lookup(128763) -> {"So","ON"}; +lookup(128764) -> {"So","ON"}; +lookup(128768) -> {"So","ON"}; +lookup(128769) -> {"So","ON"}; +lookup(128770) -> {"So","ON"}; +lookup(128771) -> {"So","ON"}; +lookup(128772) -> {"So","ON"}; +lookup(128773) -> {"So","ON"}; +lookup(128774) -> {"So","ON"}; +lookup(128775) -> {"So","ON"}; +lookup(128776) -> {"So","ON"}; +lookup(128777) -> {"So","ON"}; +lookup(128778) -> {"So","ON"}; +lookup(128779) -> {"So","ON"}; +lookup(128780) -> {"So","ON"}; +lookup(128781) -> {"So","ON"}; +lookup(128782) -> {"So","ON"}; +lookup(128783) -> {"So","ON"}; +lookup(128784) -> {"So","ON"}; +lookup(128785) -> {"So","ON"}; +lookup(128786) -> {"So","ON"}; +lookup(128787) -> {"So","ON"}; +lookup(128788) -> {"So","ON"}; +lookup(128789) -> {"So","ON"}; +lookup(128790) -> {"So","ON"}; +lookup(128791) -> {"So","ON"}; +lookup(128792) -> {"So","ON"}; +lookup(128793) -> {"So","ON"}; +lookup(128794) -> {"So","ON"}; +lookup(128795) -> {"So","ON"}; +lookup(128796) -> {"So","ON"}; +lookup(128797) -> {"So","ON"}; +lookup(128798) -> {"So","ON"}; +lookup(128799) -> {"So","ON"}; +lookup(128800) -> {"So","ON"}; +lookup(128801) -> {"So","ON"}; +lookup(128802) -> {"So","ON"}; +lookup(128803) -> {"So","ON"}; +lookup(128804) -> {"So","ON"}; +lookup(128805) -> {"So","ON"}; +lookup(128806) -> {"So","ON"}; +lookup(128807) -> {"So","ON"}; +lookup(128808) -> {"So","ON"}; +lookup(128809) -> {"So","ON"}; +lookup(128810) -> {"So","ON"}; +lookup(128811) -> {"So","ON"}; +lookup(128812) -> {"So","ON"}; +lookup(128813) -> {"So","ON"}; +lookup(128814) -> {"So","ON"}; +lookup(128815) -> {"So","ON"}; +lookup(128816) -> {"So","ON"}; +lookup(128817) -> {"So","ON"}; +lookup(128818) -> {"So","ON"}; +lookup(128819) -> {"So","ON"}; +lookup(128820) -> {"So","ON"}; +lookup(128821) -> {"So","ON"}; +lookup(128822) 
-> {"So","ON"}; +lookup(128823) -> {"So","ON"}; +lookup(128824) -> {"So","ON"}; +lookup(128825) -> {"So","ON"}; +lookup(128826) -> {"So","ON"}; +lookup(128827) -> {"So","ON"}; +lookup(128828) -> {"So","ON"}; +lookup(128829) -> {"So","ON"}; +lookup(128830) -> {"So","ON"}; +lookup(128831) -> {"So","ON"}; +lookup(128832) -> {"So","ON"}; +lookup(128833) -> {"So","ON"}; +lookup(128834) -> {"So","ON"}; +lookup(128835) -> {"So","ON"}; +lookup(128836) -> {"So","ON"}; +lookup(128837) -> {"So","ON"}; +lookup(128838) -> {"So","ON"}; +lookup(128839) -> {"So","ON"}; +lookup(128840) -> {"So","ON"}; +lookup(128841) -> {"So","ON"}; +lookup(128842) -> {"So","ON"}; +lookup(128843) -> {"So","ON"}; +lookup(128844) -> {"So","ON"}; +lookup(128845) -> {"So","ON"}; +lookup(128846) -> {"So","ON"}; +lookup(128847) -> {"So","ON"}; +lookup(128848) -> {"So","ON"}; +lookup(128849) -> {"So","ON"}; +lookup(128850) -> {"So","ON"}; +lookup(128851) -> {"So","ON"}; +lookup(128852) -> {"So","ON"}; +lookup(128853) -> {"So","ON"}; +lookup(128854) -> {"So","ON"}; +lookup(128855) -> {"So","ON"}; +lookup(128856) -> {"So","ON"}; +lookup(128857) -> {"So","ON"}; +lookup(128858) -> {"So","ON"}; +lookup(128859) -> {"So","ON"}; +lookup(128860) -> {"So","ON"}; +lookup(128861) -> {"So","ON"}; +lookup(128862) -> {"So","ON"}; +lookup(128863) -> {"So","ON"}; +lookup(128864) -> {"So","ON"}; +lookup(128865) -> {"So","ON"}; +lookup(128866) -> {"So","ON"}; +lookup(128867) -> {"So","ON"}; +lookup(128868) -> {"So","ON"}; +lookup(128869) -> {"So","ON"}; +lookup(128870) -> {"So","ON"}; +lookup(128871) -> {"So","ON"}; +lookup(128872) -> {"So","ON"}; +lookup(128873) -> {"So","ON"}; +lookup(128874) -> {"So","ON"}; +lookup(128875) -> {"So","ON"}; +lookup(128876) -> {"So","ON"}; +lookup(128877) -> {"So","ON"}; +lookup(128878) -> {"So","ON"}; +lookup(128879) -> {"So","ON"}; +lookup(128880) -> {"So","ON"}; +lookup(128881) -> {"So","ON"}; +lookup(128882) -> {"So","ON"}; +lookup(128883) -> {"So","ON"}; +lookup(128896) -> {"So","ON"}; 
+lookup(128897) -> {"So","ON"}; +lookup(128898) -> {"So","ON"}; +lookup(128899) -> {"So","ON"}; +lookup(128900) -> {"So","ON"}; +lookup(128901) -> {"So","ON"}; +lookup(128902) -> {"So","ON"}; +lookup(128903) -> {"So","ON"}; +lookup(128904) -> {"So","ON"}; +lookup(128905) -> {"So","ON"}; +lookup(128906) -> {"So","ON"}; +lookup(128907) -> {"So","ON"}; +lookup(128908) -> {"So","ON"}; +lookup(128909) -> {"So","ON"}; +lookup(128910) -> {"So","ON"}; +lookup(128911) -> {"So","ON"}; +lookup(128912) -> {"So","ON"}; +lookup(128913) -> {"So","ON"}; +lookup(128914) -> {"So","ON"}; +lookup(128915) -> {"So","ON"}; +lookup(128916) -> {"So","ON"}; +lookup(128917) -> {"So","ON"}; +lookup(128918) -> {"So","ON"}; +lookup(128919) -> {"So","ON"}; +lookup(128920) -> {"So","ON"}; +lookup(128921) -> {"So","ON"}; +lookup(128922) -> {"So","ON"}; +lookup(128923) -> {"So","ON"}; +lookup(128924) -> {"So","ON"}; +lookup(128925) -> {"So","ON"}; +lookup(128926) -> {"So","ON"}; +lookup(128927) -> {"So","ON"}; +lookup(128928) -> {"So","ON"}; +lookup(128929) -> {"So","ON"}; +lookup(128930) -> {"So","ON"}; +lookup(128931) -> {"So","ON"}; +lookup(128932) -> {"So","ON"}; +lookup(128933) -> {"So","ON"}; +lookup(128934) -> {"So","ON"}; +lookup(128935) -> {"So","ON"}; +lookup(128936) -> {"So","ON"}; +lookup(128937) -> {"So","ON"}; +lookup(128938) -> {"So","ON"}; +lookup(128939) -> {"So","ON"}; +lookup(128940) -> {"So","ON"}; +lookup(128941) -> {"So","ON"}; +lookup(128942) -> {"So","ON"}; +lookup(128943) -> {"So","ON"}; +lookup(128944) -> {"So","ON"}; +lookup(128945) -> {"So","ON"}; +lookup(128946) -> {"So","ON"}; +lookup(128947) -> {"So","ON"}; +lookup(128948) -> {"So","ON"}; +lookup(128949) -> {"So","ON"}; +lookup(128950) -> {"So","ON"}; +lookup(128951) -> {"So","ON"}; +lookup(128952) -> {"So","ON"}; +lookup(128953) -> {"So","ON"}; +lookup(128954) -> {"So","ON"}; +lookup(128955) -> {"So","ON"}; +lookup(128956) -> {"So","ON"}; +lookup(128957) -> {"So","ON"}; +lookup(128958) -> {"So","ON"}; +lookup(128959) 
-> {"So","ON"}; +lookup(128960) -> {"So","ON"}; +lookup(128961) -> {"So","ON"}; +lookup(128962) -> {"So","ON"}; +lookup(128963) -> {"So","ON"}; +lookup(128964) -> {"So","ON"}; +lookup(128965) -> {"So","ON"}; +lookup(128966) -> {"So","ON"}; +lookup(128967) -> {"So","ON"}; +lookup(128968) -> {"So","ON"}; +lookup(128969) -> {"So","ON"}; +lookup(128970) -> {"So","ON"}; +lookup(128971) -> {"So","ON"}; +lookup(128972) -> {"So","ON"}; +lookup(128973) -> {"So","ON"}; +lookup(128974) -> {"So","ON"}; +lookup(128975) -> {"So","ON"}; +lookup(128976) -> {"So","ON"}; +lookup(128977) -> {"So","ON"}; +lookup(128978) -> {"So","ON"}; +lookup(128979) -> {"So","ON"}; +lookup(128980) -> {"So","ON"}; +lookup(128981) -> {"So","ON"}; +lookup(128982) -> {"So","ON"}; +lookup(128983) -> {"So","ON"}; +lookup(128984) -> {"So","ON"}; +lookup(128992) -> {"So","ON"}; +lookup(128993) -> {"So","ON"}; +lookup(128994) -> {"So","ON"}; +lookup(128995) -> {"So","ON"}; +lookup(128996) -> {"So","ON"}; +lookup(128997) -> {"So","ON"}; +lookup(128998) -> {"So","ON"}; +lookup(128999) -> {"So","ON"}; +lookup(129000) -> {"So","ON"}; +lookup(129001) -> {"So","ON"}; +lookup(129002) -> {"So","ON"}; +lookup(129003) -> {"So","ON"}; +lookup(129024) -> {"So","ON"}; +lookup(129025) -> {"So","ON"}; +lookup(129026) -> {"So","ON"}; +lookup(129027) -> {"So","ON"}; +lookup(129028) -> {"So","ON"}; +lookup(129029) -> {"So","ON"}; +lookup(129030) -> {"So","ON"}; +lookup(129031) -> {"So","ON"}; +lookup(129032) -> {"So","ON"}; +lookup(129033) -> {"So","ON"}; +lookup(129034) -> {"So","ON"}; +lookup(129035) -> {"So","ON"}; +lookup(129040) -> {"So","ON"}; +lookup(129041) -> {"So","ON"}; +lookup(129042) -> {"So","ON"}; +lookup(129043) -> {"So","ON"}; +lookup(129044) -> {"So","ON"}; +lookup(129045) -> {"So","ON"}; +lookup(129046) -> {"So","ON"}; +lookup(129047) -> {"So","ON"}; +lookup(129048) -> {"So","ON"}; +lookup(129049) -> {"So","ON"}; +lookup(129050) -> {"So","ON"}; +lookup(129051) -> {"So","ON"}; +lookup(129052) -> {"So","ON"}; 
+lookup(129053) -> {"So","ON"}; +lookup(129054) -> {"So","ON"}; +lookup(129055) -> {"So","ON"}; +lookup(129056) -> {"So","ON"}; +lookup(129057) -> {"So","ON"}; +lookup(129058) -> {"So","ON"}; +lookup(129059) -> {"So","ON"}; +lookup(129060) -> {"So","ON"}; +lookup(129061) -> {"So","ON"}; +lookup(129062) -> {"So","ON"}; +lookup(129063) -> {"So","ON"}; +lookup(129064) -> {"So","ON"}; +lookup(129065) -> {"So","ON"}; +lookup(129066) -> {"So","ON"}; +lookup(129067) -> {"So","ON"}; +lookup(129068) -> {"So","ON"}; +lookup(129069) -> {"So","ON"}; +lookup(129070) -> {"So","ON"}; +lookup(129071) -> {"So","ON"}; +lookup(129072) -> {"So","ON"}; +lookup(129073) -> {"So","ON"}; +lookup(129074) -> {"So","ON"}; +lookup(129075) -> {"So","ON"}; +lookup(129076) -> {"So","ON"}; +lookup(129077) -> {"So","ON"}; +lookup(129078) -> {"So","ON"}; +lookup(129079) -> {"So","ON"}; +lookup(129080) -> {"So","ON"}; +lookup(129081) -> {"So","ON"}; +lookup(129082) -> {"So","ON"}; +lookup(129083) -> {"So","ON"}; +lookup(129084) -> {"So","ON"}; +lookup(129085) -> {"So","ON"}; +lookup(129086) -> {"So","ON"}; +lookup(129087) -> {"So","ON"}; +lookup(129088) -> {"So","ON"}; +lookup(129089) -> {"So","ON"}; +lookup(129090) -> {"So","ON"}; +lookup(129091) -> {"So","ON"}; +lookup(129092) -> {"So","ON"}; +lookup(129093) -> {"So","ON"}; +lookup(129094) -> {"So","ON"}; +lookup(129095) -> {"So","ON"}; +lookup(129104) -> {"So","ON"}; +lookup(129105) -> {"So","ON"}; +lookup(129106) -> {"So","ON"}; +lookup(129107) -> {"So","ON"}; +lookup(129108) -> {"So","ON"}; +lookup(129109) -> {"So","ON"}; +lookup(129110) -> {"So","ON"}; +lookup(129111) -> {"So","ON"}; +lookup(129112) -> {"So","ON"}; +lookup(129113) -> {"So","ON"}; +lookup(129120) -> {"So","ON"}; +lookup(129121) -> {"So","ON"}; +lookup(129122) -> {"So","ON"}; +lookup(129123) -> {"So","ON"}; +lookup(129124) -> {"So","ON"}; +lookup(129125) -> {"So","ON"}; +lookup(129126) -> {"So","ON"}; +lookup(129127) -> {"So","ON"}; +lookup(129128) -> {"So","ON"}; +lookup(129129) 
-> {"So","ON"}; +lookup(129130) -> {"So","ON"}; +lookup(129131) -> {"So","ON"}; +lookup(129132) -> {"So","ON"}; +lookup(129133) -> {"So","ON"}; +lookup(129134) -> {"So","ON"}; +lookup(129135) -> {"So","ON"}; +lookup(129136) -> {"So","ON"}; +lookup(129137) -> {"So","ON"}; +lookup(129138) -> {"So","ON"}; +lookup(129139) -> {"So","ON"}; +lookup(129140) -> {"So","ON"}; +lookup(129141) -> {"So","ON"}; +lookup(129142) -> {"So","ON"}; +lookup(129143) -> {"So","ON"}; +lookup(129144) -> {"So","ON"}; +lookup(129145) -> {"So","ON"}; +lookup(129146) -> {"So","ON"}; +lookup(129147) -> {"So","ON"}; +lookup(129148) -> {"So","ON"}; +lookup(129149) -> {"So","ON"}; +lookup(129150) -> {"So","ON"}; +lookup(129151) -> {"So","ON"}; +lookup(129152) -> {"So","ON"}; +lookup(129153) -> {"So","ON"}; +lookup(129154) -> {"So","ON"}; +lookup(129155) -> {"So","ON"}; +lookup(129156) -> {"So","ON"}; +lookup(129157) -> {"So","ON"}; +lookup(129158) -> {"So","ON"}; +lookup(129159) -> {"So","ON"}; +lookup(129168) -> {"So","ON"}; +lookup(129169) -> {"So","ON"}; +lookup(129170) -> {"So","ON"}; +lookup(129171) -> {"So","ON"}; +lookup(129172) -> {"So","ON"}; +lookup(129173) -> {"So","ON"}; +lookup(129174) -> {"So","ON"}; +lookup(129175) -> {"So","ON"}; +lookup(129176) -> {"So","ON"}; +lookup(129177) -> {"So","ON"}; +lookup(129178) -> {"So","ON"}; +lookup(129179) -> {"So","ON"}; +lookup(129180) -> {"So","ON"}; +lookup(129181) -> {"So","ON"}; +lookup(129182) -> {"So","ON"}; +lookup(129183) -> {"So","ON"}; +lookup(129184) -> {"So","ON"}; +lookup(129185) -> {"So","ON"}; +lookup(129186) -> {"So","ON"}; +lookup(129187) -> {"So","ON"}; +lookup(129188) -> {"So","ON"}; +lookup(129189) -> {"So","ON"}; +lookup(129190) -> {"So","ON"}; +lookup(129191) -> {"So","ON"}; +lookup(129192) -> {"So","ON"}; +lookup(129193) -> {"So","ON"}; +lookup(129194) -> {"So","ON"}; +lookup(129195) -> {"So","ON"}; +lookup(129196) -> {"So","ON"}; +lookup(129197) -> {"So","ON"}; +lookup(129200) -> {"So","ON"}; +lookup(129201) -> {"So","ON"}; 
+lookup(129280) -> {"So","ON"}; +lookup(129281) -> {"So","ON"}; +lookup(129282) -> {"So","ON"}; +lookup(129283) -> {"So","ON"}; +lookup(129284) -> {"So","ON"}; +lookup(129285) -> {"So","ON"}; +lookup(129286) -> {"So","ON"}; +lookup(129287) -> {"So","ON"}; +lookup(129288) -> {"So","ON"}; +lookup(129289) -> {"So","ON"}; +lookup(129290) -> {"So","ON"}; +lookup(129291) -> {"So","ON"}; +lookup(129292) -> {"So","ON"}; +lookup(129293) -> {"So","ON"}; +lookup(129294) -> {"So","ON"}; +lookup(129295) -> {"So","ON"}; +lookup(129296) -> {"So","ON"}; +lookup(129297) -> {"So","ON"}; +lookup(129298) -> {"So","ON"}; +lookup(129299) -> {"So","ON"}; +lookup(129300) -> {"So","ON"}; +lookup(129301) -> {"So","ON"}; +lookup(129302) -> {"So","ON"}; +lookup(129303) -> {"So","ON"}; +lookup(129304) -> {"So","ON"}; +lookup(129305) -> {"So","ON"}; +lookup(129306) -> {"So","ON"}; +lookup(129307) -> {"So","ON"}; +lookup(129308) -> {"So","ON"}; +lookup(129309) -> {"So","ON"}; +lookup(129310) -> {"So","ON"}; +lookup(129311) -> {"So","ON"}; +lookup(129312) -> {"So","ON"}; +lookup(129313) -> {"So","ON"}; +lookup(129314) -> {"So","ON"}; +lookup(129315) -> {"So","ON"}; +lookup(129316) -> {"So","ON"}; +lookup(129317) -> {"So","ON"}; +lookup(129318) -> {"So","ON"}; +lookup(129319) -> {"So","ON"}; +lookup(129320) -> {"So","ON"}; +lookup(129321) -> {"So","ON"}; +lookup(129322) -> {"So","ON"}; +lookup(129323) -> {"So","ON"}; +lookup(129324) -> {"So","ON"}; +lookup(129325) -> {"So","ON"}; +lookup(129326) -> {"So","ON"}; +lookup(129327) -> {"So","ON"}; +lookup(129328) -> {"So","ON"}; +lookup(129329) -> {"So","ON"}; +lookup(129330) -> {"So","ON"}; +lookup(129331) -> {"So","ON"}; +lookup(129332) -> {"So","ON"}; +lookup(129333) -> {"So","ON"}; +lookup(129334) -> {"So","ON"}; +lookup(129335) -> {"So","ON"}; +lookup(129336) -> {"So","ON"}; +lookup(129337) -> {"So","ON"}; +lookup(129338) -> {"So","ON"}; +lookup(129339) -> {"So","ON"}; +lookup(129340) -> {"So","ON"}; +lookup(129341) -> {"So","ON"}; +lookup(129342) 
-> {"So","ON"}; +lookup(129343) -> {"So","ON"}; +lookup(129344) -> {"So","ON"}; +lookup(129345) -> {"So","ON"}; +lookup(129346) -> {"So","ON"}; +lookup(129347) -> {"So","ON"}; +lookup(129348) -> {"So","ON"}; +lookup(129349) -> {"So","ON"}; +lookup(129350) -> {"So","ON"}; +lookup(129351) -> {"So","ON"}; +lookup(129352) -> {"So","ON"}; +lookup(129353) -> {"So","ON"}; +lookup(129354) -> {"So","ON"}; +lookup(129355) -> {"So","ON"}; +lookup(129356) -> {"So","ON"}; +lookup(129357) -> {"So","ON"}; +lookup(129358) -> {"So","ON"}; +lookup(129359) -> {"So","ON"}; +lookup(129360) -> {"So","ON"}; +lookup(129361) -> {"So","ON"}; +lookup(129362) -> {"So","ON"}; +lookup(129363) -> {"So","ON"}; +lookup(129364) -> {"So","ON"}; +lookup(129365) -> {"So","ON"}; +lookup(129366) -> {"So","ON"}; +lookup(129367) -> {"So","ON"}; +lookup(129368) -> {"So","ON"}; +lookup(129369) -> {"So","ON"}; +lookup(129370) -> {"So","ON"}; +lookup(129371) -> {"So","ON"}; +lookup(129372) -> {"So","ON"}; +lookup(129373) -> {"So","ON"}; +lookup(129374) -> {"So","ON"}; +lookup(129375) -> {"So","ON"}; +lookup(129376) -> {"So","ON"}; +lookup(129377) -> {"So","ON"}; +lookup(129378) -> {"So","ON"}; +lookup(129379) -> {"So","ON"}; +lookup(129380) -> {"So","ON"}; +lookup(129381) -> {"So","ON"}; +lookup(129382) -> {"So","ON"}; +lookup(129383) -> {"So","ON"}; +lookup(129384) -> {"So","ON"}; +lookup(129385) -> {"So","ON"}; +lookup(129386) -> {"So","ON"}; +lookup(129387) -> {"So","ON"}; +lookup(129388) -> {"So","ON"}; +lookup(129389) -> {"So","ON"}; +lookup(129390) -> {"So","ON"}; +lookup(129391) -> {"So","ON"}; +lookup(129392) -> {"So","ON"}; +lookup(129393) -> {"So","ON"}; +lookup(129394) -> {"So","ON"}; +lookup(129395) -> {"So","ON"}; +lookup(129396) -> {"So","ON"}; +lookup(129397) -> {"So","ON"}; +lookup(129398) -> {"So","ON"}; +lookup(129399) -> {"So","ON"}; +lookup(129400) -> {"So","ON"}; +lookup(129402) -> {"So","ON"}; +lookup(129403) -> {"So","ON"}; +lookup(129404) -> {"So","ON"}; +lookup(129405) -> {"So","ON"}; 
+lookup(129406) -> {"So","ON"}; +lookup(129407) -> {"So","ON"}; +lookup(129408) -> {"So","ON"}; +lookup(129409) -> {"So","ON"}; +lookup(129410) -> {"So","ON"}; +lookup(129411) -> {"So","ON"}; +lookup(129412) -> {"So","ON"}; +lookup(129413) -> {"So","ON"}; +lookup(129414) -> {"So","ON"}; +lookup(129415) -> {"So","ON"}; +lookup(129416) -> {"So","ON"}; +lookup(129417) -> {"So","ON"}; +lookup(129418) -> {"So","ON"}; +lookup(129419) -> {"So","ON"}; +lookup(129420) -> {"So","ON"}; +lookup(129421) -> {"So","ON"}; +lookup(129422) -> {"So","ON"}; +lookup(129423) -> {"So","ON"}; +lookup(129424) -> {"So","ON"}; +lookup(129425) -> {"So","ON"}; +lookup(129426) -> {"So","ON"}; +lookup(129427) -> {"So","ON"}; +lookup(129428) -> {"So","ON"}; +lookup(129429) -> {"So","ON"}; +lookup(129430) -> {"So","ON"}; +lookup(129431) -> {"So","ON"}; +lookup(129432) -> {"So","ON"}; +lookup(129433) -> {"So","ON"}; +lookup(129434) -> {"So","ON"}; +lookup(129435) -> {"So","ON"}; +lookup(129436) -> {"So","ON"}; +lookup(129437) -> {"So","ON"}; +lookup(129438) -> {"So","ON"}; +lookup(129439) -> {"So","ON"}; +lookup(129440) -> {"So","ON"}; +lookup(129441) -> {"So","ON"}; +lookup(129442) -> {"So","ON"}; +lookup(129443) -> {"So","ON"}; +lookup(129444) -> {"So","ON"}; +lookup(129445) -> {"So","ON"}; +lookup(129446) -> {"So","ON"}; +lookup(129447) -> {"So","ON"}; +lookup(129448) -> {"So","ON"}; +lookup(129449) -> {"So","ON"}; +lookup(129450) -> {"So","ON"}; +lookup(129451) -> {"So","ON"}; +lookup(129452) -> {"So","ON"}; +lookup(129453) -> {"So","ON"}; +lookup(129454) -> {"So","ON"}; +lookup(129455) -> {"So","ON"}; +lookup(129456) -> {"So","ON"}; +lookup(129457) -> {"So","ON"}; +lookup(129458) -> {"So","ON"}; +lookup(129459) -> {"So","ON"}; +lookup(129460) -> {"So","ON"}; +lookup(129461) -> {"So","ON"}; +lookup(129462) -> {"So","ON"}; +lookup(129463) -> {"So","ON"}; +lookup(129464) -> {"So","ON"}; +lookup(129465) -> {"So","ON"}; +lookup(129466) -> {"So","ON"}; +lookup(129467) -> {"So","ON"}; +lookup(129468) 
-> {"So","ON"}; +lookup(129469) -> {"So","ON"}; +lookup(129470) -> {"So","ON"}; +lookup(129471) -> {"So","ON"}; +lookup(129472) -> {"So","ON"}; +lookup(129473) -> {"So","ON"}; +lookup(129474) -> {"So","ON"}; +lookup(129475) -> {"So","ON"}; +lookup(129476) -> {"So","ON"}; +lookup(129477) -> {"So","ON"}; +lookup(129478) -> {"So","ON"}; +lookup(129479) -> {"So","ON"}; +lookup(129480) -> {"So","ON"}; +lookup(129481) -> {"So","ON"}; +lookup(129482) -> {"So","ON"}; +lookup(129483) -> {"So","ON"}; +lookup(129485) -> {"So","ON"}; +lookup(129486) -> {"So","ON"}; +lookup(129487) -> {"So","ON"}; +lookup(129488) -> {"So","ON"}; +lookup(129489) -> {"So","ON"}; +lookup(129490) -> {"So","ON"}; +lookup(129491) -> {"So","ON"}; +lookup(129492) -> {"So","ON"}; +lookup(129493) -> {"So","ON"}; +lookup(129494) -> {"So","ON"}; +lookup(129495) -> {"So","ON"}; +lookup(129496) -> {"So","ON"}; +lookup(129497) -> {"So","ON"}; +lookup(129498) -> {"So","ON"}; +lookup(129499) -> {"So","ON"}; +lookup(129500) -> {"So","ON"}; +lookup(129501) -> {"So","ON"}; +lookup(129502) -> {"So","ON"}; +lookup(129503) -> {"So","ON"}; +lookup(129504) -> {"So","ON"}; +lookup(129505) -> {"So","ON"}; +lookup(129506) -> {"So","ON"}; +lookup(129507) -> {"So","ON"}; +lookup(129508) -> {"So","ON"}; +lookup(129509) -> {"So","ON"}; +lookup(129510) -> {"So","ON"}; +lookup(129511) -> {"So","ON"}; +lookup(129512) -> {"So","ON"}; +lookup(129513) -> {"So","ON"}; +lookup(129514) -> {"So","ON"}; +lookup(129515) -> {"So","ON"}; +lookup(129516) -> {"So","ON"}; +lookup(129517) -> {"So","ON"}; +lookup(129518) -> {"So","ON"}; +lookup(129519) -> {"So","ON"}; +lookup(129520) -> {"So","ON"}; +lookup(129521) -> {"So","ON"}; +lookup(129522) -> {"So","ON"}; +lookup(129523) -> {"So","ON"}; +lookup(129524) -> {"So","ON"}; +lookup(129525) -> {"So","ON"}; +lookup(129526) -> {"So","ON"}; +lookup(129527) -> {"So","ON"}; +lookup(129528) -> {"So","ON"}; +lookup(129529) -> {"So","ON"}; +lookup(129530) -> {"So","ON"}; +lookup(129531) -> {"So","ON"}; 
+lookup(129532) -> {"So","ON"}; +lookup(129533) -> {"So","ON"}; +lookup(129534) -> {"So","ON"}; +lookup(129535) -> {"So","ON"}; +lookup(129536) -> {"So","ON"}; +lookup(129537) -> {"So","ON"}; +lookup(129538) -> {"So","ON"}; +lookup(129539) -> {"So","ON"}; +lookup(129540) -> {"So","ON"}; +lookup(129541) -> {"So","ON"}; +lookup(129542) -> {"So","ON"}; +lookup(129543) -> {"So","ON"}; +lookup(129544) -> {"So","ON"}; +lookup(129545) -> {"So","ON"}; +lookup(129546) -> {"So","ON"}; +lookup(129547) -> {"So","ON"}; +lookup(129548) -> {"So","ON"}; +lookup(129549) -> {"So","ON"}; +lookup(129550) -> {"So","ON"}; +lookup(129551) -> {"So","ON"}; +lookup(129552) -> {"So","ON"}; +lookup(129553) -> {"So","ON"}; +lookup(129554) -> {"So","ON"}; +lookup(129555) -> {"So","ON"}; +lookup(129556) -> {"So","ON"}; +lookup(129557) -> {"So","ON"}; +lookup(129558) -> {"So","ON"}; +lookup(129559) -> {"So","ON"}; +lookup(129560) -> {"So","ON"}; +lookup(129561) -> {"So","ON"}; +lookup(129562) -> {"So","ON"}; +lookup(129563) -> {"So","ON"}; +lookup(129564) -> {"So","ON"}; +lookup(129565) -> {"So","ON"}; +lookup(129566) -> {"So","ON"}; +lookup(129567) -> {"So","ON"}; +lookup(129568) -> {"So","ON"}; +lookup(129569) -> {"So","ON"}; +lookup(129570) -> {"So","ON"}; +lookup(129571) -> {"So","ON"}; +lookup(129572) -> {"So","ON"}; +lookup(129573) -> {"So","ON"}; +lookup(129574) -> {"So","ON"}; +lookup(129575) -> {"So","ON"}; +lookup(129576) -> {"So","ON"}; +lookup(129577) -> {"So","ON"}; +lookup(129578) -> {"So","ON"}; +lookup(129579) -> {"So","ON"}; +lookup(129580) -> {"So","ON"}; +lookup(129581) -> {"So","ON"}; +lookup(129582) -> {"So","ON"}; +lookup(129583) -> {"So","ON"}; +lookup(129584) -> {"So","ON"}; +lookup(129585) -> {"So","ON"}; +lookup(129586) -> {"So","ON"}; +lookup(129587) -> {"So","ON"}; +lookup(129588) -> {"So","ON"}; +lookup(129589) -> {"So","ON"}; +lookup(129590) -> {"So","ON"}; +lookup(129591) -> {"So","ON"}; +lookup(129592) -> {"So","ON"}; +lookup(129593) -> {"So","ON"}; +lookup(129594) 
-> {"So","ON"}; +lookup(129595) -> {"So","ON"}; +lookup(129596) -> {"So","ON"}; +lookup(129597) -> {"So","ON"}; +lookup(129598) -> {"So","ON"}; +lookup(129599) -> {"So","ON"}; +lookup(129600) -> {"So","ON"}; +lookup(129601) -> {"So","ON"}; +lookup(129602) -> {"So","ON"}; +lookup(129603) -> {"So","ON"}; +lookup(129604) -> {"So","ON"}; +lookup(129605) -> {"So","ON"}; +lookup(129606) -> {"So","ON"}; +lookup(129607) -> {"So","ON"}; +lookup(129608) -> {"So","ON"}; +lookup(129609) -> {"So","ON"}; +lookup(129610) -> {"So","ON"}; +lookup(129611) -> {"So","ON"}; +lookup(129612) -> {"So","ON"}; +lookup(129613) -> {"So","ON"}; +lookup(129614) -> {"So","ON"}; +lookup(129615) -> {"So","ON"}; +lookup(129616) -> {"So","ON"}; +lookup(129617) -> {"So","ON"}; +lookup(129618) -> {"So","ON"}; +lookup(129619) -> {"So","ON"}; +lookup(129632) -> {"So","ON"}; +lookup(129633) -> {"So","ON"}; +lookup(129634) -> {"So","ON"}; +lookup(129635) -> {"So","ON"}; +lookup(129636) -> {"So","ON"}; +lookup(129637) -> {"So","ON"}; +lookup(129638) -> {"So","ON"}; +lookup(129639) -> {"So","ON"}; +lookup(129640) -> {"So","ON"}; +lookup(129641) -> {"So","ON"}; +lookup(129642) -> {"So","ON"}; +lookup(129643) -> {"So","ON"}; +lookup(129644) -> {"So","ON"}; +lookup(129645) -> {"So","ON"}; +lookup(129648) -> {"So","ON"}; +lookup(129649) -> {"So","ON"}; +lookup(129650) -> {"So","ON"}; +lookup(129651) -> {"So","ON"}; +lookup(129652) -> {"So","ON"}; +lookup(129656) -> {"So","ON"}; +lookup(129657) -> {"So","ON"}; +lookup(129658) -> {"So","ON"}; +lookup(129664) -> {"So","ON"}; +lookup(129665) -> {"So","ON"}; +lookup(129666) -> {"So","ON"}; +lookup(129667) -> {"So","ON"}; +lookup(129668) -> {"So","ON"}; +lookup(129669) -> {"So","ON"}; +lookup(129670) -> {"So","ON"}; +lookup(129680) -> {"So","ON"}; +lookup(129681) -> {"So","ON"}; +lookup(129682) -> {"So","ON"}; +lookup(129683) -> {"So","ON"}; +lookup(129684) -> {"So","ON"}; +lookup(129685) -> {"So","ON"}; +lookup(129686) -> {"So","ON"}; +lookup(129687) -> {"So","ON"}; 
+lookup(129688) -> {"So","ON"}; +lookup(129689) -> {"So","ON"}; +lookup(129690) -> {"So","ON"}; +lookup(129691) -> {"So","ON"}; +lookup(129692) -> {"So","ON"}; +lookup(129693) -> {"So","ON"}; +lookup(129694) -> {"So","ON"}; +lookup(129695) -> {"So","ON"}; +lookup(129696) -> {"So","ON"}; +lookup(129697) -> {"So","ON"}; +lookup(129698) -> {"So","ON"}; +lookup(129699) -> {"So","ON"}; +lookup(129700) -> {"So","ON"}; +lookup(129701) -> {"So","ON"}; +lookup(129702) -> {"So","ON"}; +lookup(129703) -> {"So","ON"}; +lookup(129704) -> {"So","ON"}; +lookup(129712) -> {"So","ON"}; +lookup(129713) -> {"So","ON"}; +lookup(129714) -> {"So","ON"}; +lookup(129715) -> {"So","ON"}; +lookup(129716) -> {"So","ON"}; +lookup(129717) -> {"So","ON"}; +lookup(129718) -> {"So","ON"}; +lookup(129728) -> {"So","ON"}; +lookup(129729) -> {"So","ON"}; +lookup(129730) -> {"So","ON"}; +lookup(129744) -> {"So","ON"}; +lookup(129745) -> {"So","ON"}; +lookup(129746) -> {"So","ON"}; +lookup(129747) -> {"So","ON"}; +lookup(129748) -> {"So","ON"}; +lookup(129749) -> {"So","ON"}; +lookup(129750) -> {"So","ON"}; +lookup(129792) -> {"So","ON"}; +lookup(129793) -> {"So","ON"}; +lookup(129794) -> {"So","ON"}; +lookup(129795) -> {"So","ON"}; +lookup(129796) -> {"So","ON"}; +lookup(129797) -> {"So","ON"}; +lookup(129798) -> {"So","ON"}; +lookup(129799) -> {"So","ON"}; +lookup(129800) -> {"So","ON"}; +lookup(129801) -> {"So","ON"}; +lookup(129802) -> {"So","ON"}; +lookup(129803) -> {"So","ON"}; +lookup(129804) -> {"So","ON"}; +lookup(129805) -> {"So","ON"}; +lookup(129806) -> {"So","ON"}; +lookup(129807) -> {"So","ON"}; +lookup(129808) -> {"So","ON"}; +lookup(129809) -> {"So","ON"}; +lookup(129810) -> {"So","ON"}; +lookup(129811) -> {"So","ON"}; +lookup(129812) -> {"So","ON"}; +lookup(129813) -> {"So","ON"}; +lookup(129814) -> {"So","ON"}; +lookup(129815) -> {"So","ON"}; +lookup(129816) -> {"So","ON"}; +lookup(129817) -> {"So","ON"}; +lookup(129818) -> {"So","ON"}; +lookup(129819) -> {"So","ON"}; +lookup(129820) 
-> {"So","ON"}; +lookup(129821) -> {"So","ON"}; +lookup(129822) -> {"So","ON"}; +lookup(129823) -> {"So","ON"}; +lookup(129824) -> {"So","ON"}; +lookup(129825) -> {"So","ON"}; +lookup(129826) -> {"So","ON"}; +lookup(129827) -> {"So","ON"}; +lookup(129828) -> {"So","ON"}; +lookup(129829) -> {"So","ON"}; +lookup(129830) -> {"So","ON"}; +lookup(129831) -> {"So","ON"}; +lookup(129832) -> {"So","ON"}; +lookup(129833) -> {"So","ON"}; +lookup(129834) -> {"So","ON"}; +lookup(129835) -> {"So","ON"}; +lookup(129836) -> {"So","ON"}; +lookup(129837) -> {"So","ON"}; +lookup(129838) -> {"So","ON"}; +lookup(129839) -> {"So","ON"}; +lookup(129840) -> {"So","ON"}; +lookup(129841) -> {"So","ON"}; +lookup(129842) -> {"So","ON"}; +lookup(129843) -> {"So","ON"}; +lookup(129844) -> {"So","ON"}; +lookup(129845) -> {"So","ON"}; +lookup(129846) -> {"So","ON"}; +lookup(129847) -> {"So","ON"}; +lookup(129848) -> {"So","ON"}; +lookup(129849) -> {"So","ON"}; +lookup(129850) -> {"So","ON"}; +lookup(129851) -> {"So","ON"}; +lookup(129852) -> {"So","ON"}; +lookup(129853) -> {"So","ON"}; +lookup(129854) -> {"So","ON"}; +lookup(129855) -> {"So","ON"}; +lookup(129856) -> {"So","ON"}; +lookup(129857) -> {"So","ON"}; +lookup(129858) -> {"So","ON"}; +lookup(129859) -> {"So","ON"}; +lookup(129860) -> {"So","ON"}; +lookup(129861) -> {"So","ON"}; +lookup(129862) -> {"So","ON"}; +lookup(129863) -> {"So","ON"}; +lookup(129864) -> {"So","ON"}; +lookup(129865) -> {"So","ON"}; +lookup(129866) -> {"So","ON"}; +lookup(129867) -> {"So","ON"}; +lookup(129868) -> {"So","ON"}; +lookup(129869) -> {"So","ON"}; +lookup(129870) -> {"So","ON"}; +lookup(129871) -> {"So","ON"}; +lookup(129872) -> {"So","ON"}; +lookup(129873) -> {"So","ON"}; +lookup(129874) -> {"So","ON"}; +lookup(129875) -> {"So","ON"}; +lookup(129876) -> {"So","ON"}; +lookup(129877) -> {"So","ON"}; +lookup(129878) -> {"So","ON"}; +lookup(129879) -> {"So","ON"}; +lookup(129880) -> {"So","ON"}; +lookup(129881) -> {"So","ON"}; +lookup(129882) -> {"So","ON"}; 
+lookup(129883) -> {"So","ON"}; +lookup(129884) -> {"So","ON"}; +lookup(129885) -> {"So","ON"}; +lookup(129886) -> {"So","ON"}; +lookup(129887) -> {"So","ON"}; +lookup(129888) -> {"So","ON"}; +lookup(129889) -> {"So","ON"}; +lookup(129890) -> {"So","ON"}; +lookup(129891) -> {"So","ON"}; +lookup(129892) -> {"So","ON"}; +lookup(129893) -> {"So","ON"}; +lookup(129894) -> {"So","ON"}; +lookup(129895) -> {"So","ON"}; +lookup(129896) -> {"So","ON"}; +lookup(129897) -> {"So","ON"}; +lookup(129898) -> {"So","ON"}; +lookup(129899) -> {"So","ON"}; +lookup(129900) -> {"So","ON"}; +lookup(129901) -> {"So","ON"}; +lookup(129902) -> {"So","ON"}; +lookup(129903) -> {"So","ON"}; +lookup(129904) -> {"So","ON"}; +lookup(129905) -> {"So","ON"}; +lookup(129906) -> {"So","ON"}; +lookup(129907) -> {"So","ON"}; +lookup(129908) -> {"So","ON"}; +lookup(129909) -> {"So","ON"}; +lookup(129910) -> {"So","ON"}; +lookup(129911) -> {"So","ON"}; +lookup(129912) -> {"So","ON"}; +lookup(129913) -> {"So","ON"}; +lookup(129914) -> {"So","ON"}; +lookup(129915) -> {"So","ON"}; +lookup(129916) -> {"So","ON"}; +lookup(129917) -> {"So","ON"}; +lookup(129918) -> {"So","ON"}; +lookup(129919) -> {"So","ON"}; +lookup(129920) -> {"So","ON"}; +lookup(129921) -> {"So","ON"}; +lookup(129922) -> {"So","ON"}; +lookup(129923) -> {"So","ON"}; +lookup(129924) -> {"So","ON"}; +lookup(129925) -> {"So","ON"}; +lookup(129926) -> {"So","ON"}; +lookup(129927) -> {"So","ON"}; +lookup(129928) -> {"So","ON"}; +lookup(129929) -> {"So","ON"}; +lookup(129930) -> {"So","ON"}; +lookup(129931) -> {"So","ON"}; +lookup(129932) -> {"So","ON"}; +lookup(129933) -> {"So","ON"}; +lookup(129934) -> {"So","ON"}; +lookup(129935) -> {"So","ON"}; +lookup(129936) -> {"So","ON"}; +lookup(129937) -> {"So","ON"}; +lookup(129938) -> {"So","ON"}; +lookup(129940) -> {"So","ON"}; +lookup(129941) -> {"So","ON"}; +lookup(129942) -> {"So","ON"}; +lookup(129943) -> {"So","ON"}; +lookup(129944) -> {"So","ON"}; +lookup(129945) -> {"So","ON"}; +lookup(129946) 
-> {"So","ON"}; +lookup(129947) -> {"So","ON"}; +lookup(129948) -> {"So","ON"}; +lookup(129949) -> {"So","ON"}; +lookup(129950) -> {"So","ON"}; +lookup(129951) -> {"So","ON"}; +lookup(129952) -> {"So","ON"}; +lookup(129953) -> {"So","ON"}; +lookup(129954) -> {"So","ON"}; +lookup(129955) -> {"So","ON"}; +lookup(129956) -> {"So","ON"}; +lookup(129957) -> {"So","ON"}; +lookup(129958) -> {"So","ON"}; +lookup(129959) -> {"So","ON"}; +lookup(129960) -> {"So","ON"}; +lookup(129961) -> {"So","ON"}; +lookup(129962) -> {"So","ON"}; +lookup(129963) -> {"So","ON"}; +lookup(129964) -> {"So","ON"}; +lookup(129965) -> {"So","ON"}; +lookup(129966) -> {"So","ON"}; +lookup(129967) -> {"So","ON"}; +lookup(129968) -> {"So","ON"}; +lookup(129969) -> {"So","ON"}; +lookup(129970) -> {"So","ON"}; +lookup(129971) -> {"So","ON"}; +lookup(129972) -> {"So","ON"}; +lookup(129973) -> {"So","ON"}; +lookup(129974) -> {"So","ON"}; +lookup(129975) -> {"So","ON"}; +lookup(129976) -> {"So","ON"}; +lookup(129977) -> {"So","ON"}; +lookup(129978) -> {"So","ON"}; +lookup(129979) -> {"So","ON"}; +lookup(129980) -> {"So","ON"}; +lookup(129981) -> {"So","ON"}; +lookup(129982) -> {"So","ON"}; +lookup(129983) -> {"So","ON"}; +lookup(129984) -> {"So","ON"}; +lookup(129985) -> {"So","ON"}; +lookup(129986) -> {"So","ON"}; +lookup(129987) -> {"So","ON"}; +lookup(129988) -> {"So","ON"}; +lookup(129989) -> {"So","ON"}; +lookup(129990) -> {"So","ON"}; +lookup(129991) -> {"So","ON"}; +lookup(129992) -> {"So","ON"}; +lookup(129993) -> {"So","ON"}; +lookup(129994) -> {"So","ON"}; +lookup(130032) -> {"Nd","EN"}; +lookup(130033) -> {"Nd","EN"}; +lookup(130034) -> {"Nd","EN"}; +lookup(130035) -> {"Nd","EN"}; +lookup(130036) -> {"Nd","EN"}; +lookup(130037) -> {"Nd","EN"}; +lookup(130038) -> {"Nd","EN"}; +lookup(130039) -> {"Nd","EN"}; +lookup(130040) -> {"Nd","EN"}; +lookup(130041) -> {"Nd","EN"}; +lookup(131072) -> {"Lo","L"}; +lookup(173789) -> {"Lo","L"}; +lookup(173824) -> {"Lo","L"}; +lookup(177972) -> {"Lo","L"}; 
+lookup(177984) -> {"Lo","L"}; +lookup(178205) -> {"Lo","L"}; +lookup(178208) -> {"Lo","L"}; +lookup(183969) -> {"Lo","L"}; +lookup(183984) -> {"Lo","L"}; +lookup(191456) -> {"Lo","L"}; +lookup(194560) -> {"Lo","L"}; +lookup(194561) -> {"Lo","L"}; +lookup(194562) -> {"Lo","L"}; +lookup(194563) -> {"Lo","L"}; +lookup(194564) -> {"Lo","L"}; +lookup(194565) -> {"Lo","L"}; +lookup(194566) -> {"Lo","L"}; +lookup(194567) -> {"Lo","L"}; +lookup(194568) -> {"Lo","L"}; +lookup(194569) -> {"Lo","L"}; +lookup(194570) -> {"Lo","L"}; +lookup(194571) -> {"Lo","L"}; +lookup(194572) -> {"Lo","L"}; +lookup(194573) -> {"Lo","L"}; +lookup(194574) -> {"Lo","L"}; +lookup(194575) -> {"Lo","L"}; +lookup(194576) -> {"Lo","L"}; +lookup(194577) -> {"Lo","L"}; +lookup(194578) -> {"Lo","L"}; +lookup(194579) -> {"Lo","L"}; +lookup(194580) -> {"Lo","L"}; +lookup(194581) -> {"Lo","L"}; +lookup(194582) -> {"Lo","L"}; +lookup(194583) -> {"Lo","L"}; +lookup(194584) -> {"Lo","L"}; +lookup(194585) -> {"Lo","L"}; +lookup(194586) -> {"Lo","L"}; +lookup(194587) -> {"Lo","L"}; +lookup(194588) -> {"Lo","L"}; +lookup(194589) -> {"Lo","L"}; +lookup(194590) -> {"Lo","L"}; +lookup(194591) -> {"Lo","L"}; +lookup(194592) -> {"Lo","L"}; +lookup(194593) -> {"Lo","L"}; +lookup(194594) -> {"Lo","L"}; +lookup(194595) -> {"Lo","L"}; +lookup(194596) -> {"Lo","L"}; +lookup(194597) -> {"Lo","L"}; +lookup(194598) -> {"Lo","L"}; +lookup(194599) -> {"Lo","L"}; +lookup(194600) -> {"Lo","L"}; +lookup(194601) -> {"Lo","L"}; +lookup(194602) -> {"Lo","L"}; +lookup(194603) -> {"Lo","L"}; +lookup(194604) -> {"Lo","L"}; +lookup(194605) -> {"Lo","L"}; +lookup(194606) -> {"Lo","L"}; +lookup(194607) -> {"Lo","L"}; +lookup(194608) -> {"Lo","L"}; +lookup(194609) -> {"Lo","L"}; +lookup(194610) -> {"Lo","L"}; +lookup(194611) -> {"Lo","L"}; +lookup(194612) -> {"Lo","L"}; +lookup(194613) -> {"Lo","L"}; +lookup(194614) -> {"Lo","L"}; +lookup(194615) -> {"Lo","L"}; +lookup(194616) -> {"Lo","L"}; +lookup(194617) -> {"Lo","L"}; +lookup(194618) 
-> {"Lo","L"}; +lookup(194619) -> {"Lo","L"}; +lookup(194620) -> {"Lo","L"}; +lookup(194621) -> {"Lo","L"}; +lookup(194622) -> {"Lo","L"}; +lookup(194623) -> {"Lo","L"}; +lookup(194624) -> {"Lo","L"}; +lookup(194625) -> {"Lo","L"}; +lookup(194626) -> {"Lo","L"}; +lookup(194627) -> {"Lo","L"}; +lookup(194628) -> {"Lo","L"}; +lookup(194629) -> {"Lo","L"}; +lookup(194630) -> {"Lo","L"}; +lookup(194631) -> {"Lo","L"}; +lookup(194632) -> {"Lo","L"}; +lookup(194633) -> {"Lo","L"}; +lookup(194634) -> {"Lo","L"}; +lookup(194635) -> {"Lo","L"}; +lookup(194636) -> {"Lo","L"}; +lookup(194637) -> {"Lo","L"}; +lookup(194638) -> {"Lo","L"}; +lookup(194639) -> {"Lo","L"}; +lookup(194640) -> {"Lo","L"}; +lookup(194641) -> {"Lo","L"}; +lookup(194642) -> {"Lo","L"}; +lookup(194643) -> {"Lo","L"}; +lookup(194644) -> {"Lo","L"}; +lookup(194645) -> {"Lo","L"}; +lookup(194646) -> {"Lo","L"}; +lookup(194647) -> {"Lo","L"}; +lookup(194648) -> {"Lo","L"}; +lookup(194649) -> {"Lo","L"}; +lookup(194650) -> {"Lo","L"}; +lookup(194651) -> {"Lo","L"}; +lookup(194652) -> {"Lo","L"}; +lookup(194653) -> {"Lo","L"}; +lookup(194654) -> {"Lo","L"}; +lookup(194655) -> {"Lo","L"}; +lookup(194656) -> {"Lo","L"}; +lookup(194657) -> {"Lo","L"}; +lookup(194658) -> {"Lo","L"}; +lookup(194659) -> {"Lo","L"}; +lookup(194660) -> {"Lo","L"}; +lookup(194661) -> {"Lo","L"}; +lookup(194662) -> {"Lo","L"}; +lookup(194663) -> {"Lo","L"}; +lookup(194664) -> {"Lo","L"}; +lookup(194665) -> {"Lo","L"}; +lookup(194666) -> {"Lo","L"}; +lookup(194667) -> {"Lo","L"}; +lookup(194668) -> {"Lo","L"}; +lookup(194669) -> {"Lo","L"}; +lookup(194670) -> {"Lo","L"}; +lookup(194671) -> {"Lo","L"}; +lookup(194672) -> {"Lo","L"}; +lookup(194673) -> {"Lo","L"}; +lookup(194674) -> {"Lo","L"}; +lookup(194675) -> {"Lo","L"}; +lookup(194676) -> {"Lo","L"}; +lookup(194677) -> {"Lo","L"}; +lookup(194678) -> {"Lo","L"}; +lookup(194679) -> {"Lo","L"}; +lookup(194680) -> {"Lo","L"}; +lookup(194681) -> {"Lo","L"}; +lookup(194682) -> {"Lo","L"}; 
+lookup(194683) -> {"Lo","L"}; +lookup(194684) -> {"Lo","L"}; +lookup(194685) -> {"Lo","L"}; +lookup(194686) -> {"Lo","L"}; +lookup(194687) -> {"Lo","L"}; +lookup(194688) -> {"Lo","L"}; +lookup(194689) -> {"Lo","L"}; +lookup(194690) -> {"Lo","L"}; +lookup(194691) -> {"Lo","L"}; +lookup(194692) -> {"Lo","L"}; +lookup(194693) -> {"Lo","L"}; +lookup(194694) -> {"Lo","L"}; +lookup(194695) -> {"Lo","L"}; +lookup(194696) -> {"Lo","L"}; +lookup(194697) -> {"Lo","L"}; +lookup(194698) -> {"Lo","L"}; +lookup(194699) -> {"Lo","L"}; +lookup(194700) -> {"Lo","L"}; +lookup(194701) -> {"Lo","L"}; +lookup(194702) -> {"Lo","L"}; +lookup(194703) -> {"Lo","L"}; +lookup(194704) -> {"Lo","L"}; +lookup(194705) -> {"Lo","L"}; +lookup(194706) -> {"Lo","L"}; +lookup(194707) -> {"Lo","L"}; +lookup(194708) -> {"Lo","L"}; +lookup(194709) -> {"Lo","L"}; +lookup(194710) -> {"Lo","L"}; +lookup(194711) -> {"Lo","L"}; +lookup(194712) -> {"Lo","L"}; +lookup(194713) -> {"Lo","L"}; +lookup(194714) -> {"Lo","L"}; +lookup(194715) -> {"Lo","L"}; +lookup(194716) -> {"Lo","L"}; +lookup(194717) -> {"Lo","L"}; +lookup(194718) -> {"Lo","L"}; +lookup(194719) -> {"Lo","L"}; +lookup(194720) -> {"Lo","L"}; +lookup(194721) -> {"Lo","L"}; +lookup(194722) -> {"Lo","L"}; +lookup(194723) -> {"Lo","L"}; +lookup(194724) -> {"Lo","L"}; +lookup(194725) -> {"Lo","L"}; +lookup(194726) -> {"Lo","L"}; +lookup(194727) -> {"Lo","L"}; +lookup(194728) -> {"Lo","L"}; +lookup(194729) -> {"Lo","L"}; +lookup(194730) -> {"Lo","L"}; +lookup(194731) -> {"Lo","L"}; +lookup(194732) -> {"Lo","L"}; +lookup(194733) -> {"Lo","L"}; +lookup(194734) -> {"Lo","L"}; +lookup(194735) -> {"Lo","L"}; +lookup(194736) -> {"Lo","L"}; +lookup(194737) -> {"Lo","L"}; +lookup(194738) -> {"Lo","L"}; +lookup(194739) -> {"Lo","L"}; +lookup(194740) -> {"Lo","L"}; +lookup(194741) -> {"Lo","L"}; +lookup(194742) -> {"Lo","L"}; +lookup(194743) -> {"Lo","L"}; +lookup(194744) -> {"Lo","L"}; +lookup(194745) -> {"Lo","L"}; +lookup(194746) -> {"Lo","L"}; +lookup(194747) 
-> {"Lo","L"}; +lookup(194748) -> {"Lo","L"}; +lookup(194749) -> {"Lo","L"}; +lookup(194750) -> {"Lo","L"}; +lookup(194751) -> {"Lo","L"}; +lookup(194752) -> {"Lo","L"}; +lookup(194753) -> {"Lo","L"}; +lookup(194754) -> {"Lo","L"}; +lookup(194755) -> {"Lo","L"}; +lookup(194756) -> {"Lo","L"}; +lookup(194757) -> {"Lo","L"}; +lookup(194758) -> {"Lo","L"}; +lookup(194759) -> {"Lo","L"}; +lookup(194760) -> {"Lo","L"}; +lookup(194761) -> {"Lo","L"}; +lookup(194762) -> {"Lo","L"}; +lookup(194763) -> {"Lo","L"}; +lookup(194764) -> {"Lo","L"}; +lookup(194765) -> {"Lo","L"}; +lookup(194766) -> {"Lo","L"}; +lookup(194767) -> {"Lo","L"}; +lookup(194768) -> {"Lo","L"}; +lookup(194769) -> {"Lo","L"}; +lookup(194770) -> {"Lo","L"}; +lookup(194771) -> {"Lo","L"}; +lookup(194772) -> {"Lo","L"}; +lookup(194773) -> {"Lo","L"}; +lookup(194774) -> {"Lo","L"}; +lookup(194775) -> {"Lo","L"}; +lookup(194776) -> {"Lo","L"}; +lookup(194777) -> {"Lo","L"}; +lookup(194778) -> {"Lo","L"}; +lookup(194779) -> {"Lo","L"}; +lookup(194780) -> {"Lo","L"}; +lookup(194781) -> {"Lo","L"}; +lookup(194782) -> {"Lo","L"}; +lookup(194783) -> {"Lo","L"}; +lookup(194784) -> {"Lo","L"}; +lookup(194785) -> {"Lo","L"}; +lookup(194786) -> {"Lo","L"}; +lookup(194787) -> {"Lo","L"}; +lookup(194788) -> {"Lo","L"}; +lookup(194789) -> {"Lo","L"}; +lookup(194790) -> {"Lo","L"}; +lookup(194791) -> {"Lo","L"}; +lookup(194792) -> {"Lo","L"}; +lookup(194793) -> {"Lo","L"}; +lookup(194794) -> {"Lo","L"}; +lookup(194795) -> {"Lo","L"}; +lookup(194796) -> {"Lo","L"}; +lookup(194797) -> {"Lo","L"}; +lookup(194798) -> {"Lo","L"}; +lookup(194799) -> {"Lo","L"}; +lookup(194800) -> {"Lo","L"}; +lookup(194801) -> {"Lo","L"}; +lookup(194802) -> {"Lo","L"}; +lookup(194803) -> {"Lo","L"}; +lookup(194804) -> {"Lo","L"}; +lookup(194805) -> {"Lo","L"}; +lookup(194806) -> {"Lo","L"}; +lookup(194807) -> {"Lo","L"}; +lookup(194808) -> {"Lo","L"}; +lookup(194809) -> {"Lo","L"}; +lookup(194810) -> {"Lo","L"}; +lookup(194811) -> {"Lo","L"}; 
+lookup(194812) -> {"Lo","L"}; +lookup(194813) -> {"Lo","L"}; +lookup(194814) -> {"Lo","L"}; +lookup(194815) -> {"Lo","L"}; +lookup(194816) -> {"Lo","L"}; +lookup(194817) -> {"Lo","L"}; +lookup(194818) -> {"Lo","L"}; +lookup(194819) -> {"Lo","L"}; +lookup(194820) -> {"Lo","L"}; +lookup(194821) -> {"Lo","L"}; +lookup(194822) -> {"Lo","L"}; +lookup(194823) -> {"Lo","L"}; +lookup(194824) -> {"Lo","L"}; +lookup(194825) -> {"Lo","L"}; +lookup(194826) -> {"Lo","L"}; +lookup(194827) -> {"Lo","L"}; +lookup(194828) -> {"Lo","L"}; +lookup(194829) -> {"Lo","L"}; +lookup(194830) -> {"Lo","L"}; +lookup(194831) -> {"Lo","L"}; +lookup(194832) -> {"Lo","L"}; +lookup(194833) -> {"Lo","L"}; +lookup(194834) -> {"Lo","L"}; +lookup(194835) -> {"Lo","L"}; +lookup(194836) -> {"Lo","L"}; +lookup(194837) -> {"Lo","L"}; +lookup(194838) -> {"Lo","L"}; +lookup(194839) -> {"Lo","L"}; +lookup(194840) -> {"Lo","L"}; +lookup(194841) -> {"Lo","L"}; +lookup(194842) -> {"Lo","L"}; +lookup(194843) -> {"Lo","L"}; +lookup(194844) -> {"Lo","L"}; +lookup(194845) -> {"Lo","L"}; +lookup(194846) -> {"Lo","L"}; +lookup(194847) -> {"Lo","L"}; +lookup(194848) -> {"Lo","L"}; +lookup(194849) -> {"Lo","L"}; +lookup(194850) -> {"Lo","L"}; +lookup(194851) -> {"Lo","L"}; +lookup(194852) -> {"Lo","L"}; +lookup(194853) -> {"Lo","L"}; +lookup(194854) -> {"Lo","L"}; +lookup(194855) -> {"Lo","L"}; +lookup(194856) -> {"Lo","L"}; +lookup(194857) -> {"Lo","L"}; +lookup(194858) -> {"Lo","L"}; +lookup(194859) -> {"Lo","L"}; +lookup(194860) -> {"Lo","L"}; +lookup(194861) -> {"Lo","L"}; +lookup(194862) -> {"Lo","L"}; +lookup(194863) -> {"Lo","L"}; +lookup(194864) -> {"Lo","L"}; +lookup(194865) -> {"Lo","L"}; +lookup(194866) -> {"Lo","L"}; +lookup(194867) -> {"Lo","L"}; +lookup(194868) -> {"Lo","L"}; +lookup(194869) -> {"Lo","L"}; +lookup(194870) -> {"Lo","L"}; +lookup(194871) -> {"Lo","L"}; +lookup(194872) -> {"Lo","L"}; +lookup(194873) -> {"Lo","L"}; +lookup(194874) -> {"Lo","L"}; +lookup(194875) -> {"Lo","L"}; +lookup(194876) 
-> {"Lo","L"}; +lookup(194877) -> {"Lo","L"}; +lookup(194878) -> {"Lo","L"}; +lookup(194879) -> {"Lo","L"}; +lookup(194880) -> {"Lo","L"}; +lookup(194881) -> {"Lo","L"}; +lookup(194882) -> {"Lo","L"}; +lookup(194883) -> {"Lo","L"}; +lookup(194884) -> {"Lo","L"}; +lookup(194885) -> {"Lo","L"}; +lookup(194886) -> {"Lo","L"}; +lookup(194887) -> {"Lo","L"}; +lookup(194888) -> {"Lo","L"}; +lookup(194889) -> {"Lo","L"}; +lookup(194890) -> {"Lo","L"}; +lookup(194891) -> {"Lo","L"}; +lookup(194892) -> {"Lo","L"}; +lookup(194893) -> {"Lo","L"}; +lookup(194894) -> {"Lo","L"}; +lookup(194895) -> {"Lo","L"}; +lookup(194896) -> {"Lo","L"}; +lookup(194897) -> {"Lo","L"}; +lookup(194898) -> {"Lo","L"}; +lookup(194899) -> {"Lo","L"}; +lookup(194900) -> {"Lo","L"}; +lookup(194901) -> {"Lo","L"}; +lookup(194902) -> {"Lo","L"}; +lookup(194903) -> {"Lo","L"}; +lookup(194904) -> {"Lo","L"}; +lookup(194905) -> {"Lo","L"}; +lookup(194906) -> {"Lo","L"}; +lookup(194907) -> {"Lo","L"}; +lookup(194908) -> {"Lo","L"}; +lookup(194909) -> {"Lo","L"}; +lookup(194910) -> {"Lo","L"}; +lookup(194911) -> {"Lo","L"}; +lookup(194912) -> {"Lo","L"}; +lookup(194913) -> {"Lo","L"}; +lookup(194914) -> {"Lo","L"}; +lookup(194915) -> {"Lo","L"}; +lookup(194916) -> {"Lo","L"}; +lookup(194917) -> {"Lo","L"}; +lookup(194918) -> {"Lo","L"}; +lookup(194919) -> {"Lo","L"}; +lookup(194920) -> {"Lo","L"}; +lookup(194921) -> {"Lo","L"}; +lookup(194922) -> {"Lo","L"}; +lookup(194923) -> {"Lo","L"}; +lookup(194924) -> {"Lo","L"}; +lookup(194925) -> {"Lo","L"}; +lookup(194926) -> {"Lo","L"}; +lookup(194927) -> {"Lo","L"}; +lookup(194928) -> {"Lo","L"}; +lookup(194929) -> {"Lo","L"}; +lookup(194930) -> {"Lo","L"}; +lookup(194931) -> {"Lo","L"}; +lookup(194932) -> {"Lo","L"}; +lookup(194933) -> {"Lo","L"}; +lookup(194934) -> {"Lo","L"}; +lookup(194935) -> {"Lo","L"}; +lookup(194936) -> {"Lo","L"}; +lookup(194937) -> {"Lo","L"}; +lookup(194938) -> {"Lo","L"}; +lookup(194939) -> {"Lo","L"}; +lookup(194940) -> {"Lo","L"}; 
+lookup(194941) -> {"Lo","L"}; +lookup(194942) -> {"Lo","L"}; +lookup(194943) -> {"Lo","L"}; +lookup(194944) -> {"Lo","L"}; +lookup(194945) -> {"Lo","L"}; +lookup(194946) -> {"Lo","L"}; +lookup(194947) -> {"Lo","L"}; +lookup(194948) -> {"Lo","L"}; +lookup(194949) -> {"Lo","L"}; +lookup(194950) -> {"Lo","L"}; +lookup(194951) -> {"Lo","L"}; +lookup(194952) -> {"Lo","L"}; +lookup(194953) -> {"Lo","L"}; +lookup(194954) -> {"Lo","L"}; +lookup(194955) -> {"Lo","L"}; +lookup(194956) -> {"Lo","L"}; +lookup(194957) -> {"Lo","L"}; +lookup(194958) -> {"Lo","L"}; +lookup(194959) -> {"Lo","L"}; +lookup(194960) -> {"Lo","L"}; +lookup(194961) -> {"Lo","L"}; +lookup(194962) -> {"Lo","L"}; +lookup(194963) -> {"Lo","L"}; +lookup(194964) -> {"Lo","L"}; +lookup(194965) -> {"Lo","L"}; +lookup(194966) -> {"Lo","L"}; +lookup(194967) -> {"Lo","L"}; +lookup(194968) -> {"Lo","L"}; +lookup(194969) -> {"Lo","L"}; +lookup(194970) -> {"Lo","L"}; +lookup(194971) -> {"Lo","L"}; +lookup(194972) -> {"Lo","L"}; +lookup(194973) -> {"Lo","L"}; +lookup(194974) -> {"Lo","L"}; +lookup(194975) -> {"Lo","L"}; +lookup(194976) -> {"Lo","L"}; +lookup(194977) -> {"Lo","L"}; +lookup(194978) -> {"Lo","L"}; +lookup(194979) -> {"Lo","L"}; +lookup(194980) -> {"Lo","L"}; +lookup(194981) -> {"Lo","L"}; +lookup(194982) -> {"Lo","L"}; +lookup(194983) -> {"Lo","L"}; +lookup(194984) -> {"Lo","L"}; +lookup(194985) -> {"Lo","L"}; +lookup(194986) -> {"Lo","L"}; +lookup(194987) -> {"Lo","L"}; +lookup(194988) -> {"Lo","L"}; +lookup(194989) -> {"Lo","L"}; +lookup(194990) -> {"Lo","L"}; +lookup(194991) -> {"Lo","L"}; +lookup(194992) -> {"Lo","L"}; +lookup(194993) -> {"Lo","L"}; +lookup(194994) -> {"Lo","L"}; +lookup(194995) -> {"Lo","L"}; +lookup(194996) -> {"Lo","L"}; +lookup(194997) -> {"Lo","L"}; +lookup(194998) -> {"Lo","L"}; +lookup(194999) -> {"Lo","L"}; +lookup(195000) -> {"Lo","L"}; +lookup(195001) -> {"Lo","L"}; +lookup(195002) -> {"Lo","L"}; +lookup(195003) -> {"Lo","L"}; +lookup(195004) -> {"Lo","L"}; +lookup(195005) 
-> {"Lo","L"}; +lookup(195006) -> {"Lo","L"}; +lookup(195007) -> {"Lo","L"}; +lookup(195008) -> {"Lo","L"}; +lookup(195009) -> {"Lo","L"}; +lookup(195010) -> {"Lo","L"}; +lookup(195011) -> {"Lo","L"}; +lookup(195012) -> {"Lo","L"}; +lookup(195013) -> {"Lo","L"}; +lookup(195014) -> {"Lo","L"}; +lookup(195015) -> {"Lo","L"}; +lookup(195016) -> {"Lo","L"}; +lookup(195017) -> {"Lo","L"}; +lookup(195018) -> {"Lo","L"}; +lookup(195019) -> {"Lo","L"}; +lookup(195020) -> {"Lo","L"}; +lookup(195021) -> {"Lo","L"}; +lookup(195022) -> {"Lo","L"}; +lookup(195023) -> {"Lo","L"}; +lookup(195024) -> {"Lo","L"}; +lookup(195025) -> {"Lo","L"}; +lookup(195026) -> {"Lo","L"}; +lookup(195027) -> {"Lo","L"}; +lookup(195028) -> {"Lo","L"}; +lookup(195029) -> {"Lo","L"}; +lookup(195030) -> {"Lo","L"}; +lookup(195031) -> {"Lo","L"}; +lookup(195032) -> {"Lo","L"}; +lookup(195033) -> {"Lo","L"}; +lookup(195034) -> {"Lo","L"}; +lookup(195035) -> {"Lo","L"}; +lookup(195036) -> {"Lo","L"}; +lookup(195037) -> {"Lo","L"}; +lookup(195038) -> {"Lo","L"}; +lookup(195039) -> {"Lo","L"}; +lookup(195040) -> {"Lo","L"}; +lookup(195041) -> {"Lo","L"}; +lookup(195042) -> {"Lo","L"}; +lookup(195043) -> {"Lo","L"}; +lookup(195044) -> {"Lo","L"}; +lookup(195045) -> {"Lo","L"}; +lookup(195046) -> {"Lo","L"}; +lookup(195047) -> {"Lo","L"}; +lookup(195048) -> {"Lo","L"}; +lookup(195049) -> {"Lo","L"}; +lookup(195050) -> {"Lo","L"}; +lookup(195051) -> {"Lo","L"}; +lookup(195052) -> {"Lo","L"}; +lookup(195053) -> {"Lo","L"}; +lookup(195054) -> {"Lo","L"}; +lookup(195055) -> {"Lo","L"}; +lookup(195056) -> {"Lo","L"}; +lookup(195057) -> {"Lo","L"}; +lookup(195058) -> {"Lo","L"}; +lookup(195059) -> {"Lo","L"}; +lookup(195060) -> {"Lo","L"}; +lookup(195061) -> {"Lo","L"}; +lookup(195062) -> {"Lo","L"}; +lookup(195063) -> {"Lo","L"}; +lookup(195064) -> {"Lo","L"}; +lookup(195065) -> {"Lo","L"}; +lookup(195066) -> {"Lo","L"}; +lookup(195067) -> {"Lo","L"}; +lookup(195068) -> {"Lo","L"}; +lookup(195069) -> {"Lo","L"}; 
+lookup(195070) -> {"Lo","L"}; +lookup(195071) -> {"Lo","L"}; +lookup(195072) -> {"Lo","L"}; +lookup(195073) -> {"Lo","L"}; +lookup(195074) -> {"Lo","L"}; +lookup(195075) -> {"Lo","L"}; +lookup(195076) -> {"Lo","L"}; +lookup(195077) -> {"Lo","L"}; +lookup(195078) -> {"Lo","L"}; +lookup(195079) -> {"Lo","L"}; +lookup(195080) -> {"Lo","L"}; +lookup(195081) -> {"Lo","L"}; +lookup(195082) -> {"Lo","L"}; +lookup(195083) -> {"Lo","L"}; +lookup(195084) -> {"Lo","L"}; +lookup(195085) -> {"Lo","L"}; +lookup(195086) -> {"Lo","L"}; +lookup(195087) -> {"Lo","L"}; +lookup(195088) -> {"Lo","L"}; +lookup(195089) -> {"Lo","L"}; +lookup(195090) -> {"Lo","L"}; +lookup(195091) -> {"Lo","L"}; +lookup(195092) -> {"Lo","L"}; +lookup(195093) -> {"Lo","L"}; +lookup(195094) -> {"Lo","L"}; +lookup(195095) -> {"Lo","L"}; +lookup(195096) -> {"Lo","L"}; +lookup(195097) -> {"Lo","L"}; +lookup(195098) -> {"Lo","L"}; +lookup(195099) -> {"Lo","L"}; +lookup(195100) -> {"Lo","L"}; +lookup(195101) -> {"Lo","L"}; +lookup(196608) -> {"Lo","L"}; +lookup(201546) -> {"Lo","L"}; +lookup(917505) -> {"Cf","BN"}; +lookup(917536) -> {"Cf","BN"}; +lookup(917537) -> {"Cf","BN"}; +lookup(917538) -> {"Cf","BN"}; +lookup(917539) -> {"Cf","BN"}; +lookup(917540) -> {"Cf","BN"}; +lookup(917541) -> {"Cf","BN"}; +lookup(917542) -> {"Cf","BN"}; +lookup(917543) -> {"Cf","BN"}; +lookup(917544) -> {"Cf","BN"}; +lookup(917545) -> {"Cf","BN"}; +lookup(917546) -> {"Cf","BN"}; +lookup(917547) -> {"Cf","BN"}; +lookup(917548) -> {"Cf","BN"}; +lookup(917549) -> {"Cf","BN"}; +lookup(917550) -> {"Cf","BN"}; +lookup(917551) -> {"Cf","BN"}; +lookup(917552) -> {"Cf","BN"}; +lookup(917553) -> {"Cf","BN"}; +lookup(917554) -> {"Cf","BN"}; +lookup(917555) -> {"Cf","BN"}; +lookup(917556) -> {"Cf","BN"}; +lookup(917557) -> {"Cf","BN"}; +lookup(917558) -> {"Cf","BN"}; +lookup(917559) -> {"Cf","BN"}; +lookup(917560) -> {"Cf","BN"}; +lookup(917561) -> {"Cf","BN"}; +lookup(917562) -> {"Cf","BN"}; +lookup(917563) -> {"Cf","BN"}; +lookup(917564) 
-> {"Cf","BN"}; +lookup(917565) -> {"Cf","BN"}; +lookup(917566) -> {"Cf","BN"}; +lookup(917567) -> {"Cf","BN"}; +lookup(917568) -> {"Cf","BN"}; +lookup(917569) -> {"Cf","BN"}; +lookup(917570) -> {"Cf","BN"}; +lookup(917571) -> {"Cf","BN"}; +lookup(917572) -> {"Cf","BN"}; +lookup(917573) -> {"Cf","BN"}; +lookup(917574) -> {"Cf","BN"}; +lookup(917575) -> {"Cf","BN"}; +lookup(917576) -> {"Cf","BN"}; +lookup(917577) -> {"Cf","BN"}; +lookup(917578) -> {"Cf","BN"}; +lookup(917579) -> {"Cf","BN"}; +lookup(917580) -> {"Cf","BN"}; +lookup(917581) -> {"Cf","BN"}; +lookup(917582) -> {"Cf","BN"}; +lookup(917583) -> {"Cf","BN"}; +lookup(917584) -> {"Cf","BN"}; +lookup(917585) -> {"Cf","BN"}; +lookup(917586) -> {"Cf","BN"}; +lookup(917587) -> {"Cf","BN"}; +lookup(917588) -> {"Cf","BN"}; +lookup(917589) -> {"Cf","BN"}; +lookup(917590) -> {"Cf","BN"}; +lookup(917591) -> {"Cf","BN"}; +lookup(917592) -> {"Cf","BN"}; +lookup(917593) -> {"Cf","BN"}; +lookup(917594) -> {"Cf","BN"}; +lookup(917595) -> {"Cf","BN"}; +lookup(917596) -> {"Cf","BN"}; +lookup(917597) -> {"Cf","BN"}; +lookup(917598) -> {"Cf","BN"}; +lookup(917599) -> {"Cf","BN"}; +lookup(917600) -> {"Cf","BN"}; +lookup(917601) -> {"Cf","BN"}; +lookup(917602) -> {"Cf","BN"}; +lookup(917603) -> {"Cf","BN"}; +lookup(917604) -> {"Cf","BN"}; +lookup(917605) -> {"Cf","BN"}; +lookup(917606) -> {"Cf","BN"}; +lookup(917607) -> {"Cf","BN"}; +lookup(917608) -> {"Cf","BN"}; +lookup(917609) -> {"Cf","BN"}; +lookup(917610) -> {"Cf","BN"}; +lookup(917611) -> {"Cf","BN"}; +lookup(917612) -> {"Cf","BN"}; +lookup(917613) -> {"Cf","BN"}; +lookup(917614) -> {"Cf","BN"}; +lookup(917615) -> {"Cf","BN"}; +lookup(917616) -> {"Cf","BN"}; +lookup(917617) -> {"Cf","BN"}; +lookup(917618) -> {"Cf","BN"}; +lookup(917619) -> {"Cf","BN"}; +lookup(917620) -> {"Cf","BN"}; +lookup(917621) -> {"Cf","BN"}; +lookup(917622) -> {"Cf","BN"}; +lookup(917623) -> {"Cf","BN"}; +lookup(917624) -> {"Cf","BN"}; +lookup(917625) -> {"Cf","BN"}; +lookup(917626) -> {"Cf","BN"}; 
+lookup(917627) -> {"Cf","BN"}; +lookup(917628) -> {"Cf","BN"}; +lookup(917629) -> {"Cf","BN"}; +lookup(917630) -> {"Cf","BN"}; +lookup(917631) -> {"Cf","BN"}; +lookup(917760) -> {"Mn","NSM"}; +lookup(917761) -> {"Mn","NSM"}; +lookup(917762) -> {"Mn","NSM"}; +lookup(917763) -> {"Mn","NSM"}; +lookup(917764) -> {"Mn","NSM"}; +lookup(917765) -> {"Mn","NSM"}; +lookup(917766) -> {"Mn","NSM"}; +lookup(917767) -> {"Mn","NSM"}; +lookup(917768) -> {"Mn","NSM"}; +lookup(917769) -> {"Mn","NSM"}; +lookup(917770) -> {"Mn","NSM"}; +lookup(917771) -> {"Mn","NSM"}; +lookup(917772) -> {"Mn","NSM"}; +lookup(917773) -> {"Mn","NSM"}; +lookup(917774) -> {"Mn","NSM"}; +lookup(917775) -> {"Mn","NSM"}; +lookup(917776) -> {"Mn","NSM"}; +lookup(917777) -> {"Mn","NSM"}; +lookup(917778) -> {"Mn","NSM"}; +lookup(917779) -> {"Mn","NSM"}; +lookup(917780) -> {"Mn","NSM"}; +lookup(917781) -> {"Mn","NSM"}; +lookup(917782) -> {"Mn","NSM"}; +lookup(917783) -> {"Mn","NSM"}; +lookup(917784) -> {"Mn","NSM"}; +lookup(917785) -> {"Mn","NSM"}; +lookup(917786) -> {"Mn","NSM"}; +lookup(917787) -> {"Mn","NSM"}; +lookup(917788) -> {"Mn","NSM"}; +lookup(917789) -> {"Mn","NSM"}; +lookup(917790) -> {"Mn","NSM"}; +lookup(917791) -> {"Mn","NSM"}; +lookup(917792) -> {"Mn","NSM"}; +lookup(917793) -> {"Mn","NSM"}; +lookup(917794) -> {"Mn","NSM"}; +lookup(917795) -> {"Mn","NSM"}; +lookup(917796) -> {"Mn","NSM"}; +lookup(917797) -> {"Mn","NSM"}; +lookup(917798) -> {"Mn","NSM"}; +lookup(917799) -> {"Mn","NSM"}; +lookup(917800) -> {"Mn","NSM"}; +lookup(917801) -> {"Mn","NSM"}; +lookup(917802) -> {"Mn","NSM"}; +lookup(917803) -> {"Mn","NSM"}; +lookup(917804) -> {"Mn","NSM"}; +lookup(917805) -> {"Mn","NSM"}; +lookup(917806) -> {"Mn","NSM"}; +lookup(917807) -> {"Mn","NSM"}; +lookup(917808) -> {"Mn","NSM"}; +lookup(917809) -> {"Mn","NSM"}; +lookup(917810) -> {"Mn","NSM"}; +lookup(917811) -> {"Mn","NSM"}; +lookup(917812) -> {"Mn","NSM"}; +lookup(917813) -> {"Mn","NSM"}; +lookup(917814) -> {"Mn","NSM"}; +lookup(917815) -> 
{"Mn","NSM"}; +lookup(917816) -> {"Mn","NSM"}; +lookup(917817) -> {"Mn","NSM"}; +lookup(917818) -> {"Mn","NSM"}; +lookup(917819) -> {"Mn","NSM"}; +lookup(917820) -> {"Mn","NSM"}; +lookup(917821) -> {"Mn","NSM"}; +lookup(917822) -> {"Mn","NSM"}; +lookup(917823) -> {"Mn","NSM"}; +lookup(917824) -> {"Mn","NSM"}; +lookup(917825) -> {"Mn","NSM"}; +lookup(917826) -> {"Mn","NSM"}; +lookup(917827) -> {"Mn","NSM"}; +lookup(917828) -> {"Mn","NSM"}; +lookup(917829) -> {"Mn","NSM"}; +lookup(917830) -> {"Mn","NSM"}; +lookup(917831) -> {"Mn","NSM"}; +lookup(917832) -> {"Mn","NSM"}; +lookup(917833) -> {"Mn","NSM"}; +lookup(917834) -> {"Mn","NSM"}; +lookup(917835) -> {"Mn","NSM"}; +lookup(917836) -> {"Mn","NSM"}; +lookup(917837) -> {"Mn","NSM"}; +lookup(917838) -> {"Mn","NSM"}; +lookup(917839) -> {"Mn","NSM"}; +lookup(917840) -> {"Mn","NSM"}; +lookup(917841) -> {"Mn","NSM"}; +lookup(917842) -> {"Mn","NSM"}; +lookup(917843) -> {"Mn","NSM"}; +lookup(917844) -> {"Mn","NSM"}; +lookup(917845) -> {"Mn","NSM"}; +lookup(917846) -> {"Mn","NSM"}; +lookup(917847) -> {"Mn","NSM"}; +lookup(917848) -> {"Mn","NSM"}; +lookup(917849) -> {"Mn","NSM"}; +lookup(917850) -> {"Mn","NSM"}; +lookup(917851) -> {"Mn","NSM"}; +lookup(917852) -> {"Mn","NSM"}; +lookup(917853) -> {"Mn","NSM"}; +lookup(917854) -> {"Mn","NSM"}; +lookup(917855) -> {"Mn","NSM"}; +lookup(917856) -> {"Mn","NSM"}; +lookup(917857) -> {"Mn","NSM"}; +lookup(917858) -> {"Mn","NSM"}; +lookup(917859) -> {"Mn","NSM"}; +lookup(917860) -> {"Mn","NSM"}; +lookup(917861) -> {"Mn","NSM"}; +lookup(917862) -> {"Mn","NSM"}; +lookup(917863) -> {"Mn","NSM"}; +lookup(917864) -> {"Mn","NSM"}; +lookup(917865) -> {"Mn","NSM"}; +lookup(917866) -> {"Mn","NSM"}; +lookup(917867) -> {"Mn","NSM"}; +lookup(917868) -> {"Mn","NSM"}; +lookup(917869) -> {"Mn","NSM"}; +lookup(917870) -> {"Mn","NSM"}; +lookup(917871) -> {"Mn","NSM"}; +lookup(917872) -> {"Mn","NSM"}; +lookup(917873) -> {"Mn","NSM"}; +lookup(917874) -> {"Mn","NSM"}; +lookup(917875) -> {"Mn","NSM"}; 
+lookup(917876) -> {"Mn","NSM"}; +lookup(917877) -> {"Mn","NSM"}; +lookup(917878) -> {"Mn","NSM"}; +lookup(917879) -> {"Mn","NSM"}; +lookup(917880) -> {"Mn","NSM"}; +lookup(917881) -> {"Mn","NSM"}; +lookup(917882) -> {"Mn","NSM"}; +lookup(917883) -> {"Mn","NSM"}; +lookup(917884) -> {"Mn","NSM"}; +lookup(917885) -> {"Mn","NSM"}; +lookup(917886) -> {"Mn","NSM"}; +lookup(917887) -> {"Mn","NSM"}; +lookup(917888) -> {"Mn","NSM"}; +lookup(917889) -> {"Mn","NSM"}; +lookup(917890) -> {"Mn","NSM"}; +lookup(917891) -> {"Mn","NSM"}; +lookup(917892) -> {"Mn","NSM"}; +lookup(917893) -> {"Mn","NSM"}; +lookup(917894) -> {"Mn","NSM"}; +lookup(917895) -> {"Mn","NSM"}; +lookup(917896) -> {"Mn","NSM"}; +lookup(917897) -> {"Mn","NSM"}; +lookup(917898) -> {"Mn","NSM"}; +lookup(917899) -> {"Mn","NSM"}; +lookup(917900) -> {"Mn","NSM"}; +lookup(917901) -> {"Mn","NSM"}; +lookup(917902) -> {"Mn","NSM"}; +lookup(917903) -> {"Mn","NSM"}; +lookup(917904) -> {"Mn","NSM"}; +lookup(917905) -> {"Mn","NSM"}; +lookup(917906) -> {"Mn","NSM"}; +lookup(917907) -> {"Mn","NSM"}; +lookup(917908) -> {"Mn","NSM"}; +lookup(917909) -> {"Mn","NSM"}; +lookup(917910) -> {"Mn","NSM"}; +lookup(917911) -> {"Mn","NSM"}; +lookup(917912) -> {"Mn","NSM"}; +lookup(917913) -> {"Mn","NSM"}; +lookup(917914) -> {"Mn","NSM"}; +lookup(917915) -> {"Mn","NSM"}; +lookup(917916) -> {"Mn","NSM"}; +lookup(917917) -> {"Mn","NSM"}; +lookup(917918) -> {"Mn","NSM"}; +lookup(917919) -> {"Mn","NSM"}; +lookup(917920) -> {"Mn","NSM"}; +lookup(917921) -> {"Mn","NSM"}; +lookup(917922) -> {"Mn","NSM"}; +lookup(917923) -> {"Mn","NSM"}; +lookup(917924) -> {"Mn","NSM"}; +lookup(917925) -> {"Mn","NSM"}; +lookup(917926) -> {"Mn","NSM"}; +lookup(917927) -> {"Mn","NSM"}; +lookup(917928) -> {"Mn","NSM"}; +lookup(917929) -> {"Mn","NSM"}; +lookup(917930) -> {"Mn","NSM"}; +lookup(917931) -> {"Mn","NSM"}; +lookup(917932) -> {"Mn","NSM"}; +lookup(917933) -> {"Mn","NSM"}; +lookup(917934) -> {"Mn","NSM"}; +lookup(917935) -> {"Mn","NSM"}; +lookup(917936) -> 
{"Mn","NSM"}; +lookup(917937) -> {"Mn","NSM"}; +lookup(917938) -> {"Mn","NSM"}; +lookup(917939) -> {"Mn","NSM"}; +lookup(917940) -> {"Mn","NSM"}; +lookup(917941) -> {"Mn","NSM"}; +lookup(917942) -> {"Mn","NSM"}; +lookup(917943) -> {"Mn","NSM"}; +lookup(917944) -> {"Mn","NSM"}; +lookup(917945) -> {"Mn","NSM"}; +lookup(917946) -> {"Mn","NSM"}; +lookup(917947) -> {"Mn","NSM"}; +lookup(917948) -> {"Mn","NSM"}; +lookup(917949) -> {"Mn","NSM"}; +lookup(917950) -> {"Mn","NSM"}; +lookup(917951) -> {"Mn","NSM"}; +lookup(917952) -> {"Mn","NSM"}; +lookup(917953) -> {"Mn","NSM"}; +lookup(917954) -> {"Mn","NSM"}; +lookup(917955) -> {"Mn","NSM"}; +lookup(917956) -> {"Mn","NSM"}; +lookup(917957) -> {"Mn","NSM"}; +lookup(917958) -> {"Mn","NSM"}; +lookup(917959) -> {"Mn","NSM"}; +lookup(917960) -> {"Mn","NSM"}; +lookup(917961) -> {"Mn","NSM"}; +lookup(917962) -> {"Mn","NSM"}; +lookup(917963) -> {"Mn","NSM"}; +lookup(917964) -> {"Mn","NSM"}; +lookup(917965) -> {"Mn","NSM"}; +lookup(917966) -> {"Mn","NSM"}; +lookup(917967) -> {"Mn","NSM"}; +lookup(917968) -> {"Mn","NSM"}; +lookup(917969) -> {"Mn","NSM"}; +lookup(917970) -> {"Mn","NSM"}; +lookup(917971) -> {"Mn","NSM"}; +lookup(917972) -> {"Mn","NSM"}; +lookup(917973) -> {"Mn","NSM"}; +lookup(917974) -> {"Mn","NSM"}; +lookup(917975) -> {"Mn","NSM"}; +lookup(917976) -> {"Mn","NSM"}; +lookup(917977) -> {"Mn","NSM"}; +lookup(917978) -> {"Mn","NSM"}; +lookup(917979) -> {"Mn","NSM"}; +lookup(917980) -> {"Mn","NSM"}; +lookup(917981) -> {"Mn","NSM"}; +lookup(917982) -> {"Mn","NSM"}; +lookup(917983) -> {"Mn","NSM"}; +lookup(917984) -> {"Mn","NSM"}; +lookup(917985) -> {"Mn","NSM"}; +lookup(917986) -> {"Mn","NSM"}; +lookup(917987) -> {"Mn","NSM"}; +lookup(917988) -> {"Mn","NSM"}; +lookup(917989) -> {"Mn","NSM"}; +lookup(917990) -> {"Mn","NSM"}; +lookup(917991) -> {"Mn","NSM"}; +lookup(917992) -> {"Mn","NSM"}; +lookup(917993) -> {"Mn","NSM"}; +lookup(917994) -> {"Mn","NSM"}; +lookup(917995) -> {"Mn","NSM"}; +lookup(917996) -> {"Mn","NSM"}; 
+lookup(917997) -> {"Mn","NSM"}; +lookup(917998) -> {"Mn","NSM"}; +lookup(917999) -> {"Mn","NSM"}; +lookup(983040) -> {"Co","L"}; +lookup(1048573) -> {"Co","L"}; +lookup(1048576) -> {"Co","L"}; +lookup(1114109) -> {"Co","L"}; +lookup(_) -> false. + +joining_types(1536) -> "U"; +joining_types(1537) -> "U"; +joining_types(1538) -> "U"; +joining_types(1539) -> "U"; +joining_types(1540) -> "U"; +joining_types(1541) -> "U"; +joining_types(1544) -> "U"; +joining_types(1547) -> "U"; +joining_types(1568) -> "D"; +joining_types(1569) -> "U"; +joining_types(1570) -> "R"; +joining_types(1571) -> "R"; +joining_types(1572) -> "R"; +joining_types(1573) -> "R"; +joining_types(1574) -> "D"; +joining_types(1575) -> "R"; +joining_types(1576) -> "D"; +joining_types(1577) -> "R"; +joining_types(1578) -> "D"; +joining_types(1579) -> "D"; +joining_types(1580) -> "D"; +joining_types(1581) -> "D"; +joining_types(1582) -> "D"; +joining_types(1583) -> "R"; +joining_types(1584) -> "R"; +joining_types(1585) -> "R"; +joining_types(1586) -> "R"; +joining_types(1587) -> "D"; +joining_types(1588) -> "D"; +joining_types(1589) -> "D"; +joining_types(1590) -> "D"; +joining_types(1591) -> "D"; +joining_types(1592) -> "D"; +joining_types(1593) -> "D"; +joining_types(1594) -> "D"; +joining_types(1595) -> "D"; +joining_types(1596) -> "D"; +joining_types(1597) -> "D"; +joining_types(1598) -> "D"; +joining_types(1599) -> "D"; +joining_types(1600) -> "C"; +joining_types(1601) -> "D"; +joining_types(1602) -> "D"; +joining_types(1603) -> "D"; +joining_types(1604) -> "D"; +joining_types(1605) -> "D"; +joining_types(1606) -> "D"; +joining_types(1607) -> "D"; +joining_types(1608) -> "R"; +joining_types(1609) -> "D"; +joining_types(1610) -> "D"; +joining_types(1646) -> "D"; +joining_types(1647) -> "D"; +joining_types(1649) -> "R"; +joining_types(1650) -> "R"; +joining_types(1651) -> "R"; +joining_types(1652) -> "U"; +joining_types(1653) -> "R"; +joining_types(1654) -> "R"; +joining_types(1655) -> "R"; 
+joining_types(1656) -> "D"; +joining_types(1657) -> "D"; +joining_types(1658) -> "D"; +joining_types(1659) -> "D"; +joining_types(1660) -> "D"; +joining_types(1661) -> "D"; +joining_types(1662) -> "D"; +joining_types(1663) -> "D"; +joining_types(1664) -> "D"; +joining_types(1665) -> "D"; +joining_types(1666) -> "D"; +joining_types(1667) -> "D"; +joining_types(1668) -> "D"; +joining_types(1669) -> "D"; +joining_types(1670) -> "D"; +joining_types(1671) -> "D"; +joining_types(1672) -> "R"; +joining_types(1673) -> "R"; +joining_types(1674) -> "R"; +joining_types(1675) -> "R"; +joining_types(1676) -> "R"; +joining_types(1677) -> "R"; +joining_types(1678) -> "R"; +joining_types(1679) -> "R"; +joining_types(1680) -> "R"; +joining_types(1681) -> "R"; +joining_types(1682) -> "R"; +joining_types(1683) -> "R"; +joining_types(1684) -> "R"; +joining_types(1685) -> "R"; +joining_types(1686) -> "R"; +joining_types(1687) -> "R"; +joining_types(1688) -> "R"; +joining_types(1689) -> "R"; +joining_types(1690) -> "D"; +joining_types(1691) -> "D"; +joining_types(1692) -> "D"; +joining_types(1693) -> "D"; +joining_types(1694) -> "D"; +joining_types(1695) -> "D"; +joining_types(1696) -> "D"; +joining_types(1697) -> "D"; +joining_types(1698) -> "D"; +joining_types(1699) -> "D"; +joining_types(1700) -> "D"; +joining_types(1701) -> "D"; +joining_types(1702) -> "D"; +joining_types(1703) -> "D"; +joining_types(1704) -> "D"; +joining_types(1705) -> "D"; +joining_types(1706) -> "D"; +joining_types(1707) -> "D"; +joining_types(1708) -> "D"; +joining_types(1709) -> "D"; +joining_types(1710) -> "D"; +joining_types(1711) -> "D"; +joining_types(1712) -> "D"; +joining_types(1713) -> "D"; +joining_types(1714) -> "D"; +joining_types(1715) -> "D"; +joining_types(1716) -> "D"; +joining_types(1717) -> "D"; +joining_types(1718) -> "D"; +joining_types(1719) -> "D"; +joining_types(1720) -> "D"; +joining_types(1721) -> "D"; +joining_types(1722) -> "D"; +joining_types(1723) -> "D"; +joining_types(1724) -> 
"D"; +joining_types(1725) -> "D"; +joining_types(1726) -> "D"; +joining_types(1727) -> "D"; +joining_types(1728) -> "R"; +joining_types(1729) -> "D"; +joining_types(1730) -> "D"; +joining_types(1731) -> "R"; +joining_types(1732) -> "R"; +joining_types(1733) -> "R"; +joining_types(1734) -> "R"; +joining_types(1735) -> "R"; +joining_types(1736) -> "R"; +joining_types(1737) -> "R"; +joining_types(1738) -> "R"; +joining_types(1739) -> "R"; +joining_types(1740) -> "D"; +joining_types(1741) -> "R"; +joining_types(1742) -> "D"; +joining_types(1743) -> "R"; +joining_types(1744) -> "D"; +joining_types(1745) -> "D"; +joining_types(1746) -> "R"; +joining_types(1747) -> "R"; +joining_types(1749) -> "R"; +joining_types(1757) -> "U"; +joining_types(1774) -> "R"; +joining_types(1775) -> "R"; +joining_types(1786) -> "D"; +joining_types(1787) -> "D"; +joining_types(1788) -> "D"; +joining_types(1791) -> "D"; +joining_types(1807) -> "T"; +joining_types(1808) -> "R"; +joining_types(1810) -> "D"; +joining_types(1811) -> "D"; +joining_types(1812) -> "D"; +joining_types(1813) -> "R"; +joining_types(1814) -> "R"; +joining_types(1815) -> "R"; +joining_types(1816) -> "R"; +joining_types(1817) -> "R"; +joining_types(1818) -> "D"; +joining_types(1819) -> "D"; +joining_types(1820) -> "D"; +joining_types(1821) -> "D"; +joining_types(1822) -> "R"; +joining_types(1823) -> "D"; +joining_types(1824) -> "D"; +joining_types(1825) -> "D"; +joining_types(1826) -> "D"; +joining_types(1827) -> "D"; +joining_types(1828) -> "D"; +joining_types(1829) -> "D"; +joining_types(1830) -> "D"; +joining_types(1831) -> "D"; +joining_types(1832) -> "R"; +joining_types(1833) -> "D"; +joining_types(1834) -> "R"; +joining_types(1835) -> "D"; +joining_types(1836) -> "R"; +joining_types(1837) -> "D"; +joining_types(1838) -> "D"; +joining_types(1839) -> "R"; +joining_types(1869) -> "R"; +joining_types(1870) -> "D"; +joining_types(1871) -> "D"; +joining_types(1872) -> "D"; +joining_types(1873) -> "D"; +joining_types(1874) 
-> "D"; +joining_types(1875) -> "D"; +joining_types(1876) -> "D"; +joining_types(1877) -> "D"; +joining_types(1878) -> "D"; +joining_types(1879) -> "D"; +joining_types(1880) -> "D"; +joining_types(1881) -> "R"; +joining_types(1882) -> "R"; +joining_types(1883) -> "R"; +joining_types(1884) -> "D"; +joining_types(1885) -> "D"; +joining_types(1886) -> "D"; +joining_types(1887) -> "D"; +joining_types(1888) -> "D"; +joining_types(1889) -> "D"; +joining_types(1890) -> "D"; +joining_types(1891) -> "D"; +joining_types(1892) -> "D"; +joining_types(1893) -> "D"; +joining_types(1894) -> "D"; +joining_types(1895) -> "D"; +joining_types(1896) -> "D"; +joining_types(1897) -> "D"; +joining_types(1898) -> "D"; +joining_types(1899) -> "R"; +joining_types(1900) -> "R"; +joining_types(1901) -> "D"; +joining_types(1902) -> "D"; +joining_types(1903) -> "D"; +joining_types(1904) -> "D"; +joining_types(1905) -> "R"; +joining_types(1906) -> "D"; +joining_types(1907) -> "R"; +joining_types(1908) -> "R"; +joining_types(1909) -> "D"; +joining_types(1910) -> "D"; +joining_types(1911) -> "D"; +joining_types(1912) -> "R"; +joining_types(1913) -> "R"; +joining_types(1914) -> "D"; +joining_types(1915) -> "D"; +joining_types(1916) -> "D"; +joining_types(1917) -> "D"; +joining_types(1918) -> "D"; +joining_types(1919) -> "D"; +joining_types(1994) -> "D"; +joining_types(1995) -> "D"; +joining_types(1996) -> "D"; +joining_types(1997) -> "D"; +joining_types(1998) -> "D"; +joining_types(1999) -> "D"; +joining_types(2000) -> "D"; +joining_types(2001) -> "D"; +joining_types(2002) -> "D"; +joining_types(2003) -> "D"; +joining_types(2004) -> "D"; +joining_types(2005) -> "D"; +joining_types(2006) -> "D"; +joining_types(2007) -> "D"; +joining_types(2008) -> "D"; +joining_types(2009) -> "D"; +joining_types(2010) -> "D"; +joining_types(2011) -> "D"; +joining_types(2012) -> "D"; +joining_types(2013) -> "D"; +joining_types(2014) -> "D"; +joining_types(2015) -> "D"; +joining_types(2016) -> "D"; 
+joining_types(2017) -> "D"; +joining_types(2018) -> "D"; +joining_types(2019) -> "D"; +joining_types(2020) -> "D"; +joining_types(2021) -> "D"; +joining_types(2022) -> "D"; +joining_types(2023) -> "D"; +joining_types(2024) -> "D"; +joining_types(2025) -> "D"; +joining_types(2026) -> "D"; +joining_types(2042) -> "C"; +joining_types(2112) -> "R"; +joining_types(2113) -> "D"; +joining_types(2114) -> "D"; +joining_types(2115) -> "D"; +joining_types(2116) -> "D"; +joining_types(2117) -> "D"; +joining_types(2118) -> "R"; +joining_types(2119) -> "R"; +joining_types(2120) -> "D"; +joining_types(2121) -> "R"; +joining_types(2122) -> "D"; +joining_types(2123) -> "D"; +joining_types(2124) -> "D"; +joining_types(2125) -> "D"; +joining_types(2126) -> "D"; +joining_types(2127) -> "D"; +joining_types(2128) -> "D"; +joining_types(2129) -> "D"; +joining_types(2130) -> "D"; +joining_types(2131) -> "D"; +joining_types(2132) -> "R"; +joining_types(2133) -> "D"; +joining_types(2134) -> "R"; +joining_types(2135) -> "R"; +joining_types(2136) -> "R"; +joining_types(2144) -> "D"; +joining_types(2145) -> "U"; +joining_types(2146) -> "D"; +joining_types(2147) -> "D"; +joining_types(2148) -> "D"; +joining_types(2149) -> "D"; +joining_types(2150) -> "U"; +joining_types(2151) -> "R"; +joining_types(2152) -> "D"; +joining_types(2153) -> "R"; +joining_types(2154) -> "R"; +joining_types(2208) -> "D"; +joining_types(2209) -> "D"; +joining_types(2210) -> "D"; +joining_types(2211) -> "D"; +joining_types(2212) -> "D"; +joining_types(2213) -> "D"; +joining_types(2214) -> "D"; +joining_types(2215) -> "D"; +joining_types(2216) -> "D"; +joining_types(2217) -> "D"; +joining_types(2218) -> "R"; +joining_types(2219) -> "R"; +joining_types(2220) -> "R"; +joining_types(2221) -> "U"; +joining_types(2222) -> "R"; +joining_types(2223) -> "D"; +joining_types(2224) -> "D"; +joining_types(2225) -> "R"; +joining_types(2226) -> "R"; +joining_types(2227) -> "D"; +joining_types(2228) -> "D"; +joining_types(2230) -> 
"D"; +joining_types(2231) -> "D"; +joining_types(2232) -> "D"; +joining_types(2233) -> "R"; +joining_types(2234) -> "D"; +joining_types(2235) -> "D"; +joining_types(2236) -> "D"; +joining_types(2237) -> "D"; +joining_types(2238) -> "D"; +joining_types(2239) -> "D"; +joining_types(2240) -> "D"; +joining_types(2241) -> "D"; +joining_types(2242) -> "D"; +joining_types(2243) -> "D"; +joining_types(2244) -> "D"; +joining_types(2245) -> "D"; +joining_types(2246) -> "D"; +joining_types(2247) -> "D"; +joining_types(2274) -> "U"; +joining_types(6150) -> "U"; +joining_types(6151) -> "D"; +joining_types(6154) -> "C"; +joining_types(6158) -> "U"; +joining_types(6176) -> "D"; +joining_types(6177) -> "D"; +joining_types(6178) -> "D"; +joining_types(6179) -> "D"; +joining_types(6180) -> "D"; +joining_types(6181) -> "D"; +joining_types(6182) -> "D"; +joining_types(6183) -> "D"; +joining_types(6184) -> "D"; +joining_types(6185) -> "D"; +joining_types(6186) -> "D"; +joining_types(6187) -> "D"; +joining_types(6188) -> "D"; +joining_types(6189) -> "D"; +joining_types(6190) -> "D"; +joining_types(6191) -> "D"; +joining_types(6192) -> "D"; +joining_types(6193) -> "D"; +joining_types(6194) -> "D"; +joining_types(6195) -> "D"; +joining_types(6196) -> "D"; +joining_types(6197) -> "D"; +joining_types(6198) -> "D"; +joining_types(6199) -> "D"; +joining_types(6200) -> "D"; +joining_types(6201) -> "D"; +joining_types(6202) -> "D"; +joining_types(6203) -> "D"; +joining_types(6204) -> "D"; +joining_types(6205) -> "D"; +joining_types(6206) -> "D"; +joining_types(6207) -> "D"; +joining_types(6208) -> "D"; +joining_types(6209) -> "D"; +joining_types(6210) -> "D"; +joining_types(6211) -> "D"; +joining_types(6212) -> "D"; +joining_types(6213) -> "D"; +joining_types(6214) -> "D"; +joining_types(6215) -> "D"; +joining_types(6216) -> "D"; +joining_types(6217) -> "D"; +joining_types(6218) -> "D"; +joining_types(6219) -> "D"; +joining_types(6220) -> "D"; +joining_types(6221) -> "D"; +joining_types(6222) 
-> "D"; +joining_types(6223) -> "D"; +joining_types(6224) -> "D"; +joining_types(6225) -> "D"; +joining_types(6226) -> "D"; +joining_types(6227) -> "D"; +joining_types(6228) -> "D"; +joining_types(6229) -> "D"; +joining_types(6230) -> "D"; +joining_types(6231) -> "D"; +joining_types(6232) -> "D"; +joining_types(6233) -> "D"; +joining_types(6234) -> "D"; +joining_types(6235) -> "D"; +joining_types(6236) -> "D"; +joining_types(6237) -> "D"; +joining_types(6238) -> "D"; +joining_types(6239) -> "D"; +joining_types(6240) -> "D"; +joining_types(6241) -> "D"; +joining_types(6242) -> "D"; +joining_types(6243) -> "D"; +joining_types(6244) -> "D"; +joining_types(6245) -> "D"; +joining_types(6246) -> "D"; +joining_types(6247) -> "D"; +joining_types(6248) -> "D"; +joining_types(6249) -> "D"; +joining_types(6250) -> "D"; +joining_types(6251) -> "D"; +joining_types(6252) -> "D"; +joining_types(6253) -> "D"; +joining_types(6254) -> "D"; +joining_types(6255) -> "D"; +joining_types(6256) -> "D"; +joining_types(6257) -> "D"; +joining_types(6258) -> "D"; +joining_types(6259) -> "D"; +joining_types(6260) -> "D"; +joining_types(6261) -> "D"; +joining_types(6262) -> "D"; +joining_types(6263) -> "D"; +joining_types(6264) -> "D"; +joining_types(6272) -> "U"; +joining_types(6273) -> "U"; +joining_types(6274) -> "U"; +joining_types(6275) -> "U"; +joining_types(6276) -> "U"; +joining_types(6277) -> "T"; +joining_types(6278) -> "T"; +joining_types(6279) -> "D"; +joining_types(6280) -> "D"; +joining_types(6281) -> "D"; +joining_types(6282) -> "D"; +joining_types(6283) -> "D"; +joining_types(6284) -> "D"; +joining_types(6285) -> "D"; +joining_types(6286) -> "D"; +joining_types(6287) -> "D"; +joining_types(6288) -> "D"; +joining_types(6289) -> "D"; +joining_types(6290) -> "D"; +joining_types(6291) -> "D"; +joining_types(6292) -> "D"; +joining_types(6293) -> "D"; +joining_types(6294) -> "D"; +joining_types(6295) -> "D"; +joining_types(6296) -> "D"; +joining_types(6297) -> "D"; 
+joining_types(6298) -> "D"; +joining_types(6299) -> "D"; +joining_types(6300) -> "D"; +joining_types(6301) -> "D"; +joining_types(6302) -> "D"; +joining_types(6303) -> "D"; +joining_types(6304) -> "D"; +joining_types(6305) -> "D"; +joining_types(6306) -> "D"; +joining_types(6307) -> "D"; +joining_types(6308) -> "D"; +joining_types(6309) -> "D"; +joining_types(6310) -> "D"; +joining_types(6311) -> "D"; +joining_types(6312) -> "D"; +joining_types(6314) -> "D"; +joining_types(8204) -> "U"; +joining_types(8205) -> "C"; +joining_types(8239) -> "U"; +joining_types(8294) -> "U"; +joining_types(8295) -> "U"; +joining_types(8296) -> "U"; +joining_types(8297) -> "U"; +joining_types(43072) -> "D"; +joining_types(43073) -> "D"; +joining_types(43074) -> "D"; +joining_types(43075) -> "D"; +joining_types(43076) -> "D"; +joining_types(43077) -> "D"; +joining_types(43078) -> "D"; +joining_types(43079) -> "D"; +joining_types(43080) -> "D"; +joining_types(43081) -> "D"; +joining_types(43082) -> "D"; +joining_types(43083) -> "D"; +joining_types(43084) -> "D"; +joining_types(43085) -> "D"; +joining_types(43086) -> "D"; +joining_types(43087) -> "D"; +joining_types(43088) -> "D"; +joining_types(43089) -> "D"; +joining_types(43090) -> "D"; +joining_types(43091) -> "D"; +joining_types(43092) -> "D"; +joining_types(43093) -> "D"; +joining_types(43094) -> "D"; +joining_types(43095) -> "D"; +joining_types(43096) -> "D"; +joining_types(43097) -> "D"; +joining_types(43098) -> "D"; +joining_types(43099) -> "D"; +joining_types(43100) -> "D"; +joining_types(43101) -> "D"; +joining_types(43102) -> "D"; +joining_types(43103) -> "D"; +joining_types(43104) -> "D"; +joining_types(43105) -> "D"; +joining_types(43106) -> "D"; +joining_types(43107) -> "D"; +joining_types(43108) -> "D"; +joining_types(43109) -> "D"; +joining_types(43110) -> "D"; +joining_types(43111) -> "D"; +joining_types(43112) -> "D"; +joining_types(43113) -> "D"; +joining_types(43114) -> "D"; +joining_types(43115) -> "D"; 
+joining_types(43116) -> "D"; +joining_types(43117) -> "D"; +joining_types(43118) -> "D"; +joining_types(43119) -> "D"; +joining_types(43120) -> "D"; +joining_types(43121) -> "D"; +joining_types(43122) -> "L"; +joining_types(43123) -> "U"; +joining_types(68288) -> "D"; +joining_types(68289) -> "D"; +joining_types(68290) -> "D"; +joining_types(68291) -> "D"; +joining_types(68292) -> "D"; +joining_types(68293) -> "R"; +joining_types(68294) -> "U"; +joining_types(68295) -> "R"; +joining_types(68296) -> "U"; +joining_types(68297) -> "R"; +joining_types(68298) -> "R"; +joining_types(68299) -> "U"; +joining_types(68300) -> "U"; +joining_types(68301) -> "L"; +joining_types(68302) -> "R"; +joining_types(68303) -> "R"; +joining_types(68304) -> "R"; +joining_types(68305) -> "R"; +joining_types(68306) -> "R"; +joining_types(68307) -> "D"; +joining_types(68308) -> "D"; +joining_types(68309) -> "D"; +joining_types(68310) -> "D"; +joining_types(68311) -> "L"; +joining_types(68312) -> "D"; +joining_types(68313) -> "D"; +joining_types(68314) -> "D"; +joining_types(68315) -> "D"; +joining_types(68316) -> "D"; +joining_types(68317) -> "R"; +joining_types(68318) -> "D"; +joining_types(68319) -> "D"; +joining_types(68320) -> "D"; +joining_types(68321) -> "R"; +joining_types(68322) -> "U"; +joining_types(68323) -> "U"; +joining_types(68324) -> "R"; +joining_types(68331) -> "D"; +joining_types(68332) -> "D"; +joining_types(68333) -> "D"; +joining_types(68334) -> "D"; +joining_types(68335) -> "R"; +joining_types(68480) -> "D"; +joining_types(68481) -> "R"; +joining_types(68482) -> "D"; +joining_types(68483) -> "R"; +joining_types(68484) -> "R"; +joining_types(68485) -> "R"; +joining_types(68486) -> "D"; +joining_types(68487) -> "D"; +joining_types(68488) -> "D"; +joining_types(68489) -> "R"; +joining_types(68490) -> "D"; +joining_types(68491) -> "D"; +joining_types(68492) -> "R"; +joining_types(68493) -> "D"; +joining_types(68494) -> "R"; +joining_types(68495) -> "R"; 
+joining_types(68496) -> "D"; +joining_types(68497) -> "R"; +joining_types(68521) -> "R"; +joining_types(68522) -> "R"; +joining_types(68523) -> "R"; +joining_types(68524) -> "R"; +joining_types(68525) -> "D"; +joining_types(68526) -> "D"; +joining_types(68527) -> "U"; +joining_types(68864) -> "L"; +joining_types(68865) -> "D"; +joining_types(68866) -> "D"; +joining_types(68867) -> "D"; +joining_types(68868) -> "D"; +joining_types(68869) -> "D"; +joining_types(68870) -> "D"; +joining_types(68871) -> "D"; +joining_types(68872) -> "D"; +joining_types(68873) -> "D"; +joining_types(68874) -> "D"; +joining_types(68875) -> "D"; +joining_types(68876) -> "D"; +joining_types(68877) -> "D"; +joining_types(68878) -> "D"; +joining_types(68879) -> "D"; +joining_types(68880) -> "D"; +joining_types(68881) -> "D"; +joining_types(68882) -> "D"; +joining_types(68883) -> "D"; +joining_types(68884) -> "D"; +joining_types(68885) -> "D"; +joining_types(68886) -> "D"; +joining_types(68887) -> "D"; +joining_types(68888) -> "D"; +joining_types(68889) -> "D"; +joining_types(68890) -> "D"; +joining_types(68891) -> "D"; +joining_types(68892) -> "D"; +joining_types(68893) -> "D"; +joining_types(68894) -> "D"; +joining_types(68895) -> "D"; +joining_types(68896) -> "D"; +joining_types(68897) -> "D"; +joining_types(68898) -> "R"; +joining_types(68899) -> "D"; +joining_types(69424) -> "D"; +joining_types(69425) -> "D"; +joining_types(69426) -> "D"; +joining_types(69427) -> "R"; +joining_types(69428) -> "D"; +joining_types(69429) -> "D"; +joining_types(69430) -> "D"; +joining_types(69431) -> "D"; +joining_types(69432) -> "D"; +joining_types(69433) -> "D"; +joining_types(69434) -> "D"; +joining_types(69435) -> "D"; +joining_types(69436) -> "D"; +joining_types(69437) -> "D"; +joining_types(69438) -> "D"; +joining_types(69439) -> "D"; +joining_types(69440) -> "D"; +joining_types(69441) -> "D"; +joining_types(69442) -> "D"; +joining_types(69443) -> "D"; +joining_types(69444) -> "D"; 
+joining_types(69445) -> "U"; +joining_types(69457) -> "D"; +joining_types(69458) -> "D"; +joining_types(69459) -> "D"; +joining_types(69460) -> "R"; +joining_types(69552) -> "D"; +joining_types(69553) -> "U"; +joining_types(69554) -> "D"; +joining_types(69555) -> "D"; +joining_types(69556) -> "R"; +joining_types(69557) -> "R"; +joining_types(69558) -> "R"; +joining_types(69559) -> "U"; +joining_types(69560) -> "D"; +joining_types(69561) -> "R"; +joining_types(69562) -> "R"; +joining_types(69563) -> "D"; +joining_types(69564) -> "D"; +joining_types(69565) -> "R"; +joining_types(69566) -> "D"; +joining_types(69567) -> "D"; +joining_types(69568) -> "U"; +joining_types(69569) -> "D"; +joining_types(69570) -> "R"; +joining_types(69571) -> "R"; +joining_types(69572) -> "D"; +joining_types(69573) -> "U"; +joining_types(69574) -> "U"; +joining_types(69575) -> "U"; +joining_types(69576) -> "U"; +joining_types(69577) -> "R"; +joining_types(69578) -> "D"; +joining_types(69579) -> "L"; +joining_types(69821) -> "U"; +joining_types(69837) -> "U"; +joining_types(125184) -> "D"; +joining_types(125185) -> "D"; +joining_types(125186) -> "D"; +joining_types(125187) -> "D"; +joining_types(125188) -> "D"; +joining_types(125189) -> "D"; +joining_types(125190) -> "D"; +joining_types(125191) -> "D"; +joining_types(125192) -> "D"; +joining_types(125193) -> "D"; +joining_types(125194) -> "D"; +joining_types(125195) -> "D"; +joining_types(125196) -> "D"; +joining_types(125197) -> "D"; +joining_types(125198) -> "D"; +joining_types(125199) -> "D"; +joining_types(125200) -> "D"; +joining_types(125201) -> "D"; +joining_types(125202) -> "D"; +joining_types(125203) -> "D"; +joining_types(125204) -> "D"; +joining_types(125205) -> "D"; +joining_types(125206) -> "D"; +joining_types(125207) -> "D"; +joining_types(125208) -> "D"; +joining_types(125209) -> "D"; +joining_types(125210) -> "D"; +joining_types(125211) -> "D"; +joining_types(125212) -> "D"; +joining_types(125213) -> "D"; 
+joining_types(125214) -> "D"; +joining_types(125215) -> "D"; +joining_types(125216) -> "D"; +joining_types(125217) -> "D"; +joining_types(125218) -> "D"; +joining_types(125219) -> "D"; +joining_types(125220) -> "D"; +joining_types(125221) -> "D"; +joining_types(125222) -> "D"; +joining_types(125223) -> "D"; +joining_types(125224) -> "D"; +joining_types(125225) -> "D"; +joining_types(125226) -> "D"; +joining_types(125227) -> "D"; +joining_types(125228) -> "D"; +joining_types(125229) -> "D"; +joining_types(125230) -> "D"; +joining_types(125231) -> "D"; +joining_types(125232) -> "D"; +joining_types(125233) -> "D"; +joining_types(125234) -> "D"; +joining_types(125235) -> "D"; +joining_types(125236) -> "D"; +joining_types(125237) -> "D"; +joining_types(125238) -> "D"; +joining_types(125239) -> "D"; +joining_types(125240) -> "D"; +joining_types(125241) -> "D"; +joining_types(125242) -> "D"; +joining_types(125243) -> "D"; +joining_types(125244) -> "D"; +joining_types(125245) -> "D"; +joining_types(125246) -> "D"; +joining_types(125247) -> "D"; +joining_types(125248) -> "D"; +joining_types(125249) -> "D"; +joining_types(125250) -> "D"; +joining_types(125251) -> "D"; +joining_types(125259) -> "T"; +joining_types(_) -> undefined. 
+ +scripts(885) -> "greek"; +scripts(890) -> "greek"; +scripts(895) -> "greek"; +scripts(900) -> "greek"; +scripts(902) -> "greek"; +scripts(908) -> "greek"; +scripts(1014) -> "greek"; +scripts(1470) -> "hebrew"; +scripts(1471) -> "hebrew"; +scripts(1472) -> "hebrew"; +scripts(1475) -> "hebrew"; +scripts(1478) -> "hebrew"; +scripts(1479) -> "hebrew"; +scripts(7615) -> "greek"; +scripts(8025) -> "greek"; +scripts(8027) -> "greek"; +scripts(8029) -> "greek"; +scripts(8125) -> "greek"; +scripts(8126) -> "greek"; +scripts(8486) -> "greek"; +scripts(12293) -> "han"; +scripts(12295) -> "han"; +scripts(12347) -> "han"; +scripts(12447) -> "hiragana"; +scripts(12543) -> "katakana"; +scripts(43877) -> "greek"; +scripts(64285) -> "hebrew"; +scripts(64286) -> "hebrew"; +scripts(64297) -> "hebrew"; +scripts(64318) -> "hebrew"; +scripts(65952) -> "greek"; +scripts(110592) -> "katakana"; +scripts(119365) -> "greek"; +scripts(127488) -> "hiragana"; +scripts(CP) when 880 =< CP, CP =< 883 -> "greek"; +scripts(CP) when 886 =< CP, CP =< 887 -> "greek"; +scripts(CP) when 891 =< CP, CP =< 893 -> "greek"; +scripts(CP) when 904 =< CP, CP =< 906 -> "greek"; +scripts(CP) when 910 =< CP, CP =< 929 -> "greek"; +scripts(CP) when 931 =< CP, CP =< 993 -> "greek"; +scripts(CP) when 1008 =< CP, CP =< 1013 -> "greek"; +scripts(CP) when 1015 =< CP, CP =< 1023 -> "greek"; +scripts(CP) when 1425 =< CP, CP =< 1469 -> "hebrew"; +scripts(CP) when 1473 =< CP, CP =< 1474 -> "hebrew"; +scripts(CP) when 1476 =< CP, CP =< 1477 -> "hebrew"; +scripts(CP) when 1488 =< CP, CP =< 1514 -> "hebrew"; +scripts(CP) when 1519 =< CP, CP =< 1522 -> "hebrew"; +scripts(CP) when 1523 =< CP, CP =< 1524 -> "hebrew"; +scripts(CP) when 7462 =< CP, CP =< 7466 -> "greek"; +scripts(CP) when 7517 =< CP, CP =< 7521 -> "greek"; +scripts(CP) when 7526 =< CP, CP =< 7530 -> "greek"; +scripts(CP) when 7936 =< CP, CP =< 7957 -> "greek"; +scripts(CP) when 7960 =< CP, CP =< 7965 -> "greek"; +scripts(CP) when 7968 =< CP, CP =< 8005 -> 
"greek"; +scripts(CP) when 8008 =< CP, CP =< 8013 -> "greek"; +scripts(CP) when 8016 =< CP, CP =< 8023 -> "greek"; +scripts(CP) when 8031 =< CP, CP =< 8061 -> "greek"; +scripts(CP) when 8064 =< CP, CP =< 8116 -> "greek"; +scripts(CP) when 8118 =< CP, CP =< 8124 -> "greek"; +scripts(CP) when 8127 =< CP, CP =< 8129 -> "greek"; +scripts(CP) when 8130 =< CP, CP =< 8132 -> "greek"; +scripts(CP) when 8134 =< CP, CP =< 8140 -> "greek"; +scripts(CP) when 8141 =< CP, CP =< 8143 -> "greek"; +scripts(CP) when 8144 =< CP, CP =< 8147 -> "greek"; +scripts(CP) when 8150 =< CP, CP =< 8155 -> "greek"; +scripts(CP) when 8157 =< CP, CP =< 8159 -> "greek"; +scripts(CP) when 8160 =< CP, CP =< 8172 -> "greek"; +scripts(CP) when 8173 =< CP, CP =< 8175 -> "greek"; +scripts(CP) when 8178 =< CP, CP =< 8180 -> "greek"; +scripts(CP) when 8182 =< CP, CP =< 8188 -> "greek"; +scripts(CP) when 8189 =< CP, CP =< 8190 -> "greek"; +scripts(CP) when 11904 =< CP, CP =< 11929 -> "han"; +scripts(CP) when 11931 =< CP, CP =< 12019 -> "han"; +scripts(CP) when 12032 =< CP, CP =< 12245 -> "han"; +scripts(CP) when 12321 =< CP, CP =< 12329 -> "han"; +scripts(CP) when 12344 =< CP, CP =< 12346 -> "han"; +scripts(CP) when 12353 =< CP, CP =< 12438 -> "hiragana"; +scripts(CP) when 12445 =< CP, CP =< 12446 -> "hiragana"; +scripts(CP) when 12449 =< CP, CP =< 12538 -> "katakana"; +scripts(CP) when 12541 =< CP, CP =< 12542 -> "katakana"; +scripts(CP) when 12784 =< CP, CP =< 12799 -> "katakana"; +scripts(CP) when 13008 =< CP, CP =< 13054 -> "katakana"; +scripts(CP) when 13056 =< CP, CP =< 13143 -> "katakana"; +scripts(CP) when 13312 =< CP, CP =< 19903 -> "han"; +scripts(CP) when 19968 =< CP, CP =< 40956 -> "han"; +scripts(CP) when 63744 =< CP, CP =< 64109 -> "han"; +scripts(CP) when 64112 =< CP, CP =< 64217 -> "han"; +scripts(CP) when 64287 =< CP, CP =< 64296 -> "hebrew"; +scripts(CP) when 64298 =< CP, CP =< 64310 -> "hebrew"; +scripts(CP) when 64312 =< CP, CP =< 64316 -> "hebrew"; +scripts(CP) when 64320 =< CP, CP =< 
64321 -> "hebrew"; +scripts(CP) when 64323 =< CP, CP =< 64324 -> "hebrew"; +scripts(CP) when 64326 =< CP, CP =< 64335 -> "hebrew"; +scripts(CP) when 65382 =< CP, CP =< 65391 -> "katakana"; +scripts(CP) when 65393 =< CP, CP =< 65437 -> "katakana"; +scripts(CP) when 65856 =< CP, CP =< 65908 -> "greek"; +scripts(CP) when 65909 =< CP, CP =< 65912 -> "greek"; +scripts(CP) when 65913 =< CP, CP =< 65929 -> "greek"; +scripts(CP) when 65930 =< CP, CP =< 65931 -> "greek"; +scripts(CP) when 65932 =< CP, CP =< 65934 -> "greek"; +scripts(CP) when 94192 =< CP, CP =< 94193 -> "han"; +scripts(CP) when 110593 =< CP, CP =< 110878 -> "hiragana"; +scripts(CP) when 110928 =< CP, CP =< 110930 -> "hiragana"; +scripts(CP) when 110948 =< CP, CP =< 110951 -> "katakana"; +scripts(CP) when 119296 =< CP, CP =< 119361 -> "greek"; +scripts(CP) when 119362 =< CP, CP =< 119364 -> "greek"; +scripts(CP) when 131072 =< CP, CP =< 173789 -> "han"; +scripts(CP) when 173824 =< CP, CP =< 177972 -> "han"; +scripts(CP) when 177984 =< CP, CP =< 178205 -> "han"; +scripts(CP) when 178208 =< CP, CP =< 183969 -> "han"; +scripts(CP) when 183984 =< CP, CP =< 191456 -> "han"; +scripts(CP) when 194560 =< CP, CP =< 195101 -> "han"; +scripts(CP) when 196608 =< CP, CP =< 201546 -> "han"; +scripts(_) -> false. + diff --git a/deps/idna/src/idna_logger.hrl b/deps/idna/src/idna_logger.hrl new file mode 100644 index 0000000..f880d25 --- /dev/null +++ b/deps/idna/src/idna_logger.hrl @@ -0,0 +1,7 @@ +-ifdef('OTP_RELEASE'). +-include_lib("kernel/include/logger.hrl"). +-else. +-define(LOG_INFO(Format, Args), error_logger:info_msg(Format, Args)). +-define(LOG_ERROR(Format, Args), error_logger:error_msg(Format, Args)). +-define(LOG_WARNING(Format, Args), error_logger:warning_msg(Format, Args)). +-endif. 
\ No newline at end of file diff --git a/deps/idna/src/idna_mapping.erl b/deps/idna/src/idna_mapping.erl new file mode 100644 index 0000000..3c5537c --- /dev/null +++ b/deps/idna/src/idna_mapping.erl @@ -0,0 +1,8721 @@ +%% +%% this file is generated do not modify +%% see ../uc_spec/gen_idna_mapping.escript + +-module(idna_mapping). +-compile(compressed). +-export([uts46_map/1]). +uts46_map(47) -> '3'; +uts46_map(65) -> {'M', [97]}; +uts46_map(66) -> {'M', [98]}; +uts46_map(67) -> {'M', [99]}; +uts46_map(68) -> {'M', [100]}; +uts46_map(69) -> {'M', [101]}; +uts46_map(70) -> {'M', [102]}; +uts46_map(71) -> {'M', [103]}; +uts46_map(72) -> {'M', [104]}; +uts46_map(73) -> {'M', [105]}; +uts46_map(74) -> {'M', [106]}; +uts46_map(75) -> {'M', [107]}; +uts46_map(76) -> {'M', [108]}; +uts46_map(77) -> {'M', [109]}; +uts46_map(78) -> {'M', [110]}; +uts46_map(79) -> {'M', [111]}; +uts46_map(80) -> {'M', [112]}; +uts46_map(81) -> {'M', [113]}; +uts46_map(82) -> {'M', [114]}; +uts46_map(83) -> {'M', [115]}; +uts46_map(84) -> {'M', [116]}; +uts46_map(85) -> {'M', [117]}; +uts46_map(86) -> {'M', [118]}; +uts46_map(87) -> {'M', [119]}; +uts46_map(88) -> {'M', [120]}; +uts46_map(89) -> {'M', [121]}; +uts46_map(90) -> {'M', [122]}; +uts46_map(160) -> {'3', [32]}; +uts46_map(168) -> {'3', [32,776]}; +uts46_map(169) -> 'V'; +uts46_map(170) -> {'M', [97]}; +uts46_map(173) -> 'I'; +uts46_map(174) -> 'V'; +uts46_map(175) -> {'3', [32,772]}; +uts46_map(178) -> {'M', [50]}; +uts46_map(179) -> {'M', [51]}; +uts46_map(180) -> {'3', [32,769]}; +uts46_map(181) -> {'M', [956]}; +uts46_map(182) -> 'V'; +uts46_map(183) -> 'V'; +uts46_map(184) -> {'3', [32,807]}; +uts46_map(185) -> {'M', [49]}; +uts46_map(186) -> {'M', [111]}; +uts46_map(187) -> 'V'; +uts46_map(188) -> {'M', [49,8260,52]}; +uts46_map(189) -> {'M', [49,8260,50]}; +uts46_map(190) -> {'M', [51,8260,52]}; +uts46_map(191) -> 'V'; +uts46_map(192) -> {'M', [224]}; +uts46_map(193) -> {'M', [225]}; +uts46_map(194) -> {'M', [226]}; 
+uts46_map(195) -> {'M', [227]}; +uts46_map(196) -> {'M', [228]}; +uts46_map(197) -> {'M', [229]}; +uts46_map(198) -> {'M', [230]}; +uts46_map(199) -> {'M', [231]}; +uts46_map(200) -> {'M', [232]}; +uts46_map(201) -> {'M', [233]}; +uts46_map(202) -> {'M', [234]}; +uts46_map(203) -> {'M', [235]}; +uts46_map(204) -> {'M', [236]}; +uts46_map(205) -> {'M', [237]}; +uts46_map(206) -> {'M', [238]}; +uts46_map(207) -> {'M', [239]}; +uts46_map(208) -> {'M', [240]}; +uts46_map(209) -> {'M', [241]}; +uts46_map(210) -> {'M', [242]}; +uts46_map(211) -> {'M', [243]}; +uts46_map(212) -> {'M', [244]}; +uts46_map(213) -> {'M', [245]}; +uts46_map(214) -> {'M', [246]}; +uts46_map(215) -> 'V'; +uts46_map(216) -> {'M', [248]}; +uts46_map(217) -> {'M', [249]}; +uts46_map(218) -> {'M', [250]}; +uts46_map(219) -> {'M', [251]}; +uts46_map(220) -> {'M', [252]}; +uts46_map(221) -> {'M', [253]}; +uts46_map(222) -> {'M', [254]}; +uts46_map(223) -> {'D', [115,115]}; +uts46_map(247) -> 'V'; +uts46_map(256) -> {'M', [257]}; +uts46_map(257) -> 'V'; +uts46_map(258) -> {'M', [259]}; +uts46_map(259) -> 'V'; +uts46_map(260) -> {'M', [261]}; +uts46_map(261) -> 'V'; +uts46_map(262) -> {'M', [263]}; +uts46_map(263) -> 'V'; +uts46_map(264) -> {'M', [265]}; +uts46_map(265) -> 'V'; +uts46_map(266) -> {'M', [267]}; +uts46_map(267) -> 'V'; +uts46_map(268) -> {'M', [269]}; +uts46_map(269) -> 'V'; +uts46_map(270) -> {'M', [271]}; +uts46_map(271) -> 'V'; +uts46_map(272) -> {'M', [273]}; +uts46_map(273) -> 'V'; +uts46_map(274) -> {'M', [275]}; +uts46_map(275) -> 'V'; +uts46_map(276) -> {'M', [277]}; +uts46_map(277) -> 'V'; +uts46_map(278) -> {'M', [279]}; +uts46_map(279) -> 'V'; +uts46_map(280) -> {'M', [281]}; +uts46_map(281) -> 'V'; +uts46_map(282) -> {'M', [283]}; +uts46_map(283) -> 'V'; +uts46_map(284) -> {'M', [285]}; +uts46_map(285) -> 'V'; +uts46_map(286) -> {'M', [287]}; +uts46_map(287) -> 'V'; +uts46_map(288) -> {'M', [289]}; +uts46_map(289) -> 'V'; +uts46_map(290) -> {'M', [291]}; +uts46_map(291) -> 
'V'; +uts46_map(292) -> {'M', [293]}; +uts46_map(293) -> 'V'; +uts46_map(294) -> {'M', [295]}; +uts46_map(295) -> 'V'; +uts46_map(296) -> {'M', [297]}; +uts46_map(297) -> 'V'; +uts46_map(298) -> {'M', [299]}; +uts46_map(299) -> 'V'; +uts46_map(300) -> {'M', [301]}; +uts46_map(301) -> 'V'; +uts46_map(302) -> {'M', [303]}; +uts46_map(303) -> 'V'; +uts46_map(304) -> {'M', [105,775]}; +uts46_map(305) -> 'V'; +uts46_map(308) -> {'M', [309]}; +uts46_map(309) -> 'V'; +uts46_map(310) -> {'M', [311]}; +uts46_map(313) -> {'M', [314]}; +uts46_map(314) -> 'V'; +uts46_map(315) -> {'M', [316]}; +uts46_map(316) -> 'V'; +uts46_map(317) -> {'M', [318]}; +uts46_map(318) -> 'V'; +uts46_map(321) -> {'M', [322]}; +uts46_map(322) -> 'V'; +uts46_map(323) -> {'M', [324]}; +uts46_map(324) -> 'V'; +uts46_map(325) -> {'M', [326]}; +uts46_map(326) -> 'V'; +uts46_map(327) -> {'M', [328]}; +uts46_map(328) -> 'V'; +uts46_map(329) -> {'M', [700,110]}; +uts46_map(330) -> {'M', [331]}; +uts46_map(331) -> 'V'; +uts46_map(332) -> {'M', [333]}; +uts46_map(333) -> 'V'; +uts46_map(334) -> {'M', [335]}; +uts46_map(335) -> 'V'; +uts46_map(336) -> {'M', [337]}; +uts46_map(337) -> 'V'; +uts46_map(338) -> {'M', [339]}; +uts46_map(339) -> 'V'; +uts46_map(340) -> {'M', [341]}; +uts46_map(341) -> 'V'; +uts46_map(342) -> {'M', [343]}; +uts46_map(343) -> 'V'; +uts46_map(344) -> {'M', [345]}; +uts46_map(345) -> 'V'; +uts46_map(346) -> {'M', [347]}; +uts46_map(347) -> 'V'; +uts46_map(348) -> {'M', [349]}; +uts46_map(349) -> 'V'; +uts46_map(350) -> {'M', [351]}; +uts46_map(351) -> 'V'; +uts46_map(352) -> {'M', [353]}; +uts46_map(353) -> 'V'; +uts46_map(354) -> {'M', [355]}; +uts46_map(355) -> 'V'; +uts46_map(356) -> {'M', [357]}; +uts46_map(357) -> 'V'; +uts46_map(358) -> {'M', [359]}; +uts46_map(359) -> 'V'; +uts46_map(360) -> {'M', [361]}; +uts46_map(361) -> 'V'; +uts46_map(362) -> {'M', [363]}; +uts46_map(363) -> 'V'; +uts46_map(364) -> {'M', [365]}; +uts46_map(365) -> 'V'; +uts46_map(366) -> {'M', [367]}; 
+uts46_map(367) -> 'V'; +uts46_map(368) -> {'M', [369]}; +uts46_map(369) -> 'V'; +uts46_map(370) -> {'M', [371]}; +uts46_map(371) -> 'V'; +uts46_map(372) -> {'M', [373]}; +uts46_map(373) -> 'V'; +uts46_map(374) -> {'M', [375]}; +uts46_map(375) -> 'V'; +uts46_map(376) -> {'M', [255]}; +uts46_map(377) -> {'M', [378]}; +uts46_map(378) -> 'V'; +uts46_map(379) -> {'M', [380]}; +uts46_map(380) -> 'V'; +uts46_map(381) -> {'M', [382]}; +uts46_map(382) -> 'V'; +uts46_map(383) -> {'M', [115]}; +uts46_map(384) -> 'V'; +uts46_map(385) -> {'M', [595]}; +uts46_map(386) -> {'M', [387]}; +uts46_map(387) -> 'V'; +uts46_map(388) -> {'M', [389]}; +uts46_map(389) -> 'V'; +uts46_map(390) -> {'M', [596]}; +uts46_map(391) -> {'M', [392]}; +uts46_map(392) -> 'V'; +uts46_map(393) -> {'M', [598]}; +uts46_map(394) -> {'M', [599]}; +uts46_map(395) -> {'M', [396]}; +uts46_map(398) -> {'M', [477]}; +uts46_map(399) -> {'M', [601]}; +uts46_map(400) -> {'M', [603]}; +uts46_map(401) -> {'M', [402]}; +uts46_map(402) -> 'V'; +uts46_map(403) -> {'M', [608]}; +uts46_map(404) -> {'M', [611]}; +uts46_map(405) -> 'V'; +uts46_map(406) -> {'M', [617]}; +uts46_map(407) -> {'M', [616]}; +uts46_map(408) -> {'M', [409]}; +uts46_map(412) -> {'M', [623]}; +uts46_map(413) -> {'M', [626]}; +uts46_map(414) -> 'V'; +uts46_map(415) -> {'M', [629]}; +uts46_map(416) -> {'M', [417]}; +uts46_map(417) -> 'V'; +uts46_map(418) -> {'M', [419]}; +uts46_map(419) -> 'V'; +uts46_map(420) -> {'M', [421]}; +uts46_map(421) -> 'V'; +uts46_map(422) -> {'M', [640]}; +uts46_map(423) -> {'M', [424]}; +uts46_map(424) -> 'V'; +uts46_map(425) -> {'M', [643]}; +uts46_map(428) -> {'M', [429]}; +uts46_map(429) -> 'V'; +uts46_map(430) -> {'M', [648]}; +uts46_map(431) -> {'M', [432]}; +uts46_map(432) -> 'V'; +uts46_map(433) -> {'M', [650]}; +uts46_map(434) -> {'M', [651]}; +uts46_map(435) -> {'M', [436]}; +uts46_map(436) -> 'V'; +uts46_map(437) -> {'M', [438]}; +uts46_map(438) -> 'V'; +uts46_map(439) -> {'M', [658]}; +uts46_map(440) -> {'M', 
[441]}; +uts46_map(444) -> {'M', [445]}; +uts46_map(461) -> {'M', [462]}; +uts46_map(462) -> 'V'; +uts46_map(463) -> {'M', [464]}; +uts46_map(464) -> 'V'; +uts46_map(465) -> {'M', [466]}; +uts46_map(466) -> 'V'; +uts46_map(467) -> {'M', [468]}; +uts46_map(468) -> 'V'; +uts46_map(469) -> {'M', [470]}; +uts46_map(470) -> 'V'; +uts46_map(471) -> {'M', [472]}; +uts46_map(472) -> 'V'; +uts46_map(473) -> {'M', [474]}; +uts46_map(474) -> 'V'; +uts46_map(475) -> {'M', [476]}; +uts46_map(478) -> {'M', [479]}; +uts46_map(479) -> 'V'; +uts46_map(480) -> {'M', [481]}; +uts46_map(481) -> 'V'; +uts46_map(482) -> {'M', [483]}; +uts46_map(483) -> 'V'; +uts46_map(484) -> {'M', [485]}; +uts46_map(485) -> 'V'; +uts46_map(486) -> {'M', [487]}; +uts46_map(487) -> 'V'; +uts46_map(488) -> {'M', [489]}; +uts46_map(489) -> 'V'; +uts46_map(490) -> {'M', [491]}; +uts46_map(491) -> 'V'; +uts46_map(492) -> {'M', [493]}; +uts46_map(493) -> 'V'; +uts46_map(494) -> {'M', [495]}; +uts46_map(500) -> {'M', [501]}; +uts46_map(501) -> 'V'; +uts46_map(502) -> {'M', [405]}; +uts46_map(503) -> {'M', [447]}; +uts46_map(504) -> {'M', [505]}; +uts46_map(505) -> 'V'; +uts46_map(506) -> {'M', [507]}; +uts46_map(507) -> 'V'; +uts46_map(508) -> {'M', [509]}; +uts46_map(509) -> 'V'; +uts46_map(510) -> {'M', [511]}; +uts46_map(511) -> 'V'; +uts46_map(512) -> {'M', [513]}; +uts46_map(513) -> 'V'; +uts46_map(514) -> {'M', [515]}; +uts46_map(515) -> 'V'; +uts46_map(516) -> {'M', [517]}; +uts46_map(517) -> 'V'; +uts46_map(518) -> {'M', [519]}; +uts46_map(519) -> 'V'; +uts46_map(520) -> {'M', [521]}; +uts46_map(521) -> 'V'; +uts46_map(522) -> {'M', [523]}; +uts46_map(523) -> 'V'; +uts46_map(524) -> {'M', [525]}; +uts46_map(525) -> 'V'; +uts46_map(526) -> {'M', [527]}; +uts46_map(527) -> 'V'; +uts46_map(528) -> {'M', [529]}; +uts46_map(529) -> 'V'; +uts46_map(530) -> {'M', [531]}; +uts46_map(531) -> 'V'; +uts46_map(532) -> {'M', [533]}; +uts46_map(533) -> 'V'; +uts46_map(534) -> {'M', [535]}; +uts46_map(535) -> 'V'; 
+uts46_map(536) -> {'M', [537]}; +uts46_map(537) -> 'V'; +uts46_map(538) -> {'M', [539]}; +uts46_map(539) -> 'V'; +uts46_map(540) -> {'M', [541]}; +uts46_map(541) -> 'V'; +uts46_map(542) -> {'M', [543]}; +uts46_map(543) -> 'V'; +uts46_map(544) -> {'M', [414]}; +uts46_map(545) -> 'V'; +uts46_map(546) -> {'M', [547]}; +uts46_map(547) -> 'V'; +uts46_map(548) -> {'M', [549]}; +uts46_map(549) -> 'V'; +uts46_map(550) -> {'M', [551]}; +uts46_map(551) -> 'V'; +uts46_map(552) -> {'M', [553]}; +uts46_map(553) -> 'V'; +uts46_map(554) -> {'M', [555]}; +uts46_map(555) -> 'V'; +uts46_map(556) -> {'M', [557]}; +uts46_map(557) -> 'V'; +uts46_map(558) -> {'M', [559]}; +uts46_map(559) -> 'V'; +uts46_map(560) -> {'M', [561]}; +uts46_map(561) -> 'V'; +uts46_map(562) -> {'M', [563]}; +uts46_map(563) -> 'V'; +uts46_map(570) -> {'M', [11365]}; +uts46_map(571) -> {'M', [572]}; +uts46_map(572) -> 'V'; +uts46_map(573) -> {'M', [410]}; +uts46_map(574) -> {'M', [11366]}; +uts46_map(577) -> {'M', [578]}; +uts46_map(578) -> 'V'; +uts46_map(579) -> {'M', [384]}; +uts46_map(580) -> {'M', [649]}; +uts46_map(581) -> {'M', [652]}; +uts46_map(582) -> {'M', [583]}; +uts46_map(583) -> 'V'; +uts46_map(584) -> {'M', [585]}; +uts46_map(585) -> 'V'; +uts46_map(586) -> {'M', [587]}; +uts46_map(587) -> 'V'; +uts46_map(588) -> {'M', [589]}; +uts46_map(589) -> 'V'; +uts46_map(590) -> {'M', [591]}; +uts46_map(591) -> 'V'; +uts46_map(688) -> {'M', [104]}; +uts46_map(689) -> {'M', [614]}; +uts46_map(690) -> {'M', [106]}; +uts46_map(691) -> {'M', [114]}; +uts46_map(692) -> {'M', [633]}; +uts46_map(693) -> {'M', [635]}; +uts46_map(694) -> {'M', [641]}; +uts46_map(695) -> {'M', [119]}; +uts46_map(696) -> {'M', [121]}; +uts46_map(728) -> {'3', [32,774]}; +uts46_map(729) -> {'3', [32,775]}; +uts46_map(730) -> {'3', [32,778]}; +uts46_map(731) -> {'3', [32,808]}; +uts46_map(732) -> {'3', [32,771]}; +uts46_map(733) -> {'3', [32,779]}; +uts46_map(734) -> 'V'; +uts46_map(735) -> 'V'; +uts46_map(736) -> {'M', [611]}; 
+uts46_map(737) -> {'M', [108]}; +uts46_map(738) -> {'M', [115]}; +uts46_map(739) -> {'M', [120]}; +uts46_map(740) -> {'M', [661]}; +uts46_map(748) -> 'V'; +uts46_map(749) -> 'V'; +uts46_map(750) -> 'V'; +uts46_map(832) -> {'M', [768]}; +uts46_map(833) -> {'M', [769]}; +uts46_map(834) -> 'V'; +uts46_map(835) -> {'M', [787]}; +uts46_map(836) -> {'M', [776,769]}; +uts46_map(837) -> {'M', [953]}; +uts46_map(847) -> 'I'; +uts46_map(866) -> 'V'; +uts46_map(880) -> {'M', [881]}; +uts46_map(881) -> 'V'; +uts46_map(882) -> {'M', [883]}; +uts46_map(883) -> 'V'; +uts46_map(884) -> {'M', [697]}; +uts46_map(885) -> 'V'; +uts46_map(886) -> {'M', [887]}; +uts46_map(887) -> 'V'; +uts46_map(890) -> {'3', [32,953]}; +uts46_map(894) -> {'3', [59]}; +uts46_map(895) -> {'M', [1011]}; +uts46_map(900) -> {'3', [32,769]}; +uts46_map(901) -> {'3', [32,776,769]}; +uts46_map(902) -> {'M', [940]}; +uts46_map(903) -> {'M', [183]}; +uts46_map(904) -> {'M', [941]}; +uts46_map(905) -> {'M', [942]}; +uts46_map(906) -> {'M', [943]}; +uts46_map(907) -> 'X'; +uts46_map(908) -> {'M', [972]}; +uts46_map(909) -> 'X'; +uts46_map(910) -> {'M', [973]}; +uts46_map(911) -> {'M', [974]}; +uts46_map(912) -> 'V'; +uts46_map(913) -> {'M', [945]}; +uts46_map(914) -> {'M', [946]}; +uts46_map(915) -> {'M', [947]}; +uts46_map(916) -> {'M', [948]}; +uts46_map(917) -> {'M', [949]}; +uts46_map(918) -> {'M', [950]}; +uts46_map(919) -> {'M', [951]}; +uts46_map(920) -> {'M', [952]}; +uts46_map(921) -> {'M', [953]}; +uts46_map(922) -> {'M', [954]}; +uts46_map(923) -> {'M', [955]}; +uts46_map(924) -> {'M', [956]}; +uts46_map(925) -> {'M', [957]}; +uts46_map(926) -> {'M', [958]}; +uts46_map(927) -> {'M', [959]}; +uts46_map(928) -> {'M', [960]}; +uts46_map(929) -> {'M', [961]}; +uts46_map(930) -> 'X'; +uts46_map(931) -> {'M', [963]}; +uts46_map(932) -> {'M', [964]}; +uts46_map(933) -> {'M', [965]}; +uts46_map(934) -> {'M', [966]}; +uts46_map(935) -> {'M', [967]}; +uts46_map(936) -> {'M', [968]}; +uts46_map(937) -> {'M', 
[969]}; +uts46_map(938) -> {'M', [970]}; +uts46_map(939) -> {'M', [971]}; +uts46_map(962) -> {'D', [963]}; +uts46_map(975) -> {'M', [983]}; +uts46_map(976) -> {'M', [946]}; +uts46_map(977) -> {'M', [952]}; +uts46_map(978) -> {'M', [965]}; +uts46_map(979) -> {'M', [973]}; +uts46_map(980) -> {'M', [971]}; +uts46_map(981) -> {'M', [966]}; +uts46_map(982) -> {'M', [960]}; +uts46_map(983) -> 'V'; +uts46_map(984) -> {'M', [985]}; +uts46_map(985) -> 'V'; +uts46_map(986) -> {'M', [987]}; +uts46_map(987) -> 'V'; +uts46_map(988) -> {'M', [989]}; +uts46_map(989) -> 'V'; +uts46_map(990) -> {'M', [991]}; +uts46_map(991) -> 'V'; +uts46_map(992) -> {'M', [993]}; +uts46_map(993) -> 'V'; +uts46_map(994) -> {'M', [995]}; +uts46_map(995) -> 'V'; +uts46_map(996) -> {'M', [997]}; +uts46_map(997) -> 'V'; +uts46_map(998) -> {'M', [999]}; +uts46_map(999) -> 'V'; +uts46_map(1000) -> {'M', [1001]}; +uts46_map(1001) -> 'V'; +uts46_map(1002) -> {'M', [1003]}; +uts46_map(1003) -> 'V'; +uts46_map(1004) -> {'M', [1005]}; +uts46_map(1005) -> 'V'; +uts46_map(1006) -> {'M', [1007]}; +uts46_map(1007) -> 'V'; +uts46_map(1008) -> {'M', [954]}; +uts46_map(1009) -> {'M', [961]}; +uts46_map(1010) -> {'M', [963]}; +uts46_map(1011) -> 'V'; +uts46_map(1012) -> {'M', [952]}; +uts46_map(1013) -> {'M', [949]}; +uts46_map(1014) -> 'V'; +uts46_map(1015) -> {'M', [1016]}; +uts46_map(1016) -> 'V'; +uts46_map(1017) -> {'M', [963]}; +uts46_map(1018) -> {'M', [1019]}; +uts46_map(1019) -> 'V'; +uts46_map(1020) -> 'V'; +uts46_map(1021) -> {'M', [891]}; +uts46_map(1022) -> {'M', [892]}; +uts46_map(1023) -> {'M', [893]}; +uts46_map(1024) -> {'M', [1104]}; +uts46_map(1025) -> {'M', [1105]}; +uts46_map(1026) -> {'M', [1106]}; +uts46_map(1027) -> {'M', [1107]}; +uts46_map(1028) -> {'M', [1108]}; +uts46_map(1029) -> {'M', [1109]}; +uts46_map(1030) -> {'M', [1110]}; +uts46_map(1031) -> {'M', [1111]}; +uts46_map(1032) -> {'M', [1112]}; +uts46_map(1033) -> {'M', [1113]}; +uts46_map(1034) -> {'M', [1114]}; +uts46_map(1035) -> 
{'M', [1115]}; +uts46_map(1036) -> {'M', [1116]}; +uts46_map(1037) -> {'M', [1117]}; +uts46_map(1038) -> {'M', [1118]}; +uts46_map(1039) -> {'M', [1119]}; +uts46_map(1040) -> {'M', [1072]}; +uts46_map(1041) -> {'M', [1073]}; +uts46_map(1042) -> {'M', [1074]}; +uts46_map(1043) -> {'M', [1075]}; +uts46_map(1044) -> {'M', [1076]}; +uts46_map(1045) -> {'M', [1077]}; +uts46_map(1046) -> {'M', [1078]}; +uts46_map(1047) -> {'M', [1079]}; +uts46_map(1048) -> {'M', [1080]}; +uts46_map(1049) -> {'M', [1081]}; +uts46_map(1050) -> {'M', [1082]}; +uts46_map(1051) -> {'M', [1083]}; +uts46_map(1052) -> {'M', [1084]}; +uts46_map(1053) -> {'M', [1085]}; +uts46_map(1054) -> {'M', [1086]}; +uts46_map(1055) -> {'M', [1087]}; +uts46_map(1056) -> {'M', [1088]}; +uts46_map(1057) -> {'M', [1089]}; +uts46_map(1058) -> {'M', [1090]}; +uts46_map(1059) -> {'M', [1091]}; +uts46_map(1060) -> {'M', [1092]}; +uts46_map(1061) -> {'M', [1093]}; +uts46_map(1062) -> {'M', [1094]}; +uts46_map(1063) -> {'M', [1095]}; +uts46_map(1064) -> {'M', [1096]}; +uts46_map(1065) -> {'M', [1097]}; +uts46_map(1066) -> {'M', [1098]}; +uts46_map(1067) -> {'M', [1099]}; +uts46_map(1068) -> {'M', [1100]}; +uts46_map(1069) -> {'M', [1101]}; +uts46_map(1070) -> {'M', [1102]}; +uts46_map(1071) -> {'M', [1103]}; +uts46_map(1104) -> 'V'; +uts46_map(1117) -> 'V'; +uts46_map(1120) -> {'M', [1121]}; +uts46_map(1121) -> 'V'; +uts46_map(1122) -> {'M', [1123]}; +uts46_map(1123) -> 'V'; +uts46_map(1124) -> {'M', [1125]}; +uts46_map(1125) -> 'V'; +uts46_map(1126) -> {'M', [1127]}; +uts46_map(1127) -> 'V'; +uts46_map(1128) -> {'M', [1129]}; +uts46_map(1129) -> 'V'; +uts46_map(1130) -> {'M', [1131]}; +uts46_map(1131) -> 'V'; +uts46_map(1132) -> {'M', [1133]}; +uts46_map(1133) -> 'V'; +uts46_map(1134) -> {'M', [1135]}; +uts46_map(1135) -> 'V'; +uts46_map(1136) -> {'M', [1137]}; +uts46_map(1137) -> 'V'; +uts46_map(1138) -> {'M', [1139]}; +uts46_map(1139) -> 'V'; +uts46_map(1140) -> {'M', [1141]}; +uts46_map(1141) -> 'V'; 
+uts46_map(1142) -> {'M', [1143]}; +uts46_map(1143) -> 'V'; +uts46_map(1144) -> {'M', [1145]}; +uts46_map(1145) -> 'V'; +uts46_map(1146) -> {'M', [1147]}; +uts46_map(1147) -> 'V'; +uts46_map(1148) -> {'M', [1149]}; +uts46_map(1149) -> 'V'; +uts46_map(1150) -> {'M', [1151]}; +uts46_map(1151) -> 'V'; +uts46_map(1152) -> {'M', [1153]}; +uts46_map(1153) -> 'V'; +uts46_map(1154) -> 'V'; +uts46_map(1159) -> 'V'; +uts46_map(1162) -> {'M', [1163]}; +uts46_map(1163) -> 'V'; +uts46_map(1164) -> {'M', [1165]}; +uts46_map(1165) -> 'V'; +uts46_map(1166) -> {'M', [1167]}; +uts46_map(1167) -> 'V'; +uts46_map(1168) -> {'M', [1169]}; +uts46_map(1169) -> 'V'; +uts46_map(1170) -> {'M', [1171]}; +uts46_map(1171) -> 'V'; +uts46_map(1172) -> {'M', [1173]}; +uts46_map(1173) -> 'V'; +uts46_map(1174) -> {'M', [1175]}; +uts46_map(1175) -> 'V'; +uts46_map(1176) -> {'M', [1177]}; +uts46_map(1177) -> 'V'; +uts46_map(1178) -> {'M', [1179]}; +uts46_map(1179) -> 'V'; +uts46_map(1180) -> {'M', [1181]}; +uts46_map(1181) -> 'V'; +uts46_map(1182) -> {'M', [1183]}; +uts46_map(1183) -> 'V'; +uts46_map(1184) -> {'M', [1185]}; +uts46_map(1185) -> 'V'; +uts46_map(1186) -> {'M', [1187]}; +uts46_map(1187) -> 'V'; +uts46_map(1188) -> {'M', [1189]}; +uts46_map(1189) -> 'V'; +uts46_map(1190) -> {'M', [1191]}; +uts46_map(1191) -> 'V'; +uts46_map(1192) -> {'M', [1193]}; +uts46_map(1193) -> 'V'; +uts46_map(1194) -> {'M', [1195]}; +uts46_map(1195) -> 'V'; +uts46_map(1196) -> {'M', [1197]}; +uts46_map(1197) -> 'V'; +uts46_map(1198) -> {'M', [1199]}; +uts46_map(1199) -> 'V'; +uts46_map(1200) -> {'M', [1201]}; +uts46_map(1201) -> 'V'; +uts46_map(1202) -> {'M', [1203]}; +uts46_map(1203) -> 'V'; +uts46_map(1204) -> {'M', [1205]}; +uts46_map(1205) -> 'V'; +uts46_map(1206) -> {'M', [1207]}; +uts46_map(1207) -> 'V'; +uts46_map(1208) -> {'M', [1209]}; +uts46_map(1209) -> 'V'; +uts46_map(1210) -> {'M', [1211]}; +uts46_map(1211) -> 'V'; +uts46_map(1212) -> {'M', [1213]}; +uts46_map(1213) -> 'V'; +uts46_map(1214) -> {'M', 
[1215]}; +uts46_map(1215) -> 'V'; +uts46_map(1216) -> 'X'; +uts46_map(1217) -> {'M', [1218]}; +uts46_map(1218) -> 'V'; +uts46_map(1219) -> {'M', [1220]}; +uts46_map(1220) -> 'V'; +uts46_map(1221) -> {'M', [1222]}; +uts46_map(1222) -> 'V'; +uts46_map(1223) -> {'M', [1224]}; +uts46_map(1224) -> 'V'; +uts46_map(1225) -> {'M', [1226]}; +uts46_map(1226) -> 'V'; +uts46_map(1227) -> {'M', [1228]}; +uts46_map(1228) -> 'V'; +uts46_map(1229) -> {'M', [1230]}; +uts46_map(1230) -> 'V'; +uts46_map(1231) -> 'V'; +uts46_map(1232) -> {'M', [1233]}; +uts46_map(1233) -> 'V'; +uts46_map(1234) -> {'M', [1235]}; +uts46_map(1235) -> 'V'; +uts46_map(1236) -> {'M', [1237]}; +uts46_map(1237) -> 'V'; +uts46_map(1238) -> {'M', [1239]}; +uts46_map(1239) -> 'V'; +uts46_map(1240) -> {'M', [1241]}; +uts46_map(1241) -> 'V'; +uts46_map(1242) -> {'M', [1243]}; +uts46_map(1243) -> 'V'; +uts46_map(1244) -> {'M', [1245]}; +uts46_map(1245) -> 'V'; +uts46_map(1246) -> {'M', [1247]}; +uts46_map(1247) -> 'V'; +uts46_map(1248) -> {'M', [1249]}; +uts46_map(1249) -> 'V'; +uts46_map(1250) -> {'M', [1251]}; +uts46_map(1251) -> 'V'; +uts46_map(1252) -> {'M', [1253]}; +uts46_map(1253) -> 'V'; +uts46_map(1254) -> {'M', [1255]}; +uts46_map(1255) -> 'V'; +uts46_map(1256) -> {'M', [1257]}; +uts46_map(1257) -> 'V'; +uts46_map(1258) -> {'M', [1259]}; +uts46_map(1259) -> 'V'; +uts46_map(1260) -> {'M', [1261]}; +uts46_map(1261) -> 'V'; +uts46_map(1262) -> {'M', [1263]}; +uts46_map(1263) -> 'V'; +uts46_map(1264) -> {'M', [1265]}; +uts46_map(1265) -> 'V'; +uts46_map(1266) -> {'M', [1267]}; +uts46_map(1267) -> 'V'; +uts46_map(1268) -> {'M', [1269]}; +uts46_map(1269) -> 'V'; +uts46_map(1270) -> {'M', [1271]}; +uts46_map(1271) -> 'V'; +uts46_map(1272) -> {'M', [1273]}; +uts46_map(1273) -> 'V'; +uts46_map(1274) -> {'M', [1275]}; +uts46_map(1275) -> 'V'; +uts46_map(1276) -> {'M', [1277]}; +uts46_map(1277) -> 'V'; +uts46_map(1278) -> {'M', [1279]}; +uts46_map(1279) -> 'V'; +uts46_map(1280) -> {'M', [1281]}; +uts46_map(1281) -> 
'V'; +uts46_map(1282) -> {'M', [1283]}; +uts46_map(1283) -> 'V'; +uts46_map(1284) -> {'M', [1285]}; +uts46_map(1285) -> 'V'; +uts46_map(1286) -> {'M', [1287]}; +uts46_map(1287) -> 'V'; +uts46_map(1288) -> {'M', [1289]}; +uts46_map(1289) -> 'V'; +uts46_map(1290) -> {'M', [1291]}; +uts46_map(1291) -> 'V'; +uts46_map(1292) -> {'M', [1293]}; +uts46_map(1293) -> 'V'; +uts46_map(1294) -> {'M', [1295]}; +uts46_map(1295) -> 'V'; +uts46_map(1296) -> {'M', [1297]}; +uts46_map(1297) -> 'V'; +uts46_map(1298) -> {'M', [1299]}; +uts46_map(1299) -> 'V'; +uts46_map(1300) -> {'M', [1301]}; +uts46_map(1301) -> 'V'; +uts46_map(1302) -> {'M', [1303]}; +uts46_map(1303) -> 'V'; +uts46_map(1304) -> {'M', [1305]}; +uts46_map(1305) -> 'V'; +uts46_map(1306) -> {'M', [1307]}; +uts46_map(1307) -> 'V'; +uts46_map(1308) -> {'M', [1309]}; +uts46_map(1309) -> 'V'; +uts46_map(1310) -> {'M', [1311]}; +uts46_map(1311) -> 'V'; +uts46_map(1312) -> {'M', [1313]}; +uts46_map(1313) -> 'V'; +uts46_map(1314) -> {'M', [1315]}; +uts46_map(1315) -> 'V'; +uts46_map(1316) -> {'M', [1317]}; +uts46_map(1317) -> 'V'; +uts46_map(1318) -> {'M', [1319]}; +uts46_map(1319) -> 'V'; +uts46_map(1320) -> {'M', [1321]}; +uts46_map(1321) -> 'V'; +uts46_map(1322) -> {'M', [1323]}; +uts46_map(1323) -> 'V'; +uts46_map(1324) -> {'M', [1325]}; +uts46_map(1325) -> 'V'; +uts46_map(1326) -> {'M', [1327]}; +uts46_map(1327) -> 'V'; +uts46_map(1328) -> 'X'; +uts46_map(1329) -> {'M', [1377]}; +uts46_map(1330) -> {'M', [1378]}; +uts46_map(1331) -> {'M', [1379]}; +uts46_map(1332) -> {'M', [1380]}; +uts46_map(1333) -> {'M', [1381]}; +uts46_map(1334) -> {'M', [1382]}; +uts46_map(1335) -> {'M', [1383]}; +uts46_map(1336) -> {'M', [1384]}; +uts46_map(1337) -> {'M', [1385]}; +uts46_map(1338) -> {'M', [1386]}; +uts46_map(1339) -> {'M', [1387]}; +uts46_map(1340) -> {'M', [1388]}; +uts46_map(1341) -> {'M', [1389]}; +uts46_map(1342) -> {'M', [1390]}; +uts46_map(1343) -> {'M', [1391]}; +uts46_map(1344) -> {'M', [1392]}; +uts46_map(1345) -> {'M', 
[1393]}; +uts46_map(1346) -> {'M', [1394]}; +uts46_map(1347) -> {'M', [1395]}; +uts46_map(1348) -> {'M', [1396]}; +uts46_map(1349) -> {'M', [1397]}; +uts46_map(1350) -> {'M', [1398]}; +uts46_map(1351) -> {'M', [1399]}; +uts46_map(1352) -> {'M', [1400]}; +uts46_map(1353) -> {'M', [1401]}; +uts46_map(1354) -> {'M', [1402]}; +uts46_map(1355) -> {'M', [1403]}; +uts46_map(1356) -> {'M', [1404]}; +uts46_map(1357) -> {'M', [1405]}; +uts46_map(1358) -> {'M', [1406]}; +uts46_map(1359) -> {'M', [1407]}; +uts46_map(1360) -> {'M', [1408]}; +uts46_map(1361) -> {'M', [1409]}; +uts46_map(1362) -> {'M', [1410]}; +uts46_map(1363) -> {'M', [1411]}; +uts46_map(1364) -> {'M', [1412]}; +uts46_map(1365) -> {'M', [1413]}; +uts46_map(1366) -> {'M', [1414]}; +uts46_map(1369) -> 'V'; +uts46_map(1376) -> 'V'; +uts46_map(1415) -> {'M', [1381,1410]}; +uts46_map(1416) -> 'V'; +uts46_map(1417) -> 'V'; +uts46_map(1418) -> 'V'; +uts46_map(1423) -> 'V'; +uts46_map(1424) -> 'X'; +uts46_map(1442) -> 'V'; +uts46_map(1466) -> 'V'; +uts46_map(1470) -> 'V'; +uts46_map(1471) -> 'V'; +uts46_map(1472) -> 'V'; +uts46_map(1475) -> 'V'; +uts46_map(1476) -> 'V'; +uts46_map(1477) -> 'V'; +uts46_map(1478) -> 'V'; +uts46_map(1479) -> 'V'; +uts46_map(1519) -> 'V'; +uts46_map(1540) -> 'X'; +uts46_map(1541) -> 'X'; +uts46_map(1547) -> 'V'; +uts46_map(1548) -> 'V'; +uts46_map(1563) -> 'V'; +uts46_map(1564) -> 'X'; +uts46_map(1565) -> 'X'; +uts46_map(1566) -> 'V'; +uts46_map(1567) -> 'V'; +uts46_map(1568) -> 'V'; +uts46_map(1600) -> 'V'; +uts46_map(1631) -> 'V'; +uts46_map(1653) -> {'M', [1575,1652]}; +uts46_map(1654) -> {'M', [1608,1652]}; +uts46_map(1655) -> {'M', [1735,1652]}; +uts46_map(1656) -> {'M', [1610,1652]}; +uts46_map(1727) -> 'V'; +uts46_map(1743) -> 'V'; +uts46_map(1748) -> 'V'; +uts46_map(1757) -> 'X'; +uts46_map(1758) -> 'V'; +uts46_map(1769) -> 'V'; +uts46_map(1791) -> 'V'; +uts46_map(1806) -> 'X'; +uts46_map(1807) -> 'X'; +uts46_map(1969) -> 'V'; +uts46_map(2045) -> 'V'; +uts46_map(2111) -> 'X'; 
+uts46_map(2142) -> 'V'; +uts46_map(2143) -> 'X'; +uts46_map(2208) -> 'V'; +uts46_map(2209) -> 'V'; +uts46_map(2229) -> 'X'; +uts46_map(2259) -> 'V'; +uts46_map(2274) -> 'X'; +uts46_map(2275) -> 'V'; +uts46_map(2303) -> 'V'; +uts46_map(2304) -> 'V'; +uts46_map(2308) -> 'V'; +uts46_map(2382) -> 'V'; +uts46_map(2383) -> 'V'; +uts46_map(2389) -> 'V'; +uts46_map(2392) -> {'M', [2325,2364]}; +uts46_map(2393) -> {'M', [2326,2364]}; +uts46_map(2394) -> {'M', [2327,2364]}; +uts46_map(2395) -> {'M', [2332,2364]}; +uts46_map(2396) -> {'M', [2337,2364]}; +uts46_map(2397) -> {'M', [2338,2364]}; +uts46_map(2398) -> {'M', [2347,2364]}; +uts46_map(2399) -> {'M', [2351,2364]}; +uts46_map(2416) -> 'V'; +uts46_map(2424) -> 'V'; +uts46_map(2429) -> 'V'; +uts46_map(2432) -> 'V'; +uts46_map(2436) -> 'X'; +uts46_map(2473) -> 'X'; +uts46_map(2481) -> 'X'; +uts46_map(2482) -> 'V'; +uts46_map(2492) -> 'V'; +uts46_map(2493) -> 'V'; +uts46_map(2510) -> 'V'; +uts46_map(2519) -> 'V'; +uts46_map(2524) -> {'M', [2465,2492]}; +uts46_map(2525) -> {'M', [2466,2492]}; +uts46_map(2526) -> 'X'; +uts46_map(2527) -> {'M', [2479,2492]}; +uts46_map(2555) -> 'V'; +uts46_map(2556) -> 'V'; +uts46_map(2557) -> 'V'; +uts46_map(2558) -> 'V'; +uts46_map(2561) -> 'V'; +uts46_map(2562) -> 'V'; +uts46_map(2563) -> 'V'; +uts46_map(2564) -> 'X'; +uts46_map(2601) -> 'X'; +uts46_map(2609) -> 'X'; +uts46_map(2610) -> 'V'; +uts46_map(2611) -> {'M', [2610,2620]}; +uts46_map(2612) -> 'X'; +uts46_map(2613) -> 'V'; +uts46_map(2614) -> {'M', [2616,2620]}; +uts46_map(2615) -> 'X'; +uts46_map(2620) -> 'V'; +uts46_map(2621) -> 'X'; +uts46_map(2641) -> 'V'; +uts46_map(2649) -> {'M', [2582,2620]}; +uts46_map(2650) -> {'M', [2583,2620]}; +uts46_map(2651) -> {'M', [2588,2620]}; +uts46_map(2652) -> 'V'; +uts46_map(2653) -> 'X'; +uts46_map(2654) -> {'M', [2603,2620]}; +uts46_map(2677) -> 'V'; +uts46_map(2678) -> 'V'; +uts46_map(2692) -> 'X'; +uts46_map(2700) -> 'V'; +uts46_map(2701) -> 'V'; +uts46_map(2702) -> 'X'; +uts46_map(2706) -> 
'X'; +uts46_map(2729) -> 'X'; +uts46_map(2737) -> 'X'; +uts46_map(2740) -> 'X'; +uts46_map(2758) -> 'X'; +uts46_map(2762) -> 'X'; +uts46_map(2768) -> 'V'; +uts46_map(2784) -> 'V'; +uts46_map(2800) -> 'V'; +uts46_map(2801) -> 'V'; +uts46_map(2809) -> 'V'; +uts46_map(2816) -> 'X'; +uts46_map(2820) -> 'X'; +uts46_map(2857) -> 'X'; +uts46_map(2865) -> 'X'; +uts46_map(2868) -> 'X'; +uts46_map(2869) -> 'V'; +uts46_map(2884) -> 'V'; +uts46_map(2901) -> 'V'; +uts46_map(2908) -> {'M', [2849,2876]}; +uts46_map(2909) -> {'M', [2850,2876]}; +uts46_map(2910) -> 'X'; +uts46_map(2928) -> 'V'; +uts46_map(2929) -> 'V'; +uts46_map(2948) -> 'X'; +uts46_map(2961) -> 'X'; +uts46_map(2971) -> 'X'; +uts46_map(2972) -> 'V'; +uts46_map(2973) -> 'X'; +uts46_map(2998) -> 'V'; +uts46_map(3017) -> 'X'; +uts46_map(3024) -> 'V'; +uts46_map(3031) -> 'V'; +uts46_map(3046) -> 'V'; +uts46_map(3072) -> 'V'; +uts46_map(3076) -> 'V'; +uts46_map(3085) -> 'X'; +uts46_map(3089) -> 'X'; +uts46_map(3113) -> 'X'; +uts46_map(3124) -> 'V'; +uts46_map(3133) -> 'V'; +uts46_map(3141) -> 'X'; +uts46_map(3145) -> 'X'; +uts46_map(3159) -> 'X'; +uts46_map(3162) -> 'V'; +uts46_map(3191) -> 'V'; +uts46_map(3200) -> 'V'; +uts46_map(3201) -> 'V'; +uts46_map(3204) -> 'V'; +uts46_map(3213) -> 'X'; +uts46_map(3217) -> 'X'; +uts46_map(3241) -> 'X'; +uts46_map(3252) -> 'X'; +uts46_map(3269) -> 'X'; +uts46_map(3273) -> 'X'; +uts46_map(3294) -> 'V'; +uts46_map(3295) -> 'X'; +uts46_map(3312) -> 'X'; +uts46_map(3328) -> 'V'; +uts46_map(3329) -> 'V'; +uts46_map(3332) -> 'V'; +uts46_map(3341) -> 'X'; +uts46_map(3345) -> 'X'; +uts46_map(3369) -> 'V'; +uts46_map(3386) -> 'V'; +uts46_map(3389) -> 'V'; +uts46_map(3396) -> 'V'; +uts46_map(3397) -> 'X'; +uts46_map(3401) -> 'X'; +uts46_map(3406) -> 'V'; +uts46_map(3407) -> 'V'; +uts46_map(3415) -> 'V'; +uts46_map(3423) -> 'V'; +uts46_map(3449) -> 'V'; +uts46_map(3456) -> 'X'; +uts46_map(3457) -> 'V'; +uts46_map(3460) -> 'X'; +uts46_map(3506) -> 'X'; +uts46_map(3516) -> 'X'; 
+uts46_map(3517) -> 'V'; +uts46_map(3530) -> 'V'; +uts46_map(3541) -> 'X'; +uts46_map(3542) -> 'V'; +uts46_map(3543) -> 'X'; +uts46_map(3572) -> 'V'; +uts46_map(3635) -> {'M', [3661,3634]}; +uts46_map(3647) -> 'V'; +uts46_map(3663) -> 'V'; +uts46_map(3715) -> 'X'; +uts46_map(3716) -> 'V'; +uts46_map(3717) -> 'X'; +uts46_map(3718) -> 'V'; +uts46_map(3721) -> 'V'; +uts46_map(3722) -> 'V'; +uts46_map(3723) -> 'X'; +uts46_map(3724) -> 'V'; +uts46_map(3725) -> 'V'; +uts46_map(3736) -> 'V'; +uts46_map(3744) -> 'V'; +uts46_map(3748) -> 'X'; +uts46_map(3749) -> 'V'; +uts46_map(3750) -> 'X'; +uts46_map(3751) -> 'V'; +uts46_map(3756) -> 'V'; +uts46_map(3763) -> {'M', [3789,3762]}; +uts46_map(3770) -> 'V'; +uts46_map(3781) -> 'X'; +uts46_map(3782) -> 'V'; +uts46_map(3783) -> 'X'; +uts46_map(3804) -> {'M', [3755,3737]}; +uts46_map(3805) -> {'M', [3755,3745]}; +uts46_map(3840) -> 'V'; +uts46_map(3851) -> 'V'; +uts46_map(3852) -> {'M', [3851]}; +uts46_map(3893) -> 'V'; +uts46_map(3894) -> 'V'; +uts46_map(3895) -> 'V'; +uts46_map(3896) -> 'V'; +uts46_map(3897) -> 'V'; +uts46_map(3907) -> {'M', [3906,4023]}; +uts46_map(3912) -> 'X'; +uts46_map(3917) -> {'M', [3916,4023]}; +uts46_map(3922) -> {'M', [3921,4023]}; +uts46_map(3927) -> {'M', [3926,4023]}; +uts46_map(3932) -> {'M', [3931,4023]}; +uts46_map(3945) -> {'M', [3904,4021]}; +uts46_map(3946) -> 'V'; +uts46_map(3955) -> {'M', [3953,3954]}; +uts46_map(3956) -> 'V'; +uts46_map(3957) -> {'M', [3953,3956]}; +uts46_map(3958) -> {'M', [4018,3968]}; +uts46_map(3959) -> {'M', [4018,3953,3968]}; +uts46_map(3960) -> {'M', [4019,3968]}; +uts46_map(3961) -> {'M', [4019,3953,3968]}; +uts46_map(3969) -> {'M', [3953,3968]}; +uts46_map(3973) -> 'V'; +uts46_map(3987) -> {'M', [3986,4023]}; +uts46_map(3990) -> 'V'; +uts46_map(3991) -> 'V'; +uts46_map(3992) -> 'X'; +uts46_map(3997) -> {'M', [3996,4023]}; +uts46_map(4002) -> {'M', [4001,4023]}; +uts46_map(4007) -> {'M', [4006,4023]}; +uts46_map(4012) -> {'M', [4011,4023]}; +uts46_map(4013) -> 'V'; 
+uts46_map(4024) -> 'V'; +uts46_map(4025) -> {'M', [3984,4021]}; +uts46_map(4029) -> 'X'; +uts46_map(4038) -> 'V'; +uts46_map(4045) -> 'X'; +uts46_map(4046) -> 'V'; +uts46_map(4047) -> 'V'; +uts46_map(4130) -> 'V'; +uts46_map(4136) -> 'V'; +uts46_map(4139) -> 'V'; +uts46_map(4294) -> 'X'; +uts46_map(4295) -> {'M', [11559]}; +uts46_map(4301) -> {'M', [11565]}; +uts46_map(4347) -> 'V'; +uts46_map(4348) -> {'M', [4316]}; +uts46_map(4615) -> 'V'; +uts46_map(4679) -> 'V'; +uts46_map(4680) -> 'V'; +uts46_map(4681) -> 'X'; +uts46_map(4695) -> 'X'; +uts46_map(4696) -> 'V'; +uts46_map(4697) -> 'X'; +uts46_map(4743) -> 'V'; +uts46_map(4744) -> 'V'; +uts46_map(4745) -> 'X'; +uts46_map(4783) -> 'V'; +uts46_map(4784) -> 'V'; +uts46_map(4785) -> 'X'; +uts46_map(4799) -> 'X'; +uts46_map(4800) -> 'V'; +uts46_map(4801) -> 'X'; +uts46_map(4815) -> 'V'; +uts46_map(4823) -> 'X'; +uts46_map(4847) -> 'V'; +uts46_map(4879) -> 'V'; +uts46_map(4880) -> 'V'; +uts46_map(4881) -> 'X'; +uts46_map(4895) -> 'V'; +uts46_map(4935) -> 'V'; +uts46_map(4959) -> 'V'; +uts46_map(4960) -> 'V'; +uts46_map(5109) -> 'V'; +uts46_map(5112) -> {'M', [5104]}; +uts46_map(5113) -> {'M', [5105]}; +uts46_map(5114) -> {'M', [5106]}; +uts46_map(5115) -> {'M', [5107]}; +uts46_map(5116) -> {'M', [5108]}; +uts46_map(5117) -> {'M', [5109]}; +uts46_map(5120) -> 'V'; +uts46_map(5760) -> 'X'; +uts46_map(5901) -> 'X'; +uts46_map(5997) -> 'X'; +uts46_map(6001) -> 'X'; +uts46_map(6103) -> 'V'; +uts46_map(6108) -> 'V'; +uts46_map(6109) -> 'V'; +uts46_map(6150) -> 'X'; +uts46_map(6158) -> 'X'; +uts46_map(6159) -> 'X'; +uts46_map(6264) -> 'V'; +uts46_map(6314) -> 'V'; +uts46_map(6431) -> 'X'; +uts46_map(6464) -> 'V'; +uts46_map(6618) -> 'V'; +uts46_map(6751) -> 'X'; +uts46_map(6823) -> 'V'; +uts46_map(6846) -> 'V'; +uts46_map(7296) -> {'M', [1074]}; +uts46_map(7297) -> {'M', [1076]}; +uts46_map(7298) -> {'M', [1086]}; +uts46_map(7299) -> {'M', [1089]}; +uts46_map(7302) -> {'M', [1098]}; +uts46_map(7303) -> {'M', [1123]}; 
+uts46_map(7304) -> {'M', [42571]}; +uts46_map(7312) -> {'M', [4304]}; +uts46_map(7313) -> {'M', [4305]}; +uts46_map(7314) -> {'M', [4306]}; +uts46_map(7315) -> {'M', [4307]}; +uts46_map(7316) -> {'M', [4308]}; +uts46_map(7317) -> {'M', [4309]}; +uts46_map(7318) -> {'M', [4310]}; +uts46_map(7319) -> {'M', [4311]}; +uts46_map(7320) -> {'M', [4312]}; +uts46_map(7321) -> {'M', [4313]}; +uts46_map(7322) -> {'M', [4314]}; +uts46_map(7323) -> {'M', [4315]}; +uts46_map(7324) -> {'M', [4316]}; +uts46_map(7325) -> {'M', [4317]}; +uts46_map(7326) -> {'M', [4318]}; +uts46_map(7327) -> {'M', [4319]}; +uts46_map(7328) -> {'M', [4320]}; +uts46_map(7329) -> {'M', [4321]}; +uts46_map(7330) -> {'M', [4322]}; +uts46_map(7331) -> {'M', [4323]}; +uts46_map(7332) -> {'M', [4324]}; +uts46_map(7333) -> {'M', [4325]}; +uts46_map(7334) -> {'M', [4326]}; +uts46_map(7335) -> {'M', [4327]}; +uts46_map(7336) -> {'M', [4328]}; +uts46_map(7337) -> {'M', [4329]}; +uts46_map(7338) -> {'M', [4330]}; +uts46_map(7339) -> {'M', [4331]}; +uts46_map(7340) -> {'M', [4332]}; +uts46_map(7341) -> {'M', [4333]}; +uts46_map(7342) -> {'M', [4334]}; +uts46_map(7343) -> {'M', [4335]}; +uts46_map(7344) -> {'M', [4336]}; +uts46_map(7345) -> {'M', [4337]}; +uts46_map(7346) -> {'M', [4338]}; +uts46_map(7347) -> {'M', [4339]}; +uts46_map(7348) -> {'M', [4340]}; +uts46_map(7349) -> {'M', [4341]}; +uts46_map(7350) -> {'M', [4342]}; +uts46_map(7351) -> {'M', [4343]}; +uts46_map(7352) -> {'M', [4344]}; +uts46_map(7353) -> {'M', [4345]}; +uts46_map(7354) -> {'M', [4346]}; +uts46_map(7357) -> {'M', [4349]}; +uts46_map(7358) -> {'M', [4350]}; +uts46_map(7359) -> {'M', [4351]}; +uts46_map(7379) -> 'V'; +uts46_map(7415) -> 'V'; +uts46_map(7418) -> 'V'; +uts46_map(7468) -> {'M', [97]}; +uts46_map(7469) -> {'M', [230]}; +uts46_map(7470) -> {'M', [98]}; +uts46_map(7471) -> 'V'; +uts46_map(7472) -> {'M', [100]}; +uts46_map(7473) -> {'M', [101]}; +uts46_map(7474) -> {'M', [477]}; +uts46_map(7475) -> {'M', [103]}; +uts46_map(7476) 
-> {'M', [104]}; +uts46_map(7477) -> {'M', [105]}; +uts46_map(7478) -> {'M', [106]}; +uts46_map(7479) -> {'M', [107]}; +uts46_map(7480) -> {'M', [108]}; +uts46_map(7481) -> {'M', [109]}; +uts46_map(7482) -> {'M', [110]}; +uts46_map(7483) -> 'V'; +uts46_map(7484) -> {'M', [111]}; +uts46_map(7485) -> {'M', [547]}; +uts46_map(7486) -> {'M', [112]}; +uts46_map(7487) -> {'M', [114]}; +uts46_map(7488) -> {'M', [116]}; +uts46_map(7489) -> {'M', [117]}; +uts46_map(7490) -> {'M', [119]}; +uts46_map(7491) -> {'M', [97]}; +uts46_map(7492) -> {'M', [592]}; +uts46_map(7493) -> {'M', [593]}; +uts46_map(7494) -> {'M', [7426]}; +uts46_map(7495) -> {'M', [98]}; +uts46_map(7496) -> {'M', [100]}; +uts46_map(7497) -> {'M', [101]}; +uts46_map(7498) -> {'M', [601]}; +uts46_map(7499) -> {'M', [603]}; +uts46_map(7500) -> {'M', [604]}; +uts46_map(7501) -> {'M', [103]}; +uts46_map(7502) -> 'V'; +uts46_map(7503) -> {'M', [107]}; +uts46_map(7504) -> {'M', [109]}; +uts46_map(7505) -> {'M', [331]}; +uts46_map(7506) -> {'M', [111]}; +uts46_map(7507) -> {'M', [596]}; +uts46_map(7508) -> {'M', [7446]}; +uts46_map(7509) -> {'M', [7447]}; +uts46_map(7510) -> {'M', [112]}; +uts46_map(7511) -> {'M', [116]}; +uts46_map(7512) -> {'M', [117]}; +uts46_map(7513) -> {'M', [7453]}; +uts46_map(7514) -> {'M', [623]}; +uts46_map(7515) -> {'M', [118]}; +uts46_map(7516) -> {'M', [7461]}; +uts46_map(7517) -> {'M', [946]}; +uts46_map(7518) -> {'M', [947]}; +uts46_map(7519) -> {'M', [948]}; +uts46_map(7520) -> {'M', [966]}; +uts46_map(7521) -> {'M', [967]}; +uts46_map(7522) -> {'M', [105]}; +uts46_map(7523) -> {'M', [114]}; +uts46_map(7524) -> {'M', [117]}; +uts46_map(7525) -> {'M', [118]}; +uts46_map(7526) -> {'M', [946]}; +uts46_map(7527) -> {'M', [947]}; +uts46_map(7528) -> {'M', [961]}; +uts46_map(7529) -> {'M', [966]}; +uts46_map(7530) -> {'M', [967]}; +uts46_map(7531) -> 'V'; +uts46_map(7544) -> {'M', [1085]}; +uts46_map(7579) -> {'M', [594]}; +uts46_map(7580) -> {'M', [99]}; +uts46_map(7581) -> {'M', [597]}; 
+uts46_map(7582) -> {'M', [240]}; +uts46_map(7583) -> {'M', [604]}; +uts46_map(7584) -> {'M', [102]}; +uts46_map(7585) -> {'M', [607]}; +uts46_map(7586) -> {'M', [609]}; +uts46_map(7587) -> {'M', [613]}; +uts46_map(7588) -> {'M', [616]}; +uts46_map(7589) -> {'M', [617]}; +uts46_map(7590) -> {'M', [618]}; +uts46_map(7591) -> {'M', [7547]}; +uts46_map(7592) -> {'M', [669]}; +uts46_map(7593) -> {'M', [621]}; +uts46_map(7594) -> {'M', [7557]}; +uts46_map(7595) -> {'M', [671]}; +uts46_map(7596) -> {'M', [625]}; +uts46_map(7597) -> {'M', [624]}; +uts46_map(7598) -> {'M', [626]}; +uts46_map(7599) -> {'M', [627]}; +uts46_map(7600) -> {'M', [628]}; +uts46_map(7601) -> {'M', [629]}; +uts46_map(7602) -> {'M', [632]}; +uts46_map(7603) -> {'M', [642]}; +uts46_map(7604) -> {'M', [643]}; +uts46_map(7605) -> {'M', [427]}; +uts46_map(7606) -> {'M', [649]}; +uts46_map(7607) -> {'M', [650]}; +uts46_map(7608) -> {'M', [7452]}; +uts46_map(7609) -> {'M', [651]}; +uts46_map(7610) -> {'M', [652]}; +uts46_map(7611) -> {'M', [122]}; +uts46_map(7612) -> {'M', [656]}; +uts46_map(7613) -> {'M', [657]}; +uts46_map(7614) -> {'M', [658]}; +uts46_map(7615) -> {'M', [952]}; +uts46_map(7674) -> 'X'; +uts46_map(7675) -> 'V'; +uts46_map(7676) -> 'V'; +uts46_map(7677) -> 'V'; +uts46_map(7680) -> {'M', [7681]}; +uts46_map(7681) -> 'V'; +uts46_map(7682) -> {'M', [7683]}; +uts46_map(7683) -> 'V'; +uts46_map(7684) -> {'M', [7685]}; +uts46_map(7685) -> 'V'; +uts46_map(7686) -> {'M', [7687]}; +uts46_map(7687) -> 'V'; +uts46_map(7688) -> {'M', [7689]}; +uts46_map(7689) -> 'V'; +uts46_map(7690) -> {'M', [7691]}; +uts46_map(7691) -> 'V'; +uts46_map(7692) -> {'M', [7693]}; +uts46_map(7693) -> 'V'; +uts46_map(7694) -> {'M', [7695]}; +uts46_map(7695) -> 'V'; +uts46_map(7696) -> {'M', [7697]}; +uts46_map(7697) -> 'V'; +uts46_map(7698) -> {'M', [7699]}; +uts46_map(7699) -> 'V'; +uts46_map(7700) -> {'M', [7701]}; +uts46_map(7701) -> 'V'; +uts46_map(7702) -> {'M', [7703]}; +uts46_map(7703) -> 'V'; +uts46_map(7704) -> 
{'M', [7705]}; +uts46_map(7705) -> 'V'; +uts46_map(7706) -> {'M', [7707]}; +uts46_map(7707) -> 'V'; +uts46_map(7708) -> {'M', [7709]}; +uts46_map(7709) -> 'V'; +uts46_map(7710) -> {'M', [7711]}; +uts46_map(7711) -> 'V'; +uts46_map(7712) -> {'M', [7713]}; +uts46_map(7713) -> 'V'; +uts46_map(7714) -> {'M', [7715]}; +uts46_map(7715) -> 'V'; +uts46_map(7716) -> {'M', [7717]}; +uts46_map(7717) -> 'V'; +uts46_map(7718) -> {'M', [7719]}; +uts46_map(7719) -> 'V'; +uts46_map(7720) -> {'M', [7721]}; +uts46_map(7721) -> 'V'; +uts46_map(7722) -> {'M', [7723]}; +uts46_map(7723) -> 'V'; +uts46_map(7724) -> {'M', [7725]}; +uts46_map(7725) -> 'V'; +uts46_map(7726) -> {'M', [7727]}; +uts46_map(7727) -> 'V'; +uts46_map(7728) -> {'M', [7729]}; +uts46_map(7729) -> 'V'; +uts46_map(7730) -> {'M', [7731]}; +uts46_map(7731) -> 'V'; +uts46_map(7732) -> {'M', [7733]}; +uts46_map(7733) -> 'V'; +uts46_map(7734) -> {'M', [7735]}; +uts46_map(7735) -> 'V'; +uts46_map(7736) -> {'M', [7737]}; +uts46_map(7737) -> 'V'; +uts46_map(7738) -> {'M', [7739]}; +uts46_map(7739) -> 'V'; +uts46_map(7740) -> {'M', [7741]}; +uts46_map(7741) -> 'V'; +uts46_map(7742) -> {'M', [7743]}; +uts46_map(7743) -> 'V'; +uts46_map(7744) -> {'M', [7745]}; +uts46_map(7745) -> 'V'; +uts46_map(7746) -> {'M', [7747]}; +uts46_map(7747) -> 'V'; +uts46_map(7748) -> {'M', [7749]}; +uts46_map(7749) -> 'V'; +uts46_map(7750) -> {'M', [7751]}; +uts46_map(7751) -> 'V'; +uts46_map(7752) -> {'M', [7753]}; +uts46_map(7753) -> 'V'; +uts46_map(7754) -> {'M', [7755]}; +uts46_map(7755) -> 'V'; +uts46_map(7756) -> {'M', [7757]}; +uts46_map(7757) -> 'V'; +uts46_map(7758) -> {'M', [7759]}; +uts46_map(7759) -> 'V'; +uts46_map(7760) -> {'M', [7761]}; +uts46_map(7761) -> 'V'; +uts46_map(7762) -> {'M', [7763]}; +uts46_map(7763) -> 'V'; +uts46_map(7764) -> {'M', [7765]}; +uts46_map(7765) -> 'V'; +uts46_map(7766) -> {'M', [7767]}; +uts46_map(7767) -> 'V'; +uts46_map(7768) -> {'M', [7769]}; +uts46_map(7769) -> 'V'; +uts46_map(7770) -> {'M', [7771]}; 
+uts46_map(7771) -> 'V'; +uts46_map(7772) -> {'M', [7773]}; +uts46_map(7773) -> 'V'; +uts46_map(7774) -> {'M', [7775]}; +uts46_map(7775) -> 'V'; +uts46_map(7776) -> {'M', [7777]}; +uts46_map(7777) -> 'V'; +uts46_map(7778) -> {'M', [7779]}; +uts46_map(7779) -> 'V'; +uts46_map(7780) -> {'M', [7781]}; +uts46_map(7781) -> 'V'; +uts46_map(7782) -> {'M', [7783]}; +uts46_map(7783) -> 'V'; +uts46_map(7784) -> {'M', [7785]}; +uts46_map(7785) -> 'V'; +uts46_map(7786) -> {'M', [7787]}; +uts46_map(7787) -> 'V'; +uts46_map(7788) -> {'M', [7789]}; +uts46_map(7789) -> 'V'; +uts46_map(7790) -> {'M', [7791]}; +uts46_map(7791) -> 'V'; +uts46_map(7792) -> {'M', [7793]}; +uts46_map(7793) -> 'V'; +uts46_map(7794) -> {'M', [7795]}; +uts46_map(7795) -> 'V'; +uts46_map(7796) -> {'M', [7797]}; +uts46_map(7797) -> 'V'; +uts46_map(7798) -> {'M', [7799]}; +uts46_map(7799) -> 'V'; +uts46_map(7800) -> {'M', [7801]}; +uts46_map(7801) -> 'V'; +uts46_map(7802) -> {'M', [7803]}; +uts46_map(7803) -> 'V'; +uts46_map(7804) -> {'M', [7805]}; +uts46_map(7805) -> 'V'; +uts46_map(7806) -> {'M', [7807]}; +uts46_map(7807) -> 'V'; +uts46_map(7808) -> {'M', [7809]}; +uts46_map(7809) -> 'V'; +uts46_map(7810) -> {'M', [7811]}; +uts46_map(7811) -> 'V'; +uts46_map(7812) -> {'M', [7813]}; +uts46_map(7813) -> 'V'; +uts46_map(7814) -> {'M', [7815]}; +uts46_map(7815) -> 'V'; +uts46_map(7816) -> {'M', [7817]}; +uts46_map(7817) -> 'V'; +uts46_map(7818) -> {'M', [7819]}; +uts46_map(7819) -> 'V'; +uts46_map(7820) -> {'M', [7821]}; +uts46_map(7821) -> 'V'; +uts46_map(7822) -> {'M', [7823]}; +uts46_map(7823) -> 'V'; +uts46_map(7824) -> {'M', [7825]}; +uts46_map(7825) -> 'V'; +uts46_map(7826) -> {'M', [7827]}; +uts46_map(7827) -> 'V'; +uts46_map(7828) -> {'M', [7829]}; +uts46_map(7834) -> {'M', [97,702]}; +uts46_map(7835) -> {'M', [7777]}; +uts46_map(7838) -> {'M', [115,115]}; +uts46_map(7839) -> 'V'; +uts46_map(7840) -> {'M', [7841]}; +uts46_map(7841) -> 'V'; +uts46_map(7842) -> {'M', [7843]}; +uts46_map(7843) -> 'V'; 
+uts46_map(7844) -> {'M', [7845]}; +uts46_map(7845) -> 'V'; +uts46_map(7846) -> {'M', [7847]}; +uts46_map(7847) -> 'V'; +uts46_map(7848) -> {'M', [7849]}; +uts46_map(7849) -> 'V'; +uts46_map(7850) -> {'M', [7851]}; +uts46_map(7851) -> 'V'; +uts46_map(7852) -> {'M', [7853]}; +uts46_map(7853) -> 'V'; +uts46_map(7854) -> {'M', [7855]}; +uts46_map(7855) -> 'V'; +uts46_map(7856) -> {'M', [7857]}; +uts46_map(7857) -> 'V'; +uts46_map(7858) -> {'M', [7859]}; +uts46_map(7859) -> 'V'; +uts46_map(7860) -> {'M', [7861]}; +uts46_map(7861) -> 'V'; +uts46_map(7862) -> {'M', [7863]}; +uts46_map(7863) -> 'V'; +uts46_map(7864) -> {'M', [7865]}; +uts46_map(7865) -> 'V'; +uts46_map(7866) -> {'M', [7867]}; +uts46_map(7867) -> 'V'; +uts46_map(7868) -> {'M', [7869]}; +uts46_map(7869) -> 'V'; +uts46_map(7870) -> {'M', [7871]}; +uts46_map(7871) -> 'V'; +uts46_map(7872) -> {'M', [7873]}; +uts46_map(7873) -> 'V'; +uts46_map(7874) -> {'M', [7875]}; +uts46_map(7875) -> 'V'; +uts46_map(7876) -> {'M', [7877]}; +uts46_map(7877) -> 'V'; +uts46_map(7878) -> {'M', [7879]}; +uts46_map(7879) -> 'V'; +uts46_map(7880) -> {'M', [7881]}; +uts46_map(7881) -> 'V'; +uts46_map(7882) -> {'M', [7883]}; +uts46_map(7883) -> 'V'; +uts46_map(7884) -> {'M', [7885]}; +uts46_map(7885) -> 'V'; +uts46_map(7886) -> {'M', [7887]}; +uts46_map(7887) -> 'V'; +uts46_map(7888) -> {'M', [7889]}; +uts46_map(7889) -> 'V'; +uts46_map(7890) -> {'M', [7891]}; +uts46_map(7891) -> 'V'; +uts46_map(7892) -> {'M', [7893]}; +uts46_map(7893) -> 'V'; +uts46_map(7894) -> {'M', [7895]}; +uts46_map(7895) -> 'V'; +uts46_map(7896) -> {'M', [7897]}; +uts46_map(7897) -> 'V'; +uts46_map(7898) -> {'M', [7899]}; +uts46_map(7899) -> 'V'; +uts46_map(7900) -> {'M', [7901]}; +uts46_map(7901) -> 'V'; +uts46_map(7902) -> {'M', [7903]}; +uts46_map(7903) -> 'V'; +uts46_map(7904) -> {'M', [7905]}; +uts46_map(7905) -> 'V'; +uts46_map(7906) -> {'M', [7907]}; +uts46_map(7907) -> 'V'; +uts46_map(7908) -> {'M', [7909]}; +uts46_map(7909) -> 'V'; +uts46_map(7910) -> 
{'M', [7911]}; +uts46_map(7911) -> 'V'; +uts46_map(7912) -> {'M', [7913]}; +uts46_map(7913) -> 'V'; +uts46_map(7914) -> {'M', [7915]}; +uts46_map(7915) -> 'V'; +uts46_map(7916) -> {'M', [7917]}; +uts46_map(7917) -> 'V'; +uts46_map(7918) -> {'M', [7919]}; +uts46_map(7919) -> 'V'; +uts46_map(7920) -> {'M', [7921]}; +uts46_map(7921) -> 'V'; +uts46_map(7922) -> {'M', [7923]}; +uts46_map(7923) -> 'V'; +uts46_map(7924) -> {'M', [7925]}; +uts46_map(7925) -> 'V'; +uts46_map(7926) -> {'M', [7927]}; +uts46_map(7927) -> 'V'; +uts46_map(7928) -> {'M', [7929]}; +uts46_map(7929) -> 'V'; +uts46_map(7930) -> {'M', [7931]}; +uts46_map(7931) -> 'V'; +uts46_map(7932) -> {'M', [7933]}; +uts46_map(7933) -> 'V'; +uts46_map(7934) -> {'M', [7935]}; +uts46_map(7935) -> 'V'; +uts46_map(7944) -> {'M', [7936]}; +uts46_map(7945) -> {'M', [7937]}; +uts46_map(7946) -> {'M', [7938]}; +uts46_map(7947) -> {'M', [7939]}; +uts46_map(7948) -> {'M', [7940]}; +uts46_map(7949) -> {'M', [7941]}; +uts46_map(7950) -> {'M', [7942]}; +uts46_map(7951) -> {'M', [7943]}; +uts46_map(7960) -> {'M', [7952]}; +uts46_map(7961) -> {'M', [7953]}; +uts46_map(7962) -> {'M', [7954]}; +uts46_map(7963) -> {'M', [7955]}; +uts46_map(7964) -> {'M', [7956]}; +uts46_map(7965) -> {'M', [7957]}; +uts46_map(7976) -> {'M', [7968]}; +uts46_map(7977) -> {'M', [7969]}; +uts46_map(7978) -> {'M', [7970]}; +uts46_map(7979) -> {'M', [7971]}; +uts46_map(7980) -> {'M', [7972]}; +uts46_map(7981) -> {'M', [7973]}; +uts46_map(7982) -> {'M', [7974]}; +uts46_map(7983) -> {'M', [7975]}; +uts46_map(7992) -> {'M', [7984]}; +uts46_map(7993) -> {'M', [7985]}; +uts46_map(7994) -> {'M', [7986]}; +uts46_map(7995) -> {'M', [7987]}; +uts46_map(7996) -> {'M', [7988]}; +uts46_map(7997) -> {'M', [7989]}; +uts46_map(7998) -> {'M', [7990]}; +uts46_map(7999) -> {'M', [7991]}; +uts46_map(8008) -> {'M', [8000]}; +uts46_map(8009) -> {'M', [8001]}; +uts46_map(8010) -> {'M', [8002]}; +uts46_map(8011) -> {'M', [8003]}; +uts46_map(8012) -> {'M', [8004]}; 
+uts46_map(8013) -> {'M', [8005]}; +uts46_map(8024) -> 'X'; +uts46_map(8025) -> {'M', [8017]}; +uts46_map(8026) -> 'X'; +uts46_map(8027) -> {'M', [8019]}; +uts46_map(8028) -> 'X'; +uts46_map(8029) -> {'M', [8021]}; +uts46_map(8030) -> 'X'; +uts46_map(8031) -> {'M', [8023]}; +uts46_map(8040) -> {'M', [8032]}; +uts46_map(8041) -> {'M', [8033]}; +uts46_map(8042) -> {'M', [8034]}; +uts46_map(8043) -> {'M', [8035]}; +uts46_map(8044) -> {'M', [8036]}; +uts46_map(8045) -> {'M', [8037]}; +uts46_map(8046) -> {'M', [8038]}; +uts46_map(8047) -> {'M', [8039]}; +uts46_map(8048) -> 'V'; +uts46_map(8049) -> {'M', [940]}; +uts46_map(8050) -> 'V'; +uts46_map(8051) -> {'M', [941]}; +uts46_map(8052) -> 'V'; +uts46_map(8053) -> {'M', [942]}; +uts46_map(8054) -> 'V'; +uts46_map(8055) -> {'M', [943]}; +uts46_map(8056) -> 'V'; +uts46_map(8057) -> {'M', [972]}; +uts46_map(8058) -> 'V'; +uts46_map(8059) -> {'M', [973]}; +uts46_map(8060) -> 'V'; +uts46_map(8061) -> {'M', [974]}; +uts46_map(8064) -> {'M', [7936,953]}; +uts46_map(8065) -> {'M', [7937,953]}; +uts46_map(8066) -> {'M', [7938,953]}; +uts46_map(8067) -> {'M', [7939,953]}; +uts46_map(8068) -> {'M', [7940,953]}; +uts46_map(8069) -> {'M', [7941,953]}; +uts46_map(8070) -> {'M', [7942,953]}; +uts46_map(8071) -> {'M', [7943,953]}; +uts46_map(8072) -> {'M', [7936,953]}; +uts46_map(8073) -> {'M', [7937,953]}; +uts46_map(8074) -> {'M', [7938,953]}; +uts46_map(8075) -> {'M', [7939,953]}; +uts46_map(8076) -> {'M', [7940,953]}; +uts46_map(8077) -> {'M', [7941,953]}; +uts46_map(8078) -> {'M', [7942,953]}; +uts46_map(8079) -> {'M', [7943,953]}; +uts46_map(8080) -> {'M', [7968,953]}; +uts46_map(8081) -> {'M', [7969,953]}; +uts46_map(8082) -> {'M', [7970,953]}; +uts46_map(8083) -> {'M', [7971,953]}; +uts46_map(8084) -> {'M', [7972,953]}; +uts46_map(8085) -> {'M', [7973,953]}; +uts46_map(8086) -> {'M', [7974,953]}; +uts46_map(8087) -> {'M', [7975,953]}; +uts46_map(8088) -> {'M', [7968,953]}; +uts46_map(8089) -> {'M', [7969,953]}; +uts46_map(8090) 
-> {'M', [7970,953]}; +uts46_map(8091) -> {'M', [7971,953]}; +uts46_map(8092) -> {'M', [7972,953]}; +uts46_map(8093) -> {'M', [7973,953]}; +uts46_map(8094) -> {'M', [7974,953]}; +uts46_map(8095) -> {'M', [7975,953]}; +uts46_map(8096) -> {'M', [8032,953]}; +uts46_map(8097) -> {'M', [8033,953]}; +uts46_map(8098) -> {'M', [8034,953]}; +uts46_map(8099) -> {'M', [8035,953]}; +uts46_map(8100) -> {'M', [8036,953]}; +uts46_map(8101) -> {'M', [8037,953]}; +uts46_map(8102) -> {'M', [8038,953]}; +uts46_map(8103) -> {'M', [8039,953]}; +uts46_map(8104) -> {'M', [8032,953]}; +uts46_map(8105) -> {'M', [8033,953]}; +uts46_map(8106) -> {'M', [8034,953]}; +uts46_map(8107) -> {'M', [8035,953]}; +uts46_map(8108) -> {'M', [8036,953]}; +uts46_map(8109) -> {'M', [8037,953]}; +uts46_map(8110) -> {'M', [8038,953]}; +uts46_map(8111) -> {'M', [8039,953]}; +uts46_map(8114) -> {'M', [8048,953]}; +uts46_map(8115) -> {'M', [945,953]}; +uts46_map(8116) -> {'M', [940,953]}; +uts46_map(8117) -> 'X'; +uts46_map(8118) -> 'V'; +uts46_map(8119) -> {'M', [8118,953]}; +uts46_map(8120) -> {'M', [8112]}; +uts46_map(8121) -> {'M', [8113]}; +uts46_map(8122) -> {'M', [8048]}; +uts46_map(8123) -> {'M', [940]}; +uts46_map(8124) -> {'M', [945,953]}; +uts46_map(8125) -> {'3', [32,787]}; +uts46_map(8126) -> {'M', [953]}; +uts46_map(8127) -> {'3', [32,787]}; +uts46_map(8128) -> {'3', [32,834]}; +uts46_map(8129) -> {'3', [32,776,834]}; +uts46_map(8130) -> {'M', [8052,953]}; +uts46_map(8131) -> {'M', [951,953]}; +uts46_map(8132) -> {'M', [942,953]}; +uts46_map(8133) -> 'X'; +uts46_map(8134) -> 'V'; +uts46_map(8135) -> {'M', [8134,953]}; +uts46_map(8136) -> {'M', [8050]}; +uts46_map(8137) -> {'M', [941]}; +uts46_map(8138) -> {'M', [8052]}; +uts46_map(8139) -> {'M', [942]}; +uts46_map(8140) -> {'M', [951,953]}; +uts46_map(8141) -> {'3', [32,787,768]}; +uts46_map(8142) -> {'3', [32,787,769]}; +uts46_map(8143) -> {'3', [32,787,834]}; +uts46_map(8147) -> {'M', [912]}; +uts46_map(8152) -> {'M', [8144]}; +uts46_map(8153) -> 
{'M', [8145]}; +uts46_map(8154) -> {'M', [8054]}; +uts46_map(8155) -> {'M', [943]}; +uts46_map(8156) -> 'X'; +uts46_map(8157) -> {'3', [32,788,768]}; +uts46_map(8158) -> {'3', [32,788,769]}; +uts46_map(8159) -> {'3', [32,788,834]}; +uts46_map(8163) -> {'M', [944]}; +uts46_map(8168) -> {'M', [8160]}; +uts46_map(8169) -> {'M', [8161]}; +uts46_map(8170) -> {'M', [8058]}; +uts46_map(8171) -> {'M', [973]}; +uts46_map(8172) -> {'M', [8165]}; +uts46_map(8173) -> {'3', [32,776,768]}; +uts46_map(8174) -> {'3', [32,776,769]}; +uts46_map(8175) -> {'3', [96]}; +uts46_map(8178) -> {'M', [8060,953]}; +uts46_map(8179) -> {'M', [969,953]}; +uts46_map(8180) -> {'M', [974,953]}; +uts46_map(8181) -> 'X'; +uts46_map(8182) -> 'V'; +uts46_map(8183) -> {'M', [8182,953]}; +uts46_map(8184) -> {'M', [8056]}; +uts46_map(8185) -> {'M', [972]}; +uts46_map(8186) -> {'M', [8060]}; +uts46_map(8187) -> {'M', [974]}; +uts46_map(8188) -> {'M', [969,953]}; +uts46_map(8189) -> {'3', [32,769]}; +uts46_map(8190) -> {'3', [32,788]}; +uts46_map(8191) -> 'X'; +uts46_map(8203) -> 'I'; +uts46_map(8208) -> 'V'; +uts46_map(8209) -> {'M', [8208]}; +uts46_map(8215) -> {'3', [32,819]}; +uts46_map(8231) -> 'V'; +uts46_map(8239) -> {'3', [32]}; +uts46_map(8243) -> {'M', [8242,8242]}; +uts46_map(8244) -> {'M', [8242,8242,8242]}; +uts46_map(8245) -> 'V'; +uts46_map(8246) -> {'M', [8245,8245]}; +uts46_map(8247) -> {'M', [8245,8245,8245]}; +uts46_map(8252) -> {'3', [33,33]}; +uts46_map(8253) -> 'V'; +uts46_map(8254) -> {'3', [32,773]}; +uts46_map(8263) -> {'3', [63,63]}; +uts46_map(8264) -> {'3', [63,33]}; +uts46_map(8265) -> {'3', [33,63]}; +uts46_map(8279) -> {'M', [8242,8242,8242,8242]}; +uts46_map(8287) -> {'3', [32]}; +uts46_map(8288) -> 'I'; +uts46_map(8292) -> 'I'; +uts46_map(8293) -> 'X'; +uts46_map(8304) -> {'M', [48]}; +uts46_map(8305) -> {'M', [105]}; +uts46_map(8308) -> {'M', [52]}; +uts46_map(8309) -> {'M', [53]}; +uts46_map(8310) -> {'M', [54]}; +uts46_map(8311) -> {'M', [55]}; +uts46_map(8312) -> {'M', 
[56]}; +uts46_map(8313) -> {'M', [57]}; +uts46_map(8314) -> {'3', [43]}; +uts46_map(8315) -> {'M', [8722]}; +uts46_map(8316) -> {'3', [61]}; +uts46_map(8317) -> {'3', [40]}; +uts46_map(8318) -> {'3', [41]}; +uts46_map(8319) -> {'M', [110]}; +uts46_map(8320) -> {'M', [48]}; +uts46_map(8321) -> {'M', [49]}; +uts46_map(8322) -> {'M', [50]}; +uts46_map(8323) -> {'M', [51]}; +uts46_map(8324) -> {'M', [52]}; +uts46_map(8325) -> {'M', [53]}; +uts46_map(8326) -> {'M', [54]}; +uts46_map(8327) -> {'M', [55]}; +uts46_map(8328) -> {'M', [56]}; +uts46_map(8329) -> {'M', [57]}; +uts46_map(8330) -> {'3', [43]}; +uts46_map(8331) -> {'M', [8722]}; +uts46_map(8332) -> {'3', [61]}; +uts46_map(8333) -> {'3', [40]}; +uts46_map(8334) -> {'3', [41]}; +uts46_map(8335) -> 'X'; +uts46_map(8336) -> {'M', [97]}; +uts46_map(8337) -> {'M', [101]}; +uts46_map(8338) -> {'M', [111]}; +uts46_map(8339) -> {'M', [120]}; +uts46_map(8340) -> {'M', [601]}; +uts46_map(8341) -> {'M', [104]}; +uts46_map(8342) -> {'M', [107]}; +uts46_map(8343) -> {'M', [108]}; +uts46_map(8344) -> {'M', [109]}; +uts46_map(8345) -> {'M', [110]}; +uts46_map(8346) -> {'M', [112]}; +uts46_map(8347) -> {'M', [115]}; +uts46_map(8348) -> {'M', [116]}; +uts46_map(8360) -> {'M', [114,115]}; +uts46_map(8363) -> 'V'; +uts46_map(8364) -> 'V'; +uts46_map(8377) -> 'V'; +uts46_map(8378) -> 'V'; +uts46_map(8382) -> 'V'; +uts46_map(8383) -> 'V'; +uts46_map(8427) -> 'V'; +uts46_map(8432) -> 'V'; +uts46_map(8448) -> {'3', [97,47,99]}; +uts46_map(8449) -> {'3', [97,47,115]}; +uts46_map(8450) -> {'M', [99]}; +uts46_map(8451) -> {'M', [176,99]}; +uts46_map(8452) -> 'V'; +uts46_map(8453) -> {'3', [99,47,111]}; +uts46_map(8454) -> {'3', [99,47,117]}; +uts46_map(8455) -> {'M', [603]}; +uts46_map(8456) -> 'V'; +uts46_map(8457) -> {'M', [176,102]}; +uts46_map(8458) -> {'M', [103]}; +uts46_map(8463) -> {'M', [295]}; +uts46_map(8468) -> 'V'; +uts46_map(8469) -> {'M', [110]}; +uts46_map(8470) -> {'M', [110,111]}; +uts46_map(8473) -> {'M', [112]}; 
+uts46_map(8474) -> {'M', [113]}; +uts46_map(8480) -> {'M', [115,109]}; +uts46_map(8481) -> {'M', [116,101,108]}; +uts46_map(8482) -> {'M', [116,109]}; +uts46_map(8483) -> 'V'; +uts46_map(8484) -> {'M', [122]}; +uts46_map(8485) -> 'V'; +uts46_map(8486) -> {'M', [969]}; +uts46_map(8487) -> 'V'; +uts46_map(8488) -> {'M', [122]}; +uts46_map(8489) -> 'V'; +uts46_map(8490) -> {'M', [107]}; +uts46_map(8491) -> {'M', [229]}; +uts46_map(8492) -> {'M', [98]}; +uts46_map(8493) -> {'M', [99]}; +uts46_map(8494) -> 'V'; +uts46_map(8497) -> {'M', [102]}; +uts46_map(8498) -> 'X'; +uts46_map(8499) -> {'M', [109]}; +uts46_map(8500) -> {'M', [111]}; +uts46_map(8501) -> {'M', [1488]}; +uts46_map(8502) -> {'M', [1489]}; +uts46_map(8503) -> {'M', [1490]}; +uts46_map(8504) -> {'M', [1491]}; +uts46_map(8505) -> {'M', [105]}; +uts46_map(8506) -> 'V'; +uts46_map(8507) -> {'M', [102,97,120]}; +uts46_map(8508) -> {'M', [960]}; +uts46_map(8511) -> {'M', [960]}; +uts46_map(8512) -> {'M', [8721]}; +uts46_map(8519) -> {'M', [101]}; +uts46_map(8520) -> {'M', [105]}; +uts46_map(8521) -> {'M', [106]}; +uts46_map(8524) -> 'V'; +uts46_map(8525) -> 'V'; +uts46_map(8526) -> 'V'; +uts46_map(8527) -> 'V'; +uts46_map(8528) -> {'M', [49,8260,55]}; +uts46_map(8529) -> {'M', [49,8260,57]}; +uts46_map(8530) -> {'M', [49,8260,49,48]}; +uts46_map(8531) -> {'M', [49,8260,51]}; +uts46_map(8532) -> {'M', [50,8260,51]}; +uts46_map(8533) -> {'M', [49,8260,53]}; +uts46_map(8534) -> {'M', [50,8260,53]}; +uts46_map(8535) -> {'M', [51,8260,53]}; +uts46_map(8536) -> {'M', [52,8260,53]}; +uts46_map(8537) -> {'M', [49,8260,54]}; +uts46_map(8538) -> {'M', [53,8260,54]}; +uts46_map(8539) -> {'M', [49,8260,56]}; +uts46_map(8540) -> {'M', [51,8260,56]}; +uts46_map(8541) -> {'M', [53,8260,56]}; +uts46_map(8542) -> {'M', [55,8260,56]}; +uts46_map(8543) -> {'M', [49,8260]}; +uts46_map(8544) -> {'M', [105]}; +uts46_map(8545) -> {'M', [105,105]}; +uts46_map(8546) -> {'M', [105,105,105]}; +uts46_map(8547) -> {'M', [105,118]}; 
+uts46_map(8548) -> {'M', [118]}; +uts46_map(8549) -> {'M', [118,105]}; +uts46_map(8550) -> {'M', [118,105,105]}; +uts46_map(8551) -> {'M', [118,105,105,105]}; +uts46_map(8552) -> {'M', [105,120]}; +uts46_map(8553) -> {'M', [120]}; +uts46_map(8554) -> {'M', [120,105]}; +uts46_map(8555) -> {'M', [120,105,105]}; +uts46_map(8556) -> {'M', [108]}; +uts46_map(8557) -> {'M', [99]}; +uts46_map(8558) -> {'M', [100]}; +uts46_map(8559) -> {'M', [109]}; +uts46_map(8560) -> {'M', [105]}; +uts46_map(8561) -> {'M', [105,105]}; +uts46_map(8562) -> {'M', [105,105,105]}; +uts46_map(8563) -> {'M', [105,118]}; +uts46_map(8564) -> {'M', [118]}; +uts46_map(8565) -> {'M', [118,105]}; +uts46_map(8566) -> {'M', [118,105,105]}; +uts46_map(8567) -> {'M', [118,105,105,105]}; +uts46_map(8568) -> {'M', [105,120]}; +uts46_map(8569) -> {'M', [120]}; +uts46_map(8570) -> {'M', [120,105]}; +uts46_map(8571) -> {'M', [120,105,105]}; +uts46_map(8572) -> {'M', [108]}; +uts46_map(8573) -> {'M', [99]}; +uts46_map(8574) -> {'M', [100]}; +uts46_map(8575) -> {'M', [109]}; +uts46_map(8579) -> 'X'; +uts46_map(8580) -> 'V'; +uts46_map(8585) -> {'M', [48,8260,51]}; +uts46_map(8748) -> {'M', [8747,8747]}; +uts46_map(8749) -> {'M', [8747,8747,8747]}; +uts46_map(8750) -> 'V'; +uts46_map(8751) -> {'M', [8750,8750]}; +uts46_map(8752) -> {'M', [8750,8750,8750]}; +uts46_map(8800) -> '3'; +uts46_map(8960) -> 'V'; +uts46_map(8961) -> 'V'; +uts46_map(9001) -> {'M', [12296]}; +uts46_map(9002) -> {'M', [12297]}; +uts46_map(9083) -> 'V'; +uts46_map(9084) -> 'V'; +uts46_map(9192) -> 'V'; +uts46_map(9215) -> 'V'; +uts46_map(9312) -> {'M', [49]}; +uts46_map(9313) -> {'M', [50]}; +uts46_map(9314) -> {'M', [51]}; +uts46_map(9315) -> {'M', [52]}; +uts46_map(9316) -> {'M', [53]}; +uts46_map(9317) -> {'M', [54]}; +uts46_map(9318) -> {'M', [55]}; +uts46_map(9319) -> {'M', [56]}; +uts46_map(9320) -> {'M', [57]}; +uts46_map(9321) -> {'M', [49,48]}; +uts46_map(9322) -> {'M', [49,49]}; +uts46_map(9323) -> {'M', [49,50]}; 
+uts46_map(9324) -> {'M', [49,51]}; +uts46_map(9325) -> {'M', [49,52]}; +uts46_map(9326) -> {'M', [49,53]}; +uts46_map(9327) -> {'M', [49,54]}; +uts46_map(9328) -> {'M', [49,55]}; +uts46_map(9329) -> {'M', [49,56]}; +uts46_map(9330) -> {'M', [49,57]}; +uts46_map(9331) -> {'M', [50,48]}; +uts46_map(9332) -> {'3', [40,49,41]}; +uts46_map(9333) -> {'3', [40,50,41]}; +uts46_map(9334) -> {'3', [40,51,41]}; +uts46_map(9335) -> {'3', [40,52,41]}; +uts46_map(9336) -> {'3', [40,53,41]}; +uts46_map(9337) -> {'3', [40,54,41]}; +uts46_map(9338) -> {'3', [40,55,41]}; +uts46_map(9339) -> {'3', [40,56,41]}; +uts46_map(9340) -> {'3', [40,57,41]}; +uts46_map(9341) -> {'3', [40,49,48,41]}; +uts46_map(9342) -> {'3', [40,49,49,41]}; +uts46_map(9343) -> {'3', [40,49,50,41]}; +uts46_map(9344) -> {'3', [40,49,51,41]}; +uts46_map(9345) -> {'3', [40,49,52,41]}; +uts46_map(9346) -> {'3', [40,49,53,41]}; +uts46_map(9347) -> {'3', [40,49,54,41]}; +uts46_map(9348) -> {'3', [40,49,55,41]}; +uts46_map(9349) -> {'3', [40,49,56,41]}; +uts46_map(9350) -> {'3', [40,49,57,41]}; +uts46_map(9351) -> {'3', [40,50,48,41]}; +uts46_map(9372) -> {'3', [40,97,41]}; +uts46_map(9373) -> {'3', [40,98,41]}; +uts46_map(9374) -> {'3', [40,99,41]}; +uts46_map(9375) -> {'3', [40,100,41]}; +uts46_map(9376) -> {'3', [40,101,41]}; +uts46_map(9377) -> {'3', [40,102,41]}; +uts46_map(9378) -> {'3', [40,103,41]}; +uts46_map(9379) -> {'3', [40,104,41]}; +uts46_map(9380) -> {'3', [40,105,41]}; +uts46_map(9381) -> {'3', [40,106,41]}; +uts46_map(9382) -> {'3', [40,107,41]}; +uts46_map(9383) -> {'3', [40,108,41]}; +uts46_map(9384) -> {'3', [40,109,41]}; +uts46_map(9385) -> {'3', [40,110,41]}; +uts46_map(9386) -> {'3', [40,111,41]}; +uts46_map(9387) -> {'3', [40,112,41]}; +uts46_map(9388) -> {'3', [40,113,41]}; +uts46_map(9389) -> {'3', [40,114,41]}; +uts46_map(9390) -> {'3', [40,115,41]}; +uts46_map(9391) -> {'3', [40,116,41]}; +uts46_map(9392) -> {'3', [40,117,41]}; +uts46_map(9393) -> {'3', [40,118,41]}; +uts46_map(9394) -> 
{'3', [40,119,41]}; +uts46_map(9395) -> {'3', [40,120,41]}; +uts46_map(9396) -> {'3', [40,121,41]}; +uts46_map(9397) -> {'3', [40,122,41]}; +uts46_map(9398) -> {'M', [97]}; +uts46_map(9399) -> {'M', [98]}; +uts46_map(9400) -> {'M', [99]}; +uts46_map(9401) -> {'M', [100]}; +uts46_map(9402) -> {'M', [101]}; +uts46_map(9403) -> {'M', [102]}; +uts46_map(9404) -> {'M', [103]}; +uts46_map(9405) -> {'M', [104]}; +uts46_map(9406) -> {'M', [105]}; +uts46_map(9407) -> {'M', [106]}; +uts46_map(9408) -> {'M', [107]}; +uts46_map(9409) -> {'M', [108]}; +uts46_map(9410) -> {'M', [109]}; +uts46_map(9411) -> {'M', [110]}; +uts46_map(9412) -> {'M', [111]}; +uts46_map(9413) -> {'M', [112]}; +uts46_map(9414) -> {'M', [113]}; +uts46_map(9415) -> {'M', [114]}; +uts46_map(9416) -> {'M', [115]}; +uts46_map(9417) -> {'M', [116]}; +uts46_map(9418) -> {'M', [117]}; +uts46_map(9419) -> {'M', [118]}; +uts46_map(9420) -> {'M', [119]}; +uts46_map(9421) -> {'M', [120]}; +uts46_map(9422) -> {'M', [121]}; +uts46_map(9423) -> {'M', [122]}; +uts46_map(9424) -> {'M', [97]}; +uts46_map(9425) -> {'M', [98]}; +uts46_map(9426) -> {'M', [99]}; +uts46_map(9427) -> {'M', [100]}; +uts46_map(9428) -> {'M', [101]}; +uts46_map(9429) -> {'M', [102]}; +uts46_map(9430) -> {'M', [103]}; +uts46_map(9431) -> {'M', [104]}; +uts46_map(9432) -> {'M', [105]}; +uts46_map(9433) -> {'M', [106]}; +uts46_map(9434) -> {'M', [107]}; +uts46_map(9435) -> {'M', [108]}; +uts46_map(9436) -> {'M', [109]}; +uts46_map(9437) -> {'M', [110]}; +uts46_map(9438) -> {'M', [111]}; +uts46_map(9439) -> {'M', [112]}; +uts46_map(9440) -> {'M', [113]}; +uts46_map(9441) -> {'M', [114]}; +uts46_map(9442) -> {'M', [115]}; +uts46_map(9443) -> {'M', [116]}; +uts46_map(9444) -> {'M', [117]}; +uts46_map(9445) -> {'M', [118]}; +uts46_map(9446) -> {'M', [119]}; +uts46_map(9447) -> {'M', [120]}; +uts46_map(9448) -> {'M', [121]}; +uts46_map(9449) -> {'M', [122]}; +uts46_map(9450) -> {'M', [48]}; +uts46_map(9471) -> 'V'; +uts46_map(9752) -> 'V'; 
+uts46_map(9753) -> 'V'; +uts46_map(9885) -> 'V'; +uts46_map(9906) -> 'V'; +uts46_map(9934) -> 'V'; +uts46_map(9954) -> 'V'; +uts46_map(9955) -> 'V'; +uts46_map(9984) -> 'V'; +uts46_map(9989) -> 'V'; +uts46_map(10024) -> 'V'; +uts46_map(10060) -> 'V'; +uts46_map(10061) -> 'V'; +uts46_map(10062) -> 'V'; +uts46_map(10070) -> 'V'; +uts46_map(10071) -> 'V'; +uts46_map(10160) -> 'V'; +uts46_map(10175) -> 'V'; +uts46_map(10187) -> 'V'; +uts46_map(10188) -> 'V'; +uts46_map(10189) -> 'V'; +uts46_map(10764) -> {'M', [8747,8747,8747,8747]}; +uts46_map(10868) -> {'3', [58,58,61]}; +uts46_map(10869) -> {'3', [61,61]}; +uts46_map(10870) -> {'3', [61,61,61]}; +uts46_map(10972) -> {'M', [10973,824]}; +uts46_map(11158) -> 'X'; +uts46_map(11159) -> 'V'; +uts46_map(11209) -> 'V'; +uts46_map(11218) -> 'V'; +uts46_map(11263) -> 'V'; +uts46_map(11264) -> {'M', [11312]}; +uts46_map(11265) -> {'M', [11313]}; +uts46_map(11266) -> {'M', [11314]}; +uts46_map(11267) -> {'M', [11315]}; +uts46_map(11268) -> {'M', [11316]}; +uts46_map(11269) -> {'M', [11317]}; +uts46_map(11270) -> {'M', [11318]}; +uts46_map(11271) -> {'M', [11319]}; +uts46_map(11272) -> {'M', [11320]}; +uts46_map(11273) -> {'M', [11321]}; +uts46_map(11274) -> {'M', [11322]}; +uts46_map(11275) -> {'M', [11323]}; +uts46_map(11276) -> {'M', [11324]}; +uts46_map(11277) -> {'M', [11325]}; +uts46_map(11278) -> {'M', [11326]}; +uts46_map(11279) -> {'M', [11327]}; +uts46_map(11280) -> {'M', [11328]}; +uts46_map(11281) -> {'M', [11329]}; +uts46_map(11282) -> {'M', [11330]}; +uts46_map(11283) -> {'M', [11331]}; +uts46_map(11284) -> {'M', [11332]}; +uts46_map(11285) -> {'M', [11333]}; +uts46_map(11286) -> {'M', [11334]}; +uts46_map(11287) -> {'M', [11335]}; +uts46_map(11288) -> {'M', [11336]}; +uts46_map(11289) -> {'M', [11337]}; +uts46_map(11290) -> {'M', [11338]}; +uts46_map(11291) -> {'M', [11339]}; +uts46_map(11292) -> {'M', [11340]}; +uts46_map(11293) -> {'M', [11341]}; +uts46_map(11294) -> {'M', [11342]}; +uts46_map(11295) -> {'M', 
[11343]}; +uts46_map(11296) -> {'M', [11344]}; +uts46_map(11297) -> {'M', [11345]}; +uts46_map(11298) -> {'M', [11346]}; +uts46_map(11299) -> {'M', [11347]}; +uts46_map(11300) -> {'M', [11348]}; +uts46_map(11301) -> {'M', [11349]}; +uts46_map(11302) -> {'M', [11350]}; +uts46_map(11303) -> {'M', [11351]}; +uts46_map(11304) -> {'M', [11352]}; +uts46_map(11305) -> {'M', [11353]}; +uts46_map(11306) -> {'M', [11354]}; +uts46_map(11307) -> {'M', [11355]}; +uts46_map(11308) -> {'M', [11356]}; +uts46_map(11309) -> {'M', [11357]}; +uts46_map(11310) -> {'M', [11358]}; +uts46_map(11311) -> 'X'; +uts46_map(11359) -> 'X'; +uts46_map(11360) -> {'M', [11361]}; +uts46_map(11361) -> 'V'; +uts46_map(11362) -> {'M', [619]}; +uts46_map(11363) -> {'M', [7549]}; +uts46_map(11364) -> {'M', [637]}; +uts46_map(11367) -> {'M', [11368]}; +uts46_map(11368) -> 'V'; +uts46_map(11369) -> {'M', [11370]}; +uts46_map(11370) -> 'V'; +uts46_map(11371) -> {'M', [11372]}; +uts46_map(11372) -> 'V'; +uts46_map(11373) -> {'M', [593]}; +uts46_map(11374) -> {'M', [625]}; +uts46_map(11375) -> {'M', [592]}; +uts46_map(11376) -> {'M', [594]}; +uts46_map(11377) -> 'V'; +uts46_map(11378) -> {'M', [11379]}; +uts46_map(11379) -> 'V'; +uts46_map(11380) -> 'V'; +uts46_map(11381) -> {'M', [11382]}; +uts46_map(11388) -> {'M', [106]}; +uts46_map(11389) -> {'M', [118]}; +uts46_map(11390) -> {'M', [575]}; +uts46_map(11391) -> {'M', [576]}; +uts46_map(11392) -> {'M', [11393]}; +uts46_map(11393) -> 'V'; +uts46_map(11394) -> {'M', [11395]}; +uts46_map(11395) -> 'V'; +uts46_map(11396) -> {'M', [11397]}; +uts46_map(11397) -> 'V'; +uts46_map(11398) -> {'M', [11399]}; +uts46_map(11399) -> 'V'; +uts46_map(11400) -> {'M', [11401]}; +uts46_map(11401) -> 'V'; +uts46_map(11402) -> {'M', [11403]}; +uts46_map(11403) -> 'V'; +uts46_map(11404) -> {'M', [11405]}; +uts46_map(11405) -> 'V'; +uts46_map(11406) -> {'M', [11407]}; +uts46_map(11407) -> 'V'; +uts46_map(11408) -> {'M', [11409]}; +uts46_map(11409) -> 'V'; +uts46_map(11410) -> 
{'M', [11411]}; +uts46_map(11411) -> 'V'; +uts46_map(11412) -> {'M', [11413]}; +uts46_map(11413) -> 'V'; +uts46_map(11414) -> {'M', [11415]}; +uts46_map(11415) -> 'V'; +uts46_map(11416) -> {'M', [11417]}; +uts46_map(11417) -> 'V'; +uts46_map(11418) -> {'M', [11419]}; +uts46_map(11419) -> 'V'; +uts46_map(11420) -> {'M', [11421]}; +uts46_map(11421) -> 'V'; +uts46_map(11422) -> {'M', [11423]}; +uts46_map(11423) -> 'V'; +uts46_map(11424) -> {'M', [11425]}; +uts46_map(11425) -> 'V'; +uts46_map(11426) -> {'M', [11427]}; +uts46_map(11427) -> 'V'; +uts46_map(11428) -> {'M', [11429]}; +uts46_map(11429) -> 'V'; +uts46_map(11430) -> {'M', [11431]}; +uts46_map(11431) -> 'V'; +uts46_map(11432) -> {'M', [11433]}; +uts46_map(11433) -> 'V'; +uts46_map(11434) -> {'M', [11435]}; +uts46_map(11435) -> 'V'; +uts46_map(11436) -> {'M', [11437]}; +uts46_map(11437) -> 'V'; +uts46_map(11438) -> {'M', [11439]}; +uts46_map(11439) -> 'V'; +uts46_map(11440) -> {'M', [11441]}; +uts46_map(11441) -> 'V'; +uts46_map(11442) -> {'M', [11443]}; +uts46_map(11443) -> 'V'; +uts46_map(11444) -> {'M', [11445]}; +uts46_map(11445) -> 'V'; +uts46_map(11446) -> {'M', [11447]}; +uts46_map(11447) -> 'V'; +uts46_map(11448) -> {'M', [11449]}; +uts46_map(11449) -> 'V'; +uts46_map(11450) -> {'M', [11451]}; +uts46_map(11451) -> 'V'; +uts46_map(11452) -> {'M', [11453]}; +uts46_map(11453) -> 'V'; +uts46_map(11454) -> {'M', [11455]}; +uts46_map(11455) -> 'V'; +uts46_map(11456) -> {'M', [11457]}; +uts46_map(11457) -> 'V'; +uts46_map(11458) -> {'M', [11459]}; +uts46_map(11459) -> 'V'; +uts46_map(11460) -> {'M', [11461]}; +uts46_map(11461) -> 'V'; +uts46_map(11462) -> {'M', [11463]}; +uts46_map(11463) -> 'V'; +uts46_map(11464) -> {'M', [11465]}; +uts46_map(11465) -> 'V'; +uts46_map(11466) -> {'M', [11467]}; +uts46_map(11467) -> 'V'; +uts46_map(11468) -> {'M', [11469]}; +uts46_map(11469) -> 'V'; +uts46_map(11470) -> {'M', [11471]}; +uts46_map(11471) -> 'V'; +uts46_map(11472) -> {'M', [11473]}; +uts46_map(11473) -> 'V'; 
+uts46_map(11474) -> {'M', [11475]}; +uts46_map(11475) -> 'V'; +uts46_map(11476) -> {'M', [11477]}; +uts46_map(11477) -> 'V'; +uts46_map(11478) -> {'M', [11479]}; +uts46_map(11479) -> 'V'; +uts46_map(11480) -> {'M', [11481]}; +uts46_map(11481) -> 'V'; +uts46_map(11482) -> {'M', [11483]}; +uts46_map(11483) -> 'V'; +uts46_map(11484) -> {'M', [11485]}; +uts46_map(11485) -> 'V'; +uts46_map(11486) -> {'M', [11487]}; +uts46_map(11487) -> 'V'; +uts46_map(11488) -> {'M', [11489]}; +uts46_map(11489) -> 'V'; +uts46_map(11490) -> {'M', [11491]}; +uts46_map(11499) -> {'M', [11500]}; +uts46_map(11500) -> 'V'; +uts46_map(11501) -> {'M', [11502]}; +uts46_map(11506) -> {'M', [11507]}; +uts46_map(11507) -> 'V'; +uts46_map(11558) -> 'X'; +uts46_map(11559) -> 'V'; +uts46_map(11565) -> 'V'; +uts46_map(11631) -> {'M', [11617]}; +uts46_map(11632) -> 'V'; +uts46_map(11647) -> 'V'; +uts46_map(11687) -> 'X'; +uts46_map(11695) -> 'X'; +uts46_map(11703) -> 'X'; +uts46_map(11711) -> 'X'; +uts46_map(11719) -> 'X'; +uts46_map(11727) -> 'X'; +uts46_map(11735) -> 'X'; +uts46_map(11743) -> 'X'; +uts46_map(11823) -> 'V'; +uts46_map(11824) -> 'V'; +uts46_map(11825) -> 'V'; +uts46_map(11855) -> 'V'; +uts46_map(11930) -> 'X'; +uts46_map(11935) -> {'M', [27597]}; +uts46_map(12019) -> {'M', [40863]}; +uts46_map(12032) -> {'M', [19968]}; +uts46_map(12033) -> {'M', [20008]}; +uts46_map(12034) -> {'M', [20022]}; +uts46_map(12035) -> {'M', [20031]}; +uts46_map(12036) -> {'M', [20057]}; +uts46_map(12037) -> {'M', [20101]}; +uts46_map(12038) -> {'M', [20108]}; +uts46_map(12039) -> {'M', [20128]}; +uts46_map(12040) -> {'M', [20154]}; +uts46_map(12041) -> {'M', [20799]}; +uts46_map(12042) -> {'M', [20837]}; +uts46_map(12043) -> {'M', [20843]}; +uts46_map(12044) -> {'M', [20866]}; +uts46_map(12045) -> {'M', [20886]}; +uts46_map(12046) -> {'M', [20907]}; +uts46_map(12047) -> {'M', [20960]}; +uts46_map(12048) -> {'M', [20981]}; +uts46_map(12049) -> {'M', [20992]}; +uts46_map(12050) -> {'M', [21147]}; 
+uts46_map(12051) -> {'M', [21241]}; +uts46_map(12052) -> {'M', [21269]}; +uts46_map(12053) -> {'M', [21274]}; +uts46_map(12054) -> {'M', [21304]}; +uts46_map(12055) -> {'M', [21313]}; +uts46_map(12056) -> {'M', [21340]}; +uts46_map(12057) -> {'M', [21353]}; +uts46_map(12058) -> {'M', [21378]}; +uts46_map(12059) -> {'M', [21430]}; +uts46_map(12060) -> {'M', [21448]}; +uts46_map(12061) -> {'M', [21475]}; +uts46_map(12062) -> {'M', [22231]}; +uts46_map(12063) -> {'M', [22303]}; +uts46_map(12064) -> {'M', [22763]}; +uts46_map(12065) -> {'M', [22786]}; +uts46_map(12066) -> {'M', [22794]}; +uts46_map(12067) -> {'M', [22805]}; +uts46_map(12068) -> {'M', [22823]}; +uts46_map(12069) -> {'M', [22899]}; +uts46_map(12070) -> {'M', [23376]}; +uts46_map(12071) -> {'M', [23424]}; +uts46_map(12072) -> {'M', [23544]}; +uts46_map(12073) -> {'M', [23567]}; +uts46_map(12074) -> {'M', [23586]}; +uts46_map(12075) -> {'M', [23608]}; +uts46_map(12076) -> {'M', [23662]}; +uts46_map(12077) -> {'M', [23665]}; +uts46_map(12078) -> {'M', [24027]}; +uts46_map(12079) -> {'M', [24037]}; +uts46_map(12080) -> {'M', [24049]}; +uts46_map(12081) -> {'M', [24062]}; +uts46_map(12082) -> {'M', [24178]}; +uts46_map(12083) -> {'M', [24186]}; +uts46_map(12084) -> {'M', [24191]}; +uts46_map(12085) -> {'M', [24308]}; +uts46_map(12086) -> {'M', [24318]}; +uts46_map(12087) -> {'M', [24331]}; +uts46_map(12088) -> {'M', [24339]}; +uts46_map(12089) -> {'M', [24400]}; +uts46_map(12090) -> {'M', [24417]}; +uts46_map(12091) -> {'M', [24435]}; +uts46_map(12092) -> {'M', [24515]}; +uts46_map(12093) -> {'M', [25096]}; +uts46_map(12094) -> {'M', [25142]}; +uts46_map(12095) -> {'M', [25163]}; +uts46_map(12096) -> {'M', [25903]}; +uts46_map(12097) -> {'M', [25908]}; +uts46_map(12098) -> {'M', [25991]}; +uts46_map(12099) -> {'M', [26007]}; +uts46_map(12100) -> {'M', [26020]}; +uts46_map(12101) -> {'M', [26041]}; +uts46_map(12102) -> {'M', [26080]}; +uts46_map(12103) -> {'M', [26085]}; +uts46_map(12104) -> {'M', [26352]}; 
+uts46_map(12105) -> {'M', [26376]}; +uts46_map(12106) -> {'M', [26408]}; +uts46_map(12107) -> {'M', [27424]}; +uts46_map(12108) -> {'M', [27490]}; +uts46_map(12109) -> {'M', [27513]}; +uts46_map(12110) -> {'M', [27571]}; +uts46_map(12111) -> {'M', [27595]}; +uts46_map(12112) -> {'M', [27604]}; +uts46_map(12113) -> {'M', [27611]}; +uts46_map(12114) -> {'M', [27663]}; +uts46_map(12115) -> {'M', [27668]}; +uts46_map(12116) -> {'M', [27700]}; +uts46_map(12117) -> {'M', [28779]}; +uts46_map(12118) -> {'M', [29226]}; +uts46_map(12119) -> {'M', [29238]}; +uts46_map(12120) -> {'M', [29243]}; +uts46_map(12121) -> {'M', [29247]}; +uts46_map(12122) -> {'M', [29255]}; +uts46_map(12123) -> {'M', [29273]}; +uts46_map(12124) -> {'M', [29275]}; +uts46_map(12125) -> {'M', [29356]}; +uts46_map(12126) -> {'M', [29572]}; +uts46_map(12127) -> {'M', [29577]}; +uts46_map(12128) -> {'M', [29916]}; +uts46_map(12129) -> {'M', [29926]}; +uts46_map(12130) -> {'M', [29976]}; +uts46_map(12131) -> {'M', [29983]}; +uts46_map(12132) -> {'M', [29992]}; +uts46_map(12133) -> {'M', [30000]}; +uts46_map(12134) -> {'M', [30091]}; +uts46_map(12135) -> {'M', [30098]}; +uts46_map(12136) -> {'M', [30326]}; +uts46_map(12137) -> {'M', [30333]}; +uts46_map(12138) -> {'M', [30382]}; +uts46_map(12139) -> {'M', [30399]}; +uts46_map(12140) -> {'M', [30446]}; +uts46_map(12141) -> {'M', [30683]}; +uts46_map(12142) -> {'M', [30690]}; +uts46_map(12143) -> {'M', [30707]}; +uts46_map(12144) -> {'M', [31034]}; +uts46_map(12145) -> {'M', [31160]}; +uts46_map(12146) -> {'M', [31166]}; +uts46_map(12147) -> {'M', [31348]}; +uts46_map(12148) -> {'M', [31435]}; +uts46_map(12149) -> {'M', [31481]}; +uts46_map(12150) -> {'M', [31859]}; +uts46_map(12151) -> {'M', [31992]}; +uts46_map(12152) -> {'M', [32566]}; +uts46_map(12153) -> {'M', [32593]}; +uts46_map(12154) -> {'M', [32650]}; +uts46_map(12155) -> {'M', [32701]}; +uts46_map(12156) -> {'M', [32769]}; +uts46_map(12157) -> {'M', [32780]}; +uts46_map(12158) -> {'M', [32786]}; 
+uts46_map(12159) -> {'M', [32819]}; +uts46_map(12160) -> {'M', [32895]}; +uts46_map(12161) -> {'M', [32905]}; +uts46_map(12162) -> {'M', [33251]}; +uts46_map(12163) -> {'M', [33258]}; +uts46_map(12164) -> {'M', [33267]}; +uts46_map(12165) -> {'M', [33276]}; +uts46_map(12166) -> {'M', [33292]}; +uts46_map(12167) -> {'M', [33307]}; +uts46_map(12168) -> {'M', [33311]}; +uts46_map(12169) -> {'M', [33390]}; +uts46_map(12170) -> {'M', [33394]}; +uts46_map(12171) -> {'M', [33400]}; +uts46_map(12172) -> {'M', [34381]}; +uts46_map(12173) -> {'M', [34411]}; +uts46_map(12174) -> {'M', [34880]}; +uts46_map(12175) -> {'M', [34892]}; +uts46_map(12176) -> {'M', [34915]}; +uts46_map(12177) -> {'M', [35198]}; +uts46_map(12178) -> {'M', [35211]}; +uts46_map(12179) -> {'M', [35282]}; +uts46_map(12180) -> {'M', [35328]}; +uts46_map(12181) -> {'M', [35895]}; +uts46_map(12182) -> {'M', [35910]}; +uts46_map(12183) -> {'M', [35925]}; +uts46_map(12184) -> {'M', [35960]}; +uts46_map(12185) -> {'M', [35997]}; +uts46_map(12186) -> {'M', [36196]}; +uts46_map(12187) -> {'M', [36208]}; +uts46_map(12188) -> {'M', [36275]}; +uts46_map(12189) -> {'M', [36523]}; +uts46_map(12190) -> {'M', [36554]}; +uts46_map(12191) -> {'M', [36763]}; +uts46_map(12192) -> {'M', [36784]}; +uts46_map(12193) -> {'M', [36789]}; +uts46_map(12194) -> {'M', [37009]}; +uts46_map(12195) -> {'M', [37193]}; +uts46_map(12196) -> {'M', [37318]}; +uts46_map(12197) -> {'M', [37324]}; +uts46_map(12198) -> {'M', [37329]}; +uts46_map(12199) -> {'M', [38263]}; +uts46_map(12200) -> {'M', [38272]}; +uts46_map(12201) -> {'M', [38428]}; +uts46_map(12202) -> {'M', [38582]}; +uts46_map(12203) -> {'M', [38585]}; +uts46_map(12204) -> {'M', [38632]}; +uts46_map(12205) -> {'M', [38737]}; +uts46_map(12206) -> {'M', [38750]}; +uts46_map(12207) -> {'M', [38754]}; +uts46_map(12208) -> {'M', [38761]}; +uts46_map(12209) -> {'M', [38859]}; +uts46_map(12210) -> {'M', [38893]}; +uts46_map(12211) -> {'M', [38899]}; +uts46_map(12212) -> {'M', [38913]}; 
+uts46_map(12213) -> {'M', [39080]}; +uts46_map(12214) -> {'M', [39131]}; +uts46_map(12215) -> {'M', [39135]}; +uts46_map(12216) -> {'M', [39318]}; +uts46_map(12217) -> {'M', [39321]}; +uts46_map(12218) -> {'M', [39340]}; +uts46_map(12219) -> {'M', [39592]}; +uts46_map(12220) -> {'M', [39640]}; +uts46_map(12221) -> {'M', [39647]}; +uts46_map(12222) -> {'M', [39717]}; +uts46_map(12223) -> {'M', [39727]}; +uts46_map(12224) -> {'M', [39730]}; +uts46_map(12225) -> {'M', [39740]}; +uts46_map(12226) -> {'M', [39770]}; +uts46_map(12227) -> {'M', [40165]}; +uts46_map(12228) -> {'M', [40565]}; +uts46_map(12229) -> {'M', [40575]}; +uts46_map(12230) -> {'M', [40613]}; +uts46_map(12231) -> {'M', [40635]}; +uts46_map(12232) -> {'M', [40643]}; +uts46_map(12233) -> {'M', [40653]}; +uts46_map(12234) -> {'M', [40657]}; +uts46_map(12235) -> {'M', [40697]}; +uts46_map(12236) -> {'M', [40701]}; +uts46_map(12237) -> {'M', [40718]}; +uts46_map(12238) -> {'M', [40723]}; +uts46_map(12239) -> {'M', [40736]}; +uts46_map(12240) -> {'M', [40763]}; +uts46_map(12241) -> {'M', [40778]}; +uts46_map(12242) -> {'M', [40786]}; +uts46_map(12243) -> {'M', [40845]}; +uts46_map(12244) -> {'M', [40860]}; +uts46_map(12245) -> {'M', [40864]}; +uts46_map(12288) -> {'3', [32]}; +uts46_map(12289) -> 'V'; +uts46_map(12290) -> {'M', [46]}; +uts46_map(12342) -> {'M', [12306]}; +uts46_map(12343) -> 'V'; +uts46_map(12344) -> {'M', [21313]}; +uts46_map(12345) -> {'M', [21316]}; +uts46_map(12346) -> {'M', [21317]}; +uts46_map(12347) -> 'V'; +uts46_map(12348) -> 'V'; +uts46_map(12349) -> 'V'; +uts46_map(12350) -> 'V'; +uts46_map(12351) -> 'V'; +uts46_map(12352) -> 'X'; +uts46_map(12443) -> {'3', [32,12441]}; +uts46_map(12444) -> {'3', [32,12442]}; +uts46_map(12447) -> {'M', [12424,12426]}; +uts46_map(12448) -> 'V'; +uts46_map(12543) -> {'M', [12467,12488]}; +uts46_map(12589) -> 'V'; +uts46_map(12590) -> 'V'; +uts46_map(12591) -> 'V'; +uts46_map(12592) -> 'X'; +uts46_map(12593) -> {'M', [4352]}; +uts46_map(12594) -> 
{'M', [4353]}; +uts46_map(12595) -> {'M', [4522]}; +uts46_map(12596) -> {'M', [4354]}; +uts46_map(12597) -> {'M', [4524]}; +uts46_map(12598) -> {'M', [4525]}; +uts46_map(12599) -> {'M', [4355]}; +uts46_map(12600) -> {'M', [4356]}; +uts46_map(12601) -> {'M', [4357]}; +uts46_map(12602) -> {'M', [4528]}; +uts46_map(12603) -> {'M', [4529]}; +uts46_map(12604) -> {'M', [4530]}; +uts46_map(12605) -> {'M', [4531]}; +uts46_map(12606) -> {'M', [4532]}; +uts46_map(12607) -> {'M', [4533]}; +uts46_map(12608) -> {'M', [4378]}; +uts46_map(12609) -> {'M', [4358]}; +uts46_map(12610) -> {'M', [4359]}; +uts46_map(12611) -> {'M', [4360]}; +uts46_map(12612) -> {'M', [4385]}; +uts46_map(12613) -> {'M', [4361]}; +uts46_map(12614) -> {'M', [4362]}; +uts46_map(12615) -> {'M', [4363]}; +uts46_map(12616) -> {'M', [4364]}; +uts46_map(12617) -> {'M', [4365]}; +uts46_map(12618) -> {'M', [4366]}; +uts46_map(12619) -> {'M', [4367]}; +uts46_map(12620) -> {'M', [4368]}; +uts46_map(12621) -> {'M', [4369]}; +uts46_map(12622) -> {'M', [4370]}; +uts46_map(12623) -> {'M', [4449]}; +uts46_map(12624) -> {'M', [4450]}; +uts46_map(12625) -> {'M', [4451]}; +uts46_map(12626) -> {'M', [4452]}; +uts46_map(12627) -> {'M', [4453]}; +uts46_map(12628) -> {'M', [4454]}; +uts46_map(12629) -> {'M', [4455]}; +uts46_map(12630) -> {'M', [4456]}; +uts46_map(12631) -> {'M', [4457]}; +uts46_map(12632) -> {'M', [4458]}; +uts46_map(12633) -> {'M', [4459]}; +uts46_map(12634) -> {'M', [4460]}; +uts46_map(12635) -> {'M', [4461]}; +uts46_map(12636) -> {'M', [4462]}; +uts46_map(12637) -> {'M', [4463]}; +uts46_map(12638) -> {'M', [4464]}; +uts46_map(12639) -> {'M', [4465]}; +uts46_map(12640) -> {'M', [4466]}; +uts46_map(12641) -> {'M', [4467]}; +uts46_map(12642) -> {'M', [4468]}; +uts46_map(12643) -> {'M', [4469]}; +uts46_map(12644) -> 'X'; +uts46_map(12645) -> {'M', [4372]}; +uts46_map(12646) -> {'M', [4373]}; +uts46_map(12647) -> {'M', [4551]}; +uts46_map(12648) -> {'M', [4552]}; +uts46_map(12649) -> {'M', [4556]}; 
+uts46_map(12650) -> {'M', [4558]}; +uts46_map(12651) -> {'M', [4563]}; +uts46_map(12652) -> {'M', [4567]}; +uts46_map(12653) -> {'M', [4569]}; +uts46_map(12654) -> {'M', [4380]}; +uts46_map(12655) -> {'M', [4573]}; +uts46_map(12656) -> {'M', [4575]}; +uts46_map(12657) -> {'M', [4381]}; +uts46_map(12658) -> {'M', [4382]}; +uts46_map(12659) -> {'M', [4384]}; +uts46_map(12660) -> {'M', [4386]}; +uts46_map(12661) -> {'M', [4387]}; +uts46_map(12662) -> {'M', [4391]}; +uts46_map(12663) -> {'M', [4393]}; +uts46_map(12664) -> {'M', [4395]}; +uts46_map(12665) -> {'M', [4396]}; +uts46_map(12666) -> {'M', [4397]}; +uts46_map(12667) -> {'M', [4398]}; +uts46_map(12668) -> {'M', [4399]}; +uts46_map(12669) -> {'M', [4402]}; +uts46_map(12670) -> {'M', [4406]}; +uts46_map(12671) -> {'M', [4416]}; +uts46_map(12672) -> {'M', [4423]}; +uts46_map(12673) -> {'M', [4428]}; +uts46_map(12674) -> {'M', [4593]}; +uts46_map(12675) -> {'M', [4594]}; +uts46_map(12676) -> {'M', [4439]}; +uts46_map(12677) -> {'M', [4440]}; +uts46_map(12678) -> {'M', [4441]}; +uts46_map(12679) -> {'M', [4484]}; +uts46_map(12680) -> {'M', [4485]}; +uts46_map(12681) -> {'M', [4488]}; +uts46_map(12682) -> {'M', [4497]}; +uts46_map(12683) -> {'M', [4498]}; +uts46_map(12684) -> {'M', [4500]}; +uts46_map(12685) -> {'M', [4510]}; +uts46_map(12686) -> {'M', [4513]}; +uts46_map(12687) -> 'X'; +uts46_map(12690) -> {'M', [19968]}; +uts46_map(12691) -> {'M', [20108]}; +uts46_map(12692) -> {'M', [19977]}; +uts46_map(12693) -> {'M', [22235]}; +uts46_map(12694) -> {'M', [19978]}; +uts46_map(12695) -> {'M', [20013]}; +uts46_map(12696) -> {'M', [19979]}; +uts46_map(12697) -> {'M', [30002]}; +uts46_map(12698) -> {'M', [20057]}; +uts46_map(12699) -> {'M', [19993]}; +uts46_map(12700) -> {'M', [19969]}; +uts46_map(12701) -> {'M', [22825]}; +uts46_map(12702) -> {'M', [22320]}; +uts46_map(12703) -> {'M', [20154]}; +uts46_map(12800) -> {'3', [40,4352,41]}; +uts46_map(12801) -> {'3', [40,4354,41]}; +uts46_map(12802) -> {'3', 
[40,4355,41]}; +uts46_map(12803) -> {'3', [40,4357,41]}; +uts46_map(12804) -> {'3', [40,4358,41]}; +uts46_map(12805) -> {'3', [40,4359,41]}; +uts46_map(12806) -> {'3', [40,4361,41]}; +uts46_map(12807) -> {'3', [40,4363,41]}; +uts46_map(12808) -> {'3', [40,4364,41]}; +uts46_map(12809) -> {'3', [40,4366,41]}; +uts46_map(12810) -> {'3', [40,4367,41]}; +uts46_map(12811) -> {'3', [40,4368,41]}; +uts46_map(12812) -> {'3', [40,4369,41]}; +uts46_map(12813) -> {'3', [40,4370,41]}; +uts46_map(12814) -> {'3', [40,44032,41]}; +uts46_map(12815) -> {'3', [40,45208,41]}; +uts46_map(12816) -> {'3', [40,45796,41]}; +uts46_map(12817) -> {'3', [40,46972,41]}; +uts46_map(12818) -> {'3', [40,47560,41]}; +uts46_map(12819) -> {'3', [40,48148,41]}; +uts46_map(12820) -> {'3', [40,49324,41]}; +uts46_map(12821) -> {'3', [40,50500,41]}; +uts46_map(12822) -> {'3', [40,51088,41]}; +uts46_map(12823) -> {'3', [40,52264,41]}; +uts46_map(12824) -> {'3', [40,52852,41]}; +uts46_map(12825) -> {'3', [40,53440,41]}; +uts46_map(12826) -> {'3', [40,54028,41]}; +uts46_map(12827) -> {'3', [40,54616,41]}; +uts46_map(12828) -> {'3', [40,51452,41]}; +uts46_map(12829) -> {'3', [40,50724,51204,41]}; +uts46_map(12830) -> {'3', [40,50724,54980,41]}; +uts46_map(12831) -> 'X'; +uts46_map(12832) -> {'3', [40,19968,41]}; +uts46_map(12833) -> {'3', [40,20108,41]}; +uts46_map(12834) -> {'3', [40,19977,41]}; +uts46_map(12835) -> {'3', [40,22235,41]}; +uts46_map(12836) -> {'3', [40,20116,41]}; +uts46_map(12837) -> {'3', [40,20845,41]}; +uts46_map(12838) -> {'3', [40,19971,41]}; +uts46_map(12839) -> {'3', [40,20843,41]}; +uts46_map(12840) -> {'3', [40,20061,41]}; +uts46_map(12841) -> {'3', [40,21313,41]}; +uts46_map(12842) -> {'3', [40,26376,41]}; +uts46_map(12843) -> {'3', [40,28779,41]}; +uts46_map(12844) -> {'3', [40,27700,41]}; +uts46_map(12845) -> {'3', [40,26408,41]}; +uts46_map(12846) -> {'3', [40,37329,41]}; +uts46_map(12847) -> {'3', [40,22303,41]}; +uts46_map(12848) -> {'3', [40,26085,41]}; +uts46_map(12849) -> 
{'3', [40,26666,41]}; +uts46_map(12850) -> {'3', [40,26377,41]}; +uts46_map(12851) -> {'3', [40,31038,41]}; +uts46_map(12852) -> {'3', [40,21517,41]}; +uts46_map(12853) -> {'3', [40,29305,41]}; +uts46_map(12854) -> {'3', [40,36001,41]}; +uts46_map(12855) -> {'3', [40,31069,41]}; +uts46_map(12856) -> {'3', [40,21172,41]}; +uts46_map(12857) -> {'3', [40,20195,41]}; +uts46_map(12858) -> {'3', [40,21628,41]}; +uts46_map(12859) -> {'3', [40,23398,41]}; +uts46_map(12860) -> {'3', [40,30435,41]}; +uts46_map(12861) -> {'3', [40,20225,41]}; +uts46_map(12862) -> {'3', [40,36039,41]}; +uts46_map(12863) -> {'3', [40,21332,41]}; +uts46_map(12864) -> {'3', [40,31085,41]}; +uts46_map(12865) -> {'3', [40,20241,41]}; +uts46_map(12866) -> {'3', [40,33258,41]}; +uts46_map(12867) -> {'3', [40,33267,41]}; +uts46_map(12868) -> {'M', [21839]}; +uts46_map(12869) -> {'M', [24188]}; +uts46_map(12870) -> {'M', [25991]}; +uts46_map(12871) -> {'M', [31631]}; +uts46_map(12880) -> {'M', [112,116,101]}; +uts46_map(12881) -> {'M', [50,49]}; +uts46_map(12882) -> {'M', [50,50]}; +uts46_map(12883) -> {'M', [50,51]}; +uts46_map(12884) -> {'M', [50,52]}; +uts46_map(12885) -> {'M', [50,53]}; +uts46_map(12886) -> {'M', [50,54]}; +uts46_map(12887) -> {'M', [50,55]}; +uts46_map(12888) -> {'M', [50,56]}; +uts46_map(12889) -> {'M', [50,57]}; +uts46_map(12890) -> {'M', [51,48]}; +uts46_map(12891) -> {'M', [51,49]}; +uts46_map(12892) -> {'M', [51,50]}; +uts46_map(12893) -> {'M', [51,51]}; +uts46_map(12894) -> {'M', [51,52]}; +uts46_map(12895) -> {'M', [51,53]}; +uts46_map(12896) -> {'M', [4352]}; +uts46_map(12897) -> {'M', [4354]}; +uts46_map(12898) -> {'M', [4355]}; +uts46_map(12899) -> {'M', [4357]}; +uts46_map(12900) -> {'M', [4358]}; +uts46_map(12901) -> {'M', [4359]}; +uts46_map(12902) -> {'M', [4361]}; +uts46_map(12903) -> {'M', [4363]}; +uts46_map(12904) -> {'M', [4364]}; +uts46_map(12905) -> {'M', [4366]}; +uts46_map(12906) -> {'M', [4367]}; +uts46_map(12907) -> {'M', [4368]}; +uts46_map(12908) -> 
{'M', [4369]}; +uts46_map(12909) -> {'M', [4370]}; +uts46_map(12910) -> {'M', [44032]}; +uts46_map(12911) -> {'M', [45208]}; +uts46_map(12912) -> {'M', [45796]}; +uts46_map(12913) -> {'M', [46972]}; +uts46_map(12914) -> {'M', [47560]}; +uts46_map(12915) -> {'M', [48148]}; +uts46_map(12916) -> {'M', [49324]}; +uts46_map(12917) -> {'M', [50500]}; +uts46_map(12918) -> {'M', [51088]}; +uts46_map(12919) -> {'M', [52264]}; +uts46_map(12920) -> {'M', [52852]}; +uts46_map(12921) -> {'M', [53440]}; +uts46_map(12922) -> {'M', [54028]}; +uts46_map(12923) -> {'M', [54616]}; +uts46_map(12924) -> {'M', [52280,44256]}; +uts46_map(12925) -> {'M', [51452,51032]}; +uts46_map(12926) -> {'M', [50864]}; +uts46_map(12927) -> 'V'; +uts46_map(12928) -> {'M', [19968]}; +uts46_map(12929) -> {'M', [20108]}; +uts46_map(12930) -> {'M', [19977]}; +uts46_map(12931) -> {'M', [22235]}; +uts46_map(12932) -> {'M', [20116]}; +uts46_map(12933) -> {'M', [20845]}; +uts46_map(12934) -> {'M', [19971]}; +uts46_map(12935) -> {'M', [20843]}; +uts46_map(12936) -> {'M', [20061]}; +uts46_map(12937) -> {'M', [21313]}; +uts46_map(12938) -> {'M', [26376]}; +uts46_map(12939) -> {'M', [28779]}; +uts46_map(12940) -> {'M', [27700]}; +uts46_map(12941) -> {'M', [26408]}; +uts46_map(12942) -> {'M', [37329]}; +uts46_map(12943) -> {'M', [22303]}; +uts46_map(12944) -> {'M', [26085]}; +uts46_map(12945) -> {'M', [26666]}; +uts46_map(12946) -> {'M', [26377]}; +uts46_map(12947) -> {'M', [31038]}; +uts46_map(12948) -> {'M', [21517]}; +uts46_map(12949) -> {'M', [29305]}; +uts46_map(12950) -> {'M', [36001]}; +uts46_map(12951) -> {'M', [31069]}; +uts46_map(12952) -> {'M', [21172]}; +uts46_map(12953) -> {'M', [31192]}; +uts46_map(12954) -> {'M', [30007]}; +uts46_map(12955) -> {'M', [22899]}; +uts46_map(12956) -> {'M', [36969]}; +uts46_map(12957) -> {'M', [20778]}; +uts46_map(12958) -> {'M', [21360]}; +uts46_map(12959) -> {'M', [27880]}; +uts46_map(12960) -> {'M', [38917]}; +uts46_map(12961) -> {'M', [20241]}; +uts46_map(12962) -> 
{'M', [20889]}; +uts46_map(12963) -> {'M', [27491]}; +uts46_map(12964) -> {'M', [19978]}; +uts46_map(12965) -> {'M', [20013]}; +uts46_map(12966) -> {'M', [19979]}; +uts46_map(12967) -> {'M', [24038]}; +uts46_map(12968) -> {'M', [21491]}; +uts46_map(12969) -> {'M', [21307]}; +uts46_map(12970) -> {'M', [23447]}; +uts46_map(12971) -> {'M', [23398]}; +uts46_map(12972) -> {'M', [30435]}; +uts46_map(12973) -> {'M', [20225]}; +uts46_map(12974) -> {'M', [36039]}; +uts46_map(12975) -> {'M', [21332]}; +uts46_map(12976) -> {'M', [22812]}; +uts46_map(12977) -> {'M', [51,54]}; +uts46_map(12978) -> {'M', [51,55]}; +uts46_map(12979) -> {'M', [51,56]}; +uts46_map(12980) -> {'M', [51,57]}; +uts46_map(12981) -> {'M', [52,48]}; +uts46_map(12982) -> {'M', [52,49]}; +uts46_map(12983) -> {'M', [52,50]}; +uts46_map(12984) -> {'M', [52,51]}; +uts46_map(12985) -> {'M', [52,52]}; +uts46_map(12986) -> {'M', [52,53]}; +uts46_map(12987) -> {'M', [52,54]}; +uts46_map(12988) -> {'M', [52,55]}; +uts46_map(12989) -> {'M', [52,56]}; +uts46_map(12990) -> {'M', [52,57]}; +uts46_map(12991) -> {'M', [53,48]}; +uts46_map(12992) -> {'M', [49,26376]}; +uts46_map(12993) -> {'M', [50,26376]}; +uts46_map(12994) -> {'M', [51,26376]}; +uts46_map(12995) -> {'M', [52,26376]}; +uts46_map(12996) -> {'M', [53,26376]}; +uts46_map(12997) -> {'M', [54,26376]}; +uts46_map(12998) -> {'M', [55,26376]}; +uts46_map(12999) -> {'M', [56,26376]}; +uts46_map(13000) -> {'M', [57,26376]}; +uts46_map(13001) -> {'M', [49,48,26376]}; +uts46_map(13002) -> {'M', [49,49,26376]}; +uts46_map(13003) -> {'M', [49,50,26376]}; +uts46_map(13004) -> {'M', [104,103]}; +uts46_map(13005) -> {'M', [101,114,103]}; +uts46_map(13006) -> {'M', [101,118]}; +uts46_map(13007) -> {'M', [108,116,100]}; +uts46_map(13008) -> {'M', [12450]}; +uts46_map(13009) -> {'M', [12452]}; +uts46_map(13010) -> {'M', [12454]}; +uts46_map(13011) -> {'M', [12456]}; +uts46_map(13012) -> {'M', [12458]}; +uts46_map(13013) -> {'M', [12459]}; +uts46_map(13014) -> {'M', 
[12461]}; +uts46_map(13015) -> {'M', [12463]}; +uts46_map(13016) -> {'M', [12465]}; +uts46_map(13017) -> {'M', [12467]}; +uts46_map(13018) -> {'M', [12469]}; +uts46_map(13019) -> {'M', [12471]}; +uts46_map(13020) -> {'M', [12473]}; +uts46_map(13021) -> {'M', [12475]}; +uts46_map(13022) -> {'M', [12477]}; +uts46_map(13023) -> {'M', [12479]}; +uts46_map(13024) -> {'M', [12481]}; +uts46_map(13025) -> {'M', [12484]}; +uts46_map(13026) -> {'M', [12486]}; +uts46_map(13027) -> {'M', [12488]}; +uts46_map(13028) -> {'M', [12490]}; +uts46_map(13029) -> {'M', [12491]}; +uts46_map(13030) -> {'M', [12492]}; +uts46_map(13031) -> {'M', [12493]}; +uts46_map(13032) -> {'M', [12494]}; +uts46_map(13033) -> {'M', [12495]}; +uts46_map(13034) -> {'M', [12498]}; +uts46_map(13035) -> {'M', [12501]}; +uts46_map(13036) -> {'M', [12504]}; +uts46_map(13037) -> {'M', [12507]}; +uts46_map(13038) -> {'M', [12510]}; +uts46_map(13039) -> {'M', [12511]}; +uts46_map(13040) -> {'M', [12512]}; +uts46_map(13041) -> {'M', [12513]}; +uts46_map(13042) -> {'M', [12514]}; +uts46_map(13043) -> {'M', [12516]}; +uts46_map(13044) -> {'M', [12518]}; +uts46_map(13045) -> {'M', [12520]}; +uts46_map(13046) -> {'M', [12521]}; +uts46_map(13047) -> {'M', [12522]}; +uts46_map(13048) -> {'M', [12523]}; +uts46_map(13049) -> {'M', [12524]}; +uts46_map(13050) -> {'M', [12525]}; +uts46_map(13051) -> {'M', [12527]}; +uts46_map(13052) -> {'M', [12528]}; +uts46_map(13053) -> {'M', [12529]}; +uts46_map(13054) -> {'M', [12530]}; +uts46_map(13055) -> {'M', [20196,21644]}; +uts46_map(13056) -> {'M', [12450,12497,12540,12488]}; +uts46_map(13057) -> {'M', [12450,12523,12501,12449]}; +uts46_map(13058) -> {'M', [12450,12531,12506,12450]}; +uts46_map(13059) -> {'M', [12450,12540,12523]}; +uts46_map(13060) -> {'M', [12452,12491,12531,12464]}; +uts46_map(13061) -> {'M', [12452,12531,12481]}; +uts46_map(13062) -> {'M', [12454,12457,12531]}; +uts46_map(13063) -> {'M', [12456,12473,12463,12540,12489]}; +uts46_map(13064) -> {'M', 
[12456,12540,12459,12540]}; +uts46_map(13065) -> {'M', [12458,12531,12473]}; +uts46_map(13066) -> {'M', [12458,12540,12512]}; +uts46_map(13067) -> {'M', [12459,12452,12522]}; +uts46_map(13068) -> {'M', [12459,12521,12483,12488]}; +uts46_map(13069) -> {'M', [12459,12525,12522,12540]}; +uts46_map(13070) -> {'M', [12460,12525,12531]}; +uts46_map(13071) -> {'M', [12460,12531,12510]}; +uts46_map(13072) -> {'M', [12462,12460]}; +uts46_map(13073) -> {'M', [12462,12491,12540]}; +uts46_map(13074) -> {'M', [12461,12517,12522,12540]}; +uts46_map(13075) -> {'M', [12462,12523,12480,12540]}; +uts46_map(13076) -> {'M', [12461,12525]}; +uts46_map(13077) -> {'M', [12461,12525,12464,12521,12512]}; +uts46_map(13078) -> {'M', [12461,12525,12513,12540,12488,12523]}; +uts46_map(13079) -> {'M', [12461,12525,12527,12483,12488]}; +uts46_map(13080) -> {'M', [12464,12521,12512]}; +uts46_map(13081) -> {'M', [12464,12521,12512,12488,12531]}; +uts46_map(13082) -> {'M', [12463,12523,12476,12452,12525]}; +uts46_map(13083) -> {'M', [12463,12525,12540,12493]}; +uts46_map(13084) -> {'M', [12465,12540,12473]}; +uts46_map(13085) -> {'M', [12467,12523,12490]}; +uts46_map(13086) -> {'M', [12467,12540,12509]}; +uts46_map(13087) -> {'M', [12469,12452,12463,12523]}; +uts46_map(13088) -> {'M', [12469,12531,12481,12540,12512]}; +uts46_map(13089) -> {'M', [12471,12522,12531,12464]}; +uts46_map(13090) -> {'M', [12475,12531,12481]}; +uts46_map(13091) -> {'M', [12475,12531,12488]}; +uts46_map(13092) -> {'M', [12480,12540,12473]}; +uts46_map(13093) -> {'M', [12487,12471]}; +uts46_map(13094) -> {'M', [12489,12523]}; +uts46_map(13095) -> {'M', [12488,12531]}; +uts46_map(13096) -> {'M', [12490,12494]}; +uts46_map(13097) -> {'M', [12494,12483,12488]}; +uts46_map(13098) -> {'M', [12495,12452,12484]}; +uts46_map(13099) -> {'M', [12497,12540,12475,12531,12488]}; +uts46_map(13100) -> {'M', [12497,12540,12484]}; +uts46_map(13101) -> {'M', [12496,12540,12524,12523]}; +uts46_map(13102) -> {'M', 
[12500,12450,12473,12488,12523]}; +uts46_map(13103) -> {'M', [12500,12463,12523]}; +uts46_map(13104) -> {'M', [12500,12467]}; +uts46_map(13105) -> {'M', [12499,12523]}; +uts46_map(13106) -> {'M', [12501,12449,12521,12483,12489]}; +uts46_map(13107) -> {'M', [12501,12451,12540,12488]}; +uts46_map(13108) -> {'M', [12502,12483,12471,12455,12523]}; +uts46_map(13109) -> {'M', [12501,12521,12531]}; +uts46_map(13110) -> {'M', [12504,12463,12479,12540,12523]}; +uts46_map(13111) -> {'M', [12506,12477]}; +uts46_map(13112) -> {'M', [12506,12491,12498]}; +uts46_map(13113) -> {'M', [12504,12523,12484]}; +uts46_map(13114) -> {'M', [12506,12531,12473]}; +uts46_map(13115) -> {'M', [12506,12540,12472]}; +uts46_map(13116) -> {'M', [12505,12540,12479]}; +uts46_map(13117) -> {'M', [12509,12452,12531,12488]}; +uts46_map(13118) -> {'M', [12508,12523,12488]}; +uts46_map(13119) -> {'M', [12507,12531]}; +uts46_map(13120) -> {'M', [12509,12531,12489]}; +uts46_map(13121) -> {'M', [12507,12540,12523]}; +uts46_map(13122) -> {'M', [12507,12540,12531]}; +uts46_map(13123) -> {'M', [12510,12452,12463,12525]}; +uts46_map(13124) -> {'M', [12510,12452,12523]}; +uts46_map(13125) -> {'M', [12510,12483,12495]}; +uts46_map(13126) -> {'M', [12510,12523,12463]}; +uts46_map(13127) -> {'M', [12510,12531,12471,12519,12531]}; +uts46_map(13128) -> {'M', [12511,12463,12525,12531]}; +uts46_map(13129) -> {'M', [12511,12522]}; +uts46_map(13130) -> {'M', [12511,12522,12496,12540,12523]}; +uts46_map(13131) -> {'M', [12513,12460]}; +uts46_map(13132) -> {'M', [12513,12460,12488,12531]}; +uts46_map(13133) -> {'M', [12513,12540,12488,12523]}; +uts46_map(13134) -> {'M', [12516,12540,12489]}; +uts46_map(13135) -> {'M', [12516,12540,12523]}; +uts46_map(13136) -> {'M', [12518,12450,12531]}; +uts46_map(13137) -> {'M', [12522,12483,12488,12523]}; +uts46_map(13138) -> {'M', [12522,12521]}; +uts46_map(13139) -> {'M', [12523,12500,12540]}; +uts46_map(13140) -> {'M', [12523,12540,12502,12523]}; +uts46_map(13141) -> {'M', 
[12524,12512]}; +uts46_map(13142) -> {'M', [12524,12531,12488,12466,12531]}; +uts46_map(13143) -> {'M', [12527,12483,12488]}; +uts46_map(13144) -> {'M', [48,28857]}; +uts46_map(13145) -> {'M', [49,28857]}; +uts46_map(13146) -> {'M', [50,28857]}; +uts46_map(13147) -> {'M', [51,28857]}; +uts46_map(13148) -> {'M', [52,28857]}; +uts46_map(13149) -> {'M', [53,28857]}; +uts46_map(13150) -> {'M', [54,28857]}; +uts46_map(13151) -> {'M', [55,28857]}; +uts46_map(13152) -> {'M', [56,28857]}; +uts46_map(13153) -> {'M', [57,28857]}; +uts46_map(13154) -> {'M', [49,48,28857]}; +uts46_map(13155) -> {'M', [49,49,28857]}; +uts46_map(13156) -> {'M', [49,50,28857]}; +uts46_map(13157) -> {'M', [49,51,28857]}; +uts46_map(13158) -> {'M', [49,52,28857]}; +uts46_map(13159) -> {'M', [49,53,28857]}; +uts46_map(13160) -> {'M', [49,54,28857]}; +uts46_map(13161) -> {'M', [49,55,28857]}; +uts46_map(13162) -> {'M', [49,56,28857]}; +uts46_map(13163) -> {'M', [49,57,28857]}; +uts46_map(13164) -> {'M', [50,48,28857]}; +uts46_map(13165) -> {'M', [50,49,28857]}; +uts46_map(13166) -> {'M', [50,50,28857]}; +uts46_map(13167) -> {'M', [50,51,28857]}; +uts46_map(13168) -> {'M', [50,52,28857]}; +uts46_map(13169) -> {'M', [104,112,97]}; +uts46_map(13170) -> {'M', [100,97]}; +uts46_map(13171) -> {'M', [97,117]}; +uts46_map(13172) -> {'M', [98,97,114]}; +uts46_map(13173) -> {'M', [111,118]}; +uts46_map(13174) -> {'M', [112,99]}; +uts46_map(13175) -> {'M', [100,109]}; +uts46_map(13176) -> {'M', [100,109,50]}; +uts46_map(13177) -> {'M', [100,109,51]}; +uts46_map(13178) -> {'M', [105,117]}; +uts46_map(13179) -> {'M', [24179,25104]}; +uts46_map(13180) -> {'M', [26157,21644]}; +uts46_map(13181) -> {'M', [22823,27491]}; +uts46_map(13182) -> {'M', [26126,27835]}; +uts46_map(13183) -> {'M', [26666,24335,20250,31038]}; +uts46_map(13184) -> {'M', [112,97]}; +uts46_map(13185) -> {'M', [110,97]}; +uts46_map(13186) -> {'M', [956,97]}; +uts46_map(13187) -> {'M', [109,97]}; +uts46_map(13188) -> {'M', [107,97]}; 
+uts46_map(13189) -> {'M', [107,98]}; +uts46_map(13190) -> {'M', [109,98]}; +uts46_map(13191) -> {'M', [103,98]}; +uts46_map(13192) -> {'M', [99,97,108]}; +uts46_map(13193) -> {'M', [107,99,97,108]}; +uts46_map(13194) -> {'M', [112,102]}; +uts46_map(13195) -> {'M', [110,102]}; +uts46_map(13196) -> {'M', [956,102]}; +uts46_map(13197) -> {'M', [956,103]}; +uts46_map(13198) -> {'M', [109,103]}; +uts46_map(13199) -> {'M', [107,103]}; +uts46_map(13200) -> {'M', [104,122]}; +uts46_map(13201) -> {'M', [107,104,122]}; +uts46_map(13202) -> {'M', [109,104,122]}; +uts46_map(13203) -> {'M', [103,104,122]}; +uts46_map(13204) -> {'M', [116,104,122]}; +uts46_map(13205) -> {'M', [956,108]}; +uts46_map(13206) -> {'M', [109,108]}; +uts46_map(13207) -> {'M', [100,108]}; +uts46_map(13208) -> {'M', [107,108]}; +uts46_map(13209) -> {'M', [102,109]}; +uts46_map(13210) -> {'M', [110,109]}; +uts46_map(13211) -> {'M', [956,109]}; +uts46_map(13212) -> {'M', [109,109]}; +uts46_map(13213) -> {'M', [99,109]}; +uts46_map(13214) -> {'M', [107,109]}; +uts46_map(13215) -> {'M', [109,109,50]}; +uts46_map(13216) -> {'M', [99,109,50]}; +uts46_map(13217) -> {'M', [109,50]}; +uts46_map(13218) -> {'M', [107,109,50]}; +uts46_map(13219) -> {'M', [109,109,51]}; +uts46_map(13220) -> {'M', [99,109,51]}; +uts46_map(13221) -> {'M', [109,51]}; +uts46_map(13222) -> {'M', [107,109,51]}; +uts46_map(13223) -> {'M', [109,8725,115]}; +uts46_map(13224) -> {'M', [109,8725,115,50]}; +uts46_map(13225) -> {'M', [112,97]}; +uts46_map(13226) -> {'M', [107,112,97]}; +uts46_map(13227) -> {'M', [109,112,97]}; +uts46_map(13228) -> {'M', [103,112,97]}; +uts46_map(13229) -> {'M', [114,97,100]}; +uts46_map(13230) -> {'M', [114,97,100,8725,115]}; +uts46_map(13231) -> {'M', [114,97,100,8725,115,50]}; +uts46_map(13232) -> {'M', [112,115]}; +uts46_map(13233) -> {'M', [110,115]}; +uts46_map(13234) -> {'M', [956,115]}; +uts46_map(13235) -> {'M', [109,115]}; +uts46_map(13236) -> {'M', [112,118]}; +uts46_map(13237) -> {'M', [110,118]}; 
+uts46_map(13238) -> {'M', [956,118]}; +uts46_map(13239) -> {'M', [109,118]}; +uts46_map(13240) -> {'M', [107,118]}; +uts46_map(13241) -> {'M', [109,118]}; +uts46_map(13242) -> {'M', [112,119]}; +uts46_map(13243) -> {'M', [110,119]}; +uts46_map(13244) -> {'M', [956,119]}; +uts46_map(13245) -> {'M', [109,119]}; +uts46_map(13246) -> {'M', [107,119]}; +uts46_map(13247) -> {'M', [109,119]}; +uts46_map(13248) -> {'M', [107,969]}; +uts46_map(13249) -> {'M', [109,969]}; +uts46_map(13250) -> 'X'; +uts46_map(13251) -> {'M', [98,113]}; +uts46_map(13252) -> {'M', [99,99]}; +uts46_map(13253) -> {'M', [99,100]}; +uts46_map(13254) -> {'M', [99,8725,107,103]}; +uts46_map(13255) -> 'X'; +uts46_map(13256) -> {'M', [100,98]}; +uts46_map(13257) -> {'M', [103,121]}; +uts46_map(13258) -> {'M', [104,97]}; +uts46_map(13259) -> {'M', [104,112]}; +uts46_map(13260) -> {'M', [105,110]}; +uts46_map(13261) -> {'M', [107,107]}; +uts46_map(13262) -> {'M', [107,109]}; +uts46_map(13263) -> {'M', [107,116]}; +uts46_map(13264) -> {'M', [108,109]}; +uts46_map(13265) -> {'M', [108,110]}; +uts46_map(13266) -> {'M', [108,111,103]}; +uts46_map(13267) -> {'M', [108,120]}; +uts46_map(13268) -> {'M', [109,98]}; +uts46_map(13269) -> {'M', [109,105,108]}; +uts46_map(13270) -> {'M', [109,111,108]}; +uts46_map(13271) -> {'M', [112,104]}; +uts46_map(13272) -> 'X'; +uts46_map(13273) -> {'M', [112,112,109]}; +uts46_map(13274) -> {'M', [112,114]}; +uts46_map(13275) -> {'M', [115,114]}; +uts46_map(13276) -> {'M', [115,118]}; +uts46_map(13277) -> {'M', [119,98]}; +uts46_map(13278) -> {'M', [118,8725,109]}; +uts46_map(13279) -> {'M', [97,8725,109]}; +uts46_map(13280) -> {'M', [49,26085]}; +uts46_map(13281) -> {'M', [50,26085]}; +uts46_map(13282) -> {'M', [51,26085]}; +uts46_map(13283) -> {'M', [52,26085]}; +uts46_map(13284) -> {'M', [53,26085]}; +uts46_map(13285) -> {'M', [54,26085]}; +uts46_map(13286) -> {'M', [55,26085]}; +uts46_map(13287) -> {'M', [56,26085]}; +uts46_map(13288) -> {'M', [57,26085]}; 
+uts46_map(13289) -> {'M', [49,48,26085]}; +uts46_map(13290) -> {'M', [49,49,26085]}; +uts46_map(13291) -> {'M', [49,50,26085]}; +uts46_map(13292) -> {'M', [49,51,26085]}; +uts46_map(13293) -> {'M', [49,52,26085]}; +uts46_map(13294) -> {'M', [49,53,26085]}; +uts46_map(13295) -> {'M', [49,54,26085]}; +uts46_map(13296) -> {'M', [49,55,26085]}; +uts46_map(13297) -> {'M', [49,56,26085]}; +uts46_map(13298) -> {'M', [49,57,26085]}; +uts46_map(13299) -> {'M', [50,48,26085]}; +uts46_map(13300) -> {'M', [50,49,26085]}; +uts46_map(13301) -> {'M', [50,50,26085]}; +uts46_map(13302) -> {'M', [50,51,26085]}; +uts46_map(13303) -> {'M', [50,52,26085]}; +uts46_map(13304) -> {'M', [50,53,26085]}; +uts46_map(13305) -> {'M', [50,54,26085]}; +uts46_map(13306) -> {'M', [50,55,26085]}; +uts46_map(13307) -> {'M', [50,56,26085]}; +uts46_map(13308) -> {'M', [50,57,26085]}; +uts46_map(13309) -> {'M', [51,48,26085]}; +uts46_map(13310) -> {'M', [51,49,26085]}; +uts46_map(13311) -> {'M', [103,97,108]}; +uts46_map(40908) -> 'V'; +uts46_map(42164) -> 'V'; +uts46_map(42177) -> 'V'; +uts46_map(42181) -> 'V'; +uts46_map(42182) -> 'V'; +uts46_map(42560) -> {'M', [42561]}; +uts46_map(42561) -> 'V'; +uts46_map(42562) -> {'M', [42563]}; +uts46_map(42563) -> 'V'; +uts46_map(42564) -> {'M', [42565]}; +uts46_map(42565) -> 'V'; +uts46_map(42566) -> {'M', [42567]}; +uts46_map(42567) -> 'V'; +uts46_map(42568) -> {'M', [42569]}; +uts46_map(42569) -> 'V'; +uts46_map(42570) -> {'M', [42571]}; +uts46_map(42571) -> 'V'; +uts46_map(42572) -> {'M', [42573]}; +uts46_map(42573) -> 'V'; +uts46_map(42574) -> {'M', [42575]}; +uts46_map(42575) -> 'V'; +uts46_map(42576) -> {'M', [42577]}; +uts46_map(42577) -> 'V'; +uts46_map(42578) -> {'M', [42579]}; +uts46_map(42579) -> 'V'; +uts46_map(42580) -> {'M', [42581]}; +uts46_map(42581) -> 'V'; +uts46_map(42582) -> {'M', [42583]}; +uts46_map(42583) -> 'V'; +uts46_map(42584) -> {'M', [42585]}; +uts46_map(42585) -> 'V'; +uts46_map(42586) -> {'M', [42587]}; +uts46_map(42587) -> 'V'; 
+uts46_map(42588) -> {'M', [42589]}; +uts46_map(42589) -> 'V'; +uts46_map(42590) -> {'M', [42591]}; +uts46_map(42591) -> 'V'; +uts46_map(42592) -> {'M', [42593]}; +uts46_map(42593) -> 'V'; +uts46_map(42594) -> {'M', [42595]}; +uts46_map(42595) -> 'V'; +uts46_map(42596) -> {'M', [42597]}; +uts46_map(42597) -> 'V'; +uts46_map(42598) -> {'M', [42599]}; +uts46_map(42599) -> 'V'; +uts46_map(42600) -> {'M', [42601]}; +uts46_map(42601) -> 'V'; +uts46_map(42602) -> {'M', [42603]}; +uts46_map(42603) -> 'V'; +uts46_map(42604) -> {'M', [42605]}; +uts46_map(42622) -> 'V'; +uts46_map(42623) -> 'V'; +uts46_map(42624) -> {'M', [42625]}; +uts46_map(42625) -> 'V'; +uts46_map(42626) -> {'M', [42627]}; +uts46_map(42627) -> 'V'; +uts46_map(42628) -> {'M', [42629]}; +uts46_map(42629) -> 'V'; +uts46_map(42630) -> {'M', [42631]}; +uts46_map(42631) -> 'V'; +uts46_map(42632) -> {'M', [42633]}; +uts46_map(42633) -> 'V'; +uts46_map(42634) -> {'M', [42635]}; +uts46_map(42635) -> 'V'; +uts46_map(42636) -> {'M', [42637]}; +uts46_map(42637) -> 'V'; +uts46_map(42638) -> {'M', [42639]}; +uts46_map(42639) -> 'V'; +uts46_map(42640) -> {'M', [42641]}; +uts46_map(42641) -> 'V'; +uts46_map(42642) -> {'M', [42643]}; +uts46_map(42643) -> 'V'; +uts46_map(42644) -> {'M', [42645]}; +uts46_map(42645) -> 'V'; +uts46_map(42646) -> {'M', [42647]}; +uts46_map(42647) -> 'V'; +uts46_map(42648) -> {'M', [42649]}; +uts46_map(42649) -> 'V'; +uts46_map(42650) -> {'M', [42651]}; +uts46_map(42651) -> 'V'; +uts46_map(42652) -> {'M', [1098]}; +uts46_map(42653) -> {'M', [1100]}; +uts46_map(42654) -> 'V'; +uts46_map(42655) -> 'V'; +uts46_map(42786) -> {'M', [42787]}; +uts46_map(42787) -> 'V'; +uts46_map(42788) -> {'M', [42789]}; +uts46_map(42789) -> 'V'; +uts46_map(42790) -> {'M', [42791]}; +uts46_map(42791) -> 'V'; +uts46_map(42792) -> {'M', [42793]}; +uts46_map(42793) -> 'V'; +uts46_map(42794) -> {'M', [42795]}; +uts46_map(42795) -> 'V'; +uts46_map(42796) -> {'M', [42797]}; +uts46_map(42797) -> 'V'; +uts46_map(42798) -> 
{'M', [42799]}; +uts46_map(42802) -> {'M', [42803]}; +uts46_map(42803) -> 'V'; +uts46_map(42804) -> {'M', [42805]}; +uts46_map(42805) -> 'V'; +uts46_map(42806) -> {'M', [42807]}; +uts46_map(42807) -> 'V'; +uts46_map(42808) -> {'M', [42809]}; +uts46_map(42809) -> 'V'; +uts46_map(42810) -> {'M', [42811]}; +uts46_map(42811) -> 'V'; +uts46_map(42812) -> {'M', [42813]}; +uts46_map(42813) -> 'V'; +uts46_map(42814) -> {'M', [42815]}; +uts46_map(42815) -> 'V'; +uts46_map(42816) -> {'M', [42817]}; +uts46_map(42817) -> 'V'; +uts46_map(42818) -> {'M', [42819]}; +uts46_map(42819) -> 'V'; +uts46_map(42820) -> {'M', [42821]}; +uts46_map(42821) -> 'V'; +uts46_map(42822) -> {'M', [42823]}; +uts46_map(42823) -> 'V'; +uts46_map(42824) -> {'M', [42825]}; +uts46_map(42825) -> 'V'; +uts46_map(42826) -> {'M', [42827]}; +uts46_map(42827) -> 'V'; +uts46_map(42828) -> {'M', [42829]}; +uts46_map(42829) -> 'V'; +uts46_map(42830) -> {'M', [42831]}; +uts46_map(42831) -> 'V'; +uts46_map(42832) -> {'M', [42833]}; +uts46_map(42833) -> 'V'; +uts46_map(42834) -> {'M', [42835]}; +uts46_map(42835) -> 'V'; +uts46_map(42836) -> {'M', [42837]}; +uts46_map(42837) -> 'V'; +uts46_map(42838) -> {'M', [42839]}; +uts46_map(42839) -> 'V'; +uts46_map(42840) -> {'M', [42841]}; +uts46_map(42841) -> 'V'; +uts46_map(42842) -> {'M', [42843]}; +uts46_map(42843) -> 'V'; +uts46_map(42844) -> {'M', [42845]}; +uts46_map(42845) -> 'V'; +uts46_map(42846) -> {'M', [42847]}; +uts46_map(42847) -> 'V'; +uts46_map(42848) -> {'M', [42849]}; +uts46_map(42849) -> 'V'; +uts46_map(42850) -> {'M', [42851]}; +uts46_map(42851) -> 'V'; +uts46_map(42852) -> {'M', [42853]}; +uts46_map(42853) -> 'V'; +uts46_map(42854) -> {'M', [42855]}; +uts46_map(42855) -> 'V'; +uts46_map(42856) -> {'M', [42857]}; +uts46_map(42857) -> 'V'; +uts46_map(42858) -> {'M', [42859]}; +uts46_map(42859) -> 'V'; +uts46_map(42860) -> {'M', [42861]}; +uts46_map(42861) -> 'V'; +uts46_map(42862) -> {'M', [42863]}; +uts46_map(42863) -> 'V'; +uts46_map(42864) -> {'M', 
[42863]}; +uts46_map(42873) -> {'M', [42874]}; +uts46_map(42874) -> 'V'; +uts46_map(42875) -> {'M', [42876]}; +uts46_map(42876) -> 'V'; +uts46_map(42877) -> {'M', [7545]}; +uts46_map(42878) -> {'M', [42879]}; +uts46_map(42879) -> 'V'; +uts46_map(42880) -> {'M', [42881]}; +uts46_map(42881) -> 'V'; +uts46_map(42882) -> {'M', [42883]}; +uts46_map(42883) -> 'V'; +uts46_map(42884) -> {'M', [42885]}; +uts46_map(42885) -> 'V'; +uts46_map(42886) -> {'M', [42887]}; +uts46_map(42891) -> {'M', [42892]}; +uts46_map(42892) -> 'V'; +uts46_map(42893) -> {'M', [613]}; +uts46_map(42894) -> 'V'; +uts46_map(42895) -> 'V'; +uts46_map(42896) -> {'M', [42897]}; +uts46_map(42897) -> 'V'; +uts46_map(42898) -> {'M', [42899]}; +uts46_map(42899) -> 'V'; +uts46_map(42902) -> {'M', [42903]}; +uts46_map(42903) -> 'V'; +uts46_map(42904) -> {'M', [42905]}; +uts46_map(42905) -> 'V'; +uts46_map(42906) -> {'M', [42907]}; +uts46_map(42907) -> 'V'; +uts46_map(42908) -> {'M', [42909]}; +uts46_map(42909) -> 'V'; +uts46_map(42910) -> {'M', [42911]}; +uts46_map(42911) -> 'V'; +uts46_map(42912) -> {'M', [42913]}; +uts46_map(42913) -> 'V'; +uts46_map(42914) -> {'M', [42915]}; +uts46_map(42915) -> 'V'; +uts46_map(42916) -> {'M', [42917]}; +uts46_map(42917) -> 'V'; +uts46_map(42918) -> {'M', [42919]}; +uts46_map(42919) -> 'V'; +uts46_map(42920) -> {'M', [42921]}; +uts46_map(42921) -> 'V'; +uts46_map(42922) -> {'M', [614]}; +uts46_map(42923) -> {'M', [604]}; +uts46_map(42924) -> {'M', [609]}; +uts46_map(42925) -> {'M', [620]}; +uts46_map(42926) -> {'M', [618]}; +uts46_map(42927) -> 'V'; +uts46_map(42928) -> {'M', [670]}; +uts46_map(42929) -> {'M', [647]}; +uts46_map(42930) -> {'M', [669]}; +uts46_map(42931) -> {'M', [43859]}; +uts46_map(42932) -> {'M', [42933]}; +uts46_map(42933) -> 'V'; +uts46_map(42934) -> {'M', [42935]}; +uts46_map(42935) -> 'V'; +uts46_map(42936) -> {'M', [42937]}; +uts46_map(42937) -> 'V'; +uts46_map(42938) -> {'M', [42939]}; +uts46_map(42939) -> 'V'; +uts46_map(42940) -> {'M', [42941]}; 
+uts46_map(42941) -> 'V'; +uts46_map(42942) -> {'M', [42943]}; +uts46_map(42943) -> 'V'; +uts46_map(42946) -> {'M', [42947]}; +uts46_map(42947) -> 'V'; +uts46_map(42948) -> {'M', [42900]}; +uts46_map(42949) -> {'M', [642]}; +uts46_map(42950) -> {'M', [7566]}; +uts46_map(42951) -> {'M', [42952]}; +uts46_map(42952) -> 'V'; +uts46_map(42953) -> {'M', [42954]}; +uts46_map(42954) -> 'V'; +uts46_map(42997) -> {'M', [42998]}; +uts46_map(42998) -> 'V'; +uts46_map(42999) -> 'V'; +uts46_map(43000) -> {'M', [295]}; +uts46_map(43001) -> {'M', [339]}; +uts46_map(43002) -> 'V'; +uts46_map(43052) -> 'V'; +uts46_map(43205) -> 'V'; +uts46_map(43259) -> 'V'; +uts46_map(43260) -> 'V'; +uts46_map(43261) -> 'V'; +uts46_map(43359) -> 'V'; +uts46_map(43470) -> 'X'; +uts46_map(43519) -> 'X'; +uts46_map(43815) -> 'X'; +uts46_map(43823) -> 'X'; +uts46_map(43867) -> 'V'; +uts46_map(43868) -> {'M', [42791]}; +uts46_map(43869) -> {'M', [43831]}; +uts46_map(43870) -> {'M', [619]}; +uts46_map(43871) -> {'M', [43858]}; +uts46_map(43880) -> 'V'; +uts46_map(43881) -> {'M', [653]}; +uts46_map(43888) -> {'M', [5024]}; +uts46_map(43889) -> {'M', [5025]}; +uts46_map(43890) -> {'M', [5026]}; +uts46_map(43891) -> {'M', [5027]}; +uts46_map(43892) -> {'M', [5028]}; +uts46_map(43893) -> {'M', [5029]}; +uts46_map(43894) -> {'M', [5030]}; +uts46_map(43895) -> {'M', [5031]}; +uts46_map(43896) -> {'M', [5032]}; +uts46_map(43897) -> {'M', [5033]}; +uts46_map(43898) -> {'M', [5034]}; +uts46_map(43899) -> {'M', [5035]}; +uts46_map(43900) -> {'M', [5036]}; +uts46_map(43901) -> {'M', [5037]}; +uts46_map(43902) -> {'M', [5038]}; +uts46_map(43903) -> {'M', [5039]}; +uts46_map(43904) -> {'M', [5040]}; +uts46_map(43905) -> {'M', [5041]}; +uts46_map(43906) -> {'M', [5042]}; +uts46_map(43907) -> {'M', [5043]}; +uts46_map(43908) -> {'M', [5044]}; +uts46_map(43909) -> {'M', [5045]}; +uts46_map(43910) -> {'M', [5046]}; +uts46_map(43911) -> {'M', [5047]}; +uts46_map(43912) -> {'M', [5048]}; +uts46_map(43913) -> {'M', [5049]}; 
+uts46_map(43914) -> {'M', [5050]}; +uts46_map(43915) -> {'M', [5051]}; +uts46_map(43916) -> {'M', [5052]}; +uts46_map(43917) -> {'M', [5053]}; +uts46_map(43918) -> {'M', [5054]}; +uts46_map(43919) -> {'M', [5055]}; +uts46_map(43920) -> {'M', [5056]}; +uts46_map(43921) -> {'M', [5057]}; +uts46_map(43922) -> {'M', [5058]}; +uts46_map(43923) -> {'M', [5059]}; +uts46_map(43924) -> {'M', [5060]}; +uts46_map(43925) -> {'M', [5061]}; +uts46_map(43926) -> {'M', [5062]}; +uts46_map(43927) -> {'M', [5063]}; +uts46_map(43928) -> {'M', [5064]}; +uts46_map(43929) -> {'M', [5065]}; +uts46_map(43930) -> {'M', [5066]}; +uts46_map(43931) -> {'M', [5067]}; +uts46_map(43932) -> {'M', [5068]}; +uts46_map(43933) -> {'M', [5069]}; +uts46_map(43934) -> {'M', [5070]}; +uts46_map(43935) -> {'M', [5071]}; +uts46_map(43936) -> {'M', [5072]}; +uts46_map(43937) -> {'M', [5073]}; +uts46_map(43938) -> {'M', [5074]}; +uts46_map(43939) -> {'M', [5075]}; +uts46_map(43940) -> {'M', [5076]}; +uts46_map(43941) -> {'M', [5077]}; +uts46_map(43942) -> {'M', [5078]}; +uts46_map(43943) -> {'M', [5079]}; +uts46_map(43944) -> {'M', [5080]}; +uts46_map(43945) -> {'M', [5081]}; +uts46_map(43946) -> {'M', [5082]}; +uts46_map(43947) -> {'M', [5083]}; +uts46_map(43948) -> {'M', [5084]}; +uts46_map(43949) -> {'M', [5085]}; +uts46_map(43950) -> {'M', [5086]}; +uts46_map(43951) -> {'M', [5087]}; +uts46_map(43952) -> {'M', [5088]}; +uts46_map(43953) -> {'M', [5089]}; +uts46_map(43954) -> {'M', [5090]}; +uts46_map(43955) -> {'M', [5091]}; +uts46_map(43956) -> {'M', [5092]}; +uts46_map(43957) -> {'M', [5093]}; +uts46_map(43958) -> {'M', [5094]}; +uts46_map(43959) -> {'M', [5095]}; +uts46_map(43960) -> {'M', [5096]}; +uts46_map(43961) -> {'M', [5097]}; +uts46_map(43962) -> {'M', [5098]}; +uts46_map(43963) -> {'M', [5099]}; +uts46_map(43964) -> {'M', [5100]}; +uts46_map(43965) -> {'M', [5101]}; +uts46_map(43966) -> {'M', [5102]}; +uts46_map(43967) -> {'M', [5103]}; +uts46_map(44011) -> 'V'; +uts46_map(63744) -> {'M', 
[35912]}; +uts46_map(63745) -> {'M', [26356]}; +uts46_map(63746) -> {'M', [36554]}; +uts46_map(63747) -> {'M', [36040]}; +uts46_map(63748) -> {'M', [28369]}; +uts46_map(63749) -> {'M', [20018]}; +uts46_map(63750) -> {'M', [21477]}; +uts46_map(63753) -> {'M', [22865]}; +uts46_map(63754) -> {'M', [37329]}; +uts46_map(63755) -> {'M', [21895]}; +uts46_map(63756) -> {'M', [22856]}; +uts46_map(63757) -> {'M', [25078]}; +uts46_map(63758) -> {'M', [30313]}; +uts46_map(63759) -> {'M', [32645]}; +uts46_map(63760) -> {'M', [34367]}; +uts46_map(63761) -> {'M', [34746]}; +uts46_map(63762) -> {'M', [35064]}; +uts46_map(63763) -> {'M', [37007]}; +uts46_map(63764) -> {'M', [27138]}; +uts46_map(63765) -> {'M', [27931]}; +uts46_map(63766) -> {'M', [28889]}; +uts46_map(63767) -> {'M', [29662]}; +uts46_map(63768) -> {'M', [33853]}; +uts46_map(63769) -> {'M', [37226]}; +uts46_map(63770) -> {'M', [39409]}; +uts46_map(63771) -> {'M', [20098]}; +uts46_map(63772) -> {'M', [21365]}; +uts46_map(63773) -> {'M', [27396]}; +uts46_map(63774) -> {'M', [29211]}; +uts46_map(63775) -> {'M', [34349]}; +uts46_map(63776) -> {'M', [40478]}; +uts46_map(63777) -> {'M', [23888]}; +uts46_map(63778) -> {'M', [28651]}; +uts46_map(63779) -> {'M', [34253]}; +uts46_map(63780) -> {'M', [35172]}; +uts46_map(63781) -> {'M', [25289]}; +uts46_map(63782) -> {'M', [33240]}; +uts46_map(63783) -> {'M', [34847]}; +uts46_map(63784) -> {'M', [24266]}; +uts46_map(63785) -> {'M', [26391]}; +uts46_map(63786) -> {'M', [28010]}; +uts46_map(63787) -> {'M', [29436]}; +uts46_map(63788) -> {'M', [37070]}; +uts46_map(63789) -> {'M', [20358]}; +uts46_map(63790) -> {'M', [20919]}; +uts46_map(63791) -> {'M', [21214]}; +uts46_map(63792) -> {'M', [25796]}; +uts46_map(63793) -> {'M', [27347]}; +uts46_map(63794) -> {'M', [29200]}; +uts46_map(63795) -> {'M', [30439]}; +uts46_map(63796) -> {'M', [32769]}; +uts46_map(63797) -> {'M', [34310]}; +uts46_map(63798) -> {'M', [34396]}; +uts46_map(63799) -> {'M', [36335]}; +uts46_map(63800) -> {'M', 
[38706]}; +uts46_map(63801) -> {'M', [39791]}; +uts46_map(63802) -> {'M', [40442]}; +uts46_map(63803) -> {'M', [30860]}; +uts46_map(63804) -> {'M', [31103]}; +uts46_map(63805) -> {'M', [32160]}; +uts46_map(63806) -> {'M', [33737]}; +uts46_map(63807) -> {'M', [37636]}; +uts46_map(63808) -> {'M', [40575]}; +uts46_map(63809) -> {'M', [35542]}; +uts46_map(63810) -> {'M', [22751]}; +uts46_map(63811) -> {'M', [24324]}; +uts46_map(63812) -> {'M', [31840]}; +uts46_map(63813) -> {'M', [32894]}; +uts46_map(63814) -> {'M', [29282]}; +uts46_map(63815) -> {'M', [30922]}; +uts46_map(63816) -> {'M', [36034]}; +uts46_map(63817) -> {'M', [38647]}; +uts46_map(63818) -> {'M', [22744]}; +uts46_map(63819) -> {'M', [23650]}; +uts46_map(63820) -> {'M', [27155]}; +uts46_map(63821) -> {'M', [28122]}; +uts46_map(63822) -> {'M', [28431]}; +uts46_map(63823) -> {'M', [32047]}; +uts46_map(63824) -> {'M', [32311]}; +uts46_map(63825) -> {'M', [38475]}; +uts46_map(63826) -> {'M', [21202]}; +uts46_map(63827) -> {'M', [32907]}; +uts46_map(63828) -> {'M', [20956]}; +uts46_map(63829) -> {'M', [20940]}; +uts46_map(63830) -> {'M', [31260]}; +uts46_map(63831) -> {'M', [32190]}; +uts46_map(63832) -> {'M', [33777]}; +uts46_map(63833) -> {'M', [38517]}; +uts46_map(63834) -> {'M', [35712]}; +uts46_map(63835) -> {'M', [25295]}; +uts46_map(63836) -> {'M', [27138]}; +uts46_map(63837) -> {'M', [35582]}; +uts46_map(63838) -> {'M', [20025]}; +uts46_map(63839) -> {'M', [23527]}; +uts46_map(63840) -> {'M', [24594]}; +uts46_map(63841) -> {'M', [29575]}; +uts46_map(63842) -> {'M', [30064]}; +uts46_map(63843) -> {'M', [21271]}; +uts46_map(63844) -> {'M', [30971]}; +uts46_map(63845) -> {'M', [20415]}; +uts46_map(63846) -> {'M', [24489]}; +uts46_map(63847) -> {'M', [19981]}; +uts46_map(63848) -> {'M', [27852]}; +uts46_map(63849) -> {'M', [25976]}; +uts46_map(63850) -> {'M', [32034]}; +uts46_map(63851) -> {'M', [21443]}; +uts46_map(63852) -> {'M', [22622]}; +uts46_map(63853) -> {'M', [30465]}; +uts46_map(63854) -> {'M', 
[33865]}; +uts46_map(63855) -> {'M', [35498]}; +uts46_map(63856) -> {'M', [27578]}; +uts46_map(63857) -> {'M', [36784]}; +uts46_map(63858) -> {'M', [27784]}; +uts46_map(63859) -> {'M', [25342]}; +uts46_map(63860) -> {'M', [33509]}; +uts46_map(63861) -> {'M', [25504]}; +uts46_map(63862) -> {'M', [30053]}; +uts46_map(63863) -> {'M', [20142]}; +uts46_map(63864) -> {'M', [20841]}; +uts46_map(63865) -> {'M', [20937]}; +uts46_map(63866) -> {'M', [26753]}; +uts46_map(63867) -> {'M', [31975]}; +uts46_map(63868) -> {'M', [33391]}; +uts46_map(63869) -> {'M', [35538]}; +uts46_map(63870) -> {'M', [37327]}; +uts46_map(63871) -> {'M', [21237]}; +uts46_map(63872) -> {'M', [21570]}; +uts46_map(63873) -> {'M', [22899]}; +uts46_map(63874) -> {'M', [24300]}; +uts46_map(63875) -> {'M', [26053]}; +uts46_map(63876) -> {'M', [28670]}; +uts46_map(63877) -> {'M', [31018]}; +uts46_map(63878) -> {'M', [38317]}; +uts46_map(63879) -> {'M', [39530]}; +uts46_map(63880) -> {'M', [40599]}; +uts46_map(63881) -> {'M', [40654]}; +uts46_map(63882) -> {'M', [21147]}; +uts46_map(63883) -> {'M', [26310]}; +uts46_map(63884) -> {'M', [27511]}; +uts46_map(63885) -> {'M', [36706]}; +uts46_map(63886) -> {'M', [24180]}; +uts46_map(63887) -> {'M', [24976]}; +uts46_map(63888) -> {'M', [25088]}; +uts46_map(63889) -> {'M', [25754]}; +uts46_map(63890) -> {'M', [28451]}; +uts46_map(63891) -> {'M', [29001]}; +uts46_map(63892) -> {'M', [29833]}; +uts46_map(63893) -> {'M', [31178]}; +uts46_map(63894) -> {'M', [32244]}; +uts46_map(63895) -> {'M', [32879]}; +uts46_map(63896) -> {'M', [36646]}; +uts46_map(63897) -> {'M', [34030]}; +uts46_map(63898) -> {'M', [36899]}; +uts46_map(63899) -> {'M', [37706]}; +uts46_map(63900) -> {'M', [21015]}; +uts46_map(63901) -> {'M', [21155]}; +uts46_map(63902) -> {'M', [21693]}; +uts46_map(63903) -> {'M', [28872]}; +uts46_map(63904) -> {'M', [35010]}; +uts46_map(63905) -> {'M', [35498]}; +uts46_map(63906) -> {'M', [24265]}; +uts46_map(63907) -> {'M', [24565]}; +uts46_map(63908) -> {'M', 
[25467]}; +uts46_map(63909) -> {'M', [27566]}; +uts46_map(63910) -> {'M', [31806]}; +uts46_map(63911) -> {'M', [29557]}; +uts46_map(63912) -> {'M', [20196]}; +uts46_map(63913) -> {'M', [22265]}; +uts46_map(63914) -> {'M', [23527]}; +uts46_map(63915) -> {'M', [23994]}; +uts46_map(63916) -> {'M', [24604]}; +uts46_map(63917) -> {'M', [29618]}; +uts46_map(63918) -> {'M', [29801]}; +uts46_map(63919) -> {'M', [32666]}; +uts46_map(63920) -> {'M', [32838]}; +uts46_map(63921) -> {'M', [37428]}; +uts46_map(63922) -> {'M', [38646]}; +uts46_map(63923) -> {'M', [38728]}; +uts46_map(63924) -> {'M', [38936]}; +uts46_map(63925) -> {'M', [20363]}; +uts46_map(63926) -> {'M', [31150]}; +uts46_map(63927) -> {'M', [37300]}; +uts46_map(63928) -> {'M', [38584]}; +uts46_map(63929) -> {'M', [24801]}; +uts46_map(63930) -> {'M', [20102]}; +uts46_map(63931) -> {'M', [20698]}; +uts46_map(63932) -> {'M', [23534]}; +uts46_map(63933) -> {'M', [23615]}; +uts46_map(63934) -> {'M', [26009]}; +uts46_map(63935) -> {'M', [27138]}; +uts46_map(63936) -> {'M', [29134]}; +uts46_map(63937) -> {'M', [30274]}; +uts46_map(63938) -> {'M', [34044]}; +uts46_map(63939) -> {'M', [36988]}; +uts46_map(63940) -> {'M', [40845]}; +uts46_map(63941) -> {'M', [26248]}; +uts46_map(63942) -> {'M', [38446]}; +uts46_map(63943) -> {'M', [21129]}; +uts46_map(63944) -> {'M', [26491]}; +uts46_map(63945) -> {'M', [26611]}; +uts46_map(63946) -> {'M', [27969]}; +uts46_map(63947) -> {'M', [28316]}; +uts46_map(63948) -> {'M', [29705]}; +uts46_map(63949) -> {'M', [30041]}; +uts46_map(63950) -> {'M', [30827]}; +uts46_map(63951) -> {'M', [32016]}; +uts46_map(63952) -> {'M', [39006]}; +uts46_map(63953) -> {'M', [20845]}; +uts46_map(63954) -> {'M', [25134]}; +uts46_map(63955) -> {'M', [38520]}; +uts46_map(63956) -> {'M', [20523]}; +uts46_map(63957) -> {'M', [23833]}; +uts46_map(63958) -> {'M', [28138]}; +uts46_map(63959) -> {'M', [36650]}; +uts46_map(63960) -> {'M', [24459]}; +uts46_map(63961) -> {'M', [24900]}; +uts46_map(63962) -> {'M', 
[26647]}; +uts46_map(63963) -> {'M', [29575]}; +uts46_map(63964) -> {'M', [38534]}; +uts46_map(63965) -> {'M', [21033]}; +uts46_map(63966) -> {'M', [21519]}; +uts46_map(63967) -> {'M', [23653]}; +uts46_map(63968) -> {'M', [26131]}; +uts46_map(63969) -> {'M', [26446]}; +uts46_map(63970) -> {'M', [26792]}; +uts46_map(63971) -> {'M', [27877]}; +uts46_map(63972) -> {'M', [29702]}; +uts46_map(63973) -> {'M', [30178]}; +uts46_map(63974) -> {'M', [32633]}; +uts46_map(63975) -> {'M', [35023]}; +uts46_map(63976) -> {'M', [35041]}; +uts46_map(63977) -> {'M', [37324]}; +uts46_map(63978) -> {'M', [38626]}; +uts46_map(63979) -> {'M', [21311]}; +uts46_map(63980) -> {'M', [28346]}; +uts46_map(63981) -> {'M', [21533]}; +uts46_map(63982) -> {'M', [29136]}; +uts46_map(63983) -> {'M', [29848]}; +uts46_map(63984) -> {'M', [34298]}; +uts46_map(63985) -> {'M', [38563]}; +uts46_map(63986) -> {'M', [40023]}; +uts46_map(63987) -> {'M', [40607]}; +uts46_map(63988) -> {'M', [26519]}; +uts46_map(63989) -> {'M', [28107]}; +uts46_map(63990) -> {'M', [33256]}; +uts46_map(63991) -> {'M', [31435]}; +uts46_map(63992) -> {'M', [31520]}; +uts46_map(63993) -> {'M', [31890]}; +uts46_map(63994) -> {'M', [29376]}; +uts46_map(63995) -> {'M', [28825]}; +uts46_map(63996) -> {'M', [35672]}; +uts46_map(63997) -> {'M', [20160]}; +uts46_map(63998) -> {'M', [33590]}; +uts46_map(63999) -> {'M', [21050]}; +uts46_map(64000) -> {'M', [20999]}; +uts46_map(64001) -> {'M', [24230]}; +uts46_map(64002) -> {'M', [25299]}; +uts46_map(64003) -> {'M', [31958]}; +uts46_map(64004) -> {'M', [23429]}; +uts46_map(64005) -> {'M', [27934]}; +uts46_map(64006) -> {'M', [26292]}; +uts46_map(64007) -> {'M', [36667]}; +uts46_map(64008) -> {'M', [34892]}; +uts46_map(64009) -> {'M', [38477]}; +uts46_map(64010) -> {'M', [35211]}; +uts46_map(64011) -> {'M', [24275]}; +uts46_map(64012) -> {'M', [20800]}; +uts46_map(64013) -> {'M', [21952]}; +uts46_map(64016) -> {'M', [22618]}; +uts46_map(64017) -> 'V'; +uts46_map(64018) -> {'M', [26228]}; 
+uts46_map(64021) -> {'M', [20958]}; +uts46_map(64022) -> {'M', [29482]}; +uts46_map(64023) -> {'M', [30410]}; +uts46_map(64024) -> {'M', [31036]}; +uts46_map(64025) -> {'M', [31070]}; +uts46_map(64026) -> {'M', [31077]}; +uts46_map(64027) -> {'M', [31119]}; +uts46_map(64028) -> {'M', [38742]}; +uts46_map(64029) -> {'M', [31934]}; +uts46_map(64030) -> {'M', [32701]}; +uts46_map(64031) -> 'V'; +uts46_map(64032) -> {'M', [34322]}; +uts46_map(64033) -> 'V'; +uts46_map(64034) -> {'M', [35576]}; +uts46_map(64037) -> {'M', [36920]}; +uts46_map(64038) -> {'M', [37117]}; +uts46_map(64042) -> {'M', [39151]}; +uts46_map(64043) -> {'M', [39164]}; +uts46_map(64044) -> {'M', [39208]}; +uts46_map(64045) -> {'M', [40372]}; +uts46_map(64046) -> {'M', [37086]}; +uts46_map(64047) -> {'M', [38583]}; +uts46_map(64048) -> {'M', [20398]}; +uts46_map(64049) -> {'M', [20711]}; +uts46_map(64050) -> {'M', [20813]}; +uts46_map(64051) -> {'M', [21193]}; +uts46_map(64052) -> {'M', [21220]}; +uts46_map(64053) -> {'M', [21329]}; +uts46_map(64054) -> {'M', [21917]}; +uts46_map(64055) -> {'M', [22022]}; +uts46_map(64056) -> {'M', [22120]}; +uts46_map(64057) -> {'M', [22592]}; +uts46_map(64058) -> {'M', [22696]}; +uts46_map(64059) -> {'M', [23652]}; +uts46_map(64060) -> {'M', [23662]}; +uts46_map(64061) -> {'M', [24724]}; +uts46_map(64062) -> {'M', [24936]}; +uts46_map(64063) -> {'M', [24974]}; +uts46_map(64064) -> {'M', [25074]}; +uts46_map(64065) -> {'M', [25935]}; +uts46_map(64066) -> {'M', [26082]}; +uts46_map(64067) -> {'M', [26257]}; +uts46_map(64068) -> {'M', [26757]}; +uts46_map(64069) -> {'M', [28023]}; +uts46_map(64070) -> {'M', [28186]}; +uts46_map(64071) -> {'M', [28450]}; +uts46_map(64072) -> {'M', [29038]}; +uts46_map(64073) -> {'M', [29227]}; +uts46_map(64074) -> {'M', [29730]}; +uts46_map(64075) -> {'M', [30865]}; +uts46_map(64076) -> {'M', [31038]}; +uts46_map(64077) -> {'M', [31049]}; +uts46_map(64078) -> {'M', [31048]}; +uts46_map(64079) -> {'M', [31056]}; +uts46_map(64080) -> 
{'M', [31062]}; +uts46_map(64081) -> {'M', [31069]}; +uts46_map(64082) -> {'M', [31117]}; +uts46_map(64083) -> {'M', [31118]}; +uts46_map(64084) -> {'M', [31296]}; +uts46_map(64085) -> {'M', [31361]}; +uts46_map(64086) -> {'M', [31680]}; +uts46_map(64087) -> {'M', [32244]}; +uts46_map(64088) -> {'M', [32265]}; +uts46_map(64089) -> {'M', [32321]}; +uts46_map(64090) -> {'M', [32626]}; +uts46_map(64091) -> {'M', [32773]}; +uts46_map(64092) -> {'M', [33261]}; +uts46_map(64095) -> {'M', [33879]}; +uts46_map(64096) -> {'M', [35088]}; +uts46_map(64097) -> {'M', [35222]}; +uts46_map(64098) -> {'M', [35585]}; +uts46_map(64099) -> {'M', [35641]}; +uts46_map(64100) -> {'M', [36051]}; +uts46_map(64101) -> {'M', [36104]}; +uts46_map(64102) -> {'M', [36790]}; +uts46_map(64103) -> {'M', [36920]}; +uts46_map(64104) -> {'M', [38627]}; +uts46_map(64105) -> {'M', [38911]}; +uts46_map(64106) -> {'M', [38971]}; +uts46_map(64107) -> {'M', [24693]}; +uts46_map(64108) -> {'M', [148206]}; +uts46_map(64109) -> {'M', [33304]}; +uts46_map(64112) -> {'M', [20006]}; +uts46_map(64113) -> {'M', [20917]}; +uts46_map(64114) -> {'M', [20840]}; +uts46_map(64115) -> {'M', [20352]}; +uts46_map(64116) -> {'M', [20805]}; +uts46_map(64117) -> {'M', [20864]}; +uts46_map(64118) -> {'M', [21191]}; +uts46_map(64119) -> {'M', [21242]}; +uts46_map(64120) -> {'M', [21917]}; +uts46_map(64121) -> {'M', [21845]}; +uts46_map(64122) -> {'M', [21913]}; +uts46_map(64123) -> {'M', [21986]}; +uts46_map(64124) -> {'M', [22618]}; +uts46_map(64125) -> {'M', [22707]}; +uts46_map(64126) -> {'M', [22852]}; +uts46_map(64127) -> {'M', [22868]}; +uts46_map(64128) -> {'M', [23138]}; +uts46_map(64129) -> {'M', [23336]}; +uts46_map(64130) -> {'M', [24274]}; +uts46_map(64131) -> {'M', [24281]}; +uts46_map(64132) -> {'M', [24425]}; +uts46_map(64133) -> {'M', [24493]}; +uts46_map(64134) -> {'M', [24792]}; +uts46_map(64135) -> {'M', [24910]}; +uts46_map(64136) -> {'M', [24840]}; +uts46_map(64137) -> {'M', [24974]}; +uts46_map(64138) -> 
{'M', [24928]}; +uts46_map(64139) -> {'M', [25074]}; +uts46_map(64140) -> {'M', [25140]}; +uts46_map(64141) -> {'M', [25540]}; +uts46_map(64142) -> {'M', [25628]}; +uts46_map(64143) -> {'M', [25682]}; +uts46_map(64144) -> {'M', [25942]}; +uts46_map(64145) -> {'M', [26228]}; +uts46_map(64146) -> {'M', [26391]}; +uts46_map(64147) -> {'M', [26395]}; +uts46_map(64148) -> {'M', [26454]}; +uts46_map(64149) -> {'M', [27513]}; +uts46_map(64150) -> {'M', [27578]}; +uts46_map(64151) -> {'M', [27969]}; +uts46_map(64152) -> {'M', [28379]}; +uts46_map(64153) -> {'M', [28363]}; +uts46_map(64154) -> {'M', [28450]}; +uts46_map(64155) -> {'M', [28702]}; +uts46_map(64156) -> {'M', [29038]}; +uts46_map(64157) -> {'M', [30631]}; +uts46_map(64158) -> {'M', [29237]}; +uts46_map(64159) -> {'M', [29359]}; +uts46_map(64160) -> {'M', [29482]}; +uts46_map(64161) -> {'M', [29809]}; +uts46_map(64162) -> {'M', [29958]}; +uts46_map(64163) -> {'M', [30011]}; +uts46_map(64164) -> {'M', [30237]}; +uts46_map(64165) -> {'M', [30239]}; +uts46_map(64166) -> {'M', [30410]}; +uts46_map(64167) -> {'M', [30427]}; +uts46_map(64168) -> {'M', [30452]}; +uts46_map(64169) -> {'M', [30538]}; +uts46_map(64170) -> {'M', [30528]}; +uts46_map(64171) -> {'M', [30924]}; +uts46_map(64172) -> {'M', [31409]}; +uts46_map(64173) -> {'M', [31680]}; +uts46_map(64174) -> {'M', [31867]}; +uts46_map(64175) -> {'M', [32091]}; +uts46_map(64176) -> {'M', [32244]}; +uts46_map(64177) -> {'M', [32574]}; +uts46_map(64178) -> {'M', [32773]}; +uts46_map(64179) -> {'M', [33618]}; +uts46_map(64180) -> {'M', [33775]}; +uts46_map(64181) -> {'M', [34681]}; +uts46_map(64182) -> {'M', [35137]}; +uts46_map(64183) -> {'M', [35206]}; +uts46_map(64184) -> {'M', [35222]}; +uts46_map(64185) -> {'M', [35519]}; +uts46_map(64186) -> {'M', [35576]}; +uts46_map(64187) -> {'M', [35531]}; +uts46_map(64188) -> {'M', [35585]}; +uts46_map(64189) -> {'M', [35582]}; +uts46_map(64190) -> {'M', [35565]}; +uts46_map(64191) -> {'M', [35641]}; +uts46_map(64192) -> 
{'M', [35722]}; +uts46_map(64193) -> {'M', [36104]}; +uts46_map(64194) -> {'M', [36664]}; +uts46_map(64195) -> {'M', [36978]}; +uts46_map(64196) -> {'M', [37273]}; +uts46_map(64197) -> {'M', [37494]}; +uts46_map(64198) -> {'M', [38524]}; +uts46_map(64199) -> {'M', [38627]}; +uts46_map(64200) -> {'M', [38742]}; +uts46_map(64201) -> {'M', [38875]}; +uts46_map(64202) -> {'M', [38911]}; +uts46_map(64203) -> {'M', [38923]}; +uts46_map(64204) -> {'M', [38971]}; +uts46_map(64205) -> {'M', [39698]}; +uts46_map(64206) -> {'M', [40860]}; +uts46_map(64207) -> {'M', [141386]}; +uts46_map(64208) -> {'M', [141380]}; +uts46_map(64209) -> {'M', [144341]}; +uts46_map(64210) -> {'M', [15261]}; +uts46_map(64211) -> {'M', [16408]}; +uts46_map(64212) -> {'M', [16441]}; +uts46_map(64213) -> {'M', [152137]}; +uts46_map(64214) -> {'M', [154832]}; +uts46_map(64215) -> {'M', [163539]}; +uts46_map(64216) -> {'M', [40771]}; +uts46_map(64217) -> {'M', [40846]}; +uts46_map(64256) -> {'M', [102,102]}; +uts46_map(64257) -> {'M', [102,105]}; +uts46_map(64258) -> {'M', [102,108]}; +uts46_map(64259) -> {'M', [102,102,105]}; +uts46_map(64260) -> {'M', [102,102,108]}; +uts46_map(64275) -> {'M', [1396,1398]}; +uts46_map(64276) -> {'M', [1396,1381]}; +uts46_map(64277) -> {'M', [1396,1387]}; +uts46_map(64278) -> {'M', [1406,1398]}; +uts46_map(64279) -> {'M', [1396,1389]}; +uts46_map(64285) -> {'M', [1497,1460]}; +uts46_map(64286) -> 'V'; +uts46_map(64287) -> {'M', [1522,1463]}; +uts46_map(64288) -> {'M', [1506]}; +uts46_map(64289) -> {'M', [1488]}; +uts46_map(64290) -> {'M', [1491]}; +uts46_map(64291) -> {'M', [1492]}; +uts46_map(64292) -> {'M', [1499]}; +uts46_map(64293) -> {'M', [1500]}; +uts46_map(64294) -> {'M', [1501]}; +uts46_map(64295) -> {'M', [1512]}; +uts46_map(64296) -> {'M', [1514]}; +uts46_map(64297) -> {'3', [43]}; +uts46_map(64298) -> {'M', [1513,1473]}; +uts46_map(64299) -> {'M', [1513,1474]}; +uts46_map(64300) -> {'M', [1513,1468,1473]}; +uts46_map(64301) -> {'M', [1513,1468,1474]}; 
+uts46_map(64302) -> {'M', [1488,1463]}; +uts46_map(64303) -> {'M', [1488,1464]}; +uts46_map(64304) -> {'M', [1488,1468]}; +uts46_map(64305) -> {'M', [1489,1468]}; +uts46_map(64306) -> {'M', [1490,1468]}; +uts46_map(64307) -> {'M', [1491,1468]}; +uts46_map(64308) -> {'M', [1492,1468]}; +uts46_map(64309) -> {'M', [1493,1468]}; +uts46_map(64310) -> {'M', [1494,1468]}; +uts46_map(64311) -> 'X'; +uts46_map(64312) -> {'M', [1496,1468]}; +uts46_map(64313) -> {'M', [1497,1468]}; +uts46_map(64314) -> {'M', [1498,1468]}; +uts46_map(64315) -> {'M', [1499,1468]}; +uts46_map(64316) -> {'M', [1500,1468]}; +uts46_map(64317) -> 'X'; +uts46_map(64318) -> {'M', [1502,1468]}; +uts46_map(64319) -> 'X'; +uts46_map(64320) -> {'M', [1504,1468]}; +uts46_map(64321) -> {'M', [1505,1468]}; +uts46_map(64322) -> 'X'; +uts46_map(64323) -> {'M', [1507,1468]}; +uts46_map(64324) -> {'M', [1508,1468]}; +uts46_map(64325) -> 'X'; +uts46_map(64326) -> {'M', [1510,1468]}; +uts46_map(64327) -> {'M', [1511,1468]}; +uts46_map(64328) -> {'M', [1512,1468]}; +uts46_map(64329) -> {'M', [1513,1468]}; +uts46_map(64330) -> {'M', [1514,1468]}; +uts46_map(64331) -> {'M', [1493,1465]}; +uts46_map(64332) -> {'M', [1489,1471]}; +uts46_map(64333) -> {'M', [1499,1471]}; +uts46_map(64334) -> {'M', [1508,1471]}; +uts46_map(64335) -> {'M', [1488,1500]}; +uts46_map(64477) -> {'M', [1735,1652]}; +uts46_map(64512) -> {'M', [1574,1580]}; +uts46_map(64513) -> {'M', [1574,1581]}; +uts46_map(64514) -> {'M', [1574,1605]}; +uts46_map(64515) -> {'M', [1574,1609]}; +uts46_map(64516) -> {'M', [1574,1610]}; +uts46_map(64517) -> {'M', [1576,1580]}; +uts46_map(64518) -> {'M', [1576,1581]}; +uts46_map(64519) -> {'M', [1576,1582]}; +uts46_map(64520) -> {'M', [1576,1605]}; +uts46_map(64521) -> {'M', [1576,1609]}; +uts46_map(64522) -> {'M', [1576,1610]}; +uts46_map(64523) -> {'M', [1578,1580]}; +uts46_map(64524) -> {'M', [1578,1581]}; +uts46_map(64525) -> {'M', [1578,1582]}; +uts46_map(64526) -> {'M', [1578,1605]}; +uts46_map(64527) -> 
{'M', [1578,1609]}; +uts46_map(64528) -> {'M', [1578,1610]}; +uts46_map(64529) -> {'M', [1579,1580]}; +uts46_map(64530) -> {'M', [1579,1605]}; +uts46_map(64531) -> {'M', [1579,1609]}; +uts46_map(64532) -> {'M', [1579,1610]}; +uts46_map(64533) -> {'M', [1580,1581]}; +uts46_map(64534) -> {'M', [1580,1605]}; +uts46_map(64535) -> {'M', [1581,1580]}; +uts46_map(64536) -> {'M', [1581,1605]}; +uts46_map(64537) -> {'M', [1582,1580]}; +uts46_map(64538) -> {'M', [1582,1581]}; +uts46_map(64539) -> {'M', [1582,1605]}; +uts46_map(64540) -> {'M', [1587,1580]}; +uts46_map(64541) -> {'M', [1587,1581]}; +uts46_map(64542) -> {'M', [1587,1582]}; +uts46_map(64543) -> {'M', [1587,1605]}; +uts46_map(64544) -> {'M', [1589,1581]}; +uts46_map(64545) -> {'M', [1589,1605]}; +uts46_map(64546) -> {'M', [1590,1580]}; +uts46_map(64547) -> {'M', [1590,1581]}; +uts46_map(64548) -> {'M', [1590,1582]}; +uts46_map(64549) -> {'M', [1590,1605]}; +uts46_map(64550) -> {'M', [1591,1581]}; +uts46_map(64551) -> {'M', [1591,1605]}; +uts46_map(64552) -> {'M', [1592,1605]}; +uts46_map(64553) -> {'M', [1593,1580]}; +uts46_map(64554) -> {'M', [1593,1605]}; +uts46_map(64555) -> {'M', [1594,1580]}; +uts46_map(64556) -> {'M', [1594,1605]}; +uts46_map(64557) -> {'M', [1601,1580]}; +uts46_map(64558) -> {'M', [1601,1581]}; +uts46_map(64559) -> {'M', [1601,1582]}; +uts46_map(64560) -> {'M', [1601,1605]}; +uts46_map(64561) -> {'M', [1601,1609]}; +uts46_map(64562) -> {'M', [1601,1610]}; +uts46_map(64563) -> {'M', [1602,1581]}; +uts46_map(64564) -> {'M', [1602,1605]}; +uts46_map(64565) -> {'M', [1602,1609]}; +uts46_map(64566) -> {'M', [1602,1610]}; +uts46_map(64567) -> {'M', [1603,1575]}; +uts46_map(64568) -> {'M', [1603,1580]}; +uts46_map(64569) -> {'M', [1603,1581]}; +uts46_map(64570) -> {'M', [1603,1582]}; +uts46_map(64571) -> {'M', [1603,1604]}; +uts46_map(64572) -> {'M', [1603,1605]}; +uts46_map(64573) -> {'M', [1603,1609]}; +uts46_map(64574) -> {'M', [1603,1610]}; +uts46_map(64575) -> {'M', [1604,1580]}; 
+uts46_map(64576) -> {'M', [1604,1581]}; +uts46_map(64577) -> {'M', [1604,1582]}; +uts46_map(64578) -> {'M', [1604,1605]}; +uts46_map(64579) -> {'M', [1604,1609]}; +uts46_map(64580) -> {'M', [1604,1610]}; +uts46_map(64581) -> {'M', [1605,1580]}; +uts46_map(64582) -> {'M', [1605,1581]}; +uts46_map(64583) -> {'M', [1605,1582]}; +uts46_map(64584) -> {'M', [1605,1605]}; +uts46_map(64585) -> {'M', [1605,1609]}; +uts46_map(64586) -> {'M', [1605,1610]}; +uts46_map(64587) -> {'M', [1606,1580]}; +uts46_map(64588) -> {'M', [1606,1581]}; +uts46_map(64589) -> {'M', [1606,1582]}; +uts46_map(64590) -> {'M', [1606,1605]}; +uts46_map(64591) -> {'M', [1606,1609]}; +uts46_map(64592) -> {'M', [1606,1610]}; +uts46_map(64593) -> {'M', [1607,1580]}; +uts46_map(64594) -> {'M', [1607,1605]}; +uts46_map(64595) -> {'M', [1607,1609]}; +uts46_map(64596) -> {'M', [1607,1610]}; +uts46_map(64597) -> {'M', [1610,1580]}; +uts46_map(64598) -> {'M', [1610,1581]}; +uts46_map(64599) -> {'M', [1610,1582]}; +uts46_map(64600) -> {'M', [1610,1605]}; +uts46_map(64601) -> {'M', [1610,1609]}; +uts46_map(64602) -> {'M', [1610,1610]}; +uts46_map(64603) -> {'M', [1584,1648]}; +uts46_map(64604) -> {'M', [1585,1648]}; +uts46_map(64605) -> {'M', [1609,1648]}; +uts46_map(64606) -> {'3', [32,1612,1617]}; +uts46_map(64607) -> {'3', [32,1613,1617]}; +uts46_map(64608) -> {'3', [32,1614,1617]}; +uts46_map(64609) -> {'3', [32,1615,1617]}; +uts46_map(64610) -> {'3', [32,1616,1617]}; +uts46_map(64611) -> {'3', [32,1617,1648]}; +uts46_map(64612) -> {'M', [1574,1585]}; +uts46_map(64613) -> {'M', [1574,1586]}; +uts46_map(64614) -> {'M', [1574,1605]}; +uts46_map(64615) -> {'M', [1574,1606]}; +uts46_map(64616) -> {'M', [1574,1609]}; +uts46_map(64617) -> {'M', [1574,1610]}; +uts46_map(64618) -> {'M', [1576,1585]}; +uts46_map(64619) -> {'M', [1576,1586]}; +uts46_map(64620) -> {'M', [1576,1605]}; +uts46_map(64621) -> {'M', [1576,1606]}; +uts46_map(64622) -> {'M', [1576,1609]}; +uts46_map(64623) -> {'M', [1576,1610]}; 
+uts46_map(64624) -> {'M', [1578,1585]}; +uts46_map(64625) -> {'M', [1578,1586]}; +uts46_map(64626) -> {'M', [1578,1605]}; +uts46_map(64627) -> {'M', [1578,1606]}; +uts46_map(64628) -> {'M', [1578,1609]}; +uts46_map(64629) -> {'M', [1578,1610]}; +uts46_map(64630) -> {'M', [1579,1585]}; +uts46_map(64631) -> {'M', [1579,1586]}; +uts46_map(64632) -> {'M', [1579,1605]}; +uts46_map(64633) -> {'M', [1579,1606]}; +uts46_map(64634) -> {'M', [1579,1609]}; +uts46_map(64635) -> {'M', [1579,1610]}; +uts46_map(64636) -> {'M', [1601,1609]}; +uts46_map(64637) -> {'M', [1601,1610]}; +uts46_map(64638) -> {'M', [1602,1609]}; +uts46_map(64639) -> {'M', [1602,1610]}; +uts46_map(64640) -> {'M', [1603,1575]}; +uts46_map(64641) -> {'M', [1603,1604]}; +uts46_map(64642) -> {'M', [1603,1605]}; +uts46_map(64643) -> {'M', [1603,1609]}; +uts46_map(64644) -> {'M', [1603,1610]}; +uts46_map(64645) -> {'M', [1604,1605]}; +uts46_map(64646) -> {'M', [1604,1609]}; +uts46_map(64647) -> {'M', [1604,1610]}; +uts46_map(64648) -> {'M', [1605,1575]}; +uts46_map(64649) -> {'M', [1605,1605]}; +uts46_map(64650) -> {'M', [1606,1585]}; +uts46_map(64651) -> {'M', [1606,1586]}; +uts46_map(64652) -> {'M', [1606,1605]}; +uts46_map(64653) -> {'M', [1606,1606]}; +uts46_map(64654) -> {'M', [1606,1609]}; +uts46_map(64655) -> {'M', [1606,1610]}; +uts46_map(64656) -> {'M', [1609,1648]}; +uts46_map(64657) -> {'M', [1610,1585]}; +uts46_map(64658) -> {'M', [1610,1586]}; +uts46_map(64659) -> {'M', [1610,1605]}; +uts46_map(64660) -> {'M', [1610,1606]}; +uts46_map(64661) -> {'M', [1610,1609]}; +uts46_map(64662) -> {'M', [1610,1610]}; +uts46_map(64663) -> {'M', [1574,1580]}; +uts46_map(64664) -> {'M', [1574,1581]}; +uts46_map(64665) -> {'M', [1574,1582]}; +uts46_map(64666) -> {'M', [1574,1605]}; +uts46_map(64667) -> {'M', [1574,1607]}; +uts46_map(64668) -> {'M', [1576,1580]}; +uts46_map(64669) -> {'M', [1576,1581]}; +uts46_map(64670) -> {'M', [1576,1582]}; +uts46_map(64671) -> {'M', [1576,1605]}; +uts46_map(64672) -> {'M', 
[1576,1607]}; +uts46_map(64673) -> {'M', [1578,1580]}; +uts46_map(64674) -> {'M', [1578,1581]}; +uts46_map(64675) -> {'M', [1578,1582]}; +uts46_map(64676) -> {'M', [1578,1605]}; +uts46_map(64677) -> {'M', [1578,1607]}; +uts46_map(64678) -> {'M', [1579,1605]}; +uts46_map(64679) -> {'M', [1580,1581]}; +uts46_map(64680) -> {'M', [1580,1605]}; +uts46_map(64681) -> {'M', [1581,1580]}; +uts46_map(64682) -> {'M', [1581,1605]}; +uts46_map(64683) -> {'M', [1582,1580]}; +uts46_map(64684) -> {'M', [1582,1605]}; +uts46_map(64685) -> {'M', [1587,1580]}; +uts46_map(64686) -> {'M', [1587,1581]}; +uts46_map(64687) -> {'M', [1587,1582]}; +uts46_map(64688) -> {'M', [1587,1605]}; +uts46_map(64689) -> {'M', [1589,1581]}; +uts46_map(64690) -> {'M', [1589,1582]}; +uts46_map(64691) -> {'M', [1589,1605]}; +uts46_map(64692) -> {'M', [1590,1580]}; +uts46_map(64693) -> {'M', [1590,1581]}; +uts46_map(64694) -> {'M', [1590,1582]}; +uts46_map(64695) -> {'M', [1590,1605]}; +uts46_map(64696) -> {'M', [1591,1581]}; +uts46_map(64697) -> {'M', [1592,1605]}; +uts46_map(64698) -> {'M', [1593,1580]}; +uts46_map(64699) -> {'M', [1593,1605]}; +uts46_map(64700) -> {'M', [1594,1580]}; +uts46_map(64701) -> {'M', [1594,1605]}; +uts46_map(64702) -> {'M', [1601,1580]}; +uts46_map(64703) -> {'M', [1601,1581]}; +uts46_map(64704) -> {'M', [1601,1582]}; +uts46_map(64705) -> {'M', [1601,1605]}; +uts46_map(64706) -> {'M', [1602,1581]}; +uts46_map(64707) -> {'M', [1602,1605]}; +uts46_map(64708) -> {'M', [1603,1580]}; +uts46_map(64709) -> {'M', [1603,1581]}; +uts46_map(64710) -> {'M', [1603,1582]}; +uts46_map(64711) -> {'M', [1603,1604]}; +uts46_map(64712) -> {'M', [1603,1605]}; +uts46_map(64713) -> {'M', [1604,1580]}; +uts46_map(64714) -> {'M', [1604,1581]}; +uts46_map(64715) -> {'M', [1604,1582]}; +uts46_map(64716) -> {'M', [1604,1605]}; +uts46_map(64717) -> {'M', [1604,1607]}; +uts46_map(64718) -> {'M', [1605,1580]}; +uts46_map(64719) -> {'M', [1605,1581]}; +uts46_map(64720) -> {'M', [1605,1582]}; +uts46_map(64721) 
-> {'M', [1605,1605]}; +uts46_map(64722) -> {'M', [1606,1580]}; +uts46_map(64723) -> {'M', [1606,1581]}; +uts46_map(64724) -> {'M', [1606,1582]}; +uts46_map(64725) -> {'M', [1606,1605]}; +uts46_map(64726) -> {'M', [1606,1607]}; +uts46_map(64727) -> {'M', [1607,1580]}; +uts46_map(64728) -> {'M', [1607,1605]}; +uts46_map(64729) -> {'M', [1607,1648]}; +uts46_map(64730) -> {'M', [1610,1580]}; +uts46_map(64731) -> {'M', [1610,1581]}; +uts46_map(64732) -> {'M', [1610,1582]}; +uts46_map(64733) -> {'M', [1610,1605]}; +uts46_map(64734) -> {'M', [1610,1607]}; +uts46_map(64735) -> {'M', [1574,1605]}; +uts46_map(64736) -> {'M', [1574,1607]}; +uts46_map(64737) -> {'M', [1576,1605]}; +uts46_map(64738) -> {'M', [1576,1607]}; +uts46_map(64739) -> {'M', [1578,1605]}; +uts46_map(64740) -> {'M', [1578,1607]}; +uts46_map(64741) -> {'M', [1579,1605]}; +uts46_map(64742) -> {'M', [1579,1607]}; +uts46_map(64743) -> {'M', [1587,1605]}; +uts46_map(64744) -> {'M', [1587,1607]}; +uts46_map(64745) -> {'M', [1588,1605]}; +uts46_map(64746) -> {'M', [1588,1607]}; +uts46_map(64747) -> {'M', [1603,1604]}; +uts46_map(64748) -> {'M', [1603,1605]}; +uts46_map(64749) -> {'M', [1604,1605]}; +uts46_map(64750) -> {'M', [1606,1605]}; +uts46_map(64751) -> {'M', [1606,1607]}; +uts46_map(64752) -> {'M', [1610,1605]}; +uts46_map(64753) -> {'M', [1610,1607]}; +uts46_map(64754) -> {'M', [1600,1614,1617]}; +uts46_map(64755) -> {'M', [1600,1615,1617]}; +uts46_map(64756) -> {'M', [1600,1616,1617]}; +uts46_map(64757) -> {'M', [1591,1609]}; +uts46_map(64758) -> {'M', [1591,1610]}; +uts46_map(64759) -> {'M', [1593,1609]}; +uts46_map(64760) -> {'M', [1593,1610]}; +uts46_map(64761) -> {'M', [1594,1609]}; +uts46_map(64762) -> {'M', [1594,1610]}; +uts46_map(64763) -> {'M', [1587,1609]}; +uts46_map(64764) -> {'M', [1587,1610]}; +uts46_map(64765) -> {'M', [1588,1609]}; +uts46_map(64766) -> {'M', [1588,1610]}; +uts46_map(64767) -> {'M', [1581,1609]}; +uts46_map(64768) -> {'M', [1581,1610]}; +uts46_map(64769) -> {'M', 
[1580,1609]}; +uts46_map(64770) -> {'M', [1580,1610]}; +uts46_map(64771) -> {'M', [1582,1609]}; +uts46_map(64772) -> {'M', [1582,1610]}; +uts46_map(64773) -> {'M', [1589,1609]}; +uts46_map(64774) -> {'M', [1589,1610]}; +uts46_map(64775) -> {'M', [1590,1609]}; +uts46_map(64776) -> {'M', [1590,1610]}; +uts46_map(64777) -> {'M', [1588,1580]}; +uts46_map(64778) -> {'M', [1588,1581]}; +uts46_map(64779) -> {'M', [1588,1582]}; +uts46_map(64780) -> {'M', [1588,1605]}; +uts46_map(64781) -> {'M', [1588,1585]}; +uts46_map(64782) -> {'M', [1587,1585]}; +uts46_map(64783) -> {'M', [1589,1585]}; +uts46_map(64784) -> {'M', [1590,1585]}; +uts46_map(64785) -> {'M', [1591,1609]}; +uts46_map(64786) -> {'M', [1591,1610]}; +uts46_map(64787) -> {'M', [1593,1609]}; +uts46_map(64788) -> {'M', [1593,1610]}; +uts46_map(64789) -> {'M', [1594,1609]}; +uts46_map(64790) -> {'M', [1594,1610]}; +uts46_map(64791) -> {'M', [1587,1609]}; +uts46_map(64792) -> {'M', [1587,1610]}; +uts46_map(64793) -> {'M', [1588,1609]}; +uts46_map(64794) -> {'M', [1588,1610]}; +uts46_map(64795) -> {'M', [1581,1609]}; +uts46_map(64796) -> {'M', [1581,1610]}; +uts46_map(64797) -> {'M', [1580,1609]}; +uts46_map(64798) -> {'M', [1580,1610]}; +uts46_map(64799) -> {'M', [1582,1609]}; +uts46_map(64800) -> {'M', [1582,1610]}; +uts46_map(64801) -> {'M', [1589,1609]}; +uts46_map(64802) -> {'M', [1589,1610]}; +uts46_map(64803) -> {'M', [1590,1609]}; +uts46_map(64804) -> {'M', [1590,1610]}; +uts46_map(64805) -> {'M', [1588,1580]}; +uts46_map(64806) -> {'M', [1588,1581]}; +uts46_map(64807) -> {'M', [1588,1582]}; +uts46_map(64808) -> {'M', [1588,1605]}; +uts46_map(64809) -> {'M', [1588,1585]}; +uts46_map(64810) -> {'M', [1587,1585]}; +uts46_map(64811) -> {'M', [1589,1585]}; +uts46_map(64812) -> {'M', [1590,1585]}; +uts46_map(64813) -> {'M', [1588,1580]}; +uts46_map(64814) -> {'M', [1588,1581]}; +uts46_map(64815) -> {'M', [1588,1582]}; +uts46_map(64816) -> {'M', [1588,1605]}; +uts46_map(64817) -> {'M', [1587,1607]}; +uts46_map(64818) 
-> {'M', [1588,1607]}; +uts46_map(64819) -> {'M', [1591,1605]}; +uts46_map(64820) -> {'M', [1587,1580]}; +uts46_map(64821) -> {'M', [1587,1581]}; +uts46_map(64822) -> {'M', [1587,1582]}; +uts46_map(64823) -> {'M', [1588,1580]}; +uts46_map(64824) -> {'M', [1588,1581]}; +uts46_map(64825) -> {'M', [1588,1582]}; +uts46_map(64826) -> {'M', [1591,1605]}; +uts46_map(64827) -> {'M', [1592,1605]}; +uts46_map(64848) -> {'M', [1578,1580,1605]}; +uts46_map(64851) -> {'M', [1578,1581,1605]}; +uts46_map(64852) -> {'M', [1578,1582,1605]}; +uts46_map(64853) -> {'M', [1578,1605,1580]}; +uts46_map(64854) -> {'M', [1578,1605,1581]}; +uts46_map(64855) -> {'M', [1578,1605,1582]}; +uts46_map(64858) -> {'M', [1581,1605,1610]}; +uts46_map(64859) -> {'M', [1581,1605,1609]}; +uts46_map(64860) -> {'M', [1587,1581,1580]}; +uts46_map(64861) -> {'M', [1587,1580,1581]}; +uts46_map(64862) -> {'M', [1587,1580,1609]}; +uts46_map(64865) -> {'M', [1587,1605,1580]}; +uts46_map(64870) -> {'M', [1589,1605,1605]}; +uts46_map(64873) -> {'M', [1588,1580,1610]}; +uts46_map(64878) -> {'M', [1590,1581,1609]}; +uts46_map(64883) -> {'M', [1591,1605,1605]}; +uts46_map(64884) -> {'M', [1591,1605,1610]}; +uts46_map(64885) -> {'M', [1593,1580,1605]}; +uts46_map(64888) -> {'M', [1593,1605,1609]}; +uts46_map(64889) -> {'M', [1594,1605,1605]}; +uts46_map(64890) -> {'M', [1594,1605,1610]}; +uts46_map(64891) -> {'M', [1594,1605,1609]}; +uts46_map(64894) -> {'M', [1602,1605,1581]}; +uts46_map(64895) -> {'M', [1602,1605,1605]}; +uts46_map(64896) -> {'M', [1604,1581,1605]}; +uts46_map(64897) -> {'M', [1604,1581,1610]}; +uts46_map(64898) -> {'M', [1604,1581,1609]}; +uts46_map(64905) -> {'M', [1605,1581,1580]}; +uts46_map(64906) -> {'M', [1605,1581,1605]}; +uts46_map(64907) -> {'M', [1605,1581,1610]}; +uts46_map(64908) -> {'M', [1605,1580,1581]}; +uts46_map(64909) -> {'M', [1605,1580,1605]}; +uts46_map(64910) -> {'M', [1605,1582,1580]}; +uts46_map(64911) -> {'M', [1605,1582,1605]}; +uts46_map(64914) -> {'M', 
[1605,1580,1582]}; +uts46_map(64915) -> {'M', [1607,1605,1580]}; +uts46_map(64916) -> {'M', [1607,1605,1605]}; +uts46_map(64917) -> {'M', [1606,1581,1605]}; +uts46_map(64918) -> {'M', [1606,1581,1609]}; +uts46_map(64921) -> {'M', [1606,1580,1609]}; +uts46_map(64922) -> {'M', [1606,1605,1610]}; +uts46_map(64923) -> {'M', [1606,1605,1609]}; +uts46_map(64926) -> {'M', [1576,1582,1610]}; +uts46_map(64927) -> {'M', [1578,1580,1610]}; +uts46_map(64928) -> {'M', [1578,1580,1609]}; +uts46_map(64929) -> {'M', [1578,1582,1610]}; +uts46_map(64930) -> {'M', [1578,1582,1609]}; +uts46_map(64931) -> {'M', [1578,1605,1610]}; +uts46_map(64932) -> {'M', [1578,1605,1609]}; +uts46_map(64933) -> {'M', [1580,1605,1610]}; +uts46_map(64934) -> {'M', [1580,1581,1609]}; +uts46_map(64935) -> {'M', [1580,1605,1609]}; +uts46_map(64936) -> {'M', [1587,1582,1609]}; +uts46_map(64937) -> {'M', [1589,1581,1610]}; +uts46_map(64938) -> {'M', [1588,1581,1610]}; +uts46_map(64939) -> {'M', [1590,1581,1610]}; +uts46_map(64940) -> {'M', [1604,1580,1610]}; +uts46_map(64941) -> {'M', [1604,1605,1610]}; +uts46_map(64942) -> {'M', [1610,1581,1610]}; +uts46_map(64943) -> {'M', [1610,1580,1610]}; +uts46_map(64944) -> {'M', [1610,1605,1610]}; +uts46_map(64945) -> {'M', [1605,1605,1610]}; +uts46_map(64946) -> {'M', [1602,1605,1610]}; +uts46_map(64947) -> {'M', [1606,1581,1610]}; +uts46_map(64948) -> {'M', [1602,1605,1581]}; +uts46_map(64949) -> {'M', [1604,1581,1605]}; +uts46_map(64950) -> {'M', [1593,1605,1610]}; +uts46_map(64951) -> {'M', [1603,1605,1610]}; +uts46_map(64952) -> {'M', [1606,1580,1581]}; +uts46_map(64953) -> {'M', [1605,1582,1610]}; +uts46_map(64954) -> {'M', [1604,1580,1605]}; +uts46_map(64955) -> {'M', [1603,1605,1605]}; +uts46_map(64956) -> {'M', [1604,1580,1605]}; +uts46_map(64957) -> {'M', [1606,1580,1581]}; +uts46_map(64958) -> {'M', [1580,1581,1610]}; +uts46_map(64959) -> {'M', [1581,1580,1610]}; +uts46_map(64960) -> {'M', [1605,1580,1610]}; +uts46_map(64961) -> {'M', [1601,1605,1610]}; 
+uts46_map(64962) -> {'M', [1576,1581,1610]}; +uts46_map(64963) -> {'M', [1603,1605,1605]}; +uts46_map(64964) -> {'M', [1593,1580,1605]}; +uts46_map(64965) -> {'M', [1589,1605,1605]}; +uts46_map(64966) -> {'M', [1587,1582,1610]}; +uts46_map(64967) -> {'M', [1606,1580,1610]}; +uts46_map(65008) -> {'M', [1589,1604,1746]}; +uts46_map(65009) -> {'M', [1602,1604,1746]}; +uts46_map(65010) -> {'M', [1575,1604,1604,1607]}; +uts46_map(65011) -> {'M', [1575,1603,1576,1585]}; +uts46_map(65012) -> {'M', [1605,1581,1605,1583]}; +uts46_map(65013) -> {'M', [1589,1604,1593,1605]}; +uts46_map(65014) -> {'M', [1585,1587,1608,1604]}; +uts46_map(65015) -> {'M', [1593,1604,1610,1607]}; +uts46_map(65016) -> {'M', [1608,1587,1604,1605]}; +uts46_map(65017) -> {'M', [1589,1604,1609]}; +uts46_map(65018) -> {'3', [1589,1604,1609,32,1575,1604,1604,1607,32,1593,1604,1610,1607,32,1608,1587,1604,1605]}; +uts46_map(65019) -> {'3', [1580,1604,32,1580,1604,1575,1604,1607]}; +uts46_map(65020) -> {'M', [1585,1740,1575,1604]}; +uts46_map(65021) -> 'V'; +uts46_map(65040) -> {'3', [44]}; +uts46_map(65041) -> {'M', [12289]}; +uts46_map(65042) -> 'X'; +uts46_map(65043) -> {'3', [58]}; +uts46_map(65044) -> {'3', [59]}; +uts46_map(65045) -> {'3', [33]}; +uts46_map(65046) -> {'3', [63]}; +uts46_map(65047) -> {'M', [12310]}; +uts46_map(65048) -> {'M', [12311]}; +uts46_map(65049) -> 'X'; +uts46_map(65072) -> 'X'; +uts46_map(65073) -> {'M', [8212]}; +uts46_map(65074) -> {'M', [8211]}; +uts46_map(65077) -> {'3', [40]}; +uts46_map(65078) -> {'3', [41]}; +uts46_map(65079) -> {'3', [123]}; +uts46_map(65080) -> {'3', [125]}; +uts46_map(65081) -> {'M', [12308]}; +uts46_map(65082) -> {'M', [12309]}; +uts46_map(65083) -> {'M', [12304]}; +uts46_map(65084) -> {'M', [12305]}; +uts46_map(65085) -> {'M', [12298]}; +uts46_map(65086) -> {'M', [12299]}; +uts46_map(65087) -> {'M', [12296]}; +uts46_map(65088) -> {'M', [12297]}; +uts46_map(65089) -> {'M', [12300]}; +uts46_map(65090) -> {'M', [12301]}; +uts46_map(65091) -> {'M', 
[12302]}; +uts46_map(65092) -> {'M', [12303]}; +uts46_map(65095) -> {'3', [91]}; +uts46_map(65096) -> {'3', [93]}; +uts46_map(65104) -> {'3', [44]}; +uts46_map(65105) -> {'M', [12289]}; +uts46_map(65106) -> 'X'; +uts46_map(65107) -> 'X'; +uts46_map(65108) -> {'3', [59]}; +uts46_map(65109) -> {'3', [58]}; +uts46_map(65110) -> {'3', [63]}; +uts46_map(65111) -> {'3', [33]}; +uts46_map(65112) -> {'M', [8212]}; +uts46_map(65113) -> {'3', [40]}; +uts46_map(65114) -> {'3', [41]}; +uts46_map(65115) -> {'3', [123]}; +uts46_map(65116) -> {'3', [125]}; +uts46_map(65117) -> {'M', [12308]}; +uts46_map(65118) -> {'M', [12309]}; +uts46_map(65119) -> {'3', [35]}; +uts46_map(65120) -> {'3', [38]}; +uts46_map(65121) -> {'3', [42]}; +uts46_map(65122) -> {'3', [43]}; +uts46_map(65123) -> {'M', [45]}; +uts46_map(65124) -> {'3', [60]}; +uts46_map(65125) -> {'3', [62]}; +uts46_map(65126) -> {'3', [61]}; +uts46_map(65127) -> 'X'; +uts46_map(65128) -> {'3', [92]}; +uts46_map(65129) -> {'3', [36]}; +uts46_map(65130) -> {'3', [37]}; +uts46_map(65131) -> {'3', [64]}; +uts46_map(65136) -> {'3', [32,1611]}; +uts46_map(65137) -> {'M', [1600,1611]}; +uts46_map(65138) -> {'3', [32,1612]}; +uts46_map(65139) -> 'V'; +uts46_map(65140) -> {'3', [32,1613]}; +uts46_map(65141) -> 'X'; +uts46_map(65142) -> {'3', [32,1614]}; +uts46_map(65143) -> {'M', [1600,1614]}; +uts46_map(65144) -> {'3', [32,1615]}; +uts46_map(65145) -> {'M', [1600,1615]}; +uts46_map(65146) -> {'3', [32,1616]}; +uts46_map(65147) -> {'M', [1600,1616]}; +uts46_map(65148) -> {'3', [32,1617]}; +uts46_map(65149) -> {'M', [1600,1617]}; +uts46_map(65150) -> {'3', [32,1618]}; +uts46_map(65151) -> {'M', [1600,1618]}; +uts46_map(65152) -> {'M', [1569]}; +uts46_map(65279) -> 'I'; +uts46_map(65280) -> 'X'; +uts46_map(65281) -> {'3', [33]}; +uts46_map(65282) -> {'3', [34]}; +uts46_map(65283) -> {'3', [35]}; +uts46_map(65284) -> {'3', [36]}; +uts46_map(65285) -> {'3', [37]}; +uts46_map(65286) -> {'3', [38]}; +uts46_map(65287) -> {'3', [39]}; 
+uts46_map(65288) -> {'3', [40]}; +uts46_map(65289) -> {'3', [41]}; +uts46_map(65290) -> {'3', [42]}; +uts46_map(65291) -> {'3', [43]}; +uts46_map(65292) -> {'3', [44]}; +uts46_map(65293) -> {'M', [45]}; +uts46_map(65294) -> {'M', [46]}; +uts46_map(65295) -> {'3', [47]}; +uts46_map(65296) -> {'M', [48]}; +uts46_map(65297) -> {'M', [49]}; +uts46_map(65298) -> {'M', [50]}; +uts46_map(65299) -> {'M', [51]}; +uts46_map(65300) -> {'M', [52]}; +uts46_map(65301) -> {'M', [53]}; +uts46_map(65302) -> {'M', [54]}; +uts46_map(65303) -> {'M', [55]}; +uts46_map(65304) -> {'M', [56]}; +uts46_map(65305) -> {'M', [57]}; +uts46_map(65306) -> {'3', [58]}; +uts46_map(65307) -> {'3', [59]}; +uts46_map(65308) -> {'3', [60]}; +uts46_map(65309) -> {'3', [61]}; +uts46_map(65310) -> {'3', [62]}; +uts46_map(65311) -> {'3', [63]}; +uts46_map(65312) -> {'3', [64]}; +uts46_map(65313) -> {'M', [97]}; +uts46_map(65314) -> {'M', [98]}; +uts46_map(65315) -> {'M', [99]}; +uts46_map(65316) -> {'M', [100]}; +uts46_map(65317) -> {'M', [101]}; +uts46_map(65318) -> {'M', [102]}; +uts46_map(65319) -> {'M', [103]}; +uts46_map(65320) -> {'M', [104]}; +uts46_map(65321) -> {'M', [105]}; +uts46_map(65322) -> {'M', [106]}; +uts46_map(65323) -> {'M', [107]}; +uts46_map(65324) -> {'M', [108]}; +uts46_map(65325) -> {'M', [109]}; +uts46_map(65326) -> {'M', [110]}; +uts46_map(65327) -> {'M', [111]}; +uts46_map(65328) -> {'M', [112]}; +uts46_map(65329) -> {'M', [113]}; +uts46_map(65330) -> {'M', [114]}; +uts46_map(65331) -> {'M', [115]}; +uts46_map(65332) -> {'M', [116]}; +uts46_map(65333) -> {'M', [117]}; +uts46_map(65334) -> {'M', [118]}; +uts46_map(65335) -> {'M', [119]}; +uts46_map(65336) -> {'M', [120]}; +uts46_map(65337) -> {'M', [121]}; +uts46_map(65338) -> {'M', [122]}; +uts46_map(65339) -> {'3', [91]}; +uts46_map(65340) -> {'3', [92]}; +uts46_map(65341) -> {'3', [93]}; +uts46_map(65342) -> {'3', [94]}; +uts46_map(65343) -> {'3', [95]}; +uts46_map(65344) -> {'3', [96]}; +uts46_map(65345) -> {'M', [97]}; 
+uts46_map(65346) -> {'M', [98]}; +uts46_map(65347) -> {'M', [99]}; +uts46_map(65348) -> {'M', [100]}; +uts46_map(65349) -> {'M', [101]}; +uts46_map(65350) -> {'M', [102]}; +uts46_map(65351) -> {'M', [103]}; +uts46_map(65352) -> {'M', [104]}; +uts46_map(65353) -> {'M', [105]}; +uts46_map(65354) -> {'M', [106]}; +uts46_map(65355) -> {'M', [107]}; +uts46_map(65356) -> {'M', [108]}; +uts46_map(65357) -> {'M', [109]}; +uts46_map(65358) -> {'M', [110]}; +uts46_map(65359) -> {'M', [111]}; +uts46_map(65360) -> {'M', [112]}; +uts46_map(65361) -> {'M', [113]}; +uts46_map(65362) -> {'M', [114]}; +uts46_map(65363) -> {'M', [115]}; +uts46_map(65364) -> {'M', [116]}; +uts46_map(65365) -> {'M', [117]}; +uts46_map(65366) -> {'M', [118]}; +uts46_map(65367) -> {'M', [119]}; +uts46_map(65368) -> {'M', [120]}; +uts46_map(65369) -> {'M', [121]}; +uts46_map(65370) -> {'M', [122]}; +uts46_map(65371) -> {'3', [123]}; +uts46_map(65372) -> {'3', [124]}; +uts46_map(65373) -> {'3', [125]}; +uts46_map(65374) -> {'3', [126]}; +uts46_map(65375) -> {'M', [10629]}; +uts46_map(65376) -> {'M', [10630]}; +uts46_map(65377) -> {'M', [46]}; +uts46_map(65378) -> {'M', [12300]}; +uts46_map(65379) -> {'M', [12301]}; +uts46_map(65380) -> {'M', [12289]}; +uts46_map(65381) -> {'M', [12539]}; +uts46_map(65382) -> {'M', [12530]}; +uts46_map(65383) -> {'M', [12449]}; +uts46_map(65384) -> {'M', [12451]}; +uts46_map(65385) -> {'M', [12453]}; +uts46_map(65386) -> {'M', [12455]}; +uts46_map(65387) -> {'M', [12457]}; +uts46_map(65388) -> {'M', [12515]}; +uts46_map(65389) -> {'M', [12517]}; +uts46_map(65390) -> {'M', [12519]}; +uts46_map(65391) -> {'M', [12483]}; +uts46_map(65392) -> {'M', [12540]}; +uts46_map(65393) -> {'M', [12450]}; +uts46_map(65394) -> {'M', [12452]}; +uts46_map(65395) -> {'M', [12454]}; +uts46_map(65396) -> {'M', [12456]}; +uts46_map(65397) -> {'M', [12458]}; +uts46_map(65398) -> {'M', [12459]}; +uts46_map(65399) -> {'M', [12461]}; +uts46_map(65400) -> {'M', [12463]}; +uts46_map(65401) -> {'M', 
[12465]}; +uts46_map(65402) -> {'M', [12467]}; +uts46_map(65403) -> {'M', [12469]}; +uts46_map(65404) -> {'M', [12471]}; +uts46_map(65405) -> {'M', [12473]}; +uts46_map(65406) -> {'M', [12475]}; +uts46_map(65407) -> {'M', [12477]}; +uts46_map(65408) -> {'M', [12479]}; +uts46_map(65409) -> {'M', [12481]}; +uts46_map(65410) -> {'M', [12484]}; +uts46_map(65411) -> {'M', [12486]}; +uts46_map(65412) -> {'M', [12488]}; +uts46_map(65413) -> {'M', [12490]}; +uts46_map(65414) -> {'M', [12491]}; +uts46_map(65415) -> {'M', [12492]}; +uts46_map(65416) -> {'M', [12493]}; +uts46_map(65417) -> {'M', [12494]}; +uts46_map(65418) -> {'M', [12495]}; +uts46_map(65419) -> {'M', [12498]}; +uts46_map(65420) -> {'M', [12501]}; +uts46_map(65421) -> {'M', [12504]}; +uts46_map(65422) -> {'M', [12507]}; +uts46_map(65423) -> {'M', [12510]}; +uts46_map(65424) -> {'M', [12511]}; +uts46_map(65425) -> {'M', [12512]}; +uts46_map(65426) -> {'M', [12513]}; +uts46_map(65427) -> {'M', [12514]}; +uts46_map(65428) -> {'M', [12516]}; +uts46_map(65429) -> {'M', [12518]}; +uts46_map(65430) -> {'M', [12520]}; +uts46_map(65431) -> {'M', [12521]}; +uts46_map(65432) -> {'M', [12522]}; +uts46_map(65433) -> {'M', [12523]}; +uts46_map(65434) -> {'M', [12524]}; +uts46_map(65435) -> {'M', [12525]}; +uts46_map(65436) -> {'M', [12527]}; +uts46_map(65437) -> {'M', [12531]}; +uts46_map(65438) -> {'M', [12441]}; +uts46_map(65439) -> {'M', [12442]}; +uts46_map(65440) -> 'X'; +uts46_map(65441) -> {'M', [4352]}; +uts46_map(65442) -> {'M', [4353]}; +uts46_map(65443) -> {'M', [4522]}; +uts46_map(65444) -> {'M', [4354]}; +uts46_map(65445) -> {'M', [4524]}; +uts46_map(65446) -> {'M', [4525]}; +uts46_map(65447) -> {'M', [4355]}; +uts46_map(65448) -> {'M', [4356]}; +uts46_map(65449) -> {'M', [4357]}; +uts46_map(65450) -> {'M', [4528]}; +uts46_map(65451) -> {'M', [4529]}; +uts46_map(65452) -> {'M', [4530]}; +uts46_map(65453) -> {'M', [4531]}; +uts46_map(65454) -> {'M', [4532]}; +uts46_map(65455) -> {'M', [4533]}; +uts46_map(65456) 
-> {'M', [4378]}; +uts46_map(65457) -> {'M', [4358]}; +uts46_map(65458) -> {'M', [4359]}; +uts46_map(65459) -> {'M', [4360]}; +uts46_map(65460) -> {'M', [4385]}; +uts46_map(65461) -> {'M', [4361]}; +uts46_map(65462) -> {'M', [4362]}; +uts46_map(65463) -> {'M', [4363]}; +uts46_map(65464) -> {'M', [4364]}; +uts46_map(65465) -> {'M', [4365]}; +uts46_map(65466) -> {'M', [4366]}; +uts46_map(65467) -> {'M', [4367]}; +uts46_map(65468) -> {'M', [4368]}; +uts46_map(65469) -> {'M', [4369]}; +uts46_map(65470) -> {'M', [4370]}; +uts46_map(65474) -> {'M', [4449]}; +uts46_map(65475) -> {'M', [4450]}; +uts46_map(65476) -> {'M', [4451]}; +uts46_map(65477) -> {'M', [4452]}; +uts46_map(65478) -> {'M', [4453]}; +uts46_map(65479) -> {'M', [4454]}; +uts46_map(65482) -> {'M', [4455]}; +uts46_map(65483) -> {'M', [4456]}; +uts46_map(65484) -> {'M', [4457]}; +uts46_map(65485) -> {'M', [4458]}; +uts46_map(65486) -> {'M', [4459]}; +uts46_map(65487) -> {'M', [4460]}; +uts46_map(65490) -> {'M', [4461]}; +uts46_map(65491) -> {'M', [4462]}; +uts46_map(65492) -> {'M', [4463]}; +uts46_map(65493) -> {'M', [4464]}; +uts46_map(65494) -> {'M', [4465]}; +uts46_map(65495) -> {'M', [4466]}; +uts46_map(65498) -> {'M', [4467]}; +uts46_map(65499) -> {'M', [4468]}; +uts46_map(65500) -> {'M', [4469]}; +uts46_map(65504) -> {'M', [162]}; +uts46_map(65505) -> {'M', [163]}; +uts46_map(65506) -> {'M', [172]}; +uts46_map(65507) -> {'3', [32,772]}; +uts46_map(65508) -> {'M', [166]}; +uts46_map(65509) -> {'M', [165]}; +uts46_map(65510) -> {'M', [8361]}; +uts46_map(65511) -> 'X'; +uts46_map(65512) -> {'M', [9474]}; +uts46_map(65513) -> {'M', [8592]}; +uts46_map(65514) -> {'M', [8593]}; +uts46_map(65515) -> {'M', [8594]}; +uts46_map(65516) -> {'M', [8595]}; +uts46_map(65517) -> {'M', [9632]}; +uts46_map(65518) -> {'M', [9675]}; +uts46_map(65532) -> 'X'; +uts46_map(65533) -> 'X'; +uts46_map(65548) -> 'X'; +uts46_map(65575) -> 'X'; +uts46_map(65595) -> 'X'; +uts46_map(65598) -> 'X'; +uts46_map(65935) -> 'X'; 
+uts46_map(65948) -> 'V'; +uts46_map(65952) -> 'V'; +uts46_map(66045) -> 'V'; +uts46_map(66272) -> 'V'; +uts46_map(66335) -> 'V'; +uts46_map(66369) -> 'V'; +uts46_map(66378) -> 'V'; +uts46_map(66462) -> 'X'; +uts46_map(66463) -> 'V'; +uts46_map(66560) -> {'M', [66600]}; +uts46_map(66561) -> {'M', [66601]}; +uts46_map(66562) -> {'M', [66602]}; +uts46_map(66563) -> {'M', [66603]}; +uts46_map(66564) -> {'M', [66604]}; +uts46_map(66565) -> {'M', [66605]}; +uts46_map(66566) -> {'M', [66606]}; +uts46_map(66567) -> {'M', [66607]}; +uts46_map(66568) -> {'M', [66608]}; +uts46_map(66569) -> {'M', [66609]}; +uts46_map(66570) -> {'M', [66610]}; +uts46_map(66571) -> {'M', [66611]}; +uts46_map(66572) -> {'M', [66612]}; +uts46_map(66573) -> {'M', [66613]}; +uts46_map(66574) -> {'M', [66614]}; +uts46_map(66575) -> {'M', [66615]}; +uts46_map(66576) -> {'M', [66616]}; +uts46_map(66577) -> {'M', [66617]}; +uts46_map(66578) -> {'M', [66618]}; +uts46_map(66579) -> {'M', [66619]}; +uts46_map(66580) -> {'M', [66620]}; +uts46_map(66581) -> {'M', [66621]}; +uts46_map(66582) -> {'M', [66622]}; +uts46_map(66583) -> {'M', [66623]}; +uts46_map(66584) -> {'M', [66624]}; +uts46_map(66585) -> {'M', [66625]}; +uts46_map(66586) -> {'M', [66626]}; +uts46_map(66587) -> {'M', [66627]}; +uts46_map(66588) -> {'M', [66628]}; +uts46_map(66589) -> {'M', [66629]}; +uts46_map(66590) -> {'M', [66630]}; +uts46_map(66591) -> {'M', [66631]}; +uts46_map(66592) -> {'M', [66632]}; +uts46_map(66593) -> {'M', [66633]}; +uts46_map(66594) -> {'M', [66634]}; +uts46_map(66595) -> {'M', [66635]}; +uts46_map(66596) -> {'M', [66636]}; +uts46_map(66597) -> {'M', [66637]}; +uts46_map(66598) -> {'M', [66638]}; +uts46_map(66599) -> {'M', [66639]}; +uts46_map(66736) -> {'M', [66776]}; +uts46_map(66737) -> {'M', [66777]}; +uts46_map(66738) -> {'M', [66778]}; +uts46_map(66739) -> {'M', [66779]}; +uts46_map(66740) -> {'M', [66780]}; +uts46_map(66741) -> {'M', [66781]}; +uts46_map(66742) -> {'M', [66782]}; +uts46_map(66743) -> {'M', 
[66783]}; +uts46_map(66744) -> {'M', [66784]}; +uts46_map(66745) -> {'M', [66785]}; +uts46_map(66746) -> {'M', [66786]}; +uts46_map(66747) -> {'M', [66787]}; +uts46_map(66748) -> {'M', [66788]}; +uts46_map(66749) -> {'M', [66789]}; +uts46_map(66750) -> {'M', [66790]}; +uts46_map(66751) -> {'M', [66791]}; +uts46_map(66752) -> {'M', [66792]}; +uts46_map(66753) -> {'M', [66793]}; +uts46_map(66754) -> {'M', [66794]}; +uts46_map(66755) -> {'M', [66795]}; +uts46_map(66756) -> {'M', [66796]}; +uts46_map(66757) -> {'M', [66797]}; +uts46_map(66758) -> {'M', [66798]}; +uts46_map(66759) -> {'M', [66799]}; +uts46_map(66760) -> {'M', [66800]}; +uts46_map(66761) -> {'M', [66801]}; +uts46_map(66762) -> {'M', [66802]}; +uts46_map(66763) -> {'M', [66803]}; +uts46_map(66764) -> {'M', [66804]}; +uts46_map(66765) -> {'M', [66805]}; +uts46_map(66766) -> {'M', [66806]}; +uts46_map(66767) -> {'M', [66807]}; +uts46_map(66768) -> {'M', [66808]}; +uts46_map(66769) -> {'M', [66809]}; +uts46_map(66770) -> {'M', [66810]}; +uts46_map(66771) -> {'M', [66811]}; +uts46_map(66927) -> 'V'; +uts46_map(67592) -> 'V'; +uts46_map(67593) -> 'X'; +uts46_map(67638) -> 'X'; +uts46_map(67644) -> 'V'; +uts46_map(67647) -> 'V'; +uts46_map(67670) -> 'X'; +uts46_map(67827) -> 'X'; +uts46_map(67871) -> 'V'; +uts46_map(67903) -> 'V'; +uts46_map(68100) -> 'X'; +uts46_map(68116) -> 'X'; +uts46_map(68120) -> 'X'; +uts46_map(68159) -> 'V'; +uts46_map(68168) -> 'V'; +uts46_map(68296) -> 'V'; +uts46_map(68736) -> {'M', [68800]}; +uts46_map(68737) -> {'M', [68801]}; +uts46_map(68738) -> {'M', [68802]}; +uts46_map(68739) -> {'M', [68803]}; +uts46_map(68740) -> {'M', [68804]}; +uts46_map(68741) -> {'M', [68805]}; +uts46_map(68742) -> {'M', [68806]}; +uts46_map(68743) -> {'M', [68807]}; +uts46_map(68744) -> {'M', [68808]}; +uts46_map(68745) -> {'M', [68809]}; +uts46_map(68746) -> {'M', [68810]}; +uts46_map(68747) -> {'M', [68811]}; +uts46_map(68748) -> {'M', [68812]}; +uts46_map(68749) -> {'M', [68813]}; +uts46_map(68750) 
-> {'M', [68814]}; +uts46_map(68751) -> {'M', [68815]}; +uts46_map(68752) -> {'M', [68816]}; +uts46_map(68753) -> {'M', [68817]}; +uts46_map(68754) -> {'M', [68818]}; +uts46_map(68755) -> {'M', [68819]}; +uts46_map(68756) -> {'M', [68820]}; +uts46_map(68757) -> {'M', [68821]}; +uts46_map(68758) -> {'M', [68822]}; +uts46_map(68759) -> {'M', [68823]}; +uts46_map(68760) -> {'M', [68824]}; +uts46_map(68761) -> {'M', [68825]}; +uts46_map(68762) -> {'M', [68826]}; +uts46_map(68763) -> {'M', [68827]}; +uts46_map(68764) -> {'M', [68828]}; +uts46_map(68765) -> {'M', [68829]}; +uts46_map(68766) -> {'M', [68830]}; +uts46_map(68767) -> {'M', [68831]}; +uts46_map(68768) -> {'M', [68832]}; +uts46_map(68769) -> {'M', [68833]}; +uts46_map(68770) -> {'M', [68834]}; +uts46_map(68771) -> {'M', [68835]}; +uts46_map(68772) -> {'M', [68836]}; +uts46_map(68773) -> {'M', [68837]}; +uts46_map(68774) -> {'M', [68838]}; +uts46_map(68775) -> {'M', [68839]}; +uts46_map(68776) -> {'M', [68840]}; +uts46_map(68777) -> {'M', [68841]}; +uts46_map(68778) -> {'M', [68842]}; +uts46_map(68779) -> {'M', [68843]}; +uts46_map(68780) -> {'M', [68844]}; +uts46_map(68781) -> {'M', [68845]}; +uts46_map(68782) -> {'M', [68846]}; +uts46_map(68783) -> {'M', [68847]}; +uts46_map(68784) -> {'M', [68848]}; +uts46_map(68785) -> {'M', [68849]}; +uts46_map(68786) -> {'M', [68850]}; +uts46_map(69247) -> 'X'; +uts46_map(69290) -> 'X'; +uts46_map(69293) -> 'V'; +uts46_map(69415) -> 'V'; +uts46_map(69759) -> 'V'; +uts46_map(69821) -> 'X'; +uts46_map(69837) -> 'X'; +uts46_map(69941) -> 'X'; +uts46_map(69959) -> 'V'; +uts46_map(70006) -> 'V'; +uts46_map(70093) -> 'V'; +uts46_map(70106) -> 'V'; +uts46_map(70107) -> 'V'; +uts46_map(70108) -> 'V'; +uts46_map(70112) -> 'X'; +uts46_map(70162) -> 'X'; +uts46_map(70206) -> 'V'; +uts46_map(70279) -> 'X'; +uts46_map(70280) -> 'V'; +uts46_map(70281) -> 'X'; +uts46_map(70286) -> 'X'; +uts46_map(70302) -> 'X'; +uts46_map(70313) -> 'V'; +uts46_map(70400) -> 'V'; +uts46_map(70404) -> 
'X'; +uts46_map(70441) -> 'X'; +uts46_map(70449) -> 'X'; +uts46_map(70452) -> 'X'; +uts46_map(70458) -> 'X'; +uts46_map(70459) -> 'V'; +uts46_map(70480) -> 'V'; +uts46_map(70487) -> 'V'; +uts46_map(70746) -> 'V'; +uts46_map(70747) -> 'V'; +uts46_map(70748) -> 'X'; +uts46_map(70749) -> 'V'; +uts46_map(70750) -> 'V'; +uts46_map(70751) -> 'V'; +uts46_map(70854) -> 'V'; +uts46_map(70855) -> 'V'; +uts46_map(71236) -> 'V'; +uts46_map(71352) -> 'V'; +uts46_map(71450) -> 'V'; +uts46_map(71739) -> 'V'; +uts46_map(71840) -> {'M', [71872]}; +uts46_map(71841) -> {'M', [71873]}; +uts46_map(71842) -> {'M', [71874]}; +uts46_map(71843) -> {'M', [71875]}; +uts46_map(71844) -> {'M', [71876]}; +uts46_map(71845) -> {'M', [71877]}; +uts46_map(71846) -> {'M', [71878]}; +uts46_map(71847) -> {'M', [71879]}; +uts46_map(71848) -> {'M', [71880]}; +uts46_map(71849) -> {'M', [71881]}; +uts46_map(71850) -> {'M', [71882]}; +uts46_map(71851) -> {'M', [71883]}; +uts46_map(71852) -> {'M', [71884]}; +uts46_map(71853) -> {'M', [71885]}; +uts46_map(71854) -> {'M', [71886]}; +uts46_map(71855) -> {'M', [71887]}; +uts46_map(71856) -> {'M', [71888]}; +uts46_map(71857) -> {'M', [71889]}; +uts46_map(71858) -> {'M', [71890]}; +uts46_map(71859) -> {'M', [71891]}; +uts46_map(71860) -> {'M', [71892]}; +uts46_map(71861) -> {'M', [71893]}; +uts46_map(71862) -> {'M', [71894]}; +uts46_map(71863) -> {'M', [71895]}; +uts46_map(71864) -> {'M', [71896]}; +uts46_map(71865) -> {'M', [71897]}; +uts46_map(71866) -> {'M', [71898]}; +uts46_map(71867) -> {'M', [71899]}; +uts46_map(71868) -> {'M', [71900]}; +uts46_map(71869) -> {'M', [71901]}; +uts46_map(71870) -> {'M', [71902]}; +uts46_map(71871) -> {'M', [71903]}; +uts46_map(71935) -> 'V'; +uts46_map(71945) -> 'V'; +uts46_map(71956) -> 'X'; +uts46_map(71959) -> 'X'; +uts46_map(71990) -> 'X'; +uts46_map(72162) -> 'V'; +uts46_map(72263) -> 'V'; +uts46_map(72349) -> 'V'; +uts46_map(72713) -> 'X'; +uts46_map(72759) -> 'X'; +uts46_map(72872) -> 'X'; +uts46_map(72967) -> 'X'; 
+uts46_map(72970) -> 'X'; +uts46_map(73018) -> 'V'; +uts46_map(73019) -> 'X'; +uts46_map(73022) -> 'X'; +uts46_map(73062) -> 'X'; +uts46_map(73065) -> 'X'; +uts46_map(73103) -> 'X'; +uts46_map(73106) -> 'X'; +uts46_map(73648) -> 'V'; +uts46_map(73727) -> 'V'; +uts46_map(74649) -> 'V'; +uts46_map(74863) -> 'X'; +uts46_map(74868) -> 'V'; +uts46_map(78895) -> 'X'; +uts46_map(92767) -> 'X'; +uts46_map(92917) -> 'V'; +uts46_map(93018) -> 'X'; +uts46_map(93026) -> 'X'; +uts46_map(93760) -> {'M', [93792]}; +uts46_map(93761) -> {'M', [93793]}; +uts46_map(93762) -> {'M', [93794]}; +uts46_map(93763) -> {'M', [93795]}; +uts46_map(93764) -> {'M', [93796]}; +uts46_map(93765) -> {'M', [93797]}; +uts46_map(93766) -> {'M', [93798]}; +uts46_map(93767) -> {'M', [93799]}; +uts46_map(93768) -> {'M', [93800]}; +uts46_map(93769) -> {'M', [93801]}; +uts46_map(93770) -> {'M', [93802]}; +uts46_map(93771) -> {'M', [93803]}; +uts46_map(93772) -> {'M', [93804]}; +uts46_map(93773) -> {'M', [93805]}; +uts46_map(93774) -> {'M', [93806]}; +uts46_map(93775) -> {'M', [93807]}; +uts46_map(93776) -> {'M', [93808]}; +uts46_map(93777) -> {'M', [93809]}; +uts46_map(93778) -> {'M', [93810]}; +uts46_map(93779) -> {'M', [93811]}; +uts46_map(93780) -> {'M', [93812]}; +uts46_map(93781) -> {'M', [93813]}; +uts46_map(93782) -> {'M', [93814]}; +uts46_map(93783) -> {'M', [93815]}; +uts46_map(93784) -> {'M', [93816]}; +uts46_map(93785) -> {'M', [93817]}; +uts46_map(93786) -> {'M', [93818]}; +uts46_map(93787) -> {'M', [93819]}; +uts46_map(93788) -> {'M', [93820]}; +uts46_map(93789) -> {'M', [93821]}; +uts46_map(93790) -> {'M', [93822]}; +uts46_map(93791) -> {'M', [93823]}; +uts46_map(94031) -> 'V'; +uts46_map(94176) -> 'V'; +uts46_map(94177) -> 'V'; +uts46_map(94178) -> 'V'; +uts46_map(94179) -> 'V'; +uts46_map(94180) -> 'V'; +uts46_map(113820) -> 'V'; +uts46_map(113823) -> 'V'; +uts46_map(119081) -> 'V'; +uts46_map(119134) -> {'M', [119127,119141]}; +uts46_map(119135) -> {'M', [119128,119141]}; +uts46_map(119136) 
-> {'M', [119128,119141,119150]}; +uts46_map(119137) -> {'M', [119128,119141,119151]}; +uts46_map(119138) -> {'M', [119128,119141,119152]}; +uts46_map(119139) -> {'M', [119128,119141,119153]}; +uts46_map(119140) -> {'M', [119128,119141,119154]}; +uts46_map(119227) -> {'M', [119225,119141]}; +uts46_map(119228) -> {'M', [119226,119141]}; +uts46_map(119229) -> {'M', [119225,119141,119150]}; +uts46_map(119230) -> {'M', [119226,119141,119150]}; +uts46_map(119231) -> {'M', [119225,119141,119151]}; +uts46_map(119232) -> {'M', [119226,119141,119151]}; +uts46_map(119808) -> {'M', [97]}; +uts46_map(119809) -> {'M', [98]}; +uts46_map(119810) -> {'M', [99]}; +uts46_map(119811) -> {'M', [100]}; +uts46_map(119812) -> {'M', [101]}; +uts46_map(119813) -> {'M', [102]}; +uts46_map(119814) -> {'M', [103]}; +uts46_map(119815) -> {'M', [104]}; +uts46_map(119816) -> {'M', [105]}; +uts46_map(119817) -> {'M', [106]}; +uts46_map(119818) -> {'M', [107]}; +uts46_map(119819) -> {'M', [108]}; +uts46_map(119820) -> {'M', [109]}; +uts46_map(119821) -> {'M', [110]}; +uts46_map(119822) -> {'M', [111]}; +uts46_map(119823) -> {'M', [112]}; +uts46_map(119824) -> {'M', [113]}; +uts46_map(119825) -> {'M', [114]}; +uts46_map(119826) -> {'M', [115]}; +uts46_map(119827) -> {'M', [116]}; +uts46_map(119828) -> {'M', [117]}; +uts46_map(119829) -> {'M', [118]}; +uts46_map(119830) -> {'M', [119]}; +uts46_map(119831) -> {'M', [120]}; +uts46_map(119832) -> {'M', [121]}; +uts46_map(119833) -> {'M', [122]}; +uts46_map(119834) -> {'M', [97]}; +uts46_map(119835) -> {'M', [98]}; +uts46_map(119836) -> {'M', [99]}; +uts46_map(119837) -> {'M', [100]}; +uts46_map(119838) -> {'M', [101]}; +uts46_map(119839) -> {'M', [102]}; +uts46_map(119840) -> {'M', [103]}; +uts46_map(119841) -> {'M', [104]}; +uts46_map(119842) -> {'M', [105]}; +uts46_map(119843) -> {'M', [106]}; +uts46_map(119844) -> {'M', [107]}; +uts46_map(119845) -> {'M', [108]}; +uts46_map(119846) -> {'M', [109]}; +uts46_map(119847) -> {'M', [110]}; 
+uts46_map(119848) -> {'M', [111]}; +uts46_map(119849) -> {'M', [112]}; +uts46_map(119850) -> {'M', [113]}; +uts46_map(119851) -> {'M', [114]}; +uts46_map(119852) -> {'M', [115]}; +uts46_map(119853) -> {'M', [116]}; +uts46_map(119854) -> {'M', [117]}; +uts46_map(119855) -> {'M', [118]}; +uts46_map(119856) -> {'M', [119]}; +uts46_map(119857) -> {'M', [120]}; +uts46_map(119858) -> {'M', [121]}; +uts46_map(119859) -> {'M', [122]}; +uts46_map(119860) -> {'M', [97]}; +uts46_map(119861) -> {'M', [98]}; +uts46_map(119862) -> {'M', [99]}; +uts46_map(119863) -> {'M', [100]}; +uts46_map(119864) -> {'M', [101]}; +uts46_map(119865) -> {'M', [102]}; +uts46_map(119866) -> {'M', [103]}; +uts46_map(119867) -> {'M', [104]}; +uts46_map(119868) -> {'M', [105]}; +uts46_map(119869) -> {'M', [106]}; +uts46_map(119870) -> {'M', [107]}; +uts46_map(119871) -> {'M', [108]}; +uts46_map(119872) -> {'M', [109]}; +uts46_map(119873) -> {'M', [110]}; +uts46_map(119874) -> {'M', [111]}; +uts46_map(119875) -> {'M', [112]}; +uts46_map(119876) -> {'M', [113]}; +uts46_map(119877) -> {'M', [114]}; +uts46_map(119878) -> {'M', [115]}; +uts46_map(119879) -> {'M', [116]}; +uts46_map(119880) -> {'M', [117]}; +uts46_map(119881) -> {'M', [118]}; +uts46_map(119882) -> {'M', [119]}; +uts46_map(119883) -> {'M', [120]}; +uts46_map(119884) -> {'M', [121]}; +uts46_map(119885) -> {'M', [122]}; +uts46_map(119886) -> {'M', [97]}; +uts46_map(119887) -> {'M', [98]}; +uts46_map(119888) -> {'M', [99]}; +uts46_map(119889) -> {'M', [100]}; +uts46_map(119890) -> {'M', [101]}; +uts46_map(119891) -> {'M', [102]}; +uts46_map(119892) -> {'M', [103]}; +uts46_map(119893) -> 'X'; +uts46_map(119894) -> {'M', [105]}; +uts46_map(119895) -> {'M', [106]}; +uts46_map(119896) -> {'M', [107]}; +uts46_map(119897) -> {'M', [108]}; +uts46_map(119898) -> {'M', [109]}; +uts46_map(119899) -> {'M', [110]}; +uts46_map(119900) -> {'M', [111]}; +uts46_map(119901) -> {'M', [112]}; +uts46_map(119902) -> {'M', [113]}; +uts46_map(119903) -> {'M', 
[114]}; +uts46_map(119904) -> {'M', [115]}; +uts46_map(119905) -> {'M', [116]}; +uts46_map(119906) -> {'M', [117]}; +uts46_map(119907) -> {'M', [118]}; +uts46_map(119908) -> {'M', [119]}; +uts46_map(119909) -> {'M', [120]}; +uts46_map(119910) -> {'M', [121]}; +uts46_map(119911) -> {'M', [122]}; +uts46_map(119912) -> {'M', [97]}; +uts46_map(119913) -> {'M', [98]}; +uts46_map(119914) -> {'M', [99]}; +uts46_map(119915) -> {'M', [100]}; +uts46_map(119916) -> {'M', [101]}; +uts46_map(119917) -> {'M', [102]}; +uts46_map(119918) -> {'M', [103]}; +uts46_map(119919) -> {'M', [104]}; +uts46_map(119920) -> {'M', [105]}; +uts46_map(119921) -> {'M', [106]}; +uts46_map(119922) -> {'M', [107]}; +uts46_map(119923) -> {'M', [108]}; +uts46_map(119924) -> {'M', [109]}; +uts46_map(119925) -> {'M', [110]}; +uts46_map(119926) -> {'M', [111]}; +uts46_map(119927) -> {'M', [112]}; +uts46_map(119928) -> {'M', [113]}; +uts46_map(119929) -> {'M', [114]}; +uts46_map(119930) -> {'M', [115]}; +uts46_map(119931) -> {'M', [116]}; +uts46_map(119932) -> {'M', [117]}; +uts46_map(119933) -> {'M', [118]}; +uts46_map(119934) -> {'M', [119]}; +uts46_map(119935) -> {'M', [120]}; +uts46_map(119936) -> {'M', [121]}; +uts46_map(119937) -> {'M', [122]}; +uts46_map(119938) -> {'M', [97]}; +uts46_map(119939) -> {'M', [98]}; +uts46_map(119940) -> {'M', [99]}; +uts46_map(119941) -> {'M', [100]}; +uts46_map(119942) -> {'M', [101]}; +uts46_map(119943) -> {'M', [102]}; +uts46_map(119944) -> {'M', [103]}; +uts46_map(119945) -> {'M', [104]}; +uts46_map(119946) -> {'M', [105]}; +uts46_map(119947) -> {'M', [106]}; +uts46_map(119948) -> {'M', [107]}; +uts46_map(119949) -> {'M', [108]}; +uts46_map(119950) -> {'M', [109]}; +uts46_map(119951) -> {'M', [110]}; +uts46_map(119952) -> {'M', [111]}; +uts46_map(119953) -> {'M', [112]}; +uts46_map(119954) -> {'M', [113]}; +uts46_map(119955) -> {'M', [114]}; +uts46_map(119956) -> {'M', [115]}; +uts46_map(119957) -> {'M', [116]}; +uts46_map(119958) -> {'M', [117]}; 
+uts46_map(119959) -> {'M', [118]}; +uts46_map(119960) -> {'M', [119]}; +uts46_map(119961) -> {'M', [120]}; +uts46_map(119962) -> {'M', [121]}; +uts46_map(119963) -> {'M', [122]}; +uts46_map(119964) -> {'M', [97]}; +uts46_map(119965) -> 'X'; +uts46_map(119966) -> {'M', [99]}; +uts46_map(119967) -> {'M', [100]}; +uts46_map(119970) -> {'M', [103]}; +uts46_map(119973) -> {'M', [106]}; +uts46_map(119974) -> {'M', [107]}; +uts46_map(119977) -> {'M', [110]}; +uts46_map(119978) -> {'M', [111]}; +uts46_map(119979) -> {'M', [112]}; +uts46_map(119980) -> {'M', [113]}; +uts46_map(119981) -> 'X'; +uts46_map(119982) -> {'M', [115]}; +uts46_map(119983) -> {'M', [116]}; +uts46_map(119984) -> {'M', [117]}; +uts46_map(119985) -> {'M', [118]}; +uts46_map(119986) -> {'M', [119]}; +uts46_map(119987) -> {'M', [120]}; +uts46_map(119988) -> {'M', [121]}; +uts46_map(119989) -> {'M', [122]}; +uts46_map(119990) -> {'M', [97]}; +uts46_map(119991) -> {'M', [98]}; +uts46_map(119992) -> {'M', [99]}; +uts46_map(119993) -> {'M', [100]}; +uts46_map(119994) -> 'X'; +uts46_map(119995) -> {'M', [102]}; +uts46_map(119996) -> 'X'; +uts46_map(119997) -> {'M', [104]}; +uts46_map(119998) -> {'M', [105]}; +uts46_map(119999) -> {'M', [106]}; +uts46_map(120000) -> {'M', [107]}; +uts46_map(120001) -> {'M', [108]}; +uts46_map(120002) -> {'M', [109]}; +uts46_map(120003) -> {'M', [110]}; +uts46_map(120004) -> 'X'; +uts46_map(120005) -> {'M', [112]}; +uts46_map(120006) -> {'M', [113]}; +uts46_map(120007) -> {'M', [114]}; +uts46_map(120008) -> {'M', [115]}; +uts46_map(120009) -> {'M', [116]}; +uts46_map(120010) -> {'M', [117]}; +uts46_map(120011) -> {'M', [118]}; +uts46_map(120012) -> {'M', [119]}; +uts46_map(120013) -> {'M', [120]}; +uts46_map(120014) -> {'M', [121]}; +uts46_map(120015) -> {'M', [122]}; +uts46_map(120016) -> {'M', [97]}; +uts46_map(120017) -> {'M', [98]}; +uts46_map(120018) -> {'M', [99]}; +uts46_map(120019) -> {'M', [100]}; +uts46_map(120020) -> {'M', [101]}; +uts46_map(120021) -> {'M', [102]}; 
+uts46_map(120022) -> {'M', [103]}; +uts46_map(120023) -> {'M', [104]}; +uts46_map(120024) -> {'M', [105]}; +uts46_map(120025) -> {'M', [106]}; +uts46_map(120026) -> {'M', [107]}; +uts46_map(120027) -> {'M', [108]}; +uts46_map(120028) -> {'M', [109]}; +uts46_map(120029) -> {'M', [110]}; +uts46_map(120030) -> {'M', [111]}; +uts46_map(120031) -> {'M', [112]}; +uts46_map(120032) -> {'M', [113]}; +uts46_map(120033) -> {'M', [114]}; +uts46_map(120034) -> {'M', [115]}; +uts46_map(120035) -> {'M', [116]}; +uts46_map(120036) -> {'M', [117]}; +uts46_map(120037) -> {'M', [118]}; +uts46_map(120038) -> {'M', [119]}; +uts46_map(120039) -> {'M', [120]}; +uts46_map(120040) -> {'M', [121]}; +uts46_map(120041) -> {'M', [122]}; +uts46_map(120042) -> {'M', [97]}; +uts46_map(120043) -> {'M', [98]}; +uts46_map(120044) -> {'M', [99]}; +uts46_map(120045) -> {'M', [100]}; +uts46_map(120046) -> {'M', [101]}; +uts46_map(120047) -> {'M', [102]}; +uts46_map(120048) -> {'M', [103]}; +uts46_map(120049) -> {'M', [104]}; +uts46_map(120050) -> {'M', [105]}; +uts46_map(120051) -> {'M', [106]}; +uts46_map(120052) -> {'M', [107]}; +uts46_map(120053) -> {'M', [108]}; +uts46_map(120054) -> {'M', [109]}; +uts46_map(120055) -> {'M', [110]}; +uts46_map(120056) -> {'M', [111]}; +uts46_map(120057) -> {'M', [112]}; +uts46_map(120058) -> {'M', [113]}; +uts46_map(120059) -> {'M', [114]}; +uts46_map(120060) -> {'M', [115]}; +uts46_map(120061) -> {'M', [116]}; +uts46_map(120062) -> {'M', [117]}; +uts46_map(120063) -> {'M', [118]}; +uts46_map(120064) -> {'M', [119]}; +uts46_map(120065) -> {'M', [120]}; +uts46_map(120066) -> {'M', [121]}; +uts46_map(120067) -> {'M', [122]}; +uts46_map(120068) -> {'M', [97]}; +uts46_map(120069) -> {'M', [98]}; +uts46_map(120070) -> 'X'; +uts46_map(120071) -> {'M', [100]}; +uts46_map(120072) -> {'M', [101]}; +uts46_map(120073) -> {'M', [102]}; +uts46_map(120074) -> {'M', [103]}; +uts46_map(120077) -> {'M', [106]}; +uts46_map(120078) -> {'M', [107]}; +uts46_map(120079) -> {'M', 
[108]}; +uts46_map(120080) -> {'M', [109]}; +uts46_map(120081) -> {'M', [110]}; +uts46_map(120082) -> {'M', [111]}; +uts46_map(120083) -> {'M', [112]}; +uts46_map(120084) -> {'M', [113]}; +uts46_map(120085) -> 'X'; +uts46_map(120086) -> {'M', [115]}; +uts46_map(120087) -> {'M', [116]}; +uts46_map(120088) -> {'M', [117]}; +uts46_map(120089) -> {'M', [118]}; +uts46_map(120090) -> {'M', [119]}; +uts46_map(120091) -> {'M', [120]}; +uts46_map(120092) -> {'M', [121]}; +uts46_map(120093) -> 'X'; +uts46_map(120094) -> {'M', [97]}; +uts46_map(120095) -> {'M', [98]}; +uts46_map(120096) -> {'M', [99]}; +uts46_map(120097) -> {'M', [100]}; +uts46_map(120098) -> {'M', [101]}; +uts46_map(120099) -> {'M', [102]}; +uts46_map(120100) -> {'M', [103]}; +uts46_map(120101) -> {'M', [104]}; +uts46_map(120102) -> {'M', [105]}; +uts46_map(120103) -> {'M', [106]}; +uts46_map(120104) -> {'M', [107]}; +uts46_map(120105) -> {'M', [108]}; +uts46_map(120106) -> {'M', [109]}; +uts46_map(120107) -> {'M', [110]}; +uts46_map(120108) -> {'M', [111]}; +uts46_map(120109) -> {'M', [112]}; +uts46_map(120110) -> {'M', [113]}; +uts46_map(120111) -> {'M', [114]}; +uts46_map(120112) -> {'M', [115]}; +uts46_map(120113) -> {'M', [116]}; +uts46_map(120114) -> {'M', [117]}; +uts46_map(120115) -> {'M', [118]}; +uts46_map(120116) -> {'M', [119]}; +uts46_map(120117) -> {'M', [120]}; +uts46_map(120118) -> {'M', [121]}; +uts46_map(120119) -> {'M', [122]}; +uts46_map(120120) -> {'M', [97]}; +uts46_map(120121) -> {'M', [98]}; +uts46_map(120122) -> 'X'; +uts46_map(120123) -> {'M', [100]}; +uts46_map(120124) -> {'M', [101]}; +uts46_map(120125) -> {'M', [102]}; +uts46_map(120126) -> {'M', [103]}; +uts46_map(120127) -> 'X'; +uts46_map(120128) -> {'M', [105]}; +uts46_map(120129) -> {'M', [106]}; +uts46_map(120130) -> {'M', [107]}; +uts46_map(120131) -> {'M', [108]}; +uts46_map(120132) -> {'M', [109]}; +uts46_map(120133) -> 'X'; +uts46_map(120134) -> {'M', [111]}; +uts46_map(120138) -> {'M', [115]}; +uts46_map(120139) -> 
{'M', [116]}; +uts46_map(120140) -> {'M', [117]}; +uts46_map(120141) -> {'M', [118]}; +uts46_map(120142) -> {'M', [119]}; +uts46_map(120143) -> {'M', [120]}; +uts46_map(120144) -> {'M', [121]}; +uts46_map(120145) -> 'X'; +uts46_map(120146) -> {'M', [97]}; +uts46_map(120147) -> {'M', [98]}; +uts46_map(120148) -> {'M', [99]}; +uts46_map(120149) -> {'M', [100]}; +uts46_map(120150) -> {'M', [101]}; +uts46_map(120151) -> {'M', [102]}; +uts46_map(120152) -> {'M', [103]}; +uts46_map(120153) -> {'M', [104]}; +uts46_map(120154) -> {'M', [105]}; +uts46_map(120155) -> {'M', [106]}; +uts46_map(120156) -> {'M', [107]}; +uts46_map(120157) -> {'M', [108]}; +uts46_map(120158) -> {'M', [109]}; +uts46_map(120159) -> {'M', [110]}; +uts46_map(120160) -> {'M', [111]}; +uts46_map(120161) -> {'M', [112]}; +uts46_map(120162) -> {'M', [113]}; +uts46_map(120163) -> {'M', [114]}; +uts46_map(120164) -> {'M', [115]}; +uts46_map(120165) -> {'M', [116]}; +uts46_map(120166) -> {'M', [117]}; +uts46_map(120167) -> {'M', [118]}; +uts46_map(120168) -> {'M', [119]}; +uts46_map(120169) -> {'M', [120]}; +uts46_map(120170) -> {'M', [121]}; +uts46_map(120171) -> {'M', [122]}; +uts46_map(120172) -> {'M', [97]}; +uts46_map(120173) -> {'M', [98]}; +uts46_map(120174) -> {'M', [99]}; +uts46_map(120175) -> {'M', [100]}; +uts46_map(120176) -> {'M', [101]}; +uts46_map(120177) -> {'M', [102]}; +uts46_map(120178) -> {'M', [103]}; +uts46_map(120179) -> {'M', [104]}; +uts46_map(120180) -> {'M', [105]}; +uts46_map(120181) -> {'M', [106]}; +uts46_map(120182) -> {'M', [107]}; +uts46_map(120183) -> {'M', [108]}; +uts46_map(120184) -> {'M', [109]}; +uts46_map(120185) -> {'M', [110]}; +uts46_map(120186) -> {'M', [111]}; +uts46_map(120187) -> {'M', [112]}; +uts46_map(120188) -> {'M', [113]}; +uts46_map(120189) -> {'M', [114]}; +uts46_map(120190) -> {'M', [115]}; +uts46_map(120191) -> {'M', [116]}; +uts46_map(120192) -> {'M', [117]}; +uts46_map(120193) -> {'M', [118]}; +uts46_map(120194) -> {'M', [119]}; +uts46_map(120195) 
-> {'M', [120]}; +uts46_map(120196) -> {'M', [121]}; +uts46_map(120197) -> {'M', [122]}; +uts46_map(120198) -> {'M', [97]}; +uts46_map(120199) -> {'M', [98]}; +uts46_map(120200) -> {'M', [99]}; +uts46_map(120201) -> {'M', [100]}; +uts46_map(120202) -> {'M', [101]}; +uts46_map(120203) -> {'M', [102]}; +uts46_map(120204) -> {'M', [103]}; +uts46_map(120205) -> {'M', [104]}; +uts46_map(120206) -> {'M', [105]}; +uts46_map(120207) -> {'M', [106]}; +uts46_map(120208) -> {'M', [107]}; +uts46_map(120209) -> {'M', [108]}; +uts46_map(120210) -> {'M', [109]}; +uts46_map(120211) -> {'M', [110]}; +uts46_map(120212) -> {'M', [111]}; +uts46_map(120213) -> {'M', [112]}; +uts46_map(120214) -> {'M', [113]}; +uts46_map(120215) -> {'M', [114]}; +uts46_map(120216) -> {'M', [115]}; +uts46_map(120217) -> {'M', [116]}; +uts46_map(120218) -> {'M', [117]}; +uts46_map(120219) -> {'M', [118]}; +uts46_map(120220) -> {'M', [119]}; +uts46_map(120221) -> {'M', [120]}; +uts46_map(120222) -> {'M', [121]}; +uts46_map(120223) -> {'M', [122]}; +uts46_map(120224) -> {'M', [97]}; +uts46_map(120225) -> {'M', [98]}; +uts46_map(120226) -> {'M', [99]}; +uts46_map(120227) -> {'M', [100]}; +uts46_map(120228) -> {'M', [101]}; +uts46_map(120229) -> {'M', [102]}; +uts46_map(120230) -> {'M', [103]}; +uts46_map(120231) -> {'M', [104]}; +uts46_map(120232) -> {'M', [105]}; +uts46_map(120233) -> {'M', [106]}; +uts46_map(120234) -> {'M', [107]}; +uts46_map(120235) -> {'M', [108]}; +uts46_map(120236) -> {'M', [109]}; +uts46_map(120237) -> {'M', [110]}; +uts46_map(120238) -> {'M', [111]}; +uts46_map(120239) -> {'M', [112]}; +uts46_map(120240) -> {'M', [113]}; +uts46_map(120241) -> {'M', [114]}; +uts46_map(120242) -> {'M', [115]}; +uts46_map(120243) -> {'M', [116]}; +uts46_map(120244) -> {'M', [117]}; +uts46_map(120245) -> {'M', [118]}; +uts46_map(120246) -> {'M', [119]}; +uts46_map(120247) -> {'M', [120]}; +uts46_map(120248) -> {'M', [121]}; +uts46_map(120249) -> {'M', [122]}; +uts46_map(120250) -> {'M', [97]}; 
+uts46_map(120251) -> {'M', [98]}; +uts46_map(120252) -> {'M', [99]}; +uts46_map(120253) -> {'M', [100]}; +uts46_map(120254) -> {'M', [101]}; +uts46_map(120255) -> {'M', [102]}; +uts46_map(120256) -> {'M', [103]}; +uts46_map(120257) -> {'M', [104]}; +uts46_map(120258) -> {'M', [105]}; +uts46_map(120259) -> {'M', [106]}; +uts46_map(120260) -> {'M', [107]}; +uts46_map(120261) -> {'M', [108]}; +uts46_map(120262) -> {'M', [109]}; +uts46_map(120263) -> {'M', [110]}; +uts46_map(120264) -> {'M', [111]}; +uts46_map(120265) -> {'M', [112]}; +uts46_map(120266) -> {'M', [113]}; +uts46_map(120267) -> {'M', [114]}; +uts46_map(120268) -> {'M', [115]}; +uts46_map(120269) -> {'M', [116]}; +uts46_map(120270) -> {'M', [117]}; +uts46_map(120271) -> {'M', [118]}; +uts46_map(120272) -> {'M', [119]}; +uts46_map(120273) -> {'M', [120]}; +uts46_map(120274) -> {'M', [121]}; +uts46_map(120275) -> {'M', [122]}; +uts46_map(120276) -> {'M', [97]}; +uts46_map(120277) -> {'M', [98]}; +uts46_map(120278) -> {'M', [99]}; +uts46_map(120279) -> {'M', [100]}; +uts46_map(120280) -> {'M', [101]}; +uts46_map(120281) -> {'M', [102]}; +uts46_map(120282) -> {'M', [103]}; +uts46_map(120283) -> {'M', [104]}; +uts46_map(120284) -> {'M', [105]}; +uts46_map(120285) -> {'M', [106]}; +uts46_map(120286) -> {'M', [107]}; +uts46_map(120287) -> {'M', [108]}; +uts46_map(120288) -> {'M', [109]}; +uts46_map(120289) -> {'M', [110]}; +uts46_map(120290) -> {'M', [111]}; +uts46_map(120291) -> {'M', [112]}; +uts46_map(120292) -> {'M', [113]}; +uts46_map(120293) -> {'M', [114]}; +uts46_map(120294) -> {'M', [115]}; +uts46_map(120295) -> {'M', [116]}; +uts46_map(120296) -> {'M', [117]}; +uts46_map(120297) -> {'M', [118]}; +uts46_map(120298) -> {'M', [119]}; +uts46_map(120299) -> {'M', [120]}; +uts46_map(120300) -> {'M', [121]}; +uts46_map(120301) -> {'M', [122]}; +uts46_map(120302) -> {'M', [97]}; +uts46_map(120303) -> {'M', [98]}; +uts46_map(120304) -> {'M', [99]}; +uts46_map(120305) -> {'M', [100]}; +uts46_map(120306) -> {'M', 
[101]}; +uts46_map(120307) -> {'M', [102]}; +uts46_map(120308) -> {'M', [103]}; +uts46_map(120309) -> {'M', [104]}; +uts46_map(120310) -> {'M', [105]}; +uts46_map(120311) -> {'M', [106]}; +uts46_map(120312) -> {'M', [107]}; +uts46_map(120313) -> {'M', [108]}; +uts46_map(120314) -> {'M', [109]}; +uts46_map(120315) -> {'M', [110]}; +uts46_map(120316) -> {'M', [111]}; +uts46_map(120317) -> {'M', [112]}; +uts46_map(120318) -> {'M', [113]}; +uts46_map(120319) -> {'M', [114]}; +uts46_map(120320) -> {'M', [115]}; +uts46_map(120321) -> {'M', [116]}; +uts46_map(120322) -> {'M', [117]}; +uts46_map(120323) -> {'M', [118]}; +uts46_map(120324) -> {'M', [119]}; +uts46_map(120325) -> {'M', [120]}; +uts46_map(120326) -> {'M', [121]}; +uts46_map(120327) -> {'M', [122]}; +uts46_map(120328) -> {'M', [97]}; +uts46_map(120329) -> {'M', [98]}; +uts46_map(120330) -> {'M', [99]}; +uts46_map(120331) -> {'M', [100]}; +uts46_map(120332) -> {'M', [101]}; +uts46_map(120333) -> {'M', [102]}; +uts46_map(120334) -> {'M', [103]}; +uts46_map(120335) -> {'M', [104]}; +uts46_map(120336) -> {'M', [105]}; +uts46_map(120337) -> {'M', [106]}; +uts46_map(120338) -> {'M', [107]}; +uts46_map(120339) -> {'M', [108]}; +uts46_map(120340) -> {'M', [109]}; +uts46_map(120341) -> {'M', [110]}; +uts46_map(120342) -> {'M', [111]}; +uts46_map(120343) -> {'M', [112]}; +uts46_map(120344) -> {'M', [113]}; +uts46_map(120345) -> {'M', [114]}; +uts46_map(120346) -> {'M', [115]}; +uts46_map(120347) -> {'M', [116]}; +uts46_map(120348) -> {'M', [117]}; +uts46_map(120349) -> {'M', [118]}; +uts46_map(120350) -> {'M', [119]}; +uts46_map(120351) -> {'M', [120]}; +uts46_map(120352) -> {'M', [121]}; +uts46_map(120353) -> {'M', [122]}; +uts46_map(120354) -> {'M', [97]}; +uts46_map(120355) -> {'M', [98]}; +uts46_map(120356) -> {'M', [99]}; +uts46_map(120357) -> {'M', [100]}; +uts46_map(120358) -> {'M', [101]}; +uts46_map(120359) -> {'M', [102]}; +uts46_map(120360) -> {'M', [103]}; +uts46_map(120361) -> {'M', [104]}; 
+uts46_map(120362) -> {'M', [105]}; +uts46_map(120363) -> {'M', [106]}; +uts46_map(120364) -> {'M', [107]}; +uts46_map(120365) -> {'M', [108]}; +uts46_map(120366) -> {'M', [109]}; +uts46_map(120367) -> {'M', [110]}; +uts46_map(120368) -> {'M', [111]}; +uts46_map(120369) -> {'M', [112]}; +uts46_map(120370) -> {'M', [113]}; +uts46_map(120371) -> {'M', [114]}; +uts46_map(120372) -> {'M', [115]}; +uts46_map(120373) -> {'M', [116]}; +uts46_map(120374) -> {'M', [117]}; +uts46_map(120375) -> {'M', [118]}; +uts46_map(120376) -> {'M', [119]}; +uts46_map(120377) -> {'M', [120]}; +uts46_map(120378) -> {'M', [121]}; +uts46_map(120379) -> {'M', [122]}; +uts46_map(120380) -> {'M', [97]}; +uts46_map(120381) -> {'M', [98]}; +uts46_map(120382) -> {'M', [99]}; +uts46_map(120383) -> {'M', [100]}; +uts46_map(120384) -> {'M', [101]}; +uts46_map(120385) -> {'M', [102]}; +uts46_map(120386) -> {'M', [103]}; +uts46_map(120387) -> {'M', [104]}; +uts46_map(120388) -> {'M', [105]}; +uts46_map(120389) -> {'M', [106]}; +uts46_map(120390) -> {'M', [107]}; +uts46_map(120391) -> {'M', [108]}; +uts46_map(120392) -> {'M', [109]}; +uts46_map(120393) -> {'M', [110]}; +uts46_map(120394) -> {'M', [111]}; +uts46_map(120395) -> {'M', [112]}; +uts46_map(120396) -> {'M', [113]}; +uts46_map(120397) -> {'M', [114]}; +uts46_map(120398) -> {'M', [115]}; +uts46_map(120399) -> {'M', [116]}; +uts46_map(120400) -> {'M', [117]}; +uts46_map(120401) -> {'M', [118]}; +uts46_map(120402) -> {'M', [119]}; +uts46_map(120403) -> {'M', [120]}; +uts46_map(120404) -> {'M', [121]}; +uts46_map(120405) -> {'M', [122]}; +uts46_map(120406) -> {'M', [97]}; +uts46_map(120407) -> {'M', [98]}; +uts46_map(120408) -> {'M', [99]}; +uts46_map(120409) -> {'M', [100]}; +uts46_map(120410) -> {'M', [101]}; +uts46_map(120411) -> {'M', [102]}; +uts46_map(120412) -> {'M', [103]}; +uts46_map(120413) -> {'M', [104]}; +uts46_map(120414) -> {'M', [105]}; +uts46_map(120415) -> {'M', [106]}; +uts46_map(120416) -> {'M', [107]}; +uts46_map(120417) -> 
{'M', [108]}; +uts46_map(120418) -> {'M', [109]}; +uts46_map(120419) -> {'M', [110]}; +uts46_map(120420) -> {'M', [111]}; +uts46_map(120421) -> {'M', [112]}; +uts46_map(120422) -> {'M', [113]}; +uts46_map(120423) -> {'M', [114]}; +uts46_map(120424) -> {'M', [115]}; +uts46_map(120425) -> {'M', [116]}; +uts46_map(120426) -> {'M', [117]}; +uts46_map(120427) -> {'M', [118]}; +uts46_map(120428) -> {'M', [119]}; +uts46_map(120429) -> {'M', [120]}; +uts46_map(120430) -> {'M', [121]}; +uts46_map(120431) -> {'M', [122]}; +uts46_map(120432) -> {'M', [97]}; +uts46_map(120433) -> {'M', [98]}; +uts46_map(120434) -> {'M', [99]}; +uts46_map(120435) -> {'M', [100]}; +uts46_map(120436) -> {'M', [101]}; +uts46_map(120437) -> {'M', [102]}; +uts46_map(120438) -> {'M', [103]}; +uts46_map(120439) -> {'M', [104]}; +uts46_map(120440) -> {'M', [105]}; +uts46_map(120441) -> {'M', [106]}; +uts46_map(120442) -> {'M', [107]}; +uts46_map(120443) -> {'M', [108]}; +uts46_map(120444) -> {'M', [109]}; +uts46_map(120445) -> {'M', [110]}; +uts46_map(120446) -> {'M', [111]}; +uts46_map(120447) -> {'M', [112]}; +uts46_map(120448) -> {'M', [113]}; +uts46_map(120449) -> {'M', [114]}; +uts46_map(120450) -> {'M', [115]}; +uts46_map(120451) -> {'M', [116]}; +uts46_map(120452) -> {'M', [117]}; +uts46_map(120453) -> {'M', [118]}; +uts46_map(120454) -> {'M', [119]}; +uts46_map(120455) -> {'M', [120]}; +uts46_map(120456) -> {'M', [121]}; +uts46_map(120457) -> {'M', [122]}; +uts46_map(120458) -> {'M', [97]}; +uts46_map(120459) -> {'M', [98]}; +uts46_map(120460) -> {'M', [99]}; +uts46_map(120461) -> {'M', [100]}; +uts46_map(120462) -> {'M', [101]}; +uts46_map(120463) -> {'M', [102]}; +uts46_map(120464) -> {'M', [103]}; +uts46_map(120465) -> {'M', [104]}; +uts46_map(120466) -> {'M', [105]}; +uts46_map(120467) -> {'M', [106]}; +uts46_map(120468) -> {'M', [107]}; +uts46_map(120469) -> {'M', [108]}; +uts46_map(120470) -> {'M', [109]}; +uts46_map(120471) -> {'M', [110]}; +uts46_map(120472) -> {'M', [111]}; 
+uts46_map(120473) -> {'M', [112]}; +uts46_map(120474) -> {'M', [113]}; +uts46_map(120475) -> {'M', [114]}; +uts46_map(120476) -> {'M', [115]}; +uts46_map(120477) -> {'M', [116]}; +uts46_map(120478) -> {'M', [117]}; +uts46_map(120479) -> {'M', [118]}; +uts46_map(120480) -> {'M', [119]}; +uts46_map(120481) -> {'M', [120]}; +uts46_map(120482) -> {'M', [121]}; +uts46_map(120483) -> {'M', [122]}; +uts46_map(120484) -> {'M', [305]}; +uts46_map(120485) -> {'M', [567]}; +uts46_map(120488) -> {'M', [945]}; +uts46_map(120489) -> {'M', [946]}; +uts46_map(120490) -> {'M', [947]}; +uts46_map(120491) -> {'M', [948]}; +uts46_map(120492) -> {'M', [949]}; +uts46_map(120493) -> {'M', [950]}; +uts46_map(120494) -> {'M', [951]}; +uts46_map(120495) -> {'M', [952]}; +uts46_map(120496) -> {'M', [953]}; +uts46_map(120497) -> {'M', [954]}; +uts46_map(120498) -> {'M', [955]}; +uts46_map(120499) -> {'M', [956]}; +uts46_map(120500) -> {'M', [957]}; +uts46_map(120501) -> {'M', [958]}; +uts46_map(120502) -> {'M', [959]}; +uts46_map(120503) -> {'M', [960]}; +uts46_map(120504) -> {'M', [961]}; +uts46_map(120505) -> {'M', [952]}; +uts46_map(120506) -> {'M', [963]}; +uts46_map(120507) -> {'M', [964]}; +uts46_map(120508) -> {'M', [965]}; +uts46_map(120509) -> {'M', [966]}; +uts46_map(120510) -> {'M', [967]}; +uts46_map(120511) -> {'M', [968]}; +uts46_map(120512) -> {'M', [969]}; +uts46_map(120513) -> {'M', [8711]}; +uts46_map(120514) -> {'M', [945]}; +uts46_map(120515) -> {'M', [946]}; +uts46_map(120516) -> {'M', [947]}; +uts46_map(120517) -> {'M', [948]}; +uts46_map(120518) -> {'M', [949]}; +uts46_map(120519) -> {'M', [950]}; +uts46_map(120520) -> {'M', [951]}; +uts46_map(120521) -> {'M', [952]}; +uts46_map(120522) -> {'M', [953]}; +uts46_map(120523) -> {'M', [954]}; +uts46_map(120524) -> {'M', [955]}; +uts46_map(120525) -> {'M', [956]}; +uts46_map(120526) -> {'M', [957]}; +uts46_map(120527) -> {'M', [958]}; +uts46_map(120528) -> {'M', [959]}; +uts46_map(120529) -> {'M', [960]}; +uts46_map(120530) 
-> {'M', [961]}; +uts46_map(120533) -> {'M', [964]}; +uts46_map(120534) -> {'M', [965]}; +uts46_map(120535) -> {'M', [966]}; +uts46_map(120536) -> {'M', [967]}; +uts46_map(120537) -> {'M', [968]}; +uts46_map(120538) -> {'M', [969]}; +uts46_map(120539) -> {'M', [8706]}; +uts46_map(120540) -> {'M', [949]}; +uts46_map(120541) -> {'M', [952]}; +uts46_map(120542) -> {'M', [954]}; +uts46_map(120543) -> {'M', [966]}; +uts46_map(120544) -> {'M', [961]}; +uts46_map(120545) -> {'M', [960]}; +uts46_map(120546) -> {'M', [945]}; +uts46_map(120547) -> {'M', [946]}; +uts46_map(120548) -> {'M', [947]}; +uts46_map(120549) -> {'M', [948]}; +uts46_map(120550) -> {'M', [949]}; +uts46_map(120551) -> {'M', [950]}; +uts46_map(120552) -> {'M', [951]}; +uts46_map(120553) -> {'M', [952]}; +uts46_map(120554) -> {'M', [953]}; +uts46_map(120555) -> {'M', [954]}; +uts46_map(120556) -> {'M', [955]}; +uts46_map(120557) -> {'M', [956]}; +uts46_map(120558) -> {'M', [957]}; +uts46_map(120559) -> {'M', [958]}; +uts46_map(120560) -> {'M', [959]}; +uts46_map(120561) -> {'M', [960]}; +uts46_map(120562) -> {'M', [961]}; +uts46_map(120563) -> {'M', [952]}; +uts46_map(120564) -> {'M', [963]}; +uts46_map(120565) -> {'M', [964]}; +uts46_map(120566) -> {'M', [965]}; +uts46_map(120567) -> {'M', [966]}; +uts46_map(120568) -> {'M', [967]}; +uts46_map(120569) -> {'M', [968]}; +uts46_map(120570) -> {'M', [969]}; +uts46_map(120571) -> {'M', [8711]}; +uts46_map(120572) -> {'M', [945]}; +uts46_map(120573) -> {'M', [946]}; +uts46_map(120574) -> {'M', [947]}; +uts46_map(120575) -> {'M', [948]}; +uts46_map(120576) -> {'M', [949]}; +uts46_map(120577) -> {'M', [950]}; +uts46_map(120578) -> {'M', [951]}; +uts46_map(120579) -> {'M', [952]}; +uts46_map(120580) -> {'M', [953]}; +uts46_map(120581) -> {'M', [954]}; +uts46_map(120582) -> {'M', [955]}; +uts46_map(120583) -> {'M', [956]}; +uts46_map(120584) -> {'M', [957]}; +uts46_map(120585) -> {'M', [958]}; +uts46_map(120586) -> {'M', [959]}; +uts46_map(120587) -> {'M', [960]}; 
+uts46_map(120588) -> {'M', [961]}; +uts46_map(120591) -> {'M', [964]}; +uts46_map(120592) -> {'M', [965]}; +uts46_map(120593) -> {'M', [966]}; +uts46_map(120594) -> {'M', [967]}; +uts46_map(120595) -> {'M', [968]}; +uts46_map(120596) -> {'M', [969]}; +uts46_map(120597) -> {'M', [8706]}; +uts46_map(120598) -> {'M', [949]}; +uts46_map(120599) -> {'M', [952]}; +uts46_map(120600) -> {'M', [954]}; +uts46_map(120601) -> {'M', [966]}; +uts46_map(120602) -> {'M', [961]}; +uts46_map(120603) -> {'M', [960]}; +uts46_map(120604) -> {'M', [945]}; +uts46_map(120605) -> {'M', [946]}; +uts46_map(120606) -> {'M', [947]}; +uts46_map(120607) -> {'M', [948]}; +uts46_map(120608) -> {'M', [949]}; +uts46_map(120609) -> {'M', [950]}; +uts46_map(120610) -> {'M', [951]}; +uts46_map(120611) -> {'M', [952]}; +uts46_map(120612) -> {'M', [953]}; +uts46_map(120613) -> {'M', [954]}; +uts46_map(120614) -> {'M', [955]}; +uts46_map(120615) -> {'M', [956]}; +uts46_map(120616) -> {'M', [957]}; +uts46_map(120617) -> {'M', [958]}; +uts46_map(120618) -> {'M', [959]}; +uts46_map(120619) -> {'M', [960]}; +uts46_map(120620) -> {'M', [961]}; +uts46_map(120621) -> {'M', [952]}; +uts46_map(120622) -> {'M', [963]}; +uts46_map(120623) -> {'M', [964]}; +uts46_map(120624) -> {'M', [965]}; +uts46_map(120625) -> {'M', [966]}; +uts46_map(120626) -> {'M', [967]}; +uts46_map(120627) -> {'M', [968]}; +uts46_map(120628) -> {'M', [969]}; +uts46_map(120629) -> {'M', [8711]}; +uts46_map(120630) -> {'M', [945]}; +uts46_map(120631) -> {'M', [946]}; +uts46_map(120632) -> {'M', [947]}; +uts46_map(120633) -> {'M', [948]}; +uts46_map(120634) -> {'M', [949]}; +uts46_map(120635) -> {'M', [950]}; +uts46_map(120636) -> {'M', [951]}; +uts46_map(120637) -> {'M', [952]}; +uts46_map(120638) -> {'M', [953]}; +uts46_map(120639) -> {'M', [954]}; +uts46_map(120640) -> {'M', [955]}; +uts46_map(120641) -> {'M', [956]}; +uts46_map(120642) -> {'M', [957]}; +uts46_map(120643) -> {'M', [958]}; +uts46_map(120644) -> {'M', [959]}; 
+uts46_map(120645) -> {'M', [960]}; +uts46_map(120646) -> {'M', [961]}; +uts46_map(120649) -> {'M', [964]}; +uts46_map(120650) -> {'M', [965]}; +uts46_map(120651) -> {'M', [966]}; +uts46_map(120652) -> {'M', [967]}; +uts46_map(120653) -> {'M', [968]}; +uts46_map(120654) -> {'M', [969]}; +uts46_map(120655) -> {'M', [8706]}; +uts46_map(120656) -> {'M', [949]}; +uts46_map(120657) -> {'M', [952]}; +uts46_map(120658) -> {'M', [954]}; +uts46_map(120659) -> {'M', [966]}; +uts46_map(120660) -> {'M', [961]}; +uts46_map(120661) -> {'M', [960]}; +uts46_map(120662) -> {'M', [945]}; +uts46_map(120663) -> {'M', [946]}; +uts46_map(120664) -> {'M', [947]}; +uts46_map(120665) -> {'M', [948]}; +uts46_map(120666) -> {'M', [949]}; +uts46_map(120667) -> {'M', [950]}; +uts46_map(120668) -> {'M', [951]}; +uts46_map(120669) -> {'M', [952]}; +uts46_map(120670) -> {'M', [953]}; +uts46_map(120671) -> {'M', [954]}; +uts46_map(120672) -> {'M', [955]}; +uts46_map(120673) -> {'M', [956]}; +uts46_map(120674) -> {'M', [957]}; +uts46_map(120675) -> {'M', [958]}; +uts46_map(120676) -> {'M', [959]}; +uts46_map(120677) -> {'M', [960]}; +uts46_map(120678) -> {'M', [961]}; +uts46_map(120679) -> {'M', [952]}; +uts46_map(120680) -> {'M', [963]}; +uts46_map(120681) -> {'M', [964]}; +uts46_map(120682) -> {'M', [965]}; +uts46_map(120683) -> {'M', [966]}; +uts46_map(120684) -> {'M', [967]}; +uts46_map(120685) -> {'M', [968]}; +uts46_map(120686) -> {'M', [969]}; +uts46_map(120687) -> {'M', [8711]}; +uts46_map(120688) -> {'M', [945]}; +uts46_map(120689) -> {'M', [946]}; +uts46_map(120690) -> {'M', [947]}; +uts46_map(120691) -> {'M', [948]}; +uts46_map(120692) -> {'M', [949]}; +uts46_map(120693) -> {'M', [950]}; +uts46_map(120694) -> {'M', [951]}; +uts46_map(120695) -> {'M', [952]}; +uts46_map(120696) -> {'M', [953]}; +uts46_map(120697) -> {'M', [954]}; +uts46_map(120698) -> {'M', [955]}; +uts46_map(120699) -> {'M', [956]}; +uts46_map(120700) -> {'M', [957]}; +uts46_map(120701) -> {'M', [958]}; 
+uts46_map(120702) -> {'M', [959]}; +uts46_map(120703) -> {'M', [960]}; +uts46_map(120704) -> {'M', [961]}; +uts46_map(120707) -> {'M', [964]}; +uts46_map(120708) -> {'M', [965]}; +uts46_map(120709) -> {'M', [966]}; +uts46_map(120710) -> {'M', [967]}; +uts46_map(120711) -> {'M', [968]}; +uts46_map(120712) -> {'M', [969]}; +uts46_map(120713) -> {'M', [8706]}; +uts46_map(120714) -> {'M', [949]}; +uts46_map(120715) -> {'M', [952]}; +uts46_map(120716) -> {'M', [954]}; +uts46_map(120717) -> {'M', [966]}; +uts46_map(120718) -> {'M', [961]}; +uts46_map(120719) -> {'M', [960]}; +uts46_map(120720) -> {'M', [945]}; +uts46_map(120721) -> {'M', [946]}; +uts46_map(120722) -> {'M', [947]}; +uts46_map(120723) -> {'M', [948]}; +uts46_map(120724) -> {'M', [949]}; +uts46_map(120725) -> {'M', [950]}; +uts46_map(120726) -> {'M', [951]}; +uts46_map(120727) -> {'M', [952]}; +uts46_map(120728) -> {'M', [953]}; +uts46_map(120729) -> {'M', [954]}; +uts46_map(120730) -> {'M', [955]}; +uts46_map(120731) -> {'M', [956]}; +uts46_map(120732) -> {'M', [957]}; +uts46_map(120733) -> {'M', [958]}; +uts46_map(120734) -> {'M', [959]}; +uts46_map(120735) -> {'M', [960]}; +uts46_map(120736) -> {'M', [961]}; +uts46_map(120737) -> {'M', [952]}; +uts46_map(120738) -> {'M', [963]}; +uts46_map(120739) -> {'M', [964]}; +uts46_map(120740) -> {'M', [965]}; +uts46_map(120741) -> {'M', [966]}; +uts46_map(120742) -> {'M', [967]}; +uts46_map(120743) -> {'M', [968]}; +uts46_map(120744) -> {'M', [969]}; +uts46_map(120745) -> {'M', [8711]}; +uts46_map(120746) -> {'M', [945]}; +uts46_map(120747) -> {'M', [946]}; +uts46_map(120748) -> {'M', [947]}; +uts46_map(120749) -> {'M', [948]}; +uts46_map(120750) -> {'M', [949]}; +uts46_map(120751) -> {'M', [950]}; +uts46_map(120752) -> {'M', [951]}; +uts46_map(120753) -> {'M', [952]}; +uts46_map(120754) -> {'M', [953]}; +uts46_map(120755) -> {'M', [954]}; +uts46_map(120756) -> {'M', [955]}; +uts46_map(120757) -> {'M', [956]}; +uts46_map(120758) -> {'M', [957]}; 
+uts46_map(120759) -> {'M', [958]}; +uts46_map(120760) -> {'M', [959]}; +uts46_map(120761) -> {'M', [960]}; +uts46_map(120762) -> {'M', [961]}; +uts46_map(120765) -> {'M', [964]}; +uts46_map(120766) -> {'M', [965]}; +uts46_map(120767) -> {'M', [966]}; +uts46_map(120768) -> {'M', [967]}; +uts46_map(120769) -> {'M', [968]}; +uts46_map(120770) -> {'M', [969]}; +uts46_map(120771) -> {'M', [8706]}; +uts46_map(120772) -> {'M', [949]}; +uts46_map(120773) -> {'M', [952]}; +uts46_map(120774) -> {'M', [954]}; +uts46_map(120775) -> {'M', [966]}; +uts46_map(120776) -> {'M', [961]}; +uts46_map(120777) -> {'M', [960]}; +uts46_map(120782) -> {'M', [48]}; +uts46_map(120783) -> {'M', [49]}; +uts46_map(120784) -> {'M', [50]}; +uts46_map(120785) -> {'M', [51]}; +uts46_map(120786) -> {'M', [52]}; +uts46_map(120787) -> {'M', [53]}; +uts46_map(120788) -> {'M', [54]}; +uts46_map(120789) -> {'M', [55]}; +uts46_map(120790) -> {'M', [56]}; +uts46_map(120791) -> {'M', [57]}; +uts46_map(120792) -> {'M', [48]}; +uts46_map(120793) -> {'M', [49]}; +uts46_map(120794) -> {'M', [50]}; +uts46_map(120795) -> {'M', [51]}; +uts46_map(120796) -> {'M', [52]}; +uts46_map(120797) -> {'M', [53]}; +uts46_map(120798) -> {'M', [54]}; +uts46_map(120799) -> {'M', [55]}; +uts46_map(120800) -> {'M', [56]}; +uts46_map(120801) -> {'M', [57]}; +uts46_map(120802) -> {'M', [48]}; +uts46_map(120803) -> {'M', [49]}; +uts46_map(120804) -> {'M', [50]}; +uts46_map(120805) -> {'M', [51]}; +uts46_map(120806) -> {'M', [52]}; +uts46_map(120807) -> {'M', [53]}; +uts46_map(120808) -> {'M', [54]}; +uts46_map(120809) -> {'M', [55]}; +uts46_map(120810) -> {'M', [56]}; +uts46_map(120811) -> {'M', [57]}; +uts46_map(120812) -> {'M', [48]}; +uts46_map(120813) -> {'M', [49]}; +uts46_map(120814) -> {'M', [50]}; +uts46_map(120815) -> {'M', [51]}; +uts46_map(120816) -> {'M', [52]}; +uts46_map(120817) -> {'M', [53]}; +uts46_map(120818) -> {'M', [54]}; +uts46_map(120819) -> {'M', [55]}; +uts46_map(120820) -> {'M', [56]}; +uts46_map(120821) -> 
{'M', [57]}; +uts46_map(120822) -> {'M', [48]}; +uts46_map(120823) -> {'M', [49]}; +uts46_map(120824) -> {'M', [50]}; +uts46_map(120825) -> {'M', [51]}; +uts46_map(120826) -> {'M', [52]}; +uts46_map(120827) -> {'M', [53]}; +uts46_map(120828) -> {'M', [54]}; +uts46_map(120829) -> {'M', [55]}; +uts46_map(120830) -> {'M', [56]}; +uts46_map(120831) -> {'M', [57]}; +uts46_map(121461) -> 'V'; +uts46_map(121476) -> 'V'; +uts46_map(121504) -> 'X'; +uts46_map(122887) -> 'X'; +uts46_map(122914) -> 'X'; +uts46_map(122917) -> 'X'; +uts46_map(123214) -> 'V'; +uts46_map(123215) -> 'V'; +uts46_map(123647) -> 'V'; +uts46_map(125184) -> {'M', [125218]}; +uts46_map(125185) -> {'M', [125219]}; +uts46_map(125186) -> {'M', [125220]}; +uts46_map(125187) -> {'M', [125221]}; +uts46_map(125188) -> {'M', [125222]}; +uts46_map(125189) -> {'M', [125223]}; +uts46_map(125190) -> {'M', [125224]}; +uts46_map(125191) -> {'M', [125225]}; +uts46_map(125192) -> {'M', [125226]}; +uts46_map(125193) -> {'M', [125227]}; +uts46_map(125194) -> {'M', [125228]}; +uts46_map(125195) -> {'M', [125229]}; +uts46_map(125196) -> {'M', [125230]}; +uts46_map(125197) -> {'M', [125231]}; +uts46_map(125198) -> {'M', [125232]}; +uts46_map(125199) -> {'M', [125233]}; +uts46_map(125200) -> {'M', [125234]}; +uts46_map(125201) -> {'M', [125235]}; +uts46_map(125202) -> {'M', [125236]}; +uts46_map(125203) -> {'M', [125237]}; +uts46_map(125204) -> {'M', [125238]}; +uts46_map(125205) -> {'M', [125239]}; +uts46_map(125206) -> {'M', [125240]}; +uts46_map(125207) -> {'M', [125241]}; +uts46_map(125208) -> {'M', [125242]}; +uts46_map(125209) -> {'M', [125243]}; +uts46_map(125210) -> {'M', [125244]}; +uts46_map(125211) -> {'M', [125245]}; +uts46_map(125212) -> {'M', [125246]}; +uts46_map(125213) -> {'M', [125247]}; +uts46_map(125214) -> {'M', [125248]}; +uts46_map(125215) -> {'M', [125249]}; +uts46_map(125216) -> {'M', [125250]}; +uts46_map(125217) -> {'M', [125251]}; +uts46_map(125259) -> 'V'; +uts46_map(126464) -> {'M', [1575]}; 
+uts46_map(126465) -> {'M', [1576]}; +uts46_map(126466) -> {'M', [1580]}; +uts46_map(126467) -> {'M', [1583]}; +uts46_map(126468) -> 'X'; +uts46_map(126469) -> {'M', [1608]}; +uts46_map(126470) -> {'M', [1586]}; +uts46_map(126471) -> {'M', [1581]}; +uts46_map(126472) -> {'M', [1591]}; +uts46_map(126473) -> {'M', [1610]}; +uts46_map(126474) -> {'M', [1603]}; +uts46_map(126475) -> {'M', [1604]}; +uts46_map(126476) -> {'M', [1605]}; +uts46_map(126477) -> {'M', [1606]}; +uts46_map(126478) -> {'M', [1587]}; +uts46_map(126479) -> {'M', [1593]}; +uts46_map(126480) -> {'M', [1601]}; +uts46_map(126481) -> {'M', [1589]}; +uts46_map(126482) -> {'M', [1602]}; +uts46_map(126483) -> {'M', [1585]}; +uts46_map(126484) -> {'M', [1588]}; +uts46_map(126485) -> {'M', [1578]}; +uts46_map(126486) -> {'M', [1579]}; +uts46_map(126487) -> {'M', [1582]}; +uts46_map(126488) -> {'M', [1584]}; +uts46_map(126489) -> {'M', [1590]}; +uts46_map(126490) -> {'M', [1592]}; +uts46_map(126491) -> {'M', [1594]}; +uts46_map(126492) -> {'M', [1646]}; +uts46_map(126493) -> {'M', [1722]}; +uts46_map(126494) -> {'M', [1697]}; +uts46_map(126495) -> {'M', [1647]}; +uts46_map(126496) -> 'X'; +uts46_map(126497) -> {'M', [1576]}; +uts46_map(126498) -> {'M', [1580]}; +uts46_map(126499) -> 'X'; +uts46_map(126500) -> {'M', [1607]}; +uts46_map(126503) -> {'M', [1581]}; +uts46_map(126504) -> 'X'; +uts46_map(126505) -> {'M', [1610]}; +uts46_map(126506) -> {'M', [1603]}; +uts46_map(126507) -> {'M', [1604]}; +uts46_map(126508) -> {'M', [1605]}; +uts46_map(126509) -> {'M', [1606]}; +uts46_map(126510) -> {'M', [1587]}; +uts46_map(126511) -> {'M', [1593]}; +uts46_map(126512) -> {'M', [1601]}; +uts46_map(126513) -> {'M', [1589]}; +uts46_map(126514) -> {'M', [1602]}; +uts46_map(126515) -> 'X'; +uts46_map(126516) -> {'M', [1588]}; +uts46_map(126517) -> {'M', [1578]}; +uts46_map(126518) -> {'M', [1579]}; +uts46_map(126519) -> {'M', [1582]}; +uts46_map(126520) -> 'X'; +uts46_map(126521) -> {'M', [1590]}; +uts46_map(126522) -> 
'X'; +uts46_map(126523) -> {'M', [1594]}; +uts46_map(126530) -> {'M', [1580]}; +uts46_map(126535) -> {'M', [1581]}; +uts46_map(126536) -> 'X'; +uts46_map(126537) -> {'M', [1610]}; +uts46_map(126538) -> 'X'; +uts46_map(126539) -> {'M', [1604]}; +uts46_map(126540) -> 'X'; +uts46_map(126541) -> {'M', [1606]}; +uts46_map(126542) -> {'M', [1587]}; +uts46_map(126543) -> {'M', [1593]}; +uts46_map(126544) -> 'X'; +uts46_map(126545) -> {'M', [1589]}; +uts46_map(126546) -> {'M', [1602]}; +uts46_map(126547) -> 'X'; +uts46_map(126548) -> {'M', [1588]}; +uts46_map(126551) -> {'M', [1582]}; +uts46_map(126552) -> 'X'; +uts46_map(126553) -> {'M', [1590]}; +uts46_map(126554) -> 'X'; +uts46_map(126555) -> {'M', [1594]}; +uts46_map(126556) -> 'X'; +uts46_map(126557) -> {'M', [1722]}; +uts46_map(126558) -> 'X'; +uts46_map(126559) -> {'M', [1647]}; +uts46_map(126560) -> 'X'; +uts46_map(126561) -> {'M', [1576]}; +uts46_map(126562) -> {'M', [1580]}; +uts46_map(126563) -> 'X'; +uts46_map(126564) -> {'M', [1607]}; +uts46_map(126567) -> {'M', [1581]}; +uts46_map(126568) -> {'M', [1591]}; +uts46_map(126569) -> {'M', [1610]}; +uts46_map(126570) -> {'M', [1603]}; +uts46_map(126571) -> 'X'; +uts46_map(126572) -> {'M', [1605]}; +uts46_map(126573) -> {'M', [1606]}; +uts46_map(126574) -> {'M', [1587]}; +uts46_map(126575) -> {'M', [1593]}; +uts46_map(126576) -> {'M', [1601]}; +uts46_map(126577) -> {'M', [1589]}; +uts46_map(126578) -> {'M', [1602]}; +uts46_map(126579) -> 'X'; +uts46_map(126580) -> {'M', [1588]}; +uts46_map(126581) -> {'M', [1578]}; +uts46_map(126582) -> {'M', [1579]}; +uts46_map(126583) -> {'M', [1582]}; +uts46_map(126584) -> 'X'; +uts46_map(126585) -> {'M', [1590]}; +uts46_map(126586) -> {'M', [1592]}; +uts46_map(126587) -> {'M', [1594]}; +uts46_map(126588) -> {'M', [1646]}; +uts46_map(126589) -> 'X'; +uts46_map(126590) -> {'M', [1697]}; +uts46_map(126591) -> 'X'; +uts46_map(126592) -> {'M', [1575]}; +uts46_map(126593) -> {'M', [1576]}; +uts46_map(126594) -> {'M', [1580]}; 
+uts46_map(126595) -> {'M', [1583]}; +uts46_map(126596) -> {'M', [1607]}; +uts46_map(126597) -> {'M', [1608]}; +uts46_map(126598) -> {'M', [1586]}; +uts46_map(126599) -> {'M', [1581]}; +uts46_map(126600) -> {'M', [1591]}; +uts46_map(126601) -> {'M', [1610]}; +uts46_map(126602) -> 'X'; +uts46_map(126603) -> {'M', [1604]}; +uts46_map(126604) -> {'M', [1605]}; +uts46_map(126605) -> {'M', [1606]}; +uts46_map(126606) -> {'M', [1587]}; +uts46_map(126607) -> {'M', [1593]}; +uts46_map(126608) -> {'M', [1601]}; +uts46_map(126609) -> {'M', [1589]}; +uts46_map(126610) -> {'M', [1602]}; +uts46_map(126611) -> {'M', [1585]}; +uts46_map(126612) -> {'M', [1588]}; +uts46_map(126613) -> {'M', [1578]}; +uts46_map(126614) -> {'M', [1579]}; +uts46_map(126615) -> {'M', [1582]}; +uts46_map(126616) -> {'M', [1584]}; +uts46_map(126617) -> {'M', [1590]}; +uts46_map(126618) -> {'M', [1592]}; +uts46_map(126619) -> {'M', [1594]}; +uts46_map(126625) -> {'M', [1576]}; +uts46_map(126626) -> {'M', [1580]}; +uts46_map(126627) -> {'M', [1583]}; +uts46_map(126628) -> 'X'; +uts46_map(126629) -> {'M', [1608]}; +uts46_map(126630) -> {'M', [1586]}; +uts46_map(126631) -> {'M', [1581]}; +uts46_map(126632) -> {'M', [1591]}; +uts46_map(126633) -> {'M', [1610]}; +uts46_map(126634) -> 'X'; +uts46_map(126635) -> {'M', [1604]}; +uts46_map(126636) -> {'M', [1605]}; +uts46_map(126637) -> {'M', [1606]}; +uts46_map(126638) -> {'M', [1587]}; +uts46_map(126639) -> {'M', [1593]}; +uts46_map(126640) -> {'M', [1601]}; +uts46_map(126641) -> {'M', [1589]}; +uts46_map(126642) -> {'M', [1602]}; +uts46_map(126643) -> {'M', [1585]}; +uts46_map(126644) -> {'M', [1588]}; +uts46_map(126645) -> {'M', [1578]}; +uts46_map(126646) -> {'M', [1579]}; +uts46_map(126647) -> {'M', [1582]}; +uts46_map(126648) -> {'M', [1584]}; +uts46_map(126649) -> {'M', [1590]}; +uts46_map(126650) -> {'M', [1592]}; +uts46_map(126651) -> {'M', [1594]}; +uts46_map(127167) -> 'V'; +uts46_map(127168) -> 'X'; +uts46_map(127184) -> 'X'; +uts46_map(127232) -> 
'X'; +uts46_map(127233) -> {'3', [48,44]}; +uts46_map(127234) -> {'3', [49,44]}; +uts46_map(127235) -> {'3', [50,44]}; +uts46_map(127236) -> {'3', [51,44]}; +uts46_map(127237) -> {'3', [52,44]}; +uts46_map(127238) -> {'3', [53,44]}; +uts46_map(127239) -> {'3', [54,44]}; +uts46_map(127240) -> {'3', [55,44]}; +uts46_map(127241) -> {'3', [56,44]}; +uts46_map(127242) -> {'3', [57,44]}; +uts46_map(127248) -> {'3', [40,97,41]}; +uts46_map(127249) -> {'3', [40,98,41]}; +uts46_map(127250) -> {'3', [40,99,41]}; +uts46_map(127251) -> {'3', [40,100,41]}; +uts46_map(127252) -> {'3', [40,101,41]}; +uts46_map(127253) -> {'3', [40,102,41]}; +uts46_map(127254) -> {'3', [40,103,41]}; +uts46_map(127255) -> {'3', [40,104,41]}; +uts46_map(127256) -> {'3', [40,105,41]}; +uts46_map(127257) -> {'3', [40,106,41]}; +uts46_map(127258) -> {'3', [40,107,41]}; +uts46_map(127259) -> {'3', [40,108,41]}; +uts46_map(127260) -> {'3', [40,109,41]}; +uts46_map(127261) -> {'3', [40,110,41]}; +uts46_map(127262) -> {'3', [40,111,41]}; +uts46_map(127263) -> {'3', [40,112,41]}; +uts46_map(127264) -> {'3', [40,113,41]}; +uts46_map(127265) -> {'3', [40,114,41]}; +uts46_map(127266) -> {'3', [40,115,41]}; +uts46_map(127267) -> {'3', [40,116,41]}; +uts46_map(127268) -> {'3', [40,117,41]}; +uts46_map(127269) -> {'3', [40,118,41]}; +uts46_map(127270) -> {'3', [40,119,41]}; +uts46_map(127271) -> {'3', [40,120,41]}; +uts46_map(127272) -> {'3', [40,121,41]}; +uts46_map(127273) -> {'3', [40,122,41]}; +uts46_map(127274) -> {'M', [12308,115,12309]}; +uts46_map(127275) -> {'M', [99]}; +uts46_map(127276) -> {'M', [114]}; +uts46_map(127277) -> {'M', [99,100]}; +uts46_map(127278) -> {'M', [119,122]}; +uts46_map(127279) -> 'V'; +uts46_map(127280) -> {'M', [97]}; +uts46_map(127281) -> {'M', [98]}; +uts46_map(127282) -> {'M', [99]}; +uts46_map(127283) -> {'M', [100]}; +uts46_map(127284) -> {'M', [101]}; +uts46_map(127285) -> {'M', [102]}; +uts46_map(127286) -> {'M', [103]}; +uts46_map(127287) -> {'M', [104]}; 
+uts46_map(127288) -> {'M', [105]}; +uts46_map(127289) -> {'M', [106]}; +uts46_map(127290) -> {'M', [107]}; +uts46_map(127291) -> {'M', [108]}; +uts46_map(127292) -> {'M', [109]}; +uts46_map(127293) -> {'M', [110]}; +uts46_map(127294) -> {'M', [111]}; +uts46_map(127295) -> {'M', [112]}; +uts46_map(127296) -> {'M', [113]}; +uts46_map(127297) -> {'M', [114]}; +uts46_map(127298) -> {'M', [115]}; +uts46_map(127299) -> {'M', [116]}; +uts46_map(127300) -> {'M', [117]}; +uts46_map(127301) -> {'M', [118]}; +uts46_map(127302) -> {'M', [119]}; +uts46_map(127303) -> {'M', [120]}; +uts46_map(127304) -> {'M', [121]}; +uts46_map(127305) -> {'M', [122]}; +uts46_map(127306) -> {'M', [104,118]}; +uts46_map(127307) -> {'M', [109,118]}; +uts46_map(127308) -> {'M', [115,100]}; +uts46_map(127309) -> {'M', [115,115]}; +uts46_map(127310) -> {'M', [112,112,118]}; +uts46_map(127311) -> {'M', [119,99]}; +uts46_map(127319) -> 'V'; +uts46_map(127327) -> 'V'; +uts46_map(127338) -> {'M', [109,99]}; +uts46_map(127339) -> {'M', [109,100]}; +uts46_map(127340) -> {'M', [109,114]}; +uts46_map(127353) -> 'V'; +uts46_map(127354) -> 'V'; +uts46_map(127359) -> 'V'; +uts46_map(127376) -> {'M', [100,106]}; +uts46_map(127405) -> 'V'; +uts46_map(127488) -> {'M', [12411,12363]}; +uts46_map(127489) -> {'M', [12467,12467]}; +uts46_map(127490) -> {'M', [12469]}; +uts46_map(127504) -> {'M', [25163]}; +uts46_map(127505) -> {'M', [23383]}; +uts46_map(127506) -> {'M', [21452]}; +uts46_map(127507) -> {'M', [12487]}; +uts46_map(127508) -> {'M', [20108]}; +uts46_map(127509) -> {'M', [22810]}; +uts46_map(127510) -> {'M', [35299]}; +uts46_map(127511) -> {'M', [22825]}; +uts46_map(127512) -> {'M', [20132]}; +uts46_map(127513) -> {'M', [26144]}; +uts46_map(127514) -> {'M', [28961]}; +uts46_map(127515) -> {'M', [26009]}; +uts46_map(127516) -> {'M', [21069]}; +uts46_map(127517) -> {'M', [24460]}; +uts46_map(127518) -> {'M', [20877]}; +uts46_map(127519) -> {'M', [26032]}; +uts46_map(127520) -> {'M', [21021]}; 
+uts46_map(127521) -> {'M', [32066]}; +uts46_map(127522) -> {'M', [29983]}; +uts46_map(127523) -> {'M', [36009]}; +uts46_map(127524) -> {'M', [22768]}; +uts46_map(127525) -> {'M', [21561]}; +uts46_map(127526) -> {'M', [28436]}; +uts46_map(127527) -> {'M', [25237]}; +uts46_map(127528) -> {'M', [25429]}; +uts46_map(127529) -> {'M', [19968]}; +uts46_map(127530) -> {'M', [19977]}; +uts46_map(127531) -> {'M', [36938]}; +uts46_map(127532) -> {'M', [24038]}; +uts46_map(127533) -> {'M', [20013]}; +uts46_map(127534) -> {'M', [21491]}; +uts46_map(127535) -> {'M', [25351]}; +uts46_map(127536) -> {'M', [36208]}; +uts46_map(127537) -> {'M', [25171]}; +uts46_map(127538) -> {'M', [31105]}; +uts46_map(127539) -> {'M', [31354]}; +uts46_map(127540) -> {'M', [21512]}; +uts46_map(127541) -> {'M', [28288]}; +uts46_map(127542) -> {'M', [26377]}; +uts46_map(127543) -> {'M', [26376]}; +uts46_map(127544) -> {'M', [30003]}; +uts46_map(127545) -> {'M', [21106]}; +uts46_map(127546) -> {'M', [21942]}; +uts46_map(127547) -> {'M', [37197]}; +uts46_map(127552) -> {'M', [12308,26412,12309]}; +uts46_map(127553) -> {'M', [12308,19977,12309]}; +uts46_map(127554) -> {'M', [12308,20108,12309]}; +uts46_map(127555) -> {'M', [12308,23433,12309]}; +uts46_map(127556) -> {'M', [12308,28857,12309]}; +uts46_map(127557) -> {'M', [12308,25171,12309]}; +uts46_map(127558) -> {'M', [12308,30423,12309]}; +uts46_map(127559) -> {'M', [12308,21213,12309]}; +uts46_map(127560) -> {'M', [12308,25943,12309]}; +uts46_map(127568) -> {'M', [24471]}; +uts46_map(127569) -> {'M', [21487]}; +uts46_map(127798) -> 'V'; +uts46_map(127869) -> 'V'; +uts46_map(127941) -> 'V'; +uts46_map(128063) -> 'V'; +uts46_map(128064) -> 'V'; +uts46_map(128065) -> 'V'; +uts46_map(128248) -> 'V'; +uts46_map(128255) -> 'V'; +uts46_map(128378) -> 'V'; +uts46_map(128420) -> 'V'; +uts46_map(128512) -> 'V'; +uts46_map(128529) -> 'V'; +uts46_map(128533) -> 'V'; +uts46_map(128534) -> 'V'; +uts46_map(128535) -> 'V'; +uts46_map(128536) -> 'V'; 
+uts46_map(128537) -> 'V'; +uts46_map(128538) -> 'V'; +uts46_map(128539) -> 'V'; +uts46_map(128543) -> 'V'; +uts46_map(128556) -> 'V'; +uts46_map(128557) -> 'V'; +uts46_map(128564) -> 'V'; +uts46_map(128720) -> 'V'; +uts46_map(128725) -> 'V'; +uts46_map(128761) -> 'V'; +uts46_map(128762) -> 'V'; +uts46_map(129292) -> 'V'; +uts46_map(129311) -> 'V'; +uts46_map(129328) -> 'V'; +uts46_map(129343) -> 'V'; +uts46_map(129356) -> 'V'; +uts46_map(129393) -> 'V'; +uts46_map(129394) -> 'V'; +uts46_map(129401) -> 'X'; +uts46_map(129402) -> 'V'; +uts46_map(129403) -> 'V'; +uts46_map(129472) -> 'V'; +uts46_map(129483) -> 'V'; +uts46_map(129484) -> 'X'; +uts46_map(129652) -> 'V'; +uts46_map(129939) -> 'X'; +uts46_map(130032) -> {'M', [48]}; +uts46_map(130033) -> {'M', [49]}; +uts46_map(130034) -> {'M', [50]}; +uts46_map(130035) -> {'M', [51]}; +uts46_map(130036) -> {'M', [52]}; +uts46_map(130037) -> {'M', [53]}; +uts46_map(130038) -> {'M', [54]}; +uts46_map(130039) -> {'M', [55]}; +uts46_map(130040) -> {'M', [56]}; +uts46_map(130041) -> {'M', [57]}; +uts46_map(194560) -> {'M', [20029]}; +uts46_map(194561) -> {'M', [20024]}; +uts46_map(194562) -> {'M', [20033]}; +uts46_map(194563) -> {'M', [131362]}; +uts46_map(194564) -> {'M', [20320]}; +uts46_map(194565) -> {'M', [20398]}; +uts46_map(194566) -> {'M', [20411]}; +uts46_map(194567) -> {'M', [20482]}; +uts46_map(194568) -> {'M', [20602]}; +uts46_map(194569) -> {'M', [20633]}; +uts46_map(194570) -> {'M', [20711]}; +uts46_map(194571) -> {'M', [20687]}; +uts46_map(194572) -> {'M', [13470]}; +uts46_map(194573) -> {'M', [132666]}; +uts46_map(194574) -> {'M', [20813]}; +uts46_map(194575) -> {'M', [20820]}; +uts46_map(194576) -> {'M', [20836]}; +uts46_map(194577) -> {'M', [20855]}; +uts46_map(194578) -> {'M', [132380]}; +uts46_map(194579) -> {'M', [13497]}; +uts46_map(194580) -> {'M', [20839]}; +uts46_map(194581) -> {'M', [20877]}; +uts46_map(194582) -> {'M', [132427]}; +uts46_map(194583) -> {'M', [20887]}; +uts46_map(194584) -> {'M', 
[20900]}; +uts46_map(194585) -> {'M', [20172]}; +uts46_map(194586) -> {'M', [20908]}; +uts46_map(194587) -> {'M', [20917]}; +uts46_map(194588) -> {'M', [168415]}; +uts46_map(194589) -> {'M', [20981]}; +uts46_map(194590) -> {'M', [20995]}; +uts46_map(194591) -> {'M', [13535]}; +uts46_map(194592) -> {'M', [21051]}; +uts46_map(194593) -> {'M', [21062]}; +uts46_map(194594) -> {'M', [21106]}; +uts46_map(194595) -> {'M', [21111]}; +uts46_map(194596) -> {'M', [13589]}; +uts46_map(194597) -> {'M', [21191]}; +uts46_map(194598) -> {'M', [21193]}; +uts46_map(194599) -> {'M', [21220]}; +uts46_map(194600) -> {'M', [21242]}; +uts46_map(194601) -> {'M', [21253]}; +uts46_map(194602) -> {'M', [21254]}; +uts46_map(194603) -> {'M', [21271]}; +uts46_map(194604) -> {'M', [21321]}; +uts46_map(194605) -> {'M', [21329]}; +uts46_map(194606) -> {'M', [21338]}; +uts46_map(194607) -> {'M', [21363]}; +uts46_map(194608) -> {'M', [21373]}; +uts46_map(194612) -> {'M', [133676]}; +uts46_map(194613) -> {'M', [28784]}; +uts46_map(194614) -> {'M', [21450]}; +uts46_map(194615) -> {'M', [21471]}; +uts46_map(194616) -> {'M', [133987]}; +uts46_map(194617) -> {'M', [21483]}; +uts46_map(194618) -> {'M', [21489]}; +uts46_map(194619) -> {'M', [21510]}; +uts46_map(194620) -> {'M', [21662]}; +uts46_map(194621) -> {'M', [21560]}; +uts46_map(194622) -> {'M', [21576]}; +uts46_map(194623) -> {'M', [21608]}; +uts46_map(194624) -> {'M', [21666]}; +uts46_map(194625) -> {'M', [21750]}; +uts46_map(194626) -> {'M', [21776]}; +uts46_map(194627) -> {'M', [21843]}; +uts46_map(194628) -> {'M', [21859]}; +uts46_map(194631) -> {'M', [21913]}; +uts46_map(194632) -> {'M', [21931]}; +uts46_map(194633) -> {'M', [21939]}; +uts46_map(194634) -> {'M', [21954]}; +uts46_map(194635) -> {'M', [22294]}; +uts46_map(194636) -> {'M', [22022]}; +uts46_map(194637) -> {'M', [22295]}; +uts46_map(194638) -> {'M', [22097]}; +uts46_map(194639) -> {'M', [22132]}; +uts46_map(194640) -> {'M', [20999]}; +uts46_map(194641) -> {'M', [22766]}; 
+uts46_map(194642) -> {'M', [22478]}; +uts46_map(194643) -> {'M', [22516]}; +uts46_map(194644) -> {'M', [22541]}; +uts46_map(194645) -> {'M', [22411]}; +uts46_map(194646) -> {'M', [22578]}; +uts46_map(194647) -> {'M', [22577]}; +uts46_map(194648) -> {'M', [22700]}; +uts46_map(194649) -> {'M', [136420]}; +uts46_map(194650) -> {'M', [22770]}; +uts46_map(194651) -> {'M', [22775]}; +uts46_map(194652) -> {'M', [22790]}; +uts46_map(194653) -> {'M', [22810]}; +uts46_map(194654) -> {'M', [22818]}; +uts46_map(194655) -> {'M', [22882]}; +uts46_map(194656) -> {'M', [136872]}; +uts46_map(194657) -> {'M', [136938]}; +uts46_map(194658) -> {'M', [23020]}; +uts46_map(194659) -> {'M', [23067]}; +uts46_map(194660) -> {'M', [23079]}; +uts46_map(194661) -> {'M', [23000]}; +uts46_map(194662) -> {'M', [23142]}; +uts46_map(194663) -> {'M', [14062]}; +uts46_map(194664) -> 'X'; +uts46_map(194665) -> {'M', [23304]}; +uts46_map(194668) -> {'M', [137672]}; +uts46_map(194669) -> {'M', [23491]}; +uts46_map(194670) -> {'M', [23512]}; +uts46_map(194671) -> {'M', [23527]}; +uts46_map(194672) -> {'M', [23539]}; +uts46_map(194673) -> {'M', [138008]}; +uts46_map(194674) -> {'M', [23551]}; +uts46_map(194675) -> {'M', [23558]}; +uts46_map(194676) -> 'X'; +uts46_map(194677) -> {'M', [23586]}; +uts46_map(194678) -> {'M', [14209]}; +uts46_map(194679) -> {'M', [23648]}; +uts46_map(194680) -> {'M', [23662]}; +uts46_map(194681) -> {'M', [23744]}; +uts46_map(194682) -> {'M', [23693]}; +uts46_map(194683) -> {'M', [138724]}; +uts46_map(194684) -> {'M', [23875]}; +uts46_map(194685) -> {'M', [138726]}; +uts46_map(194686) -> {'M', [23918]}; +uts46_map(194687) -> {'M', [23915]}; +uts46_map(194688) -> {'M', [23932]}; +uts46_map(194689) -> {'M', [24033]}; +uts46_map(194690) -> {'M', [24034]}; +uts46_map(194691) -> {'M', [14383]}; +uts46_map(194692) -> {'M', [24061]}; +uts46_map(194693) -> {'M', [24104]}; +uts46_map(194694) -> {'M', [24125]}; +uts46_map(194695) -> {'M', [24169]}; +uts46_map(194696) -> {'M', [14434]}; 
+uts46_map(194697) -> {'M', [139651]}; +uts46_map(194698) -> {'M', [14460]}; +uts46_map(194699) -> {'M', [24240]}; +uts46_map(194700) -> {'M', [24243]}; +uts46_map(194701) -> {'M', [24246]}; +uts46_map(194702) -> {'M', [24266]}; +uts46_map(194703) -> {'M', [172946]}; +uts46_map(194704) -> {'M', [24318]}; +uts46_map(194707) -> {'M', [33281]}; +uts46_map(194710) -> {'M', [14535]}; +uts46_map(194711) -> {'M', [144056]}; +uts46_map(194712) -> {'M', [156122]}; +uts46_map(194713) -> {'M', [24418]}; +uts46_map(194714) -> {'M', [24427]}; +uts46_map(194715) -> {'M', [14563]}; +uts46_map(194716) -> {'M', [24474]}; +uts46_map(194717) -> {'M', [24525]}; +uts46_map(194718) -> {'M', [24535]}; +uts46_map(194719) -> {'M', [24569]}; +uts46_map(194720) -> {'M', [24705]}; +uts46_map(194721) -> {'M', [14650]}; +uts46_map(194722) -> {'M', [14620]}; +uts46_map(194723) -> {'M', [24724]}; +uts46_map(194724) -> {'M', [141012]}; +uts46_map(194725) -> {'M', [24775]}; +uts46_map(194726) -> {'M', [24904]}; +uts46_map(194727) -> {'M', [24908]}; +uts46_map(194728) -> {'M', [24910]}; +uts46_map(194729) -> {'M', [24908]}; +uts46_map(194730) -> {'M', [24954]}; +uts46_map(194731) -> {'M', [24974]}; +uts46_map(194732) -> {'M', [25010]}; +uts46_map(194733) -> {'M', [24996]}; +uts46_map(194734) -> {'M', [25007]}; +uts46_map(194735) -> {'M', [25054]}; +uts46_map(194736) -> {'M', [25074]}; +uts46_map(194737) -> {'M', [25078]}; +uts46_map(194738) -> {'M', [25104]}; +uts46_map(194739) -> {'M', [25115]}; +uts46_map(194740) -> {'M', [25181]}; +uts46_map(194741) -> {'M', [25265]}; +uts46_map(194742) -> {'M', [25300]}; +uts46_map(194743) -> {'M', [25424]}; +uts46_map(194744) -> {'M', [142092]}; +uts46_map(194745) -> {'M', [25405]}; +uts46_map(194746) -> {'M', [25340]}; +uts46_map(194747) -> {'M', [25448]}; +uts46_map(194748) -> {'M', [25475]}; +uts46_map(194749) -> {'M', [25572]}; +uts46_map(194750) -> {'M', [142321]}; +uts46_map(194751) -> {'M', [25634]}; +uts46_map(194752) -> {'M', [25541]}; 
+uts46_map(194753) -> {'M', [25513]}; +uts46_map(194754) -> {'M', [14894]}; +uts46_map(194755) -> {'M', [25705]}; +uts46_map(194756) -> {'M', [25726]}; +uts46_map(194757) -> {'M', [25757]}; +uts46_map(194758) -> {'M', [25719]}; +uts46_map(194759) -> {'M', [14956]}; +uts46_map(194760) -> {'M', [25935]}; +uts46_map(194761) -> {'M', [25964]}; +uts46_map(194762) -> {'M', [143370]}; +uts46_map(194763) -> {'M', [26083]}; +uts46_map(194764) -> {'M', [26360]}; +uts46_map(194765) -> {'M', [26185]}; +uts46_map(194766) -> {'M', [15129]}; +uts46_map(194767) -> {'M', [26257]}; +uts46_map(194768) -> {'M', [15112]}; +uts46_map(194769) -> {'M', [15076]}; +uts46_map(194770) -> {'M', [20882]}; +uts46_map(194771) -> {'M', [20885]}; +uts46_map(194772) -> {'M', [26368]}; +uts46_map(194773) -> {'M', [26268]}; +uts46_map(194774) -> {'M', [32941]}; +uts46_map(194775) -> {'M', [17369]}; +uts46_map(194776) -> {'M', [26391]}; +uts46_map(194777) -> {'M', [26395]}; +uts46_map(194778) -> {'M', [26401]}; +uts46_map(194779) -> {'M', [26462]}; +uts46_map(194780) -> {'M', [26451]}; +uts46_map(194781) -> {'M', [144323]}; +uts46_map(194782) -> {'M', [15177]}; +uts46_map(194783) -> {'M', [26618]}; +uts46_map(194784) -> {'M', [26501]}; +uts46_map(194785) -> {'M', [26706]}; +uts46_map(194786) -> {'M', [26757]}; +uts46_map(194787) -> {'M', [144493]}; +uts46_map(194788) -> {'M', [26766]}; +uts46_map(194789) -> {'M', [26655]}; +uts46_map(194790) -> {'M', [26900]}; +uts46_map(194791) -> {'M', [15261]}; +uts46_map(194792) -> {'M', [26946]}; +uts46_map(194793) -> {'M', [27043]}; +uts46_map(194794) -> {'M', [27114]}; +uts46_map(194795) -> {'M', [27304]}; +uts46_map(194796) -> {'M', [145059]}; +uts46_map(194797) -> {'M', [27355]}; +uts46_map(194798) -> {'M', [15384]}; +uts46_map(194799) -> {'M', [27425]}; +uts46_map(194800) -> {'M', [145575]}; +uts46_map(194801) -> {'M', [27476]}; +uts46_map(194802) -> {'M', [15438]}; +uts46_map(194803) -> {'M', [27506]}; +uts46_map(194804) -> {'M', [27551]}; +uts46_map(194805) 
-> {'M', [27578]}; +uts46_map(194806) -> {'M', [27579]}; +uts46_map(194807) -> {'M', [146061]}; +uts46_map(194808) -> {'M', [138507]}; +uts46_map(194809) -> {'M', [146170]}; +uts46_map(194810) -> {'M', [27726]}; +uts46_map(194811) -> {'M', [146620]}; +uts46_map(194812) -> {'M', [27839]}; +uts46_map(194813) -> {'M', [27853]}; +uts46_map(194814) -> {'M', [27751]}; +uts46_map(194815) -> {'M', [27926]}; +uts46_map(194816) -> {'M', [27966]}; +uts46_map(194817) -> {'M', [28023]}; +uts46_map(194818) -> {'M', [27969]}; +uts46_map(194819) -> {'M', [28009]}; +uts46_map(194820) -> {'M', [28024]}; +uts46_map(194821) -> {'M', [28037]}; +uts46_map(194822) -> {'M', [146718]}; +uts46_map(194823) -> {'M', [27956]}; +uts46_map(194824) -> {'M', [28207]}; +uts46_map(194825) -> {'M', [28270]}; +uts46_map(194826) -> {'M', [15667]}; +uts46_map(194827) -> {'M', [28363]}; +uts46_map(194828) -> {'M', [28359]}; +uts46_map(194829) -> {'M', [147153]}; +uts46_map(194830) -> {'M', [28153]}; +uts46_map(194831) -> {'M', [28526]}; +uts46_map(194832) -> {'M', [147294]}; +uts46_map(194833) -> {'M', [147342]}; +uts46_map(194834) -> {'M', [28614]}; +uts46_map(194835) -> {'M', [28729]}; +uts46_map(194836) -> {'M', [28702]}; +uts46_map(194837) -> {'M', [28699]}; +uts46_map(194838) -> {'M', [15766]}; +uts46_map(194839) -> {'M', [28746]}; +uts46_map(194840) -> {'M', [28797]}; +uts46_map(194841) -> {'M', [28791]}; +uts46_map(194842) -> {'M', [28845]}; +uts46_map(194843) -> {'M', [132389]}; +uts46_map(194844) -> {'M', [28997]}; +uts46_map(194845) -> {'M', [148067]}; +uts46_map(194846) -> {'M', [29084]}; +uts46_map(194847) -> 'X'; +uts46_map(194848) -> {'M', [29224]}; +uts46_map(194849) -> {'M', [29237]}; +uts46_map(194850) -> {'M', [29264]}; +uts46_map(194851) -> {'M', [149000]}; +uts46_map(194852) -> {'M', [29312]}; +uts46_map(194853) -> {'M', [29333]}; +uts46_map(194854) -> {'M', [149301]}; +uts46_map(194855) -> {'M', [149524]}; +uts46_map(194856) -> {'M', [29562]}; +uts46_map(194857) -> {'M', [29579]}; 
+uts46_map(194858) -> {'M', [16044]}; +uts46_map(194859) -> {'M', [29605]}; +uts46_map(194862) -> {'M', [29767]}; +uts46_map(194863) -> {'M', [29788]}; +uts46_map(194864) -> {'M', [29809]}; +uts46_map(194865) -> {'M', [29829]}; +uts46_map(194866) -> {'M', [29898]}; +uts46_map(194867) -> {'M', [16155]}; +uts46_map(194868) -> {'M', [29988]}; +uts46_map(194869) -> {'M', [150582]}; +uts46_map(194870) -> {'M', [30014]}; +uts46_map(194871) -> {'M', [150674]}; +uts46_map(194872) -> {'M', [30064]}; +uts46_map(194873) -> {'M', [139679]}; +uts46_map(194874) -> {'M', [30224]}; +uts46_map(194875) -> {'M', [151457]}; +uts46_map(194876) -> {'M', [151480]}; +uts46_map(194877) -> {'M', [151620]}; +uts46_map(194878) -> {'M', [16380]}; +uts46_map(194879) -> {'M', [16392]}; +uts46_map(194880) -> {'M', [30452]}; +uts46_map(194881) -> {'M', [151795]}; +uts46_map(194882) -> {'M', [151794]}; +uts46_map(194883) -> {'M', [151833]}; +uts46_map(194884) -> {'M', [151859]}; +uts46_map(194885) -> {'M', [30494]}; +uts46_map(194888) -> {'M', [30538]}; +uts46_map(194889) -> {'M', [16441]}; +uts46_map(194890) -> {'M', [30603]}; +uts46_map(194891) -> {'M', [16454]}; +uts46_map(194892) -> {'M', [16534]}; +uts46_map(194893) -> {'M', [152605]}; +uts46_map(194894) -> {'M', [30798]}; +uts46_map(194895) -> {'M', [30860]}; +uts46_map(194896) -> {'M', [30924]}; +uts46_map(194897) -> {'M', [16611]}; +uts46_map(194898) -> {'M', [153126]}; +uts46_map(194899) -> {'M', [31062]}; +uts46_map(194900) -> {'M', [153242]}; +uts46_map(194901) -> {'M', [153285]}; +uts46_map(194902) -> {'M', [31119]}; +uts46_map(194903) -> {'M', [31211]}; +uts46_map(194904) -> {'M', [16687]}; +uts46_map(194905) -> {'M', [31296]}; +uts46_map(194906) -> {'M', [31306]}; +uts46_map(194907) -> {'M', [31311]}; +uts46_map(194908) -> {'M', [153980]}; +uts46_map(194911) -> 'X'; +uts46_map(194912) -> {'M', [16898]}; +uts46_map(194913) -> {'M', [154539]}; +uts46_map(194914) -> {'M', [31686]}; +uts46_map(194915) -> {'M', [31689]}; +uts46_map(194916) 
-> {'M', [16935]}; +uts46_map(194917) -> {'M', [154752]}; +uts46_map(194918) -> {'M', [31954]}; +uts46_map(194919) -> {'M', [17056]}; +uts46_map(194920) -> {'M', [31976]}; +uts46_map(194921) -> {'M', [31971]}; +uts46_map(194922) -> {'M', [32000]}; +uts46_map(194923) -> {'M', [155526]}; +uts46_map(194924) -> {'M', [32099]}; +uts46_map(194925) -> {'M', [17153]}; +uts46_map(194926) -> {'M', [32199]}; +uts46_map(194927) -> {'M', [32258]}; +uts46_map(194928) -> {'M', [32325]}; +uts46_map(194929) -> {'M', [17204]}; +uts46_map(194930) -> {'M', [156200]}; +uts46_map(194931) -> {'M', [156231]}; +uts46_map(194932) -> {'M', [17241]}; +uts46_map(194933) -> {'M', [156377]}; +uts46_map(194934) -> {'M', [32634]}; +uts46_map(194935) -> {'M', [156478]}; +uts46_map(194936) -> {'M', [32661]}; +uts46_map(194937) -> {'M', [32762]}; +uts46_map(194938) -> {'M', [32773]}; +uts46_map(194939) -> {'M', [156890]}; +uts46_map(194940) -> {'M', [156963]}; +uts46_map(194941) -> {'M', [32864]}; +uts46_map(194942) -> {'M', [157096]}; +uts46_map(194943) -> {'M', [32880]}; +uts46_map(194944) -> {'M', [144223]}; +uts46_map(194945) -> {'M', [17365]}; +uts46_map(194946) -> {'M', [32946]}; +uts46_map(194947) -> {'M', [33027]}; +uts46_map(194948) -> {'M', [17419]}; +uts46_map(194949) -> {'M', [33086]}; +uts46_map(194950) -> {'M', [23221]}; +uts46_map(194951) -> {'M', [157607]}; +uts46_map(194952) -> {'M', [157621]}; +uts46_map(194953) -> {'M', [144275]}; +uts46_map(194954) -> {'M', [144284]}; +uts46_map(194955) -> {'M', [33281]}; +uts46_map(194956) -> {'M', [33284]}; +uts46_map(194957) -> {'M', [36766]}; +uts46_map(194958) -> {'M', [17515]}; +uts46_map(194959) -> {'M', [33425]}; +uts46_map(194960) -> {'M', [33419]}; +uts46_map(194961) -> {'M', [33437]}; +uts46_map(194962) -> {'M', [21171]}; +uts46_map(194963) -> {'M', [33457]}; +uts46_map(194964) -> {'M', [33459]}; +uts46_map(194965) -> {'M', [33469]}; +uts46_map(194966) -> {'M', [33510]}; +uts46_map(194967) -> {'M', [158524]}; +uts46_map(194968) -> {'M', 
[33509]}; +uts46_map(194969) -> {'M', [33565]}; +uts46_map(194970) -> {'M', [33635]}; +uts46_map(194971) -> {'M', [33709]}; +uts46_map(194972) -> {'M', [33571]}; +uts46_map(194973) -> {'M', [33725]}; +uts46_map(194974) -> {'M', [33767]}; +uts46_map(194975) -> {'M', [33879]}; +uts46_map(194976) -> {'M', [33619]}; +uts46_map(194977) -> {'M', [33738]}; +uts46_map(194978) -> {'M', [33740]}; +uts46_map(194979) -> {'M', [33756]}; +uts46_map(194980) -> {'M', [158774]}; +uts46_map(194981) -> {'M', [159083]}; +uts46_map(194982) -> {'M', [158933]}; +uts46_map(194983) -> {'M', [17707]}; +uts46_map(194984) -> {'M', [34033]}; +uts46_map(194985) -> {'M', [34035]}; +uts46_map(194986) -> {'M', [34070]}; +uts46_map(194987) -> {'M', [160714]}; +uts46_map(194988) -> {'M', [34148]}; +uts46_map(194989) -> {'M', [159532]}; +uts46_map(194990) -> {'M', [17757]}; +uts46_map(194991) -> {'M', [17761]}; +uts46_map(194992) -> {'M', [159665]}; +uts46_map(194993) -> {'M', [159954]}; +uts46_map(194994) -> {'M', [17771]}; +uts46_map(194995) -> {'M', [34384]}; +uts46_map(194996) -> {'M', [34396]}; +uts46_map(194997) -> {'M', [34407]}; +uts46_map(194998) -> {'M', [34409]}; +uts46_map(194999) -> {'M', [34473]}; +uts46_map(195000) -> {'M', [34440]}; +uts46_map(195001) -> {'M', [34574]}; +uts46_map(195002) -> {'M', [34530]}; +uts46_map(195003) -> {'M', [34681]}; +uts46_map(195004) -> {'M', [34600]}; +uts46_map(195005) -> {'M', [34667]}; +uts46_map(195006) -> {'M', [34694]}; +uts46_map(195007) -> 'X'; +uts46_map(195008) -> {'M', [34785]}; +uts46_map(195009) -> {'M', [34817]}; +uts46_map(195010) -> {'M', [17913]}; +uts46_map(195011) -> {'M', [34912]}; +uts46_map(195012) -> {'M', [34915]}; +uts46_map(195013) -> {'M', [161383]}; +uts46_map(195014) -> {'M', [35031]}; +uts46_map(195015) -> {'M', [35038]}; +uts46_map(195016) -> {'M', [17973]}; +uts46_map(195017) -> {'M', [35066]}; +uts46_map(195018) -> {'M', [13499]}; +uts46_map(195019) -> {'M', [161966]}; +uts46_map(195020) -> {'M', [162150]}; 
+uts46_map(195021) -> {'M', [18110]}; +uts46_map(195022) -> {'M', [18119]}; +uts46_map(195023) -> {'M', [35488]}; +uts46_map(195024) -> {'M', [35565]}; +uts46_map(195025) -> {'M', [35722]}; +uts46_map(195026) -> {'M', [35925]}; +uts46_map(195027) -> {'M', [162984]}; +uts46_map(195028) -> {'M', [36011]}; +uts46_map(195029) -> {'M', [36033]}; +uts46_map(195030) -> {'M', [36123]}; +uts46_map(195031) -> {'M', [36215]}; +uts46_map(195032) -> {'M', [163631]}; +uts46_map(195033) -> {'M', [133124]}; +uts46_map(195034) -> {'M', [36299]}; +uts46_map(195035) -> {'M', [36284]}; +uts46_map(195036) -> {'M', [36336]}; +uts46_map(195037) -> {'M', [133342]}; +uts46_map(195038) -> {'M', [36564]}; +uts46_map(195039) -> {'M', [36664]}; +uts46_map(195040) -> {'M', [165330]}; +uts46_map(195041) -> {'M', [165357]}; +uts46_map(195042) -> {'M', [37012]}; +uts46_map(195043) -> {'M', [37105]}; +uts46_map(195044) -> {'M', [37137]}; +uts46_map(195045) -> {'M', [165678]}; +uts46_map(195046) -> {'M', [37147]}; +uts46_map(195047) -> {'M', [37432]}; +uts46_map(195048) -> {'M', [37591]}; +uts46_map(195049) -> {'M', [37592]}; +uts46_map(195050) -> {'M', [37500]}; +uts46_map(195051) -> {'M', [37881]}; +uts46_map(195052) -> {'M', [37909]}; +uts46_map(195053) -> {'M', [166906]}; +uts46_map(195054) -> {'M', [38283]}; +uts46_map(195055) -> {'M', [18837]}; +uts46_map(195056) -> {'M', [38327]}; +uts46_map(195057) -> {'M', [167287]}; +uts46_map(195058) -> {'M', [18918]}; +uts46_map(195059) -> {'M', [38595]}; +uts46_map(195060) -> {'M', [23986]}; +uts46_map(195061) -> {'M', [38691]}; +uts46_map(195062) -> {'M', [168261]}; +uts46_map(195063) -> {'M', [168474]}; +uts46_map(195064) -> {'M', [19054]}; +uts46_map(195065) -> {'M', [19062]}; +uts46_map(195066) -> {'M', [38880]}; +uts46_map(195067) -> {'M', [168970]}; +uts46_map(195068) -> {'M', [19122]}; +uts46_map(195069) -> {'M', [169110]}; +uts46_map(195072) -> {'M', [38953]}; +uts46_map(195073) -> {'M', [169398]}; +uts46_map(195074) -> {'M', [39138]}; 
+uts46_map(195075) -> {'M', [19251]}; +uts46_map(195076) -> {'M', [39209]}; +uts46_map(195077) -> {'M', [39335]}; +uts46_map(195078) -> {'M', [39362]}; +uts46_map(195079) -> {'M', [39422]}; +uts46_map(195080) -> {'M', [19406]}; +uts46_map(195081) -> {'M', [170800]}; +uts46_map(195082) -> {'M', [39698]}; +uts46_map(195083) -> {'M', [40000]}; +uts46_map(195084) -> {'M', [40189]}; +uts46_map(195085) -> {'M', [19662]}; +uts46_map(195086) -> {'M', [19693]}; +uts46_map(195087) -> {'M', [40295]}; +uts46_map(195088) -> {'M', [172238]}; +uts46_map(195089) -> {'M', [19704]}; +uts46_map(195090) -> {'M', [172293]}; +uts46_map(195091) -> {'M', [172558]}; +uts46_map(195092) -> {'M', [172689]}; +uts46_map(195093) -> {'M', [40635]}; +uts46_map(195094) -> {'M', [19798]}; +uts46_map(195095) -> {'M', [40697]}; +uts46_map(195096) -> {'M', [40702]}; +uts46_map(195097) -> {'M', [40709]}; +uts46_map(195098) -> {'M', [40719]}; +uts46_map(195099) -> {'M', [40726]}; +uts46_map(195100) -> {'M', [40763]}; +uts46_map(195101) -> {'M', [173568]}; +uts46_map(917504) -> 'X'; +uts46_map(917505) -> 'X'; +uts46_map(CP) when 0 =< CP, CP =< 44 -> '3'; +uts46_map(CP) when 45 =< CP, CP =< 46 -> 'V'; +uts46_map(CP) when 48 =< CP, CP =< 57 -> 'V'; +uts46_map(CP) when 58 =< CP, CP =< 64 -> '3'; +uts46_map(CP) when 91 =< CP, CP =< 96 -> '3'; +uts46_map(CP) when 97 =< CP, CP =< 122 -> 'V'; +uts46_map(CP) when 123 =< CP, CP =< 127 -> '3'; +uts46_map(CP) when 128 =< CP, CP =< 159 -> 'X'; +uts46_map(CP) when 161 =< CP, CP =< 167 -> 'V'; +uts46_map(CP) when 171 =< CP, CP =< 172 -> 'V'; +uts46_map(CP) when 176 =< CP, CP =< 177 -> 'V'; +uts46_map(CP) when 224 =< CP, CP =< 246 -> 'V'; +uts46_map(CP) when 248 =< CP, CP =< 255 -> 'V'; +uts46_map(CP) when 306 =< CP, CP =< 307 -> {'M', [105,106]}; +uts46_map(CP) when 311 =< CP, CP =< 312 -> 'V'; +uts46_map(CP) when 319 =< CP, CP =< 320 -> {'M', [108,183]}; +uts46_map(CP) when 396 =< CP, CP =< 397 -> 'V'; +uts46_map(CP) when 409 =< CP, CP =< 411 -> 'V'; +uts46_map(CP) 
when 426 =< CP, CP =< 427 -> 'V'; +uts46_map(CP) when 441 =< CP, CP =< 443 -> 'V'; +uts46_map(CP) when 445 =< CP, CP =< 451 -> 'V'; +uts46_map(CP) when 452 =< CP, CP =< 454 -> {'M', [100,382]}; +uts46_map(CP) when 455 =< CP, CP =< 457 -> {'M', [108,106]}; +uts46_map(CP) when 458 =< CP, CP =< 460 -> {'M', [110,106]}; +uts46_map(CP) when 476 =< CP, CP =< 477 -> 'V'; +uts46_map(CP) when 495 =< CP, CP =< 496 -> 'V'; +uts46_map(CP) when 497 =< CP, CP =< 499 -> {'M', [100,122]}; +uts46_map(CP) when 564 =< CP, CP =< 566 -> 'V'; +uts46_map(CP) when 567 =< CP, CP =< 569 -> 'V'; +uts46_map(CP) when 575 =< CP, CP =< 576 -> 'V'; +uts46_map(CP) when 592 =< CP, CP =< 680 -> 'V'; +uts46_map(CP) when 681 =< CP, CP =< 685 -> 'V'; +uts46_map(CP) when 686 =< CP, CP =< 687 -> 'V'; +uts46_map(CP) when 697 =< CP, CP =< 705 -> 'V'; +uts46_map(CP) when 706 =< CP, CP =< 709 -> 'V'; +uts46_map(CP) when 710 =< CP, CP =< 721 -> 'V'; +uts46_map(CP) when 722 =< CP, CP =< 727 -> 'V'; +uts46_map(CP) when 741 =< CP, CP =< 745 -> 'V'; +uts46_map(CP) when 746 =< CP, CP =< 747 -> 'V'; +uts46_map(CP) when 751 =< CP, CP =< 767 -> 'V'; +uts46_map(CP) when 768 =< CP, CP =< 831 -> 'V'; +uts46_map(CP) when 838 =< CP, CP =< 846 -> 'V'; +uts46_map(CP) when 848 =< CP, CP =< 855 -> 'V'; +uts46_map(CP) when 856 =< CP, CP =< 860 -> 'V'; +uts46_map(CP) when 861 =< CP, CP =< 863 -> 'V'; +uts46_map(CP) when 864 =< CP, CP =< 865 -> 'V'; +uts46_map(CP) when 867 =< CP, CP =< 879 -> 'V'; +uts46_map(CP) when 888 =< CP, CP =< 889 -> 'X'; +uts46_map(CP) when 891 =< CP, CP =< 893 -> 'V'; +uts46_map(CP) when 896 =< CP, CP =< 899 -> 'X'; +uts46_map(CP) when 940 =< CP, CP =< 961 -> 'V'; +uts46_map(CP) when 963 =< CP, CP =< 974 -> 'V'; +uts46_map(CP) when 1072 =< CP, CP =< 1103 -> 'V'; +uts46_map(CP) when 1105 =< CP, CP =< 1116 -> 'V'; +uts46_map(CP) when 1118 =< CP, CP =< 1119 -> 'V'; +uts46_map(CP) when 1155 =< CP, CP =< 1158 -> 'V'; +uts46_map(CP) when 1160 =< CP, CP =< 1161 -> 'V'; +uts46_map(CP) when 1367 =< CP, CP =< 
1368 -> 'X'; +uts46_map(CP) when 1370 =< CP, CP =< 1375 -> 'V'; +uts46_map(CP) when 1377 =< CP, CP =< 1414 -> 'V'; +uts46_map(CP) when 1419 =< CP, CP =< 1420 -> 'X'; +uts46_map(CP) when 1421 =< CP, CP =< 1422 -> 'V'; +uts46_map(CP) when 1425 =< CP, CP =< 1441 -> 'V'; +uts46_map(CP) when 1443 =< CP, CP =< 1455 -> 'V'; +uts46_map(CP) when 1456 =< CP, CP =< 1465 -> 'V'; +uts46_map(CP) when 1467 =< CP, CP =< 1469 -> 'V'; +uts46_map(CP) when 1473 =< CP, CP =< 1474 -> 'V'; +uts46_map(CP) when 1480 =< CP, CP =< 1487 -> 'X'; +uts46_map(CP) when 1488 =< CP, CP =< 1514 -> 'V'; +uts46_map(CP) when 1515 =< CP, CP =< 1518 -> 'X'; +uts46_map(CP) when 1520 =< CP, CP =< 1524 -> 'V'; +uts46_map(CP) when 1525 =< CP, CP =< 1535 -> 'X'; +uts46_map(CP) when 1536 =< CP, CP =< 1539 -> 'X'; +uts46_map(CP) when 1542 =< CP, CP =< 1546 -> 'V'; +uts46_map(CP) when 1549 =< CP, CP =< 1551 -> 'V'; +uts46_map(CP) when 1552 =< CP, CP =< 1557 -> 'V'; +uts46_map(CP) when 1558 =< CP, CP =< 1562 -> 'V'; +uts46_map(CP) when 1569 =< CP, CP =< 1594 -> 'V'; +uts46_map(CP) when 1595 =< CP, CP =< 1599 -> 'V'; +uts46_map(CP) when 1601 =< CP, CP =< 1618 -> 'V'; +uts46_map(CP) when 1619 =< CP, CP =< 1621 -> 'V'; +uts46_map(CP) when 1622 =< CP, CP =< 1624 -> 'V'; +uts46_map(CP) when 1625 =< CP, CP =< 1630 -> 'V'; +uts46_map(CP) when 1632 =< CP, CP =< 1641 -> 'V'; +uts46_map(CP) when 1642 =< CP, CP =< 1645 -> 'V'; +uts46_map(CP) when 1646 =< CP, CP =< 1647 -> 'V'; +uts46_map(CP) when 1648 =< CP, CP =< 1652 -> 'V'; +uts46_map(CP) when 1657 =< CP, CP =< 1719 -> 'V'; +uts46_map(CP) when 1720 =< CP, CP =< 1721 -> 'V'; +uts46_map(CP) when 1722 =< CP, CP =< 1726 -> 'V'; +uts46_map(CP) when 1728 =< CP, CP =< 1742 -> 'V'; +uts46_map(CP) when 1744 =< CP, CP =< 1747 -> 'V'; +uts46_map(CP) when 1749 =< CP, CP =< 1756 -> 'V'; +uts46_map(CP) when 1759 =< CP, CP =< 1768 -> 'V'; +uts46_map(CP) when 1770 =< CP, CP =< 1773 -> 'V'; +uts46_map(CP) when 1774 =< CP, CP =< 1775 -> 'V'; +uts46_map(CP) when 1776 =< CP, CP =< 1785 -> 
'V'; +uts46_map(CP) when 1786 =< CP, CP =< 1790 -> 'V'; +uts46_map(CP) when 1792 =< CP, CP =< 1805 -> 'V'; +uts46_map(CP) when 1808 =< CP, CP =< 1836 -> 'V'; +uts46_map(CP) when 1837 =< CP, CP =< 1839 -> 'V'; +uts46_map(CP) when 1840 =< CP, CP =< 1866 -> 'V'; +uts46_map(CP) when 1867 =< CP, CP =< 1868 -> 'X'; +uts46_map(CP) when 1869 =< CP, CP =< 1871 -> 'V'; +uts46_map(CP) when 1872 =< CP, CP =< 1901 -> 'V'; +uts46_map(CP) when 1902 =< CP, CP =< 1919 -> 'V'; +uts46_map(CP) when 1920 =< CP, CP =< 1968 -> 'V'; +uts46_map(CP) when 1970 =< CP, CP =< 1983 -> 'X'; +uts46_map(CP) when 1984 =< CP, CP =< 2037 -> 'V'; +uts46_map(CP) when 2038 =< CP, CP =< 2042 -> 'V'; +uts46_map(CP) when 2043 =< CP, CP =< 2044 -> 'X'; +uts46_map(CP) when 2046 =< CP, CP =< 2047 -> 'V'; +uts46_map(CP) when 2048 =< CP, CP =< 2093 -> 'V'; +uts46_map(CP) when 2094 =< CP, CP =< 2095 -> 'X'; +uts46_map(CP) when 2096 =< CP, CP =< 2110 -> 'V'; +uts46_map(CP) when 2112 =< CP, CP =< 2139 -> 'V'; +uts46_map(CP) when 2140 =< CP, CP =< 2141 -> 'X'; +uts46_map(CP) when 2144 =< CP, CP =< 2154 -> 'V'; +uts46_map(CP) when 2155 =< CP, CP =< 2207 -> 'X'; +uts46_map(CP) when 2210 =< CP, CP =< 2220 -> 'V'; +uts46_map(CP) when 2221 =< CP, CP =< 2226 -> 'V'; +uts46_map(CP) when 2227 =< CP, CP =< 2228 -> 'V'; +uts46_map(CP) when 2230 =< CP, CP =< 2237 -> 'V'; +uts46_map(CP) when 2238 =< CP, CP =< 2247 -> 'V'; +uts46_map(CP) when 2248 =< CP, CP =< 2258 -> 'X'; +uts46_map(CP) when 2260 =< CP, CP =< 2273 -> 'V'; +uts46_map(CP) when 2276 =< CP, CP =< 2302 -> 'V'; +uts46_map(CP) when 2305 =< CP, CP =< 2307 -> 'V'; +uts46_map(CP) when 2309 =< CP, CP =< 2361 -> 'V'; +uts46_map(CP) when 2362 =< CP, CP =< 2363 -> 'V'; +uts46_map(CP) when 2364 =< CP, CP =< 2381 -> 'V'; +uts46_map(CP) when 2384 =< CP, CP =< 2388 -> 'V'; +uts46_map(CP) when 2390 =< CP, CP =< 2391 -> 'V'; +uts46_map(CP) when 2400 =< CP, CP =< 2403 -> 'V'; +uts46_map(CP) when 2404 =< CP, CP =< 2405 -> 'V'; +uts46_map(CP) when 2406 =< CP, CP =< 2415 -> 'V'; 
+uts46_map(CP) when 2417 =< CP, CP =< 2418 -> 'V'; +uts46_map(CP) when 2419 =< CP, CP =< 2423 -> 'V'; +uts46_map(CP) when 2425 =< CP, CP =< 2426 -> 'V'; +uts46_map(CP) when 2427 =< CP, CP =< 2428 -> 'V'; +uts46_map(CP) when 2430 =< CP, CP =< 2431 -> 'V'; +uts46_map(CP) when 2433 =< CP, CP =< 2435 -> 'V'; +uts46_map(CP) when 2437 =< CP, CP =< 2444 -> 'V'; +uts46_map(CP) when 2445 =< CP, CP =< 2446 -> 'X'; +uts46_map(CP) when 2447 =< CP, CP =< 2448 -> 'V'; +uts46_map(CP) when 2449 =< CP, CP =< 2450 -> 'X'; +uts46_map(CP) when 2451 =< CP, CP =< 2472 -> 'V'; +uts46_map(CP) when 2474 =< CP, CP =< 2480 -> 'V'; +uts46_map(CP) when 2483 =< CP, CP =< 2485 -> 'X'; +uts46_map(CP) when 2486 =< CP, CP =< 2489 -> 'V'; +uts46_map(CP) when 2490 =< CP, CP =< 2491 -> 'X'; +uts46_map(CP) when 2494 =< CP, CP =< 2500 -> 'V'; +uts46_map(CP) when 2501 =< CP, CP =< 2502 -> 'X'; +uts46_map(CP) when 2503 =< CP, CP =< 2504 -> 'V'; +uts46_map(CP) when 2505 =< CP, CP =< 2506 -> 'X'; +uts46_map(CP) when 2507 =< CP, CP =< 2509 -> 'V'; +uts46_map(CP) when 2511 =< CP, CP =< 2518 -> 'X'; +uts46_map(CP) when 2520 =< CP, CP =< 2523 -> 'X'; +uts46_map(CP) when 2528 =< CP, CP =< 2531 -> 'V'; +uts46_map(CP) when 2532 =< CP, CP =< 2533 -> 'X'; +uts46_map(CP) when 2534 =< CP, CP =< 2545 -> 'V'; +uts46_map(CP) when 2546 =< CP, CP =< 2554 -> 'V'; +uts46_map(CP) when 2559 =< CP, CP =< 2560 -> 'X'; +uts46_map(CP) when 2565 =< CP, CP =< 2570 -> 'V'; +uts46_map(CP) when 2571 =< CP, CP =< 2574 -> 'X'; +uts46_map(CP) when 2575 =< CP, CP =< 2576 -> 'V'; +uts46_map(CP) when 2577 =< CP, CP =< 2578 -> 'X'; +uts46_map(CP) when 2579 =< CP, CP =< 2600 -> 'V'; +uts46_map(CP) when 2602 =< CP, CP =< 2608 -> 'V'; +uts46_map(CP) when 2616 =< CP, CP =< 2617 -> 'V'; +uts46_map(CP) when 2618 =< CP, CP =< 2619 -> 'X'; +uts46_map(CP) when 2622 =< CP, CP =< 2626 -> 'V'; +uts46_map(CP) when 2627 =< CP, CP =< 2630 -> 'X'; +uts46_map(CP) when 2631 =< CP, CP =< 2632 -> 'V'; +uts46_map(CP) when 2633 =< CP, CP =< 2634 -> 'X'; 
+uts46_map(CP) when 2635 =< CP, CP =< 2637 -> 'V'; +uts46_map(CP) when 2638 =< CP, CP =< 2640 -> 'X'; +uts46_map(CP) when 2642 =< CP, CP =< 2648 -> 'X'; +uts46_map(CP) when 2655 =< CP, CP =< 2661 -> 'X'; +uts46_map(CP) when 2662 =< CP, CP =< 2676 -> 'V'; +uts46_map(CP) when 2679 =< CP, CP =< 2688 -> 'X'; +uts46_map(CP) when 2689 =< CP, CP =< 2691 -> 'V'; +uts46_map(CP) when 2693 =< CP, CP =< 2699 -> 'V'; +uts46_map(CP) when 2703 =< CP, CP =< 2705 -> 'V'; +uts46_map(CP) when 2707 =< CP, CP =< 2728 -> 'V'; +uts46_map(CP) when 2730 =< CP, CP =< 2736 -> 'V'; +uts46_map(CP) when 2738 =< CP, CP =< 2739 -> 'V'; +uts46_map(CP) when 2741 =< CP, CP =< 2745 -> 'V'; +uts46_map(CP) when 2746 =< CP, CP =< 2747 -> 'X'; +uts46_map(CP) when 2748 =< CP, CP =< 2757 -> 'V'; +uts46_map(CP) when 2759 =< CP, CP =< 2761 -> 'V'; +uts46_map(CP) when 2763 =< CP, CP =< 2765 -> 'V'; +uts46_map(CP) when 2766 =< CP, CP =< 2767 -> 'X'; +uts46_map(CP) when 2769 =< CP, CP =< 2783 -> 'X'; +uts46_map(CP) when 2785 =< CP, CP =< 2787 -> 'V'; +uts46_map(CP) when 2788 =< CP, CP =< 2789 -> 'X'; +uts46_map(CP) when 2790 =< CP, CP =< 2799 -> 'V'; +uts46_map(CP) when 2802 =< CP, CP =< 2808 -> 'X'; +uts46_map(CP) when 2810 =< CP, CP =< 2815 -> 'V'; +uts46_map(CP) when 2817 =< CP, CP =< 2819 -> 'V'; +uts46_map(CP) when 2821 =< CP, CP =< 2828 -> 'V'; +uts46_map(CP) when 2829 =< CP, CP =< 2830 -> 'X'; +uts46_map(CP) when 2831 =< CP, CP =< 2832 -> 'V'; +uts46_map(CP) when 2833 =< CP, CP =< 2834 -> 'X'; +uts46_map(CP) when 2835 =< CP, CP =< 2856 -> 'V'; +uts46_map(CP) when 2858 =< CP, CP =< 2864 -> 'V'; +uts46_map(CP) when 2866 =< CP, CP =< 2867 -> 'V'; +uts46_map(CP) when 2870 =< CP, CP =< 2873 -> 'V'; +uts46_map(CP) when 2874 =< CP, CP =< 2875 -> 'X'; +uts46_map(CP) when 2876 =< CP, CP =< 2883 -> 'V'; +uts46_map(CP) when 2885 =< CP, CP =< 2886 -> 'X'; +uts46_map(CP) when 2887 =< CP, CP =< 2888 -> 'V'; +uts46_map(CP) when 2889 =< CP, CP =< 2890 -> 'X'; +uts46_map(CP) when 2891 =< CP, CP =< 2893 -> 'V'; 
+uts46_map(CP) when 2894 =< CP, CP =< 2900 -> 'X'; +uts46_map(CP) when 2902 =< CP, CP =< 2903 -> 'V'; +uts46_map(CP) when 2904 =< CP, CP =< 2907 -> 'X'; +uts46_map(CP) when 2911 =< CP, CP =< 2913 -> 'V'; +uts46_map(CP) when 2914 =< CP, CP =< 2915 -> 'V'; +uts46_map(CP) when 2916 =< CP, CP =< 2917 -> 'X'; +uts46_map(CP) when 2918 =< CP, CP =< 2927 -> 'V'; +uts46_map(CP) when 2930 =< CP, CP =< 2935 -> 'V'; +uts46_map(CP) when 2936 =< CP, CP =< 2945 -> 'X'; +uts46_map(CP) when 2946 =< CP, CP =< 2947 -> 'V'; +uts46_map(CP) when 2949 =< CP, CP =< 2954 -> 'V'; +uts46_map(CP) when 2955 =< CP, CP =< 2957 -> 'X'; +uts46_map(CP) when 2958 =< CP, CP =< 2960 -> 'V'; +uts46_map(CP) when 2962 =< CP, CP =< 2965 -> 'V'; +uts46_map(CP) when 2966 =< CP, CP =< 2968 -> 'X'; +uts46_map(CP) when 2969 =< CP, CP =< 2970 -> 'V'; +uts46_map(CP) when 2974 =< CP, CP =< 2975 -> 'V'; +uts46_map(CP) when 2976 =< CP, CP =< 2978 -> 'X'; +uts46_map(CP) when 2979 =< CP, CP =< 2980 -> 'V'; +uts46_map(CP) when 2981 =< CP, CP =< 2983 -> 'X'; +uts46_map(CP) when 2984 =< CP, CP =< 2986 -> 'V'; +uts46_map(CP) when 2987 =< CP, CP =< 2989 -> 'X'; +uts46_map(CP) when 2990 =< CP, CP =< 2997 -> 'V'; +uts46_map(CP) when 2999 =< CP, CP =< 3001 -> 'V'; +uts46_map(CP) when 3002 =< CP, CP =< 3005 -> 'X'; +uts46_map(CP) when 3006 =< CP, CP =< 3010 -> 'V'; +uts46_map(CP) when 3011 =< CP, CP =< 3013 -> 'X'; +uts46_map(CP) when 3014 =< CP, CP =< 3016 -> 'V'; +uts46_map(CP) when 3018 =< CP, CP =< 3021 -> 'V'; +uts46_map(CP) when 3022 =< CP, CP =< 3023 -> 'X'; +uts46_map(CP) when 3025 =< CP, CP =< 3030 -> 'X'; +uts46_map(CP) when 3032 =< CP, CP =< 3045 -> 'X'; +uts46_map(CP) when 3047 =< CP, CP =< 3055 -> 'V'; +uts46_map(CP) when 3056 =< CP, CP =< 3058 -> 'V'; +uts46_map(CP) when 3059 =< CP, CP =< 3066 -> 'V'; +uts46_map(CP) when 3067 =< CP, CP =< 3071 -> 'X'; +uts46_map(CP) when 3073 =< CP, CP =< 3075 -> 'V'; +uts46_map(CP) when 3077 =< CP, CP =< 3084 -> 'V'; +uts46_map(CP) when 3086 =< CP, CP =< 3088 -> 'V'; 
+uts46_map(CP) when 3090 =< CP, CP =< 3112 -> 'V'; +uts46_map(CP) when 3114 =< CP, CP =< 3123 -> 'V'; +uts46_map(CP) when 3125 =< CP, CP =< 3129 -> 'V'; +uts46_map(CP) when 3130 =< CP, CP =< 3132 -> 'X'; +uts46_map(CP) when 3134 =< CP, CP =< 3140 -> 'V'; +uts46_map(CP) when 3142 =< CP, CP =< 3144 -> 'V'; +uts46_map(CP) when 3146 =< CP, CP =< 3149 -> 'V'; +uts46_map(CP) when 3150 =< CP, CP =< 3156 -> 'X'; +uts46_map(CP) when 3157 =< CP, CP =< 3158 -> 'V'; +uts46_map(CP) when 3160 =< CP, CP =< 3161 -> 'V'; +uts46_map(CP) when 3163 =< CP, CP =< 3167 -> 'X'; +uts46_map(CP) when 3168 =< CP, CP =< 3169 -> 'V'; +uts46_map(CP) when 3170 =< CP, CP =< 3171 -> 'V'; +uts46_map(CP) when 3172 =< CP, CP =< 3173 -> 'X'; +uts46_map(CP) when 3174 =< CP, CP =< 3183 -> 'V'; +uts46_map(CP) when 3184 =< CP, CP =< 3190 -> 'X'; +uts46_map(CP) when 3192 =< CP, CP =< 3199 -> 'V'; +uts46_map(CP) when 3202 =< CP, CP =< 3203 -> 'V'; +uts46_map(CP) when 3205 =< CP, CP =< 3212 -> 'V'; +uts46_map(CP) when 3214 =< CP, CP =< 3216 -> 'V'; +uts46_map(CP) when 3218 =< CP, CP =< 3240 -> 'V'; +uts46_map(CP) when 3242 =< CP, CP =< 3251 -> 'V'; +uts46_map(CP) when 3253 =< CP, CP =< 3257 -> 'V'; +uts46_map(CP) when 3258 =< CP, CP =< 3259 -> 'X'; +uts46_map(CP) when 3260 =< CP, CP =< 3261 -> 'V'; +uts46_map(CP) when 3262 =< CP, CP =< 3268 -> 'V'; +uts46_map(CP) when 3270 =< CP, CP =< 3272 -> 'V'; +uts46_map(CP) when 3274 =< CP, CP =< 3277 -> 'V'; +uts46_map(CP) when 3278 =< CP, CP =< 3284 -> 'X'; +uts46_map(CP) when 3285 =< CP, CP =< 3286 -> 'V'; +uts46_map(CP) when 3287 =< CP, CP =< 3293 -> 'X'; +uts46_map(CP) when 3296 =< CP, CP =< 3297 -> 'V'; +uts46_map(CP) when 3298 =< CP, CP =< 3299 -> 'V'; +uts46_map(CP) when 3300 =< CP, CP =< 3301 -> 'X'; +uts46_map(CP) when 3302 =< CP, CP =< 3311 -> 'V'; +uts46_map(CP) when 3313 =< CP, CP =< 3314 -> 'V'; +uts46_map(CP) when 3315 =< CP, CP =< 3327 -> 'X'; +uts46_map(CP) when 3330 =< CP, CP =< 3331 -> 'V'; +uts46_map(CP) when 3333 =< CP, CP =< 3340 -> 'V'; 
+uts46_map(CP) when 3342 =< CP, CP =< 3344 -> 'V'; +uts46_map(CP) when 3346 =< CP, CP =< 3368 -> 'V'; +uts46_map(CP) when 3370 =< CP, CP =< 3385 -> 'V'; +uts46_map(CP) when 3387 =< CP, CP =< 3388 -> 'V'; +uts46_map(CP) when 3390 =< CP, CP =< 3395 -> 'V'; +uts46_map(CP) when 3398 =< CP, CP =< 3400 -> 'V'; +uts46_map(CP) when 3402 =< CP, CP =< 3405 -> 'V'; +uts46_map(CP) when 3408 =< CP, CP =< 3411 -> 'X'; +uts46_map(CP) when 3412 =< CP, CP =< 3414 -> 'V'; +uts46_map(CP) when 3416 =< CP, CP =< 3422 -> 'V'; +uts46_map(CP) when 3424 =< CP, CP =< 3425 -> 'V'; +uts46_map(CP) when 3426 =< CP, CP =< 3427 -> 'V'; +uts46_map(CP) when 3428 =< CP, CP =< 3429 -> 'X'; +uts46_map(CP) when 3430 =< CP, CP =< 3439 -> 'V'; +uts46_map(CP) when 3440 =< CP, CP =< 3445 -> 'V'; +uts46_map(CP) when 3446 =< CP, CP =< 3448 -> 'V'; +uts46_map(CP) when 3450 =< CP, CP =< 3455 -> 'V'; +uts46_map(CP) when 3458 =< CP, CP =< 3459 -> 'V'; +uts46_map(CP) when 3461 =< CP, CP =< 3478 -> 'V'; +uts46_map(CP) when 3479 =< CP, CP =< 3481 -> 'X'; +uts46_map(CP) when 3482 =< CP, CP =< 3505 -> 'V'; +uts46_map(CP) when 3507 =< CP, CP =< 3515 -> 'V'; +uts46_map(CP) when 3518 =< CP, CP =< 3519 -> 'X'; +uts46_map(CP) when 3520 =< CP, CP =< 3526 -> 'V'; +uts46_map(CP) when 3527 =< CP, CP =< 3529 -> 'X'; +uts46_map(CP) when 3531 =< CP, CP =< 3534 -> 'X'; +uts46_map(CP) when 3535 =< CP, CP =< 3540 -> 'V'; +uts46_map(CP) when 3544 =< CP, CP =< 3551 -> 'V'; +uts46_map(CP) when 3552 =< CP, CP =< 3557 -> 'X'; +uts46_map(CP) when 3558 =< CP, CP =< 3567 -> 'V'; +uts46_map(CP) when 3568 =< CP, CP =< 3569 -> 'X'; +uts46_map(CP) when 3570 =< CP, CP =< 3571 -> 'V'; +uts46_map(CP) when 3573 =< CP, CP =< 3584 -> 'X'; +uts46_map(CP) when 3585 =< CP, CP =< 3634 -> 'V'; +uts46_map(CP) when 3636 =< CP, CP =< 3642 -> 'V'; +uts46_map(CP) when 3643 =< CP, CP =< 3646 -> 'X'; +uts46_map(CP) when 3648 =< CP, CP =< 3662 -> 'V'; +uts46_map(CP) when 3664 =< CP, CP =< 3673 -> 'V'; +uts46_map(CP) when 3674 =< CP, CP =< 3675 -> 'V'; 
+uts46_map(CP) when 3676 =< CP, CP =< 3712 -> 'X'; +uts46_map(CP) when 3713 =< CP, CP =< 3714 -> 'V'; +uts46_map(CP) when 3719 =< CP, CP =< 3720 -> 'V'; +uts46_map(CP) when 3726 =< CP, CP =< 3731 -> 'V'; +uts46_map(CP) when 3732 =< CP, CP =< 3735 -> 'V'; +uts46_map(CP) when 3737 =< CP, CP =< 3743 -> 'V'; +uts46_map(CP) when 3745 =< CP, CP =< 3747 -> 'V'; +uts46_map(CP) when 3752 =< CP, CP =< 3753 -> 'V'; +uts46_map(CP) when 3754 =< CP, CP =< 3755 -> 'V'; +uts46_map(CP) when 3757 =< CP, CP =< 3762 -> 'V'; +uts46_map(CP) when 3764 =< CP, CP =< 3769 -> 'V'; +uts46_map(CP) when 3771 =< CP, CP =< 3773 -> 'V'; +uts46_map(CP) when 3774 =< CP, CP =< 3775 -> 'X'; +uts46_map(CP) when 3776 =< CP, CP =< 3780 -> 'V'; +uts46_map(CP) when 3784 =< CP, CP =< 3789 -> 'V'; +uts46_map(CP) when 3790 =< CP, CP =< 3791 -> 'X'; +uts46_map(CP) when 3792 =< CP, CP =< 3801 -> 'V'; +uts46_map(CP) when 3802 =< CP, CP =< 3803 -> 'X'; +uts46_map(CP) when 3806 =< CP, CP =< 3807 -> 'V'; +uts46_map(CP) when 3808 =< CP, CP =< 3839 -> 'X'; +uts46_map(CP) when 3841 =< CP, CP =< 3850 -> 'V'; +uts46_map(CP) when 3853 =< CP, CP =< 3863 -> 'V'; +uts46_map(CP) when 3864 =< CP, CP =< 3865 -> 'V'; +uts46_map(CP) when 3866 =< CP, CP =< 3871 -> 'V'; +uts46_map(CP) when 3872 =< CP, CP =< 3881 -> 'V'; +uts46_map(CP) when 3882 =< CP, CP =< 3892 -> 'V'; +uts46_map(CP) when 3898 =< CP, CP =< 3901 -> 'V'; +uts46_map(CP) when 3902 =< CP, CP =< 3906 -> 'V'; +uts46_map(CP) when 3908 =< CP, CP =< 3911 -> 'V'; +uts46_map(CP) when 3913 =< CP, CP =< 3916 -> 'V'; +uts46_map(CP) when 3918 =< CP, CP =< 3921 -> 'V'; +uts46_map(CP) when 3923 =< CP, CP =< 3926 -> 'V'; +uts46_map(CP) when 3928 =< CP, CP =< 3931 -> 'V'; +uts46_map(CP) when 3933 =< CP, CP =< 3944 -> 'V'; +uts46_map(CP) when 3947 =< CP, CP =< 3948 -> 'V'; +uts46_map(CP) when 3949 =< CP, CP =< 3952 -> 'X'; +uts46_map(CP) when 3953 =< CP, CP =< 3954 -> 'V'; +uts46_map(CP) when 3962 =< CP, CP =< 3968 -> 'V'; +uts46_map(CP) when 3970 =< CP, CP =< 3972 -> 'V'; 
+uts46_map(CP) when 3974 =< CP, CP =< 3979 -> 'V'; +uts46_map(CP) when 3980 =< CP, CP =< 3983 -> 'V'; +uts46_map(CP) when 3984 =< CP, CP =< 3986 -> 'V'; +uts46_map(CP) when 3988 =< CP, CP =< 3989 -> 'V'; +uts46_map(CP) when 3993 =< CP, CP =< 3996 -> 'V'; +uts46_map(CP) when 3998 =< CP, CP =< 4001 -> 'V'; +uts46_map(CP) when 4003 =< CP, CP =< 4006 -> 'V'; +uts46_map(CP) when 4008 =< CP, CP =< 4011 -> 'V'; +uts46_map(CP) when 4014 =< CP, CP =< 4016 -> 'V'; +uts46_map(CP) when 4017 =< CP, CP =< 4023 -> 'V'; +uts46_map(CP) when 4026 =< CP, CP =< 4028 -> 'V'; +uts46_map(CP) when 4030 =< CP, CP =< 4037 -> 'V'; +uts46_map(CP) when 4039 =< CP, CP =< 4044 -> 'V'; +uts46_map(CP) when 4048 =< CP, CP =< 4049 -> 'V'; +uts46_map(CP) when 4050 =< CP, CP =< 4052 -> 'V'; +uts46_map(CP) when 4053 =< CP, CP =< 4056 -> 'V'; +uts46_map(CP) when 4057 =< CP, CP =< 4058 -> 'V'; +uts46_map(CP) when 4059 =< CP, CP =< 4095 -> 'X'; +uts46_map(CP) when 4096 =< CP, CP =< 4129 -> 'V'; +uts46_map(CP) when 4131 =< CP, CP =< 4135 -> 'V'; +uts46_map(CP) when 4137 =< CP, CP =< 4138 -> 'V'; +uts46_map(CP) when 4140 =< CP, CP =< 4146 -> 'V'; +uts46_map(CP) when 4147 =< CP, CP =< 4149 -> 'V'; +uts46_map(CP) when 4150 =< CP, CP =< 4153 -> 'V'; +uts46_map(CP) when 4154 =< CP, CP =< 4159 -> 'V'; +uts46_map(CP) when 4160 =< CP, CP =< 4169 -> 'V'; +uts46_map(CP) when 4170 =< CP, CP =< 4175 -> 'V'; +uts46_map(CP) when 4176 =< CP, CP =< 4185 -> 'V'; +uts46_map(CP) when 4186 =< CP, CP =< 4249 -> 'V'; +uts46_map(CP) when 4250 =< CP, CP =< 4253 -> 'V'; +uts46_map(CP) when 4254 =< CP, CP =< 4255 -> 'V'; +uts46_map(CP) when 4256 =< CP, CP =< 4293 -> 'X'; +uts46_map(CP) when 4296 =< CP, CP =< 4300 -> 'X'; +uts46_map(CP) when 4302 =< CP, CP =< 4303 -> 'X'; +uts46_map(CP) when 4304 =< CP, CP =< 4342 -> 'V'; +uts46_map(CP) when 4343 =< CP, CP =< 4344 -> 'V'; +uts46_map(CP) when 4345 =< CP, CP =< 4346 -> 'V'; +uts46_map(CP) when 4349 =< CP, CP =< 4351 -> 'V'; +uts46_map(CP) when 4352 =< CP, CP =< 4441 -> 'V'; 
+uts46_map(CP) when 4442 =< CP, CP =< 4446 -> 'V'; +uts46_map(CP) when 4447 =< CP, CP =< 4448 -> 'X'; +uts46_map(CP) when 4449 =< CP, CP =< 4514 -> 'V'; +uts46_map(CP) when 4515 =< CP, CP =< 4519 -> 'V'; +uts46_map(CP) when 4520 =< CP, CP =< 4601 -> 'V'; +uts46_map(CP) when 4602 =< CP, CP =< 4607 -> 'V'; +uts46_map(CP) when 4608 =< CP, CP =< 4614 -> 'V'; +uts46_map(CP) when 4616 =< CP, CP =< 4678 -> 'V'; +uts46_map(CP) when 4682 =< CP, CP =< 4685 -> 'V'; +uts46_map(CP) when 4686 =< CP, CP =< 4687 -> 'X'; +uts46_map(CP) when 4688 =< CP, CP =< 4694 -> 'V'; +uts46_map(CP) when 4698 =< CP, CP =< 4701 -> 'V'; +uts46_map(CP) when 4702 =< CP, CP =< 4703 -> 'X'; +uts46_map(CP) when 4704 =< CP, CP =< 4742 -> 'V'; +uts46_map(CP) when 4746 =< CP, CP =< 4749 -> 'V'; +uts46_map(CP) when 4750 =< CP, CP =< 4751 -> 'X'; +uts46_map(CP) when 4752 =< CP, CP =< 4782 -> 'V'; +uts46_map(CP) when 4786 =< CP, CP =< 4789 -> 'V'; +uts46_map(CP) when 4790 =< CP, CP =< 4791 -> 'X'; +uts46_map(CP) when 4792 =< CP, CP =< 4798 -> 'V'; +uts46_map(CP) when 4802 =< CP, CP =< 4805 -> 'V'; +uts46_map(CP) when 4806 =< CP, CP =< 4807 -> 'X'; +uts46_map(CP) when 4808 =< CP, CP =< 4814 -> 'V'; +uts46_map(CP) when 4816 =< CP, CP =< 4822 -> 'V'; +uts46_map(CP) when 4824 =< CP, CP =< 4846 -> 'V'; +uts46_map(CP) when 4848 =< CP, CP =< 4878 -> 'V'; +uts46_map(CP) when 4882 =< CP, CP =< 4885 -> 'V'; +uts46_map(CP) when 4886 =< CP, CP =< 4887 -> 'X'; +uts46_map(CP) when 4888 =< CP, CP =< 4894 -> 'V'; +uts46_map(CP) when 4896 =< CP, CP =< 4934 -> 'V'; +uts46_map(CP) when 4936 =< CP, CP =< 4954 -> 'V'; +uts46_map(CP) when 4955 =< CP, CP =< 4956 -> 'X'; +uts46_map(CP) when 4957 =< CP, CP =< 4958 -> 'V'; +uts46_map(CP) when 4961 =< CP, CP =< 4988 -> 'V'; +uts46_map(CP) when 4989 =< CP, CP =< 4991 -> 'X'; +uts46_map(CP) when 4992 =< CP, CP =< 5007 -> 'V'; +uts46_map(CP) when 5008 =< CP, CP =< 5017 -> 'V'; +uts46_map(CP) when 5018 =< CP, CP =< 5023 -> 'X'; +uts46_map(CP) when 5024 =< CP, CP =< 5108 -> 'V'; 
+uts46_map(CP) when 5110 =< CP, CP =< 5111 -> 'X'; +uts46_map(CP) when 5118 =< CP, CP =< 5119 -> 'X'; +uts46_map(CP) when 5121 =< CP, CP =< 5740 -> 'V'; +uts46_map(CP) when 5741 =< CP, CP =< 5742 -> 'V'; +uts46_map(CP) when 5743 =< CP, CP =< 5750 -> 'V'; +uts46_map(CP) when 5751 =< CP, CP =< 5759 -> 'V'; +uts46_map(CP) when 5761 =< CP, CP =< 5786 -> 'V'; +uts46_map(CP) when 5787 =< CP, CP =< 5788 -> 'V'; +uts46_map(CP) when 5789 =< CP, CP =< 5791 -> 'X'; +uts46_map(CP) when 5792 =< CP, CP =< 5866 -> 'V'; +uts46_map(CP) when 5867 =< CP, CP =< 5872 -> 'V'; +uts46_map(CP) when 5873 =< CP, CP =< 5880 -> 'V'; +uts46_map(CP) when 5881 =< CP, CP =< 5887 -> 'X'; +uts46_map(CP) when 5888 =< CP, CP =< 5900 -> 'V'; +uts46_map(CP) when 5902 =< CP, CP =< 5908 -> 'V'; +uts46_map(CP) when 5909 =< CP, CP =< 5919 -> 'X'; +uts46_map(CP) when 5920 =< CP, CP =< 5940 -> 'V'; +uts46_map(CP) when 5941 =< CP, CP =< 5942 -> 'V'; +uts46_map(CP) when 5943 =< CP, CP =< 5951 -> 'X'; +uts46_map(CP) when 5952 =< CP, CP =< 5971 -> 'V'; +uts46_map(CP) when 5972 =< CP, CP =< 5983 -> 'X'; +uts46_map(CP) when 5984 =< CP, CP =< 5996 -> 'V'; +uts46_map(CP) when 5998 =< CP, CP =< 6000 -> 'V'; +uts46_map(CP) when 6002 =< CP, CP =< 6003 -> 'V'; +uts46_map(CP) when 6004 =< CP, CP =< 6015 -> 'X'; +uts46_map(CP) when 6016 =< CP, CP =< 6067 -> 'V'; +uts46_map(CP) when 6068 =< CP, CP =< 6069 -> 'X'; +uts46_map(CP) when 6070 =< CP, CP =< 6099 -> 'V'; +uts46_map(CP) when 6100 =< CP, CP =< 6102 -> 'V'; +uts46_map(CP) when 6104 =< CP, CP =< 6107 -> 'V'; +uts46_map(CP) when 6110 =< CP, CP =< 6111 -> 'X'; +uts46_map(CP) when 6112 =< CP, CP =< 6121 -> 'V'; +uts46_map(CP) when 6122 =< CP, CP =< 6127 -> 'X'; +uts46_map(CP) when 6128 =< CP, CP =< 6137 -> 'V'; +uts46_map(CP) when 6138 =< CP, CP =< 6143 -> 'X'; +uts46_map(CP) when 6144 =< CP, CP =< 6149 -> 'V'; +uts46_map(CP) when 6151 =< CP, CP =< 6154 -> 'V'; +uts46_map(CP) when 6155 =< CP, CP =< 6157 -> 'I'; +uts46_map(CP) when 6160 =< CP, CP =< 6169 -> 'V'; 
+uts46_map(CP) when 6170 =< CP, CP =< 6175 -> 'X'; +uts46_map(CP) when 6176 =< CP, CP =< 6263 -> 'V'; +uts46_map(CP) when 6265 =< CP, CP =< 6271 -> 'X'; +uts46_map(CP) when 6272 =< CP, CP =< 6313 -> 'V'; +uts46_map(CP) when 6315 =< CP, CP =< 6319 -> 'X'; +uts46_map(CP) when 6320 =< CP, CP =< 6389 -> 'V'; +uts46_map(CP) when 6390 =< CP, CP =< 6399 -> 'X'; +uts46_map(CP) when 6400 =< CP, CP =< 6428 -> 'V'; +uts46_map(CP) when 6429 =< CP, CP =< 6430 -> 'V'; +uts46_map(CP) when 6432 =< CP, CP =< 6443 -> 'V'; +uts46_map(CP) when 6444 =< CP, CP =< 6447 -> 'X'; +uts46_map(CP) when 6448 =< CP, CP =< 6459 -> 'V'; +uts46_map(CP) when 6460 =< CP, CP =< 6463 -> 'X'; +uts46_map(CP) when 6465 =< CP, CP =< 6467 -> 'X'; +uts46_map(CP) when 6468 =< CP, CP =< 6469 -> 'V'; +uts46_map(CP) when 6470 =< CP, CP =< 6509 -> 'V'; +uts46_map(CP) when 6510 =< CP, CP =< 6511 -> 'X'; +uts46_map(CP) when 6512 =< CP, CP =< 6516 -> 'V'; +uts46_map(CP) when 6517 =< CP, CP =< 6527 -> 'X'; +uts46_map(CP) when 6528 =< CP, CP =< 6569 -> 'V'; +uts46_map(CP) when 6570 =< CP, CP =< 6571 -> 'V'; +uts46_map(CP) when 6572 =< CP, CP =< 6575 -> 'X'; +uts46_map(CP) when 6576 =< CP, CP =< 6601 -> 'V'; +uts46_map(CP) when 6602 =< CP, CP =< 6607 -> 'X'; +uts46_map(CP) when 6608 =< CP, CP =< 6617 -> 'V'; +uts46_map(CP) when 6619 =< CP, CP =< 6621 -> 'X'; +uts46_map(CP) when 6622 =< CP, CP =< 6623 -> 'V'; +uts46_map(CP) when 6624 =< CP, CP =< 6655 -> 'V'; +uts46_map(CP) when 6656 =< CP, CP =< 6683 -> 'V'; +uts46_map(CP) when 6684 =< CP, CP =< 6685 -> 'X'; +uts46_map(CP) when 6686 =< CP, CP =< 6687 -> 'V'; +uts46_map(CP) when 6688 =< CP, CP =< 6750 -> 'V'; +uts46_map(CP) when 6752 =< CP, CP =< 6780 -> 'V'; +uts46_map(CP) when 6781 =< CP, CP =< 6782 -> 'X'; +uts46_map(CP) when 6783 =< CP, CP =< 6793 -> 'V'; +uts46_map(CP) when 6794 =< CP, CP =< 6799 -> 'X'; +uts46_map(CP) when 6800 =< CP, CP =< 6809 -> 'V'; +uts46_map(CP) when 6810 =< CP, CP =< 6815 -> 'X'; +uts46_map(CP) when 6816 =< CP, CP =< 6822 -> 'V'; 
+uts46_map(CP) when 6824 =< CP, CP =< 6829 -> 'V'; +uts46_map(CP) when 6830 =< CP, CP =< 6831 -> 'X'; +uts46_map(CP) when 6832 =< CP, CP =< 6845 -> 'V'; +uts46_map(CP) when 6847 =< CP, CP =< 6848 -> 'V'; +uts46_map(CP) when 6849 =< CP, CP =< 6911 -> 'X'; +uts46_map(CP) when 6912 =< CP, CP =< 6987 -> 'V'; +uts46_map(CP) when 6988 =< CP, CP =< 6991 -> 'X'; +uts46_map(CP) when 6992 =< CP, CP =< 7001 -> 'V'; +uts46_map(CP) when 7002 =< CP, CP =< 7018 -> 'V'; +uts46_map(CP) when 7019 =< CP, CP =< 7027 -> 'V'; +uts46_map(CP) when 7028 =< CP, CP =< 7036 -> 'V'; +uts46_map(CP) when 7037 =< CP, CP =< 7039 -> 'X'; +uts46_map(CP) when 7040 =< CP, CP =< 7082 -> 'V'; +uts46_map(CP) when 7083 =< CP, CP =< 7085 -> 'V'; +uts46_map(CP) when 7086 =< CP, CP =< 7097 -> 'V'; +uts46_map(CP) when 7098 =< CP, CP =< 7103 -> 'V'; +uts46_map(CP) when 7104 =< CP, CP =< 7155 -> 'V'; +uts46_map(CP) when 7156 =< CP, CP =< 7163 -> 'X'; +uts46_map(CP) when 7164 =< CP, CP =< 7167 -> 'V'; +uts46_map(CP) when 7168 =< CP, CP =< 7223 -> 'V'; +uts46_map(CP) when 7224 =< CP, CP =< 7226 -> 'X'; +uts46_map(CP) when 7227 =< CP, CP =< 7231 -> 'V'; +uts46_map(CP) when 7232 =< CP, CP =< 7241 -> 'V'; +uts46_map(CP) when 7242 =< CP, CP =< 7244 -> 'X'; +uts46_map(CP) when 7245 =< CP, CP =< 7293 -> 'V'; +uts46_map(CP) when 7294 =< CP, CP =< 7295 -> 'V'; +uts46_map(CP) when 7300 =< CP, CP =< 7301 -> {'M', [1090]}; +uts46_map(CP) when 7305 =< CP, CP =< 7311 -> 'X'; +uts46_map(CP) when 7355 =< CP, CP =< 7356 -> 'X'; +uts46_map(CP) when 7360 =< CP, CP =< 7367 -> 'V'; +uts46_map(CP) when 7368 =< CP, CP =< 7375 -> 'X'; +uts46_map(CP) when 7376 =< CP, CP =< 7378 -> 'V'; +uts46_map(CP) when 7380 =< CP, CP =< 7410 -> 'V'; +uts46_map(CP) when 7411 =< CP, CP =< 7414 -> 'V'; +uts46_map(CP) when 7416 =< CP, CP =< 7417 -> 'V'; +uts46_map(CP) when 7419 =< CP, CP =< 7423 -> 'X'; +uts46_map(CP) when 7424 =< CP, CP =< 7467 -> 'V'; +uts46_map(CP) when 7532 =< CP, CP =< 7543 -> 'V'; +uts46_map(CP) when 7545 =< CP, CP =< 7578 -> 'V'; 
+uts46_map(CP) when 7616 =< CP, CP =< 7619 -> 'V'; +uts46_map(CP) when 7620 =< CP, CP =< 7626 -> 'V'; +uts46_map(CP) when 7627 =< CP, CP =< 7654 -> 'V'; +uts46_map(CP) when 7655 =< CP, CP =< 7669 -> 'V'; +uts46_map(CP) when 7670 =< CP, CP =< 7673 -> 'V'; +uts46_map(CP) when 7678 =< CP, CP =< 7679 -> 'V'; +uts46_map(CP) when 7829 =< CP, CP =< 7833 -> 'V'; +uts46_map(CP) when 7836 =< CP, CP =< 7837 -> 'V'; +uts46_map(CP) when 7936 =< CP, CP =< 7943 -> 'V'; +uts46_map(CP) when 7952 =< CP, CP =< 7957 -> 'V'; +uts46_map(CP) when 7958 =< CP, CP =< 7959 -> 'X'; +uts46_map(CP) when 7966 =< CP, CP =< 7967 -> 'X'; +uts46_map(CP) when 7968 =< CP, CP =< 7975 -> 'V'; +uts46_map(CP) when 7984 =< CP, CP =< 7991 -> 'V'; +uts46_map(CP) when 8000 =< CP, CP =< 8005 -> 'V'; +uts46_map(CP) when 8006 =< CP, CP =< 8007 -> 'X'; +uts46_map(CP) when 8014 =< CP, CP =< 8015 -> 'X'; +uts46_map(CP) when 8016 =< CP, CP =< 8023 -> 'V'; +uts46_map(CP) when 8032 =< CP, CP =< 8039 -> 'V'; +uts46_map(CP) when 8062 =< CP, CP =< 8063 -> 'X'; +uts46_map(CP) when 8112 =< CP, CP =< 8113 -> 'V'; +uts46_map(CP) when 8144 =< CP, CP =< 8146 -> 'V'; +uts46_map(CP) when 8148 =< CP, CP =< 8149 -> 'X'; +uts46_map(CP) when 8150 =< CP, CP =< 8151 -> 'V'; +uts46_map(CP) when 8160 =< CP, CP =< 8162 -> 'V'; +uts46_map(CP) when 8164 =< CP, CP =< 8167 -> 'V'; +uts46_map(CP) when 8176 =< CP, CP =< 8177 -> 'X'; +uts46_map(CP) when 8192 =< CP, CP =< 8202 -> {'3', [32]}; +uts46_map(CP) when 8204 =< CP, CP =< 8205 -> {'D', []}; +uts46_map(CP) when 8206 =< CP, CP =< 8207 -> 'X'; +uts46_map(CP) when 8210 =< CP, CP =< 8214 -> 'V'; +uts46_map(CP) when 8216 =< CP, CP =< 8227 -> 'V'; +uts46_map(CP) when 8228 =< CP, CP =< 8230 -> 'X'; +uts46_map(CP) when 8232 =< CP, CP =< 8238 -> 'X'; +uts46_map(CP) when 8240 =< CP, CP =< 8242 -> 'V'; +uts46_map(CP) when 8248 =< CP, CP =< 8251 -> 'V'; +uts46_map(CP) when 8255 =< CP, CP =< 8262 -> 'V'; +uts46_map(CP) when 8266 =< CP, CP =< 8269 -> 'V'; +uts46_map(CP) when 8270 =< CP, CP =< 8274 -> 
'V'; +uts46_map(CP) when 8275 =< CP, CP =< 8276 -> 'V'; +uts46_map(CP) when 8277 =< CP, CP =< 8278 -> 'V'; +uts46_map(CP) when 8280 =< CP, CP =< 8286 -> 'V'; +uts46_map(CP) when 8289 =< CP, CP =< 8291 -> 'X'; +uts46_map(CP) when 8294 =< CP, CP =< 8297 -> 'X'; +uts46_map(CP) when 8298 =< CP, CP =< 8303 -> 'X'; +uts46_map(CP) when 8306 =< CP, CP =< 8307 -> 'X'; +uts46_map(CP) when 8349 =< CP, CP =< 8351 -> 'X'; +uts46_map(CP) when 8352 =< CP, CP =< 8359 -> 'V'; +uts46_map(CP) when 8361 =< CP, CP =< 8362 -> 'V'; +uts46_map(CP) when 8365 =< CP, CP =< 8367 -> 'V'; +uts46_map(CP) when 8368 =< CP, CP =< 8369 -> 'V'; +uts46_map(CP) when 8370 =< CP, CP =< 8373 -> 'V'; +uts46_map(CP) when 8374 =< CP, CP =< 8376 -> 'V'; +uts46_map(CP) when 8379 =< CP, CP =< 8381 -> 'V'; +uts46_map(CP) when 8384 =< CP, CP =< 8399 -> 'X'; +uts46_map(CP) when 8400 =< CP, CP =< 8417 -> 'V'; +uts46_map(CP) when 8418 =< CP, CP =< 8419 -> 'V'; +uts46_map(CP) when 8420 =< CP, CP =< 8426 -> 'V'; +uts46_map(CP) when 8428 =< CP, CP =< 8431 -> 'V'; +uts46_map(CP) when 8433 =< CP, CP =< 8447 -> 'X'; +uts46_map(CP) when 8459 =< CP, CP =< 8462 -> {'M', [104]}; +uts46_map(CP) when 8464 =< CP, CP =< 8465 -> {'M', [105]}; +uts46_map(CP) when 8466 =< CP, CP =< 8467 -> {'M', [108]}; +uts46_map(CP) when 8471 =< CP, CP =< 8472 -> 'V'; +uts46_map(CP) when 8475 =< CP, CP =< 8477 -> {'M', [114]}; +uts46_map(CP) when 8478 =< CP, CP =< 8479 -> 'V'; +uts46_map(CP) when 8495 =< CP, CP =< 8496 -> {'M', [101]}; +uts46_map(CP) when 8509 =< CP, CP =< 8510 -> {'M', [947]}; +uts46_map(CP) when 8513 =< CP, CP =< 8516 -> 'V'; +uts46_map(CP) when 8517 =< CP, CP =< 8518 -> {'M', [100]}; +uts46_map(CP) when 8522 =< CP, CP =< 8523 -> 'V'; +uts46_map(CP) when 8576 =< CP, CP =< 8578 -> 'V'; +uts46_map(CP) when 8581 =< CP, CP =< 8584 -> 'V'; +uts46_map(CP) when 8586 =< CP, CP =< 8587 -> 'V'; +uts46_map(CP) when 8588 =< CP, CP =< 8591 -> 'X'; +uts46_map(CP) when 8592 =< CP, CP =< 8682 -> 'V'; +uts46_map(CP) when 8683 =< CP, CP =< 8691 
-> 'V'; +uts46_map(CP) when 8692 =< CP, CP =< 8703 -> 'V'; +uts46_map(CP) when 8704 =< CP, CP =< 8747 -> 'V'; +uts46_map(CP) when 8753 =< CP, CP =< 8799 -> 'V'; +uts46_map(CP) when 8801 =< CP, CP =< 8813 -> 'V'; +uts46_map(CP) when 8814 =< CP, CP =< 8815 -> '3'; +uts46_map(CP) when 8816 =< CP, CP =< 8945 -> 'V'; +uts46_map(CP) when 8946 =< CP, CP =< 8959 -> 'V'; +uts46_map(CP) when 8962 =< CP, CP =< 9000 -> 'V'; +uts46_map(CP) when 9003 =< CP, CP =< 9082 -> 'V'; +uts46_map(CP) when 9085 =< CP, CP =< 9114 -> 'V'; +uts46_map(CP) when 9115 =< CP, CP =< 9166 -> 'V'; +uts46_map(CP) when 9167 =< CP, CP =< 9168 -> 'V'; +uts46_map(CP) when 9169 =< CP, CP =< 9179 -> 'V'; +uts46_map(CP) when 9180 =< CP, CP =< 9191 -> 'V'; +uts46_map(CP) when 9193 =< CP, CP =< 9203 -> 'V'; +uts46_map(CP) when 9204 =< CP, CP =< 9210 -> 'V'; +uts46_map(CP) when 9211 =< CP, CP =< 9214 -> 'V'; +uts46_map(CP) when 9216 =< CP, CP =< 9252 -> 'V'; +uts46_map(CP) when 9253 =< CP, CP =< 9254 -> 'V'; +uts46_map(CP) when 9255 =< CP, CP =< 9279 -> 'X'; +uts46_map(CP) when 9280 =< CP, CP =< 9290 -> 'V'; +uts46_map(CP) when 9291 =< CP, CP =< 9311 -> 'X'; +uts46_map(CP) when 9352 =< CP, CP =< 9371 -> 'X'; +uts46_map(CP) when 9451 =< CP, CP =< 9470 -> 'V'; +uts46_map(CP) when 9472 =< CP, CP =< 9621 -> 'V'; +uts46_map(CP) when 9622 =< CP, CP =< 9631 -> 'V'; +uts46_map(CP) when 9632 =< CP, CP =< 9711 -> 'V'; +uts46_map(CP) when 9712 =< CP, CP =< 9719 -> 'V'; +uts46_map(CP) when 9720 =< CP, CP =< 9727 -> 'V'; +uts46_map(CP) when 9728 =< CP, CP =< 9747 -> 'V'; +uts46_map(CP) when 9748 =< CP, CP =< 9749 -> 'V'; +uts46_map(CP) when 9750 =< CP, CP =< 9751 -> 'V'; +uts46_map(CP) when 9754 =< CP, CP =< 9839 -> 'V'; +uts46_map(CP) when 9840 =< CP, CP =< 9841 -> 'V'; +uts46_map(CP) when 9842 =< CP, CP =< 9853 -> 'V'; +uts46_map(CP) when 9854 =< CP, CP =< 9855 -> 'V'; +uts46_map(CP) when 9856 =< CP, CP =< 9865 -> 'V'; +uts46_map(CP) when 9866 =< CP, CP =< 9873 -> 'V'; +uts46_map(CP) when 9874 =< CP, CP =< 9884 -> 'V'; 
+uts46_map(CP) when 9886 =< CP, CP =< 9887 -> 'V'; +uts46_map(CP) when 9888 =< CP, CP =< 9889 -> 'V'; +uts46_map(CP) when 9890 =< CP, CP =< 9905 -> 'V'; +uts46_map(CP) when 9907 =< CP, CP =< 9916 -> 'V'; +uts46_map(CP) when 9917 =< CP, CP =< 9919 -> 'V'; +uts46_map(CP) when 9920 =< CP, CP =< 9923 -> 'V'; +uts46_map(CP) when 9924 =< CP, CP =< 9933 -> 'V'; +uts46_map(CP) when 9935 =< CP, CP =< 9953 -> 'V'; +uts46_map(CP) when 9956 =< CP, CP =< 9959 -> 'V'; +uts46_map(CP) when 9960 =< CP, CP =< 9983 -> 'V'; +uts46_map(CP) when 9985 =< CP, CP =< 9988 -> 'V'; +uts46_map(CP) when 9990 =< CP, CP =< 9993 -> 'V'; +uts46_map(CP) when 9994 =< CP, CP =< 9995 -> 'V'; +uts46_map(CP) when 9996 =< CP, CP =< 10023 -> 'V'; +uts46_map(CP) when 10025 =< CP, CP =< 10059 -> 'V'; +uts46_map(CP) when 10063 =< CP, CP =< 10066 -> 'V'; +uts46_map(CP) when 10067 =< CP, CP =< 10069 -> 'V'; +uts46_map(CP) when 10072 =< CP, CP =< 10078 -> 'V'; +uts46_map(CP) when 10079 =< CP, CP =< 10080 -> 'V'; +uts46_map(CP) when 10081 =< CP, CP =< 10087 -> 'V'; +uts46_map(CP) when 10088 =< CP, CP =< 10101 -> 'V'; +uts46_map(CP) when 10102 =< CP, CP =< 10132 -> 'V'; +uts46_map(CP) when 10133 =< CP, CP =< 10135 -> 'V'; +uts46_map(CP) when 10136 =< CP, CP =< 10159 -> 'V'; +uts46_map(CP) when 10161 =< CP, CP =< 10174 -> 'V'; +uts46_map(CP) when 10176 =< CP, CP =< 10182 -> 'V'; +uts46_map(CP) when 10183 =< CP, CP =< 10186 -> 'V'; +uts46_map(CP) when 10190 =< CP, CP =< 10191 -> 'V'; +uts46_map(CP) when 10192 =< CP, CP =< 10219 -> 'V'; +uts46_map(CP) when 10220 =< CP, CP =< 10223 -> 'V'; +uts46_map(CP) when 10224 =< CP, CP =< 10239 -> 'V'; +uts46_map(CP) when 10240 =< CP, CP =< 10495 -> 'V'; +uts46_map(CP) when 10496 =< CP, CP =< 10763 -> 'V'; +uts46_map(CP) when 10765 =< CP, CP =< 10867 -> 'V'; +uts46_map(CP) when 10871 =< CP, CP =< 10971 -> 'V'; +uts46_map(CP) when 10973 =< CP, CP =< 11007 -> 'V'; +uts46_map(CP) when 11008 =< CP, CP =< 11021 -> 'V'; +uts46_map(CP) when 11022 =< CP, CP =< 11027 -> 'V'; 
+uts46_map(CP) when 11028 =< CP, CP =< 11034 -> 'V'; +uts46_map(CP) when 11035 =< CP, CP =< 11039 -> 'V'; +uts46_map(CP) when 11040 =< CP, CP =< 11043 -> 'V'; +uts46_map(CP) when 11044 =< CP, CP =< 11084 -> 'V'; +uts46_map(CP) when 11085 =< CP, CP =< 11087 -> 'V'; +uts46_map(CP) when 11088 =< CP, CP =< 11092 -> 'V'; +uts46_map(CP) when 11093 =< CP, CP =< 11097 -> 'V'; +uts46_map(CP) when 11098 =< CP, CP =< 11123 -> 'V'; +uts46_map(CP) when 11124 =< CP, CP =< 11125 -> 'X'; +uts46_map(CP) when 11126 =< CP, CP =< 11157 -> 'V'; +uts46_map(CP) when 11160 =< CP, CP =< 11193 -> 'V'; +uts46_map(CP) when 11194 =< CP, CP =< 11196 -> 'V'; +uts46_map(CP) when 11197 =< CP, CP =< 11208 -> 'V'; +uts46_map(CP) when 11210 =< CP, CP =< 11217 -> 'V'; +uts46_map(CP) when 11219 =< CP, CP =< 11243 -> 'V'; +uts46_map(CP) when 11244 =< CP, CP =< 11247 -> 'V'; +uts46_map(CP) when 11248 =< CP, CP =< 11262 -> 'V'; +uts46_map(CP) when 11312 =< CP, CP =< 11358 -> 'V'; +uts46_map(CP) when 11365 =< CP, CP =< 11366 -> 'V'; +uts46_map(CP) when 11382 =< CP, CP =< 11383 -> 'V'; +uts46_map(CP) when 11384 =< CP, CP =< 11387 -> 'V'; +uts46_map(CP) when 11491 =< CP, CP =< 11492 -> 'V'; +uts46_map(CP) when 11493 =< CP, CP =< 11498 -> 'V'; +uts46_map(CP) when 11502 =< CP, CP =< 11505 -> 'V'; +uts46_map(CP) when 11508 =< CP, CP =< 11512 -> 'X'; +uts46_map(CP) when 11513 =< CP, CP =< 11519 -> 'V'; +uts46_map(CP) when 11520 =< CP, CP =< 11557 -> 'V'; +uts46_map(CP) when 11560 =< CP, CP =< 11564 -> 'X'; +uts46_map(CP) when 11566 =< CP, CP =< 11567 -> 'X'; +uts46_map(CP) when 11568 =< CP, CP =< 11621 -> 'V'; +uts46_map(CP) when 11622 =< CP, CP =< 11623 -> 'V'; +uts46_map(CP) when 11624 =< CP, CP =< 11630 -> 'X'; +uts46_map(CP) when 11633 =< CP, CP =< 11646 -> 'X'; +uts46_map(CP) when 11648 =< CP, CP =< 11670 -> 'V'; +uts46_map(CP) when 11671 =< CP, CP =< 11679 -> 'X'; +uts46_map(CP) when 11680 =< CP, CP =< 11686 -> 'V'; +uts46_map(CP) when 11688 =< CP, CP =< 11694 -> 'V'; +uts46_map(CP) when 11696 =< CP, CP =< 
11702 -> 'V'; +uts46_map(CP) when 11704 =< CP, CP =< 11710 -> 'V'; +uts46_map(CP) when 11712 =< CP, CP =< 11718 -> 'V'; +uts46_map(CP) when 11720 =< CP, CP =< 11726 -> 'V'; +uts46_map(CP) when 11728 =< CP, CP =< 11734 -> 'V'; +uts46_map(CP) when 11736 =< CP, CP =< 11742 -> 'V'; +uts46_map(CP) when 11744 =< CP, CP =< 11775 -> 'V'; +uts46_map(CP) when 11776 =< CP, CP =< 11799 -> 'V'; +uts46_map(CP) when 11800 =< CP, CP =< 11803 -> 'V'; +uts46_map(CP) when 11804 =< CP, CP =< 11805 -> 'V'; +uts46_map(CP) when 11806 =< CP, CP =< 11822 -> 'V'; +uts46_map(CP) when 11826 =< CP, CP =< 11835 -> 'V'; +uts46_map(CP) when 11836 =< CP, CP =< 11842 -> 'V'; +uts46_map(CP) when 11843 =< CP, CP =< 11844 -> 'V'; +uts46_map(CP) when 11845 =< CP, CP =< 11849 -> 'V'; +uts46_map(CP) when 11850 =< CP, CP =< 11854 -> 'V'; +uts46_map(CP) when 11856 =< CP, CP =< 11858 -> 'V'; +uts46_map(CP) when 11859 =< CP, CP =< 11903 -> 'X'; +uts46_map(CP) when 11904 =< CP, CP =< 11929 -> 'V'; +uts46_map(CP) when 11931 =< CP, CP =< 11934 -> 'V'; +uts46_map(CP) when 11936 =< CP, CP =< 12018 -> 'V'; +uts46_map(CP) when 12020 =< CP, CP =< 12031 -> 'X'; +uts46_map(CP) when 12246 =< CP, CP =< 12271 -> 'X'; +uts46_map(CP) when 12272 =< CP, CP =< 12283 -> 'X'; +uts46_map(CP) when 12284 =< CP, CP =< 12287 -> 'X'; +uts46_map(CP) when 12291 =< CP, CP =< 12292 -> 'V'; +uts46_map(CP) when 12293 =< CP, CP =< 12295 -> 'V'; +uts46_map(CP) when 12296 =< CP, CP =< 12329 -> 'V'; +uts46_map(CP) when 12330 =< CP, CP =< 12333 -> 'V'; +uts46_map(CP) when 12334 =< CP, CP =< 12341 -> 'V'; +uts46_map(CP) when 12353 =< CP, CP =< 12436 -> 'V'; +uts46_map(CP) when 12437 =< CP, CP =< 12438 -> 'V'; +uts46_map(CP) when 12439 =< CP, CP =< 12440 -> 'X'; +uts46_map(CP) when 12441 =< CP, CP =< 12442 -> 'V'; +uts46_map(CP) when 12445 =< CP, CP =< 12446 -> 'V'; +uts46_map(CP) when 12449 =< CP, CP =< 12542 -> 'V'; +uts46_map(CP) when 12544 =< CP, CP =< 12548 -> 'X'; +uts46_map(CP) when 12549 =< CP, CP =< 12588 -> 'V'; +uts46_map(CP) when 
12688 =< CP, CP =< 12689 -> 'V'; +uts46_map(CP) when 12704 =< CP, CP =< 12727 -> 'V'; +uts46_map(CP) when 12728 =< CP, CP =< 12730 -> 'V'; +uts46_map(CP) when 12731 =< CP, CP =< 12735 -> 'V'; +uts46_map(CP) when 12736 =< CP, CP =< 12751 -> 'V'; +uts46_map(CP) when 12752 =< CP, CP =< 12771 -> 'V'; +uts46_map(CP) when 12772 =< CP, CP =< 12783 -> 'X'; +uts46_map(CP) when 12784 =< CP, CP =< 12799 -> 'V'; +uts46_map(CP) when 12872 =< CP, CP =< 12879 -> 'V'; +uts46_map(CP) when 13312 =< CP, CP =< 19893 -> 'V'; +uts46_map(CP) when 19894 =< CP, CP =< 19903 -> 'V'; +uts46_map(CP) when 19904 =< CP, CP =< 19967 -> 'V'; +uts46_map(CP) when 19968 =< CP, CP =< 40869 -> 'V'; +uts46_map(CP) when 40870 =< CP, CP =< 40891 -> 'V'; +uts46_map(CP) when 40892 =< CP, CP =< 40899 -> 'V'; +uts46_map(CP) when 40900 =< CP, CP =< 40907 -> 'V'; +uts46_map(CP) when 40909 =< CP, CP =< 40917 -> 'V'; +uts46_map(CP) when 40918 =< CP, CP =< 40938 -> 'V'; +uts46_map(CP) when 40939 =< CP, CP =< 40943 -> 'V'; +uts46_map(CP) when 40944 =< CP, CP =< 40956 -> 'V'; +uts46_map(CP) when 40957 =< CP, CP =< 40959 -> 'X'; +uts46_map(CP) when 40960 =< CP, CP =< 42124 -> 'V'; +uts46_map(CP) when 42125 =< CP, CP =< 42127 -> 'X'; +uts46_map(CP) when 42128 =< CP, CP =< 42145 -> 'V'; +uts46_map(CP) when 42146 =< CP, CP =< 42147 -> 'V'; +uts46_map(CP) when 42148 =< CP, CP =< 42163 -> 'V'; +uts46_map(CP) when 42165 =< CP, CP =< 42176 -> 'V'; +uts46_map(CP) when 42178 =< CP, CP =< 42180 -> 'V'; +uts46_map(CP) when 42183 =< CP, CP =< 42191 -> 'X'; +uts46_map(CP) when 42192 =< CP, CP =< 42237 -> 'V'; +uts46_map(CP) when 42238 =< CP, CP =< 42239 -> 'V'; +uts46_map(CP) when 42240 =< CP, CP =< 42508 -> 'V'; +uts46_map(CP) when 42509 =< CP, CP =< 42511 -> 'V'; +uts46_map(CP) when 42512 =< CP, CP =< 42539 -> 'V'; +uts46_map(CP) when 42540 =< CP, CP =< 42559 -> 'X'; +uts46_map(CP) when 42605 =< CP, CP =< 42607 -> 'V'; +uts46_map(CP) when 42608 =< CP, CP =< 42611 -> 'V'; +uts46_map(CP) when 42612 =< CP, CP =< 42619 -> 'V'; 
+uts46_map(CP) when 42620 =< CP, CP =< 42621 -> 'V'; +uts46_map(CP) when 42656 =< CP, CP =< 42725 -> 'V'; +uts46_map(CP) when 42726 =< CP, CP =< 42735 -> 'V'; +uts46_map(CP) when 42736 =< CP, CP =< 42737 -> 'V'; +uts46_map(CP) when 42738 =< CP, CP =< 42743 -> 'V'; +uts46_map(CP) when 42744 =< CP, CP =< 42751 -> 'X'; +uts46_map(CP) when 42752 =< CP, CP =< 42774 -> 'V'; +uts46_map(CP) when 42775 =< CP, CP =< 42778 -> 'V'; +uts46_map(CP) when 42779 =< CP, CP =< 42783 -> 'V'; +uts46_map(CP) when 42784 =< CP, CP =< 42785 -> 'V'; +uts46_map(CP) when 42799 =< CP, CP =< 42801 -> 'V'; +uts46_map(CP) when 42865 =< CP, CP =< 42872 -> 'V'; +uts46_map(CP) when 42887 =< CP, CP =< 42888 -> 'V'; +uts46_map(CP) when 42889 =< CP, CP =< 42890 -> 'V'; +uts46_map(CP) when 42900 =< CP, CP =< 42901 -> 'V'; +uts46_map(CP) when 42944 =< CP, CP =< 42945 -> 'X'; +uts46_map(CP) when 42955 =< CP, CP =< 42996 -> 'X'; +uts46_map(CP) when 43003 =< CP, CP =< 43007 -> 'V'; +uts46_map(CP) when 43008 =< CP, CP =< 43047 -> 'V'; +uts46_map(CP) when 43048 =< CP, CP =< 43051 -> 'V'; +uts46_map(CP) when 43053 =< CP, CP =< 43055 -> 'X'; +uts46_map(CP) when 43056 =< CP, CP =< 43065 -> 'V'; +uts46_map(CP) when 43066 =< CP, CP =< 43071 -> 'X'; +uts46_map(CP) when 43072 =< CP, CP =< 43123 -> 'V'; +uts46_map(CP) when 43124 =< CP, CP =< 43127 -> 'V'; +uts46_map(CP) when 43128 =< CP, CP =< 43135 -> 'X'; +uts46_map(CP) when 43136 =< CP, CP =< 43204 -> 'V'; +uts46_map(CP) when 43206 =< CP, CP =< 43213 -> 'X'; +uts46_map(CP) when 43214 =< CP, CP =< 43215 -> 'V'; +uts46_map(CP) when 43216 =< CP, CP =< 43225 -> 'V'; +uts46_map(CP) when 43226 =< CP, CP =< 43231 -> 'X'; +uts46_map(CP) when 43232 =< CP, CP =< 43255 -> 'V'; +uts46_map(CP) when 43256 =< CP, CP =< 43258 -> 'V'; +uts46_map(CP) when 43262 =< CP, CP =< 43263 -> 'V'; +uts46_map(CP) when 43264 =< CP, CP =< 43309 -> 'V'; +uts46_map(CP) when 43310 =< CP, CP =< 43311 -> 'V'; +uts46_map(CP) when 43312 =< CP, CP =< 43347 -> 'V'; +uts46_map(CP) when 43348 =< CP, CP =< 
43358 -> 'X'; +uts46_map(CP) when 43360 =< CP, CP =< 43388 -> 'V'; +uts46_map(CP) when 43389 =< CP, CP =< 43391 -> 'X'; +uts46_map(CP) when 43392 =< CP, CP =< 43456 -> 'V'; +uts46_map(CP) when 43457 =< CP, CP =< 43469 -> 'V'; +uts46_map(CP) when 43471 =< CP, CP =< 43481 -> 'V'; +uts46_map(CP) when 43482 =< CP, CP =< 43485 -> 'X'; +uts46_map(CP) when 43486 =< CP, CP =< 43487 -> 'V'; +uts46_map(CP) when 43488 =< CP, CP =< 43518 -> 'V'; +uts46_map(CP) when 43520 =< CP, CP =< 43574 -> 'V'; +uts46_map(CP) when 43575 =< CP, CP =< 43583 -> 'X'; +uts46_map(CP) when 43584 =< CP, CP =< 43597 -> 'V'; +uts46_map(CP) when 43598 =< CP, CP =< 43599 -> 'X'; +uts46_map(CP) when 43600 =< CP, CP =< 43609 -> 'V'; +uts46_map(CP) when 43610 =< CP, CP =< 43611 -> 'X'; +uts46_map(CP) when 43612 =< CP, CP =< 43615 -> 'V'; +uts46_map(CP) when 43616 =< CP, CP =< 43638 -> 'V'; +uts46_map(CP) when 43639 =< CP, CP =< 43641 -> 'V'; +uts46_map(CP) when 43642 =< CP, CP =< 43643 -> 'V'; +uts46_map(CP) when 43644 =< CP, CP =< 43647 -> 'V'; +uts46_map(CP) when 43648 =< CP, CP =< 43714 -> 'V'; +uts46_map(CP) when 43715 =< CP, CP =< 43738 -> 'X'; +uts46_map(CP) when 43739 =< CP, CP =< 43741 -> 'V'; +uts46_map(CP) when 43742 =< CP, CP =< 43743 -> 'V'; +uts46_map(CP) when 43744 =< CP, CP =< 43759 -> 'V'; +uts46_map(CP) when 43760 =< CP, CP =< 43761 -> 'V'; +uts46_map(CP) when 43762 =< CP, CP =< 43766 -> 'V'; +uts46_map(CP) when 43767 =< CP, CP =< 43776 -> 'X'; +uts46_map(CP) when 43777 =< CP, CP =< 43782 -> 'V'; +uts46_map(CP) when 43783 =< CP, CP =< 43784 -> 'X'; +uts46_map(CP) when 43785 =< CP, CP =< 43790 -> 'V'; +uts46_map(CP) when 43791 =< CP, CP =< 43792 -> 'X'; +uts46_map(CP) when 43793 =< CP, CP =< 43798 -> 'V'; +uts46_map(CP) when 43799 =< CP, CP =< 43807 -> 'X'; +uts46_map(CP) when 43808 =< CP, CP =< 43814 -> 'V'; +uts46_map(CP) when 43816 =< CP, CP =< 43822 -> 'V'; +uts46_map(CP) when 43824 =< CP, CP =< 43866 -> 'V'; +uts46_map(CP) when 43872 =< CP, CP =< 43875 -> 'V'; +uts46_map(CP) when 
43876 =< CP, CP =< 43877 -> 'V'; +uts46_map(CP) when 43878 =< CP, CP =< 43879 -> 'V'; +uts46_map(CP) when 43882 =< CP, CP =< 43883 -> 'V'; +uts46_map(CP) when 43884 =< CP, CP =< 43887 -> 'X'; +uts46_map(CP) when 43968 =< CP, CP =< 44010 -> 'V'; +uts46_map(CP) when 44012 =< CP, CP =< 44013 -> 'V'; +uts46_map(CP) when 44014 =< CP, CP =< 44015 -> 'X'; +uts46_map(CP) when 44016 =< CP, CP =< 44025 -> 'V'; +uts46_map(CP) when 44026 =< CP, CP =< 44031 -> 'X'; +uts46_map(CP) when 44032 =< CP, CP =< 55203 -> 'V'; +uts46_map(CP) when 55204 =< CP, CP =< 55215 -> 'X'; +uts46_map(CP) when 55216 =< CP, CP =< 55238 -> 'V'; +uts46_map(CP) when 55239 =< CP, CP =< 55242 -> 'X'; +uts46_map(CP) when 55243 =< CP, CP =< 55291 -> 'V'; +uts46_map(CP) when 55292 =< CP, CP =< 55295 -> 'X'; +uts46_map(CP) when 55296 =< CP, CP =< 57343 -> 'X'; +uts46_map(CP) when 57344 =< CP, CP =< 63743 -> 'X'; +uts46_map(CP) when 63751 =< CP, CP =< 63752 -> {'M', [40860]}; +uts46_map(CP) when 64014 =< CP, CP =< 64015 -> 'V'; +uts46_map(CP) when 64019 =< CP, CP =< 64020 -> 'V'; +uts46_map(CP) when 64035 =< CP, CP =< 64036 -> 'V'; +uts46_map(CP) when 64039 =< CP, CP =< 64041 -> 'V'; +uts46_map(CP) when 64093 =< CP, CP =< 64094 -> {'M', [33401]}; +uts46_map(CP) when 64110 =< CP, CP =< 64111 -> 'X'; +uts46_map(CP) when 64218 =< CP, CP =< 64255 -> 'X'; +uts46_map(CP) when 64261 =< CP, CP =< 64262 -> {'M', [115,116]}; +uts46_map(CP) when 64263 =< CP, CP =< 64274 -> 'X'; +uts46_map(CP) when 64280 =< CP, CP =< 64284 -> 'X'; +uts46_map(CP) when 64336 =< CP, CP =< 64337 -> {'M', [1649]}; +uts46_map(CP) when 64338 =< CP, CP =< 64341 -> {'M', [1659]}; +uts46_map(CP) when 64342 =< CP, CP =< 64345 -> {'M', [1662]}; +uts46_map(CP) when 64346 =< CP, CP =< 64349 -> {'M', [1664]}; +uts46_map(CP) when 64350 =< CP, CP =< 64353 -> {'M', [1658]}; +uts46_map(CP) when 64354 =< CP, CP =< 64357 -> {'M', [1663]}; +uts46_map(CP) when 64358 =< CP, CP =< 64361 -> {'M', [1657]}; +uts46_map(CP) when 64362 =< CP, CP =< 64365 -> {'M', 
[1700]}; +uts46_map(CP) when 64366 =< CP, CP =< 64369 -> {'M', [1702]}; +uts46_map(CP) when 64370 =< CP, CP =< 64373 -> {'M', [1668]}; +uts46_map(CP) when 64374 =< CP, CP =< 64377 -> {'M', [1667]}; +uts46_map(CP) when 64378 =< CP, CP =< 64381 -> {'M', [1670]}; +uts46_map(CP) when 64382 =< CP, CP =< 64385 -> {'M', [1671]}; +uts46_map(CP) when 64386 =< CP, CP =< 64387 -> {'M', [1677]}; +uts46_map(CP) when 64388 =< CP, CP =< 64389 -> {'M', [1676]}; +uts46_map(CP) when 64390 =< CP, CP =< 64391 -> {'M', [1678]}; +uts46_map(CP) when 64392 =< CP, CP =< 64393 -> {'M', [1672]}; +uts46_map(CP) when 64394 =< CP, CP =< 64395 -> {'M', [1688]}; +uts46_map(CP) when 64396 =< CP, CP =< 64397 -> {'M', [1681]}; +uts46_map(CP) when 64398 =< CP, CP =< 64401 -> {'M', [1705]}; +uts46_map(CP) when 64402 =< CP, CP =< 64405 -> {'M', [1711]}; +uts46_map(CP) when 64406 =< CP, CP =< 64409 -> {'M', [1715]}; +uts46_map(CP) when 64410 =< CP, CP =< 64413 -> {'M', [1713]}; +uts46_map(CP) when 64414 =< CP, CP =< 64415 -> {'M', [1722]}; +uts46_map(CP) when 64416 =< CP, CP =< 64419 -> {'M', [1723]}; +uts46_map(CP) when 64420 =< CP, CP =< 64421 -> {'M', [1728]}; +uts46_map(CP) when 64422 =< CP, CP =< 64425 -> {'M', [1729]}; +uts46_map(CP) when 64426 =< CP, CP =< 64429 -> {'M', [1726]}; +uts46_map(CP) when 64430 =< CP, CP =< 64431 -> {'M', [1746]}; +uts46_map(CP) when 64432 =< CP, CP =< 64433 -> {'M', [1747]}; +uts46_map(CP) when 64434 =< CP, CP =< 64449 -> 'V'; +uts46_map(CP) when 64450 =< CP, CP =< 64466 -> 'X'; +uts46_map(CP) when 64467 =< CP, CP =< 64470 -> {'M', [1709]}; +uts46_map(CP) when 64471 =< CP, CP =< 64472 -> {'M', [1735]}; +uts46_map(CP) when 64473 =< CP, CP =< 64474 -> {'M', [1734]}; +uts46_map(CP) when 64475 =< CP, CP =< 64476 -> {'M', [1736]}; +uts46_map(CP) when 64478 =< CP, CP =< 64479 -> {'M', [1739]}; +uts46_map(CP) when 64480 =< CP, CP =< 64481 -> {'M', [1733]}; +uts46_map(CP) when 64482 =< CP, CP =< 64483 -> {'M', [1737]}; +uts46_map(CP) when 64484 =< CP, CP =< 64487 -> {'M', 
[1744]}; +uts46_map(CP) when 64488 =< CP, CP =< 64489 -> {'M', [1609]}; +uts46_map(CP) when 64490 =< CP, CP =< 64491 -> {'M', [1574,1575]}; +uts46_map(CP) when 64492 =< CP, CP =< 64493 -> {'M', [1574,1749]}; +uts46_map(CP) when 64494 =< CP, CP =< 64495 -> {'M', [1574,1608]}; +uts46_map(CP) when 64496 =< CP, CP =< 64497 -> {'M', [1574,1735]}; +uts46_map(CP) when 64498 =< CP, CP =< 64499 -> {'M', [1574,1734]}; +uts46_map(CP) when 64500 =< CP, CP =< 64501 -> {'M', [1574,1736]}; +uts46_map(CP) when 64502 =< CP, CP =< 64504 -> {'M', [1574,1744]}; +uts46_map(CP) when 64505 =< CP, CP =< 64507 -> {'M', [1574,1609]}; +uts46_map(CP) when 64508 =< CP, CP =< 64511 -> {'M', [1740]}; +uts46_map(CP) when 64828 =< CP, CP =< 64829 -> {'M', [1575,1611]}; +uts46_map(CP) when 64830 =< CP, CP =< 64831 -> 'V'; +uts46_map(CP) when 64832 =< CP, CP =< 64847 -> 'X'; +uts46_map(CP) when 64849 =< CP, CP =< 64850 -> {'M', [1578,1581,1580]}; +uts46_map(CP) when 64856 =< CP, CP =< 64857 -> {'M', [1580,1605,1581]}; +uts46_map(CP) when 64863 =< CP, CP =< 64864 -> {'M', [1587,1605,1581]}; +uts46_map(CP) when 64866 =< CP, CP =< 64867 -> {'M', [1587,1605,1605]}; +uts46_map(CP) when 64868 =< CP, CP =< 64869 -> {'M', [1589,1581,1581]}; +uts46_map(CP) when 64871 =< CP, CP =< 64872 -> {'M', [1588,1581,1605]}; +uts46_map(CP) when 64874 =< CP, CP =< 64875 -> {'M', [1588,1605,1582]}; +uts46_map(CP) when 64876 =< CP, CP =< 64877 -> {'M', [1588,1605,1605]}; +uts46_map(CP) when 64879 =< CP, CP =< 64880 -> {'M', [1590,1582,1605]}; +uts46_map(CP) when 64881 =< CP, CP =< 64882 -> {'M', [1591,1605,1581]}; +uts46_map(CP) when 64886 =< CP, CP =< 64887 -> {'M', [1593,1605,1605]}; +uts46_map(CP) when 64892 =< CP, CP =< 64893 -> {'M', [1601,1582,1605]}; +uts46_map(CP) when 64899 =< CP, CP =< 64900 -> {'M', [1604,1580,1580]}; +uts46_map(CP) when 64901 =< CP, CP =< 64902 -> {'M', [1604,1582,1605]}; +uts46_map(CP) when 64903 =< CP, CP =< 64904 -> {'M', [1604,1605,1581]}; +uts46_map(CP) when 64912 =< CP, CP =< 64913 -> 
'X'; +uts46_map(CP) when 64919 =< CP, CP =< 64920 -> {'M', [1606,1580,1605]}; +uts46_map(CP) when 64924 =< CP, CP =< 64925 -> {'M', [1610,1605,1605]}; +uts46_map(CP) when 64968 =< CP, CP =< 64975 -> 'X'; +uts46_map(CP) when 64976 =< CP, CP =< 65007 -> 'X'; +uts46_map(CP) when 65022 =< CP, CP =< 65023 -> 'X'; +uts46_map(CP) when 65024 =< CP, CP =< 65039 -> 'I'; +uts46_map(CP) when 65050 =< CP, CP =< 65055 -> 'X'; +uts46_map(CP) when 65056 =< CP, CP =< 65059 -> 'V'; +uts46_map(CP) when 65060 =< CP, CP =< 65062 -> 'V'; +uts46_map(CP) when 65063 =< CP, CP =< 65069 -> 'V'; +uts46_map(CP) when 65070 =< CP, CP =< 65071 -> 'V'; +uts46_map(CP) when 65075 =< CP, CP =< 65076 -> {'3', [95]}; +uts46_map(CP) when 65093 =< CP, CP =< 65094 -> 'V'; +uts46_map(CP) when 65097 =< CP, CP =< 65100 -> {'3', [32,773]}; +uts46_map(CP) when 65101 =< CP, CP =< 65103 -> {'3', [95]}; +uts46_map(CP) when 65132 =< CP, CP =< 65135 -> 'X'; +uts46_map(CP) when 65153 =< CP, CP =< 65154 -> {'M', [1570]}; +uts46_map(CP) when 65155 =< CP, CP =< 65156 -> {'M', [1571]}; +uts46_map(CP) when 65157 =< CP, CP =< 65158 -> {'M', [1572]}; +uts46_map(CP) when 65159 =< CP, CP =< 65160 -> {'M', [1573]}; +uts46_map(CP) when 65161 =< CP, CP =< 65164 -> {'M', [1574]}; +uts46_map(CP) when 65165 =< CP, CP =< 65166 -> {'M', [1575]}; +uts46_map(CP) when 65167 =< CP, CP =< 65170 -> {'M', [1576]}; +uts46_map(CP) when 65171 =< CP, CP =< 65172 -> {'M', [1577]}; +uts46_map(CP) when 65173 =< CP, CP =< 65176 -> {'M', [1578]}; +uts46_map(CP) when 65177 =< CP, CP =< 65180 -> {'M', [1579]}; +uts46_map(CP) when 65181 =< CP, CP =< 65184 -> {'M', [1580]}; +uts46_map(CP) when 65185 =< CP, CP =< 65188 -> {'M', [1581]}; +uts46_map(CP) when 65189 =< CP, CP =< 65192 -> {'M', [1582]}; +uts46_map(CP) when 65193 =< CP, CP =< 65194 -> {'M', [1583]}; +uts46_map(CP) when 65195 =< CP, CP =< 65196 -> {'M', [1584]}; +uts46_map(CP) when 65197 =< CP, CP =< 65198 -> {'M', [1585]}; +uts46_map(CP) when 65199 =< CP, CP =< 65200 -> {'M', [1586]}; 
+uts46_map(CP) when 65201 =< CP, CP =< 65204 -> {'M', [1587]}; +uts46_map(CP) when 65205 =< CP, CP =< 65208 -> {'M', [1588]}; +uts46_map(CP) when 65209 =< CP, CP =< 65212 -> {'M', [1589]}; +uts46_map(CP) when 65213 =< CP, CP =< 65216 -> {'M', [1590]}; +uts46_map(CP) when 65217 =< CP, CP =< 65220 -> {'M', [1591]}; +uts46_map(CP) when 65221 =< CP, CP =< 65224 -> {'M', [1592]}; +uts46_map(CP) when 65225 =< CP, CP =< 65228 -> {'M', [1593]}; +uts46_map(CP) when 65229 =< CP, CP =< 65232 -> {'M', [1594]}; +uts46_map(CP) when 65233 =< CP, CP =< 65236 -> {'M', [1601]}; +uts46_map(CP) when 65237 =< CP, CP =< 65240 -> {'M', [1602]}; +uts46_map(CP) when 65241 =< CP, CP =< 65244 -> {'M', [1603]}; +uts46_map(CP) when 65245 =< CP, CP =< 65248 -> {'M', [1604]}; +uts46_map(CP) when 65249 =< CP, CP =< 65252 -> {'M', [1605]}; +uts46_map(CP) when 65253 =< CP, CP =< 65256 -> {'M', [1606]}; +uts46_map(CP) when 65257 =< CP, CP =< 65260 -> {'M', [1607]}; +uts46_map(CP) when 65261 =< CP, CP =< 65262 -> {'M', [1608]}; +uts46_map(CP) when 65263 =< CP, CP =< 65264 -> {'M', [1609]}; +uts46_map(CP) when 65265 =< CP, CP =< 65268 -> {'M', [1610]}; +uts46_map(CP) when 65269 =< CP, CP =< 65270 -> {'M', [1604,1570]}; +uts46_map(CP) when 65271 =< CP, CP =< 65272 -> {'M', [1604,1571]}; +uts46_map(CP) when 65273 =< CP, CP =< 65274 -> {'M', [1604,1573]}; +uts46_map(CP) when 65275 =< CP, CP =< 65276 -> {'M', [1604,1575]}; +uts46_map(CP) when 65277 =< CP, CP =< 65278 -> 'X'; +uts46_map(CP) when 65471 =< CP, CP =< 65473 -> 'X'; +uts46_map(CP) when 65480 =< CP, CP =< 65481 -> 'X'; +uts46_map(CP) when 65488 =< CP, CP =< 65489 -> 'X'; +uts46_map(CP) when 65496 =< CP, CP =< 65497 -> 'X'; +uts46_map(CP) when 65501 =< CP, CP =< 65503 -> 'X'; +uts46_map(CP) when 65519 =< CP, CP =< 65528 -> 'X'; +uts46_map(CP) when 65529 =< CP, CP =< 65531 -> 'X'; +uts46_map(CP) when 65534 =< CP, CP =< 65535 -> 'X'; +uts46_map(CP) when 65536 =< CP, CP =< 65547 -> 'V'; +uts46_map(CP) when 65549 =< CP, CP =< 65574 -> 'V'; 
+uts46_map(CP) when 65576 =< CP, CP =< 65594 -> 'V'; +uts46_map(CP) when 65596 =< CP, CP =< 65597 -> 'V'; +uts46_map(CP) when 65599 =< CP, CP =< 65613 -> 'V'; +uts46_map(CP) when 65614 =< CP, CP =< 65615 -> 'X'; +uts46_map(CP) when 65616 =< CP, CP =< 65629 -> 'V'; +uts46_map(CP) when 65630 =< CP, CP =< 65663 -> 'X'; +uts46_map(CP) when 65664 =< CP, CP =< 65786 -> 'V'; +uts46_map(CP) when 65787 =< CP, CP =< 65791 -> 'X'; +uts46_map(CP) when 65792 =< CP, CP =< 65794 -> 'V'; +uts46_map(CP) when 65795 =< CP, CP =< 65798 -> 'X'; +uts46_map(CP) when 65799 =< CP, CP =< 65843 -> 'V'; +uts46_map(CP) when 65844 =< CP, CP =< 65846 -> 'X'; +uts46_map(CP) when 65847 =< CP, CP =< 65855 -> 'V'; +uts46_map(CP) when 65856 =< CP, CP =< 65930 -> 'V'; +uts46_map(CP) when 65931 =< CP, CP =< 65932 -> 'V'; +uts46_map(CP) when 65933 =< CP, CP =< 65934 -> 'V'; +uts46_map(CP) when 65936 =< CP, CP =< 65947 -> 'V'; +uts46_map(CP) when 65949 =< CP, CP =< 65951 -> 'X'; +uts46_map(CP) when 65953 =< CP, CP =< 65999 -> 'X'; +uts46_map(CP) when 66000 =< CP, CP =< 66044 -> 'V'; +uts46_map(CP) when 66046 =< CP, CP =< 66175 -> 'X'; +uts46_map(CP) when 66176 =< CP, CP =< 66204 -> 'V'; +uts46_map(CP) when 66205 =< CP, CP =< 66207 -> 'X'; +uts46_map(CP) when 66208 =< CP, CP =< 66256 -> 'V'; +uts46_map(CP) when 66257 =< CP, CP =< 66271 -> 'X'; +uts46_map(CP) when 66273 =< CP, CP =< 66299 -> 'V'; +uts46_map(CP) when 66300 =< CP, CP =< 66303 -> 'X'; +uts46_map(CP) when 66304 =< CP, CP =< 66334 -> 'V'; +uts46_map(CP) when 66336 =< CP, CP =< 66339 -> 'V'; +uts46_map(CP) when 66340 =< CP, CP =< 66348 -> 'X'; +uts46_map(CP) when 66349 =< CP, CP =< 66351 -> 'V'; +uts46_map(CP) when 66352 =< CP, CP =< 66368 -> 'V'; +uts46_map(CP) when 66370 =< CP, CP =< 66377 -> 'V'; +uts46_map(CP) when 66379 =< CP, CP =< 66383 -> 'X'; +uts46_map(CP) when 66384 =< CP, CP =< 66426 -> 'V'; +uts46_map(CP) when 66427 =< CP, CP =< 66431 -> 'X'; +uts46_map(CP) when 66432 =< CP, CP =< 66461 -> 'V'; +uts46_map(CP) when 66464 =< CP, CP =< 
66499 -> 'V'; +uts46_map(CP) when 66500 =< CP, CP =< 66503 -> 'X'; +uts46_map(CP) when 66504 =< CP, CP =< 66511 -> 'V'; +uts46_map(CP) when 66512 =< CP, CP =< 66517 -> 'V'; +uts46_map(CP) when 66518 =< CP, CP =< 66559 -> 'X'; +uts46_map(CP) when 66600 =< CP, CP =< 66637 -> 'V'; +uts46_map(CP) when 66638 =< CP, CP =< 66717 -> 'V'; +uts46_map(CP) when 66718 =< CP, CP =< 66719 -> 'X'; +uts46_map(CP) when 66720 =< CP, CP =< 66729 -> 'V'; +uts46_map(CP) when 66730 =< CP, CP =< 66735 -> 'X'; +uts46_map(CP) when 66772 =< CP, CP =< 66775 -> 'X'; +uts46_map(CP) when 66776 =< CP, CP =< 66811 -> 'V'; +uts46_map(CP) when 66812 =< CP, CP =< 66815 -> 'X'; +uts46_map(CP) when 66816 =< CP, CP =< 66855 -> 'V'; +uts46_map(CP) when 66856 =< CP, CP =< 66863 -> 'X'; +uts46_map(CP) when 66864 =< CP, CP =< 66915 -> 'V'; +uts46_map(CP) when 66916 =< CP, CP =< 66926 -> 'X'; +uts46_map(CP) when 66928 =< CP, CP =< 67071 -> 'X'; +uts46_map(CP) when 67072 =< CP, CP =< 67382 -> 'V'; +uts46_map(CP) when 67383 =< CP, CP =< 67391 -> 'X'; +uts46_map(CP) when 67392 =< CP, CP =< 67413 -> 'V'; +uts46_map(CP) when 67414 =< CP, CP =< 67423 -> 'X'; +uts46_map(CP) when 67424 =< CP, CP =< 67431 -> 'V'; +uts46_map(CP) when 67432 =< CP, CP =< 67583 -> 'X'; +uts46_map(CP) when 67584 =< CP, CP =< 67589 -> 'V'; +uts46_map(CP) when 67590 =< CP, CP =< 67591 -> 'X'; +uts46_map(CP) when 67594 =< CP, CP =< 67637 -> 'V'; +uts46_map(CP) when 67639 =< CP, CP =< 67640 -> 'V'; +uts46_map(CP) when 67641 =< CP, CP =< 67643 -> 'X'; +uts46_map(CP) when 67645 =< CP, CP =< 67646 -> 'X'; +uts46_map(CP) when 67648 =< CP, CP =< 67669 -> 'V'; +uts46_map(CP) when 67671 =< CP, CP =< 67679 -> 'V'; +uts46_map(CP) when 67680 =< CP, CP =< 67702 -> 'V'; +uts46_map(CP) when 67703 =< CP, CP =< 67711 -> 'V'; +uts46_map(CP) when 67712 =< CP, CP =< 67742 -> 'V'; +uts46_map(CP) when 67743 =< CP, CP =< 67750 -> 'X'; +uts46_map(CP) when 67751 =< CP, CP =< 67759 -> 'V'; +uts46_map(CP) when 67760 =< CP, CP =< 67807 -> 'X'; +uts46_map(CP) when 
67808 =< CP, CP =< 67826 -> 'V'; +uts46_map(CP) when 67828 =< CP, CP =< 67829 -> 'V'; +uts46_map(CP) when 67830 =< CP, CP =< 67834 -> 'X'; +uts46_map(CP) when 67835 =< CP, CP =< 67839 -> 'V'; +uts46_map(CP) when 67840 =< CP, CP =< 67861 -> 'V'; +uts46_map(CP) when 67862 =< CP, CP =< 67865 -> 'V'; +uts46_map(CP) when 67866 =< CP, CP =< 67867 -> 'V'; +uts46_map(CP) when 67868 =< CP, CP =< 67870 -> 'X'; +uts46_map(CP) when 67872 =< CP, CP =< 67897 -> 'V'; +uts46_map(CP) when 67898 =< CP, CP =< 67902 -> 'X'; +uts46_map(CP) when 67904 =< CP, CP =< 67967 -> 'X'; +uts46_map(CP) when 67968 =< CP, CP =< 68023 -> 'V'; +uts46_map(CP) when 68024 =< CP, CP =< 68027 -> 'X'; +uts46_map(CP) when 68028 =< CP, CP =< 68029 -> 'V'; +uts46_map(CP) when 68030 =< CP, CP =< 68031 -> 'V'; +uts46_map(CP) when 68032 =< CP, CP =< 68047 -> 'V'; +uts46_map(CP) when 68048 =< CP, CP =< 68049 -> 'X'; +uts46_map(CP) when 68050 =< CP, CP =< 68095 -> 'V'; +uts46_map(CP) when 68096 =< CP, CP =< 68099 -> 'V'; +uts46_map(CP) when 68101 =< CP, CP =< 68102 -> 'V'; +uts46_map(CP) when 68103 =< CP, CP =< 68107 -> 'X'; +uts46_map(CP) when 68108 =< CP, CP =< 68115 -> 'V'; +uts46_map(CP) when 68117 =< CP, CP =< 68119 -> 'V'; +uts46_map(CP) when 68121 =< CP, CP =< 68147 -> 'V'; +uts46_map(CP) when 68148 =< CP, CP =< 68149 -> 'V'; +uts46_map(CP) when 68150 =< CP, CP =< 68151 -> 'X'; +uts46_map(CP) when 68152 =< CP, CP =< 68154 -> 'V'; +uts46_map(CP) when 68155 =< CP, CP =< 68158 -> 'X'; +uts46_map(CP) when 68160 =< CP, CP =< 68167 -> 'V'; +uts46_map(CP) when 68169 =< CP, CP =< 68175 -> 'X'; +uts46_map(CP) when 68176 =< CP, CP =< 68184 -> 'V'; +uts46_map(CP) when 68185 =< CP, CP =< 68191 -> 'X'; +uts46_map(CP) when 68192 =< CP, CP =< 68220 -> 'V'; +uts46_map(CP) when 68221 =< CP, CP =< 68223 -> 'V'; +uts46_map(CP) when 68224 =< CP, CP =< 68252 -> 'V'; +uts46_map(CP) when 68253 =< CP, CP =< 68255 -> 'V'; +uts46_map(CP) when 68256 =< CP, CP =< 68287 -> 'X'; +uts46_map(CP) when 68288 =< CP, CP =< 68295 -> 'V'; 
+uts46_map(CP) when 68297 =< CP, CP =< 68326 -> 'V'; +uts46_map(CP) when 68327 =< CP, CP =< 68330 -> 'X'; +uts46_map(CP) when 68331 =< CP, CP =< 68342 -> 'V'; +uts46_map(CP) when 68343 =< CP, CP =< 68351 -> 'X'; +uts46_map(CP) when 68352 =< CP, CP =< 68405 -> 'V'; +uts46_map(CP) when 68406 =< CP, CP =< 68408 -> 'X'; +uts46_map(CP) when 68409 =< CP, CP =< 68415 -> 'V'; +uts46_map(CP) when 68416 =< CP, CP =< 68437 -> 'V'; +uts46_map(CP) when 68438 =< CP, CP =< 68439 -> 'X'; +uts46_map(CP) when 68440 =< CP, CP =< 68447 -> 'V'; +uts46_map(CP) when 68448 =< CP, CP =< 68466 -> 'V'; +uts46_map(CP) when 68467 =< CP, CP =< 68471 -> 'X'; +uts46_map(CP) when 68472 =< CP, CP =< 68479 -> 'V'; +uts46_map(CP) when 68480 =< CP, CP =< 68497 -> 'V'; +uts46_map(CP) when 68498 =< CP, CP =< 68504 -> 'X'; +uts46_map(CP) when 68505 =< CP, CP =< 68508 -> 'V'; +uts46_map(CP) when 68509 =< CP, CP =< 68520 -> 'X'; +uts46_map(CP) when 68521 =< CP, CP =< 68527 -> 'V'; +uts46_map(CP) when 68528 =< CP, CP =< 68607 -> 'X'; +uts46_map(CP) when 68608 =< CP, CP =< 68680 -> 'V'; +uts46_map(CP) when 68681 =< CP, CP =< 68735 -> 'X'; +uts46_map(CP) when 68787 =< CP, CP =< 68799 -> 'X'; +uts46_map(CP) when 68800 =< CP, CP =< 68850 -> 'V'; +uts46_map(CP) when 68851 =< CP, CP =< 68857 -> 'X'; +uts46_map(CP) when 68858 =< CP, CP =< 68863 -> 'V'; +uts46_map(CP) when 68864 =< CP, CP =< 68903 -> 'V'; +uts46_map(CP) when 68904 =< CP, CP =< 68911 -> 'X'; +uts46_map(CP) when 68912 =< CP, CP =< 68921 -> 'V'; +uts46_map(CP) when 68922 =< CP, CP =< 69215 -> 'X'; +uts46_map(CP) when 69216 =< CP, CP =< 69246 -> 'V'; +uts46_map(CP) when 69248 =< CP, CP =< 69289 -> 'V'; +uts46_map(CP) when 69291 =< CP, CP =< 69292 -> 'V'; +uts46_map(CP) when 69294 =< CP, CP =< 69295 -> 'X'; +uts46_map(CP) when 69296 =< CP, CP =< 69297 -> 'V'; +uts46_map(CP) when 69298 =< CP, CP =< 69375 -> 'X'; +uts46_map(CP) when 69376 =< CP, CP =< 69404 -> 'V'; +uts46_map(CP) when 69405 =< CP, CP =< 69414 -> 'V'; +uts46_map(CP) when 69416 =< CP, CP =< 
69423 -> 'X'; +uts46_map(CP) when 69424 =< CP, CP =< 69456 -> 'V'; +uts46_map(CP) when 69457 =< CP, CP =< 69465 -> 'V'; +uts46_map(CP) when 69466 =< CP, CP =< 69551 -> 'X'; +uts46_map(CP) when 69552 =< CP, CP =< 69572 -> 'V'; +uts46_map(CP) when 69573 =< CP, CP =< 69579 -> 'V'; +uts46_map(CP) when 69580 =< CP, CP =< 69599 -> 'X'; +uts46_map(CP) when 69600 =< CP, CP =< 69622 -> 'V'; +uts46_map(CP) when 69623 =< CP, CP =< 69631 -> 'X'; +uts46_map(CP) when 69632 =< CP, CP =< 69702 -> 'V'; +uts46_map(CP) when 69703 =< CP, CP =< 69709 -> 'V'; +uts46_map(CP) when 69710 =< CP, CP =< 69713 -> 'X'; +uts46_map(CP) when 69714 =< CP, CP =< 69733 -> 'V'; +uts46_map(CP) when 69734 =< CP, CP =< 69743 -> 'V'; +uts46_map(CP) when 69744 =< CP, CP =< 69758 -> 'X'; +uts46_map(CP) when 69760 =< CP, CP =< 69818 -> 'V'; +uts46_map(CP) when 69819 =< CP, CP =< 69820 -> 'V'; +uts46_map(CP) when 69822 =< CP, CP =< 69825 -> 'V'; +uts46_map(CP) when 69826 =< CP, CP =< 69836 -> 'X'; +uts46_map(CP) when 69838 =< CP, CP =< 69839 -> 'X'; +uts46_map(CP) when 69840 =< CP, CP =< 69864 -> 'V'; +uts46_map(CP) when 69865 =< CP, CP =< 69871 -> 'X'; +uts46_map(CP) when 69872 =< CP, CP =< 69881 -> 'V'; +uts46_map(CP) when 69882 =< CP, CP =< 69887 -> 'X'; +uts46_map(CP) when 69888 =< CP, CP =< 69940 -> 'V'; +uts46_map(CP) when 69942 =< CP, CP =< 69951 -> 'V'; +uts46_map(CP) when 69952 =< CP, CP =< 69955 -> 'V'; +uts46_map(CP) when 69956 =< CP, CP =< 69958 -> 'V'; +uts46_map(CP) when 69960 =< CP, CP =< 69967 -> 'X'; +uts46_map(CP) when 69968 =< CP, CP =< 70003 -> 'V'; +uts46_map(CP) when 70004 =< CP, CP =< 70005 -> 'V'; +uts46_map(CP) when 70007 =< CP, CP =< 70015 -> 'X'; +uts46_map(CP) when 70016 =< CP, CP =< 70084 -> 'V'; +uts46_map(CP) when 70085 =< CP, CP =< 70088 -> 'V'; +uts46_map(CP) when 70089 =< CP, CP =< 70092 -> 'V'; +uts46_map(CP) when 70094 =< CP, CP =< 70095 -> 'V'; +uts46_map(CP) when 70096 =< CP, CP =< 70105 -> 'V'; +uts46_map(CP) when 70109 =< CP, CP =< 70111 -> 'V'; +uts46_map(CP) when 
70113 =< CP, CP =< 70132 -> 'V'; +uts46_map(CP) when 70133 =< CP, CP =< 70143 -> 'X'; +uts46_map(CP) when 70144 =< CP, CP =< 70161 -> 'V'; +uts46_map(CP) when 70163 =< CP, CP =< 70199 -> 'V'; +uts46_map(CP) when 70200 =< CP, CP =< 70205 -> 'V'; +uts46_map(CP) when 70207 =< CP, CP =< 70271 -> 'X'; +uts46_map(CP) when 70272 =< CP, CP =< 70278 -> 'V'; +uts46_map(CP) when 70282 =< CP, CP =< 70285 -> 'V'; +uts46_map(CP) when 70287 =< CP, CP =< 70301 -> 'V'; +uts46_map(CP) when 70303 =< CP, CP =< 70312 -> 'V'; +uts46_map(CP) when 70314 =< CP, CP =< 70319 -> 'X'; +uts46_map(CP) when 70320 =< CP, CP =< 70378 -> 'V'; +uts46_map(CP) when 70379 =< CP, CP =< 70383 -> 'X'; +uts46_map(CP) when 70384 =< CP, CP =< 70393 -> 'V'; +uts46_map(CP) when 70394 =< CP, CP =< 70399 -> 'X'; +uts46_map(CP) when 70401 =< CP, CP =< 70403 -> 'V'; +uts46_map(CP) when 70405 =< CP, CP =< 70412 -> 'V'; +uts46_map(CP) when 70413 =< CP, CP =< 70414 -> 'X'; +uts46_map(CP) when 70415 =< CP, CP =< 70416 -> 'V'; +uts46_map(CP) when 70417 =< CP, CP =< 70418 -> 'X'; +uts46_map(CP) when 70419 =< CP, CP =< 70440 -> 'V'; +uts46_map(CP) when 70442 =< CP, CP =< 70448 -> 'V'; +uts46_map(CP) when 70450 =< CP, CP =< 70451 -> 'V'; +uts46_map(CP) when 70453 =< CP, CP =< 70457 -> 'V'; +uts46_map(CP) when 70460 =< CP, CP =< 70468 -> 'V'; +uts46_map(CP) when 70469 =< CP, CP =< 70470 -> 'X'; +uts46_map(CP) when 70471 =< CP, CP =< 70472 -> 'V'; +uts46_map(CP) when 70473 =< CP, CP =< 70474 -> 'X'; +uts46_map(CP) when 70475 =< CP, CP =< 70477 -> 'V'; +uts46_map(CP) when 70478 =< CP, CP =< 70479 -> 'X'; +uts46_map(CP) when 70481 =< CP, CP =< 70486 -> 'X'; +uts46_map(CP) when 70488 =< CP, CP =< 70492 -> 'X'; +uts46_map(CP) when 70493 =< CP, CP =< 70499 -> 'V'; +uts46_map(CP) when 70500 =< CP, CP =< 70501 -> 'X'; +uts46_map(CP) when 70502 =< CP, CP =< 70508 -> 'V'; +uts46_map(CP) when 70509 =< CP, CP =< 70511 -> 'X'; +uts46_map(CP) when 70512 =< CP, CP =< 70516 -> 'V'; +uts46_map(CP) when 70517 =< CP, CP =< 70655 -> 'X'; 
+uts46_map(CP) when 70656 =< CP, CP =< 70730 -> 'V'; +uts46_map(CP) when 70731 =< CP, CP =< 70735 -> 'V'; +uts46_map(CP) when 70736 =< CP, CP =< 70745 -> 'V'; +uts46_map(CP) when 70752 =< CP, CP =< 70753 -> 'V'; +uts46_map(CP) when 70754 =< CP, CP =< 70783 -> 'X'; +uts46_map(CP) when 70784 =< CP, CP =< 70853 -> 'V'; +uts46_map(CP) when 70856 =< CP, CP =< 70863 -> 'X'; +uts46_map(CP) when 70864 =< CP, CP =< 70873 -> 'V'; +uts46_map(CP) when 70874 =< CP, CP =< 71039 -> 'X'; +uts46_map(CP) when 71040 =< CP, CP =< 71093 -> 'V'; +uts46_map(CP) when 71094 =< CP, CP =< 71095 -> 'X'; +uts46_map(CP) when 71096 =< CP, CP =< 71104 -> 'V'; +uts46_map(CP) when 71105 =< CP, CP =< 71113 -> 'V'; +uts46_map(CP) when 71114 =< CP, CP =< 71127 -> 'V'; +uts46_map(CP) when 71128 =< CP, CP =< 71133 -> 'V'; +uts46_map(CP) when 71134 =< CP, CP =< 71167 -> 'X'; +uts46_map(CP) when 71168 =< CP, CP =< 71232 -> 'V'; +uts46_map(CP) when 71233 =< CP, CP =< 71235 -> 'V'; +uts46_map(CP) when 71237 =< CP, CP =< 71247 -> 'X'; +uts46_map(CP) when 71248 =< CP, CP =< 71257 -> 'V'; +uts46_map(CP) when 71258 =< CP, CP =< 71263 -> 'X'; +uts46_map(CP) when 71264 =< CP, CP =< 71276 -> 'V'; +uts46_map(CP) when 71277 =< CP, CP =< 71295 -> 'X'; +uts46_map(CP) when 71296 =< CP, CP =< 71351 -> 'V'; +uts46_map(CP) when 71353 =< CP, CP =< 71359 -> 'X'; +uts46_map(CP) when 71360 =< CP, CP =< 71369 -> 'V'; +uts46_map(CP) when 71370 =< CP, CP =< 71423 -> 'X'; +uts46_map(CP) when 71424 =< CP, CP =< 71449 -> 'V'; +uts46_map(CP) when 71451 =< CP, CP =< 71452 -> 'X'; +uts46_map(CP) when 71453 =< CP, CP =< 71467 -> 'V'; +uts46_map(CP) when 71468 =< CP, CP =< 71471 -> 'X'; +uts46_map(CP) when 71472 =< CP, CP =< 71481 -> 'V'; +uts46_map(CP) when 71482 =< CP, CP =< 71487 -> 'V'; +uts46_map(CP) when 71488 =< CP, CP =< 71679 -> 'X'; +uts46_map(CP) when 71680 =< CP, CP =< 71738 -> 'V'; +uts46_map(CP) when 71740 =< CP, CP =< 71839 -> 'X'; +uts46_map(CP) when 71872 =< CP, CP =< 71913 -> 'V'; +uts46_map(CP) when 71914 =< CP, CP =< 
71922 -> 'V'; +uts46_map(CP) when 71923 =< CP, CP =< 71934 -> 'X'; +uts46_map(CP) when 71936 =< CP, CP =< 71942 -> 'V'; +uts46_map(CP) when 71943 =< CP, CP =< 71944 -> 'X'; +uts46_map(CP) when 71946 =< CP, CP =< 71947 -> 'X'; +uts46_map(CP) when 71948 =< CP, CP =< 71955 -> 'V'; +uts46_map(CP) when 71957 =< CP, CP =< 71958 -> 'V'; +uts46_map(CP) when 71960 =< CP, CP =< 71989 -> 'V'; +uts46_map(CP) when 71991 =< CP, CP =< 71992 -> 'V'; +uts46_map(CP) when 71993 =< CP, CP =< 71994 -> 'X'; +uts46_map(CP) when 71995 =< CP, CP =< 72003 -> 'V'; +uts46_map(CP) when 72004 =< CP, CP =< 72006 -> 'V'; +uts46_map(CP) when 72007 =< CP, CP =< 72015 -> 'X'; +uts46_map(CP) when 72016 =< CP, CP =< 72025 -> 'V'; +uts46_map(CP) when 72026 =< CP, CP =< 72095 -> 'X'; +uts46_map(CP) when 72096 =< CP, CP =< 72103 -> 'V'; +uts46_map(CP) when 72104 =< CP, CP =< 72105 -> 'X'; +uts46_map(CP) when 72106 =< CP, CP =< 72151 -> 'V'; +uts46_map(CP) when 72152 =< CP, CP =< 72153 -> 'X'; +uts46_map(CP) when 72154 =< CP, CP =< 72161 -> 'V'; +uts46_map(CP) when 72163 =< CP, CP =< 72164 -> 'V'; +uts46_map(CP) when 72165 =< CP, CP =< 72191 -> 'X'; +uts46_map(CP) when 72192 =< CP, CP =< 72254 -> 'V'; +uts46_map(CP) when 72255 =< CP, CP =< 72262 -> 'V'; +uts46_map(CP) when 72264 =< CP, CP =< 72271 -> 'X'; +uts46_map(CP) when 72272 =< CP, CP =< 72323 -> 'V'; +uts46_map(CP) when 72324 =< CP, CP =< 72325 -> 'V'; +uts46_map(CP) when 72326 =< CP, CP =< 72345 -> 'V'; +uts46_map(CP) when 72346 =< CP, CP =< 72348 -> 'V'; +uts46_map(CP) when 72350 =< CP, CP =< 72354 -> 'V'; +uts46_map(CP) when 72355 =< CP, CP =< 72383 -> 'X'; +uts46_map(CP) when 72384 =< CP, CP =< 72440 -> 'V'; +uts46_map(CP) when 72441 =< CP, CP =< 72703 -> 'X'; +uts46_map(CP) when 72704 =< CP, CP =< 72712 -> 'V'; +uts46_map(CP) when 72714 =< CP, CP =< 72758 -> 'V'; +uts46_map(CP) when 72760 =< CP, CP =< 72768 -> 'V'; +uts46_map(CP) when 72769 =< CP, CP =< 72773 -> 'V'; +uts46_map(CP) when 72774 =< CP, CP =< 72783 -> 'X'; +uts46_map(CP) when 
72784 =< CP, CP =< 72793 -> 'V'; +uts46_map(CP) when 72794 =< CP, CP =< 72812 -> 'V'; +uts46_map(CP) when 72813 =< CP, CP =< 72815 -> 'X'; +uts46_map(CP) when 72816 =< CP, CP =< 72817 -> 'V'; +uts46_map(CP) when 72818 =< CP, CP =< 72847 -> 'V'; +uts46_map(CP) when 72848 =< CP, CP =< 72849 -> 'X'; +uts46_map(CP) when 72850 =< CP, CP =< 72871 -> 'V'; +uts46_map(CP) when 72873 =< CP, CP =< 72886 -> 'V'; +uts46_map(CP) when 72887 =< CP, CP =< 72959 -> 'X'; +uts46_map(CP) when 72960 =< CP, CP =< 72966 -> 'V'; +uts46_map(CP) when 72968 =< CP, CP =< 72969 -> 'V'; +uts46_map(CP) when 72971 =< CP, CP =< 73014 -> 'V'; +uts46_map(CP) when 73015 =< CP, CP =< 73017 -> 'X'; +uts46_map(CP) when 73020 =< CP, CP =< 73021 -> 'V'; +uts46_map(CP) when 73023 =< CP, CP =< 73031 -> 'V'; +uts46_map(CP) when 73032 =< CP, CP =< 73039 -> 'X'; +uts46_map(CP) when 73040 =< CP, CP =< 73049 -> 'V'; +uts46_map(CP) when 73050 =< CP, CP =< 73055 -> 'X'; +uts46_map(CP) when 73056 =< CP, CP =< 73061 -> 'V'; +uts46_map(CP) when 73063 =< CP, CP =< 73064 -> 'V'; +uts46_map(CP) when 73066 =< CP, CP =< 73102 -> 'V'; +uts46_map(CP) when 73104 =< CP, CP =< 73105 -> 'V'; +uts46_map(CP) when 73107 =< CP, CP =< 73112 -> 'V'; +uts46_map(CP) when 73113 =< CP, CP =< 73119 -> 'X'; +uts46_map(CP) when 73120 =< CP, CP =< 73129 -> 'V'; +uts46_map(CP) when 73130 =< CP, CP =< 73439 -> 'X'; +uts46_map(CP) when 73440 =< CP, CP =< 73462 -> 'V'; +uts46_map(CP) when 73463 =< CP, CP =< 73464 -> 'V'; +uts46_map(CP) when 73465 =< CP, CP =< 73647 -> 'X'; +uts46_map(CP) when 73649 =< CP, CP =< 73663 -> 'X'; +uts46_map(CP) when 73664 =< CP, CP =< 73713 -> 'V'; +uts46_map(CP) when 73714 =< CP, CP =< 73726 -> 'X'; +uts46_map(CP) when 73728 =< CP, CP =< 74606 -> 'V'; +uts46_map(CP) when 74607 =< CP, CP =< 74648 -> 'V'; +uts46_map(CP) when 74650 =< CP, CP =< 74751 -> 'X'; +uts46_map(CP) when 74752 =< CP, CP =< 74850 -> 'V'; +uts46_map(CP) when 74851 =< CP, CP =< 74862 -> 'V'; +uts46_map(CP) when 74864 =< CP, CP =< 74867 -> 'V'; 
+uts46_map(CP) when 74869 =< CP, CP =< 74879 -> 'X'; +uts46_map(CP) when 74880 =< CP, CP =< 75075 -> 'V'; +uts46_map(CP) when 75076 =< CP, CP =< 77823 -> 'X'; +uts46_map(CP) when 77824 =< CP, CP =< 78894 -> 'V'; +uts46_map(CP) when 78896 =< CP, CP =< 78904 -> 'X'; +uts46_map(CP) when 78905 =< CP, CP =< 82943 -> 'X'; +uts46_map(CP) when 82944 =< CP, CP =< 83526 -> 'V'; +uts46_map(CP) when 83527 =< CP, CP =< 92159 -> 'X'; +uts46_map(CP) when 92160 =< CP, CP =< 92728 -> 'V'; +uts46_map(CP) when 92729 =< CP, CP =< 92735 -> 'X'; +uts46_map(CP) when 92736 =< CP, CP =< 92766 -> 'V'; +uts46_map(CP) when 92768 =< CP, CP =< 92777 -> 'V'; +uts46_map(CP) when 92778 =< CP, CP =< 92781 -> 'X'; +uts46_map(CP) when 92782 =< CP, CP =< 92783 -> 'V'; +uts46_map(CP) when 92784 =< CP, CP =< 92879 -> 'X'; +uts46_map(CP) when 92880 =< CP, CP =< 92909 -> 'V'; +uts46_map(CP) when 92910 =< CP, CP =< 92911 -> 'X'; +uts46_map(CP) when 92912 =< CP, CP =< 92916 -> 'V'; +uts46_map(CP) when 92918 =< CP, CP =< 92927 -> 'X'; +uts46_map(CP) when 92928 =< CP, CP =< 92982 -> 'V'; +uts46_map(CP) when 92983 =< CP, CP =< 92991 -> 'V'; +uts46_map(CP) when 92992 =< CP, CP =< 92995 -> 'V'; +uts46_map(CP) when 92996 =< CP, CP =< 92997 -> 'V'; +uts46_map(CP) when 92998 =< CP, CP =< 93007 -> 'X'; +uts46_map(CP) when 93008 =< CP, CP =< 93017 -> 'V'; +uts46_map(CP) when 93019 =< CP, CP =< 93025 -> 'V'; +uts46_map(CP) when 93027 =< CP, CP =< 93047 -> 'V'; +uts46_map(CP) when 93048 =< CP, CP =< 93052 -> 'X'; +uts46_map(CP) when 93053 =< CP, CP =< 93071 -> 'V'; +uts46_map(CP) when 93072 =< CP, CP =< 93759 -> 'X'; +uts46_map(CP) when 93792 =< CP, CP =< 93823 -> 'V'; +uts46_map(CP) when 93824 =< CP, CP =< 93850 -> 'V'; +uts46_map(CP) when 93851 =< CP, CP =< 93951 -> 'X'; +uts46_map(CP) when 93952 =< CP, CP =< 94020 -> 'V'; +uts46_map(CP) when 94021 =< CP, CP =< 94026 -> 'V'; +uts46_map(CP) when 94027 =< CP, CP =< 94030 -> 'X'; +uts46_map(CP) when 94032 =< CP, CP =< 94078 -> 'V'; +uts46_map(CP) when 94079 =< CP, CP =< 
94087 -> 'V'; +uts46_map(CP) when 94088 =< CP, CP =< 94094 -> 'X'; +uts46_map(CP) when 94095 =< CP, CP =< 94111 -> 'V'; +uts46_map(CP) when 94112 =< CP, CP =< 94175 -> 'X'; +uts46_map(CP) when 94181 =< CP, CP =< 94191 -> 'X'; +uts46_map(CP) when 94192 =< CP, CP =< 94193 -> 'V'; +uts46_map(CP) when 94194 =< CP, CP =< 94207 -> 'X'; +uts46_map(CP) when 94208 =< CP, CP =< 100332 -> 'V'; +uts46_map(CP) when 100333 =< CP, CP =< 100337 -> 'V'; +uts46_map(CP) when 100338 =< CP, CP =< 100343 -> 'V'; +uts46_map(CP) when 100344 =< CP, CP =< 100351 -> 'X'; +uts46_map(CP) when 100352 =< CP, CP =< 101106 -> 'V'; +uts46_map(CP) when 101107 =< CP, CP =< 101589 -> 'V'; +uts46_map(CP) when 101590 =< CP, CP =< 101631 -> 'X'; +uts46_map(CP) when 101632 =< CP, CP =< 101640 -> 'V'; +uts46_map(CP) when 101641 =< CP, CP =< 110591 -> 'X'; +uts46_map(CP) when 110592 =< CP, CP =< 110593 -> 'V'; +uts46_map(CP) when 110594 =< CP, CP =< 110878 -> 'V'; +uts46_map(CP) when 110879 =< CP, CP =< 110927 -> 'X'; +uts46_map(CP) when 110928 =< CP, CP =< 110930 -> 'V'; +uts46_map(CP) when 110931 =< CP, CP =< 110947 -> 'X'; +uts46_map(CP) when 110948 =< CP, CP =< 110951 -> 'V'; +uts46_map(CP) when 110952 =< CP, CP =< 110959 -> 'X'; +uts46_map(CP) when 110960 =< CP, CP =< 111355 -> 'V'; +uts46_map(CP) when 111356 =< CP, CP =< 113663 -> 'X'; +uts46_map(CP) when 113664 =< CP, CP =< 113770 -> 'V'; +uts46_map(CP) when 113771 =< CP, CP =< 113775 -> 'X'; +uts46_map(CP) when 113776 =< CP, CP =< 113788 -> 'V'; +uts46_map(CP) when 113789 =< CP, CP =< 113791 -> 'X'; +uts46_map(CP) when 113792 =< CP, CP =< 113800 -> 'V'; +uts46_map(CP) when 113801 =< CP, CP =< 113807 -> 'X'; +uts46_map(CP) when 113808 =< CP, CP =< 113817 -> 'V'; +uts46_map(CP) when 113818 =< CP, CP =< 113819 -> 'X'; +uts46_map(CP) when 113821 =< CP, CP =< 113822 -> 'V'; +uts46_map(CP) when 113824 =< CP, CP =< 113827 -> 'I'; +uts46_map(CP) when 113828 =< CP, CP =< 118783 -> 'X'; +uts46_map(CP) when 118784 =< CP, CP =< 119029 -> 'V'; +uts46_map(CP) 
when 119030 =< CP, CP =< 119039 -> 'X'; +uts46_map(CP) when 119040 =< CP, CP =< 119078 -> 'V'; +uts46_map(CP) when 119079 =< CP, CP =< 119080 -> 'X'; +uts46_map(CP) when 119082 =< CP, CP =< 119133 -> 'V'; +uts46_map(CP) when 119141 =< CP, CP =< 119154 -> 'V'; +uts46_map(CP) when 119155 =< CP, CP =< 119162 -> 'X'; +uts46_map(CP) when 119163 =< CP, CP =< 119226 -> 'V'; +uts46_map(CP) when 119233 =< CP, CP =< 119261 -> 'V'; +uts46_map(CP) when 119262 =< CP, CP =< 119272 -> 'V'; +uts46_map(CP) when 119273 =< CP, CP =< 119295 -> 'X'; +uts46_map(CP) when 119296 =< CP, CP =< 119365 -> 'V'; +uts46_map(CP) when 119366 =< CP, CP =< 119519 -> 'X'; +uts46_map(CP) when 119520 =< CP, CP =< 119539 -> 'V'; +uts46_map(CP) when 119540 =< CP, CP =< 119551 -> 'X'; +uts46_map(CP) when 119552 =< CP, CP =< 119638 -> 'V'; +uts46_map(CP) when 119639 =< CP, CP =< 119647 -> 'X'; +uts46_map(CP) when 119648 =< CP, CP =< 119665 -> 'V'; +uts46_map(CP) when 119666 =< CP, CP =< 119672 -> 'V'; +uts46_map(CP) when 119673 =< CP, CP =< 119807 -> 'X'; +uts46_map(CP) when 119968 =< CP, CP =< 119969 -> 'X'; +uts46_map(CP) when 119971 =< CP, CP =< 119972 -> 'X'; +uts46_map(CP) when 119975 =< CP, CP =< 119976 -> 'X'; +uts46_map(CP) when 120075 =< CP, CP =< 120076 -> 'X'; +uts46_map(CP) when 120135 =< CP, CP =< 120137 -> 'X'; +uts46_map(CP) when 120486 =< CP, CP =< 120487 -> 'X'; +uts46_map(CP) when 120531 =< CP, CP =< 120532 -> {'M', [963]}; +uts46_map(CP) when 120589 =< CP, CP =< 120590 -> {'M', [963]}; +uts46_map(CP) when 120647 =< CP, CP =< 120648 -> {'M', [963]}; +uts46_map(CP) when 120705 =< CP, CP =< 120706 -> {'M', [963]}; +uts46_map(CP) when 120763 =< CP, CP =< 120764 -> {'M', [963]}; +uts46_map(CP) when 120778 =< CP, CP =< 120779 -> {'M', [989]}; +uts46_map(CP) when 120780 =< CP, CP =< 120781 -> 'X'; +uts46_map(CP) when 120832 =< CP, CP =< 121343 -> 'V'; +uts46_map(CP) when 121344 =< CP, CP =< 121398 -> 'V'; +uts46_map(CP) when 121399 =< CP, CP =< 121402 -> 'V'; +uts46_map(CP) when 121403 =< CP, 
CP =< 121452 -> 'V'; +uts46_map(CP) when 121453 =< CP, CP =< 121460 -> 'V'; +uts46_map(CP) when 121462 =< CP, CP =< 121475 -> 'V'; +uts46_map(CP) when 121477 =< CP, CP =< 121483 -> 'V'; +uts46_map(CP) when 121484 =< CP, CP =< 121498 -> 'X'; +uts46_map(CP) when 121499 =< CP, CP =< 121503 -> 'V'; +uts46_map(CP) when 121505 =< CP, CP =< 121519 -> 'V'; +uts46_map(CP) when 121520 =< CP, CP =< 122879 -> 'X'; +uts46_map(CP) when 122880 =< CP, CP =< 122886 -> 'V'; +uts46_map(CP) when 122888 =< CP, CP =< 122904 -> 'V'; +uts46_map(CP) when 122905 =< CP, CP =< 122906 -> 'X'; +uts46_map(CP) when 122907 =< CP, CP =< 122913 -> 'V'; +uts46_map(CP) when 122915 =< CP, CP =< 122916 -> 'V'; +uts46_map(CP) when 122918 =< CP, CP =< 122922 -> 'V'; +uts46_map(CP) when 122923 =< CP, CP =< 123135 -> 'X'; +uts46_map(CP) when 123136 =< CP, CP =< 123180 -> 'V'; +uts46_map(CP) when 123181 =< CP, CP =< 123183 -> 'X'; +uts46_map(CP) when 123184 =< CP, CP =< 123197 -> 'V'; +uts46_map(CP) when 123198 =< CP, CP =< 123199 -> 'X'; +uts46_map(CP) when 123200 =< CP, CP =< 123209 -> 'V'; +uts46_map(CP) when 123210 =< CP, CP =< 123213 -> 'X'; +uts46_map(CP) when 123216 =< CP, CP =< 123583 -> 'X'; +uts46_map(CP) when 123584 =< CP, CP =< 123641 -> 'V'; +uts46_map(CP) when 123642 =< CP, CP =< 123646 -> 'X'; +uts46_map(CP) when 123648 =< CP, CP =< 124927 -> 'X'; +uts46_map(CP) when 124928 =< CP, CP =< 125124 -> 'V'; +uts46_map(CP) when 125125 =< CP, CP =< 125126 -> 'X'; +uts46_map(CP) when 125127 =< CP, CP =< 125135 -> 'V'; +uts46_map(CP) when 125136 =< CP, CP =< 125142 -> 'V'; +uts46_map(CP) when 125143 =< CP, CP =< 125183 -> 'X'; +uts46_map(CP) when 125218 =< CP, CP =< 125258 -> 'V'; +uts46_map(CP) when 125260 =< CP, CP =< 125263 -> 'X'; +uts46_map(CP) when 125264 =< CP, CP =< 125273 -> 'V'; +uts46_map(CP) when 125274 =< CP, CP =< 125277 -> 'X'; +uts46_map(CP) when 125278 =< CP, CP =< 125279 -> 'V'; +uts46_map(CP) when 125280 =< CP, CP =< 126064 -> 'X'; +uts46_map(CP) when 126065 =< CP, CP =< 126132 -> 
'V'; +uts46_map(CP) when 126133 =< CP, CP =< 126208 -> 'X'; +uts46_map(CP) when 126209 =< CP, CP =< 126269 -> 'V'; +uts46_map(CP) when 126270 =< CP, CP =< 126463 -> 'X'; +uts46_map(CP) when 126501 =< CP, CP =< 126502 -> 'X'; +uts46_map(CP) when 126524 =< CP, CP =< 126529 -> 'X'; +uts46_map(CP) when 126531 =< CP, CP =< 126534 -> 'X'; +uts46_map(CP) when 126549 =< CP, CP =< 126550 -> 'X'; +uts46_map(CP) when 126565 =< CP, CP =< 126566 -> 'X'; +uts46_map(CP) when 126620 =< CP, CP =< 126624 -> 'X'; +uts46_map(CP) when 126652 =< CP, CP =< 126703 -> 'X'; +uts46_map(CP) when 126704 =< CP, CP =< 126705 -> 'V'; +uts46_map(CP) when 126706 =< CP, CP =< 126975 -> 'X'; +uts46_map(CP) when 126976 =< CP, CP =< 127019 -> 'V'; +uts46_map(CP) when 127020 =< CP, CP =< 127023 -> 'X'; +uts46_map(CP) when 127024 =< CP, CP =< 127123 -> 'V'; +uts46_map(CP) when 127124 =< CP, CP =< 127135 -> 'X'; +uts46_map(CP) when 127136 =< CP, CP =< 127150 -> 'V'; +uts46_map(CP) when 127151 =< CP, CP =< 127152 -> 'X'; +uts46_map(CP) when 127153 =< CP, CP =< 127166 -> 'V'; +uts46_map(CP) when 127169 =< CP, CP =< 127183 -> 'V'; +uts46_map(CP) when 127185 =< CP, CP =< 127199 -> 'V'; +uts46_map(CP) when 127200 =< CP, CP =< 127221 -> 'V'; +uts46_map(CP) when 127222 =< CP, CP =< 127231 -> 'X'; +uts46_map(CP) when 127243 =< CP, CP =< 127244 -> 'V'; +uts46_map(CP) when 127245 =< CP, CP =< 127247 -> 'V'; +uts46_map(CP) when 127312 =< CP, CP =< 127318 -> 'V'; +uts46_map(CP) when 127320 =< CP, CP =< 127326 -> 'V'; +uts46_map(CP) when 127328 =< CP, CP =< 127337 -> 'V'; +uts46_map(CP) when 127341 =< CP, CP =< 127343 -> 'V'; +uts46_map(CP) when 127344 =< CP, CP =< 127352 -> 'V'; +uts46_map(CP) when 127355 =< CP, CP =< 127356 -> 'V'; +uts46_map(CP) when 127357 =< CP, CP =< 127358 -> 'V'; +uts46_map(CP) when 127360 =< CP, CP =< 127369 -> 'V'; +uts46_map(CP) when 127370 =< CP, CP =< 127373 -> 'V'; +uts46_map(CP) when 127374 =< CP, CP =< 127375 -> 'V'; +uts46_map(CP) when 127377 =< CP, CP =< 127386 -> 'V'; +uts46_map(CP) 
when 127387 =< CP, CP =< 127404 -> 'V'; +uts46_map(CP) when 127406 =< CP, CP =< 127461 -> 'X'; +uts46_map(CP) when 127462 =< CP, CP =< 127487 -> 'V'; +uts46_map(CP) when 127491 =< CP, CP =< 127503 -> 'X'; +uts46_map(CP) when 127548 =< CP, CP =< 127551 -> 'X'; +uts46_map(CP) when 127561 =< CP, CP =< 127567 -> 'X'; +uts46_map(CP) when 127570 =< CP, CP =< 127583 -> 'X'; +uts46_map(CP) when 127584 =< CP, CP =< 127589 -> 'V'; +uts46_map(CP) when 127590 =< CP, CP =< 127743 -> 'X'; +uts46_map(CP) when 127744 =< CP, CP =< 127776 -> 'V'; +uts46_map(CP) when 127777 =< CP, CP =< 127788 -> 'V'; +uts46_map(CP) when 127789 =< CP, CP =< 127791 -> 'V'; +uts46_map(CP) when 127792 =< CP, CP =< 127797 -> 'V'; +uts46_map(CP) when 127799 =< CP, CP =< 127868 -> 'V'; +uts46_map(CP) when 127870 =< CP, CP =< 127871 -> 'V'; +uts46_map(CP) when 127872 =< CP, CP =< 127891 -> 'V'; +uts46_map(CP) when 127892 =< CP, CP =< 127903 -> 'V'; +uts46_map(CP) when 127904 =< CP, CP =< 127940 -> 'V'; +uts46_map(CP) when 127942 =< CP, CP =< 127946 -> 'V'; +uts46_map(CP) when 127947 =< CP, CP =< 127950 -> 'V'; +uts46_map(CP) when 127951 =< CP, CP =< 127955 -> 'V'; +uts46_map(CP) when 127956 =< CP, CP =< 127967 -> 'V'; +uts46_map(CP) when 127968 =< CP, CP =< 127984 -> 'V'; +uts46_map(CP) when 127985 =< CP, CP =< 127991 -> 'V'; +uts46_map(CP) when 127992 =< CP, CP =< 127999 -> 'V'; +uts46_map(CP) when 128000 =< CP, CP =< 128062 -> 'V'; +uts46_map(CP) when 128066 =< CP, CP =< 128247 -> 'V'; +uts46_map(CP) when 128249 =< CP, CP =< 128252 -> 'V'; +uts46_map(CP) when 128253 =< CP, CP =< 128254 -> 'V'; +uts46_map(CP) when 128256 =< CP, CP =< 128317 -> 'V'; +uts46_map(CP) when 128318 =< CP, CP =< 128319 -> 'V'; +uts46_map(CP) when 128320 =< CP, CP =< 128323 -> 'V'; +uts46_map(CP) when 128324 =< CP, CP =< 128330 -> 'V'; +uts46_map(CP) when 128331 =< CP, CP =< 128335 -> 'V'; +uts46_map(CP) when 128336 =< CP, CP =< 128359 -> 'V'; +uts46_map(CP) when 128360 =< CP, CP =< 128377 -> 'V'; +uts46_map(CP) when 128379 =< CP, 
CP =< 128419 -> 'V'; +uts46_map(CP) when 128421 =< CP, CP =< 128506 -> 'V'; +uts46_map(CP) when 128507 =< CP, CP =< 128511 -> 'V'; +uts46_map(CP) when 128513 =< CP, CP =< 128528 -> 'V'; +uts46_map(CP) when 128530 =< CP, CP =< 128532 -> 'V'; +uts46_map(CP) when 128540 =< CP, CP =< 128542 -> 'V'; +uts46_map(CP) when 128544 =< CP, CP =< 128549 -> 'V'; +uts46_map(CP) when 128550 =< CP, CP =< 128551 -> 'V'; +uts46_map(CP) when 128552 =< CP, CP =< 128555 -> 'V'; +uts46_map(CP) when 128558 =< CP, CP =< 128559 -> 'V'; +uts46_map(CP) when 128560 =< CP, CP =< 128563 -> 'V'; +uts46_map(CP) when 128565 =< CP, CP =< 128576 -> 'V'; +uts46_map(CP) when 128577 =< CP, CP =< 128578 -> 'V'; +uts46_map(CP) when 128579 =< CP, CP =< 128580 -> 'V'; +uts46_map(CP) when 128581 =< CP, CP =< 128591 -> 'V'; +uts46_map(CP) when 128592 =< CP, CP =< 128639 -> 'V'; +uts46_map(CP) when 128640 =< CP, CP =< 128709 -> 'V'; +uts46_map(CP) when 128710 =< CP, CP =< 128719 -> 'V'; +uts46_map(CP) when 128721 =< CP, CP =< 128722 -> 'V'; +uts46_map(CP) when 128723 =< CP, CP =< 128724 -> 'V'; +uts46_map(CP) when 128726 =< CP, CP =< 128727 -> 'V'; +uts46_map(CP) when 128728 =< CP, CP =< 128735 -> 'X'; +uts46_map(CP) when 128736 =< CP, CP =< 128748 -> 'V'; +uts46_map(CP) when 128749 =< CP, CP =< 128751 -> 'X'; +uts46_map(CP) when 128752 =< CP, CP =< 128755 -> 'V'; +uts46_map(CP) when 128756 =< CP, CP =< 128758 -> 'V'; +uts46_map(CP) when 128759 =< CP, CP =< 128760 -> 'V'; +uts46_map(CP) when 128763 =< CP, CP =< 128764 -> 'V'; +uts46_map(CP) when 128765 =< CP, CP =< 128767 -> 'X'; +uts46_map(CP) when 128768 =< CP, CP =< 128883 -> 'V'; +uts46_map(CP) when 128884 =< CP, CP =< 128895 -> 'X'; +uts46_map(CP) when 128896 =< CP, CP =< 128980 -> 'V'; +uts46_map(CP) when 128981 =< CP, CP =< 128984 -> 'V'; +uts46_map(CP) when 128985 =< CP, CP =< 128991 -> 'X'; +uts46_map(CP) when 128992 =< CP, CP =< 129003 -> 'V'; +uts46_map(CP) when 129004 =< CP, CP =< 129023 -> 'X'; +uts46_map(CP) when 129024 =< CP, CP =< 129035 -> 
'V'; +uts46_map(CP) when 129036 =< CP, CP =< 129039 -> 'X'; +uts46_map(CP) when 129040 =< CP, CP =< 129095 -> 'V'; +uts46_map(CP) when 129096 =< CP, CP =< 129103 -> 'X'; +uts46_map(CP) when 129104 =< CP, CP =< 129113 -> 'V'; +uts46_map(CP) when 129114 =< CP, CP =< 129119 -> 'X'; +uts46_map(CP) when 129120 =< CP, CP =< 129159 -> 'V'; +uts46_map(CP) when 129160 =< CP, CP =< 129167 -> 'X'; +uts46_map(CP) when 129168 =< CP, CP =< 129197 -> 'V'; +uts46_map(CP) when 129198 =< CP, CP =< 129199 -> 'X'; +uts46_map(CP) when 129200 =< CP, CP =< 129201 -> 'V'; +uts46_map(CP) when 129202 =< CP, CP =< 129279 -> 'X'; +uts46_map(CP) when 129280 =< CP, CP =< 129291 -> 'V'; +uts46_map(CP) when 129293 =< CP, CP =< 129295 -> 'V'; +uts46_map(CP) when 129296 =< CP, CP =< 129304 -> 'V'; +uts46_map(CP) when 129305 =< CP, CP =< 129310 -> 'V'; +uts46_map(CP) when 129312 =< CP, CP =< 129319 -> 'V'; +uts46_map(CP) when 129320 =< CP, CP =< 129327 -> 'V'; +uts46_map(CP) when 129329 =< CP, CP =< 129330 -> 'V'; +uts46_map(CP) when 129331 =< CP, CP =< 129342 -> 'V'; +uts46_map(CP) when 129344 =< CP, CP =< 129355 -> 'V'; +uts46_map(CP) when 129357 =< CP, CP =< 129359 -> 'V'; +uts46_map(CP) when 129360 =< CP, CP =< 129374 -> 'V'; +uts46_map(CP) when 129375 =< CP, CP =< 129387 -> 'V'; +uts46_map(CP) when 129388 =< CP, CP =< 129392 -> 'V'; +uts46_map(CP) when 129395 =< CP, CP =< 129398 -> 'V'; +uts46_map(CP) when 129399 =< CP, CP =< 129400 -> 'V'; +uts46_map(CP) when 129404 =< CP, CP =< 129407 -> 'V'; +uts46_map(CP) when 129408 =< CP, CP =< 129412 -> 'V'; +uts46_map(CP) when 129413 =< CP, CP =< 129425 -> 'V'; +uts46_map(CP) when 129426 =< CP, CP =< 129431 -> 'V'; +uts46_map(CP) when 129432 =< CP, CP =< 129442 -> 'V'; +uts46_map(CP) when 129443 =< CP, CP =< 129444 -> 'V'; +uts46_map(CP) when 129445 =< CP, CP =< 129450 -> 'V'; +uts46_map(CP) when 129451 =< CP, CP =< 129453 -> 'V'; +uts46_map(CP) when 129454 =< CP, CP =< 129455 -> 'V'; +uts46_map(CP) when 129456 =< CP, CP =< 129465 -> 'V'; +uts46_map(CP) 
when 129466 =< CP, CP =< 129471 -> 'V'; +uts46_map(CP) when 129473 =< CP, CP =< 129474 -> 'V'; +uts46_map(CP) when 129475 =< CP, CP =< 129482 -> 'V'; +uts46_map(CP) when 129485 =< CP, CP =< 129487 -> 'V'; +uts46_map(CP) when 129488 =< CP, CP =< 129510 -> 'V'; +uts46_map(CP) when 129511 =< CP, CP =< 129535 -> 'V'; +uts46_map(CP) when 129536 =< CP, CP =< 129619 -> 'V'; +uts46_map(CP) when 129620 =< CP, CP =< 129631 -> 'X'; +uts46_map(CP) when 129632 =< CP, CP =< 129645 -> 'V'; +uts46_map(CP) when 129646 =< CP, CP =< 129647 -> 'X'; +uts46_map(CP) when 129648 =< CP, CP =< 129651 -> 'V'; +uts46_map(CP) when 129653 =< CP, CP =< 129655 -> 'X'; +uts46_map(CP) when 129656 =< CP, CP =< 129658 -> 'V'; +uts46_map(CP) when 129659 =< CP, CP =< 129663 -> 'X'; +uts46_map(CP) when 129664 =< CP, CP =< 129666 -> 'V'; +uts46_map(CP) when 129667 =< CP, CP =< 129670 -> 'V'; +uts46_map(CP) when 129671 =< CP, CP =< 129679 -> 'X'; +uts46_map(CP) when 129680 =< CP, CP =< 129685 -> 'V'; +uts46_map(CP) when 129686 =< CP, CP =< 129704 -> 'V'; +uts46_map(CP) when 129705 =< CP, CP =< 129711 -> 'X'; +uts46_map(CP) when 129712 =< CP, CP =< 129718 -> 'V'; +uts46_map(CP) when 129719 =< CP, CP =< 129727 -> 'X'; +uts46_map(CP) when 129728 =< CP, CP =< 129730 -> 'V'; +uts46_map(CP) when 129731 =< CP, CP =< 129743 -> 'X'; +uts46_map(CP) when 129744 =< CP, CP =< 129750 -> 'V'; +uts46_map(CP) when 129751 =< CP, CP =< 129791 -> 'X'; +uts46_map(CP) when 129792 =< CP, CP =< 129938 -> 'V'; +uts46_map(CP) when 129940 =< CP, CP =< 129994 -> 'V'; +uts46_map(CP) when 129995 =< CP, CP =< 130031 -> 'X'; +uts46_map(CP) when 130042 =< CP, CP =< 131069 -> 'X'; +uts46_map(CP) when 131070 =< CP, CP =< 131071 -> 'X'; +uts46_map(CP) when 131072 =< CP, CP =< 173782 -> 'V'; +uts46_map(CP) when 173783 =< CP, CP =< 173789 -> 'V'; +uts46_map(CP) when 173790 =< CP, CP =< 173823 -> 'X'; +uts46_map(CP) when 173824 =< CP, CP =< 177972 -> 'V'; +uts46_map(CP) when 177973 =< CP, CP =< 177983 -> 'X'; +uts46_map(CP) when 177984 =< CP, 
CP =< 178205 -> 'V'; +uts46_map(CP) when 178206 =< CP, CP =< 178207 -> 'X'; +uts46_map(CP) when 178208 =< CP, CP =< 183969 -> 'V'; +uts46_map(CP) when 183970 =< CP, CP =< 183983 -> 'X'; +uts46_map(CP) when 183984 =< CP, CP =< 191456 -> 'V'; +uts46_map(CP) when 191457 =< CP, CP =< 194559 -> 'X'; +uts46_map(CP) when 194609 =< CP, CP =< 194611 -> {'M', [21375]}; +uts46_map(CP) when 194629 =< CP, CP =< 194630 -> {'M', [21892]}; +uts46_map(CP) when 194666 =< CP, CP =< 194667 -> {'M', [23358]}; +uts46_map(CP) when 194705 =< CP, CP =< 194706 -> {'M', [140081]}; +uts46_map(CP) when 194708 =< CP, CP =< 194709 -> {'M', [24354]}; +uts46_map(CP) when 194860 =< CP, CP =< 194861 -> {'M', [16056]}; +uts46_map(CP) when 194886 =< CP, CP =< 194887 -> {'M', [30495]}; +uts46_map(CP) when 194909 =< CP, CP =< 194910 -> {'M', [154279]}; +uts46_map(CP) when 195070 =< CP, CP =< 195071 -> {'M', [38923]}; +uts46_map(CP) when 195102 =< CP, CP =< 196605 -> 'X'; +uts46_map(CP) when 196606 =< CP, CP =< 196607 -> 'X'; +uts46_map(CP) when 196608 =< CP, CP =< 201546 -> 'V'; +uts46_map(CP) when 201547 =< CP, CP =< 262141 -> 'X'; +uts46_map(CP) when 262142 =< CP, CP =< 262143 -> 'X'; +uts46_map(CP) when 262144 =< CP, CP =< 327677 -> 'X'; +uts46_map(CP) when 327678 =< CP, CP =< 327679 -> 'X'; +uts46_map(CP) when 327680 =< CP, CP =< 393213 -> 'X'; +uts46_map(CP) when 393214 =< CP, CP =< 393215 -> 'X'; +uts46_map(CP) when 393216 =< CP, CP =< 458749 -> 'X'; +uts46_map(CP) when 458750 =< CP, CP =< 458751 -> 'X'; +uts46_map(CP) when 458752 =< CP, CP =< 524285 -> 'X'; +uts46_map(CP) when 524286 =< CP, CP =< 524287 -> 'X'; +uts46_map(CP) when 524288 =< CP, CP =< 589821 -> 'X'; +uts46_map(CP) when 589822 =< CP, CP =< 589823 -> 'X'; +uts46_map(CP) when 589824 =< CP, CP =< 655357 -> 'X'; +uts46_map(CP) when 655358 =< CP, CP =< 655359 -> 'X'; +uts46_map(CP) when 655360 =< CP, CP =< 720893 -> 'X'; +uts46_map(CP) when 720894 =< CP, CP =< 720895 -> 'X'; +uts46_map(CP) when 720896 =< CP, CP =< 786429 -> 'X'; 
+uts46_map(CP) when 786430 =< CP, CP =< 786431 -> 'X'; +uts46_map(CP) when 786432 =< CP, CP =< 851965 -> 'X'; +uts46_map(CP) when 851966 =< CP, CP =< 851967 -> 'X'; +uts46_map(CP) when 851968 =< CP, CP =< 917501 -> 'X'; +uts46_map(CP) when 917502 =< CP, CP =< 917503 -> 'X'; +uts46_map(CP) when 917506 =< CP, CP =< 917535 -> 'X'; +uts46_map(CP) when 917536 =< CP, CP =< 917631 -> 'X'; +uts46_map(CP) when 917632 =< CP, CP =< 917759 -> 'X'; +uts46_map(CP) when 917760 =< CP, CP =< 917999 -> 'I'; +uts46_map(CP) when 918000 =< CP, CP =< 983037 -> 'X'; +uts46_map(CP) when 983038 =< CP, CP =< 983039 -> 'X'; +uts46_map(CP) when 983040 =< CP, CP =< 1048573 -> 'X'; +uts46_map(CP) when 1048574 =< CP, CP =< 1048575 -> 'X'; +uts46_map(CP) when 1048576 =< CP, CP =< 1114109 -> 'X'; +uts46_map(CP) when 1114110 =< CP, CP =< 1114111 -> 'X'; +uts46_map(_) -> erlang:error(badarg). diff --git a/deps/idna/src/idna_table.erl b/deps/idna/src/idna_table.erl new file mode 100644 index 0000000..152477b --- /dev/null +++ b/deps/idna/src/idna_table.erl @@ -0,0 +1,2916 @@ +%% +%% this file is generated do not modify +%% see ../uc_spec/gen_idna_table.escript + +-module(idna_table). +-compile(compressed). +-export([lookup/1]). +-export([disallowed_p/1, contextj_p/1, contexto_p/1, unassigned_p/1, valid_p/1]). +disallowed_p(CP) -> lookup(CP) == 'DISALLOWED'. +contextj_p(CP) -> lookup(CP) == 'CONTEXTJ'. +contexto_p(CP) -> lookup(CP) == 'CONTEXTO'. +unassigned_p(CP) -> lookup(CP) == 'UNASSIGNED'. +valid_p(CP) -> lookup(CP) == 'PVALID'. 
+lookup(45) -> 'PVALID'; +lookup(183) -> 'CONTEXTO'; +lookup(247) -> 'DISALLOWED'; +lookup(256) -> 'DISALLOWED'; +lookup(257) -> 'PVALID'; +lookup(258) -> 'DISALLOWED'; +lookup(259) -> 'PVALID'; +lookup(260) -> 'DISALLOWED'; +lookup(261) -> 'PVALID'; +lookup(262) -> 'DISALLOWED'; +lookup(263) -> 'PVALID'; +lookup(264) -> 'DISALLOWED'; +lookup(265) -> 'PVALID'; +lookup(266) -> 'DISALLOWED'; +lookup(267) -> 'PVALID'; +lookup(268) -> 'DISALLOWED'; +lookup(269) -> 'PVALID'; +lookup(270) -> 'DISALLOWED'; +lookup(271) -> 'PVALID'; +lookup(272) -> 'DISALLOWED'; +lookup(273) -> 'PVALID'; +lookup(274) -> 'DISALLOWED'; +lookup(275) -> 'PVALID'; +lookup(276) -> 'DISALLOWED'; +lookup(277) -> 'PVALID'; +lookup(278) -> 'DISALLOWED'; +lookup(279) -> 'PVALID'; +lookup(280) -> 'DISALLOWED'; +lookup(281) -> 'PVALID'; +lookup(282) -> 'DISALLOWED'; +lookup(283) -> 'PVALID'; +lookup(284) -> 'DISALLOWED'; +lookup(285) -> 'PVALID'; +lookup(286) -> 'DISALLOWED'; +lookup(287) -> 'PVALID'; +lookup(288) -> 'DISALLOWED'; +lookup(289) -> 'PVALID'; +lookup(290) -> 'DISALLOWED'; +lookup(291) -> 'PVALID'; +lookup(292) -> 'DISALLOWED'; +lookup(293) -> 'PVALID'; +lookup(294) -> 'DISALLOWED'; +lookup(295) -> 'PVALID'; +lookup(296) -> 'DISALLOWED'; +lookup(297) -> 'PVALID'; +lookup(298) -> 'DISALLOWED'; +lookup(299) -> 'PVALID'; +lookup(300) -> 'DISALLOWED'; +lookup(301) -> 'PVALID'; +lookup(302) -> 'DISALLOWED'; +lookup(303) -> 'PVALID'; +lookup(304) -> 'DISALLOWED'; +lookup(305) -> 'PVALID'; +lookup(309) -> 'PVALID'; +lookup(310) -> 'DISALLOWED'; +lookup(313) -> 'DISALLOWED'; +lookup(314) -> 'PVALID'; +lookup(315) -> 'DISALLOWED'; +lookup(316) -> 'PVALID'; +lookup(317) -> 'DISALLOWED'; +lookup(318) -> 'PVALID'; +lookup(322) -> 'PVALID'; +lookup(323) -> 'DISALLOWED'; +lookup(324) -> 'PVALID'; +lookup(325) -> 'DISALLOWED'; +lookup(326) -> 'PVALID'; +lookup(327) -> 'DISALLOWED'; +lookup(328) -> 'PVALID'; +lookup(331) -> 'PVALID'; +lookup(332) -> 'DISALLOWED'; +lookup(333) -> 'PVALID'; +lookup(334) -> 
'DISALLOWED'; +lookup(335) -> 'PVALID'; +lookup(336) -> 'DISALLOWED'; +lookup(337) -> 'PVALID'; +lookup(338) -> 'DISALLOWED'; +lookup(339) -> 'PVALID'; +lookup(340) -> 'DISALLOWED'; +lookup(341) -> 'PVALID'; +lookup(342) -> 'DISALLOWED'; +lookup(343) -> 'PVALID'; +lookup(344) -> 'DISALLOWED'; +lookup(345) -> 'PVALID'; +lookup(346) -> 'DISALLOWED'; +lookup(347) -> 'PVALID'; +lookup(348) -> 'DISALLOWED'; +lookup(349) -> 'PVALID'; +lookup(350) -> 'DISALLOWED'; +lookup(351) -> 'PVALID'; +lookup(352) -> 'DISALLOWED'; +lookup(353) -> 'PVALID'; +lookup(354) -> 'DISALLOWED'; +lookup(355) -> 'PVALID'; +lookup(356) -> 'DISALLOWED'; +lookup(357) -> 'PVALID'; +lookup(358) -> 'DISALLOWED'; +lookup(359) -> 'PVALID'; +lookup(360) -> 'DISALLOWED'; +lookup(361) -> 'PVALID'; +lookup(362) -> 'DISALLOWED'; +lookup(363) -> 'PVALID'; +lookup(364) -> 'DISALLOWED'; +lookup(365) -> 'PVALID'; +lookup(366) -> 'DISALLOWED'; +lookup(367) -> 'PVALID'; +lookup(368) -> 'DISALLOWED'; +lookup(369) -> 'PVALID'; +lookup(370) -> 'DISALLOWED'; +lookup(371) -> 'PVALID'; +lookup(372) -> 'DISALLOWED'; +lookup(373) -> 'PVALID'; +lookup(374) -> 'DISALLOWED'; +lookup(375) -> 'PVALID'; +lookup(378) -> 'PVALID'; +lookup(379) -> 'DISALLOWED'; +lookup(380) -> 'PVALID'; +lookup(381) -> 'DISALLOWED'; +lookup(382) -> 'PVALID'; +lookup(383) -> 'DISALLOWED'; +lookup(384) -> 'PVALID'; +lookup(387) -> 'PVALID'; +lookup(388) -> 'DISALLOWED'; +lookup(389) -> 'PVALID'; +lookup(392) -> 'PVALID'; +lookup(402) -> 'PVALID'; +lookup(405) -> 'PVALID'; +lookup(414) -> 'PVALID'; +lookup(417) -> 'PVALID'; +lookup(418) -> 'DISALLOWED'; +lookup(419) -> 'PVALID'; +lookup(420) -> 'DISALLOWED'; +lookup(421) -> 'PVALID'; +lookup(424) -> 'PVALID'; +lookup(425) -> 'DISALLOWED'; +lookup(428) -> 'DISALLOWED'; +lookup(429) -> 'PVALID'; +lookup(432) -> 'PVALID'; +lookup(436) -> 'PVALID'; +lookup(437) -> 'DISALLOWED'; +lookup(438) -> 'PVALID'; +lookup(444) -> 'DISALLOWED'; +lookup(462) -> 'PVALID'; +lookup(463) -> 'DISALLOWED'; +lookup(464) -> 
'PVALID'; +lookup(465) -> 'DISALLOWED'; +lookup(466) -> 'PVALID'; +lookup(467) -> 'DISALLOWED'; +lookup(468) -> 'PVALID'; +lookup(469) -> 'DISALLOWED'; +lookup(470) -> 'PVALID'; +lookup(471) -> 'DISALLOWED'; +lookup(472) -> 'PVALID'; +lookup(473) -> 'DISALLOWED'; +lookup(474) -> 'PVALID'; +lookup(475) -> 'DISALLOWED'; +lookup(478) -> 'DISALLOWED'; +lookup(479) -> 'PVALID'; +lookup(480) -> 'DISALLOWED'; +lookup(481) -> 'PVALID'; +lookup(482) -> 'DISALLOWED'; +lookup(483) -> 'PVALID'; +lookup(484) -> 'DISALLOWED'; +lookup(485) -> 'PVALID'; +lookup(486) -> 'DISALLOWED'; +lookup(487) -> 'PVALID'; +lookup(488) -> 'DISALLOWED'; +lookup(489) -> 'PVALID'; +lookup(490) -> 'DISALLOWED'; +lookup(491) -> 'PVALID'; +lookup(492) -> 'DISALLOWED'; +lookup(493) -> 'PVALID'; +lookup(494) -> 'DISALLOWED'; +lookup(501) -> 'PVALID'; +lookup(505) -> 'PVALID'; +lookup(506) -> 'DISALLOWED'; +lookup(507) -> 'PVALID'; +lookup(508) -> 'DISALLOWED'; +lookup(509) -> 'PVALID'; +lookup(510) -> 'DISALLOWED'; +lookup(511) -> 'PVALID'; +lookup(512) -> 'DISALLOWED'; +lookup(513) -> 'PVALID'; +lookup(514) -> 'DISALLOWED'; +lookup(515) -> 'PVALID'; +lookup(516) -> 'DISALLOWED'; +lookup(517) -> 'PVALID'; +lookup(518) -> 'DISALLOWED'; +lookup(519) -> 'PVALID'; +lookup(520) -> 'DISALLOWED'; +lookup(521) -> 'PVALID'; +lookup(522) -> 'DISALLOWED'; +lookup(523) -> 'PVALID'; +lookup(524) -> 'DISALLOWED'; +lookup(525) -> 'PVALID'; +lookup(526) -> 'DISALLOWED'; +lookup(527) -> 'PVALID'; +lookup(528) -> 'DISALLOWED'; +lookup(529) -> 'PVALID'; +lookup(530) -> 'DISALLOWED'; +lookup(531) -> 'PVALID'; +lookup(532) -> 'DISALLOWED'; +lookup(533) -> 'PVALID'; +lookup(534) -> 'DISALLOWED'; +lookup(535) -> 'PVALID'; +lookup(536) -> 'DISALLOWED'; +lookup(537) -> 'PVALID'; +lookup(538) -> 'DISALLOWED'; +lookup(539) -> 'PVALID'; +lookup(540) -> 'DISALLOWED'; +lookup(541) -> 'PVALID'; +lookup(542) -> 'DISALLOWED'; +lookup(543) -> 'PVALID'; +lookup(544) -> 'DISALLOWED'; +lookup(545) -> 'PVALID'; +lookup(546) -> 'DISALLOWED'; 
+lookup(547) -> 'PVALID'; +lookup(548) -> 'DISALLOWED'; +lookup(549) -> 'PVALID'; +lookup(550) -> 'DISALLOWED'; +lookup(551) -> 'PVALID'; +lookup(552) -> 'DISALLOWED'; +lookup(553) -> 'PVALID'; +lookup(554) -> 'DISALLOWED'; +lookup(555) -> 'PVALID'; +lookup(556) -> 'DISALLOWED'; +lookup(557) -> 'PVALID'; +lookup(558) -> 'DISALLOWED'; +lookup(559) -> 'PVALID'; +lookup(560) -> 'DISALLOWED'; +lookup(561) -> 'PVALID'; +lookup(562) -> 'DISALLOWED'; +lookup(572) -> 'PVALID'; +lookup(577) -> 'DISALLOWED'; +lookup(578) -> 'PVALID'; +lookup(583) -> 'PVALID'; +lookup(584) -> 'DISALLOWED'; +lookup(585) -> 'PVALID'; +lookup(586) -> 'DISALLOWED'; +lookup(587) -> 'PVALID'; +lookup(588) -> 'DISALLOWED'; +lookup(589) -> 'PVALID'; +lookup(590) -> 'DISALLOWED'; +lookup(748) -> 'PVALID'; +lookup(749) -> 'DISALLOWED'; +lookup(750) -> 'PVALID'; +lookup(834) -> 'PVALID'; +lookup(847) -> 'DISALLOWED'; +lookup(880) -> 'DISALLOWED'; +lookup(881) -> 'PVALID'; +lookup(882) -> 'DISALLOWED'; +lookup(883) -> 'PVALID'; +lookup(884) -> 'DISALLOWED'; +lookup(885) -> 'CONTEXTO'; +lookup(886) -> 'DISALLOWED'; +lookup(887) -> 'PVALID'; +lookup(890) -> 'DISALLOWED'; +lookup(907) -> 'UNASSIGNED'; +lookup(908) -> 'DISALLOWED'; +lookup(909) -> 'UNASSIGNED'; +lookup(912) -> 'PVALID'; +lookup(930) -> 'UNASSIGNED'; +lookup(983) -> 'PVALID'; +lookup(984) -> 'DISALLOWED'; +lookup(985) -> 'PVALID'; +lookup(986) -> 'DISALLOWED'; +lookup(987) -> 'PVALID'; +lookup(988) -> 'DISALLOWED'; +lookup(989) -> 'PVALID'; +lookup(990) -> 'DISALLOWED'; +lookup(991) -> 'PVALID'; +lookup(992) -> 'DISALLOWED'; +lookup(993) -> 'PVALID'; +lookup(994) -> 'DISALLOWED'; +lookup(995) -> 'PVALID'; +lookup(996) -> 'DISALLOWED'; +lookup(997) -> 'PVALID'; +lookup(998) -> 'DISALLOWED'; +lookup(999) -> 'PVALID'; +lookup(1000) -> 'DISALLOWED'; +lookup(1001) -> 'PVALID'; +lookup(1002) -> 'DISALLOWED'; +lookup(1003) -> 'PVALID'; +lookup(1004) -> 'DISALLOWED'; +lookup(1005) -> 'PVALID'; +lookup(1006) -> 'DISALLOWED'; +lookup(1007) -> 'PVALID'; 
+lookup(1011) -> 'PVALID'; +lookup(1016) -> 'PVALID'; +lookup(1120) -> 'DISALLOWED'; +lookup(1121) -> 'PVALID'; +lookup(1122) -> 'DISALLOWED'; +lookup(1123) -> 'PVALID'; +lookup(1124) -> 'DISALLOWED'; +lookup(1125) -> 'PVALID'; +lookup(1126) -> 'DISALLOWED'; +lookup(1127) -> 'PVALID'; +lookup(1128) -> 'DISALLOWED'; +lookup(1129) -> 'PVALID'; +lookup(1130) -> 'DISALLOWED'; +lookup(1131) -> 'PVALID'; +lookup(1132) -> 'DISALLOWED'; +lookup(1133) -> 'PVALID'; +lookup(1134) -> 'DISALLOWED'; +lookup(1135) -> 'PVALID'; +lookup(1136) -> 'DISALLOWED'; +lookup(1137) -> 'PVALID'; +lookup(1138) -> 'DISALLOWED'; +lookup(1139) -> 'PVALID'; +lookup(1140) -> 'DISALLOWED'; +lookup(1141) -> 'PVALID'; +lookup(1142) -> 'DISALLOWED'; +lookup(1143) -> 'PVALID'; +lookup(1144) -> 'DISALLOWED'; +lookup(1145) -> 'PVALID'; +lookup(1146) -> 'DISALLOWED'; +lookup(1147) -> 'PVALID'; +lookup(1148) -> 'DISALLOWED'; +lookup(1149) -> 'PVALID'; +lookup(1150) -> 'DISALLOWED'; +lookup(1151) -> 'PVALID'; +lookup(1152) -> 'DISALLOWED'; +lookup(1153) -> 'PVALID'; +lookup(1154) -> 'DISALLOWED'; +lookup(1163) -> 'PVALID'; +lookup(1164) -> 'DISALLOWED'; +lookup(1165) -> 'PVALID'; +lookup(1166) -> 'DISALLOWED'; +lookup(1167) -> 'PVALID'; +lookup(1168) -> 'DISALLOWED'; +lookup(1169) -> 'PVALID'; +lookup(1170) -> 'DISALLOWED'; +lookup(1171) -> 'PVALID'; +lookup(1172) -> 'DISALLOWED'; +lookup(1173) -> 'PVALID'; +lookup(1174) -> 'DISALLOWED'; +lookup(1175) -> 'PVALID'; +lookup(1176) -> 'DISALLOWED'; +lookup(1177) -> 'PVALID'; +lookup(1178) -> 'DISALLOWED'; +lookup(1179) -> 'PVALID'; +lookup(1180) -> 'DISALLOWED'; +lookup(1181) -> 'PVALID'; +lookup(1182) -> 'DISALLOWED'; +lookup(1183) -> 'PVALID'; +lookup(1184) -> 'DISALLOWED'; +lookup(1185) -> 'PVALID'; +lookup(1186) -> 'DISALLOWED'; +lookup(1187) -> 'PVALID'; +lookup(1188) -> 'DISALLOWED'; +lookup(1189) -> 'PVALID'; +lookup(1190) -> 'DISALLOWED'; +lookup(1191) -> 'PVALID'; +lookup(1192) -> 'DISALLOWED'; +lookup(1193) -> 'PVALID'; +lookup(1194) -> 'DISALLOWED'; 
+lookup(1195) -> 'PVALID'; +lookup(1196) -> 'DISALLOWED'; +lookup(1197) -> 'PVALID'; +lookup(1198) -> 'DISALLOWED'; +lookup(1199) -> 'PVALID'; +lookup(1200) -> 'DISALLOWED'; +lookup(1201) -> 'PVALID'; +lookup(1202) -> 'DISALLOWED'; +lookup(1203) -> 'PVALID'; +lookup(1204) -> 'DISALLOWED'; +lookup(1205) -> 'PVALID'; +lookup(1206) -> 'DISALLOWED'; +lookup(1207) -> 'PVALID'; +lookup(1208) -> 'DISALLOWED'; +lookup(1209) -> 'PVALID'; +lookup(1210) -> 'DISALLOWED'; +lookup(1211) -> 'PVALID'; +lookup(1212) -> 'DISALLOWED'; +lookup(1213) -> 'PVALID'; +lookup(1214) -> 'DISALLOWED'; +lookup(1215) -> 'PVALID'; +lookup(1218) -> 'PVALID'; +lookup(1219) -> 'DISALLOWED'; +lookup(1220) -> 'PVALID'; +lookup(1221) -> 'DISALLOWED'; +lookup(1222) -> 'PVALID'; +lookup(1223) -> 'DISALLOWED'; +lookup(1224) -> 'PVALID'; +lookup(1225) -> 'DISALLOWED'; +lookup(1226) -> 'PVALID'; +lookup(1227) -> 'DISALLOWED'; +lookup(1228) -> 'PVALID'; +lookup(1229) -> 'DISALLOWED'; +lookup(1232) -> 'DISALLOWED'; +lookup(1233) -> 'PVALID'; +lookup(1234) -> 'DISALLOWED'; +lookup(1235) -> 'PVALID'; +lookup(1236) -> 'DISALLOWED'; +lookup(1237) -> 'PVALID'; +lookup(1238) -> 'DISALLOWED'; +lookup(1239) -> 'PVALID'; +lookup(1240) -> 'DISALLOWED'; +lookup(1241) -> 'PVALID'; +lookup(1242) -> 'DISALLOWED'; +lookup(1243) -> 'PVALID'; +lookup(1244) -> 'DISALLOWED'; +lookup(1245) -> 'PVALID'; +lookup(1246) -> 'DISALLOWED'; +lookup(1247) -> 'PVALID'; +lookup(1248) -> 'DISALLOWED'; +lookup(1249) -> 'PVALID'; +lookup(1250) -> 'DISALLOWED'; +lookup(1251) -> 'PVALID'; +lookup(1252) -> 'DISALLOWED'; +lookup(1253) -> 'PVALID'; +lookup(1254) -> 'DISALLOWED'; +lookup(1255) -> 'PVALID'; +lookup(1256) -> 'DISALLOWED'; +lookup(1257) -> 'PVALID'; +lookup(1258) -> 'DISALLOWED'; +lookup(1259) -> 'PVALID'; +lookup(1260) -> 'DISALLOWED'; +lookup(1261) -> 'PVALID'; +lookup(1262) -> 'DISALLOWED'; +lookup(1263) -> 'PVALID'; +lookup(1264) -> 'DISALLOWED'; +lookup(1265) -> 'PVALID'; +lookup(1266) -> 'DISALLOWED'; +lookup(1267) -> 'PVALID'; 
+lookup(1268) -> 'DISALLOWED'; +lookup(1269) -> 'PVALID'; +lookup(1270) -> 'DISALLOWED'; +lookup(1271) -> 'PVALID'; +lookup(1272) -> 'DISALLOWED'; +lookup(1273) -> 'PVALID'; +lookup(1274) -> 'DISALLOWED'; +lookup(1275) -> 'PVALID'; +lookup(1276) -> 'DISALLOWED'; +lookup(1277) -> 'PVALID'; +lookup(1278) -> 'DISALLOWED'; +lookup(1279) -> 'PVALID'; +lookup(1280) -> 'DISALLOWED'; +lookup(1281) -> 'PVALID'; +lookup(1282) -> 'DISALLOWED'; +lookup(1283) -> 'PVALID'; +lookup(1284) -> 'DISALLOWED'; +lookup(1285) -> 'PVALID'; +lookup(1286) -> 'DISALLOWED'; +lookup(1287) -> 'PVALID'; +lookup(1288) -> 'DISALLOWED'; +lookup(1289) -> 'PVALID'; +lookup(1290) -> 'DISALLOWED'; +lookup(1291) -> 'PVALID'; +lookup(1292) -> 'DISALLOWED'; +lookup(1293) -> 'PVALID'; +lookup(1294) -> 'DISALLOWED'; +lookup(1295) -> 'PVALID'; +lookup(1296) -> 'DISALLOWED'; +lookup(1297) -> 'PVALID'; +lookup(1298) -> 'DISALLOWED'; +lookup(1299) -> 'PVALID'; +lookup(1300) -> 'DISALLOWED'; +lookup(1301) -> 'PVALID'; +lookup(1302) -> 'DISALLOWED'; +lookup(1303) -> 'PVALID'; +lookup(1304) -> 'DISALLOWED'; +lookup(1305) -> 'PVALID'; +lookup(1306) -> 'DISALLOWED'; +lookup(1307) -> 'PVALID'; +lookup(1308) -> 'DISALLOWED'; +lookup(1309) -> 'PVALID'; +lookup(1310) -> 'DISALLOWED'; +lookup(1311) -> 'PVALID'; +lookup(1312) -> 'DISALLOWED'; +lookup(1313) -> 'PVALID'; +lookup(1314) -> 'DISALLOWED'; +lookup(1315) -> 'PVALID'; +lookup(1316) -> 'DISALLOWED'; +lookup(1317) -> 'PVALID'; +lookup(1318) -> 'DISALLOWED'; +lookup(1319) -> 'PVALID'; +lookup(1320) -> 'DISALLOWED'; +lookup(1321) -> 'PVALID'; +lookup(1322) -> 'DISALLOWED'; +lookup(1323) -> 'PVALID'; +lookup(1324) -> 'DISALLOWED'; +lookup(1325) -> 'PVALID'; +lookup(1326) -> 'DISALLOWED'; +lookup(1327) -> 'PVALID'; +lookup(1328) -> 'UNASSIGNED'; +lookup(1369) -> 'PVALID'; +lookup(1415) -> 'DISALLOWED'; +lookup(1416) -> 'PVALID'; +lookup(1424) -> 'UNASSIGNED'; +lookup(1470) -> 'DISALLOWED'; +lookup(1471) -> 'PVALID'; +lookup(1472) -> 'DISALLOWED'; +lookup(1475) -> 
'DISALLOWED'; +lookup(1478) -> 'DISALLOWED'; +lookup(1479) -> 'PVALID'; +lookup(1565) -> 'UNASSIGNED'; +lookup(1600) -> 'DISALLOWED'; +lookup(1748) -> 'DISALLOWED'; +lookup(1769) -> 'DISALLOWED'; +lookup(1806) -> 'UNASSIGNED'; +lookup(1807) -> 'DISALLOWED'; +lookup(2045) -> 'PVALID'; +lookup(2111) -> 'UNASSIGNED'; +lookup(2142) -> 'DISALLOWED'; +lookup(2143) -> 'UNASSIGNED'; +lookup(2229) -> 'UNASSIGNED'; +lookup(2274) -> 'DISALLOWED'; +lookup(2416) -> 'DISALLOWED'; +lookup(2436) -> 'UNASSIGNED'; +lookup(2473) -> 'UNASSIGNED'; +lookup(2481) -> 'UNASSIGNED'; +lookup(2482) -> 'PVALID'; +lookup(2519) -> 'PVALID'; +lookup(2526) -> 'UNASSIGNED'; +lookup(2527) -> 'DISALLOWED'; +lookup(2556) -> 'PVALID'; +lookup(2557) -> 'DISALLOWED'; +lookup(2558) -> 'PVALID'; +lookup(2564) -> 'UNASSIGNED'; +lookup(2601) -> 'UNASSIGNED'; +lookup(2609) -> 'UNASSIGNED'; +lookup(2610) -> 'PVALID'; +lookup(2611) -> 'DISALLOWED'; +lookup(2612) -> 'UNASSIGNED'; +lookup(2613) -> 'PVALID'; +lookup(2614) -> 'DISALLOWED'; +lookup(2615) -> 'UNASSIGNED'; +lookup(2620) -> 'PVALID'; +lookup(2621) -> 'UNASSIGNED'; +lookup(2641) -> 'PVALID'; +lookup(2652) -> 'PVALID'; +lookup(2653) -> 'UNASSIGNED'; +lookup(2654) -> 'DISALLOWED'; +lookup(2678) -> 'DISALLOWED'; +lookup(2692) -> 'UNASSIGNED'; +lookup(2702) -> 'UNASSIGNED'; +lookup(2706) -> 'UNASSIGNED'; +lookup(2729) -> 'UNASSIGNED'; +lookup(2737) -> 'UNASSIGNED'; +lookup(2740) -> 'UNASSIGNED'; +lookup(2758) -> 'UNASSIGNED'; +lookup(2762) -> 'UNASSIGNED'; +lookup(2768) -> 'PVALID'; +lookup(2816) -> 'UNASSIGNED'; +lookup(2820) -> 'UNASSIGNED'; +lookup(2857) -> 'UNASSIGNED'; +lookup(2865) -> 'UNASSIGNED'; +lookup(2868) -> 'UNASSIGNED'; +lookup(2910) -> 'UNASSIGNED'; +lookup(2928) -> 'DISALLOWED'; +lookup(2929) -> 'PVALID'; +lookup(2948) -> 'UNASSIGNED'; +lookup(2961) -> 'UNASSIGNED'; +lookup(2971) -> 'UNASSIGNED'; +lookup(2972) -> 'PVALID'; +lookup(2973) -> 'UNASSIGNED'; +lookup(3017) -> 'UNASSIGNED'; +lookup(3024) -> 'PVALID'; +lookup(3031) -> 'PVALID'; 
+lookup(3085) -> 'UNASSIGNED'; +lookup(3089) -> 'UNASSIGNED'; +lookup(3113) -> 'UNASSIGNED'; +lookup(3141) -> 'UNASSIGNED'; +lookup(3145) -> 'UNASSIGNED'; +lookup(3159) -> 'UNASSIGNED'; +lookup(3204) -> 'DISALLOWED'; +lookup(3213) -> 'UNASSIGNED'; +lookup(3217) -> 'UNASSIGNED'; +lookup(3241) -> 'UNASSIGNED'; +lookup(3252) -> 'UNASSIGNED'; +lookup(3269) -> 'UNASSIGNED'; +lookup(3273) -> 'UNASSIGNED'; +lookup(3294) -> 'PVALID'; +lookup(3295) -> 'UNASSIGNED'; +lookup(3312) -> 'UNASSIGNED'; +lookup(3341) -> 'UNASSIGNED'; +lookup(3345) -> 'UNASSIGNED'; +lookup(3397) -> 'UNASSIGNED'; +lookup(3401) -> 'UNASSIGNED'; +lookup(3407) -> 'DISALLOWED'; +lookup(3456) -> 'UNASSIGNED'; +lookup(3460) -> 'UNASSIGNED'; +lookup(3506) -> 'UNASSIGNED'; +lookup(3516) -> 'UNASSIGNED'; +lookup(3517) -> 'PVALID'; +lookup(3530) -> 'PVALID'; +lookup(3541) -> 'UNASSIGNED'; +lookup(3542) -> 'PVALID'; +lookup(3543) -> 'UNASSIGNED'; +lookup(3572) -> 'DISALLOWED'; +lookup(3635) -> 'DISALLOWED'; +lookup(3647) -> 'DISALLOWED'; +lookup(3663) -> 'DISALLOWED'; +lookup(3715) -> 'UNASSIGNED'; +lookup(3716) -> 'PVALID'; +lookup(3717) -> 'UNASSIGNED'; +lookup(3723) -> 'UNASSIGNED'; +lookup(3748) -> 'UNASSIGNED'; +lookup(3749) -> 'PVALID'; +lookup(3750) -> 'UNASSIGNED'; +lookup(3763) -> 'DISALLOWED'; +lookup(3781) -> 'UNASSIGNED'; +lookup(3782) -> 'PVALID'; +lookup(3783) -> 'UNASSIGNED'; +lookup(3840) -> 'PVALID'; +lookup(3851) -> 'PVALID'; +lookup(3893) -> 'PVALID'; +lookup(3894) -> 'DISALLOWED'; +lookup(3895) -> 'PVALID'; +lookup(3896) -> 'DISALLOWED'; +lookup(3897) -> 'PVALID'; +lookup(3907) -> 'DISALLOWED'; +lookup(3912) -> 'UNASSIGNED'; +lookup(3917) -> 'DISALLOWED'; +lookup(3922) -> 'DISALLOWED'; +lookup(3927) -> 'DISALLOWED'; +lookup(3932) -> 'DISALLOWED'; +lookup(3945) -> 'DISALLOWED'; +lookup(3955) -> 'DISALLOWED'; +lookup(3956) -> 'PVALID'; +lookup(3969) -> 'DISALLOWED'; +lookup(3973) -> 'DISALLOWED'; +lookup(3987) -> 'DISALLOWED'; +lookup(3992) -> 'UNASSIGNED'; +lookup(3997) -> 'DISALLOWED'; 
+lookup(4002) -> 'DISALLOWED'; +lookup(4007) -> 'DISALLOWED'; +lookup(4012) -> 'DISALLOWED'; +lookup(4025) -> 'DISALLOWED'; +lookup(4029) -> 'UNASSIGNED'; +lookup(4038) -> 'PVALID'; +lookup(4045) -> 'UNASSIGNED'; +lookup(4294) -> 'UNASSIGNED'; +lookup(4295) -> 'DISALLOWED'; +lookup(4301) -> 'DISALLOWED'; +lookup(4681) -> 'UNASSIGNED'; +lookup(4695) -> 'UNASSIGNED'; +lookup(4696) -> 'PVALID'; +lookup(4697) -> 'UNASSIGNED'; +lookup(4745) -> 'UNASSIGNED'; +lookup(4785) -> 'UNASSIGNED'; +lookup(4799) -> 'UNASSIGNED'; +lookup(4800) -> 'PVALID'; +lookup(4801) -> 'UNASSIGNED'; +lookup(4823) -> 'UNASSIGNED'; +lookup(4881) -> 'UNASSIGNED'; +lookup(5120) -> 'DISALLOWED'; +lookup(5760) -> 'DISALLOWED'; +lookup(5901) -> 'UNASSIGNED'; +lookup(5997) -> 'UNASSIGNED'; +lookup(6001) -> 'UNASSIGNED'; +lookup(6103) -> 'PVALID'; +lookup(6159) -> 'UNASSIGNED'; +lookup(6431) -> 'UNASSIGNED'; +lookup(6464) -> 'DISALLOWED'; +lookup(6618) -> 'DISALLOWED'; +lookup(6751) -> 'UNASSIGNED'; +lookup(6823) -> 'PVALID'; +lookup(6846) -> 'DISALLOWED'; +lookup(7379) -> 'DISALLOWED'; +lookup(7471) -> 'PVALID'; +lookup(7483) -> 'PVALID'; +lookup(7502) -> 'PVALID'; +lookup(7544) -> 'DISALLOWED'; +lookup(7674) -> 'UNASSIGNED'; +lookup(7680) -> 'DISALLOWED'; +lookup(7681) -> 'PVALID'; +lookup(7682) -> 'DISALLOWED'; +lookup(7683) -> 'PVALID'; +lookup(7684) -> 'DISALLOWED'; +lookup(7685) -> 'PVALID'; +lookup(7686) -> 'DISALLOWED'; +lookup(7687) -> 'PVALID'; +lookup(7688) -> 'DISALLOWED'; +lookup(7689) -> 'PVALID'; +lookup(7690) -> 'DISALLOWED'; +lookup(7691) -> 'PVALID'; +lookup(7692) -> 'DISALLOWED'; +lookup(7693) -> 'PVALID'; +lookup(7694) -> 'DISALLOWED'; +lookup(7695) -> 'PVALID'; +lookup(7696) -> 'DISALLOWED'; +lookup(7697) -> 'PVALID'; +lookup(7698) -> 'DISALLOWED'; +lookup(7699) -> 'PVALID'; +lookup(7700) -> 'DISALLOWED'; +lookup(7701) -> 'PVALID'; +lookup(7702) -> 'DISALLOWED'; +lookup(7703) -> 'PVALID'; +lookup(7704) -> 'DISALLOWED'; +lookup(7705) -> 'PVALID'; +lookup(7706) -> 'DISALLOWED'; 
+lookup(7707) -> 'PVALID'; +lookup(7708) -> 'DISALLOWED'; +lookup(7709) -> 'PVALID'; +lookup(7710) -> 'DISALLOWED'; +lookup(7711) -> 'PVALID'; +lookup(7712) -> 'DISALLOWED'; +lookup(7713) -> 'PVALID'; +lookup(7714) -> 'DISALLOWED'; +lookup(7715) -> 'PVALID'; +lookup(7716) -> 'DISALLOWED'; +lookup(7717) -> 'PVALID'; +lookup(7718) -> 'DISALLOWED'; +lookup(7719) -> 'PVALID'; +lookup(7720) -> 'DISALLOWED'; +lookup(7721) -> 'PVALID'; +lookup(7722) -> 'DISALLOWED'; +lookup(7723) -> 'PVALID'; +lookup(7724) -> 'DISALLOWED'; +lookup(7725) -> 'PVALID'; +lookup(7726) -> 'DISALLOWED'; +lookup(7727) -> 'PVALID'; +lookup(7728) -> 'DISALLOWED'; +lookup(7729) -> 'PVALID'; +lookup(7730) -> 'DISALLOWED'; +lookup(7731) -> 'PVALID'; +lookup(7732) -> 'DISALLOWED'; +lookup(7733) -> 'PVALID'; +lookup(7734) -> 'DISALLOWED'; +lookup(7735) -> 'PVALID'; +lookup(7736) -> 'DISALLOWED'; +lookup(7737) -> 'PVALID'; +lookup(7738) -> 'DISALLOWED'; +lookup(7739) -> 'PVALID'; +lookup(7740) -> 'DISALLOWED'; +lookup(7741) -> 'PVALID'; +lookup(7742) -> 'DISALLOWED'; +lookup(7743) -> 'PVALID'; +lookup(7744) -> 'DISALLOWED'; +lookup(7745) -> 'PVALID'; +lookup(7746) -> 'DISALLOWED'; +lookup(7747) -> 'PVALID'; +lookup(7748) -> 'DISALLOWED'; +lookup(7749) -> 'PVALID'; +lookup(7750) -> 'DISALLOWED'; +lookup(7751) -> 'PVALID'; +lookup(7752) -> 'DISALLOWED'; +lookup(7753) -> 'PVALID'; +lookup(7754) -> 'DISALLOWED'; +lookup(7755) -> 'PVALID'; +lookup(7756) -> 'DISALLOWED'; +lookup(7757) -> 'PVALID'; +lookup(7758) -> 'DISALLOWED'; +lookup(7759) -> 'PVALID'; +lookup(7760) -> 'DISALLOWED'; +lookup(7761) -> 'PVALID'; +lookup(7762) -> 'DISALLOWED'; +lookup(7763) -> 'PVALID'; +lookup(7764) -> 'DISALLOWED'; +lookup(7765) -> 'PVALID'; +lookup(7766) -> 'DISALLOWED'; +lookup(7767) -> 'PVALID'; +lookup(7768) -> 'DISALLOWED'; +lookup(7769) -> 'PVALID'; +lookup(7770) -> 'DISALLOWED'; +lookup(7771) -> 'PVALID'; +lookup(7772) -> 'DISALLOWED'; +lookup(7773) -> 'PVALID'; +lookup(7774) -> 'DISALLOWED'; +lookup(7775) -> 'PVALID'; 
+lookup(7776) -> 'DISALLOWED'; +lookup(7777) -> 'PVALID'; +lookup(7778) -> 'DISALLOWED'; +lookup(7779) -> 'PVALID'; +lookup(7780) -> 'DISALLOWED'; +lookup(7781) -> 'PVALID'; +lookup(7782) -> 'DISALLOWED'; +lookup(7783) -> 'PVALID'; +lookup(7784) -> 'DISALLOWED'; +lookup(7785) -> 'PVALID'; +lookup(7786) -> 'DISALLOWED'; +lookup(7787) -> 'PVALID'; +lookup(7788) -> 'DISALLOWED'; +lookup(7789) -> 'PVALID'; +lookup(7790) -> 'DISALLOWED'; +lookup(7791) -> 'PVALID'; +lookup(7792) -> 'DISALLOWED'; +lookup(7793) -> 'PVALID'; +lookup(7794) -> 'DISALLOWED'; +lookup(7795) -> 'PVALID'; +lookup(7796) -> 'DISALLOWED'; +lookup(7797) -> 'PVALID'; +lookup(7798) -> 'DISALLOWED'; +lookup(7799) -> 'PVALID'; +lookup(7800) -> 'DISALLOWED'; +lookup(7801) -> 'PVALID'; +lookup(7802) -> 'DISALLOWED'; +lookup(7803) -> 'PVALID'; +lookup(7804) -> 'DISALLOWED'; +lookup(7805) -> 'PVALID'; +lookup(7806) -> 'DISALLOWED'; +lookup(7807) -> 'PVALID'; +lookup(7808) -> 'DISALLOWED'; +lookup(7809) -> 'PVALID'; +lookup(7810) -> 'DISALLOWED'; +lookup(7811) -> 'PVALID'; +lookup(7812) -> 'DISALLOWED'; +lookup(7813) -> 'PVALID'; +lookup(7814) -> 'DISALLOWED'; +lookup(7815) -> 'PVALID'; +lookup(7816) -> 'DISALLOWED'; +lookup(7817) -> 'PVALID'; +lookup(7818) -> 'DISALLOWED'; +lookup(7819) -> 'PVALID'; +lookup(7820) -> 'DISALLOWED'; +lookup(7821) -> 'PVALID'; +lookup(7822) -> 'DISALLOWED'; +lookup(7823) -> 'PVALID'; +lookup(7824) -> 'DISALLOWED'; +lookup(7825) -> 'PVALID'; +lookup(7826) -> 'DISALLOWED'; +lookup(7827) -> 'PVALID'; +lookup(7828) -> 'DISALLOWED'; +lookup(7838) -> 'DISALLOWED'; +lookup(7839) -> 'PVALID'; +lookup(7840) -> 'DISALLOWED'; +lookup(7841) -> 'PVALID'; +lookup(7842) -> 'DISALLOWED'; +lookup(7843) -> 'PVALID'; +lookup(7844) -> 'DISALLOWED'; +lookup(7845) -> 'PVALID'; +lookup(7846) -> 'DISALLOWED'; +lookup(7847) -> 'PVALID'; +lookup(7848) -> 'DISALLOWED'; +lookup(7849) -> 'PVALID'; +lookup(7850) -> 'DISALLOWED'; +lookup(7851) -> 'PVALID'; +lookup(7852) -> 'DISALLOWED'; +lookup(7853) -> 
'PVALID'; +lookup(7854) -> 'DISALLOWED'; +lookup(7855) -> 'PVALID'; +lookup(7856) -> 'DISALLOWED'; +lookup(7857) -> 'PVALID'; +lookup(7858) -> 'DISALLOWED'; +lookup(7859) -> 'PVALID'; +lookup(7860) -> 'DISALLOWED'; +lookup(7861) -> 'PVALID'; +lookup(7862) -> 'DISALLOWED'; +lookup(7863) -> 'PVALID'; +lookup(7864) -> 'DISALLOWED'; +lookup(7865) -> 'PVALID'; +lookup(7866) -> 'DISALLOWED'; +lookup(7867) -> 'PVALID'; +lookup(7868) -> 'DISALLOWED'; +lookup(7869) -> 'PVALID'; +lookup(7870) -> 'DISALLOWED'; +lookup(7871) -> 'PVALID'; +lookup(7872) -> 'DISALLOWED'; +lookup(7873) -> 'PVALID'; +lookup(7874) -> 'DISALLOWED'; +lookup(7875) -> 'PVALID'; +lookup(7876) -> 'DISALLOWED'; +lookup(7877) -> 'PVALID'; +lookup(7878) -> 'DISALLOWED'; +lookup(7879) -> 'PVALID'; +lookup(7880) -> 'DISALLOWED'; +lookup(7881) -> 'PVALID'; +lookup(7882) -> 'DISALLOWED'; +lookup(7883) -> 'PVALID'; +lookup(7884) -> 'DISALLOWED'; +lookup(7885) -> 'PVALID'; +lookup(7886) -> 'DISALLOWED'; +lookup(7887) -> 'PVALID'; +lookup(7888) -> 'DISALLOWED'; +lookup(7889) -> 'PVALID'; +lookup(7890) -> 'DISALLOWED'; +lookup(7891) -> 'PVALID'; +lookup(7892) -> 'DISALLOWED'; +lookup(7893) -> 'PVALID'; +lookup(7894) -> 'DISALLOWED'; +lookup(7895) -> 'PVALID'; +lookup(7896) -> 'DISALLOWED'; +lookup(7897) -> 'PVALID'; +lookup(7898) -> 'DISALLOWED'; +lookup(7899) -> 'PVALID'; +lookup(7900) -> 'DISALLOWED'; +lookup(7901) -> 'PVALID'; +lookup(7902) -> 'DISALLOWED'; +lookup(7903) -> 'PVALID'; +lookup(7904) -> 'DISALLOWED'; +lookup(7905) -> 'PVALID'; +lookup(7906) -> 'DISALLOWED'; +lookup(7907) -> 'PVALID'; +lookup(7908) -> 'DISALLOWED'; +lookup(7909) -> 'PVALID'; +lookup(7910) -> 'DISALLOWED'; +lookup(7911) -> 'PVALID'; +lookup(7912) -> 'DISALLOWED'; +lookup(7913) -> 'PVALID'; +lookup(7914) -> 'DISALLOWED'; +lookup(7915) -> 'PVALID'; +lookup(7916) -> 'DISALLOWED'; +lookup(7917) -> 'PVALID'; +lookup(7918) -> 'DISALLOWED'; +lookup(7919) -> 'PVALID'; +lookup(7920) -> 'DISALLOWED'; +lookup(7921) -> 'PVALID'; +lookup(7922) -> 
'DISALLOWED'; +lookup(7923) -> 'PVALID'; +lookup(7924) -> 'DISALLOWED'; +lookup(7925) -> 'PVALID'; +lookup(7926) -> 'DISALLOWED'; +lookup(7927) -> 'PVALID'; +lookup(7928) -> 'DISALLOWED'; +lookup(7929) -> 'PVALID'; +lookup(7930) -> 'DISALLOWED'; +lookup(7931) -> 'PVALID'; +lookup(7932) -> 'DISALLOWED'; +lookup(7933) -> 'PVALID'; +lookup(7934) -> 'DISALLOWED'; +lookup(8024) -> 'UNASSIGNED'; +lookup(8025) -> 'DISALLOWED'; +lookup(8026) -> 'UNASSIGNED'; +lookup(8027) -> 'DISALLOWED'; +lookup(8028) -> 'UNASSIGNED'; +lookup(8029) -> 'DISALLOWED'; +lookup(8030) -> 'UNASSIGNED'; +lookup(8031) -> 'DISALLOWED'; +lookup(8048) -> 'PVALID'; +lookup(8049) -> 'DISALLOWED'; +lookup(8050) -> 'PVALID'; +lookup(8051) -> 'DISALLOWED'; +lookup(8052) -> 'PVALID'; +lookup(8053) -> 'DISALLOWED'; +lookup(8054) -> 'PVALID'; +lookup(8055) -> 'DISALLOWED'; +lookup(8056) -> 'PVALID'; +lookup(8057) -> 'DISALLOWED'; +lookup(8058) -> 'PVALID'; +lookup(8059) -> 'DISALLOWED'; +lookup(8060) -> 'PVALID'; +lookup(8061) -> 'DISALLOWED'; +lookup(8117) -> 'UNASSIGNED'; +lookup(8118) -> 'PVALID'; +lookup(8133) -> 'UNASSIGNED'; +lookup(8134) -> 'PVALID'; +lookup(8147) -> 'DISALLOWED'; +lookup(8156) -> 'UNASSIGNED'; +lookup(8163) -> 'DISALLOWED'; +lookup(8181) -> 'UNASSIGNED'; +lookup(8182) -> 'PVALID'; +lookup(8191) -> 'UNASSIGNED'; +lookup(8293) -> 'UNASSIGNED'; +lookup(8335) -> 'UNASSIGNED'; +lookup(8526) -> 'PVALID'; +lookup(8580) -> 'PVALID'; +lookup(11158) -> 'UNASSIGNED'; +lookup(11311) -> 'UNASSIGNED'; +lookup(11359) -> 'UNASSIGNED'; +lookup(11360) -> 'DISALLOWED'; +lookup(11361) -> 'PVALID'; +lookup(11367) -> 'DISALLOWED'; +lookup(11368) -> 'PVALID'; +lookup(11369) -> 'DISALLOWED'; +lookup(11370) -> 'PVALID'; +lookup(11371) -> 'DISALLOWED'; +lookup(11372) -> 'PVALID'; +lookup(11377) -> 'PVALID'; +lookup(11378) -> 'DISALLOWED'; +lookup(11381) -> 'DISALLOWED'; +lookup(11393) -> 'PVALID'; +lookup(11394) -> 'DISALLOWED'; +lookup(11395) -> 'PVALID'; +lookup(11396) -> 'DISALLOWED'; +lookup(11397) -> 
'PVALID'; +lookup(11398) -> 'DISALLOWED'; +lookup(11399) -> 'PVALID'; +lookup(11400) -> 'DISALLOWED'; +lookup(11401) -> 'PVALID'; +lookup(11402) -> 'DISALLOWED'; +lookup(11403) -> 'PVALID'; +lookup(11404) -> 'DISALLOWED'; +lookup(11405) -> 'PVALID'; +lookup(11406) -> 'DISALLOWED'; +lookup(11407) -> 'PVALID'; +lookup(11408) -> 'DISALLOWED'; +lookup(11409) -> 'PVALID'; +lookup(11410) -> 'DISALLOWED'; +lookup(11411) -> 'PVALID'; +lookup(11412) -> 'DISALLOWED'; +lookup(11413) -> 'PVALID'; +lookup(11414) -> 'DISALLOWED'; +lookup(11415) -> 'PVALID'; +lookup(11416) -> 'DISALLOWED'; +lookup(11417) -> 'PVALID'; +lookup(11418) -> 'DISALLOWED'; +lookup(11419) -> 'PVALID'; +lookup(11420) -> 'DISALLOWED'; +lookup(11421) -> 'PVALID'; +lookup(11422) -> 'DISALLOWED'; +lookup(11423) -> 'PVALID'; +lookup(11424) -> 'DISALLOWED'; +lookup(11425) -> 'PVALID'; +lookup(11426) -> 'DISALLOWED'; +lookup(11427) -> 'PVALID'; +lookup(11428) -> 'DISALLOWED'; +lookup(11429) -> 'PVALID'; +lookup(11430) -> 'DISALLOWED'; +lookup(11431) -> 'PVALID'; +lookup(11432) -> 'DISALLOWED'; +lookup(11433) -> 'PVALID'; +lookup(11434) -> 'DISALLOWED'; +lookup(11435) -> 'PVALID'; +lookup(11436) -> 'DISALLOWED'; +lookup(11437) -> 'PVALID'; +lookup(11438) -> 'DISALLOWED'; +lookup(11439) -> 'PVALID'; +lookup(11440) -> 'DISALLOWED'; +lookup(11441) -> 'PVALID'; +lookup(11442) -> 'DISALLOWED'; +lookup(11443) -> 'PVALID'; +lookup(11444) -> 'DISALLOWED'; +lookup(11445) -> 'PVALID'; +lookup(11446) -> 'DISALLOWED'; +lookup(11447) -> 'PVALID'; +lookup(11448) -> 'DISALLOWED'; +lookup(11449) -> 'PVALID'; +lookup(11450) -> 'DISALLOWED'; +lookup(11451) -> 'PVALID'; +lookup(11452) -> 'DISALLOWED'; +lookup(11453) -> 'PVALID'; +lookup(11454) -> 'DISALLOWED'; +lookup(11455) -> 'PVALID'; +lookup(11456) -> 'DISALLOWED'; +lookup(11457) -> 'PVALID'; +lookup(11458) -> 'DISALLOWED'; +lookup(11459) -> 'PVALID'; +lookup(11460) -> 'DISALLOWED'; +lookup(11461) -> 'PVALID'; +lookup(11462) -> 'DISALLOWED'; +lookup(11463) -> 'PVALID'; 
+lookup(11464) -> 'DISALLOWED'; +lookup(11465) -> 'PVALID'; +lookup(11466) -> 'DISALLOWED'; +lookup(11467) -> 'PVALID'; +lookup(11468) -> 'DISALLOWED'; +lookup(11469) -> 'PVALID'; +lookup(11470) -> 'DISALLOWED'; +lookup(11471) -> 'PVALID'; +lookup(11472) -> 'DISALLOWED'; +lookup(11473) -> 'PVALID'; +lookup(11474) -> 'DISALLOWED'; +lookup(11475) -> 'PVALID'; +lookup(11476) -> 'DISALLOWED'; +lookup(11477) -> 'PVALID'; +lookup(11478) -> 'DISALLOWED'; +lookup(11479) -> 'PVALID'; +lookup(11480) -> 'DISALLOWED'; +lookup(11481) -> 'PVALID'; +lookup(11482) -> 'DISALLOWED'; +lookup(11483) -> 'PVALID'; +lookup(11484) -> 'DISALLOWED'; +lookup(11485) -> 'PVALID'; +lookup(11486) -> 'DISALLOWED'; +lookup(11487) -> 'PVALID'; +lookup(11488) -> 'DISALLOWED'; +lookup(11489) -> 'PVALID'; +lookup(11490) -> 'DISALLOWED'; +lookup(11500) -> 'PVALID'; +lookup(11501) -> 'DISALLOWED'; +lookup(11506) -> 'DISALLOWED'; +lookup(11507) -> 'PVALID'; +lookup(11558) -> 'UNASSIGNED'; +lookup(11559) -> 'PVALID'; +lookup(11565) -> 'PVALID'; +lookup(11687) -> 'UNASSIGNED'; +lookup(11695) -> 'UNASSIGNED'; +lookup(11703) -> 'UNASSIGNED'; +lookup(11711) -> 'UNASSIGNED'; +lookup(11719) -> 'UNASSIGNED'; +lookup(11727) -> 'UNASSIGNED'; +lookup(11735) -> 'UNASSIGNED'; +lookup(11743) -> 'UNASSIGNED'; +lookup(11823) -> 'PVALID'; +lookup(11930) -> 'UNASSIGNED'; +lookup(12348) -> 'PVALID'; +lookup(12352) -> 'UNASSIGNED'; +lookup(12539) -> 'CONTEXTO'; +lookup(12543) -> 'DISALLOWED'; +lookup(12592) -> 'UNASSIGNED'; +lookup(12687) -> 'UNASSIGNED'; +lookup(12831) -> 'UNASSIGNED'; +lookup(42560) -> 'DISALLOWED'; +lookup(42561) -> 'PVALID'; +lookup(42562) -> 'DISALLOWED'; +lookup(42563) -> 'PVALID'; +lookup(42564) -> 'DISALLOWED'; +lookup(42565) -> 'PVALID'; +lookup(42566) -> 'DISALLOWED'; +lookup(42567) -> 'PVALID'; +lookup(42568) -> 'DISALLOWED'; +lookup(42569) -> 'PVALID'; +lookup(42570) -> 'DISALLOWED'; +lookup(42571) -> 'PVALID'; +lookup(42572) -> 'DISALLOWED'; +lookup(42573) -> 'PVALID'; +lookup(42574) -> 
'DISALLOWED'; +lookup(42575) -> 'PVALID'; +lookup(42576) -> 'DISALLOWED'; +lookup(42577) -> 'PVALID'; +lookup(42578) -> 'DISALLOWED'; +lookup(42579) -> 'PVALID'; +lookup(42580) -> 'DISALLOWED'; +lookup(42581) -> 'PVALID'; +lookup(42582) -> 'DISALLOWED'; +lookup(42583) -> 'PVALID'; +lookup(42584) -> 'DISALLOWED'; +lookup(42585) -> 'PVALID'; +lookup(42586) -> 'DISALLOWED'; +lookup(42587) -> 'PVALID'; +lookup(42588) -> 'DISALLOWED'; +lookup(42589) -> 'PVALID'; +lookup(42590) -> 'DISALLOWED'; +lookup(42591) -> 'PVALID'; +lookup(42592) -> 'DISALLOWED'; +lookup(42593) -> 'PVALID'; +lookup(42594) -> 'DISALLOWED'; +lookup(42595) -> 'PVALID'; +lookup(42596) -> 'DISALLOWED'; +lookup(42597) -> 'PVALID'; +lookup(42598) -> 'DISALLOWED'; +lookup(42599) -> 'PVALID'; +lookup(42600) -> 'DISALLOWED'; +lookup(42601) -> 'PVALID'; +lookup(42602) -> 'DISALLOWED'; +lookup(42603) -> 'PVALID'; +lookup(42604) -> 'DISALLOWED'; +lookup(42622) -> 'DISALLOWED'; +lookup(42623) -> 'PVALID'; +lookup(42624) -> 'DISALLOWED'; +lookup(42625) -> 'PVALID'; +lookup(42626) -> 'DISALLOWED'; +lookup(42627) -> 'PVALID'; +lookup(42628) -> 'DISALLOWED'; +lookup(42629) -> 'PVALID'; +lookup(42630) -> 'DISALLOWED'; +lookup(42631) -> 'PVALID'; +lookup(42632) -> 'DISALLOWED'; +lookup(42633) -> 'PVALID'; +lookup(42634) -> 'DISALLOWED'; +lookup(42635) -> 'PVALID'; +lookup(42636) -> 'DISALLOWED'; +lookup(42637) -> 'PVALID'; +lookup(42638) -> 'DISALLOWED'; +lookup(42639) -> 'PVALID'; +lookup(42640) -> 'DISALLOWED'; +lookup(42641) -> 'PVALID'; +lookup(42642) -> 'DISALLOWED'; +lookup(42643) -> 'PVALID'; +lookup(42644) -> 'DISALLOWED'; +lookup(42645) -> 'PVALID'; +lookup(42646) -> 'DISALLOWED'; +lookup(42647) -> 'PVALID'; +lookup(42648) -> 'DISALLOWED'; +lookup(42649) -> 'PVALID'; +lookup(42650) -> 'DISALLOWED'; +lookup(42651) -> 'PVALID'; +lookup(42787) -> 'PVALID'; +lookup(42788) -> 'DISALLOWED'; +lookup(42789) -> 'PVALID'; +lookup(42790) -> 'DISALLOWED'; +lookup(42791) -> 'PVALID'; +lookup(42792) -> 'DISALLOWED'; 
+lookup(42793) -> 'PVALID'; +lookup(42794) -> 'DISALLOWED'; +lookup(42795) -> 'PVALID'; +lookup(42796) -> 'DISALLOWED'; +lookup(42797) -> 'PVALID'; +lookup(42798) -> 'DISALLOWED'; +lookup(42802) -> 'DISALLOWED'; +lookup(42803) -> 'PVALID'; +lookup(42804) -> 'DISALLOWED'; +lookup(42805) -> 'PVALID'; +lookup(42806) -> 'DISALLOWED'; +lookup(42807) -> 'PVALID'; +lookup(42808) -> 'DISALLOWED'; +lookup(42809) -> 'PVALID'; +lookup(42810) -> 'DISALLOWED'; +lookup(42811) -> 'PVALID'; +lookup(42812) -> 'DISALLOWED'; +lookup(42813) -> 'PVALID'; +lookup(42814) -> 'DISALLOWED'; +lookup(42815) -> 'PVALID'; +lookup(42816) -> 'DISALLOWED'; +lookup(42817) -> 'PVALID'; +lookup(42818) -> 'DISALLOWED'; +lookup(42819) -> 'PVALID'; +lookup(42820) -> 'DISALLOWED'; +lookup(42821) -> 'PVALID'; +lookup(42822) -> 'DISALLOWED'; +lookup(42823) -> 'PVALID'; +lookup(42824) -> 'DISALLOWED'; +lookup(42825) -> 'PVALID'; +lookup(42826) -> 'DISALLOWED'; +lookup(42827) -> 'PVALID'; +lookup(42828) -> 'DISALLOWED'; +lookup(42829) -> 'PVALID'; +lookup(42830) -> 'DISALLOWED'; +lookup(42831) -> 'PVALID'; +lookup(42832) -> 'DISALLOWED'; +lookup(42833) -> 'PVALID'; +lookup(42834) -> 'DISALLOWED'; +lookup(42835) -> 'PVALID'; +lookup(42836) -> 'DISALLOWED'; +lookup(42837) -> 'PVALID'; +lookup(42838) -> 'DISALLOWED'; +lookup(42839) -> 'PVALID'; +lookup(42840) -> 'DISALLOWED'; +lookup(42841) -> 'PVALID'; +lookup(42842) -> 'DISALLOWED'; +lookup(42843) -> 'PVALID'; +lookup(42844) -> 'DISALLOWED'; +lookup(42845) -> 'PVALID'; +lookup(42846) -> 'DISALLOWED'; +lookup(42847) -> 'PVALID'; +lookup(42848) -> 'DISALLOWED'; +lookup(42849) -> 'PVALID'; +lookup(42850) -> 'DISALLOWED'; +lookup(42851) -> 'PVALID'; +lookup(42852) -> 'DISALLOWED'; +lookup(42853) -> 'PVALID'; +lookup(42854) -> 'DISALLOWED'; +lookup(42855) -> 'PVALID'; +lookup(42856) -> 'DISALLOWED'; +lookup(42857) -> 'PVALID'; +lookup(42858) -> 'DISALLOWED'; +lookup(42859) -> 'PVALID'; +lookup(42860) -> 'DISALLOWED'; +lookup(42861) -> 'PVALID'; +lookup(42862) -> 
'DISALLOWED'; +lookup(42863) -> 'PVALID'; +lookup(42864) -> 'DISALLOWED'; +lookup(42873) -> 'DISALLOWED'; +lookup(42874) -> 'PVALID'; +lookup(42875) -> 'DISALLOWED'; +lookup(42876) -> 'PVALID'; +lookup(42879) -> 'PVALID'; +lookup(42880) -> 'DISALLOWED'; +lookup(42881) -> 'PVALID'; +lookup(42882) -> 'DISALLOWED'; +lookup(42883) -> 'PVALID'; +lookup(42884) -> 'DISALLOWED'; +lookup(42885) -> 'PVALID'; +lookup(42886) -> 'DISALLOWED'; +lookup(42892) -> 'PVALID'; +lookup(42893) -> 'DISALLOWED'; +lookup(42896) -> 'DISALLOWED'; +lookup(42897) -> 'PVALID'; +lookup(42898) -> 'DISALLOWED'; +lookup(42902) -> 'DISALLOWED'; +lookup(42903) -> 'PVALID'; +lookup(42904) -> 'DISALLOWED'; +lookup(42905) -> 'PVALID'; +lookup(42906) -> 'DISALLOWED'; +lookup(42907) -> 'PVALID'; +lookup(42908) -> 'DISALLOWED'; +lookup(42909) -> 'PVALID'; +lookup(42910) -> 'DISALLOWED'; +lookup(42911) -> 'PVALID'; +lookup(42912) -> 'DISALLOWED'; +lookup(42913) -> 'PVALID'; +lookup(42914) -> 'DISALLOWED'; +lookup(42915) -> 'PVALID'; +lookup(42916) -> 'DISALLOWED'; +lookup(42917) -> 'PVALID'; +lookup(42918) -> 'DISALLOWED'; +lookup(42919) -> 'PVALID'; +lookup(42920) -> 'DISALLOWED'; +lookup(42921) -> 'PVALID'; +lookup(42927) -> 'PVALID'; +lookup(42933) -> 'PVALID'; +lookup(42934) -> 'DISALLOWED'; +lookup(42935) -> 'PVALID'; +lookup(42936) -> 'DISALLOWED'; +lookup(42937) -> 'PVALID'; +lookup(42938) -> 'DISALLOWED'; +lookup(42939) -> 'PVALID'; +lookup(42940) -> 'DISALLOWED'; +lookup(42941) -> 'PVALID'; +lookup(42942) -> 'DISALLOWED'; +lookup(42943) -> 'PVALID'; +lookup(42946) -> 'DISALLOWED'; +lookup(42947) -> 'PVALID'; +lookup(42952) -> 'PVALID'; +lookup(42953) -> 'DISALLOWED'; +lookup(42954) -> 'PVALID'; +lookup(42997) -> 'DISALLOWED'; +lookup(43052) -> 'PVALID'; +lookup(43259) -> 'PVALID'; +lookup(43260) -> 'DISALLOWED'; +lookup(43470) -> 'UNASSIGNED'; +lookup(43519) -> 'UNASSIGNED'; +lookup(43815) -> 'UNASSIGNED'; +lookup(43823) -> 'UNASSIGNED'; +lookup(44011) -> 'DISALLOWED'; +lookup(64016) -> 
'DISALLOWED'; +lookup(64017) -> 'PVALID'; +lookup(64018) -> 'DISALLOWED'; +lookup(64031) -> 'PVALID'; +lookup(64032) -> 'DISALLOWED'; +lookup(64033) -> 'PVALID'; +lookup(64034) -> 'DISALLOWED'; +lookup(64285) -> 'DISALLOWED'; +lookup(64286) -> 'PVALID'; +lookup(64311) -> 'UNASSIGNED'; +lookup(64317) -> 'UNASSIGNED'; +lookup(64318) -> 'DISALLOWED'; +lookup(64319) -> 'UNASSIGNED'; +lookup(64322) -> 'UNASSIGNED'; +lookup(64325) -> 'UNASSIGNED'; +lookup(65107) -> 'UNASSIGNED'; +lookup(65127) -> 'UNASSIGNED'; +lookup(65139) -> 'PVALID'; +lookup(65140) -> 'DISALLOWED'; +lookup(65141) -> 'UNASSIGNED'; +lookup(65279) -> 'DISALLOWED'; +lookup(65280) -> 'UNASSIGNED'; +lookup(65511) -> 'UNASSIGNED'; +lookup(65548) -> 'UNASSIGNED'; +lookup(65575) -> 'UNASSIGNED'; +lookup(65595) -> 'UNASSIGNED'; +lookup(65598) -> 'UNASSIGNED'; +lookup(65935) -> 'UNASSIGNED'; +lookup(65952) -> 'DISALLOWED'; +lookup(66045) -> 'PVALID'; +lookup(66272) -> 'PVALID'; +lookup(66369) -> 'DISALLOWED'; +lookup(66378) -> 'DISALLOWED'; +lookup(66462) -> 'UNASSIGNED'; +lookup(66463) -> 'DISALLOWED'; +lookup(66927) -> 'DISALLOWED'; +lookup(67592) -> 'PVALID'; +lookup(67593) -> 'UNASSIGNED'; +lookup(67638) -> 'UNASSIGNED'; +lookup(67644) -> 'PVALID'; +lookup(67670) -> 'UNASSIGNED'; +lookup(67827) -> 'UNASSIGNED'; +lookup(67871) -> 'DISALLOWED'; +lookup(67903) -> 'DISALLOWED'; +lookup(68100) -> 'UNASSIGNED'; +lookup(68116) -> 'UNASSIGNED'; +lookup(68120) -> 'UNASSIGNED'; +lookup(68159) -> 'PVALID'; +lookup(68296) -> 'DISALLOWED'; +lookup(69247) -> 'UNASSIGNED'; +lookup(69290) -> 'UNASSIGNED'; +lookup(69293) -> 'DISALLOWED'; +lookup(69415) -> 'PVALID'; +lookup(69837) -> 'DISALLOWED'; +lookup(69941) -> 'UNASSIGNED'; +lookup(70006) -> 'PVALID'; +lookup(70093) -> 'DISALLOWED'; +lookup(70107) -> 'DISALLOWED'; +lookup(70108) -> 'PVALID'; +lookup(70112) -> 'UNASSIGNED'; +lookup(70162) -> 'UNASSIGNED'; +lookup(70206) -> 'PVALID'; +lookup(70279) -> 'UNASSIGNED'; +lookup(70280) -> 'PVALID'; +lookup(70281) -> 
'UNASSIGNED'; +lookup(70286) -> 'UNASSIGNED'; +lookup(70302) -> 'UNASSIGNED'; +lookup(70313) -> 'DISALLOWED'; +lookup(70404) -> 'UNASSIGNED'; +lookup(70441) -> 'UNASSIGNED'; +lookup(70449) -> 'UNASSIGNED'; +lookup(70452) -> 'UNASSIGNED'; +lookup(70458) -> 'UNASSIGNED'; +lookup(70480) -> 'PVALID'; +lookup(70487) -> 'PVALID'; +lookup(70748) -> 'UNASSIGNED'; +lookup(70749) -> 'DISALLOWED'; +lookup(70854) -> 'DISALLOWED'; +lookup(70855) -> 'PVALID'; +lookup(71236) -> 'PVALID'; +lookup(71739) -> 'DISALLOWED'; +lookup(71945) -> 'PVALID'; +lookup(71956) -> 'UNASSIGNED'; +lookup(71959) -> 'UNASSIGNED'; +lookup(71990) -> 'UNASSIGNED'; +lookup(72162) -> 'DISALLOWED'; +lookup(72263) -> 'PVALID'; +lookup(72349) -> 'PVALID'; +lookup(72713) -> 'UNASSIGNED'; +lookup(72759) -> 'UNASSIGNED'; +lookup(72872) -> 'UNASSIGNED'; +lookup(72967) -> 'UNASSIGNED'; +lookup(72970) -> 'UNASSIGNED'; +lookup(73018) -> 'PVALID'; +lookup(73019) -> 'UNASSIGNED'; +lookup(73022) -> 'UNASSIGNED'; +lookup(73062) -> 'UNASSIGNED'; +lookup(73065) -> 'UNASSIGNED'; +lookup(73103) -> 'UNASSIGNED'; +lookup(73106) -> 'UNASSIGNED'; +lookup(73648) -> 'PVALID'; +lookup(73727) -> 'DISALLOWED'; +lookup(74863) -> 'UNASSIGNED'; +lookup(78895) -> 'UNASSIGNED'; +lookup(92767) -> 'UNASSIGNED'; +lookup(92917) -> 'DISALLOWED'; +lookup(93018) -> 'UNASSIGNED'; +lookup(93026) -> 'UNASSIGNED'; +lookup(94178) -> 'DISALLOWED'; +lookup(113820) -> 'DISALLOWED'; +lookup(119893) -> 'UNASSIGNED'; +lookup(119965) -> 'UNASSIGNED'; +lookup(119970) -> 'DISALLOWED'; +lookup(119981) -> 'UNASSIGNED'; +lookup(119994) -> 'UNASSIGNED'; +lookup(119995) -> 'DISALLOWED'; +lookup(119996) -> 'UNASSIGNED'; +lookup(120004) -> 'UNASSIGNED'; +lookup(120070) -> 'UNASSIGNED'; +lookup(120085) -> 'UNASSIGNED'; +lookup(120093) -> 'UNASSIGNED'; +lookup(120122) -> 'UNASSIGNED'; +lookup(120127) -> 'UNASSIGNED'; +lookup(120133) -> 'UNASSIGNED'; +lookup(120134) -> 'DISALLOWED'; +lookup(120145) -> 'UNASSIGNED'; +lookup(121461) -> 'PVALID'; +lookup(121476) -> 
'PVALID'; +lookup(121504) -> 'UNASSIGNED'; +lookup(122887) -> 'UNASSIGNED'; +lookup(122914) -> 'UNASSIGNED'; +lookup(122917) -> 'UNASSIGNED'; +lookup(123214) -> 'PVALID'; +lookup(123215) -> 'DISALLOWED'; +lookup(123647) -> 'DISALLOWED'; +lookup(126468) -> 'UNASSIGNED'; +lookup(126496) -> 'UNASSIGNED'; +lookup(126499) -> 'UNASSIGNED'; +lookup(126500) -> 'DISALLOWED'; +lookup(126503) -> 'DISALLOWED'; +lookup(126504) -> 'UNASSIGNED'; +lookup(126515) -> 'UNASSIGNED'; +lookup(126520) -> 'UNASSIGNED'; +lookup(126521) -> 'DISALLOWED'; +lookup(126522) -> 'UNASSIGNED'; +lookup(126523) -> 'DISALLOWED'; +lookup(126530) -> 'DISALLOWED'; +lookup(126535) -> 'DISALLOWED'; +lookup(126536) -> 'UNASSIGNED'; +lookup(126537) -> 'DISALLOWED'; +lookup(126538) -> 'UNASSIGNED'; +lookup(126539) -> 'DISALLOWED'; +lookup(126540) -> 'UNASSIGNED'; +lookup(126544) -> 'UNASSIGNED'; +lookup(126547) -> 'UNASSIGNED'; +lookup(126548) -> 'DISALLOWED'; +lookup(126551) -> 'DISALLOWED'; +lookup(126552) -> 'UNASSIGNED'; +lookup(126553) -> 'DISALLOWED'; +lookup(126554) -> 'UNASSIGNED'; +lookup(126555) -> 'DISALLOWED'; +lookup(126556) -> 'UNASSIGNED'; +lookup(126557) -> 'DISALLOWED'; +lookup(126558) -> 'UNASSIGNED'; +lookup(126559) -> 'DISALLOWED'; +lookup(126560) -> 'UNASSIGNED'; +lookup(126563) -> 'UNASSIGNED'; +lookup(126564) -> 'DISALLOWED'; +lookup(126571) -> 'UNASSIGNED'; +lookup(126579) -> 'UNASSIGNED'; +lookup(126584) -> 'UNASSIGNED'; +lookup(126589) -> 'UNASSIGNED'; +lookup(126590) -> 'DISALLOWED'; +lookup(126591) -> 'UNASSIGNED'; +lookup(126602) -> 'UNASSIGNED'; +lookup(126628) -> 'UNASSIGNED'; +lookup(126634) -> 'UNASSIGNED'; +lookup(127168) -> 'UNASSIGNED'; +lookup(127184) -> 'UNASSIGNED'; +lookup(129401) -> 'UNASSIGNED'; +lookup(129484) -> 'UNASSIGNED'; +lookup(129939) -> 'UNASSIGNED'; +lookup(917504) -> 'UNASSIGNED'; +lookup(917505) -> 'DISALLOWED'; +lookup(CP) when 0 =< CP, CP =< 44 -> 'DISALLOWED'; +lookup(CP) when 46 =< CP, CP =< 47 -> 'DISALLOWED'; +lookup(CP) when 48 =< CP, CP =< 57 -> 
'PVALID'; +lookup(CP) when 58 =< CP, CP =< 96 -> 'DISALLOWED'; +lookup(CP) when 97 =< CP, CP =< 122 -> 'PVALID'; +lookup(CP) when 123 =< CP, CP =< 182 -> 'DISALLOWED'; +lookup(CP) when 184 =< CP, CP =< 222 -> 'DISALLOWED'; +lookup(CP) when 223 =< CP, CP =< 246 -> 'PVALID'; +lookup(CP) when 248 =< CP, CP =< 255 -> 'PVALID'; +lookup(CP) when 306 =< CP, CP =< 308 -> 'DISALLOWED'; +lookup(CP) when 311 =< CP, CP =< 312 -> 'PVALID'; +lookup(CP) when 319 =< CP, CP =< 321 -> 'DISALLOWED'; +lookup(CP) when 329 =< CP, CP =< 330 -> 'DISALLOWED'; +lookup(CP) when 376 =< CP, CP =< 377 -> 'DISALLOWED'; +lookup(CP) when 385 =< CP, CP =< 386 -> 'DISALLOWED'; +lookup(CP) when 390 =< CP, CP =< 391 -> 'DISALLOWED'; +lookup(CP) when 393 =< CP, CP =< 395 -> 'DISALLOWED'; +lookup(CP) when 396 =< CP, CP =< 397 -> 'PVALID'; +lookup(CP) when 398 =< CP, CP =< 401 -> 'DISALLOWED'; +lookup(CP) when 403 =< CP, CP =< 404 -> 'DISALLOWED'; +lookup(CP) when 406 =< CP, CP =< 408 -> 'DISALLOWED'; +lookup(CP) when 409 =< CP, CP =< 411 -> 'PVALID'; +lookup(CP) when 412 =< CP, CP =< 413 -> 'DISALLOWED'; +lookup(CP) when 415 =< CP, CP =< 416 -> 'DISALLOWED'; +lookup(CP) when 422 =< CP, CP =< 423 -> 'DISALLOWED'; +lookup(CP) when 426 =< CP, CP =< 427 -> 'PVALID'; +lookup(CP) when 430 =< CP, CP =< 431 -> 'DISALLOWED'; +lookup(CP) when 433 =< CP, CP =< 435 -> 'DISALLOWED'; +lookup(CP) when 439 =< CP, CP =< 440 -> 'DISALLOWED'; +lookup(CP) when 441 =< CP, CP =< 443 -> 'PVALID'; +lookup(CP) when 445 =< CP, CP =< 451 -> 'PVALID'; +lookup(CP) when 452 =< CP, CP =< 461 -> 'DISALLOWED'; +lookup(CP) when 476 =< CP, CP =< 477 -> 'PVALID'; +lookup(CP) when 495 =< CP, CP =< 496 -> 'PVALID'; +lookup(CP) when 497 =< CP, CP =< 500 -> 'DISALLOWED'; +lookup(CP) when 502 =< CP, CP =< 504 -> 'DISALLOWED'; +lookup(CP) when 563 =< CP, CP =< 569 -> 'PVALID'; +lookup(CP) when 570 =< CP, CP =< 571 -> 'DISALLOWED'; +lookup(CP) when 573 =< CP, CP =< 574 -> 'DISALLOWED'; +lookup(CP) when 575 =< CP, CP =< 576 -> 'PVALID'; 
+lookup(CP) when 579 =< CP, CP =< 582 -> 'DISALLOWED'; +lookup(CP) when 591 =< CP, CP =< 687 -> 'PVALID'; +lookup(CP) when 688 =< CP, CP =< 696 -> 'DISALLOWED'; +lookup(CP) when 697 =< CP, CP =< 705 -> 'PVALID'; +lookup(CP) when 706 =< CP, CP =< 709 -> 'DISALLOWED'; +lookup(CP) when 710 =< CP, CP =< 721 -> 'PVALID'; +lookup(CP) when 722 =< CP, CP =< 747 -> 'DISALLOWED'; +lookup(CP) when 751 =< CP, CP =< 767 -> 'DISALLOWED'; +lookup(CP) when 768 =< CP, CP =< 831 -> 'PVALID'; +lookup(CP) when 832 =< CP, CP =< 833 -> 'DISALLOWED'; +lookup(CP) when 835 =< CP, CP =< 837 -> 'DISALLOWED'; +lookup(CP) when 838 =< CP, CP =< 846 -> 'PVALID'; +lookup(CP) when 848 =< CP, CP =< 879 -> 'PVALID'; +lookup(CP) when 888 =< CP, CP =< 889 -> 'UNASSIGNED'; +lookup(CP) when 891 =< CP, CP =< 893 -> 'PVALID'; +lookup(CP) when 894 =< CP, CP =< 895 -> 'DISALLOWED'; +lookup(CP) when 896 =< CP, CP =< 899 -> 'UNASSIGNED'; +lookup(CP) when 900 =< CP, CP =< 906 -> 'DISALLOWED'; +lookup(CP) when 910 =< CP, CP =< 911 -> 'DISALLOWED'; +lookup(CP) when 913 =< CP, CP =< 929 -> 'DISALLOWED'; +lookup(CP) when 931 =< CP, CP =< 939 -> 'DISALLOWED'; +lookup(CP) when 940 =< CP, CP =< 974 -> 'PVALID'; +lookup(CP) when 975 =< CP, CP =< 982 -> 'DISALLOWED'; +lookup(CP) when 1008 =< CP, CP =< 1010 -> 'DISALLOWED'; +lookup(CP) when 1012 =< CP, CP =< 1015 -> 'DISALLOWED'; +lookup(CP) when 1017 =< CP, CP =< 1018 -> 'DISALLOWED'; +lookup(CP) when 1019 =< CP, CP =< 1020 -> 'PVALID'; +lookup(CP) when 1021 =< CP, CP =< 1071 -> 'DISALLOWED'; +lookup(CP) when 1072 =< CP, CP =< 1119 -> 'PVALID'; +lookup(CP) when 1155 =< CP, CP =< 1159 -> 'PVALID'; +lookup(CP) when 1160 =< CP, CP =< 1162 -> 'DISALLOWED'; +lookup(CP) when 1216 =< CP, CP =< 1217 -> 'DISALLOWED'; +lookup(CP) when 1230 =< CP, CP =< 1231 -> 'PVALID'; +lookup(CP) when 1329 =< CP, CP =< 1366 -> 'DISALLOWED'; +lookup(CP) when 1367 =< CP, CP =< 1368 -> 'UNASSIGNED'; +lookup(CP) when 1370 =< CP, CP =< 1375 -> 'DISALLOWED'; +lookup(CP) when 1376 =< CP, CP =< 1414 
-> 'PVALID'; +lookup(CP) when 1417 =< CP, CP =< 1418 -> 'DISALLOWED'; +lookup(CP) when 1419 =< CP, CP =< 1420 -> 'UNASSIGNED'; +lookup(CP) when 1421 =< CP, CP =< 1423 -> 'DISALLOWED'; +lookup(CP) when 1425 =< CP, CP =< 1469 -> 'PVALID'; +lookup(CP) when 1473 =< CP, CP =< 1474 -> 'PVALID'; +lookup(CP) when 1476 =< CP, CP =< 1477 -> 'PVALID'; +lookup(CP) when 1480 =< CP, CP =< 1487 -> 'UNASSIGNED'; +lookup(CP) when 1488 =< CP, CP =< 1514 -> 'PVALID'; +lookup(CP) when 1515 =< CP, CP =< 1518 -> 'UNASSIGNED'; +lookup(CP) when 1519 =< CP, CP =< 1522 -> 'PVALID'; +lookup(CP) when 1523 =< CP, CP =< 1524 -> 'CONTEXTO'; +lookup(CP) when 1525 =< CP, CP =< 1535 -> 'UNASSIGNED'; +lookup(CP) when 1536 =< CP, CP =< 1551 -> 'DISALLOWED'; +lookup(CP) when 1552 =< CP, CP =< 1562 -> 'PVALID'; +lookup(CP) when 1563 =< CP, CP =< 1564 -> 'DISALLOWED'; +lookup(CP) when 1566 =< CP, CP =< 1567 -> 'DISALLOWED'; +lookup(CP) when 1568 =< CP, CP =< 1599 -> 'PVALID'; +lookup(CP) when 1601 =< CP, CP =< 1631 -> 'PVALID'; +lookup(CP) when 1632 =< CP, CP =< 1641 -> 'CONTEXTO'; +lookup(CP) when 1642 =< CP, CP =< 1645 -> 'DISALLOWED'; +lookup(CP) when 1646 =< CP, CP =< 1652 -> 'PVALID'; +lookup(CP) when 1653 =< CP, CP =< 1656 -> 'DISALLOWED'; +lookup(CP) when 1657 =< CP, CP =< 1747 -> 'PVALID'; +lookup(CP) when 1749 =< CP, CP =< 1756 -> 'PVALID'; +lookup(CP) when 1757 =< CP, CP =< 1758 -> 'DISALLOWED'; +lookup(CP) when 1759 =< CP, CP =< 1768 -> 'PVALID'; +lookup(CP) when 1770 =< CP, CP =< 1775 -> 'PVALID'; +lookup(CP) when 1776 =< CP, CP =< 1785 -> 'CONTEXTO'; +lookup(CP) when 1786 =< CP, CP =< 1791 -> 'PVALID'; +lookup(CP) when 1792 =< CP, CP =< 1805 -> 'DISALLOWED'; +lookup(CP) when 1808 =< CP, CP =< 1866 -> 'PVALID'; +lookup(CP) when 1867 =< CP, CP =< 1868 -> 'UNASSIGNED'; +lookup(CP) when 1869 =< CP, CP =< 1969 -> 'PVALID'; +lookup(CP) when 1970 =< CP, CP =< 1983 -> 'UNASSIGNED'; +lookup(CP) when 1984 =< CP, CP =< 2037 -> 'PVALID'; +lookup(CP) when 2038 =< CP, CP =< 2042 -> 'DISALLOWED'; 
+lookup(CP) when 2043 =< CP, CP =< 2044 -> 'UNASSIGNED'; +lookup(CP) when 2046 =< CP, CP =< 2047 -> 'DISALLOWED'; +lookup(CP) when 2048 =< CP, CP =< 2093 -> 'PVALID'; +lookup(CP) when 2094 =< CP, CP =< 2095 -> 'UNASSIGNED'; +lookup(CP) when 2096 =< CP, CP =< 2110 -> 'DISALLOWED'; +lookup(CP) when 2112 =< CP, CP =< 2139 -> 'PVALID'; +lookup(CP) when 2140 =< CP, CP =< 2141 -> 'UNASSIGNED'; +lookup(CP) when 2144 =< CP, CP =< 2154 -> 'PVALID'; +lookup(CP) when 2155 =< CP, CP =< 2207 -> 'UNASSIGNED'; +lookup(CP) when 2208 =< CP, CP =< 2228 -> 'PVALID'; +lookup(CP) when 2230 =< CP, CP =< 2247 -> 'PVALID'; +lookup(CP) when 2248 =< CP, CP =< 2258 -> 'UNASSIGNED'; +lookup(CP) when 2259 =< CP, CP =< 2273 -> 'PVALID'; +lookup(CP) when 2275 =< CP, CP =< 2391 -> 'PVALID'; +lookup(CP) when 2392 =< CP, CP =< 2399 -> 'DISALLOWED'; +lookup(CP) when 2400 =< CP, CP =< 2403 -> 'PVALID'; +lookup(CP) when 2404 =< CP, CP =< 2405 -> 'DISALLOWED'; +lookup(CP) when 2406 =< CP, CP =< 2415 -> 'PVALID'; +lookup(CP) when 2417 =< CP, CP =< 2435 -> 'PVALID'; +lookup(CP) when 2437 =< CP, CP =< 2444 -> 'PVALID'; +lookup(CP) when 2445 =< CP, CP =< 2446 -> 'UNASSIGNED'; +lookup(CP) when 2447 =< CP, CP =< 2448 -> 'PVALID'; +lookup(CP) when 2449 =< CP, CP =< 2450 -> 'UNASSIGNED'; +lookup(CP) when 2451 =< CP, CP =< 2472 -> 'PVALID'; +lookup(CP) when 2474 =< CP, CP =< 2480 -> 'PVALID'; +lookup(CP) when 2483 =< CP, CP =< 2485 -> 'UNASSIGNED'; +lookup(CP) when 2486 =< CP, CP =< 2489 -> 'PVALID'; +lookup(CP) when 2490 =< CP, CP =< 2491 -> 'UNASSIGNED'; +lookup(CP) when 2492 =< CP, CP =< 2500 -> 'PVALID'; +lookup(CP) when 2501 =< CP, CP =< 2502 -> 'UNASSIGNED'; +lookup(CP) when 2503 =< CP, CP =< 2504 -> 'PVALID'; +lookup(CP) when 2505 =< CP, CP =< 2506 -> 'UNASSIGNED'; +lookup(CP) when 2507 =< CP, CP =< 2510 -> 'PVALID'; +lookup(CP) when 2511 =< CP, CP =< 2518 -> 'UNASSIGNED'; +lookup(CP) when 2520 =< CP, CP =< 2523 -> 'UNASSIGNED'; +lookup(CP) when 2524 =< CP, CP =< 2525 -> 'DISALLOWED'; +lookup(CP) when 
2528 =< CP, CP =< 2531 -> 'PVALID'; +lookup(CP) when 2532 =< CP, CP =< 2533 -> 'UNASSIGNED'; +lookup(CP) when 2534 =< CP, CP =< 2545 -> 'PVALID'; +lookup(CP) when 2546 =< CP, CP =< 2555 -> 'DISALLOWED'; +lookup(CP) when 2559 =< CP, CP =< 2560 -> 'UNASSIGNED'; +lookup(CP) when 2561 =< CP, CP =< 2563 -> 'PVALID'; +lookup(CP) when 2565 =< CP, CP =< 2570 -> 'PVALID'; +lookup(CP) when 2571 =< CP, CP =< 2574 -> 'UNASSIGNED'; +lookup(CP) when 2575 =< CP, CP =< 2576 -> 'PVALID'; +lookup(CP) when 2577 =< CP, CP =< 2578 -> 'UNASSIGNED'; +lookup(CP) when 2579 =< CP, CP =< 2600 -> 'PVALID'; +lookup(CP) when 2602 =< CP, CP =< 2608 -> 'PVALID'; +lookup(CP) when 2616 =< CP, CP =< 2617 -> 'PVALID'; +lookup(CP) when 2618 =< CP, CP =< 2619 -> 'UNASSIGNED'; +lookup(CP) when 2622 =< CP, CP =< 2626 -> 'PVALID'; +lookup(CP) when 2627 =< CP, CP =< 2630 -> 'UNASSIGNED'; +lookup(CP) when 2631 =< CP, CP =< 2632 -> 'PVALID'; +lookup(CP) when 2633 =< CP, CP =< 2634 -> 'UNASSIGNED'; +lookup(CP) when 2635 =< CP, CP =< 2637 -> 'PVALID'; +lookup(CP) when 2638 =< CP, CP =< 2640 -> 'UNASSIGNED'; +lookup(CP) when 2642 =< CP, CP =< 2648 -> 'UNASSIGNED'; +lookup(CP) when 2649 =< CP, CP =< 2651 -> 'DISALLOWED'; +lookup(CP) when 2655 =< CP, CP =< 2661 -> 'UNASSIGNED'; +lookup(CP) when 2662 =< CP, CP =< 2677 -> 'PVALID'; +lookup(CP) when 2679 =< CP, CP =< 2688 -> 'UNASSIGNED'; +lookup(CP) when 2689 =< CP, CP =< 2691 -> 'PVALID'; +lookup(CP) when 2693 =< CP, CP =< 2701 -> 'PVALID'; +lookup(CP) when 2703 =< CP, CP =< 2705 -> 'PVALID'; +lookup(CP) when 2707 =< CP, CP =< 2728 -> 'PVALID'; +lookup(CP) when 2730 =< CP, CP =< 2736 -> 'PVALID'; +lookup(CP) when 2738 =< CP, CP =< 2739 -> 'PVALID'; +lookup(CP) when 2741 =< CP, CP =< 2745 -> 'PVALID'; +lookup(CP) when 2746 =< CP, CP =< 2747 -> 'UNASSIGNED'; +lookup(CP) when 2748 =< CP, CP =< 2757 -> 'PVALID'; +lookup(CP) when 2759 =< CP, CP =< 2761 -> 'PVALID'; +lookup(CP) when 2763 =< CP, CP =< 2765 -> 'PVALID'; +lookup(CP) when 2766 =< CP, CP =< 2767 -> 
'UNASSIGNED'; +lookup(CP) when 2769 =< CP, CP =< 2783 -> 'UNASSIGNED'; +lookup(CP) when 2784 =< CP, CP =< 2787 -> 'PVALID'; +lookup(CP) when 2788 =< CP, CP =< 2789 -> 'UNASSIGNED'; +lookup(CP) when 2790 =< CP, CP =< 2799 -> 'PVALID'; +lookup(CP) when 2800 =< CP, CP =< 2801 -> 'DISALLOWED'; +lookup(CP) when 2802 =< CP, CP =< 2808 -> 'UNASSIGNED'; +lookup(CP) when 2809 =< CP, CP =< 2815 -> 'PVALID'; +lookup(CP) when 2817 =< CP, CP =< 2819 -> 'PVALID'; +lookup(CP) when 2821 =< CP, CP =< 2828 -> 'PVALID'; +lookup(CP) when 2829 =< CP, CP =< 2830 -> 'UNASSIGNED'; +lookup(CP) when 2831 =< CP, CP =< 2832 -> 'PVALID'; +lookup(CP) when 2833 =< CP, CP =< 2834 -> 'UNASSIGNED'; +lookup(CP) when 2835 =< CP, CP =< 2856 -> 'PVALID'; +lookup(CP) when 2858 =< CP, CP =< 2864 -> 'PVALID'; +lookup(CP) when 2866 =< CP, CP =< 2867 -> 'PVALID'; +lookup(CP) when 2869 =< CP, CP =< 2873 -> 'PVALID'; +lookup(CP) when 2874 =< CP, CP =< 2875 -> 'UNASSIGNED'; +lookup(CP) when 2876 =< CP, CP =< 2884 -> 'PVALID'; +lookup(CP) when 2885 =< CP, CP =< 2886 -> 'UNASSIGNED'; +lookup(CP) when 2887 =< CP, CP =< 2888 -> 'PVALID'; +lookup(CP) when 2889 =< CP, CP =< 2890 -> 'UNASSIGNED'; +lookup(CP) when 2891 =< CP, CP =< 2893 -> 'PVALID'; +lookup(CP) when 2894 =< CP, CP =< 2900 -> 'UNASSIGNED'; +lookup(CP) when 2901 =< CP, CP =< 2903 -> 'PVALID'; +lookup(CP) when 2904 =< CP, CP =< 2907 -> 'UNASSIGNED'; +lookup(CP) when 2908 =< CP, CP =< 2909 -> 'DISALLOWED'; +lookup(CP) when 2911 =< CP, CP =< 2915 -> 'PVALID'; +lookup(CP) when 2916 =< CP, CP =< 2917 -> 'UNASSIGNED'; +lookup(CP) when 2918 =< CP, CP =< 2927 -> 'PVALID'; +lookup(CP) when 2930 =< CP, CP =< 2935 -> 'DISALLOWED'; +lookup(CP) when 2936 =< CP, CP =< 2945 -> 'UNASSIGNED'; +lookup(CP) when 2946 =< CP, CP =< 2947 -> 'PVALID'; +lookup(CP) when 2949 =< CP, CP =< 2954 -> 'PVALID'; +lookup(CP) when 2955 =< CP, CP =< 2957 -> 'UNASSIGNED'; +lookup(CP) when 2958 =< CP, CP =< 2960 -> 'PVALID'; +lookup(CP) when 2962 =< CP, CP =< 2965 -> 'PVALID'; +lookup(CP) 
when 2966 =< CP, CP =< 2968 -> 'UNASSIGNED'; +lookup(CP) when 2969 =< CP, CP =< 2970 -> 'PVALID'; +lookup(CP) when 2974 =< CP, CP =< 2975 -> 'PVALID'; +lookup(CP) when 2976 =< CP, CP =< 2978 -> 'UNASSIGNED'; +lookup(CP) when 2979 =< CP, CP =< 2980 -> 'PVALID'; +lookup(CP) when 2981 =< CP, CP =< 2983 -> 'UNASSIGNED'; +lookup(CP) when 2984 =< CP, CP =< 2986 -> 'PVALID'; +lookup(CP) when 2987 =< CP, CP =< 2989 -> 'UNASSIGNED'; +lookup(CP) when 2990 =< CP, CP =< 3001 -> 'PVALID'; +lookup(CP) when 3002 =< CP, CP =< 3005 -> 'UNASSIGNED'; +lookup(CP) when 3006 =< CP, CP =< 3010 -> 'PVALID'; +lookup(CP) when 3011 =< CP, CP =< 3013 -> 'UNASSIGNED'; +lookup(CP) when 3014 =< CP, CP =< 3016 -> 'PVALID'; +lookup(CP) when 3018 =< CP, CP =< 3021 -> 'PVALID'; +lookup(CP) when 3022 =< CP, CP =< 3023 -> 'UNASSIGNED'; +lookup(CP) when 3025 =< CP, CP =< 3030 -> 'UNASSIGNED'; +lookup(CP) when 3032 =< CP, CP =< 3045 -> 'UNASSIGNED'; +lookup(CP) when 3046 =< CP, CP =< 3055 -> 'PVALID'; +lookup(CP) when 3056 =< CP, CP =< 3066 -> 'DISALLOWED'; +lookup(CP) when 3067 =< CP, CP =< 3071 -> 'UNASSIGNED'; +lookup(CP) when 3072 =< CP, CP =< 3084 -> 'PVALID'; +lookup(CP) when 3086 =< CP, CP =< 3088 -> 'PVALID'; +lookup(CP) when 3090 =< CP, CP =< 3112 -> 'PVALID'; +lookup(CP) when 3114 =< CP, CP =< 3129 -> 'PVALID'; +lookup(CP) when 3130 =< CP, CP =< 3132 -> 'UNASSIGNED'; +lookup(CP) when 3133 =< CP, CP =< 3140 -> 'PVALID'; +lookup(CP) when 3142 =< CP, CP =< 3144 -> 'PVALID'; +lookup(CP) when 3146 =< CP, CP =< 3149 -> 'PVALID'; +lookup(CP) when 3150 =< CP, CP =< 3156 -> 'UNASSIGNED'; +lookup(CP) when 3157 =< CP, CP =< 3158 -> 'PVALID'; +lookup(CP) when 3160 =< CP, CP =< 3162 -> 'PVALID'; +lookup(CP) when 3163 =< CP, CP =< 3167 -> 'UNASSIGNED'; +lookup(CP) when 3168 =< CP, CP =< 3171 -> 'PVALID'; +lookup(CP) when 3172 =< CP, CP =< 3173 -> 'UNASSIGNED'; +lookup(CP) when 3174 =< CP, CP =< 3183 -> 'PVALID'; +lookup(CP) when 3184 =< CP, CP =< 3190 -> 'UNASSIGNED'; +lookup(CP) when 3191 =< CP, CP =< 3199 
-> 'DISALLOWED'; +lookup(CP) when 3200 =< CP, CP =< 3203 -> 'PVALID'; +lookup(CP) when 3205 =< CP, CP =< 3212 -> 'PVALID'; +lookup(CP) when 3214 =< CP, CP =< 3216 -> 'PVALID'; +lookup(CP) when 3218 =< CP, CP =< 3240 -> 'PVALID'; +lookup(CP) when 3242 =< CP, CP =< 3251 -> 'PVALID'; +lookup(CP) when 3253 =< CP, CP =< 3257 -> 'PVALID'; +lookup(CP) when 3258 =< CP, CP =< 3259 -> 'UNASSIGNED'; +lookup(CP) when 3260 =< CP, CP =< 3268 -> 'PVALID'; +lookup(CP) when 3270 =< CP, CP =< 3272 -> 'PVALID'; +lookup(CP) when 3274 =< CP, CP =< 3277 -> 'PVALID'; +lookup(CP) when 3278 =< CP, CP =< 3284 -> 'UNASSIGNED'; +lookup(CP) when 3285 =< CP, CP =< 3286 -> 'PVALID'; +lookup(CP) when 3287 =< CP, CP =< 3293 -> 'UNASSIGNED'; +lookup(CP) when 3296 =< CP, CP =< 3299 -> 'PVALID'; +lookup(CP) when 3300 =< CP, CP =< 3301 -> 'UNASSIGNED'; +lookup(CP) when 3302 =< CP, CP =< 3311 -> 'PVALID'; +lookup(CP) when 3313 =< CP, CP =< 3314 -> 'PVALID'; +lookup(CP) when 3315 =< CP, CP =< 3327 -> 'UNASSIGNED'; +lookup(CP) when 3328 =< CP, CP =< 3340 -> 'PVALID'; +lookup(CP) when 3342 =< CP, CP =< 3344 -> 'PVALID'; +lookup(CP) when 3346 =< CP, CP =< 3396 -> 'PVALID'; +lookup(CP) when 3398 =< CP, CP =< 3400 -> 'PVALID'; +lookup(CP) when 3402 =< CP, CP =< 3406 -> 'PVALID'; +lookup(CP) when 3408 =< CP, CP =< 3411 -> 'UNASSIGNED'; +lookup(CP) when 3412 =< CP, CP =< 3415 -> 'PVALID'; +lookup(CP) when 3416 =< CP, CP =< 3422 -> 'DISALLOWED'; +lookup(CP) when 3423 =< CP, CP =< 3427 -> 'PVALID'; +lookup(CP) when 3428 =< CP, CP =< 3429 -> 'UNASSIGNED'; +lookup(CP) when 3430 =< CP, CP =< 3439 -> 'PVALID'; +lookup(CP) when 3440 =< CP, CP =< 3449 -> 'DISALLOWED'; +lookup(CP) when 3450 =< CP, CP =< 3455 -> 'PVALID'; +lookup(CP) when 3457 =< CP, CP =< 3459 -> 'PVALID'; +lookup(CP) when 3461 =< CP, CP =< 3478 -> 'PVALID'; +lookup(CP) when 3479 =< CP, CP =< 3481 -> 'UNASSIGNED'; +lookup(CP) when 3482 =< CP, CP =< 3505 -> 'PVALID'; +lookup(CP) when 3507 =< CP, CP =< 3515 -> 'PVALID'; +lookup(CP) when 3518 =< CP, CP =< 
3519 -> 'UNASSIGNED'; +lookup(CP) when 3520 =< CP, CP =< 3526 -> 'PVALID'; +lookup(CP) when 3527 =< CP, CP =< 3529 -> 'UNASSIGNED'; +lookup(CP) when 3531 =< CP, CP =< 3534 -> 'UNASSIGNED'; +lookup(CP) when 3535 =< CP, CP =< 3540 -> 'PVALID'; +lookup(CP) when 3544 =< CP, CP =< 3551 -> 'PVALID'; +lookup(CP) when 3552 =< CP, CP =< 3557 -> 'UNASSIGNED'; +lookup(CP) when 3558 =< CP, CP =< 3567 -> 'PVALID'; +lookup(CP) when 3568 =< CP, CP =< 3569 -> 'UNASSIGNED'; +lookup(CP) when 3570 =< CP, CP =< 3571 -> 'PVALID'; +lookup(CP) when 3573 =< CP, CP =< 3584 -> 'UNASSIGNED'; +lookup(CP) when 3585 =< CP, CP =< 3634 -> 'PVALID'; +lookup(CP) when 3636 =< CP, CP =< 3642 -> 'PVALID'; +lookup(CP) when 3643 =< CP, CP =< 3646 -> 'UNASSIGNED'; +lookup(CP) when 3648 =< CP, CP =< 3662 -> 'PVALID'; +lookup(CP) when 3664 =< CP, CP =< 3673 -> 'PVALID'; +lookup(CP) when 3674 =< CP, CP =< 3675 -> 'DISALLOWED'; +lookup(CP) when 3676 =< CP, CP =< 3712 -> 'UNASSIGNED'; +lookup(CP) when 3713 =< CP, CP =< 3714 -> 'PVALID'; +lookup(CP) when 3718 =< CP, CP =< 3722 -> 'PVALID'; +lookup(CP) when 3724 =< CP, CP =< 3747 -> 'PVALID'; +lookup(CP) when 3751 =< CP, CP =< 3762 -> 'PVALID'; +lookup(CP) when 3764 =< CP, CP =< 3773 -> 'PVALID'; +lookup(CP) when 3774 =< CP, CP =< 3775 -> 'UNASSIGNED'; +lookup(CP) when 3776 =< CP, CP =< 3780 -> 'PVALID'; +lookup(CP) when 3784 =< CP, CP =< 3789 -> 'PVALID'; +lookup(CP) when 3790 =< CP, CP =< 3791 -> 'UNASSIGNED'; +lookup(CP) when 3792 =< CP, CP =< 3801 -> 'PVALID'; +lookup(CP) when 3802 =< CP, CP =< 3803 -> 'UNASSIGNED'; +lookup(CP) when 3804 =< CP, CP =< 3805 -> 'DISALLOWED'; +lookup(CP) when 3806 =< CP, CP =< 3807 -> 'PVALID'; +lookup(CP) when 3808 =< CP, CP =< 3839 -> 'UNASSIGNED'; +lookup(CP) when 3841 =< CP, CP =< 3850 -> 'DISALLOWED'; +lookup(CP) when 3852 =< CP, CP =< 3863 -> 'DISALLOWED'; +lookup(CP) when 3864 =< CP, CP =< 3865 -> 'PVALID'; +lookup(CP) when 3866 =< CP, CP =< 3871 -> 'DISALLOWED'; +lookup(CP) when 3872 =< CP, CP =< 3881 -> 'PVALID'; 
+lookup(CP) when 3882 =< CP, CP =< 3892 -> 'DISALLOWED'; +lookup(CP) when 3898 =< CP, CP =< 3901 -> 'DISALLOWED'; +lookup(CP) when 3902 =< CP, CP =< 3906 -> 'PVALID'; +lookup(CP) when 3908 =< CP, CP =< 3911 -> 'PVALID'; +lookup(CP) when 3913 =< CP, CP =< 3916 -> 'PVALID'; +lookup(CP) when 3918 =< CP, CP =< 3921 -> 'PVALID'; +lookup(CP) when 3923 =< CP, CP =< 3926 -> 'PVALID'; +lookup(CP) when 3928 =< CP, CP =< 3931 -> 'PVALID'; +lookup(CP) when 3933 =< CP, CP =< 3944 -> 'PVALID'; +lookup(CP) when 3946 =< CP, CP =< 3948 -> 'PVALID'; +lookup(CP) when 3949 =< CP, CP =< 3952 -> 'UNASSIGNED'; +lookup(CP) when 3953 =< CP, CP =< 3954 -> 'PVALID'; +lookup(CP) when 3957 =< CP, CP =< 3961 -> 'DISALLOWED'; +lookup(CP) when 3962 =< CP, CP =< 3968 -> 'PVALID'; +lookup(CP) when 3970 =< CP, CP =< 3972 -> 'PVALID'; +lookup(CP) when 3974 =< CP, CP =< 3986 -> 'PVALID'; +lookup(CP) when 3988 =< CP, CP =< 3991 -> 'PVALID'; +lookup(CP) when 3993 =< CP, CP =< 3996 -> 'PVALID'; +lookup(CP) when 3998 =< CP, CP =< 4001 -> 'PVALID'; +lookup(CP) when 4003 =< CP, CP =< 4006 -> 'PVALID'; +lookup(CP) when 4008 =< CP, CP =< 4011 -> 'PVALID'; +lookup(CP) when 4013 =< CP, CP =< 4024 -> 'PVALID'; +lookup(CP) when 4026 =< CP, CP =< 4028 -> 'PVALID'; +lookup(CP) when 4030 =< CP, CP =< 4037 -> 'DISALLOWED'; +lookup(CP) when 4039 =< CP, CP =< 4044 -> 'DISALLOWED'; +lookup(CP) when 4046 =< CP, CP =< 4058 -> 'DISALLOWED'; +lookup(CP) when 4059 =< CP, CP =< 4095 -> 'UNASSIGNED'; +lookup(CP) when 4096 =< CP, CP =< 4169 -> 'PVALID'; +lookup(CP) when 4170 =< CP, CP =< 4175 -> 'DISALLOWED'; +lookup(CP) when 4176 =< CP, CP =< 4253 -> 'PVALID'; +lookup(CP) when 4254 =< CP, CP =< 4293 -> 'DISALLOWED'; +lookup(CP) when 4296 =< CP, CP =< 4300 -> 'UNASSIGNED'; +lookup(CP) when 4302 =< CP, CP =< 4303 -> 'UNASSIGNED'; +lookup(CP) when 4304 =< CP, CP =< 4346 -> 'PVALID'; +lookup(CP) when 4347 =< CP, CP =< 4348 -> 'DISALLOWED'; +lookup(CP) when 4349 =< CP, CP =< 4351 -> 'PVALID'; +lookup(CP) when 4352 =< CP, CP =< 4607 
-> 'DISALLOWED'; +lookup(CP) when 4608 =< CP, CP =< 4680 -> 'PVALID'; +lookup(CP) when 4682 =< CP, CP =< 4685 -> 'PVALID'; +lookup(CP) when 4686 =< CP, CP =< 4687 -> 'UNASSIGNED'; +lookup(CP) when 4688 =< CP, CP =< 4694 -> 'PVALID'; +lookup(CP) when 4698 =< CP, CP =< 4701 -> 'PVALID'; +lookup(CP) when 4702 =< CP, CP =< 4703 -> 'UNASSIGNED'; +lookup(CP) when 4704 =< CP, CP =< 4744 -> 'PVALID'; +lookup(CP) when 4746 =< CP, CP =< 4749 -> 'PVALID'; +lookup(CP) when 4750 =< CP, CP =< 4751 -> 'UNASSIGNED'; +lookup(CP) when 4752 =< CP, CP =< 4784 -> 'PVALID'; +lookup(CP) when 4786 =< CP, CP =< 4789 -> 'PVALID'; +lookup(CP) when 4790 =< CP, CP =< 4791 -> 'UNASSIGNED'; +lookup(CP) when 4792 =< CP, CP =< 4798 -> 'PVALID'; +lookup(CP) when 4802 =< CP, CP =< 4805 -> 'PVALID'; +lookup(CP) when 4806 =< CP, CP =< 4807 -> 'UNASSIGNED'; +lookup(CP) when 4808 =< CP, CP =< 4822 -> 'PVALID'; +lookup(CP) when 4824 =< CP, CP =< 4880 -> 'PVALID'; +lookup(CP) when 4882 =< CP, CP =< 4885 -> 'PVALID'; +lookup(CP) when 4886 =< CP, CP =< 4887 -> 'UNASSIGNED'; +lookup(CP) when 4888 =< CP, CP =< 4954 -> 'PVALID'; +lookup(CP) when 4955 =< CP, CP =< 4956 -> 'UNASSIGNED'; +lookup(CP) when 4957 =< CP, CP =< 4959 -> 'PVALID'; +lookup(CP) when 4960 =< CP, CP =< 4988 -> 'DISALLOWED'; +lookup(CP) when 4989 =< CP, CP =< 4991 -> 'UNASSIGNED'; +lookup(CP) when 4992 =< CP, CP =< 5007 -> 'PVALID'; +lookup(CP) when 5008 =< CP, CP =< 5017 -> 'DISALLOWED'; +lookup(CP) when 5018 =< CP, CP =< 5023 -> 'UNASSIGNED'; +lookup(CP) when 5024 =< CP, CP =< 5109 -> 'PVALID'; +lookup(CP) when 5110 =< CP, CP =< 5111 -> 'UNASSIGNED'; +lookup(CP) when 5112 =< CP, CP =< 5117 -> 'DISALLOWED'; +lookup(CP) when 5118 =< CP, CP =< 5119 -> 'UNASSIGNED'; +lookup(CP) when 5121 =< CP, CP =< 5740 -> 'PVALID'; +lookup(CP) when 5741 =< CP, CP =< 5742 -> 'DISALLOWED'; +lookup(CP) when 5743 =< CP, CP =< 5759 -> 'PVALID'; +lookup(CP) when 5761 =< CP, CP =< 5786 -> 'PVALID'; +lookup(CP) when 5787 =< CP, CP =< 5788 -> 'DISALLOWED'; 
+lookup(CP) when 5789 =< CP, CP =< 5791 -> 'UNASSIGNED'; +lookup(CP) when 5792 =< CP, CP =< 5866 -> 'PVALID'; +lookup(CP) when 5867 =< CP, CP =< 5872 -> 'DISALLOWED'; +lookup(CP) when 5873 =< CP, CP =< 5880 -> 'PVALID'; +lookup(CP) when 5881 =< CP, CP =< 5887 -> 'UNASSIGNED'; +lookup(CP) when 5888 =< CP, CP =< 5900 -> 'PVALID'; +lookup(CP) when 5902 =< CP, CP =< 5908 -> 'PVALID'; +lookup(CP) when 5909 =< CP, CP =< 5919 -> 'UNASSIGNED'; +lookup(CP) when 5920 =< CP, CP =< 5940 -> 'PVALID'; +lookup(CP) when 5941 =< CP, CP =< 5942 -> 'DISALLOWED'; +lookup(CP) when 5943 =< CP, CP =< 5951 -> 'UNASSIGNED'; +lookup(CP) when 5952 =< CP, CP =< 5971 -> 'PVALID'; +lookup(CP) when 5972 =< CP, CP =< 5983 -> 'UNASSIGNED'; +lookup(CP) when 5984 =< CP, CP =< 5996 -> 'PVALID'; +lookup(CP) when 5998 =< CP, CP =< 6000 -> 'PVALID'; +lookup(CP) when 6002 =< CP, CP =< 6003 -> 'PVALID'; +lookup(CP) when 6004 =< CP, CP =< 6015 -> 'UNASSIGNED'; +lookup(CP) when 6016 =< CP, CP =< 6067 -> 'PVALID'; +lookup(CP) when 6068 =< CP, CP =< 6069 -> 'DISALLOWED'; +lookup(CP) when 6070 =< CP, CP =< 6099 -> 'PVALID'; +lookup(CP) when 6100 =< CP, CP =< 6102 -> 'DISALLOWED'; +lookup(CP) when 6104 =< CP, CP =< 6107 -> 'DISALLOWED'; +lookup(CP) when 6108 =< CP, CP =< 6109 -> 'PVALID'; +lookup(CP) when 6110 =< CP, CP =< 6111 -> 'UNASSIGNED'; +lookup(CP) when 6112 =< CP, CP =< 6121 -> 'PVALID'; +lookup(CP) when 6122 =< CP, CP =< 6127 -> 'UNASSIGNED'; +lookup(CP) when 6128 =< CP, CP =< 6137 -> 'DISALLOWED'; +lookup(CP) when 6138 =< CP, CP =< 6143 -> 'UNASSIGNED'; +lookup(CP) when 6144 =< CP, CP =< 6158 -> 'DISALLOWED'; +lookup(CP) when 6160 =< CP, CP =< 6169 -> 'PVALID'; +lookup(CP) when 6170 =< CP, CP =< 6175 -> 'UNASSIGNED'; +lookup(CP) when 6176 =< CP, CP =< 6264 -> 'PVALID'; +lookup(CP) when 6265 =< CP, CP =< 6271 -> 'UNASSIGNED'; +lookup(CP) when 6272 =< CP, CP =< 6314 -> 'PVALID'; +lookup(CP) when 6315 =< CP, CP =< 6319 -> 'UNASSIGNED'; +lookup(CP) when 6320 =< CP, CP =< 6389 -> 'PVALID'; +lookup(CP) 
when 6390 =< CP, CP =< 6399 -> 'UNASSIGNED'; +lookup(CP) when 6400 =< CP, CP =< 6430 -> 'PVALID'; +lookup(CP) when 6432 =< CP, CP =< 6443 -> 'PVALID'; +lookup(CP) when 6444 =< CP, CP =< 6447 -> 'UNASSIGNED'; +lookup(CP) when 6448 =< CP, CP =< 6459 -> 'PVALID'; +lookup(CP) when 6460 =< CP, CP =< 6463 -> 'UNASSIGNED'; +lookup(CP) when 6465 =< CP, CP =< 6467 -> 'UNASSIGNED'; +lookup(CP) when 6468 =< CP, CP =< 6469 -> 'DISALLOWED'; +lookup(CP) when 6470 =< CP, CP =< 6509 -> 'PVALID'; +lookup(CP) when 6510 =< CP, CP =< 6511 -> 'UNASSIGNED'; +lookup(CP) when 6512 =< CP, CP =< 6516 -> 'PVALID'; +lookup(CP) when 6517 =< CP, CP =< 6527 -> 'UNASSIGNED'; +lookup(CP) when 6528 =< CP, CP =< 6571 -> 'PVALID'; +lookup(CP) when 6572 =< CP, CP =< 6575 -> 'UNASSIGNED'; +lookup(CP) when 6576 =< CP, CP =< 6601 -> 'PVALID'; +lookup(CP) when 6602 =< CP, CP =< 6607 -> 'UNASSIGNED'; +lookup(CP) when 6608 =< CP, CP =< 6617 -> 'PVALID'; +lookup(CP) when 6619 =< CP, CP =< 6621 -> 'UNASSIGNED'; +lookup(CP) when 6622 =< CP, CP =< 6655 -> 'DISALLOWED'; +lookup(CP) when 6656 =< CP, CP =< 6683 -> 'PVALID'; +lookup(CP) when 6684 =< CP, CP =< 6685 -> 'UNASSIGNED'; +lookup(CP) when 6686 =< CP, CP =< 6687 -> 'DISALLOWED'; +lookup(CP) when 6688 =< CP, CP =< 6750 -> 'PVALID'; +lookup(CP) when 6752 =< CP, CP =< 6780 -> 'PVALID'; +lookup(CP) when 6781 =< CP, CP =< 6782 -> 'UNASSIGNED'; +lookup(CP) when 6783 =< CP, CP =< 6793 -> 'PVALID'; +lookup(CP) when 6794 =< CP, CP =< 6799 -> 'UNASSIGNED'; +lookup(CP) when 6800 =< CP, CP =< 6809 -> 'PVALID'; +lookup(CP) when 6810 =< CP, CP =< 6815 -> 'UNASSIGNED'; +lookup(CP) when 6816 =< CP, CP =< 6822 -> 'DISALLOWED'; +lookup(CP) when 6824 =< CP, CP =< 6829 -> 'DISALLOWED'; +lookup(CP) when 6830 =< CP, CP =< 6831 -> 'UNASSIGNED'; +lookup(CP) when 6832 =< CP, CP =< 6845 -> 'PVALID'; +lookup(CP) when 6847 =< CP, CP =< 6848 -> 'PVALID'; +lookup(CP) when 6849 =< CP, CP =< 6911 -> 'UNASSIGNED'; +lookup(CP) when 6912 =< CP, CP =< 6987 -> 'PVALID'; +lookup(CP) when 6988 
=< CP, CP =< 6991 -> 'UNASSIGNED'; +lookup(CP) when 6992 =< CP, CP =< 7001 -> 'PVALID'; +lookup(CP) when 7002 =< CP, CP =< 7018 -> 'DISALLOWED'; +lookup(CP) when 7019 =< CP, CP =< 7027 -> 'PVALID'; +lookup(CP) when 7028 =< CP, CP =< 7036 -> 'DISALLOWED'; +lookup(CP) when 7037 =< CP, CP =< 7039 -> 'UNASSIGNED'; +lookup(CP) when 7040 =< CP, CP =< 7155 -> 'PVALID'; +lookup(CP) when 7156 =< CP, CP =< 7163 -> 'UNASSIGNED'; +lookup(CP) when 7164 =< CP, CP =< 7167 -> 'DISALLOWED'; +lookup(CP) when 7168 =< CP, CP =< 7223 -> 'PVALID'; +lookup(CP) when 7224 =< CP, CP =< 7226 -> 'UNASSIGNED'; +lookup(CP) when 7227 =< CP, CP =< 7231 -> 'DISALLOWED'; +lookup(CP) when 7232 =< CP, CP =< 7241 -> 'PVALID'; +lookup(CP) when 7242 =< CP, CP =< 7244 -> 'UNASSIGNED'; +lookup(CP) when 7245 =< CP, CP =< 7293 -> 'PVALID'; +lookup(CP) when 7294 =< CP, CP =< 7304 -> 'DISALLOWED'; +lookup(CP) when 7305 =< CP, CP =< 7311 -> 'UNASSIGNED'; +lookup(CP) when 7312 =< CP, CP =< 7354 -> 'DISALLOWED'; +lookup(CP) when 7355 =< CP, CP =< 7356 -> 'UNASSIGNED'; +lookup(CP) when 7357 =< CP, CP =< 7367 -> 'DISALLOWED'; +lookup(CP) when 7368 =< CP, CP =< 7375 -> 'UNASSIGNED'; +lookup(CP) when 7376 =< CP, CP =< 7378 -> 'PVALID'; +lookup(CP) when 7380 =< CP, CP =< 7418 -> 'PVALID'; +lookup(CP) when 7419 =< CP, CP =< 7423 -> 'UNASSIGNED'; +lookup(CP) when 7424 =< CP, CP =< 7467 -> 'PVALID'; +lookup(CP) when 7468 =< CP, CP =< 7470 -> 'DISALLOWED'; +lookup(CP) when 7472 =< CP, CP =< 7482 -> 'DISALLOWED'; +lookup(CP) when 7484 =< CP, CP =< 7501 -> 'DISALLOWED'; +lookup(CP) when 7503 =< CP, CP =< 7530 -> 'DISALLOWED'; +lookup(CP) when 7531 =< CP, CP =< 7543 -> 'PVALID'; +lookup(CP) when 7545 =< CP, CP =< 7578 -> 'PVALID'; +lookup(CP) when 7579 =< CP, CP =< 7615 -> 'DISALLOWED'; +lookup(CP) when 7616 =< CP, CP =< 7673 -> 'PVALID'; +lookup(CP) when 7675 =< CP, CP =< 7679 -> 'PVALID'; +lookup(CP) when 7829 =< CP, CP =< 7833 -> 'PVALID'; +lookup(CP) when 7834 =< CP, CP =< 7835 -> 'DISALLOWED'; +lookup(CP) when 7836 =< 
CP, CP =< 7837 -> 'PVALID'; +lookup(CP) when 7935 =< CP, CP =< 7943 -> 'PVALID'; +lookup(CP) when 7944 =< CP, CP =< 7951 -> 'DISALLOWED'; +lookup(CP) when 7952 =< CP, CP =< 7957 -> 'PVALID'; +lookup(CP) when 7958 =< CP, CP =< 7959 -> 'UNASSIGNED'; +lookup(CP) when 7960 =< CP, CP =< 7965 -> 'DISALLOWED'; +lookup(CP) when 7966 =< CP, CP =< 7967 -> 'UNASSIGNED'; +lookup(CP) when 7968 =< CP, CP =< 7975 -> 'PVALID'; +lookup(CP) when 7976 =< CP, CP =< 7983 -> 'DISALLOWED'; +lookup(CP) when 7984 =< CP, CP =< 7991 -> 'PVALID'; +lookup(CP) when 7992 =< CP, CP =< 7999 -> 'DISALLOWED'; +lookup(CP) when 8000 =< CP, CP =< 8005 -> 'PVALID'; +lookup(CP) when 8006 =< CP, CP =< 8007 -> 'UNASSIGNED'; +lookup(CP) when 8008 =< CP, CP =< 8013 -> 'DISALLOWED'; +lookup(CP) when 8014 =< CP, CP =< 8015 -> 'UNASSIGNED'; +lookup(CP) when 8016 =< CP, CP =< 8023 -> 'PVALID'; +lookup(CP) when 8032 =< CP, CP =< 8039 -> 'PVALID'; +lookup(CP) when 8040 =< CP, CP =< 8047 -> 'DISALLOWED'; +lookup(CP) when 8062 =< CP, CP =< 8063 -> 'UNASSIGNED'; +lookup(CP) when 8064 =< CP, CP =< 8111 -> 'DISALLOWED'; +lookup(CP) when 8112 =< CP, CP =< 8113 -> 'PVALID'; +lookup(CP) when 8114 =< CP, CP =< 8116 -> 'DISALLOWED'; +lookup(CP) when 8119 =< CP, CP =< 8132 -> 'DISALLOWED'; +lookup(CP) when 8135 =< CP, CP =< 8143 -> 'DISALLOWED'; +lookup(CP) when 8144 =< CP, CP =< 8146 -> 'PVALID'; +lookup(CP) when 8148 =< CP, CP =< 8149 -> 'UNASSIGNED'; +lookup(CP) when 8150 =< CP, CP =< 8151 -> 'PVALID'; +lookup(CP) when 8152 =< CP, CP =< 8155 -> 'DISALLOWED'; +lookup(CP) when 8157 =< CP, CP =< 8159 -> 'DISALLOWED'; +lookup(CP) when 8160 =< CP, CP =< 8162 -> 'PVALID'; +lookup(CP) when 8164 =< CP, CP =< 8167 -> 'PVALID'; +lookup(CP) when 8168 =< CP, CP =< 8175 -> 'DISALLOWED'; +lookup(CP) when 8176 =< CP, CP =< 8177 -> 'UNASSIGNED'; +lookup(CP) when 8178 =< CP, CP =< 8180 -> 'DISALLOWED'; +lookup(CP) when 8183 =< CP, CP =< 8190 -> 'DISALLOWED'; +lookup(CP) when 8192 =< CP, CP =< 8203 -> 'DISALLOWED'; +lookup(CP) when 8204 =< 
CP, CP =< 8205 -> 'CONTEXTJ'; +lookup(CP) when 8206 =< CP, CP =< 8292 -> 'DISALLOWED'; +lookup(CP) when 8294 =< CP, CP =< 8305 -> 'DISALLOWED'; +lookup(CP) when 8306 =< CP, CP =< 8307 -> 'UNASSIGNED'; +lookup(CP) when 8308 =< CP, CP =< 8334 -> 'DISALLOWED'; +lookup(CP) when 8336 =< CP, CP =< 8348 -> 'DISALLOWED'; +lookup(CP) when 8349 =< CP, CP =< 8351 -> 'UNASSIGNED'; +lookup(CP) when 8352 =< CP, CP =< 8383 -> 'DISALLOWED'; +lookup(CP) when 8384 =< CP, CP =< 8399 -> 'UNASSIGNED'; +lookup(CP) when 8400 =< CP, CP =< 8432 -> 'DISALLOWED'; +lookup(CP) when 8433 =< CP, CP =< 8447 -> 'UNASSIGNED'; +lookup(CP) when 8448 =< CP, CP =< 8525 -> 'DISALLOWED'; +lookup(CP) when 8527 =< CP, CP =< 8579 -> 'DISALLOWED'; +lookup(CP) when 8581 =< CP, CP =< 8587 -> 'DISALLOWED'; +lookup(CP) when 8588 =< CP, CP =< 8591 -> 'UNASSIGNED'; +lookup(CP) when 8592 =< CP, CP =< 9254 -> 'DISALLOWED'; +lookup(CP) when 9255 =< CP, CP =< 9279 -> 'UNASSIGNED'; +lookup(CP) when 9280 =< CP, CP =< 9290 -> 'DISALLOWED'; +lookup(CP) when 9291 =< CP, CP =< 9311 -> 'UNASSIGNED'; +lookup(CP) when 9312 =< CP, CP =< 11123 -> 'DISALLOWED'; +lookup(CP) when 11124 =< CP, CP =< 11125 -> 'UNASSIGNED'; +lookup(CP) when 11126 =< CP, CP =< 11157 -> 'DISALLOWED'; +lookup(CP) when 11159 =< CP, CP =< 11310 -> 'DISALLOWED'; +lookup(CP) when 11312 =< CP, CP =< 11358 -> 'PVALID'; +lookup(CP) when 11362 =< CP, CP =< 11364 -> 'DISALLOWED'; +lookup(CP) when 11365 =< CP, CP =< 11366 -> 'PVALID'; +lookup(CP) when 11373 =< CP, CP =< 11376 -> 'DISALLOWED'; +lookup(CP) when 11379 =< CP, CP =< 11380 -> 'PVALID'; +lookup(CP) when 11382 =< CP, CP =< 11387 -> 'PVALID'; +lookup(CP) when 11388 =< CP, CP =< 11392 -> 'DISALLOWED'; +lookup(CP) when 11491 =< CP, CP =< 11492 -> 'PVALID'; +lookup(CP) when 11493 =< CP, CP =< 11499 -> 'DISALLOWED'; +lookup(CP) when 11502 =< CP, CP =< 11505 -> 'PVALID'; +lookup(CP) when 11508 =< CP, CP =< 11512 -> 'UNASSIGNED'; +lookup(CP) when 11513 =< CP, CP =< 11519 -> 'DISALLOWED'; +lookup(CP) when 11520 
=< CP, CP =< 11557 -> 'PVALID'; +lookup(CP) when 11560 =< CP, CP =< 11564 -> 'UNASSIGNED'; +lookup(CP) when 11566 =< CP, CP =< 11567 -> 'UNASSIGNED'; +lookup(CP) when 11568 =< CP, CP =< 11623 -> 'PVALID'; +lookup(CP) when 11624 =< CP, CP =< 11630 -> 'UNASSIGNED'; +lookup(CP) when 11631 =< CP, CP =< 11632 -> 'DISALLOWED'; +lookup(CP) when 11633 =< CP, CP =< 11646 -> 'UNASSIGNED'; +lookup(CP) when 11647 =< CP, CP =< 11670 -> 'PVALID'; +lookup(CP) when 11671 =< CP, CP =< 11679 -> 'UNASSIGNED'; +lookup(CP) when 11680 =< CP, CP =< 11686 -> 'PVALID'; +lookup(CP) when 11688 =< CP, CP =< 11694 -> 'PVALID'; +lookup(CP) when 11696 =< CP, CP =< 11702 -> 'PVALID'; +lookup(CP) when 11704 =< CP, CP =< 11710 -> 'PVALID'; +lookup(CP) when 11712 =< CP, CP =< 11718 -> 'PVALID'; +lookup(CP) when 11720 =< CP, CP =< 11726 -> 'PVALID'; +lookup(CP) when 11728 =< CP, CP =< 11734 -> 'PVALID'; +lookup(CP) when 11736 =< CP, CP =< 11742 -> 'PVALID'; +lookup(CP) when 11744 =< CP, CP =< 11775 -> 'PVALID'; +lookup(CP) when 11776 =< CP, CP =< 11822 -> 'DISALLOWED'; +lookup(CP) when 11824 =< CP, CP =< 11858 -> 'DISALLOWED'; +lookup(CP) when 11859 =< CP, CP =< 11903 -> 'UNASSIGNED'; +lookup(CP) when 11904 =< CP, CP =< 11929 -> 'DISALLOWED'; +lookup(CP) when 11931 =< CP, CP =< 12019 -> 'DISALLOWED'; +lookup(CP) when 12020 =< CP, CP =< 12031 -> 'UNASSIGNED'; +lookup(CP) when 12032 =< CP, CP =< 12245 -> 'DISALLOWED'; +lookup(CP) when 12246 =< CP, CP =< 12271 -> 'UNASSIGNED'; +lookup(CP) when 12272 =< CP, CP =< 12283 -> 'DISALLOWED'; +lookup(CP) when 12284 =< CP, CP =< 12287 -> 'UNASSIGNED'; +lookup(CP) when 12288 =< CP, CP =< 12292 -> 'DISALLOWED'; +lookup(CP) when 12293 =< CP, CP =< 12295 -> 'PVALID'; +lookup(CP) when 12296 =< CP, CP =< 12329 -> 'DISALLOWED'; +lookup(CP) when 12330 =< CP, CP =< 12333 -> 'PVALID'; +lookup(CP) when 12334 =< CP, CP =< 12347 -> 'DISALLOWED'; +lookup(CP) when 12349 =< CP, CP =< 12351 -> 'DISALLOWED'; +lookup(CP) when 12353 =< CP, CP =< 12438 -> 'PVALID'; +lookup(CP) when 
12439 =< CP, CP =< 12440 -> 'UNASSIGNED'; +lookup(CP) when 12441 =< CP, CP =< 12442 -> 'PVALID'; +lookup(CP) when 12443 =< CP, CP =< 12444 -> 'DISALLOWED'; +lookup(CP) when 12445 =< CP, CP =< 12446 -> 'PVALID'; +lookup(CP) when 12447 =< CP, CP =< 12448 -> 'DISALLOWED'; +lookup(CP) when 12449 =< CP, CP =< 12538 -> 'PVALID'; +lookup(CP) when 12540 =< CP, CP =< 12542 -> 'PVALID'; +lookup(CP) when 12544 =< CP, CP =< 12548 -> 'UNASSIGNED'; +lookup(CP) when 12549 =< CP, CP =< 12591 -> 'PVALID'; +lookup(CP) when 12593 =< CP, CP =< 12686 -> 'DISALLOWED'; +lookup(CP) when 12688 =< CP, CP =< 12703 -> 'DISALLOWED'; +lookup(CP) when 12704 =< CP, CP =< 12735 -> 'PVALID'; +lookup(CP) when 12736 =< CP, CP =< 12771 -> 'DISALLOWED'; +lookup(CP) when 12772 =< CP, CP =< 12783 -> 'UNASSIGNED'; +lookup(CP) when 12784 =< CP, CP =< 12799 -> 'PVALID'; +lookup(CP) when 12800 =< CP, CP =< 12830 -> 'DISALLOWED'; +lookup(CP) when 12832 =< CP, CP =< 13311 -> 'DISALLOWED'; +lookup(CP) when 13312 =< CP, CP =< 19903 -> 'PVALID'; +lookup(CP) when 19904 =< CP, CP =< 19967 -> 'DISALLOWED'; +lookup(CP) when 19968 =< CP, CP =< 40956 -> 'PVALID'; +lookup(CP) when 40957 =< CP, CP =< 40959 -> 'UNASSIGNED'; +lookup(CP) when 40960 =< CP, CP =< 42124 -> 'PVALID'; +lookup(CP) when 42125 =< CP, CP =< 42127 -> 'UNASSIGNED'; +lookup(CP) when 42128 =< CP, CP =< 42182 -> 'DISALLOWED'; +lookup(CP) when 42183 =< CP, CP =< 42191 -> 'UNASSIGNED'; +lookup(CP) when 42192 =< CP, CP =< 42237 -> 'PVALID'; +lookup(CP) when 42238 =< CP, CP =< 42239 -> 'DISALLOWED'; +lookup(CP) when 42240 =< CP, CP =< 42508 -> 'PVALID'; +lookup(CP) when 42509 =< CP, CP =< 42511 -> 'DISALLOWED'; +lookup(CP) when 42512 =< CP, CP =< 42539 -> 'PVALID'; +lookup(CP) when 42540 =< CP, CP =< 42559 -> 'UNASSIGNED'; +lookup(CP) when 42605 =< CP, CP =< 42607 -> 'PVALID'; +lookup(CP) when 42608 =< CP, CP =< 42611 -> 'DISALLOWED'; +lookup(CP) when 42612 =< CP, CP =< 42621 -> 'PVALID'; +lookup(CP) when 42652 =< CP, CP =< 42653 -> 'DISALLOWED'; +lookup(CP) 
when 42654 =< CP, CP =< 42725 -> 'PVALID'; +lookup(CP) when 42726 =< CP, CP =< 42735 -> 'DISALLOWED'; +lookup(CP) when 42736 =< CP, CP =< 42737 -> 'PVALID'; +lookup(CP) when 42738 =< CP, CP =< 42743 -> 'DISALLOWED'; +lookup(CP) when 42744 =< CP, CP =< 42751 -> 'UNASSIGNED'; +lookup(CP) when 42752 =< CP, CP =< 42774 -> 'DISALLOWED'; +lookup(CP) when 42775 =< CP, CP =< 42783 -> 'PVALID'; +lookup(CP) when 42784 =< CP, CP =< 42786 -> 'DISALLOWED'; +lookup(CP) when 42799 =< CP, CP =< 42801 -> 'PVALID'; +lookup(CP) when 42865 =< CP, CP =< 42872 -> 'PVALID'; +lookup(CP) when 42877 =< CP, CP =< 42878 -> 'DISALLOWED'; +lookup(CP) when 42887 =< CP, CP =< 42888 -> 'PVALID'; +lookup(CP) when 42889 =< CP, CP =< 42891 -> 'DISALLOWED'; +lookup(CP) when 42894 =< CP, CP =< 42895 -> 'PVALID'; +lookup(CP) when 42899 =< CP, CP =< 42901 -> 'PVALID'; +lookup(CP) when 42922 =< CP, CP =< 42926 -> 'DISALLOWED'; +lookup(CP) when 42928 =< CP, CP =< 42932 -> 'DISALLOWED'; +lookup(CP) when 42944 =< CP, CP =< 42945 -> 'UNASSIGNED'; +lookup(CP) when 42948 =< CP, CP =< 42951 -> 'DISALLOWED'; +lookup(CP) when 42955 =< CP, CP =< 42996 -> 'UNASSIGNED'; +lookup(CP) when 42998 =< CP, CP =< 42999 -> 'PVALID'; +lookup(CP) when 43000 =< CP, CP =< 43001 -> 'DISALLOWED'; +lookup(CP) when 43002 =< CP, CP =< 43047 -> 'PVALID'; +lookup(CP) when 43048 =< CP, CP =< 43051 -> 'DISALLOWED'; +lookup(CP) when 43053 =< CP, CP =< 43055 -> 'UNASSIGNED'; +lookup(CP) when 43056 =< CP, CP =< 43065 -> 'DISALLOWED'; +lookup(CP) when 43066 =< CP, CP =< 43071 -> 'UNASSIGNED'; +lookup(CP) when 43072 =< CP, CP =< 43123 -> 'PVALID'; +lookup(CP) when 43124 =< CP, CP =< 43127 -> 'DISALLOWED'; +lookup(CP) when 43128 =< CP, CP =< 43135 -> 'UNASSIGNED'; +lookup(CP) when 43136 =< CP, CP =< 43205 -> 'PVALID'; +lookup(CP) when 43206 =< CP, CP =< 43213 -> 'UNASSIGNED'; +lookup(CP) when 43214 =< CP, CP =< 43215 -> 'DISALLOWED'; +lookup(CP) when 43216 =< CP, CP =< 43225 -> 'PVALID'; +lookup(CP) when 43226 =< CP, CP =< 43231 -> 
'UNASSIGNED'; +lookup(CP) when 43232 =< CP, CP =< 43255 -> 'PVALID'; +lookup(CP) when 43256 =< CP, CP =< 43258 -> 'DISALLOWED'; +lookup(CP) when 43261 =< CP, CP =< 43309 -> 'PVALID'; +lookup(CP) when 43310 =< CP, CP =< 43311 -> 'DISALLOWED'; +lookup(CP) when 43312 =< CP, CP =< 43347 -> 'PVALID'; +lookup(CP) when 43348 =< CP, CP =< 43358 -> 'UNASSIGNED'; +lookup(CP) when 43359 =< CP, CP =< 43388 -> 'DISALLOWED'; +lookup(CP) when 43389 =< CP, CP =< 43391 -> 'UNASSIGNED'; +lookup(CP) when 43392 =< CP, CP =< 43456 -> 'PVALID'; +lookup(CP) when 43457 =< CP, CP =< 43469 -> 'DISALLOWED'; +lookup(CP) when 43471 =< CP, CP =< 43481 -> 'PVALID'; +lookup(CP) when 43482 =< CP, CP =< 43485 -> 'UNASSIGNED'; +lookup(CP) when 43486 =< CP, CP =< 43487 -> 'DISALLOWED'; +lookup(CP) when 43488 =< CP, CP =< 43518 -> 'PVALID'; +lookup(CP) when 43520 =< CP, CP =< 43574 -> 'PVALID'; +lookup(CP) when 43575 =< CP, CP =< 43583 -> 'UNASSIGNED'; +lookup(CP) when 43584 =< CP, CP =< 43597 -> 'PVALID'; +lookup(CP) when 43598 =< CP, CP =< 43599 -> 'UNASSIGNED'; +lookup(CP) when 43600 =< CP, CP =< 43609 -> 'PVALID'; +lookup(CP) when 43610 =< CP, CP =< 43611 -> 'UNASSIGNED'; +lookup(CP) when 43612 =< CP, CP =< 43615 -> 'DISALLOWED'; +lookup(CP) when 43616 =< CP, CP =< 43638 -> 'PVALID'; +lookup(CP) when 43639 =< CP, CP =< 43641 -> 'DISALLOWED'; +lookup(CP) when 43642 =< CP, CP =< 43714 -> 'PVALID'; +lookup(CP) when 43715 =< CP, CP =< 43738 -> 'UNASSIGNED'; +lookup(CP) when 43739 =< CP, CP =< 43741 -> 'PVALID'; +lookup(CP) when 43742 =< CP, CP =< 43743 -> 'DISALLOWED'; +lookup(CP) when 43744 =< CP, CP =< 43759 -> 'PVALID'; +lookup(CP) when 43760 =< CP, CP =< 43761 -> 'DISALLOWED'; +lookup(CP) when 43762 =< CP, CP =< 43766 -> 'PVALID'; +lookup(CP) when 43767 =< CP, CP =< 43776 -> 'UNASSIGNED'; +lookup(CP) when 43777 =< CP, CP =< 43782 -> 'PVALID'; +lookup(CP) when 43783 =< CP, CP =< 43784 -> 'UNASSIGNED'; +lookup(CP) when 43785 =< CP, CP =< 43790 -> 'PVALID'; +lookup(CP) when 43791 =< CP, CP =< 43792 
-> 'UNASSIGNED'; +lookup(CP) when 43793 =< CP, CP =< 43798 -> 'PVALID'; +lookup(CP) when 43799 =< CP, CP =< 43807 -> 'UNASSIGNED'; +lookup(CP) when 43808 =< CP, CP =< 43814 -> 'PVALID'; +lookup(CP) when 43816 =< CP, CP =< 43822 -> 'PVALID'; +lookup(CP) when 43824 =< CP, CP =< 43866 -> 'PVALID'; +lookup(CP) when 43867 =< CP, CP =< 43871 -> 'DISALLOWED'; +lookup(CP) when 43872 =< CP, CP =< 43880 -> 'PVALID'; +lookup(CP) when 43881 =< CP, CP =< 43883 -> 'DISALLOWED'; +lookup(CP) when 43884 =< CP, CP =< 43887 -> 'UNASSIGNED'; +lookup(CP) when 43888 =< CP, CP =< 43967 -> 'DISALLOWED'; +lookup(CP) when 43968 =< CP, CP =< 44010 -> 'PVALID'; +lookup(CP) when 44012 =< CP, CP =< 44013 -> 'PVALID'; +lookup(CP) when 44014 =< CP, CP =< 44015 -> 'UNASSIGNED'; +lookup(CP) when 44016 =< CP, CP =< 44025 -> 'PVALID'; +lookup(CP) when 44026 =< CP, CP =< 44031 -> 'UNASSIGNED'; +lookup(CP) when 44032 =< CP, CP =< 55203 -> 'PVALID'; +lookup(CP) when 55204 =< CP, CP =< 55215 -> 'UNASSIGNED'; +lookup(CP) when 55216 =< CP, CP =< 55238 -> 'DISALLOWED'; +lookup(CP) when 55239 =< CP, CP =< 55242 -> 'UNASSIGNED'; +lookup(CP) when 55243 =< CP, CP =< 55291 -> 'DISALLOWED'; +lookup(CP) when 55292 =< CP, CP =< 55295 -> 'UNASSIGNED'; +lookup(CP) when 55296 =< CP, CP =< 64013 -> 'DISALLOWED'; +lookup(CP) when 64014 =< CP, CP =< 64015 -> 'PVALID'; +lookup(CP) when 64019 =< CP, CP =< 64020 -> 'PVALID'; +lookup(CP) when 64021 =< CP, CP =< 64030 -> 'DISALLOWED'; +lookup(CP) when 64035 =< CP, CP =< 64036 -> 'PVALID'; +lookup(CP) when 64037 =< CP, CP =< 64038 -> 'DISALLOWED'; +lookup(CP) when 64039 =< CP, CP =< 64041 -> 'PVALID'; +lookup(CP) when 64042 =< CP, CP =< 64109 -> 'DISALLOWED'; +lookup(CP) when 64110 =< CP, CP =< 64111 -> 'UNASSIGNED'; +lookup(CP) when 64112 =< CP, CP =< 64217 -> 'DISALLOWED'; +lookup(CP) when 64218 =< CP, CP =< 64255 -> 'UNASSIGNED'; +lookup(CP) when 64256 =< CP, CP =< 64262 -> 'DISALLOWED'; +lookup(CP) when 64263 =< CP, CP =< 64274 -> 'UNASSIGNED'; +lookup(CP) when 64275 =< 
CP, CP =< 64279 -> 'DISALLOWED'; +lookup(CP) when 64280 =< CP, CP =< 64284 -> 'UNASSIGNED'; +lookup(CP) when 64287 =< CP, CP =< 64310 -> 'DISALLOWED'; +lookup(CP) when 64312 =< CP, CP =< 64316 -> 'DISALLOWED'; +lookup(CP) when 64320 =< CP, CP =< 64321 -> 'DISALLOWED'; +lookup(CP) when 64323 =< CP, CP =< 64324 -> 'DISALLOWED'; +lookup(CP) when 64326 =< CP, CP =< 64449 -> 'DISALLOWED'; +lookup(CP) when 64450 =< CP, CP =< 64466 -> 'UNASSIGNED'; +lookup(CP) when 64467 =< CP, CP =< 64831 -> 'DISALLOWED'; +lookup(CP) when 64832 =< CP, CP =< 64847 -> 'UNASSIGNED'; +lookup(CP) when 64848 =< CP, CP =< 64911 -> 'DISALLOWED'; +lookup(CP) when 64912 =< CP, CP =< 64913 -> 'UNASSIGNED'; +lookup(CP) when 64914 =< CP, CP =< 64967 -> 'DISALLOWED'; +lookup(CP) when 64968 =< CP, CP =< 64975 -> 'UNASSIGNED'; +lookup(CP) when 64976 =< CP, CP =< 65021 -> 'DISALLOWED'; +lookup(CP) when 65022 =< CP, CP =< 65023 -> 'UNASSIGNED'; +lookup(CP) when 65024 =< CP, CP =< 65049 -> 'DISALLOWED'; +lookup(CP) when 65050 =< CP, CP =< 65055 -> 'UNASSIGNED'; +lookup(CP) when 65056 =< CP, CP =< 65071 -> 'PVALID'; +lookup(CP) when 65072 =< CP, CP =< 65106 -> 'DISALLOWED'; +lookup(CP) when 65108 =< CP, CP =< 65126 -> 'DISALLOWED'; +lookup(CP) when 65128 =< CP, CP =< 65131 -> 'DISALLOWED'; +lookup(CP) when 65132 =< CP, CP =< 65135 -> 'UNASSIGNED'; +lookup(CP) when 65136 =< CP, CP =< 65138 -> 'DISALLOWED'; +lookup(CP) when 65142 =< CP, CP =< 65276 -> 'DISALLOWED'; +lookup(CP) when 65277 =< CP, CP =< 65278 -> 'UNASSIGNED'; +lookup(CP) when 65281 =< CP, CP =< 65470 -> 'DISALLOWED'; +lookup(CP) when 65471 =< CP, CP =< 65473 -> 'UNASSIGNED'; +lookup(CP) when 65474 =< CP, CP =< 65479 -> 'DISALLOWED'; +lookup(CP) when 65480 =< CP, CP =< 65481 -> 'UNASSIGNED'; +lookup(CP) when 65482 =< CP, CP =< 65487 -> 'DISALLOWED'; +lookup(CP) when 65488 =< CP, CP =< 65489 -> 'UNASSIGNED'; +lookup(CP) when 65490 =< CP, CP =< 65495 -> 'DISALLOWED'; +lookup(CP) when 65496 =< CP, CP =< 65497 -> 'UNASSIGNED'; +lookup(CP) when 65498 
=< CP, CP =< 65500 -> 'DISALLOWED'; +lookup(CP) when 65501 =< CP, CP =< 65503 -> 'UNASSIGNED'; +lookup(CP) when 65504 =< CP, CP =< 65510 -> 'DISALLOWED'; +lookup(CP) when 65512 =< CP, CP =< 65518 -> 'DISALLOWED'; +lookup(CP) when 65519 =< CP, CP =< 65528 -> 'UNASSIGNED'; +lookup(CP) when 65529 =< CP, CP =< 65535 -> 'DISALLOWED'; +lookup(CP) when 65536 =< CP, CP =< 65547 -> 'PVALID'; +lookup(CP) when 65549 =< CP, CP =< 65574 -> 'PVALID'; +lookup(CP) when 65576 =< CP, CP =< 65594 -> 'PVALID'; +lookup(CP) when 65596 =< CP, CP =< 65597 -> 'PVALID'; +lookup(CP) when 65599 =< CP, CP =< 65613 -> 'PVALID'; +lookup(CP) when 65614 =< CP, CP =< 65615 -> 'UNASSIGNED'; +lookup(CP) when 65616 =< CP, CP =< 65629 -> 'PVALID'; +lookup(CP) when 65630 =< CP, CP =< 65663 -> 'UNASSIGNED'; +lookup(CP) when 65664 =< CP, CP =< 65786 -> 'PVALID'; +lookup(CP) when 65787 =< CP, CP =< 65791 -> 'UNASSIGNED'; +lookup(CP) when 65792 =< CP, CP =< 65794 -> 'DISALLOWED'; +lookup(CP) when 65795 =< CP, CP =< 65798 -> 'UNASSIGNED'; +lookup(CP) when 65799 =< CP, CP =< 65843 -> 'DISALLOWED'; +lookup(CP) when 65844 =< CP, CP =< 65846 -> 'UNASSIGNED'; +lookup(CP) when 65847 =< CP, CP =< 65934 -> 'DISALLOWED'; +lookup(CP) when 65936 =< CP, CP =< 65948 -> 'DISALLOWED'; +lookup(CP) when 65949 =< CP, CP =< 65951 -> 'UNASSIGNED'; +lookup(CP) when 65953 =< CP, CP =< 65999 -> 'UNASSIGNED'; +lookup(CP) when 66000 =< CP, CP =< 66044 -> 'DISALLOWED'; +lookup(CP) when 66046 =< CP, CP =< 66175 -> 'UNASSIGNED'; +lookup(CP) when 66176 =< CP, CP =< 66204 -> 'PVALID'; +lookup(CP) when 66205 =< CP, CP =< 66207 -> 'UNASSIGNED'; +lookup(CP) when 66208 =< CP, CP =< 66256 -> 'PVALID'; +lookup(CP) when 66257 =< CP, CP =< 66271 -> 'UNASSIGNED'; +lookup(CP) when 66273 =< CP, CP =< 66299 -> 'DISALLOWED'; +lookup(CP) when 66300 =< CP, CP =< 66303 -> 'UNASSIGNED'; +lookup(CP) when 66304 =< CP, CP =< 66335 -> 'PVALID'; +lookup(CP) when 66336 =< CP, CP =< 66339 -> 'DISALLOWED'; +lookup(CP) when 66340 =< CP, CP =< 66348 -> 
'UNASSIGNED'; +lookup(CP) when 66349 =< CP, CP =< 66368 -> 'PVALID'; +lookup(CP) when 66370 =< CP, CP =< 66377 -> 'PVALID'; +lookup(CP) when 66379 =< CP, CP =< 66383 -> 'UNASSIGNED'; +lookup(CP) when 66384 =< CP, CP =< 66426 -> 'PVALID'; +lookup(CP) when 66427 =< CP, CP =< 66431 -> 'UNASSIGNED'; +lookup(CP) when 66432 =< CP, CP =< 66461 -> 'PVALID'; +lookup(CP) when 66464 =< CP, CP =< 66499 -> 'PVALID'; +lookup(CP) when 66500 =< CP, CP =< 66503 -> 'UNASSIGNED'; +lookup(CP) when 66504 =< CP, CP =< 66511 -> 'PVALID'; +lookup(CP) when 66512 =< CP, CP =< 66517 -> 'DISALLOWED'; +lookup(CP) when 66518 =< CP, CP =< 66559 -> 'UNASSIGNED'; +lookup(CP) when 66560 =< CP, CP =< 66599 -> 'DISALLOWED'; +lookup(CP) when 66600 =< CP, CP =< 66717 -> 'PVALID'; +lookup(CP) when 66718 =< CP, CP =< 66719 -> 'UNASSIGNED'; +lookup(CP) when 66720 =< CP, CP =< 66729 -> 'PVALID'; +lookup(CP) when 66730 =< CP, CP =< 66735 -> 'UNASSIGNED'; +lookup(CP) when 66736 =< CP, CP =< 66771 -> 'DISALLOWED'; +lookup(CP) when 66772 =< CP, CP =< 66775 -> 'UNASSIGNED'; +lookup(CP) when 66776 =< CP, CP =< 66811 -> 'PVALID'; +lookup(CP) when 66812 =< CP, CP =< 66815 -> 'UNASSIGNED'; +lookup(CP) when 66816 =< CP, CP =< 66855 -> 'PVALID'; +lookup(CP) when 66856 =< CP, CP =< 66863 -> 'UNASSIGNED'; +lookup(CP) when 66864 =< CP, CP =< 66915 -> 'PVALID'; +lookup(CP) when 66916 =< CP, CP =< 66926 -> 'UNASSIGNED'; +lookup(CP) when 66928 =< CP, CP =< 67071 -> 'UNASSIGNED'; +lookup(CP) when 67072 =< CP, CP =< 67382 -> 'PVALID'; +lookup(CP) when 67383 =< CP, CP =< 67391 -> 'UNASSIGNED'; +lookup(CP) when 67392 =< CP, CP =< 67413 -> 'PVALID'; +lookup(CP) when 67414 =< CP, CP =< 67423 -> 'UNASSIGNED'; +lookup(CP) when 67424 =< CP, CP =< 67431 -> 'PVALID'; +lookup(CP) when 67432 =< CP, CP =< 67583 -> 'UNASSIGNED'; +lookup(CP) when 67584 =< CP, CP =< 67589 -> 'PVALID'; +lookup(CP) when 67590 =< CP, CP =< 67591 -> 'UNASSIGNED'; +lookup(CP) when 67594 =< CP, CP =< 67637 -> 'PVALID'; +lookup(CP) when 67639 =< CP, CP =< 67640 
-> 'PVALID'; +lookup(CP) when 67641 =< CP, CP =< 67643 -> 'UNASSIGNED'; +lookup(CP) when 67645 =< CP, CP =< 67646 -> 'UNASSIGNED'; +lookup(CP) when 67647 =< CP, CP =< 67669 -> 'PVALID'; +lookup(CP) when 67671 =< CP, CP =< 67679 -> 'DISALLOWED'; +lookup(CP) when 67680 =< CP, CP =< 67702 -> 'PVALID'; +lookup(CP) when 67703 =< CP, CP =< 67711 -> 'DISALLOWED'; +lookup(CP) when 67712 =< CP, CP =< 67742 -> 'PVALID'; +lookup(CP) when 67743 =< CP, CP =< 67750 -> 'UNASSIGNED'; +lookup(CP) when 67751 =< CP, CP =< 67759 -> 'DISALLOWED'; +lookup(CP) when 67760 =< CP, CP =< 67807 -> 'UNASSIGNED'; +lookup(CP) when 67808 =< CP, CP =< 67826 -> 'PVALID'; +lookup(CP) when 67828 =< CP, CP =< 67829 -> 'PVALID'; +lookup(CP) when 67830 =< CP, CP =< 67834 -> 'UNASSIGNED'; +lookup(CP) when 67835 =< CP, CP =< 67839 -> 'DISALLOWED'; +lookup(CP) when 67840 =< CP, CP =< 67861 -> 'PVALID'; +lookup(CP) when 67862 =< CP, CP =< 67867 -> 'DISALLOWED'; +lookup(CP) when 67868 =< CP, CP =< 67870 -> 'UNASSIGNED'; +lookup(CP) when 67872 =< CP, CP =< 67897 -> 'PVALID'; +lookup(CP) when 67898 =< CP, CP =< 67902 -> 'UNASSIGNED'; +lookup(CP) when 67904 =< CP, CP =< 67967 -> 'UNASSIGNED'; +lookup(CP) when 67968 =< CP, CP =< 68023 -> 'PVALID'; +lookup(CP) when 68024 =< CP, CP =< 68027 -> 'UNASSIGNED'; +lookup(CP) when 68028 =< CP, CP =< 68029 -> 'DISALLOWED'; +lookup(CP) when 68030 =< CP, CP =< 68031 -> 'PVALID'; +lookup(CP) when 68032 =< CP, CP =< 68047 -> 'DISALLOWED'; +lookup(CP) when 68048 =< CP, CP =< 68049 -> 'UNASSIGNED'; +lookup(CP) when 68050 =< CP, CP =< 68095 -> 'DISALLOWED'; +lookup(CP) when 68096 =< CP, CP =< 68099 -> 'PVALID'; +lookup(CP) when 68101 =< CP, CP =< 68102 -> 'PVALID'; +lookup(CP) when 68103 =< CP, CP =< 68107 -> 'UNASSIGNED'; +lookup(CP) when 68108 =< CP, CP =< 68115 -> 'PVALID'; +lookup(CP) when 68117 =< CP, CP =< 68119 -> 'PVALID'; +lookup(CP) when 68121 =< CP, CP =< 68149 -> 'PVALID'; +lookup(CP) when 68150 =< CP, CP =< 68151 -> 'UNASSIGNED'; +lookup(CP) when 68152 =< CP, CP =< 
68154 -> 'PVALID'; +lookup(CP) when 68155 =< CP, CP =< 68158 -> 'UNASSIGNED'; +lookup(CP) when 68160 =< CP, CP =< 68168 -> 'DISALLOWED'; +lookup(CP) when 68169 =< CP, CP =< 68175 -> 'UNASSIGNED'; +lookup(CP) when 68176 =< CP, CP =< 68184 -> 'DISALLOWED'; +lookup(CP) when 68185 =< CP, CP =< 68191 -> 'UNASSIGNED'; +lookup(CP) when 68192 =< CP, CP =< 68220 -> 'PVALID'; +lookup(CP) when 68221 =< CP, CP =< 68223 -> 'DISALLOWED'; +lookup(CP) when 68224 =< CP, CP =< 68252 -> 'PVALID'; +lookup(CP) when 68253 =< CP, CP =< 68255 -> 'DISALLOWED'; +lookup(CP) when 68256 =< CP, CP =< 68287 -> 'UNASSIGNED'; +lookup(CP) when 68288 =< CP, CP =< 68295 -> 'PVALID'; +lookup(CP) when 68297 =< CP, CP =< 68326 -> 'PVALID'; +lookup(CP) when 68327 =< CP, CP =< 68330 -> 'UNASSIGNED'; +lookup(CP) when 68331 =< CP, CP =< 68342 -> 'DISALLOWED'; +lookup(CP) when 68343 =< CP, CP =< 68351 -> 'UNASSIGNED'; +lookup(CP) when 68352 =< CP, CP =< 68405 -> 'PVALID'; +lookup(CP) when 68406 =< CP, CP =< 68408 -> 'UNASSIGNED'; +lookup(CP) when 68409 =< CP, CP =< 68415 -> 'DISALLOWED'; +lookup(CP) when 68416 =< CP, CP =< 68437 -> 'PVALID'; +lookup(CP) when 68438 =< CP, CP =< 68439 -> 'UNASSIGNED'; +lookup(CP) when 68440 =< CP, CP =< 68447 -> 'DISALLOWED'; +lookup(CP) when 68448 =< CP, CP =< 68466 -> 'PVALID'; +lookup(CP) when 68467 =< CP, CP =< 68471 -> 'UNASSIGNED'; +lookup(CP) when 68472 =< CP, CP =< 68479 -> 'DISALLOWED'; +lookup(CP) when 68480 =< CP, CP =< 68497 -> 'PVALID'; +lookup(CP) when 68498 =< CP, CP =< 68504 -> 'UNASSIGNED'; +lookup(CP) when 68505 =< CP, CP =< 68508 -> 'DISALLOWED'; +lookup(CP) when 68509 =< CP, CP =< 68520 -> 'UNASSIGNED'; +lookup(CP) when 68521 =< CP, CP =< 68527 -> 'DISALLOWED'; +lookup(CP) when 68528 =< CP, CP =< 68607 -> 'UNASSIGNED'; +lookup(CP) when 68608 =< CP, CP =< 68680 -> 'PVALID'; +lookup(CP) when 68681 =< CP, CP =< 68735 -> 'UNASSIGNED'; +lookup(CP) when 68736 =< CP, CP =< 68786 -> 'DISALLOWED'; +lookup(CP) when 68787 =< CP, CP =< 68799 -> 'UNASSIGNED'; 
+lookup(CP) when 68800 =< CP, CP =< 68850 -> 'PVALID'; +lookup(CP) when 68851 =< CP, CP =< 68857 -> 'UNASSIGNED'; +lookup(CP) when 68858 =< CP, CP =< 68863 -> 'DISALLOWED'; +lookup(CP) when 68864 =< CP, CP =< 68903 -> 'PVALID'; +lookup(CP) when 68904 =< CP, CP =< 68911 -> 'UNASSIGNED'; +lookup(CP) when 68912 =< CP, CP =< 68921 -> 'PVALID'; +lookup(CP) when 68922 =< CP, CP =< 69215 -> 'UNASSIGNED'; +lookup(CP) when 69216 =< CP, CP =< 69246 -> 'DISALLOWED'; +lookup(CP) when 69248 =< CP, CP =< 69289 -> 'PVALID'; +lookup(CP) when 69291 =< CP, CP =< 69292 -> 'PVALID'; +lookup(CP) when 69294 =< CP, CP =< 69295 -> 'UNASSIGNED'; +lookup(CP) when 69296 =< CP, CP =< 69297 -> 'PVALID'; +lookup(CP) when 69298 =< CP, CP =< 69375 -> 'UNASSIGNED'; +lookup(CP) when 69376 =< CP, CP =< 69404 -> 'PVALID'; +lookup(CP) when 69405 =< CP, CP =< 69414 -> 'DISALLOWED'; +lookup(CP) when 69416 =< CP, CP =< 69423 -> 'UNASSIGNED'; +lookup(CP) when 69424 =< CP, CP =< 69456 -> 'PVALID'; +lookup(CP) when 69457 =< CP, CP =< 69465 -> 'DISALLOWED'; +lookup(CP) when 69466 =< CP, CP =< 69551 -> 'UNASSIGNED'; +lookup(CP) when 69552 =< CP, CP =< 69572 -> 'PVALID'; +lookup(CP) when 69573 =< CP, CP =< 69579 -> 'DISALLOWED'; +lookup(CP) when 69580 =< CP, CP =< 69599 -> 'UNASSIGNED'; +lookup(CP) when 69600 =< CP, CP =< 69622 -> 'PVALID'; +lookup(CP) when 69623 =< CP, CP =< 69631 -> 'UNASSIGNED'; +lookup(CP) when 69632 =< CP, CP =< 69702 -> 'PVALID'; +lookup(CP) when 69703 =< CP, CP =< 69709 -> 'DISALLOWED'; +lookup(CP) when 69710 =< CP, CP =< 69713 -> 'UNASSIGNED'; +lookup(CP) when 69714 =< CP, CP =< 69733 -> 'DISALLOWED'; +lookup(CP) when 69734 =< CP, CP =< 69743 -> 'PVALID'; +lookup(CP) when 69744 =< CP, CP =< 69758 -> 'UNASSIGNED'; +lookup(CP) when 69759 =< CP, CP =< 69818 -> 'PVALID'; +lookup(CP) when 69819 =< CP, CP =< 69825 -> 'DISALLOWED'; +lookup(CP) when 69826 =< CP, CP =< 69836 -> 'UNASSIGNED'; +lookup(CP) when 69838 =< CP, CP =< 69839 -> 'UNASSIGNED'; +lookup(CP) when 69840 =< CP, CP =< 69864 -> 
'PVALID'; +lookup(CP) when 69865 =< CP, CP =< 69871 -> 'UNASSIGNED'; +lookup(CP) when 69872 =< CP, CP =< 69881 -> 'PVALID'; +lookup(CP) when 69882 =< CP, CP =< 69887 -> 'UNASSIGNED'; +lookup(CP) when 69888 =< CP, CP =< 69940 -> 'PVALID'; +lookup(CP) when 69942 =< CP, CP =< 69951 -> 'PVALID'; +lookup(CP) when 69952 =< CP, CP =< 69955 -> 'DISALLOWED'; +lookup(CP) when 69956 =< CP, CP =< 69959 -> 'PVALID'; +lookup(CP) when 69960 =< CP, CP =< 69967 -> 'UNASSIGNED'; +lookup(CP) when 69968 =< CP, CP =< 70003 -> 'PVALID'; +lookup(CP) when 70004 =< CP, CP =< 70005 -> 'DISALLOWED'; +lookup(CP) when 70007 =< CP, CP =< 70015 -> 'UNASSIGNED'; +lookup(CP) when 70016 =< CP, CP =< 70084 -> 'PVALID'; +lookup(CP) when 70085 =< CP, CP =< 70088 -> 'DISALLOWED'; +lookup(CP) when 70089 =< CP, CP =< 70092 -> 'PVALID'; +lookup(CP) when 70094 =< CP, CP =< 70106 -> 'PVALID'; +lookup(CP) when 70109 =< CP, CP =< 70111 -> 'DISALLOWED'; +lookup(CP) when 70113 =< CP, CP =< 70132 -> 'DISALLOWED'; +lookup(CP) when 70133 =< CP, CP =< 70143 -> 'UNASSIGNED'; +lookup(CP) when 70144 =< CP, CP =< 70161 -> 'PVALID'; +lookup(CP) when 70163 =< CP, CP =< 70199 -> 'PVALID'; +lookup(CP) when 70200 =< CP, CP =< 70205 -> 'DISALLOWED'; +lookup(CP) when 70207 =< CP, CP =< 70271 -> 'UNASSIGNED'; +lookup(CP) when 70272 =< CP, CP =< 70278 -> 'PVALID'; +lookup(CP) when 70282 =< CP, CP =< 70285 -> 'PVALID'; +lookup(CP) when 70287 =< CP, CP =< 70301 -> 'PVALID'; +lookup(CP) when 70303 =< CP, CP =< 70312 -> 'PVALID'; +lookup(CP) when 70314 =< CP, CP =< 70319 -> 'UNASSIGNED'; +lookup(CP) when 70320 =< CP, CP =< 70378 -> 'PVALID'; +lookup(CP) when 70379 =< CP, CP =< 70383 -> 'UNASSIGNED'; +lookup(CP) when 70384 =< CP, CP =< 70393 -> 'PVALID'; +lookup(CP) when 70394 =< CP, CP =< 70399 -> 'UNASSIGNED'; +lookup(CP) when 70400 =< CP, CP =< 70403 -> 'PVALID'; +lookup(CP) when 70405 =< CP, CP =< 70412 -> 'PVALID'; +lookup(CP) when 70413 =< CP, CP =< 70414 -> 'UNASSIGNED'; +lookup(CP) when 70415 =< CP, CP =< 70416 -> 'PVALID'; 
+lookup(CP) when 70417 =< CP, CP =< 70418 -> 'UNASSIGNED'; +lookup(CP) when 70419 =< CP, CP =< 70440 -> 'PVALID'; +lookup(CP) when 70442 =< CP, CP =< 70448 -> 'PVALID'; +lookup(CP) when 70450 =< CP, CP =< 70451 -> 'PVALID'; +lookup(CP) when 70453 =< CP, CP =< 70457 -> 'PVALID'; +lookup(CP) when 70459 =< CP, CP =< 70468 -> 'PVALID'; +lookup(CP) when 70469 =< CP, CP =< 70470 -> 'UNASSIGNED'; +lookup(CP) when 70471 =< CP, CP =< 70472 -> 'PVALID'; +lookup(CP) when 70473 =< CP, CP =< 70474 -> 'UNASSIGNED'; +lookup(CP) when 70475 =< CP, CP =< 70477 -> 'PVALID'; +lookup(CP) when 70478 =< CP, CP =< 70479 -> 'UNASSIGNED'; +lookup(CP) when 70481 =< CP, CP =< 70486 -> 'UNASSIGNED'; +lookup(CP) when 70488 =< CP, CP =< 70492 -> 'UNASSIGNED'; +lookup(CP) when 70493 =< CP, CP =< 70499 -> 'PVALID'; +lookup(CP) when 70500 =< CP, CP =< 70501 -> 'UNASSIGNED'; +lookup(CP) when 70502 =< CP, CP =< 70508 -> 'PVALID'; +lookup(CP) when 70509 =< CP, CP =< 70511 -> 'UNASSIGNED'; +lookup(CP) when 70512 =< CP, CP =< 70516 -> 'PVALID'; +lookup(CP) when 70517 =< CP, CP =< 70655 -> 'UNASSIGNED'; +lookup(CP) when 70656 =< CP, CP =< 70730 -> 'PVALID'; +lookup(CP) when 70731 =< CP, CP =< 70735 -> 'DISALLOWED'; +lookup(CP) when 70736 =< CP, CP =< 70745 -> 'PVALID'; +lookup(CP) when 70746 =< CP, CP =< 70747 -> 'DISALLOWED'; +lookup(CP) when 70750 =< CP, CP =< 70753 -> 'PVALID'; +lookup(CP) when 70754 =< CP, CP =< 70783 -> 'UNASSIGNED'; +lookup(CP) when 70784 =< CP, CP =< 70853 -> 'PVALID'; +lookup(CP) when 70856 =< CP, CP =< 70863 -> 'UNASSIGNED'; +lookup(CP) when 70864 =< CP, CP =< 70873 -> 'PVALID'; +lookup(CP) when 70874 =< CP, CP =< 71039 -> 'UNASSIGNED'; +lookup(CP) when 71040 =< CP, CP =< 71093 -> 'PVALID'; +lookup(CP) when 71094 =< CP, CP =< 71095 -> 'UNASSIGNED'; +lookup(CP) when 71096 =< CP, CP =< 71104 -> 'PVALID'; +lookup(CP) when 71105 =< CP, CP =< 71127 -> 'DISALLOWED'; +lookup(CP) when 71128 =< CP, CP =< 71133 -> 'PVALID'; +lookup(CP) when 71134 =< CP, CP =< 71167 -> 'UNASSIGNED'; 
+lookup(CP) when 71168 =< CP, CP =< 71232 -> 'PVALID'; +lookup(CP) when 71233 =< CP, CP =< 71235 -> 'DISALLOWED'; +lookup(CP) when 71237 =< CP, CP =< 71247 -> 'UNASSIGNED'; +lookup(CP) when 71248 =< CP, CP =< 71257 -> 'PVALID'; +lookup(CP) when 71258 =< CP, CP =< 71263 -> 'UNASSIGNED'; +lookup(CP) when 71264 =< CP, CP =< 71276 -> 'DISALLOWED'; +lookup(CP) when 71277 =< CP, CP =< 71295 -> 'UNASSIGNED'; +lookup(CP) when 71296 =< CP, CP =< 71352 -> 'PVALID'; +lookup(CP) when 71353 =< CP, CP =< 71359 -> 'UNASSIGNED'; +lookup(CP) when 71360 =< CP, CP =< 71369 -> 'PVALID'; +lookup(CP) when 71370 =< CP, CP =< 71423 -> 'UNASSIGNED'; +lookup(CP) when 71424 =< CP, CP =< 71450 -> 'PVALID'; +lookup(CP) when 71451 =< CP, CP =< 71452 -> 'UNASSIGNED'; +lookup(CP) when 71453 =< CP, CP =< 71467 -> 'PVALID'; +lookup(CP) when 71468 =< CP, CP =< 71471 -> 'UNASSIGNED'; +lookup(CP) when 71472 =< CP, CP =< 71481 -> 'PVALID'; +lookup(CP) when 71482 =< CP, CP =< 71487 -> 'DISALLOWED'; +lookup(CP) when 71488 =< CP, CP =< 71679 -> 'UNASSIGNED'; +lookup(CP) when 71680 =< CP, CP =< 71738 -> 'PVALID'; +lookup(CP) when 71740 =< CP, CP =< 71839 -> 'UNASSIGNED'; +lookup(CP) when 71840 =< CP, CP =< 71871 -> 'DISALLOWED'; +lookup(CP) when 71872 =< CP, CP =< 71913 -> 'PVALID'; +lookup(CP) when 71914 =< CP, CP =< 71922 -> 'DISALLOWED'; +lookup(CP) when 71923 =< CP, CP =< 71934 -> 'UNASSIGNED'; +lookup(CP) when 71935 =< CP, CP =< 71942 -> 'PVALID'; +lookup(CP) when 71943 =< CP, CP =< 71944 -> 'UNASSIGNED'; +lookup(CP) when 71946 =< CP, CP =< 71947 -> 'UNASSIGNED'; +lookup(CP) when 71948 =< CP, CP =< 71955 -> 'PVALID'; +lookup(CP) when 71957 =< CP, CP =< 71958 -> 'PVALID'; +lookup(CP) when 71960 =< CP, CP =< 71989 -> 'PVALID'; +lookup(CP) when 71991 =< CP, CP =< 71992 -> 'PVALID'; +lookup(CP) when 71993 =< CP, CP =< 71994 -> 'UNASSIGNED'; +lookup(CP) when 71995 =< CP, CP =< 72003 -> 'PVALID'; +lookup(CP) when 72004 =< CP, CP =< 72006 -> 'DISALLOWED'; +lookup(CP) when 72007 =< CP, CP =< 72015 -> 
'UNASSIGNED'; +lookup(CP) when 72016 =< CP, CP =< 72025 -> 'PVALID'; +lookup(CP) when 72026 =< CP, CP =< 72095 -> 'UNASSIGNED'; +lookup(CP) when 72096 =< CP, CP =< 72103 -> 'PVALID'; +lookup(CP) when 72104 =< CP, CP =< 72105 -> 'UNASSIGNED'; +lookup(CP) when 72106 =< CP, CP =< 72151 -> 'PVALID'; +lookup(CP) when 72152 =< CP, CP =< 72153 -> 'UNASSIGNED'; +lookup(CP) when 72154 =< CP, CP =< 72161 -> 'PVALID'; +lookup(CP) when 72163 =< CP, CP =< 72164 -> 'PVALID'; +lookup(CP) when 72165 =< CP, CP =< 72191 -> 'UNASSIGNED'; +lookup(CP) when 72192 =< CP, CP =< 72254 -> 'PVALID'; +lookup(CP) when 72255 =< CP, CP =< 72262 -> 'DISALLOWED'; +lookup(CP) when 72264 =< CP, CP =< 72271 -> 'UNASSIGNED'; +lookup(CP) when 72272 =< CP, CP =< 72345 -> 'PVALID'; +lookup(CP) when 72346 =< CP, CP =< 72348 -> 'DISALLOWED'; +lookup(CP) when 72350 =< CP, CP =< 72354 -> 'DISALLOWED'; +lookup(CP) when 72355 =< CP, CP =< 72383 -> 'UNASSIGNED'; +lookup(CP) when 72384 =< CP, CP =< 72440 -> 'PVALID'; +lookup(CP) when 72441 =< CP, CP =< 72703 -> 'UNASSIGNED'; +lookup(CP) when 72704 =< CP, CP =< 72712 -> 'PVALID'; +lookup(CP) when 72714 =< CP, CP =< 72758 -> 'PVALID'; +lookup(CP) when 72760 =< CP, CP =< 72768 -> 'PVALID'; +lookup(CP) when 72769 =< CP, CP =< 72773 -> 'DISALLOWED'; +lookup(CP) when 72774 =< CP, CP =< 72783 -> 'UNASSIGNED'; +lookup(CP) when 72784 =< CP, CP =< 72793 -> 'PVALID'; +lookup(CP) when 72794 =< CP, CP =< 72812 -> 'DISALLOWED'; +lookup(CP) when 72813 =< CP, CP =< 72815 -> 'UNASSIGNED'; +lookup(CP) when 72816 =< CP, CP =< 72817 -> 'DISALLOWED'; +lookup(CP) when 72818 =< CP, CP =< 72847 -> 'PVALID'; +lookup(CP) when 72848 =< CP, CP =< 72849 -> 'UNASSIGNED'; +lookup(CP) when 72850 =< CP, CP =< 72871 -> 'PVALID'; +lookup(CP) when 72873 =< CP, CP =< 72886 -> 'PVALID'; +lookup(CP) when 72887 =< CP, CP =< 72959 -> 'UNASSIGNED'; +lookup(CP) when 72960 =< CP, CP =< 72966 -> 'PVALID'; +lookup(CP) when 72968 =< CP, CP =< 72969 -> 'PVALID'; +lookup(CP) when 72971 =< CP, CP =< 73014 -> 
'PVALID'; +lookup(CP) when 73015 =< CP, CP =< 73017 -> 'UNASSIGNED'; +lookup(CP) when 73020 =< CP, CP =< 73021 -> 'PVALID'; +lookup(CP) when 73023 =< CP, CP =< 73031 -> 'PVALID'; +lookup(CP) when 73032 =< CP, CP =< 73039 -> 'UNASSIGNED'; +lookup(CP) when 73040 =< CP, CP =< 73049 -> 'PVALID'; +lookup(CP) when 73050 =< CP, CP =< 73055 -> 'UNASSIGNED'; +lookup(CP) when 73056 =< CP, CP =< 73061 -> 'PVALID'; +lookup(CP) when 73063 =< CP, CP =< 73064 -> 'PVALID'; +lookup(CP) when 73066 =< CP, CP =< 73102 -> 'PVALID'; +lookup(CP) when 73104 =< CP, CP =< 73105 -> 'PVALID'; +lookup(CP) when 73107 =< CP, CP =< 73112 -> 'PVALID'; +lookup(CP) when 73113 =< CP, CP =< 73119 -> 'UNASSIGNED'; +lookup(CP) when 73120 =< CP, CP =< 73129 -> 'PVALID'; +lookup(CP) when 73130 =< CP, CP =< 73439 -> 'UNASSIGNED'; +lookup(CP) when 73440 =< CP, CP =< 73462 -> 'PVALID'; +lookup(CP) when 73463 =< CP, CP =< 73464 -> 'DISALLOWED'; +lookup(CP) when 73465 =< CP, CP =< 73647 -> 'UNASSIGNED'; +lookup(CP) when 73649 =< CP, CP =< 73663 -> 'UNASSIGNED'; +lookup(CP) when 73664 =< CP, CP =< 73713 -> 'DISALLOWED'; +lookup(CP) when 73714 =< CP, CP =< 73726 -> 'UNASSIGNED'; +lookup(CP) when 73728 =< CP, CP =< 74649 -> 'PVALID'; +lookup(CP) when 74650 =< CP, CP =< 74751 -> 'UNASSIGNED'; +lookup(CP) when 74752 =< CP, CP =< 74862 -> 'DISALLOWED'; +lookup(CP) when 74864 =< CP, CP =< 74868 -> 'DISALLOWED'; +lookup(CP) when 74869 =< CP, CP =< 74879 -> 'UNASSIGNED'; +lookup(CP) when 74880 =< CP, CP =< 75075 -> 'PVALID'; +lookup(CP) when 75076 =< CP, CP =< 77823 -> 'UNASSIGNED'; +lookup(CP) when 77824 =< CP, CP =< 78894 -> 'PVALID'; +lookup(CP) when 78896 =< CP, CP =< 78904 -> 'DISALLOWED'; +lookup(CP) when 78905 =< CP, CP =< 82943 -> 'UNASSIGNED'; +lookup(CP) when 82944 =< CP, CP =< 83526 -> 'PVALID'; +lookup(CP) when 83527 =< CP, CP =< 92159 -> 'UNASSIGNED'; +lookup(CP) when 92160 =< CP, CP =< 92728 -> 'PVALID'; +lookup(CP) when 92729 =< CP, CP =< 92735 -> 'UNASSIGNED'; +lookup(CP) when 92736 =< CP, CP =< 92766 
-> 'PVALID'; +lookup(CP) when 92768 =< CP, CP =< 92777 -> 'PVALID'; +lookup(CP) when 92778 =< CP, CP =< 92781 -> 'UNASSIGNED'; +lookup(CP) when 92782 =< CP, CP =< 92783 -> 'DISALLOWED'; +lookup(CP) when 92784 =< CP, CP =< 92879 -> 'UNASSIGNED'; +lookup(CP) when 92880 =< CP, CP =< 92909 -> 'PVALID'; +lookup(CP) when 92910 =< CP, CP =< 92911 -> 'UNASSIGNED'; +lookup(CP) when 92912 =< CP, CP =< 92916 -> 'PVALID'; +lookup(CP) when 92918 =< CP, CP =< 92927 -> 'UNASSIGNED'; +lookup(CP) when 92928 =< CP, CP =< 92982 -> 'PVALID'; +lookup(CP) when 92983 =< CP, CP =< 92991 -> 'DISALLOWED'; +lookup(CP) when 92992 =< CP, CP =< 92995 -> 'PVALID'; +lookup(CP) when 92996 =< CP, CP =< 92997 -> 'DISALLOWED'; +lookup(CP) when 92998 =< CP, CP =< 93007 -> 'UNASSIGNED'; +lookup(CP) when 93008 =< CP, CP =< 93017 -> 'PVALID'; +lookup(CP) when 93019 =< CP, CP =< 93025 -> 'DISALLOWED'; +lookup(CP) when 93027 =< CP, CP =< 93047 -> 'PVALID'; +lookup(CP) when 93048 =< CP, CP =< 93052 -> 'UNASSIGNED'; +lookup(CP) when 93053 =< CP, CP =< 93071 -> 'PVALID'; +lookup(CP) when 93072 =< CP, CP =< 93759 -> 'UNASSIGNED'; +lookup(CP) when 93760 =< CP, CP =< 93791 -> 'DISALLOWED'; +lookup(CP) when 93792 =< CP, CP =< 93823 -> 'PVALID'; +lookup(CP) when 93824 =< CP, CP =< 93850 -> 'DISALLOWED'; +lookup(CP) when 93851 =< CP, CP =< 93951 -> 'UNASSIGNED'; +lookup(CP) when 93952 =< CP, CP =< 94026 -> 'PVALID'; +lookup(CP) when 94027 =< CP, CP =< 94030 -> 'UNASSIGNED'; +lookup(CP) when 94031 =< CP, CP =< 94087 -> 'PVALID'; +lookup(CP) when 94088 =< CP, CP =< 94094 -> 'UNASSIGNED'; +lookup(CP) when 94095 =< CP, CP =< 94111 -> 'PVALID'; +lookup(CP) when 94112 =< CP, CP =< 94175 -> 'UNASSIGNED'; +lookup(CP) when 94176 =< CP, CP =< 94177 -> 'PVALID'; +lookup(CP) when 94179 =< CP, CP =< 94180 -> 'PVALID'; +lookup(CP) when 94181 =< CP, CP =< 94191 -> 'UNASSIGNED'; +lookup(CP) when 94192 =< CP, CP =< 94193 -> 'PVALID'; +lookup(CP) when 94194 =< CP, CP =< 94207 -> 'UNASSIGNED'; +lookup(CP) when 94208 =< CP, CP =< 
100343 -> 'PVALID'; +lookup(CP) when 100344 =< CP, CP =< 100351 -> 'UNASSIGNED'; +lookup(CP) when 100352 =< CP, CP =< 101589 -> 'PVALID'; +lookup(CP) when 101590 =< CP, CP =< 101631 -> 'UNASSIGNED'; +lookup(CP) when 101632 =< CP, CP =< 101640 -> 'PVALID'; +lookup(CP) when 101641 =< CP, CP =< 110591 -> 'UNASSIGNED'; +lookup(CP) when 110592 =< CP, CP =< 110878 -> 'PVALID'; +lookup(CP) when 110879 =< CP, CP =< 110927 -> 'UNASSIGNED'; +lookup(CP) when 110928 =< CP, CP =< 110930 -> 'PVALID'; +lookup(CP) when 110931 =< CP, CP =< 110947 -> 'UNASSIGNED'; +lookup(CP) when 110948 =< CP, CP =< 110951 -> 'PVALID'; +lookup(CP) when 110952 =< CP, CP =< 110959 -> 'UNASSIGNED'; +lookup(CP) when 110960 =< CP, CP =< 111355 -> 'PVALID'; +lookup(CP) when 111356 =< CP, CP =< 113663 -> 'UNASSIGNED'; +lookup(CP) when 113664 =< CP, CP =< 113770 -> 'PVALID'; +lookup(CP) when 113771 =< CP, CP =< 113775 -> 'UNASSIGNED'; +lookup(CP) when 113776 =< CP, CP =< 113788 -> 'PVALID'; +lookup(CP) when 113789 =< CP, CP =< 113791 -> 'UNASSIGNED'; +lookup(CP) when 113792 =< CP, CP =< 113800 -> 'PVALID'; +lookup(CP) when 113801 =< CP, CP =< 113807 -> 'UNASSIGNED'; +lookup(CP) when 113808 =< CP, CP =< 113817 -> 'PVALID'; +lookup(CP) when 113818 =< CP, CP =< 113819 -> 'UNASSIGNED'; +lookup(CP) when 113821 =< CP, CP =< 113822 -> 'PVALID'; +lookup(CP) when 113823 =< CP, CP =< 113827 -> 'DISALLOWED'; +lookup(CP) when 113828 =< CP, CP =< 118783 -> 'UNASSIGNED'; +lookup(CP) when 118784 =< CP, CP =< 119029 -> 'DISALLOWED'; +lookup(CP) when 119030 =< CP, CP =< 119039 -> 'UNASSIGNED'; +lookup(CP) when 119040 =< CP, CP =< 119078 -> 'DISALLOWED'; +lookup(CP) when 119079 =< CP, CP =< 119080 -> 'UNASSIGNED'; +lookup(CP) when 119081 =< CP, CP =< 119272 -> 'DISALLOWED'; +lookup(CP) when 119273 =< CP, CP =< 119295 -> 'UNASSIGNED'; +lookup(CP) when 119296 =< CP, CP =< 119365 -> 'DISALLOWED'; +lookup(CP) when 119366 =< CP, CP =< 119519 -> 'UNASSIGNED'; +lookup(CP) when 119520 =< CP, CP =< 119539 -> 'DISALLOWED'; 
+lookup(CP) when 119540 =< CP, CP =< 119551 -> 'UNASSIGNED'; +lookup(CP) when 119552 =< CP, CP =< 119638 -> 'DISALLOWED'; +lookup(CP) when 119639 =< CP, CP =< 119647 -> 'UNASSIGNED'; +lookup(CP) when 119648 =< CP, CP =< 119672 -> 'DISALLOWED'; +lookup(CP) when 119673 =< CP, CP =< 119807 -> 'UNASSIGNED'; +lookup(CP) when 119808 =< CP, CP =< 119892 -> 'DISALLOWED'; +lookup(CP) when 119894 =< CP, CP =< 119964 -> 'DISALLOWED'; +lookup(CP) when 119966 =< CP, CP =< 119967 -> 'DISALLOWED'; +lookup(CP) when 119968 =< CP, CP =< 119969 -> 'UNASSIGNED'; +lookup(CP) when 119971 =< CP, CP =< 119972 -> 'UNASSIGNED'; +lookup(CP) when 119973 =< CP, CP =< 119974 -> 'DISALLOWED'; +lookup(CP) when 119975 =< CP, CP =< 119976 -> 'UNASSIGNED'; +lookup(CP) when 119977 =< CP, CP =< 119980 -> 'DISALLOWED'; +lookup(CP) when 119982 =< CP, CP =< 119993 -> 'DISALLOWED'; +lookup(CP) when 119997 =< CP, CP =< 120003 -> 'DISALLOWED'; +lookup(CP) when 120005 =< CP, CP =< 120069 -> 'DISALLOWED'; +lookup(CP) when 120071 =< CP, CP =< 120074 -> 'DISALLOWED'; +lookup(CP) when 120075 =< CP, CP =< 120076 -> 'UNASSIGNED'; +lookup(CP) when 120077 =< CP, CP =< 120084 -> 'DISALLOWED'; +lookup(CP) when 120086 =< CP, CP =< 120092 -> 'DISALLOWED'; +lookup(CP) when 120094 =< CP, CP =< 120121 -> 'DISALLOWED'; +lookup(CP) when 120123 =< CP, CP =< 120126 -> 'DISALLOWED'; +lookup(CP) when 120128 =< CP, CP =< 120132 -> 'DISALLOWED'; +lookup(CP) when 120135 =< CP, CP =< 120137 -> 'UNASSIGNED'; +lookup(CP) when 120138 =< CP, CP =< 120144 -> 'DISALLOWED'; +lookup(CP) when 120146 =< CP, CP =< 120485 -> 'DISALLOWED'; +lookup(CP) when 120486 =< CP, CP =< 120487 -> 'UNASSIGNED'; +lookup(CP) when 120488 =< CP, CP =< 120779 -> 'DISALLOWED'; +lookup(CP) when 120780 =< CP, CP =< 120781 -> 'UNASSIGNED'; +lookup(CP) when 120782 =< CP, CP =< 121343 -> 'DISALLOWED'; +lookup(CP) when 121344 =< CP, CP =< 121398 -> 'PVALID'; +lookup(CP) when 121399 =< CP, CP =< 121402 -> 'DISALLOWED'; +lookup(CP) when 121403 =< CP, CP =< 121452 -> 
'PVALID'; +lookup(CP) when 121453 =< CP, CP =< 121460 -> 'DISALLOWED'; +lookup(CP) when 121462 =< CP, CP =< 121475 -> 'DISALLOWED'; +lookup(CP) when 121477 =< CP, CP =< 121483 -> 'DISALLOWED'; +lookup(CP) when 121484 =< CP, CP =< 121498 -> 'UNASSIGNED'; +lookup(CP) when 121499 =< CP, CP =< 121503 -> 'PVALID'; +lookup(CP) when 121505 =< CP, CP =< 121519 -> 'PVALID'; +lookup(CP) when 121520 =< CP, CP =< 122879 -> 'UNASSIGNED'; +lookup(CP) when 122880 =< CP, CP =< 122886 -> 'PVALID'; +lookup(CP) when 122888 =< CP, CP =< 122904 -> 'PVALID'; +lookup(CP) when 122905 =< CP, CP =< 122906 -> 'UNASSIGNED'; +lookup(CP) when 122907 =< CP, CP =< 122913 -> 'PVALID'; +lookup(CP) when 122915 =< CP, CP =< 122916 -> 'PVALID'; +lookup(CP) when 122918 =< CP, CP =< 122922 -> 'PVALID'; +lookup(CP) when 122923 =< CP, CP =< 123135 -> 'UNASSIGNED'; +lookup(CP) when 123136 =< CP, CP =< 123180 -> 'PVALID'; +lookup(CP) when 123181 =< CP, CP =< 123183 -> 'UNASSIGNED'; +lookup(CP) when 123184 =< CP, CP =< 123197 -> 'PVALID'; +lookup(CP) when 123198 =< CP, CP =< 123199 -> 'UNASSIGNED'; +lookup(CP) when 123200 =< CP, CP =< 123209 -> 'PVALID'; +lookup(CP) when 123210 =< CP, CP =< 123213 -> 'UNASSIGNED'; +lookup(CP) when 123216 =< CP, CP =< 123583 -> 'UNASSIGNED'; +lookup(CP) when 123584 =< CP, CP =< 123641 -> 'PVALID'; +lookup(CP) when 123642 =< CP, CP =< 123646 -> 'UNASSIGNED'; +lookup(CP) when 123648 =< CP, CP =< 124927 -> 'UNASSIGNED'; +lookup(CP) when 124928 =< CP, CP =< 125124 -> 'PVALID'; +lookup(CP) when 125125 =< CP, CP =< 125126 -> 'UNASSIGNED'; +lookup(CP) when 125127 =< CP, CP =< 125135 -> 'DISALLOWED'; +lookup(CP) when 125136 =< CP, CP =< 125142 -> 'PVALID'; +lookup(CP) when 125143 =< CP, CP =< 125183 -> 'UNASSIGNED'; +lookup(CP) when 125184 =< CP, CP =< 125217 -> 'DISALLOWED'; +lookup(CP) when 125218 =< CP, CP =< 125259 -> 'PVALID'; +lookup(CP) when 125260 =< CP, CP =< 125263 -> 'UNASSIGNED'; +lookup(CP) when 125264 =< CP, CP =< 125273 -> 'PVALID'; +lookup(CP) when 125274 =< CP, CP =< 
125277 -> 'UNASSIGNED'; +lookup(CP) when 125278 =< CP, CP =< 125279 -> 'DISALLOWED'; +lookup(CP) when 125280 =< CP, CP =< 126064 -> 'UNASSIGNED'; +lookup(CP) when 126065 =< CP, CP =< 126132 -> 'DISALLOWED'; +lookup(CP) when 126133 =< CP, CP =< 126208 -> 'UNASSIGNED'; +lookup(CP) when 126209 =< CP, CP =< 126269 -> 'DISALLOWED'; +lookup(CP) when 126270 =< CP, CP =< 126463 -> 'UNASSIGNED'; +lookup(CP) when 126464 =< CP, CP =< 126467 -> 'DISALLOWED'; +lookup(CP) when 126469 =< CP, CP =< 126495 -> 'DISALLOWED'; +lookup(CP) when 126497 =< CP, CP =< 126498 -> 'DISALLOWED'; +lookup(CP) when 126501 =< CP, CP =< 126502 -> 'UNASSIGNED'; +lookup(CP) when 126505 =< CP, CP =< 126514 -> 'DISALLOWED'; +lookup(CP) when 126516 =< CP, CP =< 126519 -> 'DISALLOWED'; +lookup(CP) when 126524 =< CP, CP =< 126529 -> 'UNASSIGNED'; +lookup(CP) when 126531 =< CP, CP =< 126534 -> 'UNASSIGNED'; +lookup(CP) when 126541 =< CP, CP =< 126543 -> 'DISALLOWED'; +lookup(CP) when 126545 =< CP, CP =< 126546 -> 'DISALLOWED'; +lookup(CP) when 126549 =< CP, CP =< 126550 -> 'UNASSIGNED'; +lookup(CP) when 126561 =< CP, CP =< 126562 -> 'DISALLOWED'; +lookup(CP) when 126565 =< CP, CP =< 126566 -> 'UNASSIGNED'; +lookup(CP) when 126567 =< CP, CP =< 126570 -> 'DISALLOWED'; +lookup(CP) when 126572 =< CP, CP =< 126578 -> 'DISALLOWED'; +lookup(CP) when 126580 =< CP, CP =< 126583 -> 'DISALLOWED'; +lookup(CP) when 126585 =< CP, CP =< 126588 -> 'DISALLOWED'; +lookup(CP) when 126592 =< CP, CP =< 126601 -> 'DISALLOWED'; +lookup(CP) when 126603 =< CP, CP =< 126619 -> 'DISALLOWED'; +lookup(CP) when 126620 =< CP, CP =< 126624 -> 'UNASSIGNED'; +lookup(CP) when 126625 =< CP, CP =< 126627 -> 'DISALLOWED'; +lookup(CP) when 126629 =< CP, CP =< 126633 -> 'DISALLOWED'; +lookup(CP) when 126635 =< CP, CP =< 126651 -> 'DISALLOWED'; +lookup(CP) when 126652 =< CP, CP =< 126703 -> 'UNASSIGNED'; +lookup(CP) when 126704 =< CP, CP =< 126705 -> 'DISALLOWED'; +lookup(CP) when 126706 =< CP, CP =< 126975 -> 'UNASSIGNED'; +lookup(CP) when 126976 
=< CP, CP =< 127019 -> 'DISALLOWED'; +lookup(CP) when 127020 =< CP, CP =< 127023 -> 'UNASSIGNED'; +lookup(CP) when 127024 =< CP, CP =< 127123 -> 'DISALLOWED'; +lookup(CP) when 127124 =< CP, CP =< 127135 -> 'UNASSIGNED'; +lookup(CP) when 127136 =< CP, CP =< 127150 -> 'DISALLOWED'; +lookup(CP) when 127151 =< CP, CP =< 127152 -> 'UNASSIGNED'; +lookup(CP) when 127153 =< CP, CP =< 127167 -> 'DISALLOWED'; +lookup(CP) when 127169 =< CP, CP =< 127183 -> 'DISALLOWED'; +lookup(CP) when 127185 =< CP, CP =< 127221 -> 'DISALLOWED'; +lookup(CP) when 127222 =< CP, CP =< 127231 -> 'UNASSIGNED'; +lookup(CP) when 127232 =< CP, CP =< 127405 -> 'DISALLOWED'; +lookup(CP) when 127406 =< CP, CP =< 127461 -> 'UNASSIGNED'; +lookup(CP) when 127462 =< CP, CP =< 127490 -> 'DISALLOWED'; +lookup(CP) when 127491 =< CP, CP =< 127503 -> 'UNASSIGNED'; +lookup(CP) when 127504 =< CP, CP =< 127547 -> 'DISALLOWED'; +lookup(CP) when 127548 =< CP, CP =< 127551 -> 'UNASSIGNED'; +lookup(CP) when 127552 =< CP, CP =< 127560 -> 'DISALLOWED'; +lookup(CP) when 127561 =< CP, CP =< 127567 -> 'UNASSIGNED'; +lookup(CP) when 127568 =< CP, CP =< 127569 -> 'DISALLOWED'; +lookup(CP) when 127570 =< CP, CP =< 127583 -> 'UNASSIGNED'; +lookup(CP) when 127584 =< CP, CP =< 127589 -> 'DISALLOWED'; +lookup(CP) when 127590 =< CP, CP =< 127743 -> 'UNASSIGNED'; +lookup(CP) when 127744 =< CP, CP =< 128727 -> 'DISALLOWED'; +lookup(CP) when 128728 =< CP, CP =< 128735 -> 'UNASSIGNED'; +lookup(CP) when 128736 =< CP, CP =< 128748 -> 'DISALLOWED'; +lookup(CP) when 128749 =< CP, CP =< 128751 -> 'UNASSIGNED'; +lookup(CP) when 128752 =< CP, CP =< 128764 -> 'DISALLOWED'; +lookup(CP) when 128765 =< CP, CP =< 128767 -> 'UNASSIGNED'; +lookup(CP) when 128768 =< CP, CP =< 128883 -> 'DISALLOWED'; +lookup(CP) when 128884 =< CP, CP =< 128895 -> 'UNASSIGNED'; +lookup(CP) when 128896 =< CP, CP =< 128984 -> 'DISALLOWED'; +lookup(CP) when 128985 =< CP, CP =< 128991 -> 'UNASSIGNED'; +lookup(CP) when 128992 =< CP, CP =< 129003 -> 'DISALLOWED'; 
+lookup(CP) when 129004 =< CP, CP =< 129023 -> 'UNASSIGNED'; +lookup(CP) when 129024 =< CP, CP =< 129035 -> 'DISALLOWED'; +lookup(CP) when 129036 =< CP, CP =< 129039 -> 'UNASSIGNED'; +lookup(CP) when 129040 =< CP, CP =< 129095 -> 'DISALLOWED'; +lookup(CP) when 129096 =< CP, CP =< 129103 -> 'UNASSIGNED'; +lookup(CP) when 129104 =< CP, CP =< 129113 -> 'DISALLOWED'; +lookup(CP) when 129114 =< CP, CP =< 129119 -> 'UNASSIGNED'; +lookup(CP) when 129120 =< CP, CP =< 129159 -> 'DISALLOWED'; +lookup(CP) when 129160 =< CP, CP =< 129167 -> 'UNASSIGNED'; +lookup(CP) when 129168 =< CP, CP =< 129197 -> 'DISALLOWED'; +lookup(CP) when 129198 =< CP, CP =< 129199 -> 'UNASSIGNED'; +lookup(CP) when 129200 =< CP, CP =< 129201 -> 'DISALLOWED'; +lookup(CP) when 129202 =< CP, CP =< 129279 -> 'UNASSIGNED'; +lookup(CP) when 129280 =< CP, CP =< 129400 -> 'DISALLOWED'; +lookup(CP) when 129402 =< CP, CP =< 129483 -> 'DISALLOWED'; +lookup(CP) when 129485 =< CP, CP =< 129619 -> 'DISALLOWED'; +lookup(CP) when 129620 =< CP, CP =< 129631 -> 'UNASSIGNED'; +lookup(CP) when 129632 =< CP, CP =< 129645 -> 'DISALLOWED'; +lookup(CP) when 129646 =< CP, CP =< 129647 -> 'UNASSIGNED'; +lookup(CP) when 129648 =< CP, CP =< 129652 -> 'DISALLOWED'; +lookup(CP) when 129653 =< CP, CP =< 129655 -> 'UNASSIGNED'; +lookup(CP) when 129656 =< CP, CP =< 129658 -> 'DISALLOWED'; +lookup(CP) when 129659 =< CP, CP =< 129663 -> 'UNASSIGNED'; +lookup(CP) when 129664 =< CP, CP =< 129670 -> 'DISALLOWED'; +lookup(CP) when 129671 =< CP, CP =< 129679 -> 'UNASSIGNED'; +lookup(CP) when 129680 =< CP, CP =< 129704 -> 'DISALLOWED'; +lookup(CP) when 129705 =< CP, CP =< 129711 -> 'UNASSIGNED'; +lookup(CP) when 129712 =< CP, CP =< 129718 -> 'DISALLOWED'; +lookup(CP) when 129719 =< CP, CP =< 129727 -> 'UNASSIGNED'; +lookup(CP) when 129728 =< CP, CP =< 129730 -> 'DISALLOWED'; +lookup(CP) when 129731 =< CP, CP =< 129743 -> 'UNASSIGNED'; +lookup(CP) when 129744 =< CP, CP =< 129750 -> 'DISALLOWED'; +lookup(CP) when 129751 =< CP, CP =< 129791 -> 
'UNASSIGNED'; +lookup(CP) when 129792 =< CP, CP =< 129938 -> 'DISALLOWED'; +lookup(CP) when 129940 =< CP, CP =< 129994 -> 'DISALLOWED'; +lookup(CP) when 129995 =< CP, CP =< 130031 -> 'UNASSIGNED'; +lookup(CP) when 130032 =< CP, CP =< 130041 -> 'DISALLOWED'; +lookup(CP) when 130042 =< CP, CP =< 131069 -> 'UNASSIGNED'; +lookup(CP) when 131070 =< CP, CP =< 131071 -> 'DISALLOWED'; +lookup(CP) when 131072 =< CP, CP =< 173789 -> 'PVALID'; +lookup(CP) when 173790 =< CP, CP =< 173823 -> 'UNASSIGNED'; +lookup(CP) when 173824 =< CP, CP =< 177972 -> 'PVALID'; +lookup(CP) when 177973 =< CP, CP =< 177983 -> 'UNASSIGNED'; +lookup(CP) when 177984 =< CP, CP =< 178205 -> 'PVALID'; +lookup(CP) when 178206 =< CP, CP =< 178207 -> 'UNASSIGNED'; +lookup(CP) when 178208 =< CP, CP =< 183969 -> 'PVALID'; +lookup(CP) when 183970 =< CP, CP =< 183983 -> 'UNASSIGNED'; +lookup(CP) when 183984 =< CP, CP =< 191456 -> 'PVALID'; +lookup(CP) when 191457 =< CP, CP =< 194559 -> 'UNASSIGNED'; +lookup(CP) when 194560 =< CP, CP =< 195101 -> 'DISALLOWED'; +lookup(CP) when 195102 =< CP, CP =< 196605 -> 'UNASSIGNED'; +lookup(CP) when 196606 =< CP, CP =< 196607 -> 'DISALLOWED'; +lookup(CP) when 196608 =< CP, CP =< 201546 -> 'PVALID'; +lookup(CP) when 201547 =< CP, CP =< 262141 -> 'UNASSIGNED'; +lookup(CP) when 262142 =< CP, CP =< 262143 -> 'DISALLOWED'; +lookup(CP) when 262144 =< CP, CP =< 327677 -> 'UNASSIGNED'; +lookup(CP) when 327678 =< CP, CP =< 327679 -> 'DISALLOWED'; +lookup(CP) when 327680 =< CP, CP =< 393213 -> 'UNASSIGNED'; +lookup(CP) when 393214 =< CP, CP =< 393215 -> 'DISALLOWED'; +lookup(CP) when 393216 =< CP, CP =< 458749 -> 'UNASSIGNED'; +lookup(CP) when 458750 =< CP, CP =< 458751 -> 'DISALLOWED'; +lookup(CP) when 458752 =< CP, CP =< 524285 -> 'UNASSIGNED'; +lookup(CP) when 524286 =< CP, CP =< 524287 -> 'DISALLOWED'; +lookup(CP) when 524288 =< CP, CP =< 589821 -> 'UNASSIGNED'; +lookup(CP) when 589822 =< CP, CP =< 589823 -> 'DISALLOWED'; +lookup(CP) when 589824 =< CP, CP =< 655357 -> 
'UNASSIGNED'; +lookup(CP) when 655358 =< CP, CP =< 655359 -> 'DISALLOWED'; +lookup(CP) when 655360 =< CP, CP =< 720893 -> 'UNASSIGNED'; +lookup(CP) when 720894 =< CP, CP =< 720895 -> 'DISALLOWED'; +lookup(CP) when 720896 =< CP, CP =< 786429 -> 'UNASSIGNED'; +lookup(CP) when 786430 =< CP, CP =< 786431 -> 'DISALLOWED'; +lookup(CP) when 786432 =< CP, CP =< 851965 -> 'UNASSIGNED'; +lookup(CP) when 851966 =< CP, CP =< 851967 -> 'DISALLOWED'; +lookup(CP) when 851968 =< CP, CP =< 917501 -> 'UNASSIGNED'; +lookup(CP) when 917502 =< CP, CP =< 917503 -> 'DISALLOWED'; +lookup(CP) when 917506 =< CP, CP =< 917535 -> 'UNASSIGNED'; +lookup(CP) when 917536 =< CP, CP =< 917631 -> 'DISALLOWED'; +lookup(CP) when 917632 =< CP, CP =< 917759 -> 'UNASSIGNED'; +lookup(CP) when 917760 =< CP, CP =< 917999 -> 'DISALLOWED'; +lookup(CP) when 918000 =< CP, CP =< 983037 -> 'UNASSIGNED'; +lookup(CP) when 983038 =< CP, CP =< 1114111 -> 'DISALLOWED'; +lookup(_) -> 'UNASSIGNED'. \ No newline at end of file diff --git a/deps/idna/src/idna_ucs.erl b/deps/idna/src/idna_ucs.erl new file mode 100644 index 0000000..f1bac2f --- /dev/null +++ b/deps/idna/src/idna_ucs.erl @@ -0,0 +1,165 @@ +%%% -*- erlang -*- +%% +%% Copyright Ericsson AB 2005-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. + + +-module(idna_ucs). + +-compile([verbose,report_warnings,warn_unused_vars]). + + +%%% Micellaneous predicates +-export([is_iso10646/1, is_unicode/1, is_ascii/1]). 
+ +%%% UTF-8 encoding and decoding +-export([to_utf8/1, from_utf8/1]). + +%%% Test if Ch is a legitimate ISO-10646 character code +is_iso10646(Ch) when is_integer(Ch), Ch >= 0 -> + if Ch < 16#D800 -> true; + Ch < 16#E000 -> false; % Surrogates + Ch < 16#FFFE -> true; + Ch =< 16#FFFF -> false; % FFFE and FFFF (not characters) + Ch =< 16#7FFFFFFF -> true; + true -> false + end; +is_iso10646(_) -> false. + +%%% Test if Ch is a legitimate ISO-10646 character code capable of +%%% being encoded in a UTF-16 string. +is_unicode(Ch) when Ch < 16#110000 -> is_iso10646(Ch); +is_unicode(_) -> false. + +%%% Test for legitimate ASCII code +is_ascii(Ch) when is_integer(Ch), Ch >= 0, Ch =< 127 -> true; +is_ascii(_) -> false. + + +%%% UTF-8 encoding and decoding +to_utf8(List) when is_list(List) -> lists:flatmap(fun to_utf8/1, List); +to_utf8(Ch) -> char_to_utf8(Ch). + +from_utf8(Bin) when is_binary(Bin) -> from_utf8(binary_to_list(Bin)); +from_utf8(List) -> + case expand_utf8(List) of + {Result,0} -> Result; + {_Res,_NumBadChar} -> + exit({ucs,{bad_utf8_character_code}}) + end. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% UTF-8 support +%%% Possible errors encoding UTF-8: +%%% - Non-character values (something other than 0 .. 2^31-1). +%%% - Surrogate pair code in string. +%%% - 16#FFFE or 16#FFFF character in string. +%%% Possible errors decoding UTF-8: +%%% - 10xxxxxx or 1111111x as initial byte. +%%% - Insufficient number of 10xxxxxx octets following an initial octet of +%%% multi-octet sequence. +%%% - Non-canonical encoding used. +%%% - Surrogate-pair code encoded as UTF-8. +%%% - 16#FFFE or 16#FFFF character in string. 
+char_to_utf8(Ch) when is_integer(Ch), Ch >= 0 -> + if Ch < 128 -> + %% 0yyyyyyy + [Ch]; + Ch < 16#800 -> + %% 110xxxxy 10yyyyyy + [16#C0 + (Ch bsr 6), + 128+(Ch band 16#3F)]; + Ch < 16#10000 -> + %% 1110xxxx 10xyyyyy 10yyyyyy + if Ch < 16#D800; Ch > 16#DFFF, Ch < 16#FFFE -> + [16#E0 + (Ch bsr 12), + 128+((Ch bsr 6) band 16#3F), + 128+(Ch band 16#3F)] + end; + Ch < 16#200000 -> + %% 11110xxx 10xxyyyy 10yyyyyy 10yyyyyy + [16#F0+(Ch bsr 18), + 128+((Ch bsr 12) band 16#3F), + 128+((Ch bsr 6) band 16#3F), + 128+(Ch band 16#3F)]; + Ch < 16#4000000 -> + %% 111110xx 10xxxyyy 10yyyyyy 10yyyyyy 10yyyyyy + [16#F8+(Ch bsr 24), + 128+((Ch bsr 18) band 16#3F), + 128+((Ch bsr 12) band 16#3F), + 128+((Ch bsr 6) band 16#3F), + 128+(Ch band 16#3F)]; + Ch < 16#80000000 -> + %% 1111110x 10xxxxyy 10yyyyyy 10yyyyyy 10yyyyyy 10yyyyyy + [16#FC+(Ch bsr 30), + 128+((Ch bsr 24) band 16#3F), + 128+((Ch bsr 18) band 16#3F), + 128+((Ch bsr 12) band 16#3F), + 128+((Ch bsr 6) band 16#3F), + 128+(Ch band 16#3F)] + end. + + + + +%% expand_utf8([Byte]) -> {[UnicodeChar],NumberOfBadBytes} +%% Expand UTF8 byte sequences to ISO 10646/Unicode +%% charactes. Any illegal bytes are removed and the number of +%% bad bytes are returned. +%% +%% Reference: +%% RFC 3629: "UTF-8, a transformation format of ISO 10646". + +expand_utf8(Str) -> + expand_utf8_1(Str, [], 0). + +expand_utf8_1([C|Cs], Acc, Bad) when C < 16#80 -> + %% Plain Ascii character. + expand_utf8_1(Cs, [C|Acc], Bad); +expand_utf8_1([C1,C2|Cs], Acc, Bad) when C1 band 16#E0 =:= 16#C0, + C2 band 16#C0 =:= 16#80 -> + case ((C1 band 16#1F) bsl 6) bor (C2 band 16#3F) of + C when 16#80 =< C -> + expand_utf8_1(Cs, [C|Acc], Bad); + _ -> + %% Bad range. 
+ expand_utf8_1(Cs, Acc, Bad+1) + end; +expand_utf8_1([C1,C2,C3|Cs], Acc, Bad) when C1 band 16#F0 =:= 16#E0, + C2 band 16#C0 =:= 16#80, + C3 band 16#C0 =:= 16#80 -> + case ((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor + (C3 band 16#3F) of + C when 16#800 =< C -> + expand_utf8_1(Cs, [C|Acc], Bad); + _ -> + %% Bad range. + expand_utf8_1(Cs, Acc, Bad+1) + end; +expand_utf8_1([C1,C2,C3,C4|Cs], Acc, Bad) when C1 band 16#F8 =:= 16#F0, + C2 band 16#C0 =:= 16#80, + C3 band 16#C0 =:= 16#80, + C4 band 16#C0 =:= 16#80 -> + case ((((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor + (C3 band 16#3F)) bsl 6) bor (C4 band 16#3F) of + C when 16#10000 =< C -> + expand_utf8_1(Cs, [C|Acc], Bad); + _ -> + %% Bad range. + expand_utf8_1(Cs, Acc, Bad+1) + end; +expand_utf8_1([_|Cs], Acc, Bad) -> + %% Ignore bad character. + expand_utf8_1(Cs, Acc, Bad+1); +expand_utf8_1([], Acc, Bad) -> {lists:reverse(Acc),Bad}. diff --git a/deps/idna/src/punycode.erl b/deps/idna/src/punycode.erl new file mode 100644 index 0000000..8b28f2a --- /dev/null +++ b/deps/idna/src/punycode.erl @@ -0,0 +1,173 @@ +%% -*- coding: utf-8 -*- +%%% +%%% This file is part of erlang-idna released under the MIT license. +%%% See the LICENSE for more information. +%%% +%% @doc Punycode ([RFC 3492](http://tools.ietf.org/html/rfc3492)) implementation. + +-module(punycode). + + +-export([encode/1, + decode/1]). + +-define(BASE, 36). +-define(TMIN, 1). +-define(TMAX, 26). +-define(SKEW, 38). +-define(DAMP, 700). +-define(INITIAL_BIAS, 72). +-define(INITIAL_N, 128). +-define(DELIMITER, $-). + + +-define(MAX, 1 bsl 32 - 1). + +%% @doc Convert Unicode to Punycode. +%% +%% exit with an overflow error on overflow, which can only happen on inputs +%% that would take more than 63 encoded bytes, the DNS limit on domain name labels. +-spec encode(string()) -> string(). 
+encode(Input) -> + Output0 = lists:filtermap(fun + (C) when C < 16#80 -> {true, C}; + (_) -> false + end, Input), + B = length(Output0), + Output = case B > 0 of + true -> Output0 ++ [?DELIMITER]; + false -> Output0 + end, + H = B, + encode(Input, Output, H, B, ?INITIAL_N, 0, ?INITIAL_BIAS). + + +encode(Input, Output, H, B, N, Delta, Bias) when H < length(Input) -> + M = lists:min(lists:filter(fun(C) -> C >= N end, Input)), + Delta1 = case (M - N) > ((?MAX - Delta) / (H +1)) of + false -> Delta + (M - N) * (H + 1); + true -> exit(oveflow) + end, + {Output2, H2, Delta2, N2, Bias2} = encode1(Input, Output, H, B, M, Delta1, Bias), + encode(Input, Output2, H2, B, N2, Delta2, Bias2); +encode(_, Output, _, _, _, _, _) -> + Output. + +encode1([C|Rest], Output, H, B, N, Delta, Bias) when C < N -> + Delta2 = Delta + 1, + case Delta2 of + 0 -> exit(oveflow); + _ -> + encode1(Rest, Output, H, B, N, Delta2, Bias) + end; +encode1([C|Rest], Output, H, B, N, Delta, Bias) when C == N -> + encode2(Rest, Output, H, B, N, Delta, Bias, Delta, ?BASE); +encode1([_|Rest], Output, H, B, N, Delta, Bias) -> + encode1(Rest, Output, H, B, N, Delta, Bias); +encode1([], Output, H, _B, N, Delta, Bias) -> + {Output, H, Delta + 1, N +1, Bias}. + +encode2(Rest, Output, H, B, N, Delta, Bias, Q, K) -> + T = if + K =< Bias -> ?TMIN; + K >= (Bias + ?TMAX) -> ?TMAX; + true -> K - Bias + end, + case Q < T of + true -> + CodePoint = to_digit(Q), + Output2 = Output ++ [CodePoint], + Bias2 = adapt(Delta, H +1, H == B), + Delta2 = 0, + H2 = H + 1, + encode1(Rest, Output2, H2, B, N, Delta2, Bias2); + false -> + CodePoint = to_digit(T + ((Q - T) rem (?BASE - T))), + Output2 = Output ++ [CodePoint], + Q2 = (Q - T) div (?BASE - T), + encode2(Rest, Output2, H, B, N, Delta, Bias, Q2, K + ?BASE) + end. + +to_digit(V) when V >= 0, V =< 25 -> V + $a; +to_digit(V) when V >= 26, V =< 35 -> V - 26 + $0; +to_digit(_) -> exit(badarg). + + +%% @doc Convert Punycode to Unicode. 
+%% exit with an overflow or badarg errors if malformed or overflow. +%% Overflow can only happen on inputs that take more than 63 encoded bytes, +%% the DNS limit on domain name labels. +-spec decode(string()) -> string(). +decode(Input) -> + {Output, Input2} = case string:rstr(Input, [?DELIMITER]) of + 0 -> {"", Input}; + Pos -> + {lists:sublist(Input, Pos - 1), lists:sublist(Input, Pos + 1, length(Input) )} + end, + decode(Input2, Output, ?INITIAL_N, ?INITIAL_BIAS, 0). + +decode([], Output, _, _, _) -> Output; +decode(Input, Output, N, Bias, I) -> + decode(Input, Output, N, Bias, I, I, 1, ?BASE). + +decode([C|Rest], Output, N, Bias, I0, OldI, Weight, K) -> + Digit = digit(C), + I1 = case Digit > ((?MAX - I0 ) div Weight) of + false -> I0 + (Digit * Weight); + true -> exit(overflow) + end, + T = if + K =< Bias -> ?TMIN; + K >= (Bias + ?TMAX) -> ?TMAX; + true -> K - Bias + end, + case Digit < T of + true -> + Len = length(Output), + Bias2 = adapt(I1 - OldI, Len + 1, (OldI =:= 0)), + {N2, I2}= case (I1 div (Len +1)) > (?MAX - N) of + false -> + {N + (I1 div (Len + 1)), I1 rem (Len + 1)}; + true -> + exit(overflow) + end, + Output2 = insert(Output, N2, [], I2), + decode(Rest, Output2, N2, Bias2, I2+1); + false -> + case Weight > (?MAX div (?BASE - T)) of + false -> + decode(Rest, Output, N, Bias, I1, OldI, Weight * (?BASE - T), K + ?BASE); + true -> + exit(overflow) + end + end. + +insert(Tail, CP, Head, 0) -> + Head ++ [CP | Tail]; +insert([], _CP, _Head, I) when I > 0-> + exit(overflow); +insert([C | Tail], CP, Head, I) -> + insert(Tail, CP, Head ++ [C], I - 1). + + +digit(C) when C >= $0, C =< $9 -> C - $0 + 26; +digit(C) when C >= $A, C =< $Z -> C - $A; +digit(C) when C >= $a, C =< $z -> C - $a; +digit(_) -> exit(badarg). + +adapt(Delta, NumPoints, FirstTime) -> + Delta2 = case FirstTime of + true -> + Delta div ?DAMP; + false -> + Delta div 2 + end, + adapt(Delta2 + (Delta2 div NumPoints), 0). 
+ +adapt(Delta, K) -> + case Delta > (((?BASE - ?TMIN) * ?TMAX) div 2) of + true -> + adapt(Delta div (?BASE - ?TMIN), K + ?BASE); + false -> + K + (((?BASE - ?TMIN + 1) * Delta) div (Delta + ?SKEW)) + end. \ No newline at end of file diff --git a/deps/jason/.hex b/deps/jason/.hex new file mode 100644 index 0000000..b7e0a46 Binary files /dev/null and b/deps/jason/.hex differ diff --git a/deps/jason/CHANGELOG.md b/deps/jason/CHANGELOG.md new file mode 100644 index 0000000..9a24e59 --- /dev/null +++ b/deps/jason/CHANGELOG.md @@ -0,0 +1,136 @@ +# Changelog + +## 1.4.5 (05.05.2026) + +* Add support for Decimal 3.0 + +## 1.4.4 (26.07.2024) + +* Fix warnings on Elixir 1.17 by conditionally compiling Decimal support + +## 1.4.3 (29.06.2024) + +* Fix derive with _ struct key + +## 1.4.2 (29.06.2024) + +* Fix compiler warnings for Elixir 1.17 + +## 1.4.1 (06.07.2023) + +* Add limit to decoded integer sizes of 1024 digits. This can be changed + with the `decoding_integer_digit_limit` app env config. + +## 1.4.0 (12.09.2022) + +### Enhancements + +* Use the `:erlang.float_to_binary(_, [:short])` function, instead of `io_lib_format.fwrite_g/1` + where available (OTP 24.1+). This provides equivalent output with much less memory used + and significantly improved performance. + +## 1.3.0 (21.12.2021) + +### Enhancements + +* Add the `Jason.OrderedObject` struct +* Support decoding objects preserving all the keys with `objects: :ordered_objects` option +* Support decoding floats to `Decimal` with `floats: :decimals` option +* Add `~j` and `~J` sigils in module `Jason.Sigil` to support writing JSON literals in code + +### Fixes +* Fix error reporting when decoding strings (it was possible to mis-attribute the offending byte) +* Verify fields given to `@derive` + +## 1.2.2 (08.09.2020) + +### Enhancements + +* Support Decimal 2.0 + +## 1.2.1 (04.05.2020) + +### Security + +* Fix `html_safe` escaping in `Jason.encode` + + The ` + +`NimblePool` is a tiny resource-pool implementation. 
+ +Pools in the Erlang VM, and therefore in Elixir, are generally process-based: they manage a group of processes. The downside of said pools is that when they have to manage resources, such as sockets or ports, the additional process leads to overhead. + +In such pools, you usually end-up with two scenarios: + + * You invoke the pool manager, which returns the pooled process, which performs the operation on the socket or port for you, returning you the reply. This approach is non-optimal because all of the data sent and returned by the resource needs to be copied between processes + + * You invoke the pool manager, which returns the pooled process, which gives you access to the resource. Then you can act directly on the resource, avoiding the data copying, but you need to keep the state of the resource in sync with the process + +NimblePool allows you to implement the second scenario without the addition of processes, which leads to a simpler and more efficient implementation. You should consider using NimblePool whenever you have to manage sockets, ports, or NIF resources and you want the client to perform one-off operations on them. For example, NimblePool is a good solution to manage HTTP/1 connections, ports that need to communicate with long-running programs, etc. + +The downside of NimblePool is that, because all resources are under a single process, any resource management operation will happen on this single process, which is more likely to become a bottleneck. This can be addressed, however, by starting one NimblePool per scheduler and by doing scheduler-based dispatches. + +NimblePool may not be a good option to manage processes. After all, the goal of NimblePool is to avoid creating processes for resources. If you already have a process, using a process-based pool such as `poolboy` will provide a better abstraction. + +Finally, avoid using NimblePool to manage resources that support multiplexing, such as HTTP/2 connections. 
In fact, pools are not a good option to manage resources with multiplexing in general, as the pool removes the ability to multiplex. + +## Types of callbacks + +NimblePool has two types of callbacks. Worker callbacks and pool callbacks. The worker callbacks configure the behaviour of each worker, such as initialization, checkin and checkout. The pool callbacks configure general pool behaviour, such as initialization and queueing. + +## Examples + +To use `NimblePool`, you must define a module that implements the pool worker logic, outlined in the `NimblePool` behaviour. + +### Port-based example + +The example below keeps ports on the pool and check them out on every command. Please read the docs for `Port` before using the approach below, especially in regards to zombie ports. + +```elixir +defmodule PortPool do + @behaviour NimblePool + + @doc ~S""" + Executes a given command against a port kept by the pool. + + First we start a pool of ports: + + iex> child = {NimblePool, worker: {PortPool, :cat}, name: PortPool} + iex> Supervisor.start_link([child], strategy: :one_for_one) + + Now we can run commands against the ports in the pool: + + iex> PortPool.command(PortPool, "hello\n") + "hello\n" + iex> PortPool.command(PortPool, "world\n") + "world\n" + + """ + def command(pool, command, opts \\ []) do + pool_timeout = Keyword.get(opts, :pool_timeout, 5000) + receive_timeout = Keyword.get(opts, :receive_timeout, 15000) + + NimblePool.checkout!(pool, :checkout, fn _from, port -> + send(port, {self(), {:command, command}}) + + receive do + {^port, {:data, data}} -> + try do + Process.unlink(port) + {data, :ok} + rescue + _ -> {data, :close} + end + after + receive_timeout -> + exit(:receive_timeout) + end + end, pool_timeout) + end + + @impl NimblePool + def init_worker(:cat = pool_state) do + path = System.find_executable("cat") + port = Port.open({:spawn_executable, path}, [:binary, args: ["-"]]) + {:ok, port, pool_state} + end + + @impl NimblePool + # Transfer the 
port to the caller + def handle_checkout(:checkout, {pid, _}, port, pool_state) do + Port.connect(port, pid) + {:ok, port, port, pool_state} + end + + @impl NimblePool + # We got it back + def handle_checkin(:ok, _from, port, pool_state) do + {:ok, port, pool_state} + end + + def handle_checkin(:close, _from, _port, pool_state) do + {:remove, :closed, pool_state} + end + + @impl NimblePool + # On terminate, effectively close it + def terminate_worker(_reason, port, pool_state) do + Port.close(port) + {:ok, pool_state} + end +end +``` + +### HTTP/1-based example + +The pool below uses [Mint](https://hexdocs.pm/mint) for HTTP/1 connections. It establishes connections eagerly. A better approach may be to establish connections lazily on checkout, as done by [Finch](https://github.com/keathley/finch), which is built on top of [Mint](https://github.com/elixir-mint/mint) + [NimbleOptions](https://github.com/dashbitco/nimble_options). + +```elixir +defmodule HTTP1Pool do + @behaviour NimblePool + + @doc ~S""" + Executes a given command against a connection kept by the pool. 
+ + First we start the pool: + + child = {NimblePool, worker: {HTTP1Pool, {:https, "elixir-lang.org", 443}}, name: HTTP1Pool} + Supervisor.start_link([child], strategy: :one_for_one) + + Then we can use the connections in the pool: + + iex> HTTP1Pool.get(HTTP1Pool, "/") + {:ok, %{status: 200, ...}} + + """ + def get(pool, path, opts \\ []) do + pool_timeout = Keyword.get(opts, :pool_timeout, 5000) + receive_timeout = Keyword.get(opts, :receive_timeout, 15000) + + NimblePool.checkout!( + pool, + :checkout, + fn _from, conn -> + {{kind, result_or_error}, conn} = + with {:ok, conn, ref} <- Mint.HTTP1.request(conn, "GET", path, [], nil), + {:ok, conn, result} <- receive_response([], conn, ref, %{}, receive_timeout) do + {{:ok, result}, transfer_if_open(conn)} + end + + {{kind, result_or_error}, conn} + end, + pool_timeout + ) + end + + defp transfer_if_open(conn) do + if Mint.HTTP1.open?(conn) do + {:ok, conn} + else + :closed + end + end + + defp receive_response([], conn, ref, response, timeout) do + {:ok, conn, entries} = Mint.HTTP1.recv(conn, 0, timeout) + receive_response(entries, conn, ref, response, timeout) + end + + defp receive_response([entry | entries], conn, ref, response, timeout) do + case entry do + {kind, ^ref, value} when kind in [:status, :headers] -> + response = Map.put(response, kind, value) + receive_response(entries, conn, ref, response, timeout) + + {:data, ^ref, data} -> + response = Map.update(response, :data, data, &(&1 <> data)) + receive_response(entries, conn, ref, response, timeout) + + {:done, ^ref} -> + {:ok, conn, response} + + {:error, ^ref, error} -> + {:error, conn, error} + end + end + + @impl NimblePool + def init_worker({scheme, host, port} = pool_state) do + parent = self() + + async = fn -> + # TODO: Add back-off + {:ok, conn} = Mint.HTTP1.connect(scheme, host, port, []) + {:ok, conn} = Mint.HTTP1.controlling_process(conn, parent) + conn + end + + {:async, async, pool_state} + end + + @impl NimblePool + # Transfer the conn to 
the caller. + # If we lost the connection, then we remove it to try again. + def handle_checkout(:checkout, _from, conn, pool_state) do + with {:ok, conn} <- Mint.HTTP1.set_mode(conn, :passive) do + {:ok, conn, conn, pool_state} + else + _ -> {:remove, :closed, pool_state} + end + end + + @impl NimblePool + # We got it back. + def handle_checkin(state, _from, _old_conn, pool_state) do + with {:ok, conn} <- state, + {:ok, conn} <- Mint.HTTP1.set_mode(conn, :active) do + {:ok, conn, pool_state} + else + {:error, _} -> {:remove, :closed, pool_state} + end + end + + @impl NimblePool + # If it is closed, drop it. + def handle_info(message, conn) do + case Mint.HTTP1.stream(conn, message) do + {:ok, _, _} -> {:ok, conn} + {:error, _, _, _} -> {:remove, :closed} + :unknown -> {:ok, conn} + end + end + + @impl NimblePool + # On terminate, effectively close it. + # This will succeed even if it was already closed or if we don't own it. + def terminate_worker(_reason, conn, pool_state) do + Mint.HTTP1.close(conn) + {:ok, pool_state} + end +end +``` + + + +## Installation + +Add `nimble_pool` to your list of dependencies in `mix.exs`: + +```elixir +def deps do + [{:nimble_pool, "~> 1.0"}] +end +``` + +## Nimble* + +All nimble libraries by Dashbit: + + * [NimbleCSV](https://github.com/dashbitco/nimble_csv) - simple and fast CSV parsing + * [NimbleOptions](https://github.com/dashbitco/nimble_options) - tiny library for validating and documenting high-level options + * [NimbleParsec](https://github.com/dashbitco/nimble_parsec) - simple and fast parser combinators + * [NimblePool](https://github.com/dashbitco/nimble_pool) - tiny resource-pool implementation + * [NimblePublisher](https://github.com/dashbitco/nimble_publisher) - a minimal filesystem-based publishing engine with Markdown support and code highlighting + * [NimbleTOTP](https://github.com/dashbitco/nimble_totp) - tiny library for generating time-based one time passwords (TOTP) + +## License + +Copyright 2019 
Plataformatec \ +Copyright 2020 Dashbit + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +[docs]: https://hexdocs.pm/nimble_pool diff --git a/deps/nimble_pool/hex_metadata.config b/deps/nimble_pool/hex_metadata.config new file mode 100644 index 0000000..53d65a8 --- /dev/null +++ b/deps/nimble_pool/hex_metadata.config @@ -0,0 +1,13 @@ +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/dashbitco/nimble_pool">>}]}. +{<<"name">>,<<"nimble_pool">>}. +{<<"version">>,<<"1.1.0">>}. +{<<"description">>,<<"A tiny resource-pool implementation">>}. +{<<"elixir">>,<<"~> 1.7">>}. +{<<"app">>,<<"nimble_pool">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"requirements">>,[]}. +{<<"files">>, + [<<"lib">>,<<"lib/nimble_pool">>,<<"lib/nimble_pool/application.ex">>, + <<"lib/nimble_pool.ex">>,<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>, + <<"CHANGELOG.md">>]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/nimble_pool/lib/nimble_pool.ex b/deps/nimble_pool/lib/nimble_pool.ex new file mode 100644 index 0000000..3920892 --- /dev/null +++ b/deps/nimble_pool/lib/nimble_pool.ex @@ -0,0 +1,1120 @@ +defmodule NimblePool do + @external_resource "README.md" + @moduledoc "README.md" + |> File.read!() + |> String.split("") + |> Enum.fetch!(1) + + use GenServer + require Logger + + @type from :: {pid, reference} + @type init_arg :: term + @type pool_state :: term + @type worker_state :: term + @type client_state :: term + @type user_reason :: term + + @typedoc since: "1.1.0" + @type pool :: GenServer.server() + + @doc """ + Initializes the worker. + + It receives the worker argument passed to `start_link/1` if `c:init_pool/1` is + not implemented, otherwise the pool state returned by `c:init_pool/1`. It must + return `{:ok, worker_state, pool_state}` or `{:async, fun, pool_state}`, where the `fun` + is a zero-arity function that must return the worker state. + + If this callback returns `{:async, fun, pool_state}`, `fun` is executed in a **separate + one-off process**. Because of this, if you start resources that the pool needs to "own", + you need to transfer ownership to the pool process. For example, if your async `fun` + opens a `:gen_tcp` socket, you'll have to use `:gen_tcp.controlling_process/2` to transfer + ownership back to the pool. + + > #### Blocking the pool {: .warning} + > + > This callback is synchronous and therefore will block the pool, potentially + > for a significant amount of time since it's executed in the pool process once + > per worker. > If you need to perform long initialization, consider using the + > `{:async, fun, pool_state}` return type. + """ + @doc callback: :worker + @callback init_worker(pool_state) :: + {:ok, worker_state, pool_state} | {:async, (-> worker_state), pool_state} + + @doc """ + Initializes the pool. 
+ + It receives the worker argument passed to `start_link/1` and must + return `{:ok, pool_state}` upon successful initialization, + `:ignore` to exit normally, or `{:stop, reason}` to exit with `reason` + and return `{:error, reason}`. + + This is a good place to perform a registration, for example. + + It must return the `pool_state`. The `pool_state` is given to + `init_worker`. By default, it simply returns the given arguments. + + This callback is optional. + + ## Examples + + @impl NimblePool + def init_pool(options) do + Registry.register(options[:registry], :some_key, :some_value) + end + + """ + @doc callback: :pool + @callback init_pool(init_arg) :: {:ok, pool_state} | :ignore | {:stop, reason :: any()} + + @doc """ + Checks a worker out. + + The `maybe_wrapped_command` is the `command` passed to `checkout!/4` if the worker + doesn't implement the `c:handle_enqueue/2` callback, otherwise it's the possibly-wrapped + command returned by `c:handle_enqueue/2`. + + This callback must return one of: + + * `{:ok, client_state, worker_state, pool_state}` — the client state is given to + the callback function passed to `checkout!/4`. `worker_state` and `pool_state` + can potentially update the state of the checked-out worker and the pool. + + * `{:remove, reason, pool_state}` — `NimblePool` will remove the checked-out worker and + attempt to checkout another worker. + + * `{:skip, Exception.t(), pool_state}` — `NimblePool` will skip the checkout, the client will + raise the returned exception, and the worker will be left ready for the next + checkout attempt. + + > #### Blocking the pool {: .warning} + > + > This callback is synchronous and therefore will block the pool. + > Avoid performing long work in here. Instead, do as much work as + > possible on the client. + + Once the worker is checked out, the worker won't handle any + messages targeted to `c:handle_info/2`. 
+ """ + @doc callback: :worker + @callback handle_checkout(maybe_wrapped_command :: term, from, worker_state, pool_state) :: + {:ok, client_state, worker_state, pool_state} + | {:remove, user_reason, pool_state} + | {:skip, Exception.t(), pool_state} + + @doc """ + Checks a worker back in the pool. + + It receives the potentially-updated `client_state`, returned by the `checkout!/4` + anonymous function, and it must return either + `{:ok, worker_state, pool_state}` or `{:remove, reason, pool_state}`. + + > #### Blocking the pool {: .warning} + > + > This callback is synchronous and therefore will block the pool. + > Avoid performing long work in here, instead do as much work as + > possible on the client. + + Once the connection is checked in, it may immediately be handed + to another client, without traversing any of the messages in the + pool inbox. + + This callback is optional. + """ + @doc callback: :worker + @callback handle_checkin(client_state, from, worker_state, pool_state) :: + {:ok, worker_state, pool_state} | {:remove, user_reason, pool_state} + + @doc """ + Handles an update instruction from a checked out worker. + + See `update/2` for more information. + + This callback is optional. + """ + @doc callback: :worker + @callback handle_update(message :: term, worker_state, pool_state) :: + {:ok, worker_state, pool_state} + + @doc """ + Receives a message in the pool and handles it as each worker. + + It receives the `message` and it must return either + `{:ok, worker_state}` to update the worker state, or `{:remove, reason}` to + remove the worker. + + Since there is only a single pool process that can receive messages, this + callback is executed once for every worker when the pool receives `message`. + + > #### Blocking the pool {: .warning} + > + > This callback is synchronous and therefore will block the pool while it + > executes for each worker. Avoid performing long work in here. + + This callback is optional. 
+ """ + @doc callback: :worker + @callback handle_info(message :: term, worker_state) :: + {:ok, worker_state} | {:remove, user_reason} + + @doc """ + Executed by the pool whenever a request to check out a worker is enqueued. + + The `command` argument should be treated as an opaque value, but it can be + wrapped with some data to be used in `c:handle_checkout/4`. + + It must return either `{:ok, maybe_wrapped_command, pool_state}` or + `{:skip, Exception.t(), pool_state}` if checkout is to be skipped. + + > #### Blocking the pool {: .warning} + > + > This callback is synchronous and therefore will block the pool. + > Avoid performing long work in here. + + This callback is optional. + + ## Examples + + @impl NimblePool + def handle_enqueue(command, pool_state) do + {:ok, {:wrapped, command}, pool_state} + end + + """ + @doc callback: :pool + @callback handle_enqueue(command :: term, pool_state) :: + {:ok, maybe_wrapped_command :: term, pool_state} + | {:skip, Exception.t(), pool_state} + + @doc """ + Terminates a worker. + + The `reason` argument is: + + * `:DOWN` whenever the client link breaks + * `:timeout` whenever the client times out + * one of `:throw`, `:error`, `:exit` whenever the client crashes with one + of the reasons above. + * `reason` if at any point you return `{:remove, reason}` + * if any callback raises, the raised exception will be given as `reason`. + + It receives the latest known `worker_state`, which may not + be the latest state. For example, if a client checks out the + state and crashes, we don't fully know the `client_state`, + so the `c:terminate_worker/3` callback needs to take such scenarios + into account. + + This callback must always return `{:ok, pool_state}` with the potentially-updated + pool state. + + This callback is optional. 
+ """ + @doc callback: :pool + @callback terminate_worker( + reason :: :DOWN | :timeout | :throw | :error | :exit | user_reason, + worker_state, + pool_state + ) :: + {:ok, pool_state} + + @doc """ + Handle pings due to inactivity on the worker. + + Executed whenever the idle worker periodic timer verifies that a worker has been idle + on the pool for longer than the `:worker_idle_timeout` pool configuration (in milliseconds). + + This callback must return one of the following values: + + * `{:ok, worker_state}`: Updates worker state. + + * `{:remove, user_reason}`: The pool will proceed to the standard worker termination + defined in `terminate_worker/3`. + + * `{:stop, user_reason}`: The entire pool process will be terminated, and `terminate_worker/3` + will be called for every worker on the pool. + + This callback is optional. + + ## Max idle pings + + The `:max_idle_pings` pool option is useful to prevent sequential termination of a large number + of workers. However, it is important to keep in mind the following behaviours whenever + utilizing it. + + * If you are not terminating workers with `c:handle_ping/2`, you may end up pinging only + the same workers over and over again because each cycle will ping only the first + `:max_idle_pings` workers. + + * If you are terminating workers with `c:handle_ping/2`, the last worker may be terminated + after up to `worker_idle_timeout + worker_idle_timeout * ceil(number_of_workers/max_idle_pings)`, + instead of `2 * worker_idle_timeout` milliseconds of idle time. + + For instance consider a pool with 10 workers and a ping of 1 second. + + Given a negligible worker termination time and a worst-case scenario where all the workers + go idle right after a verification cycle is started, then without `max_idle_pings` the + last worker will be terminated in the next cycle (2 seconds), whereas with a + `max_idle_pings` of 2 the last worker will be terminated only in the 5th cycle (6 seconds). 
+ + ## Disclaimers + + * On lazy pools, if no worker is currently on the pool the callback will never be called. + Therefore you can not rely on this callback to terminate empty lazy pools. + + * On not lazy pools, if you return `{:remove, user_reason}` you may end up + terminating and initializing workers at the same time every idle verification cycle. + + * On large pools, if many resources go idle at the same cycle, you may end up terminating + a large number of workers sequentially, which could lead to the pool being unable to + fulfill requests. See `:max_idle_pings` option to prevent this. + + """ + @doc callback: :worker + @callback handle_ping( + worker_state, + pool_state + ) :: + {:ok, worker_state} | {:remove, user_reason()} | {:stop, user_reason()} + + @doc """ + Handle pool termination. + + The `reason` argmument is the same given to GenServer's terminate/2 callback. + + It is not necessary to terminate workers here because the + `terminate_worker/3` callback has already been invoked. + + This should be used only for clean up extra resources that can not be + handled by `terminate_worker/3` callback. + + This callback is optional. + """ + @doc callback: :pool + @callback terminate_pool( + reason :: :DOWN | :timeout | :throw | :error | :exit | user_reason, + pool_state + ) :: :ok + + @doc """ + Handle cancelled checkout requests. + + This callback is executed when a checkout request is cancelled unexpectedly. + + The context argument may be `:queued` or `:checked_out`: + + * `:queued` means the cancellation happened before resource checkout. This may happen + when the pool is starving under load and can not serve resources. + + * `:checked_out` means the cancellation happened after resource checkout. This may happen + when the function given to `checkout!/4` raises. + + This callback is optional. 
+ """ + @doc callback: :pool + @callback handle_cancelled( + context :: :queued | :checked_out, + pool_state + ) :: :ok + + @optional_callbacks init_pool: 1, + handle_checkin: 4, + handle_info: 2, + handle_enqueue: 2, + handle_update: 3, + handle_ping: 2, + terminate_worker: 3, + terminate_pool: 2, + handle_cancelled: 2 + + @doc """ + Defines a pool to be started under the supervision tree. + + It accepts the same options as `start_link/1` with the + addition or `:restart` and `:shutdown` that control the + "Child Specification". + + ## Examples + + NimblePool.child_spec(worker: {__MODULE__, :some_arg}, restart: :temporary) + + """ + @spec child_spec(keyword) :: Supervisor.child_spec() + def child_spec(opts) when is_list(opts) do + {worker, _} = Keyword.fetch!(opts, :worker) + {restart, opts} = Keyword.pop(opts, :restart, :permanent) + {shutdown, opts} = Keyword.pop(opts, :shutdown, 5_000) + + %{ + id: worker, + start: {__MODULE__, :start_link, [opts]}, + shutdown: shutdown, + restart: restart + } + end + + @doc """ + Starts a pool. + + ## Options + + * `:worker` - a `{worker_mod, worker_init_arg}` tuple with the worker + module that implements the `NimblePool` behaviour and the worker + initial argument. This argument is **required**. + + * `:pool_size` - how many workers in the pool. Defaults to `10`. + + * `:lazy` - When `true`, workers are started lazily, only when necessary. + Defaults to `false`. + + * `:worker_idle_timeout` - Timeout in milliseconds to tag a worker as idle. + If not nil, starts a periodic timer on the same frequency that will ping + all idle workers using `handle_ping/2` optional callback . + Defaults to no timeout. + + * `:max_idle_pings` - Defines a limit to the number of workers that can be pinged + for each cycle of the `handle_ping/2` optional callback. + Defaults to no limit. See `handle_ping/2` for more details. 
+ + """ + @spec start_link(keyword) :: GenServer.on_start() + def start_link(opts) when is_list(opts) do + {{worker, arg}, opts} = + Keyword.pop_lazy(opts, :worker, fn -> + raise ArgumentError, "missing required :worker option" + end) + + {pool_size, opts} = Keyword.pop(opts, :pool_size, 10) + {lazy, opts} = Keyword.pop(opts, :lazy, false) + {worker_idle_timeout, opts} = Keyword.pop(opts, :worker_idle_timeout, nil) + {max_idle_pings, opts} = Keyword.pop(opts, :max_idle_pings, -1) + + unless is_atom(worker) do + raise ArgumentError, "worker must be an atom, got: #{inspect(worker)}" + end + + unless is_integer(pool_size) and pool_size > 0 do + raise ArgumentError, "pool_size must be a positive integer, got: #{inspect(pool_size)}" + end + + GenServer.start_link( + __MODULE__, + {worker, arg, pool_size, lazy, worker_idle_timeout, max_idle_pings}, + opts + ) + end + + @doc """ + Stops the given `pool`. + + The pool exits with the given `reason`. The pool has `timeout` milliseconds + to terminate, otherwise it will be brutally terminated. + + ## Examples + + NimblePool.stop(pool) + #=> :ok + + """ + @spec stop(pool, reason :: term, timeout) :: :ok + def stop(pool, reason \\ :normal, timeout \\ :infinity) do + GenServer.stop(pool, reason, timeout) + end + + @doc """ + Checks out a worker from the pool. + + It expects a command, which will be passed to the `c:handle_checkout/4` + callback. The `c:handle_checkout/4` callback will return a client state, + which is given to the `function`. + + The `function` receives two arguments, the request + (`{pid(), reference()}`) and the `client_state`. + The function must return a two-element tuple, where the first element is the + return value for `checkout!/4`, and the second element is the updated `client_state`, + which will be given as the first argument to `c:handle_checkin/4`. + + `checkout!/4` also has an optional `timeout` value. This value will be applied + to the checkout operation itself. 
The "check in" operation happens asynchronously. + """ + @spec checkout!(pool, command :: term, function, timeout) :: result + when function: (from, client_state -> {result, client_state}), result: var + def checkout!(pool, command, function, timeout \\ 5_000) when is_function(function, 2) do + # Re-implementation of gen.erl call to avoid multiple monitors. + pid = GenServer.whereis(pool) + + unless pid do + exit!(:noproc, :checkout, [pool]) + end + + ref = Process.monitor(pid) + send_call(pid, ref, {:checkout, command, deadline(timeout)}) + + receive do + {^ref, {:skipped, exception}} -> + raise exception + + {^ref, client_state} -> + Process.demonitor(ref, [:flush]) + + try do + function.({pid, ref}, client_state) + catch + kind, reason -> + send(pid, {__MODULE__, :cancel, ref, kind}) + :erlang.raise(kind, reason, __STACKTRACE__) + else + {result, client_state} -> + send(pid, {__MODULE__, :checkin, ref, client_state}) + result + end + + {:DOWN, ^ref, _, _, :noconnection} -> + exit!({:nodedown, get_node(pid)}, :checkout, [pool]) + + {:DOWN, ^ref, _, _, reason} -> + exit!(reason, :checkout, [pool]) + after + timeout -> + send(pid, {__MODULE__, :cancel, ref, :timeout}) + Process.demonitor(ref, [:flush]) + exit!(:timeout, :checkout, [pool]) + end + end + + @doc """ + Sends an **update** instruction to the pool about the checked out worker. + + This must be called inside the `checkout!/4` callback function with + the `from` value given to `c:handle_checkout/4`. + + This is useful to update the pool's state before effectively + checking the state in, which is handy when transferring + resources requires two steps. 
+ """ + @spec update(from, command :: term) :: :ok + def update({pid, ref} = _from, command) do + send(pid, {__MODULE__, :update, ref, command}) + :ok + end + + defp deadline(timeout) when is_integer(timeout) do + System.monotonic_time() + System.convert_time_unit(timeout, :millisecond, :native) + end + + defp deadline(:infinity), do: :infinity + + defp get_node({_, node}), do: node + defp get_node(pid) when is_pid(pid), do: node(pid) + + defp send_call(pid, ref, message) do + # Auto-connect is asynchronous. But we still use :noconnect to make sure + # we send on the monitored connection, and not trigger a new auto-connect. + Process.send(pid, {:"$gen_call", {self(), ref}, message}, [:noconnect]) + end + + defp exit!(reason, fun, args) do + exit({reason, {__MODULE__, fun, args}}) + end + + ## Callbacks + + @impl true + def init({worker, arg, pool_size, lazy, worker_idle_timeout, max_idle_pings}) do + Process.flag(:trap_exit, true) + + case Code.ensure_loaded(worker) do + {:module, _} -> + :ok + + {:error, reason} -> + raise ArgumentError, "failed to load worker module #{inspect(worker)}: #{inspect(reason)}" + end + + lazy = if lazy, do: pool_size, else: nil + + if worker_idle_timeout do + if function_exported?(worker, :handle_ping, 2) do + Process.send_after(self(), :check_idle, worker_idle_timeout) + else + IO.warn( + ":worker_idle_timeout was given but the worker does not export a handle_ping/2 callback" + ) + end + end + + with {:ok, pool_state} <- do_init_pool(worker, arg) do + {pool_state, resources, async} = + if is_nil(lazy) do + Enum.reduce(1..pool_size, {pool_state, :queue.new(), %{}}, fn + _, {pool_state, resources, async} -> + init_worker(worker, pool_state, resources, async, worker_idle_timeout) + end) + else + {pool_state, :queue.new(), %{}} + end + + state = %{ + worker: worker, + queue: :queue.new(), + requests: %{}, + monitors: %{}, + resources: resources, + async: async, + state: pool_state, + lazy: lazy, + worker_idle_timeout: worker_idle_timeout, 
+ max_idle_pings: max_idle_pings + } + + {:ok, state} + end + end + + @impl true + def handle_call({:checkout, command, deadline}, {pid, ref} = from, state) do + %{requests: requests, monitors: monitors, worker: worker, state: pool_state} = state + mon_ref = Process.monitor(pid) + requests = Map.put(requests, ref, {pid, mon_ref, :command, command, deadline}) + monitors = Map.put(monitors, mon_ref, ref) + state = %{state | requests: requests, monitors: monitors} + + case handle_enqueue(worker, command, pool_state) do + {:ok, command, pool_state} -> + {:noreply, maybe_checkout(command, mon_ref, deadline, from, %{state | state: pool_state})} + + {:skip, exception, pool_state} -> + state = remove_request(%{state | state: pool_state}, ref, mon_ref) + {:reply, {:skipped, exception}, state} + end + end + + @impl true + def handle_info({__MODULE__, :update, ref, command}, state) do + %{requests: requests, state: pool_state, worker: worker} = state + + case requests do + %{^ref => {pid, mon_ref, :state, worker_state}} -> + {:ok, worker_state, pool_state} = worker.handle_update(command, worker_state, pool_state) + requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_state}) + {:noreply, %{state | requests: requests, state: pool_state}} + + %{} -> + exit(:unexpected_precheckin) + end + end + + @impl true + def handle_info({__MODULE__, :checkin, ref, worker_client_state}, state) do + %{ + requests: requests, + resources: resources, + worker: worker, + state: pool_state, + worker_idle_timeout: worker_idle_timeout + } = state + + case requests do + %{^ref => {pid, mon_ref, :state, worker_server_state}} -> + checkin = + if function_exported?(worker, :handle_checkin, 4) do + args = [worker_client_state, {pid, ref}, worker_server_state, pool_state] + apply_worker_callback(pool_state, worker, :handle_checkin, args) + else + {:ok, worker_server_state, pool_state} + end + + {resources, state} = + case checkin do + {:ok, worker_server_state, pool_state} -> + 
{:queue.in({worker_server_state, get_metadata(worker_idle_timeout)}, resources), + %{state | state: pool_state}} + + {:remove, reason, pool_state} -> + {resources, + remove_worker(reason, worker_server_state, %{state | state: pool_state})} + end + + state = remove_request(state, ref, mon_ref) + {:noreply, maybe_checkout(%{state | resources: resources})} + + %{} -> + exit(:unexpected_checkin) + end + end + + @impl true + def handle_info({__MODULE__, :cancel, ref, reason}, state) do + cancel_request_ref(ref, reason, state) + end + + @impl true + def handle_info({__MODULE__, :init_worker}, state) do + %{ + async: async, + resources: resources, + worker: worker, + state: pool_state, + worker_idle_timeout: worker_idle_timeout + } = state + + {pool_state, resources, async} = + init_worker(worker, pool_state, resources, async, worker_idle_timeout) + + {:noreply, maybe_checkout(%{state | async: async, resources: resources, state: pool_state})} + end + + @impl true + def handle_info({:DOWN, ref, _, _, _} = down, state) do + %{monitors: monitors, async: async} = state + + case monitors do + %{^ref => request_ref} -> + cancel_request_ref(request_ref, :DOWN, state) + + %{} -> + case async do + %{^ref => _} -> remove_async_ref(ref, state) + %{} -> maybe_handle_info(down, state) + end + end + end + + @impl true + def handle_info({:EXIT, pid, _reason} = exit, state) do + %{async: async} = state + + case async do + %{^pid => _} -> {:noreply, %{state | async: Map.delete(async, pid)}} + %{} -> maybe_handle_info(exit, state) + end + end + + @impl true + def handle_info({ref, worker_state} = reply, state) when is_reference(ref) do + %{async: async, resources: resources, worker_idle_timeout: worker_idle_timeout} = state + + case async do + %{^ref => _} -> + Process.demonitor(ref, [:flush]) + resources = :queue.in({worker_state, get_metadata(worker_idle_timeout)}, resources) + async = Map.delete(async, ref) + state = %{state | async: async, resources: resources} + {:noreply, 
maybe_checkout(state)} + + %{} -> + maybe_handle_info(reply, state) + end + end + + @impl true + def handle_info( + :check_idle, + %{resources: resources, worker_idle_timeout: worker_idle_timeout} = state + ) do + case check_idle_resources(resources, state) do + {:ok, new_resources, new_state} -> + Process.send_after(self(), :check_idle, worker_idle_timeout) + {:noreply, %{new_state | resources: new_resources}} + + {:stop, reason, state} -> + {:stop, {:shutdown, reason}, state} + end + end + + @impl true + def handle_info(msg, state) do + maybe_handle_info(msg, state) + end + + @impl true + def terminate(reason, %{worker: worker, resources: resources} = state) do + for {worker_server_state, _} <- :queue.to_list(resources) do + maybe_terminate_worker(reason, worker_server_state, state) + end + + if function_exported?(worker, :terminate_pool, 2) do + worker.terminate_pool(reason, state) + end + + :ok + end + + defp do_init_pool(worker, arg) do + if function_exported?(worker, :init_pool, 1) do + worker.init_pool(arg) + else + {:ok, arg} + end + end + + defp remove_async_ref(ref, state) do + %{ + async: async, + resources: resources, + worker: worker, + state: pool_state, + worker_idle_timeout: worker_idle_timeout + } = state + + # If an async worker failed to start, we try to start another one + # immediately, even if the pool is lazy, as we assume there is an + # immediate need for this resource. 
+ {pool_state, resources, async} = + init_worker(worker, pool_state, resources, Map.delete(async, ref), worker_idle_timeout) + + {:noreply, %{state | resources: resources, async: async, state: pool_state}} + end + + defp cancel_request_ref( + ref, + reason, + %{requests: requests, worker: worker, state: pool_state} = state + ) do + case requests do + # Exited or timed out before we could serve it + %{^ref => {_, mon_ref, :command, _command, _deadline}} -> + if function_exported?(worker, :handle_cancelled, 2) do + args = [:queued, pool_state] + apply_worker_callback(worker, :handle_cancelled, args) + end + + {:noreply, remove_request(state, ref, mon_ref)} + + # Exited or errored during client processing + %{^ref => {_, mon_ref, :state, worker_server_state}} -> + if function_exported?(worker, :handle_cancelled, 2) do + args = [:checked_out, pool_state] + apply_worker_callback(worker, :handle_cancelled, args) + end + + state = remove_request(state, ref, mon_ref) + {:noreply, remove_worker(reason, worker_server_state, state)} + + # The client timed out, sent us a message, and we dropped the deadlined request + %{} -> + if function_exported?(worker, :handle_cancelled, 2) do + args = [:queued, pool_state] + apply_worker_callback(worker, :handle_cancelled, args) + end + + {:noreply, state} + end + end + + defp maybe_handle_info(msg, state) do + %{resources: resources, worker: worker, worker_idle_timeout: worker_idle_timeout} = state + + if function_exported?(worker, :handle_info, 2) do + {resources, state} = + Enum.reduce(:queue.to_list(resources), {:queue.new(), state}, fn + {worker_server_state, _}, {resources, state} -> + case apply_worker_callback(worker, :handle_info, [msg, worker_server_state]) do + {:ok, worker_server_state} -> + {:queue.in({worker_server_state, get_metadata(worker_idle_timeout)}, resources), + state} + + {:remove, reason} -> + {resources, remove_worker(reason, worker_server_state, state)} + end + end) + + {:noreply, %{state | resources: 
resources}} + else + {:noreply, state} + end + end + + defp maybe_checkout(%{queue: queue, requests: requests} = state) do + case :queue.out(queue) do + {{:value, {pid, ref}}, queue} -> + case requests do + # The request still exists, so we are good to go + %{^ref => {^pid, mon_ref, :command, command, deadline}} -> + maybe_checkout(command, mon_ref, deadline, {pid, ref}, %{state | queue: queue}) + + # It should never happen + %{^ref => _} -> + exit(:unexpected_checkout) + + # The request is no longer active, do nothing + %{} -> + maybe_checkout(%{state | queue: queue}) + end + + {:empty, _queue} -> + state + end + end + + defp maybe_checkout(command, mon_ref, deadline, {pid, ref} = from, state) do + if past_deadline?(deadline) do + state = remove_request(state, ref, mon_ref) + maybe_checkout(state) + else + %{resources: resources, requests: requests, worker: worker, queue: queue, state: pool_state} = + state = init_worker_if_lazy_and_empty(state) + + case :queue.out(resources) do + {{:value, {worker_server_state, _}}, resources} -> + args = [command, from, worker_server_state, pool_state] + + case apply_worker_callback(pool_state, worker, :handle_checkout, args) do + {:ok, worker_client_state, worker_server_state, pool_state} -> + GenServer.reply({pid, ref}, worker_client_state) + + requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_server_state}) + %{state | resources: resources, requests: requests, state: pool_state} + + {:remove, reason, pool_state} -> + state = remove_worker(reason, worker_server_state, %{state | state: pool_state}) + maybe_checkout(command, mon_ref, deadline, from, %{state | resources: resources}) + + {:skip, exception, pool_state} -> + GenServer.reply({pid, ref}, {:skipped, exception}) + remove_request(%{state | state: pool_state}, ref, mon_ref) + + other -> + raise """ + unexpected return from #{inspect(worker)}.handle_checkout/4. 
+ + Expected: {:ok, client_state, server_state, pool_state} | {:remove, reason, pool_state} | {:skip, Exception.t(), pool_state} + Got: #{inspect(other)} + """ + end + + {:empty, _} -> + %{state | queue: :queue.in(from, queue)} + end + end + end + + defp init_worker_if_lazy_and_empty(%{lazy: nil} = state), do: state + + defp init_worker_if_lazy_and_empty( + %{lazy: lazy, resources: resources, worker_idle_timeout: worker_idle_timeout} = state + ) do + if lazy > 0 and :queue.is_empty(resources) do + %{async: async, worker: worker, state: pool_state} = state + + {pool_state, resources, async} = + init_worker(worker, pool_state, resources, async, worker_idle_timeout) + + %{state | async: async, resources: resources, state: pool_state, lazy: lazy - 1} + else + state + end + end + + defp past_deadline?(deadline) when is_integer(deadline) do + System.monotonic_time() >= deadline + end + + defp past_deadline?(:infinity), do: false + + defp remove_worker(reason, worker_server_state, state) do + state = maybe_terminate_worker(reason, worker_server_state, state) + + if lazy = state.lazy do + %{state | lazy: lazy + 1} + else + schedule_init() + state + end + end + + defp check_idle_resources(resources, state) do + now_in_ms = System.monotonic_time(:millisecond) + do_check_idle_resources(resources, now_in_ms, state, :queue.new(), state.max_idle_pings) + end + + defp do_check_idle_resources(resources, _now_in_ms, state, new_resources, 0) do + {:ok, :queue.join(new_resources, resources), state} + end + + defp do_check_idle_resources(resources, now_in_ms, state, new_resources, remaining_pings) do + case :queue.out(resources) do + {:empty, _} -> + {:ok, new_resources, state} + + {{:value, resource_data}, next_resources} -> + {worker_server_state, worker_metadata} = resource_data + time_diff = now_in_ms - worker_metadata + + if time_diff >= state.worker_idle_timeout do + case maybe_ping_worker(worker_server_state, state) do + {:ok, new_worker_state} -> + # We don't need to update 
the worker_metadata because, by definition, + # if we are checking for idle resources again and the timestamp is the same, + # it is because it has to be checked again. + new_resource_data = {new_worker_state, worker_metadata} + new_resources = :queue.in(new_resource_data, new_resources) + + do_check_idle_resources( + next_resources, + now_in_ms, + state, + new_resources, + remaining_pings - 1 + ) + + {:remove, user_reason} -> + new_state = remove_worker(user_reason, worker_server_state, state) + + do_check_idle_resources( + next_resources, + now_in_ms, + new_state, + new_resources, + remaining_pings - 1 + ) + + {:stop, reason} -> + {:stop, reason, state} + end + else + {:ok, :queue.join(new_resources, resources), state} + end + end + end + + defp maybe_ping_worker(worker_server_state, state) do + %{worker: worker, state: pool_state} = state + + args = [worker_server_state, pool_state] + + case apply_worker_callback(worker, :handle_ping, args) do + {:ok, worker_state} -> + {:ok, worker_state} + + {:remove, user_reason} -> + {:remove, user_reason} + + {:stop, user_reason} -> + {:stop, user_reason} + + other -> + raise """ + unexpected return from #{inspect(worker)}.handle_ping/2. + + Expected: + + {:remove, reason} + | {:ok, worker_state} + | {:stop, reason} + + Got: #{inspect(other)} + """ + end + end + + defp maybe_terminate_worker(reason, worker_server_state, state) do + %{worker: worker, state: pool_state} = state + + if function_exported?(worker, :terminate_worker, 3) do + args = [reason, worker_server_state, pool_state] + + case apply_worker_callback(worker, :terminate_worker, args) do + {:ok, pool_state} -> + %{state | state: pool_state} + + {:remove, _reason} -> + state + + other -> + raise """ + unexpected return from #{inspect(worker)}.terminate_worker/3. 
+ + Expected: + + {:ok, pool_state} + + Got: #{inspect(other)} + """ + end + else + state + end + end + + defp init_worker(worker, pool_state, resources, async, worker_idle_timeout) do + case apply_worker_callback(worker, :init_worker, [pool_state]) do + {:ok, worker_state, pool_state} -> + {pool_state, :queue.in({worker_state, get_metadata(worker_idle_timeout)}, resources), + async} + + {:async, fun, pool_state} when is_function(fun, 0) -> + %{ref: ref, pid: pid} = Task.Supervisor.async(NimblePool.TaskSupervisor, fun) + {pool_state, resources, async |> Map.put(ref, pid) |> Map.put(pid, ref)} + + {:remove, _reason} -> + send(self(), {__MODULE__, :init_worker}) + {pool_state, resources, async} + + other -> + raise """ + unexpected return from #{inspect(worker)}.init_worker/1. + + Expected: + + {:ok, worker_state, pool_state} + | {:async, (() -> worker_state), pool_state} + + Got: #{inspect(other)} + """ + end + end + + defp schedule_init() do + send(self(), {__MODULE__, :init_worker}) + end + + defp apply_worker_callback(worker, fun, args) do + do_apply_worker_callback(worker, fun, args, &{:remove, &1}) + end + + defp apply_worker_callback(pool_state, worker, fun, args) do + do_apply_worker_callback(worker, fun, args, &{:remove, &1, pool_state}) + end + + defp do_apply_worker_callback(worker, fun, args, catch_fun) do + try do + apply(worker, fun, args) + catch + kind, reason -> + reason = Exception.normalize(kind, reason, __STACKTRACE__) + + Logger.error( + [ + "Error during #{inspect(worker)}.#{fun}/#{length(args)} callback:\n" + | Exception.format(kind, reason, __STACKTRACE__) + ], + crash_reason: {crash_reason(kind, reason), __STACKTRACE__} + ) + + catch_fun.(reason) + end + end + + defp crash_reason(:throw, value), do: {:nocatch, value} + defp crash_reason(_, value), do: value + + defp remove_request(pool_state, ref, mon_ref) do + requests = Map.delete(pool_state.requests, ref) + monitors = Map.delete(pool_state.monitors, mon_ref) + Process.demonitor(mon_ref, 
[:flush]) + %{pool_state | requests: requests, monitors: monitors} + end + + defp handle_enqueue(worker, command, pool_state) do + if function_exported?(worker, :handle_enqueue, 2) do + worker.handle_enqueue(command, pool_state) + else + {:ok, command, pool_state} + end + end + + defp get_metadata(nil), do: nil + defp get_metadata(_worker_idle_timeout), do: System.monotonic_time(:millisecond) +end diff --git a/deps/nimble_pool/lib/nimble_pool/application.ex b/deps/nimble_pool/lib/nimble_pool/application.ex new file mode 100644 index 0000000..0c5751e --- /dev/null +++ b/deps/nimble_pool/lib/nimble_pool/application.ex @@ -0,0 +1,12 @@ +defmodule NimblePool.Application do + @moduledoc false + use Application + + def start(_type, _opts) do + children = [ + {Task.Supervisor, name: NimblePool.TaskSupervisor} + ] + + Supervisor.start_link(children, strategy: :one_for_one) + end +end diff --git a/deps/nimble_pool/mix.exs b/deps/nimble_pool/mix.exs new file mode 100644 index 0000000..56cf84b --- /dev/null +++ b/deps/nimble_pool/mix.exs @@ -0,0 +1,56 @@ +defmodule NimblePool.MixProject do + use Mix.Project + + @version "1.1.0" + @url "https://github.com/dashbitco/nimble_pool" + + def project do + [ + app: :nimble_pool, + version: @version, + elixir: "~> 1.7", + start_permanent: Mix.env() == :prod, + name: "NimblePool", + description: "A tiny resource-pool implementation", + deps: deps(), + docs: docs(), + package: package(), + test_coverage: [tool: ExCoveralls], + preferred_cli_env: ["coveralls.html": :test] + ] + end + + def application do + [ + mod: {NimblePool.Application, []}, + extra_applications: [:logger] + ] + end + + defp deps do + [ + {:ex_doc, "~> 0.21", only: :docs}, + {:excoveralls, "~> 0.16.1", only: :test} + ] + end + + defp docs do + [ + main: "NimblePool", + source_ref: "v#{@version}", + source_url: @url, + groups_for_functions: [ + "Worker callbacks": &(&1[:callback] == :worker), + "Pool callbacks": &(&1[:callback] == :pool) + ] + ] + end + + defp package 
do + %{ + licenses: ["Apache-2.0"], + maintainers: ["José Valim"], + links: %{"GitHub" => @url} + } + end +end diff --git a/deps/oidcc/.hex b/deps/oidcc/.hex new file mode 100644 index 0000000..1200854 Binary files /dev/null and b/deps/oidcc/.hex differ diff --git a/deps/oidcc/LICENSE b/deps/oidcc/LICENSE new file mode 100644 index 0000000..6e78ee3 --- /dev/null +++ b/deps/oidcc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2023 Jonatan Männchen / Erlang Ecosystem Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/deps/oidcc/LICENSES/Apache-2.0.txt b/deps/oidcc/LICENSES/Apache-2.0.txt new file mode 100644 index 0000000..137069b --- /dev/null +++ b/deps/oidcc/LICENSES/Apache-2.0.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 
+ +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/oidcc/LICENSES/LicenseRef-EEF-Logo.txt b/deps/oidcc/LICENSES/LicenseRef-EEF-Logo.txt new file mode 100644 index 0000000..e2e1ae6 --- /dev/null +++ b/deps/oidcc/LICENSES/LicenseRef-EEF-Logo.txt @@ -0,0 +1,109 @@ +ERLANG ECOSYSTEM FOUNDATION TRADEMARK POLICY + +This Trademark Policy (“Policy”) governs the use of any logos, trademarks, +service marks, and trade names (collectively, the “Marks”) owned by the +Erlang Ecosystem Foundation (“EEF” or “we”). By using the Marks, you agree to +comply with this Policy. This Policy does not grant any license to use the Marks +except as expressly permitted herein. + +------------------------- +1. OWNERSHIP AND SCOPE +------------------------- + +1.1 Ownership +All rights, title, and interest in and to the Marks belong exclusively to the +EEF. Your use of the Marks benefits the EEF. + +1.2 Scope of Policy +This Policy applies to all uses of the Marks, including digital and print +publications, software, advertising, marketing, and other promotional materials. +This Policy applies to both word marks and logos. Logo usage is generally more +restricted than textual references. + +------------------------- +2. 
PERMISSIBLE USE +------------------------- + +2.1 EEF Projects +Individuals and entities may use the Marks in projects officially affiliated +with or governed by the EEF, provided such use complies with this Policy and any +applicable brand guidelines. If you are unsure whether a project is officially +affiliated, please contact eef@erlef.org. + +2.2 By Permission +You may request explicit written permission from the EEF to use the Marks. To +request permission, email [eef@erlef.org](mailto:eef@erlef.org) with details +regarding the proposed use. You may not use the Marks until you have received +our written permission. + +2.3 Nominative Use +You may use the Marks as necessary to identify or refer to the EEF or its +projects (commonly referred to as nominative fair use), for example in news +articles, product reviews, or scholarly works. Such use must: +- Be factual and not misleading, +- Not imply sponsorship, endorsement, or false association with the EEF, +- Use only as much of the Mark as is necessary to identify the EEF. + +------------------------- +3. PROHIBITED USES +------------------------- + +3.1 Misrepresentation +You may not use the Marks to suggest or imply that you are endorsed by, +sponsored by, affiliated with, or otherwise associated with the EEF unless you +have explicit written permission. For example, you may not use the Marks in a +way that suggests your project is an official EEF project unless it is. + +3.2 Modifications or Derivatives +You may not alter the Marks, create derivatives, or incorporate them into +another name, logo, or design without prior written approval. For example, +changing the fonts, colors, or proportions of an EEF logo is not allowed without +permission. + +3.3 Confusingly Similar +You may not use any mark, design, or logo that is confusingly similar to the +Marks in a way that may mislead others into thinking they are part of or +endorsed by the EEF. + +------------------------- +4. 
QUALITY AND USAGE GUIDELINES +------------------------- + +4.1 Quality Standards +Any use of the Marks must comply with any brand guidelines or usage standards +published by the EEF (if applicable). The EEF may request modifications or +discontinuation of uses that do not comply with this Policy. + +4.2 Integrity of the Mark +You may not display the Marks in a way that is deceptive, objectionable, or +damaging to the EEF’s reputation, or in any manner that violates applicable +laws or regulations. + +------------------------- +5. TERMINATION OF PERMISSION +------------------------- + +The EEF may revoke permission to use the Marks at any time, at its discretion, +upon notice. Once permission is revoked, you must immediately discontinue all +use of the Marks unless otherwise authorized in writing. + +------------------------- +6. DISCLAIMER AND LIMITATION OF LIABILITY +------------------------- + +The EEF disclaims any warranties that might be construed from this Policy or any +other documentation related to the Marks. Under no circumstances will the EEF be +held liable to any party for any direct, indirect, special, or other +consequential damages arising from use of the Marks. + +------------------------- +7. 
CONTACT INFORMATION +------------------------- + +For questions, clarifications, or to request permission to use the Marks, please +contact: + +Erlang Ecosystem Foundation +Attn: Trademark Usage +Email: eef@erlef.org +Website: https://erlef.org diff --git a/deps/oidcc/LICENSES/LicenseRef-OpenID-Mark.txt b/deps/oidcc/LICENSES/LicenseRef-OpenID-Mark.txt new file mode 100644 index 0000000..94b64e7 --- /dev/null +++ b/deps/oidcc/LICENSES/LicenseRef-OpenID-Mark.txt @@ -0,0 +1,4 @@ +Usage only in conformance with the OpenID Foundation Terms and Conditions for +Certification of Conformance to an OpenID Connect Conformance Profile: + +https://openid.net/mark/ \ No newline at end of file diff --git a/deps/oidcc/README.md b/deps/oidcc/README.md new file mode 100644 index 0000000..94235fa --- /dev/null +++ b/deps/oidcc/README.md @@ -0,0 +1,356 @@ + + +
+ OpenID Connect Logo +
+ +# oidcc + +OpenID Connect client library for Erlang. + +[![EEF Security WG project](https://img.shields.io/badge/EEF-Security-black)](https://github.com/erlef/security-wg) +[![Main Branch](https://github.com/erlef/oidcc/actions/workflows/branch_main.yml/badge.svg?branch=main)](https://github.com/erlef/oidcc/actions/workflows/branch_main.yml) +[![Module Version](https://img.shields.io/hexpm/v/oidcc.svg)](https://hex.pm/packages/oidcc) +[![Total Download](https://img.shields.io/hexpm/dt/oidcc.svg)](https://hex.pm/packages/oidcc) +[![License](https://img.shields.io/hexpm/l/oidcc.svg)](https://github.com/erlef/oidcc/blob/main/LICENSE) +[![Last Updated](https://img.shields.io/github/last-commit/erlef/oidcc.svg)](https://github.com/erlef/oidcc/commits/master) +[![Coverage Status](https://coveralls.io/repos/github/erlef/oidcc/badge.svg?branch=main)](https://coveralls.io/github/erlef/oidcc?branch=main) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9602/badge)](https://www.bestpractices.dev/projects/9602) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/erlef/oidcc/badge)](https://scorecard.dev/viewer/?uri=github.com/erlef/oidcc) +[![REUSE status](https://api.reuse.software/badge/github.com/erlef/oidcc)](https://api.reuse.software/info/github.com/erlef/oidcc) + +
+ + + + + OpenID Connect Certified Logo + + +OpenID Certified by [Jonatan Männchen](https://github.com/maennchen) at the +[Erlang Ecosystem Foundation](https://github.com/erlef) of multiple Relaying +Party conformance profiles of the OpenID Connect protocol: +For details, check the +[Conformance Test Suite](https://github.com/erlef/oidcc_conformance). + +
+ + + + + Erlang Ecosystem Foundation Logo + + +The refactoring for `v3` and the certification is funded as an +[Erlang Ecosystem Foundation](https://erlef.org/) stipend entered by the +[Security Working Group](https://erlef.org/wg/security). + +
+ + + + + + Security Audit For Erlang and Elixir + + +A security audit was performed by [SAFE-Erlang-Elixir](https://github.com/SAFE-Erlang-Elixir) more info [HERE](https://www.erlang-solutions.com/landings/security-audit-for-erlang-2/). + +
+ +## Supported Features + +* [Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) + (`[ISSUER]/.well-known/openid-configuration`) +* [Client Registration](https://openid.net/specs/openid-connect-registration-1_0.html) +* Authorization (Code Flow) + * [Request Object](https://openid.net/specs/openid-connect-core-1_0.html#RequestObject) + * [PKCE](https://oauth.net/2/pkce/) + * [Pushed Authorization Requests](https://datatracker.ietf.org/doc/html/rfc9126) + * [Authorization Server Issuer Identification](https://datatracker.ietf.org/doc/html/rfc9207) +* Token + * Authorization: `client_secret_basic`, `client_secret_post`, + `client_secret_jwt`, and `private_key_jwt` + * Grant Types: `authorization_code`, `refresh_token`, `jwt_bearer`, and + `client_credentials` + * Automatic JWK Refreshing when needed +* Userinfo + * [JWT Response](https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse) + * [Aggregated and Distributed Claims](https://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims) +* [Token Introspection](https://datatracker.ietf.org/doc/html/rfc7662) +* Logout + * [RP-Initiated](https://openid.net/specs/openid-connect-rpinitiated-1_0.html) +* [JWT Secured Authorization Response Mode for OAuth 2.0 (JARM)](https://openid.net/specs/oauth-v2-jarm-final.html) +* [Demonstrating Proof of Possession (DPoP)](https://datatracker.ietf.org/doc/html/rfc9449) +* [OAuth 2 Purpose Request Parameter](https://cdn.connectid.com.au/specifications/oauth2-purpose-01.html) +* Profiles + * [FAPI 2.0 Security Profile](https://openid.bitbucket.io/fapi/fapi-2_0-security-profile.html) + * [FAPI 2.0 Message Signing](https://openid.bitbucket.io/fapi/fapi-2_0-message-signing.html) + +## Setup + +**Please note that the minimum supported Erlang OTP version is OTP26.** + + + +### Erlang + +**directly** + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">>, + name => 
{local, google_config_provider} + }). +``` + +**via `supervisor`** + +```erlang +-behaviour(supervisor). + +%% ... + +init(_Args) -> + SupFlags = #{strategy => one_for_one}, + ChildSpecs = [ + #{ + id => oidcc_provider_configuration_worker, + start => + {oidcc_provider_configuration_worker, start_link, [ + #{ + issuer => "https://accounts.google.com", + name => {local, myapp_oidcc_config_provider} + } + ]}, + shutdown => brutal_kill + } + ], + {ok, {SupFlags, ChildSpecs}}. +``` + +### Elixir + +**directly** + +```elixir +{:ok, _pid} = + Oidcc.ProviderConfiguration.Worker.start_link(%{ + issuer: "https://accounts.google.com", + name: Myapp.OidccConfigProvider + }) +``` + +**via `Supervisor`** + +```elixir +Supervisor.init( + [ + {Oidcc.ProviderConfiguration.Worker, + %{ + issuer: "https://accounts.google.com", + name: Myapp.OidccConfigProvider + }} + ], + strategy: :one_for_one +) +``` + +**using [`igniter`](https://hex.pm/packages/igniter)** + +```bash +mix oidcc.gen.provider_configuration_worker \ + --name MyApp.OidccConfigProvider \ + --issuer https://accounts.google.com +``` + + + +## Usage + + + +### Companion libraries + +`oidcc` offers integrations for various libraries: + + + +- [`oidcc_cowboy`](https://hex.pm/packages/oidcc_cowboy) - Integrations for + [`cowboy`](https://hex.pm/packages/cowboy) +- [`oidcc_plug`](https://hex.pm/packages/oidcc_plug) - Integrations for + [`plug`](https://hex.pm/packages/plug) and + [`phoenix`](https://hex.pm/packages/phoenix) +- [`ueberauth_oidcc`](https://hex.pm/packages/ueberauth_oidcc) - Integration for + [`ueberauth`](https://hex.pm/packages/ueberauth) + +### Erlang + +```erlang +%% Create redirect URI for authorization +{ok, RedirectUri} = oidcc:create_redirect_url( + myapp_oidcc_config_provider, + <<"client_id">>, + <<"client_secret">>, + #{redirect_uri => <<"https://example.com/callback">>} +), + +%% Redirect user to `RedirectUri` + +%% Retrieve `code` query / form param from redirect back + +%% Exchange code for token 
+{ok, Token} = + oidcc:retrieve_token( + AuthCode, + myapp_oidcc_config_provider, + <<"client_id">>, + <<"client_secret">>, + #{redirect_uri => <<"https://example.com/callback">>} + ), + +%% Load userinfo for token +{ok, Claims} = + oidcc:retrieve_userinfo( + Token, + myapp_oidcc_config_provider, + <<"client_id">>, + <<"client_secret">>, + #{} + ), + +%% Load introspection for access token +{ok, Introspection} = + oidcc:introspect_token( + Token, + myapp_oidcc_config_provider, + <<"client_id">>, + <<"client_secret">>, + #{} + ), + +%% Refresh token when it expires +{ok, RefreshedToken} = + oidcc:refresh_token( + Token, + myapp_oidcc_config_provider, + <<"client_id">>, + <<"client_secret">>, + #{} + ). +``` + +for more details, see https://hexdocs.pm/oidcc/oidcc.html + +### Elixir + +```elixir +# Create redirect URI for authorization +{:ok, redirect_uri} = + Oidcc.create_redirect_url( + Myapp.OidccConfigProvider, + "client_id", + "client_secret", + %{redirect_uri: "https://example.com/callback"} + ) + +# Redirect user to `redirect_uri` + +# Retrieve `code` query / form param from redirect back + +# Exchange code for token +{:ok, token} = + Oidcc.retrieve_token( + auth_code, + Myapp.OidccConfigProvider, + "client_id", + "client_secret", + %{redirect_uri: "https://example.com/callback"} + ) + +# Load userinfo for token +{:ok, claims} = + Oidcc.retrieve_userinfo( + token, + Myapp.OidccConfigProvider, + "client_id", + "client_secret", + %{expected_subject: "sub"} + ) + +# Load introspection for access token +{:ok, introspection} = + Oidcc.introspect_token( + token, + Myapp.OidccConfigProvider, + "client_id", + "client_secret" + ) + +# Refresh token when it expires +{:ok, refreshed_token} = + Oidcc.refresh_token( + token, + Myapp.OidccConfigProvider, + "client_id", + "client_secret" + ) +``` + +for more details, see https://hexdocs.pm/oidcc/Oidcc.html + + diff --git a/deps/oidcc/hex_metadata.config b/deps/oidcc/hex_metadata.config new file mode 100644 index 
0000000..c1dd187 --- /dev/null +++ b/deps/oidcc/hex_metadata.config @@ -0,0 +1,61 @@ +{<<"links">>,[{<<"Github">>,<<"https://github.com/erlef/oidcc">>}]}. +{<<"name">>,<<"oidcc">>}. +{<<"version">>,<<"3.7.2">>}. +{<<"description">>,<<"OpenID Connect client library for the BEAM.">>}. +{<<"elixir">>,<<"~> 1.15">>}. +{<<"app">>,<<"oidcc">>}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"build_tools">>,[<<"rebar3">>,<<"mix">>]}. +{<<"files">>, + [<<"include">>,<<"include/oidcc.hrl">>, + <<"include/oidcc_client_context.hrl">>, + <<"include/oidcc_client_registration.hrl">>,<<"include/oidcc_token.hrl">>, + <<"include/oidcc_provider_configuration.hrl">>,<<"include/internal">>, + <<"include/internal/doc.hrl">>,<<"include/oidcc_token_introspection.hrl">>, + <<"lib">>,<<"lib/mix">>,<<"lib/mix/tasks">>, + <<"lib/mix/tasks/oidcc.gen.provider_configuration_worker.ex">>, + <<"lib/oidcc">>,<<"lib/oidcc/authorization.ex">>, + <<"lib/oidcc/provider_configuration.ex">>,<<"lib/oidcc/token">>, + <<"lib/oidcc/token/id.ex">>,<<"lib/oidcc/token/access.ex">>, + <<"lib/oidcc/token/refresh.ex">>,<<"lib/oidcc/client_context.ex">>, + <<"lib/oidcc/logout.ex">>,<<"lib/oidcc/token.ex">>, + <<"lib/oidcc/provider_configuration">>, + <<"lib/oidcc/provider_configuration/worker.ex">>, + <<"lib/oidcc/client_registration.ex">>,<<"lib/oidcc/client_registration">>, + <<"lib/oidcc/client_registration/response.ex">>, + <<"lib/oidcc/record_struct.ex">>,<<"lib/oidcc/token_introspection.ex">>, + <<"lib/oidcc/userinfo.ex">>,<<"lib/oidcc.ex">>,<<"LICENSE">>,<<"LICENSES">>, + <<"LICENSES/Apache-2.0.txt">>,<<"LICENSES/LicenseRef-OpenID-Mark.txt">>, + <<"LICENSES/LicenseRef-EEF-Logo.txt">>,<<"mix.exs">>,<<"README.md">>, + <<"rebar.config">>,<<"src">>, + <<"src/oidcc_provider_configuration_worker.erl">>, + <<"src/oidcc_token_introspection.erl">>,<<"src/oidcc_backoff.erl">>, + <<"src/oidcc_userinfo.erl">>,<<"src/oidcc_scope.erl">>, + <<"src/oidcc_provider_configuration.erl">>, + 
<<"src/oidcc_client_registration.erl">>,<<"src/oidcc.erl">>, + <<"src/oidcc_http_util.erl">>,<<"src/oidcc_token.erl">>, + <<"src/oidcc_jwt_util.erl">>,<<"src/oidcc.app.src">>, + <<"src/oidcc_logout.erl">>,<<"src/oidcc_decode_util.erl">>, + <<"src/oidcc_authorization.erl">>,<<"src/oidcc_client_context.erl">>, + <<"src/oidcc_profile.erl">>,<<"src/oidcc_auth_util.erl">>]}. +{<<"requirements">>, + [[{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.2">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"telemetry_registry">>}, + {<<"app">>,<<"telemetry_registry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.3.1">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"jose">>}, + {<<"app">>,<<"jose">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.11">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"igniter">>}, + {<<"app">>,<<"igniter">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 0.6.3 or ~> 0.7.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. diff --git a/deps/oidcc/include/internal/doc.hrl b/deps/oidcc/include/internal/doc.hrl new file mode 100644 index 0000000..d46e10a --- /dev/null +++ b/deps/oidcc/include/internal/doc.hrl @@ -0,0 +1,11 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +%% TODO: Remove the following macros as soon as only OTP >= 27 is supported. +-if(?OTP_RELEASE >= 27). + -define(MODULEDOC(Str), -moduledoc(Str)). + -define(DOC(Str), -doc(Str)). +-else. + -define(MODULEDOC(Str), -compile([])). + -define(DOC(Str), -compile([])). +-endif. diff --git a/deps/oidcc/include/oidcc.hrl b/deps/oidcc/include/oidcc.hrl new file mode 100644 index 0000000..7848666 --- /dev/null +++ b/deps/oidcc/include/oidcc.hrl @@ -0,0 +1,13 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(OIDCC_HRL). + +-include("oidcc_provider_configuration.hrl"). 
+-include("oidcc_client_context.hrl"). +-include("oidcc_client_registration.hrl"). +-include("oidcc_token.hrl"). + +-define(OIDCC_HRL, 1). + +-endif. diff --git a/deps/oidcc/include/oidcc_client_context.hrl b/deps/oidcc/include/oidcc_client_context.hrl new file mode 100644 index 0000000..546adf0 --- /dev/null +++ b/deps/oidcc/include/oidcc_client_context.hrl @@ -0,0 +1,16 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(OIDCC_CLIENT_CONTEXT_HRL). + +-record(oidcc_client_context, { + provider_configuration :: oidcc_provider_configuration:t(), + jwks :: jose_jwk:key(), + client_id :: binary(), + client_secret :: binary() | unauthenticated, + client_jwks = none :: jose_jwk:key() | none +}). + +-define(OIDCC_CLIENT_CONTEXT_HRL, 1). + +-endif. diff --git a/deps/oidcc/include/oidcc_client_registration.hrl b/deps/oidcc/include/oidcc_client_registration.hrl new file mode 100644 index 0000000..18e918d --- /dev/null +++ b/deps/oidcc/include/oidcc_client_registration.hrl @@ -0,0 +1,93 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(OIDCC_CLIENT_REGISTRATION_HRL). 
+ +%% @see https://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata +%% @see https://openid.net/specs/openid-connect-rpinitiated-1_0.html#ClientMetadata +-record(oidcc_client_registration, { + %% OpenID Connect Dynamic Client Registration 1.0 + redirect_uris :: [uri_string:uri_string()], + %% OpenID Connect Dynamic Client Registration 1.0 + response_types = undefined :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + grant_types = undefined :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + application_type = web :: web | native, + %% OpenID Connect Dynamic Client Registration 1.0 + contacts = undefined :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + client_name = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + logo_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + client_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + policy_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + tos_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + jwks = undefined :: jose_jwk:key() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + jwks_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + sector_identifier_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + subject_type = undefined :: pairwise | public | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + id_token_signed_response_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + id_token_encrypted_response_alg = undefined :: binary() | undefined, + %% OpenID 
Connect Dynamic Client Registration 1.0 + id_token_encrypted_response_enc = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + userinfo_signed_response_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + userinfo_encrypted_response_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + userinfo_encrypted_response_enc = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_signing_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_encryption_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_encryption_enc = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + token_endpoint_auth_method = <<"client_secret_basic">> :: binary(), + %% OpenID Connect Dynamic Client Registration 1.0 + token_endpoint_auth_signing_alg = undefined :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + default_max_age = undefined :: pos_integer() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + require_auth_time = false :: boolean(), + %% OpenID Connect Dynamic Client Registration 1.0 + default_acr_values = undefined :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + initiate_login_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_uris = undefined :: [uri_string:uri_string()] | undefined, + %% OpenID Connect RP-Initiated Logout 1.0 + post_logout_redirect_uris = undefined :: [uri_string:uri_string()] | undefined, + %% OAuth 2.0 Pushed Authorization Requests + require_pushed_authorization_requests = false :: boolean(), + %% OAuth 2.0 Demonstrating Proof of Possession (DPoP) + dpop_bound_access_tokens = false :: boolean(), + %% Unknown 
Fields + extra_fields = #{} :: #{binary() => term()} +}). + +%% @see https://openid.net/specs/openid-connect-registration-1_0.html#RegistrationResponse +-record(oidcc_client_registration_response, { + client_id :: binary(), + client_secret = undefined :: binary() | undefined, + registration_access_token = undefined :: binary() | undefined, + registration_client_uri = undefined :: uri_string:uri_string() | undefined, + client_id_issued_at = undefined :: pos_integer() | undefined, + client_secret_expires_at = undefined :: pos_integer() | undefined, + %% Unknown Fields + extra_fields = #{} :: #{binary() => term()} +}). + +-define(OIDCC_CLIENT_REGISTRATION_HRL, 1). + +-endif. diff --git a/deps/oidcc/include/oidcc_provider_configuration.hrl b/deps/oidcc/include/oidcc_provider_configuration.hrl new file mode 100644 index 0000000..d7f94c3 --- /dev/null +++ b/deps/oidcc/include/oidcc_provider_configuration.hrl @@ -0,0 +1,127 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(oidcc_provider_configuration_HRL). 
+ +%% @see https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +%% @see https://datatracker.ietf.org/doc/html/draft-jones-oauth-discovery-01#section-4.1 +%% @see https://openid.net/specs/openid-connect-rpinitiated-1_0.html#OPMetadata +-record(oidcc_provider_configuration, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + { + issuer :: uri_string:uri_string(), + issuer_regex :: binary() | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + authorization_endpoint :: uri_string:uri_string(), + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + token_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 + userinfo_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + jwks_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + registration_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + scopes_supported :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + response_types_supported :: [binary()], + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + response_modes_supported = [<<"query">>, <<"fragment">>] :: [binary()], + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + grant_types_supported = [<<"authorization_code">>, <<"implicit">>] :: [binary()], + %% OpenID Connect Discovery 1.0 + acr_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + subject_types_supported :: [pairwise | public], + %% OpenID Connect 
Discovery 1.0 + id_token_signing_alg_values_supported :: [binary()], + %% OpenID Connect Discovery 1.0 + id_token_encryption_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + id_token_encryption_enc_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + userinfo_signing_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + userinfo_encryption_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + userinfo_encryption_enc_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + request_object_signing_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + request_object_encryption_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + request_object_encryption_enc_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + token_endpoint_auth_methods_supported = [<<"client_secret_basic">>] :: [binary()], + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + token_endpoint_auth_signing_alg_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + display_values_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + claim_types_supported = [normal] :: [normal | aggregated | distributed], + %% OpenID Connect Discovery 1.0 + claims_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + service_documentation = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 + claims_locales_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery 
(draft-jones-oauth-discovery-01) + ui_locales_supported = undefined :: [binary()] | undefined, + %% OpenID Connect Discovery 1.0 + claims_parameter_supported = false :: boolean(), + %% OpenID Connect Discovery 1.0 + request_parameter_supported = false :: boolean(), + %% OpenID Connect Discovery 1.0 + request_uri_parameter_supported = true :: boolean(), + %% OpenID Connect Discovery 1.0 + require_request_uri_registration = false :: boolean(), + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + op_policy_uri = undefined :: uri_string:uri_string() | undefined, + %% OpenID Connect Discovery 1.0 / OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + op_tos_uri = undefined :: uri_string:uri_string() | undefined, + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + revocation_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + revocation_endpoint_auth_methods_supported = [<<"client_secret_basic">>] :: [binary()], + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + revocation_endpoint_auth_signing_alg_values_supported = undefined :: + [binary()] | undefined, + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + introspection_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + introspection_endpoint_auth_methods_supported = [<<"client_secret_basic">>] :: [binary()], + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + introspection_endpoint_auth_signing_alg_values_supported = undefined :: + [binary()] | undefined, + %% OAuth 2.0 Discovery (draft-jones-oauth-discovery-01) + code_challenge_methods_supported = undefined :: [binary()] | undefined, + %% OpenID Connect RP-Initiated Logout 1.0 + end_session_endpoint = undefined :: uri_string:uri_string() | undefined, + %% OAuth 2.0 Pushed Authorization Requests + require_pushed_authorization_requests = false :: boolean(), + %% 
OAuth 2.0 Pushed Authorization Requests + pushed_authorization_request_endpoint = undefined :: uri_string:uri_string() | undefined, + %% JWT Secured Authorization Response Mode for OAuth 2.0 (JARM) + authorization_signing_alg_values_supported = undefined :: [binary()] | undefined, + %% JWT Secured Authorization Response Mode for OAuth 2.0 (JARM) + authorization_encryption_alg_values_supported = undefined :: [binary()] | undefined, + %% JWT Secured Authorization Response Mode for OAuth 2.0 (JARM) + authorization_encryption_enc_values_supported = undefined :: [binary()] | undefined, + %% OAuth 2.0 Authorization Server Issuer Identification (RFC9207) + authorization_response_iss_parameter_supported = false :: boolean(), + %% OAuth 2.0 Demonstrating Proof of Possession (DPoP) + dpop_signing_alg_values_supported = undefined :: [binary()] | undefined, + %% RFC 9101 The OAuth 2.0 Authorization Framework: JWT-Secured Authorization Request (JAR) + require_signed_request_object = false :: boolean(), + %% RFC 8705 OAuth 2.0 Mutual-TLS Client Authentication and Certificate-Bound Access Tokens + mtls_endpoint_aliases = #{} :: #{binary() => uri_string:uri_string()}, + %% RFC 8705 OAuth 2.0 Mutual-TLS Client Authentication and Certificate-Bound Access Tokens + tls_client_certificate_bound_access_tokens = false :: boolean(), + %% Unknown Fields + extra_fields = #{} :: #{binary() => term()} + } +). + +-define(oidcc_provider_configuration_HRL, 1). + +-endif. diff --git a/deps/oidcc/include/oidcc_token.hrl b/deps/oidcc/include/oidcc_token.hrl new file mode 100644 index 0000000..46af792 --- /dev/null +++ b/deps/oidcc/include/oidcc_token.hrl @@ -0,0 +1,22 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(OIDCC_TOKEN_HRL). + +-record(oidcc_token_id, {token :: binary(), claims :: oidcc_jwt_util:claims()}). 
+-record(oidcc_token_access, { + token :: binary(), + expires = undefined :: pos_integer() | undefined, + type = <<"Bearer">> :: binary() +}). +-record(oidcc_token_refresh, {token :: binary()}). +-record(oidcc_token, { + id :: oidcc_token:id() | none, + access :: oidcc_token:access() | none, + refresh :: oidcc_token:refresh() | none, + scope :: oidcc_scope:scopes() +}). + +-define(OIDCC_TOKEN_HRL, 1). + +-endif. diff --git a/deps/oidcc/include/oidcc_token_introspection.hrl b/deps/oidcc/include/oidcc_token_introspection.hrl new file mode 100644 index 0000000..c8ec6cc --- /dev/null +++ b/deps/oidcc/include/oidcc_token_introspection.hrl @@ -0,0 +1,25 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-ifndef(OIDCC_TOKEN_INTROSPECTION_HRL). + +%% @see https://datatracker.ietf.org/doc/html/rfc7662#section-2.2 +-record(oidcc_token_introspection, { + active :: boolean(), + client_id :: binary(), + exp :: pos_integer() | undefined, + scope :: oidcc_scope:scopes(), + username :: binary() | undefined, + token_type :: binary() | undefined, + iat :: pos_integer() | undefined, + nbf :: pos_integer() | undefined, + sub :: binary() | undefined, + aud :: binary() | undefined, + iss :: binary() | undefined, + jti :: binary() | undefined, + extra :: #{binary() := term()} +}). + +-define(OIDCC_TOKEN_INTROSPECTION_HRL, 1). + +-endif. 
diff --git a/deps/oidcc/lib/mix/tasks/oidcc.gen.provider_configuration_worker.ex b/deps/oidcc/lib/mix/tasks/oidcc.gen.provider_configuration_worker.ex new file mode 100644 index 0000000..a3fbe39 --- /dev/null +++ b/deps/oidcc/lib/mix/tasks/oidcc.gen.provider_configuration_worker.ex @@ -0,0 +1,148 @@ +# SPDX-FileCopyrightText: 2025 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +case Code.ensure_loaded(Igniter.Mix.Task) do + {:module, Igniter.Mix.Task} -> + defmodule Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker do + @example """ + mix oidcc.gen.provider_configuration_worker \\ + --name MyApp.OpenIDProvider \\ + --issuer https://accounts.google.com \ + """ + + @shortdoc "Generate an OpenID Connect provider configuration worker" + + @moduledoc """ + #{@shortdoc} + + Adds an `Oidcc.ProviderConfiguration.Worker` to your application and + configures it via the `runtime.exs` configuration file. + + ## Example + + ```bash + #{@example} + ``` + + ## Options + + * `--name` or `-n` - The name of the provider configuration worker + * `--issuer` or `-i` - The issuer of the provider + """ + + use Igniter.Mix.Task + + alias Igniter.Project.Application + alias Igniter.Project.Config + alias Igniter.Project.Module + + @impl Igniter.Mix.Task + def info(_argv, _composing_task) do + %Igniter.Mix.Task.Info{ + # dependencies to add + adds_deps: [], + # dependencies to add and call their associated installers, if they exist + installs: [], + # An example invocation + example: @example, + # Accept additional arguments that are not in your schema + # Does not guarantee that, when composed, the only options you get are the ones you define + extra_args?: false, + # A list of environments that this should be installed in, only relevant if this is an installer. 
+ only: nil, + # a list of positional arguments, i.e `[:file]` + positional: [], + # Other tasks your task composes using `Igniter.compose_task`, passing in the CLI argv + # This ensures your option schema includes options from nested tasks + composes: [], + # `OptionParser` schema + schema: [name: :string, issuer: :string], + # CLI aliases + aliases: [n: :name, i: :issuer] + } + end + + @impl Igniter.Mix.Task + def igniter(igniter) do + options = setup_options(igniter) + + igniter + |> configure_issuer(options) + |> add_application_worker(options) + end + + defp setup_options(igniter) do + igniter.args.options + |> Keyword.update( + :name, + Module.module_name(igniter, "OpenIDProvider"), + &Module.parse/1 + ) + |> Keyword.put(:app_name, Igniter.Project.Application.app_name(igniter)) + end + + defp configure_issuer(igniter, options) do + env_prefix = + options[:name] |> Macro.underscore() |> String.upcase() |> String.replace("/", "_") + + config = + case Keyword.fetch(options, :issuer) do + {:ok, issuer} -> + quote do + [issuer: System.get_env(unquote("#{env_prefix}_ISSUER"), unquote(issuer))] + end + + :error -> + quote do + [issuer: System.fetch_env!(unquote("#{env_prefix}_ISSUER"))] + end + end + + Config.configure_new( + igniter, + "runtime.exs", + options[:app_name], + [options[:name]], + {:code, config} + ) + end + + defp add_application_worker(igniter, options) do + Application.add_new_child( + igniter, + {Oidcc.ProviderConfiguration.Worker, + {:code, + quote do + %{ + name: unquote(options[:name]), + issuer: + Application.fetch_env!(unquote(options[:app_name]), unquote(options[:name]))[ + :issuer + ] + } + end}} + ) + end + end + + _ -> + defmodule Mix.Tasks.Oidcc.Gen.ProviderConfigurationWorker do + @shortdoc "Generate an OpenID Connect provider configuration worker | Install `igniter` to use" + @moduledoc @shortdoc + + use Mix.Task + + @impl Mix.Task + def run(_argv) do + Mix.shell().error(""" + The task 'oidcc.gen.provider_configuration_worker' requires 
igniter to be run. + + Please install igniter and try again. + + For more information, see: https://hexdocs.pm/igniter + """) + + exit({:shutdown, 1}) + end + end +end diff --git a/deps/oidcc/lib/oidcc.ex b/deps/oidcc/lib/oidcc.ex new file mode 100644 index 0000000..5652ec8 --- /dev/null +++ b/deps/oidcc/lib/oidcc.ex @@ -0,0 +1,411 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc do + @moduledoc """ + OpenID Connect High Level Interface + + ## Setup + + {:ok, _pid} = + Oidcc.ProviderConfiguration.Worker.start_link(%{ + issuer: "https://accounts.google.com", + name: MyApp.GoogleConfigProvider + }) + + or via a supervisor + + Supervisor.init([ + {Oidcc.ProviderConfiguration.Worker, %{issuer: "https://accounts.google.com"}} + ], strategy: :one_for_one) + + ## Global Configuration + + * `max_clock_skew` (default `0`) - Maximum allowed clock skew for JWT + `exp` / `nbf` validation, in seconds + """ + @moduledoc since: "3.0.0" + + @doc """ + Create Auth Redirect URL + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> + ...> {:ok, _redirect_uri} = + ...> Oidcc.create_redirect_url( + ...> pid, + ...> "client_id", + ...> "client_secret", + ...> %{redirect_uri: "https://my.server/return"} + ...> ) + + """ + @doc since: "3.0.0" + @spec create_redirect_url( + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + opts :: :oidcc_authorization.opts() | :oidcc_client_context.opts() + ) :: + {:ok, :uri_string.uri_string()} + | {:error, :oidcc_client_context.error()} + def create_redirect_url(provider_configuration_name, client_id, client_secret, opts), + do: :oidcc.create_redirect_url(provider_configuration_name, client_id, client_secret, opts) + + @doc """ + retrieve the token using the authcode received before and directly validate + 
the result. + + the authcode was sent to the local endpoint by the OpenId Connect provider, + using redirects + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> # Get auth_code from redirect + ...> auth_code = "auth_code" + ...> + ...> Oidcc.retrieve_token( + ...> auth_code, + ...> pid, + ...> "client_id", + ...> "client_secret", + ...> %{redirect_uri: "https://my.server/return"} + ...> ) + ...> # => {:ok, %Oidcc.Token{}} + + """ + @doc since: "3.0.0" + @spec retrieve_token( + auth_code :: String.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + opts :: :oidcc_token.retrieve_opts() | :oidcc_client_context.opts() + ) :: + {:ok, Oidcc.Token.t()} | {:error, :oidcc_client_context.error() | :oidcc_token.error()} + def retrieve_token(auth_code, provider_configuration_name, client_id, client_secret, opts), + do: + auth_code + |> :oidcc.retrieve_token( + provider_configuration_name, + client_id, + client_secret, + opts + ) + |> Oidcc.Token.normalize_token_response() + + @doc """ + Refresh Token + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> # Get refresh_token from redirect + ...> refresh_token = "refresh_token" + ...> + ...> Oidcc.refresh_token( + ...> refresh_token, + ...> pid, + ...> "client_id", + ...> "client_secret", + ...> %{expected_subject: "sub_from_initial_id_token"} + ...> ) + ...> # => {:ok, %Oidcc.Token{}} + + """ + @doc since: "3.0.0" + @spec refresh_token( + refresh_token :: String.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + opts :: :oidcc_token.refresh_opts() | :oidcc_client_context.opts() + ) :: {:ok, Oidcc.Token.t()} | {:error, :oidcc_token.error()} + @spec 
refresh_token( + token :: Oidcc.Token.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + opts :: :oidcc_token.refresh_opts_no_sub() | :oidcc_client_context.opts() + ) :: + {:ok, Oidcc.Token.t()} | {:error, :oidcc_client_context.error() | :oidcc_token.error()} + def refresh_token(token, provider_configuration_name, client_id, client_secret, opts \\ %{}) do + token = + case token do + %Oidcc.Token{} = token -> Oidcc.Token.struct_to_record(token) + token when is_binary(token) -> token + end + + token + |> :oidcc.refresh_token( + provider_configuration_name, + client_id, + client_secret, + opts + ) + |> Oidcc.Token.normalize_token_response() + end + + @doc """ + Introspect the given access token + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> Oidcc.introspect_token( + ...> "access_token", + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> # => {:ok, %Oidcc.TokenIntrospection{}} + + """ + @doc since: "3.0.0" + @spec introspect_token( + access_token :: String.t() | Oidcc.Token.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t(), + opts :: :oidcc_token_introspection.opts() | :oidcc_client_context.opts() + ) :: + {:ok, Oidcc.TokenIntrospection.t()} + | {:error, :oidcc_client_context.error() | :oidcc_token_introspection.error()} + def introspect_token( + token, + provider_configuration_name, + client_id, + client_secret, + opts \\ %{} + ) do + token = + case token do + %Oidcc.Token{} = token -> Oidcc.Token.struct_to_record(token) + token when is_binary(token) -> token + end + + with {:ok, introspection} <- + :oidcc.introspect_token( + token, + provider_configuration_name, + client_id, + client_secret, + opts + ) do + {:ok, Oidcc.TokenIntrospection.record_to_struct(introspection)} + end + end + + 
@doc """ + Load userinfo for the given token + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> # Get access_token from Oidcc.Token.retrieve/3 + ...> access_token = "access_token" + ...> + ...> Oidcc.retrieve_userinfo( + ...> access_token, + ...> pid, + ...> "client_id", + ...> "client_secret", + ...> %{expected_subject: "sub"} + ...> ) + ...> # => {:ok, %{"sub" => "sub"}} + + """ + @doc since: "3.0.0" + @spec retrieve_userinfo( + token :: Oidcc.Token.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + opts :: :oidcc_userinfo.retrieve_opts_no_sub() | :oidcc_client_context.opts() + ) :: {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_userinfo.error()} + @spec retrieve_userinfo( + access_token :: String.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t(), + opts :: :oidcc_userinfo.retrieve_opts() | :oidcc_client_context.opts() + ) :: + {:ok, :oidcc_jwt_util.claims()} + | {:error, :oidcc_client_context.error() | :oidcc_userinfo.error()} + def retrieve_userinfo(token, provider_configuration_name, client_id, client_secret, opts \\ %{}) do + token = + case token do + %Oidcc.Token{} = token -> Oidcc.Token.struct_to_record(token) + token when is_binary(token) -> token + end + + :oidcc.retrieve_userinfo(token, provider_configuration_name, client_id, client_secret, opts) + end + + @doc """ + Retrieve JSON Web Token (JWT) Profile Token + + https://datatracker.ietf.org/doc/html/rfc7523#section-4 + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> %{"key" => key, "keyId" => kid, "userId" => subject} = "JWT_PROFILE" + ...> |> System.fetch_env!() + ...> |> JOSE.decode() + ...> + ...> jwk = 
JOSE.JWK.from_pem(key) + ...> + ...> {:ok, %Oidcc.Token{}} = + ...> Oidcc.jwt_profile_token( + ...> subject, + ...> pid, + ...> "JWT Profile Test", + ...> "client_secret", + ...> jwk, + ...> %{scope: ["openid", "urn:zitadel:iam:org:project:id:zitadel:aud"], kid: kid} + ...> ) + + """ + @doc since: "3.0.0" + @spec jwt_profile_token( + subject :: String.t(), + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t() | :unauthenticated, + jwk :: JOSE.JWK.t(), + opts :: :oidcc_token.jwt_profile_opts() | :oidcc_client_context.opts() + ) :: + {:ok, Oidcc.Token.t()} | {:error, :oidcc_client_context.error() | :oidcc_token.error()} + def jwt_profile_token(subject, provider_configuration_name, client_id, client_secret, jwk, opts) do + jwk = JOSE.JWK.to_record(jwk) + + subject + |> :oidcc.jwt_profile_token( + provider_configuration_name, + client_id, + client_secret, + jwk, + opts + ) + |> Oidcc.Token.normalize_token_response() + end + + @doc """ + Retrieve Client Credential Token + + See https://datatracker.ietf.org/doc/html/rfc6749#section-1.3.4 + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> {:ok, %Oidcc.Token{}} = + ...> Oidcc.client_credentials_token( + ...> pid, + ...> System.fetch_env!("CLIENT_CREDENTIALS_CLIENT_ID"), + ...> System.fetch_env!("CLIENT_CREDENTIALS_CLIENT_SECRET"), + ...> %{scope: ["openid"]} + ...> ) + + """ + @doc since: "3.0.0" + @spec client_credentials_token( + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: String.t(), + opts :: :oidcc_token.client_credentials_opts() | :oidcc_client_context.opts() + ) :: + {:ok, Oidcc.Token.t()} | {:error, :oidcc_client_context.error() | :oidcc_token.error()} + def client_credentials_token(provider_configuration_name, client_id, client_secret, opts), + do: + provider_configuration_name + |> 
:oidcc.client_credentials_token( + client_id, + client_secret, + opts + ) + |> Oidcc.Token.normalize_token_response() + + @doc """ + Create Initiate URI for Relaying Party initiated Logout + + See [https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout] + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> # Get access_token from Oidcc.Token.retrieve/3 + ...> token = "token" + ...> + ...> {:ok, _redirect_uri} = Oidcc.initiate_logout_url( + ...> token, + ...> pid, + ...> "client_id" + ...> ) + + """ + @doc since: "3.0.0" + @spec initiate_logout_url( + token :: id_token | Oidcc.Token.t() | :undefined, + provider_configuration_name :: GenServer.name(), + client_id :: String.t(), + opts :: :oidcc_logout.initiate_url_opts() | :oidcc_client_context.opts() + ) :: + {:ok, :uri_string.uri_string()} + | {:error, :oidcc_client_context.error() | :oidcc_logout.error()} + when id_token: String.t() + def initiate_logout_url( + token, + provider_configuration_name, + client_id, + opts \\ %{} + ) do + token = + case token do + %Oidcc.Token{} = token -> Oidcc.Token.struct_to_record(token) + token when is_binary(token) -> token + :undefined -> :undefined + end + + :oidcc.initiate_logout_url( + token, + provider_configuration_name, + client_id, + opts + ) + end +end diff --git a/deps/oidcc/lib/oidcc/authorization.ex b/deps/oidcc/lib/oidcc/authorization.ex new file mode 100644 index 0000000..345e2a8 --- /dev/null +++ b/deps/oidcc/lib/oidcc/authorization.ex @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Authorization do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :par_request, :start], + description: "Emitted at the start of executing a PAR request", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: 
"%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :par_request, :stop], + description: "Emitted at the end of executing a PAR request", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :par_request, :exception], + description: "Emitted at the end of executing a PAR request", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + @moduledoc """ + Functions to start an OpenID Connect Authorization + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + alias Oidcc.ClientContext + + @doc """ + Create Auth Redirect URL + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.create_redirect_url/4`. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> {:ok, _redirect_uri} = + ...> Oidcc.Authorization.create_redirect_url( + ...> client_context, + ...> %{redirect_uri: "https://my.server/return"} + ...> ) + """ + @doc since: "3.0.0" + @spec create_redirect_url( + client_context :: ClientContext.t(), + opts :: :oidcc_authorization.opts() + ) :: {:ok, :uri_string.uri_string()} | {:error, :oidcc_authorization.error()} + def create_redirect_url(client_context, opts), + do: + client_context + |> ClientContext.struct_to_record() + |> :oidcc_authorization.create_redirect_url(opts) +end diff --git a/deps/oidcc/lib/oidcc/client_context.ex b/deps/oidcc/lib/oidcc/client_context.ex new file mode 100644 index 0000000..83798f9 --- /dev/null +++ b/deps/oidcc/lib/oidcc/client_context.ex @@ 
-0,0 +1,208 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.ClientContext do + @moduledoc """ + Client Configuration for authorization, token exchange and userinfo + + For most projects, it makes sense to use + `Oidcc.ProviderConfiguration.Worker` and the high-level + interface of `Oidcc`. In that case direct usage of this + module is not needed. + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :context, + record_name: :oidcc_client_context, + hrl: "include/oidcc_client_context.hrl" + + alias Oidcc.ProviderConfiguration + + @typedoc since: "3.0.0" + @type t() :: authenticated_t() | unauthenticated_t() + + @typedoc since: "3.0.0" + @type authenticated_t() :: %__MODULE__{ + provider_configuration: ProviderConfiguration.t(), + jwks: JOSE.JWK.t(), + client_id: String.t(), + client_secret: String.t(), + client_jwks: JOSE.JWK.t() | :none + } + + @typedoc since: "3.0.0" + @type unauthenticated_t() :: %__MODULE__{ + provider_configuration: ProviderConfiguration.t(), + jwks: JOSE.JWK.t(), + client_id: String.t(), + client_secret: :unauthenticated, + client_jwks: :none + } + + @doc """ + Create Client Context from a `Oidcc.ProviderConfiguration.Worker` + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com", + ...> name: __MODULE__.GoogleConfigProvider + ...> }) + ...> + ...> {:ok, %Oidcc.ClientContext{}} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> __MODULE__.GoogleConfigProvider, + ...> "client_id", + ...> "client_Secret" + ...> ) + ...> + ...> {:ok, %Oidcc.ClientContext{}} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_Secret", + ...> %{client_jwks: JOSE.JWK.generate_key(16)} + ...> ) + """ + @doc since: "3.0.0" + @spec from_configuration_worker( + provider_name :: GenServer.name(), + client_id :: String.t(), + 
client_secret :: String.t(), + opts :: :oidcc_client_context.authenticated_opts() + ) :: {:ok, authenticated_t()} | {:error, :oidcc_client_context.error()} + @spec from_configuration_worker( + provider_name :: GenServer.name(), + client_id :: String.t(), + client_secret :: :unauthenticated, + opts :: :oidcc_client_context.unauthenticated_opts() + ) :: {:ok, unauthenticated_t()} | {:error, :oidcc_client_context.error()} + def from_configuration_worker(provider_name, client_id, client_secret, opts \\ %{}) do + opts = Map.update(opts, :client_jwks, :none, &JOSE.JWK.to_record/1) + + with {:ok, client_context} <- + :oidcc_client_context.from_configuration_worker( + provider_name, + client_id, + client_secret, + opts + ) do + {:ok, record_to_struct(client_context)} + end + end + + @doc """ + Create Client Context manually + + ## Examples + + iex> {:ok, {configuration, _expiry}} = + ...> Oidcc.ProviderConfiguration.load_configuration( + ...> "https://login.salesforce.com" + ...> ) + ...> + ...> {:ok, {jwks, _expiry}} = + ...> Oidcc.ProviderConfiguration.load_jwks( + ...> configuration.jwks_uri + ...> ) + ...> + ...> %Oidcc.ClientContext{} = + ...> Oidcc.ClientContext.from_manual( + ...> configuration, + ...> jwks, + ...> "client_id", + ...> "client_Secret", + ...> %{client_jwks: JOSE.JWK.generate_key(16)} + ...> ) + """ + @doc since: "3.0.0" + @spec from_manual( + configuration :: ProviderConfiguration.t(), + jwks :: JOSE.JWK.t(), + client_id :: String.t(), + client_secret :: String.t(), + opts :: :oidcc_client_context.authenticated_opts() + ) :: authenticated_t() + @spec from_manual( + configuration :: ProviderConfiguration.t(), + jwks :: JOSE.JWK.t(), + client_id :: String.t(), + client_secret :: :unauthenticated, + opts :: :oidcc_client_context.unauthenticated_opts() + ) :: unauthenticated_t() + def from_manual(configuration, jwks, client_id, client_secret, opts \\ %{}) do + configuration = ProviderConfiguration.struct_to_record(configuration) + jwks = 
JOSE.JWK.to_record(jwks) + opts = Map.update(opts, :client_jwks, :none, &JOSE.JWK.to_record/1) + + configuration + |> :oidcc_client_context.from_manual(jwks, client_id, client_secret, opts) + |> record_to_struct() + end + + @doc """ + Apply OpenID Connect / OAuth2 Profiles to the context + + See `:oidcc_client_context.apply_profiles/2` for more. + + ## Examples + + iex> {:ok, _pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com", + ...> name: __MODULE__.GoogleConfigProvider + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> __MODULE__.GoogleConfigProvider, + ...> "client_id", + ...> "client_Secret" + ...> ) + ...> + ...> {:ok, %Oidcc.ClientContext{}, %{}} = + ...> Oidcc.ClientContext.apply_profiles( + ...> client_context, + ...> %{profiles: [:fapi2_message_signing]} + ...> ) + """ + @doc since: "3.2.0" + @spec apply_profiles(t(), :oidcc_profile.opts()) :: + {:ok, t(), :oidcc_profile.opts_no_profiles()} | {:error, :oidcc_client_context.error()} + def apply_profiles(client_context, opts) do + case :oidcc_client_context.apply_profiles(struct_to_record(client_context), opts) do + {:ok, context_record, opts} -> + {:ok, record_to_struct(context_record), opts} + + {:error, reason} -> + {:error, reason} + end + end + + @impl Oidcc.RecordStruct + def record_to_struct(record) do + record + |> super() + |> Map.update!(:provider_configuration, &ProviderConfiguration.record_to_struct/1) + |> Map.update!(:jwks, &JOSE.JWK.from_record/1) + |> update_if_not_none(:client_jwks, &JOSE.JWK.from_record/1) + end + + @impl Oidcc.RecordStruct + def struct_to_record(struct) do + struct + |> Map.update!(:provider_configuration, &ProviderConfiguration.struct_to_record/1) + |> Map.update!(:jwks, &JOSE.JWK.to_record/1) + |> update_if_not_none(:client_jwks, &JOSE.JWK.to_record/1) + |> super() + end + + defp update_if_not_none(map, key, callback) do + Map.update!(map, key, fn + :none 
-> :none + other -> callback.(other) + end) + end +end diff --git a/deps/oidcc/lib/oidcc/client_registration.ex b/deps/oidcc/lib/oidcc/client_registration.ex new file mode 100644 index 0000000..13aa30f --- /dev/null +++ b/deps/oidcc/lib/oidcc/client_registration.ex @@ -0,0 +1,148 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.ClientRegistration do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :register_client, :start], + description: "Emitted at the start of registering the client", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :register_client, :stop], + description: "Emitted at the end of registering the client", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :register_client, :exception], + description: "Emitted at the end of registering the client", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + @moduledoc """ + Dynamic Client Registration Utilities + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :metadata, + record_name: :oidcc_client_registration, + record_type_module: :oidcc_client_registration, + record_type_name: :t, + hrl: "include/oidcc_client_registration.hrl" + + alias Oidcc.ClientRegistration.Response + alias Oidcc.ProviderConfiguration + + @typedoc """ + Client Metadata Struct + + See https://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata and + https://openid.net/specs/openid-connect-rpinitiated-1_0.html#ClientMetadata + """ + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + redirect_uris: [:uri_string.uri_string()], + response_types: 
[String.t()] | :undefined, + grant_types: [String.t()] | :undefined, + application_type: :web | :native, + contacts: [String.t()] | :undefined, + client_name: String.t() | :undefined, + logo_uri: :uri_string.uri_string() | :undefined, + client_uri: :uri_string.uri_string() | :undefined, + policy_uri: :uri_string.uri_string() | :undefined, + tos_uri: :uri_string.uri_string() | :undefined, + jwks: :jose_jwk.key() | :undefined, + jwks_uri: :uri_string.uri_string() | :undefined, + sector_identifier_uri: :uri_string.uri_string() | :undefined, + subject_type: :pairwise | :public | :undefined, + id_token_signed_response_alg: String.t() | :undefined, + id_token_encrypted_response_alg: String.t() | :undefined, + id_token_encrypted_response_enc: String.t() | :undefined, + userinfo_signed_response_alg: String.t() | :undefined, + userinfo_encrypted_response_alg: String.t() | :undefined, + userinfo_encrypted_response_enc: String.t() | :undefined, + request_object_signing_alg: String.t() | :undefined, + request_object_encryption_alg: String.t() | :undefined, + request_object_encryption_enc: String.t() | :undefined, + token_endpoint_auth_method: String.t(), + token_endpoint_auth_signing_alg: String.t() | :undefined, + default_max_age: pos_integer() | :undefined, + require_auth_time: boolean(), + default_acr_values: [String.t()] | :undefined, + initiate_login_uri: :uri_string.uri_string() | :undefined, + request_uris: [:uri_string.uri_string()] | :undefined, + post_logout_redirect_uris: [:uri_string.uri_string()] | :undefined, + require_pushed_authorization_requests: boolean(), + dpop_bound_access_tokens: boolean(), + extra_fields: %{String.t() => term()} + } + + @doc """ + Register Client + + ## Examples + + iex> {:ok, {provider_configuration, _expiry}} = + ...> Oidcc.ProviderConfiguration.load_configuration("https://accounts.google.com") + ...> + ...> Oidcc.ClientRegistration.register( + ...> provider_configuration, + ...> %Oidcc.ClientRegistration{ + ...> redirect_uris: 
["https://your.application.com/oidcc/callback"] + ...> }, + ...> %{initial_access_token: "optional token you got from the provider"} + ...> ) + ...> # {:ok, %Oidcc.ClientRegistration.Response{ + ...> # client_id: client_id, + ...> # client_secret: client_secret + ...> # }} + + """ + @doc since: "3.0.0" + @spec register(provider_configuration, registration, opts) :: + {:ok, Response.t()} | {:error, :oidcc_client_registration.error()} + when provider_configuration: ProviderConfiguration.t(), + registration: t(), + opts: :oidcc_client_registration.opts() + def register(provider_configuration, registration, opts \\ %{}) do + provider_configuration = ProviderConfiguration.struct_to_record(provider_configuration) + registration = struct_to_record(registration) + + with {:ok, response} <- + :oidcc_client_registration.register(provider_configuration, registration, opts) do + {:ok, Response.record_to_struct(response)} + end + end + + @impl Oidcc.RecordStruct + def record_to_struct(record) do + record + |> super() + |> update_if_not_undefined(:jwks, &JOSE.JWK.from_record/1) + end + + @impl Oidcc.RecordStruct + def struct_to_record(struct) do + struct + |> update_if_not_undefined(:jwks, &JOSE.JWK.to_record/1) + |> super() + end + + defp update_if_not_undefined(map, key, callback) do + Map.update!(map, key, fn + :undefined -> :undefined + other -> callback.(other) + end) + end +end diff --git a/deps/oidcc/lib/oidcc/client_registration/response.ex b/deps/oidcc/lib/oidcc/client_registration/response.ex new file mode 100644 index 0000000..df5babf --- /dev/null +++ b/deps/oidcc/lib/oidcc/client_registration/response.ex @@ -0,0 +1,32 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.ClientRegistration.Response do + @moduledoc """ + Client Registration Response Struct + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :response, + record_name: :oidcc_client_registration_response, + 
record_type_module: :oidcc_client_registration, + record_type_name: :response, + hrl: "include/oidcc_client_registration.hrl" + + @typedoc """ + Client Registration Response Struct + + See https://openid.net/specs/openid-connect-registration-1_0.html#RegistrationResponse + """ + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + client_id: String.t(), + client_secret: String.t() | :undefined, + registration_access_token: String.t() | :undefined, + registration_client_uri: :uri_string.uri_string() | :undefined, + client_id_issued_at: pos_integer() | :undefined, + client_secret_expires_at: pos_integer() | :undefined, + extra_fields: %{String.t() => term()} + } +end diff --git a/deps/oidcc/lib/oidcc/logout.ex b/deps/oidcc/lib/oidcc/logout.ex new file mode 100644 index 0000000..e12f84a --- /dev/null +++ b/deps/oidcc/lib/oidcc/logout.ex @@ -0,0 +1,65 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Logout do + @moduledoc """ + Logout from the OpenID Provider + """ + @moduledoc since: "3.0.0" + + alias Oidcc.ClientContext + alias Oidcc.Token + + @doc """ + Initiate URI for Relaying Party initiated Logout + + See https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.initiate_logout_url/4`. 
+ + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> :unauthenticated + ...> ) + ...> + ...> # Get `token` from `Oidcc.retrieve_token/5` + ...> token = "token" + ...> + ...> {:ok, _redirect_uri} = + ...> Oidcc.Logout.initiate_url( + ...> token, + ...> client_context, + ...> %{post_logout_redirect_uri: "https://my.server/return"} + ...> ) + """ + @doc since: "3.0.0" + @spec initiate_url( + token :: id_token | Token.t() | :undefined, + client_context :: ClientContext.t(), + opts :: :oidcc_logout.initiate_url_opts() + ) :: + {:ok, :uri_string.uri_string()} + | {:error, :oidcc_logout.error()} + when id_token: String.t() + def initiate_url(token, client_context, opts \\ %{}) do + client_context = ClientContext.struct_to_record(client_context) + + token = + case token do + token when is_binary(token) -> token + %Token{} = token -> Token.struct_to_record(token) + end + + :oidcc_logout.initiate_url(token, client_context, opts) + end +end diff --git a/deps/oidcc/lib/oidcc/provider_configuration.ex b/deps/oidcc/lib/oidcc/provider_configuration.ex new file mode 100644 index 0000000..d341091 --- /dev/null +++ b/deps/oidcc/lib/oidcc/provider_configuration.ex @@ -0,0 +1,198 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.ProviderConfiguration do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :load_configuration, :start], + description: "Emitted at the start of loading the provider configuration", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :load_configuration, :stop], + description: "Emitted at the end of loading the provider 
configuration", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :load_configuration, :exception], + description: "Emitted at the end of loading the provider configuration", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :load_jwks, :start], + description: "Emitted at the start of loading the provider jwks", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{jwks_uri: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :load_jwks, :stop], + description: "Emitted at the end of loading the provider jwks", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{jwks_uri: :uri_string.uri_string()}" + }) + + telemetry_event(%{ + event: [:oidcc, :load_jwks, :exception], + description: "Emitted at the end of loading the provider jwks", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{jwks_uri: :uri_string.uri_string()}" + }) + + @moduledoc """ + Tooling to load and parse Openid Configuration + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :configuration, + record_name: :oidcc_provider_configuration, + hrl: "include/oidcc_provider_configuration.hrl" + + @typedoc """ + Configuration Struct + + For details on the fields see: + * https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata + * https://datatracker.ietf.org/doc/html/draft-jones-oauth-discovery-01#section-4.1 + * https://openid.net/specs/openid-connect-rpinitiated-1_0.html#OPMetadata + """ + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + issuer: :uri_string.uri_string(), + issuer_regex: binary() | :undefined, + authorization_endpoint: :uri_string.uri_string(), + 
token_endpoint: :uri_string.uri_string() | :undefined, + userinfo_endpoint: :uri_string.uri_string() | :undefined, + jwks_uri: :uri_string.uri_string() | :undefined, + registration_endpoint: :uri_string.uri_string() | :undefined, + scopes_supported: [String.t()] | :undefined, + response_types_supported: [String.t()], + response_modes_supported: [String.t()], + grant_types_supported: [String.t()], + acr_values_supported: [String.t()] | :undefined, + subject_types_supported: [:pairwise | :public], + id_token_signing_alg_values_supported: [String.t()], + id_token_encryption_alg_values_supported: [String.t()] | :undefined, + id_token_encryption_enc_values_supported: [String.t()] | :undefined, + userinfo_signing_alg_values_supported: [String.t()] | :undefined, + userinfo_encryption_alg_values_supported: [String.t()] | :undefined, + userinfo_encryption_enc_values_supported: [String.t()] | :undefined, + request_object_signing_alg_values_supported: [String.t()] | :undefined, + request_object_encryption_alg_values_supported: [String.t()] | :undefined, + request_object_encryption_enc_values_supported: [String.t()] | :undefined, + token_endpoint_auth_methods_supported: [String.t()], + token_endpoint_auth_signing_alg_values_supported: [String.t()] | :undefined, + display_values_supported: [String.t()] | :undefined, + claim_types_supported: [:normal | :aggregated | :distributed], + claims_supported: [String.t()] | :undefined, + service_documentation: :uri_string.uri_string() | :undefined, + claims_locales_supported: [String.t()] | :undefined, + ui_locales_supported: [String.t()] | :undefined, + claims_parameter_supported: boolean(), + request_parameter_supported: boolean(), + request_uri_parameter_supported: boolean(), + require_request_uri_registration: boolean(), + op_policy_uri: :uri_string.uri_string() | :undefined, + op_tos_uri: :uri_string.uri_string() | :undefined, + revocation_endpoint: :uri_string.uri_string() | :undefined, + revocation_endpoint_auth_methods_supported: 
[String.t()], + revocation_endpoint_auth_signing_alg_values_supported: [String.t()] | :undefined, + introspection_endpoint: :uri_string.uri_string() | :undefined, + introspection_endpoint_auth_methods_supported: [String.t()], + introspection_endpoint_auth_signing_alg_values_supported: [String.t()] | :undefined, + code_challenge_methods_supported: [String.t()] | :undefined, + end_session_endpoint: :uri_string.uri_string() | :undefined, + require_pushed_authorization_requests: boolean(), + pushed_authorization_request_endpoint: :uri_string.uri_string() | :undefined, + authorization_response_iss_parameter_supported: boolean(), + authorization_signing_alg_values_supported: [String.t()] | :undefined, + authorization_encryption_alg_values_supported: [String.t()] | :undefined, + authorization_encryption_enc_values_supported: [String.t()] | :undefined, + dpop_signing_alg_values_supported: [String.t()] | :undefined, + require_signed_request_object: boolean(), + mtls_endpoint_aliases: %{binary() => :uri_string.uri_string()}, + tls_client_certificate_bound_access_tokens: boolean(), + extra_fields: %{String.t() => term()} + } + + @doc """ + Load OpenID Configuration + + ## Examples + + iex> {:ok, { + ...> %ProviderConfiguration{issuer: "https://accounts.google.com"}, + ...> _expiry + ...> }} = Oidcc.ProviderConfiguration.load_configuration("https://accounts.google.com") + """ + @doc since: "3.0.0" + @spec load_configuration( + issuer :: :uri_string.uri_string(), + opts :: :oidcc_provider_configuration.opts() + ) :: + {:ok, {configuration :: t(), expiry :: pos_integer()}} + | {:error, :oidcc_provider_configuration.error()} + def load_configuration(issuer, opts \\ %{}) do + with {:ok, {configuration, expiry}} <- + :oidcc_provider_configuration.load_configuration(issuer, opts) do + {:ok, {record_to_struct(configuration), expiry}} + end + end + + @doc """ + Load JWKs + + ## Examples + + iex> {:ok, {%JOSE.JWK{}, _expiry}} = + ...> 
Oidcc.ProviderConfiguration.load_jwks("https://www.googleapis.com/oauth2/v3/certs") + """ + @doc since: "3.0.0" + @spec load_jwks( + jwks_uri :: :uri_string.uri_string(), + opts :: :oidcc_provider_configuration.opts() + ) :: + {:ok, {jwks :: JOSE.JWK.t(), expiry :: pos_integer()}} + | {:error, :oidcc_provider_configuration.error()} + def load_jwks(jwks_uri, opts \\ %{}) do + with {:ok, {jwks, expiry}} <- + :oidcc_provider_configuration.load_jwks(jwks_uri, opts) do + {:ok, {JOSE.JWK.from_record(jwks), expiry}} + end + end + + @doc """ + Decode JSON into OpenID configuration + + ## Examples + + iex> {:ok, {{~c"HTTP/1.1",200, ~c"OK"}, _headers, body}} = + ...> :httpc.request("https://accounts.google.com/.well-known/openid-configuration") + ...> + ...> decoded_json = body |> to_string() |> JOSE.decode() + ...> + ...> {:ok, %ProviderConfiguration{issuer: "https://accounts.google.com"}} = + ...> Oidcc.ProviderConfiguration.decode_configuration(decoded_json) + """ + @doc since: "3.0.0" + @spec decode_configuration(configuration :: map(), opts :: :oidcc_provider_configuration.opts()) :: + {:ok, t()} | {:error, :oidcc_provider_configuration.error()} + def decode_configuration(configuration, opts \\ %{}) do + with {:ok, configuration} <- + :oidcc_provider_configuration.decode_configuration(configuration, opts) do + {:ok, record_to_struct(configuration)} + end + end +end diff --git a/deps/oidcc/lib/oidcc/provider_configuration/worker.ex b/deps/oidcc/lib/oidcc/provider_configuration/worker.ex new file mode 100644 index 0000000..50160e3 --- /dev/null +++ b/deps/oidcc/lib/oidcc/provider_configuration/worker.ex @@ -0,0 +1,153 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.ProviderConfiguration.Worker do + @moduledoc """ + OIDC Config Provider Worker + + Loads and continuously refreshes the OIDC configuration and JWKs + + ## Usage in Supervisor + + ```elixir + Supervisor.init([ + 
{Oidcc.ProviderConfiguration.Worker, %{issuer: "https://accounts.google.com"}} + ], strategy: :one_for_one) + ``` + """ + @moduledoc since: "3.0.0" + + alias Oidcc.ProviderConfiguration + + @typedoc """ + See `t:oidcc_provider_configuration_worker.opts/0` + """ + @typedoc since: "3.0.0" + @type opts() :: %{ + optional(:name) => GenServer.name(), + required(:issuer) => :uri_string.uri_string(), + optional(:provider_configuration_opts) => :oidcc_provider_configuration.opts(), + optional(:backoff_min) => :oidcc_backoff.min(), + optional(:backoff_max) => :oidcc_backoff.max(), + optional(:backoff_type) => :oidcc_backoff.type() + } + + @doc """ + Start Configuration Worker + + ## Examples + + iex> {:ok, _pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com", + ...> name: __MODULE__.GoogleConfigProvider + ...> }) + """ + @doc since: "3.0.0" + @spec start_link(opts :: opts()) :: GenServer.on_start() + def start_link(opts) + + def start_link(%{name: name} = opts) when is_atom(name), + do: :oidcc_provider_configuration_worker.start_link(%{opts | name: {:local, name}}) + + def start_link(opts), do: :oidcc_provider_configuration_worker.start_link(opts) + + @spec child_spec(opts :: opts()) :: Supervisor.child_spec() + def child_spec(opts), + do: + Supervisor.child_spec( + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]} + }, + [] + ) + + @doc """ + Get Configuration + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> %Oidcc.ProviderConfiguration{issuer: "https://accounts.google.com"} = + ...> Oidcc.ProviderConfiguration.Worker.get_provider_configuration(pid) + """ + @doc since: "3.0.0" + @spec get_provider_configuration(name :: GenServer.name()) :: ProviderConfiguration.t() + def get_provider_configuration(name), + do: + name + |> :oidcc_provider_configuration_worker.get_provider_configuration() + |> 
ProviderConfiguration.record_to_struct() + + @doc """ + Get Parsed Jwks + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> %JOSE.JWK{} = + ...> Oidcc.ProviderConfiguration.Worker.get_jwks(pid) + """ + @doc since: "3.0.0" + @spec get_jwks(name :: GenServer.name()) :: JOSE.JWK.t() + def get_jwks(name), + do: + name + |> :oidcc_provider_configuration_worker.get_jwks() + |> JOSE.JWK.from_record() + + @doc """ + Refresh Configuration + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> :ok = Oidcc.ProviderConfiguration.Worker.refresh_configuration(pid) + """ + @doc since: "3.0.0" + @spec refresh_configuration(name :: GenServer.name()) :: :ok + def refresh_configuration(name), + do: :oidcc_provider_configuration_worker.refresh_configuration(name) + + @doc """ + Refresh JWKs + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> :ok = Oidcc.ProviderConfiguration.Worker.refresh_jwks(pid) + """ + @doc since: "3.0.0" + @spec refresh_jwks(name :: GenServer.name()) :: :ok + def refresh_jwks(name), + do: :oidcc_provider_configuration_worker.refresh_jwks(name) + + @doc """ + Refresh JWKs if the provided `Kid` is not matching any currently loaded keys + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://accounts.google.com" + ...> }) + ...> :ok = Oidcc.ProviderConfiguration.Worker.refresh_jwks_for_unknown_kid(pid, "kid") + """ + @doc since: "3.0.0" + @spec refresh_jwks_for_unknown_kid(name :: GenServer.name(), kid :: String.t()) :: :ok + def refresh_jwks_for_unknown_kid(name, kid), + do: :oidcc_provider_configuration_worker.refresh_jwks_for_unknown_kid(name, kid) +end diff --git 
a/deps/oidcc/lib/oidcc/record_struct.ex b/deps/oidcc/lib/oidcc/record_struct.ex new file mode 100644 index 0000000..79a462a --- /dev/null +++ b/deps/oidcc/lib/oidcc/record_struct.ex @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.RecordStruct do + @moduledoc false + + @callback record_to_struct(record :: tuple()) :: struct() + @callback struct_to_record(struct :: struct()) :: tuple() + + defmacro __using__(options) do + internal_name = Keyword.fetch!(options, :internal_name) + record_name = Keyword.fetch!(options, :record_name) + record_type_module = Keyword.get(options, :record_type_module, record_name) + record_type_name = Keyword.get(options, :record_type_name, :t) + hrl = Keyword.fetch!(options, :hrl) + + quote bind_quoted: [ + internal_name: internal_name, + record_name: record_name, + record_type_module: record_type_module, + record_type_name: record_type_name, + hrl: hrl, + behaviour: __MODULE__ + ] do + @behaviour behaviour + + require Record + + record = Record.extract(record_name, from: hrl) + keys = :lists.map(&elem(&1, 0), record) + vals = :lists.map(&{&1, [], nil}, keys) + pairs = :lists.zip(keys, vals) + + Record.defrecordp(internal_name, record_name, record) + + defstruct record + + @doc false + @impl behaviour + @spec record_to_struct(record :: unquote(record_type_module).unquote(record_type_name)()) :: + t() + def record_to_struct(record), do: struct!(__MODULE__, unquote(internal_name)(record)) + + @doc false + @impl behaviour + @spec struct_to_record(struct :: t()) :: + unquote(record_type_module).unquote(record_type_name)() + def struct_to_record(%__MODULE__{unquote_splicing(pairs)}), + do: {unquote(record_name), unquote_splicing(vals)} + + defoverridable record_to_struct: 1, struct_to_record: 1 + end + end +end diff --git a/deps/oidcc/lib/oidcc/token.ex b/deps/oidcc/lib/oidcc/token.ex new file mode 100644 index 0000000..0bcf721 --- /dev/null +++ 
b/deps/oidcc/lib/oidcc/token.ex @@ -0,0 +1,492 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Token do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :request_token, :start], + description: "Emitted at the start of requesting a code token", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :request_token, :stop], + description: "Emitted at the end of requesting a code token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :request_token, :exception], + description: "Emitted at the end of requesting a code token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :refresh_token, :start], + description: "Emitted at the start of refreshing a token", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :refresh_token, :stop], + description: "Emitted at the end of refreshing a token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :refresh_token, :exception], + description: "Emitted at the end of refreshing a token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :jwt_profile_token, :start], + description: "Emitted at the start of 
exchanging a JWT profile token", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :jwt_profile_token, :stop], + description: "Emitted at the end of exchanging a JWT profile token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :jwt_profile_token, :exception], + description: "Emitted at the end of exchanging a JWT profile token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :client_credentials, :start], + description: "Emitted at the start of requesting a client credentials token", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :client_credentials, :stop], + description: "Emitted at the end of requesting a client credentials token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :client_credentials, :exception], + description: "Emitted at the end of requesting a client credentials token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + @moduledoc """ + Facilitate OpenID Code/Token Exchanges + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :token, + record_name: :oidcc_token, + hrl: "include/oidcc_token.hrl" + + alias Oidcc.ClientContext + alias Oidcc.Token.Access + alias 
Oidcc.Token.Id + alias Oidcc.Token.Refresh + + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + id: Id.t() | none, + access: Access.t() | none, + refresh: Refresh.t() | none, + scope: :oidcc_scope.scopes() + } + + @type retrieve_opts() :: :oidcc_token.retrieve_opts() + + @doc """ + retrieve the token using the authcode received before and directly validate + the result. + + the authcode was sent to the local endpoint by the OpenId Connect provider, + using redirects + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.retrieve_token/5`. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> # Get auth_code from redirect + ...> auth_code = "auth_code" + ...> + ...> Oidcc.Token.retrieve( + ...> auth_code, + ...> client_context, + ...> %{redirect_uri: "https://my.server/return"} + ...> ) + ...> # => {:ok, %Oidcc.Token{}} + + """ + @doc since: "3.0.0" + @spec retrieve( + auth_code :: String.t(), + client_context :: ClientContext.t(), + opts :: retrieve_opts() + ) :: + {:ok, t()} | {:error, :oidcc_token.error()} + def retrieve(auth_code, client_context, opts) do + client_context = ClientContext.struct_to_record(client_context) + + auth_code + |> :oidcc_token.retrieve(client_context, opts) + |> normalize_token_response() + end + + @doc """ + Validate the JARM response, returning the valid claims as a map. 
+ + the response was sent to the local endpoint by the OpenId Connect provider, + using redirects + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> # Get auth_code from redirect + ...> response = "JWT" + ...> + ...> Oidcc.Token.validate_jarm( + ...> response, + ...> client_context, + ...> %{} + ...> ) + ...> # => {:ok, %{"code" => auth_code}} + + """ + @doc since: "3.2.0" + @spec validate_jarm( + response :: String.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token.validate_jarm_opts() + ) :: + {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_token.error()} + def validate_jarm(response, client_context, opts) do + client_context = ClientContext.struct_to_record(client_context) + + :oidcc_token.validate_jarm(response, client_context, opts) + end + + @doc """ + Refresh Token + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.refresh_token/5`. 
+ + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> # Get refresh_token from redirect + ...> refresh_token = "refresh_token" + ...> + ...> Oidcc.Token.refresh( + ...> refresh_token, + ...> client_context, + ...> %{expected_subject: "sub"} + ...> ) + ...> # => {:ok, %Oidcc.Token{}} + + """ + @doc since: "3.0.0" + @spec refresh( + refresh_token :: String.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token.refresh_opts() + ) :: {:ok, t()} | {:error, :oidcc_token.error()} + @spec refresh( + token :: t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token.refresh_opts_no_sub() + ) :: {:ok, t()} | {:error, :oidcc_token.error()} + def refresh(token, client_context, opts) do + token = + case token do + token when is_binary(token) -> token + %__MODULE__{} = token -> struct_to_record(token) + end + + client_context = ClientContext.struct_to_record(client_context) + + token + |> :oidcc_token.refresh(client_context, opts) + |> normalize_token_response() + end + + @doc """ + Validate ID Token + + Usually the id token is validated using `retrieve/3`. + If you get the token passed from somewhere else, this function can validate it. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> #Get IdToken from somewhere + ...> id_token = "id_token" + ...> + ...> Oidcc.Token.validate_id_token(id_token, client_context, :any) + ...> # => {:ok, %{"sub" => "sub", ... 
}} + + """ + @doc since: "3.0.0" + @spec validate_id_token( + id_token :: String.t(), + client_context :: ClientContext.t(), + nonce_or_opts :: String.t() | :any | retrieve_opts() + ) :: {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_token.error()} + def validate_id_token(id_token, client_context, nonce_or_opts), + do: + :oidcc_token.validate_id_token( + id_token, + ClientContext.struct_to_record(client_context), + nonce_or_opts + ) + + @doc """ + Validate JWT + + Validates a generic JWT (such as an access token) from the given provider. + Useful if the issuer is shared between multiple applications, and the access token + generated for a user at one client is used to validate their access at another client. + + Validating an arbitrary JWT token (not an ID token) is not covered by the OpenID + Connect specification. Therefore the signing / encryption algorithms are not + derieved from the provider configuration, but must be provided by the caller. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> # Get JWT from Authorization header + ...> jwt = "jwt" + ...> + ...> opts = %{ + ...> signing_algs: client_context.provider_configuration.id_token_signing_alg_values_supported + ...> } + ...> + ...> Oidcc.Token.validate_jwt(jwt, client_context, opts) + ...> # => {:ok, %{"sub" => "sub", ... 
}} + + """ + @doc since: "3.0.0" + @spec validate_jwt( + jwt :: String.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token.validate_jwt_opts() + ) :: {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_token.error()} + def validate_jwt(jwt, client_context, opts), + do: + :oidcc_token.validate_jwt( + jwt, + ClientContext.struct_to_record(client_context), + opts + ) + + @doc """ + Retrieve JSON Web Token (JWT) Profile Token + + See https://datatracker.ietf.org/doc/html/rfc7523#section-4 + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.jwt_profile_token/6`. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "JWT Profile Test", + ...> "client_secret" + ...> ) + ...> + ...> %{"key" => key, "keyId" => kid, "userId" => subject} = "JWT_PROFILE" + ...> |> System.fetch_env!() + ...> |> JOSE.decode() + ...> + ...> jwk = JOSE.JWK.from_pem(key) + ...> + ...> {:ok, %Oidcc.Token{}} = + ...> Oidcc.Token.jwt_profile( + ...> subject, + ...> client_context, + ...> jwk, + ...> %{scope: ["openid", "urn:zitadel:iam:org:project:id:zitadel:aud"], kid: kid} + ...> ) + + """ + @doc since: "3.0.0" + @spec jwt_profile( + subject :: String.t(), + client_context :: ClientContext.t(), + jwk :: JOSE.JWK.t(), + opts :: :oidcc_token.jwt_profile_opts() + ) :: {:ok, t()} | {:error, :oidcc_token.error()} + def jwt_profile(subject, client_context, jwk, opts) do + jwk = JOSE.JWK.to_record(jwk) + client_context = ClientContext.struct_to_record(client_context) + + subject + |> :oidcc_token.jwt_profile(client_context, jwk, opts) + |> normalize_token_response() + end + + @doc """ + Retrieve Client Credential Token + + See https://datatracker.ietf.org/doc/html/rfc6749#section-1.3.4 + + For a high level interface using 
`Oidcc.ProviderConfiguration.Worker` + see `Oidcc.client_credentials_token/4`. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://erlef-test-w4a8z2.zitadel.cloud" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> System.fetch_env!("CLIENT_CREDENTIALS_CLIENT_ID"), + ...> System.fetch_env!("CLIENT_CREDENTIALS_CLIENT_SECRET") + ...> ) + ...> + ...> {:ok, %Oidcc.Token{}} = + ...> Oidcc.Token.client_credentials( + ...> client_context, + ...> %{scope: ["openid"]} + ...> ) + + """ + @doc since: "3.0.0" + @spec client_credentials( + client_context :: ClientContext.t(), + opts :: :oidcc_token.client_credentials_opts() + ) :: {:ok, t()} | {:error, :oidcc_token.error()} + def client_credentials(client_context, opts), + do: + client_context + |> ClientContext.struct_to_record() + |> :oidcc_token.client_credentials(opts) + |> normalize_token_response() + + @doc false + @spec normalize_token_response( + response :: {:ok, :oidcc_token.t()} | {:error, :oidcc_token.error()} + ) :: + {:ok, t()} | {:error, :oidcc_token.error()} + def normalize_token_response(response) + def normalize_token_response({:ok, token}), do: {:ok, record_to_struct(token)} + + def normalize_token_response({:error, {:none_alg_used, token}}), + do: {:error, {:none_alg_used, record_to_struct(token)}} + + def normalize_token_response({:error, reason}), do: {:error, reason} + + @impl Oidcc.RecordStruct + def record_to_struct(record) do + record + |> super() + |> update_if_not_none(:id, &Id.record_to_struct/1) + |> update_if_not_none(:access, &Access.record_to_struct/1) + |> update_if_not_none(:refresh, &Refresh.record_to_struct/1) + end + + @impl Oidcc.RecordStruct + def struct_to_record(struct) do + struct + |> update_if_not_none(:id, &Id.struct_to_record/1) + |> update_if_not_none(:access, &Access.struct_to_record/1) + |> update_if_not_none(:refresh, 
&Refresh.struct_to_record/1) + |> super() + end + + defp update_if_not_none(map, key, callback) do + Map.update!(map, key, fn + :none -> :none + other -> callback.(other) + end) + end +end diff --git a/deps/oidcc/lib/oidcc/token/access.ex b/deps/oidcc/lib/oidcc/token/access.ex new file mode 100644 index 0000000..d6105c3 --- /dev/null +++ b/deps/oidcc/lib/oidcc/token/access.ex @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Token.Access do + @moduledoc """ + Access Token struct. + + See `t::oidcc_token.access/0` + """ + @moduledoc since: "3.0.0" + + alias Oidcc.ClientContext + + use Oidcc.RecordStruct, + internal_name: :token, + record_name: :oidcc_token_access, + record_type_module: :oidcc_token, + record_type_name: :access, + hrl: "include/oidcc_token.hrl" + + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + token: String.t(), + expires: pos_integer() | :undefined, + type: String.t() + } + + @doc """ + Generate a map of authorization headers to use when using the given + `Oidcc.Token.Access` struct to access an API endpoint. 
+ """ + @doc since: "3.2.0" + @spec authorization_headers( + access_token :: t(), + method :: :get | :post, + endpoint :: String.t(), + client_context :: ClientContext.t() + ) :: %{String.t() => String.t()} + @spec authorization_headers( + access_token :: t(), + method :: :get | :post, + endpoint :: String.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token.authorization_headers_opts() + ) :: %{String.t() => String.t()} + def authorization_headers( + access_token, + method, + endpoint, + client_context, + opts \\ %{} + ), + do: + :oidcc_token.authorization_headers( + struct_to_record(access_token), + method, + endpoint, + ClientContext.struct_to_record(client_context), + opts + ) +end diff --git a/deps/oidcc/lib/oidcc/token/id.ex b/deps/oidcc/lib/oidcc/token/id.ex new file mode 100644 index 0000000..b927a48 --- /dev/null +++ b/deps/oidcc/lib/oidcc/token/id.ex @@ -0,0 +1,22 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Token.Id do + @moduledoc """ + ID Token struct + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :token, + record_name: :oidcc_token_id, + record_type_module: :oidcc_token, + record_type_name: :id, + hrl: "include/oidcc_token.hrl" + + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + token: String.t(), + claims: :oidcc_jwt_util.claims() + } +end diff --git a/deps/oidcc/lib/oidcc/token/refresh.ex b/deps/oidcc/lib/oidcc/token/refresh.ex new file mode 100644 index 0000000..7b61bcc --- /dev/null +++ b/deps/oidcc/lib/oidcc/token/refresh.ex @@ -0,0 +1,21 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Token.Refresh do + @moduledoc """ + Refresh Token struct + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :token, + record_name: :oidcc_token_refresh, + record_type_module: :oidcc_token, + record_type_name: :refresh, + hrl: 
"include/oidcc_token.hrl" + + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + token: String.t() + } +end diff --git a/deps/oidcc/lib/oidcc/token_introspection.ex b/deps/oidcc/lib/oidcc/token_introspection.ex new file mode 100644 index 0000000..616b557 --- /dev/null +++ b/deps/oidcc/lib/oidcc/token_introspection.ex @@ -0,0 +1,114 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.TokenIntrospection do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :introspect_token, :start], + description: "Emitted at the start of introspecting the token", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :introspect_token, :stop], + description: "Emitted at the end of introspecting the token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :introspect_token, :exception], + description: "Emitted at the end of introspecting the token", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + @moduledoc """ + OAuth Token Introspection + + See https://datatracker.ietf.org/doc/html/rfc7662 + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + use Oidcc.RecordStruct, + internal_name: :introspection, + record_name: :oidcc_token_introspection, + hrl: "include/oidcc_token_introspection.hrl" + + alias Oidcc.ClientContext + alias Oidcc.Token + + @typedoc """ + For details on the fields see: + * https://datatracker.ietf.org/doc/html/rfc7662#section-2.2 + """ + @typedoc since: "3.0.0" + @type t() :: %__MODULE__{ + active: boolean(), + client_id: binary(), + exp: pos_integer() | :undefined, + scope: 
:oidcc_scope.scopes(), + username: binary() | :undefined, + token_type: binary() | :undefined, + iat: pos_integer() | :undefined, + nbf: pos_integer() | :undefined, + sub: binary() | :undefined, + aud: binary() | :undefined, + iss: binary() | :undefined, + jti: binary() | :undefined, + extra: %{binary() => term()} + } + + @doc """ + Introspect the given access token + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.introspect_token/5`. + + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> Oidcc.TokenIntrospection.introspect( + ...> "access_token", + ...> client_context + ...> ) + ...> # => {:ok, %Oidcc.TokenIntrospection{}} + """ + @doc since: "3.0.0" + @spec introspect( + token :: String.t() | Token.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_token_introspection.opts() + ) :: {:ok, t()} | {:error, :oidcc_token_introspection.error()} + def introspect(token, client_context, opts \\ %{}) do + client_context = ClientContext.struct_to_record(client_context) + + token = + case token do + token when is_binary(token) -> token + %Token{} = token -> Token.struct_to_record(token) + end + + with {:ok, introspection} <- + :oidcc_token_introspection.introspect(token, client_context, opts) do + {:ok, record_to_struct(introspection)} + end + end +end diff --git a/deps/oidcc/lib/oidcc/userinfo.ex b/deps/oidcc/lib/oidcc/userinfo.ex new file mode 100644 index 0000000..92e1527 --- /dev/null +++ b/deps/oidcc/lib/oidcc/userinfo.ex @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Userinfo do + use TelemetryRegistry + + telemetry_event(%{ + event: [:oidcc, :userinfo, :start], + description: 
"Emitted at the start of loading userinfo", + measurements: "%{system_time: non_neg_integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :userinfo, :stop], + description: "Emitted at the end of loading userinfo", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + telemetry_event(%{ + event: [:oidcc, :userinfo, :exception], + description: "Emitted at the end of loading userinfo", + measurements: "%{duration: integer(), monotonic_time: integer()}", + metadata: "%{issuer: :uri_string.uri_string(), client_id: String.t()}" + }) + + @moduledoc """ + OpenID Connect Userinfo + + See https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + + ## Telemetry + + #{telemetry_docs()} + """ + @moduledoc since: "3.0.0" + + alias Oidcc.ClientContext + alias Oidcc.Token + + @doc """ + Load userinfo for the given token + + For a high level interface using `Oidcc.ProviderConfiguration.Worker` + see `Oidcc.retrieve_userinfo/5`. 
+ + ## Examples + + iex> {:ok, pid} = + ...> Oidcc.ProviderConfiguration.Worker.start_link(%{ + ...> issuer: "https://api.login.yahoo.com" + ...> }) + ...> + ...> {:ok, client_context} = + ...> Oidcc.ClientContext.from_configuration_worker( + ...> pid, + ...> "client_id", + ...> "client_secret" + ...> ) + ...> + ...> # Get access_token from Oidcc.Token.retrieve/3 + ...> access_token = "access_token" + ...> + ...> Oidcc.Userinfo.retrieve( + ...> access_token, + ...> client_context, + ...> %{expected_subject: "sub"} + ...> ) + ...> # => {:ok, %{"sub" => "sub"}} + + """ + @doc since: "3.0.0" + @spec retrieve( + access_token :: String.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_userinfo.retrieve_opts() + ) :: {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_userinfo.error()} + @spec retrieve( + token :: Token.t(), + client_context :: ClientContext.t(), + opts :: :oidcc_userinfo.retrieve_opts() + ) :: {:ok, :oidcc_jwt_util.claims()} | {:error, :oidcc_userinfo.error()} + def retrieve(token, client_context, opts) do + token = + case token do + token when is_binary(token) -> token + %Token{} = token -> Token.struct_to_record(token) + end + + client_context = ClientContext.struct_to_record(client_context) + + :oidcc_userinfo.retrieve(token, client_context, opts) + end +end diff --git a/deps/oidcc/mix.exs b/deps/oidcc/mix.exs new file mode 100644 index 0000000..eb419fb --- /dev/null +++ b/deps/oidcc/mix.exs @@ -0,0 +1,89 @@ +# SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +# SPDX-License-Identifier: Apache-2.0 + +defmodule Oidcc.Mixfile do + use Mix.Project + + {:ok, [{:application, :oidcc, props}]} = :file.consult(~c"src/oidcc.app.src") + @props Keyword.take(props, [:applications, :description, :env, :mod, :licenses, :vsn]) + + def project() do + [ + app: :oidcc, + version: to_string(@props[:vsn]), + elixir: "~> 1.15", + erlc_options: erlc_options(Mix.env()), + build_embedded: Mix.env() == :prod, + start_permanent: Mix.env() == :prod, + deps: 
deps(), + name: "Oidcc", + source_url: "https://github.com/erlef/oidcc", + docs: &docs/0, + description: to_string(@props[:description]), + package: package(), + test_coverage: [ignore_modules: [Oidcc.RecordStruct]], + dialyzer: [ + plt_add_apps: [:mix] + ] + ] + end + + def application, + do: [ + extra_applications: extra_applications(Mix.env()) + ] + + defp extra_applications(env) + defp extra_applications(:dev), do: [:inets, :ssl, :edoc, :xmerl] + defp extra_applications(_env), do: [:inets, :ssl] + + defp deps() do + [ + {:telemetry, "~> 1.2"}, + {:telemetry_registry, "~> 0.3.1"}, + {:jose, "~> 1.11"}, + {:jsx, "~> 3.1", only: :test}, + {:mock, "~> 0.3.8", only: :test}, + {:ex_doc, "~> 0.29", only: :dev, runtime: false}, + {:credo, "~> 1.7", only: :dev, runtime: false}, + {:dialyxir, "~> 1.4", only: :dev, runtime: false}, + {:igniter, "~> 0.6.3 or ~> 0.7.0", optional: true} + ] + end + + defp erlc_options(:prod), do: [] + + defp erlc_options(_enc), + do: [:debug_info, :warn_unused_import, :warn_export_vars, :warnings_as_errors, :verbose] + + defp package() do + [ + maintainers: ["Jonatan Männchen"], + build_tools: ["rebar3", "mix"], + files: [ + "include", + "lib", + "LICENSE*", + "mix.exs", + "README*", + "rebar.config", + "src" + ], + licenses: Enum.map(@props[:licenses], &to_string/1), + links: %{"Github" => "https://github.com/erlef/oidcc"} + ] + end + + defp docs do + {ref, 0} = System.cmd("git", ["rev-parse", "--verify", "--quiet", "HEAD"]) + + [ + source_ref: ref, + main: "readme", + extras: ["README.md" | Path.wildcard(Path.join(__DIR__, "guides/**/*.md"))], + groups_for_modules: [Erlang: [~r/oidcc/], "Elixir": [~r/^Oidcc/]], + logo: "assets/logo.svg", + assets: %{"assets" => "assets"} + ] + end +end diff --git a/deps/oidcc/rebar.config b/deps/oidcc/rebar.config new file mode 100644 index 0000000..5cfe39c --- /dev/null +++ b/deps/oidcc/rebar.config @@ -0,0 +1,57 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: 
Apache-2.0 + +{erl_opts, [warn_unused_import, warn_export_vars, verbose, report, debug_info]}. + +{minimum_otp_vsn, "26"}. + +{deps, [ + {telemetry, "~> 1.2"}, + {telemetry_registry, "~> 0.3.1"}, + {jose, "~> 1.11"} +]}. + +%% TODO: Remove once the following issue is resolved: +%% https://github.com/potatosalad/erlang-jose/issues/168 +{overrides, [ + {override, jose, [{erl_opts, []}]} +]}. + +{project_plugins, [ + %% Revert back to released version when this PR is merged & released: + %% https://github.com/markusn/coveralls-erl/pull/36 + {coveralls, ".*", + {git, "https://github.com/RoadRunnr/coveralls-erl.git", {branch, "feature/git-info"}}}, + {erlfmt, "~> 1.3"}, + rebar3_hank, + rebar3_lint +]}. + +{validate_app_modules, true}. + +{profiles, [ + {test, [ + {deps, [ + {meck, "~> 0.9.2"}, + {jsx, "~> 3.1"} + ]}, + {cover_enabled, true}, + {cover_export_enabled, true}, + {coveralls_coverdata, "_build/test/cover/*.coverdata"}, + {coveralls_service_name, "github"}, + {cover_opts, [verbose]} + ]} +]}. + +{hank, [ + {ignore, [ + {"test/**/*_SUITE.erl", [unnecessary_function_arguments]}, + "include/**/*.hrl", + "certification/**/*", + "deps/**/*" + ]} +]}. + +{erlfmt, [write]}. + +{shell, [{apps, [oidcc]}]}. diff --git a/deps/oidcc/src/oidcc.app.src b/deps/oidcc/src/oidcc.app.src new file mode 100644 index 0000000..d90294c --- /dev/null +++ b/deps/oidcc/src/oidcc.app.src @@ -0,0 +1,13 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +{application, oidcc, [ + {description, "OpenID Connect client library for the BEAM."}, + {vsn, "3.7.2"}, + {registered, []}, + {applications, [kernel, stdlib, inets, ssl, public_key, telemetry, jose]}, + {env, []}, + {modules, []}, + {licenses, ["Apache-2.0"]}, + {links, []} +]}. 
diff --git a/deps/oidcc/src/oidcc.erl b/deps/oidcc/src/oidcc.erl new file mode 100644 index 0000000..caba13d --- /dev/null +++ b/deps/oidcc/src/oidcc.erl @@ -0,0 +1,517 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +OpenID Connect High Level Interface + +## Setup + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">>, + name => {local, google_config_provider} + }). +``` + +(or via a `m:supervisor`) + +See `m:oidcc_provider_configuration_worker` for details + +## Global Configuration + +* `max_clock_skew` (default `0`) - Maximum allowed clock skew for JWT + `exp` / `nbf` validation, in seconds +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-export([client_credentials_token/4]). +-export([create_redirect_url/4]). +-export([initiate_logout_url/4]). +-export([introspect_token/5]). +-export([jwt_profile_token/6]). +-export([refresh_token/5]). +-export([retrieve_token/5]). +-export([retrieve_userinfo/5]). + +?DOC(""" +Create Auth Redirect URL + +## Examples + +```erlang +{ok, RedirectUri} = + oidcc:create_redirect_url( + provider_name, + <<"client_id">>, + <<"client_secret">> + #{redirect_uri: <<"https://my.server/return"} + ), + +%% RedirectUri = https://my.provider/auth?scope=openid&response_type=code&client_id=client_id&redirect_uri=https%3A%2F%2Fmy.server%2Freturn +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec create_redirect_url( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ok, Uri} | {error, oidcc_client_context:error() | oidcc_authorization:error()} +when + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary() | unauthenticated, + Opts :: oidcc_authorization:opts() | oidcc_client_context:opts(), + Uri :: uri_string:uri_string(). 
+create_redirect_url(ProviderConfigurationWorkerName, ClientId, ClientSecret, Opts) -> + {ClientContextOpts, OtherOpts0} = extract_client_context_opts(Opts), + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OtherOpts} = oidcc_profile:apply_profiles(ClientContext0, OtherOpts0), + oidcc_authorization:create_redirect_url(ClientContext, OtherOpts) + end. + +?DOC(""" +Retrieve the token using the authcode received before and directly validate +the result. + +The authcode was sent to the local endpoint by the OpenId Connect provider, +using redirects. + +## Examples + +```erlang +%% Get AuthCode from Redirect + +{ok, #oidcc_token{}} = + oidcc:retrieve_token( + AuthCode, + provider_name, + <<"client_id">>, + <<"client_secret">>, + #{redirect_uri => <<"https://example.com/callback">>} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec retrieve_token( + AuthCode, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret | unauthenticated, + Opts +) -> + {ok, oidcc_token:t()} | {error, oidcc_client_context:error() | oidcc_token:error()} +when + AuthCode :: binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_token:retrieve_opts() | oidcc_client_context:opts(). 
+retrieve_token( + AuthCode, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ClientContextOpts, OtherOpts} = extract_client_context_opts(Opts), + + RefreshJwksFun = oidcc_jwt_util:refresh_jwks_fun(ProviderConfigurationWorkerName), + OptsWithRefresh0 = maps_put_new(refresh_jwks, RefreshJwksFun, OtherOpts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OptsWithRefresh} = oidcc_profile:apply_profiles( + ClientContext0, OptsWithRefresh0 + ), + oidcc_token:retrieve(AuthCode, ClientContext, OptsWithRefresh) + end. + +?DOC(""" +Load userinfo for the given token. + +## Examples + +```erlang +%% Get Token + +{ok, #{<<"sub">> => Sub}} = + oidcc:retrieve_userinfo( + Token, + provider_name, + <<"client_id">>, + <<"client_secret">>, + #{} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec retrieve_userinfo + ( + Token, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret | unauthenticated, + Opts + ) -> + {ok, map()} | {error, oidcc_client_context:error() | oidcc_userinfo:error()} + when + Token :: oidcc_token:t(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary() | unauthenticated, + Opts :: oidcc_userinfo:retrieve_opts_no_sub() | oidcc_client_context:opts(); + (Token, ProviderConfigurationWorkerName, ClientId, ClientSecret, Opts) -> + {ok, map()} | {error, any()} + when + Token :: binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_userinfo:retrieve_opts(). 
+retrieve_userinfo( + Token, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ClientContextOpts, OtherOpts0} = extract_client_context_opts(Opts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OtherOpts} = oidcc_profile:apply_profiles(ClientContext0, OtherOpts0), + oidcc_userinfo:retrieve(Token, ClientContext, OtherOpts) + end. + +?DOC(""" +Refresh Token. + +## Examples + +```erlang +%% Get Token and wait for its expiry + +{ok, #oidcc_token{}} = + oidcc:refresh_token( + Token, + provider_name, + <<"client_id">>, + <<"client_secret">>, + #{expected_subject => <<"sub_from_initial_id_token">>} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec refresh_token + ( + RefreshToken, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret | unauthenticated, + Opts + ) -> + {ok, oidcc_token:t()} | {error, oidcc_client_context:error() | oidcc_token:error()} + when + RefreshToken :: binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_token:refresh_opts() | oidcc_client_context:opts(); + ( + Token, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts + ) -> + {ok, oidcc_token:t()} | {error, oidcc_client_context:error() | oidcc_token:error()} + when + Token :: oidcc_token:t(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_token:refresh_opts_no_sub(). 
+refresh_token( + RefreshToken, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ClientContextOpts, OtherOpts} = extract_client_context_opts(Opts), + + RefreshJwksFun = oidcc_jwt_util:refresh_jwks_fun(ProviderConfigurationWorkerName), + OptsWithRefresh0 = maps_put_new(refresh_jwks, RefreshJwksFun, OtherOpts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OptsWithRefresh} = oidcc_profile:apply_profiles( + ClientContext0, OptsWithRefresh0 + ), + oidcc_token:refresh(RefreshToken, ClientContext, OptsWithRefresh) + end. + +?DOC(""" +Introspect the given access token. + +## Examples + +```erlang +%% Get AccessToken + +{ok, #oidcc_token_introspection{active = True}} = + oidcc:introspect_token( + AccessToken, + provider_name, + <<"client_id">>, + <<"client_secret">>, + #{} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec introspect_token( + Token, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ok, oidcc_token_introspection:t()} + | {error, oidcc_client_context:error() | oidcc_token_introspection:error()} +when + Token :: oidcc_token:t() | binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_token_introspection:opts() | oidcc_client_context:opts(). +introspect_token( + Token, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> + {ClientContextOpts, OtherOpts0} = extract_client_context_opts(Opts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OtherOpts} = oidcc_profile:apply_profiles(ClientContext0, OtherOpts0), + oidcc_token_introspection:introspect(Token, ClientContext, OtherOpts) + end. 
+ +?DOC(""" +Retrieve JSON Web Token (JWT) Profile Token. + +See https://datatracker.ietf.org/doc/html/rfc7523#section-4. + +## Examples + +```erlang +{ok, KeyJson} = file:read_file("jwt-profile.json"), +KeyMap = jose:decode(KeyJson), +Key = jose_jwk:from_pem(maps:get(<<"key">>, KeyMap)), + +{ok, #oidcc_token{}} = + oidcc_token:jwt_profile( + <<"subject">>, + provider_name, + <<"client_id">>, + <<"client_secret">>, + Key, + #{ + scope => [<<"scope">>], + kid => maps:get(<<"keyId">>, KeyMap) + } + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec jwt_profile_token( + Subject, + ProviderConfigurationWorkerName, + ClientId, + ClientSecret | unauthenticated, + Jwk, + Opts +) -> {ok, oidcc_token:t()} | {error, oidcc_client_context:error() | oidcc_token:error()} when + Subject :: binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Jwk :: jose_jwk:key(), + Opts :: oidcc_token:jwt_profile_opts() | oidcc_client_context:opts(). +jwt_profile_token(Subject, ProviderConfigurationWorkerName, ClientId, ClientSecret, Jwk, Opts) -> + {ClientContextOpts, OtherOpts} = extract_client_context_opts(Opts), + + RefreshJwksFun = oidcc_jwt_util:refresh_jwks_fun(ProviderConfigurationWorkerName), + OptsWithRefresh0 = maps_put_new(refresh_jwks, RefreshJwksFun, OtherOpts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OptsWithRefresh} = oidcc_profile:apply_profiles( + ClientContext0, OptsWithRefresh0 + ), + oidcc_token:jwt_profile(Subject, ClientContext, Jwk, OptsWithRefresh) + end. + +?DOC(""" +Retrieve Client Credential Token. + +See https://datatracker.ietf.org/doc/html/rfc6749#section-1.3.4. + +## Examples + +```erlang +{ok, #oidcc_token{}} = + oidcc:client_credentials_token( + provider_name, + <<"client_id">>, + <<"client_secret">>, + #{scope => [<<"scope">>]} + ). 
+``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec client_credentials_token( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + Opts +) -> {ok, oidcc_token:t()} | {error, oidcc_client_context:error() | oidcc_token:error()} when + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: oidcc_token:client_credentials_opts() | oidcc_client_context:opts(). +client_credentials_token(ProviderConfigurationWorkerName, ClientId, ClientSecret, Opts) -> + {ClientContextOpts, OtherOpts} = extract_client_context_opts(Opts), + + RefreshJwksFun = oidcc_jwt_util:refresh_jwks_fun(ProviderConfigurationWorkerName), + OptsWithRefresh0 = maps_put_new(refresh_jwks, RefreshJwksFun, OtherOpts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + ClientSecret, + ClientContextOpts + ), + {ok, ClientContext, OptsWithRefresh} = oidcc_profile:apply_profiles( + ClientContext0, OptsWithRefresh0 + ), + oidcc_token:client_credentials(ClientContext, OptsWithRefresh) + end. + +?DOC(""" +Create Initiate URI for Relaying Party initiated Logout. + +See https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout. + +## Examples + +```erlang +%% Get `Token` from `oidcc_token` + +{ok, RedirectUri} = + oidcc:initiate_logout_url( + Token, + provider_name, + <<"client_id">>, + #{post_logout_redirect_uri: <<"https://my.server/return"}} + ). + +%% RedirectUri = https://my.provider/logout?id_token_hint=IDToken&client_id=ClientId&post_logout_redirect_uri=https%3A%2F%2Fmy.server%2Freturn +``` +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-spec initiate_logout_url( + Token, + ProviderConfigurationWorkerName, + ClientId, + Opts +) -> + {ok, uri_string:uri_string()} | {error, oidcc_client_context:error() | oidcc_logout:error()} +when + Token :: IdToken | oidcc_token:t() | undefined, + IdToken :: binary(), + ProviderConfigurationWorkerName :: gen_server:server_ref(), + ClientId :: binary(), + Opts :: oidcc_logout:initiate_url_opts() | oidcc_client_context:unauthenticated_opts(). +initiate_logout_url(Token, ProviderConfigurationWorkerName, ClientId, Opts) -> + {ClientContextOpts, OtherOpts0} = extract_client_context_opts(Opts), + + maybe + {ok, ClientContext0} ?= + oidcc_client_context:from_configuration_worker( + ProviderConfigurationWorkerName, + ClientId, + unauthenticated, + ClientContextOpts + ), + {ok, ClientContext, OtherOpts} = oidcc_profile:apply_profiles(ClientContext0, OtherOpts0), + oidcc_logout:initiate_url(Token, ClientContext, OtherOpts) + end. + +-spec maps_put_new(Key, Value, Map1) -> Map2 when + Key :: term(), Value :: term(), Map1 :: map(), Map2 :: map(). +maps_put_new(Key, Value, Map) -> + case maps:is_key(Key, Map) of + true -> Map; + false -> maps:put(Key, Value, Map) + end. + +-spec extract_client_context_opts(Opts) -> {ClientContextOpts, RestOpts} when + Opts :: RestOpts | ClientContextOpts, + RestOpts :: map(), + ClientContextOpts :: oidcc_client_context:opts(). +extract_client_context_opts(Opts) -> + { + maps:with([client_jwks], Opts), + maps:without([client_jwks], Opts) + }. diff --git a/deps/oidcc/src/oidcc_auth_util.erl b/deps/oidcc/src/oidcc_auth_util.erl new file mode 100644 index 0000000..418a5d4 --- /dev/null +++ b/deps/oidcc/src/oidcc_auth_util.erl @@ -0,0 +1,401 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_auth_util). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("Authentication Utilities"). +?MODULEDOC(#{since => <<"3.2.0">>}). 
+ +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). + +-include_lib("jose/include/jose_jwk.hrl"). + +-export_type([auth_method/0, error/0]). + +?DOC(#{since => <<"3.2.0">>}). +-type auth_method() :: + none + | client_secret_basic + | client_secret_post + | client_secret_jwt + | private_key_jwt + | tls_client_auth. + +?DOC(#{since => <<"3.2.0">>}). +-type error() :: no_supported_auth_method. + +-export([add_client_authentication/6]). +-export([add_dpop_proof_header/5]). +-export([add_authorization_header/6]). +-export([maybe_mtls_endpoint/4]). + +?DOC(false). +-spec add_client_authentication( + QueryList, Header, SupportedAuthMethods, AllowAlgorithms, Opts, ClientContext +) -> + {ok, {oidcc_http_util:query_params(), [oidcc_http_util:http_header()]}, auth_method()} + | {error, error()} +when + QueryList :: oidcc_http_util:query_params(), + Header :: [oidcc_http_util:http_header()], + SupportedAuthMethods :: [binary()] | undefined, + AllowAlgorithms :: [binary()] | undefined, + Opts :: map(), + ClientContext :: oidcc_client_context:t(). 
+add_client_authentication(_QueryList, _Header, undefined, _AllowAlgs, _Opts, _ClientContext) -> + {error, no_supported_auth_method}; +add_client_authentication( + QueryList0, Header0, SupportedAuthMethods, AllowAlgorithms, Opts, ClientContext +) -> + PreferredAuthMethods = maps:get(preferred_auth_methods, Opts, [ + private_key_jwt, + tls_client_auth, + client_secret_jwt, + client_secret_post, + client_secret_basic, + none + ]), + case select_preferred_auth(PreferredAuthMethods, SupportedAuthMethods) of + {ok, AuthMethod} -> + case + add_authentication( + QueryList0, Header0, AuthMethod, AllowAlgorithms, Opts, ClientContext + ) + of + {ok, {QueryList, Header}} -> + {ok, {QueryList, Header}, AuthMethod}; + {error, _} -> + add_client_authentication( + QueryList0, + Header0, + SupportedAuthMethods -- [atom_to_binary(AuthMethod)], + AllowAlgorithms, + Opts, + ClientContext + ) + end; + {error, Reason} -> + {error, Reason} + end. + +-spec add_authentication( + QueryList, + Header, + AuthMethod, + AllowAlgorithms, + Opts, + ClientContext +) -> + {ok, {oidcc_http_util:query_params(), [oidcc_http_util:http_header()]}} + | {error, auth_method_not_possible} +when + QueryList :: oidcc_http_util:query_params(), + Header :: [oidcc_http_util:http_header()], + AuthMethod :: auth_method(), + AllowAlgorithms :: [binary()] | undefined, + Opts :: map(), + ClientContext :: oidcc_client_context:t(). 
+add_authentication( + QsBodyList, + Header, + none, + _AllowArgs, + _Opts, + #oidcc_client_context{client_id = ClientId} +) -> + NewBodyList = [{<<"client_id">>, ClientId} | QsBodyList], + {ok, {NewBodyList, Header}}; +add_authentication( + _QsBodyList, + _Header, + _Method, + _AllowAlgs, + _Opts, + #oidcc_client_context{client_secret = unauthenticated} +) -> + {error, auth_method_not_possible}; +add_authentication( + QsBodyList, + Header, + client_secret_basic, + _AllowAlgs, + _Opts, + #oidcc_client_context{client_id = ClientId, client_secret = ClientSecret} +) -> + NewHeader = [oidcc_http_util:basic_auth_header(ClientId, ClientSecret) | Header], + {ok, {QsBodyList, NewHeader}}; +add_authentication( + QsBodyList, + Header, + client_secret_post, + _AllowAlgs, + _Opts, + #oidcc_client_context{client_id = ClientId, client_secret = ClientSecret} +) -> + NewBodyList = + [{<<"client_id">>, ClientId}, {<<"client_secret">>, ClientSecret} | QsBodyList], + {ok, {NewBodyList, Header}}; +add_authentication( + QsBodyList, + Header, + client_secret_jwt, + AllowAlgorithms, + Opts, + ClientContext +) -> + #oidcc_client_context{ + client_secret = ClientSecret + } = ClientContext, + + maybe + [_ | _] ?= AllowAlgorithms, + #jose_jwk{} = + OctJwk ?= + oidcc_jwt_util:client_secret_oct_keys( + AllowAlgorithms, + ClientSecret + ), + {ok, ClientAssertion} ?= + signed_client_assertion( + AllowAlgorithms, + Opts, + ClientContext, + OctJwk + ), + {ok, add_jwt_bearer_assertion(ClientAssertion, QsBodyList, Header, ClientContext)} + else + _ -> + {error, auth_method_not_possible} + end; +add_authentication( + QsBodyList, + Header, + private_key_jwt, + AllowAlgorithms, + Opts, + ClientContext +) -> + #oidcc_client_context{ + client_jwks = ClientJwks + } = ClientContext, + + maybe + [_ | _] ?= AllowAlgorithms, + #jose_jwk{} ?= ClientJwks, + {ok, ClientAssertion} ?= + signed_client_assertion(AllowAlgorithms, Opts, ClientContext, ClientJwks), + {ok, add_jwt_bearer_assertion(ClientAssertion, 
QsBodyList, Header, ClientContext)} + else + _ -> + {error, auth_method_not_possible} + end; +add_authentication( + QsBodyList, + Header, + tls_client_auth, + _AllowAlgorithms, + Opts, + #oidcc_client_context{client_id = ClientId} +) -> + case Opts of + #{request_opts := #{ssl := _}} -> + %% only supported if custom SSL params are provided + NewBodyList = [{<<"client_id">>, ClientId} | QsBodyList], + {ok, {NewBodyList, Header}}; + _ -> + {error, auth_method_not_possible} + end. + +-spec select_preferred_auth(PreferredAuthMethods, AuthMethodsSupported) -> + {ok, auth_method()} | {error, error()} +when + PreferredAuthMethods :: [auth_method(), ...], + AuthMethodsSupported :: [binary()]. +select_preferred_auth(PreferredAuthMethods, AuthMethodsSupported) -> + PreferredAuthMethodSearchFun = fun(AuthMethod) -> + lists:member(atom_to_binary(AuthMethod), AuthMethodsSupported) + end, + + case lists:search(PreferredAuthMethodSearchFun, PreferredAuthMethods) of + {value, AuthMethod} -> + {ok, AuthMethod}; + false -> + {error, no_supported_auth_method} + end. + +-spec signed_client_assertion(AllowAlgorithms, Opts, ClientContext, Jwk) -> + {ok, binary()} | {error, term()} +when + AllowAlgorithms :: [binary()], + Jwk :: jose_jwk:key(), + Opts :: map(), + ClientContext :: oidcc_client_context:t(). +signed_client_assertion(AllowAlgorithms, Opts, ClientContext, Jwk) -> + Jwt = jose_jwt:from(token_request_claims(Opts, ClientContext)), + + oidcc_jwt_util:sign(Jwt, Jwk, AllowAlgorithms). + +-spec token_request_claims(Opts, ClientContext) -> oidcc_jwt_util:claims() when + Opts :: map(), + ClientContext :: oidcc_client_context:t(). 
+token_request_claims(Opts, #oidcc_client_context{ + client_id = ClientId, + provider_configuration = #oidcc_provider_configuration{issuer = Issuer} +}) -> + Audience = maps:get(audience, Opts, Issuer), + MaxClockSkew = + case application:get_env(oidcc, max_clock_skew) of + undefined -> 0; + {ok, ClockSkew} -> ClockSkew + end, + + maps:merge( + #{ + <<"iss">> => ClientId, + <<"sub">> => ClientId, + <<"aud">> => Audience, + <<"jti">> => random_string(32), + <<"iat">> => os:system_time(seconds), + <<"exp">> => os:system_time(seconds) + 30, + <<"nbf">> => os:system_time(seconds) - MaxClockSkew + }, + maps:get(token_request_claims, Opts, #{}) + ). + +-spec add_jwt_bearer_assertion(ClientAssertion, Body, Header, ClientContext) -> {Body, Header} when + ClientAssertion :: binary(), + Body :: oidcc_http_util:query_params(), + Header :: [oidcc_http_util:http_header()], + ClientContext :: oidcc_client_context:t(). +add_jwt_bearer_assertion(ClientAssertion, Body, Header, ClientContext) -> + #oidcc_client_context{client_id = ClientId} = ClientContext, + { + [ + {<<"client_assertion_type">>, + <<"urn:ietf:params:oauth:client-assertion-type:jwt-bearer">>}, + {<<"client_assertion">>, ClientAssertion}, + {<<"client_id">>, ClientId} + | Body + ], + Header + }. + +?DOC(false). +-spec add_dpop_proof_header(Header, Method, Endpoint, Opts, ClientContext) -> Header when + Header :: [oidcc_http_util:http_header()], + Method :: post | get, + Endpoint :: uri_string:uri_string(), + Opts :: #{nonce => binary()}, + ClientContext :: oidcc_client_context:t(). +add_dpop_proof_header(Header, Method, Endpoint, Opts, ClientContext) -> + Claims = + case Opts of + #{nonce := Nonce} -> + #{<<"nonce">> => Nonce}; + _ -> + #{} + end, + case dpop_proof(Method, Endpoint, Claims, ClientContext) of + {ok, SignedRequestObject} -> + [{"dpop", SignedRequestObject} | Header]; + error -> + Header + end. + +?DOC(false). 
+-spec add_authorization_header( + AccessToken, AccessTokenType, Method, Endpoint, Opts, ClientContext +) -> + Header +when + AccessToken :: binary(), + AccessTokenType :: binary(), + Method :: post | get, + Endpoint :: uri_string:uri_string(), + Opts :: #{dpop_nonce => binary()}, + ClientContext :: oidcc_client_context:t(), + Header :: [oidcc_http_util:http_header()]. +add_authorization_header( + AccessToken, AccessTokenType, Method, Endpoint, Opts, ClientContext +) -> + maybe + true ?= string:casefold(<<"dpop">>) =:= string:casefold(AccessTokenType), + Claims0 = + case Opts of + #{dpop_nonce := Nonce} -> + #{<<"nonce">> => Nonce}; + _ -> + #{} + end, + Claims = Claims0#{ + <<"ath">> => base64:encode(crypto:hash(sha256, AccessToken), #{ + mode => urlsafe, padding => false + }) + }, + {ok, SignedRequestObject} ?= dpop_proof(Method, Endpoint, Claims, ClientContext), + [ + {"authorization", [AccessTokenType, <<" ">>, AccessToken]}, + {"dpop", SignedRequestObject} + ] + else + _ -> + [oidcc_http_util:bearer_auth_header(AccessToken)] + end. + +?DOC(false). +-spec maybe_mtls_endpoint( + Endpoint, auth_method(), MtlsEndpointName, ClientContext +) -> Endpoint when + Endpoint :: uri_string:uri_string(), + MtlsEndpointName :: binary(), + ClientContext :: oidcc_client_context:t(). +maybe_mtls_endpoint(Endpoint, tls_client_auth, MtlsEndpointName, ClientContext) -> + case + ClientContext#oidcc_client_context.provider_configuration#oidcc_provider_configuration.mtls_endpoint_aliases + of + #{MtlsEndpointName := MtlsEndpoint} -> + MtlsEndpoint; + _ -> + Endpoint + end; +maybe_mtls_endpoint(Endpoint, _AuthMethod, _EndpointName, _ClientContext) -> + Endpoint. + +-spec dpop_proof(Method, Endpoint, Claims, ClientContext) -> {ok, binary()} | error when + Method :: post | get, + Endpoint :: uri_string:uri_string(), + Claims :: map(), + ClientContext :: oidcc_client_context:t(). 
+dpop_proof(Method, Endpoint, Claims0, #oidcc_client_context{ + client_jwks = #jose_jwk{} = ClientJwks, + provider_configuration = #oidcc_provider_configuration{ + dpop_signing_alg_values_supported = [_ | _] = SigningAlgSupported + } +}) -> + MaxClockSkew = + case application:get_env(oidcc, max_clock_skew) of + undefined -> 0; + {ok, ClockSkew} -> ClockSkew + end, + HtmClaim = string:uppercase(atom_to_binary(Method, utf8)), + Claims = Claims0#{ + <<"jti">> => random_string(32), + <<"htm">> => HtmClaim, + <<"htu">> => iolist_to_binary(Endpoint), + <<"iat">> => os:system_time(seconds), + <<"exp">> => os:system_time(seconds) + 30, + <<"nbf">> => os:system_time(seconds) - MaxClockSkew + }, + Jwt = jose_jwt:from(Claims), + + oidcc_jwt_util:sign_dpop(Jwt, ClientJwks, SigningAlgSupported); +dpop_proof(_Method, _Endpoint, _Claims, _ClientContext) -> + error. + +-spec random_string(Bytes :: pos_integer()) -> binary(). +random_string(Bytes) -> + base64:encode(crypto:strong_rand_bytes(Bytes), #{mode => urlsafe, padding => false}). diff --git a/deps/oidcc/src/oidcc_authorization.erl b/deps/oidcc/src/oidcc_authorization.erl new file mode 100644 index 0000000..c6a62bf --- /dev/null +++ b/deps/oidcc/src/oidcc_authorization.erl @@ -0,0 +1,469 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_authorization). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("Functions to start an OpenID Connect Authorization"). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). + +-include_lib("jose/include/jose_jwk.hrl"). + +-export([create_redirect_url/2]). + +-export_type([error/0]). +-export_type([opts/0]). + +?DOC(""" +Configure authorization redirect URL. + +See https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest. 
+ +## Parameters + +* `scopes` - list of scopes to request (defaults to `[<<"openid">>]`) +* `state` - state to pass to the provider +* `nonce` - nonce to pass to the provider +* `purpose` - purpose of the authorization request, see [https://cdn.connectid.com.au/specifications/oauth2-purpose-01.html] +* `require_purpose` - whether to require a `purpose` value +* `pkce_verifier` - PKCE verifier (random string), see [https://datatracker.ietf.org/doc/html/rfc7636#section-4.1] +* `require_pkce` - whether to require PKCE when getting the token +* `redirect_uri` - redirect target after authorization is completed +* `url_extension` - add custom query parameters to the authorization URL +* `response_mode` - response mode to use (defaults to `<<"query">>`) +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). +-type opts() :: + #{ + scopes => oidcc_scope:scopes(), + state => binary(), + nonce => binary(), + pkce_verifier => binary(), + require_pkce => boolean(), + purpose => binary(), + require_purpose => boolean(), + redirect_uri => uri_string:uri_string(), + url_extension => oidcc_http_util:query_params(), + response_mode => binary() + }. + +?MODULEDOC(#{since => <<"3.0.0">>}). +-type error() :: + {grant_type_not_supported, authorization_code} + | par_required + | request_object_required + | pkce_verifier_required + | purpose_required + | no_supported_code_challenge + | oidcc_http_util:error(). + +-telemetry_event(#{ + event => [oidcc, par_request, start], + description => <<"Emitted at the start of executing a PAR request">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +-telemetry_event(#{ + event => [oidcc, par_request, stop], + description => <<"Emitted at the end of executing a PAR request">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). 
+ +-telemetry_event(#{ + event => [oidcc, par_request, exception], + description => <<"Emitted at the end of executing a PAR request">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +?DOC(""" +Create Auth Redirect URL. + +For a high level interface using `m:oidcc_provider_configuration_worker` +see `oidcc:create_redirect_url/4`. + +## Examples + +```erlang +{ok, ClientContext} = + oidcc_client_context:from_configuration_worker(provider_name, + <<"client_id">>, + <<"client_secret">>), + +{ok, RedirectUri} = + oidcc_authorization:create_redirect_url(ClientContext, + #{redirect_uri: <<"https://my.server/return">}), + +%% RedirectUri = https://my.provider/auth?scope=openid&response_type=code&client_id=client_id&redirect_uri=https%3A%2F%2Fmy.server%2Freturn +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec create_redirect_url(ClientContext, Opts) -> {ok, Uri} | {error, error()} when + ClientContext :: oidcc_client_context:t(), + Opts :: opts(), + Uri :: uri_string:uri_string(). +create_redirect_url(#oidcc_client_context{} = ClientContext, Opts) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration} = ClientContext, + + #oidcc_provider_configuration{ + authorization_endpoint = AuthEndpoint, grant_types_supported = GrantTypesSupported + } = + ProviderConfiguration, + + maybe + true ?= lists:member(<<"authorization_code">>, GrantTypesSupported), + {ok, QueryParams} ?= redirect_params(ClientContext, Opts), + QueryString = uri_string:compose_query(QueryParams), + {ok, [AuthEndpoint, <<"?">>, QueryString]} + else + {error, Reason} -> + {error, Reason}; + false -> + {error, {grant_type_not_supported, authorization_code}} + end. + +-spec redirect_params(ClientContext, Opts) -> {ok, oidcc_http_util:query_params()} when + ClientContext :: oidcc_client_context:t(), + Opts :: opts(). 
+redirect_params(#oidcc_client_context{client_id = ClientId} = ClientContext, Opts) -> + UrlExtension = maps:get(url_extension, Opts, []), + QueryParams0 = + [ + {<<"response_type">>, maps:get(response_type, Opts, <<"code">>)}, + {<<"client_id">>, ClientId}, + {<<"redirect_uri">>, maps:get(redirect_uri, Opts)} + ], + QueryParams1 = maybe_append(<<"state">>, maps:get(state, Opts, undefined), QueryParams0), + QueryParams2 = maybe_append(<<"nonce">>, maps:get(nonce, Opts, undefined), QueryParams1), + QueryParams3 = maybe_append(<<"purpose">>, maps:get(purpose, Opts, undefined), QueryParams2), + QueryParams4 = + case maps:get(response_mode, Opts, <<"query">>) of + <<"query">> -> + QueryParams3; + ResponseMode when is_binary(ResponseMode) -> + [{<<"response_mode">>, ResponseMode} | QueryParams3] + end, + maybe + ok ?= validate_purpose_required(Opts), + {ok, QueryParams5} ?= + append_code_challenge( + Opts, QueryParams4, ClientContext + ), + QueryParams6 = oidcc_scope:query_append_scope( + maps:get(scopes, Opts, [openid]), QueryParams5 + ), + QueryParams7 = maybe_append_dpop_jkt(QueryParams6, ClientContext), + {ok, QueryParams} ?= attempt_request_object(QueryParams7, ClientContext, UrlExtension), + attempt_par(QueryParams, ClientContext, Opts) + end. + +-spec append_code_challenge(Opts, QueryParams, ClientContext) -> + {ok, oidcc_http_util:query_params()} | {error, error()} +when + Opts :: opts(), + QueryParams :: oidcc_http_util:query_params(), + ClientContext :: oidcc_client_context:t(). 
+append_code_challenge(#{pkce_verifier := CodeVerifier} = Opts, QueryParams, ClientContext) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration} = ClientContext, + #oidcc_provider_configuration{code_challenge_methods_supported = CodeChallengeMethodsSupported} = + ProviderConfiguration, + RequirePkce = maps:get(require_pkce, Opts, false), + case CodeChallengeMethodsSupported of + undefined when RequirePkce -> + {error, no_supported_code_challenge}; + undefined -> + {ok, QueryParams}; + Methods when is_list(Methods) -> + case + { + lists:member(<<"S256">>, CodeChallengeMethodsSupported), + lists:member(<<"plain">>, CodeChallengeMethodsSupported) + } + of + {true, _PlainSupported} -> + CodeChallenge = base64:encode(crypto:hash(sha256, CodeVerifier), #{ + mode => urlsafe, padding => false + }), + {ok, [ + {<<"code_challenge">>, CodeChallenge}, + {<<"code_challenge_method">>, <<"S256">>} + | QueryParams + ]}; + {false, true} -> + {ok, [ + {<<"code_challenge">>, CodeVerifier}, + {<<"code_challenge_method">>, <<"plain">>} + | QueryParams + ]}; + {false, false} when RequirePkce -> + {error, no_supported_code_challenge}; + {false, false} -> + {ok, QueryParams} + end + end; +append_code_challenge(#{require_pkce := true}, _QueryParams, _ClientContext) -> + {error, pkce_verifier_required}; +append_code_challenge(_Opts, QueryParams, _ClientContext) -> + {ok, QueryParams}. + +-spec maybe_append(Key, Value, QueryParams) -> QueryParams when + Key :: unicode:chardata(), + Value :: unicode:chardata() | true | undefined, + QueryParams :: oidcc_http_util:query_params(). +maybe_append(_Key, undefined, QueryParams) -> + QueryParams; +maybe_append(Key, Value, QueryParams) -> + [{Key, Value} | QueryParams]. + +-spec validate_purpose_required(Opts) -> ok | {error, purpose_required} when + Opts :: opts(). 
+validate_purpose_required(#{purpose := Purpose}) when is_binary(Purpose) -> + ok; +validate_purpose_required(#{purpose_required := true}) -> + {error, purpose_required}; +validate_purpose_required(_Opts) -> + ok. + +-spec maybe_append_dpop_jkt(QueryParams, ClientContext) -> + QueryParams +when + ClientContext :: oidcc_client_context:t(), + QueryParams :: oidcc_http_util:query_params(). +maybe_append_dpop_jkt( + QueryParams, + #oidcc_client_context{ + client_jwks = #jose_jwk{} = ClientJwks, + provider_configuration = #oidcc_provider_configuration{ + dpop_signing_alg_values_supported = [_ | _] + } + } +) -> + case oidcc_jwt_util:thumbprint(ClientJwks) of + {ok, Thumbprint} -> + [{<<"dpop_jkt">>, Thumbprint} | QueryParams]; + error -> + QueryParams + end; +maybe_append_dpop_jkt(QueryParams, _ClientContext) -> + QueryParams. + +-spec attempt_request_object(QueryParams, ClientContext, UrlExtension) -> + {ok, QueryParams} | {error, error()} +when + QueryParams :: oidcc_http_util:query_params(), + UrlExtension :: oidcc_http_util:query_params(), + ClientContext :: oidcc_client_context:t(). 
+attempt_request_object( + QueryParams, + #oidcc_client_context{ + client_id = ClientId, + client_secret = ClientSecret, + client_jwks = ClientJwks, + provider_configuration = #oidcc_provider_configuration{ + issuer = Issuer, + request_parameter_supported = true, + require_signed_request_object = RequireSignedRequestObject, + request_object_signing_alg_values_supported = SigningAlgSupported0, + request_object_encryption_alg_values_supported = EncryptionAlgSupported0, + request_object_encryption_enc_values_supported = EncryptionEncSupported0 + }, + jwks = Jwks + }, + UrlExtension +) when ClientSecret =/= unauthenticated -> + SigningAlgSupported = + case SigningAlgSupported0 of + undefined -> []; + SigningAlgs -> SigningAlgs + end, + EncryptionAlgSupported = + case EncryptionAlgSupported0 of + undefined -> []; + EncryptionAlgs -> EncryptionAlgs + end, + EncryptionEncSupported = + case EncryptionEncSupported0 of + undefined -> []; + EncryptionEncs -> EncryptionEncs + end, + + JwksWithClientJwks = + case ClientJwks of + none -> Jwks; + #jose_jwk{} -> oidcc_jwt_util:merge_jwks(Jwks, ClientJwks) + end, + + SigningJwks = oidcc_jwt_util:merge_client_secret_oct_keys( + JwksWithClientJwks, SigningAlgSupported, ClientSecret + ), + EncryptionJwks = oidcc_jwt_util:merge_client_secret_oct_keys( + JwksWithClientJwks, EncryptionAlgSupported, ClientSecret + ), + + MaxClockSkew = + case application:get_env(oidcc, max_clock_skew) of + undefined -> 0; + {ok, ClockSkew} -> ClockSkew + end, + + Claims = maps:merge( + #{ + <<"iss">> => ClientId, + <<"aud">> => Issuer, + <<"jti">> => random_string(32), + <<"iat">> => os:system_time(seconds), + <<"exp">> => os:system_time(seconds) + 30, + <<"nbf">> => os:system_time(seconds) - MaxClockSkew + }, + maps:from_list(QueryParams ++ UrlExtension) + ), + Jwt = jose_jwt:from(Claims), + + case oidcc_jwt_util:sign(Jwt, SigningJwks, deprioritize_none_alg(SigningAlgSupported)) of + {error, no_supported_alg_or_key} when RequireSignedRequestObject -> + 
{error, request_object_required}; + {error, no_supported_alg_or_key} -> + {ok, QueryParams ++ UrlExtension}; + {ok, SignedRequestObject} -> + case + oidcc_jwt_util:encrypt( + SignedRequestObject, + EncryptionJwks, + deprioritize_none_alg(EncryptionAlgSupported), + EncryptionEncSupported + ) + of + {ok, EncryptedRequestObject} -> + {ok, + [{<<"request">>, EncryptedRequestObject} | essential_params(QueryParams)] ++ + UrlExtension}; + {error, no_supported_alg_or_key} -> + {ok, + [{<<"request">>, SignedRequestObject} | essential_params(QueryParams)] ++ + UrlExtension} + end + end; +attempt_request_object( + _QueryParams, + #oidcc_client_context{ + provider_configuration = #oidcc_provider_configuration{require_signed_request_object = true} + }, + _UrlExtension +) -> + {error, request_object_required}; +attempt_request_object(QueryParams, _ClientContext, UrlExtension) -> + {ok, QueryParams ++ UrlExtension}. + +-spec attempt_par(QueryParams, ClientContext, Opts) -> + {ok, QueryParams} | {error, error()} +when + QueryParams :: oidcc_http_util:query_params(), + ClientContext :: oidcc_client_context:t(), + Opts :: opts(). 
+attempt_par( + _QueryParams, + #oidcc_client_context{ + provider_configuration = #oidcc_provider_configuration{ + require_pushed_authorization_requests = true, + pushed_authorization_request_endpoint = undefined + } + }, + _Opts +) -> + {error, par_required}; +attempt_par( + QueryParams, + #oidcc_client_context{ + provider_configuration = #oidcc_provider_configuration{ + pushed_authorization_request_endpoint = undefined + } + }, + _Opts +) -> + {ok, QueryParams}; +attempt_par( + QueryParams, + #oidcc_client_context{ + client_id = ClientId, + provider_configuration = + #oidcc_provider_configuration{ + issuer = Issuer, + token_endpoint_auth_methods_supported = SupportedAuthMethods, + token_endpoint_auth_signing_alg_values_supported = SigningAlgs, + pushed_authorization_request_endpoint = PushedAuthorizationRequestEndpoint0 + } + } = ClientContext, + Opts +) -> + Header0 = [{"accept", "application/json"}], + + TelemetryOpts = #{ + topic => [oidcc, par_request], extra_meta => #{issuer => Issuer, client_id => ClientId} + }, + + RequestOpts = maps:get(request_opts, Opts, #{}), + + maybe + {ok, {Body0, Header}, AuthMethod} ?= + oidcc_auth_util:add_client_authentication( + QueryParams, + Header0, + SupportedAuthMethods, + SigningAlgs, + Opts, + ClientContext + ), + %% ensure no duplicate parameters (such as client_id) + Body = lists:ukeysort(1, Body0), + PushedAuthorizationRequestEndpoint = oidcc_auth_util:maybe_mtls_endpoint( + PushedAuthorizationRequestEndpoint0, + AuthMethod, + <<"pushed_authorization_request_endpoint">>, + ClientContext + ), + Request = + {PushedAuthorizationRequestEndpoint, Header, "application/x-www-form-urlencoded", + uri_string:compose_query(Body)}, + {ok, {{json, ParResponse}, _Headers}} ?= + oidcc_http_util:request(post, Request, TelemetryOpts, RequestOpts), + #{<<"request_uri">> := ParRequestUri} ?= ParResponse, + {ok, [{<<"request_uri">>, ParRequestUri}, {<<"client_id">>, ClientId}]} + else + {error, Reason} -> {error, Reason}; + #{} = 
JsonResponse -> {error, {http_error, 201, JsonResponse}} + end. + +-spec essential_params(QueryParams :: oidcc_http_util:query_params()) -> + oidcc_http_util:query_params(). +essential_params(QueryParams) -> + lists:filter( + fun + ({<<"scope">>, _Value}) -> true; + ({<<"response_type">>, _Value}) -> true; + ({<<"client_id">>, _Value}) -> true; + (_Other) -> false + end, + QueryParams + ). + +-spec deprioritize_none_alg(Algorithms :: [binary()]) -> [binary()]. +deprioritize_none_alg(Algorithms) -> + {WithNone, WithoutNone} = lists:partition( + fun + (<<"none">>) -> true; + (_) -> false + end, + Algorithms + ), + WithoutNone ++ WithNone. + +-spec random_string(Bytes :: pos_integer()) -> binary(). +random_string(Bytes) -> + base64:encode(crypto:strong_rand_bytes(Bytes), #{mode => urlsafe, padding => false}). diff --git a/deps/oidcc/src/oidcc_backoff.erl b/deps/oidcc/src/oidcc_backoff.erl new file mode 100644 index 0000000..0ed1885 --- /dev/null +++ b/deps/oidcc/src/oidcc_backoff.erl @@ -0,0 +1,59 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_backoff). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +Backoff Handling + +Based on [`db_connection`](https://github.com/elixir-ecto/db_connection/blob/8ef1f2ea54922873590b8939f2dad6b031c5b49c/lib/db_connection/backoff.ex#L24) +"""). +?MODULEDOC(#{since => <<"3.2.0">>}). + +-export_type([type/0]). +-export_type([min/0]). +-export_type([max/0]). +-export_type([state/0]). + +-export([handle_retry/4]). + +?DOC(#{since => <<"3.2.0">>}). +-type type() :: stop | exponential | random | random_exponential. + +?DOC(#{since => <<"3.2.0">>}). +-type min() :: pos_integer(). + +?DOC(#{since => <<"3.2.0">>}). +-type max() :: pos_integer(). + +?DOC(#{since => <<"3.2.0">>}). +-opaque state() :: pos_integer() | {pos_integer(), pos_integer()}. + +?DOC(false). 
+-spec handle_retry(Type, Min, Max, State) -> stop | {Wait, State} when + Type :: type(), Min :: min(), Max :: max(), State :: undefined | state(), Wait :: pos_integer(). +handle_retry(Type, Min, Max, State) when Min > 0, Max > 0, Max >= Min -> + priv_handle_retry(Type, Min, Max, State). + +-spec priv_handle_retry(Type, Min, Max, State) -> stop | {Wait, State} when + Type :: type(), Min :: min(), Max :: max(), State :: undefined | state(), Wait :: pos_integer(). +priv_handle_retry(stop, _Min, _Max, undefined) -> + stop; +priv_handle_retry(random, Min, Max, State) -> + {rand(Min, Max), State}; +priv_handle_retry(exponential, Min, _Max, undefined) -> + {Min, Min}; +priv_handle_retry(exponential, _Min, Max, State) -> + Wait = min(State * 2, Max), + {Wait, Wait}; +priv_handle_retry(random_exponential, Min, Max, undefined) -> + Lower = max(Min, Max div 3), + priv_handle_retry(random_exponential, Min, Max, {Lower, Lower}); +priv_handle_retry(random_exponential, _Min, Max, {Prev, Lower}) -> + NextMin = min(Prev, Lower), + NextMax = min(Prev * 3, Max), + Next = rand(NextMin, NextMax), + priv_handle_retry(random, NextMin, NextMax, {Next, Lower}). + +rand(Min, Max) -> rand:uniform(Max - Min + 1) + Min - 1. diff --git a/deps/oidcc/src/oidcc_client_context.erl b/deps/oidcc/src/oidcc_client_context.erl new file mode 100644 index 0000000..24a6d0f --- /dev/null +++ b/deps/oidcc/src/oidcc_client_context.erl @@ -0,0 +1,286 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_client_context). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +Client Configuration for authorization, token exchange, and userinfo. + +For most projects, it makes sense to use `m:oidcc_provider_configuration_worker` and the high-level +interface of `oidcc`. In that case, direct usage of this module is not needed. 
+ +To use the record, import the definition: + +```erlang +-include_lib(["oidcc/include/oidcc_client_context.hrl"]). +``` +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). + +-include_lib("jose/include/jose_jwk.hrl"). + +-export_type([authenticated_opts/0]). +-export_type([authenticated_t/0]). +-export_type([error/0]). +-export_type([opts/0]). +-export_type([t/0]). +-export_type([unauthenticated_opts/0]). +-export_type([unauthenticated_t/0]). + +-export([from_configuration_worker/3]). +-export([from_configuration_worker/4]). +-export([from_manual/4]). +-export([from_manual/5]). +-export([apply_profiles/2]). + +-type t() :: authenticated_t() | unauthenticated_t(). + +?DOC(#{since => <<"3.0.0">>}). +-type authenticated_t() :: #oidcc_client_context{ + provider_configuration :: oidcc_provider_configuration:t(), + jwks :: jose_jwk:key(), + client_id :: binary(), + client_secret :: binary(), + client_jwks :: jose_jwk:key() | none +}. + +?DOC(#{since => <<"3.0.0">>}). +-type unauthenticated_t() :: #oidcc_client_context{ + provider_configuration :: oidcc_provider_configuration:t(), + jwks :: jose_jwk:key(), + client_id :: binary(), + client_secret :: unauthenticated, + client_jwks :: none +}. + +?DOC(#{since => <<"3.0.0">>}). +-type authenticated_opts() :: #{ + client_jwks => jose_jwk:key() +}. + +?DOC(#{since => <<"3.0.0">>}). +-type unauthenticated_opts() :: #{}. + +?DOC(#{since => <<"3.0.0">>}). +-type opts() :: authenticated_opts() | unauthenticated_opts(). + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: provider_not_ready. + +?DOC(""" +Create Client Context from a `m:oidcc_provider_configuration_worker`. + +See `from_configuration_worker/4`. +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-spec from_configuration_worker + (ProviderName, ClientId, ClientSecret) -> {ok, authenticated_t()} | {error, error()} when + ProviderName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(); + (ProviderName, ClientId, ClientSecret) -> {ok, unauthenticated_t()} | {error, error()} when + ProviderName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: unauthenticated. +from_configuration_worker(ProviderName, ClientId, ClientSecret) -> + from_configuration_worker(ProviderName, ClientId, ClientSecret, #{}). + +?DOC(""" +Create Client Context from a `m:oidcc_provider_configuration_worker`. + +## Examples + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://login.salesforce.com">> + }), + +{ok, #oidcc_client_context{}} = + oidcc_client_context:from_configuration_worker(Pid, + <<"client_id">>, + <<"client_secret">>). +``` + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://login.salesforce.com">>, + name => {local, salesforce_provider} + }), + +{ok, #oidcc_client_context{}} = + oidcc_client_context:from_configuration_worker( + salesforce_provider, + <<"client_id">>, + <<"client_secret">>, + #{client_jwks => jose_jwk:generate_key(16)} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec from_configuration_worker + (ProviderName, ClientId, ClientSecret, Opts) -> + {ok, authenticated_t()} | {error, error()} + when + ProviderName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: authenticated_opts(); + (ProviderName, ClientId, ClientSecret, Opts) -> + {ok, unauthenticated_t()} | {error, error()} + when + ProviderName :: gen_server:server_ref(), + ClientId :: binary(), + ClientSecret :: unauthenticated, + Opts :: unauthenticated_opts(). 
+from_configuration_worker(ProviderName, ClientId, ClientSecret, Opts) when is_pid(ProviderName) -> + maybe + #oidcc_provider_configuration{} = + ProviderConfiguration ?= + oidcc_provider_configuration_worker:get_provider_configuration(ProviderName), + #jose_jwk{} = Jwks ?= oidcc_provider_configuration_worker:get_jwks(ProviderName), + {ok, + from_manual( + ProviderConfiguration, + Jwks, + ClientId, + ClientSecret, + Opts + )} + else + undefined -> {error, provider_not_ready} + end; +from_configuration_worker(ProviderName, ClientId, ClientSecret, Opts) -> + case erlang:whereis(ProviderName) of + undefined -> + {error, provider_not_ready}; + Pid -> + from_configuration_worker(Pid, ClientId, ClientSecret, Opts) + end. + +?DOC(""" +Create Client Context manually. + +See `from_manual/5`. +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec from_manual + (Configuration, Jwks, ClientId, ClientSecret) -> authenticated_t() when + Configuration :: oidcc_provider_configuration:t(), + Jwks :: jose_jwk:key(), + ClientId :: binary(), + ClientSecret :: binary(); + (Configuration, Jwks, ClientId, ClientSecret) -> unauthenticated_t() when + Configuration :: oidcc_provider_configuration:t(), + Jwks :: jose_jwk:key(), + ClientId :: binary(), + ClientSecret :: unauthenticated. +from_manual(Configuration, Jwks, ClientId, ClientSecret) -> + from_manual(Configuration, Jwks, ClientId, ClientSecret, #{}). + +?DOC(""" +Create Client Context manually. + +## Examples + +```erlang +{ok, Configuration} = + oidcc_provider_configuration:load_configuration(<<"https://login.salesforce.com">>, []), + +#oidcc_provider_configuration{jwks_uri = JwksUri} = Configuration, + +{ok, Jwks} = oidcc_provider_configuration:load_jwks(JwksUri, []). + +#oidcc_client_context{} = + oidcc_client_context:from_manual( + Metadata, + Jwks, + <<"client_id">>, + <<"client_secret">>, + #{client_jwks => jose_jwk:generate_key(16)} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-spec from_manual + (Configuration, Jwks, ClientId, ClientSecret, Opts) -> authenticated_t() when + Configuration :: oidcc_provider_configuration:t(), + Jwks :: jose_jwk:key(), + ClientId :: binary(), + ClientSecret :: binary(), + Opts :: authenticated_opts(); + (Configuration, Jwks, ClientId, ClientSecret, Opts) -> unauthenticated_t() when + Configuration :: oidcc_provider_configuration:t(), + Jwks :: jose_jwk:key(), + ClientId :: binary(), + ClientSecret :: unauthenticated, + Opts :: unauthenticated_opts(). +from_manual( + #oidcc_provider_configuration{} = Configuration, + #jose_jwk{} = Jwks, + ClientId, + unauthenticated, + _Opts +) when is_binary(ClientId) -> + #oidcc_client_context{ + provider_configuration = Configuration, + jwks = Jwks, + client_id = ClientId, + client_secret = unauthenticated + }; +from_manual( + #oidcc_provider_configuration{} = Configuration, + #jose_jwk{} = Jwks, + ClientId, + ClientSecret, + Opts +) when is_binary(ClientId), is_binary(ClientSecret) -> + #oidcc_client_context{ + provider_configuration = Configuration, + jwks = Jwks, + client_id = ClientId, + client_secret = ClientSecret, + client_jwks = maps:get(client_jwks, Opts, none) + }. + +?DOC(""" +Apply OpenID Connect / OAuth2 Profiles to the context. + +Currently, the only supported profiles are: +- `fapi2_security_profile` - https://openid.bitbucket.io/fapi/fapi-2_0-security-profile.html +- `fapi2_message_signing` - https://openid.bitbucket.io/fapi/fapi-2_0-message-signing.html + +It returns an updated `t:t/0` record and a map of options to +be merged into the `m:oidcc_authorization` and `m:oidcc_token` functions. + +## Examples + +```erlang +ClientContext = #oidcc_client_context{} = oidcc_client_context:from_...(...), + +{#oidcc_client_context{} = ClientContext1, Opts} = oidcc_client_context:apply_profiles( + ClientContext, + #{ + profiles => [fapi2_message_signing] + }), + +{ok, Uri} = oidcc_authorization:create_redirect_uri( + ClientContext1, + maps:merge(Opts, #{...}) +). 
+``` +"""). +?DOC(#{since => <<"3.2.0">>}). +-spec apply_profiles(ClientContext, oidcc_profile:opts()) -> + {ok, ClientContext, oidcc_profile:opts_no_profiles()} | {error, oidcc_profile:error()} +when + ClientContext :: oidcc_client_context:t(). +apply_profiles(ClientContext, Opts) -> + oidcc_profile:apply_profiles(ClientContext, Opts). diff --git a/deps/oidcc/src/oidcc_client_registration.erl b/deps/oidcc/src/oidcc_client_registration.erl new file mode 100644 index 0000000..87a3888 --- /dev/null +++ b/deps/oidcc/src/oidcc_client_registration.erl @@ -0,0 +1,364 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_client_registration). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +Dynamic Client Registration Utilities. + +See https://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata. + +## Records + +To use the record, import the definition: + +```erlang +-include_lib(["oidcc/include/oidcc_client_registration.hrl"]). +``` + +## Telemetry + +See [`Oidcc.ClientRegistration`](`m:'Elixir.Oidcc.ClientRegistration'`). +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_registration.hrl"). +-include("oidcc_provider_configuration.hrl"). + +-export([register/3]). + +-export_type([error/0]). +-export_type([opts/0]). +-export_type([response/0]). +-export_type([t/0]). + +?DOC(""" +Configure configuration loading / parsing. + +## Parameters + +* `initial_access_token` - Access Token for registration +* `request_opts` - config for HTTP request +"""). +?DOC(#{since => <<"3.0.0">>}). +-type opts() :: #{ + initial_access_token => binary() | undefined, + request_opts => oidcc_http_util:request_opts() +}. + +?DOC(""" +Record containing Client Registration Metadata. + +See https://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata and +https://openid.net/specs/openid-connect-rpinitiated-1_0.html#ClientMetadata. 
+ +All unrecognized fields are stored in `extra_fields`. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type t() :: + #oidcc_client_registration{ + %% OpenID Connect Dynamic Client Registration 1.0 + redirect_uris :: [uri_string:uri_string()], + %% OpenID Connect Dynamic Client Registration 1.0 + response_types :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + grant_types :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + application_type :: web | native, + %% OpenID Connect Dynamic Client Registration 1.0 + contacts :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + client_name :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + logo_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + client_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + policy_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + tos_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + jwks :: jose_jwk:key() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + jwks_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + sector_identifier_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + subject_type :: pairwise | public | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + id_token_signed_response_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + id_token_encrypted_response_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + id_token_encrypted_response_enc :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + userinfo_signed_response_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client 
Registration 1.0 + userinfo_encrypted_response_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + userinfo_encrypted_response_enc :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_signing_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_encryption_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_object_encryption_enc :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + token_endpoint_auth_method :: erlang:binary(), + %% OpenID Connect Dynamic Client Registration 1.0 + token_endpoint_auth_signing_alg :: binary() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + default_max_age :: pos_integer() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + require_auth_time :: boolean(), + %% OpenID Connect Dynamic Client Registration 1.0 + default_acr_values :: [binary()] | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + initiate_login_uri :: uri_string:uri_string() | undefined, + %% OpenID Connect Dynamic Client Registration 1.0 + request_uris :: [uri_string:uri_string()] | undefined, + %% OpenID Connect RP-Initiated Logout 1.0 + post_logout_redirect_uris :: [uri_string:uri_string()] | undefined, + %% OAuth 2.0 Pushed Authorization Requests + require_pushed_authorization_requests :: boolean(), + %% OAuth 2.0 Demonstrating Proof of Possession (DPoP) + dpop_bound_access_tokens :: boolean(), + %% Unknown Fields + extra_fields :: #{binary() => term()} + }. + +?DOC(""" +Record containing Client Registration Response. + +See https://openid.net/specs/openid-connect-registration-1_0.html#RegistrationResponse. + +All unrecognized fields are stored in `extra_fields`. +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-type response() :: + #oidcc_client_registration_response{ + client_id :: erlang:binary(), + client_secret :: binary() | undefined, + registration_access_token :: binary() | undefined, + registration_client_uri :: uri_string:uri_string() | undefined, + client_id_issued_at :: pos_integer() | undefined, + client_secret_expires_at :: pos_integer() | undefined, + %% Unknown Fields + extra_fields :: #{binary() => term()} + }. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + registration_not_supported + | invalid_content_type + | oidcc_decode_util:error() + | oidcc_http_util:error(). + +-telemetry_event(#{ + event => [oidcc, register_client, start], + description => <<"Emitted at the start of registering the client">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, register_client, stop], + description => <<"Emitted at the end of registering the client">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, register_client, exception], + description => <<"Emitted at the end of registering the client">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +?DOC(""" +Register Client. + +## Examples + +```erlang +{ok, ProviderConfiguration} = + oidcc_provider_configuration:load_configuration("https://your.issuer"), + +{ok, #oidcc_client_registration_response{ + client_id = ClientId, + client_secret = ClientSecret +}} = + oidcc_client_registration:register( + ProviderConfiguration, + #oidcc_client_registration{ + redirect_uris = ["https://your.application.com/oidcc/callback"] + }, + #{initial_access_token => <<"optional token you got from the provider">>} + ). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-spec register(ProviderConfiguration, Registration, Opts) -> + {ok, response()} | {error, error()} +when + ProviderConfiguration :: oidcc_provider_configuration:t(), + Registration :: t(), + Opts :: opts(). +register(#oidcc_provider_configuration{registration_endpoint = undefined}, _Registration, _Opts) -> + {error, registration_not_supported}; +register( + #oidcc_provider_configuration{issuer = Issuer, registration_endpoint = RegistrationEndpoint}, + Registration, + Opts +) -> + RegistrationBody = encode(Registration), + TelemetryOpts = #{topic => [oidcc, register_client], extra_meta => #{issuer => Issuer}}, + RequestOpts = maps:get(request_opts, Opts, #{}), + Headers = + case maps:get(initial_access_token, Opts, undefined) of + undefined -> []; + Token -> [{"authorization", ["Bearer ", Token]}] + end, + Request = {RegistrationEndpoint, Headers, "application/json", RegistrationBody}, + + maybe + {ok, {{json, ResponseMap}, _Headers}} ?= + oidcc_http_util:request(post, Request, TelemetryOpts, RequestOpts), + {ok, #oidcc_client_registration_response{} = Response} ?= decode_response(ResponseMap), + {ok, Response} + else + {error, Reason} -> {error, Reason}; + {ok, {{_Format, _Body}, _}} -> {error, invalid_content_type} + end. + +-spec decode_response(Response) -> {ok, response()} | {error, error()} when Response :: map(). 
+decode_response(Response) -> + case + oidcc_decode_util:extract( + Response, + [ + {required, client_id, fun oidcc_decode_util:parse_setting_binary/2}, + {optional, client_secret, undefined, fun oidcc_decode_util:parse_setting_binary/2}, + {optional, registration_access_token, undefined, + fun oidcc_decode_util:parse_setting_binary/2}, + {optional, registration_client_uri, undefined, + fun oidcc_decode_util:parse_setting_uri_https/2}, + {optional, client_id_issued_at, undefined, + fun oidcc_decode_util:parse_setting_number/2}, + {optional, client_secret_expires_at, undefined, + fun oidcc_decode_util:parse_setting_number/2} + ], + #{} + ) + of + {ok, { + #{ + client_id := ClientId, + client_secret := ClientSecret, + registration_access_token := RegistrationAccessToken, + registration_client_uri := RegistrationClientUri, + client_id_issued_at := ClientIdIssuedAt, + client_secret_expires_at := ClientSecretExpiresAt + }, + ExtraFields + }} -> + {ok, #oidcc_client_registration_response{ + client_id = ClientId, + client_secret = ClientSecret, + registration_access_token = RegistrationAccessToken, + registration_client_uri = RegistrationClientUri, + client_id_issued_at = ClientIdIssuedAt, + client_secret_expires_at = ClientSecretExpiresAt, + extra_fields = ExtraFields + }}; + {error, Reason} -> + {error, Reason} + end. + +-spec encode(Metadata) -> binary() when Metadata :: t(). 
+encode(#oidcc_client_registration{ + redirect_uris = RedirectUris, + response_types = ResponseTypes, + grant_types = GrantTypes, + application_type = ApplicationType, + contacts = Contacts, + client_name = ClientName, + logo_uri = LogoUri, + client_uri = ClientUri, + policy_uri = PolicyUri, + tos_uri = TosUri, + jwks = Jwks, + sector_identifier_uri = SectorIdentifierUri, + subject_type = SubjectType, + id_token_signed_response_alg = IdTokenSignedResponseAlg, + id_token_encrypted_response_alg = IdTokenencryptedResponseAlg, + id_token_encrypted_response_enc = IdTokenEncryptedResponseEnc, + userinfo_signed_response_alg = UserinfoSignedResponseAlg, + userinfo_encrypted_response_alg = UserinfoEncryptedResponseAlg, + userinfo_encrypted_response_enc = UserinfoEncryptedResponseEnc, + request_object_signing_alg = RequestObjectSigningAlg, + request_object_encryption_alg = RequestObjectEncryptionAlg, + request_object_encryption_enc = RequestObjectEncryptionEnc, + token_endpoint_auth_method = TokenEndpointAuthMethod, + token_endpoint_auth_signing_alg = TokenEndpointAuthSigningAlg, + default_max_age = DefaultMaxAge, + require_auth_time = RequireAuthTime, + default_acr_values = DefaultAcrValues, + initiate_login_uri = InitiateLoginUri, + request_uris = RequestUris, + post_logout_redirect_uris = PostLogoutRedirectUris, + require_pushed_authorization_requests = RequirePushedAuthorizationRequests, + extra_fields = ExtraFields +}) -> + Map0 = #{ + redirect_uris => RedirectUris, + response_types => ResponseTypes, + grant_types => GrantTypes, + application_type => ApplicationType, + contacts => Contacts, + client_name => ClientName, + logo_uri => LogoUri, + client_uri => ClientUri, + policy_uri => PolicyUri, + tos_uri => TosUri, + jwks => + case Jwks of + undefined -> + undefined; + _ -> + {_KeyType, KeyMap} = jose_jwk:to_map(Jwks), + KeyMap + end, + sector_identifier_uri => SectorIdentifierUri, + subject_type => SubjectType, + id_token_signed_response_alg => 
IdTokenSignedResponseAlg, + id_token_encrypted_response_alg => IdTokenencryptedResponseAlg, + id_token_encrypted_response_enc => IdTokenEncryptedResponseEnc, + userinfo_signed_response_alg => UserinfoSignedResponseAlg, + userinfo_encrypted_response_alg => UserinfoEncryptedResponseAlg, + userinfo_encrypted_response_enc => UserinfoEncryptedResponseEnc, + request_object_signing_alg => RequestObjectSigningAlg, + request_object_encryption_alg => RequestObjectEncryptionAlg, + request_object_encryption_enc => RequestObjectEncryptionEnc, + token_endpoint_auth_method => TokenEndpointAuthMethod, + token_endpoint_auth_signing_alg => TokenEndpointAuthSigningAlg, + default_max_age => DefaultMaxAge, + require_auth_time => RequireAuthTime, + default_acr_values => DefaultAcrValues, + initiate_login_uri => InitiateLoginUri, + request_uris => RequestUris, + post_logout_redirect_uris => PostLogoutRedirectUris, + require_pushed_authorization_requests => RequirePushedAuthorizationRequests + }, + Map1 = maps:merge(Map0, ExtraFields), + Map = maps:filter( + fun + (_Key, undefined) -> false; + (_Key, _Value) -> true + end, + Map1 + ), + jose:encode(Map). diff --git a/deps/oidcc/src/oidcc_decode_util.erl b/deps/oidcc/src/oidcc_decode_util.erl new file mode 100644 index 0000000..6f0d90a --- /dev/null +++ b/deps/oidcc/src/oidcc_decode_util.erl @@ -0,0 +1,225 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_decode_util). + +-include("internal/doc.hrl"). +?MODULEDOC("Response Decoding Utils"). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-export([extract/3]). +-export([parse_setting_binary/2]). +-export([parse_setting_binary_list/2]). +-export([parse_setting_boolean/2]). +-export([parse_setting_list_enum/3]). +-export([parse_setting_number/2]). +-export([parse_setting_uri/2]). +-export([parse_setting_uri_https/2]). +-export([parse_setting_uri_map/2]). +-export([parse_setting_uri_https_map/2]). + +-export_type([error/0]). 
+ +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + {missing_config_property, Key :: atom()} + | {invalid_config_property, { + Type :: + uri + | uri_https + | binary + | number + | list_of_binaries + | boolean + | scopes_including_openid + | enum + | alg_no_none, + Field :: atom() + }}. + +?DOC(false). +-spec extract( + Map :: #{binary() => term()}, + Keys :: [{required, Key, ParseFn} | {optional, Key, Default, ParseFn}], + Acc :: #{atom() => term()} +) -> + {ok, {Matched, Rest}} | {error, error()} +when + Key :: atom(), + Default :: term(), + ParseFn :: fun((Setting :: term(), Key) -> {ok, term()} | {error, error()}), + Matched :: #{Key => Default | undefined | term()}, + Rest :: #{binary() => term()}. +extract(Map1, [{required, Key, ParseFn} | RestKeys], Acc) -> + case maps:take(atom_to_binary(Key), Map1) of + {Value, Map2} -> + case ParseFn(Value, Key) of + {ok, Parsed} -> + extract(Map2, RestKeys, maps:put(Key, Parsed, Acc)); + {error, Reason} -> + {error, Reason} + end; + error -> + {error, {missing_config_property, Key}} + end; +extract(Map1, [{optional, Key, Default, ParseFn} | RestKeys], Acc) -> + case maps:take(atom_to_binary(Key), Map1) of + {undefined, Map2} -> + extract(Map2, RestKeys, maps:put(Key, Default, Acc)); + {Value, Map2} -> + case ParseFn(Value, Key) of + {ok, Parsed} -> + extract(Map2, RestKeys, maps:put(Key, Parsed, Acc)); + {error, Reason} -> + {error, Reason} + end; + error -> + extract(Map1, RestKeys, maps:put(Key, Default, Acc)) + end; +extract(Map, [], Acc) -> + {ok, {Acc, Map}}. + +?DOC(false). +-spec parse_setting_uri(Setting :: term(), Field :: atom()) -> + {ok, uri_string:uri_string()} | {error, error()}. +parse_setting_uri(Setting, _Field) when is_binary(Setting) -> + {ok, Setting}; +parse_setting_uri(_Setting, Field) -> + {error, {invalid_config_property, {uri, Field}}}. + +?DOC(false). +-spec parse_setting_uri_https(Setting :: term(), Field :: atom()) -> + {ok, uri_string:uri_string()} | {error, error()}. 
+parse_setting_uri_https(Setting, Field) when is_binary(Setting) -> + case uri_string:parse(Setting) of + #{scheme := <<"https">>} -> + {ok, Setting}; + #{scheme := _Scheme} -> + {error, {invalid_config_property, {uri_https, Field}}} + end; +parse_setting_uri_https(_Setting, Field) -> + {error, {invalid_config_property, {uri_https, Field}}}. + +?DOC(false). +-spec parse_setting_uri_map(Setting :: term(), Field :: atom()) -> + {ok, #{binary() => uri_string:uri_string()}} | {error, error()}. +parse_setting_uri_map(Setting, Field) -> + do_parse_setting_uri_map(Setting, Field, fun parse_setting_uri/2). + +?DOC(false). +-spec parse_setting_uri_https_map(Setting :: term(), Field :: atom()) -> + {ok, #{binary() => uri_string:uri_string()}} | {error, error()}. +parse_setting_uri_https_map(Setting, Field) -> + do_parse_setting_uri_map(Setting, Field, fun parse_setting_uri_https/2). + +do_parse_setting_uri_map(#{} = Setting, Field, Parser) -> + SettingList = maps:to_list(Setting), + case + lists:foldl( + fun + (_Elem, {error, Reason}) -> + {error, Reason}; + ({BinKey, Value}, {ok, Acc}) when is_binary(BinKey) -> + case Parser(Value, Field) of + {ok, SettingValue} -> + {ok, [{BinKey, SettingValue} | Acc]}; + {error, Reason} -> + {error, Reason} + end; + (_, _) -> + {error, {invalid_config_property, {uri_map, Field}}} + end, + + {ok, []}, + SettingList + ) + of + {ok, ParsedList} -> + {ok, maps:from_list(ParsedList)}; + {error, Reason} -> + {error, Reason} + end; +do_parse_setting_uri_map(_Setting, Field, _Parser) -> + {error, {invalid_config_property, {uri_map, Field}}}. + +?DOC(false). +-spec parse_setting_binary(Setting :: term(), Field :: atom()) -> + {ok, binary()} | {error, error()}. +parse_setting_binary(Setting, _Field) when is_binary(Setting) -> + {ok, Setting}; +parse_setting_binary(_Setting, Field) -> + {error, {invalid_config_property, {binary, Field}}}. + +?DOC(false). 
+-spec parse_setting_binary_list(Setting :: term(), Field :: atom()) -> + {ok, [binary()]} | {error, error()}. +parse_setting_binary_list(Setting, Field) when is_list(Setting) -> + case lists:all(fun is_binary/1, Setting) of + true -> + {ok, Setting}; + false -> + {error, {invalid_config_property, {list_of_binaries, Field}}} + end; +parse_setting_binary_list(_Setting, Field) -> + {error, {invalid_config_property, {list_of_binaries, Field}}}. + +?DOC(false). +-spec parse_setting_number(Setting :: term(), Field :: atom()) -> + {ok, integer()} | {error, error()}. +parse_setting_number(Setting, _Field) when is_integer(Setting) -> + {ok, Setting}; +parse_setting_number(_Setting, Field) -> + {error, {invalid_config_property, {number, Field}}}. + +?DOC(false). +-spec parse_setting_boolean(Setting :: term(), Field :: atom()) -> + {ok, boolean()} | {error, error()}. +parse_setting_boolean(Setting, _Field) when is_boolean(Setting) -> + {ok, Setting}; +parse_setting_boolean(_Setting, Field) -> + {error, {invalid_config_property, {boolean, Field}}}. + +?DOC(false). +-spec parse_setting_list_enum( + Setting :: term(), + Field :: atom(), + Parse :: fun((binary()) -> {ok, Value} | error) +) -> + {ok, [Value]} | {error, error()} +when + Value :: term(). +parse_setting_list_enum(Setting, Field, Parse) -> + case parse_setting_binary_list(Setting, Field) of + {ok, Values} -> + Parsed = + lists:map( + fun(Value) -> + case Parse(Value) of + {ok, ParsedValue} -> + {ok, ParsedValue}; + error -> + {error, Value} + end + end, + Values + ), + + case + lists:filter( + fun + ({ok, _Value}) -> + false; + ({error, _Value}) -> + true + end, + Parsed + ) + of + [] -> + {ok, lists:map(fun({ok, Value}) -> Value end, Parsed)}; + [{error, _InvalidValue} | _Rest] -> + {error, {invalid_config_property, {enum, Field}}} + end; + {error, Reason} -> + {error, Reason} + end. 
diff --git a/deps/oidcc/src/oidcc_http_util.erl b/deps/oidcc/src/oidcc_http_util.erl new file mode 100644 index 0000000..ff859d2 --- /dev/null +++ b/deps/oidcc/src/oidcc_http_util.erl @@ -0,0 +1,216 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_http_util). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("HTTP Client Utilities"). + +-export([basic_auth_header/2]). +-export([bearer_auth_header/1]). +-export([headers_to_cache_deadline/2]). +-export([request/4]). + +-export_type([ + http_header/0, error/0, httpc_error/0, query_params/0, telemetry_opts/0, request_opts/0 +]). + +?DOC("See `uri_string:compose_query/1`."). +?DOC(#{since => <<"3.0.0">>}). +-type query_params() :: [{unicode:chardata(), unicode:chardata() | true}]. + +?DOC("See `httpc:request/5`."). +?DOC(#{since => <<"3.0.0">>}). +-type http_header() :: {Field :: [byte()] | binary(), Value :: iodata()}. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + {http_error, StatusCode :: pos_integer(), HttpBodyResult :: binary() | map()} + | {use_dpop_nonce, Nonce :: binary(), HttpBodyResult :: binary() | map()} + | invalid_content_type + | httpc_error(). + +?DOC("See `httpc:request/5` for additional errors."). +?DOC(#{since => <<"3.0.0">>}). +-type httpc_error() :: term(). + +?DOC(""" +See `httpc:request/5`. + +## Parameters + +* `timeout` - timeout for request +* `ssl` - TLS config +"""). +?DOC(#{since => <<"3.0.0">>}). +-type request_opts() :: #{ + timeout => timeout(), + ssl => [ssl:tls_option()], + httpc_profile => atom() | pid() +}. + +?DOC(#{since => <<"3.0.0">>}). +-type telemetry_opts() :: #{ + topic := [atom()], + extra_meta => map() +}. + +?DOC(false). +-spec basic_auth_header(User, Secret) -> http_header() when + User :: binary(), + Secret :: binary(). 
+basic_auth_header(User, Secret) -> + UserEnc = uri_string:compose_query([{User, true}]), + SecretEnc = uri_string:compose_query([{Secret, true}]), + RawAuth = <>/binary, SecretEnc/binary>>, + AuthData = base64:encode(RawAuth), + {"authorization", [<<"Basic ">>, AuthData]}. + +?DOC(false). +-spec bearer_auth_header(Token) -> http_header() when Token :: binary(). +bearer_auth_header(Token) -> + {"authorization", [<<"Bearer ">>, Token]}. + +?DOC(false). +-spec request(Method, Request, TelemetryOpts, RequestOpts) -> + {ok, {{json, term()} | {jwt, binary()}, [http_header()]}} + | {error, error()} +when + Method :: head | get | put | patch | post | trace | options | delete, + Request :: + {uri_string:uri_string(), [http_header()]} + | { + uri_string:uri_string(), + [http_header()], + ContentType :: uri_string:uri_string(), + HttpBody + }, + HttpBody :: + iolist() + | binary() + | { + fun((Accumulator :: term()) -> eof | {ok, iolist(), Accumulator :: term()}), + Accumulator :: term() + } + | {chunkify, fun((Accumulator :: term()) -> eof | {ok, iolist(), Accumulator :: term()}), + Accumulator :: term()}, + TelemetryOpts :: telemetry_opts(), + RequestOpts :: request_opts(). 
+request(Method, Request, TelemetryOpts, RequestOpts) -> + TelemetryTopic = maps:get(topic, TelemetryOpts), + TelemetryExtraMeta = maps:get(extra_meta, TelemetryOpts, #{}), + Timeout = maps:get(timeout, RequestOpts, timer:minutes(1)), + SslOpts = maps:get(ssl, RequestOpts, undefined), + HttpProfile = maps:get(httpc_profile, RequestOpts, default), + + HttpOpts0 = [{timeout, Timeout}], + HttpOpts = + case SslOpts of + undefined -> HttpOpts0; + _Opts -> [{ssl, SslOpts} | HttpOpts0] + end, + + telemetry:span( + TelemetryTopic, + TelemetryExtraMeta, + fun() -> + maybe + {ok, {_StatusLine, Headers, _Result} = Response} ?= + httpc:request( + Method, + Request, + HttpOpts, + [{body_format, binary}], + HttpProfile + ), + {ok, BodyAndFormat} ?= extract_successful_response(Response), + {{ok, {BodyAndFormat, Headers}}, TelemetryExtraMeta} + else + {error, Reason} -> + {{error, Reason}, maps:put(error, Reason, TelemetryExtraMeta)} + end + end + ). + +-spec extract_successful_response({StatusLine, [HttpHeader], HttpBodyResult}) -> + {ok, {json, term()} | {jwt, binary()}} | {error, error()} +when + StatusLine :: {HttpVersion, StatusCode, string()}, + HttpVersion :: uri_string:uri_string(), + StatusCode :: pos_integer(), + HttpHeader :: http_header(), + HttpBodyResult :: binary(). 
+extract_successful_response({{_HttpVersion, Status, _HttpStatusName}, Headers, HttpBodyResult}) when + Status == 200 orelse Status == 201 +-> + case fetch_content_type(Headers) of + json -> + {ok, {json, jose:decode(HttpBodyResult)}}; + jwt -> + {ok, {jwt, HttpBodyResult}}; + unknown -> + {error, invalid_content_type} + end; +extract_successful_response({{_HttpVersion, StatusCode, _HttpStatusName}, Headers, HttpBodyResult}) -> + Body = + case fetch_content_type(Headers) of + json -> + jose:decode(HttpBodyResult); + jwt -> + HttpBodyResult; + unknown -> + HttpBodyResult + end, + case proplists:lookup("dpop-nonce", Headers) of + {"dpop-nonce", DpopNonce} -> + {error, {use_dpop_nonce, iolist_to_binary(DpopNonce), Body}}; + _ -> + {error, {http_error, StatusCode, Body}} + end. + +-spec fetch_content_type(Headers) -> json | jwt | unknown when Headers :: [http_header()]. +fetch_content_type(Headers) -> + case proplists:lookup("content-type", Headers) of + {"content-type", "application/jwk-set+json" ++ _Rest} -> + json; + {"content-type", "application/json" ++ _Rest} -> + json; + {"content-type", "application/jwt" ++ _Rest} -> + jwt; + _Other -> + unknown + end. + +-spec headers_to_cache_deadline(Headers, DefaultExpiry) -> pos_integer() when + Headers :: [{Header :: binary(), Value :: binary()}], DefaultExpiry :: non_neg_integer(). +headers_to_cache_deadline(Headers, DefaultExpiry) -> + case proplists:lookup("cache-control", Headers) of + {"cache-control", Cache} -> + try + cache_deadline(Cache, DefaultExpiry) + catch + _:_ -> + DefaultExpiry + end; + none -> + DefaultExpiry + end. + +-spec cache_deadline(Cache :: iodata(), Fallback :: pos_integer()) -> pos_integer(). 
+cache_deadline(Cache, Fallback) -> + Entries = + binary:split(iolist_to_binary(Cache), [<<",">>, <<"=">>, <<" ">>], [global, trim_all]), + MaxAge = + fun + (<<"0">>, true) -> + Fallback; + (Entry, true) -> + erlang:convert_time_unit(binary_to_integer(Entry), second, millisecond); + (<<"max-age">>, _) -> + true; + (_, Res) -> + Res + end, + lists:foldl(MaxAge, Fallback, Entries). diff --git a/deps/oidcc/src/oidcc_jwt_util.erl b/deps/oidcc/src/oidcc_jwt_util.erl new file mode 100644 index 0000000..bfc7507 --- /dev/null +++ b/deps/oidcc/src/oidcc_jwt_util.erl @@ -0,0 +1,518 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_jwt_util). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("JWT Utilities"). + +-include_lib("jose/include/jose_jwe.hrl"). +-include_lib("jose/include/jose_jwk.hrl"). +-include_lib("jose/include/jose_jws.hrl"). +-include_lib("jose/include/jose_jwt.hrl"). + +-export([client_secret_oct_keys/2]). +-export([merge_client_secret_oct_keys/3]). +-export([decrypt_and_verify/5]). +-export([encrypt/4]). +-export([evaluate_for_all_keys/2]). +-export([merge_jwks/2]). +-export([peek_payload/1]). +-export([refresh_jwks_fun/1]). +-export([sign/3]). +-export([sign/4]). +-export([sign_dpop/3]). +-export([thumbprint/1]). +-export([verify_claims/2]). +-export([verify_not_none_alg/1]). +-export([verify_signature/3]). + +-export_type([claims/0]). +-export_type([error/0]). +-export_type([refresh_jwks_for_unknown_kid_fun/0]). + +?DOC(#{since => <<"3.0.0">>}). +-type refresh_jwks_for_unknown_kid_fun() :: + fun((Jwks :: jose_jwk:key(), Kid :: binary()) -> {ok, jose_jwk:key()} | {error, term()}). + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + no_matching_key + | invalid_jwt_token + | {no_matching_key_with_kid, Kid :: binary()} + | none_alg_used + | {none_alg_used, Jwt :: #jose_jwt{}, Jws :: #jose_jws{}} + | not_encrypted. + +?DOC(#{since => <<"3.0.0">>}). 
+-type claims() :: #{binary() => term()}. + +%% Function to decide if the jwks should be reladed to find a matching key for `Kid` +%% +%% A default function is provided in `oidcc:retrieve_token/5` +%% and `oidcc:retrieve_userinfo/5`. +%% +%% The default implementation does not implement any rate limiting. +%% +%% Checking of jwk sets is a bit wonky because of partial support +%% in jose. see: https://github.com/potatosalad/erlang-jose/issues/28 +?DOC(false). +-spec verify_signature(Token, AllowAlgorithms, Jwks) -> + {ok, {Jwt, Jws}} + | {error, error()} +when + Token :: binary(), + AllowAlgorithms :: [binary()], + Jwks :: jose_jwk:key(), + Jwt :: #jose_jwt{}, + Jws :: #jose_jws{}. +verify_signature(Token, AllowAlgorithms, #jose_jwk{keys = {jose_jwk_set, Keys}}) -> + lists:foldl( + fun + (_Key, {ok, _Res} = Acc) -> + Acc; + (Key, Acc) -> + case {verify_signature(Token, AllowAlgorithms, Key), Acc} of + {{ok, Res}, _Acc} -> + {ok, Res}; + {_Res, {error, {no_matching_key_with_kid, Kid}}} -> + {error, {no_matching_key_with_kid, Kid}}; + {Res, _Acc} -> + Res + end + end, + {error, no_matching_key}, + Keys + ); +verify_signature(Token, AllowAlgorithms, #jose_jwk{} = Jwks) -> + try + Kid = + case jose_jwt:peek_protected(Token) of + #jose_jws{fields = #{<<"kid">> := IntKid}} -> + IntKid; + #jose_jws{} -> + none + end, + case Jwks of + #jose_jwk{fields = #{<<"kid">> := CmpKid}} when CmpKid =/= Kid, Kid =/= none -> + {error, {no_matching_key_with_kid, Kid}}; + #jose_jwk{} -> + case jose_jwt:verify_strict(Jwks, AllowAlgorithms, Token) of + {true, Jwt, Jws} -> + {ok, {Jwt, Jws}}; + {false, Jwt, #jose_jws{alg = {jose_jws_alg_none, none}} = Jws} -> + {error, {none_alg_used, Jwt, Jws}}; + {false, _Jwt, _Jws} -> + {error, no_matching_key} + end + end + catch + error:{badarg, [_Token]} -> + {error, invalid_jwt_token}; + %% Some Keys crash if a non matching alg is provided + error:function_clause -> + {error, invalid_jwt_token} + end. + +?DOC(false). 
+-spec verify_claims(Claims, ExpClaims) -> ok | {error, {missing_claim, ExpClaim, Claims}} when + Claims :: claims(), + ExpClaim :: {binary(), term() | {regex, binary()}}, + ExpClaims :: [ExpClaim]. +verify_claims(Claims, ExpClaims) -> + CheckExpectedClaims = + fun + ({Key, {regex, Pattern}}) -> + case maps:get(Key, Claims, none) of + none -> + true; + Value when is_binary(Value) -> + case re:run(Value, Pattern, [{capture, none}]) of + match -> + false; + nomatch -> + true + end; + _Other -> + true + end; + ({Key, Value}) -> + case maps:get(Key, Claims, none) of + Value -> + false; + _Other -> + true + end + end, + case lists:filter(CheckExpectedClaims, ExpClaims) of + [] -> + ok; + [Claim | _Rest] -> + {error, {missing_claim, Claim, Claims}} + end. + +?DOC(false). +-spec client_secret_oct_keys(AllowedAlgorithms, ClientSecret) -> jose_jwk:key() | none when + AllowedAlgorithms :: [binary()] | undefined, + ClientSecret :: binary() | unauthenticated. +client_secret_oct_keys(undefined, _ClientSecret) -> + none; +client_secret_oct_keys(_AllowedAlgorithms, unauthenticated) -> + none; +client_secret_oct_keys(AllowedAlgorithms, ClientSecret) -> + case + lists:member(<<"HS256">>, AllowedAlgorithms) or + lists:member(<<"HS384">>, AllowedAlgorithms) or + lists:member(<<"HS512">>, AllowedAlgorithms) + of + true -> + jose_jwk:from_oct(ClientSecret); + false -> + none + end. + +?DOC(false). +-spec merge_client_secret_oct_keys(Jwks :: jose_jwk:key(), AllowedAlgorithms, ClientSecret) -> + jose_jwk:key() +when + AllowedAlgorithms :: [binary()] | undefined, + ClientSecret :: binary() | unauthenticated. +merge_client_secret_oct_keys(Jwks, AllowedAlgorithms, ClientSecret) -> + case client_secret_oct_keys(AllowedAlgorithms, ClientSecret) of + none -> + Jwks; + OctKeys -> + merge_jwks(Jwks, OctKeys) + end. + +?DOC(false). +-spec refresh_jwks_fun(ProviderConfigurationWorkerName) -> + refresh_jwks_for_unknown_kid_fun() +when + ProviderConfigurationWorkerName :: gen_server:server_ref(). 
+refresh_jwks_fun(ProviderConfigurationWorkerName) -> + fun(_Jwks, Kid) -> + oidcc_provider_configuration_worker:refresh_jwks_for_unknown_kid( + ProviderConfigurationWorkerName, + Kid + ), + {ok, oidcc_provider_configuration_worker:get_jwks(ProviderConfigurationWorkerName)} + end. + +?DOC(false). +-spec merge_jwks(Left :: jose_jwk:key(), Right :: jose_jwk:key()) -> jose_jwk:key(). +merge_jwks(#jose_jwk{keys = {jose_jwk_set, LeftKeys}, fields = LeftFields}, #jose_jwk{ + keys = {jose_jwk_set, RightKeys}, fields = RightFields +}) -> + #jose_jwk{ + keys = {jose_jwk_set, LeftKeys ++ RightKeys}, fields = maps:merge(LeftFields, RightFields) + }; +merge_jwks(#jose_jwk{} = Left, #jose_jwk{keys = {jose_jwk_set, _RightKeys}} = Right) -> + merge_jwks(#jose_jwk{keys = {jose_jwk_set, [Left]}}, Right); +merge_jwks(Left, Right) -> + merge_jwks(Left, #jose_jwk{keys = {jose_jwk_set, [Right]}}). + +?DOC(false). +-spec sign(Jwt :: #jose_jwt{}, Jwk :: jose_jwk:key(), SupportedAlgorithms :: [binary()]) -> + {ok, binary()} | {error, no_supported_alg_or_key}. +sign(Jwt, Jwk, SupportedAlgorithms) -> + sign(Jwt, Jwk, SupportedAlgorithms, #{}). + +?DOC(false). +-spec sign( + Jwt :: #jose_jwt{}, Jwk :: jose_jwk:key(), SupportedAlgorithms :: [binary()], JwsFields :: map() +) -> + {ok, binary()} | {error, no_supported_alg_or_key}. 
+sign(_Jwt, _Jwk, [], _JwsFields) -> + {error, no_supported_alg_or_key}; +sign(Jwt, Jwk, [Algorithm | RestAlgorithms], JwsFields0) -> + maybe + #jose_jws{fields = JwsFields} = + Jws0 ?= jose_jws:from_map(JwsFields0#{<<"alg">> => Algorithm}), + SigningCallback = fun + (#jose_jwk{fields = Fields} = Key) when Algorithm =/= <<"none">> -> + %% add the kid field to the JWS signature if present + KidField = maps:with([<<"kid">>], Fields), + Jws = Jws0#jose_jws{fields = maps:merge(KidField, JwsFields)}, + try + %% ensure key is either for signatures, or not specified + ok = + case Fields of + #{<<"use">> := <<"sig">>} -> ok; + #{<<"use">> := _} -> error; + #{} -> ok + end, + {_Jws, Token} = jose_jws:compact(jose_jwt:sign(Key, Jws, Jwt)), + {ok, Token} + catch + error:{badmatch, _} -> error; + error:not_supported -> error; + error:{not_supported, _Alg} -> error; + %% Some Keys crash if a public key is provided + error:function_clause -> error + end; + (#jose_jwk{} = Key) when Algorithm == <<"none">> -> + try + {_Jws, Token} = jose_jws:compact(jose_jwt:sign(Key, Jws0, Jwt)), + {ok, Token} + catch + error:not_supported -> error; + error:{not_supported, _Alg} -> error + end; + (_Key) -> + error + end, + {ok, Token} ?= evaluate_for_all_keys(Jwk, SigningCallback), + {ok, Token} + else + _ -> sign(Jwt, Jwk, RestAlgorithms, JwsFields0) + end. + +?DOC(false). +-spec decrypt_and_verify( + Jwt :: binary(), + Jwks :: jose_jwk:key(), + SigningAlgs :: [binary()] | undefined, + EncryptionAlgs :: [binary()] | undefined, + EncryptionEncs :: [binary()] | undefined +) -> + {ok, {#jose_jwt{}, #jose_jwe{} | #jose_jws{}}} | {error, error()}. +decrypt_and_verify(Jwt, Jwks, SigningAlgs, EncryptionAlgs, EncryptionEncs) -> + %% we call jwe_peek_protected/1 before `decrypt/4' so that we can + %% handle unencrypted tokens in the case where SupportedAlgorithms / + %% SupportedEncValues are undefined (where `decrypt/4' returns + %% {error, no_supported_alg_or_key}). 
+ case jwe_peek_protected(Jwt) of + {ok, Jwe} -> + case decrypt(Jwt, Jwks, EncryptionAlgs, EncryptionEncs) of + {ok, Decrypted} -> + verify_decrypted_token(Decrypted, SigningAlgs, Jwe, Jwks); + {error, Reason} -> + {error, Reason} + end; + {error, not_encrypted} -> + %% signed JWT + verify_signature(Jwt, SigningAlgs, Jwks); + {error, Reason} -> + {error, Reason} + end. + +-spec jwe_peek_protected(Jwt :: binary()) -> + {ok, #jose_jwe{}} | {error, not_encrypted | no_matching_key}. +jwe_peek_protected(Jwt) -> + %% jose_jwt:peek_protected(Jwt) doesn't work with encrypted tokens + maybe + [ProtectedEncoded, _, _, _, _] ?= binary:split(Jwt, <<".">>, [global]), + Protected = jose_jwa_base64url:decode(ProtectedEncoded), + #jose_jwe{} = Jwe ?= jose_jwe:from(Protected), + {ok, Jwe} + else + [_, _, _] -> + {error, not_encrypted}; + _ -> + {error, no_matching_key} + end. + +-spec decrypt( + Jwt :: binary(), + Jwk :: jose_jwk:key(), + SupportedAlgorithms :: [binary()] | undefined, + SupportedEncValues :: [binary()] | undefined +) -> + {ok, binary()} | {error, error()}. 
+decrypt(_Jwt, _Jwk, undefined, _SupportedEncValues) -> + {error, no_supported_alg_or_key}; +decrypt(_Jwt, _Jwk, _SupportedAlgorithms, undefined) -> + {error, no_supported_alg_or_key}; +decrypt(Jwt, #jose_jwk{keys = {jose_jwk_set, Keys}}, SupportedAlgorithms, SupportedEncValues) -> + lists:foldl( + fun + (_Key, {ok, _Res} = Acc) -> + Acc; + (Key, Acc) -> + case {decrypt(Jwt, Key, SupportedAlgorithms, SupportedEncValues), Acc} of + {{ok, Res}, _Acc} -> + {ok, Res}; + {_Res, {error, {no_matching_key_with_kid, Kid}}} -> + {error, {no_matching_key_with_kid, Kid}}; + {Res, _Acc} -> + Res + end + end, + {error, no_matching_key}, + Keys + ); +decrypt(Jwt, #jose_jwk{} = Jwk, SupportedAlgorithms, SupportedEncValues) -> + maybe + {ok, Jwe} ?= jwe_peek_protected(Jwt), + {_, #{<<"alg">> := JwtAlg, <<"enc">> := JwtEnc}} = jose_jwe:to_map(Jwe), + ok ?= verify_in_list(JwtAlg, SupportedAlgorithms), + ok ?= verify_in_list(JwtEnc, SupportedEncValues), + Kid = + case Jwe of + #jose_jwe{fields = #{<<"kid">> := IntKid}} -> + IntKid; + #jose_jwe{} -> + none + end, + case Jwk of + #jose_jwk{fields = #{<<"kid">> := CmpKid}} when CmpKid =/= Kid, Kid =/= none -> + {error, {no_matching_key_with_kid, Kid}}; + #jose_jwk{fields = #{<<"use">> := NotEnc}} when NotEnc =/= <<"enc">> -> + {error, no_matching_key}; + _ -> + try + {Token, _Jwe} = jose_jwe:block_decrypt(Jwk, Jwt), + {ok, Token} + catch + error:_ when Kid =:= none -> + {error, no_matching_key}; + error:_ -> + {error, {no_matching_key_with_kid, Kid}} + end + end + end. + +verify_in_list(Value, List) -> + case lists:member(Value, List) of + true -> + ok; + false -> + {error, no_matching_key} + end. 
+ +verify_decrypted_token(Jwt, SigningAlgs, Jwe, Jwks) -> + case verify_signature(Jwt, SigningAlgs, Jwks) of + {ok, Result} -> + %% encrypted + signed (nested) JWT + {ok, Result}; + {error, invalid_jwt_token} -> + %% encrypted JWT + try + {ok, {jose_jwt:from_binary(Jwt), Jwe}} + catch + _ -> {error, invalid_jwt_token} + end; + {error, Reason} -> + {error, Reason} + end. + +?DOC(false). +-spec encrypt( + Jwt :: binary(), + Jwk :: jose_jwk:key(), + SupportedAlgorithms :: [binary()] | undefined, + SupportedEncValues :: [binary()] | undefined +) -> + {ok, binary()} | {error, no_supported_alg_or_key}. +encrypt(_Jwt, _Jwk, undefined, _SupportedEncValues) -> + {error, no_supported_alg_or_key}; +encrypt(_Jwt, _Jwk, _SupportedAlgorithms, undefined) -> + {error, no_supported_alg_or_key}; +encrypt(Jwt, Jwk, SupportedAlgorithms, SupportedEncValues) -> + encrypt(Jwt, Jwk, SupportedAlgorithms, SupportedEncValues, SupportedEncValues). + +-spec encrypt( + Jwt :: binary(), + Jwk :: jose_jwk:key(), + SupportedAlgorithms :: [binary()], + SupportedEncValues :: [binary()], + AccEncValues :: [binary()] +) -> + {ok, binary()} | {error, no_supported_alg_or_key}. 
+encrypt(_Jwt, _Jwk, [], _SupportedEncValues, _AccEncValues) -> + {error, no_supported_alg_or_key}; +encrypt(Jwt, Jwk, [_Algorithm | RestAlgorithms], SupportedEncValues, []) -> + encrypt(Jwt, Jwk, RestAlgorithms, SupportedEncValues, SupportedEncValues); +encrypt(Jwt, Jwk, [Algorithm | _RestAlgorithms] = SupportedAlgorithms, SupportedEncValues, [ + EncValue | RestEncValues +]) -> + JweParams0 = #{<<"alg">> => Algorithm, <<"enc">> => EncValue}, + EncryptionCallback = fun + (#jose_jwk{fields = #{<<"use">> := NotEnc}}) when NotEnc =/= <<"enc">> -> + error; + (#jose_jwk{fields = Fields} = Key) -> + try + JweParams = + case maps:get(<<"kid">>, Fields, undefined) of + undefined -> JweParams0; + Kid -> maps:put(<<"kid">>, Kid, JweParams0) + end, + Jwe = jose_jwe:from_map(JweParams), + {_Jws, Token} = jose_jwe:compact(jose_jwk:block_encrypt(Jwt, Jwe, Key)), + {ok, Token} + catch + error:undef -> error; + error:{not_supported, _Alg} -> error + end; + (_Key) -> + error + end, + case evaluate_for_all_keys(Jwk, EncryptionCallback) of + {ok, Token} -> {ok, Token}; + error -> encrypt(Jwt, Jwk, SupportedAlgorithms, SupportedEncValues, RestEncValues) + end. + +?DOC(false). +-spec thumbprint(Jwk :: jose_jwk:key()) -> {ok, binary()} | error. +thumbprint(Jwk) -> + evaluate_for_all_keys(Jwk, fun + (#jose_jwk{fields = #{<<"use">> := Use}}) when Use =/= <<"sig">> -> + error; + (Key) -> + {ok, jose_jwk:thumbprint(Key)} + end). + +?DOC(false). +-spec sign_dpop(Jwt :: #jose_jwt{}, Jwk :: jose_jwk:key(), SigningAlgSupported :: [binary()]) -> + {ok, binary()} | {error, no_supported_alg_or_key}. +sign_dpop(Jwt, Jwk, SigningAlgSupported) -> + evaluate_for_all_keys(Jwk, fun + (#jose_jwk{fields = #{<<"use">> := Use}}) when Use =/= <<"sig">> -> + error; + (Key) -> + {_, PublicJwk} = jose_jwk:to_public_map(Key), + sign(Jwt, Key, SigningAlgSupported, #{ + <<"typ">> => <<"dpop+jwt">>, <<"jwk">> => PublicJwk + }) + end). + +?DOC(false). 
+-spec evaluate_for_all_keys(Jwk :: jose_jwk:key(), fun((jose_jwk:key()) -> {ok, Result} | error)) -> + {ok, Result} | error +when + Result :: term(). +evaluate_for_all_keys(#jose_jwk{keys = {jose_jwk_set, Keys}}, Callback) -> + lists:foldl( + fun + (_Key, {ok, Result}) -> + {ok, Result}; + (Key, error) -> + evaluate_for_all_keys(Key, Callback) + end, + error, + Keys + ); +evaluate_for_all_keys(#jose_jwk{} = Jwk, Callback) -> + Callback(Jwk). + +?DOC(false). +-spec verify_not_none_alg(#jose_jws{}) -> ok | {error, none_alg_used}. +verify_not_none_alg(#jose_jws{fields = #{<<"alg">> := <<"none">>}}) -> + {error, none_alg_used}; +verify_not_none_alg(#jose_jws{}) -> + ok. + +?DOC(false). +-spec peek_payload(binary()) -> {ok, #jose_jwt{}} | {error, invalid_jwt_token}. +peek_payload(Jwt) -> + try + {ok, jose_jwt:peek_payload(Jwt)} + catch + error:{badarg, [_Token]} -> + {error, invalid_jwt_token}; + error:function_clause -> + {error, invalid_jwt_token} + end. diff --git a/deps/oidcc/src/oidcc_logout.erl b/deps/oidcc/src/oidcc_logout.erl new file mode 100644 index 0000000..5c652f2 --- /dev/null +++ b/deps/oidcc/src/oidcc_logout.erl @@ -0,0 +1,121 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_logout). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("Logout from the OpenID Provider."). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). +-include("oidcc_token.hrl"). + +-export([initiate_url/3]). + +-export_type([error/0]). +-export_type([initiate_url_opts/0]). + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: end_session_endpoint_not_supported. + +?DOC(""" +Configure Relaying Party initiated Logout URI. + +See https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout. 
+ +## Parameters + +* `logout_hint` - logout_hint to pass to the provider +* `post_logout_redirect_uri` - Post Logout Redirect URI to pass to the provider +* `state` - state to pass to the provider +* `ui_locales` - UI locales to pass to the provider +* `extra_query_params` - extra query params to add to the URI +"""). +?DOC(#{since => <<"3.0.0">>}). +-type initiate_url_opts() :: #{ + logout_hint => binary(), + post_logout_redirect_uri => uri_string:uri_string(), + state => binary(), + ui_locales => binary(), + extra_query_params => oidcc_http_util:query_params() +}. + +?DOC(""" +Initiate URI for Relaying Party initiated Logout. + +See https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout. + +For a high level interface using `m:oidcc_provider_configuration_worker` +see `oidcc:initiate_logout_url/4`. + +## Examples + +```erlang +{ok, ClientContext} = oidcc_client_context:from_configuration_worker( + provider_name, + <<"client_id">>, + unauthenticated +), + +%% Get `Token` from `oidcc_token` + +{ok, RedirectUri} = + oidcc_logout:initiate_url( + Token, + ClientContext, + #{post_logout_redirect_uri: <<"https://my.server/return">} +), + +%% RedirectUri = https://my.provider/logout?id_token_hint=IDToken&client_id=ClientId&post_logout_redirect_uri=https%3A%2F%2Fmy.server%2Freturn +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec initiate_url(Token, ClientContext, Opts) -> + {ok, uri_string:uri_string()} | {error, error()} +when + Token :: IdToken | oidcc_token:t() | undefined, + IdToken :: binary(), + ClientContext :: oidcc_client_context:t(), + Opts :: initiate_url_opts(). 
+initiate_url(#oidcc_token{id = #oidcc_token_id{token = IdToken}}, ClientContext, Opts) -> + initiate_url(IdToken, ClientContext, Opts); +initiate_url(IdToken, ClientContext, Opts) -> + #oidcc_client_context{ + provider_configuration = Configuration, + client_id = ClientId + } = ClientContext, + #oidcc_provider_configuration{end_session_endpoint = EndSessionEndpoint} = + Configuration, + + QueryParams0 = [ + {"id_token_hint", IdToken}, + {"logout_hint", maps:get(logout_hint, Opts, undefined)}, + {"client_id", ClientId}, + {"post_logout_redirect_uri", maps:get(post_logout_redirect_uri, Opts, undefined)}, + {"state", maps:get(state, Opts, undefined)}, + {"ui_locales", maps:get(ui_locales, Opts, undefined)} + | maps:get(extra_query_params, Opts, []) + ], + QueryParams1 = lists:filter( + fun + ({_Name, undefined}) -> false; + ({_Name, _Value}) -> true + end, + QueryParams0 + ), + + case EndSessionEndpoint of + undefined -> + {error, end_session_endpoint_not_supported}; + Uri0 -> + UriMap0 = uri_string:parse(Uri0), + QueryString0 = maps:get(query, UriMap0, <<"">>), + QueryParams = uri_string:dissect_query(QueryString0) ++ QueryParams1, + QueryString = uri_string:compose_query(QueryParams), + UriMap = maps:put(query, QueryString, UriMap0), + Uri = uri_string:recompose(UriMap), + {ok, Uri} + end. diff --git a/deps/oidcc/src/oidcc_profile.erl b/deps/oidcc/src/oidcc_profile.erl new file mode 100644 index 0000000..fd7e693 --- /dev/null +++ b/deps/oidcc/src/oidcc_profile.erl @@ -0,0 +1,308 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_profile). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("OpenID Profile Utilities"). +?MODULEDOC(#{since => <<"3.2.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). + +-export([apply_profiles/2]). + +-export_type([profile/0]). +-export_type([opts/0]). +-export_type([opts_no_profiles/0]). 
+-export_type([error/0]). + +?DOC(#{since => <<"3.2.0">>}). +-type profile() :: + mtls_constrain | fapi2_security_profile | fapi2_message_signing | fapi2_connectid_au. + +?DOC(#{since => <<"3.2.0">>}). +-type opts() :: #{ + profiles => [profile()], + require_pkce => boolean(), + trusted_audiences => [binary()] | any, + preferred_auth_methods => [oidcc_auth_util:auth_method()], + request_opts => oidcc_http_util:request_opts() +}. + +?DOC(#{since => <<"3.2.0">>}). +-type opts_no_profiles() :: #{ + require_pkce => boolean(), + trusted_audiences => [binary()] | any, + preferred_auth_methods => [oidcc_auth_util:auth_method()], + request_opts => oidcc_http_util:request_opts() +}. + +?DOC(#{since => <<"3.2.0">>}). +-type error() :: {unknown_profile, atom()}. + +?DOC(false). +-spec apply_profiles(ClientContext, opts()) -> + {ok, ClientContext, opts_no_profiles()} | {error, error()} +when + ClientContext :: oidcc_client_context:t(). +apply_profiles( + #oidcc_client_context{} = ClientContext0, + #{profiles := [fapi2_security_profile | RestProfiles]} = Opts0 +) -> + %% FAPI2 Security Profile + %% - https://openid.bitbucket.io/fapi/fapi-security-profile-2_0.html + {ClientContext1, Opts1} = enforce_s256_pkce(ClientContext0, Opts0), + ClientContext2 = limit_response_types([<<"code">>], ClientContext1), + ClientContext3 = enforce_par(ClientContext2), + ClientContext4 = enforce_iss_parameter(ClientContext3), + ClientContext = limit_signing_alg_values( + [ + <<"PS256">>, + <<"PS384">>, + <<"PS512">>, + <<"ES256">>, + <<"ES384">>, + <<"ES512">>, + <<"EdDSA">> + ], + ClientContext4 + ), + Opts2 = Opts1#{profiles => RestProfiles}, + Opts3 = map_put_new(trusted_audiences, [], Opts2), + Opts4 = map_put_new(preferred_auth_methods, [private_key_jwt, tls_client_auth], Opts3), + Opts5 = put_tls_defaults(Opts4), + Opts = limit_tls_ciphers( + [ + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + 
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + ], + Opts5 + ), + apply_profiles(ClientContext, Opts); +apply_profiles( + #oidcc_client_context{} = ClientContext0, + #{profiles := [fapi2_message_signing | RestProfiles]} = Opts0 +) -> + %% FAPI2 Message Signing: + %% - https://openid.bitbucket.io/fapi/fapi-2_0-message-signing.html + + ClientContext = limit_response_modes( + [<<"jwt">>, <<"query.jwt">>, <<"form_post.jwt">>], ClientContext0 + ), + + %% TODO force require_signed_request_object once the conformance suite can + %% validate it (currently, the suite fails if this is enabled) + %% TODO require signed token introspection responses + + %% Also require everything from FAPI2 Security Profile + Opts = Opts0#{profiles => [fapi2_security_profile | RestProfiles]}, + apply_profiles(ClientContext, Opts); +apply_profiles( + #oidcc_client_context{} = ClientContext0, + #{profiles := [fapi2_connectid_au | RestProfiles]} = Opts0 +) -> + %% FAPI2 ConnectID profile + maybe + %% Require everything from FAPI2 Message Signing, and use mTLS + %% sender-constrained tokens + {ok, ClientContext1, Opts1} ?= + apply_profiles(ClientContext0, Opts0#{ + profiles => [fapi2_message_signing, mtls_constrain | RestProfiles] + }), + %% Require `purpose' field + Opts2 = Opts1#{require_purpose => true}, + {ok, ClientContext1, Opts2} + end; +apply_profiles( + #oidcc_client_context{} = ClientContext0, + #{profiles := [mtls_constrain | RestProfiles]} = Opts0 +) -> + %% If a PAR endpoint is present in the mTLS aliases, use that as the default + #oidcc_client_context{provider_configuration = Configuration0} = ClientContext0, + Configuration1 = + case Configuration0#oidcc_provider_configuration.mtls_endpoint_aliases of + #{ + <<"pushed_authorization_request_endpoint">> := MtlsParEndpoint + } -> + Configuration0#oidcc_provider_configuration{ + pushed_authorization_request_endpoint = MtlsParEndpoint + }; + _ -> + Configuration0 + end, + %% If the token endpoint is present in the mTLS aliases, use that as 
the default + Configuration2 = + case Configuration1#oidcc_provider_configuration.mtls_endpoint_aliases of + #{ + <<"token_endpoint">> := MtlsTokenEndpoint + } -> + Configuration1#oidcc_provider_configuration{ + token_endpoint = MtlsTokenEndpoint + }; + _ -> + Configuration1 + end, + %% If the userinfo endpoint is present in the mTLS aliases, use that as the default + Configuration3 = + case Configuration2#oidcc_provider_configuration.mtls_endpoint_aliases of + #{ + <<"userinfo_endpoint">> := MtlsUserinfoEndpoint + } -> + Configuration2#oidcc_provider_configuration{ + userinfo_endpoint = MtlsUserinfoEndpoint + }; + _ -> + Configuration2 + end, + %% If the introspection endpoint is present in the mTLS aliases, use that as the default + Configuration4 = + case Configuration3#oidcc_provider_configuration.mtls_endpoint_aliases of + #{ + <<"introspection_endpoint">> := MtlsIntrospectionEndpoint + } -> + Configuration3#oidcc_provider_configuration{ + introspection_endpoint = MtlsIntrospectionEndpoint + }; + _ -> + Configuration3 + end, + ClientContext1 = ClientContext0#oidcc_client_context{ + provider_configuration = Configuration4 + }, + Opts1 = Opts0#{profiles := RestProfiles}, + apply_profiles(ClientContext1, Opts1); +apply_profiles(#oidcc_client_context{}, #{profiles := [UnknownProfile | _]}) -> + {error, {unknown_profile, UnknownProfile}}; +apply_profiles(#oidcc_client_context{} = ClientContext, #{profiles := []} = Opts0) -> + Opts = maps:remove(profiles, Opts0), + apply_profiles(ClientContext, Opts); +apply_profiles(#oidcc_client_context{} = ClientContext, #{} = Opts) -> + {ok, ClientContext, Opts}. 
+ +enforce_s256_pkce(ClientContext0, Opts0) -> + #oidcc_client_context{ + provider_configuration = + ProviderConfiguration0 = #oidcc_provider_configuration{ + code_challenge_methods_supported = CodeChallengeMethodsSupported + } + } = ClientContext0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + code_challenge_methods_supported = limit_values([<<"S256">>], CodeChallengeMethodsSupported) + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + Opts = Opts0#{require_pkce => true}, + {ClientContext, Opts}. + +limit_response_types(Types, ClientContext0) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration0} = ClientContext0, + #oidcc_provider_configuration{ + response_types_supported = ResponseTypes + } = ProviderConfiguration0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + response_types_supported = limit_values(Types, ResponseTypes) + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + ClientContext. + +limit_response_modes(Modes, ClientContext0) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration0} = ClientContext0, + #oidcc_provider_configuration{ + response_modes_supported = ResponseModes + } = ProviderConfiguration0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + response_modes_supported = limit_values(Modes, ResponseModes) + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + ClientContext. 
+ +enforce_par(ClientContext0) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration0} = ClientContext0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + require_pushed_authorization_requests = true + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + ClientContext. + +enforce_iss_parameter(ClientContext0) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration0} = ClientContext0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + authorization_response_iss_parameter_supported = true + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + ClientContext. + +limit_signing_alg_values(AlgSupported, ClientContext0) -> + #oidcc_client_context{provider_configuration = ProviderConfiguration0} = ClientContext0, + #oidcc_provider_configuration{ + id_token_signing_alg_values_supported = IdAlg, + userinfo_signing_alg_values_supported = UserinfoAlg, + request_object_signing_alg_values_supported = RequestObjectAlg, + token_endpoint_auth_signing_alg_values_supported = TokenAlg, + revocation_endpoint_auth_signing_alg_values_supported = RevocationAlg, + introspection_endpoint_auth_signing_alg_values_supported = IntrospectionAlg, + authorization_signing_alg_values_supported = AuthorizationAlg, + dpop_signing_alg_values_supported = DpopAlg + } = ProviderConfiguration0, + ProviderConfiguration = ProviderConfiguration0#oidcc_provider_configuration{ + id_token_signing_alg_values_supported = limit_values(AlgSupported, IdAlg), + userinfo_signing_alg_values_supported = limit_values(AlgSupported, UserinfoAlg), + request_object_signing_alg_values_supported = limit_values(AlgSupported, RequestObjectAlg), + token_endpoint_auth_signing_alg_values_supported = limit_values(AlgSupported, TokenAlg), + revocation_endpoint_auth_signing_alg_values_supported = limit_values( + 
AlgSupported, RevocationAlg + ), + introspection_endpoint_auth_signing_alg_values_supported = limit_values( + AlgSupported, IntrospectionAlg + ), + authorization_signing_alg_values_supported = limit_values(AlgSupported, AuthorizationAlg), + dpop_signing_alg_values_supported = limit_values(AlgSupported, DpopAlg) + }, + ClientContext = ClientContext0#oidcc_client_context{ + provider_configuration = ProviderConfiguration + }, + ClientContext. + +put_tls_defaults(Opts) -> + RequestOpts0 = maps:get(request_opts, Opts, #{}), + SslOpts0 = maps:get(ssl, RequestOpts0, []), + SslOpts1 = SslOpts0 ++ httpc:ssl_verify_host_options(true), + SslOpts = lists:ukeysort(1, SslOpts1), + RequestOpts = RequestOpts0#{ssl => SslOpts}, + Opts#{request_opts => RequestOpts}. + +limit_tls_ciphers(SupportedCipherStrs, Opts) -> + RequestOpts0 = maps:get(request_opts, Opts, #{}), + SslOpts0 = maps:get(ssl, RequestOpts0, []), + SupportedCiphers = lists:map(fun ssl:str_to_suite/1, SupportedCipherStrs), + SslOpts1 = [{ciphers, SupportedCiphers} | SslOpts0], + SslOpts = lists:ukeysort(1, SslOpts1), + RequestOpts = RequestOpts0#{ssl => SslOpts}, + Opts#{request_opts => RequestOpts}. + +limit_values(_Limit, undefined) -> + undefined; +limit_values(Limit, Values) -> + [V || V <- Values, lists:member(V, Limit)]. + +map_put_new(Key, Value, Map) -> + case Map of + #{Key := _} -> + Map; + _ -> + Map#{Key => Value} + end. diff --git a/deps/oidcc/src/oidcc_provider_configuration.erl b/deps/oidcc/src/oidcc_provider_configuration.erl new file mode 100644 index 0000000..3932a36 --- /dev/null +++ b/deps/oidcc/src/oidcc_provider_configuration.erl @@ -0,0 +1,689 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_provider_configuration). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +Tooling to load and parse Openid Configuration. 
+ +## Records + +To use the record, import the definition: + +```erlang +-include_lib(["oidcc/include/oidcc_provider_configuration.hrl"]). +``` + +## Telemetry + +See [`Oidcc.ProviderConfiguration`](`m:'Elixir.Oidcc.ProviderConfiguration'`). +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_provider_configuration.hrl"). + +-export([decode_configuration/1]). +-export([decode_configuration/2]). +-export([load_configuration/1]). +-export([load_configuration/2]). +-export([load_jwks/2]). + +-export_type([error/0]). +-export_type([opts/0]). +-export_type([quirks/0]). +-export_type([t/0]). + +?DOC(""" +Allow Specification Non-compliance. + +## Exceptions + +* `allow_unsafe_http` - Allow unsafe HTTP. Use this for development + providers and **never in production**. +* `document_overrides` - a map to merge with the real OIDD document, + in case the OP left out some values. +* `issuer_regex` - Optional regex pattern to match against the issuer claim + instead of requiring an exact match. This may be necessary for certain providers that do not + conform to the OpenID specification, such as Microsoft Entra ID where + the issuer is 'https://login.microsoftonline.com/{tenantid}/v2.0' in the + [OpenID configuration](https://login.microsoftonline.com/common/v2.0/.well-known/openid-configuration). +"""). +?DOC(#{since => <<"3.1.0">>}). +-type quirks() :: #{ + allow_unsafe_http => boolean(), + document_overrides => map(), + issuer_regex => binary() +}. + +?DOC(""" +Configure configuration loading / parsing. + +## Parameters + +* `fallback_expiry` - How long to keep configuration cached if the server doesn't specify expiry. +* `request_opts` - config for HTTP request. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type opts() :: #{ + fallback_expiry => timeout(), + request_opts => oidcc_http_util:request_opts(), + quirks => quirks() +}. + +?DOC(""" +Record containing OpenID and OAuth 2.0 Configuration. 
+ +See: +* https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +* https://datatracker.ietf.org/doc/html/draft-jones-oauth-discovery-01#section-4.1 +* https://openid.net/specs/openid-connect-rpinitiated-1_0.html#OPMetadata + +All unrecognized fields are stored in `extra_fields`. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type t() :: + #oidcc_provider_configuration{ + issuer :: uri_string:uri_string(), + issuer_regex :: binary() | undefined, + authorization_endpoint :: uri_string:uri_string(), + token_endpoint :: uri_string:uri_string() | undefined, + userinfo_endpoint :: uri_string:uri_string() | undefined, + jwks_uri :: uri_string:uri_string() | undefined, + registration_endpoint :: uri_string:uri_string() | undefined, + scopes_supported :: [binary()] | undefined, + response_types_supported :: [binary()], + response_modes_supported :: [binary()], + grant_types_supported :: [binary()], + acr_values_supported :: [binary()] | undefined, + subject_types_supported :: [pairwise | public], + id_token_signing_alg_values_supported :: [binary()], + id_token_encryption_alg_values_supported :: + [binary()] | undefined, + id_token_encryption_enc_values_supported :: + [binary()] | undefined, + userinfo_signing_alg_values_supported :: [binary()] | undefined, + userinfo_encryption_alg_values_supported :: + [binary()] | undefined, + userinfo_encryption_enc_values_supported :: + [binary()] | undefined, + request_object_signing_alg_values_supported :: + [binary()] | undefined, + request_object_encryption_alg_values_supported :: + [binary()] | undefined, + request_object_encryption_enc_values_supported :: + [binary()] | undefined, + token_endpoint_auth_methods_supported :: [binary()], + token_endpoint_auth_signing_alg_values_supported :: + [binary()] | undefined, + display_values_supported :: [binary()] | undefined, + claim_types_supported :: [normal | aggregated | distributed], + claims_supported :: [binary()] | undefined, + service_documentation :: 
uri_string:uri_string() | undefined, + claims_locales_supported :: [binary()] | undefined, + ui_locales_supported :: [binary()] | undefined, + claims_parameter_supported :: boolean(), + request_parameter_supported :: boolean(), + request_uri_parameter_supported :: boolean(), + require_request_uri_registration :: boolean(), + op_policy_uri :: uri_string:uri_string() | undefined, + op_tos_uri :: uri_string:uri_string() | undefined, + revocation_endpoint :: uri_string:uri_string() | undefined, + revocation_endpoint_auth_methods_supported :: [binary()], + revocation_endpoint_auth_signing_alg_values_supported :: + [binary()] | undefined, + introspection_endpoint :: uri_string:uri_string() | undefined, + introspection_endpoint_auth_methods_supported :: [binary()], + introspection_endpoint_auth_signing_alg_values_supported :: + [binary()] | undefined, + code_challenge_methods_supported :: [binary()] | undefined, + end_session_endpoint :: uri_string:uri_string() | undefined, + require_pushed_authorization_requests :: boolean(), + pushed_authorization_request_endpoint :: uri_string:uri_string() | undefined, + authorization_signing_alg_values_supported :: [binary()] | undefined, + authorization_encryption_alg_values_supported :: [binary()] | undefined, + authorization_encryption_enc_values_supported :: [binary()] | undefined, + authorization_response_iss_parameter_supported :: boolean(), + dpop_signing_alg_values_supported :: [binary()] | undefined, + require_signed_request_object :: boolean(), + mtls_endpoint_aliases :: #{binary() => uri_string:uri_string()}, + extra_fields :: #{binary() => term()} + }. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + invalid_content_type + | {issuer_mismatch, Issuer :: binary()} + | oidcc_decode_util:error() + | oidcc_http_util:error(). + +-define(DEFAULT_CONFIG_EXPIRY, timer:minutes(15)). 
+ +-telemetry_event(#{ + event => [oidcc, load_configuration, start], + description => <<"Emitted at the start of loading the provider configuration">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_configuration, stop], + description => <<"Emitted at the end of loading the provider configuration">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_configuration, exception], + description => <<"Emitted at the end of loading the provider configuration">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_jwks, start], + description => <<"Emitted at the start of loading the provider jwks">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{jwks_uri => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_jwks, stop], + description => <<"Emitted at the end of loading the provider jwks">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{jwks_uri => uri_string:uri_string()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_jwks, exception], + description => <<"Emitted at the end of loading the provider jwks">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{jwks_uri => uri_string:uri_string()}">> +}). + +?DOC(""" +Load OpenID Configuration into a `t:oidcc_provider_configuration:t/0` record. + +## Examples + +```erlang +{ok, #oidcc_provider_configuration{}} = + oidcc_provider_configuration:load_configuration("https://accounts.google.com"). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). 
+-spec load_configuration(Issuer, Opts) -> + {ok, {Configuration :: t(), Expiry :: pos_integer()}} | {error, error()} +when + Issuer :: uri_string:uri_string(), + Opts :: opts(). +load_configuration(Issuer0, Opts) -> + Issuer = binary:list_to_bin([Issuer0]), + TelemetryOpts = #{topic => [oidcc, load_configuration], extra_meta => #{issuer => Issuer}}, + RequestOpts = maps:get(request_opts, Opts, #{}), + + RequestUrl = url_join(".well-known/openid-configuration", Issuer), + Request = {RequestUrl, []}, + + Quirks = maps:get(quirks, Opts, #{}), + % this quirk is deprecated, but we keep the support for backwards compatibility. + AllowIssuerMismatch = maps:get(allow_issuer_mismatch, Quirks, false), + + DefaultExpiry = maps:get(fallback_expiry, Opts, ?DEFAULT_CONFIG_EXPIRY), + + maybe + {ok, {{json, ConfigurationMap}, Headers}} ?= + oidcc_http_util:request(get, Request, TelemetryOpts, RequestOpts), + Expiry = oidcc_http_util:headers_to_cache_deadline(Headers, DefaultExpiry), + {ok, + #oidcc_provider_configuration{issuer = ConfigIssuer, issuer_regex = ConfigIssuerRegex} = + Configuration} ?= + decode_configuration(ConfigurationMap, #{quirks => Quirks}), + case ConfigIssuer of + Issuer -> + {ok, {Configuration, Expiry}}; + _ when is_binary(ConfigIssuerRegex) -> + case re:run(Issuer, ConfigIssuerRegex, [{capture, none}]) of + match -> + {ok, {Configuration, Expiry}}; + nomatch -> + {error, {issuer_mismatch, ConfigIssuer}} + end; + _DifferentIssuer when AllowIssuerMismatch -> {ok, {Configuration, Expiry}}; + DifferentIssuer when not AllowIssuerMismatch -> + {error, {issuer_mismatch, DifferentIssuer}} + end + else + {error, Reason} -> + {error, Reason}; + {ok, {{_Format, _Body}, _Headers}} -> + {error, invalid_content_type} + end. + +?DOC("See `load_configuration/2`."). +?DOC(#{since => <<"3.1.0">>}). +-spec load_configuration(Issuer) -> + {ok, {Configuration :: t(), Expiry :: pos_integer()}} | {error, error()} +when + Issuer :: uri_string:uri_string(). 
+load_configuration(Issuer) -> load_configuration(Issuer, #{}). + +?DOC(""" +Load JWKs into a `t:jose_jwk:key/0` record. + +## Examples + +```erlang +{ok, #jose_jwk{}} = + oidcc_provider_configuration:load_jwks("https://www.googleapis.com/oauth2/v3/certs"). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec load_jwks(JwksUri, Opts) -> + {ok, {Jwks :: jose_jwk:key(), Expiry :: pos_integer()}} | {error, term()} +when + JwksUri :: uri_string:uri_string(), + Opts :: opts(). +load_jwks(JwksUri, Opts) -> + TelemetryOpts = #{topic => [oidcc, load_jwks], extra_meta => #{jwks_uri => JwksUri}}, + RequestOpts = maps:get(request_opts, Opts, #{}), + + DefaultExpiry = maps:get(fallback_expiry, Opts, ?DEFAULT_CONFIG_EXPIRY), + + maybe + {ok, {{json, JwksBinary}, Headers}} ?= + oidcc_http_util:request(get, {JwksUri, []}, TelemetryOpts, RequestOpts), + Expiry = oidcc_http_util:headers_to_cache_deadline(Headers, DefaultExpiry), + Jwks = jose_jwk:from(JwksBinary), + {ok, {Jwks, Expiry}} + else + {error, Reason} -> {error, Reason}; + {ok, {{_Format, _Body}, _Headers}} -> {error, invalid_content_type} + end. + +?DOC(""" +Decode JSON into a `t:oidcc_provider_configuration:t/0` record. + +## Examples + +```erlang +{ok, {{"HTTP/1.1",200,"OK"}, _Headers, Body}} = + httpc:request("https://accounts.google.com/.well-known/openid-configuration"), + +{ok, DecodedJson} = your_json_lib:decode(Body), + +{ok, #oidcc_provider_configuration{}} = + oidcc_provider_configuration:decode_configuration(DecodedJson). +``` +"""). +?DOC(#{since => <<"3.1.0">>}). +-spec decode_configuration(Configuration, Opts) -> {ok, t()} | {error, error()} when + Configuration :: map(), Opts :: opts(). 
+decode_configuration(Configuration0, Opts) -> + Quirks = maps:get(quirks, Opts, #{}), + AllowUnsafeHttp = maps:get(allow_unsafe_http, Quirks, false), + IssuerRegex = maps:get(issuer_regex, Quirks, undefined), + + DocumentOverrides = maps:get(document_overrides, Quirks, #{}), + Configuration = maps:merge(Configuration0, DocumentOverrides), + + maybe + {ok, { + #{ + issuer := Issuer, + authorization_endpoint := AuthorizationEndpoint, + authorization_endpoint := AuthorizationEndpoint, + token_endpoint := TokenEndpoint, + userinfo_endpoint := UserinfoEndpoint, + jwks_uri := JwksUri, + registration_endpoint := RegistrationEndpoint, + scopes_supported := ScopesSupported, + response_types_supported := ResponseTypesSupported, + response_modes_supported := ResponseModesSupported, + grant_types_supported := GrantTypesSupported, + acr_values_supported := AcrValuesSupported, + subject_types_supported := SubjectTypesSupported, + id_token_signing_alg_values_supported := IdTokenSigningAlgValuesSupported, + id_token_encryption_alg_values_supported := IdTokenEncryptionAlgValuesSupported, + id_token_encryption_enc_values_supported := IdTokenEncryptionEncValuesSupported, + userinfo_signing_alg_values_supported := UserinfoSigningAlgValuesSupported, + userinfo_encryption_alg_values_supported := UserinfoEncryptionAlgValuesSupported, + userinfo_encryption_enc_values_supported := UserinfoEncryptionEncValuesSupported, + request_object_signing_alg_values_supported := + RequestObjectSigningAlgValuesSupported, + request_object_encryption_alg_values_supported := + RequestObjectEncryptionAlgValuesSupported, + request_object_encryption_enc_values_supported := + RequestObjectEncryptionEncValuesSupported, + token_endpoint_auth_methods_supported := TokenEndpointAuthMethodsSupported, + token_endpoint_auth_signing_alg_values_supported := + TokenEndpointAuthSigningAlgValuesSupported, + display_values_supported := DisplayValuesSupported, + claim_types_supported := ClaimTypesSupported, + 
claims_supported := ClaimsSupported, + service_documentation := ServiceDocumentation, + claims_locales_supported := ClaimsLocalesSupported, + ui_locales_supported := UiLocalesSupported, + claims_parameter_supported := ClaimsParameterSupported, + request_parameter_supported := RequestParameterSupported, + request_uri_parameter_supported := RequestUriParameterSupported, + require_request_uri_registration := RequireRequestUriRegistration, + op_policy_uri := OpPolicyUri, + op_tos_uri := OpTosUri, + revocation_endpoint := RevocationEndpoint, + revocation_endpoint_auth_methods_supported := + RevocationEndpointAuthMethodsSupported, + revocation_endpoint_auth_signing_alg_values_supported := + RevocationEndpointAuthSigningAlgValuesSupported, + introspection_endpoint := IntrospectionEndpoint, + introspection_endpoint_auth_methods_supported := + IntrospectionEndpointAuthMethodsSupported, + introspection_endpoint_auth_signing_alg_values_supported := + IntrospectionEndpointAuthSigningAlgValuesSupported, + code_challenge_methods_supported := CodeChallengeMethodsSupported, + end_session_endpoint := EndSessionEndpoint, + require_pushed_authorization_requests := RequirePushedAuthorizationRequests, + pushed_authorization_request_endpoint := PushedAuthorizationRequestEndpoint, + authorization_signing_alg_values_supported := + AuthorizationSigningAlgValuesSupported, + authorization_encryption_alg_values_supported := + AuthorizationEncryptionAlgValuesSupported, + authorization_encryption_enc_values_supported := + AuthorizationEncryptionEncValuesSupported, + authorization_response_iss_parameter_supported := + AuthorizationResponseIssParameterSupported, + dpop_signing_alg_values_supported := DpopSigningAlgValuesSupported, + require_signed_request_object := RequireSignedRequestObject, + mtls_endpoint_aliases := MtlsEndpointAliases, + tls_client_certificate_bound_access_tokens := TlsClientCertificateBoundAccessTokens + }, + ExtraFields + }} ?= + oidcc_decode_util:extract( + Configuration, 
+ [ + {required, issuer, fun oidcc_decode_util:parse_setting_uri/2}, + {required, authorization_endpoint, fun oidcc_decode_util:parse_setting_uri/2}, + {optional, token_endpoint, undefined, + fun oidcc_decode_util:parse_setting_uri/2}, + {optional, userinfo_endpoint, undefined, + case AllowUnsafeHttp of + true -> fun oidcc_decode_util:parse_setting_uri/2; + false -> fun oidcc_decode_util:parse_setting_uri_https/2 + end}, + {required, jwks_uri, fun oidcc_decode_util:parse_setting_uri/2}, + {optional, registration_endpoint, undefined, + fun oidcc_decode_util:parse_setting_uri/2}, + {required, scopes_supported, fun parse_scopes_supported/2}, + {required, response_types_supported, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, response_modes_supported, [<<"query">>, <<"fragment">>], + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, grant_types_supported, [<<"authorization_code">>, <<"implicit">>], + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, acr_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {required, subject_types_supported, fun parse_subject_types_supported/2}, + {required, id_token_signing_alg_values_supported, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, id_token_encryption_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, id_token_encryption_enc_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, userinfo_signing_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, userinfo_encryption_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, userinfo_encryption_enc_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, request_object_signing_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, 
+ {optional, request_object_encryption_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, request_object_encryption_enc_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, token_endpoint_auth_methods_supported, [<<"client_secret_basic">>], + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, token_endpoint_auth_signing_alg_values_supported, undefined, + fun parse_token_signing_alg_values_no_none/2}, + {optional, display_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, claim_types_supported, [normal], fun parse_claim_types_supported/2}, + {optional, claims_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, service_documentation, undefined, + fun oidcc_decode_util:parse_setting_uri/2}, + {optional, claims_locales_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, ui_locales_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, claims_parameter_supported, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, request_parameter_supported, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, request_uri_parameter_supported, true, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, require_request_uri_registration, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, op_policy_uri, undefined, fun oidcc_decode_util:parse_setting_uri/2}, + {optional, op_tos_uri, undefined, fun oidcc_decode_util:parse_setting_uri/2}, + {optional, revocation_endpoint, undefined, + fun oidcc_decode_util:parse_setting_uri/2}, + {optional, revocation_endpoint_auth_methods_supported, + [<<"client_secret_basic">>], + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, revocation_endpoint_auth_signing_alg_values_supported, undefined, + fun 
parse_token_signing_alg_values_no_none/2}, + {optional, introspection_endpoint, undefined, + fun oidcc_decode_util:parse_setting_uri/2}, + {optional, introspection_endpoint_auth_methods_supported, + [<<"client_secret_basic">>], + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, introspection_endpoint_auth_signing_alg_values_supported, undefined, + fun parse_token_signing_alg_values_no_none/2}, + {optional, code_challenge_methods_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, end_session_endpoint, undefined, + case AllowUnsafeHttp of + true -> fun oidcc_decode_util:parse_setting_uri/2; + false -> fun oidcc_decode_util:parse_setting_uri_https/2 + end}, + {optional, require_pushed_authorization_requests, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, pushed_authorization_request_endpoint, undefined, + case AllowUnsafeHttp of + true -> fun oidcc_decode_util:parse_setting_uri/2; + false -> fun oidcc_decode_util:parse_setting_uri_https/2 + end}, + {optional, authorization_signing_alg_values_supported, undefined, + fun parse_token_signing_alg_values_no_none/2}, + {optional, authorization_encryption_alg_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, authorization_encryption_enc_values_supported, undefined, + fun oidcc_decode_util:parse_setting_binary_list/2}, + {optional, authorization_response_iss_parameter_supported, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, dpop_signing_alg_values_supported, undefined, + fun parse_token_signing_alg_values_no_none/2}, + {optional, require_signed_request_object, false, + fun oidcc_decode_util:parse_setting_boolean/2}, + {optional, mtls_endpoint_aliases, #{}, + case AllowUnsafeHttp of + true -> fun oidcc_decode_util:parse_setting_uri_map/2; + false -> fun oidcc_decode_util:parse_setting_uri_https_map/2 + end}, + {optional, tls_client_certificate_bound_access_tokens, false, + fun 
oidcc_decode_util:parse_setting_boolean/2} + ], + #{} + ), + {ok, #oidcc_provider_configuration{ + issuer = Issuer, + issuer_regex = IssuerRegex, + authorization_endpoint = AuthorizationEndpoint, + token_endpoint = TokenEndpoint, + userinfo_endpoint = UserinfoEndpoint, + jwks_uri = JwksUri, + registration_endpoint = RegistrationEndpoint, + scopes_supported = ScopesSupported, + response_types_supported = ResponseTypesSupported, + response_modes_supported = ResponseModesSupported, + grant_types_supported = GrantTypesSupported, + acr_values_supported = AcrValuesSupported, + subject_types_supported = SubjectTypesSupported, + id_token_signing_alg_values_supported = + IdTokenSigningAlgValuesSupported, + id_token_encryption_alg_values_supported = + IdTokenEncryptionAlgValuesSupported, + id_token_encryption_enc_values_supported = + IdTokenEncryptionEncValuesSupported, + userinfo_signing_alg_values_supported = + UserinfoSigningAlgValuesSupported, + userinfo_encryption_alg_values_supported = + UserinfoEncryptionAlgValuesSupported, + userinfo_encryption_enc_values_supported = + UserinfoEncryptionEncValuesSupported, + request_object_signing_alg_values_supported = + RequestObjectSigningAlgValuesSupported, + request_object_encryption_alg_values_supported = + RequestObjectEncryptionAlgValuesSupported, + request_object_encryption_enc_values_supported = + RequestObjectEncryptionEncValuesSupported, + token_endpoint_auth_methods_supported = + TokenEndpointAuthMethodsSupported, + token_endpoint_auth_signing_alg_values_supported = + TokenEndpointAuthSigningAlgValuesSupported, + display_values_supported = DisplayValuesSupported, + claim_types_supported = ClaimTypesSupported, + claims_supported = ClaimsSupported, + service_documentation = ServiceDocumentation, + claims_locales_supported = ClaimsLocalesSupported, + ui_locales_supported = UiLocalesSupported, + claims_parameter_supported = ClaimsParameterSupported, + request_parameter_supported = RequestParameterSupported, + 
request_uri_parameter_supported = + RequestUriParameterSupported, + require_request_uri_registration = + RequireRequestUriRegistration, + op_policy_uri = OpPolicyUri, + op_tos_uri = OpTosUri, + revocation_endpoint = RevocationEndpoint, + revocation_endpoint_auth_methods_supported = + RevocationEndpointAuthMethodsSupported, + revocation_endpoint_auth_signing_alg_values_supported = + RevocationEndpointAuthSigningAlgValuesSupported, + introspection_endpoint = IntrospectionEndpoint, + introspection_endpoint_auth_methods_supported = + IntrospectionEndpointAuthMethodsSupported, + introspection_endpoint_auth_signing_alg_values_supported = + IntrospectionEndpointAuthSigningAlgValuesSupported, + code_challenge_methods_supported = + CodeChallengeMethodsSupported, + end_session_endpoint = EndSessionEndpoint, + require_pushed_authorization_requests = RequirePushedAuthorizationRequests, + pushed_authorization_request_endpoint = PushedAuthorizationRequestEndpoint, + authorization_signing_alg_values_supported = AuthorizationSigningAlgValuesSupported, + authorization_encryption_alg_values_supported = + AuthorizationEncryptionAlgValuesSupported, + authorization_encryption_enc_values_supported = + AuthorizationEncryptionEncValuesSupported, + authorization_response_iss_parameter_supported = + AuthorizationResponseIssParameterSupported, + dpop_signing_alg_values_supported = DpopSigningAlgValuesSupported, + require_signed_request_object = RequireSignedRequestObject, + mtls_endpoint_aliases = MtlsEndpointAliases, + tls_client_certificate_bound_access_tokens = TlsClientCertificateBoundAccessTokens, + extra_fields = ExtraFields + }} + end. + +?DOC("See `decode_configuration/2`."). +?DOC(#{since => <<"3.0.0">>}). +-spec decode_configuration(Configuration) -> {ok, t()} | {error, error()} when + Configuration :: map(). +decode_configuration(Configuration) -> decode_configuration(Configuration, #{}). 
+ +-spec parse_scopes_supported(Setting :: term(), Field :: atom()) -> + {ok, [binary()]} | {error, error()}. +parse_scopes_supported(Setting, Field) -> + case oidcc_decode_util:parse_setting_binary_list(Setting, Field) of + {ok, Scopes} -> + case lists:member(<<"openid">>, Scopes) of + true -> + {ok, Scopes}; + false -> + {error, {invalid_config_property, {scopes_including_openid, Field}}} + end; + {error, Reason} -> + {error, Reason} + end. + +-spec parse_subject_types_supported(Setting :: term(), Field :: atom()) -> + {ok, [binary()]} | {error, error()}. +parse_subject_types_supported(Setting, Field) -> + oidcc_decode_util:parse_setting_list_enum( + Setting, + Field, + fun + (<<"pairwise">>) -> + {ok, pairwise}; + (<<"public">>) -> + {ok, public}; + (_SubjectType) -> + error + end + ). + +-spec parse_token_signing_alg_values_no_none(Setting :: term(), Field :: atom()) -> + {ok, [binary()]} | {error, error()}. +parse_token_signing_alg_values_no_none(Setting, Field) -> + case oidcc_decode_util:parse_setting_binary_list(Setting, Field) of + {ok, SigningAlgValues} -> + case + lists:any( + fun + (<<"none">>) -> + true; + (_) -> + false + end, + SigningAlgValues + ) + of + false -> + {ok, SigningAlgValues}; + true -> + {error, {invalid_config_property, {alg_no_none, Field}}} + end; + {error, Reason} -> + {error, Reason} + end. + +-spec parse_claim_types_supported(Setting :: term(), Field :: atom()) -> + {ok, [binary()]} | {error, error()}. +parse_claim_types_supported(Setting, Field) -> + oidcc_decode_util:parse_setting_list_enum( + Setting, + Field, + fun + (<<"normal">>) -> + {ok, normal}; + (<<"aggregated">>) -> + {ok, aggregated}; + (<<"distributed">>) -> + {ok, distributed}; + (_ClaimType) -> + error + end + ). + +-spec url_join(RefURI :: uri_string:uri_string(), BaseURI :: uri_string:uri_string()) -> + uri_string:uri_string(). 
+url_join(RefURI, BaseURI) -> + BaseURIBinary = iolist_to_binary(BaseURI), + case binary_part(BaseURIBinary, byte_size(BaseURIBinary) - 1, 1) of + <<"/">> -> uri_string:resolve(RefURI, BaseURI); + _ -> uri_string:resolve(RefURI, [BaseURI, "/"]) + end. diff --git a/deps/oidcc/src/oidcc_provider_configuration_worker.erl b/deps/oidcc/src/oidcc_provider_configuration_worker.erl new file mode 100644 index 0000000..2f4eb4b --- /dev/null +++ b/deps/oidcc/src/oidcc_provider_configuration_worker.erl @@ -0,0 +1,442 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_provider_configuration_worker). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +OIDC Config Provider Worker + +Loads and continuously refreshes the OIDC configuration and JWKs. + +The worker supports reading values concurrently via an ETS table. To use +this performance improvement, the worker has to be registered with a +`{local, Name}`. No name / `{global, Name}` and `{via, RegModule, ViaName}` +are not supported. +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-behaviour(gen_server). + +-include("oidcc_provider_configuration.hrl"). + +-include_lib("jose/include/jose_jwk.hrl"). + +-export([get_jwks/1]). +-export([get_provider_configuration/1]). +-export([handle_call/3]). +-export([handle_cast/2]). +-export([handle_continue/2]). +-export([handle_info/2]). +-export([init/1]). +-export([refresh_configuration/1]). +-export([refresh_jwks/1]). +-export([refresh_jwks_for_unknown_kid/2]). +-export([start_link/1]). + +-export_type([opts/0]). + +?DOC(""" +Configuration Options + +* `name` - The gen_server name of the provider. +* `issuer` - The issuer URI. +* `provider_configuration_opts` - Options for the provider configuration + fetching. +* `backoff_min` - The minimum backoff interval in ms (default: `1_000`). +* `backoff_max` - The maximum backoff interval in ms (default: `30_000`). 
+* `backoff_type` - The backoff strategy, `stop` for no backoff and to stop, + `exponential` for exponential, `random` for random, and `random_exponential` + for random exponential (default: `stop`). +"""). +?DOC(#{since => <<"3.0.0">>}). +-type opts() :: #{ + name => gen_server:server_name(), + issuer := uri_string:uri_string(), + provider_configuration_opts => oidcc_provider_configuration:opts(), + backoff_min => oidcc_backoff:min(), + backoff_max => oidcc_backoff:max(), + backoff_type => oidcc_backoff:type() +}. + +-record(state, { + provider_configuration = undefined :: #oidcc_provider_configuration{} | undefined, + jwks = undefined :: jose_jwk:key() | undefined, + issuer :: uri_string:uri_string(), + provider_configuration_opts :: oidcc_provider_configuration:opts(), + configuration_refresh_timer = undefined :: timer:tref() | undefined, + jwks_refresh_timer = undefined :: timer:tref() | undefined, + ets_table = undefined :: ets:table() | undefined, + backoff_min = 1000 :: oidcc_backoff:min(), + backoff_max = 30000 :: oidcc_backoff:max(), + backoff_type = stop :: oidcc_backoff:type(), + backoff_state = undefined :: oidcc_backoff:state() | undefined +}). + +-type state() :: #state{}. + +?DOC(""" +Start Configuration Provider. + +## Examples + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">>, + name => {local, google_config_provider} + }). +``` + +```erlang +%% ... +-behaviour(supervisor). + +%% ... + +init(_opts) -> + SupFlags = #{strategy => one_for_one, intensity => 1, period => 5}, + ChildSpecs = [#{id => google_config_provider, + start => {oidcc_provider_configuration_worker, + start_link, + [ + #{issuer => <<"https://accounts.google.com">>} + ]}, + restart => permanent, + type => worker, + modules => [oidcc_provider_configuration_worker]}], + {ok, {SupFlags, ChildSpecs}}. +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec start_link(Opts :: opts()) -> gen_server:start_ret(). 
+start_link(Opts) -> + case maps:get(name, Opts, undefined) of + undefined -> + gen_server:start_link(?MODULE, Opts, []); + Name -> + gen_server:start_link(Name, ?MODULE, Opts, []) + end. + +?DOC(false). +init(Opts) -> + EtsTable = register_ets_table(Opts), + maybe + {ok, Issuer} ?= get_issuer(Opts), + ProviderConfigurationOpts = maps:get(provider_configuration_opts, Opts, #{}), + {ok, + #state{ + issuer = Issuer, + provider_configuration_opts = ProviderConfigurationOpts, + ets_table = EtsTable, + backoff_min = maps:get(backoff_min, Opts, 1000), + backoff_max = maps:get(backoff_max, Opts, 30000), + backoff_type = maps:get(backoff_type, Opts, stop) + }, + {continue, load_configuration}} + end. + +?DOC(false). +handle_call( + get_provider_configuration, _From, #state{provider_configuration = Configuration} = State +) -> + {reply, Configuration, State}; +handle_call(get_jwks, _From, #state{jwks = Jwks} = State) -> + {reply, Jwks, State}. + +?DOC(false). +handle_cast(refresh_configuration, State) -> + {noreply, State, {continue, load_configuration}}; +handle_cast(refresh_jwks, State) -> + {noreply, State, {continue, load_jwks}}; +handle_cast( + {refresh_jwks_for_unknown_kid, _Kid}, + #state{jwks = #jose_jwk{keys = {jose_jwk_set, []}}} = State +) -> + {noreply, State, {continue, load_jwks}}; +handle_cast({refresh_jwks_for_unknown_kid, Kid}, #state{jwks = Jwks} = State) -> + case has_kid(Jwks, Kid) of + false -> + {noreply, State, {continue, load_jwks}}; + true -> + {noreply, State}; + unknown -> + {noreply, State} + end. + +?DOC(false). 
+handle_continue( + load_configuration, + #state{ + issuer = Issuer, + provider_configuration = OldProviderConfiguration, + provider_configuration_opts = ProviderConfigurationOpts, + configuration_refresh_timer = RefreshTimer, + ets_table = EtsTable + } = + State +) -> + maybe_cancel_timer(RefreshTimer), + + maybe + {ok, {Configuration, Expiry}} ?= + oidcc_provider_configuration:load_configuration( + Issuer, + ProviderConfigurationOpts + ), + #oidcc_provider_configuration{jwks_uri = JwksUri} = Configuration, + {ok, NewTimer} = timer:send_after(Expiry, configuration_expired), + ok = store_in_ets(EtsTable, provider_configuration, Configuration), + NewState = State#state{ + provider_configuration = Configuration, + configuration_refresh_timer = NewTimer + }, + case OldProviderConfiguration of + undefined -> {noreply, NewState, {continue, load_jwks}}; + #oidcc_provider_configuration{jwks_uri = JwksUri} -> {noreply, NewState}; + #oidcc_provider_configuration{} -> {noreply, NewState, {continue, load_jwks}} + end + else + {error, Reason} -> handle_backoff_retry(configuration_load_failed, Reason, State) + end; +handle_continue( + load_jwks, + #state{ + provider_configuration = Configuration, + provider_configuration_opts = ProviderConfigurationOpts, + jwks_refresh_timer = OldTimer, + ets_table = EtsTable + } = + State +) -> + #oidcc_provider_configuration{jwks_uri = JwksUri} = Configuration, + + maybe_cancel_timer(OldTimer), + + maybe + {ok, {Jwks, Expiry}} ?= + oidcc_provider_configuration:load_jwks(JwksUri, ProviderConfigurationOpts), + {ok, NewTimer} = timer:send_after(Expiry, jwks_expired), + ok = store_in_ets(EtsTable, jwks, Jwks), + {noreply, State#state{ + jwks = Jwks, + jwks_refresh_timer = NewTimer, + backoff_state = undefined + }} + else + {error, Reason} -> handle_backoff_retry(jwks_load_failed, Reason, State) + end. + +?DOC(false). 
+handle_info(backoff_retry, State) -> + {noreply, State, {continue, load_configuration}}; +handle_info(configuration_expired, State) -> + {noreply, State#state{jwks_refresh_timer = undefined}, {continue, load_configuration}}; +handle_info(jwks_expired, State) -> + {noreply, State#state{jwks_refresh_timer = undefined}, {continue, load_jwks}}. + +?DOC("Get Configuration."). +-spec get_provider_configuration(Name :: gen_server:server_ref()) -> + oidcc_provider_configuration:t() | undefined. +get_provider_configuration(Name) -> + lookup_in_ets_or_call(Name, provider_configuration, get_provider_configuration). + +?DOC("Get Parsed Jwks."). +-spec get_jwks(Name :: gen_server:server_ref()) -> jose_jwk:key() | undefined. +get_jwks(Name) -> + lookup_in_ets_or_call(Name, jwks, get_jwks). + +?DOC(""" +Refresh Configuration. + +## Examples + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">> + }). + +%% Later + +oidcc_provider_configuration_worker:refresh_configuration(Pid). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec refresh_configuration(Name :: gen_server:server_ref()) -> ok. +refresh_configuration(Name) -> + refresh_configuration(Name, true). + +-spec refresh_configuration(Name :: gen_server:server_ref(), Synchronous :: boolean()) -> ok. +refresh_configuration(Name, false) -> + gen_server:cast(Name, refresh_configuration); +refresh_configuration(Name, true) -> + refresh_configuration(Name, false), + gen_server:call(Name, get_provider_configuration), + ok. + +?DOC(""" +Refresh JWKs. + +## Examples + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">> + }). + +%% Later + +oidcc_provider_configuration_worker:refresh_jwks(Pid). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec refresh_jwks(Name :: gen_server:server_ref()) -> ok. +refresh_jwks(Name) -> refresh_jwks(Name, true). 
+ +-spec refresh_jwks(Name :: gen_server:server_ref(), Synchronous :: boolean()) -> ok. +refresh_jwks(Name, false) -> + gen_server:cast(Name, refresh_jwks); +refresh_jwks(Name, true) -> + refresh_jwks(Name, false), + gen_server:call(Name, get_jwks), + ok. + +?DOC(""" +Refresh JWKs if the provided `Kid` is not matching any currently loaded keys. + +## Examples + +```erlang +{ok, Pid} = + oidcc_provider_configuration_worker:start_link(#{ + issuer => <<"https://accounts.google.com">> + }). + +oidcc_provider_configuration_worker:refresh_jwks_for_unknown_kid(Pid, <<"kid">>). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec refresh_jwks_for_unknown_kid(Name :: gen_server:server_ref(), Kid :: binary()) -> + ok. +refresh_jwks_for_unknown_kid(Name, Kid) -> + refresh_jwks_for_unknown_kid(Name, Kid, true). + +-spec refresh_jwks_for_unknown_kid( + Name :: gen_server:server_ref(), Kid :: binary(), Synchronous :: boolean() +) -> + ok. +refresh_jwks_for_unknown_kid(Name, Kid, false) -> + gen_server:cast(Name, {refresh_jwks_for_unknown_kid, Kid}); +refresh_jwks_for_unknown_kid(Name, Kid, true) -> + refresh_jwks_for_unknown_kid(Name, Kid, false), + gen_server:call(Name, get_jwks), + ok. + +-spec get_issuer(Opts :: opts()) -> {ok, binary()} | {error, issuer_required}. +get_issuer(Opts) -> + case maps:get(issuer, Opts, undefined) of + undefined -> + {error, issuer_required}; + Issuer when erlang:is_binary(Issuer) -> + {ok, Issuer} + end. + +?DOC(""" +Checking of existing kid values is a bit wonky because of partial support +in jose. See: https://github.com/potatosalad/erlang-jose/issues/28. +"""). +-spec has_kid(Jwk :: jose_jwk:key(), Kid :: binary()) -> boolean() | unknown. 
+has_kid(#jose_jwk{fields = #{<<"kid">> := Kid}}, Kid) -> + true; +has_kid(#jose_jwk{fields = #{<<"kid">> := _}}, _Kid) -> + false; +has_kid(#jose_jwk{keys = {jose_jwk_set, Keys}}, Kid) -> + lists:foldl( + fun + (_Key, Acc) when is_boolean(Acc) -> + Acc; + (Key, unknown) -> + has_kid(Key, Kid) + end, + unknown, + Keys + ). + +-spec maybe_cancel_timer(Timer :: undefined | timer:tref()) -> ok. +maybe_cancel_timer(undefined) -> + ok; +maybe_cancel_timer(TRef) -> + {ok, cancel} = timer:cancel(TRef). + +-spec store_in_ets(Table :: ets:table() | undefined, Key :: atom(), Value :: term()) -> ok. +store_in_ets(undefined, _Key, _Value) -> + ok; +store_in_ets(Table, Key, Value) -> + true = ets:insert(Table, [{Key, Value}]), + ok. + +-spec lookup_in_ets_or_call(Name :: gen_server:server_ref(), Key :: atom(), Call :: term()) -> + term(). +lookup_in_ets_or_call(Name, Key, Call) -> + maybe + {ok, TableName} ?= get_ets_table_name(Name), + [{Key, Value}] ?= ets:lookup(TableName, Key), + Value + else + %% Fall Back to synchronous gen_server lookup if ets table can't be + %% located or the value is not present yet + _ -> gen_server:call(Name, Call) + end. + +-spec get_ets_table_name(WorkerRef :: gen_server:server_ref()) -> + {ok, gen_server:server_ref()} | error. +get_ets_table_name(Name) when is_atom(Name) -> + {ok, Name}; +get_ets_table_name(_Ref) -> + error. + +-spec register_ets_table(Opts :: opts()) -> ets:table() | undefined. +register_ets_table(Opts) -> + case maps:get(name, Opts, undefined) of + {local, Name} -> + ets:new(Name, [named_table, bag, protected, {read_concurrency, true}]); + _OtherName -> + undefined + end. + +-spec handle_backoff_retry(ErrorType, Reason, State) -> + {stop, {ErrorType, Reason}, State} | {noreply, State} +when + ErrorType :: jwks_load_failed | configuration_load_failed, + Reason :: term(), + State :: state(). 
+handle_backoff_retry( + ErrorType, + Reason, + #state{ + issuer = Issuer, + backoff_min = BackoffMin, + backoff_max = BackoffMax, + backoff_type = BackoffType, + backoff_state = BackoffState + } = State +) -> + ErrorDetails = {ErrorType, Reason}, + case oidcc_backoff:handle_retry(BackoffType, BackoffMin, BackoffMax, BackoffState) of + stop -> + {stop, ErrorDetails, State}; + {Wait, NewBackoffState} -> + logger:error( + "Metadata load failed for issuer ~s. Retrying in ~w ms. Error Details: ~w", + [Issuer, Wait, ErrorDetails], + #{error => ErrorDetails} + ), + timer:send_after(Wait, backoff_retry), + {noreply, State#state{ + backoff_state = NewBackoffState + }} + end. diff --git a/deps/oidcc/src/oidcc_scope.erl b/deps/oidcc/src/oidcc_scope.erl new file mode 100644 index 0000000..8097aea --- /dev/null +++ b/deps/oidcc/src/oidcc_scope.erl @@ -0,0 +1,76 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_scope). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC("OpenID Scope Utilities"). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-export([parse/1]). +-export([query_append_scope/2]). +-export([scopes_to_bin/1]). + +-export_type([scopes/0]). +-export_type([t/0]). + +?DOC(#{since => <<"3.0.0">>}). +-type scopes() :: [nonempty_binary() | atom() | nonempty_string()]. + +?DOC(#{since => <<"3.0.0">>}). +-type t() :: binary(). + +?DOC(""" +Compose `t:scopes/0` into `t:t/0`. + +## Examples + +```erlang +<<"openid profile email">> = oidcc_scope:scopes_to_bin( + [<<"openid">>, profile, "email"]). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec scopes_to_bin(Scopes :: scopes()) -> t(). 
+scopes_to_bin(Scopes) -> + NormalizedScopes = + lists:map( + fun + (Scope) when is_binary(Scope) -> + Scope; + (Scope) when is_atom(Scope) -> + atom_to_binary(Scope, utf8); + (Scope) when is_list(Scope) -> + list_to_binary(Scope) + end, + Scopes + ), + SeparatedScopes = lists:join(<<" ">>, NormalizedScopes), + list_to_binary(SeparatedScopes). + +?DOC(false). +-spec query_append_scope(Scope, QueryList) -> QueryList when + Scope :: t() | scopes(), + QueryList :: [{unicode:chardata(), unicode:chardata() | true}]. +query_append_scope(<<>>, QueryList) -> + QueryList; +query_append_scope(Scope, QueryList) when is_binary(Scope) -> + [{<<"scope">>, Scope} | QueryList]; +query_append_scope(Scopes, QueryList) when is_list(Scopes) -> + query_append_scope(scopes_to_bin(Scopes), QueryList). + +?DOC(""" +Parse `t:t/0` into `t:scopes/0`. + +## Examples + +```erlang +[<<"openid">>, <<"profile">>] = oidcc_scope:parse(<<"openid profile">>). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec parse(Scope :: t()) -> scopes(). +parse(Scope) -> + binary:split(Scope, [<<" ">>], [trim_all, global]). diff --git a/deps/oidcc/src/oidcc_token.erl b/deps/oidcc/src/oidcc_token.erl new file mode 100644 index 0000000..8280ef7 --- /dev/null +++ b/deps/oidcc/src/oidcc_token.erl @@ -0,0 +1,1379 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_token). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +Facilitate OpenID Code/Token Exchanges. + +## Records + +To use the records, import the definition: + +```erlang +-include_lib(["oidcc/include/oidcc_token.hrl"]). +``` + +## Telemetry + +See [`Oidcc.Token`](`m:'Elixir.Oidcc.Token'`). +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). +-include("oidcc_token.hrl"). + +-include_lib("jose/include/jose_jwe.hrl"). +-include_lib("jose/include/jose_jwk.hrl"). 
+-include_lib("jose/include/jose_jws.hrl"). +-include_lib("jose/include/jose_jwt.hrl"). + +-export([client_credentials/2]). +-export([jwt_profile/4]). +-export([refresh/3]). +-export([retrieve/3]). +-export([validate_jarm/3]). +-export([validate_id_token/3]). +-export([validate_jwt/3]). +-export([authorization_headers/4]). +-export([authorization_headers/5]). + +-export_type([access/0]). +-export_type([authorization_headers_opts/0]). +-export_type([client_credentials_opts/0]). +-export_type([error/0]). +-export_type([id/0]). +-export_type([jwt_profile_opts/0]). +-export_type([refresh/0]). +-export_type([refresh_opts/0]). +-export_type([refresh_opts_no_sub/0]). +-export_type([retrieve_opts/0]). +-export_type([validate_jarm_opts/0]). +-export_type([validate_jwt_opts/0]). +-export_type([t/0]). + +?DOC(""" +ID Token Wrapper. + +## Fields + +* `token` - The retrieved token. +* `claims` - Unpacked claims of the verified token. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type id() :: #oidcc_token_id{token :: binary(), claims :: oidcc_jwt_util:claims()}. + +?DOC(""" +Access Token Wrapper. + +## Fields + +* `token` - The retrieved token. +* `expires` - Number of seconds the token is valid. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type access() :: + #oidcc_token_access{token :: binary(), expires :: pos_integer() | undefined, type :: binary()}. + +?DOC(""" +Refresh Token Wrapper. + +## Fields + +* `token` - The retrieved token. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type refresh() :: #oidcc_token_refresh{token :: binary()}. + +?DOC(""" +Token Response Wrapper. + +## Fields + +* `id` - `t:id/0`. +* `access` - `t:access/0`. +* `refresh` - `t:refresh/0`. +* `scope` - `t:oidcc_scope:scopes/0`. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type t() :: + #oidcc_token{ + id :: oidcc_token:id() | none, + access :: oidcc_token:access() | none, + refresh :: oidcc_token:refresh() | none, + scope :: oidcc_scope:scopes() + }. + +?DOC(""" +Options for retrieving a token. 
+ +See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.3. + +## Fields + +* `pkce_verifier` - PKCE verifier (random string previously given to + `m:oidcc_authorization`), see + https://datatracker.ietf.org/doc/html/rfc7636#section-4.1. +* `require_pkce` - whether to require PKCE when getting the token. +* `nonce` - Nonce to check. +* `scope` - Scope to store with the token. +* `refresh_jwks` - How to handle tokens with an unknown `kid`. + See `t:oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun/0`. +* `redirect_uri` - Redirect URI given to `oidcc_authorization:create_redirect_url/2`. +* `dpop_nonce` - if using DPoP, the `nonce` value to use in the proof claim. +* `trusted_audiences` - if present, a list of additional audience values to + accept. Defaults to `any` which allows any additional values. +* `validate_azp` - if `client_id`, validate that the `azp` claim matches the + client id. If `any`, skip the validation. Defaults to `client_id`. If a + binary or a list of binaries is given, validate that the `azp` claim matches + one of those. +* `token_request_claims` - Additional claims to use with the token request. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type retrieve_opts() :: + #{ + pkce_verifier => binary(), + require_pkce => boolean(), + nonce => binary() | any, + scope => oidcc_scope:scopes(), + preferred_auth_methods => [oidcc_auth_util:auth_method(), ...], + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + redirect_uri => uri_string:uri_string(), + request_opts => oidcc_http_util:request_opts(), + url_extension => oidcc_http_util:query_params(), + body_extension => oidcc_http_util:query_params(), + dpop_nonce => binary(), + trusted_audiences => [binary()] | any, + validate_azp => binary() | [binary()] | client_id | any, + token_request_claims => #{binary() => binary() | integer()} + }. + +?DOC("See `t:refresh_opts_no_sub/0`."). +?DOC(#{since => <<"3.0.0">>}). 
+-type refresh_opts_no_sub() :: + #{ + scope => oidcc_scope:scopes(), + preferred_auth_methods => [oidcc_auth_util:auth_method(), ...], + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + request_opts => oidcc_http_util:request_opts(), + url_extension => oidcc_http_util:query_params(), + body_extension => oidcc_http_util:query_params(), + dpop_nonce => binary(), + trusted_audiences => [binary()] | any, + validate_azp => binary() | [binary()] | client_id | any, + token_request_claims => #{binary() => binary() | integer()} + }. + +?DOC(#{since => <<"3.0.0">>}). +-type refresh_opts() :: + #{ + scope => oidcc_scope:scopes(), + preferred_auth_methods => [oidcc_auth_util:auth_method(), ...], + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + expected_subject := binary(), + request_opts => oidcc_http_util:request_opts(), + url_extension => oidcc_http_util:query_params(), + body_extension => oidcc_http_util:query_params(), + dpop_nonce => binary(), + trusted_audiences => [binary()] | any, + validate_azp => binary() | [binary()] | client_id | any, + token_request_claims => #{binary() => binary() | integer()} + }. + +?DOC(""" +Options for refreshing a token. + +See https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.3. + +## Fields + +* `scope` - Scope to store with the token. +* `refresh_jwks` - How to handle tokens with an unknown `kid`. + See `t:oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun/0`. +* `expected_subject` - `sub` of the original token. +"""). +?DOC(#{since => <<"3.2.0">>}). +-type validate_jarm_opts() :: + #{ + trusted_audiences => [binary()] | any + }. + +?DOC(#{since => <<"3.0.0">>}). +-type jwt_profile_opts() :: #{ + scope => oidcc_scope:scopes(), + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + request_opts => oidcc_http_util:request_opts(), + kid => binary(), + url_extension => oidcc_http_util:query_params(), + body_extension => oidcc_http_util:query_params() +}. 
+ +?DOC(#{since => <<"3.0.0">>}). +-type client_credentials_opts() :: #{ + scope => oidcc_scope:scopes(), + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + request_opts => oidcc_http_util:request_opts(), + url_extension => oidcc_http_util:query_params(), + body_extension => oidcc_http_util:query_params() +}. + +?DOC(#{since => <<"3.0.0">>}). +-type authorization_headers_opts() :: #{ + dpop_nonce => binary() +}. + +?DOC(#{since => <<"3.2.0">>}). +-type validate_jwt_opts() :: + #{ + signing_algs => [binary()] | undefined, + encryption_algs => [binary()] | undefined, + encryption_encs => [binary()] | undefined, + trusted_audiences => [binary()] | any, + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun() + }. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + {missing_claim, MissingClaim :: binary(), Claims :: oidcc_jwt_util:claims()} + | pkce_verifier_required + | no_supported_auth_method + | bad_access_token_hash + | sub_invalid + | token_expired + | token_not_yet_valid + | {none_alg_used, Token :: t()} + | {missing_claim, ExpClaim :: {binary(), term()}, Claims :: oidcc_jwt_util:claims()} + | {grant_type_not_supported, + authorization_code | refresh_token | jwt_bearer | client_credentials} + | {invalid_property, { + Field :: id_token | refresh_token | access_token | expires_in | scopes, GivenValue :: term() + }} + | no_supported_code_challenge + | oidcc_jwt_util:error() + | oidcc_http_util:error(). + +-telemetry_event(#{ + event => [oidcc, request_token, start], + description => <<"Emitted at the start of requesting a code token">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). 
%% Telemetry span events emitted by the token grant flows in this module
%% (authorization code, refresh, JWT profile, client credentials).
-telemetry_event(#{
    event => [oidcc, request_token, stop],
    description => <<"Emitted at the end of requesting a code token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, request_token, exception],
    description => <<"Emitted at the end of requesting a code token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, refresh_token, start],
    description => <<"Emitted at the start of refreshing a token">>,
    measurements => <<"#{system_time => non_neg_integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, refresh_token, stop],
    description => <<"Emitted at the end of refreshing a token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, refresh_token, exception],
    description => <<"Emitted at the end of refreshing a token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, jwt_profile_token, start],
    description => <<"Emitted at the start of exchanging a JWT profile token">>,
    measurements => <<"#{system_time => non_neg_integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, jwt_profile_token, stop],
    description => <<"Emitted at the end of exchanging a JWT profile token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, jwt_profile_token, exception],
    description => <<"Emitted at the end of exchanging a JWT profile token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, client_credentials, start],
    description => <<"Emitted at the start of exchanging a client credentials token">>,
    measurements => <<"#{system_time => non_neg_integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, client_credentials, stop],
    description => <<"Emitted at the end of exchanging a client credentials token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

-telemetry_event(#{
    event => [oidcc, client_credentials, exception],
    description => <<"Emitted at the end of exchanging a client credentials token">>,
    measurements => <<"#{duration => integer(), monotonic_time => integer()}">>,
    metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">>
}).

?DOC("""
Retrieve the token using the authcode received before and directly validate
the result.

The authcode was sent to the local endpoint by the OpenId Connect provider,
using redirects.

For a high level interface using `m:oidcc_provider_configuration_worker`
see `oidcc:retrieve_token/5`.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

%% Get AuthCode from Redirect

{ok, #oidcc_token{}} =
    oidcc:retrieve(AuthCode, ClientContext, #{
        redirect_uri => <<"https://example.com/callback">>}).
```
""").
?DOC(#{since => <<"3.0.0">>}).
-spec retrieve(AuthCode, ClientContext, Opts) ->
    {ok, t()} | {error, error()}
when
    AuthCode :: binary(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: retrieve_opts().
retrieve(AuthCode, ClientContext, Opts) ->
    #oidcc_client_context{
        provider_configuration =
            #oidcc_provider_configuration{
                issuer = Issuer,
                grant_types_supported = GrantTypesSupported
            },
        client_id = ClientId
    } = ClientContext,

    %% Only attempt the exchange when the provider advertises the grant.
    case lists:member(<<"authorization_code">>, GrantTypesSupported) of
        false ->
            {error, {grant_type_not_supported, authorization_code}};
        true ->
            BodyParams = [
                {<<"grant_type">>, <<"authorization_code">>},
                {<<"code">>, AuthCode},
                {<<"redirect_uri">>, maps:get(redirect_uri, Opts)}
            ],
            Telemetry = #{
                topic => [oidcc, request_token],
                extra_meta => #{issuer => Issuer, client_id => ClientId}
            },
            maybe
                {ok, TokenResponse} ?=
                    retrieve_a_token(BodyParams, ClientContext, Opts, Telemetry, true),
                extract_response(TokenResponse, ClientContext, Opts)
            end
    end.

?DOC("""
Validate the JARM response, returning the valid claims as a map.

The response was sent to the local endpoint by the OpenId Connect provider,
using redirects.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

%% Get Response from Redirect

{ok, #{<<"code">> := AuthCode}} =
    oidcc:validate_jarm(Response, ClientContext, #{}),

{ok, #oidcc_token{}} = oidcc:retrieve(AuthCode, ClientContext,
    #{redirect_uri => <<"https://redirect.example/">>}).
```
""").
+?DOC(#{since => <<"3.2.0">>}). +-spec validate_jarm(Response, ClientContext, Opts) -> + {ok, oidcc_jwt_util:claims()} | {error, error()} +when + Response :: binary(), + ClientContext :: oidcc_client_context:t(), + Opts :: validate_jarm_opts(). +validate_jarm(Response, ClientContext, Opts) -> + #oidcc_client_context{ + provider_configuration = Configuration, + client_id = ClientId, + client_secret = ClientSecret, + client_jwks = ClientJwks, + jwks = Jwks0 + } = ClientContext, + #oidcc_provider_configuration{ + issuer = Issuer, + authorization_signing_alg_values_supported = SigningAlgSupported, + authorization_encryption_alg_values_supported = EncryptionAlgSupported, + authorization_encryption_enc_values_supported = EncryptionEncSupported + } = + Configuration, + + Jwks1 = + case ClientJwks of + none -> Jwks0; + #jose_jwk{} -> oidcc_jwt_util:merge_jwks(Jwks0, ClientJwks) + end, + + Jwks2 = oidcc_jwt_util:merge_client_secret_oct_keys(Jwks1, SigningAlgSupported, ClientSecret), + Jwks = oidcc_jwt_util:merge_client_secret_oct_keys( + Jwks2, EncryptionAlgSupported, ClientSecret + ), + ExpClaims = [{<<"iss">>, Issuer}], + TrustedAudiences = maps:get(trusted_audiences, Opts, any), + %% https://openid.net/specs/oauth-v2-jarm-final.html#name-processing-rules + %% 1. decrypt if necessary + %% 2. validate <<"iss">> claim + %% 3. validate <<"aud">> claim + %% 4. validate <<"exp">> claim + %% 5. validate signature (valid, not <<"none">> alg) + %% 6. continue processing + maybe + {ok, {#jose_jwt{fields = Claims}, Jws}} ?= + oidcc_jwt_util:decrypt_and_verify( + Response, Jwks, SigningAlgSupported, EncryptionAlgSupported, EncryptionEncSupported + ), + ok ?= oidcc_jwt_util:verify_claims(Claims, ExpClaims), + ok ?= verify_aud_claim(Claims, ClientId, TrustedAudiences), + ok ?= verify_exp_claim(Claims), + ok ?= verify_nbf_claim(Claims), + ok ?= oidcc_jwt_util:verify_not_none_alg(Jws), + {ok, Claims} + end. 
?DOC("""
Refresh Token

For a high level interface using `m:oidcc_provider_configuration_worker`
see `oidcc:refresh_token/5`.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

%% Get AuthCode from Redirect

{ok, Token} =
    oidcc_token:retrieve(AuthCode, ClientContext, #{
        redirect_uri => <<"https://example.com/callback">>}).

%% Later

{ok, #oidcc_token{}} =
    oidcc_token:refresh(Token,
                        ClientContext,
                        #{expected_subject => <<"sub_from_initial_id_token">>}).
```
""").
?DOC(#{since => <<"3.0.0">>}).
-spec refresh
    (RefreshToken, ClientContext, Opts) ->
        {ok, t()} | {error, error()}
    when
        RefreshToken :: binary(),
        ClientContext :: oidcc_client_context:t(),
        Opts :: refresh_opts();
    (Token, ClientContext, Opts) ->
        {ok, t()} | {error, error()}
    when
        Token :: oidcc_token:t(),
        ClientContext :: oidcc_client_context:t(),
        Opts :: refresh_opts_no_sub().
%% When given a full token record, reuse its refresh token and pin the
%% expected subject to the `sub` of the original ID token.
refresh(
    #oidcc_token{
        refresh = #oidcc_token_refresh{token = RefreshToken},
        id = #oidcc_token_id{claims = #{<<"sub">> := ExpectedSubject}}
    },
    ClientContext,
    Opts
) ->
    refresh(RefreshToken, ClientContext, maps:put(expected_subject, ExpectedSubject, Opts));
refresh(RefreshToken, ClientContext, Opts) ->
    #oidcc_client_context{
        provider_configuration =
            #oidcc_provider_configuration{
                issuer = Issuer,
                grant_types_supported = GrantTypesSupported
            },
        client_id = ClientId
    } = ClientContext,

    case lists:member(<<"refresh_token">>, GrantTypesSupported) of
        false ->
            {error, {grant_type_not_supported, refresh_token}};
        true ->
            ExpectedSub = maps:get(expected_subject, Opts),
            Scope = maps:get(scope, Opts, []),
            Body0 = [
                {<<"refresh_token">>, RefreshToken},
                {<<"grant_type">>, <<"refresh_token">>}
            ],
            Body = oidcc_scope:query_append_scope(Scope, Body0),
            Telemetry = #{
                topic => [oidcc, refresh_token],
                extra_meta => #{issuer => Issuer, client_id => ClientId}
            },
            maybe
                {ok, TokenResponse} ?=
                    retrieve_a_token(Body, ClientContext, Opts, Telemetry, true),
                {ok, TokenRecord} ?=
                    extract_response(TokenResponse, ClientContext, maps:put(nonce, any, Opts)),
                %% The refreshed ID token must belong to the same subject.
                case TokenRecord of
                    #oidcc_token{id = #oidcc_token_id{claims = #{<<"sub">> := ExpectedSub}}} ->
                        {ok, TokenRecord};
                    #oidcc_token{} ->
                        {error, sub_invalid}
                end
            end
    end.

?DOC("""
Retrieve JSON Web Token (JWT) Profile Token

See https://datatracker.ietf.org/doc/html/rfc7523#section-4

For a high level interface using `m:oidcc_provider_configuration_worker`
see `oidcc:jwt_profile_token/6`.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

{ok, KeyJson} = file:read_file("jwt-profile.json"),
KeyMap = jose:decode(KeyJson),
Key = jose_jwk:from_pem(maps:get(<<"key">>, KeyMap)),

{ok, #oidcc_token{}} =
    oidcc_token:jwt_profile(<<"subject">>,
                            ClientContext,
                            Key,
                            #{scope => [<<"scope">>],
                              kid => maps:get(<<"keyId">>, KeyMap)}).
```
""").
?DOC(#{since => <<"3.0.0">>}).
-spec jwt_profile(Subject, ClientContext, Jwk, Opts) -> {ok, t()} | {error, error()} when
    Subject :: binary(),
    ClientContext :: oidcc_client_context:t(),
    Jwk :: jose_jwk:key(),
    Opts :: jwt_profile_opts().
jwt_profile(Subject, ClientContext, Jwk, Opts) ->
    #oidcc_client_context{
        provider_configuration =
            #oidcc_provider_configuration{
                issuer = Issuer,
                grant_types_supported = GrantTypesSupported
            },
        client_id = ClientId
    } = ClientContext,

    case lists:member(<<"urn:ietf:params:oauth:grant-type:jwt-bearer">>, GrantTypesSupported) of
        false ->
            {error, {grant_type_not_supported, jwt_bearer}};
        true ->
            IssuedAt = os:system_time(seconds),
            %% The signed assertion is deliberately short-lived (60 seconds).
            AssertionClaims = #{
                <<"iss">> => Subject,
                <<"sub">> => Subject,
                <<"aud">> => [Issuer],
                <<"exp">> => IssuedAt + 60,
                <<"iat">> => IssuedAt,
                <<"nbf">> => IssuedAt
            },
            AssertionJwt = jose_jwt:from(AssertionClaims),

            BaseJws = #{
                <<"alg">> => <<"RS256">>,
                <<"typ">> => <<"JWT">>
            },
            AssertionJws =
                case maps:get(kid, Opts, none) of
                    none -> BaseJws;
                    Kid -> maps:put(<<"kid">>, Kid, BaseJws)
                end,

            {_Jws, Assertion} = jose_jws:compact(jose_jwt:sign(Jwk, AssertionJws, AssertionJwt)),

            Scope = maps:get(scope, Opts, []),
            Body0 = [
                {<<"assertion">>, Assertion},
                {<<"grant_type">>, <<"urn:ietf:params:oauth:grant-type:jwt-bearer">>}
            ],
            Body = oidcc_scope:query_append_scope(Scope, Body0),
            Telemetry = #{
                topic => [oidcc, jwt_profile_token],
                extra_meta => #{issuer => Issuer, client_id => ClientId}
            },
            maybe
                {ok, TokenResponse} ?=
                    retrieve_a_token(Body, ClientContext, Opts, Telemetry, false),
                {ok, TokenRecord} ?=
                    extract_response(TokenResponse, ClientContext, maps:put(nonce, any, Opts)),
                %% An ID token is optional for this grant; when present its
                %% subject must match the requested one.
                case TokenRecord of
                    #oidcc_token{id = none} ->
                        {ok, TokenRecord};
                    #oidcc_token{id = #oidcc_token_id{claims = #{<<"sub">> := Subject}}} ->
                        {ok, TokenRecord};
                    #oidcc_token{} ->
                        {error, sub_invalid}
                end
            end
    end.

?DOC("""
Retrieve Client Credential Token

See https://datatracker.ietf.org/doc/html/rfc6749#section-1.3.4

For a high level interface using `m:oidcc_provider_configuration_worker`
see `oidcc:client_credentials_token/4`.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

{ok, #oidcc_token{}} =
    oidcc_token:client_credentials(ClientContext,
                                   #{scope => [<<"scope">>]}).
```
""").
?DOC(#{since => <<"3.0.0">>}).
-spec client_credentials(ClientContext, Opts) -> {ok, t()} | {error, error()} when
    ClientContext :: oidcc_client_context:authenticated_t(),
    Opts :: client_credentials_opts().
+client_credentials(ClientContext, Opts) -> + #oidcc_client_context{ + provider_configuration = Configuration, + client_id = ClientId + } = ClientContext, + #oidcc_provider_configuration{issuer = Issuer, grant_types_supported = GrantTypesSupported} = + Configuration, + + case lists:member(<<"client_credentials">>, GrantTypesSupported) of + true -> + Scope = maps:get(scope, Opts, []), + QueryString = [{<<"grant_type">>, <<"client_credentials">>}], + QueryString1 = oidcc_scope:query_append_scope(Scope, QueryString), + + TelemetryOpts = #{ + topic => [oidcc, client_credentials], + extra_meta => #{issuer => Issuer, client_id => ClientId} + }, + + maybe + {ok, Token} ?= + retrieve_a_token(QueryString1, ClientContext, Opts, TelemetryOpts, true), + extract_response(Token, ClientContext, maps:put(nonce, any, Opts)) + end; + false -> + {error, {grant_type_not_supported, client_credentials}} + end. + +-spec extract_response(TokenResponseBody, ClientContext, Opts) -> + {ok, t()} | {error, error()} +when + TokenResponseBody :: map(), + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts(). +extract_response(TokenResponseBody, ClientContext, Opts) -> + maybe + {ok, Scopes} ?= extract_scope(TokenResponseBody, Opts), + {ok, AccessExpire} ?= extract_expiry(TokenResponseBody), + {ok, AccessTokenRecord} ?= extract_access_token(TokenResponseBody, AccessExpire), + {ok, RefreshTokenRecord} ?= extract_refresh_token(TokenResponseBody), + {ok, {IdTokenRecord, NoneUsed}} ?= extract_id_token(TokenResponseBody, ClientContext, Opts), + TokenRecord = #oidcc_token{ + id = IdTokenRecord, + access = AccessTokenRecord, + refresh = RefreshTokenRecord, + scope = Scopes + }, + ok ?= verify_access_token_map_hash(TokenRecord), + %% If none alg was used, continue with checks to allow the user to decide + %% if he wants to use the result + case NoneUsed of + true -> + {error, {none_alg_used, TokenRecord}}; + false -> + {ok, TokenRecord} + end + end. 
+ +-spec extract_scope(TokenMap, Opts) -> {ok, oidcc_scope:scopes()} | {error, error()} when + TokenMap :: map(), Opts :: retrieve_opts(). +extract_scope(TokenMap, Opts) -> + Scopes = maps:get(scope, Opts, []), + case maps:get(<<"scope">>, TokenMap, oidcc_scope:scopes_to_bin(Scopes)) of + ScopeBinary when is_binary(ScopeBinary) -> + {ok, oidcc_scope:parse(ScopeBinary)}; + %% Some providers (e.g. Apple and Twitch) are setting the scope value + %% as list of string. This extends compatibility for those. + ScopeList when is_list(ScopeList) -> + {ok, ScopeList}; + ScopeOther -> + {error, {invalid_property, {scope, ScopeOther}}} + end. + +-spec extract_expiry(TokenMap) -> {ok, undefined | integer()} | {error, error()} when + TokenMap :: map(). +extract_expiry(TokenMap) -> + case maps:get(<<"expires_in">>, TokenMap, undefined) of + undefined -> + {ok, undefined}; + ExpiresInNum when is_integer(ExpiresInNum) -> + {ok, ExpiresInNum}; + ExpiresInBinary when is_binary(ExpiresInBinary) -> + try + {ok, binary_to_integer(ExpiresInBinary)} + catch + error:badarg -> + {error, {invalid_property, {expires_in, ExpiresInBinary}}} + end; + ExpiresInOther -> + {error, {invalid_property, {expires_in, ExpiresInOther}}} + end. + +-spec extract_access_token(TokenMap, Expiry) -> {ok, access()} | {error, error()} when + TokenMap :: map(), + Expiry :: integer(). +extract_access_token(TokenMap, Expiry) -> + case maps:get(<<"access_token">>, TokenMap, none) of + none -> + {ok, none}; + Token when is_binary(Token) -> + TokenType = maps:get(<<"token_type">>, TokenMap, <<"Bearer">>), + {ok, #oidcc_token_access{token = Token, expires = Expiry, type = TokenType}}; + Other -> + {error, {invalid_property, {access_token, Other}}} + end. + +-spec extract_refresh_token(TokenMap) -> {ok, refresh()} | {error, error()} when + TokenMap :: map(). 
%% Pull the optional `refresh_token` out of the token response.
extract_refresh_token(TokenMap) ->
    case maps:get(<<"refresh_token">>, TokenMap, none) of
        none ->
            {ok, none};
        Token when is_binary(Token) ->
            {ok, #oidcc_token_refresh{token = Token}};
        Other ->
            {error, {invalid_property, {refresh_token, Other}}}
    end.

-spec extract_id_token(TokenMap, ClientContext, Opts) ->
    {ok, {TokenRecord, NoneUsed}} | {error, error()}
when
    TokenMap :: map(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: retrieve_opts(),
    TokenRecord :: id(),
    NoneUsed :: boolean().
%% Validate the optional `id_token`. `NoneUsed` flags a token signed with the
%% `none` algorithm so extract_response/3 can surface {error, {none_alg_used, _}}
%% while still handing the claims to the caller.
extract_id_token(TokenMap, ClientContext, Opts) ->
    case maps:get(<<"id_token">>, TokenMap, none) of
        none ->
            {ok, {none, false}};
        Token when is_binary(Token) ->
            case validate_id_token(Token, ClientContext, Opts) of
                {ok, OkClaims} ->
                    {ok, {#oidcc_token_id{token = Token, claims = OkClaims}, false}};
                {error, {none_alg_used, NoneClaims}} ->
                    {ok, {#oidcc_token_id{token = Token, claims = NoneClaims}, true}};
                {error, Reason} ->
                    {error, Reason}
            end;
        Other ->
            {error, {invalid_property, {id_token, Other}}}
    end.

-spec verify_access_token_map_hash(TokenRecord :: t()) ->
    ok | {error, error()}.
%% Verify the ID token's `at_hash` claim against the access token.
%% Per OpenID Connect Core, at_hash is the base64url encoding (no padding) of
%% the left-most half of the access token's hash — for SHA-256 that is the
%% first 16 bytes. The binary pattern below was garbled in the source
%% (`<> = crypto:hash(...)`, leaving BinHash unbound) and is restored here.
verify_access_token_map_hash(#oidcc_token{
    id =
        #oidcc_token_id{
            claims =
                #{<<"at_hash">> := ExpectedHash}
        },
    access = #oidcc_token_access{token = AccessToken}
}) ->
    <<BinHash:16/binary, _Rest/binary>> = crypto:hash(sha256, AccessToken),
    case base64:encode(BinHash, #{mode => urlsafe, padding => false}) of
        ExpectedHash ->
            ok;
        _Other ->
            {error, bad_access_token_hash}
    end;
verify_access_token_map_hash(#oidcc_token{}) ->
    ok.

?DOC("""
Validate ID Token

Usually the id token is validated using `retrieve/3`.

If you get the token passed from somewhere else, this function can validate it.

## Validations

* `iss` claim must match the issuer of the provider, or match the regex pattern if `issuer_regex` is configured in quirks.
* `nonce` claim must match the `nonce` option.
* `aud` claim must match the `trusted_audiences` option.
* `exp` claim must be in the future.
* `nbf` claim must be in the past.
* `azp` claim must match the client id by default. This can be disabled by setting the `validate_azp` option to `any`.
  If the `validate_azp` option is set to a `binary()` or a list of binaries, the `azp` claim must match one of those values.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),

%% Get IdToken from somewhere

{ok, Claims} =
    oidcc:validate_id_token(IdToken, ClientContext, ExpectedNonce).
```

## Regex Issuer Validation

You can use a regex pattern to validate the issuer claim by adding an `issuer_regex`
to the quirks map when creating the provider configuration. See the documentation for `validate_jwt/3`
for more details.
""").
?DOC(#{since => <<"3.0.0">>}).
-spec validate_id_token(IdToken, ClientContext, NonceOrOpts) ->
    {ok, Claims} | {error, error()}
when
    IdToken :: binary(),
    ClientContext :: oidcc_client_context:t(),
    NonceOrOpts :: Nonce | retrieve_opts(),
    Nonce :: binary() | any,
    Claims :: oidcc_jwt_util:claims().
%% Normalise the third argument: a bare nonce (binary or `any`) becomes an
%% options map before the map-based clause runs.
validate_id_token(IdToken, ClientContext, Nonce) when is_binary(Nonce) ->
    validate_id_token(IdToken, ClientContext, #{nonce => Nonce});
validate_id_token(IdToken, ClientContext, any) ->
    validate_id_token(IdToken, ClientContext, #{nonce => any});
validate_id_token(IdToken, ClientContext, Opts) when is_map(Opts) ->
    #oidcc_client_context{
        provider_configuration = Configuration,
        client_id = ClientId
    } =
        ClientContext,
    #oidcc_provider_configuration{
        id_token_signing_alg_values_supported = AllowAlgorithms,
        id_token_encryption_alg_values_supported = EncryptionAlgs,
        id_token_encryption_enc_values_supported = EncryptionEncs
    } =
        Configuration,

    %% Unlike validate_jwt/3, the allowed algorithms for an ID token come
    %% from the provider metadata, not from the caller.
    ValidateOpts = maps:merge(Opts, #{
        signing_algs => AllowAlgorithms,
        encryption_algs => EncryptionAlgs,
        encryption_encs => EncryptionEncs
    }),

    NonceOption = maps:get(nonce, Opts, any),
    ExpClaims =
        case NonceOption of
            any -> [];
            Bin when is_binary(Bin) -> [{<<"nonce">>, NonceOption}]
        end,

    AzpAllowList =
        case maps:get(validate_azp, Opts, client_id) of
            client_id -> [ClientId];
            any -> any;
            Binary when is_binary(Binary) -> [Binary];
            List when is_list(List) -> List
        end,

    validate_jwt(IdToken, ClientContext, ValidateOpts, fun(Claims) ->
        maybe
            ok ?= oidcc_jwt_util:verify_claims(Claims, ExpClaims),
            ok ?= verify_missing_required_claims(Claims),
            ok ?= verify_azp_claim(Claims, AzpAllowList),
            ok
        end
    end).

?DOC("""
Validate JWT

Validates a generic JWT (such as an access token) from the given provider.
Useful if the issuer is shared between multiple applications, and the access token
generated for a user at one client is used to validate their access at another client.

Validating an arbitrary JWT token (not an ID token) is not covered by the OpenID
Connect specification. Therefore the signing / encryption algorithms are not
derived from the provider configuration, but must be provided by the caller.

## Validations

* `iss` claim must match the issuer of the provider, or match the regex pattern if `issuer_regex` is configured in quirks.
* `aud` claim must match the `trusted_audiences` option.
* `exp` claim must be in the future.
* `nbf` claim must be in the past.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),
%% Get Jwt from Authorization header
Jwt = <<"jwt">>,

Opts = #{
    signing_algs => [<<"RS256">>]
},

{ok, Claims} =
    oidcc:validate_jwt(Jwt, ClientContext, Opts).
```

## Regex Issuer Validation

You can use a regex pattern to validate the issuer claim by adding an `issuer_regex`
to the quirks map when creating the provider configuration:

```erlang
{ok, {ProviderConfig, _}} =
    oidcc_provider_configuration:load_configuration(Issuer, #{
        quirks => #{
            issuer_regex => <<"^https://accounts\\.example\\.com/[a-z0-9]+">>
        }
    }),
```

This will allow tokens with issuer claims that match the regex pattern to validate successfully.
""").
?DOC(#{since => <<"3.2.0">>}).
-spec validate_jwt(Token, ClientContext, Opts) ->
    {ok, Claims} | {error, error()}
when
    Token :: binary(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: validate_jwt_opts(),
    Claims :: oidcc_jwt_util:claims().
validate_jwt(Token, ClientContext, Opts) when is_map(Opts) ->
    %% Public entry point: no extra claim checks beyond the standard set.
    validate_jwt(Token, ClientContext, Opts, fun(_Claims) -> ok end).

-spec validate_jwt(Token, ClientContext, Opts, AdditionalClaimValidation) ->
    {ok, Claims} | {error, error()}
when
    Token :: binary(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: validate_jwt_opts(),
    Claims :: oidcc_jwt_util:claims(),
    AdditionalClaimValidation :: fun((Claims) -> ok | {error, error()}).
%% Internal entry: wraps int_validate_jwt/4 with an automatic JWKs refresh
%% (via unknown_kid_retry/3) when the token references an unknown key id.
validate_jwt(Token, ClientContext, Opts, AdditionalClaimValidation) ->
    RefreshJwksFun = maps:get(refresh_jwks, Opts, undefined),
    unknown_kid_retry(
        fun(RefreshedClientContext) ->
            int_validate_jwt(
                Token, RefreshedClientContext, Opts, AdditionalClaimValidation
            )
        end,
        ClientContext,
        RefreshJwksFun
    ).

-spec int_validate_jwt(Token, ClientContext, Opts, AdditionalClaimValidation) ->
    {ok, Claims} | {error, error()}
when
    Token :: binary(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: validate_jwt_opts(),
    Claims :: oidcc_jwt_util:claims(),
    AdditionalClaimValidation :: fun((Claims) -> ok | {error, error()}).
%% Decrypt/verify the JWT against the merged key set and run the standard
%% claim checks plus `AdditionalClaimValidation`. A token validated with the
%% `none` algorithm is reported as {error, {none_alg_used, Claims}}.
int_validate_jwt(Token, ClientContext, Opts, AdditionalClaimValidation) ->
    #oidcc_client_context{
        provider_configuration = Configuration,
        jwks = #jose_jwk{} = Jwks0,
        client_id = ClientId,
        client_secret = ClientSecret,
        client_jwks = ClientJwks
    } =
        ClientContext,
    #oidcc_provider_configuration{
        issuer = Issuer,
        issuer_regex = IssuerRegex
    } =
        Configuration,

    SigningAlgs = maps:get(signing_algs, Opts, []),
    EncryptionAlgs = maps:get(encryption_algs, Opts, []),
    EncryptionEncs = maps:get(encryption_encs, Opts, []),

    %% The caller must supply at least one algorithm family; failing loudly
    %% here prevents silently accepting every algorithm.
    case {SigningAlgs, EncryptionAlgs} of
        {[], []} ->
            error(badarg, [Token, ClientContext, Opts], []);
        _ ->
            ok
    end,

    %% Merge provider keys, optional client keys and oct keys derived from
    %% the client secret (for symmetric algorithms).
    Jwks1 =
        case ClientJwks of
            none -> Jwks0;
            #jose_jwk{} -> oidcc_jwt_util:merge_jwks(Jwks0, ClientJwks)
        end,
    Jwks2 = oidcc_jwt_util:merge_client_secret_oct_keys(Jwks1, SigningAlgs, ClientSecret),
    Jwks = oidcc_jwt_util:merge_client_secret_oct_keys(Jwks2, EncryptionAlgs, ClientSecret),
    TrustedAudiences = maps:get(trusted_audiences, Opts, any),

    maybe
        {ok, {#jose_jwt{fields = Claims}, Jws}} ?=
            rescue_none_validated_jwt(
                oidcc_jwt_util:decrypt_and_verify(
                    Token, Jwks, SigningAlgs, EncryptionAlgs, EncryptionEncs
                )
            ),
        ExpectedClaims =
            case IssuerRegex of
                undefined ->
                    [{<<"iss">>, Issuer}];
                Pattern ->
                    [{<<"iss">>, {regex, Pattern}}]
            end,
        ok ?= oidcc_jwt_util:verify_claims(Claims, ExpectedClaims),
        ok ?= verify_missing_required_claims(Claims),
        ok ?= verify_aud_claim(Claims, ClientId, TrustedAudiences),
        ok ?= verify_exp_claim(Claims),
        ok ?= verify_nbf_claim(Claims),
        ok ?= AdditionalClaimValidation(Claims),
        case Jws of
            #jose_jws{alg = {jose_jws_alg_none, none}} ->
                {error, {none_alg_used, Claims}};
            #jose_jws{} ->
                {ok, Claims};
            #jose_jwe{} ->
                {ok, Claims}
        end
    end.

?DOC("""
Authorization headers

Generate a map of authorization headers to use when using the given
access token to access an API endpoint.

## Examples

```erlang
{ok, ClientContext} =
    oidcc_client_context:from_configuration_worker(provider_name,
                                                   <<"client_id">>,
                                                   <<"client_secret">>),
%% Get Access Token record from somewhere
Headers =
    oidcc:authorization_headers(AccessTokenRecord, get, Url, ClientContext).
```
""").
%% NOTE(review): since tag corrected from the charlist "3.2.0" to a binary
%% to match every other ?DOC since-tag in this module; the doc example above
%% also used Elixir syntax (`:get`) in an Erlang snippet and now uses `get`.
?DOC(#{since => <<"3.2.0">>}).
-spec authorization_headers(AccessTokenRecord, Method, Endpoint, ClientContext) -> HeaderMap when
    AccessTokenRecord :: access(),
    Method :: post | get,
    Endpoint :: uri_string:uri_string(),
    ClientContext :: oidcc_client_context:t(),
    HeaderMap :: #{binary() => binary()}.
-spec authorization_headers(AccessTokenRecord, Method, Endpoint, ClientContext, Opts) ->
    HeaderMap
when
    AccessTokenRecord :: access(),
    Method :: post | get,
    Endpoint :: uri_string:uri_string(),
    ClientContext :: oidcc_client_context:t(),
    Opts :: authorization_headers_opts(),
    HeaderMap :: #{binary() => binary()}.
authorization_headers(AccessTokenRecord, Method, Endpoint, ClientContext) ->
    authorization_headers(AccessTokenRecord, Method, Endpoint, ClientContext, #{}).
+ +authorization_headers( + #oidcc_token_access{} = AccessTokenRecord, + Method, + Endpoint, + #oidcc_client_context{} = ClientContext, + Opts +) -> + #oidcc_token_access{token = AccessToken, type = AccessTokenType} = AccessTokenRecord, + Header = oidcc_auth_util:add_authorization_header( + AccessToken, AccessTokenType, Method, Endpoint, Opts, ClientContext + ), + maps:from_list([{list_to_binary(Key), list_to_binary([Value])} || {Key, Value} <- Header]). + +-spec verify_aud_claim(Claims, ClientId, TrustedAudiences) -> ok | {error, error()} when + Claims :: oidcc_jwt_util:claims(), ClientId :: binary(), TrustedAudiences :: [binary()] | any. +verify_aud_claim(#{<<"aud">> := ClientId}, ClientId, _TrustedAudiences) -> + ok; +verify_aud_claim(#{<<"aud">> := Audience} = Claims, ClientId, any) when is_list(Audience) -> + case lists:member(ClientId, Audience) of + true -> ok; + false -> {error, {missing_claim, {<<"aud">>, ClientId}, Claims}} + end; +verify_aud_claim(#{<<"aud">> := Audience} = Claims, ClientId, TrustedAudiences0) when + is_list(Audience) +-> + TrustedAudiences = [ClientId | TrustedAudiences0], + maybe + true ?= lists:member(ClientId, Audience), + [] ?= [A || A <- Audience, not lists:member(A, TrustedAudiences)], + ok + else + _ -> {error, {missing_claim, {<<"aud">>, ClientId}, Claims}} + end; +verify_aud_claim(Claims, ClientId, _TrustedAudiences) -> + {error, {missing_claim, {<<"aud">>, ClientId}, Claims}}. + +-spec verify_azp_claim(Claims, Mode) -> ok | {error, error()} when + Claims :: oidcc_jwt_util:claims(), Mode :: [binary()] | any. +verify_azp_claim(_Claims, any) -> + ok; +verify_azp_claim(#{<<"azp">> := Azp}, AllowedAzp) when is_list(AllowedAzp) -> + case lists:member(Azp, AllowedAzp) of + true -> ok; + false -> {error, {missing_claim, {<<"azp">>, AllowedAzp}, #{<<"azp">> => Azp}}} + end; +verify_azp_claim(_, _Mode) -> + ok. + +-spec verify_exp_claim(Claims) -> ok | {error, error()} when Claims :: oidcc_jwt_util:claims(). 
+verify_exp_claim(#{<<"exp">> := Expiry}) -> + MaxClockSkew = + case application:get_env(oidcc, max_clock_skew) of + undefined -> 0; + {ok, ClockSkew} -> ClockSkew + end, + case erlang:system_time(second) > Expiry + MaxClockSkew of + true -> {error, token_expired}; + false -> ok + end; +verify_exp_claim(Claims) -> + {error, {missing_claim, <<"exp">>, Claims}}. + +-spec verify_nbf_claim(Claims) -> ok | {error, error()} when Claims :: oidcc_jwt_util:claims(). +verify_nbf_claim(#{<<"nbf">> := Expiry}) -> + MaxClockSkew = + case application:get_env(oidcc, max_clock_skew) of + undefined -> 0; + {ok, ClockSkew} -> ClockSkew + end, + case erlang:system_time(second) < Expiry - MaxClockSkew of + true -> {error, token_not_yet_valid}; + false -> ok + end; +verify_nbf_claim(_Claims) -> + ok. + +-spec verify_missing_required_claims(Claims) -> ok | {error, error()} when + Claims :: oidcc_jwt_util:claims(). +verify_missing_required_claims(Claims) -> + Required = [<<"iss">>, <<"sub">>, <<"aud">>, <<"exp">>, <<"iat">>], + CheckKeys = fun(Key, _Val, Acc) -> lists:delete(Key, Acc) end, + case maps:fold(CheckKeys, Required, Claims) of + [] -> + ok; + [MissingClaim | _Rest] -> + {error, {missing_claim, MissingClaim, Claims}} + end. + +-spec retrieve_a_token( + QsBodyIn, ClientContext, Opts, TelemetryOpts, AuthenticateClient +) -> + {ok, map()} | {error, error()} +when + QsBodyIn :: oidcc_http_util:query_params(), + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts() | refresh_opts(), + TelemetryOpts :: oidcc_http_util:telemetry_opts(), + AuthenticateClient :: boolean(). 
%% Perform the token-endpoint POST: client authentication, optional PKCE
%% verifier, optional mTLS endpoint alias and DPoP proof — with one automatic
%% retry when the server responds with a required DPoP nonce.
retrieve_a_token(QsBodyIn, ClientContext, Opts, TelemetryOpts, AuthenticateClient) ->
    #oidcc_client_context{provider_configuration = Configuration} =
        ClientContext,
    #oidcc_provider_configuration{
        token_endpoint = TokenEndpoint0,
        token_endpoint_auth_methods_supported = SupportedAuthMethods0,
        token_endpoint_auth_signing_alg_values_supported = SigningAlgs
    } =
        Configuration,

    UrlExtension = maps:get(url_extension, Opts, []),
    BaseHeaders = [{"accept", "application/jwt, application/json"}],
    BodyBase = QsBodyIn ++ maps:get(body_extension, Opts, []),

    %% Without client authentication, force the "none" auth method.
    SupportedAuthMethods =
        case AuthenticateClient of
            true -> SupportedAuthMethods0;
            false -> [<<"none">>]
        end,

    DpopOpts =
        case Opts of
            #{dpop_nonce := DpopNonce} ->
                #{nonce => DpopNonce};
            _ ->
                #{}
        end,
    maybe
        {ok, BodyWithPkce} ?= add_pkce_verifier(BodyBase, Opts, ClientContext),
        {ok, {Body, AuthHeaders}, AuthMethod} ?=
            oidcc_auth_util:add_client_authentication(
                BodyWithPkce, BaseHeaders, SupportedAuthMethods, SigningAlgs, Opts, ClientContext
            ),
        TokenEndpoint = oidcc_auth_util:maybe_mtls_endpoint(
            TokenEndpoint0, AuthMethod, <<"token_endpoint">>, ClientContext
        ),
        Endpoint =
            case UrlExtension of
                [] -> TokenEndpoint;
                _ -> [TokenEndpoint, <<"?">>, uri_string:compose_query(UrlExtension)]
            end,
        FinalHeaders = oidcc_auth_util:add_dpop_proof_header(
            AuthHeaders, post, Endpoint, DpopOpts, ClientContext
        ),
        Request =
            {Endpoint, FinalHeaders, "application/x-www-form-urlencoded",
                uri_string:compose_query(Body)},
        RequestOpts = maps:get(request_opts, Opts, #{}),
        {ok, {{json, TokenResponse}, _Headers}} ?=
            oidcc_http_util:request(post, Request, TelemetryOpts, RequestOpts),
        {ok, TokenResponse}
    else
        {error, {use_dpop_nonce, NewDpopNonce, _}} when DpopOpts =:= #{} ->
            %% only retry automatically if we didn't use a nonce the first time
            %% (to avoid infinite loops)
            retrieve_a_token(
                QsBodyIn,
                ClientContext,
                Opts#{dpop_nonce => NewDpopNonce},
                TelemetryOpts,
                AuthenticateClient
            );
        {error, Reason} ->
            {error, Reason}
    end.

-spec add_pkce_verifier(QueryList, Opts, ClientContext) ->
    {ok, oidcc_http_util:query_params()} | {error, error()}
when
    QueryList :: oidcc_http_util:query_params(),
    Opts :: retrieve_opts() | refresh_opts(),
    ClientContext :: oidcc_client_context:t().
%% Append the PKCE `code_verifier` when the provider supports a known
%% challenge method; honour the `require_pkce` option otherwise.
add_pkce_verifier(BodyQs, #{pkce_verifier := PkceVerifier} = Opts, ClientContext) ->
    #oidcc_client_context{provider_configuration = ProviderConfiguration} = ClientContext,
    #oidcc_provider_configuration{
        code_challenge_methods_supported = SupportedMethods
    } = ProviderConfiguration,
    RequirePkce = maps:get(require_pkce, Opts, false),

    case SupportedMethods of
        undefined when RequirePkce ->
            {error, no_supported_code_challenge};
        undefined ->
            {ok, BodyQs};
        Methods when is_list(Methods) ->
            Usable =
                lists:member(<<"S256">>, Methods) orelse
                    lists:member(<<"plain">>, Methods),
            case Usable of
                true ->
                    {ok, [{<<"code_verifier">>, PkceVerifier} | BodyQs]};
                false when RequirePkce ->
                    {error, no_supported_code_challenge};
                false ->
                    {ok, BodyQs}
            end
    end;
add_pkce_verifier(_BodyQs, #{require_pkce := true}, _ClientContext) ->
    {error, pkce_verifier_required};
add_pkce_verifier(BodyQs, _Opts, _ClientContext) ->
    {ok, BodyQs}.

-spec rescue_none_validated_jwt(Result) -> Response when
    Response :: {ok, {#jose_jwt{}, #jose_jwe{} | #jose_jws{}}} | {error, oidcc_jwt_util:error()},
    Result :: {ok, {#jose_jwt{}, #jose_jwe{} | #jose_jws{}}} | {error, oidcc_jwt_util:error()}.
%% Downgrade a `none`-alg verification "error" into a success so the caller
%% can inspect the claims and decide what to do (see int_validate_jwt/4).
rescue_none_validated_jwt({ok, _} = Valid) ->
    Valid;
rescue_none_validated_jwt({error, {none_alg_used, Jwt, Jws}}) ->
    {ok, {Jwt, Jws}};
rescue_none_validated_jwt(Other) ->
    Other.
+ +-spec unknown_kid_retry(Function, ClientContext, RefreshJwksFun) -> + {ok, Result} | {error, Error} +when + Function :: fun((ClientContext) -> {ok, Result} | {error, Error}), + ClientContext :: oidcc_client_context:t(), + RefreshJwksFun :: undefined | oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + Result :: term(), + Error :: term(). +unknown_kid_retry(Function, ClientContext, RefreshJwksFun) -> + maybe + {ok, Result} ?= Function(ClientContext), + {ok, Result} + else + {error, {no_matching_key_with_kid, Kid}} when RefreshJwksFun =/= undefined -> + #oidcc_client_context{jwks = OldJwks} = ClientContext, + maybe + {ok, RefreshedJwks} ?= RefreshJwksFun(OldJwks, Kid), + RefreshedClientContext = ClientContext#oidcc_client_context{jwks = RefreshedJwks}, + Function(RefreshedClientContext) + end; + {error, Reason} -> + {error, Reason} + end. diff --git a/deps/oidcc/src/oidcc_token_introspection.erl b/deps/oidcc/src/oidcc_token_introspection.erl new file mode 100644 index 0000000..7567b59 --- /dev/null +++ b/deps/oidcc/src/oidcc_token_introspection.erl @@ -0,0 +1,259 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_token_introspection). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +OAuth Token Introspection. + +See https://datatracker.ietf.org/doc/html/rfc7662. + +## Records + +To use the records, import the definition: + +```erlang +-include_lib(["oidcc/include/oidcc_token_introspection.hrl"]). +``` + +## Telemetry + +See [`Oidcc.TokenIntrospection`](`m:'Elixir.Oidcc.TokenIntrospection'`). +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). +-include("oidcc_token.hrl"). +-include("oidcc_token_introspection.hrl"). + +-export([introspect/3]). + +-export_type([error/0]). +-export_type([opts/0]). +-export_type([t/0]). + +?DOC(""" +Introspection Result. 
+ +See https://datatracker.ietf.org/doc/html/rfc7662#section-2.2. +"""). +?DOC(#{since => <<"3.0.0">>}). +-type t() :: #oidcc_token_introspection{ + active :: boolean(), + client_id :: binary(), + exp :: pos_integer(), + scope :: oidcc_scope:scopes(), + username :: binary(), + iss :: binary() +}. + +?DOC(#{since => <<"3.0.0">>}). +-type opts() :: #{ + preferred_auth_methods => [oidcc_auth_util:auth_method(), ...], + request_opts => oidcc_http_util:request_opts(), + dpop_nonce => binary(), + client_self_only => boolean() +}. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: client_id_mismatch | introspection_not_supported | oidcc_http_util:error(). + +-telemetry_event(#{ + event => [oidcc, load_configuration, start], + description => <<"Emitted at the start of introspecting the token">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_configuration, stop], + description => <<"Emitted at the end of introspecting the token">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +-telemetry_event(#{ + event => [oidcc, load_configuration, exception], + description => <<"Emitted at the end of introspecting the token">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +?DOC(""" +Introspect the given access token. + +For a high level interface using `m:oidcc_provider_configuration_worker` +see `oidcc:introspect_token/5`. 
+ +## Examples + +```erlang +{ok, ClientContext} = + oidcc_client_context:from_configuration_worker(provider_name, + <<"client_id">>, + <<"client_secret">>), + +%% Get AccessToken + +{ok, #oidcc_token_introspection{active = True}} = + oidcc_token_introspection:introspect(AccessToken, ClientContext, #{}). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec introspect(Token, ClientContext, Opts) -> + {ok, t()} + | {error, error()} +when + Token :: oidcc_token:t() | binary(), + ClientContext :: oidcc_client_context:authenticated_t(), + Opts :: opts(). +introspect( + #oidcc_token{access = #oidcc_token_access{token = AccessToken}}, + ClientContext, + Opts +) -> + introspect(AccessToken, ClientContext, Opts); +introspect(AccessToken, ClientContext, Opts) -> + #oidcc_client_context{ + provider_configuration = Configuration, + client_id = ClientId, + client_secret = ClientSecret + } = ClientContext, + #oidcc_provider_configuration{ + introspection_endpoint = Endpoint0, + issuer = Issuer, + introspection_endpoint_auth_methods_supported = SupportedAuthMethods, + introspection_endpoint_auth_signing_alg_values_supported = AllowAlgorithms + } = Configuration, + + case Endpoint0 of + undefined -> + {error, introspection_not_supported}; + _ -> + Header0 = [{"accept", "application/json"}], + Body0 = [{<<"token">>, AccessToken}], + + RequestOpts = maps:get(request_opts, Opts, #{}), + TelemetryOpts = #{ + topic => [oidcc, introspect_token], + extra_meta => #{issuer => Issuer, client_id => ClientId} + }, + DpopOpts = + case Opts of + #{dpop_nonce := DpopNonce} -> + #{nonce => DpopNonce}; + _ -> + #{} + end, + maybe + {ok, {Body, Header1}, AuthMethod} ?= + oidcc_auth_util:add_client_authentication( + Body0, Header0, SupportedAuthMethods, AllowAlgorithms, Opts, ClientContext + ), + Endpoint = oidcc_auth_util:maybe_mtls_endpoint( + Endpoint0, + AuthMethod, + <<"introspection_endpoint">>, + ClientContext + ), + Header = oidcc_auth_util:add_dpop_proof_header( + Header1, post, Endpoint, 
DpopOpts, ClientContext + ), + Request = + {Endpoint, Header, "application/x-www-form-urlencoded", + uri_string:compose_query(Body)}, + {ok, {{json, Token}, _Headers}} ?= + oidcc_http_util:request(post, Request, TelemetryOpts, RequestOpts), + {ok, TokenMap} ?= extract_response(Token), + client_match(TokenMap, ClientContext, maps:get(client_self_only, Opts, true)) + else + {error, {use_dpop_nonce, NewDpopNonce, _}} when + DpopOpts =:= #{} + -> + %% only retry automatically if we didn't use a nonce the first time + %% (to avoid infinite loops) + introspect( + AccessToken, + ClientContext, + Opts#{dpop_nonce => NewDpopNonce} + ); + {error, Reason} -> + {error, Reason} + end + end. + +-spec client_match(Introspection, ClientContext, ClientSelfOnly) -> + {ok, t()} | {error, error()} +when + Introspection :: t(), + ClientContext :: oidcc_client_context:t(), + ClientSelfOnly :: boolean(). +client_match(Introspection, _, false) -> + {ok, Introspection}; +client_match( + #oidcc_token_introspection{client_id = ClientId} = Introspection, + #oidcc_client_context{client_id = ClientId}, + true +) -> + {ok, Introspection}; +client_match(_Introspection, _ClientContext, true) -> + {error, client_id_mismatch}. + +-spec extract_response(TokenMap) -> + {ok, t()} +when + TokenMap :: map(). 
+extract_response(TokenMap) -> + Active = + case maps:get(<<"active">>, TokenMap, undefined) of + true -> + true; + _ -> + false + end, + Scope = maps:get(<<"scope">>, TokenMap, <<"">>), + Username = maps:get(<<"username">>, TokenMap, undefined), + TokenType = maps:get(<<"token_type">>, TokenMap, undefined), + Exp = maps:get(<<"exp">>, TokenMap, undefined), + Iat = maps:get(<<"iat">>, TokenMap, undefined), + Nbf = maps:get(<<"nbf">>, TokenMap, undefined), + Sub = maps:get(<<"sub">>, TokenMap, undefined), + Aud = maps:get(<<"aud">>, TokenMap, undefined), + Iss = maps:get(<<"iss">>, TokenMap, undefined), + Jti = maps:get(<<"jti">>, TokenMap, undefined), + ClientId = maps:get(<<"client_id">>, TokenMap, undefined), + {ok, #oidcc_token_introspection{ + active = Active, + scope = oidcc_scope:parse(Scope), + client_id = ClientId, + username = Username, + exp = Exp, + token_type = TokenType, + iat = Iat, + nbf = Nbf, + sub = Sub, + aud = Aud, + iss = Iss, + jti = Jti, + extra = maps:without( + [ + <<"scope">>, + <<"active">>, + <<"username">>, + <<"exp">>, + <<"client_id">>, + <<"token_type">>, + <<"iat">>, + <<"nbf">>, + <<"sub">>, + <<"aud">>, + <<"iss">>, + <<"jti">> + ], + TokenMap + ) + }}. diff --git a/deps/oidcc/src/oidcc_userinfo.erl b/deps/oidcc/src/oidcc_userinfo.erl new file mode 100644 index 0000000..8ff3007 --- /dev/null +++ b/deps/oidcc/src/oidcc_userinfo.erl @@ -0,0 +1,390 @@ +%% SPDX-FileCopyrightText: 2023 Erlang Ecosystem Foundation +%% SPDX-License-Identifier: Apache-2.0 + +-module(oidcc_userinfo). + +-feature(maybe_expr, enable). + +-include("internal/doc.hrl"). +?MODULEDOC(""" +OpenID Connect Userinfo + +See https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + +## Telemetry + +See [`Oidcc.Userinfo`](`m:'Elixir.Oidcc.Userinfo'`). +"""). +?MODULEDOC(#{since => <<"3.0.0">>}). + +-include("oidcc_client_context.hrl"). +-include("oidcc_provider_configuration.hrl"). +-include("oidcc_token.hrl"). + +-include_lib("jose/include/jose_jwe.hrl"). 
+-include_lib("jose/include/jose_jwk.hrl"). +-include_lib("jose/include/jose_jws.hrl"). +-include_lib("jose/include/jose_jwt.hrl"). + +-export([retrieve/3]). + +-export_type([error/0]). +-export_type([retrieve_opts/0]). +-export_type([retrieve_opts_no_sub/0]). + +?DOC("See `t:retrieve_opts/0`."). +?DOC(#{since => <<"3.0.0">>}). +-type retrieve_opts_no_sub() :: + #{ + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + dpop_nonce => binary() + }. + +?DOC(""" +Configure userinfo request + +See https://openid.net/specs/openid-connect-core-1_0.html#UserInfoRequest + +## Parameters + +* `refresh_jwks` - How to handle tokens with an unknown `kid`. + See `t:oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun/0` +* `expected_subject` - expected subject for the userinfo + (`sub` from id token) +* `dpop_nonce` - if using DPoP, the `nonce` value to use in the + proof claim +"""). +?DOC(#{since => <<"3.0.0">>}). +-type retrieve_opts() :: + #{ + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + expected_subject => binary() | any, + dpop_nonce => binary() + }. + +?DOC(#{since => <<"3.0.0">>}). +-type error() :: + {distributed_claim_not_found, {ClaimSource :: binary(), ClaimName :: binary()}} + | no_access_token + | invalid_content_type + | bad_subject + | oidcc_jwt_util:error() + | oidcc_http_util:error(). + +-telemetry_event(#{ + event => [oidcc, userinfo, start], + description => <<"Emitted at the start of loading userinfo">>, + measurements => <<"#{system_time => non_neg_integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +-telemetry_event(#{ + event => [oidcc, userinfo, stop], + description => <<"Emitted at the end of loading userinfo">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). 
+ +-telemetry_event(#{ + event => [oidcc, userinfo, exception], + description => <<"Emitted at the end of loading userinfo">>, + measurements => <<"#{duration => integer(), monotonic_time => integer()}">>, + metadata => <<"#{issuer => uri_string:uri_string(), client_id => binary()}">> +}). + +?DOC(""" +Load userinfo for the given token + +For a high level interface using `m:oidcc_provider_configuration_worker`, see +`oidcc:retrieve_userinfo/5`. + +## Examples + +```erlang +{ok, ClientContext} = + oidcc_client_context:from_configuration_worker(provider_name, + <<"client_id">>, + <<"client_secret">>), + +%% Get Token + +{ok, #{<<"sub">> => Sub}} = + oidcc_userinfo:retrieve(Token, ClientContext, #{}). +``` +"""). +?DOC(#{since => <<"3.0.0">>}). +-spec retrieve + (Token, ClientContext, Opts) -> {ok, oidcc_jwt_util:claims()} | {error, error()} when + Token :: oidcc_token:t(), + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts_no_sub(); + (Token, ClientContext, Opts) -> {ok, oidcc_jwt_util:claims()} | {error, error()} when + Token :: oidcc_token:access() | binary(), + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts(). 
+retrieve( + #oidcc_token{access = #oidcc_token_access{} = AccessTokenRecord, id = IdTokenRecord}, + ClientContext, + Opts +) -> + #oidcc_token_id{claims = #{<<"sub">> := ExpectedSubject}} = IdTokenRecord, + retrieve( + AccessTokenRecord, + ClientContext, + maps:put(expected_subject, ExpectedSubject, Opts) + ); +retrieve(#oidcc_token{access = none}, #oidcc_client_context{}, _Opts) -> + {error, no_access_token}; +retrieve(#oidcc_token_access{} = AccessTokenRecord, #oidcc_client_context{} = ClientContext, Opts) -> + #oidcc_client_context{ + provider_configuration = Configuration, + client_id = ClientId + } = ClientContext, + #oidcc_provider_configuration{ + issuer = Issuer + } = Configuration, + #oidcc_token_access{token = AccessToken, type = AccessTokenType} = AccessTokenRecord, + + %% Dialyzer gets confused about the type of Opts here (thinking that it + %% loses the expected_subject key), so we perform a no-op map operation to + %% separate the two. + %% + AuthorizationOpts = Opts#{}, + Endpoint = + case Configuration of + #oidcc_provider_configuration{ + tls_client_certificate_bound_access_tokens = true, + mtls_endpoint_aliases = #{ + <<"userinfo_endpoint">> := MtlsEndpoint + } + } -> + MtlsEndpoint; + #oidcc_provider_configuration{ + userinfo_endpoint = UserinfoEndpoint + } -> + UserinfoEndpoint + end, + Header = oidcc_auth_util:add_authorization_header( + AccessToken, AccessTokenType, get, Endpoint, AuthorizationOpts, ClientContext + ), + Request = {Endpoint, Header}, + RequestOpts = maps:get(request_opts, Opts, #{}), + TelemetryOpts = #{ + topic => [oidcc, userinfo], + extra_meta => #{issuer => Issuer, client_id => ClientId} + }, + + HasDpopNonce = maps:is_key(dpop_nonce, AuthorizationOpts), + + maybe + {ok, {UserinfoResponse, _Headers}} ?= + oidcc_http_util:request(get, Request, TelemetryOpts, RequestOpts), + {ok, Claims} ?= validate_userinfo_body(UserinfoResponse, ClientContext, Opts), + lookup_distributed_claims(Claims, ClientContext, Opts) + else + {error, 
{use_dpop_nonce, DpopNonce, _}} when not HasDpopNonce -> + %% retry once if we didn't provide a nonce the first time + retrieve(AccessTokenRecord, ClientContext, Opts#{dpop_nonce => DpopNonce}); + {error, Reason} -> + {error, Reason} + end; +retrieve(AccessToken, #oidcc_client_context{} = ClientContext, Opts) when is_binary(AccessToken) -> + AccessTokenRecord = #oidcc_token_access{token = AccessToken}, + retrieve(AccessTokenRecord, ClientContext, Opts). + +-spec validate_userinfo_body(Body, ClientContext, Opts) -> + {ok, Claims} | {error, error()} +when + Body :: {json, map()} | {jwt, binary()}, + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts(), + Claims :: oidcc_jwt_util:claims(). +validate_userinfo_body({json, Userinfo}, _ClientContext, Opts) -> + ExpectedSubject = maps:get(expected_subject, Opts), + + case {ExpectedSubject, Userinfo} of + {any, Map} -> {ok, Map}; + {ExpectedSubject, #{<<"sub">> := ExpectedSubject} = Map} -> {ok, Map}; + {_, #{}} -> {error, bad_subject} + end; +validate_userinfo_body({jwt, UserinfoBody}, ClientContext, Opts0) -> + #oidcc_client_context{provider_configuration = Configuration, client_id = ClientId} = + ClientContext, + #oidcc_provider_configuration{issuer = Issuer} = Configuration, + ExpectedSubject = maps:get(expected_subject, Opts0), + %% only validate these claims if the token is signed: + %% https://openid.net/specs/openid-connect-core-1_0.html#rfc.section.5.3.2 + ExpectedSignedClaims = [ + {<<"aud">>, ClientId}, + {<<"iss">>, Issuer} + ], + ExpectedClaims = + case maps:get(expected_subject, Opts0) of + any -> []; + ExpectedSubject -> [{<<"sub">>, ExpectedSubject}] + end, + Opts = maps:merge( + #{ + expected_signed_claims => ExpectedSignedClaims, + expected_claims => ExpectedClaims + }, + Opts0 + ), + validate_userinfo_token( + UserinfoBody, + ClientContext, + Opts + ). 
+ +-spec validate_userinfo_token(Token, ClientContext, Opts) -> + {ok, Claims} | {error, error()} +when + Token :: binary(), + ClientContext :: oidcc_client_context:t(), + Opts :: + #{ + refresh_jwks => oidcc_jwt_util:refresh_jwks_for_unknown_kid_fun(), + expected_subject => binary(), + expected_signed_claims => [{binary(), term()}], + expected_claims => [{binary(), term()}] + }, + Claims :: oidcc_jwt_util:claims(). +validate_userinfo_token(UserinfoToken, ClientContext, Opts) -> + RefreshJwksFun = maps:get(refresh_jwks, Opts, undefined), + #oidcc_client_context{ + provider_configuration = Configuration, + jwks = #jose_jwk{} = Jwks0, + client_id = ClientId, + client_secret = ClientSecret, + client_jwks = ClientJwks + } = + ClientContext, + #oidcc_provider_configuration{ + userinfo_signing_alg_values_supported = AllowAlgorithms, + userinfo_encryption_alg_values_supported = EncryptionAlgs, + userinfo_encryption_enc_values_supported = EncryptionEncs, + issuer = Issuer + } = + Configuration, + maybe + Jwks1 = oidcc_jwt_util:merge_client_secret_oct_keys(Jwks0, AllowAlgorithms, ClientSecret), + Jwks2 = oidcc_jwt_util:merge_client_secret_oct_keys(Jwks1, EncryptionAlgs, ClientSecret), + Jwks = + case ClientJwks of + #jose_jwk{} -> + oidcc_jwt_util:merge_jwks(Jwks2, ClientJwks); + _ -> + Jwks2 + end, + {ok, {#jose_jwt{fields = Claims}, JwsOrJwe}} ?= + oidcc_jwt_util:decrypt_and_verify( + UserinfoToken, + Jwks, + AllowAlgorithms, + EncryptionAlgs, + EncryptionEncs + ), + ExpClaims = + case JwsOrJwe of + #jose_jws{} -> + maps:get(expected_claims, Opts, []) ++ + maps:get(expected_signed_claims, Opts, []); + #jose_jwe{} -> + maps:get(expected_claims, Opts, []) + end, + ok ?= oidcc_jwt_util:verify_claims(Claims, ExpClaims), + {ok, maps:remove(nonce, Claims)} + else + {error, {no_matching_key_with_kid, Kid}} when RefreshJwksFun =/= undefined -> + maybe + {ok, RefreshedJwks} ?= RefreshJwksFun(Jwks0, Kid), + RefreshedClientContext = ClientContext#oidcc_client_context{jwks = 
RefreshedJwks}, + validate_userinfo_token(UserinfoToken, RefreshedClientContext, Opts) + end; + {error, Reason} -> + {error, Reason} + end. + +-spec lookup_distributed_claims(Claims, ClientContext, Opts) -> + {ok, Claims} | {error, error()} +when + Claims :: oidcc_jwt_util:claims(), + ClientContext :: oidcc_client_context:t(), + Opts :: retrieve_opts(). +lookup_distributed_claims( + #{ + <<"_claim_names">> := ClaimNames, + <<"_claim_sources">> := ClaimSources + } = + Claims, + ClientContext, + Opts +) -> + maybe + {ok, DistributedClaims} ?= + lookup_distributed_claim(maps:to_list(ClaimSources), Opts, []), + {ok, ValidatedClaims} ?= + validate_distributed_claim(DistributedClaims, ClientContext, Opts, #{}), + combine_claim(maps:to_list(ClaimNames), ValidatedClaims, Claims) + end; +lookup_distributed_claims(Claims, _ClientContext, _Opts) -> + {ok, Claims}. + +-spec lookup_distributed_claim(Claims, Opts, Acc) -> {ok, Acc} | {error, error()} when + Claims :: [{binary(), #{binary() := binary()}}], + Opts :: retrieve_opts(), + Acc :: [{binary(), binary()}]. +lookup_distributed_claim([], _Opts, Acc) -> + {ok, Acc}; +lookup_distributed_claim([{ClaimName, #{<<"JWT">> := Jwt}} | Rest], Opts, Acc) -> + lookup_distributed_claim(Rest, Opts, [{ClaimName, Jwt} | Acc]); +lookup_distributed_claim( + [ + {ClaimName, #{<<"endpoint">> := Endpoint, <<"access_token">> := AccessToken}} + | Rest + ], + Opts, + Acc +) -> + Request = + {Endpoint, [oidcc_http_util:bearer_auth_header(AccessToken), {"accept", "application/jwt"}]}, + + TelemetryOpts = #{ + topic => [oidcc, userinfo_distributed_claim], extra_meta => #{endpoint => Endpoint} + }, + RequestOpts = maps:get(request_opts, Opts, #{}), + + maybe + {ok, {{jwt, Jwt}, _}} ?= oidcc_http_util:request(get, Request, TelemetryOpts, RequestOpts), + lookup_distributed_claim(Rest, Opts, [{ClaimName, Jwt} | Acc]) + else + {error, Reason} -> + {error, Reason}; + {ok, {{_Format, _Body}, _Headers}} -> + {error, invalid_content_type} + end. 
+ +-spec validate_distributed_claim(Claims, ClientContext, Opts, Acc) -> + {ok, Acc} | {error, error()} +when + Claims :: [{binary(), #{binary() := binary()}}], + Opts :: retrieve_opts(), + ClientContext :: oidcc_client_context:t(), + Acc :: #{binary() => #{binary() => term()}}. +validate_distributed_claim([], _ClientContext, _Opts, Acc) -> + {ok, Acc}; +validate_distributed_claim([{ClaimName, Token} | Rest], ClientContext, Opts, Acc) -> + maybe + {ok, Claims} ?= validate_userinfo_token(Token, ClientContext, Opts), + validate_distributed_claim(Rest, ClientContext, Opts, maps:put(ClaimName, Claims, Acc)) + end. + +combine_claim([], _DistributedClaims, Acc) -> + {ok, Acc}; +combine_claim([{ClaimName, ClaimSource} | Rest], DistributedClaims, Acc) -> + case DistributedClaims of + #{ClaimSource := #{ClaimName := ClaimValue}} -> + combine_claim(Rest, DistributedClaims, maps:put(ClaimName, ClaimValue, Acc)); + #{} -> + {error, {distributed_claim_not_found, {ClaimSource, ClaimName}}} + end. diff --git a/deps/phoenix/.formatter.exs b/deps/phoenix/.formatter.exs new file mode 100644 index 0000000..b9c538e --- /dev/null +++ b/deps/phoenix/.formatter.exs @@ -0,0 +1,105 @@ +locals_without_parens = [ + # Phoenix.Channel + intercept: 1, + + # Phoenix.Router + connect: 3, + connect: 4, + delete: 3, + delete: 4, + forward: 2, + forward: 3, + forward: 4, + get: 3, + get: 4, + head: 3, + head: 4, + match: 4, + match: 5, + options: 3, + options: 4, + patch: 3, + patch: 4, + pipeline: 2, + pipe_through: 1, + post: 3, + post: 4, + put: 3, + put: 4, + resources: 2, + resources: 3, + resources: 4, + trace: 4, + + # Phoenix.Controller + action_fallback: 1, + + # Phoenix.Endpoint + plug: 1, + plug: 2, + socket: 2, + socket: 3, + + # Phoenix.Socket + channel: 2, + channel: 3, + + # Phoenix.ChannelTest + assert_broadcast: 2, + assert_broadcast: 3, + assert_push: 2, + assert_push: 3, + assert_reply: 2, + assert_reply: 3, + assert_reply: 4, + refute_broadcast: 2, + refute_broadcast: 3, + 
refute_push: 2, + refute_push: 3, + refute_reply: 2, + refute_reply: 3, + refute_reply: 4, + + # Phoenix.ConnTest + assert_error_sent: 2, + + # Phoenix.Live{Dashboard,View} + attr: 2, + attr: 3, + embed_templates: 1, + embed_templates: 2, + live: 2, + live: 3, + live: 4, + live_dashboard: 1, + live_dashboard: 2, + on_mount: 1, + slot: 1, + slot: 2, + slot: 3, + + # Phoenix.LiveViewTest + assert_patch: 1, + assert_patch: 2, + assert_patch: 3, + assert_patched: 2, + assert_push_event: 3, + assert_push_event: 4, + assert_redirect: 1, + assert_redirect: 2, + assert_redirect: 3, + assert_redirected: 2, + assert_reply: 2, + assert_reply: 3, + refute_redirected: 1, + refute_redirected: 2, + refute_patched: 1, + refute_patched: 2, + refute_push_event: 3, + refute_push_event: 4 +] + +[ + locals_without_parens: locals_without_parens, + export: [locals_without_parens: locals_without_parens] +] diff --git a/deps/phoenix/.hex b/deps/phoenix/.hex new file mode 100644 index 0000000..7b9f445 Binary files /dev/null and b/deps/phoenix/.hex differ diff --git a/deps/phoenix/CHANGELOG.md b/deps/phoenix/CHANGELOG.md new file mode 100644 index 0000000..154c52f --- /dev/null +++ b/deps/phoenix/CHANGELOG.md @@ -0,0 +1,160 @@ +# Changelog for v1.8 + +This release requires Erlang/OTP 25+. + +## Streamlined generators + + * Extend tailwindcss support in new apps with [daisyUI](https://daisyui.com/) for light/dark/system mode support for entire app, including core components + * Simplify layout handling for new apps. Now there is only a single `root.html.heex` which wraps the render pipeline. Other dynamic layouts, like `app.html.heex` are called as needed within templates as regular function components + * Simplify core components and live generators to more closely match basic `phx.gen.html` crud. 
This serves as a better base for seasoned devs to start with, and lessens the amount of code newcomers need to get up to speed with on the basics + * Introduce magic links (passwordless auth) and "sudo mode" to `mix phx.gen.auth` while simplifying the generated structure + * Introduce scopes to Phoenix generators, designed to make secure data access the *default*, not something you remember (or forget) to do later + +## `put_secure_browser_headers` + +`put_secure_browser_headers` has been updated to the latest security practices. In particular, it sets the `content-security-policy` header to `"base-uri 'self'; frame-ancestors 'self';"` if none is set, restricting embedding of your application and the use of `` element to same origin respectively. If you expect your application to be embedded by third-parties, you want to consult the documentation. + +The headers `x-download-options` and `x-frame-options` are no longer set as they have been deprecated by standards. + +## Deprecations + +This release introduces deprecation warnings for several features that have been soft-deprecated in the past. + + * `use Phoenix.Controller` must now specify the `:formats` option, which may be set to an empty list if the formats are not known upfront + * The `:namespace` and `:put_default_views` options on `use Phoenix.Controller` are deprecated and emit a warning on use + * Specifying layouts without modules, such as `put_layout(conn, :print)` or `put_layout(conn, html: :print)` is deprecated + * The `:trailing_slash` option in `Phoenix.Router` has been deprecated in favor of using `Phoenix.VerifiedRoutes`. The overall usage of helpers will be deprecated in the future + +## Potential breaking changes + + * The `config` variable is no longer available in `Phoenix.Endpoint`. In the past, it was possible to read your endpoint configuration at compile-time via an injected variable named `config`, which is no longer supported. 
Use `Application.compile_env/3` instead, which is tracked by the Elixir compiler and lead to a better developer experience. This may also lead to errors on application boot if you were previously incorrectly setting compile time config at runtime. + +## 1.8.7 (2026-05-06) + +### Bug fixes +- Fix invalid status when longpoll request times out + +### Enhancements +- Mask `token` parameter [in logs](https://hexdocs.pm/phoenix/Phoenix.Logger.html#module-parameter-filtering) by default (in addition to "password") + +### JavaScript Client Bug Fixes +- Fix encoding of non-ASCII metadata in binary channel messages + +## 1.8.6 (2026-05-05) + +### Security fixes +- [CVE-2026-32689](https://github.com/phoenixframework/phoenix/security/advisories/GHSA-628h-q48j-jr6q): Fix Phoenix.Socket Longpoll transport memory exhaustion in nd-JSON body splitting + +## 1.8.5 (2026-03-05) + +### JavaScript Client Bug Fixes +- Fix socket connecting on visibility change when never established + +### Enhancements +- Fix warnings on Elixir 1.20 + +## 1.8.4 (2026-02-23) + +### JavaScript Client Bug Fixes +- Fix bug reconnecting connections when close was gracefully initiated by server +- Fix LongPoll transport name in sessionStorage and logs + +### Enhancements +- Adds guards support in `assert_push`, `assert_broadcast`, and `assert_reply` +- Enable purging in Phoenix code server for Elixir 1.20 + +## 1.8.3 (2025-12-08) + +### Enhancements + - Add top-level phoenix config: `sort_verified_routes_query_params` to enable sorting query params in verified routes during tests + +### Bug fixes + - Fix endpoint port config in an umbrella application. 
([#6549](https://github.com/phoenixframework/phoenix/pull/6549)) + - Drop incoming channel messages with stale join refs + +## 1.8.2 (2025-11-26) + +### Bug fixes + - [phoenix.js] fix issue where LongPoll can cause "unmatched topic" errors (observed on iOS only) ([#6538](https://github.com/phoenixframework/phoenix/pull/6538)) + - [phx.gen.live] fix tests when schema and table names are equal ([#6477](https://github.com/phoenixframework/phoenix/pull/6477)) + - [Verified Routes] do not add path prefixes for static routes + - [Phoenix.Endpoint] fix LongPoll being active by default since 1.8.0 ([#6487](https://github.com/phoenixframework/phoenix/pull/6487)) + +### Enhancements + - [phoenix.js] socket now stops reconnection attempts while the page is hidden ([#6534](https://github.com/phoenixframework/phoenix/pull/6534)) + - [phx.new] (re-)add `<.input field={@form[:foo]} type="hidden" />` support in core components + - [phx.new] set `force_ssl` in `prod.exs` by default ([#6435](https://github.com/phoenixframework/phoenix/pull/6435)) + - [phx.new] change `--docker` base image to debian trixie ([#6521](https://github.com/phoenixframework/phoenix/pull/6521)) + - [Phoenix.Socket.assign/2] allow passing a function as second argument `assign(socket, fn _existing_assigns -> %{this_gets: "merged"} end)` ([#6530](https://github.com/phoenixframework/phoenix/pull/6530)) + - [Phoenix.Controller.assign/2] allow passing a function as second argument ([#6542](https://github.com/phoenixframework/phoenix/pull/6542)) + - [Phoenix.Controller.assign/2] support keyword lists and maps as second argument similar to LiveView ([#6513](https://github.com/phoenixframework/phoenix/pull/6513)) + - [Presence] support custom dispatcher for `presence_diff` broadcast ([#6500](https://github.com/phoenixframework/phoenix/pull/6500)) + - [AGENTS.md] add short test guidelines to usage rules + +## 1.8.1 (2025-08-28) + +### Bug fixes + - [phx.new] Fix AGENTS.md failing to include CSS and JavaScript sections 
+ +## 1.8.0 (2025-08-05) + +### Bug fixes + - [phx.new] Don't include node_modules override in generated `tsconfig.json` + +### Enhancements + - [phx.gen.live|html|json] - Make context argument optional. Defaults to the plural name. + - [phx.new] Add `mix precommit` alias + - [phx.new] Add `AGENTS.md` generation compatible with [`usage_rules`](https://hexdocs.pm/usage_rules/) + - [phx.new] Add `usage_rules` folder to installer, allowing to sync generic Phoenix rules into new projects + - [phx.new] Use LiveView 1.1 release in generated code + - [phx.new] Ensure theme selector and flash closing works without LiveView + +## 1.8.0-rc.4 (2025-07-14) + +### Bug Fixes + - Fix phx.gen.presence PubSub server name for umbrella apps + - Fix `phx.gen.live` subscribing to pubsub in disconnected mounts + +### Enhancements + - [phx.new] Initialize initial git repo when git is installed + - [phx.new] Opt-in to HEEx `:debug_tags_location` in development + - [phx.gen.live|html|json|context] Make context name optional and inflect based on schema when missing + - [phx.gen.*] Use new Ecto 3.13 `Repo.transact/2` in generators + - [phx.gen.auth] Warn when using `phx.gen.auth` without esbuild as features assume `phoenix_html.js` in bundle + - Add `security.md` guide for security best practices + - [phoenix.js] - Add fetch() support to LongPoll when XMLHTTPRequest is not available + - Optimize parameter scrubbing by precompiling patterns + +## 1.8.0-rc.3 (2025-05-07) + +### Enhancements + - [phx.gen.auth] Allow configuring the scope's assign key in phx.gen.auth + - [phx.new] Do not override theme in root layout if explicitly set + +## 1.8.0-rc.2 (2025-04-29) + +### Bug Fixes + - [phx.gen.live] Only subscribe to pubsub if connected + - [phx.gen.auth] Remove unused current_password field + - [phx.gen.auth] Use context_app for scopes to fix generated scopes in umbrella apps + +## 1.8.0-rc.1 (2025-04-16) + +### Enhancements + - [phx.new] Support PORT in dev + - [phx.gen.auth] Replace 
`utc_now/0 + truncate/1` with `utc_now/1` + - [phx.gen.auth] Make dev mailbox link more obvious + +### Bug Fixes + - [phx.new] Fix Tailwind custom variants for loading classes (#6194) + - [phx.new] Fix heroicons path for umbrella apps + - [phx.gen.auth] Fix missing index for scoped resources (#6186) + - [phx.gen.live] Fix crash when an open :show page gets a PubSub broadcast for items (#6197) + +## 1.8.0-rc.0 (2025-04-01) 🚀 + +- First release candidate! + +## v1.7 + +The CHANGELOG for v1.7 releases can be found in the [v1.7 branch](https://github.com/phoenixframework/phoenix/blob/v1.7/CHANGELOG.md). diff --git a/deps/phoenix/LICENSE.md b/deps/phoenix/LICENSE.md new file mode 100644 index 0000000..a2197e0 --- /dev/null +++ b/deps/phoenix/LICENSE.md @@ -0,0 +1,22 @@ +# MIT License + +Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/deps/phoenix/README.md b/deps/phoenix/README.md new file mode 100644 index 0000000..5fd7d09 --- /dev/null +++ b/deps/phoenix/README.md @@ -0,0 +1,98 @@ + + + + Phoenix logo + + +> Peace of mind from prototype to production. + +[![Build Status](https://github.com/phoenixframework/phoenix/workflows/CI/badge.svg)](https://github.com/phoenixframework/phoenix/actions/workflows/ci.yml) [![Hex.pm](https://img.shields.io/hexpm/v/phoenix.svg)](https://hex.pm/packages/phoenix) [![Documentation](https://img.shields.io/badge/documentation-gray)](https://hexdocs.pm/phoenix) + +## Getting started + +See the official site at . + +Install the latest version of Phoenix by following the instructions at . + +## Documentation + +API documentation is available at . + +Phoenix.js documentation is available at . + +## Contributing + +We appreciate any contribution to Phoenix. Check our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) and [CONTRIBUTING.md](CONTRIBUTING.md) guides for more information. We usually keep a list of features and bugs in the [issue tracker][4]. + +### Generating a Phoenix project from unreleased versions + +You can create a new project using the latest Phoenix source installer (the `phx.new` Mix task) with the following steps: + +1. Remove any previously installed `phx_new` archives so that Mix will pick up the local source code. This can be done with `mix archive.uninstall phx_new` or by simply deleting the file, which is usually in `~/.mix/archives/`. +2. Copy this repo via `git clone https://github.com/phoenixframework/phoenix` or by downloading it +3. 
Run the `phx.new` Mix task from within the `installer` directory, for example: + +```bash +cd phoenix/installer +mix phx.new dev_app --dev +``` + +The `--dev` flag will configure your new project's `:phoenix` dep as a relative path dependency, pointing to your local Phoenix checkout: + +```elixir +defp deps do + [{:phoenix, path: "../..", override: true}, +``` + +To create projects outside of the `installer/` directory, add the latest archive to your machine by following the instructions in [installer/README.md](https://github.com/phoenixframework/phoenix/blob/main/installer/README.md) + +### Building from source + +To build the documentation: + +```bash +npm install +MIX_ENV=docs mix docs +``` + +To build Phoenix: + +```bash +mix deps.get +mix compile +``` + +To build the Phoenix installer: + +```bash +mix deps.get +mix compile +mix archive.build +``` + +To build Phoenix.js: + +```bash +cd assets +npm install +``` + +## Important links + +* [#elixir][1] on [Libera][2] IRC +* [elixir-lang Slack channel][3] +* [Issues tracker][4] +* [Phoenix Forum (questions and proposals)][5] +* Visit Phoenix's sponsor, DockYard, for expert [Phoenix Consulting](https://dockyard.com/phoenix-consulting) + + [1]: https://web.libera.chat/?channels=#elixir + [2]: https://libera.chat/ + [3]: https://elixir-lang.slack.com/ + [4]: https://github.com/phoenixframework/phoenix/issues + [5]: https://elixirforum.com/c/phoenix-forum + +## Copyright and License + +Copyright (c) 2014, Chris McCord. + +Phoenix source code is licensed under the [MIT License](LICENSE.md). 
diff --git a/deps/phoenix/assets/js/phoenix/ajax.js b/deps/phoenix/assets/js/phoenix/ajax.js new file mode 100644 index 0000000..4393e09 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/ajax.js @@ -0,0 +1,116 @@ +import { + global, + XHR_STATES +} from "./constants" + +export default class Ajax { + + static request(method, endPoint, headers, body, timeout, ontimeout, callback){ + if(global.XDomainRequest){ + let req = new global.XDomainRequest() // IE8, IE9 + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) + } else if(global.XMLHttpRequest){ + let req = new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari + return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback) + } else if(global.fetch && global.AbortController){ + // Fetch with AbortController for modern browsers + return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback) + } else { + throw new Error("No suitable XMLHttpRequest implementation found") + } + } + + static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback){ + let options = { + method, + headers, + body, + } + let controller = null + if(timeout){ + controller = new AbortController() + const _timeoutId = setTimeout(() => controller.abort(), timeout) + options.signal = controller.signal + } + global.fetch(endPoint, options) + .then(response => response.text()) + .then(data => this.parseJSON(data)) + .then(data => callback && callback(data)) + .catch(err => { + if(err.name === "AbortError" && ontimeout){ + ontimeout() + } else { + callback && callback(null) + } + }) + return controller + } + + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){ + req.timeout = timeout + req.open(method, endPoint) + req.onload = () => { + let response = this.parseJSON(req.responseText) + callback && callback(response) + } + if(ontimeout){ req.ontimeout = ontimeout } + + // Work around bug in IE9 that 
requires an attached onprogress handler + req.onprogress = () => { } + + req.send(body) + return req + } + + static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback){ + req.open(method, endPoint, true) + req.timeout = timeout + for(let [key, value] of Object.entries(headers)){ + req.setRequestHeader(key, value) + } + req.onerror = () => callback && callback(null) + req.onreadystatechange = () => { + if(req.readyState === XHR_STATES.complete && callback){ + let response = this.parseJSON(req.responseText) + callback(response) + } + } + if(ontimeout){ req.ontimeout = ontimeout } + + req.send(body) + return req + } + + static parseJSON(resp){ + if(!resp || resp === ""){ return null } + + try { + return JSON.parse(resp) + } catch { + console && console.log("failed to parse JSON response", resp) + return null + } + } + + static serialize(obj, parentKey){ + let queryStr = [] + for(var key in obj){ + if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue } + let paramKey = parentKey ? `${parentKey}[${key}]` : key + let paramVal = obj[key] + if(typeof paramVal === "object"){ + queryStr.push(this.serialize(paramVal, paramKey)) + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)) + } + } + return queryStr.join("&") + } + + static appendParams(url, params){ + if(Object.keys(params).length === 0){ return url } + + let prefix = url.match(/\?/) ? "&" : "?" 
+ return `${url}${prefix}${this.serialize(params)}` + } +} diff --git a/deps/phoenix/assets/js/phoenix/channel.js b/deps/phoenix/assets/js/phoenix/channel.js new file mode 100644 index 0000000..76bcb33 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/channel.js @@ -0,0 +1,311 @@ +import {closure} from "./utils" +import { + CHANNEL_EVENTS, + CHANNEL_STATES, +} from "./constants" + +import Push from "./push" +import Timer from "./timer" + +/** + * + * @param {string} topic + * @param {(Object|function)} params + * @param {Socket} socket + */ +export default class Channel { + constructor(topic, params, socket){ + this.state = CHANNEL_STATES.closed + this.topic = topic + this.params = closure(params || {}) + this.socket = socket + this.bindings = [] + this.bindingRef = 0 + this.timeout = this.socket.timeout + this.joinedOnce = false + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout) + this.pushBuffer = [] + this.stateChangeRefs = [] + + this.rejoinTimer = new Timer(() => { + if(this.socket.isConnected()){ this.rejoin() } + }, this.socket.rejoinAfterMs) + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())) + this.stateChangeRefs.push(this.socket.onOpen(() => { + this.rejoinTimer.reset() + if(this.isErrored()){ this.rejoin() } + }) + ) + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined + this.rejoinTimer.reset() + this.pushBuffer.forEach(pushEvent => pushEvent.send()) + this.pushBuffer = [] + }) + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.onClose(() => { + this.rejoinTimer.reset() + if(this.socket.hasLogger()) this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`) + this.state = CHANNEL_STATES.closed + this.socket.remove(this) + }) + this.onError(reason => { + if(this.socket.hasLogger()) this.socket.log("channel", `error ${this.topic}`, reason) + 
if(this.isJoining()){ this.joinPush.reset() } + this.state = CHANNEL_STATES.errored + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.joinPush.receive("timeout", () => { + if(this.socket.hasLogger()) this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout) + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout) + leavePush.send() + this.state = CHANNEL_STATES.errored + this.joinPush.reset() + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload) + }) + } + + /** + * Join the channel + * @param {integer} timeout + * @returns {Push} + */ + join(timeout = this.timeout){ + if(this.joinedOnce){ + throw new Error("tried to join multiple times. 'join' can only be called a single time per channel instance") + } else { + this.timeout = timeout + this.joinedOnce = true + this.rejoin() + return this.joinPush + } + } + + /** + * Hook into channel close + * @param {Function} callback + */ + onClose(callback){ + this.on(CHANNEL_EVENTS.close, callback) + } + + /** + * Hook into channel errors + * @param {Function} callback + */ + onError(callback){ + return this.on(CHANNEL_EVENTS.error, reason => callback(reason)) + } + + /** + * Subscribes on channel events + * + * Subscription returns a ref counter, which can be used later to + * unsubscribe the exact event listener + * + * @example + * const ref1 = channel.on("event", do_stuff) + * const ref2 = channel.on("event", do_other_stuff) + * channel.off("event", ref1) + * // Since unsubscription, do_stuff won't fire, + * // while do_other_stuff will keep firing on the "event" + * + * @param {string} event + * @param {Function} callback + * @returns {integer} ref + */ + on(event, callback){ + let ref = this.bindingRef++ + this.bindings.push({event, ref, callback}) + return ref + } + + /** + * Unsubscribes off of channel 
events + * + * Use the ref returned from a channel.on() to unsubscribe one + * handler, or pass nothing for the ref to unsubscribe all + * handlers for the given event. + * + * @example + * // Unsubscribe the do_stuff handler + * const ref1 = channel.on("event", do_stuff) + * channel.off("event", ref1) + * + * // Unsubscribe all handlers from event + * channel.off("event") + * + * @param {string} event + * @param {integer} ref + */ + off(event, ref){ + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)) + }) + } + + /** + * @private + */ + canPush(){ return this.socket.isConnected() && this.isJoined() } + + /** + * Sends a message `event` to phoenix with the payload `payload`. + * Phoenix receives this in the `handle_in(event, payload, socket)` + * function. if phoenix replies or it times out (default 10000ms), + * then optionally the reply can be received. + * + * @example + * channel.push("event") + * .receive("ok", payload => console.log("phoenix replied:", payload)) + * .receive("error", err => console.log("phoenix errored", err)) + * .receive("timeout", () => console.log("timed out pushing")) + * @param {string} event + * @param {Object} payload + * @param {number} [timeout] + * @returns {Push} + */ + push(event, payload, timeout = this.timeout){ + payload = payload || {} + if(!this.joinedOnce){ + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`) + } + let pushEvent = new Push(this, event, function (){ return payload }, timeout) + if(this.canPush()){ + pushEvent.send() + } else { + pushEvent.startTimeout() + this.pushBuffer.push(pushEvent) + } + + return pushEvent + } + + /** Leaves the channel + * + * Unsubscribes from server events, and + * instructs channel to terminate on server + * + * Triggers onClose() hooks + * + * To receive leave acknowledgements, use the `receive` + * hook to bind to the server ack, ie: + * + * @example + * channel.leave().receive("ok", () => alert("left!") ) + * + * @param {integer} timeout + * @returns {Push} + */ + leave(timeout = this.timeout){ + this.rejoinTimer.reset() + this.joinPush.cancelTimeout() + + this.state = CHANNEL_STATES.leaving + let onClose = () => { + if(this.socket.hasLogger()) this.socket.log("channel", `leave ${this.topic}`) + this.trigger(CHANNEL_EVENTS.close, "leave") + } + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout) + leavePush.receive("ok", () => onClose()) + .receive("timeout", () => onClose()) + leavePush.send() + if(!this.canPush()){ leavePush.trigger("ok", {}) } + + return leavePush + } + + /** + * Overridable message hook + * + * Receives all events for specialized message handling + * before dispatching to the channel callbacks. 
+ * + * Must return the payload, modified or unmodified + * @param {string} event + * @param {Object} payload + * @param {integer} ref + * @returns {Object} + */ + onMessage(_event, payload, _ref){ return payload } + + /** + * @private + */ + isMember(topic, event, payload, joinRef){ + if(this.topic !== topic){ return false } + + if(joinRef && joinRef !== this.joinRef()){ + if(this.socket.hasLogger()) this.socket.log("channel", "dropping outdated message", {topic, event, payload, joinRef}) + return false + } else { + return true + } + } + + /** + * @private + */ + joinRef(){ return this.joinPush.ref } + + /** + * @private + */ + rejoin(timeout = this.timeout){ + if(this.isLeaving()){ return } + this.socket.leaveOpenTopic(this.topic) + this.state = CHANNEL_STATES.joining + this.joinPush.resend(timeout) + } + + /** + * @private + */ + trigger(event, payload, ref, joinRef){ + let handledPayload = this.onMessage(event, payload, ref, joinRef) + if(payload && !handledPayload){ throw new Error("channel onMessage callbacks must return the payload, modified or unmodified") } + + let eventBindings = this.bindings.filter(bind => bind.event === event) + + for(let i = 0; i < eventBindings.length; i++){ + let bind = eventBindings[i] + bind.callback(handledPayload, ref, joinRef || this.joinRef()) + } + } + + /** + * @private + */ + replyEventName(ref){ return `chan_reply_${ref}` } + + /** + * @private + */ + isClosed(){ return this.state === CHANNEL_STATES.closed } + + /** + * @private + */ + isErrored(){ return this.state === CHANNEL_STATES.errored } + + /** + * @private + */ + isJoined(){ return this.state === CHANNEL_STATES.joined } + + /** + * @private + */ + isJoining(){ return this.state === CHANNEL_STATES.joining } + + /** + * @private + */ + isLeaving(){ return this.state === CHANNEL_STATES.leaving } +} diff --git a/deps/phoenix/assets/js/phoenix/constants.js b/deps/phoenix/assets/js/phoenix/constants.js new file mode 100644 index 0000000..4a367c1 --- /dev/null +++ 
b/deps/phoenix/assets/js/phoenix/constants.js @@ -0,0 +1,31 @@ +export const globalSelf = typeof self !== "undefined" ? self : null +export const phxWindow = typeof window !== "undefined" ? window : null +export const global = globalSelf || phxWindow || globalThis +export const DEFAULT_VSN = "2.0.0" +export const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3} +export const MAX_LONGPOLL_BATCH_SIZE = 100; +export const DEFAULT_TIMEOUT = 10000 +export const WS_CLOSE_NORMAL = 1000 +export const CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving", +} +export const CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +} + +export const TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +} +export const XHR_STATES = { + complete: 4 +} +export const AUTH_TOKEN_PREFIX = "base64url.bearer.phx." diff --git a/deps/phoenix/assets/js/phoenix/index.js b/deps/phoenix/assets/js/phoenix/index.js new file mode 100644 index 0000000..1c93968 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/index.js @@ -0,0 +1,207 @@ +/** + * Phoenix Channels JavaScript client + * + * ## Socket Connection + * + * A single connection is established to the server and + * channels are multiplexed over the connection. + * Connect to the server using the `Socket` class: + * + * ```javascript + * let socket = new Socket("/socket", {params: {userToken: "123"}}) + * socket.connect() + * ``` + * + * The `Socket` constructor takes the mount point of the socket, + * the authentication params, as well as options that can be found in + * the Socket docs, such as configuring the `LongPoll` transport, and + * heartbeat. + * + * ## Channels + * + * Channels are isolated, concurrent processes on the server that + * subscribe to topics and broker events between the client and server. 
+ * To join a channel, you must provide the topic, and channel params for + * authorization. Here's an example chat room example where `"new_msg"` + * events are listened for, messages are pushed to the server, and + * the channel is joined with ok/error/timeout matches: + * + * ``` + * let channel = socket.channel("room:123", {token: roomToken}) + * channel.on("new_msg", msg => console.log("Got message", msg) ) + * $input.onEnter( e => { + * channel.push("new_msg", {body: e.target.val}, 10000) + * .receive("ok", (msg) => console.log("created message", msg) ) + * .receive("error", (reasons) => console.log("create failed", reasons) ) + * .receive("timeout", () => console.log("Networking issue...") ) + * }) + * + * channel.join() + * .receive("ok", ({messages}) => console.log("catching up", messages) ) + * .receive("error", ({reason}) => console.log("failed join", reason) ) + * .receive("timeout", () => console.log("Networking issue. Still waiting...")) + *``` + * + * ## Joining + * + * Creating a channel with `socket.channel(topic, params)`, binds the params to + * `channel.params`, which are sent up on `channel.join()`. + * Subsequent rejoins will send up the modified params for + * updating authorization params, or passing up last_message_id information. + * Successful joins receive an "ok" status, while unsuccessful joins + * receive "error". + * + * With the default serializers and WebSocket transport, JSON text frames are + * used for pushing a JSON object literal. If an `ArrayBuffer` instance is provided, + * binary encoding will be used and the message will be sent with the binary + * opcode. + * + * *Note*: binary messages are only supported on the WebSocket transport. + * + * ## Duplicate Join Subscriptions + * + * While the client may join any number of topics on any number of channels, + * the client may only hold a single subscription for each unique topic at any + * given time. 
When attempting to create a duplicate subscription, + * the server will close the existing channel, log a warning, and + * spawn a new channel for the topic. The client will have their + * `channel.onClose` callbacks fired for the existing channel, and the new + * channel join will have its receive hooks processed as normal. + * + * ## Pushing Messages + * + * From the previous example, we can see that pushing messages to the server + * can be done with `channel.push(eventName, payload)` and we can optionally + * receive responses from the push. Additionally, we can use + * `receive("timeout", callback)` to abort waiting for our other `receive` hooks + * and take action after some period of waiting. The default timeout is 10000ms. + * + * + * ## Socket Hooks + * + * Lifecycle events of the multiplexed connection can be hooked into via + * `socket.onError()` and `socket.onClose()` events, ie: + * + * ``` + * socket.onError( () => console.log("there was an error with the connection!") ) + * socket.onClose( () => console.log("the connection dropped") ) + * ``` + * + * + * ## Channel Hooks + * + * For each joined channel, you can bind to `onError` and `onClose` events + * to monitor the channel lifecycle, ie: + * + * ``` + * channel.onError( () => console.log("there was an error!") ) + * channel.onClose( () => console.log("the channel has gone away gracefully") ) + * ``` + * + * ### onError hooks + * + * `onError` hooks are invoked if the socket connection drops, or the channel + * crashes on the server. In either case, a channel rejoin is attempted + * automatically in an exponential backoff manner. + * + * ### onClose hooks + * + * `onClose` hooks are invoked only in two cases. 1) the channel explicitly + * closed on the server, or 2). 
The client explicitly closed, by calling + * `channel.leave()` + * + * + * ## Presence + * + * The `Presence` object provides features for syncing presence information + * from the server with the client and handling presences joining and leaving. + * + * ### Syncing state from the server + * + * To sync presence state from the server, first instantiate an object and + * pass your channel in to track lifecycle events: + * + * ``` + * let channel = socket.channel("some:topic") + * let presence = new Presence(channel) + * ``` + * + * Next, use the `presence.onSync` callback to react to state changes + * from the server. For example, to render the list of users every time + * the list changes, you could write: + * + * ``` + * presence.onSync(() => { + * myRenderUsersFunction(presence.list()) + * }) + * ``` + * + * ### Listing Presences + * + * `presence.list` is used to return a list of presence information + * based on the local state of metadata. By default, all presence + * metadata is returned, but a `listBy` function can be supplied to + * allow the client to select which metadata to use for a given presence. + * For example, you may have a user online from different devices with + * a metadata status of "online", but they have set themselves to "away" + * on another device. In this case, the app may choose to use the "away" + * status for what appears on the UI. The example below defines a `listBy` + * function which prioritizes the first metadata which was registered for + * each user. 
This could be the first tab they opened, or the first device + * they came online from: + * + * ``` + * let listBy = (id, {metas: [first, ...rest]}) => { + * first.count = rest.length + 1 // count of this user's presences + * first.id = id + * return first + * } + * let onlineUsers = presence.list(listBy) + * ``` + * + * ### Handling individual presence join and leave events + * + * The `presence.onJoin` and `presence.onLeave` callbacks can be used to + * react to individual presences joining and leaving the app. For example: + * + * ``` + * let presence = new Presence(channel) + * + * // detect if user has joined for the 1st time or from another tab/device + * presence.onJoin((id, current, newPres) => { + * if(!current){ + * console.log("user has entered for the first time", newPres) + * } else { + * console.log("user additional presence", newPres) + * } + * }) + * + * // detect if user has left from all tabs/devices, or is still present + * presence.onLeave((id, current, leftPres) => { + * if(current.metas.length === 0){ + * console.log("user has left from all devices", leftPres) + * } else { + * console.log("user left from a device", leftPres) + * } + * }) + * // receive presence data from server + * presence.onSync(() => { + * displayUsers(presence.list()) + * }) + * ``` + * @module phoenix + */ + +import Channel from "./channel" +import LongPoll from "./longpoll" +import Presence from "./presence" +import Serializer from "./serializer" +import Socket from "./socket" + +export { + Channel, + LongPoll, + Presence, + Serializer, + Socket +} diff --git a/deps/phoenix/assets/js/phoenix/longpoll.js b/deps/phoenix/assets/js/phoenix/longpoll.js new file mode 100644 index 0000000..517bf0f --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/longpoll.js @@ -0,0 +1,199 @@ +import { + SOCKET_STATES, + TRANSPORTS, + AUTH_TOKEN_PREFIX, + MAX_LONGPOLL_BATCH_SIZE +} from "./constants" + +import Ajax from "./ajax" + +let arrayBufferToBase64 = (buffer) => { + let binary = "" + 
let bytes = new Uint8Array(buffer) + let len = bytes.byteLength + for(let i = 0; i < len; i++){ binary += String.fromCharCode(bytes[i]) } + return btoa(binary) +} + +export default class LongPoll { + + constructor(endPoint, protocols){ + // we only support subprotocols for authToken + // ["phoenix", "base64url.bearer.phx.BASE64_ENCODED_TOKEN"] + if(protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)){ + this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length)) + } + this.endPoint = null + this.token = null + this.skipHeartbeat = true + this.reqs = new Set() + this.awaitingBatchAck = false + this.currentBatch = null + this.currentBatchTimer = null + this.batchBuffer = [] + this.onopen = function (){ } // noop + this.onerror = function (){ } // noop + this.onmessage = function (){ } // noop + this.onclose = function (){ } // noop + this.pollEndpoint = this.normalizeEndpoint(endPoint) + this.readyState = SOCKET_STATES.connecting + // we must wait for the caller to finish setting up our callbacks and timeout properties + setTimeout(() => this.poll(), 0) + } + + normalizeEndpoint(endPoint){ + return (endPoint + .replace("ws://", "http://") + .replace("wss://", "https://") + .replace(new RegExp("(.*)\/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll)) + } + + endpointURL(){ + return Ajax.appendParams(this.pollEndpoint, {token: this.token}) + } + + closeAndRetry(code, reason, wasClean){ + this.close(code, reason, wasClean) + this.readyState = SOCKET_STATES.connecting + } + + ontimeout(){ + this.onerror("timeout") + this.closeAndRetry(1005, "timeout", false) + } + + isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting } + + poll(){ + const headers = {"Accept": "application/json"} + if(this.authToken){ + headers["X-Phoenix-AuthToken"] = this.authToken + } + this.ajax("GET", headers, null, () => this.ontimeout(), resp => { + if(resp){ + var {status, token, messages} = resp + 
if(status === 410 && this.token !== null){ + // In case we already have a token, this means that our existing session + // is gone. We fail so that the client rejoins its channels. + this.onerror(410) + this.closeAndRetry(3410, "session_gone", false) + return + } + this.token = token + } else { + status = 0 + } + + switch(status){ + case 200: + messages.forEach(msg => { + // Tasks are what things like event handlers, setTimeout callbacks, + // promise resolves and more are run within. + // In modern browsers, there are two different kinds of tasks, + // microtasks and macrotasks. + // Microtasks are mainly used for Promises, while macrotasks are + // used for everything else. + // Microtasks always have priority over macrotasks. If the JS engine + // is looking for a task to run, it will always try to empty the + // microtask queue before attempting to run anything from the + // macrotask queue. + // + // For the WebSocket transport, messages always arrive in their own + // event. This means that if any promises are resolved from within, + // their callbacks will always finish execution by the time the + // next message event handler is run. + // + // In order to emulate this behaviour, we need to make sure each + // onmessage handler is run within its own macrotask. 
+ setTimeout(() => this.onmessage({data: msg}), 0) + }) + this.poll() + break + case 204: + this.poll() + break + case 410: + this.readyState = SOCKET_STATES.open + this.onopen({}) + this.poll() + break + case 403: + this.onerror(403) + this.close(1008, "forbidden", false) + break + case 0: + case 500: + this.onerror(500) + this.closeAndRetry(1011, "internal server error", 500) + break + default: throw new Error(`unhandled poll status ${status}`) + } + }) + } + + // we collect all pushes within the current event loop by + // setTimeout 0, which optimizes back-to-back procedural + // pushes against an empty buffer + + send(body){ + if(typeof(body) !== "string"){ body = arrayBufferToBase64(body) } + if(this.currentBatch){ + this.currentBatch.push(body) + } else if(this.awaitingBatchAck){ + this.batchBuffer.push(body) + } else { + this.currentBatch = [body] + this.currentBatchTimer = setTimeout(() => { + this.batchSend(this.currentBatch) + this.currentBatch = null + }, 0) + } + } + + batchSend(messages, offset = 0){ + this.awaitingBatchAck = true + const next = offset + MAX_LONGPOLL_BATCH_SIZE + const batch = messages.slice(offset, next) + this.ajax("POST", {"Content-Type": "application/x-ndjson"}, batch.join("\n"), () => this.onerror("timeout"), resp => { + if(!resp || resp.status !== 200){ + this.awaitingBatchAck = false + this.onerror(resp && resp.status) + this.closeAndRetry(1011, "internal server error", false) + } else if(next < messages.length){ + this.batchSend(messages, next) + } else if(this.batchBuffer.length > 0){ + this.batchSend(this.batchBuffer) + this.batchBuffer = [] + } else { + this.awaitingBatchAck = false + } + }) + } + + close(code, reason, wasClean){ + for(let req of this.reqs){ req.abort() } + this.readyState = SOCKET_STATES.closed + let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean}) + this.batchBuffer = [] + clearTimeout(this.currentBatchTimer) + this.currentBatchTimer = null + 
if(typeof(CloseEvent) !== "undefined"){ + this.onclose(new CloseEvent("close", opts)) + } else { + this.onclose(opts) + } + } + + ajax(method, headers, body, onCallerTimeout, callback){ + let req + let ontimeout = () => { + this.reqs.delete(req) + onCallerTimeout() + } + req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, resp => { + this.reqs.delete(req) + if(this.isActive()){ callback(resp) } + }) + this.reqs.add(req) + } +} diff --git a/deps/phoenix/assets/js/phoenix/presence.js b/deps/phoenix/assets/js/phoenix/presence.js new file mode 100644 index 0000000..cfb5af6 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/presence.js @@ -0,0 +1,162 @@ +/** + * Initializes the Presence + * @param {Channel} channel - The Channel + * @param {Object} opts - The options, + * for example `{events: {state: "state", diff: "diff"}}` + */ +export default class Presence { + + constructor(channel, opts = {}){ + let events = opts.events || {state: "presence_state", diff: "presence_diff"} + this.state = {} + this.pendingDiffs = [] + this.channel = channel + this.joinRef = null + this.caller = { + onJoin: function (){ }, + onLeave: function (){ }, + onSync: function (){ } + } + + this.channel.on(events.state, newState => { + let {onJoin, onLeave, onSync} = this.caller + + this.joinRef = this.channel.joinRef() + this.state = Presence.syncState(this.state, newState, onJoin, onLeave) + + this.pendingDiffs.forEach(diff => { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave) + }) + this.pendingDiffs = [] + onSync() + }) + + this.channel.on(events.diff, diff => { + let {onJoin, onLeave, onSync} = this.caller + + if(this.inPendingSyncState()){ + this.pendingDiffs.push(diff) + } else { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave) + onSync() + } + }) + } + + onJoin(callback){ this.caller.onJoin = callback } + + onLeave(callback){ this.caller.onLeave = callback } + + onSync(callback){ this.caller.onSync = callback } + 
+ list(by){ return Presence.list(this.state, by) } + + inPendingSyncState(){ + return !this.joinRef || (this.joinRef !== this.channel.joinRef()) + } + + // lower-level public static API + + /** + * Used to sync the list of presences on the server + * with the client's state. An optional `onJoin` and `onLeave` callback can + * be provided to react to changes in the client's local presences across + * disconnects and reconnects with the server. + * + * @returns {Presence} + */ + static syncState(currentState, newState, onJoin, onLeave){ + let state = this.clone(currentState) + let joins = {} + let leaves = {} + + this.map(state, (key, presence) => { + if(!newState[key]){ + leaves[key] = presence + } + }) + this.map(newState, (key, newPresence) => { + let currentPresence = state[key] + if(currentPresence){ + let newRefs = newPresence.metas.map(m => m.phx_ref) + let curRefs = currentPresence.metas.map(m => m.phx_ref) + let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0) + let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0) + if(joinedMetas.length > 0){ + joins[key] = newPresence + joins[key].metas = joinedMetas + } + if(leftMetas.length > 0){ + leaves[key] = this.clone(currentPresence) + leaves[key].metas = leftMetas + } + } else { + joins[key] = newPresence + } + }) + return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave) + } + + /** + * + * Used to sync a diff of presence join and leave + * events from the server, as they happen. Like `syncState`, `syncDiff` + * accepts optional `onJoin` and `onLeave` callbacks to react to a user + * joining or leaving from a device. 
+ * + * @returns {Presence} + */ + static syncDiff(state, diff, onJoin, onLeave){ + let {joins, leaves} = this.clone(diff) + if(!onJoin){ onJoin = function (){ } } + if(!onLeave){ onLeave = function (){ } } + + this.map(joins, (key, newPresence) => { + let currentPresence = state[key] + state[key] = this.clone(newPresence) + if(currentPresence){ + let joinedRefs = state[key].metas.map(m => m.phx_ref) + let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0) + state[key].metas.unshift(...curMetas) + } + onJoin(key, currentPresence, newPresence) + }) + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key] + if(!currentPresence){ return } + let refsToRemove = leftPresence.metas.map(m => m.phx_ref) + currentPresence.metas = currentPresence.metas.filter(p => { + return refsToRemove.indexOf(p.phx_ref) < 0 + }) + onLeave(key, currentPresence, leftPresence) + if(currentPresence.metas.length === 0){ + delete state[key] + } + }) + return state + } + + /** + * Returns the array of presences, with selected metadata. 
+ * + * @param {Object} presences + * @param {Function} chooser + * + * @returns {Presence} + */ + static list(presences, chooser){ + if(!chooser){ chooser = function (key, pres){ return pres } } + + return this.map(presences, (key, presence) => { + return chooser(key, presence) + }) + } + + // private + + static map(obj, func){ + return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key])) + } + + static clone(obj){ return JSON.parse(JSON.stringify(obj)) } +} diff --git a/deps/phoenix/assets/js/phoenix/push.js b/deps/phoenix/assets/js/phoenix/push.js new file mode 100644 index 0000000..2e497a2 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/push.js @@ -0,0 +1,128 @@ +/** + * Initializes the Push + * @param {Channel} channel - The Channel + * @param {string} event - The event, for example `"phx_join"` + * @param {Object} payload - The payload, for example `{user_id: 123}` + * @param {number} timeout - The push timeout in milliseconds + */ +export default class Push { + constructor(channel, event, payload, timeout){ + this.channel = channel + this.event = event + this.payload = payload || function (){ return {} } + this.receivedResp = null + this.timeout = timeout + this.timeoutTimer = null + this.recHooks = [] + this.sent = false + } + + /** + * + * @param {number} timeout + */ + resend(timeout){ + this.timeout = timeout + this.reset() + this.send() + } + + /** + * + */ + send(){ + if(this.hasReceived("timeout")){ return } + this.startTimeout() + this.sent = true + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }) + } + + /** + * + * @param {*} status + * @param {*} callback + */ + receive(status, callback){ + if(this.hasReceived(status)){ + callback(this.receivedResp.response) + } + + this.recHooks.push({status, callback}) + return this + } + + /** + * @private + */ + reset(){ + this.cancelRefEvent() + this.ref = null + this.refEvent = null + 
this.receivedResp = null + this.sent = false + } + + /** + * @private + */ + matchReceive({status, response, _ref}){ + this.recHooks.filter(h => h.status === status) + .forEach(h => h.callback(response)) + } + + /** + * @private + */ + cancelRefEvent(){ + if(!this.refEvent){ return } + this.channel.off(this.refEvent) + } + + /** + * @private + */ + cancelTimeout(){ + clearTimeout(this.timeoutTimer) + this.timeoutTimer = null + } + + /** + * @private + */ + startTimeout(){ + if(this.timeoutTimer){ this.cancelTimeout() } + this.ref = this.channel.socket.makeRef() + this.refEvent = this.channel.replyEventName(this.ref) + + this.channel.on(this.refEvent, payload => { + this.cancelRefEvent() + this.cancelTimeout() + this.receivedResp = payload + this.matchReceive(payload) + }) + + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}) + }, this.timeout) + } + + /** + * @private + */ + hasReceived(status){ + return this.receivedResp && this.receivedResp.status === status + } + + /** + * @private + */ + trigger(status, response){ + this.channel.trigger(this.refEvent, {status, response}) + } +} diff --git a/deps/phoenix/assets/js/phoenix/serializer.js b/deps/phoenix/assets/js/phoenix/serializer.js new file mode 100644 index 0000000..b2f7867 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/serializer.js @@ -0,0 +1,130 @@ +/* The default serializer for encoding and decoding messages */ +import { + CHANNEL_EVENTS +} from "./constants" + +export default { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: {push: 0, reply: 1, broadcast: 2}, + + encode(msg, callback){ + if(msg.payload.constructor === ArrayBuffer){ + return callback(this.binaryEncode(msg)) + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload] + return callback(JSON.stringify(payload)) + } + }, + + decode(rawPayload, callback){ + if(rawPayload.constructor === ArrayBuffer){ + return callback(this.binaryDecode(rawPayload)) + } else { + let [join_ref, ref, topic, event, 
payload] = JSON.parse(rawPayload) + return callback({join_ref, ref, topic, event, payload}) + } + }, + + // private + + binaryEncode(message){ + let {join_ref, ref, event, topic, payload} = message + let encoder = new TextEncoder() + let joinRefBytes = encoder.encode(join_ref) + let refBytes = encoder.encode(ref) + let topicBytes = encoder.encode(topic) + let eventBytes = encoder.encode(event) + + this.assertFieldSize(joinRefBytes.byteLength, "join_ref") + this.assertFieldSize(refBytes.byteLength, "ref") + this.assertFieldSize(topicBytes.byteLength, "topic") + this.assertFieldSize(eventBytes.byteLength, "event") + + let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength) + let headerBytes = new Uint8Array(header) + let view = new DataView(header) + let offset = 0 + + view.setUint8(offset++, this.KINDS.push) // kind + view.setUint8(offset++, joinRefBytes.byteLength) + view.setUint8(offset++, refBytes.byteLength) + view.setUint8(offset++, topicBytes.byteLength) + view.setUint8(offset++, eventBytes.byteLength) + headerBytes.set(joinRefBytes, offset); offset += joinRefBytes.byteLength + headerBytes.set(refBytes, offset); offset += refBytes.byteLength + headerBytes.set(topicBytes, offset); offset += topicBytes.byteLength + headerBytes.set(eventBytes, offset); offset += eventBytes.byteLength + + var combined = new Uint8Array(header.byteLength + payload.byteLength) + combined.set(headerBytes, 0) + combined.set(new Uint8Array(payload), header.byteLength) + + return combined.buffer + }, + + assertFieldSize(size, name){ + if(size > 255){ + throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`) + } + }, + + binaryDecode(buffer){ + let view = new DataView(buffer) + let kind = view.getUint8(0) + let decoder = new TextDecoder() + switch(kind){ + case this.KINDS.push: return 
this.decodePush(buffer, view, decoder) + case this.KINDS.reply: return this.decodeReply(buffer, view, decoder) + case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder) + } + }, + + decodePush(buffer, view, decoder){ + let joinRefSize = view.getUint8(1) + let topicSize = view.getUint8(2) + let eventSize = view.getUint8(3) + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)) + offset = offset + joinRefSize + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data} + }, + + decodeReply(buffer, view, decoder){ + let joinRefSize = view.getUint8(1) + let refSize = view.getUint8(2) + let topicSize = view.getUint8(3) + let eventSize = view.getUint8(4) + let offset = this.HEADER_LENGTH + this.META_LENGTH + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)) + offset = offset + joinRefSize + let ref = decoder.decode(buffer.slice(offset, offset + refSize)) + offset = offset + refSize + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + let payload = {status: event, response: data} + return {join_ref: joinRef, ref: ref, topic: topic, event: CHANNEL_EVENTS.reply, payload: payload} + }, + + decodeBroadcast(buffer, view, decoder){ + let topicSize = view.getUint8(1) + let eventSize = view.getUint8(2) + let offset = this.HEADER_LENGTH + 2 + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = 
decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + + return {join_ref: null, ref: null, topic: topic, event: event, payload: data} + } +} diff --git a/deps/phoenix/assets/js/phoenix/socket.js b/deps/phoenix/assets/js/phoenix/socket.js new file mode 100644 index 0000000..204a562 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/socket.js @@ -0,0 +1,710 @@ +import { + global, + phxWindow, + CHANNEL_EVENTS, + DEFAULT_TIMEOUT, + DEFAULT_VSN, + SOCKET_STATES, + TRANSPORTS, + WS_CLOSE_NORMAL, + AUTH_TOKEN_PREFIX +} from "./constants" + +import { + closure +} from "./utils" + +import Ajax from "./ajax" +import Channel from "./channel" +import LongPoll from "./longpoll" +import Serializer from "./serializer" +import Timer from "./timer" + +/** Initializes the Socket * + * + * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim) + * + * @param {string} endPoint - The string WebSocket endpoint, ie, `"ws://example.com/socket"`, + * `"wss://example.com"` + * `"/socket"` (inherited host & protocol) + * @param {Object} [opts] - Optional configuration + * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll. + * + * Defaults to WebSocket with automatic LongPoll fallback if WebSocket is not defined. + * To fallback to LongPoll when WebSocket attempts fail, use `longPollFallbackMs: 2500`. + * + * @param {number} [opts.longPollFallbackMs] - The millisecond time to attempt the primary transport + * before falling back to the LongPoll transport. Disabled by default. + * + * @param {boolean} [opts.debug] - When true, enables debug logging. Default false. + * + * @param {Function} [opts.encode] - The function to encode outgoing messages. + * + * Defaults to JSON encoder. + * + * @param {Function} [opts.decode] - The function to decode incoming messages. 
+ * + * Defaults to JSON: + * + * ```javascript + * (payload, callback) => callback(JSON.parse(payload)) + * ``` + * + * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts. + * + * Defaults `DEFAULT_TIMEOUT` + * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message + * @param {Function} [opts.reconnectAfterMs] - The optional function that returns the + * socket reconnect interval, in milliseconds. + * + * Defaults to stepped backoff of: + * + * ```javascript + * function(tries){ + * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000 + * } + * ```` + * + * @param {Function} [opts.rejoinAfterMs] - The optional function that returns the millisec + * rejoin interval for individual channels. + * + * ```javascript + * function(tries){ + * return [1000, 2000, 5000][tries - 1] || 10000 + * } + * ```` + * + * @param {Function} [opts.logger] - The optional function for specialized logging, ie: + * + * ```javascript + * function(kind, msg, data) { + * console.log(`${kind}: ${msg}`, data) + * } + * ``` + * + * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request. + * + * Defaults to 20s (double the server long poll timer). + * + * @param {(Object|function)} [opts.params] - The optional params to pass when connecting + * @param {string} [opts.authToken] - the optional authentication token to be exposed on the server + * under the `:auth_token` connect_info key. + * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames. + * + * Defaults to "arraybuffer" + * + * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect. + * + * Defaults to DEFAULT_VSN. + * + * @param {Object} [opts.sessionStorage] - An optional Storage compatible object + * Phoenix uses sessionStorage for longpoll fallback history. Overriding the store is + * useful when Phoenix won't have access to `sessionStorage`. 
For example, This could + * happen if a site loads a cross-domain channel in an iframe. Example usage: + * + * class InMemoryStorage { + * constructor() { this.storage = {} } + * getItem(keyName) { return this.storage[keyName] || null } + * removeItem(keyName) { delete this.storage[keyName] } + * setItem(keyName, keyValue) { this.storage[keyName] = keyValue } + * } + * +*/ +export default class Socket { + constructor(endPoint, opts = {}){ + this.stateChangeCallbacks = {open: [], close: [], error: [], message: []} + this.channels = [] + this.sendBuffer = [] + this.ref = 0 + this.fallbackRef = null + this.timeout = opts.timeout || DEFAULT_TIMEOUT + this.transport = opts.transport || global.WebSocket || LongPoll + this.primaryPassedHealthCheck = false + this.longPollFallbackMs = opts.longPollFallbackMs + this.fallbackTimer = null + this.sessionStore = opts.sessionStorage || (global && global.sessionStorage) + this.establishedConnections = 0 + this.defaultEncoder = Serializer.encode.bind(Serializer) + this.defaultDecoder = Serializer.decode.bind(Serializer) + // We start with closeWasClean true to avoid the visibility change + // logic from connecting if the socket was never connected in the first place. + // transportConnect sets it to false on open. 
+ this.closeWasClean = true + this.disconnecting = false + this.binaryType = opts.binaryType || "arraybuffer" + this.connectClock = 1 + this.pageHidden = false + if(this.transport !== LongPoll){ + this.encode = opts.encode || this.defaultEncoder + this.decode = opts.decode || this.defaultDecoder + } else { + this.encode = this.defaultEncoder + this.decode = this.defaultDecoder + } + let awaitingConnectionOnPageShow = null + if(phxWindow && phxWindow.addEventListener){ + phxWindow.addEventListener("pagehide", _e => { + if(this.conn){ + this.disconnect() + awaitingConnectionOnPageShow = this.connectClock + } + }) + phxWindow.addEventListener("pageshow", _e => { + if(awaitingConnectionOnPageShow === this.connectClock){ + awaitingConnectionOnPageShow = null + this.connect() + } + }) + phxWindow.addEventListener("visibilitychange", () => { + if(document.visibilityState === "hidden"){ + this.pageHidden = true + } else { + this.pageHidden = false + // reconnect immediately + if(!this.isConnected() && !this.closeWasClean){ + this.teardown(() => this.connect()) + } + } + }) + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000 + this.rejoinAfterMs = (tries) => { + if(opts.rejoinAfterMs){ + return opts.rejoinAfterMs(tries) + } else { + return [1000, 2000, 5000][tries - 1] || 10000 + } + } + this.reconnectAfterMs = (tries) => { + if(opts.reconnectAfterMs){ + return opts.reconnectAfterMs(tries) + } else { + return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000 + } + } + this.logger = opts.logger || null + if(!this.logger && opts.debug){ + this.logger = (kind, msg, data) => { console.log(`${kind}: ${msg}`, data) } + } + this.longpollerTimeout = opts.longpollerTimeout || 20000 + this.params = closure(opts.params || {}) + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}` + this.vsn = opts.vsn || DEFAULT_VSN + this.heartbeatTimeoutTimer = null + this.heartbeatTimer = null + this.pendingHeartbeatRef = null + this.reconnectTimer = new Timer(() => { 
+ if(this.pageHidden){ + this.log("Not reconnecting as page is hidden!") + this.teardown() + return + } + this.teardown(() => this.connect()) + }, this.reconnectAfterMs) + this.authToken = opts.authToken + } + + /** + * Returns the LongPoll transport reference + */ + getLongPollTransport(){ return LongPoll } + + /** + * Disconnects and replaces the active transport + * + * @param {Function} newTransport - The new transport class to instantiate + * + */ + replaceTransport(newTransport){ + this.connectClock++ + this.closeWasClean = true + clearTimeout(this.fallbackTimer) + this.reconnectTimer.reset() + if(this.conn){ + this.conn.close() + this.conn = null + } + this.transport = newTransport + } + + /** + * Returns the socket protocol + * + * @returns {string} + */ + protocol(){ return location.protocol.match(/^https/) ? "wss" : "ws" } + + /** + * The fully qualified socket url + * + * @returns {string} + */ + endPointURL(){ + let uri = Ajax.appendParams( + Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn}) + if(uri.charAt(0) !== "/"){ return uri } + if(uri.charAt(1) === "/"){ return `${this.protocol()}:${uri}` } + + return `${this.protocol()}://${location.host}${uri}` + } + + /** + * Disconnects the socket + * + * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes. + * + * @param {Function} callback - Optional callback which is called after socket is disconnected. + * @param {integer} code - A status code for disconnection (Optional). + * @param {string} reason - A textual description of the reason to disconnect. 
(Optional) + */ + disconnect(callback, code, reason){ + this.connectClock++ + this.disconnecting = true + this.closeWasClean = true + clearTimeout(this.fallbackTimer) + this.reconnectTimer.reset() + this.teardown(() => { + this.disconnecting = false + callback && callback() + }, code, reason) + } + + /** + * + * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}` + * + * Passing params to connect is deprecated; pass them in the Socket constructor instead: + * `new Socket("/socket", {params: {user_id: userToken}})`. + */ + connect(params){ + if(params){ + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor") + this.params = closure(params) + } + if(this.conn && !this.disconnecting){ return } + if(this.longPollFallbackMs && this.transport !== LongPoll){ + this.connectWithFallback(LongPoll, this.longPollFallbackMs) + } else { + this.transportConnect() + } + } + + /** + * Logs the message. Override `this.logger` for specialized logging. noops by default + * @param {string} kind + * @param {string} msg + * @param {Object} data + */ + log(kind, msg, data){ this.logger && this.logger(kind, msg, data) } + + /** + * Returns true if a logger has been set on this socket. 
+ */ + hasLogger(){ return this.logger !== null } + + /** + * Registers callbacks for connection open events + * + * @example socket.onOpen(function(){ console.info("the socket was opened") }) + * + * @param {Function} callback + */ + onOpen(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.open.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection close events + * @param {Function} callback + */ + onClose(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.close.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection error events + * + * @example socket.onError(function(error){ alert("An error occurred") }) + * + * @param {Function} callback + */ + onError(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.error.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection message events + * @param {Function} callback + */ + onMessage(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.message.push([ref, callback]) + return ref + } + + /** + * Pings the server and invokes the callback with the RTT in milliseconds + * @param {Function} callback + * + * Returns true if the ping was pushed or false if unable to be pushed. + */ + ping(callback){ + if(!this.isConnected()){ return false } + let ref = this.makeRef() + let startTime = Date.now() + this.push({topic: "phoenix", event: "heartbeat", payload: {}, ref: ref}) + let onMsgRef = this.onMessage(msg => { + if(msg.ref === ref){ + this.off([onMsgRef]) + callback(Date.now() - startTime) + } + }) + return true + } + + /** + * @private + * + * @param {Function} + */ + transportName(transport){ + // JavaScript minification, enabled by default in production in Phoenix + // projects, renames symbols to reduce code size. + // See https://esbuild.github.io/api/#keep-names. + // This helper ensures we return the correct name for the LongPoll transport + // even after minification. 
The other common transport is WebSocket, which + // is native to browsers and does not need special handling. + switch(transport){ + case LongPoll: return "LongPoll" + default: return transport.name + } + } + + /** + * @private + */ + transportConnect(){ + this.connectClock++ + this.closeWasClean = false + let protocols = undefined + // Sec-WebSocket-Protocol based token + // (longpoll uses Authorization header instead) + if(this.authToken){ + protocols = ["phoenix", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, "")}`] + } + this.conn = new this.transport(this.endPointURL(), protocols) + this.conn.binaryType = this.binaryType + this.conn.timeout = this.longpollerTimeout + this.conn.onopen = () => this.onConnOpen() + this.conn.onerror = error => this.onConnError(error) + this.conn.onmessage = event => this.onConnMessage(event) + this.conn.onclose = event => this.onConnClose(event) + } + + getSession(key){ return this.sessionStore && this.sessionStore.getItem(key) } + + storeSession(key, val){ this.sessionStore && this.sessionStore.setItem(key, val) } + + connectWithFallback(fallbackTransport, fallbackThreshold = 2500){ + clearTimeout(this.fallbackTimer) + let established = false + let primaryTransport = true + let openRef, errorRef + let fallbackTransportName = this.transportName(fallbackTransport) + let fallback = (reason) => { + this.log("transport", `falling back to ${fallbackTransportName}...`, reason) + this.off([openRef, errorRef]) + primaryTransport = false + this.replaceTransport(fallbackTransport) + this.transportConnect() + } + if(this.getSession(`phx:fallback:${fallbackTransportName}`)){ return fallback("memorized") } + + this.fallbackTimer = setTimeout(fallback, fallbackThreshold) + + errorRef = this.onError(reason => { + this.log("transport", "error", reason) + if(primaryTransport && !established){ + clearTimeout(this.fallbackTimer) + fallback(reason) + } + }) + if(this.fallbackRef){ + this.off([this.fallbackRef]) + } + this.fallbackRef = 
this.onOpen(() => { + established = true + if(!primaryTransport){ + let fallbackTransportName = this.transportName(fallbackTransport) + // only memorize LP if we never connected to primary + if(!this.primaryPassedHealthCheck){ this.storeSession(`phx:fallback:${fallbackTransportName}`, "true") } + return this.log("transport", `established ${fallbackTransportName} fallback`) + } + // if we've established primary, give the fallback a new period to attempt ping + clearTimeout(this.fallbackTimer) + this.fallbackTimer = setTimeout(fallback, fallbackThreshold) + this.ping(rtt => { + this.log("transport", "connected to primary after", rtt) + this.primaryPassedHealthCheck = true + clearTimeout(this.fallbackTimer) + }) + }) + this.transportConnect() + } + + clearHeartbeats(){ + clearTimeout(this.heartbeatTimer) + clearTimeout(this.heartbeatTimeoutTimer) + } + + onConnOpen(){ + if(this.hasLogger()) this.log("transport", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`) + this.closeWasClean = false + this.disconnecting = false + this.establishedConnections++ + this.flushSendBuffer() + this.reconnectTimer.reset() + this.resetHeartbeat() + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()) + } + + /** + * @private + */ + + heartbeatTimeout(){ + if(this.pendingHeartbeatRef){ + this.pendingHeartbeatRef = null + if(this.hasLogger()){ this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection") } + this.triggerChanError() + this.closeWasClean = false + this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, "heartbeat timeout") + } + } + + resetHeartbeat(){ + if(this.conn && this.conn.skipHeartbeat){ return } + this.pendingHeartbeatRef = null + this.clearHeartbeats() + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs) + } + + teardown(callback, code, reason){ + if(!this.conn){ + return callback && callback() + } + + // If someone calls connect before we finish tearing down, + // we create a new connection, but we still want to finish tearing down the old one. + const connToClose = this.conn + + this.waitForBufferDone(connToClose, () => { + if(code){ connToClose.close(code, reason || "") } else { connToClose.close() } + + this.waitForSocketClosed(connToClose, () => { + if(this.conn === connToClose){ + this.conn.onopen = function (){ } // noop + this.conn.onerror = function (){ } // noop + this.conn.onmessage = function (){ } // noop + this.conn.onclose = function (){ } // noop + this.conn = null + } + + callback && callback() + }) + }) + } + + waitForBufferDone(conn, callback, tries = 1){ + if(tries === 5 || !conn.bufferedAmount){ + callback() + return + } + + setTimeout(() => { + this.waitForBufferDone(conn, callback, tries + 1) + }, 150 * tries) + } + + waitForSocketClosed(conn, callback, tries = 1){ + if(tries === 5 || conn.readyState === SOCKET_STATES.closed){ + callback() + return + } + + setTimeout(() => { + this.waitForSocketClosed(conn, callback, tries + 1) + }, 150 * tries) + } + + onConnClose(event){ + if(this.conn) this.conn.onclose = () => {} // noop to prevent recursive calls in teardown + let closeCode = event && event.code + if(this.hasLogger()) this.log("transport", "close", event) + this.triggerChanError() + this.clearHeartbeats() + if(!this.closeWasClean && closeCode !== 1000){ + this.reconnectTimer.scheduleTimeout() + } + 
this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)) + } + + /** + * @private + */ + onConnError(error){ + if(this.hasLogger()) this.log("transport", "error", error) + let transportBefore = this.transport + let establishedBefore = this.establishedConnections + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore) + }) + if(transportBefore === this.transport || establishedBefore > 0){ + this.triggerChanError() + } + } + + /** + * @private + */ + triggerChanError(){ + this.channels.forEach(channel => { + if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){ + channel.trigger(CHANNEL_EVENTS.error) + } + }) + } + + /** + * @returns {string} + */ + connectionState(){ + switch(this.conn && this.conn.readyState){ + case SOCKET_STATES.connecting: return "connecting" + case SOCKET_STATES.open: return "open" + case SOCKET_STATES.closing: return "closing" + default: return "closed" + } + } + + /** + * @returns {boolean} + */ + isConnected(){ return this.connectionState() === "open" } + + /** + * @private + * + * @param {Channel} + */ + remove(channel){ + this.off(channel.stateChangeRefs) + this.channels = this.channels.filter(c => c !== channel) + } + + /** + * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations. 
+ * + * @param {refs} - list of refs returned by calls to + * `onOpen`, `onClose`, `onError,` and `onMessage` + */ + off(refs){ + for(let key in this.stateChangeCallbacks){ + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1 + }) + } + } + + /** + * Initiates a new channel for the given topic + * + * @param {string} topic + * @param {Object} chanParams - Parameters for the channel + * @returns {Channel} + */ + channel(topic, chanParams = {}){ + let chan = new Channel(topic, chanParams, this) + this.channels.push(chan) + return chan + } + + /** + * @param {Object} data + */ + push(data){ + if(this.hasLogger()){ + let {topic, event, payload, ref, join_ref} = data + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload) + } + + if(this.isConnected()){ + this.encode(data, result => this.conn.send(result)) + } else { + this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result))) + } + } + + /** + * Return the next message ref, accounting for overflows + * @returns {string} + */ + makeRef(){ + let newRef = this.ref + 1 + if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef } + + return this.ref.toString() + } + + sendHeartbeat(){ + if(this.pendingHeartbeatRef && !this.isConnected()){ return } + this.pendingHeartbeatRef = this.makeRef() + this.push({topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef}) + this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs) + } + + flushSendBuffer(){ + if(this.isConnected() && this.sendBuffer.length > 0){ + this.sendBuffer.forEach(callback => callback()) + this.sendBuffer = [] + } + } + + onConnMessage(rawMessage){ + this.decode(rawMessage.data, msg => { + let {topic, event, payload, ref, join_ref} = msg + if(ref && ref === this.pendingHeartbeatRef){ + this.clearHeartbeats() + this.pendingHeartbeatRef = null + this.heartbeatTimer = setTimeout(() => 
this.sendHeartbeat(), this.heartbeatIntervalMs) + } + + if(this.hasLogger()) this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload) + + for(let i = 0; i < this.channels.length; i++){ + const channel = this.channels[i] + if(!channel.isMember(topic, event, payload, join_ref)){ continue } + channel.trigger(event, payload, ref, join_ref) + } + + for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){ + let [, callback] = this.stateChangeCallbacks.message[i] + callback(msg) + } + }) + } + + leaveOpenTopic(topic){ + let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining())) + if(dupChannel){ + if(this.hasLogger()) this.log("transport", `leaving duplicate topic "${topic}"`) + dupChannel.leave() + } + } +} diff --git a/deps/phoenix/assets/js/phoenix/timer.js b/deps/phoenix/assets/js/phoenix/timer.js new file mode 100644 index 0000000..5784ce5 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/timer.js @@ -0,0 +1,42 @@ +/** + * + * Creates a timer that accepts a `timerCalc` function to perform + * calculated timeout retries, such as exponential backoff. 
+ * + * @example + * let reconnectTimer = new Timer(() => this.connect(), function(tries){ + * return [1000, 5000, 10000][tries - 1] || 10000 + * }) + * reconnectTimer.scheduleTimeout() // fires after 1000 + * reconnectTimer.scheduleTimeout() // fires after 5000 + * reconnectTimer.reset() + * reconnectTimer.scheduleTimeout() // fires after 1000 + * + * @param {Function} callback + * @param {Function} timerCalc + */ +export default class Timer { + constructor(callback, timerCalc){ + this.callback = callback + this.timerCalc = timerCalc + this.timer = null + this.tries = 0 + } + + reset(){ + this.tries = 0 + clearTimeout(this.timer) + } + + /** + * Cancels any previous scheduleTimeout and schedules callback + */ + scheduleTimeout(){ + clearTimeout(this.timer) + + this.timer = setTimeout(() => { + this.tries = this.tries + 1 + this.callback() + }, this.timerCalc(this.tries + 1)) + } +} diff --git a/deps/phoenix/assets/js/phoenix/utils.js b/deps/phoenix/assets/js/phoenix/utils.js new file mode 100644 index 0000000..b3a701a --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/utils.js @@ -0,0 +1,9 @@ +// wraps value in closure or returns closure +export let closure = (value) => { + if(typeof value === "function"){ + return value + } else { + let closure = function (){ return value } + return closure + } +} diff --git a/deps/phoenix/hex_metadata.config b/deps/phoenix/hex_metadata.config new file mode 100644 index 0000000..3330a23 --- /dev/null +++ b/deps/phoenix/hex_metadata.config @@ -0,0 +1,227 @@ +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/phoenix/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/phoenixframework/phoenix">>}]}. +{<<"name">>,<<"phoenix">>}. +{<<"version">>,<<"1.8.7">>}. +{<<"description">>,<<"Peace of mind from prototype to production">>}. +{<<"elixir">>,<<"~> 1.15">>}. +{<<"app">>,<<"phoenix">>}. +{<<"licenses">>,[<<"MIT">>]}. 
+{<<"files">>, + [<<"assets/js">>,<<"assets/js/phoenix">>,<<"assets/js/phoenix/channel.js">>, + <<"assets/js/phoenix/presence.js">>,<<"assets/js/phoenix/push.js">>, + <<"assets/js/phoenix/timer.js">>,<<"assets/js/phoenix/utils.js">>, + <<"assets/js/phoenix/ajax.js">>,<<"assets/js/phoenix/constants.js">>, + <<"assets/js/phoenix/index.js">>,<<"assets/js/phoenix/longpoll.js">>, + <<"assets/js/phoenix/serializer.js">>,<<"assets/js/phoenix/socket.js">>, + <<"lib">>,<<"lib/mix">>,<<"lib/mix/tasks">>,<<"lib/mix/tasks/phx.ex">>, + <<"lib/mix/tasks/phx.gen.auth">>, + <<"lib/mix/tasks/phx.gen.auth/migration.ex">>, + <<"lib/mix/tasks/phx.gen.auth/hashing_library.ex">>, + <<"lib/mix/tasks/phx.gen.auth/injector.ex">>, + <<"lib/mix/tasks/phx.gen.secret.ex">>, + <<"lib/mix/tasks/compile.phoenix.ex">>, + <<"lib/mix/tasks/phx.digest.clean.ex">>,<<"lib/mix/tasks/phx.digest.ex">>, + <<"lib/mix/tasks/phx.gen.auth.ex">>,<<"lib/mix/tasks/phx.gen.cert.ex">>, + <<"lib/mix/tasks/phx.gen.channel.ex">>, + <<"lib/mix/tasks/phx.gen.context.ex">>, + <<"lib/mix/tasks/phx.gen.embedded.ex">>,<<"lib/mix/tasks/phx.gen.ex">>, + <<"lib/mix/tasks/phx.gen.html.ex">>,<<"lib/mix/tasks/phx.gen.json.ex">>, + <<"lib/mix/tasks/phx.gen.live.ex">>,<<"lib/mix/tasks/phx.gen.notifier.ex">>, + <<"lib/mix/tasks/phx.gen.presence.ex">>, + <<"lib/mix/tasks/phx.gen.release.ex">>, + <<"lib/mix/tasks/phx.gen.schema.ex">>,<<"lib/mix/tasks/phx.gen.socket.ex">>, + <<"lib/mix/tasks/phx.routes.ex">>,<<"lib/mix/tasks/phx.server.ex">>, + <<"lib/mix/phoenix">>,<<"lib/mix/phoenix/context.ex">>, + <<"lib/mix/phoenix/schema.ex">>,<<"lib/mix/phoenix/scope.ex">>, + <<"lib/mix/phoenix.ex">>,<<"lib/phoenix">>,<<"lib/phoenix/channel">>, + <<"lib/phoenix/channel/server.ex">>,<<"lib/phoenix/code_reloader">>, + <<"lib/phoenix/code_reloader/proxy.ex">>, + <<"lib/phoenix/code_reloader/mix_listener.ex">>, + <<"lib/phoenix/code_reloader/server.ex">>,<<"lib/phoenix/controller">>, + 
<<"lib/phoenix/controller/pipeline.ex">>,<<"lib/phoenix/endpoint">>, + <<"lib/phoenix/endpoint/cowboy2_adapter.ex">>, + <<"lib/phoenix/endpoint/watcher.ex">>, + <<"lib/phoenix/endpoint/render_errors.ex">>, + <<"lib/phoenix/endpoint/supervisor.ex">>, + <<"lib/phoenix/endpoint/sync_code_reload_plug.ex">>, + <<"lib/phoenix/router">>,<<"lib/phoenix/router/helpers.ex">>, + <<"lib/phoenix/router/resource.ex">>, + <<"lib/phoenix/router/console_formatter.ex">>, + <<"lib/phoenix/router/route.ex">>,<<"lib/phoenix/router/scope.ex">>, + <<"lib/phoenix/socket">>,<<"lib/phoenix/socket/serializer.ex">>, + <<"lib/phoenix/socket/serializers">>, + <<"lib/phoenix/socket/serializers/v1_json_serializer.ex">>, + <<"lib/phoenix/socket/serializers/v2_json_serializer.ex">>, + <<"lib/phoenix/socket/message.ex">>, + <<"lib/phoenix/socket/pool_supervisor.ex">>, + <<"lib/phoenix/socket/transport.ex">>,<<"lib/phoenix/test">>, + <<"lib/phoenix/test/conn_test.ex">>,<<"lib/phoenix/test/channel_test.ex">>, + <<"lib/phoenix/transports">>,<<"lib/phoenix/transports/long_poll.ex">>, + <<"lib/phoenix/transports/long_poll_server.ex">>, + <<"lib/phoenix/transports/websocket.ex">>,<<"lib/phoenix/digester">>, + <<"lib/phoenix/digester/compressor.ex">>,<<"lib/phoenix/digester/gzip.ex">>, + <<"lib/phoenix/exceptions.ex">>,<<"lib/phoenix/channel.ex">>, + <<"lib/phoenix/code_reloader.ex">>,<<"lib/phoenix/config.ex">>, + <<"lib/phoenix/controller.ex">>,<<"lib/phoenix/debug.ex">>, + <<"lib/phoenix/digester.ex">>,<<"lib/phoenix/endpoint.ex">>, + <<"lib/phoenix/flash.ex">>,<<"lib/phoenix/logger.ex">>, + <<"lib/phoenix/naming.ex">>,<<"lib/phoenix/param.ex">>, + <<"lib/phoenix/presence.ex">>,<<"lib/phoenix/router.ex">>, + <<"lib/phoenix/socket.ex">>,<<"lib/phoenix/token.ex">>, + <<"lib/phoenix/verified_routes.ex">>,<<"lib/phoenix.ex">>,<<"priv">>, + <<"priv/static">>,<<"priv/static/phoenix.png">>, + <<"priv/static/favicon.ico">>,<<"priv/static/phoenix-orange.png">>, + 
<<"priv/static/phoenix.cjs.js">>,<<"priv/static/phoenix.cjs.js.map">>, + <<"priv/static/phoenix.js">>,<<"priv/static/phoenix.min.js">>, + <<"priv/static/phoenix.mjs">>,<<"priv/static/phoenix.mjs.map">>, + <<"priv/templates">>,<<"priv/templates/phx.gen.live">>, + <<"priv/templates/phx.gen.live/core_components.ex.eex">>, + <<"priv/templates/phx.gen.live/form.ex.eex">>, + <<"priv/templates/phx.gen.live/index.ex.eex">>, + <<"priv/templates/phx.gen.live/live_test.exs.eex">>, + <<"priv/templates/phx.gen.live/show.ex.eex">>, + <<"priv/templates/phx.gen.release">>, + <<"priv/templates/phx.gen.release/rel">>, + <<"priv/templates/phx.gen.release/rel/migrate.bat.eex">>, + <<"priv/templates/phx.gen.release/rel/migrate.sh.eex">>, + <<"priv/templates/phx.gen.release/rel/server.bat.eex">>, + <<"priv/templates/phx.gen.release/rel/server.sh.eex">>, + <<"priv/templates/phx.gen.release/Dockerfile.eex">>, + <<"priv/templates/phx.gen.release/dockerignore.eex">>, + <<"priv/templates/phx.gen.release/release.ex.eex">>, + <<"priv/templates/phx.gen.auth">>, + <<"priv/templates/phx.gen.auth/AGENTS.md.eex">>, + <<"priv/templates/phx.gen.auth/auth.ex.eex">>, + <<"priv/templates/phx.gen.auth/auth_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/confirmation_live.ex.eex">>, + <<"priv/templates/phx.gen.auth/confirmation_live_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/conn_case.exs.eex">>, + <<"priv/templates/phx.gen.auth/context_fixtures_functions.ex.eex">>, + <<"priv/templates/phx.gen.auth/context_functions.ex.eex">>, + <<"priv/templates/phx.gen.auth/login_live.ex.eex">>, + <<"priv/templates/phx.gen.auth/login_live_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/migration.ex.eex">>, + <<"priv/templates/phx.gen.auth/notifier.ex.eex">>, + <<"priv/templates/phx.gen.auth/registration_controller.ex.eex">>, + <<"priv/templates/phx.gen.auth/registration_controller_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/registration_html.ex.eex">>, + 
<<"priv/templates/phx.gen.auth/registration_live.ex.eex">>, + <<"priv/templates/phx.gen.auth/registration_live_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/registration_new.html.heex.eex">>, + <<"priv/templates/phx.gen.auth/routes.ex.eex">>, + <<"priv/templates/phx.gen.auth/schema.ex.eex">>, + <<"priv/templates/phx.gen.auth/schema_token.ex.eex">>, + <<"priv/templates/phx.gen.auth/scope.ex.eex">>, + <<"priv/templates/phx.gen.auth/session_confirm.html.heex.eex">>, + <<"priv/templates/phx.gen.auth/session_controller.ex.eex">>, + <<"priv/templates/phx.gen.auth/session_controller_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/session_html.ex.eex">>, + <<"priv/templates/phx.gen.auth/session_new.html.heex.eex">>, + <<"priv/templates/phx.gen.auth/settings_controller.ex.eex">>, + <<"priv/templates/phx.gen.auth/settings_controller_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/settings_edit.html.heex.eex">>, + <<"priv/templates/phx.gen.auth/settings_html.ex.eex">>, + <<"priv/templates/phx.gen.auth/settings_live.ex.eex">>, + <<"priv/templates/phx.gen.auth/settings_live_test.exs.eex">>, + <<"priv/templates/phx.gen.auth/test_cases.exs.eex">>, + <<"priv/templates/phx.gen.channel">>, + <<"priv/templates/phx.gen.channel/channel.ex.eex">>, + <<"priv/templates/phx.gen.channel/channel_case.ex.eex">>, + <<"priv/templates/phx.gen.channel/channel_test.exs.eex">>, + <<"priv/templates/phx.gen.context">>, + <<"priv/templates/phx.gen.context/access_no_schema.ex.eex">>, + <<"priv/templates/phx.gen.context/access_no_schema_scope.ex.eex">>, + <<"priv/templates/phx.gen.context/context.ex.eex">>, + <<"priv/templates/phx.gen.context/context_test.exs.eex">>, + <<"priv/templates/phx.gen.context/fixtures.ex.eex">>, + <<"priv/templates/phx.gen.context/fixtures_module.ex.eex">>, + <<"priv/templates/phx.gen.context/schema_access.ex.eex">>, + <<"priv/templates/phx.gen.context/schema_access_scope.ex.eex">>, + <<"priv/templates/phx.gen.context/test_cases.exs.eex">>, + 
<<"priv/templates/phx.gen.context/test_cases_scope.exs.eex">>, + <<"priv/templates/phx.gen.embedded">>, + <<"priv/templates/phx.gen.embedded/embedded_schema.ex.eex">>, + <<"priv/templates/phx.gen.html">>, + <<"priv/templates/phx.gen.html/controller.ex.eex">>, + <<"priv/templates/phx.gen.html/controller_test.exs.eex">>, + <<"priv/templates/phx.gen.html/edit.html.heex.eex">>, + <<"priv/templates/phx.gen.html/html.ex.eex">>, + <<"priv/templates/phx.gen.html/index.html.heex.eex">>, + <<"priv/templates/phx.gen.html/new.html.heex.eex">>, + <<"priv/templates/phx.gen.html/resource_form.html.heex.eex">>, + <<"priv/templates/phx.gen.html/show.html.heex.eex">>, + <<"priv/templates/phx.gen.json">>, + <<"priv/templates/phx.gen.json/changeset_json.ex.eex">>, + <<"priv/templates/phx.gen.json/controller.ex.eex">>, + <<"priv/templates/phx.gen.json/controller_test.exs.eex">>, + <<"priv/templates/phx.gen.json/fallback_controller.ex.eex">>, + <<"priv/templates/phx.gen.json/json.ex.eex">>, + <<"priv/templates/phx.gen.notifier">>, + <<"priv/templates/phx.gen.notifier/notifier.ex.eex">>, + <<"priv/templates/phx.gen.notifier/notifier_test.exs.eex">>, + <<"priv/templates/phx.gen.presence">>, + <<"priv/templates/phx.gen.presence/presence.ex.eex">>, + <<"priv/templates/phx.gen.schema">>, + <<"priv/templates/phx.gen.schema/migration.exs.eex">>, + <<"priv/templates/phx.gen.schema/schema.ex.eex">>, + <<"priv/templates/phx.gen.socket">>, + <<"priv/templates/phx.gen.socket/socket.ex.eex">>, + <<"priv/templates/phx.gen.socket/socket.js.eex">>,<<"usage-rules">>, + <<"usage-rules/ecto.md">>,<<"usage-rules/elixir.md">>, + <<"usage-rules/html.md">>,<<"usage-rules/liveview.md">>, + <<"usage-rules/phoenix.md">>,<<"CHANGELOG.md">>,<<"LICENSE.md">>, + <<"mix.exs">>,<<"package.json">>,<<"README.md">>,<<".formatter.exs">>, + <<"installer/templates/phx_web/components/core_components.ex.eex">>]}. 
+{<<"requirements">>, + [[{<<"name">>,<<"plug">>}, + {<<"app">>,<<"plug">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.14">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"plug_crypto">>}, + {<<"app">>,<<"plug_crypto">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.2 or ~> 2.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"telemetry">>}, + {<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"phoenix_pubsub">>}, + {<<"app">>,<<"phoenix_pubsub">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 2.1">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"phoenix_template">>}, + {<<"app">>,<<"phoenix_template">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"websock_adapter">>}, + {<<"app">>,<<"websock_adapter">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 0.5.3">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"phoenix_view">>}, + {<<"app">>,<<"phoenix_view">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 2.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"plug_cowboy">>}, + {<<"app">>,<<"plug_cowboy">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 2.7">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"bandit">>}, + {<<"app">>,<<"bandit">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}], + [{<<"name">>,<<"jason">>}, + {<<"app">>,<<"jason">>}, + {<<"optional">>,true}, + {<<"requirement">>,<<"~> 1.0">>}, + {<<"repository">>,<<"hexpm">>}]]}. +{<<"build_tools">>,[<<"mix">>]}. 
diff --git a/deps/phoenix/installer/templates/phx_web/components/core_components.ex.eex b/deps/phoenix/installer/templates/phx_web/components/core_components.ex.eex new file mode 100644 index 0000000..bb643fe --- /dev/null +++ b/deps/phoenix/installer/templates/phx_web/components/core_components.ex.eex @@ -0,0 +1,520 @@ +defmodule <%= @web_namespace %>.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as tables, forms, and + inputs. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The foundation for styling is Tailwind CSS, a utility-first CSS framework, + augmented with daisyUI, a Tailwind CSS plugin that provides UI components + and themes. Here are useful references: + + * [daisyUI](https://daisyui.com/docs/intro/) - a good place to get + started and see the available components. + + * [Tailwind CSS](https://tailwindcss.com) - the foundational framework + we build on. You will use it for layout, sizing, flexbox, grid, and + spacing. + + * [Heroicons](https://heroicons.com) - see `icon/1` for usage. + + * [Phoenix.Component](https://hexdocs.pm/phoenix_live_view/Phoenix.Component.html) - + the component system used by Phoenix. Some components, such as `<.link>` + and `<.form>`, are defined there. + + """ + use Phoenix.Component<%= if @gettext do %> + use Gettext, backend: <%= @web_namespace %>.Gettext<% end %><%= if @live do %> + + alias Phoenix.LiveView.JS<% end %> + + @doc """ + Renders flash notices. + + ## Examples + + <.flash kind={:info} flash={@flash} /> + <.flash + id="welcome-back" + kind={:info} + phx-mounted={show("#welcome-back") |> JS.remove_attribute("hidden")} + hidden + > + Welcome Back! 
+ + """ + attr :id, :string, doc: "the optional id of flash container" + attr :flash, :map, default: %{}, doc: "the map of flash messages to display" + attr :title, :string, default: nil + attr :kind, :atom, values: [:info, :error], doc: "used for styling and flash lookup" + attr :rest, :global, doc: "the arbitrary HTML attributes to add to the flash container" + + slot :inner_block, doc: "the optional inner block that renders the flash message" + + def flash(assigns) do + assigns = assign_new(assigns, :id, fn -> "flash-#{assigns.kind}" end) + + ~H""" +
+ phx-click={JS.push("lv:clear-flash", value: %{key: @kind}) |> hide("##{@id}")}<% else %> + data-flash<% end %> + role="alert" + class="toast toast-top toast-end z-50" + {@rest} + > +
+ <.icon :if={@kind == :info} name="hero-information-circle" class="size-5 shrink-0" /> + <.icon :if={@kind == :error} name="hero-exclamation-circle" class="size-5 shrink-0" /> +
+

{@title}

+

{msg}

+
+
+ +
+
+ """ + end + + @doc """ + Renders a button with navigation support. + + ## Examples + + <.button>Send! + <.button phx-click="go" variant="primary">Send! + <.button navigate={~p"/"}>Home + """ + attr :rest, :global, include: ~w(href navigate patch method download name value disabled) + attr :class, :any + attr :variant, :string, values: ~w(primary) + slot :inner_block, required: true + + def button(%{rest: rest} = assigns) do + variants = %{"primary" => "btn-primary", nil => "btn-primary btn-soft"} + + assigns = + assign_new(assigns, :class, fn -> + ["btn", Map.fetch!(variants, assigns[:variant])] + end) + + if rest[:href] || rest[:navigate] || rest[:patch] do + ~H""" + <.link class={@class} {@rest}> + {render_slot(@inner_block)} + + """ + else + ~H""" + + """ + end + end + + @doc """ + Renders an input with label and error messages. + + A `Phoenix.HTML.FormField` may be passed as argument, + which is used to retrieve the input name, id, and values. + Otherwise all attributes may be passed explicitly. + + ## Types + + This function accepts all HTML input types, considering that: + + * You may also set `type="select"` to render a ` + """ + end + + def input(%{type: "checkbox"} = assigns) do + assigns = + assign_new(assigns, :checked, fn -> + Phoenix.HTML.Form.normalize_value("checkbox", assigns[:value]) + end) + + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # Helper used by inputs to generate form errors + defp error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle" class="size-5" /> + {render_slot(@inner_block)} +

+ """ + end + + @doc """ + Renders a header with title. + """ + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ {render_slot(@inner_block)} +

+

+ {render_slot(@subtitle)} +

+
+
{render_slot(@actions)}
+
+ """ + end + + @doc """ + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id">{user.id} + <:col :let={user} label="username">{user.username} + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" + + + + + + + + + + + + + +
{col[:label]} + <%= maybe_eex_gettext.("Actions", @gettext) %> +
+ {render_slot(col, @row_item.(row))} + +
+ <%%= for action <- @action do %> + {render_slot(action, @row_item.(row))} + <%% end %> +
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title">{@post.title} + <:item title="Views">{@post.views} + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
    +
  • +
    +
    {item.title}
    +
    {render_slot(item)}
    +
    +
  • +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in `assets/vendor/heroicons.js`. + + ## Examples + + <.icon name="hero-x-mark" /> + <.icon name="hero-arrow-path" class="ml-1 size-3 motion-safe:animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :any, default: "size-4" + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end<%= if @live do %> + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all ease-in duration-200", "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end<% end %> + + @doc """ + Translates an error message using gettext. + """<%= if @gettext do %> + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). 
+ if count = opts[:count] do + Gettext.dngettext(<%= @web_namespace %>.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(<%= @web_namespace %>.Gettext, "errors", msg, opts) + end + end<% else %> + def translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(<%= @web_namespace %>.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(<%= @web_namespace %>.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end<% end %> + + @doc """ + Translates the errors for a field from a keyword list of errors. + """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/deps/phoenix/lib/mix/phoenix.ex b/deps/phoenix/lib/mix/phoenix.ex new file mode 100644 index 0000000..407ad26 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix.ex @@ -0,0 +1,440 @@ +defmodule Mix.Phoenix do + # Conveniences for Phoenix tasks. + @moduledoc false + + @doc """ + Evals EEx files from source dir. + + Files are evaluated against EEx according to + the given binding. + """ + def eval_from(apps, source_file_path, binding) do + sources = Enum.map(apps, &to_app_source(&1, source_file_path)) + + content = + Enum.find_value(sources, fn source -> + File.exists?(source) && File.read!(source) + end) || raise "could not find #{source_file_path} in any of the sources" + + EEx.eval_string(content, binding) + end + + @doc """ + Copies files from source dir to target dir + according to the given map. + + Files are evaluated against EEx according to + the given binding. 
+ """ + def copy_from(apps, source_dir, binding, mapping) when is_list(mapping) do + roots = Enum.map(apps, &to_app_source(&1, source_dir)) + + binding = + Keyword.merge(binding, + maybe_heex_attr_gettext: &maybe_heex_attr_gettext/2, + maybe_eex_gettext: &maybe_eex_gettext/2 + ) + + for {format, source_file_path, target} <- mapping do + source = + Enum.find_value(roots, fn root -> + source = Path.join(root, source_file_path) + # for backwards compatibility, we also check for files with missing .eex extension + source_with_stripped_eex = String.replace_suffix(source, ".eex", "") + + cond do + File.exists?(source) -> source + File.exists?(source_with_stripped_eex) -> source_with_stripped_eex + true -> nil + end + end) || raise "could not find #{source_file_path} in any of the sources" + + case format do + :text -> + Mix.Generator.create_file(target, File.read!(source)) + + :eex -> + Mix.Generator.create_file(target, EEx.eval_file(source, binding)) + + :new_eex -> + if File.exists?(target) do + :ok + else + Mix.Generator.create_file(target, EEx.eval_file(source, binding)) + end + end + end + end + + defp to_app_source(path, source_dir) when is_binary(path), + do: Path.join(path, source_dir) + + defp to_app_source(app, source_dir) when is_atom(app), + do: Application.app_dir(app, source_dir) + + @doc """ + Inflects path, scope, alias and more from the given name. 
+ + ## Examples + + iex> Mix.Phoenix.inflect("user") + [alias: "User", + human: "User", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.User", + scoped: "User", + singular: "user", + path: "user"] + + iex> Mix.Phoenix.inflect("Admin.User") + [alias: "User", + human: "User", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.Admin.User", + scoped: "Admin.User", + singular: "user", + path: "admin/user"] + + iex> Mix.Phoenix.inflect("Admin.SuperUser") + [alias: "SuperUser", + human: "Super user", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.Admin.SuperUser", + scoped: "Admin.SuperUser", + singular: "super_user", + path: "admin/super_user"] + + """ + def inflect(singular) do + base = Mix.Phoenix.base() + web_module = base |> web_module() |> inspect() + scoped = Phoenix.Naming.camelize(singular) + path = Phoenix.Naming.underscore(scoped) + singular = String.split(path, "/") |> List.last() + module = Module.concat(base, scoped) |> inspect + alias = String.split(module, ".") |> List.last() + human = Phoenix.Naming.humanize(singular) + + [ + alias: alias, + human: human, + base: base, + web_module: web_module, + module: module, + scoped: scoped, + singular: singular, + path: path + ] + end + + @doc """ + Checks the availability of a given module name. + """ + def check_module_name_availability!(name) do + name = Module.concat(Elixir, name) + + if Code.ensure_loaded?(name) do + Mix.raise("Module name #{inspect(name)} is already taken, please choose another name") + end + end + + @doc """ + Returns the module base name based on the configuration value. + + config :my_app + namespace: My.App + + """ + def base do + app_base(otp_app()) + end + + @doc """ + Returns the context module base name based on the configuration value. 
+ + config :my_app + namespace: My.App + + """ + def context_base(ctx_app) do + app_base(ctx_app) + end + + defp app_base(app) do + case Application.get_env(app, :namespace, app) do + ^app -> app |> to_string() |> Phoenix.Naming.camelize() + mod -> mod |> inspect() + end + end + + @doc """ + Returns the OTP app from the Mix project configuration. + """ + def otp_app do + Mix.Project.config() |> Keyword.fetch!(:app) + end + + @doc """ + Returns all compiled modules in a project. + """ + def modules do + Mix.Project.compile_path() + |> Path.join("*.beam") + |> Path.wildcard() + |> Enum.map(&beam_to_module/1) + end + + defp beam_to_module(path) do + path |> Path.basename(".beam") |> String.to_atom() + end + + @doc """ + The paths to look for template files for generators. + + Defaults to checking the current app's `priv` directory, + and falls back to Phoenix's `priv` directory. + """ + def generator_paths do + [".", :phoenix] + end + + @doc """ + Checks if the given `app_path` is inside an umbrella. + """ + def in_umbrella?(app_path) do + umbrella = Path.expand(Path.join([app_path, "..", ".."])) + mix_path = Path.join(umbrella, "mix.exs") + apps_path = Path.join(umbrella, "apps") + File.exists?(mix_path) && File.exists?(apps_path) + end + + @doc """ + Returns the web prefix to be used in generated file specs. + """ + def web_path(ctx_app, rel_path \\ "") when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + Path.join(["lib", "#{this_app}_web", rel_path]) + else + Path.join(["lib", to_string(this_app), rel_path]) + end + end + + @doc """ + Returns the context app path prefix to be used in generated context files. 
+ """ + def context_app_path(ctx_app, rel_path) when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + rel_path + else + app_path = + case Application.get_env(this_app, :generators)[:context_app] do + {^ctx_app, path} -> Path.relative_to_cwd(path) + _ -> mix_app_path(ctx_app, this_app) + end + + Path.join(app_path, rel_path) + end + end + + @doc """ + Returns the context lib path to be used in generated context files. + """ + def context_lib_path(ctx_app, rel_path) when is_atom(ctx_app) do + context_app_path(ctx_app, Path.join(["lib", to_string(ctx_app), rel_path])) + end + + @doc """ + Returns the context test path to be used in generated context files. + """ + def context_test_path(ctx_app, rel_path) when is_atom(ctx_app) do + context_app_path(ctx_app, Path.join(["test", to_string(ctx_app), rel_path])) + end + + @doc """ + Returns the OTP context app. + """ + def context_app do + this_app = otp_app() + + case fetch_context_app(this_app) do + {:ok, app} -> app + :error -> this_app + end + end + + @doc """ + Returns the test prefix to be used in generated file specs. + """ + def web_test_path(ctx_app, rel_path \\ "") when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + Path.join(["test", "#{this_app}_web", rel_path]) + else + Path.join(["test", to_string(this_app), rel_path]) + end + end + + defp fetch_context_app(this_otp_app) do + case Application.get_env(this_otp_app, :generators)[:context_app] do + nil -> + :error + + false -> + Mix.raise(""" + no context_app configured for current application #{this_otp_app}. + + Add the context_app generators config in config.exs, or pass the + --context-app option explicitly to the generators. For example: + + via config: + + config :#{this_otp_app}, :generators, + context_app: :some_app + + via cli option: + + mix phx.gen.[task] --context-app some_app + + Note: cli option only works when `context_app` is not set to `false` + in the config. 
+ """) + + {app, _path} -> + {:ok, app} + + app -> + {:ok, app} + end + end + + defp mix_app_path(app, this_otp_app) do + case Mix.Project.deps_paths() do + %{^app => path} -> + Path.relative_to_cwd(path) + + deps -> + Mix.raise(""" + no directory for context_app #{inspect(app)} found in #{this_otp_app}'s deps. + + Ensure you have listed #{inspect(app)} as an in_umbrella dependency in mix.exs: + + def deps do + [ + {:#{app}, in_umbrella: true}, + ... + ] + end + + Existing deps: + + #{inspect(Map.keys(deps))} + + """) + end + end + + @doc """ + Prompts to continue if any files exist. + """ + def prompt_for_conflicts(generator_files) do + file_paths = + Enum.flat_map(generator_files, fn + {:new_eex, _, _path} -> [] + {_kind, _, path} -> [path] + end) + + case Enum.filter(file_paths, &File.exists?(&1)) do + [] -> + :ok + + conflicts -> + Mix.shell().info(""" + The following files conflict with new files to be generated: + + #{Enum.map_join(conflicts, "\n", &" * #{&1}")} + + See the --web option to namespace similarly named resources + """) + + unless Mix.shell().yes?("Proceed with interactive overwrite?") do + System.halt() + end + end + end + + @doc """ + Returns the web module prefix. + """ + def web_module(base) do + if base |> to_string() |> String.ends_with?("Web") do + Module.concat([base]) + else + Module.concat(["#{base}Web"]) + end + end + + def to_text(data) do + inspect(data, limit: :infinity, printable_limit: :infinity) + end + + def prepend_newline(string) do + "\n" <> string + end + + @doc """ + Ensures user's LiveView is compatible with the current generators. 
+ """ + def ensure_live_view_compat!(generator_mod) do + vsn = Application.spec(:phoenix_live_view)[:vsn] + + # if lv is not installed, such as in phoenix's own test env, do not raise + if vsn && Version.compare("#{vsn}", "1.0.0-rc.7") != :gt do + raise "#{inspect(generator_mod)} requires :phoenix_live_view >= 1.0.0, got: #{vsn}" + end + end + + # In the context of a HEEx attribute value, transforms a given message into a + # dynamic `gettext` call or a fixed-value string attribute, depending on the + # `gettext?` parameter. + # + # ## Examples + # + # iex> ~s|| + # ~S|| + # + # iex> ~s|| + # ~S|| + defp maybe_heex_attr_gettext(message, gettext?) do + if gettext? do + ~s|{gettext(#{inspect(message)})}| + else + inspect(message) + end + end + + # In the context of an EEx template, transforms a given message into a dynamic + # `gettext` call or the message as is, depending on the `gettext?` parameter. + # + # ## Examples + # + # iex> ~s|#{maybe_eex_gettext("Hello", true)}| + # ~S|<%= gettext("Hello") %>| + # + # iex> ~s|#{maybe_eex_gettext("Hello", false)}| + # ~S|Hello| + defp maybe_eex_gettext(message, gettext?) do + if gettext? 
do + ~s|<%= gettext(#{inspect(message)}) %>| + else + message + end + end +end diff --git a/deps/phoenix/lib/mix/phoenix/context.ex b/deps/phoenix/lib/mix/phoenix/context.ex new file mode 100644 index 0000000..ae05a98 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix/context.ex @@ -0,0 +1,104 @@ +defmodule Mix.Phoenix.Context do + @moduledoc false + + alias Mix.Phoenix.{Context, Schema} + + defstruct name: nil, + module: nil, + schema: nil, + alias: nil, + base_module: nil, + web_module: nil, + basename: nil, + file: nil, + test_file: nil, + test_fixtures_file: nil, + dir: nil, + generate?: true, + context_app: nil, + opts: [], + scope: nil + + def valid?(context) do + context =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + def new(context_name, opts) do + new(context_name, %Schema{}, opts) + end + + def new(context_name, %Schema{} = schema, opts) do + ctx_app = opts[:context_app] || Mix.Phoenix.context_app() + base = Module.concat([Mix.Phoenix.context_base(ctx_app)]) + module = Module.concat(base, context_name) + alias = Module.concat([module |> Module.split() |> List.last()]) + basedir = Phoenix.Naming.underscore(context_name) + basename = Path.basename(basedir) + dir = Mix.Phoenix.context_lib_path(ctx_app, basedir) + file = dir <> ".ex" + test_dir = Mix.Phoenix.context_test_path(ctx_app, basedir) + test_file = test_dir <> "_test.exs" + test_fixtures_dir = Mix.Phoenix.context_app_path(ctx_app, "test/support/fixtures") + test_fixtures_file = Path.join([test_fixtures_dir, basedir <> "_fixtures.ex"]) + generate? 
= Keyword.get(opts, :context, true) + + %Context{ + name: context_name, + module: module, + schema: schema, + alias: alias, + base_module: base, + web_module: web_module(), + basename: basename, + file: file, + test_file: test_file, + test_fixtures_file: test_fixtures_file, + dir: dir, + generate?: generate?, + context_app: ctx_app, + opts: opts, + scope: schema.scope + } + end + + def pre_existing?(%Context{file: file}), do: File.exists?(file) + + def pre_existing_tests?(%Context{test_file: file}), do: File.exists?(file) + + def pre_existing_test_fixtures?(%Context{test_fixtures_file: file}), do: File.exists?(file) + + def function_count(%Context{file: file}) do + {_ast, count} = + file + |> File.read!() + |> Code.string_to_quoted!() + |> Macro.postwalk(0, fn + {:def, _, _} = node, count -> {node, count + 1} + {:defdelegate, _, _} = node, count -> {node, count + 1} + node, count -> {node, count} + end) + + count + end + + def file_count(%Context{dir: dir}) do + dir + |> Path.join("**/*.ex") + |> Path.wildcard() + |> Enum.count() + end + + defp web_module do + base = Mix.Phoenix.base() + cond do + Mix.Phoenix.context_app() != Mix.Phoenix.otp_app() -> + Module.concat([base]) + + String.ends_with?(base, "Web") -> + Module.concat([base]) + + true -> + Module.concat(["#{base}Web"]) + end + end +end diff --git a/deps/phoenix/lib/mix/phoenix/schema.ex b/deps/phoenix/lib/mix/phoenix/schema.ex new file mode 100644 index 0000000..3591334 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix/schema.ex @@ -0,0 +1,632 @@ +defmodule Mix.Phoenix.Schema do + @moduledoc false + + alias Mix.Phoenix.Schema + + defstruct module: nil, + repo: nil, + repo_alias: nil, + table: nil, + collection: nil, + embedded?: false, + generate?: true, + opts: [], + alias: nil, + file: nil, + attrs: [], + string_attr: nil, + plural: nil, + singular: nil, + uniques: [], + redacts: [], + assocs: [], + types: [], + indexes: [], + defaults: [], + human_singular: nil, + human_plural: nil, + binary_id: false, + 
migration_defaults: nil, + migration?: false, + params: %{}, + optionals: [], + sample_id: nil, + web_path: nil, + web_namespace: nil, + context_app: nil, + route_helper: nil, + route_prefix: nil, + api_route_prefix: nil, + migration_module: nil, + fixture_unique_functions: [], + fixture_params: [], + prefix: nil, + timestamp_type: :naive_datetime, + scope: nil + + @valid_types [ + :integer, + :float, + :decimal, + :boolean, + :map, + :string, + :array, + :references, + :text, + :date, + :time, + :time_usec, + :naive_datetime, + :naive_datetime_usec, + :utc_datetime, + :utc_datetime_usec, + :uuid, + :binary, + :enum + ] + + def valid_types, do: @valid_types + + def valid?(schema) do + schema =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + def new(schema_name, schema_plural, cli_attrs, opts) do + ctx_app = opts[:context_app] || Mix.Phoenix.context_app() + otp_app = Mix.Phoenix.otp_app() + opts = Keyword.merge(Application.get_env(otp_app, :generators, []), opts) + base = Mix.Phoenix.context_base(ctx_app) + basename = Phoenix.Naming.underscore(schema_name) + module = Module.concat([base, schema_name]) + repo = opts[:repo] || Module.concat([base, "Repo"]) + repo_alias = if String.ends_with?(Atom.to_string(repo), ".Repo"), do: "", else: ", as: Repo" + file = Mix.Phoenix.context_lib_path(ctx_app, basename <> ".ex") + table = opts[:table] || schema_plural + scope = Mix.Phoenix.Scope.scope_from_opts(ctx_app, opts[:scope], opts[:no_scope]) + {cli_attrs, uniques, redacts} = extract_attr_flags(cli_attrs) + {assocs, attrs} = partition_attrs_and_assocs(module, attrs(cli_attrs), scope) + types = types(attrs) + web_namespace = opts[:web] && Phoenix.Naming.camelize(opts[:web]) + web_path = web_namespace && Phoenix.Naming.underscore(web_namespace) + api_prefix = Application.get_env(otp_app, :generators)[:api_prefix] || "/api" + embedded? = Keyword.get(opts, :embedded, false) + generate? 
= Keyword.get(opts, :schema, true) + + singular = + module + |> Module.split() + |> List.last() + |> Phoenix.Naming.underscore() + + collection = if schema_plural == singular, do: singular <> "_collection", else: schema_plural + string_attr = string_attr(types) + create_params = params(attrs, :create) + + optionals = for {key, :map} <- types, do: key, into: [] + + default_params_key = + case Enum.at(create_params, 0) do + {key, _} -> key + nil -> :some_field + end + + fixture_unique_functions = fixture_unique_functions(singular, uniques, attrs) + + %Schema{ + opts: opts, + migration?: Keyword.get(opts, :migration, true), + module: module, + repo: repo, + repo_alias: repo_alias, + table: table, + embedded?: embedded?, + alias: module |> Module.split() |> List.last() |> Module.concat(nil), + file: file, + attrs: attrs, + plural: schema_plural, + singular: singular, + collection: collection, + optionals: optionals, + assocs: assocs, + types: types, + defaults: schema_defaults(attrs), + uniques: uniques, + redacts: redacts, + indexes: indexes(table, assocs, uniques), + human_singular: Phoenix.Naming.humanize(singular), + human_plural: Phoenix.Naming.humanize(schema_plural), + binary_id: opts[:binary_id], + timestamp_type: opts[:timestamp_type] || :naive_datetime, + migration_defaults: migration_defaults(attrs), + string_attr: string_attr, + params: %{ + create: create_params, + update: params(attrs, :update), + default_key: string_attr || default_params_key + }, + web_namespace: web_namespace, + web_path: web_path, + route_helper: route_helper(web_path, singular), + route_prefix: route_prefix(web_path, schema_plural), + api_route_prefix: api_prefix, + sample_id: sample_id(opts), + context_app: ctx_app, + generate?: generate?, + migration_module: migration_module(), + fixture_unique_functions: Enum.sort(fixture_unique_functions), + fixture_params: fixture_params(attrs, fixture_unique_functions), + prefix: opts[:prefix], + scope: scope + } + end + + @doc """ + Returns 
the string value of the default schema param. + """ + def default_param(%Schema{} = schema, action) do + schema.params + |> Map.fetch!(action) + |> Map.fetch!(schema.params.default_key) + |> to_string() + end + + def extract_attr_flags(cli_attrs) do + {attrs, uniques, redacts} = + Enum.reduce(cli_attrs, {[], [], []}, fn attr, {attrs, uniques, redacts} -> + [attr_name | rest] = String.split(attr, ":") + attr_name = String.to_atom(attr_name) + split_flags(Enum.reverse(rest), attr_name, attrs, uniques, redacts) + end) + + {Enum.reverse(attrs), uniques, redacts} + end + + defp split_flags(["unique" | rest], name, attrs, uniques, redacts), + do: split_flags(rest, name, attrs, [name | uniques], redacts) + + defp split_flags(["redact" | rest], name, attrs, uniques, redacts), + do: split_flags(rest, name, attrs, uniques, [name | redacts]) + + defp split_flags(rest, name, attrs, uniques, redacts), + do: {[Enum.join([name | Enum.reverse(rest)], ":") | attrs], uniques, redacts} + + @doc """ + Parses the attrs as received by generators. + """ + def attrs(attrs) do + Enum.map(attrs, fn attr -> + attr + |> String.split(":", parts: 3) + |> list_to_attr() + |> validate_attr!() + end) + end + + @doc """ + Generates some sample params based on the parsed attributes. + """ + def params(attrs, action \\ :create) when action in [:create, :update] do + Map.new(attrs, fn {k, t} -> {k, type_to_default(k, t, action)} end) + end + + @doc """ + Converts the given value to map format when it's a date, time, datetime or naive_datetime. + + Since `form.html.heex` generated by the live generator uses selects for dates and/or + times, fixtures must use map format for those fields in order to submit the live form. 
+ """ + def live_form_value(%Date{} = date), do: Calendar.strftime(date, "%Y-%m-%d") + + def live_form_value(%Time{} = time), do: Calendar.strftime(time, "%H:%M") + + def live_form_value(%NaiveDateTime{} = naive) do + NaiveDateTime.to_iso8601(naive) + end + + def live_form_value(%DateTime{} = naive) do + DateTime.to_iso8601(naive) + end + + def live_form_value(value), do: value + + @doc """ + Builds an invalid value for `@invalid_attrs` which is nil by default. + + * In case the value is a list, this will return an empty array. + * In case the value is date, datetime, naive_datetime or time, this will return an invalid date. + * In case it is a boolean, we keep it as false + """ + def invalid_form_value(value) when is_list(value), do: [] + + def invalid_form_value(%{day: _day, month: _month, year: _year} = _date), + do: "2022-00" + + def invalid_form_value(%{hour: _hour, minute: _minute}), do: %{hour: 14, minute: 00} + def invalid_form_value(true), do: false + def invalid_form_value(_value), do: nil + + @doc """ + Generates an invalid error message according to the params present in the schema. + """ + def failed_render_change_message(_schema) do + "can't be blank" + end + + def type_for_migration({:enum, _}), do: :string + def type_for_migration(other), do: other + + def format_fields_for_schema(schema) do + Enum.map_join(schema.types, "\n", fn {k, v} -> + " field #{inspect(k)}, #{type_and_opts_for_schema(v)}#{schema.defaults[k]}#{maybe_redact_field(k in schema.redacts)}" + end) + end + + @doc """ + Returns the required fields in the schema. Anything not in the `optionals` list + is considered required. 
+ """ + def required_fields(schema) do + Enum.reject(schema.attrs, fn {key, _} -> key in schema.optionals end) + end + + def type_and_opts_for_schema({:enum, opts}), + do: ~s|Ecto.Enum, values: #{inspect(Keyword.get(opts, :values))}| + + def type_and_opts_for_schema(other), do: inspect(other) + + def maybe_redact_field(true), do: ", redact: true" + def maybe_redact_field(false), do: "" + + @doc """ + Returns the string value for use in EEx templates. + """ + def value(schema, field, value) do + schema.types + |> Keyword.fetch!(field) + |> inspect_value(value) + end + + defp inspect_value(:decimal, value), do: "Decimal.new(\"#{value}\")" + defp inspect_value(_type, value), do: inspect(value) + + defp list_to_attr([key]), do: {String.to_atom(key), :string} + defp list_to_attr([key, value]), do: {String.to_atom(key), String.to_atom(value)} + + defp list_to_attr([key, comp, value]) do + {String.to_atom(key), {String.to_atom(comp), String.to_atom(value)}} + end + + @one_day_in_seconds 24 * 3600 + + defp type_to_default(key, t, :create) do + case t do + {:array, type} -> + build_array_values(type, :create) + + {:enum, values} -> + build_enum_values(values, :create) + + :integer -> + 42 + + :float -> + 120.5 + + :decimal -> + "120.5" + + :boolean -> + true + + :map -> + %{} + + :text -> + "some #{key}" + + :date -> + Date.add(Date.utc_today(), -1) + + :time -> + ~T[14:00:00] + + :time_usec -> + ~T[14:00:00.000000] + + :uuid -> + "7488a646-e31f-11e4-aace-600308960662" + + :utc_datetime -> + DateTime.add( + build_utc_datetime(), + -@one_day_in_seconds, + :second, + Calendar.UTCOnlyTimeZoneDatabase + ) + + :utc_datetime_usec -> + DateTime.add( + build_utc_datetime_usec(), + -@one_day_in_seconds, + :second, + Calendar.UTCOnlyTimeZoneDatabase + ) + + :naive_datetime -> + NaiveDateTime.add(build_utc_naive_datetime(), -@one_day_in_seconds) + + :naive_datetime_usec -> + NaiveDateTime.add(build_utc_naive_datetime_usec(), -@one_day_in_seconds) + + _ -> + "some #{key}" + end + end + 
+ defp type_to_default(key, t, :update) do + case t do + {:array, type} -> build_array_values(type, :update) + {:enum, values} -> build_enum_values(values, :update) + :integer -> 43 + :float -> 456.7 + :decimal -> "456.7" + :boolean -> false + :map -> %{} + :text -> "some updated #{key}" + :date -> Date.utc_today() + :time -> ~T[15:01:01] + :time_usec -> ~T[15:01:01.000000] + :uuid -> "7488a646-e31f-11e4-aace-600308960668" + :utc_datetime -> build_utc_datetime() + :utc_datetime_usec -> build_utc_datetime_usec() + :naive_datetime -> build_utc_naive_datetime() + :naive_datetime_usec -> build_utc_naive_datetime_usec() + _ -> "some updated #{key}" + end + end + + defp build_array_values(:string, :create), + do: Enum.map([1, 2], &"option#{&1}") + + defp build_array_values(:integer, :create), + do: [1, 2] + + defp build_array_values(:string, :update), + do: ["option1"] + + defp build_array_values(:integer, :update), + do: [1] + + defp build_array_values(_, _), + do: [] + + defp build_enum_values(values, action) do + case {action, translate_enum_vals(values)} do + {:create, vals} -> hd(vals) + {:update, [val | []]} -> val + {:update, vals} -> vals |> tl() |> hd() + end + end + + defp build_utc_datetime_usec, + do: %{DateTime.utc_now() | second: 0, microsecond: {0, 6}} + + defp build_utc_datetime, + do: DateTime.truncate(build_utc_datetime_usec(), :second) + + defp build_utc_naive_datetime_usec, + do: %{NaiveDateTime.utc_now() | second: 0, microsecond: {0, 6}} + + defp build_utc_naive_datetime, + do: NaiveDateTime.truncate(build_utc_naive_datetime_usec(), :second) + + @enum_missing_value_error """ + Enum type requires at least one value + For example: + + mix phx.gen.schema Comment comments body:text status:enum:published:unpublished + """ + + defp validate_attr!({name, :datetime}), do: {name, :naive_datetime} + + defp validate_attr!({name, :array}) do + Mix.raise(""" + Phoenix generators expect the type of the array to be given to #{name}:array. 
+ For example: + + mix phx.gen.schema Post posts settings:array:string + """) + end + + defp validate_attr!({_name, :enum}), do: Mix.raise(@enum_missing_value_error) + defp validate_attr!({_name, type} = attr) when type in @valid_types, do: attr + defp validate_attr!({_name, {type, _}} = attr) when type in @valid_types, do: attr + + defp validate_attr!({_, type}) do + Mix.raise( + "Unknown type `#{inspect(type)}` given to generator. " <> + "The supported types are: #{@valid_types |> Enum.sort() |> Enum.join(", ")}" + ) + end + + defp partition_attrs_and_assocs(schema_module, attrs, scope) do + {assocs, attrs} = + Enum.split_with(attrs, fn + {_, {:references, _}} -> + true + + {key, :references} -> + Mix.raise(""" + Phoenix generators expect the table to be given to #{key}:references. + For example: + + mix phx.gen.schema Comment comments body:text post_id:references:posts + """) + + _ -> + false + end) + + assocs = + Enum.map(assocs, fn {key_id, {:references, source}} -> + validate_scope_and_reference_conflict!(scope, key_id) + + key = String.replace(Atom.to_string(key_id), "_id", "") + base = schema_module |> Module.split() |> Enum.drop(-1) + module = Module.concat(base ++ [Phoenix.Naming.camelize(key)]) + {String.to_atom(key), key_id, inspect(module), source} + end) + + {assocs, attrs} + end + + defp validate_scope_and_reference_conflict!( + %Mix.Phoenix.Scope{schema_key: reference_key}, + reference_key + ) do + Mix.raise(""" + Reference #{inspect(reference_key)} has the same name as the scope schema key, either skip the reference or pass it with the --no-scope flag. 
+ """) + end + + defp validate_scope_and_reference_conflict!(_scope, _source), do: :ok + + defp schema_defaults(attrs) do + Enum.into(attrs, %{}, fn + {key, :boolean} -> {key, ", default: false"} + {key, _} -> {key, ""} + end) + end + + defp string_attr(types) do + Enum.find_value(types, fn + {key, :string} -> key + _ -> false + end) + end + + defp types(attrs) do + Keyword.new(attrs, fn + {key, {:enum, vals}} -> {key, {:enum, values: translate_enum_vals(vals)}} + {key, {root, val}} -> {key, {root, schema_type(val)}} + {key, val} -> {key, schema_type(val)} + end) + end + + def translate_enum_vals(vals) do + vals + |> Atom.to_string() + |> String.split(":") + |> Enum.map(&String.to_atom/1) + end + + defp schema_type(:text), do: :string + defp schema_type(:uuid), do: Ecto.UUID + + defp schema_type(val) do + if Code.ensure_loaded?(Ecto.Type) and not Ecto.Type.primitive?(val) do + Mix.raise("Unknown type `#{val}` given to generator") + else + val + end + end + + defp indexes(table, assocs, uniques) do + uniques = Enum.map(uniques, fn key -> {key, true} end) + assocs = Enum.map(assocs, fn {_, key, _, _} -> {key, false} end) + + (uniques ++ assocs) + |> Enum.uniq_by(fn {key, _} -> key end) + |> Enum.map(fn + {key, false} -> "create index(:#{table}, [:#{key}])" + {key, true} -> "create unique_index(:#{table}, [:#{key}])" + end) + end + + defp migration_defaults(attrs) do + Enum.into(attrs, %{}, fn + {key, :boolean} -> {key, ", default: false, null: false"} + {key, _} -> {key, ""} + end) + end + + defp sample_id(opts) do + if Keyword.get(opts, :binary_id, false) do + Keyword.get(opts, :sample_binary_id, "11111111-1111-1111-1111-111111111111") + else + -1 + end + end + + defp route_helper(web_path, singular) do + "#{web_path}_#{singular}" + |> String.trim_leading("_") + |> String.replace("/", "_") + end + + defp route_prefix(web_path, plural) do + path = Path.join(for str <- [web_path, plural], do: to_string(str)) + "/" <> String.trim_leading(path, "/") + end + + defp 
migration_module do + case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do + migration_module when is_atom(migration_module) -> migration_module + other -> Mix.raise("Expected :migration_module to be a module, got: #{inspect(other)}") + end + end + + defp fixture_unique_functions(singular, uniques, attrs) do + uniques + |> Enum.filter(&Keyword.has_key?(attrs, &1)) + |> Enum.into(%{}, fn attr -> + function_name = "unique_#{singular}_#{attr}" + + {function_def, needs_impl?} = + case Keyword.fetch!(attrs, attr) do + :integer -> + function_def = + """ + def #{function_name}, do: System.unique_integer([:positive]) + """ + + {function_def, false} + + type when type in [:string, :text] -> + function_def = + """ + def #{function_name}, do: "some #{attr}\#{System.unique_integer([:positive])}" + """ + + {function_def, false} + + _ -> + function_def = + """ + def #{function_name} do + raise "implement the logic to generate a unique #{singular} #{attr}" + end + """ + + {function_def, true} + end + + {attr, {function_name, function_def, needs_impl?}} + end) + end + + defp fixture_params(attrs, fixture_unique_functions) do + attrs + |> Enum.sort() + |> Enum.map(fn {attr, type} -> + case fixture_unique_functions do + %{^attr => {function_name, _function_def, _needs_impl?}} -> + {attr, "#{function_name}()"} + + %{} -> + {attr, inspect(type_to_default(attr, type, :create))} + end + end) + end +end diff --git a/deps/phoenix/lib/mix/phoenix/scope.ex b/deps/phoenix/lib/mix/phoenix/scope.ex new file mode 100644 index 0000000..bbcb1a7 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix/scope.ex @@ -0,0 +1,155 @@ +defmodule Mix.Phoenix.Scope do + @moduledoc false + + defstruct name: nil, + default: false, + module: nil, + alias: nil, + assign_key: nil, + access_path: nil, + route_prefix: nil, + route_access_path: nil, + schema_table: nil, + schema_key: nil, + schema_type: nil, + schema_migration_type: nil, + test_data_fixture: nil, + test_setup_helper: nil + + @doc """ 
+ Creates a new scope struct. + """ + def new!(name, opts) do + scope = struct!(__MODULE__, opts) + alias = Module.concat([scope.module |> Module.split() |> List.last()]) + + route_access_path = + case scope.route_access_path || Enum.drop(scope.access_path, -1) do + [] -> scope.access_path + rap -> rap + end + + %{ + scope + | name: name, + alias: alias, + route_access_path: route_access_path, + schema_migration_type: + scope.schema_migration_type || if(scope.schema_type != :id, do: scope.schema_type) + } + end + + @doc """ + Returns a `%{name: scope}` map of configured scopes. + """ + def scopes_from_config(otp_app) do + scopes = Application.get_env(otp_app, :scopes, []) + + Map.new(scopes, fn {name, opts} -> {name, new!(name, opts)} end) + end + + @doc """ + Returns the default scope. + """ + def default_scope(otp_app) do + case Enum.filter(scopes_from_config(otp_app), fn {_, scope} -> scope.default end) do + [{_name, scope}] -> + scope + + [_ | _] = scopes -> + Mix.raise(""" + There can only be one default scope defined on your application, got: + + * #{Enum.map(scopes, fn {name, _scope} -> name end) |> Enum.join("\n * ")} + """) + + [] -> + nil + end + end + + @doc """ + Returns the configured scope for the given --scope parameter. + + Returns `nil` for `--no-scope` and raises if a specific scope is not configured. + """ + def scope_from_opts(_otp_app, bin, false) when is_binary(bin) do + Mix.raise("The --scope and --no-scope options must not be used together") + end + + def scope_from_opts(_otp_app, _name, true), do: nil + + def scope_from_opts(otp_app, nil, _), do: default_scope(otp_app) + + def scope_from_opts(otp_app, name, _) do + key = String.to_atom(name) + scopes = scopes_from_config(otp_app) + + Map.get_lazy(scopes, key, fn -> + Mix.raise(""" + Scope :#{key} not configured! + + Ensure that the scope :#{key} is configured in your application's config: + + config :#{otp_app}, :scopes, [ + #{key}: [ + ... 
+ ] + ] + + Note that phx.gen.auth generates a default scope for you. + """) + end) + end + + @doc """ + Generates a route prefix string with placeholders for the access path. + + Takes a scope_key (what to use for accessing the scope) and a schema with scope information. + If the schema doesn't have a scope with route_prefix, returns an empty string. + Otherwise, it processes the route_prefix, replacing param segments with dynamic path elements. + + ## Examples + + route_prefix("socket.assigns.current_scope", schema_with_scope) + # => "/orgs/\#{socket.assigns.current_scope.organization.slug}" + + route_prefix("@current_scope", schema_with_scope) + # => "/orgs/\#{@current_scope.organization.slug}" + + route_prefix("scope", schema_with_scope) + # => "/orgs/\#{scope.organization.slug}" + """ + def route_prefix( + scope_key, + %{scope: %{route_prefix: route_prefix, route_access_path: route_access_path}} = _schema + ) + when not is_nil(route_prefix) do + # Replace any path segment that starts with a colon with route_access_path from the scope + path_segments = String.split(route_prefix, "/", trim: true) + param_segments = Enum.filter(path_segments, &String.starts_with?(&1, ":")) + + if length(param_segments) > 1 do + Mix.raise( + "The route_prefix option in scope configuration must contain only one parameter. 
Found: #{inspect(param_segments)}" + ) + end + + path_with_placeholders = + path_segments + |> Enum.map(fn segment -> + if String.starts_with?(segment, ":") do + # Extract parameter name without the colon + access_string = Enum.join(route_access_path, ".") + "\#{#{scope_key}.#{access_string}}" + else + segment + end + end) + |> Enum.join("/") + + "/#{path_with_placeholders}" + end + + def route_prefix(_scope_key, _schema), do: "" +end diff --git a/deps/phoenix/lib/mix/tasks/compile.phoenix.ex b/deps/phoenix/lib/mix/tasks/compile.phoenix.ex new file mode 100644 index 0000000..bf658c7 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/compile.phoenix.ex @@ -0,0 +1,51 @@ +defmodule Mix.Tasks.Compile.Phoenix do + use Mix.Task + @recursive true + @moduledoc false + + @doc false + def run(_args) do + IO.warn(""" + the :phoenix compiler is no longer required in your mix.exs. + + Please find the following line in your mix.exs and remove the :phoenix entry: + + compilers: [..., :phoenix, ...] ++ Mix.compilers(), + """) + + {:ok, _} = Application.ensure_all_started(:phoenix) + + case touch() do + [] -> {:noop, []} + _ -> {:ok, []} + end + end + + @doc false + def touch do + Mix.Phoenix.modules() + |> modules_for_recompilation + |> modules_to_file_paths + |> Stream.map(&touch_if_exists(&1)) + |> Stream.filter(&(&1 == :ok)) + |> Enum.to_list() + end + + defp touch_if_exists(path) do + :file.change_time(path, :calendar.local_time()) + end + + defp modules_for_recompilation(modules) do + Stream.filter(modules, fn mod -> + Code.ensure_loaded?(mod) and phoenix_recompile?(mod) + end) + end + + defp phoenix_recompile?(mod) do + function_exported?(mod, :__phoenix_recompile__?, 0) and mod.__phoenix_recompile__?() + end + + defp modules_to_file_paths(modules) do + Stream.map(modules, fn mod -> mod.__info__(:compile)[:source] end) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex b/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex new file mode 100644 index 
0000000..7426add --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex @@ -0,0 +1,76 @@ +defmodule Mix.Tasks.Phx.Digest.Clean do + use Mix.Task + @default_output_path "priv/static" + @default_age 3600 + @default_keep 2 + + @shortdoc "Removes old versions of static assets." + @recursive true + + @moduledoc """ + Removes old versions of compiled assets. + + By default, it will keep the latest version and + 2 previous versions as well as any digest created + in the last hour. + + $ mix phx.digest.clean + $ mix phx.digest.clean -o /www/public + $ mix phx.digest.clean --age 600 --keep 3 + $ mix phx.digest.clean --all + + ## Options + + * `-o, --output` - indicates the path to your compiled + assets directory. Defaults to `priv/static` + + * `--age` - specifies a maximum age (in seconds) for assets. + Files older than age that are not in the last `--keep` versions + will be removed. Defaults to 3600 (1 hour) + + * `--keep` - specifies how many previous versions of assets to keep. + Defaults to 2 previous versions + + * `--all` - specifies that all compiled assets (including the manifest) + will be removed. Note this overrides the age and keep switches. + + * `--no-compile` - do not run mix compile + """ + + @switches [output: :string, age: :integer, keep: :integer, all: :boolean] + + @doc false + def run(all_args) do + # Ensure all compressors are compiled. + if "--no-compile" not in all_args do + Mix.Task.run("compile", all_args) + end + + Mix.Task.reenable("phx.digest.clean") + + {:ok, _} = Application.ensure_all_started(:phoenix) + + {opts, _, _} = OptionParser.parse(all_args, switches: @switches, aliases: [o: :output]) + output_path = opts[:output] || @default_output_path + age = opts[:age] || @default_age + keep = opts[:keep] || @default_keep + all? 
= opts[:all] || false + + result = + if all?, + do: Phoenix.Digester.clean_all(output_path), + else: Phoenix.Digester.clean(output_path, age, keep) + + case result do + :ok -> + # We need to call build structure so everything we have cleaned from + # priv is removed from _build in case we have build_embedded set to + # true. In case it's not true, build structure is mostly a no-op, so we + # are fine. + Mix.Project.build_structure() + Mix.shell().info [:green, "Clean complete for #{inspect output_path}"] + {:error, :invalid_path} -> + Mix.shell().error "The output path #{inspect output_path} does not exist" + end + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.digest.ex b/deps/phoenix/lib/mix/tasks/phx.digest.ex new file mode 100644 index 0000000..8f1ce97 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.digest.ex @@ -0,0 +1,88 @@ +defmodule Mix.Tasks.Phx.Digest do + use Mix.Task + @default_input_path "priv/static" + + @shortdoc "Digests and compresses static files" + @recursive true + + @moduledoc """ + Digests and compresses static files. + + $ mix phx.digest + $ mix phx.digest priv/static -o /www/public + + The first argument is the path where the static files are located. The + `-o` option indicates the path that will be used to save the digested and + compressed files. + + If no path is given, it will use `priv/static` as the input and output path. + + The output folder will contain: + + * the original file + * the file compressed with gzip + * a file containing the original file name and its digest + * a compressed file containing the file name and its digest + * a cache manifest file + + Example of generated files: + + * app.js + * app.js.gz + * app-eb0a5b9302e8d32828d8a73f137cc8f0.js + * app-eb0a5b9302e8d32828d8a73f137cc8f0.js.gz + * cache_manifest.json + + You can use `mix phx.digest.clean` to prune stale versions of the assets. + If you want to remove all produced files, run `mix phx.digest.clean --all`. 
+ + ## vsn + + It is possible to digest the stylesheet asset references without the query + string "?vsn=d" with the option `--no-vsn`. + + ## Options + + * `-o, --output` - indicates the path to your compiled + assets directory. Defaults to `priv/static` + + * `--no-vsn` - do not add version query string to assets + + * `--no-compile` - do not run mix compile + """ + + @default_opts [vsn: true] + @switches [output: :string, vsn: :boolean] + + @doc false + def run(all_args) do + # Ensure all compressors are compiled. + if "--no-compile" not in all_args do + Mix.Task.run("compile", all_args) + end + + Mix.Task.reenable("phx.digest") + + {:ok, _} = Application.ensure_all_started(:phoenix) + + {opts, args, _} = OptionParser.parse(all_args, switches: @switches, aliases: [o: :output]) + input_path = List.first(args) || @default_input_path + output_path = opts[:output] || input_path + with_vsn? = Keyword.merge(@default_opts, opts)[:vsn] + + case Phoenix.Digester.compile(input_path, output_path, with_vsn?) do + :ok -> + # We need to call build structure so everything we have + # generated into priv is copied to _build in case we have + # build_embedded set to true. In case it's not true, + # build structure is mostly a no-op, so we are fine. + Mix.Project.build_structure() + Mix.shell().info [:green, "Check your digested files at #{inspect output_path}"] + + {:error, :invalid_path} -> + # Do not exit with status code on purpose because + # in an umbrella not all apps are digestable. + Mix.shell().error "The input path #{inspect input_path} does not exist" + end + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.ex b/deps/phoenix/lib/mix/tasks/phx.ex new file mode 100644 index 0000000..2316dd0 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.ex @@ -0,0 +1,40 @@ +defmodule Mix.Tasks.Phx do + use Mix.Task + + @shortdoc "Prints Phoenix help information" + + @moduledoc """ + Prints Phoenix tasks and their information. 
+ + $ mix phx + + To print the Phoenix version, pass `-v` or `--version`, for example: + + $ mix phx --version + + """ + + @version Mix.Project.config()[:version] + + @impl true + @doc false + def run([version]) when version in ~w(-v --version) do + Mix.shell().info("Phoenix v#{@version}") + end + + def run(args) do + case args do + [] -> general() + _ -> Mix.raise "Invalid arguments, expected: mix phx" + end + end + + defp general() do + Application.ensure_all_started(:phoenix) + Mix.shell().info "Phoenix v#{Application.spec(:phoenix, :vsn)}" + Mix.shell().info "Peace of mind from prototype to production" + Mix.shell().info "\n## Options\n" + Mix.shell().info "-v, --version # Prints Phoenix version\n" + Mix.Tasks.Help.run(["--search", "phx."]) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex new file mode 100644 index 0000000..e3a9861 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex @@ -0,0 +1,1152 @@ +defmodule Mix.Tasks.Phx.Gen.Auth do + @shortdoc "Generates authentication logic for a resource" + + @moduledoc """ + Generates authentication logic and related views for a resource. + + ```console + $ mix phx.gen.auth Accounts User users + ``` + + The first argument is the context module followed by the schema module + and its plural name (used as the schema table name). The example above + will generate an `Accounts` context module with two schemas inside: + `User` and `UserToken`. You may name the context and schema according + to your preferences. For example: + + ```console + $ mix phx.gen.auth Identity Client clients + ``` + + Will generate an `Identity` context with `Client` and `ClientToken` inside. + Additional information and security considerations are detailed in the + [`mix phx.gen.auth` guide](mix_phx_gen_auth.html). + + > #### A note on scopes {: .info} + > + > `mix phx.gen.auth` creates a scope named after the schema by default. 
+ > You can read more about scopes in the [Scopes guide](scopes.html). + + ## LiveView vs conventional Controllers & Views + + Authentication views can either be generated to use LiveView by passing + the `--live` option, or they can use conventional Phoenix + Controllers & Views by passing `--no-live`. + + If neither of these options are provided, a prompt will be displayed. + + Using the `--live` option is advised if you plan on using LiveView + elsewhere in your application. The user experience when navigating between + LiveViews can be tightly controlled, allowing you to let your users navigate + to authentication views without necessarily triggering a new HTTP request + each time (which would result in a full page load). + + ## Mixing magic link and password registration + + `mix phx.gen.auth` generates email based authentication, which assumes the user who + owns the email address has control over the account. Therefore, it is extremely + important to void all access tokens once the user confirms their account for the first + time, and we do so by revoking all tokens upon confirmation. + + However, if you allow users to create an account with password, you must also + require them to be logged in by the time of confirmation, otherwise you may be + vulnerable to credential pre-stuffing, as the following attack is possible: + + 1. An attacker registers a new account with the email address of their target, anticipating + that the target creates an account at a later point in time. + 2. The attacker sets a password when registering. + 3. The target registers an account and sees that their email address is already in use. + 4. The target logs in by magic link, but does not change the existing password. + 5. The attacker maintains access using the password they previously set. + + This is why the default implementation raises whenever a user tries to log in for the first + time by magic link and there is a password set. 
If you add registration with email and + password, then you must require the user to be logged in to confirm their account. + If they don't have a password (because it was set by the attacker), then they can set one + via a "Forgot your password?"-like workflow. + + ## Password hashing + + The password hashing mechanism defaults to `bcrypt` for + Unix systems and `pbkdf2` for Windows systems. Both + systems use the [Comeonin interface](https://hexdocs.pm/comeonin/). + + The password hashing mechanism can be overridden with the + `--hashing-lib` option. The following values are supported: + + * `bcrypt` - [bcrypt_elixir](https://hex.pm/packages/bcrypt_elixir) + * `pbkdf2` - [pbkdf2_elixir](https://hex.pm/packages/pbkdf2_elixir) + * `argon2` - [argon2_elixir](https://hex.pm/packages/argon2_elixir) + + We recommend developers to consider using `argon2`, which + is the most robust of all 3. The downside is that `argon2` + is quite CPU and memory intensive, and you will need more + powerful instances to run your applications on. + + For more information about choosing these libraries, see the + [Comeonin project](https://github.com/riverrun/comeonin). + + ## Multiple invocations + + You can invoke this generator multiple times. This is typically useful + if you have distinct resources that go through distinct authentication + workflows: + + $ mix phx.gen.auth Store User users + $ mix phx.gen.auth Backoffice Admin admins + + Note that when invoking `phx.gen.auth` multiple times, it will also generate + multiple [scopes](guides/authn_authz/scopes.md). Typically, only one scope is needed, + thus you will probably want to customize the generated code afterwards. Also, it + is expected that the generated code is not fully free of conflicts. One example is the + browser pipeline, which will try to assign both scopes as `:current_scope` by default. + You can customize the generated assign key with the `--assign-key` option. 
+ + ## Binary ids + + The `--binary-id` option causes the generated migration to use + `binary_id` for its primary key and foreign keys. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. These are the defaults: + + config :your_app, :generators, + binary_id: false, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration. + + ## Custom table names + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.auth Accounts User users --table accounts_users + + This will cause the generated tables to be named `"accounts_users"` and `"accounts_users_tokens"`. + + ## Custom scope name + + By default, the scope name is the same as the schema name. You can customize the scope name by passing the `--scope` option. For example: + + ```console + $ mix phx.gen.auth Accounts User users --scope app_user + ``` + + This will generate a scope named `app_user` instead of `user`. You can read more about scopes in the [Scopes guide](scopes.html). + + Additionally, the scope's assign key can be customized by passing the `--assign-key` option. For example: + + ```console + $ mix phx.gen.auth Accounts User users --assign-key current_user_scope + ``` + + This is useful when you want to run `mix phx.gen.auth` multiple times in the same project, but note that + often it might make more sense to reuse the same scope with additional fields instead of separate scopes. 
+ """ + + use Mix.Task + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen + alias Mix.Tasks.Phx.Gen.Auth.{HashingLibrary, Injector, Migration} + + @switches [ + web: :string, + binary_id: :boolean, + hashing_lib: :string, + table: :string, + merge_with_existing_context: :boolean, + prefix: :string, + live: :boolean, + compile: :boolean, + scope: :string, + assign_key: :string, + agents_md: :boolean + ] + + @doc false + def run(args, test_opts \\ []) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.auth must be invoked from within your *_web application root directory" + ) + end + + Mix.Phoenix.ensure_live_view_compat!(__MODULE__) + + {opts, parsed} = OptionParser.parse!(args, strict: @switches) + validate_args!(parsed) + hashing_library = build_hashing_library!(opts) + + context_args = + OptionParser.to_argv(Keyword.drop(opts, [:scope, :assign_key, :agents_md]), + switches: @switches + ) ++ + parsed + + {context, schema} = Gen.Context.build(context_args ++ ["--no-scope"], help_module: __MODULE__) + + context = put_live_option(context) + Gen.Context.prompt_for_code_injection(context) + + if "--no-compile" not in args do + # Needed so we can get the ecto adapter and ensure other + # libraries are loaded. 
+ Mix.Task.run("compile") + validate_required_dependencies!() + end + + ecto_adapter = + Keyword.get_lazy( + test_opts, + :ecto_adapter, + fn -> get_ecto_adapter!(schema) end + ) + + migration = Migration.build(ecto_adapter) + + binding = [ + context: context, + schema: schema, + migration: migration, + hashing_library: hashing_library, + web_app_name: web_app_name(context), + web_namespace: context.web_module, + endpoint_module: Module.concat([context.web_module, Endpoint]), + auth_module: + Module.concat([context.web_module, schema.web_namespace, "#{inspect(schema.alias)}Auth"]), + router_scope: router_scope(context), + web_path_prefix: web_path_prefix(schema), + test_case_options: test_case_options(ecto_adapter), + live?: Keyword.fetch!(context.opts, :live), + datetime_module: datetime_module(schema), + datetime_now: datetime_now(schema), + scope_config: + scope_config(context, opts[:scope], Keyword.get(opts, :assign_key, "current_scope")), + agents_md: Keyword.get(opts, :agents_md, true) + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(binding) + + context + |> copy_new_files(binding, paths) + |> inject_conn_case_helpers(paths, binding) + |> inject_hashing_config(hashing_library) + |> maybe_inject_scope_config(binding) + |> maybe_inject_mix_dependency(hashing_library) + |> inject_routes(paths, binding) + |> maybe_inject_router_import(binding) + |> maybe_inject_router_plug(binding) + |> maybe_inject_app_layout_menu(binding) + |> maybe_inject_agents_md(paths, binding) + |> Gen.Notifier.maybe_print_mailer_installation_instructions() + |> print_shell_instructions() + end + + defp web_app_name(%Context{} = context) do + context.web_module + |> inspect() + |> Phoenix.Naming.underscore() + end + + defp validate_args!([_, _, _]), do: :ok + + defp validate_args!(_) do + raise_with_help("Invalid arguments") + end + + defp validate_required_dependencies! 
do + unless Code.ensure_loaded?(Ecto.Adapters.SQL) do + raise_with_help("mix phx.gen.auth requires ecto_sql", :phx_generator_args) + end + + if generated_with_no_html?() do + raise_with_help("mix phx.gen.auth requires phoenix_html", :phx_generator_args) + end + + if generated_with_no_assets_or_esbuild?() do + Mix.shell().yes?(""" + Warning: did not find phoenix_html in your app.js. + + phx.gen.auth expects the phoenix_html JavaScript to be available in your application for + the generated logout link to work. + This is not the case for applications generated with `--no-assets` or `--no-esbuild`. + + To make the logout link work, you'll need to manually add the phoenix_html JavaScript to your application. + It is available at the "priv/static/phoenix_html" path of the phoenix_html application. + + Alternatively, you can refactor the logout link to submit a `
<form>` with method "delete" instead.

        Continue?\
        """) || System.halt()
    end
  end

  # True when the mix.exs deps list has no :phoenix_html entry
  # (matches both the 2-tuple and 3-tuple dependency forms).
  defp generated_with_no_html? do
    Mix.Project.config()
    |> Keyword.get(:deps, [])
    |> Enum.any?(fn
      {:phoenix_html, _} -> true
      {:phoenix_html, _, _} -> true
      _ -> false
    end)
    |> Kernel.not()
  end

  # Heuristic for apps generated with `--no-assets` / `--no-esbuild`:
  # true when Phoenix.HTML is not loaded, when assets/js/app.js is missing,
  # or when its content matches the phoenix_html static JS path.
  #
  # NOTE(review): the `{:ok, content}` branch returns true when app.js DOES
  # mention "priv/static/phoenix_html.js", yet the caller's warning says
  # "did not find phoenix_html in your app.js" — one of the two looks
  # inverted. Behavior left unchanged; verify against the upstream generator.
  defp generated_with_no_assets_or_esbuild? do
    not Code.ensure_loaded?(Phoenix.HTML) or
      case File.read("assets/js/app.js") do
        {:ok, content} -> content =~ "priv/static/phoenix_html.js"
        {:error, _} -> true
      end
  end

  # Resolves the --hashing-lib option (defaulting per OS) into a
  # %HashingLibrary{}; raises with targeted help on an unknown value.
  defp build_hashing_library!(opts) do
    opts
    |> Keyword.get_lazy(:hashing_lib, &default_hashing_library_option/0)
    |> HashingLibrary.build()
    |> case do
      {:ok, hashing_library} ->
        hashing_library

      {:error, {:unknown_library, unknown_library}} ->
        raise_with_help(
          "Unknown value for --hashing-lib #{inspect(unknown_library)}",
          :hashing_lib
        )
    end
  end

  # bcrypt requires a C toolchain that is standard on Unix; pbkdf2 is the
  # safer default on Windows.
  defp default_hashing_library_option do
    case :os.type() do
      {:unix, _} -> "bcrypt"
      {:win32, _} -> "pbkdf2"
    end
  end

  # Builds the scope configuration map consumed by the templates and the
  # conflict prompts: the scopes already configured for the app, the current
  # default scope (if any), whether a new scope must be created, the scope
  # struct itself and — only for new scopes — the config.exs snippet to inject.
  defp scope_config(context, requested_scope, assign_key) do
    existing_scopes = Mix.Phoenix.Scope.scopes_from_config(context.context_app)

    {_, default_scope} =
      Enum.find(existing_scopes, {nil, nil}, fn {_, scope} -> scope.default end)

    key = String.to_atom(requested_scope || find_scope_name(context, existing_scopes))

    {create_new?, scope, config_string} =
      if Map.has_key?(existing_scopes, key) do
        {false, existing_scopes[key], nil}
      else
        {true, new_scope(context, key, default_scope, assign_key),
         scope_config_string(context, key, default_scope, assign_key)}
      end

    %{
      scopes: existing_scopes,
      default_scope: default_scope,
      create_new?: create_new?,
      scope: scope,
      config_string: config_string
    }
  end

  # Picks the first unused scope name among increasingly qualified candidates;
  # raises (listing the taken names) when all three candidates are taken.
  defp find_scope_name(context, existing_scopes) do
    cond do
      # user
      is_new_scope?(existing_scopes, context.schema.singular) ->
        context.schema.singular

      # accounts_user
      is_new_scope?(existing_scopes,
"#{context.basename}_#{context.schema.singular}") -> + "#{context.basename}_#{context.schema.singular}" + + # my_app_accounts_user + is_new_scope?( + existing_scopes, + "#{context.context_app}_#{context.basename}_#{context.schema.singular}" + ) -> + "#{context.context_app}_#{context.basename}_#{context.schema.singular}" + + true -> + Mix.raise(""" + Could not generate a scope name for #{context.schema.singular}! These scopes already exist: + + * #{Enum.map(existing_scopes, fn {name, _scope} -> name end) |> Enum.join("\n * ")} + + You can customize the scope name by passing the --scope option. + """) + end + end + + defp is_new_scope?(existing_scopes, bin_key) do + key = String.to_atom(bin_key) + not Map.has_key?(existing_scopes, key) + end + + defp new_scope(context, key, default_scope, assign_key) do + Mix.Phoenix.Scope.new!(key, %{ + default: !default_scope, + module: Module.concat([context.module, "Scope"]), + assign_key: String.to_atom(assign_key), + access_path: [ + String.to_atom(context.schema.singular), + context.schema.opts[:primary_key] || :id + ], + schema_key: + String.to_atom("#{context.schema.singular}_#{context.schema.opts[:primary_key] || :id}"), + schema_type: if(context.schema.binary_id, do: :binary_id, else: :id), + schema_table: context.schema.table, + test_data_fixture: Module.concat([context.module, "Fixtures"]), + test_setup_helper: :"register_and_log_in_#{context.schema.singular}" + }) + end + + defp scope_config_string(context, key, default_scope, assign_key) do + """ + config :#{context.context_app}, :scopes, + #{key}: [ + default: #{if default_scope, do: false, else: true}, + module: #{inspect(context.module)}.Scope, + assign_key: :#{assign_key}, + access_path: [:#{context.schema.singular}, :#{context.schema.opts[:primary_key] || :id}], + schema_key: :#{context.schema.singular}_#{context.schema.opts[:primary_key] || :id}, + schema_type: :#{if(context.schema.binary_id, do: :binary_id, else: :id)}, + schema_table: :#{context.schema.table}, 
+ test_data_fixture: #{inspect(context.module)}Fixtures, + test_setup_helper: :register_and_log_in_#{context.schema.singular} + ]\ + """ + end + + defp prompt_for_conflicts(binding) do + prompt_for_scope_conflicts(binding) + + binding + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + defp prompt_for_scope_conflicts(binding) do + schema = binding[:schema] + %{scope: scope, default_scope: default_scope, create_new?: new?} = binding[:scope_config] + + cond do + # this can only happen if --scope is used and the user explicitly asked for the scope name + scope && not new? -> + Mix.shell().yes?(""" + The scope #{scope.name} is already configured. + + phx.gen.auth expects the configured scope module #{inspect(scope.module)} to include + a `for_#{schema.singular}/1` function that returns a `%#{inspect(schema.module)}{}` struct: + + def for_#{schema.singular}(nil), do: %__MODULE__{user: nil} + + def for_#{schema.singular}(%<%= inspect schema.alias %>{} = #{schema.singular}) do + %__MODULE__{#{schema.singular}: #{schema.singular}} + end + + Please ensure that your scope module includes such code. + + Do you want to proceed with the generation?\ + """) || System.halt() + + default_scope -> + Mix.shell().yes?(""" + Your application configuration already contains a default scope: #{inspect(default_scope.name)}. + + phx.gen.auth will create a new #{scope.name} scope. + + Note that if you run `phx.gen.live` multiple times, the generated assign key for + the generated scopes can conflict with each other. You can pass `--assign-key` to customize + the assign key for the generated scope. 
+ + Do you want to proceed with the generation?\ + """) || System.halt() + + true -> + :ok + end + end + + defp files_to_be_generated(binding) do + schema = binding[:schema] + context = binding[:context] + context_app = context.context_app + scope_config = binding[:scope_config] + + singular = schema.singular + web_pre = Mix.Phoenix.web_path(context_app) + web_test_pre = Mix.Phoenix.web_test_path(context_app) + migrations_pre = Mix.Phoenix.context_app_path(context_app, "priv/repo/migrations") + web_path = to_string(schema.web_path) + controller_pre = Path.join([web_pre, "controllers", web_path]) + + default_files = + [ + "migration.ex.eex": [migrations_pre, "#{timestamp()}_create_#{schema.table}_auth_tables.exs"], + "notifier.ex.eex": [context.dir, "#{singular}_notifier.ex"], + "schema.ex.eex": [context.dir, "#{singular}.ex"], + "schema_token.ex.eex": [context.dir, "#{singular}_token.ex"], + "auth.ex.eex": [web_pre, web_path, "#{singular}_auth.ex"], + "auth_test.exs.eex": [web_test_pre, web_path, "#{singular}_auth_test.exs"], + "session_controller.ex.eex": [controller_pre, "#{singular}_session_controller.ex"], + "session_controller_test.exs.eex": [ + web_test_pre, + "controllers", + web_path, + "#{singular}_session_controller_test.exs" + ] + ] ++ + if scope_config.create_new? 
do + ["scope.ex.eex": [context.dir, "scope.ex"]] + else + [] + end + + case Keyword.fetch(context.opts, :live) do + {:ok, true} -> + live_files = [ + "registration_live.ex.eex": [ + web_pre, + "live", + web_path, + "#{singular}_live", + "registration.ex" + ], + "registration_live_test.exs.eex": [ + web_test_pre, + "live", + web_path, + "#{singular}_live", + "registration_test.exs" + ], + "login_live.ex.eex": [web_pre, "live", web_path, "#{singular}_live", "login.ex"], + "login_live_test.exs.eex": [ + web_test_pre, + "live", + web_path, + "#{singular}_live", + "login_test.exs" + ], + "settings_live.ex.eex": [web_pre, "live", web_path, "#{singular}_live", "settings.ex"], + "settings_live_test.exs.eex": [ + web_test_pre, + "live", + web_path, + "#{singular}_live", + "settings_test.exs" + ], + "confirmation_live.ex.eex": [ + web_pre, + "live", + web_path, + "#{singular}_live", + "confirmation.ex" + ], + "confirmation_live_test.exs.eex": [ + web_test_pre, + "live", + web_path, + "#{singular}_live", + "confirmation_test.exs" + ] + ] + + remap_files(default_files ++ live_files) + + _ -> + non_live_files = [ + "registration_new.html.heex.eex": [ + controller_pre, + "#{singular}_registration_html", + "new.html.heex" + ], + "registration_controller.ex.eex": [controller_pre, "#{singular}_registration_controller.ex"], + "registration_controller_test.exs.eex": [ + web_test_pre, + "controllers", + web_path, + "#{singular}_registration_controller_test.exs" + ], + "registration_html.ex.eex": [controller_pre, "#{singular}_registration_html.ex"], + "session_html.ex.eex": [controller_pre, "#{singular}_session_html.ex"], + "session_new.html.heex.eex": [controller_pre, "#{singular}_session_html", "new.html.heex"], + "session_confirm.html.heex.eex": [ + controller_pre, + "#{singular}_session_html", + "confirm.html.heex" + ], + "settings_html.ex.eex": [web_pre, "controllers", web_path, "#{singular}_settings_html.ex"], + "settings_controller.ex.eex": [controller_pre, 
"#{singular}_settings_controller.ex"], + "settings_edit.html.heex.eex": [ + controller_pre, + "#{singular}_settings_html", + "edit.html.heex" + ], + "settings_controller_test.exs.eex": [ + web_test_pre, + "controllers", + web_path, + "#{singular}_settings_controller_test.exs" + ] + ] + + remap_files(default_files ++ non_live_files) + end + end + + defp remap_files(files) do + for {source, dest} <- files, do: {:eex, to_string(source), Path.join(dest)} + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(binding) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.auth", binding, files) + inject_context_functions(context, paths, binding) + inject_tests(context, paths, binding) + inject_context_test_fixtures(context, paths, binding) + + context + end + + defp inject_context_functions(%Context{file: file} = context, paths, binding) do + Gen.Context.ensure_context_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/context_functions.ex.eex", binding) + |> prepend_newline() + |> inject_before_final_end(file) + end + + defp inject_tests(%Context{test_file: test_file} = context, paths, binding) do + Gen.Context.ensure_test_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/test_cases.exs.eex", binding) + |> prepend_newline() + |> inject_before_final_end(test_file) + end + + defp inject_context_test_fixtures( + %Context{test_fixtures_file: test_fixtures_file} = context, + paths, + binding + ) do + Gen.Context.ensure_test_fixtures_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/context_fixtures_functions.ex.eex", binding) + |> prepend_newline() + |> inject_before_final_end(test_fixtures_file) + end + + defp inject_conn_case_helpers(%Context{} = context, paths, binding) do + test_file = "test/support/conn_case.ex" + + paths + |> 
Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/conn_case.exs.eex", binding) + |> inject_before_final_end(test_file) + + context + end + + defp inject_routes(%Context{context_app: ctx_app} = context, paths, binding) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/routes.ex.eex", binding) + |> inject_before_final_end(file_path) + + context + end + + defp maybe_inject_mix_dependency(%Context{context_app: ctx_app} = context, %HashingLibrary{ + mix_dependency: mix_dependency + }) do + file_path = Mix.Phoenix.context_app_path(ctx_app, "mix.exs") + + file = File.read!(file_path) + + case Injector.mix_dependency_inject(file, mix_dependency) do + {:ok, new_file} -> + print_injecting(file_path) + File.write!(file_path, new_file) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + Add your #{mix_dependency} dependency to #{file_path}: + + defp deps do + [ + #{mix_dependency}, + ... + ] + end + """) + end + + context + end + + defp maybe_inject_router_import(%Context{context_app: ctx_app} = context, binding) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + auth_module = Keyword.fetch!(binding, :auth_module) + inject = "import #{inspect(auth_module)}" + use_line = "use #{inspect(context.web_module)}, :router" + + help_text = """ + Add your #{inspect(auth_module)} import to #{Path.relative_to_cwd(file_path)}: + + defmodule #{inspect(context.web_module)}.Router do + #{use_line} + + # Import authentication plugs + #{inject} + + ... 
+ end + """ + + with {:ok, file} <- read_file(file_path), + {:ok, new_file} <- + Injector.inject_unless_contains( + file, + inject, + &String.replace(&1, use_line, "#{use_line}\n\n #{&2}") + ) do + print_injecting(file_path, " - imports") + File.write!(file_path, new_file) + else + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{help_text} + """) + + {:error, {:file_read_error, _}} -> + print_injecting(file_path) + print_unable_to_read_file_error(file_path, help_text) + end + + context + end + + defp maybe_inject_router_plug(%Context{context_app: ctx_app} = context, binding) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + help_text = Injector.router_plug_help_text(file_path, binding) + + with {:ok, file} <- read_file(file_path), + {:ok, new_file} <- Injector.router_plug_inject(file, binding) do + print_injecting(file_path, " - plug") + File.write!(file_path, new_file) + else + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{help_text} + """) + + {:error, {:file_read_error, _}} -> + print_injecting(file_path) + print_unable_to_read_file_error(file_path, help_text) + end + + context + end + + defp maybe_inject_app_layout_menu(%Context{} = context, binding) do + if file_path = get_layout_html_path(context) do + case Injector.app_layout_menu_inject(binding, File.read!(file_path)) do + {:ok, new_content} -> + print_injecting(file_path) + File.write!(file_path, new_content) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{Injector.app_layout_menu_help_text(file_path, binding)} + """) + end + else + {_dup, inject} = Injector.app_layout_menu_code_to_inject(binding) + + missing = + context + |> potential_layout_file_paths() + |> Enum.map_join("\n", &" * #{&1}") + + Mix.shell().error(""" + + Unable to find the root layout file to inject user menu items. 
+ + Missing files: + + #{missing} + + Please ensure this phoenix app was not generated with + --no-html. If you have changed the name of your root + layout file, please add the following code to it where you'd + like the #{binding[:schema].singular} menu items to be rendered. + + #{inject} + """) + end + + context + end + + defp get_layout_html_path(%Context{} = context) do + context + |> potential_layout_file_paths() + |> Enum.find(&File.exists?/1) + end + + defp potential_layout_file_paths(%Context{context_app: ctx_app}) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + + for file_name <- ~w(root.html.heex) do + Path.join([web_prefix, "components", "layouts", file_name]) + end + end + + defp inject_hashing_config(context, %HashingLibrary{} = hashing_library) do + file_path = + if Mix.Phoenix.in_umbrella?(File.cwd!()) do + Path.expand("../../") + else + File.cwd!() + end + |> Path.join("config/test.exs") + + file = + case read_file(file_path) do + {:ok, file} -> file + {:error, {:file_read_error, _}} -> "import Config\n" + end + + case Injector.test_config_inject(file, hashing_library) do + {:ok, new_file} -> + print_injecting(file_path) + File.write!(file_path, new_file) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + help_text = Injector.test_config_help_text(file_path, hashing_library) + + Mix.shell().info(""" + + #{help_text} + """) + end + + context + end + + defp maybe_inject_scope_config(%Context{} = context, binding) do + if binding[:scope_config].create_new? 
do
      inject_scope_config(context, binding)
    else
      context
    end
  end

  # Injects the generated scope's config.exs snippet, creating a minimal
  # "import Config" file when config/config.exs cannot be read. Prints manual
  # instructions when the injector finds no insertion point.
  defp inject_scope_config(%Context{} = context, binding) do
    scope_config = binding[:scope_config].config_string

    file_path =
      if Mix.Phoenix.in_umbrella?(File.cwd!()) do
        Path.expand("../../")
      else
        File.cwd!()
      end
      |> Path.join("config/config.exs")

    file =
      case read_file(file_path) do
        {:ok, file} -> file
        {:error, {:file_read_error, _}} -> "import Config\n"
      end

    case Injector.config_inject(file, scope_config) do
      {:ok, new_file} ->
        print_injecting(file_path)
        File.write!(file_path, new_file)

      :already_injected ->
        :ok

      {:error, :unable_to_inject} ->
        Mix.shell().info("""
        Add the following to #{Path.relative_to_cwd(file_path)}:

        #{scope_config}
        """)
    end

    context
  end

  # Appends the phx.gen.auth guidance to the project's AGENTS.md (before the
  # usage-rules section when present), marking the injected region so repeated
  # runs inject at most once.
  defp maybe_inject_agents_md(%Context{} = context, paths, binding) do
    if binding[:agents_md] do
      # we add our own comment marker (not related to usage_rules)
      # to check if phx.gen.auth already ran as we only want to inject once
      # even if other options were used
      #
      # FIX(review): both marker strings were empty literals in the reviewed
      # source — almost certainly HTML comments stripped by an export tool.
      # With empty markers `content =~ ""` is always true, so nothing was ever
      # injected, and `String.split(content, "", parts: 2)` would split at the
      # very start of the file. The markers below are reconstructed — verify
      # their exact text against the upstream generator/templates.
      marker = "<!-- phx.gen.auth -->"
      usage_rules_marker = "<!-- usage-rules-start -->"

      auth_content =
        """

        #{marker}
        #{Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.auth/AGENTS.md.eex", binding)}

        """

      file_path =
        if Mix.Phoenix.in_umbrella?(File.cwd!()) do
          Path.expand("../../")
        else
          File.cwd!()
        end
        |> Path.join("AGENTS.md")

      with true <- File.exists?(file_path),
           content = File.read!(file_path),
           false <- content =~ marker do
        print_injecting(file_path)
        # inject before usage rules
        case String.split(content, usage_rules_marker, parts: 2) do
          [pre, post] ->
            File.write!(file_path, [
              pre,
              String.trim_trailing(auth_content),
              "\n\n",
              usage_rules_marker,
              post
            ])

          _ ->
            # just append
            File.write!(file_path, content <> "\n\n" <> String.trim_trailing(auth_content))
        end
      end
    end

    context
  end

  # Prints the post-generation checklist: fetch deps, run migrations, and
  # where to register / find the dev mailbox.
  defp print_shell_instructions(%Context{} = context) do
    Mix.shell().info("""

    Please re-fetch your dependencies with the following command:

        $ mix deps.get

    Remember to update
your repository by running migrations:

        $ mix ecto.migrate

    Once you are ready, visit "/#{context.schema.plural}/register"
    to create your account and then access "/dev/mailbox" to
    see the account confirmation email.
    """)

    context
  end

  # Router scope head spliced into the router template: path prefix plus the
  # module the scope aliases.
  defp router_scope(%Context{schema: schema} = context) do
    prefix = Module.concat(context.web_module, schema.web_namespace)

    if schema.web_namespace do
      ~s|"/#{schema.web_path}", #{inspect(prefix)}|
    else
      ~s|"/", #{inspect(context.web_module)}|
    end
  end

  # "/<web_path>" when the schema lives under a web namespace, "" otherwise.
  defp web_path_prefix(%Schema{web_path: nil}), do: ""
  defp web_path_prefix(%Schema{web_path: web_path}), do: "/" <> web_path

  # Injects `content_to_inject` just before the final `end` of the module in
  # `file_path`; prints manual instructions when the file cannot be read.
  defp inject_before_final_end(content_to_inject, file_path) do
    with {:ok, file} <- read_file(file_path),
         {:ok, new_file} <- Injector.inject_before_final_end(file, content_to_inject) do
      print_injecting(file_path)
      File.write!(file_path, new_file)
    else
      :already_injected ->
        :ok

      {:error, {:file_read_error, _}} ->
        print_injecting(file_path)

        print_unable_to_read_file_error(
          file_path,
          """

          Please add the following to the end of your equivalent
          #{Path.relative_to_cwd(file_path)} module:

          #{indent_spaces(content_to_inject, 2)}
          """
        )
    end
  end

  # File.read/1 wrapped so callers can match a uniform
  # {:error, {:file_read_error, reason}} shape.
  defp read_file(file_path) do
    case File.read(file_path) do
      {:ok, file} -> {:ok, file}
      {:error, reason} -> {:error, {:file_read_error, reason}}
    end
  end

  # Prefixes every line of `string` with `number_of_spaces` spaces.
  defp indent_spaces(string, number_of_spaces)
       when is_binary(string) and is_integer(number_of_spaces) do
    indent = String.duplicate(" ", number_of_spaces)

    string
    |> String.split("\n")
    |> Enum.map_join("\n", &(indent <> &1))
  end

  # UTC timestamp in the YYYYMMDDHHMMSS form used for migration filenames.
  defp timestamp do
    {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
    "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}"
  end

  # Zero-pads a calendar component to two digits.
  # FIX(review): the reviewed source read `do: <>` — an invalid empty binary.
  # The `<<?0, ?0 + i>>` literal had been stripped as if it were an HTML tag;
  # restored to the standard Phoenix generator implementation.
  defp pad(i) when i < 10, do: <<?0, ?0 + i>>
  defp pad(i), do: to_string(i)

  defp prepend_newline(string) when is_binary(string), do: "\n" <> string

  defp get_ecto_adapter!(%Schema{repo: repo}) do
    if
Code.ensure_loaded?(repo) do + repo.__adapter__() + else + Mix.raise("Unable to find #{inspect(repo)}") + end + end + + defp print_injecting(file_path, suffix \\ []) do + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path), suffix]) + end + + defp print_unable_to_read_file_error(file_path, help_text) do + Mix.shell().error( + """ + + Unable to read file #{Path.relative_to_cwd(file_path)}. + + #{help_text} + """ + |> indent_spaces(2) + ) + end + + @doc false + def raise_with_help(msg) do + raise_with_help(msg, :general) + end + + defp raise_with_help(msg, :general) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth expects a context module name, followed by + the schema module and its plural name (used as the schema + table name). + + For example: + + mix phx.gen.auth Accounts User users + + The context serves as the API boundary for the given resource. + Multiple resources may belong to a context and a resource may be + split over distinct contexts (such as Accounts.User and Payments.User). + """) + end + + defp raise_with_help(msg, :phx_generator_args) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth must be installed into a Phoenix 1.5 app that + contains ecto and html templates. + + mix phx.new my_app + mix phx.new my_app --umbrella + mix phx.new my_app --database mysql + + Apps generated with --no-ecto or --no-html are not supported. + """) + end + + defp raise_with_help(msg, :hashing_lib) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth supports the following values for --hashing-lib + + * bcrypt + * pbkdf2 + * argon2 + + Visit https://github.com/riverrun/comeonin for more information + on choosing a library. 
+ """) + end + + defp test_case_options(Ecto.Adapters.Postgres), do: ", async: true" + defp test_case_options(adapter) when is_atom(adapter), do: "" + + defp datetime_module(%{timestamp_type: :naive_datetime}), do: NaiveDateTime + defp datetime_module(%{timestamp_type: :utc_datetime}), do: DateTime + defp datetime_module(%{timestamp_type: :utc_datetime_usec}), do: DateTime + + defp datetime_now(%{timestamp_type: :naive_datetime}), do: "NaiveDateTime.utc_now(:second)" + defp datetime_now(%{timestamp_type: :utc_datetime}), do: "DateTime.utc_now(:second)" + defp datetime_now(%{timestamp_type: :utc_datetime_usec}), do: "DateTime.utc_now()" + + defp put_live_option(schema) do + opts = + case Keyword.fetch(schema.opts, :live) do + {:ok, _live?} -> + schema.opts + + _ -> + Mix.shell().info(""" + An authentication system can be created in two different ways: + - Using Phoenix.LiveView (default) + - Using Phoenix.Controller only\ + """) + + if Mix.shell().yes?("Do you want to create a LiveView based authentication system?") do + Keyword.put_new(schema.opts, :live, true) + else + Keyword.put_new(schema.opts, :live, false) + end + end + + Map.put(schema, :opts, opts) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex new file mode 100644 index 0000000..2d37afc --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex @@ -0,0 +1,55 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.HashingLibrary do + @moduledoc false + + defstruct [:name, :module, :mix_dependency, :test_config] + + @type t :: %__MODULE__{ + name: atom(), + module: module(), + mix_dependency: binary(), + test_config: binary() + } + + def build("bcrypt") do + lib = %__MODULE__{ + name: :bcrypt, + module: Bcrypt, + mix_dependency: ~s|{:bcrypt_elixir, "~> 3.0"}|, + test_config: """ + config :bcrypt_elixir, :log_rounds, 1 + """ + } + + {:ok, lib} + end + + def build("pbkdf2") do + lib = %__MODULE__{ + name: :pbkdf2, 
+ module: Pbkdf2, + mix_dependency: ~s|{:pbkdf2_elixir, "~> 2.0"}|, + test_config: """ + config :pbkdf2_elixir, :rounds, 1 + """ + } + + {:ok, lib} + end + + def build("argon2") do + lib = %__MODULE__{ + name: :argon2, + module: Argon2, + mix_dependency: ~s|{:argon2_elixir, "~> 4.0"}|, + test_config: """ + config :argon2_elixir, t_cost: 1, m_cost: 8 + """ + } + + {:ok, lib} + end + + def build(other) do + {:error, {:unknown_library, other}} + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex new file mode 100644 index 0000000..7629d21 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex @@ -0,0 +1,320 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.Injector do + @moduledoc false + + alias Mix.Phoenix.Schema + alias Mix.Tasks.Phx.Gen.Auth.HashingLibrary + + @type schema :: %Schema{} + + @doc """ + Injects a dependency into the contents of mix.exs + """ + @spec mix_dependency_inject(String.t(), String.t()) :: + {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def mix_dependency_inject(mixfile, dependency) do + with :ok <- ensure_not_already_injected(mixfile, dependency), + {:ok, new_mixfile} <- do_mix_dependency_inject(mixfile, dependency) do + {:ok, new_mixfile} + end + end + + @spec do_mix_dependency_inject(String.t(), String.t()) :: + {:ok, String.t()} | {:error, :unable_to_inject} + defp do_mix_dependency_inject(mixfile, dependency) do + string_to_split_on = """ + defp deps do + [ + """ + + case split_with_self(mixfile, string_to_split_on) do + {beginning, splitter, rest} -> + new_mixfile = + IO.iodata_to_binary([beginning, splitter, " ", dependency, ?,, ?\n, rest]) + + {:ok, new_mixfile} + + _ -> + {:error, :unable_to_inject} + end + end + + @doc """ + Injects configuration into `file`. 
+ """ + def config_inject(file, code_to_inject) when is_binary(file) and is_binary(code_to_inject) do + inject_unless_contains( + file, + code_to_inject, + # Matches the entire line and captures the line ending. In the + # replace string: + # + # * the entire matching line is inserted with \\0, + # * the actual code is injected with &2, + # * and the appropriate newlines are injected using \\2. + &Regex.replace(~r/(use Mix\.Config|import Config)(\r\n|\n|$)/, &1, "\\0\\2#{&2}\\2", + global: false + ) + ) + end + + @doc """ + Injects configuration for test environment into `file`. + """ + @spec test_config_inject(String.t(), HashingLibrary.t()) :: + {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def test_config_inject(file, %HashingLibrary{} = hashing_library) when is_binary(file) do + code_to_inject = + hashing_library + |> test_config_code() + |> normalize_line_endings_to_file(file) + + config_inject(file, code_to_inject) + end + + @doc """ + Instructions to provide the user when `test_config_inject/2` fails. 
+ """ + @spec test_config_help_text(String.t(), HashingLibrary.t()) :: String.t() + def test_config_help_text(file_path, %HashingLibrary{} = hashing_library) do + """ + Add the following to #{Path.relative_to_cwd(file_path)}: + + #{hashing_library |> test_config_code() |> indent_spaces(4)} + """ + end + + defp test_config_code(%HashingLibrary{test_config: test_config}) do + String.trim(""" + # Only in tests, remove the complexity from the password hashing algorithm + #{test_config} + """) + end + + @router_plug_anchor_line "plug :put_secure_browser_headers" + + @doc """ + Injects the fetch_current_scope_for_ plug into router's browser pipeline + """ + @spec router_plug_inject(String.t(), binding :: keyword()) :: + {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def router_plug_inject(file, binding) when is_binary(file) do + inject_unless_contains( + file, + router_plug_code(binding), + # Matches the entire line containing `anchor_line` and captures + # the whitespace before the anchor. In the replace string + # + # * the entire matching line is inserted with \\0, + # * the captured indent is inserted using \\1, + # * the actual code is injected with &2, + # * and the appropriate newline is injected using \\2 + &Regex.replace(~r/^(\s*)#{@router_plug_anchor_line}.*(\r\n|\n|$)/Um, &1, "\\0\\1#{&2}\\2", + global: false + ) + ) + end + + @doc """ + Instructions to provide the user when `inject_router_plug/2` fails. + """ + @spec router_plug_help_text(String.t(), binding :: keyword()) :: String.t() + def router_plug_help_text(file_path, binding) do + """ + Add the #{router_plug_name(binding)} plug to the :browser pipeline in #{Path.relative_to_cwd(file_path)}: + + pipeline :browser do + ... 
+ #{@router_plug_anchor_line} + #{router_plug_code(binding)} + end + """ + end + + defp router_plug_code(binding) do + "plug " <> router_plug_name(binding) + end + + defp router_plug_name(binding) do + ":fetch_#{binding[:scope_config].scope.assign_key}_for_#{binding[:schema].singular}" + end + + @doc """ + Injects a menu in the application layout + """ + def app_layout_menu_inject(binding, template_str) do + with {:error, :unable_to_inject} <- + app_layout_menu_inject_at_end_of_nav_tag(binding, template_str), + {:error, :unable_to_inject} <- + app_layout_menu_inject_after_opening_body_tag(binding, template_str) do + {:error, :unable_to_inject} + end + end + + @doc """ + Instructions to provide the user when `app_layout_menu_inject/2` fails. + """ + def app_layout_menu_help_text(file_path, binding) do + {_dup_check, code} = app_layout_menu_code_to_inject(binding) + + """ + Add the following #{binding[:schema].singular} menu items to your #{Path.relative_to_cwd(file_path)} layout file: + + #{code} + """ + end + + @doc """ + Menu code to inject into the application layout template. 
+ """ + def app_layout_menu_code_to_inject(binding, padding \\ 4, newline \\ "\n") do + schema = binding[:schema] + scope_config = binding[:scope_config] + already_injected_str = "#{schema.route_prefix}/log-in" + + template = """ + \ + """ + + {already_injected_str, indent_spaces(template, padding, newline)} + end + + defp formatting_info(template, tag) do + {padding, newline} = + case Regex.run(~r/ {String.trim_leading(pre, "\n") <> " ", "\n"} + [_, "\r\n" <> pre, "\r"] -> {String.trim_leading(pre, "\r\n") <> " ", "\r\n"} + _ -> {"", "\n"} + end + + {String.length(padding), newline} + end + + defp app_layout_menu_inject_at_end_of_nav_tag(binding, file) do + {padding, newline} = formatting_info(file, "<\/nav>") + {dup_check, code} = app_layout_menu_code_to_inject(binding, padding, newline) + + inject_unless_contains( + file, + dup_check, + code, + &Regex.replace(~r/(\s*)<\/nav>/m, &1, "#{newline}#{&2}\\0", global: false) + ) + end + + defp app_layout_menu_inject_after_opening_body_tag(binding, file) do + anchor_line = " String.trim_trailing() + |> String.trim_trailing("end") + |> Kernel.<>(code_to_inject) + |> Kernel.<>("end\n") + + {:ok, new_code} + end + end + + @spec ensure_not_already_injected(String.t(), String.t()) :: :ok | :already_injected + defp ensure_not_already_injected(file, inject) do + if String.contains?(file, inject) do + :already_injected + else + :ok + end + end + + @spec split_with_self(String.t(), String.t()) :: {String.t(), String.t(), String.t()} | :error + defp split_with_self(contents, text) do + case :binary.split(contents, text) do + [left, right] -> {left, text, right} + [_] -> :error + end + end + + @spec normalize_line_endings_to_file(String.t(), String.t()) :: String.t() + defp normalize_line_endings_to_file(code, file) do + String.replace(code, "\n", get_line_ending(file)) + end + + @spec get_line_ending(String.t()) :: String.t() + defp get_line_ending(file) do + case Regex.run(~r/\r\n|\n|$/, file) do + [line_ending] -> line_ending + 
[] -> "\n" + end + end + + defp indent_spaces(string, number_of_spaces, newline \\ "\n") + when is_binary(string) and is_integer(number_of_spaces) do + indent = String.duplicate(" ", number_of_spaces) + + string + |> String.split("\n") + |> Enum.map_join(newline, &(indent <> &1)) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex new file mode 100644 index 0000000..55e7adf --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex @@ -0,0 +1,33 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.Migration do + @moduledoc false + + defstruct [:ecto_adapter, :extensions, :column_definitions] + + def build(ecto_adapter) when is_atom(ecto_adapter) do + %__MODULE__{ + ecto_adapter: ecto_adapter, + extensions: extensions(ecto_adapter), + column_definitions: column_definitions(ecto_adapter) + } + end + + defp extensions(Ecto.Adapters.Postgres) do + ["execute \"CREATE EXTENSION IF NOT EXISTS citext\", \"\""] + end + + defp extensions(_), do: [] + + defp column_definitions(ecto_adapter) do + for field <- ~w(email token)a, + into: %{}, + do: {field, column_definition(field, ecto_adapter)} + end + + defp column_definition(:email, Ecto.Adapters.Postgres), do: "add :email, :citext, null: false" + defp column_definition(:email, Ecto.Adapters.SQLite3), do: "add :email, :string, null: false, collate: :nocase" + defp column_definition(:email, _), do: "add :email, :string, null: false, size: 160" + + defp column_definition(:token, Ecto.Adapters.Postgres), do: "add :token, :binary, null: false" + + defp column_definition(:token, _), do: "add :token, :binary, null: false, size: 32" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex b/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex new file mode 100644 index 0000000..1b6f3fd --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex @@ -0,0 +1,311 @@ +defmodule Mix.Tasks.Phx.Gen.Cert do + @shortdoc "Generates a self-signed certificate for HTTPS 
testing" + + @default_path "priv/cert/selfsigned" + @default_name "Self-signed test certificate" + @default_hostnames ["localhost"] + + @warning """ + WARNING: only use the generated certificate for testing in a closed network + environment, such as running a development server on `localhost`. + For production, staging, or testing servers on the public internet, obtain a + proper certificate, for example from [Let's Encrypt](https://letsencrypt.org). + """ + + @moduledoc """ + Generates a self-signed certificate for HTTPS testing. + + $ mix phx.gen.cert + $ mix phx.gen.cert my-app.localhost my-app.internal.example.com + + Creates a private key and a self-signed certificate in PEM format. These + files can be referenced in the `certfile` and `keyfile` parameters of an + HTTPS Endpoint. + + #{@warning} + + ## Arguments + + The list of hostnames, if none are specified, defaults to: + + * #{Enum.join(@default_hostnames, "\n * ")} + + Other (optional) arguments: + + * `--output` (`-o`): the path and base filename for the certificate and + key (default: #{@default_path}) + * `--name` (`-n`): the Common Name value in certificate's subject + (default: "#{@default_name}") + + Requires OTP 21.3 or later. 
+ """ + + use Mix.Task + import Mix.Generator + + @doc false + def run(all_args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.cert must be invoked from within your *_web application root directory" + ) + end + + {opts, args} = + OptionParser.parse!( + all_args, + aliases: [n: :name, o: :output], + strict: [name: :string, output: :string] + ) + + path = opts[:output] || @default_path + name = opts[:name] || @default_name + + hostnames = + case args do + [] -> @default_hostnames + list -> list + end + + {certificate, private_key} = certificate_and_key(2048, name, hostnames) + + keyfile = path <> "_key.pem" + certfile = path <> ".pem" + + create_file( + keyfile, + :public_key.pem_encode([:public_key.pem_entry_encode(:RSAPrivateKey, private_key)]) + ) + + create_file( + certfile, + :public_key.pem_encode([{:Certificate, certificate, :not_encrypted}]) + ) + + print_shell_instructions(keyfile, certfile) + end + + @doc false + def certificate_and_key(key_size, name, hostnames) do + private_key = + case generate_rsa_key(key_size, 65537) do + {:ok, key} -> + key + + {:error, :not_supported} -> + Mix.raise(""" + Failed to generate an RSA key pair. + + This Mix task requires Erlang/OTP 20 or later. Please upgrade to a + newer version, or use another tool, such as OpenSSL, to generate a + certificate. 
+ """) + end + + public_key = extract_public_key(private_key) + + certificate = + public_key + |> new_cert(name, hostnames) + |> :public_key.pkix_sign(private_key) + + {certificate, private_key} + end + + defp print_shell_instructions(keyfile, certfile) do + app = Mix.Phoenix.otp_app() + base = Mix.Phoenix.base() + + Mix.shell().info(""" + + If you have not already done so, please update your HTTPS Endpoint + configuration in config/dev.exs: + + config #{inspect(app)}, #{inspect(Mix.Phoenix.web_module(base))}.Endpoint, + ..., + https: [ + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines + ip: {127, 0, 0, 1}, + port: 4001, + cipher_suite: :strong, + certfile: "#{certfile}", + keyfile: "#{keyfile}" + ], + ... + + #{@warning} + """) + end + + require Record + + # RSA key pairs + + Record.defrecordp( + :rsa_private_key, + :RSAPrivateKey, + Record.extract(:RSAPrivateKey, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :rsa_public_key, + :RSAPublicKey, + Record.extract(:RSAPublicKey, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + defp generate_rsa_key(keysize, e) do + private_key = :public_key.generate_key({:rsa, keysize, e}) + {:ok, private_key} + rescue + FunctionClauseError -> + {:error, :not_supported} + end + + defp extract_public_key(rsa_private_key(modulus: m, publicExponent: e)) do + rsa_public_key(modulus: m, publicExponent: e) + end + + # Certificates + + Record.defrecordp( + :otp_tbs_certificate, + :OTPTBSCertificate, + Record.extract(:OTPTBSCertificate, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :signature_algorithm, + :SignatureAlgorithm, + Record.extract(:SignatureAlgorithm, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :validity, + :Validity, + Record.extract(:Validity, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :otp_subject_public_key_info, + :OTPSubjectPublicKeyInfo, + 
Record.extract(:OTPSubjectPublicKeyInfo, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :public_key_algorithm, + :PublicKeyAlgorithm, + Record.extract(:PublicKeyAlgorithm, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :extension, + :Extension, + Record.extract(:Extension, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :basic_constraints, + :BasicConstraints, + Record.extract(:BasicConstraints, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :attr, + :AttributeTypeAndValue, + Record.extract(:AttributeTypeAndValue, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + # OID values + @rsaEncryption {1, 2, 840, 113_549, 1, 1, 1} + @sha256WithRSAEncryption {1, 2, 840, 113_549, 1, 1, 11} + + @basicConstraints {2, 5, 29, 19} + @keyUsage {2, 5, 29, 15} + @extendedKeyUsage {2, 5, 29, 37} + @subjectKeyIdentifier {2, 5, 29, 14} + @subjectAlternativeName {2, 5, 29, 17} + + @organizationName {2, 5, 4, 10} + @commonName {2, 5, 4, 3} + + @serverAuth {1, 3, 6, 1, 5, 5, 7, 3, 1} + @clientAuth {1, 3, 6, 1, 5, 5, 7, 3, 2} + + defp new_cert(public_key, common_name, hostnames) do + <> = :crypto.strong_rand_bytes(8) + + today = Date.utc_today() + + not_before = + today + |> Date.to_iso8601(:basic) + |> String.slice(2, 6) + + not_after = + today + |> Date.add(365) + |> Date.to_iso8601(:basic) + |> String.slice(2, 6) + + otp_tbs_certificate( + version: :v3, + serialNumber: serial, + signature: signature_algorithm(algorithm: @sha256WithRSAEncryption), + issuer: rdn(common_name), + validity: + validity( + notBefore: {:utcTime, ~c"#{not_before}000000Z"}, + notAfter: {:utcTime, ~c"#{not_after}000000Z"} + ), + subject: rdn(common_name), + subjectPublicKeyInfo: + otp_subject_public_key_info( + algorithm: public_key_algorithm(algorithm: @rsaEncryption), + subjectPublicKey: public_key + ), + extensions: extensions(public_key, hostnames) + ) + end + + defp rdn(common_name) do + 
{:rdnSequence, + [ + [attr(type: @organizationName, value: {:utf8String, "Phoenix Framework"})], + [attr(type: @commonName, value: {:utf8String, common_name})] + ]} + end + + defp extensions(public_key, hostnames) do + [ + extension( + extnID: @basicConstraints, + critical: true, + extnValue: basic_constraints(cA: false) + ), + extension( + extnID: @keyUsage, + critical: true, + extnValue: [:digitalSignature, :keyEncipherment] + ), + extension( + extnID: @extendedKeyUsage, + critical: false, + extnValue: [@serverAuth, @clientAuth] + ), + extension( + extnID: @subjectKeyIdentifier, + critical: false, + extnValue: key_identifier(public_key) + ), + extension( + extnID: @subjectAlternativeName, + critical: false, + extnValue: Enum.map(hostnames, &{:dNSName, String.to_charlist(&1)}) + ) + ] + end + + defp key_identifier(public_key) do + :crypto.hash(:sha, :public_key.der_encode(:RSAPublicKey, public_key)) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex b/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex new file mode 100644 index 0000000..fa1c053 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex @@ -0,0 +1,121 @@ +defmodule Mix.Tasks.Phx.Gen.Channel do + @shortdoc "Generates a Phoenix channel" + + @moduledoc """ + Generates a Phoenix channel. 
+ + $ mix phx.gen.channel Room + + Accepts the module name for the channel + + The generated files will contain: + + For a regular application: + + * a channel in `lib/my_app_web/channels` + * a channel test in `test/my_app_web/channels` + + For an umbrella application: + + * a channel in `apps/my_app_web/lib/app_name_web/channels` + * a channel test in `apps/my_app_web/test/my_app_web/channels` + + """ + use Mix.Task + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.channel must be invoked from within your *_web application root directory" + ) + end + + [channel_name] = validate_args!(args) + context_app = Mix.Phoenix.context_app() + web_prefix = Mix.Phoenix.web_path(context_app) + web_test_prefix = Mix.Phoenix.web_test_path(context_app) + binding = Mix.Phoenix.inflect(channel_name) + binding = Keyword.put(binding, :module, "#{binding[:web_module]}.#{binding[:scoped]}") + + Mix.Phoenix.check_module_name_availability!(binding[:module] <> "Channel") + + test_path = Path.join(web_test_prefix, "channels/#{binding[:path]}_channel_test.exs") + case_path = Path.join(Path.dirname(web_test_prefix), "support/channel_case.ex") + + maybe_case = + if File.exists?(case_path) do + [] + else + [{:eex, "channel_case.ex.eex", case_path}] + end + + Mix.Phoenix.copy_from( + paths(), + "priv/templates/phx.gen.channel", + binding, + [ + {:eex, "channel.ex.eex", Path.join(web_prefix, "channels/#{binding[:path]}_channel.ex")}, + {:eex, "channel_test.exs.eex", test_path} + ] ++ maybe_case + ) + + user_socket_path = Mix.Phoenix.web_path(context_app, "channels/user_socket.ex") + + if File.exists?(user_socket_path) do + Mix.shell().info(""" + + Add the channel to your `#{user_socket_path}` handler, for example: + + channel "#{binding[:singular]}:lobby", #{binding[:module]}Channel + """) + else + Mix.shell().info(""" + + The default socket handler - #{binding[:web_module]}.UserSocket - was not found. 
+ """) + + if Mix.shell().yes?("Do you want to create it?") do + Gen.Socket.run(~w(User --from-channel #{channel_name})) + else + Mix.shell().info(""" + + To create it, please run the mix task: + + mix phx.gen.socket User + + Then add the channel to the newly created file, at `#{user_socket_path}`: + + channel "#{binding[:singular]}:lobby", #{binding[:module]}Channel + """) + end + end + end + + @spec raise_with_help() :: no_return() + defp raise_with_help do + Mix.raise(""" + mix phx.gen.channel expects just the module name, following capitalization: + + mix phx.gen.channel Room + + """) + end + + defp validate_args!(args) do + unless length(args) == 1 and args |> hd() |> valid_name?() do + raise_with_help() + end + + args + end + + defp valid_name?(name) do + name =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + defp paths do + [".", :phoenix] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.context.ex b/deps/phoenix/lib/mix/tasks/phx.gen.context.ex new file mode 100644 index 0000000..190c5cb --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.context.ex @@ -0,0 +1,482 @@ +defmodule Mix.Tasks.Phx.Gen.Context do + @shortdoc "Generates a context with functions around an Ecto schema" + + @moduledoc """ + Generates a context with functions around an Ecto schema. + + ```console + $ mix phx.gen.context Accounts User users name:string age:integer + ``` + + The first argument is the context module followed by the schema module + and its plural name (used as the schema table name). + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + > Note: A resource may also be split + > over distinct contexts (such as Accounts.User and Payments.User). + + The schema is responsible for mapping the database fields into an + Elixir struct. 
+ + Overall, this generator will add the following files to `lib/your_app`: + + * a context module in `accounts.ex`, serving as the API boundary + * a schema in `accounts/user.ex`, with a `users` table + + A migration file for the repository and test files for the context + will also be generated. + + The generated migration can be skipped with `--no-migration`. + + ## Scopes + + If your application configures its own default [scope](scopes.md), then this generator + will automatically make sure all of your context operations are correctly scoped. + You can pass the `--no-scope` flag to disable the scoping. + + ## Generating without a schema + + In some cases, you may wish to bootstrap the context module and + tests, but leave internal implementation of the context and schema + to yourself. Use the `--no-schema` flags to accomplish this. + + ## `--table` + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.context Accounts User users --table cms_users + + ## `--binary-id` + + Generated migration can use `binary_id` for schema's primary key + and its references with option `--binary-id`. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. These are the defaults: + + config :your_app, :generators, + migration: true, + binary_id: false, + timestamp_type: :naive_datetime, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration or `--migration` to force generation of the migration. + + Read the documentation for `phx.gen.schema` for more information on + attributes. 
+ + ## Skipping prompts + + This generator will prompt you if there is an existing context with the same + name, in order to provide more instructions on how to correctly use phoenix contexts. + You can skip this prompt and automatically merge the new schema access functions and tests into the + existing context using `--merge-with-existing-context`. To prevent changes to + the existing context and exit the generator, use `--no-merge-with-existing-context`. + """ + + use Mix.Task + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen + + @switches [ + binary_id: :boolean, + table: :string, + web: :string, + schema: :boolean, + context: :boolean, + context_app: :string, + merge_with_existing_context: :boolean, + prefix: :string, + live: :boolean, + compile: :boolean, + primary_key: :string, + migration: :boolean, + scope: :string, + no_scope: :boolean + ] + + @default_opts [schema: true, context: true] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.context must be invoked from within your *_web application root directory" + ) + end + + {context, schema} = build(args) + + binding = [ + context: context, + schema: schema, + scope: context.scope, + primary_key: schema.opts[:primary_key] || :id + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + prompt_for_code_injection(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def build(args, opts \\ []) do + help = Keyword.get(opts, :help_module, __MODULE__) + optional = Keyword.get(opts, :name_optional, false) + + {opts, parsed, _} = parse_opts(args) + + {context_name, schema_name, plural, schema_args} = + validate_args!(parsed, optional, help) + + schema_module = inspect(Module.concat(context_name, schema_name)) + schema = 
Gen.Schema.build([schema_module, plural | schema_args], opts, help) + context = Context.new(context_name, schema, opts) + {context, schema} + end + + defp parse_opts(args) do + {opts, parsed, invalid} = OptionParser.parse(args, switches: @switches) + + merged_opts = + @default_opts + |> Keyword.merge(opts) + |> put_context_app(opts[:context_app]) + + {merged_opts, parsed, invalid} + end + + defp put_context_app(opts, nil), do: opts + + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + @doc false + def files_to_be_generated(%Context{schema: schema}) do + if schema.generate? do + Gen.Schema.files_to_be_generated(schema) + else + [] + end + end + + @doc false + def copy_new_files(%Context{schema: schema} = context, paths, binding) do + if schema.generate?, do: Gen.Schema.copy_new_files(schema, paths, binding) + inject_schema_access(context, paths, binding) + inject_tests(context, paths, binding) + inject_test_fixture(context, paths, binding) + + context + end + + @doc false + def ensure_context_file_exists(%Context{file: file} = context, paths, binding) do + unless Context.pre_existing?(context) do + Mix.Generator.create_file( + file, + Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context.ex.eex", binding) + ) + end + end + + defp inject_schema_access(%Context{file: file} = context, paths, binding) do + ensure_context_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from( + "priv/templates/phx.gen.context/#{schema_access_template(context)}", + binding + ) + |> inject_eex_before_final_end(file, binding) + end + + defp write_file(content, file) do + File.write!(file, content) + end + + @doc false + def ensure_test_file_exists(%Context{test_file: test_file} = context, paths, binding) do + unless Context.pre_existing_tests?(context) do + Mix.Generator.create_file( + test_file, + Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context_test.exs.eex", binding) + ) + end + 
end + + defp inject_tests(%Context{test_file: test_file} = context, paths, binding) do + ensure_test_file_exists(context, paths, binding) + + file = + if context.schema.scope do + "test_cases_scope.exs.eex" + else + "test_cases.exs.eex" + end + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/#{file}", binding) + |> inject_eex_before_final_end(test_file, binding) + end + + @doc false + def ensure_test_fixtures_file_exists( + %Context{test_fixtures_file: test_fixtures_file} = context, + paths, + binding + ) do + unless Context.pre_existing_test_fixtures?(context) do + Mix.Generator.create_file( + test_fixtures_file, + Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/fixtures_module.ex.eex", binding) + ) + end + end + + defp inject_test_fixture( + %Context{test_fixtures_file: test_fixtures_file} = context, + paths, + binding + ) do + ensure_test_fixtures_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/fixtures.ex.eex", binding) + |> Mix.Phoenix.prepend_newline() + |> inject_eex_before_final_end(test_fixtures_file, binding) + + maybe_print_unimplemented_fixture_functions(context) + end + + defp maybe_print_unimplemented_fixture_functions(%Context{} = context) do + fixture_functions_needing_implementations = + Enum.flat_map( + context.schema.fixture_unique_functions, + fn + {_field, {_function_name, function_def, true}} -> [function_def] + {_field, {_function_name, _function_def, false}} -> [] + end + ) + + if Enum.any?(fixture_functions_needing_implementations) do + Mix.shell().info(""" + + Some of the generated database columns are unique. 
Please provide + unique implementations for the following fixture function(s) in + #{context.test_fixtures_file}: + + #{fixture_functions_needing_implementations |> Enum.map_join(&indent(&1, 2)) |> String.trim_trailing()} + """) + end + end + + defp indent(string, spaces) do + indent_string = String.duplicate(" ", spaces) + + string + |> String.split("\n") + |> Enum.map_join(fn line -> + if String.trim(line) == "" do + "\n" + else + indent_string <> line <> "\n" + end + end) + end + + defp inject_eex_before_final_end(content_to_inject, file_path, binding) do + file = File.read!(file_path) + + if String.contains?(file, content_to_inject) do + :ok + else + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path)]) + + file + |> String.trim_trailing() + |> String.trim_trailing("end") + |> EEx.eval_string(binding) + |> Kernel.<>(content_to_inject) + |> Kernel.<>("end\n") + |> write_file(file_path) + end + end + + @doc false + def print_shell_instructions(%Context{schema: schema}) do + if schema.generate? do + Gen.Schema.print_shell_instructions(schema) + else + :ok + end + end + + defp schema_access_template(%Context{schema: schema}) do + cond do + schema.generate? && schema.scope -> + "schema_access_scope.ex.eex" + + schema.generate? -> + "schema_access.ex.eex" + + schema.scope -> + "access_no_schema_scope.ex.eex" + + true -> + "access_no_schema.ex.eex" + end + end + + defp validate_args!( + [maybe_context_name, schema_name_or_plural, plural_or_first_attr | schema_args], + optional, + help + ) do + has_context? = + case schema_name_or_plural do + <> when char in ?A..?Z -> true + _ -> not optional + end + + {context, schema, plural, schema_args} = + if has_context? 
do + {maybe_context_name, schema_name_or_plural, plural_or_first_attr, schema_args} + else + # mix phx.gen.live User users name:string + # we generate the context from the plural "users" -> Users + context = Phoenix.Naming.camelize(schema_name_or_plural) + + if context == maybe_context_name do + # if someone did + # mix phx.gen.live Users users name + Mix.raise(""" + The given schema #{maybe_context_name} is equal to the camelized version of + the table plural #{schema_name_or_plural}, but the schema is expected to be singular. + + Please pass an explicit context option like: + + mix phx.gen.live #{context} #{maybe_context_name} #{schema_name_or_plural} + + if this is what you want. + """) + end + + {context, maybe_context_name, schema_name_or_plural, [plural_or_first_attr | schema_args]} + end + + cond do + not Context.valid?(context) -> + help.raise_with_help( + "Expected the context, #{inspect(context)}, to be a valid module name" + ) + + not Schema.valid?(schema) -> + help.raise_with_help("Expected the schema, #{inspect(schema)}, to be a valid module name") + + context == schema -> + help.raise_with_help("The context and schema should have different names") + + context == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate context #{context} because it has the same name as the application" + ) + + schema == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate schema #{schema} because it has the same name as the application" + ) + + true -> + {context, schema, plural, schema_args} + end + end + + defp validate_args!(_, _, help) do + help.raise_with_help("Invalid arguments") + end + + @doc false + def raise_with_help(msg) do + Mix.raise(""" + #{msg} + + mix phx.gen.html, phx.gen.json, phx.gen.live, and phx.gen.context + expect a context module name, followed by singular and plural names + of the generated resource, ending with any number of attributes. 
+ For example: + + mix phx.gen.html [Accounts] User users name:string + mix phx.gen.json [Accounts] User users name:string + mix phx.gen.live [Accounts] User users name:string + mix phx.gen.context Accounts User users name:string + + The context serves as the API boundary for the given resource. + It is optional except for phx.gen.context. + Multiple resources may belong to a context and a resource may be + split over distinct contexts (such as Accounts.User and Payments.User). + """) + end + + @doc false + def prompt_for_code_injection(%Context{generate?: false}), do: :ok + + def prompt_for_code_injection(%Context{} = context) do + if Context.pre_existing?(context) && !merge_with_existing_context?(context) do + System.halt() + end + end + + defp merge_with_existing_context?(%Context{} = context) do + Keyword.get_lazy(context.opts, :merge_with_existing_context, fn -> + function_count = Context.function_count(context) + file_count = Context.file_count(context) + + Mix.shell().info(""" + You are generating into an existing context. + + The #{inspect(context.module)} context currently has #{singularize(function_count, "functions")} and \ + #{singularize(file_count, "files")} in its directory. + + * It's OK to have multiple resources in the same context as \ + long as they are closely related. But if a context grows too \ + large, consider breaking it apart + + * If they are not closely related, another context probably works better + + The fact that two entities are related in the database does not mean they belong \ + to the same context. + + If you are not sure, prefer creating a new context over adding to the existing one. 
+ """) + + Mix.shell().yes?("Would you like to proceed?") + end) + end + + defp singularize(1, plural), do: "1 " <> String.trim_trailing(plural, "s") + defp singularize(amount, plural), do: "#{amount} #{plural}" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex b/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex new file mode 100644 index 0000000..f22a177 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex @@ -0,0 +1,111 @@ +defmodule Mix.Tasks.Phx.Gen.Embedded do + @shortdoc "Generates an embedded Ecto schema file" + + @moduledoc """ + Generates an embedded Ecto schema for casting/validating data outside the DB. + + ```console + $ mix phx.gen.embedded Blog.Post title:string views:integer + ``` + + The first argument is the schema module followed by the schema attributes. + + The generated schema above will contain: + + * an embedded schema file in `lib/my_app/blog/post.ex` + + ## Attributes + + The resource fields are given using `name:type` syntax + where type are the types supported by Ecto. 
Omitting + the type makes it default to `:string`: + + ```console + $ mix phx.gen.embedded Blog.Post title views:integer + ``` + + The following types are supported: + + #{for attr <- Mix.Phoenix.Schema.valid_types(), do: " * `#{inspect attr}`\n"} + * `:datetime` - An alias for `:naive_datetime` + """ + use Mix.Task + + alias Mix.Phoenix.Schema + + @switches [binary_id: :boolean, web: :string] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.embedded must be invoked from within your *_web application root directory" + end + + schema = build(args) + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(schema) + + copy_new_files(schema, paths, schema: schema) + end + + @doc false + def build(args) do + {schema_opts, parsed, _} = OptionParser.parse(args, switches: @switches) + [schema_name | attrs] = validate_args!(parsed) + opts = + schema_opts + |> Keyword.put(:embedded, true) + |> Keyword.put(:migration, false) + + schema = Schema.new(schema_name, nil, attrs, opts) + + schema + end + + @doc false + def validate_args!([schema | _] = args) do + if Schema.valid?(schema) do + args + else + raise_with_help "Expected the schema argument, #{inspect schema}, to be a valid module name" + end + end + def validate_args!(_) do + raise_with_help "Invalid arguments" + end + + @doc false + @spec raise_with_help(String.t) :: no_return() + def raise_with_help(msg) do + Mix.raise """ + #{msg} + + mix phx.gen.embedded expects a module name followed by + any number of attributes: + + mix phx.gen.embedded Blog.Post title:string + """ + end + + + defp prompt_for_conflicts(schema) do + schema + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def files_to_be_generated(%Schema{} = schema) do + [{:eex, "embedded_schema.ex.eex", schema.file}] + end + + @doc false + def copy_new_files(%Schema{} = schema, paths, binding) do + files = files_to_be_generated(schema) + Mix.Phoenix.copy_from(paths, 
"priv/templates/phx.gen.embedded", binding, files) + + schema + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.ex b/deps/phoenix/lib/mix/tasks/phx.gen.ex new file mode 100644 index 0000000..2d0e28c --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.ex @@ -0,0 +1,52 @@ +defmodule Mix.Tasks.Phx.Gen do + use Mix.Task + + @shortdoc "Lists all available Phoenix generators" + + @moduledoc """ + Lists all available Phoenix generators. + + ## CRUD related generators + + The table below shows a summary of the contents created by the CRUD generators: + + | Task | Schema | Migration | Context | Controller | View | LiveView | + |:------------------ |:-:|:-:|:-:|:-:|:-:|:-:| + | `phx.gen.embedded` | x | | | | | | + | `phx.gen.schema` | x | x | | | | | + | `phx.gen.context` | x | x | x | | | | + | `phx.gen.live` | x | x | x | | | x | + | `phx.gen.json` | x | x | x | x | x | | + | `phx.gen.html` | x | x | x | x | x | | + + ## Customizing generators + + You can override the default templates used by generators. + + For example, to customize `phx.gen.live`, you can copy and edit the generator templates + to your own project's priv folder: + + First, create the directory for your custom `phx.gen.live` templates: + + ```console + $ mkdir -p priv/templates/phx.gen.live + ``` + + Next, copy the default phx.gen.live generator templates into your project so you can customize them: + + ```console + $ cp -r deps/phoenix/priv/templates/phx.gen.live/* priv/templates/phx.gen.live/ + ``` + + Phoenix generators will look for templates in your project's `priv/templates` directory first. + If a matching template is found, it will be used instead of the default. + + Note generator templates may change between minor or even patch Phoenix releases, + so custom templates may require updates after upgrading. Use this mechanism at your + own risk. 
+ """ + + def run(_args) do + Mix.Task.run("help", ["--search", "phx.gen."]) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.html.ex b/deps/phoenix/lib/mix/tasks/phx.gen.html.ex new file mode 100644 index 0000000..b628305 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.html.ex @@ -0,0 +1,359 @@ +defmodule Mix.Tasks.Phx.Gen.Html do + @shortdoc "Generates context and controller for an HTML resource" + + @moduledoc """ + Generates controller with view, templates, schema and context for an HTML resource. + + The format is: + + ```console + $ mix phx.gen.html [] [...] + ``` + + For example: + + ```console + $ mix phx.gen.html User users name:string age:integer + ``` + + Will generate a `User` schema for the `users` table within the `Users` context, + with the attributes `name` (as a string) and `age` (as an integer). + + You can also explicitly pass the context name as argument, whenever the context + is well defined: + + ```console + $ mix phx.gen.html Accounts User users name:string age:integer + ``` + + The first argument is the context module (`Accounts`) followed by + the schema module (`User`), table name (`users`), and attributes. + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + The schema is responsible for mapping the database fields into an + Elixir struct. It is followed by a list of attributes with their + respective names and types. See `mix phx.gen.schema` for more + information on attributes. 
+ + Overall, this generator will add the following files to `lib/`: + + * a controller in `lib/my_app_web/controllers/user_controller.ex` + * default CRUD HTML templates in `lib/my_app_web/controllers/user_html` + * an HTML view collocated with the controller in `lib/my_app_web/controllers/user_html.ex` + * a schema in `lib/my_app/accounts/user.ex`, with an `users` table + * a context module in `lib/my_app/accounts.ex` for the accounts API + + Additionally, this generator creates the following files: + + * a migration for the schema in `priv/repo/migrations` + * a controller test module in `test/my_app/controllers/user_controller_test.exs` + * a context test module in `test/my_app/accounts_test.exs` + * a context test helper module in `test/support/fixtures/accounts_fixtures.ex` + + If the context already exists, this generator injects functions for the given resource into + the context, context test, and context test helper modules. + + ## Scopes + + If your application configures its own default [scope](scopes.md), then this generator + will automatically make sure all of your context operations are correctly scoped. + You can pass the `--no-scope` flag to disable the scoping. + + ## Umbrella app configuration + + By default, Phoenix injects both web and domain specific functionality into the same + application. When using umbrella applications, those concerns are typically broken + into two separate apps, your context application - let's call it `my_app` - and its web + layer, which Phoenix assumes to be `my_app_web`. 
+ + You can teach Phoenix to use this style via the `:context_app` configuration option + in your `my_app_umbrella/config/config.exs`: + + config :my_app_web, + ecto_repos: [Stuff.Repo], + generators: [context_app: :my_app] + + Alternatively, the `--context-app` option may be supplied to the generator: + + ```console + $ mix phx.gen.html Accounts User users --context-app my_app + ``` + + ## Web namespace + + By default, the controller and HTML views are not namespaced but you can add + a namespace by passing the `--web` flag with a module name, for example: + + ```console + $ mix phx.gen.html Accounts User users --web Accounts + ``` + + Which would generate a `lib/app_web/controllers/accounts/user_controller.ex` and + `lib/app_web/controllers/accounts/user_html.ex`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap HTML templates, controllers, + and controller tests, but leave internal implementation of the context + or schema to yourself. You can use the `--no-context` and `--no-schema` + flags for file generation control. Note `--no-context` implies `--no-schema`: + + ```console + $ mix phx.gen.live Accounts User users --no-context name:string + ``` + + In the cases above, tests are still generated, but they will all fail. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix phx.gen.schema` for more + information. + """ + use Mix.Task + + alias Mix.Phoenix.{Context, Schema, Scope} + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.html must be invoked from within your *_web application root directory" + ) + end + + Mix.Phoenix.ensure_live_view_compat!(__MODULE__) + + {context, schema} = Gen.Context.build(args, name_optional: true) + + if schema.attrs == [] do + Mix.raise(""" + No attributes provided. The phx.gen.html generator requires at least one attribute. 
For example: + + mix phx.gen.html Accounts User users name:string + + """) + end + + Gen.Context.prompt_for_code_injection(context) + + {conn_scope, context_scope_prefix} = + if schema.scope do + base = "conn.assigns.#{schema.scope.assign_key}" + {base, "#{base}, "} + else + {"", ""} + end + + binding = [ + context: context, + schema: schema, + primary_key: schema.opts[:primary_key] || :id, + scope: schema.scope, + inputs: inputs(schema), + conn_scope: conn_scope, + context_scope_prefix: context_scope_prefix, + scope_conn_route_prefix: Scope.route_prefix(conn_scope, schema), + scope_param_route_prefix: Scope.route_prefix("scope", schema), + scope_assign_route_prefix: scope_assign_route_prefix(schema), + test_context_scope: + if(schema.scope && schema.scope.route_prefix, do: ", scope: scope", else: "") + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + + defp context_files(%Context{generate?: false}) do + [] + end + + @doc false + def files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + singular = schema.singular + web_prefix = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + controller_pre = Path.join([web_prefix, "controllers", web_path]) + test_pre = Path.join([test_prefix, "controllers", web_path]) + + [ + {:eex, "controller.ex.eex", Path.join([controller_pre, "#{singular}_controller.ex"])}, + {:eex, "edit.html.heex.eex", Path.join([controller_pre, "#{singular}_html", "edit.html.heex"])}, + {:eex, "index.html.heex.eex", + Path.join([controller_pre, 
"#{singular}_html", "index.html.heex"])}, + {:eex, "new.html.heex.eex", Path.join([controller_pre, "#{singular}_html", "new.html.heex"])}, + {:eex, "show.html.heex.eex", Path.join([controller_pre, "#{singular}_html", "show.html.heex"])}, + {:eex, "resource_form.html.heex.eex", + Path.join([controller_pre, "#{singular}_html", "#{singular}_form.html.heex"])}, + {:eex, "html.ex.eex", Path.join([controller_pre, "#{singular}_html.ex"])}, + {:eex, "controller_test.exs.eex", Path.join([test_pre, "#{singular}_controller_test.exs"])} + ] + end + + @doc false + def copy_new_files(%Context{} = context, paths, binding) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.html", binding, files) + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + context + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do + resource_path = + if schema.scope && schema.scope.route_prefix do + "#{schema.scope.route_prefix}/#{schema.plural}" + else + "/#{schema.plural}" + end + + if schema.web_namespace do + Mix.shell().info(""" + + Add the resource to your #{schema.web_namespace} :browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + scope "/#{schema.web_path}", #{inspect(Module.concat(context.web_module, schema.web_namespace))} do + pipe_through :browser + ... + resources "#{resource_path}", #{inspect(schema.alias)}Controller#{if schema.opts[:primary_key], do: ~s[, param: "#{schema.opts[:primary_key]}"]} + end + """) + else + Mix.shell().info(""" + + Add the resource to your browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + resources "#{resource_path}", #{inspect(schema.alias)}Controller#{if schema.opts[:primary_key], do: ~s[, param: "#{schema.opts[:primary_key]}"]} + """) + end + + if schema.scope do + Mix.shell().info( + "Ensure the routes are defined in a block that sets the `#{inspect(context.scope.assign_key)}` assign." 
+ ) + end + + if context.generate?, do: Gen.Context.print_shell_instructions(context) + end + + @doc false + def inputs(%Schema{} = schema) do + schema.attrs + |> Enum.reject(fn {_key, type} -> type == :map end) + |> Enum.map(fn + {key, :integer} -> + ~s(<.input field={f[#{inspect(key)}]} type="number" label="#{label(key)}" />) + + {key, :float} -> + ~s(<.input field={f[#{inspect(key)}]} type="number" label="#{label(key)}" step="any" />) + + {key, :decimal} -> + ~s(<.input field={f[#{inspect(key)}]} type="number" label="#{label(key)}" step="any" />) + + {key, :boolean} -> + ~s(<.input field={f[#{inspect(key)}]} type="checkbox" label="#{label(key)}" />) + + {key, :text} -> + ~s(<.input field={f[#{inspect(key)}]} type="textarea" label="#{label(key)}" />) + + {key, :date} -> + ~s(<.input field={f[#{inspect(key)}]} type="date" label="#{label(key)}" />) + + {key, :time} -> + ~s(<.input field={f[#{inspect(key)}]} type="time" label="#{label(key)}" />) + + {key, :utc_datetime} -> + ~s(<.input field={f[#{inspect(key)}]} type="datetime-local" label="#{label(key)}" />) + + {key, :naive_datetime} -> + ~s(<.input field={f[#{inspect(key)}]} type="datetime-local" label="#{label(key)}" />) + + {key, {:array, _} = type} -> + ~s""" + <.input + field={f[#{inspect(key)}]} + type="select" + multiple + label="#{label(key)}" + options={#{inspect(default_options(type))}} + /> + """ + + {key, {:enum, _}} -> + ~s""" + <.input + field={f[#{inspect(key)}]} + type="select" + label="#{label(key)}" + prompt="Choose a value" + options={Ecto.Enum.values(#{inspect(schema.module)}, #{inspect(key)})} + /> + """ + + {key, _} -> + ~s(<.input field={f[#{inspect(key)}]} type="text" label="#{label(key)}" />) + end) + end + + defp default_options({:array, :string}), + do: Enum.map([1, 2], &{"Option #{&1}", "option#{&1}"}) + + defp default_options({:array, :integer}), + do: Enum.map([1, 2], &{"#{&1}", &1}) + + defp default_options({:array, _}), do: [] + + defp label(key), do: 
Phoenix.Naming.humanize(to_string(key)) + + defp scope_assign_route_prefix( + %{scope: %{route_prefix: route_prefix, assign_key: assign_key}} = schema + ) + when not is_nil(route_prefix) do + Scope.route_prefix("@#{assign_key}", schema) + end + + defp scope_assign_route_prefix(_), do: "" + + @doc false + def indent_inputs(inputs, column_padding) do + columns = String.duplicate(" ", column_padding) + + inputs + |> Enum.map(fn input -> + lines = input |> String.split("\n") |> Enum.reject(&(&1 == "")) + + case lines do + [] -> + [] + + [line] -> + [columns, line] + + [first_line | rest] -> + rest = Enum.map_join(rest, "\n", &(columns <> &1)) + [columns, first_line, "\n", rest] + end + end) + |> Enum.intersperse("\n") + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.json.ex b/deps/phoenix/lib/mix/tasks/phx.gen.json.ex new file mode 100644 index 0000000..c7cbe3c --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.json.ex @@ -0,0 +1,253 @@ +defmodule Mix.Tasks.Phx.Gen.Json do + @shortdoc "Generates context and controller for a JSON resource" + + @moduledoc """ + Generates controller, JSON view, and context for a JSON resource. + + The format is: + + ```console + $ mix phx.gen.json []
[...] + ``` + + For example: + + ```console + $ mix phx.gen.json User users name:string age:integer + ``` + + Will generate a `User` schema for the `users` table within the `Users` context, + with the attributes `name` (as a string) and `age` (as an integer). + + You can also explicitly pass the context name as argument, whenever the context + is well defined: + + ```console + $ mix phx.gen.json Accounts User users name:string age:integer + ``` + + The first argument is the context module (`Accounts`) followed by + the schema module (`User`), table name (`users`), and attributes. + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + The schema is responsible for mapping the database fields into an + Elixir struct. It is followed by a list of attributes with their + respective names and types. See `mix phx.gen.schema` for more + information on attributes. + + Overall, this generator will add the following files to `lib/`: + + * a context module in `lib/app/accounts.ex` for the accounts API + * a schema in `lib/app/accounts/user.ex`, with an `users` table + * a controller in `lib/app_web/controllers/user_controller.ex` + * a JSON view collocated with the controller in `lib/app_web/controllers/user_json.ex` + + A migration file for the repository and test files for the context and + controller features will also be generated. + + ## API Prefix + + By default, the prefix "/api" will be generated for API route paths. + This can be customized via the `:api_prefix` generators configuration: + + config :your_app, :generators, + api_prefix: "/api/v1" + + ## Scopes + + If your application configures its own default [scope](scopes.md), then this generator + will automatically make sure all of your context operations are correctly scoped. 
+ You can pass the `--no-scope` flag to disable the scoping. + + ## Umbrella app configuration + + By default, Phoenix injects both web and domain specific functionality into the same + application. When using umbrella applications, those concerns are typically broken + into two separate apps, your context application - let's call it `my_app` - and its web + layer, which Phoenix assumes to be `my_app_web`. + + You can teach Phoenix to use this style via the `:context_app` configuration option + in your `my_app_umbrella/config/config.exs`: + + config :my_app_web, + ecto_repos: [Stuff.Repo], + generators: [context_app: :my_app] + + Alternatively, the `--context-app` option may be supplied to the generator: + + ```console + $ mix phx.gen.html Accounts User users --context-app my_app + ``` + + ## Web namespace + + By default, the controller and HTML views are not namespaced but you can add + a namespace by passing the `--web` flag with a module name, for example: + + ```console + $ mix phx.gen.json Accounts User users --web Accounts + ``` + + Which would generate a `lib/app_web/controllers/accounts/user_controller.ex` and + `lib/app_web/controllers/accounts/user_json.ex`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap JSON views, controllers, + and controller tests, but leave internal implementation of the context + or schema to yourself. You can use the `--no-context` and `--no-schema` + flags for file generation control. Note `--no-context` implies `--no-schema`: + + ```console + $ mix phx.gen.live Accounts User users --no-context name:string + ``` + + In the cases above, tests are still generated, but they will all fail. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix phx.gen.schema` for more + information. 
+ """ + + use Mix.Task + + alias Mix.Phoenix.{Context, Scope} + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.json must be invoked from within your *_web application root directory" + ) + end + + {context, schema} = Gen.Context.build(args, name_optional: true) + + if schema.attrs == [] do + Mix.raise(""" + No attributes provided. The phx.gen.json generator requires at least one attribute. For example: + + mix phx.gen.json Accounts User users name:string + + """) + end + + Gen.Context.prompt_for_code_injection(context) + + {conn_scope, context_scope_prefix} = + if schema.scope do + base = "conn.assigns.#{schema.scope.assign_key}" + {base, "#{base}, "} + else + {"", ""} + end + + binding = [ + context: context, + schema: schema, + scope: schema.scope, + core_components?: Code.ensure_loaded?(Module.concat(context.web_module, "CoreComponents")), + gettext?: Code.ensure_loaded?(Module.concat(context.web_module, "Gettext")), + primary_key: schema.opts[:primary_key] || :id, + conn_scope: conn_scope, + context_scope_prefix: context_scope_prefix, + scope_conn_route_prefix: Scope.route_prefix(conn_scope, schema), + scope_param_route_prefix: Scope.route_prefix("scope", schema), + test_context_scope: + if(schema.scope && schema.scope.route_prefix, do: ", scope: scope", else: "") + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + + defp context_files(%Context{generate?: false}) do + [] + end + + @doc false + def files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + singular = schema.singular + 
web = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + controller_pre = Path.join([web, "controllers", web_path]) + test_pre = Path.join([test_prefix, "controllers", web_path]) + + [ + {:eex, "controller.ex.eex", Path.join([controller_pre, "#{singular}_controller.ex"])}, + {:eex, "json.ex.eex", Path.join([controller_pre, "#{singular}_json.ex"])}, + {:new_eex, "changeset_json.ex.eex", Path.join([web, "controllers/changeset_json.ex"])}, + {:eex, "controller_test.exs.eex", Path.join([test_pre, "#{singular}_controller_test.exs"])}, + {:new_eex, "fallback_controller.ex.eex", Path.join([web, "controllers/fallback_controller.ex"])} + ] + end + + @doc false + def copy_new_files(%Context{} = context, paths, binding) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.json", binding, files) + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + + context + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do + resource_path = + if schema.scope && schema.scope.route_prefix do + "#{schema.scope.route_prefix}/#{schema.plural}" + else + "/#{schema.plural}" + end + + if schema.web_namespace do + Mix.shell().info(""" + + Add the resource to your #{schema.web_namespace} :api scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + scope "/#{schema.web_path}", #{inspect(Module.concat(context.web_module, schema.web_namespace))} do + pipe_through :api + ... 
+ resources "#{resource_path}", #{inspect(schema.alias)}Controller#{if schema.opts[:primary_key], do: ~s[, param: "#{schema.opts[:primary_key]}"]} + end + """) + else + Mix.shell().info(""" + + Add the resource to the "#{Application.get_env(ctx_app, :generators)[:api_prefix] || "/api"}" scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + resources "#{resource_path}", #{inspect(schema.alias)}Controller, except: [:new, :edit]#{if schema.opts[:primary_key], do: ~s[, param: "#{schema.opts[:primary_key]}"]} + """) + end + + if schema.scope do + Mix.shell().info( + "Ensure the routes are defined in a block that sets the `#{inspect(context.scope.assign_key)}` assign." + ) + end + + if context.generate?, do: Gen.Context.print_shell_instructions(context) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.live.ex b/deps/phoenix/lib/mix/tasks/phx.gen.live.ex new file mode 100644 index 0000000..7324b6c --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.live.ex @@ -0,0 +1,453 @@ +defmodule Mix.Tasks.Phx.Gen.Live do + @shortdoc "Generates LiveView, templates, and context for a resource" + + @moduledoc """ + Generates LiveView, templates, and context for a resource. + + The format is: + + ```console + $ mix phx.gen.live []
[...] + ``` + + For example: + + ```console + $ mix phx.gen.live User users name:string age:integer + ``` + + Will generate a `User` schema for the `users` table within the `Users` context, + with the attributes `name` (as a string) and `age` (as an integer). + + You can also explicitly pass the context name as argument, whenever the context + is well defined: + + ```console + $ mix phx.gen.live Accounts User users name:string age:integer + ``` + + The first argument is the context module (`Accounts`) followed by + the schema module (`User`), table name (`users`), and attributes. + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + The schema is responsible for mapping the database fields into an + Elixir struct. It is followed by a list of attributes with their + respective names and types. See `mix phx.gen.schema` for more + information on attributes. + + Overall, this generator will add the following files to `lib/`: + + * a context module in `lib/app/accounts.ex` for the accounts API + * a schema in `lib/app/accounts/user.ex`, with a `users` table + * a LiveView in `lib/app_web/live/user_live/show.ex` + * a LiveView in `lib/app_web/live/user_live/index.ex` + * a LiveView in `lib/app_web/live/user_live/form.ex` + * a components module in `lib/app_web/components/core_components.ex` + if none exists + + After file generation is complete, there will be output regarding required + updates to the `lib/app_web/router.ex` file. 
+ + Add the live routes to your browser scope in lib/app_web/router.ex: + + live "/users", UserLive.Index, :index + live "/users/new", UserLive.Form, :new + live "/users/:id", UserLive.Show, :show + live "/users/:id/edit", UserLive.Form, :edit + + ## Scopes + + If your application configures its own default [scope](scopes.md), then this generator + will automatically make sure all of your context operations are correctly scoped. + You can pass the `--no-scope` flag to disable the scoping. + + ## Umbrella app configuration + + By default, Phoenix injects both web and domain specific functionality into the same + application. When using umbrella applications, those concerns are typically broken + into two separate apps, your context application - let's call it `my_app` - and its web + layer, which Phoenix assumes to be `my_app_web`. + + You can teach Phoenix to use this style via the `:context_app` configuration option + in your `my_app_umbrella/config/config.exs`: + + config :my_app_web, + ecto_repos: [Stuff.Repo], + generators: [context_app: :my_app] + + Alternatively, the `--context-app` option may be supplied to the generator: + + ```console + $ mix phx.gen.html Accounts User users --context-app my_app + ``` + + ## Web namespace + + By default, the LiveView modules are defined within a folder named + after the schema, such as `lib/app_web/live/user_live`. You can add + additional namespaces by passing the `--web` flag with a module name, + for example: + + ```console + $ mix phx.gen.live Accounts User users --web Accounts name:string + ``` + + Which would generate the LiveViews in `lib/app_web/live/accounts/user_live/`, + namespaced `AppWeb.Accounts.UserLive` instead of `AppWeb.UserLive`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap HTML templates, LiveViews, + and tests, but leave internal implementation of the context or schema + to yourself. 
You can use the `--no-context` and `--no-schema` flags + flags for file generation control. Note `--no-context` implies `--no-schema`: + + ```console + $ mix phx.gen.live Accounts User users --no-context name:string + ``` + + In the cases above, tests are still generated, but they will all fail. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix help phx.gen.schema` for more + information. + """ + use Mix.Task + + alias Mix.Phoenix.{Context, Schema, Scope} + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.live must be invoked from within your *_web application root directory" + ) + end + + Mix.Phoenix.ensure_live_view_compat!(__MODULE__) + + {context, schema} = Gen.Context.build(args, name_optional: true) + validate_context!(context) + + if schema.attrs == [] do + Mix.raise(""" + No attributes provided. The phx.gen.live generator requires at least one attribute. 
For example: + + mix phx.gen.live Accounts User users name:string + + """) + end + + Gen.Context.prompt_for_code_injection(context) + + {socket_scope, context_scope_prefix, assign_scope, assign_scope_prefix} = + if schema.scope do + base_socket = "socket.assigns.#{schema.scope.assign_key}" + base_assign = "@#{schema.scope.assign_key}" + {base_socket, "#{base_socket}, ", base_assign, "#{base_assign}, "} + else + {"", "", "", ""} + end + + binding = [ + context: context, + schema: schema, + primary_key: schema.opts[:primary_key] || :id, + scope: schema.scope, + inputs: inputs(schema), + socket_scope: socket_scope, + context_scope_prefix: context_scope_prefix, + assign_scope: assign_scope, + assign_scope_prefix: assign_scope_prefix, + scope_param_route_prefix: Scope.route_prefix("scope", schema), + scope_param: scope_param(schema), + scope_param_prefix: scope_param_prefix(schema), + scope_socket_route_prefix: Scope.route_prefix(socket_scope, schema), + scope_assign_route_prefix: scope_assign_route_prefix(schema), + test_context_scope: + if(schema.scope && schema.scope.route_prefix, do: ", scope: scope", else: "") + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(binding, paths) + |> maybe_inject_imports() + |> print_shell_instructions() + end + + defp validate_context!(context) do + cond do + context.schema.singular == "form" -> + Gen.Context.raise_with_help( + "cannot use form as the schema name because it conflicts with the LiveView assigns!" 
+ ) + + true -> + :ok + end + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + + defp context_files(%Context{generate?: false}) do + [] + end + + defp files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + web_prefix = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + live_subdir = "#{schema.singular}_live" + web_live = Path.join([web_prefix, "live", web_path, live_subdir]) + test_live = Path.join([test_prefix, "live", web_path]) + + [ + {:eex, "show.ex.eex", Path.join(web_live, "show.ex")}, + {:eex, "index.ex.eex", Path.join(web_live, "index.ex")}, + {:eex, "form.ex.eex", Path.join(web_live, "form.ex")}, + {:eex, "live_test.exs.eex", Path.join(test_live, "#{schema.singular}_live_test.exs")}, + {:new_eex, "core_components.ex.eex", + Path.join([web_prefix, "components", "core_components.ex"])} + ] + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(context) + + binding = + Keyword.merge(binding, + assigns: %{ + web_namespace: inspect(context.web_module), + gettext: true, + live: true, + # the core components are also generated in phx.new, so we check for + # esbuild (@javascript) - here we just assume that it's there + javascript: true + } + ) + + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.live", binding, files) + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + + context + end + + defp maybe_inject_imports(%Context{context_app: ctx_app} = context) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + [lib_prefix, web_dir] = Path.split(web_prefix) + file_path = Path.join(lib_prefix, "#{web_dir}.ex") + file = File.read!(file_path) + inject = 
"import #{inspect(context.web_module)}.CoreComponents" + + if String.contains?(file, inject) do + :ok + else + do_inject_imports(context, file, file_path, inject) + end + + context + end + + defp do_inject_imports(context, file, file_path, inject) do + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path)]) + + new_file = + String.replace( + file, + "use Phoenix.Component", + "use Phoenix.Component\n #{inject}" + ) + + if file != new_file do + File.write!(file_path, new_file) + else + Mix.shell().info(""" + + Could not find use Phoenix.Component in #{file_path}. + + This typically happens because your application was not generated + with the --live flag: + + mix phx.new my_app --live + + Please make sure LiveView is installed and that #{inspect(context.web_module)} + defines both `live_view/0` and `live_component/0` functions, + and that both functions import #{inspect(context.web_module)}.CoreComponents. + """) + end + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do + prefix = Module.concat(context.web_module, schema.web_namespace) + web_path = Mix.Phoenix.web_path(ctx_app) + + if schema.web_namespace do + Mix.shell().info(""" + + Add the live routes to your #{schema.web_namespace} :browser scope in #{web_path}/router.ex: + + scope "/#{schema.web_path}", #{inspect(prefix)} do + pipe_through :browser + ... + + #{for line <- live_route_instructions(schema), do: " #{line}"} + end + """) + else + Mix.shell().info(""" + + Add the live routes to your browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + #{for line <- live_route_instructions(schema), do: " #{line}"} + """) + end + + if schema.scope do + Mix.shell().info( + "Ensure the routes are defined in a block that sets the `#{inspect(context.scope.assign_key)}` assign." 
+ ) + end + + if context.generate?, do: Gen.Context.print_shell_instructions(context) + maybe_print_upgrade_info() + end + + defp maybe_print_upgrade_info do + unless Code.ensure_loaded?(Phoenix.LiveView.JS) do + Mix.shell().info(""" + + You must update :phoenix_live_view to v0.18 or later and + :phoenix_live_dashboard to v0.7 or later to use the features + in this generator. + """) + end + end + + defp live_route_instructions(schema) do + route_base = + if schema.scope && schema.scope.route_prefix do + scope_prefix = schema.scope.route_prefix + "#{scope_prefix}/#{schema.plural}" + else + "/#{schema.plural}" + end + + [ + ~s|live "#{route_base}", #{inspect(schema.alias)}Live.Index, :index\n|, + ~s|live "#{route_base}/new", #{inspect(schema.alias)}Live.Form, :new\n|, + ~s|live "#{route_base}/:#{schema.opts[:primary_key] || :id}", #{inspect(schema.alias)}Live.Show, :show\n|, + ~s|live "#{route_base}/:#{schema.opts[:primary_key] || :id}/edit", #{inspect(schema.alias)}Live.Form, :edit| + ] + end + + @doc false + def inputs(%Schema{} = schema) do + schema.attrs + |> Enum.reject(fn {_key, type} -> type == :map end) + |> Enum.map(fn + {_, {:references, _}} -> + nil + + {key, :integer} -> + ~s(<.input field={@form[#{inspect(key)}]} type="number" label="#{label(key)}" />) + + {key, :float} -> + ~s(<.input field={@form[#{inspect(key)}]} type="number" label="#{label(key)}" step="any" />) + + {key, :decimal} -> + ~s(<.input field={@form[#{inspect(key)}]} type="number" label="#{label(key)}" step="any" />) + + {key, :boolean} -> + ~s(<.input field={@form[#{inspect(key)}]} type="checkbox" label="#{label(key)}" />) + + {key, :text} -> + ~s(<.input field={@form[#{inspect(key)}]} type="textarea" label="#{label(key)}" />) + + {key, :date} -> + ~s(<.input field={@form[#{inspect(key)}]} type="date" label="#{label(key)}" />) + + {key, :time} -> + ~s(<.input field={@form[#{inspect(key)}]} type="time" label="#{label(key)}" />) + + {key, :utc_datetime} -> + ~s(<.input 
field={@form[#{inspect(key)}]} type="datetime-local" label="#{label(key)}" />) + + {key, :naive_datetime} -> + ~s(<.input field={@form[#{inspect(key)}]} type="datetime-local" label="#{label(key)}" />) + + {key, {:array, _} = type} -> + ~s""" + <.input + field={@form[#{inspect(key)}]} + type="select" + multiple + label="#{label(key)}" + options={#{inspect(default_options(type))}} + /> + """ + + {key, {:enum, _}} -> + ~s""" + <.input + field={@form[#{inspect(key)}]} + type="select" + label="#{label(key)}" + prompt="Choose a value" + options={Ecto.Enum.values(#{inspect(schema.module)}, #{inspect(key)})} + /> + """ + + {key, _} -> + ~s(<.input field={@form[#{inspect(key)}]} type="text" label="#{label(key)}" />) + end) + end + + defp default_options({:array, :string}), + do: Enum.map([1, 2], &{"Option #{&1}", "option#{&1}"}) + + defp default_options({:array, :integer}), + do: Enum.map([1, 2], &{"#{&1}", &1}) + + defp default_options({:array, _}), do: [] + + defp label(key), do: Phoenix.Naming.humanize(to_string(key)) + + defp scope_param(%{scope: nil}), do: "" + + defp scope_param(%{scope: %{route_prefix: route_prefix}}) when not is_nil(route_prefix), + do: "scope" + + defp scope_param(_), do: "_scope" + + defp scope_param_prefix(schema) do + param = scope_param(schema) + if param != "", do: "#{param}, ", else: "" + end + + defp scope_assign_route_prefix( + %{scope: %{route_prefix: route_prefix, assign_key: assign_key}} = schema + ) + when not is_nil(route_prefix) do + Scope.route_prefix("@#{assign_key}", schema) + end + + defp scope_assign_route_prefix(_), do: "" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex b/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex new file mode 100644 index 0000000..f11136f --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex @@ -0,0 +1,214 @@ +defmodule Mix.Tasks.Phx.Gen.Notifier do + @shortdoc "Generates a notifier that delivers emails by default" + + @moduledoc """ + Generates a notifier that delivers emails 
by default. + + $ mix phx.gen.notifier Accounts User welcome_user reset_password confirmation_instructions + + This task expects a context module name, followed by a + notifier name and one or more message names. Messages + are the functions that will be created prefixed by "deliver", + so the message name should be "snake_case" without punctuation. + + Additionally a context app can be specified with the flag + `--context-app`, which is useful if the notifier is being + generated in a different app under an umbrella. + + $ mix phx.gen.notifier Accounts User welcome_user --context-app marketing + + The app "marketing" must exist before the command is executed. + """ + + use Mix.Task + + @switches [ + context: :boolean, + context_app: :string, + prefix: :string + ] + + @default_opts [context: true] + + alias Mix.Phoenix.Context + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.notifier must be invoked from within your *_web application root directory" + ) + end + + {context, notifier_module, messages} = build(args) + + inflections = Mix.Phoenix.inflect(notifier_module) + + binding = [ + context: context, + inflections: inflections, + notifier_messages: messages + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + if "--no-compile" not in args do + Mix.Task.run("compile") + end + + context + |> copy_new_files(binding, paths) + |> maybe_print_mailer_installation_instructions() + end + + @doc false + def build(args, help \\ __MODULE__) do + {opts, parsed, _} = parse_opts(args) + + [context_name, notifier_name | notifier_messages] = validate_args!(parsed, help) + + notifier_module = inspect(Module.concat(context_name, "#{notifier_name}Notifier")) + context = Context.new(notifier_module, opts) + + {context, notifier_module, notifier_messages} + end + + defp parse_opts(args) do + {opts, parsed, invalid} = OptionParser.parse(args, switches: @switches) + + merged_opts = + @default_opts + |> 
Keyword.merge(opts) + |> put_context_app(opts[:context_app]) + + {merged_opts, parsed, invalid} + end + + defp put_context_app(opts, nil), do: opts + + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + defp validate_args!([context, notifier | messages] = args, help) do + cond do + not Context.valid?(context) -> + help.raise_with_help( + "Expected the context, #{inspect(context)}, to be a valid module name" + ) + + not valid_notifier?(notifier) -> + help.raise_with_help( + "Expected the notifier, #{inspect(notifier)}, to be a valid module name" + ) + + context == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate context #{context} because it has the same name as the application" + ) + + notifier == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate notifier #{notifier} because it has the same name as the application" + ) + + Enum.any?(messages, &(!valid_message?(&1))) -> + help.raise_with_help( + "Cannot generate notifier #{inspect(notifier)} because one of the messages is invalid: #{Enum.map_join(messages, ", ", &inspect/1)}" + ) + + true -> + args + end + end + + defp validate_args!(_, help) do + help.raise_with_help("Invalid arguments") + end + + defp valid_notifier?(notifier) do + notifier =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + defp valid_message?(message_name) do + message_name =~ ~r/^[a-z]+(\_[a-z0-9]+)*$/ + end + + @doc false + @spec raise_with_help(String.t()) :: no_return() + def raise_with_help(msg) do + Mix.raise(""" + #{msg} + + mix phx.gen.notifier expects a context module name, followed by a + notifier name and one or more message names. Messages are the + functions that will be created prefixed by "deliver", so the message + name should be "snake_case" without punctuation. + For example: + + mix phx.gen.notifier Accounts User welcome reset_password + + In this example the notifier will be called `UserNotifier` inside + the Accounts context. 
The functions `deliver_welcome/1` and + `reset_password/1` will be created inside this notifier. + """) + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(context) + + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.notifier", binding, files) + + context + end + + defp files_to_be_generated(%Context{} = context) do + [ + {:eex, "notifier.ex.eex", context.file}, + {:eex, "notifier_test.exs.eex", context.test_file} + ] + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + @spec maybe_print_mailer_installation_instructions(%Context{}) :: %Context{} + def maybe_print_mailer_installation_instructions(%Context{} = context) do + mailer_module = Module.concat([context.base_module, "Mailer"]) + + unless Code.ensure_loaded?(mailer_module) do + Mix.shell().info(""" + Unable to find the "#{inspect(mailer_module)}" module defined. + + A mailer module like the following is expected to be defined + in your application in order to send emails. + + defmodule #{inspect(mailer_module)} do + use Swoosh.Mailer, otp_app: #{inspect(context.context_app)} + end + + It is also necessary to add "swoosh" as a dependency in your + "mix.exs" file: + + def deps do + [{:swoosh, "~> 1.4"}] + end + + Finally, an adapter needs to be set in your configuration: + + import Config + config #{inspect(context.context_app)}, #{inspect(mailer_module)}, adapter: Swoosh.Adapters.Local + + Check https://hexdocs.pm/swoosh for more details. + """) + end + + context + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex b/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex new file mode 100644 index 0000000..c7867b4 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex @@ -0,0 +1,69 @@ +defmodule Mix.Tasks.Phx.Gen.Presence do + @shortdoc "Generates a Presence tracker" + + @moduledoc """ + Generates a Presence tracker. 
+ + $ mix phx.gen.presence + $ mix phx.gen.presence MyPresence + + The argument, which defaults to `Presence`, defines the module name of the + Presence tracker. + + Generates a new file, `lib/my_app_web/channels/my_presence.ex`, where + `my_presence` is the snake-cased version of the provided module name. + """ + use Mix.Task + + @doc false + def run([]) do + run(["Presence"]) + end + + def run([alias_name]) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.presence must be invoked from within your *_web application's root directory" + ) + end + + context_app = Mix.Phoenix.context_app() + otp_app = Mix.Phoenix.otp_app() + web_prefix = Mix.Phoenix.web_path(context_app) + inflections = Mix.Phoenix.inflect(alias_name) + + inflections = + Keyword.put(inflections, :module, "#{inflections[:web_module]}.#{inflections[:scoped]}") + + binding = + inflections ++ + [ + otp_app: otp_app, + pubsub_server: Module.concat(Mix.Phoenix.context_base(context_app), "PubSub") + ] + + files = [ + {:eex, "presence.ex.eex", Path.join(web_prefix, "channels/#{binding[:path]}.ex")} + ] + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.presence", binding, files) + + Mix.shell().info(""" + + Add your new module to your supervision tree, + in lib/#{otp_app}/application.ex: + + children = [ + ... + #{binding[:module]} + ] + + You're all set! See the Phoenix.Presence docs for more details: + https://hexdocs.pm/phoenix/Phoenix.Presence.html + """) + end + + defp paths do + [".", :phoenix] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.release.ex b/deps/phoenix/lib/mix/tasks/phx.gen.release.ex new file mode 100755 index 0000000..84d9300 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.release.ex @@ -0,0 +1,379 @@ +defmodule Mix.Tasks.Phx.Gen.Release do + @shortdoc "Generates release files and optional Dockerfile for release-based deployments" + + @moduledoc """ + Generates release files and optional Dockerfile for release-based deployments. 
+ + The following release files are created: + + * `lib/app_name/release.ex` - A release module containing tasks for running + migrations inside a release + + * `rel/overlays/bin/migrate` - A migrate script for conveniently invoking + the release system migrations + + * `rel/overlays/bin/server` - A server script for conveniently invoking + the release system with environment variables to start the phoenix web server + + Note, the `rel/overlays` directory is copied into the release build by default when + running `mix release`. + + To skip generating the migration-related files, use the `--no-ecto` flag. To + force these migration-related files to be generated, use the `--ecto` flag. + + ## Docker + + When the `--docker` flag is passed, the following docker files are generated: + + * `Dockerfile` - The Dockerfile for use in any standard docker deployment + + * `.dockerignore` - A docker ignore file with standard elixir defaults + + By default, the build uses whatever base image matches your development system’s + active versions at generation time. To override those defaults, specify: + + * `otp` — the OTP version to use + + * `elixir` — the Elixir version to use + + For extended release configuration, the `mix release.init` task can be used + in addition to this task. See the `Mix.Release` docs for more details. + + If you are using third party JS package managers like `npm` or `yarn`, you will + need to update the generated Dockerfile with an extra step to fetch those packages. + This might look like this: + + ```dockerfile + ... + ARG RUNNER_IMAGE="debian:..." + + FROM node:20 as node + COPY assets assets + RUN cd assets && npm install + + FROM ${BUILDER_IMAGE} as builder + + ... + + COPY assets assets + COPY --from=node assets/node_modules assets/node_modules + ... 
+ ``` + + If you are using esbuild through Node.js or other JavaScript build tools, the approach + above can also be modified to invoke those in the node stage, for example: + + ```dockerfile + FROM node:20 as node + COPY assets assets + RUN cd assets && npm install && node build.js --deploy + ``` + + Note that you may need to adjust the `assets.deploy` task to not invoke Node.js again. + """ + + use Mix.Task + + require Logger + + @doc false + def run(args) do + opts = parse_args(args) + + if Mix.Project.umbrella?() do + Mix.raise(""" + mix phx.gen.release is not supported in umbrella applications. + + Run this task in your web application instead. + """) + end + + app = Mix.Phoenix.otp_app() + app_namespace = Mix.Phoenix.base() + web_namespace = app_namespace |> Mix.Phoenix.web_module() |> inspect() + + binding = [ + app_namespace: app_namespace, + otp_app: app, + assets_dir_exists?: File.dir?("assets") + ] + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "rel/server.sh.eex", "rel/overlays/bin/server"}, + {:eex, "rel/server.bat.eex", "rel/overlays/bin/server.bat"} + ]) + + if opts.ecto do + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "rel/migrate.sh.eex", "rel/overlays/bin/migrate"}, + {:eex, "rel/migrate.bat.eex", "rel/overlays/bin/migrate.bat"}, + {:eex, "release.ex.eex", Mix.Phoenix.context_lib_path(app, "release.ex")} + ]) + end + + if opts.docker do + gen_docker(binding, opts) + end + + File.chmod!("rel/overlays/bin/server", 0o755) + File.chmod!("rel/overlays/bin/server.bat", 0o755) + + if opts.ecto do + File.chmod!("rel/overlays/bin/migrate", 0o755) + File.chmod!("rel/overlays/bin/migrate.bat", 0o755) + end + + Mix.shell().info(""" + + Your application is ready to be deployed in a release! + + See https://hexdocs.pm/mix/Mix.Tasks.Release.html for more information about Elixir releases. 
+ #{if opts.docker, do: docker_instructions()} + Here are some useful release commands you can run in any release environment: + + # To build a release + mix release + + # To start your system with the Phoenix server running + _build/dev/rel/#{app}/bin/server + #{if opts.ecto, do: ecto_instructions(app)} + Once the release is running you can connect to it remotely: + + _build/dev/rel/#{app}/bin/#{app} remote + + To list all commands: + + _build/dev/rel/#{app}/bin/#{app} + """) + + if opts.ecto and opts.socket_db_adaptor_installed do + post_install_instructions("config/runtime.exs", ~r/ECTO_IPV6/, """ + [warn] Conditional IPv6 support missing from runtime configuration. + + Add the following to your config/runtime.exs: + + maybe_ipv6 = if System.get_env("ECTO_IPV6") in ~w(true 1), do: [:inet6], else: [] + + config :#{app}, #{app_namespace}.Repo, + ..., + socket_options: maybe_ipv6 + """) + end + + post_install_instructions("config/runtime.exs", ~r/PHX_SERVER/, """ + [warn] Conditional server startup is missing from runtime configuration. + + Add the following to the top of your config/runtime.exs: + + if System.get_env("PHX_SERVER") do + config :#{app}, #{web_namespace}.Endpoint, server: true + end + """) + + post_install_instructions("config/runtime.exs", ~r/PHX_HOST/, """ + [warn] Environment based URL export is missing from runtime configuration. 
+ + Add the following to your config/runtime.exs: + + host = System.get_env("PHX_HOST") || "example.com" + + config :#{app}, #{web_namespace}.Endpoint, + ..., + url: [host: host, port: 443] + """) + end + + defp parse_args(args) do + args + |> OptionParser.parse( + strict: [ecto: :boolean, docker: :boolean, elixir: :string, otp: :string] + ) + |> elem(0) + |> Keyword.put_new_lazy(:ecto, &ecto_sql_installed?/0) + |> Keyword.put_new_lazy(:socket_db_adaptor_installed, &socket_db_adaptor_installed?/0) + |> Keyword.put_new(:docker, false) + |> Keyword.put_new(:elixir, false) + |> Keyword.put_new(:otp, false) + |> Map.new() + end + + defp ecto_instructions(app) do + """ + + # To run migrations + _build/dev/rel/#{app}/bin/migrate + """ + end + + defp docker_instructions do + """ + + Using the generated Dockerfile, your release will be bundled into + a Docker image, ready for deployment on platforms that support Docker. + + For more information about deploying with Docker see + https://hexdocs.pm/phoenix/releases.html#containers + """ + end + + defp paths do + [".", :phoenix] + end + + defp post_install_instructions(path, matching, msg) do + case File.read(path) do + {:ok, content} -> + unless content =~ matching, do: Mix.shell().info(msg) + + {:error, _} -> + Mix.shell().info(msg) + end + end + + defp ecto_sql_installed?, do: Mix.Project.deps_paths() |> Map.has_key?(:ecto_sql) + + defp socket_db_adaptor_installed? 
do + Mix.Project.deps_paths(depth: 1) + |> Map.take([:tds, :myxql, :postgrex]) + |> map_size() > 0 + end + + @debian "trixie" + defp elixir_and_debian_vsn(elixir_vsn, otp_vsn) do + url = + "https://hub.docker.com/v2/namespaces/hexpm/repositories/elixir/tags?name=#{elixir_vsn}-erlang-#{otp_vsn}-debian-#{@debian}-" + + fetch_body!(url) + |> Phoenix.json_library().decode!() + |> Map.fetch!("results") + |> Enum.find_value(:error, fn %{"name" => name} -> + if String.ends_with?(name, "-slim") do + elixir_vsn = name |> String.split("-") |> List.first() + %{"vsn" => vsn} = Regex.named_captures(~r/.*debian-#{@debian}-(?.*)-slim/, name) + {:ok, elixir_vsn, vsn} + end + end) + end + + defp gen_docker(binding, opts) do + wanted_elixir_vsn = + opts[:elixir] || + case Version.parse!(System.version()) do + %{major: major, minor: minor, pre: ["dev"]} -> "#{major}.#{minor - 1}.0" + _ -> System.version() + end + + otp_vsn = opts[:otp] || otp_vsn() + + vsns = + case elixir_and_debian_vsn(wanted_elixir_vsn, otp_vsn) do + {:ok, elixir_vsn, debian_vsn} -> + {:ok, elixir_vsn, debian_vsn} + + :error -> + case elixir_and_debian_vsn("", otp_vsn) do + {:ok, elixir_vsn, debian_vsn} -> + Logger.warning( + "Docker image for Elixir #{wanted_elixir_vsn} not found, defaulting to Elixir #{elixir_vsn}" + ) + + {:ok, elixir_vsn, debian_vsn} + + :error -> + :error + end + end + + case vsns do + {:ok, elixir_vsn, debian_vsn} -> + binding = + Keyword.merge(binding, + debian: @debian, + debian_vsn: debian_vsn, + elixir_vsn: elixir_vsn, + otp_vsn: otp_vsn + ) + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "Dockerfile.eex", "Dockerfile"}, + {:eex, "dockerignore.eex", ".dockerignore"} + ]) + + :error -> + raise """ + unable to fetch supported Docker image for Elixir #{wanted_elixir_vsn} and Erlang #{otp_vsn}. 
+ Please check https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=#{otp_vsn} \ + for a suitable Elixir version + """ + end + end + + defp ensure_app!(app) do + if function_exported?(Mix, :ensure_application!, 1) do + apply(Mix, :ensure_application!, [app]) + else + {:ok, _} = Application.ensure_all_started(app) + end + end + + defp fetch_body!(url) do + url = String.to_charlist(url) + Logger.debug("Fetching latest image information from #{url}") + ensure_app!(:inets) + ensure_app!(:ssl) + + if proxy = System.get_env("HTTP_PROXY") || System.get_env("http_proxy") do + Logger.debug("Using HTTP_PROXY: #{proxy}") + %{host: host, port: port} = URI.parse(proxy) + :httpc.set_options([{:proxy, {{String.to_charlist(host), port}, []}}]) + end + + if proxy = System.get_env("HTTPS_PROXY") || System.get_env("https_proxy") do + Logger.debug("Using HTTPS_PROXY: #{proxy}") + %{host: host, port: port} = URI.parse(proxy) + :httpc.set_options([{:https_proxy, {{String.to_charlist(host), port}, []}}]) + end + + # https://security.erlef.org/secure_coding_and_deployment_hardening/inets + http_options = [ + ssl: [ + verify: :verify_peer, + cacerts: :public_key.cacerts_get(), + depth: 3, + customize_hostname_check: [ + match_fun: :public_key.pkix_verify_hostname_match_fun(:https) + ], + versions: protocol_versions() + ] + ] + + http_client = + Process.get({__MODULE__, :http_client}, fn url -> + case :httpc.request(:get, {url, []}, http_options, body_format: :binary) do + {:ok, {{_, 200, _}, _headers, body}} -> body + other -> raise "couldn't fetch #{url}: #{inspect(other)}" + end + end) + + http_client.(url) + end + + defp protocol_versions do + otp_major_vsn = :erlang.system_info(:otp_release) |> List.to_integer() + if otp_major_vsn < 25, do: [:"tlsv1.2"], else: [:"tlsv1.2", :"tlsv1.3"] + end + + def otp_vsn do + major = to_string(:erlang.system_info(:otp_release)) + path = Path.join([:code.root_dir(), "releases", major, "OTP_VERSION"]) + + case File.read(path) do + {:ok, content} -> + 
String.trim(content) + + {:error, _} -> + IO.warn("unable to read OTP minor version at #{path}. Falling back to #{major}.0") + "#{major}.0" + end + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex b/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex new file mode 100644 index 0000000..617403e --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex @@ -0,0 +1,295 @@ +defmodule Mix.Tasks.Phx.Gen.Schema do + @shortdoc "Generates an Ecto schema and migration file" + + @moduledoc """ + Generates an Ecto schema and migration. + + $ mix phx.gen.schema Blog.Post blog_posts title:string views:integer + + The first argument is the schema module followed by its plural + name (used as the table name). + + The generated schema above will contain: + + * a schema file in `lib/my_app/blog/post.ex`, with a `blog_posts` table + * a migration file for the repository + + The generated migration can be skipped with `--no-migration`. + + ## Contexts + + Your schemas can be generated and added to a separate OTP app. + Make sure your configuration is properly setup or manually + specify the context app with the `--context-app` option with + the CLI. + + Via config: + + config :marketing_web, :generators, context_app: :marketing + + Via CLI: + + $ mix phx.gen.schema Blog.Post blog_posts title:string views:integer --context-app marketing + + ## Attributes + + The resource fields are given using `name:type` syntax + where type are the types supported by Ecto. 
Omitting + the type makes it default to `:string`: + + $ mix phx.gen.schema Blog.Post blog_posts title views:integer + + The following types are supported: + + #{for attr <- Mix.Phoenix.Schema.valid_types(), do: " * `#{inspect attr}`\n"} + * `:datetime` - An alias for `:naive_datetime` + + The generator also supports references, which we will properly + associate the given column to the primary key column of the + referenced table: + + $ mix phx.gen.schema Blog.Post blog_posts title user_id:references:users + + This will result in a migration with an `:integer` column + of `:user_id` and create an index. + + Furthermore an array type can also be given if it is + supported by your database, although it requires the + type of the underlying array element to be given too: + + $ mix phx.gen.schema Blog.Post blog_posts tags:array:string + + Unique columns can be automatically generated by using: + + $ mix phx.gen.schema Blog.Post blog_posts title:unique unique_int:integer:unique + + Redact columns can be automatically generated by using: + + $ mix phx.gen.schema Accounts.Superhero superheroes secret_identity:redact password:string:redact + + Ecto.Enum fields can be generated by using: + + $ mix phx.gen.schema Blog.Post blog_posts title status:enum:unpublished:published:deleted + + If no data type is given, it defaults to a string. + + ## table + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.schema Blog.Post posts --table cms_posts + + ## binary_id + + Generated migration can use `binary_id` for schema's primary key + and its references with option `--binary-id`. + + ## primary_key + + By default, the primary key in the table is called `id`. This option + allows to change the name of the primary key column. 
For example: + + $ mix phx.gen.schema Blog.post posts --primary-key post_id + + ## repo + + Generated migration can use `repo` to set the migration repository + folder with option `--repo`: + + $ mix phx.gen.schema Blog.Post posts --repo MyApp.Repo.Auth + + ## migration_dir + + Generated migrations can be added to a specific `--migration-dir` which sets + the migration folder path: + + $ mix phx.gen.schema Blog.Post posts --migration-dir /path/to/directory + + + ## prefix + + By default migrations and schemas are generated without a prefix. + + For PostgreSQL this sets the "SCHEMA" (typically set via `search_path`) + and for MySQL it sets the database for the generated migration and schema. + The prefix can be used to thematically organize your tables on the database level. + + A prefix can be specified with the `--prefix` flags. For example: + + $ mix phx.gen.schema Blog.Post posts --prefix blog + + > #### Warning {: .warning} + > + > The flag does not generate migrations to create the schema / database. + > This needs to be done manually or in a separate migration. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. These are the defaults: + + config :your_app, :generators, + migration: true, + binary_id: false, + timestamp_type: :naive_datetime, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration or `--migration` to force generation of the migration. + + ## UTC timestamps + + By setting the `:timestamp_type` to `:utc_datetime`, the timestamps + will be created using the UTC timezone. This results in a `DateTime` struct + instead of a `NaiveDateTime`. This can also be set to `:utc_datetime_usec` for + microsecond precision. 
+ + """ + use Mix.Task + + alias Mix.Phoenix.Schema + + @switches [migration: :boolean, binary_id: :boolean, table: :string, web: :string, + context_app: :string, prefix: :string, repo: :string, migration_dir: :string, + primary_key: :string, scope: :string, no_scope: :boolean] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.schema must be invoked from within your *_web application root directory" + end + + schema = build(args, []) + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(schema) + + binding = [ + schema: schema, + primary_key: schema.opts[:primary_key] || :id, + scope: schema.scope + ] + + schema + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(schema) do + schema + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def build(args, parent_opts, help \\ __MODULE__) do + {schema_opts, parsed, _} = OptionParser.parse(args, switches: @switches) + [schema_name, plural | attrs] = validate_args!(parsed, help) + + opts = + parent_opts + |> Keyword.merge(schema_opts) + |> put_context_app(schema_opts[:context_app]) + |> maybe_update_repo_module() + + Schema.new(schema_name, plural, attrs, opts) + end + + defp maybe_update_repo_module(opts) do + if is_nil(opts[:repo]) do + opts + else + Keyword.update!(opts, :repo, &Module.concat([&1])) + end + end + + defp put_context_app(opts, nil), do: opts + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + @doc false + def files_to_be_generated(%Schema{} = schema) do + [{:eex, "schema.ex.eex", schema.file}] + end + + @doc false + def copy_new_files(%Schema{context_app: ctx_app, repo: repo, opts: opts} = schema, paths, binding) do + files = files_to_be_generated(schema) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.schema", binding, files) + + if schema.migration? 
do + migration_dir = + cond do + migration_dir = opts[:migration_dir] -> + migration_dir + + opts[:repo] -> + repo_name = repo |> Module.split() |> List.last() |> Macro.underscore() + Mix.Phoenix.context_app_path(ctx_app, "priv/#{repo_name}/migrations/") + + true -> + Mix.Phoenix.context_app_path(ctx_app, "priv/repo/migrations/") + end + + migration_path = Path.join(migration_dir, "#{timestamp()}_create_#{schema.table}.exs") + + Mix.Phoenix.copy_from paths, "priv/templates/phx.gen.schema", binding, [ + {:eex, "migration.exs.eex", migration_path}, + ] + end + + schema + end + + @doc false + def print_shell_instructions(%Schema{} = schema) do + if schema.migration? do + Mix.shell().info """ + + Remember to update your repository by running migrations: + + $ mix ecto.migrate + """ + end + end + + @doc false + def validate_args!([schema, plural | _] = args, help) do + cond do + not Schema.valid?(schema) -> + help.raise_with_help "Expected the schema argument, #{inspect schema}, to be a valid module name" + String.contains?(plural, ":") or plural != Phoenix.Naming.underscore(plural) -> + help.raise_with_help "Expected the plural argument, #{inspect plural}, to be all lowercase using snake_case convention" + true -> + args + end + end + def validate_args!(_, help) do + help.raise_with_help "Invalid arguments" + end + + @doc false + @spec raise_with_help(String.t) :: no_return() + def raise_with_help(msg) do + Mix.raise """ + #{msg} + + mix phx.gen.schema expects both a module name and + the plural of the generated resource followed by + any number of attributes: + + mix phx.gen.schema Blog.Post blog_posts title:string + """ + end + + defp timestamp do + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + defp pad(i) when i < 10, do: << ?0, ?0 + i >> + defp pad(i), do: to_string(i) +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.secret.ex b/deps/phoenix/lib/mix/tasks/phx.gen.secret.ex new file mode 
defmodule Mix.Tasks.Phx.Gen.Secret do
  @shortdoc "Generates a secret"

  @moduledoc """
  Generates a secret and prints it to the terminal.

      $ mix phx.gen.secret [length]

  By default, mix phx.gen.secret generates a key 64 characters long.

  The minimum value for `length` is 32.
  """
  use Mix.Task

  @doc false
  def run([]), do: run(["64"])

  def run([arg]) do
    arg
    |> parse!()
    |> random_string()
    |> Mix.shell().info()
  end

  def run([_ | _]), do: invalid_args!()

  # Parses the CLI argument as an integer; any trailing garbage or
  # non-numeric input is invalid usage.
  defp parse!(arg) do
    case Integer.parse(arg) do
      {length, ""} -> length
      _other -> invalid_args!()
    end
  end

  # Produces `length` characters of cryptographically strong randomness.
  # Base64-encoding `length` random bytes always yields at least `length`
  # characters, so the truncation below never underflows.
  defp random_string(length) when length > 31 do
    length
    |> :crypto.strong_rand_bytes()
    |> Base.encode64(padding: false)
    |> binary_part(0, length)
  end

  defp random_string(_), do: Mix.raise "The secret should be at least 32 characters long"

  @spec invalid_args!() :: no_return()
  defp invalid_args! do
    Mix.raise "mix phx.gen.secret expects a length as integer or no argument at all"
  end
end
defmodule Mix.Tasks.Phx.Gen.Socket do
  @shortdoc "Generates a Phoenix socket handler"

  @moduledoc """
  Generates a Phoenix socket handler.

      $ mix phx.gen.socket User

  Accepts the module name for the socket.

  The generated files will contain:

  For a regular application:

    * a client in `assets/js`
    * a socket in `lib/my_app_web/channels`

  For an umbrella application:

    * a client in `apps/my_app_web/assets/js`
    * a socket in `apps/my_app_web/lib/my_app_web/channels`

  You can then generate channels with `mix phx.gen.channel`.
  """
  use Mix.Task

  @doc false
  def run(args) do
    if Mix.Project.umbrella?() do
      Mix.raise(
        "mix phx.gen.socket must be invoked from within your *_web application's root directory"
      )
    end

    [socket_name, pre_existing_channel] = validate_args!(args)

    context_app = Mix.Phoenix.context_app()
    web_prefix = Mix.Phoenix.web_path(context_app)
    inflections = Mix.Phoenix.inflect(socket_name)

    # Template bindings: the socket module itself, the endpoint it will be
    # mounted in, and (optionally) a pre-existing channel to wire up.
    binding =
      inflections
      |> Keyword.put(:module, "#{inflections[:web_module]}.#{inflections[:scoped]}")
      |> Keyword.put(:endpoint_module, Module.concat([inflections[:web_module], Endpoint]))
      |> Keyword.put(:web_prefix, web_prefix)
      |> Keyword.put(:existing_channel, existing_channel_binding(pre_existing_channel))

    Mix.Phoenix.check_module_name_availability!(binding[:module] <> "Socket")

    Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.socket", binding, [
      {:eex, "socket.ex.eex", Path.join(web_prefix, "channels/#{binding[:path]}_socket.ex")},
      {:eex, "socket.js.eex", "assets/js/#{binding[:path]}_socket.js"}
    ])

    Mix.shell().info("""

    Add the socket handler to your `#{Mix.Phoenix.web_path(context_app, "endpoint.ex")}`, for example:

        socket "/socket", #{binding[:module]}Socket,
          websocket: true,
          longpoll: false

    For the front-end integration, you need to import the `#{binding[:path]}_socket.js`
    in your `assets/js/app.js` file:

        import "./#{binding[:path]}_socket.js"
    """)
  end

  # Inflects naming for the channel passed via --from-channel, or nil when
  # no pre-existing channel was given.
  defp existing_channel_binding(nil), do: nil

  defp existing_channel_binding(channel_name) do
    channel_binding = Mix.Phoenix.inflect(channel_name)

    Keyword.put(
      channel_binding,
      :module,
      "#{channel_binding[:web_module]}.#{channel_binding[:scoped]}"
    )
  end

  @spec raise_with_help() :: no_return()
  defp raise_with_help do
    Mix.raise("""
    mix phx.gen.socket expects the module name:

        mix phx.gen.socket User

    """)
  end

  defp validate_args!([name, "--from-channel", pre_existing_channel]) do
    if valid_name?(name) and valid_name?(pre_existing_channel) do
      [name, pre_existing_channel]
    else
      raise_with_help()
    end
  end

  defp validate_args!([name]) do
    if valid_name?(name), do: [name, nil], else: raise_with_help()
  end

  defp validate_args!(_), do: raise_with_help()

  # A valid name is a dotted CamelCase module path, e.g. `My.User`.
  defp valid_name?(name) do
    name =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/
  end

  # Template lookup roots: the project itself, then the :phoenix dep.
  defp paths do
    [".", :phoenix]
  end
end
defmodule Mix.Tasks.Phx.Routes do
  use Mix.Task
  alias Phoenix.Router.ConsoleFormatter

  @shortdoc "Prints all routes"

  @moduledoc """
  Prints all routes for the default or a given router.
  Can also locate the controller function behind a specified url.

      $ mix phx.routes [ROUTER] [--info URL]

  The default router is inflected from the application
  name unless a configuration named `:namespace`
  is set inside your application configuration. For example,
  the configuration:

      config :my_app,
        namespace: My.App

  will exhibit the routes for `My.App.Router` when this
  task is invoked without arguments.

  Umbrella projects do not have a default router and
  therefore always expect a router to be given. An
  alias can be added to mix.exs to automate this:

      defp aliases do
        [
          "phx.routes": "phx.routes MyAppWeb.Router",
          # aliases...
        ]
      end

  ## Options

    * `--info` - locate the controller function definition called by the given url
    * `--method` - what HTTP method to use with the given url, only works when used with `--info` and defaults to `get`

  ## Examples

  Print all routes for the default router:

      $ mix phx.routes

  Print all routes for the given router:

      $ mix phx.routes MyApp.AnotherRouter

  Print information about the controller function called by a specified url:

      $ mix phx.routes --info http://0.0.0.0:4000/home
      Module: RouteInfoTestWeb.PageController
      Function: :index
      /home/my_app/controllers/page_controller.ex:4

  Print information about the controller function called by a specified url and HTTP method:

      $ mix phx.routes --info http://0.0.0.0:4000/users --method post
      Module: RouteInfoTestWeb.UserController
      Function: :create
      /home/my_app/controllers/user_controller.ex:24
  """

  @doc false
  def run(args, base \\ Mix.Phoenix.base()) do
    if "--no-compile" not in args do
      Mix.Task.run("compile")
    end

    Mix.Task.reenable("phx.routes")

    # BUG FIX: `--method` was documented and read below via
    # `Keyword.get(opts, :method, ...)` but was missing from `:switches`,
    # so OptionParser never parsed it (the flag landed in the invalid list
    # and its value polluted the positional args). Declare it as a string.
    {opts, args, _} =
      OptionParser.parse(args,
        switches: [endpoint: :string, router: :string, info: :string, method: :string]
      )

    {router_mod, endpoint_mod} =
      case args do
        [passed_router] -> {router(passed_router, base), opts[:endpoint]}
        [] -> {router(opts[:router], base), endpoint(opts[:endpoint], base)}
      end

    case Keyword.fetch(opts, :info) do
      {:ok, url} ->
        get_url_info(url, {router_mod, opts})

      :error ->
        router_mod
        |> ConsoleFormatter.format(endpoint_mod)
        |> Mix.shell().info()
    end
  end

  # Prints module, function and source location of the plug that handles
  # the given URL path (for --info). Falls back to plug/plug_opts when the
  # route has no MFA metadata (e.g. plugs that are not controllers).
  def get_url_info(url, {router_mod, opts}) do
    %{path: path} = URI.parse(url)

    method = opts |> Keyword.get(:method, "get") |> String.upcase()
    meta = Phoenix.Router.route_info(router_mod, method, path, "")
    %{plug: plug, plug_opts: plug_opts} = meta

    {module, func_name} =
      case meta[:mfa] do
        {mod, fun, _} -> {mod, fun}
        _ -> {plug, plug_opts}
      end

    Mix.shell().info("Module: #{inspect(module)}")
    if func_name, do: Mix.shell().info("Function: #{inspect(func_name)}")

    file_path = get_file_path(module)

    if line = get_line_number(module, func_name) do
      Mix.shell().info("#{file_path}:#{line}")
    else
      Mix.shell().info("#{file_path}")
    end
  end

  # Resolves the endpoint module: default `<Base>Web.Endpoint` or an
  # explicitly named module.
  defp endpoint(nil, base) do
    loaded(web_mod(base, "Endpoint"))
  end

  defp endpoint(module, _base) do
    loaded(Module.concat([module]))
  end

  # Resolves the router module. With no explicit router, tries the modern
  # `<Base>Web.Router` location, then the legacy `<Base>.Router` one.
  defp router(nil, base) do
    if Mix.Project.umbrella?() do
      Mix.raise("""
      umbrella applications require an explicit router to be given to phx.routes, for example:

          $ mix phx.routes MyAppWeb.Router

      An alias can be added to mix.exs aliases to automate this:

          "phx.routes": "phx.routes MyAppWeb.Router"

      """)
    end

    web_router = web_mod(base, "Router")
    old_router = app_mod(base, "Router")

    loaded(web_router) || loaded(old_router) ||
      Mix.raise("""
      no router found at #{inspect(web_router)} or #{inspect(old_router)}.
      An explicit router module may be given to phx.routes, for example:

          $ mix phx.routes MyAppWeb.Router

      An alias can be added to mix.exs aliases to automate this:

          "phx.routes": "phx.routes MyAppWeb.Router"

      """)
  end

  defp router(router_name, _base) do
    arg_router = Module.concat([router_name])
    loaded(arg_router) || Mix.raise("the provided router, #{inspect(arg_router)}, does not exist")
  end

  # Returns the module if its bytecode can be loaded, nil otherwise.
  defp loaded(module) do
    if Code.ensure_loaded?(module), do: module
  end

  defp app_mod(base, name), do: Module.concat([base, name])

  defp web_mod(base, name), do: Module.concat(["#{base}Web", name])

  # Looks up the module's source file from its compile info, relative to cwd.
  defp get_file_path(module_name) do
    [compile_infos] = Keyword.get_values(module_name.module_info(), :compile)
    [source] = Keyword.get_values(compile_infos, :source)
    Path.relative_to_cwd(source)
  end

  defp get_line_number(_, nil), do: nil

  # Finds the definition line of `function_name` via the docs chunk;
  # returns nil when the module has no docs or the function is not listed.
  defp get_line_number(module, function_name) do
    {_, _, _, _, _, _, functions_list} = Code.fetch_docs(module)

    function_infos =
      Enum.find(functions_list, fn
        {{type, name, _}, _, _, _, _} ->
          type == :function and name == function_name
      end)

    case function_infos do
      {_, anno, _, _, _} -> :erl_anno.line(anno)
      nil -> nil
    end
  end
end
defmodule Mix.Tasks.Phx.Server do
  use Mix.Task

  @shortdoc "Starts applications and their servers"

  @moduledoc """
  Starts the application by configuring all endpoints servers to run.

  Note: to start the endpoint without using this mix task you must set
  `server: true` in your `Phoenix.Endpoint` configuration.

  ## Command line options

    * `--open` - open browser window for each started endpoint

  Furthermore, this task accepts the same command-line options as
  `mix run`.

  For example, to run `phx.server` without recompiling:

      $ mix phx.server --no-compile

  The `--no-halt` flag is automatically added.

  Note that the `--no-deps-check` flag cannot be used this way,
  because Mix needs to check dependencies to find `phx.server`.

  To run `phx.server` without checking dependencies, you can run:

      $ mix do deps.loadpaths --no-deps-check, phx.server
  """

  @impl true
  def run(args) do
    # Flip the global switch so every endpoint actually serves requests.
    Application.put_env(:phoenix, :serve_endpoints, true, persistent: true)
    Mix.Tasks.Run.run(run_args() ++ open_args(args))
  end

  # Strips --open from the args, recording the intent in the app env so
  # endpoints can open a browser window once they boot.
  defp open_args(args) do
    if "--open" in args do
      Application.put_env(:phoenix, :browser_open, true)
      List.delete(args, "--open")
    else
      args
    end
  end

  # Under IEx the shell keeps the VM alive, so --no-halt is not needed.
  defp run_args do
    if iex_running?(), do: [], else: ["--no-halt"]
  end

  defp iex_running? do
    Code.ensure_loaded?(IEx) and IEx.started?()
  end
end
defmodule Phoenix do
  @moduledoc """
  This is the documentation for the Phoenix project.

  To get started, see our [overview guides](overview.html).
  """
  use Application

  @doc false
  def start(_type, _args) do
    # Warm up template caches before the first request arrives.
    _ = Phoenix.Template.engines()
    _ = Phoenix.Template.format_encoder("index.html")
    warn_on_missing_json_library()

    # Configure proper system flags from Phoenix only.
    if stacktrace_depth = Application.get_env(:phoenix, :stacktrace_depth) do
      :erlang.system_flag(:backtrace_depth, stacktrace_depth)
    end

    # Pre-compile the parameter filter so logging does not rebuild it
    # on every request.
    if params_filter = Application.get_env(:phoenix, :filter_parameters) do
      Application.put_env(:phoenix, :filter_parameters, Phoenix.Logger.compile_filter(params_filter))
    end

    if Application.fetch_env!(:phoenix, :logger) do
      Phoenix.Logger.install()
    end

    children = [
      # Code reloading must be serial across all Phoenix apps
      Phoenix.CodeReloader.Server,
      {DynamicSupervisor, name: Phoenix.Transports.LongPoll.Supervisor, strategy: :one_for_one}
    ]

    Supervisor.start_link(children, strategy: :one_for_one, name: Phoenix.Supervisor)
  end

  @doc """
  Returns the configured JSON encoding library for Phoenix.

  To customize the JSON library, include the following
  in your `config/config.exs`:

      config :phoenix, :json_library, AlternativeJsonLibrary

  """
  def json_library do
    Application.get_env(:phoenix, :json_library, Jason)
  end

  @doc """
  Returns the `:plug_init_mode` that controls when plugs are
  initialized.

  We recommend to set it to `:runtime` in development for
  compilation time improvements. It must be `:compile` in
  production (the default).

  This option is passed as the `:init_mode` to `Plug.Builder.compile/3`.
  """
  def plug_init_mode do
    Application.get_env(:phoenix, :plug_init_mode, :compile)
  end

  # Emits a compile-time-style warning when a JSON library is configured
  # but its module cannot be loaded (e.g. dependency missing from mix.exs).
  defp warn_on_missing_json_library do
    configured_lib = Application.get_env(:phoenix, :json_library)

    if configured_lib && not Code.ensure_loaded?(configured_lib) do
      IO.warn("""
      found #{inspect(configured_lib)} in your application configuration
      for Phoenix JSON encoding, but module #{inspect(configured_lib)} is not available.
      Ensure #{inspect(configured_lib)} is listed as a dependency in mix.exs.
      """)
    end
  end
end
Topics can also be pattern + matched in your channels' `join/3` callback to pluck out the scoped pattern: + + # handles the special `"lobby"` subtopic + def join("room:lobby", _payload, socket) do + {:ok, socket} + end + + # handles any other subtopic as the room ID, for example `"room:12"`, `"room:34"` + def join("room:" <> room_id, _payload, socket) do + {:ok, socket} + end + + The first argument is the topic, the second argument is a map payload given by + the client, and the third argument is an instance of `Phoenix.Socket`. The + `socket` is provided to all channel callbacks, so check its module and + documentation to learn its fields and the different ways to interact with it. + + ## Authorization + + Clients must join a channel to send and receive PubSub events on that channel. + Your channels must implement a `join/3` callback that authorizes the socket + for the given topic. For example, you could check if the user is allowed to + join that particular room. + + To authorize a socket in `join/3`, return `{:ok, socket}`. + To refuse authorization in `join/3`, return `{:error, reply}`. + + ## Incoming Events + + After a client has successfully joined a channel, incoming events from the + client are routed through the channel's `handle_in/3` callbacks. Within these + callbacks, you can perform any action. Incoming callbacks must return the + `socket` to maintain ephemeral state. + + Typically you'll either forward a message to all listeners with + `broadcast!/3` or reply directly to a client event for request/response style + messaging. + + General message payloads are received as maps: + + def handle_in("new_msg", %{"uid" => uid, "body" => body}, socket) do + ... + {:reply, :ok, socket} + end + + Binary data payloads are passed as a `{:binary, data}` tuple: + + def handle_in("file_chunk", {:binary, chunk}, socket) do + ... 
+ {:reply, :ok, socket} + end + + ## Broadcasts + + You can broadcast events from anywhere in your application to a topic by + the `broadcast` function in the endpoint: + + MyAppWeb.Endpoint.broadcast!("room:13", "new_message", %{content: "hello"}) + + It is also possible to broadcast directly from channels. Here's an example of + receiving an incoming `"new_msg"` event from one client, and broadcasting the + message to all topic subscribers for this socket. + + def handle_in("new_msg", %{"uid" => uid, "body" => body}, socket) do + broadcast!(socket, "new_msg", %{uid: uid, body: body}) + {:noreply, socket} + end + + ## Replies + + Replies are useful for acknowledging a client's message or responding with + the results of an operation. A reply is sent only to the client connected to + the current channel process. Behind the scenes, they include the client + message `ref`, which allows the client to correlate the reply it receives + with the message it sent. + + For example, imagine creating a resource and replying with the created record: + + def handle_in("create:post", attrs, socket) do + changeset = Post.changeset(%Post{}, attrs) + + if changeset.valid? do + post = Repo.insert!(changeset) + response = MyAppWeb.PostView.render("show.json", %{post: post}) + {:reply, {:ok, response}, socket} + else + response = MyAppWeb.ChangesetView.render("errors.json", %{changeset: changeset}) + {:reply, {:error, response}, socket} + end + end + + Or you may just want to confirm that the operation succeeded: + + def handle_in("create:post", attrs, socket) do + changeset = Post.changeset(%Post{}, attrs) + + if changeset.valid? 
do + Repo.insert!(changeset) + {:reply, :ok, socket} + else + {:reply, :error, socket} + end + end + + Binary data is also supported with replies via a `{:binary, data}` tuple: + + {:reply, {:ok, {:binary, bin}}, socket} + + If you don't want to send a reply to the client, you can return: + + {:noreply, socket} + + One situation when you might do this is if you need to reply later; see + `reply/2`. + + ## Pushes + + Calling `push/3` allows you to send a message to the client which is not a + reply to a specific client message. Because it is not a reply, a pushed + message does not contain a client message `ref`; there is no prior client + message to relate it to. + + Possible use cases include notifying a client that: + - You've auto-saved the user's document + - The user's game is ending soon + - The IoT device's settings should be updated + + For example, you could `push/3` a message to the client in `handle_info/3` + after receiving a `PubSub` message relevant to them. + + alias Phoenix.Socket.Broadcast + def handle_info(%Broadcast{topic: _, event: event, payload: payload}, socket) do + push(socket, event, payload) + {:noreply, socket} + end + + Push data can be given in the form of a map or a tagged `{:binary, data}` + tuple: + + # client asks for their current rank. reply contains it, and client + # is also pushed a leader board and a badge image + def handle_in("current_rank", _, socket) do + push(socket, "leaders", %{leaders: Game.get_leaders(socket.assigns.game_id)}) + push(socket, "badge", {:binary, File.read!(socket.assigns.badge_path)}) + {:reply, %{val: Game.get_rank(socket.assigns[:user])}, socket} + end + + Note that in this example, `push/3` is called from `handle_in/3`; in this way + you can essentially reply N times to a single message from the client. See + `reply/2` for why this may be desirable. 
+ + ## Intercepting Outgoing Events + + When an event is broadcasted with `broadcast/3`, each channel subscriber can + choose to intercept the event and have their `handle_out/3` callback triggered. + This allows the event's payload to be customized on a socket by socket basis + to append extra information, or conditionally filter the message from being + delivered. If the event is not intercepted with `Phoenix.Channel.intercept/1`, + then the message is pushed directly to the client: + + intercept ["new_msg", "user_joined"] + + # for every socket subscribing to this topic, append an `is_editable` + # value for client metadata. + def handle_out("new_msg", msg, socket) do + push(socket, "new_msg", Map.merge(msg, + %{is_editable: User.can_edit_message?(socket.assigns[:user], msg)} + )) + {:noreply, socket} + end + + # do not send broadcasted `"user_joined"` events if this socket's user + # is ignoring the user who joined. + def handle_out("user_joined", msg, socket) do + unless User.ignoring?(socket.assigns[:user], msg.user_id) do + push(socket, "user_joined", msg) + end + {:noreply, socket} + end + + ## Terminate + + On termination, the channel callback `terminate/2` will be invoked with + the error reason and the socket. + + If we are terminating because the client left, the reason will be + `{:shutdown, :left}`. Similarly, if we are terminating because the + client connection was closed, the reason will be `{:shutdown, :closed}`. + + If any of the callbacks return a `:stop` tuple, it will also + trigger terminate with the reason given in the tuple. + + `terminate/2`, however, won't be invoked in case of errors nor in + case of exits. This is the same behaviour as you find in Elixir + abstractions like `GenServer` and others. Similar to `GenServer`, + it would also be possible to `:trap_exit` to guarantee that `terminate/2` + is invoked. This practice is not encouraged though. 
+ + Generally speaking, if you want to clean something up, it is better to + monitor your channel process and do the clean up from another process. + All channel callbacks, including `join/3`, are called from within the + channel process. Therefore, `self()` in any of them returns the PID to + be monitored. + + ## Exit reasons when stopping a channel + + When the channel callbacks return a `:stop` tuple, such as: + + {:stop, :shutdown, socket} + {:stop, {:error, :enoent}, socket} + + the second argument is the exit reason, which follows the same behaviour as + standard `GenServer` exits. + + You have three options to choose from when shutting down a channel: + + * `:normal` - in such cases, the exit won't be logged and linked processes + do not exit + + * `:shutdown` or `{:shutdown, term}` - in such cases, the exit won't be + logged and linked processes exit with the same reason unless they're + trapping exits + + * any other term - in such cases, the exit will be logged and linked + processes exit with the same reason unless they're trapping exits + + ## Subscribing to external topics + + Sometimes you may need to programmatically subscribe a socket to external + topics in addition to the internal `socket.topic`. For example, + imagine you have a bidding system where a remote client dynamically sets + preferences on products they want to receive bidding notifications on. + Instead of requiring a unique channel process and topic per + preference, a more efficient and simple approach would be to subscribe a + single channel to relevant notifications via your endpoint. 
For example: + + defmodule MyAppWeb.Endpoint.NotificationChannel do + use Phoenix.Channel + + def join("notification:" <> user_id, %{"ids" => ids}, socket) do + topics = for product_id <- ids, do: "product:#{product_id}" + + {:ok, socket + |> assign(:topics, []) + |> put_new_topics(topics)} + end + + def handle_in("watch", %{"product_id" => id}, socket) do + {:reply, :ok, put_new_topics(socket, ["product:#{id}"])} + end + + def handle_in("unwatch", %{"product_id" => id}, socket) do + {:reply, :ok, MyAppWeb.Endpoint.unsubscribe("product:#{id}")} + end + + defp put_new_topics(socket, topics) do + Enum.reduce(topics, socket, fn topic, acc -> + topics = acc.assigns.topics + if topic in topics do + acc + else + :ok = MyAppWeb.Endpoint.subscribe(topic) + assign(acc, :topics, [topic | topics]) + end + end) + end + end + + Note: the caller must be responsible for preventing duplicate subscriptions. + After calling `subscribe/1` from your endpoint, the same flow applies to + handling regular Elixir messages within your channel. Most often, you'll + simply relay the `%Phoenix.Socket.Broadcast{}` event and payload: + + alias Phoenix.Socket.Broadcast + def handle_info(%Broadcast{topic: _, event: event, payload: payload}, socket) do + push(socket, event, payload) + {:noreply, socket} + end + + ## Hibernation + + From Erlang/OTP 20, channels automatically hibernate to save memory + after 15_000 milliseconds of inactivity. This can be customized by + passing the `:hibernate_after` option to `use Phoenix.Channel`: + + use Phoenix.Channel, hibernate_after: 60_000 + + You can also set it to `:infinity` to fully disable it. + + ## Shutdown + + You can configure the shutdown behavior of each channel used when your + application is shutting down by setting the `:shutdown` value on use: + + use Phoenix.Channel, shutdown: 5_000 + + It defaults to 5_000. The supported values are described under the + in the `Supervisor` module docs. 
+ + ## Logging + + By default, channel `"join"` and `"handle_in"` events are logged, using + the level `:info` and `:debug`, respectively. You can change the level used + for each event, or disable logs, per event type by setting the `:log_join` + and `:log_handle_in` options when using `Phoenix.Channel`. For example, the + following configuration logs join events as `:info`, but disables logging for + incoming events: + + use Phoenix.Channel, log_join: :info, log_handle_in: false + + Note that changing an event type's level doesn't affect what is logged, + unless you set it to `false`, it affects the associated level. + """ + alias Phoenix.Socket + alias Phoenix.Channel.Server + + @type payload :: map | term | {:binary, binary} + @type reply :: status :: atom | {status :: atom, response :: payload} + @type socket_ref :: + {transport_pid :: Pid, serializer :: module, topic :: binary, ref :: binary, + join_ref :: binary} + + @doc """ + Handle channel joins by `topic`. + + To authorize a socket, return `{:ok, socket}` or `{:ok, reply, socket}`. To + refuse authorization, return `{:error, reason}`. + + Payloads are serialized before sending with the configured serializer. + + ## Example + + def join("room:lobby", payload, socket) do + if authorized?(payload) do + {:ok, socket} + else + {:error, %{reason: "unauthorized"}} + end + end + + """ + @callback join(topic :: binary, payload :: payload, socket :: Socket.t()) :: + {:ok, Socket.t()} + | {:ok, reply :: payload, Socket.t()} + | {:error, reason :: map} + + @doc """ + Handle incoming `event`s. + + Payloads are serialized before sending with the configured serializer. 
+ + ## Example + + def handle_in("ping", payload, socket) do + {:reply, {:ok, payload}, socket} + end + """ + @callback handle_in(event :: String.t(), payload :: payload, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:noreply, Socket.t(), timeout | :hibernate} + | {:reply, reply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + | {:stop, reason :: term, reply, Socket.t()} + + @doc """ + Intercepts outgoing `event`s. + + See `intercept/1`. + """ + @callback handle_out(event :: String.t(), payload :: payload, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:noreply, Socket.t(), timeout | :hibernate} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular Elixir process messages. + + See `c:GenServer.handle_info/2`. + """ + @callback handle_info(msg :: term, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular GenServer call messages. + + See `c:GenServer.handle_call/3`. + """ + @callback handle_call(msg :: term, from :: {pid, tag :: term}, socket :: Socket.t()) :: + {:reply, response :: term, Socket.t()} + | {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular GenServer cast messages. + + See `c:GenServer.handle_cast/2`. + """ + @callback handle_cast(msg :: term, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc false + @callback code_change(old_vsn, Socket.t(), extra :: term) :: + {:ok, Socket.t()} + | {:error, reason :: term} + when old_vsn: term | {:down, term} + + @doc """ + Invoked when the channel process is about to exit. + + See `c:GenServer.terminate/2`. 
+ """ + @callback terminate( + reason :: :normal | :shutdown | {:shutdown, :left | :closed | term}, + Socket.t() + ) :: + term + + @optional_callbacks handle_in: 3, + handle_out: 3, + handle_info: 2, + handle_call: 3, + handle_cast: 2, + code_change: 3, + terminate: 2 + + defmacro __using__(opts \\ []) do + quote do + opts = unquote(opts) + @behaviour unquote(__MODULE__) + @on_definition unquote(__MODULE__) + @before_compile unquote(__MODULE__) + @phoenix_intercepts [] + @phoenix_log_join Keyword.get(opts, :log_join, :info) + @phoenix_log_handle_in Keyword.get(opts, :log_handle_in, :debug) + @phoenix_hibernate_after Keyword.get(opts, :hibernate_after, 15_000) + @phoenix_shutdown Keyword.get(opts, :shutdown, 5000) + + import unquote(__MODULE__) + import Phoenix.Socket, only: [assign: 3, assign: 2] + + def child_spec(init_arg) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [init_arg]}, + shutdown: @phoenix_shutdown, + restart: :temporary + } + end + + def start_link(triplet) do + GenServer.start_link(Phoenix.Channel.Server, triplet, + hibernate_after: @phoenix_hibernate_after + ) + end + + def __socket__(:private) do + %{log_join: @phoenix_log_join, log_handle_in: @phoenix_log_handle_in} + end + end + end + + defmacro __before_compile__(_) do + quote do + def __intercepts__, do: @phoenix_intercepts + end + end + + @doc """ + Defines which Channel events to intercept for `handle_out/3` callbacks. + + By default, broadcasted events are pushed directly to the client, but + intercepting events gives your channel a chance to customize the event + for the client to append extra information or filter the message from being + delivered. + + *Note*: intercepting events can introduce significantly more overhead if a + large number of subscribers must customize a message since the broadcast will + be encoded N times instead of a single shared encoding across all subscribers. 
+ + ## Examples + + intercept ["new_msg"] + + def handle_out("new_msg", payload, socket) do + push(socket, "new_msg", Map.merge(payload, + is_editable: User.can_edit_message?(socket.assigns[:user], payload) + )) + {:noreply, socket} + end + + `handle_out/3` callbacks must return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:stop, reason :: term, Socket.t} + + """ + defmacro intercept(events) do + quote do + @phoenix_intercepts unquote(events) + end + end + + @doc false + def __on_definition__(env, :def, :handle_out, [event, _payload, _socket], _, _) + when is_binary(event) do + unless event in Module.get_attribute(env.module, :phoenix_intercepts) do + IO.write( + "#{Path.relative_to(env.file, File.cwd!())}:#{env.line}: [warning] " <> + "An intercept for event \"#{event}\" has not yet been defined in #{env.module}.handle_out/3. " <> + "Add \"#{event}\" to your list of intercepted events with intercept/1" + ) + end + end + + def __on_definition__(_env, _kind, _name, _args, _guards, _body) do + :ok + end + + @doc """ + Broadcast an event to all subscribers of the socket topic. + + The event's message must be a serializable map or a tagged `{:binary, data}` + tuple where `data` is binary data. + + ## Examples + + iex> broadcast(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> broadcast(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def broadcast(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic} = assert_joined!(socket) + Server.broadcast(pubsub_server, topic, event, message) + end + + @doc """ + Same as `broadcast/3`, but raises if broadcast fails. + """ + def broadcast!(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic} = assert_joined!(socket) + Server.broadcast!(pubsub_server, topic, event, message) + end + + @doc """ + Broadcast event from pid to all subscribers of the socket topic. 
+ + The channel that owns the socket will not receive the published + message. The event's message must be a serializable map or a tagged + `{:binary, data}` tuple where `data` is binary data. + + ## Examples + + iex> broadcast_from(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> broadcast_from(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def broadcast_from(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, channel_pid: channel_pid} = + assert_joined!(socket) + + Server.broadcast_from(pubsub_server, channel_pid, topic, event, message) + end + + @doc """ + Same as `broadcast_from/3`, but raises if broadcast fails. + """ + def broadcast_from!(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, channel_pid: channel_pid} = + assert_joined!(socket) + + Server.broadcast_from!(pubsub_server, channel_pid, topic, event, message) + end + + @doc """ + Sends an event directly to the connected client without requiring a prior + message from the client. + + The event's message must be a serializable map or a tagged `{:binary, data}` + tuple where `data` is binary data. + + Note that unlike some in client libraries, this server-side `push/3` does not + return a reference. If you need to get a reply from the client and to + correlate that reply with the message you pushed, you'll need to include a + unique identifier in the message, track it in the Channel's state, have the + client include it in its reply, and examine the ref when the reply comes to + `handle_in/3`. + + ## Examples + + iex> push(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> push(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def push(socket, event, message) do + %{transport_pid: transport_pid, topic: topic} = assert_joined!(socket) + Server.push(transport_pid, socket.join_ref, topic, event, message, socket.serializer) + end + + @doc """ + Replies asynchronously to a socket push. 
+ + The usual way of replying to a client's message is to return a tuple from `handle_in/3` + like: + + {:reply, {status, payload}, socket} + + But sometimes you need to reply to a push asynchronously - that is, after + your `handle_in/3` callback completes. For example, you might need to perform + work in another process and reply when it's finished. + + You can do this by generating a reference to the socket with `socket_ref/1` + and calling `reply/2` with that ref when you're ready to reply. + + *Note*: A `socket_ref` is required so the `socket` itself is not leaked + outside the channel. The `socket` holds information such as assigns and + transport configuration, so it's important to not copy this information + outside of the channel that owns it. + + Technically, `reply/2` will allow you to reply multiple times to the same + client message, and each reply will include the client message `ref`. But the + client may expect only one reply; in that case, `push/3` would be preferable + for the additional messages. + + Payloads are serialized before sending with the configured serializer. + + ## Examples + + def handle_in("work", payload, socket) do + Worker.perform(payload, socket_ref(socket)) + {:noreply, socket} + end + + def handle_info({:work_complete, result, ref}, socket) do + reply(ref, {:ok, result}) + {:noreply, socket} + end + + """ + @spec reply(socket_ref, reply) :: :ok + def reply(socket_ref, status) when is_atom(status) do + reply(socket_ref, {status, %{}}) + end + + def reply({transport_pid, serializer, topic, ref, join_ref}, {status, payload}) do + Server.reply(transport_pid, join_ref, ref, topic, {status, payload}, serializer) + end + + @doc """ + Generates a `socket_ref` for an async reply. + + See `reply/2` for example usage. 
+ """ + @spec socket_ref(Socket.t()) :: socket_ref + def socket_ref(%Socket{joined: true, ref: ref} = socket) when not is_nil(ref) do + {socket.transport_pid, socket.serializer, socket.topic, ref, socket.join_ref} + end + + def socket_ref(_socket) do + raise ArgumentError, """ + socket refs can only be generated for a socket that has joined with a push ref + """ + end + + defp assert_joined!(%Socket{joined: true} = socket) do + socket + end + + defp assert_joined!(%Socket{joined: false}) do + raise """ + push/3, reply/2, and broadcast/3 can only be called after the socket has finished joining. + To push a message on join, send to self and handle in handle_info/2. For example: + + def join(topic, auth_msg, socket) do + ... + send(self(), :after_join) + {:ok, socket} + end + + def handle_info(:after_join, socket) do + push(socket, "feed", %{list: feed_items(socket)}) + {:noreply, socket} + end + + """ + end +end diff --git a/deps/phoenix/lib/phoenix/channel/server.ex b/deps/phoenix/lib/phoenix/channel/server.ex new file mode 100644 index 0000000..3d6c57e --- /dev/null +++ b/deps/phoenix/lib/phoenix/channel/server.ex @@ -0,0 +1,571 @@ +defmodule Phoenix.Channel.Server do + @moduledoc false + use GenServer, restart: :temporary + + require Logger + + alias Phoenix.PubSub + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply, PoolSupervisor} + + ## Socket API + + @doc """ + Joins the channel in socket with authentication payload. 
+ """ + @spec join(Socket.t(), module, Message.t(), keyword) :: {:ok, term, pid} | {:error, term} + def join(socket, channel, message, opts) do + %{topic: topic, payload: payload, ref: ref, join_ref: join_ref} = message + + starter = opts[:starter] || (&PoolSupervisor.start_child/3) + assigns = Map.merge(socket.assigns, Keyword.get(opts, :assigns, %{})) + + socket = %{ + socket + | topic: topic, + channel: channel, + join_ref: join_ref || ref, + assigns: assigns + } + + ref = make_ref() + from = {self(), ref} + child_spec = channel.child_spec({socket.endpoint, from}) + + case starter.(socket, from, child_spec) do + {:ok, pid} -> + send(pid, {Phoenix.Channel, payload, from, socket}) + mon_ref = Process.monitor(pid) + + receive do + {^ref, {:ok, reply}} -> + Process.demonitor(mon_ref, [:flush]) + {:ok, reply, pid} + + {^ref, {:error, reply}} -> + Process.demonitor(mon_ref, [:flush]) + {:error, reply} + + {:DOWN, ^mon_ref, _, _, reason} -> + Logger.error(fn -> Exception.format_exit(reason) end) + {:error, %{reason: "join crashed"}} + end + + {:error, reason} -> + Logger.error(fn -> Exception.format_exit(reason) end) + {:error, %{reason: "join crashed"}} + end + end + + @doc """ + Gets the socket from the channel. + + Used by channel tests. + """ + @spec socket(pid) :: Socket.t() + def socket(pid) do + GenServer.call(pid, :socket) + end + + @doc """ + Emulates the socket being closed. + + Used by channel tests. + """ + @spec close(pid, timeout) :: :ok + def close(pid, timeout) do + GenServer.cast(pid, :close) + ref = Process.monitor(pid) + + receive do + {:DOWN, ^ref, _, _, _} -> :ok + after + timeout -> + Process.exit(pid, :kill) + receive do: ({:DOWN, ^ref, _, _, _} -> :ok) + end + end + + ## Channel API + + @doc """ + Hook invoked by Phoenix.PubSub dispatch. 
+ """ + def dispatch(subscribers, from, %Broadcast{event: event} = msg) do + Enum.reduce(subscribers, %{}, fn + {pid, _}, cache when pid == from -> + cache + + {pid, {:fastlane, fastlane_pid, serializer, event_intercepts}}, cache -> + if event in event_intercepts do + send(pid, msg) + cache + else + case cache do + %{^serializer => encoded_msg} -> + send(fastlane_pid, encoded_msg) + cache + + %{} -> + encoded_msg = serializer.fastlane!(msg) + send(fastlane_pid, encoded_msg) + Map.put(cache, serializer, encoded_msg) + end + end + + {pid, _}, cache -> + send(pid, msg) + cache + end) + + :ok + end + + def dispatch(entries, :none, message) do + for {pid, _} <- entries do + send(pid, message) + end + + :ok + end + + def dispatch(entries, from, message) do + for {pid, _} <- entries, pid != from do + send(pid, message) + end + + :ok + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def broadcast(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + Raises in case of crashes. + """ + def broadcast!(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast!(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. 
+ """ + def broadcast_from(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast_from(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + Raises in case of crashes. + """ + def broadcast_from!(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast_from!(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def local_broadcast(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.local_broadcast(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def local_broadcast_from(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.local_broadcast_from(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Pushes a message with the given topic, event and payload + to the given process. + + Payloads are serialized before sending with the configured serializer. 
+ """ + def push(pid, join_ref, topic, event, payload, serializer) + when is_binary(topic) and is_binary(event) do + message = %Message{join_ref: join_ref, topic: topic, event: event, payload: payload} + send(pid, serializer.encode!(message)) + :ok + end + + @doc """ + Replies to a given ref to the transport process. + + Payloads are serialized before sending with the configured serializer. + """ + def reply(pid, join_ref, ref, topic, {status, payload}, serializer) + when is_binary(topic) do + reply = %Reply{topic: topic, join_ref: join_ref, ref: ref, status: status, payload: payload} + send(pid, serializer.encode!(reply)) + :ok + end + + ## Callbacks + + @doc false + def init({_endpoint, {pid, _}}) do + {:ok, Process.monitor(pid)} + end + + @doc false + def handle_call(:socket, _from, socket) do + {:reply, socket, socket} + end + + @doc false + def handle_call(msg, from, socket) do + msg + |> socket.channel.handle_call(from, socket) + |> handle_result(:handle_call) + end + + @doc false + def handle_cast(:close, socket) do + {:stop, {:shutdown, :closed}, socket} + end + + @doc false + def handle_cast(msg, socket) do + msg + |> socket.channel.handle_cast(socket) + |> handle_result(:handle_cast) + end + + @doc false + def handle_info({Phoenix.Channel, auth_payload, {pid, _} = from, socket}, ref) do + Process.demonitor(ref) + %{channel: channel, topic: topic, private: private} = socket + Process.put(:"$initial_call", {channel, :join, 3}) + Process.put(:"$callers", [pid]) + + # TODO: replace with Process.put_label/2 when we require Elixir 1.17 + Process.put(:"$process_label", {Phoenix.Channel, channel, topic}) + + socket = %{ + socket + | channel_pid: self(), + private: Map.merge(channel.__socket__(:private), private) + } + + start = System.monotonic_time() + {reply, state} = channel_join(channel, topic, auth_payload, socket) + duration = System.monotonic_time() - start + metadata = %{params: auth_payload, socket: socket, result: elem(reply, 0)} + 
:telemetry.execute([:phoenix, :channel_joined], %{duration: duration}, metadata) + GenServer.reply(from, reply) + state + end + + def handle_info(%Message{topic: topic, event: "phx_leave", ref: ref}, %{topic: topic} = socket) do + handle_in({:stop, {:shutdown, :left}, :ok, put_in(socket.ref, ref)}) + end + + def handle_info( + %Message{topic: topic, event: event, payload: payload, ref: ref}, + %{topic: topic} = socket + ) do + start = System.monotonic_time() + result = socket.channel.handle_in(event, payload, put_in(socket.ref, ref)) + duration = System.monotonic_time() - start + metadata = %{ref: ref, event: event, params: payload, socket: socket} + :telemetry.execute([:phoenix, :channel_handled_in], %{duration: duration}, metadata) + handle_in(result) + end + + def handle_info( + %Broadcast{event: "phx_drain"}, + %{transport_pid: transport_pid} = socket + ) do + send(transport_pid, :socket_drain) + {:stop, {:shutdown, :draining}, socket} + end + + def handle_info( + %Broadcast{topic: topic, event: event, payload: payload}, + %Socket{topic: topic} = socket + ) do + event + |> socket.channel.handle_out(payload, socket) + |> handle_result(:handle_out) + end + + def handle_info({:DOWN, ref, _, _, reason}, ref) do + {:stop, reason, ref} + end + + def handle_info({:DOWN, _, _, transport_pid, reason}, %{transport_pid: transport_pid} = socket) do + reason = if reason == :normal, do: {:shutdown, :closed}, else: reason + {:stop, reason, socket} + end + + def handle_info(msg, %{channel: channel} = socket) do + if function_exported?(channel, :handle_info, 2) do + msg + |> socket.channel.handle_info(socket) + |> handle_result(:handle_info) + else + warn_unexpected_msg(:handle_info, 2, msg, channel) + {:noreply, socket} + end + end + + @doc false + def code_change(old, %{channel: channel} = socket, extra) do + if function_exported?(channel, :code_change, 3) do + channel.code_change(old, socket, extra) + else + {:ok, socket} + end + end + + @doc false + def terminate(reason, 
%{channel: channel} = socket) do + if function_exported?(channel, :terminate, 2) do + channel.terminate(reason, socket) + else + :ok + end + end + + def terminate(_reason, _socket) do + :ok + end + + ## Joins + + defp channel_join(channel, topic, auth_payload, socket) do + case channel.join(topic, auth_payload, socket) do + {:ok, socket} -> + {{:ok, %{}}, init_join(socket, channel, topic)} + + {:ok, reply, socket} -> + {{:ok, reply}, init_join(socket, channel, topic)} + + {:error, reply} -> + {{:error, reply}, {:stop, :shutdown, socket}} + + other -> + raise """ + channel #{inspect(socket.channel)}.join/3 is expected to return one of: + + {:ok, Socket.t} | + {:ok, reply :: map, Socket.t} | + {:error, reply :: map} + + got #{inspect(other)} + """ + end + end + + defp init_join(socket, channel, topic) do + %{transport_pid: transport_pid, serializer: serializer, pubsub_server: pubsub_server} = socket + + unless pubsub_server do + raise """ + The :pubsub_server was not configured for endpoint #{inspect(socket.endpoint)}. + Make sure to start a PubSub process in your application supervision tree: + + {Phoenix.PubSub, [name: YOURAPP.PubSub, adapter: Phoenix.PubSub.PG2]} + + And then add it to your endpoint config: + + config :YOURAPP, YOURAPPWeb.Endpoint, + # ... 
+ pubsub_server: YOURAPP.PubSub + """ + end + + Process.monitor(transport_pid) + fastlane = {:fastlane, transport_pid, serializer, channel.__intercepts__()} + PubSub.subscribe(pubsub_server, topic, metadata: fastlane) + + {:noreply, %{socket | joined: true}} + end + + ## Handle results + + defp handle_result({:stop, reason, socket}, _callback) do + case reason do + :normal -> send_socket_close(socket, reason) + :shutdown -> send_socket_close(socket, reason) + {:shutdown, _} -> send_socket_close(socket, reason) + _ -> :noop + end + + {:stop, reason, socket} + end + + defp handle_result({:reply, resp, socket}, :handle_call) do + {:reply, resp, socket} + end + + defp handle_result({:noreply, socket}, callback) + when callback in [:handle_call, :handle_cast] do + {:noreply, socket} + end + + defp handle_result({:noreply, socket}, _callback) do + {:noreply, put_in(socket.ref, nil)} + end + + defp handle_result({:noreply, socket, timeout_or_hibernate}, _callback) do + {:noreply, put_in(socket.ref, nil), timeout_or_hibernate} + end + + defp handle_result(result, :handle_in) do + raise """ + Expected handle_in/3 to return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:reply, {status :: atom, response :: map}, Socket.t} | + {:reply, status :: atom, Socket.t} | + {:stop, reason :: term, Socket.t} | + {:stop, reason :: term, {status :: atom, response :: map}, Socket.t} | + {:stop, reason :: term, status :: atom, Socket.t} + + got #{inspect(result)} + """ + end + + defp handle_result(result, callback) do + raise """ + Expected #{callback} to return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:stop, reason :: term, Socket.t} | + + got #{inspect(result)} + """ + end + + defp send_socket_close(%{transport_pid: transport_pid}, reason) do + send(transport_pid, {:socket_close, self(), reason}) + end + + ## Handle in/replies + + defp handle_in({:reply, reply, %Socket{} = socket}) do + handle_reply(socket, 
reply) + {:noreply, put_in(socket.ref, nil)} + end + + defp handle_in({:stop, reason, reply, socket}) do + handle_reply(socket, reply) + handle_result({:stop, reason, socket}, :handle_in) + end + + defp handle_in(other) do + handle_result(other, :handle_in) + end + + defp handle_reply(socket, {status, payload}) when is_atom(status) do + reply( + socket.transport_pid, + socket.join_ref, + socket.ref, + socket.topic, + {status, payload}, + socket.serializer + ) + end + + defp handle_reply(socket, status) when is_atom(status) do + handle_reply(socket, {status, %{}}) + end + + defp handle_reply(_socket, reply) do + raise """ + Channel replies from handle_in/3 are expected to be one of: + + status :: atom + {status :: atom, response :: map} + + for example: + + {:reply, :ok, socket} + {:reply, {:ok, %{}}, socket} + {:stop, :shutdown, {:error, %{}}, socket} + + got #{inspect(reply)} + """ + end + + defp warn_unexpected_msg(fun, arity, msg, channel) do + proc = + case Process.info(self(), :registered_name) do + {_, []} -> self() + {_, name} -> name + end + + :error_logger.warning_msg( + ~c"~p ~p received unexpected message in #{fun}/#{arity}: ~p~n", + [channel, proc, msg] + ) + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader.ex b/deps/phoenix/lib/phoenix/code_reloader.ex new file mode 100644 index 0000000..69a3358 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader.ex @@ -0,0 +1,340 @@ +defmodule Phoenix.CodeReloader do + @moduledoc """ + A plug and module to handle automatic code reloading. + + To avoid race conditions, all code reloads are funneled through a + sequential call operation. + """ + + ## Server delegation + + @doc """ + Reloads code for the current Mix project by invoking the + `:reloadable_compilers` on the list of `:reloadable_apps`. 
+ + This is configured in your application environment like: + + config :your_app, YourAppWeb.Endpoint, + reloadable_compilers: [:gettext, :elixir], + reloadable_apps: [:ui, :backend] + + Keep in mind `:reloadable_compilers` must be a subset of the + `:compilers` specified in `project/0` in your `mix.exs`. + + The `:reloadable_apps` defaults to `nil`. In such case + default behaviour is to reload the current project if it + consists of a single app, or all applications within an umbrella + project. You can set `:reloadable_apps` to a subset of default + applications to reload only some of them, an empty list - to + effectively disable the code reloader, or include external + applications from library dependencies. + + This function is a no-op and returns `:ok` if Mix is not available. + + The reloader should also be configured as a Mix listener in project's + mix.exs file (since Elixir v1.18): + + def project do + [ + ..., + listeners: [Phoenix.CodeReloader] + ] + end + + This way the reloader can notice whenever the project is compiled + concurrently. + + ## Options + + * `:reloadable_args` - additional CLI args to pass to the compiler tasks. + Defaults to `["--no-all-warnings"]` so only warnings related to the + files being compiled are printed + + """ + @spec reload(module, keyword) :: :ok | {:error, binary()} + def reload(endpoint, opts \\ []) do + if Code.ensure_loaded?(Mix.Project), do: reload!(endpoint, opts), else: :ok + end + + @doc """ + Same as `reload/1` but it will raise if Mix is not available. + """ + @spec reload!(module, keyword) :: :ok | {:error, binary()} + defdelegate reload!(endpoint, opts), to: Phoenix.CodeReloader.Server + + @doc """ + Synchronizes with the code server if it is alive. + + It returns `:ok`. If it is not running, it also returns `:ok`. 
+ """ + @spec sync :: :ok + defdelegate sync, to: Phoenix.CodeReloader.Server + + @doc false + @spec child_spec(keyword) :: Supervisor.child_spec() + defdelegate child_spec(opts), to: Phoenix.CodeReloader.MixListener + + ## Plug + + @behaviour Plug + import Plug.Conn + + @style %{ + light: %{ + primary: "#EB532D", + accent: "#a0b0c0", + text_color: "#304050", + background: "#ffffff", + heading_background: "#f9f9fa" + }, + dark: %{ + primary: "#FF6B4A", + accent: "#c0c0c0", + text_color: "#e5e5e5", + background: "#1a1a1a", + heading_background: "#2a2a2a" + }, + logo: + "data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgNzEgNDgiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgoJPHBhdGggZD0ibTI2LjM3MSAzMy40NzctLjU1Mi0uMWMtMy45Mi0uNzI5LTYuMzk3LTMuMS03LjU3LTYuODI5LS43MzMtMi4zMjQuNTk3LTQuMDM1IDMuMDM1LTQuMTQ4IDEuOTk1LS4wOTIgMy4zNjIgMS4wNTUgNC41NyAyLjM5IDEuNTU3IDEuNzIgMi45ODQgMy41NTggNC41MTQgNS4zMDUgMi4yMDIgMi41MTUgNC43OTcgNC4xMzQgOC4zNDcgMy42MzQgMy4xODMtLjQ0OCA1Ljk1OC0xLjcyNSA4LjM3MS0zLjgyOC4zNjMtLjMxNi43NjEtLjU5MiAxLjE0NC0uODg2bC0uMjQxLS4yODRjLTIuMDI3LjYzLTQuMDkzLjg0MS02LjIwNS43MzUtMy4xOTUtLjE2LTYuMjQtLjgyOC04Ljk2NC0yLjU4Mi0yLjQ4Ni0xLjYwMS00LjMxOS0zLjc0Ni01LjE5LTYuNjExLS43MDQtMi4zMTUuNzM2LTMuOTM0IDMuMTM1LTMuNi45NDguMTMzIDEuNzQ2LjU2IDIuNDYzIDEuMTY1LjU4My40OTMgMS4xNDMgMS4wMTUgMS43MzggMS40OTMgMi44IDIuMjUgNi43MTIgMi4zNzUgMTAuMjY1LS4wNjgtNS44NDItLjAyNi05LjgxNy0zLjI0LTEzLjMwOC03LjMxMy0xLjM2Ni0xLjU5NC0yLjctMy4yMTYtNC4wOTUtNC43ODUtMi42OTgtMy4wMzYtNS42OTItNS43MS05Ljc5LTYuNjIzQzEyLjgtLjYyMyA3Ljc0NS4xNCAyLjg5MyAyLjM2MSAxLjkyNiAyLjgwNC45OTcgMy4zMTkgMCA0LjE0OWMuNDk0IDAgLjc2My4wMDYgMS4wMzIgMCAyLjQ0Ni0uMDY0IDQuMjggMS4wMjMgNS42MDIgMy4wMjQuOTYyIDEuNDU3IDEuNDE1IDMuMTA0IDEuNzYxIDQuNzk4LjUxMyAyLjUxNS4yNDcgNS4wNzguNTQ0IDcuNjA1Ljc2MSA2LjQ5NCA0LjA4IDExLjAyNiAxMC4yNiAxMy4zNDYgMi4yNjcuODUyIDQuNTkxIDEuMTM1IDcuMTcyLjU1NVpNMTAuNzUxIDMuODUyYy0uOTc2LjI0Ni0xLjc1Ni0uMTQ4LTIuNTYtLjk2MiAxLjM3Ny0uMzQzIDIuNTkyLS40NzYgMy44OTctLjUyOC0uMTA3Ljg0OC0uNjA3ID
EuMzA2LTEuMzM2IDEuNDlabTMyLjAwMiAzNy45MjRjLS4wODUtLjYyNi0uNjItLjkwMS0xLjA0LTEuMjI4LTEuODU3LTEuNDQ2LTQuMDMtMS45NTgtNi4zMzMtMi0xLjM3NS0uMDI2LTIuNzM1LS4xMjgtNC4wMzEtLjYxLS41OTUtLjIyLTEuMjYtLjUwNS0xLjI0NC0xLjI3Mi4wMTUtLjc4LjY5My0xIDEuMzEtMS4xODQuNTA1LS4xNSAxLjAyNi0uMjQ3IDEuNi0uMzgyLTEuNDYtLjkzNi0yLjg4Ni0xLjA2NS00Ljc4Ny0uMy0yLjk5MyAxLjIwMi01Ljk0MyAxLjA2LTguOTI2LS4wMTctMS42ODQtLjYwOC0zLjE3OS0xLjU2My00LjczNS0yLjQwOGwtLjA0My4wM2EyLjk2IDIuOTYgMCAwIDAgLjA0LS4wMjljLS4wMzgtLjExNy0uMTA3LS4xMi0uMTk3LS4wNTRsLjEyMi4xMDdjMS4yOSAyLjExNSAzLjAzNCAzLjgxNyA1LjAwNCA1LjI3MSAzLjc5MyAyLjggNy45MzYgNC40NzEgMTIuNzg0IDMuNzNBNjYuNzE0IDY2LjcxNCAwIDAgMSAzNyA0MC44NzdjMS45OC0uMTYgMy44NjYuMzk4IDUuNzUzLjg5OVptLTkuMTQtMzAuMzQ1Yy0uMTA1LS4wNzYtLjIwNi0uMjY2LS40Mi0uMDY5IDEuNzQ1IDIuMzYgMy45ODUgNC4wOTggNi42ODMgNS4xOTMgNC4zNTQgMS43NjcgOC43NzMgMi4wNyAxMy4yOTMuNTEgMy41MS0xLjIxIDYuMDMzLS4wMjggNy4zNDMgMy4zOC4xOS0zLjk1NS0yLjEzNy02LjgzNy01Ljg0My03LjQwMS0yLjA4NC0uMzE4LTQuMDEuMzczLTUuOTYyLjk0LTUuNDM0IDEuNTc1LTEwLjQ4NS43OTgtMTUuMDk0LTIuNTUzWm0yNy4wODUgMTUuNDI1Yy43MDguMDU5IDEuNDE2LjEyMyAyLjEyNC4xODUtMS42LTEuNDA1LTMuNTUtMS41MTctNS41MjMtMS40MDQtMy4wMDMuMTctNS4xNjcgMS45MDMtNy4xNCAzLjk3Mi0xLjczOSAxLjgyNC0zLjMxIDMuODctNS45MDMgNC42MDQuMDQzLjA3OC4wNTQuMTE3LjA2Ni4xMTcuMzUuMDA1LjY5OS4wMjEgMS4wNDcuMDA1IDMuNzY4LS4xNyA3LjMxNy0uOTY1IDEwLjE0LTMuNy44OS0uODYgMS42ODUtMS44MTcgMi41NDQtMi43MS43MTYtLjc0NiAxLjU4NC0xLjE1OSAyLjY0NS0xLjA3Wm0tOC43NTMtNC42N2MtMi44MTIuMjQ2LTUuMjU0IDEuNDA5LTcuNTQ4IDIuOTQzLTEuNzY2IDEuMTgtMy42NTQgMS43MzgtNS43NzYgMS4zNy0uMzc0LS4wNjYtLjc1LS4xMTQtMS4xMjQtLjE3bC0uMDEzLjE1NmMuMTM1LjA3LjI2NS4xNTEuNDA1LjIwNy4zNTQuMTQuNzAyLjMwOCAxLjA3LjM5NSA0LjA4My45NzEgNy45OTIuNDc0IDExLjUxNi0xLjgwMyAyLjIyMS0xLjQzNSA0LjUyMS0xLjcwNyA3LjAxMy0xLjMzNi4yNTIuMDM4LjUwMy4wODMuNzU2LjEwNy4yMzQuMDIyLjQ3OS4yNTUuNzk1LjAwMy0yLjE3OS0xLjU3NC00LjUyNi0yLjA5Ni03LjA5NC0xLjg3MlptLTEwLjA0OS05LjU0NGMxLjQ3NS4wNTEgMi45NDMtLjE0MiA0LjQ4Ni0xLjA1OS0uNDUyLjA0LS42NDMuMDQtLjgyNy4wNzYtMi4xMjYuNDI0LTQuMDMzLS4wNC01LjczMy0xLjM4My0uNjIzLS40OTMtMS4yNTctLjk3NC0xLjg4OS0xLjQ1Ny0yLj
UwMy0xLjkxNC01LjM3NC0yLjU1NS04LjUxNC0yLjUuMDUuMTU0LjA1NC4yNi4xMDguMzE1IDMuNDE3IDMuNDU1IDcuMzcxIDUuODM2IDEyLjM2OSA2LjAwOFptMjQuNzI3IDE3LjczMWMtMi4xMTQtMi4wOTctNC45NTItMi4zNjctNy41NzgtLjUzNyAxLjczOC4wNzggMy4wNDMuNjMyIDQuMTAxIDEuNzI4LjM3NC4zODguNzYzLjc2OCAxLjE4MiAxLjEwNiAxLjYgMS4yOSA0LjMxMSAxLjM1MiA1Ljg5Ni4xNTUtMS44NjEtLjcyNi0xLjg2MS0uNzI2LTMuNjAxLTIuNDUyWm0tMjEuMDU4IDE2LjA2Yy0xLjg1OC0zLjQ2LTQuOTgxLTQuMjQtOC41OS00LjAwOGE5LjY2NyA5LjY2NyAwIDAgMSAyLjk3NyAxLjM5Yy44NC41ODYgMS41NDcgMS4zMTEgMi4yNDMgMi4wNTUgMS4zOCAxLjQ3MyAzLjUzNCAyLjM3NiA0Ljk2MiAyLjA3LS42NTYtLjQxMi0xLjIzOC0uODQ4LTEuNTkyLTEuNTA3Wm0xNy4yOS0xOS4zMmMwLS4wMjMuMDAxLS4wNDUuMDAzLS4wNjhsLS4wMDYuMDA2LjAwNi0uMDA2LS4wMzYtLjAwNC4wMjEuMDE4LjAxMi4wNTNabS0yMCAxNC43NDRhNy42MSA3LjYxIDAgMCAwLS4wNzItLjA0MS4xMjcuMTI3IDAgMCAwIC4wMTUuMDQzYy4wMDUuMDA4LjAzOCAwIC4wNTgtLjAwMlptLS4wNzItLjA0MS0uMDA4LS4wMzQtLjAwOC4wMS4wMDgtLjAxLS4wMjItLjAwNi4wMDUuMDI2LjAyNC4wMTRaIgogICAgICAgICAgICBmaWxsPSIjRkQ0RjAwIiAvPgo8L3N2Zz4K", + monospace_font: "menlo, consolas, monospace" + } + + @doc """ + API used by Plug to start the code reloader. + """ + def init(opts) do + Keyword.put_new(opts, :reloader, &Phoenix.CodeReloader.reload/2) + end + + @doc """ + API used by Plug to invoke the code reloader on every request. + """ + def call(conn, opts) do + case opts[:reloader].(conn.private.phoenix_endpoint, opts) do + :ok -> + conn + + {:error, output} -> + conn + |> put_resp_content_type("text/html") + |> send_resp(500, template(output)) + |> halt() + end + end + + defp template(output) do + """ + + + + + CompileError + + + + + +
+ +
+
Compilation error
+
Console output is shown below.
+
+
+
+
#{format_output(output)}
+
+ + + """ + end + + defp format_output(output) do + output + |> String.trim() + |> remove_ansi_escapes() + |> Plug.HTML.html_escape() + end + + defp remove_ansi_escapes(text) do + Regex.replace(~r/\e\[[0-9;]*[a-zA-Z]/, text, "") + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader/mix_listener.ex b/deps/phoenix/lib/phoenix/code_reloader/mix_listener.ex new file mode 100644 index 0000000..f136314 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader/mix_listener.ex @@ -0,0 +1,72 @@ +defmodule Phoenix.CodeReloader.MixListener do + @moduledoc false + + use GenServer + + @name __MODULE__ + + @spec start_link(keyword) :: GenServer.on_start() + def start_link(_opts) do + GenServer.start_link(__MODULE__, {}, name: @name) + end + + @spec started? :: boolean() + def started? do + Process.whereis(Phoenix.CodeReloader.MixListener) != nil + end + + @doc """ + Unloads all modules invalidated by external compilations. + + Only reloads modules from the given apps. + """ + @spec purge([atom()]) :: :ok + def purge(apps) do + GenServer.call(@name, {:purge, apps}, :infinity) + end + + @impl true + def init({}) do + {:ok, %{to_purge: %{}}} + end + + @impl true + def handle_call({:purge, apps}, _from, state) do + for app <- apps, modules = state.to_purge[app] do + purge_modules(modules) + end + + {:reply, :ok, %{state | to_purge: %{}}} + end + + @impl true + def handle_info({:modules_compiled, info}, state) do + if info.os_pid == System.pid() do + # Ignore compilations from ourselves, because the modules are + # already updated in memory + {:noreply, state} + else + %{changed: changed, removed: removed} = info.modules_diff + + state = + update_in(state.to_purge[info.app], fn to_purge -> + to_purge = to_purge || MapSet.new() + to_purge = Enum.into(changed, to_purge) + Enum.into(removed, to_purge) + end) + + {:noreply, state} + end + end + + def handle_info(_message, state) do + {:noreply, state} + end + + defp purge_modules(modules) do + for module <- modules do + 
:code.purge(module) + :code.delete(module) + end + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader/proxy.ex b/deps/phoenix/lib/phoenix/code_reloader/proxy.ex new file mode 100644 index 0000000..c6a6b0f --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader/proxy.ex @@ -0,0 +1,76 @@ +# A tiny proxy that stores all output sent to the group leader +# while forwarding all requests to it. +defmodule Phoenix.CodeReloader.Proxy do + @moduledoc false + use GenServer + + def start() do + GenServer.start(__MODULE__, :ok) + end + + def diagnostics(proxy, diagnostics) do + GenServer.cast(proxy, {:diagnostics, diagnostics}) + end + + def stop(proxy) do + GenServer.call(proxy, :stop, :infinity) + end + + ## Callbacks + + def init(:ok) do + {:ok, []} + end + + def handle_cast({:diagnostics, diagnostics}, output) do + {:noreply, diagnostics |> Enum.map(&diagnostic_to_chars/1) |> Enum.reverse(output)} + end + + def handle_call(:stop, _from, output) do + {:stop, :normal, Enum.reverse(output), output} + end + + def handle_info(msg, output) do + case msg do + {:io_request, from, reply, {:put_chars, chars}} -> + put_chars(from, reply, chars, output) + + {:io_request, from, reply, {:put_chars, m, f, as}} -> + put_chars(from, reply, apply(m, f, as), output) + + {:io_request, from, reply, {:put_chars, _encoding, chars}} -> + put_chars(from, reply, chars, output) + + {:io_request, from, reply, {:put_chars, _encoding, m, f, as}} -> + put_chars(from, reply, apply(m, f, as), output) + + {:io_request, _from, _reply, _request} = msg -> + send(Process.group_leader(), msg) + {:noreply, output} + + _ -> + {:noreply, output} + end + end + + defp put_chars(from, reply, chars, output) do + send(Process.group_leader(), {:io_request, from, reply, {:put_chars, chars}}) + {:noreply, [chars | output]} + end + + defp diagnostic_to_chars(%{severity: :error, message: "**" <> _ = message}) do + "\n#{message}\n" + end + + defp diagnostic_to_chars(%{severity: severity, message: message, file: 
file, position: position}) when is_binary(file) do + "\n#{severity}: #{message}\n #{Path.relative_to_cwd(file)}#{position(position)}\n" + end + + defp diagnostic_to_chars(%{severity: severity, message: message}) do + "\n#{severity}: #{message}\n" + end + + defp position({line, col}), do: ":#{line}:#{col}" + defp position(line) when is_integer(line) and line > 0, do: ":#{line}" + defp position(_), do: "" +end diff --git a/deps/phoenix/lib/phoenix/code_reloader/server.ex b/deps/phoenix/lib/phoenix/code_reloader/server.ex new file mode 100644 index 0000000..c8650c8 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader/server.ex @@ -0,0 +1,461 @@ +defmodule Phoenix.CodeReloader.Server do + @moduledoc false + use GenServer + + require Logger + alias Phoenix.CodeReloader.Proxy + + def start_link(_) do + GenServer.start_link(__MODULE__, :ok, name: __MODULE__) + end + + def check_symlinks do + GenServer.call(__MODULE__, :check_symlinks, :infinity) + end + + def reload!(endpoint, opts) do + GenServer.call(__MODULE__, {:reload!, endpoint, opts}, :infinity) + end + + def sync do + pid = Process.whereis(__MODULE__) + ref = Process.monitor(pid) + GenServer.cast(pid, {:sync, self(), ref}) + + receive do + ^ref -> :ok + {:DOWN, ^ref, _, _, _} -> :ok + end + end + + ## Callbacks + + def init(:ok) do + {:ok, %{check_symlinks: true, timestamp: timestamp()}} + end + + def handle_call(:check_symlinks, _from, state) do + if state.check_symlinks and Code.ensure_loaded?(Mix.Project) and not Mix.Project.umbrella?() and + File.dir?("priv") do + priv_path = "#{Mix.Project.app_path()}/priv" + + case :file.read_link(priv_path) do + {:ok, _} -> + :ok + + {:error, _} -> + if can_symlink?() do + File.rm_rf(priv_path) + Mix.Project.build_structure() + else + Logger.warning( + "Phoenix is unable to create symlinks. Phoenix's code reloader will run " <> + "considerably faster if symlinks are allowed." 
<> os_symlink(:os.type()) + ) + end + end + end + + {:reply, :ok, %{state | check_symlinks: false}} + end + + def handle_call({:reload!, endpoint, opts}, from, state) do + compilers = endpoint.config(:reloadable_compilers) + apps = endpoint.config(:reloadable_apps) || default_reloadable_apps() + args = Keyword.get(opts, :reloadable_args, ["--no-all-warnings"]) + + froms = all_waiting([from], endpoint) + + {backup, res, out} = + with_build_lock(fn -> + purge_fallback? = + if Phoenix.CodeReloader.MixListener.started?() do + Phoenix.CodeReloader.MixListener.purge(apps) + false + else + warn_missing_mix_listener() + true + end + + # We do a backup of the endpoint in case compilation fails. + # If so we can bring it back to finish the request handling. + backup = load_backup(endpoint) + + {res, out} = + proxy_io(fn -> + try do + task_loaded = Code.ensure_loaded(Mix.Task) + mix_compile(task_loaded, compilers, apps, args, state.timestamp, purge_fallback?) + catch + :exit, {:shutdown, 1} -> + :error + + kind, reason -> + IO.puts(Exception.format(kind, reason, __STACKTRACE__)) + :error + end + end) + + {backup, res, out} + end) + + reply = + case res do + :ok -> + :ok + + :error -> + write_backup(backup) + {:error, IO.iodata_to_binary(out)} + end + + Enum.each(froms, &GenServer.reply(&1, reply)) + {:noreply, %{state | timestamp: timestamp()}} + end + + def handle_cast({:sync, pid, ref}, state) do + send(pid, ref) + {:noreply, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + defp default_reloadable_apps() do + if Mix.Project.umbrella?() do + Enum.map(Mix.Dep.Umbrella.cached(), & &1.app) + else + [Mix.Project.config()[:app]] + end + end + + defp os_symlink({:win32, _}), + do: + " On Windows, the lack of symlinks may even cause empty assets to be served. " <> + "Luckily, you can address this issue by starting your Windows terminal at least " <> + "once with \"Run as Administrator\" and then running your Phoenix application." 
+ + defp os_symlink(_), + do: "" + + defp can_symlink?() do + build_path = Mix.Project.build_path() + symlink = Path.join(Path.dirname(build_path), "__phoenix__") + + case File.ln_s(build_path, symlink) do + :ok -> + File.rm_rf(symlink) + true + + {:error, :eexist} -> + File.rm_rf(symlink) + true + + {:error, _} -> + false + end + end + + defp load_backup(mod) do + mod + |> :code.which() + |> read_backup() + end + + defp read_backup(path) when is_list(path) do + case File.read(path) do + {:ok, binary} -> {:ok, path, binary} + _ -> :error + end + end + + defp read_backup(_path), do: :error + + defp write_backup({:ok, path, file}), do: File.write!(path, file) + defp write_backup(:error), do: :ok + + defp all_waiting(acc, endpoint) do + receive do + {:"$gen_call", from, {:reload!, ^endpoint, _}} -> all_waiting([from | acc], endpoint) + after + 0 -> acc + end + end + + if Version.match?(System.version(), ">= 1.18.0-dev") do + defp warn_missing_mix_listener do + if Mix.Project.get() != Phoenix.MixProject do + IO.warn(""" + a Mix listener expected by Phoenix.CodeReloader is missing. + + Please add the listener to your mix.exs configuration, like so: + + def project do + [ + ..., + listeners: [Phoenix.CodeReloader] + ] + end + + """) + end + end + else + defp warn_missing_mix_listener do + :ok + end + end + + defp mix_compile( + {:module, Mix.Task}, + compilers, + apps_to_reload, + compile_args, + timestamp, + purge_fallback? + ) do + config = Mix.Project.config() + path = Mix.Project.consolidation_path(config) + + mix_compile_deps( + Mix.Dep.cached(), + apps_to_reload, + compile_args, + compilers, + timestamp, + path, + purge_fallback? + ) + + mix_compile_project( + config[:app], + apps_to_reload, + compile_args, + compilers, + timestamp, + path, + purge_fallback? + ) + + if config[:consolidate_protocols] do + # If we are consolidating protocols, we need to purge all of its modules + # to ensure the consolidated versions are loaded. 
"mix compile" performs + # a similar task. + Code.prepend_path(path) + purge_modules(path) + end + + :ok + end + + defp mix_compile({:error, _reason}, _, _, _, _, _) do + raise "the Code Reloader is enabled but Mix is not available. If you want to " <> + "use the Code Reloader in production or inside an escript, you must add " <> + ":mix to your applications list. Otherwise, you must disable code reloading " <> + "in such environments" + end + + defp mix_compile_deps( + deps, + apps_to_reload, + compile_args, + compilers, + timestamp, + path, + purge_fallback? + ) do + for dep <- deps, dep.app in apps_to_reload do + Mix.Dep.in_dependency(dep, fn _ -> + mix_compile_unless_stale_config(compilers, compile_args, timestamp, path, purge_fallback?) + end) + end + end + + defp mix_compile_project(nil, _, _, _, _, _, _), do: :ok + + defp mix_compile_project( + app, + apps_to_reload, + compile_args, + compilers, + timestamp, + path, + purge_fallback? + ) do + if app in apps_to_reload do + mix_compile_unless_stale_config(compilers, compile_args, timestamp, path, purge_fallback?) + end + end + + defp mix_compile_unless_stale_config(compilers, compile_args, timestamp, path, purge_fallback?) do + manifests = Mix.Tasks.Compile.Elixir.manifests() + configs = Mix.Project.config_files() + config = Mix.Project.config() + + case Mix.Utils.extract_stale(configs, manifests) do + [] -> + # If the manifests are more recent than the timestamp, + # someone updated this app behind the scenes, so purge all beams. + # TODO: remove once we depend on Elixir 1.18 + if purge_fallback? and Mix.Utils.stale?(manifests, [timestamp]) do + purge_modules(Path.join(Mix.Project.app_path(config), "ebin")) + end + + mix_compile(compilers, compile_args, config, path) + + files -> + raise """ + could not compile application: #{Mix.Project.config()[:app]}. + + You must restart your server after changing configuration files or your dependencies. 
+ In particular, the following files changed and must be recomputed on a server restart: + + * #{Enum.map_join(files, "\n * ", &Path.relative_to_cwd/1)} + + """ + end + end + + defp mix_compile(compilers, compile_args, config, consolidation_path) do + all = config[:compilers] || Mix.compilers() + + compilers = + for compiler <- compilers, compiler in all do + Mix.Task.reenable("compile.#{compiler}") + compiler + end + + # We call build_structure mostly for Windows so new + # assets in priv are copied to the build directory. + Mix.Project.build_structure(config) + + args = [ + # TODO: The purge option may no longer be required from Elixir v1.18 + "--purge-consolidation-path-if-stale", + consolidation_path, + # Since Elixir v1.20, Elixir no longer automatically purges compiler + # modules, which is ok for most workflows, but since code reloading never + # shuts down the server, we enable purging to avoid too many temp modules. + "--purge-compiler-modules" | compile_args + ] + + {status, diagnostics} = + with_logger_app(config, fn -> + run_compilers(compilers, args, :noop, []) + end) + + Proxy.diagnostics(Process.group_leader(), diagnostics) + + cond do + status == :error -> + if "--return-errors" not in args do + exit({:shutdown, 1}) + end + + status == :ok && config[:consolidate_protocols] -> + # TODO: Calling compile.protocols is no longer be required from Elixir v1.19 + Mix.Task.reenable("compile.protocols") + Mix.Task.run("compile.protocols", []) + :ok + + true -> + :ok + end + end + + defp timestamp, do: System.system_time(:second) + + defp purge_modules(path) do + with {:ok, beams} <- File.ls(path) do + for beam <- beams do + case :binary.split(beam, ".beam") do + [module, ""] -> module |> String.to_atom() |> purge_module() + _ -> :ok + end + end + + :ok + end + end + + defp purge_module(module) do + :code.purge(module) + :code.delete(module) + end + + defp proxy_io(fun) do + original_gl = Process.group_leader() + {:ok, proxy_gl} = Proxy.start() + 
Process.group_leader(self(), proxy_gl) + + try do + {fun.(), Proxy.stop(proxy_gl)} + after + Process.group_leader(self(), original_gl) + Process.exit(proxy_gl, :kill) + end + end + + ## TODO: Replace this by Mix.Task.Compiler.run/2 on Elixir v1.19+ + + defp run_compilers([], _, status, diagnostics) do + {status, diagnostics} + end + + defp run_compilers([compiler | rest], args, status, diagnostics) do + {new_status, new_diagnostics} = run_compiler(compiler, args) + diagnostics = diagnostics ++ new_diagnostics + + case new_status do + :error -> {:error, diagnostics} + :ok -> run_compilers(rest, args, :ok, diagnostics) + :noop -> run_compilers(rest, args, status, diagnostics) + end + end + + defp run_compiler(compiler, args) do + result = normalize(Mix.Task.run("compile.#{compiler}", args), compiler) + Enum.reduce(Mix.ProjectStack.pop_after_compiler(compiler), result, & &1.(&2)) + end + + defp normalize(result, name) do + case result do + {status, diagnostics} when status in [:ok, :noop, :error] and is_list(diagnostics) -> + {status, diagnostics} + + # ok/noop can come from tasks that have already run + _ when result in [:ok, :noop] -> + {result, []} + + _ -> + # TODO: Convert this to an error on v2.0 + Mix.shell().error( + "warning: Mix compiler #{inspect(name)} was supposed to return " <> + "{:ok | :noop | :error, [diagnostic]} but it returned #{inspect(result)}" + ) + + {:noop, []} + end + end + + # TODO: remove once we depend on Elixir 1.17 + defp with_logger_app(config, fun) do + app = Keyword.fetch!(config, :app) + logger_config_app = Application.get_env(:logger, :compile_time_application) + + try do + Logger.configure(compile_time_application: app) + fun.() + after + Logger.configure(compile_time_application: logger_config_app) + end + end + + # TODO: remove once we depend on Elixir 1.18 + if Code.ensure_loaded?(Mix.Project) and function_exported?(Mix.Project, :with_build_lock, 1) do + defp with_build_lock(fun), do: Mix.Project.with_build_lock(fun) + else + 
defp with_build_lock(fun), do: fun.() + end +end diff --git a/deps/phoenix/lib/phoenix/config.ex b/deps/phoenix/lib/phoenix/config.ex new file mode 100644 index 0000000..8ef5930 --- /dev/null +++ b/deps/phoenix/lib/phoenix/config.ex @@ -0,0 +1,166 @@ +defmodule Phoenix.Config do + # Handles Phoenix configuration. + # + # This module is private to Phoenix and should not be accessed + # directly. The Phoenix endpoint configuration can be accessed + # at runtime using the `config/2` function. + @moduledoc false + + use GenServer + + @doc """ + Starts a Phoenix configuration handler. + """ + def start_link({module, config, defaults, opts}) do + permanent = Keyword.keys(defaults) + GenServer.start_link(__MODULE__, {module, config, permanent}, opts) + end + + @doc """ + Puts a given key-value pair in config. + """ + def put(module, key, value) do + :ets.insert(module, {key, value}) + end + + @doc """ + Adds permanent configuration. + + Permanent configuration is not deleted on hot code reload. + """ + def permanent(module, key, value) do + pid = :ets.lookup_element(module, :__config__, 2) + GenServer.call(pid, {:permanent, key, value}) + end + + @doc """ + Caches a value in Phoenix configuration handler for the module. + + The given function needs to return a tuple with `:cache` if the + value should be cached or `:nocache` if the value should not be + cached because it can be consequently considered stale. + + Notice writes are not serialized to the server, we expect the + function that generates the cache to be idempotent. + """ + @spec cache(module, term, (module -> {:cache | :nocache, term})) :: term + def cache(module, key, fun) do + try do + :ets.lookup(module, key) + rescue + e -> + case :ets.info(module) do + :undefined -> + raise "could not find ets table for endpoint #{inspect(module)}. 
Make sure your endpoint is started and note you cannot access endpoint functions at compile-time" + + _ -> + reraise e, __STACKTRACE__ + end + else + [{^key, :cache, val}] -> + val + + [] -> + case fun.(module) do + {:cache, val} -> + :ets.insert(module, {key, :cache, val}) + val + + {:nocache, val} -> + val + end + end + end + + @doc """ + Clears all cached entries in the endpoint. + """ + @spec clear_cache(module) :: :ok + def clear_cache(module) do + :ets.match_delete(module, {:_, :cache, :_}) + :ok + end + + @doc """ + Reads the configuration for module from the given OTP app. + + Useful to read a particular value at compilation time. + """ + def from_env(otp_app, module, defaults) do + config = fetch_config(otp_app, module) + + merge(defaults, config) + end + + defp fetch_config(otp_app, module) do + case Application.fetch_env(otp_app, module) do + {:ok, conf} -> conf + :error -> [] + end + end + + @doc """ + Take 2 keyword lists and merge them recursively. + + Used to merge configuration values into defaults. + """ + def merge(a, b), do: Keyword.merge(a, b, &merger/3) + + defp merger(_k, v1, v2) do + if Keyword.keyword?(v1) and Keyword.keyword?(v2) do + Keyword.merge(v1, v2, &merger/3) + else + v2 + end + end + + @doc """ + Changes the configuration for the given module. + + It receives a keyword list with changed config and another + with removed ones. The changed config are updated while the + removed ones stop the configuration server, effectively removing + the table. 
+ """ + def config_change(module, changed, removed) do + pid = :ets.lookup_element(module, :__config__, 2) + GenServer.call(pid, {:config_change, changed, removed}) + end + + # Callbacks + + def init({module, config, permanent}) do + :ets.new(module, [:named_table, :public, read_concurrency: true]) + update(module, config, []) + :ets.insert(module, {:__config__, self()}) + {:ok, {module, [:__config__ | permanent]}} + end + + def handle_call({:permanent, key, value}, _from, {module, permanent}) do + :ets.insert(module, {key, value}) + {:reply, :ok, {module, [key | permanent]}} + end + + def handle_call({:config_change, changed, removed}, _from, {module, permanent}) do + cond do + changed = changed[module] -> + update(module, changed, permanent) + {:reply, :ok, {module, permanent}} + + module in removed -> + {:stop, :normal, :ok, {module, permanent}} + + true -> + clear_cache(module) + {:reply, :ok, {module, permanent}} + end + end + + defp update(module, config, permanent) do + old_keys = :ets.select(module, [{{:"$1", :_}, [], [:"$1"]}]) + new_keys = Enum.map(config, &elem(&1, 0)) + Enum.each((old_keys -- new_keys) -- permanent, &:ets.delete(module, &1)) + :ets.insert(module, config) + clear_cache(module) + end +end diff --git a/deps/phoenix/lib/phoenix/controller.ex b/deps/phoenix/lib/phoenix/controller.ex new file mode 100644 index 0000000..9b56e6f --- /dev/null +++ b/deps/phoenix/lib/phoenix/controller.ex @@ -0,0 +1,2015 @@ +defmodule Phoenix.Controller do + import Plug.Conn + alias Plug.Conn.AlreadySentError + + require Logger + + @unsent [:unset, :set, :set_chunked, :set_file] + + # View/Layout deprecation plan + # 1. DONE! Deprecate :namespace option in favor of :layouts on use + # 2. Deprecate the :layouts option in use Phoenix.Controller + # 3. Deprecate setting a non-format view/layout on put_* + # 4. 
Deprecate rendering a view/layout from :_ + + @type view :: atom() + @type layout :: {module(), layout_name :: atom()} | false + + @moduledoc """ + Controllers are used to group common functionality in the same + (pluggable) module. + + For example, the route: + + get "/users/:id", MyAppWeb.UserController, :show + + will invoke the `show/2` action in the `MyAppWeb.UserController`: + + defmodule MyAppWeb.UserController do + use MyAppWeb, :controller + + def show(conn, %{"id" => id}) do + user = Repo.get(User, id) + render(conn, :show, user: user) + end + end + + An action is a regular function that receives the connection + and the request parameters as arguments. The connection is a + `Plug.Conn` struct, as specified by the Plug library. + + Then we invoke `render/3`, passing the connection, the template + to render (typically named after the action), and the `user: user` + as assigns. We will explore all of those concepts next. + + ## Connection + + A controller by default provides many convenience functions for + manipulating the connection, rendering templates, and more. + + Those functions are imported from two modules: + + * `Plug.Conn` - a collection of low-level functions to work with + the connection + + * `Phoenix.Controller` - functions provided by Phoenix + to support rendering, and other Phoenix specific behaviour + + If you want to have functions that manipulate the connection + without fully implementing the controller, you can import both + modules directly instead of `use Phoenix.Controller`. + + ## Rendering + + One of the main features provided by controllers is the ability + to perform content negotiation and render templates based on + information sent by the client. + + There are two ways to render content in a controller. One option + is to invoke format-specific functions, such as `html/2` and `json/2`. + + However, most commonly controllers invoke custom modules called + views. Views are modules capable of rendering a custom format. 
+ This is done by specifying the option `:formats` when defining + the controller: + + use Phoenix.Controller, formats: [:html, :json] + + Now, when invoking `render/3`, a controller named `MyAppWeb.UserController` + will invoke `MyAppWeb.UserHTML` and `MyAppWeb.UserJSON` respectively + when rendering each format: + + def show(conn, %{"id" => id}) do + user = Repo.get(User, id) + # Will invoke UserHTML.show(%{user: user}) for html requests + # Will invoke UserJSON.show(%{user: user}) for json requests + render(conn, :show, user: user) + end + + You can also specify formats to render by calling `put_view/2` + directly with a connection. For example, instead of inferring the + the view names from the controller, as done in: + + use Phoenix.Controller, formats: [:html, :json] + + You can write the above explicitly in your actions as: + + put_view(conn, html: MyAppWeb.UserHTML, json: MyAppWeb.UserJSON) + + Or as a plug: + + plug :put_view, html: MyAppWeb.UserHTML, json: MyAppWeb.UserJSON + + ## Layouts + + Many applications have shared content that they want to include on every + page, most often the `` tag and its contents. In Phoenix, this is + done via the `put_root_layout` function: + + put_root_layout(conn, html: {MyAppWeb.Layouts, :root}) + + In most applications, this is invoked as a Plug in your application router: + + plug :put_root_layout, html: {MyAppWeb.Layouts, :root} + + This layout is shared by all controllers, and also by `Phoenix.LiveView`. + + However, you can also specify controller-specific layouts using `put_layout/2`, + although this functionality is discouraged in Phoenix v1.8 in favor of using + function components to build your application. + + ## Options + + When used, the controller supports the following options to customize + template rendering: + + * `:formats` - the formats this controller will render + by default. 
For example, specifying `formats: [:html, :json]` + for a controller named `MyAppWeb.UserController` will + invoke `MyAppWeb.UserHTML` and `MyAppWeb.UserJSON` when + respectively rendering each format. + + The `:formats` option is required. You may set it to an empty list + if you don't expect to render any format upfront. To retain the + behaviour of older Phoenix versions, you can explicitly pass the + "View" suffix to the `:formats` option: + + use Phoenix.Controller, formats: [html: "View", json: "View"] + + ## Plug pipeline + + As with routers, controllers also have their own plug pipeline. + However, different from routers, controllers have a single pipeline: + + defmodule MyAppWeb.UserController do + use MyAppWeb, :controller + + plug :authenticate, usernames: ["jose", "eric", "sonny"] + + def show(conn, params) do + # authenticated users only + end + + defp authenticate(conn, options) do + if get_session(conn, :username) in options[:usernames] do + conn + else + conn |> redirect(to: "/") |> halt() + end + end + end + + The `:authenticate` plug will be invoked before the action. If the + plug calls `Plug.Conn.halt/1` (which is by default imported into + controllers), it will halt the pipeline and won't invoke the action. + + ### Guards + + `plug/2` in controllers supports guards, allowing a developer to configure + a plug to only run in some particular action. + + plug :do_something when action in [:show, :edit] + + Due to operator precedence in Elixir, if the second argument is a keyword list, + we need to wrap the keyword in `[...]` when using `when`: + + plug :authenticate, [usernames: ["jose", "eric", "sonny"]] when action in [:show, :edit] + plug :authenticate, [usernames: ["admin"]] when not action in [:index] + + The first plug will run only when action is show or edit. The second plug will + always run, except for the index action. 
+ + Those guards work like regular Elixir guards and the only variables accessible + in the guard are `conn`, the `action` as an atom and the `controller` as an + alias. + + ## Controllers are plugs + + Like routers, controllers are plugs, but they are wired to dispatch + to a particular function which is called an action. + + For example, the route: + + get "/users/:id", UserController, :show + + will invoke `UserController` as a plug: + + UserController.call(conn, :show) + + which will trigger the plug pipeline and which will eventually + invoke the inner action plug that dispatches to the `show/2` + function in `UserController`. + + As controllers are plugs, they implement both [`init/1`](`c:Plug.init/1`) and + [`call/2`](`c:Plug.call/2`), and it also provides a function named `action/2` + which is responsible for dispatching the appropriate action + after the plug stack (and is also overridable). + + ### Overriding `action/2` for custom arguments + + Phoenix injects an `action/2` plug in your controller which calls the + function matched from the router. By default, it passes the conn and params. + In some cases, overriding the `action/2` plug in your controller is a + useful way to inject arguments into your actions that you would otherwise + need to repeatedly fetch off the connection. For example, imagine if you + stored a `conn.assigns.current_user` in the connection and wanted quick + access to the user for every action in your controller: + + def action(conn, _) do + args = [conn, conn.params, conn.assigns.current_user] + apply(__MODULE__, action_name(conn), args) + end + + def index(conn, _params, user) do + videos = Repo.all(user_videos(user)) + # ... + end + + def delete(conn, %{"id" => id}, user) do + video = Repo.get!(user_videos(user), id) + # ... 
+ end + + """ + defmacro __using__(opts) do + opts = + if Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, __CALLER__)) + else + opts + end + + quote bind_quoted: [opts: opts] do + import Phoenix.Controller + import Plug.Conn + + use Phoenix.Controller.Pipeline + + with {layout, view} <- Phoenix.Controller.__plugs__(__MODULE__, opts) do + plug :put_new_layout, layout + plug :put_new_view, view + end + end + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:action, 2}}) + + defp expand_alias(other, _env), do: other + + @doc """ + Registers the plug to call as a fallback to the controller action. + + A fallback plug is useful to translate common domain data structures + into a valid `%Plug.Conn{}` response. If the controller action fails to + return a `%Plug.Conn{}`, the provided plug will be called and receive + the controller's `%Plug.Conn{}` as it was before the action was invoked + along with the value returned from the controller action. + + ## Examples + + defmodule MyController do + use Phoenix.Controller + + action_fallback MyFallbackController + + def show(conn, %{"id" => id}, current_user) do + with {:ok, post} <- Blog.fetch_post(id), + :ok <- Authorizer.authorize(current_user, :view, post) do + + render(conn, "show.json", post: post) + end + end + end + + In the above example, `with` is used to match only a successful + post fetch, followed by valid authorization for the current user. + In the event either of those fail to match, `with` will not invoke + the render block and instead return the unmatched value. In this case, + imagine `Blog.fetch_post/2` returned `{:error, :not_found}` or + `Authorizer.authorize/3` returned `{:error, :unauthorized}`. For cases + where these data structures serve as return values across multiple + boundaries in our domain, a single fallback module can be used to + translate the value into a valid response. 
For example, you could + write the following fallback controller to handle the above values: + + defmodule MyFallbackController do + use Phoenix.Controller + + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(MyErrorView) + |> render(:"404") + end + + def call(conn, {:error, :unauthorized}) do + conn + |> put_status(:forbidden) + |> put_view(MyErrorView) + |> render(:"403") + end + end + """ + defmacro action_fallback(plug) do + Phoenix.Controller.Pipeline.__action_fallback__(plug, __CALLER__) + end + + @doc """ + Returns the action name as an atom, raises if unavailable. + """ + @spec action_name(Plug.Conn.t()) :: atom + def action_name(conn), do: conn.private.phoenix_action + + @doc """ + Returns the controller module as an atom, raises if unavailable. + """ + @spec controller_module(Plug.Conn.t()) :: atom + def controller_module(conn), do: conn.private.phoenix_controller + + @doc """ + Returns the router module as an atom, raises if unavailable. + """ + @spec router_module(Plug.Conn.t()) :: atom + def router_module(conn), do: conn.private.phoenix_router + + @doc """ + Returns the endpoint module as an atom, raises if unavailable. + """ + @spec endpoint_module(Plug.Conn.t()) :: atom + def endpoint_module(conn), do: conn.private.phoenix_endpoint + + @doc """ + Returns the template name rendered in the view as a string + (or nil if no template was rendered). + """ + @spec view_template(Plug.Conn.t()) :: binary | nil + def view_template(conn) do + conn.private[:phoenix_template] + end + + @doc """ + Sends JSON response. + + It uses the configured `:json_library` under the `:phoenix` + application for `:json` to pick up the encoder module. 
+ + ## Examples + + iex> json(conn, %{id: 123}) + + """ + @spec json(Plug.Conn.t(), term) :: Plug.Conn.t() + def json(conn, data) do + response = Phoenix.json_library().encode_to_iodata!(data) + send_resp(conn, conn.status || 200, "application/json", response) + end + + @doc """ + A plug that may convert a JSON response into a JSONP one. + + In case a JSON response is returned, it will be converted + to a JSONP as long as the callback field is present in + the query string. The callback field itself defaults to + "callback", but may be configured with the callback option. + + In case there is no callback or the response is not encoded + in JSON format, it is a no-op. + + Only alphanumeric characters and underscore are allowed in the + callback name. Otherwise an exception is raised. + + ## Examples + + # Will convert JSON to JSONP if callback=someFunction is given + plug :allow_jsonp + + # Will convert JSON to JSONP if cb=someFunction is given + plug :allow_jsonp, callback: "cb" + + """ + @spec allow_jsonp(Plug.Conn.t(), Keyword.t()) :: Plug.Conn.t() + def allow_jsonp(conn, opts \\ []) do + callback = Keyword.get(opts, :callback, "callback") + + case Map.fetch(conn.query_params, callback) do + :error -> + conn + + {:ok, ""} -> + conn + + {:ok, cb} -> + validate_jsonp_callback!(cb) + + register_before_send(conn, fn conn -> + if json_response?(conn) do + conn + |> put_resp_header("content-type", "application/javascript") + |> resp(conn.status, jsonp_body(conn.resp_body, cb)) + else + conn + end + end) + end + end + + defp json_response?(conn) do + case get_resp_header(conn, "content-type") do + ["application/json;" <> _] -> true + ["application/json"] -> true + _ -> false + end + end + + defp jsonp_body(data, callback) do + body = + data + |> IO.iodata_to_binary() + |> String.replace(<<0x2028::utf8>>, "\\u2028") + |> String.replace(<<0x2029::utf8>>, "\\u2029") + + "/**/ typeof #{callback} === 'function' && #{callback}(#{body});" + end + + defp 
validate_jsonp_callback!(<>) + when h in ?0..?9 or h in ?A..?Z or h in ?a..?z or h == ?_, + do: validate_jsonp_callback!(t) + + defp validate_jsonp_callback!(<<>>), do: :ok + + defp validate_jsonp_callback!(_), + do: raise(ArgumentError, "the JSONP callback name contains invalid characters") + + @doc """ + Sends text response. + + ## Examples + + iex> text(conn, "hello") + + iex> text(conn, :implements_to_string) + + """ + @spec text(Plug.Conn.t(), String.Chars.t()) :: Plug.Conn.t() + def text(conn, data) do + send_resp(conn, conn.status || 200, "text/plain", to_string(data)) + end + + @doc """ + Sends html response. + + ## Examples + + iex> html(conn, "...") + + """ + @spec html(Plug.Conn.t(), iodata) :: Plug.Conn.t() + def html(conn, data) do + send_resp(conn, conn.status || 200, "text/html", data) + end + + @doc """ + Sends redirect response to the given url. + + For security, `:to` only accepts paths. Use the `:external` + option to redirect to any URL. + + The response will be sent with the status code defined within + the connection, via `Plug.Conn.put_status/2`. If no status + code is set, a 302 response is sent. + + ## Examples + + iex> redirect(conn, to: "/login") + + iex> redirect(conn, external: "https://elixir-lang.org") + + """ + def redirect(conn, opts) when is_list(opts) do + url = url(opts) + html = Plug.HTML.html_escape(url) + body = "You are being redirected." 
+ + conn + |> put_resp_header("location", url) + |> send_resp(conn.status || 302, "text/html", body) + end + + defp url(opts) do + cond do + to = opts[:to] -> validate_local_url(to) + external = opts[:external] -> external + true -> raise ArgumentError, "expected :to or :external option in redirect/2" + end + end + + @invalid_local_url_chars ["\\", "/%09", "/\t"] + defp validate_local_url("//" <> _ = to), do: raise_invalid_url(to) + + defp validate_local_url("/" <> _ = to) do + if String.contains?(to, @invalid_local_url_chars) do + raise ArgumentError, "unsafe characters detected for local redirect in URL #{inspect(to)}" + else + to + end + end + + defp validate_local_url(to), do: raise_invalid_url(to) + + @spec raise_invalid_url(term()) :: no_return() + defp raise_invalid_url(url) do + raise ArgumentError, "the :to option in redirect expects a path but was #{inspect(url)}" + end + + @doc """ + Stores the view for rendering. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + + ## Examples + + iex> put_view(conn, html: AppHTML, json: AppJSON) + + """ + @spec put_view(Plug.Conn.t(), [{format :: atom, view}] | view) :: Plug.Conn.t() + def put_view(%Plug.Conn{state: state} = conn, formats) when state in @unsent do + put_private_view(conn, :phoenix_view, :replace, formats) + end + + def put_view(%Plug.Conn{} = conn, module) do + raise(AlreadySentError, """ + the response was already sent. 
+ + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + View module: #{inspect(module)} + """) + end + + defp put_private_view(conn, priv_key, kind, formats) when is_list(formats) do + formats = Enum.into(formats, %{}, fn {format, value} -> {to_string(format), value} end) + put_private_formats(conn, priv_key, kind, formats) + end + + # TODO: Deprecate this whole branch on Phoenix v1.9 + defp put_private_view(conn, priv_key, kind, value) do + put_private_formats(conn, priv_key, kind, %{_: value}) + end + + defp put_private_formats(conn, priv_key, kind, formats) when kind in [:new, :replace] do + update_in(conn.private, fn private -> + existing = Map.get(private, priv_key, %{}) + + new_formats = + case kind do + :new -> Map.merge(formats, existing) + :replace -> Map.merge(existing, formats) + end + + Map.put(private, priv_key, new_formats) + end) + end + + @doc """ + Stores the view for rendering if one was not stored yet. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + # TODO: Remove | view from the spec once we deprecate put_new_view on controllers on v1.9 + @spec put_new_view(Plug.Conn.t(), [{format :: atom, view}] | view) :: Plug.Conn.t() + def put_new_view(%Plug.Conn{state: state} = conn, formats) when state in @unsent do + put_private_view(conn, :phoenix_view, :new, formats) + end + + def put_new_view(%Plug.Conn{} = conn, module) do + raise(AlreadySentError, """ + the response was already sent. + + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + View module: #{inspect(module)} + """) + end + + @doc """ + Retrieves the current view for the given format. + + If no format is given, takes the current one from the connection. 
+ """ + @spec view_module(Plug.Conn.t(), binary | nil) :: atom + def view_module(conn, format \\ nil) do + format = format || get_safe_format(conn) + + # TODO: Remove the first branch once code paths are deprecated and then removed + case conn.private[:phoenix_view] do + %{_: value} when value != nil -> + value + + %{^format => value} -> + value + + formats -> + raise "no view was found for the format: #{inspect(format)}. " <> + "The supported formats are: #{inspect(Map.keys(formats || %{}) -- [:_])}" + end + end + + @doc """ + Stores the layout for rendering. + + The layout must be given as keyword list where the key is the request + format the layout will be applied to (such as `:html`) and the value + is one of: + + * `{module, layout}` with the `module` the layout is defined and + the name of the `layout` as an atom + + * `false` which disables the layout + + If `false` is given without a format, all layouts are disabled. + + ## Examples + + iex> layout(conn) + false + + iex> conn = put_layout(conn, html: {AppView, :application}) + iex> layout(conn) + {AppView, :application} + + iex> conn = put_layout(conn, html: {AppView, :print}) + iex> layout(conn) + {AppView, :print} + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_layout(Plug.Conn.t(), [{format :: atom, layout}] | false) :: Plug.Conn.t() + def put_layout(%Plug.Conn{state: state} = conn, layout) do + if state in @unsent do + put_private_layout(conn, :phoenix_layout, :replace, layout) + else + raise AlreadySentError, """ + the response was already sent. 
+ + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + Layout: #{inspect(layout)} + """ + end + end + + defp put_private_layout(conn, private_key, kind, layouts) when is_list(layouts) do + formats = + Map.new(layouts, fn + {format, false} -> + {Atom.to_string(format), false} + + {format, layout} when is_atom(layout) -> + format = Atom.to_string(format) + + case conn.private[private_key] do + %{^format => {mod, _}} -> + IO.warn(""" + specifying a layout without module is deprecated, use #{format}: #{inspect({mod, layout})} instead\ + """) + + {format, {mod, layout}} + + %{} -> + raise "cannot use put_layout/2 or put_root_layout/2 with atom because " <> + "there is no previous layout set for format #{inspect(format)}" + end + + {format, {mod, layout}} when is_atom(mod) and is_atom(layout) -> + {Atom.to_string(format), {mod, layout}} + + {format, other} -> + raise ArgumentError, """ + put_layout and put_root_layout expects an module and template per format, such as: + + #{format}: {MyView, :app} + + Got: + + #{inspect(other)} + """ + end) + + put_private_formats(conn, private_key, kind, formats) + end + + defp put_private_layout(conn, private_key, kind, no_format) do + case no_format do + false -> + put_private_formats(conn, private_key, kind, %{_: false}) + + # TODO: Deprecate this branch on Phoenix v1.9 + {mod, layout} when is_atom(mod) -> + put_private_formats(conn, private_key, kind, %{_: {mod, layout}}) + + layout when is_binary(layout) or is_atom(layout) -> + case Map.get(conn.private, private_key, %{_: false}) do + %{_: {mod, _}} -> + IO.warn(""" + specifying put_layout(conn, template) or put_new_layout(conn, template) is deprecated, \ + specify the layout with the format instead: put_layout(conn, html: #{inspect({mod, layout})}) + """) + + put_private_formats(conn, private_key, kind, %{_: {mod, layout}}) + + %{_: false} -> + raise "cannot use put_layout/2 or put_root_layout/2 with atom/binary when layout is false, use a 
tuple instead" + + %{} -> + raise "you must pass the format when using put_layout/2 or put_root_layout/2 and a previous format was set, " <> + "such as: put_layout(conn, html: #{inspect(layout)})" + end + end + end + + @doc """ + Stores the layout for rendering if one was not stored yet. + + See `put_layout/2` for more information. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + # TODO: Remove | layout from the spec once we deprecate put_new_layout on controllers + @spec put_new_layout(Plug.Conn.t(), [{format :: atom, layout}] | layout) :: Plug.Conn.t() + def put_new_layout(%Plug.Conn{state: state} = conn, layout) + when (is_tuple(layout) and tuple_size(layout) == 2) or is_list(layout) or layout == false do + unless state in @unsent do + raise(AlreadySentError, """ + the response was already sent. + + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + Layout: #{inspect(layout)} + """) + end + + put_private_layout(conn, :phoenix_layout, :new, layout) + end + + @doc """ + Stores the root layout for rendering. + + The layout must be given as keyword list where the key is the request + format the layout will be applied to (such as `:html`) and the value + is one of: + + * `{module, layout}` with the `module` the layout is defined and + the name of the `layout` as an atom + + * `layout` when the name of the layout. This requires a layout for + the given format in the shape of `{module, layout}` to be previously + given + + * `false` which disables the layout + + ## Examples + + iex> root_layout(conn) + false + + iex> conn = put_root_layout(conn, html: {AppView, :root}) + iex> root_layout(conn) + {AppView, :root} + + iex> conn = put_root_layout(conn, html: :bare) + iex> root_layout(conn) + {AppView, :bare} + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. 
+ """ + @spec put_root_layout(Plug.Conn.t(), [{format :: atom, layout}] | false) :: + Plug.Conn.t() + def put_root_layout(%Plug.Conn{state: state} = conn, layout) do + if state in @unsent do + put_private_layout(conn, :phoenix_root_layout, :replace, layout) + else + raise AlreadySentError, """ + the response was already sent. + + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + Layout: #{inspect(layout)} + """ + end + end + + @doc false + @deprecated "put_layout_formats/2 is deprecated, pass a keyword list to put_layout/put_root_layout instead" + @spec put_layout_formats(Plug.Conn.t(), [String.t()]) :: Plug.Conn.t() + def put_layout_formats(%Plug.Conn{state: state} = conn, formats) + when state in @unsent and is_list(formats) do + put_private(conn, :phoenix_layout_formats, formats) + end + + def put_layout_formats(%Plug.Conn{} = conn, _formats) do + raise(AlreadySentError, """ + the response was already sent. + + Status code: #{conn.status} + Request path: #{conn.request_path} + Method: #{conn.method} + """) + end + + @doc false + @deprecated "layout_formats/1 is deprecated, pass a keyword list to put_layout/put_root_layout instead" + @spec layout_formats(Plug.Conn.t()) :: [String.t()] + def layout_formats(conn) do + Map.get(conn.private, :phoenix_layout_formats, ~w(html)) + end + + @doc """ + Retrieves the current layout for the given format. + + If no format is given, takes the current one from the connection. + """ + @spec layout(Plug.Conn.t(), binary | nil) :: {atom, String.t() | atom} | false + def layout(conn, format \\ nil) do + get_private_layout(conn, :phoenix_layout, format) + end + + @doc """ + Retrieves the current root layout for the given format. + + If no format is given, takes the current one from the connection. 
+ """ + @spec root_layout(Plug.Conn.t(), binary | nil) :: {atom, String.t() | atom} | false + def root_layout(conn, format \\ nil) do + get_private_layout(conn, :phoenix_root_layout, format) + end + + defp get_private_layout(conn, priv_key, format) do + format = format || get_safe_format(conn) + + # TODO: Remove _ handling once layouts(false) is set to remove all formats + case conn.private[priv_key] do + %{_: value} -> if format in [nil | layout_formats(conn)], do: value, else: false + %{^format => value} -> value + _ -> false + end + end + + @doc """ + Render the given template or the default template + specified by the current action with the given assigns. + + See `render/3` for more information. + """ + @spec render(Plug.Conn.t(), Keyword.t() | map | binary | atom) :: Plug.Conn.t() + def render(conn, template_or_assigns \\ []) + + def render(conn, template) when is_binary(template) or is_atom(template) do + render(conn, template, []) + end + + def render(conn, assigns) do + render(conn, action_name(conn), assigns) + end + + @doc """ + Renders the given `template` and `assigns` based on the `conn` information. + + Once the template is rendered, the template format is set as the response + content type (for example, an HTML template will set "text/html" as response + content type) and the data is sent to the client with default status of 200. + + ## Arguments + + * `conn` - the `Plug.Conn` struct + + * `template` - which may be an atom or a string. If an atom, like `:index`, + it will render a template with the same format as the one returned by + `get_format/1`. For example, for an HTML request, it will render + the "index.html" template. If the template is a string, it must contain + the extension too, like "index.json" + + * `assigns` - a dictionary with the assigns to be used in the view. 
Those + assigns are merged and have higher precedence than the connection assigns + (`conn.assigns`) + + ## Examples + + To render a template, you must configure your controller with the formats + to render. You can do so on `use`, which will infer the modules based on + the controller name: + + defmodule MyAppWeb.UserController do + # Will use MyAppWeb.UserHTML and MyAppWeb.UserJSON + use Phoenix.Controller, formats: [:html, :json] + end + + With the formats set, you can render in two ways, either passing a string + with the template name and explicit format: + + def show(conn, _params) do + render(conn, "show.html", message: "Hello") + end + + The example above renders a template "show.html" from the `MyAppWeb.UserHTML` + and sets the response content type to "text/html". + + Or, if you want the template format to be set dynamically based on the request, + you can pass an atom instead (without the extension): + + def show(conn, _params) do + render(conn, :show, message: "Hello") + end + + If the formats are not known at compile-time, you can call `put_view/2` + at runtime: + + defmodule MyAppWeb.UserController do + use Phoenix.Controller + + def show(conn, _params) do + conn + |> put_view(html: MyAppWeb.UserHTML) + |> render("show.html", message: "Hello") + end + end + + """ + @spec render(Plug.Conn.t(), binary | atom, Keyword.t() | map) :: Plug.Conn.t() + def render(conn, template, assigns) + when is_atom(template) and (is_map(assigns) or is_list(assigns)) do + format = + get_format(conn) || + raise "cannot render template #{inspect(template)} because conn.params[\"_format\"] is not set. " <> + "Please set `plug :accepts, ~w(html json ...)` in your pipeline." 
+ + render_and_send(conn, format, Atom.to_string(template), assigns) + end + + def render(conn, template, assigns) + when is_binary(template) and (is_map(assigns) or is_list(assigns)) do + {base, format} = split_template(template) + conn |> put_format(format) |> render_and_send(format, base, assigns) + end + + def render(conn, view, template) + when is_atom(view) and (is_binary(template) or is_atom(template)) do + IO.warn( + "Phoenix.Controller.render/3 with a view is deprecated, see the documentation for render/3 for an alternative" + ) + + render(conn, view, template, []) + end + + @doc false + @deprecated "render/4 is deprecated. Use put_view + render/3" + def render(conn, view, template, assigns) + when is_atom(view) and (is_binary(template) or is_atom(template)) do + conn + |> put_view(view) + |> render(template, assigns) + end + + defp render_and_send(conn, format, template, assigns) do + view = view_module(conn, format) + conn = prepare_assigns(conn, assigns, template, format) + data = render_with_layouts(conn, view, template, format) + + conn + |> ensure_resp_content_type(MIME.type(format)) + |> send_resp(conn.status || 200, data) + end + + defp render_with_layouts(conn, view, template, format) do + render_assigns = Map.put(conn.assigns, :conn, conn) + + case root_layout(conn, format) do + {layout_mod, layout_tpl} -> + {layout_base, _} = split_template(layout_tpl) + inner = template_render(view, template, format, render_assigns) + root_assigns = render_assigns |> Map.put(:inner_content, inner) |> Map.delete(:layout) + template_render_to_iodata(layout_mod, layout_base, format, root_assigns) + + false -> + template_render_to_iodata(view, template, format, render_assigns) + end + end + + defp template_render(view, template, format, assigns) do + metadata = %{view: view, template: template, format: format} + + :telemetry.span([:phoenix, :controller, :render], metadata, fn -> + {Phoenix.Template.render(view, template, format, assigns), metadata} + end) + end + + 
defp template_render_to_iodata(view, template, format, assigns) do + metadata = %{view: view, template: template, format: format} + + :telemetry.span([:phoenix, :controller, :render], metadata, fn -> + {Phoenix.Template.render_to_iodata(view, template, format, assigns), metadata} + end) + end + + defp prepare_assigns(conn, assigns, template, format) do + assigns = to_map(assigns) + + layout = + case assigns_layout(conn, assigns, format) do + {mod, layout} when is_binary(layout) -> {mod, Path.rootname(layout)} + {mod, layout} when is_atom(layout) -> {mod, Atom.to_string(layout)} + false -> false + end + + conn + |> put_private(:phoenix_template, template <> "." <> format) + |> Map.update!(:assigns, fn prev -> + prev + |> Map.merge(assigns) + |> Map.put(:layout, layout) + end) + end + + defp assigns_layout(_conn, %{layout: layout}, _format), do: layout + + defp assigns_layout(conn, _assigns, format) do + # TODO: Remove _ handling once layouts(false) is set to remove all formats + case conn.private[:phoenix_layout] do + %{^format => bad_value, _: good_value} when good_value != false -> + IO.warn(""" + conflicting layouts found. A layout has been set with format, such as: + + put_layout(conn, #{format}: #{inspect(bad_value)}) + + But also without format: + + put_layout(conn, #{inspect(good_value)}) + + In this case, the layout without format will always win. + Passing the layout without a format is currently soft-deprecated. + If you use layouts with formats, make sure that they are + used everywhere. 
Also remember to configure your controller + to use layouts with formats: + + use Phoenix.Controller, layouts: [#{format}: #{inspect(bad_value)}] + """) + + if format in layout_formats(conn), do: good_value, else: false + + %{_: value} -> + if format in layout_formats(conn), do: value, else: false + + %{^format => value} -> + value + + _ -> + false + end + end + + defp to_map(assigns) when is_map(assigns), do: assigns + defp to_map(assigns) when is_list(assigns), do: :maps.from_list(assigns) + + defp split_template(name) when is_atom(name), do: {Atom.to_string(name), nil} + + defp split_template(name) when is_binary(name) do + case :binary.split(name, ".") do + [base, format] -> + {base, format} + + [^name] -> + raise "cannot render template #{inspect(name)} without format. Use an atom if the " <> + "template format is meant to be set dynamically based on the request format" + + [base | formats] -> + {base, List.last(formats)} + end + end + + defp send_resp(conn, default_status, default_content_type, body) do + conn + |> ensure_resp_content_type(default_content_type) + |> send_resp(conn.status || default_status, body) + end + + defp ensure_resp_content_type(%Plug.Conn{resp_headers: resp_headers} = conn, content_type) do + if List.keyfind(resp_headers, "content-type", 0) do + conn + else + content_type = content_type <> "; charset=utf-8" + %{conn | resp_headers: [{"content-type", content_type} | resp_headers]} + end + end + + @doc """ + Puts the url string or `%URI{}` to be used for route generation. + + This function overrides the default URL generation pulled + from the `%Plug.Conn{}`'s endpoint configuration. + + ## Examples + + Imagine your application is configured to run on "example.com" + but after the user signs in, you want all links to use + "some_user.example.com". 
You can do so by setting the proper + router url configuration: + + def put_router_url_by_user(conn) do + put_router_url(conn, get_user_from_conn(conn).account_name <> ".example.com") + end + + Now when you call `Routes.some_route_url(conn, ...)`, it will use + the router url set above. Keep in mind that, if you want to generate + routes to the *current* domain, it is preferred to use + `Routes.some_route_path` helpers, as those are always relative. + """ + def put_router_url(conn, %URI{} = uri) do + put_private(conn, :phoenix_router_url, URI.to_string(uri)) + end + + def put_router_url(conn, url) when is_binary(url) do + put_private(conn, :phoenix_router_url, url) + end + + @doc """ + Puts the URL or `%URI{}` to be used for the static url generation. + + Using this function on a `%Plug.Conn{}` struct tells `static_url/2` to use + the given information for URL generation instead of the `%Plug.Conn{}`'s + endpoint configuration (much like `put_router_url/2` but for static URLs). + """ + def put_static_url(conn, %URI{} = uri) do + put_private(conn, :phoenix_static_url, URI.to_string(uri)) + end + + def put_static_url(conn, url) when is_binary(url) do + put_private(conn, :phoenix_static_url, url) + end + + @doc """ + Puts the format in the connection. + + This format is used when rendering a template as an atom. + For example, `render(conn, :foo)` will render `"foo.FORMAT"` + where the format is the one set here. The default format + is typically set from the negotiation done in `accepts/2`. + + See `get_format/1` for retrieval. + """ + def put_format(conn, format), do: put_private(conn, :phoenix_format, to_string(format)) + + @doc """ + Returns the request format, such as "json", "html". + + This format is used when rendering a template as an atom. + For example, `render(conn, :foo)` will render `"foo.FORMAT"` + where the format is the one set here. The default format + is typically set from the negotiation done in `accepts/2`. 
+ """ + def get_format(conn) do + conn.private[:phoenix_format] || conn.params["_format"] + end + + defp get_safe_format(conn) do + conn.private[:phoenix_format] || + case conn.params do + %{"_format" => format} -> format + %{} -> nil + end + end + + @doc """ + Sends the given file or binary as a download. + + The second argument must be `{:binary, contents}`, where + `contents` will be sent as download, or`{:file, path}`, + where `path` is the filesystem location of the file to + be sent. Be careful to not interpolate the path from + external parameters, as it could allow traversal of the + filesystem. + + The download is achieved by setting "content-disposition" + to attachment. The "content-type" will also be set based + on the extension of the given filename but can be customized + via the `:content_type` and `:charset` options. + + ## Options + + * `:filename` - the filename to be presented to the user + as download + * `:content_type` - the content type of the file or binary + sent as download. It is automatically inferred from the + filename extension + * `:disposition` - specifies disposition type + (`:attachment` or `:inline`). If `:attachment` was used, + user will be prompted to save the file. If `:inline` was used, + the browser will attempt to open the file. + Defaults to `:attachment`. + * `:charset` - the charset of the file, such as "utf-8". + Defaults to none + * `:offset` - the bytes to offset when reading. Defaults to `0` + * `:length` - the total bytes to read. Defaults to `:all` + * `:encode` - encodes the filename using `URI.encode/2`. + Defaults to `true`. When `false`, disables encoding. If you + disable encoding, you need to guarantee there are no special + characters in the filename, such as quotes, newlines, etc. 
+ Otherwise you can expose your application to security attacks + + ## Examples + + To send a file that is stored inside your application priv + directory: + + path = Application.app_dir(:my_app, "priv/prospectus.pdf") + send_download(conn, {:file, path}) + + When using `{:file, path}`, the filename is inferred from the + given path but may also be set explicitly. + + To allow the user to download contents that are in memory as + a binary or string: + + send_download(conn, {:binary, "world"}, filename: "hello.txt") + + See `Plug.Conn.send_file/3` and `Plug.Conn.send_resp/3` if you + would like to access the low-level functions used to send files + and responses via Plug. + """ + def send_download(conn, kind, opts \\ []) + + def send_download(conn, {:file, path}, opts) do + filename = opts[:filename] || Path.basename(path) + offset = opts[:offset] || 0 + length = opts[:length] || :all + + conn + |> prepare_send_download(filename, opts) + |> send_file(conn.status || 200, path, offset, length) + end + + def send_download(conn, {:binary, contents}, opts) do + filename = + opts[:filename] || raise ":filename option is required when sending binary download" + + conn + |> prepare_send_download(filename, opts) + |> send_resp(conn.status || 200, contents) + end + + defp prepare_send_download(conn, filename, opts) do + content_type = opts[:content_type] || MIME.from_path(filename) + encoded_filename = encode_filename(filename, Keyword.get(opts, :encode, true)) + disposition_type = get_disposition_type(Keyword.get(opts, :disposition, :attachment)) + warn_if_ajax(conn) + + disposition = ~s[#{disposition_type}; filename="#{encoded_filename}"] + + disposition = + if encoded_filename != filename do + disposition <> "; filename*=utf-8''#{encoded_filename}" + else + disposition + end + + conn + |> put_resp_content_type(content_type, opts[:charset]) + |> put_resp_header("content-disposition", disposition) + end + + defp encode_filename(filename, false), do: filename + defp 
encode_filename(filename, true), do: URI.encode(filename, &URI.char_unreserved?/1) + + defp get_disposition_type(:attachment), do: "attachment" + defp get_disposition_type(:inline), do: "inline" + + defp get_disposition_type(other), + do: + raise( + ArgumentError, + "expected :disposition to be :attachment or :inline, got: #{inspect(other)}" + ) + + defp ajax?(conn) do + case get_req_header(conn, "x-requested-with") do + [value] -> value in ["XMLHttpRequest", "xmlhttprequest"] + [] -> false + end + end + + defp warn_if_ajax(conn) do + if ajax?(conn) do + Logger.warning( + "send_download/3 has been invoked during an AJAX request. " <> + "The download may not work as expected under XMLHttpRequest" + ) + end + end + + @doc """ + Scrubs the parameters from the request. + + This process is two-fold: + + * Checks to see if the `required_key` is present + * Changes empty parameters of `required_key` (recursively) to nils + + This function is useful for removing empty strings sent + via HTML forms. If you are providing an API, there + is likely no need to invoke `scrub_params/2`. + + If the `required_key` is not present, it will + raise `Phoenix.MissingParamError`. 
+ + ## Examples + + iex> scrub_params(conn, "user") + + """ + @spec scrub_params(Plug.Conn.t(), String.t()) :: Plug.Conn.t() + def scrub_params(%Plug.Conn{} = conn, required_key) when is_binary(required_key) do + param = Map.get(conn.params, required_key) |> scrub_param() + + unless param do + raise Phoenix.MissingParamError, key: required_key + end + + params = Map.put(conn.params, required_key, param) + %{conn | params: params} + end + + defp scrub_param(%{__struct__: mod} = struct) when is_atom(mod) do + struct + end + + defp scrub_param(%{} = param) do + Enum.reduce(param, %{}, fn {k, v}, acc -> + Map.put(acc, k, scrub_param(v)) + end) + end + + defp scrub_param(param) when is_list(param) do + Enum.map(param, &scrub_param/1) + end + + defp scrub_param(param) do + if scrub?(param), do: nil, else: param + end + + defp scrub?(" " <> rest), do: scrub?(rest) + defp scrub?(""), do: true + defp scrub?(_), do: false + + @doc """ + Enables CSRF protection. + + Currently used as a wrapper function for `Plug.CSRFProtection` + and mainly serves as a function plug in `YourApp.Router`. + + Check `get_csrf_token/0` and `delete_csrf_token/0` for + retrieving and deleting CSRF tokens. + """ + def protect_from_forgery(conn, opts \\ []) do + Plug.CSRFProtection.call(conn, Plug.CSRFProtection.init(opts)) + end + + @doc """ + Put headers that improve browser security. + + It sets the following headers, if they are not already set: + + * `content-security-policy` - It sets `frame-ancestors` and + `base-uri` to `self`, restricting embedding and the use of + `` element to same origin respectively. It is equivalent + to setting `"base-uri 'self'; frame-ancestors 'self';"` + + * `referrer-policy` - only send origin on cross origin requests + + * `x-content-type-options` - set to nosniff. 
This requires + script and style tags to be sent with proper content type + + * `x-permitted-cross-domain-policies` - set to none to restrict + Adobe Flash Player’s access to data + + A custom headers map may also be given to be merged with defaults. + + It is recommended for custom header keys to be in lowercase, to avoid sending + duplicate keys or invalid responses. + """ + def put_secure_browser_headers(conn, headers \\ %{}) + + def put_secure_browser_headers(conn, []) do + put_secure_defaults(conn) + end + + def put_secure_browser_headers(conn, headers) when is_map(headers) do + conn + |> put_secure_defaults() + |> merge_resp_headers(headers) + end + + defp put_secure_defaults(%Plug.Conn{resp_headers: resp_headers} = conn) do + headers = [ + {"referrer-policy", "strict-origin-when-cross-origin"}, + {"content-security-policy", "base-uri 'self'; frame-ancestors 'self';"}, + {"x-content-type-options", "nosniff"}, + {"x-permitted-cross-domain-policies", "none"} + ] + + resp_headers = + Enum.reduce(headers, resp_headers, fn {key, _} = pair, acc -> + case :lists.keymember(key, 1, acc) do + true -> acc + false -> [pair | acc] + end + end) + + %{conn | resp_headers: resp_headers} + end + + @doc """ + Gets or generates a CSRF token. + + If a token exists, it is returned, otherwise it is generated and stored + in the process dictionary. + """ + defdelegate get_csrf_token(), to: Plug.CSRFProtection + + @doc """ + Deletes the CSRF token from the process dictionary. + + *Note*: The token is deleted only after a response has been sent. + """ + defdelegate delete_csrf_token(), to: Plug.CSRFProtection + + @doc """ + Performs content negotiation based on the available formats. + + It receives a connection, a list of formats that the server + is capable of rendering and then proceeds to perform content + negotiation based on the request information. If the client + accepts any of the given formats, the request proceeds. 
+ + If the request contains a "_format" parameter, it is + considered to be the format desired by the client. If no + "_format" parameter is available, this function will parse + the "accept" header and find a matching format accordingly. + + This function is useful when you may want to serve different + content-types (such as JSON and HTML) from the same routes. + However, if you always have distinct routes, you can also + disable content negotiation and simply hardcode your format + of choice in your route pipelines: + + plug :put_format, "html" + + It is important to notice that browsers have historically + sent bad accept headers. For this reason, this function will + default to "html" format whenever: + + * the accepted list of arguments contains the "html" format + + * the accept header specified more than one media type preceded + or followed by the wildcard media type "`*/*`" + + This function raises `Phoenix.NotAcceptableError`, which is rendered + with status 406, whenever the server cannot serve a response in any + of the formats expected by the client. + + ## Examples + + `accepts/2` can be invoked as a function: + + iex> accepts(conn, ["html", "json"]) + + or used as a plug: + + plug :accepts, ["html", "json"] + plug :accepts, ~w(html json) + + ## Custom media types + + It is possible to add custom media types to your Phoenix application. + The first step is to teach Plug about those new media types in + your `config/config.exs` file: + + config :mime, :types, %{ + "application/vnd.api+json" => ["json-api"] + } + + The key is the media type, the value is a list of formats the + media type can be identified with. For example, by using + "json-api", you will be able to use templates with extension + "index.json-api" or to force a particular format in a given + URL by sending "?_format=json-api". 
+ + After this change, you must recompile plug: + + $ mix deps.clean mime --build + $ mix deps.get + + And now you can use it in accepts too: + + plug :accepts, ["html", "json-api"] + + """ + @spec accepts(Plug.Conn.t(), [binary]) :: Plug.Conn.t() + def accepts(conn, [_ | _] = accepted) do + case conn.params do + %{"_format" => format} -> + handle_params_accept(conn, format, accepted) + + %{} -> + handle_header_accept(conn, get_req_header(conn, "accept"), accepted) + end + end + + defp handle_params_accept(conn, format, accepted) do + if format in accepted do + put_format(conn, format) + else + raise Phoenix.NotAcceptableError, + message: "unknown format #{inspect(format)}, expected one of #{inspect(accepted)}", + accepts: accepted + end + end + + # In case there is no accept header or the header is */* + # we use the first format specified in the accepts list. + defp handle_header_accept(conn, header, [first | _]) when header == [] or header == ["*/*"] do + put_format(conn, first) + end + + # In case there is a header, we need to parse it. + # But before we check for */* because if one exists and we serve html, + # we unfortunately need to assume it is a browser sending us a request. 
+ defp handle_header_accept(conn, [header | _], accepted) do + if header =~ "*/*" and "html" in accepted do + put_format(conn, "html") + else + parse_header_accept(conn, String.split(header, ","), [], accepted) + end + end + + defp parse_header_accept(conn, [h | t], acc, accepted) do + case Plug.Conn.Utils.media_type(h) do + {:ok, type, subtype, args} -> + exts = parse_exts(type, subtype) + q = parse_q(args) + + if format = q === 1.0 && find_format(exts, accepted) do + put_format(conn, format) + else + parse_header_accept(conn, t, [{-q, h, exts} | acc], accepted) + end + + :error -> + parse_header_accept(conn, t, acc, accepted) + end + end + + defp parse_header_accept(conn, [], acc, accepted) do + acc + |> Enum.sort() + |> Enum.find_value(&parse_header_accept(conn, &1, accepted)) + |> Kernel.||(refuse(conn, acc, accepted)) + end + + defp parse_header_accept(conn, {_, _, exts}, accepted) do + if format = find_format(exts, accepted) do + put_format(conn, format) + end + end + + defp parse_q(args) do + case Map.fetch(args, "q") do + {:ok, float} -> + case Float.parse(float) do + {float, _} -> float + :error -> 1.0 + end + + :error -> + 1.0 + end + end + + defp parse_exts("*", "*"), do: "*/*" + defp parse_exts(type, "*"), do: type + defp parse_exts(type, subtype), do: MIME.extensions(type <> "/" <> subtype) + + defp find_format("*/*", accepted), do: Enum.fetch!(accepted, 0) + defp find_format(exts, accepted) when is_list(exts), do: Enum.find(exts, &(&1 in accepted)) + defp find_format(_type_range, []), do: nil + + defp find_format(type_range, [h | t]) do + mime_type = MIME.type(h) + + case Plug.Conn.Utils.media_type(mime_type) do + {:ok, accepted_type, _subtype, _args} when type_range === accepted_type -> h + _ -> find_format(type_range, t) + end + end + + @spec refuse(term(), [tuple], [binary]) :: no_return() + defp refuse(_conn, given, accepted) do + raise Phoenix.NotAcceptableError, + accepts: accepted, + message: """ + no supported media type in accept header. 
+ + Expected one of #{inspect(accepted)} but got the following formats: + + * #{Enum.map_join(given, "\n ", fn {_, header, exts} -> inspect(header) <> " with extensions: " <> inspect(exts) end)} + + To accept custom formats, register them under the :mime library + in your config/config.exs file: + + config :mime, :types, %{ + "application/xml" => ["xml"] + } + + And then run `mix deps.clean --build mime` to force it to be recompiled. + """ + end + + @doc """ + Fetches the flash storage. + """ + def fetch_flash(conn, _opts \\ []) do + if Map.get(conn.assigns, :flash) do + conn + else + session_flash = get_session(conn, "phoenix_flash") + conn = persist_flash(conn, session_flash || %{}) + + register_before_send(conn, fn conn -> + flash = conn.assigns.flash + flash_size = map_size(flash) + + cond do + is_nil(session_flash) and flash_size == 0 -> + conn + + flash_size > 0 and conn.status in 300..308 -> + put_session(conn, "phoenix_flash", flash) + + true -> + delete_session(conn, "phoenix_flash") + end + end) + end + end + + @doc """ + Merges a map into the flash. + + Returns the updated connection. + + ## Examples + + iex> conn = merge_flash(conn, info: "Welcome Back!") + iex> Phoenix.Flash.get(conn.assigns.flash, :info) + "Welcome Back!" + + """ + def merge_flash(conn, enumerable) do + map = for {k, v} <- enumerable, into: %{}, do: {flash_key(k), v} + persist_flash(conn, Map.merge(Map.get(conn.assigns, :flash, %{}), map)) + end + + @doc """ + Persists a value in flash. + + `key` can be any atom or binary value. Phoenix does not enforce which keys + are stored in the flash, as long as the values are internally consistent. + By default, the Phoenix generators use `:info` and `:error` keys. + + Returns the updated connection. + + ## Examples + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> Phoenix.Flash.get(conn.assigns.flash, :info) + "Welcome Back!" 
+ + """ + def put_flash(conn, key, message) do + flash = + Map.get(conn.assigns, :flash) || + raise ArgumentError, message: "flash not fetched, call fetch_flash/2" + + persist_flash(conn, Map.put(flash, flash_key(key), message)) + end + + @doc """ + Returns a map of previously set flash messages or an empty map. + + ## Examples + + iex> get_flash(conn) + %{} + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> get_flash(conn) + %{"info" => "Welcome Back!"} + + """ + @deprecated "get_flash/1 is deprecated. Use the @flash assign provided by the :fetch_flash plug" + def get_flash(conn) do + Map.get(conn.assigns, :flash) || + raise ArgumentError, message: "flash not fetched, call fetch_flash/2" + end + + @doc """ + Returns a message from flash by `key` (or `nil` if no message is available for `key`). + + ## Examples + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> get_flash(conn, :info) + "Welcome Back!" + + """ + @deprecated "get_flash/2 is deprecated. Use Phoenix.Flash.get(@flash, key) instead" + def get_flash(conn, key) do + get_flash(conn)[flash_key(key)] + end + + @doc """ + Generates a status message from the template name. + + ## Examples + + iex> status_message_from_template("404.html") + "Not Found" + iex> status_message_from_template("whatever.html") + "Internal Server Error" + + """ + def status_message_from_template(template) do + template + |> String.split(".") + |> hd() + |> String.to_integer() + |> Plug.Conn.Status.reason_phrase() + rescue + _ -> "Internal Server Error" + end + + @doc """ + Clears all flash messages. 
+ """ + def clear_flash(conn) do + persist_flash(conn, %{}) + end + + defp flash_key(binary) when is_binary(binary), do: binary + defp flash_key(atom) when is_atom(atom), do: Atom.to_string(atom) + + defp persist_flash(conn, value) do + assign(conn, :flash, value) + end + + @doc """ + Returns the current request path with its default query parameters: + + iex> current_path(conn) + "/users/123?existing=param" + + See `current_path/2` to override the default parameters. + + The path is normalized based on the `conn.script_name` and + `conn.path_info`. For example, "/foo//bar/" will become "/foo/bar". + If you want the original path, use `conn.request_path` instead. + """ + def current_path(%Plug.Conn{query_string: ""} = conn) do + normalized_request_path(conn) + end + + def current_path(%Plug.Conn{query_string: query_string} = conn) do + normalized_request_path(conn) <> "?" <> query_string + end + + @doc """ + Returns the current path with the given query parameters. + + You may also retrieve only the request path by passing an + empty map of params. + + ## Examples + + iex> current_path(conn) + "/users/123?existing=param" + + iex> current_path(conn, %{new: "param"}) + "/users/123?new=param" + + iex> current_path(conn, %{filter: %{status: ["draft", "published"]}}) + "/users/123?filter[status][]=draft&filter[status][]=published" + + iex> current_path(conn, %{}) + "/users/123" + + The path is normalized based on the `conn.script_name` and + `conn.path_info`. For example, "/foo//bar/" will become "/foo/bar". + If you want the original path, use `conn.request_path` instead. + """ + def current_path(%Plug.Conn{} = conn, params) when params == %{} do + normalized_request_path(conn) + end + + def current_path(%Plug.Conn{} = conn, params) do + normalized_request_path(conn) <> "?" 
<> Plug.Conn.Query.encode(params) + end + + defp normalized_request_path(%{path_info: info, script_name: script}) do + "/" <> Enum.join(script ++ info, "/") + end + + @doc """ + Returns the current request url with its default query parameters: + + iex> current_url(conn) + "https://www.example.com/users/123?existing=param" + + See `current_url/2` to override the default parameters. + """ + def current_url(%Plug.Conn{} = conn) do + Phoenix.VerifiedRoutes.unverified_url(conn, current_path(conn)) + end + + @doc ~S""" + Returns the current request URL with query params. + + The path will be retrieved from the currently requested path via + `current_path/1`. The scheme, host and others will be received from + the URL configuration in your Phoenix endpoint. The reason we don't + use the host and scheme information in the request is because most + applications are behind proxies and the host and scheme may not + actually reflect the host and scheme accessed by the client. If you + want to access the url precisely as requested by the client, see + `Plug.Conn.request_url/1`. + + ## Examples + + iex> current_url(conn) + "https://www.example.com/users/123?existing=param" + + iex> current_url(conn, %{new: "param"}) + "https://www.example.com/users/123?new=param" + + iex> current_url(conn, %{}) + "https://www.example.com/users/123" + + ## Custom URL Generation + + In some cases, you'll need to generate a request's URL, but using a + different scheme, different host, etc. This can be accomplished in + two ways. + + If you want to do so in a case-by-case basis, you can define a custom + function that gets the endpoint URI configuration and changes it accordingly. 
+ For example, to get the current URL always in HTTPS format: + + def current_secure_url(conn, params \\ %{}) do + current_uri = MyAppWeb.Endpoint.struct_url() + current_path = Phoenix.Controller.current_path(conn, params) + Phoenix.VerifiedRoutes.unverified_url(%URI{current_uri | scheme: "https"}, current_path) + end + + However, if you want all generated URLs to always have a certain schema, + host, etc, you may use `put_router_url/2`. + """ + def current_url(%Plug.Conn{} = conn, %{} = params) do + Phoenix.VerifiedRoutes.unverified_url(conn, current_path(conn, params)) + end + + @doc """ + Assigns multiple key-value pairs to the connection. + Accepts a keyword list, a map, or a single-argument function. + + This function accepts a map or keyword list of assigns and merges them into + the connection's assigns. It is equivalent to calling `Plug.Conn.assign/3` + multiple times. + + If a function is given, it takes the current assigns as an argument and its return + value will be merged into the current assigns. 
+ + ## Examples + + assign(conn, name: "Alice", role: :admin) + assign(conn, %{name: "Alice", role: :admin}) + assign(conn, fn %{name: name, logo: logo} -> %{title: Enum.join([name, logo], " | ")} end) + """ + def assign(conn, keyword_or_map_or_fun) + + def assign(conn, fun) when is_function(fun, 1) do + assign(conn, fun.(conn.assigns)) + end + + defdelegate assign(conn, assigns), to: Plug.Conn, as: :merge_assigns + + @doc false + def __plugs__(controller_module, opts) do + if Keyword.get(opts, :put_default_views, true) do + base = Phoenix.Naming.unsuffix(controller_module, "Controller") + + view = + case Keyword.fetch(opts, :formats) do + {:ok, formats} when is_list(formats) -> + Enum.map(formats, fn + format when is_atom(format) -> + {format, :"#{base}#{String.upcase(to_string(format))}"} + + {format, suffix} -> + {format, :"#{base}#{suffix}"} + end) + + :error -> + IO.warn( + """ + use #{inspect(controller_module)} must receive the :formats option with \ + the formats you intend to render. To keep compatibility within your app, \ + you can list it as: + + formats: [html: "View", json: "View", ...] + + Listing all formats your application renders. 
+ """, + [] + ) + + :"#{base}View" + end + + layouts = + case Keyword.fetch(opts, :layouts) do + {:ok, formats} when is_list(formats) -> + # TODO: Deprecate passing :layouts altogether in Phoenix v1.9, + # use Phoenix.Controller should only set views + Enum.map(formats, fn + {format, mod} when is_atom(mod) -> + {format, {mod, :app}} + + {format, {mod, template}} when is_atom(mod) and is_atom(template) -> + {format, {mod, template}} + + other -> + raise ArgumentError, """ + expected :layouts to be a list of format module pairs of the form: [html: DemoWeb.Layouts] or [html: {DemoWeb.Layouts, :app}] + + Got: #{inspect(other)} + """ + end) + + :error -> + cond do + namespace = Keyword.get(opts, :namespace) -> + layout = Module.concat(namespace, "LayoutView") + + IO.warn( + """ + the :namespace option given to #{inspect(controller_module)} is deprecated. + Set "plug :put_layout, html: #{inspect(layout)}" instead\ + """, + [] + ) + + {layout, :app} + + Keyword.has_key?(opts, :formats) -> + [] + + true -> + layout = + controller_module + |> Atom.to_string() + |> String.split(".") + |> Enum.drop(-1) + |> Enum.take(2) + |> Kernel.++(["LayoutView"]) + |> Module.concat() + + {layout, :app} + end + end + + {layouts, view} + else + IO.warn( + """ + the :put_default_views option given to #{inspect(controller_module)} is deprecated. 
+ Set formats: [] instead\ + """, + [] + ) + + false + end + end +end diff --git a/deps/phoenix/lib/phoenix/controller/pipeline.ex b/deps/phoenix/lib/phoenix/controller/pipeline.ex new file mode 100644 index 0000000..a99f921 --- /dev/null +++ b/deps/phoenix/lib/phoenix/controller/pipeline.ex @@ -0,0 +1,220 @@ +defmodule Phoenix.Controller.Pipeline do + @moduledoc false + + @doc false + defmacro __using__(_) do + quote do + @behaviour Plug + + require Phoenix.Endpoint + import Phoenix.Controller.Pipeline + + Module.register_attribute(__MODULE__, :plugs, accumulate: true) + @before_compile Phoenix.Controller.Pipeline + @phoenix_fallback :unregistered + + @doc false + def init(opts), do: opts + + @doc false + def call(conn, action) when is_atom(action) do + conn + |> merge_private( + phoenix_controller: __MODULE__, + phoenix_action: action + ) + |> phoenix_controller_pipeline(action) + end + + @doc false + def action(%Plug.Conn{private: %{phoenix_action: action}} = conn, _options) do + apply(__MODULE__, action, [conn, conn.params]) + end + + defoverridable init: 1, call: 2, action: 2 + end + end + + @doc false + def __action_fallback__(plug, caller) do + plug = Macro.expand(plug, %{caller | function: {:init, 1}}) + quote bind_quoted: [plug: plug] do + @phoenix_fallback Phoenix.Controller.Pipeline.validate_fallback( + plug, + __MODULE__, + Module.get_attribute(__MODULE__, :phoenix_fallback) + ) + end + end + + @doc false + def validate_fallback(plug, module, fallback) do + cond do + fallback == nil -> + raise """ + action_fallback can only be called when using Phoenix.Controller. + Add `use Phoenix.Controller` to #{inspect(module)} + """ + + fallback != :unregistered -> + raise "action_fallback can only be called a single time per controller." + + not is_atom(plug) -> + raise ArgumentError, + "expected action_fallback to be a module or function plug, got #{inspect(plug)}" + + fallback == :unregistered -> + case Atom.to_charlist(plug) do + ~c"Elixir." 
++ _ -> {:module, plug} + _ -> {:function, plug} + end + end + end + + @doc false + defmacro __before_compile__(env) do + action = {:action, [], true} + plugs = [action | Module.get_attribute(env.module, :plugs)] + + {conn, body} = + Plug.Builder.compile(env, plugs, + log_on_halt: :debug, + init_mode: Phoenix.plug_init_mode() + ) + + fallback_ast = + env.module + |> Module.get_attribute(:phoenix_fallback) + |> build_fallback() + + quote do + defoverridable action: 2 + + def action(var!(conn_before), opts) do + try do + var!(conn_after) = super(var!(conn_before), opts) + unquote(fallback_ast) + catch + :error, reason -> + Phoenix.Controller.Pipeline.__catch__( + var!(conn_before), + reason, + __MODULE__, + var!(conn_before).private.phoenix_action, + __STACKTRACE__ + ) + end + end + + defp phoenix_controller_pipeline(unquote(conn), var!(action)) do + var!(conn) = unquote(conn) + var!(controller) = __MODULE__ + _ = var!(conn) + _ = var!(controller) + _ = var!(action) + + unquote(body) + end + end + end + + defp build_fallback(:unregistered) do + quote do: var!(conn_after) + end + + defp build_fallback({:module, plug}) do + quote bind_quoted: binding() do + case var!(conn_after) do + %Plug.Conn{} = conn_after -> conn_after + val -> plug.call(var!(conn_before), plug.init(val)) + end + end + end + + defp build_fallback({:function, plug}) do + quote do + case var!(conn_after) do + %Plug.Conn{} = conn_after -> conn_after + val -> unquote(plug)(var!(conn_before), val) + end + end + end + + @doc false + def __catch__( + %Plug.Conn{}, + :function_clause, + controller, + action, + [{controller, action, [%Plug.Conn{} | _] = action_args, _loc} | _] = stack + ) do + args = [module: controller, function: action, arity: length(action_args), args: action_args] + reraise Phoenix.ActionClauseError, args, stack + end + + def __catch__(%Plug.Conn{} = conn, reason, _controller, _action, stack) do + Plug.Conn.WrapperError.reraise(conn, :error, reason, stack) + end + + @doc """ + Stores a 
plug to be executed as part of the plug pipeline. + """ + defmacro plug(plug) + + defmacro plug({:when, _, [plug, guards]}), do: plug(plug, [], guards, __CALLER__) + + defmacro plug(plug), do: plug(plug, [], true, __CALLER__) + + @doc """ + Stores a plug with the given options to be executed as part of + the plug pipeline. + """ + defmacro plug(plug, opts) + + defmacro plug(plug, {:when, _, [opts, guards]}), do: plug(plug, opts, guards, __CALLER__) + + defmacro plug(plug, opts), do: plug(plug, opts, true, __CALLER__) + + defp plug(plug, opts, guards, caller) do + runtime? = Phoenix.plug_init_mode() == :runtime + + plug = + if runtime? do + expand_alias(plug, caller) + else + plug + end + + opts = + if runtime? and Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, caller)) + else + opts + end + + quote do + @plugs {unquote(plug), unquote(opts), unquote(escape_guards(guards))} + end + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:init, 1}}) + + defp expand_alias(other, _env), do: other + + defp escape_guards({pre_expanded, _, [_ | _]} = node) + when pre_expanded in [:@, :__aliases__], + do: node + + defp escape_guards({left, meta, right}), + do: {:{}, [], [escape_guards(left), meta, escape_guards(right)]} + + defp escape_guards({left, right}), + do: {escape_guards(left), escape_guards(right)} + + defp escape_guards([_ | _] = list), + do: Enum.map(list, &escape_guards/1) + + defp escape_guards(node), + do: node +end diff --git a/deps/phoenix/lib/phoenix/debug.ex b/deps/phoenix/lib/phoenix/debug.ex new file mode 100644 index 0000000..4f86ae6 --- /dev/null +++ b/deps/phoenix/lib/phoenix/debug.ex @@ -0,0 +1,168 @@ +defmodule Phoenix.Debug do + @moduledoc """ + Functions for runtime introspection and debugging of Phoenix applications. + + This module provides utilities for inspecting and debugging Phoenix applications. 
+ At the moment, it only includes functions related to `Phoenix.Socket` and `Phoenix.Channel` + processes. + + It allows you to: + + * List all currently connected `Phoenix.Socket` transport processes. + * List all channels for a given `Phoenix.Socket` process. + * Get the socket of a channel process. + * Check if a process is a `Phoenix.Socket` or `Phoenix.Channel`. + + """ + + @doc """ + Returns a list of all currently connected `Phoenix.Socket` transport processes. + + Note that custom sockets implementing the `Phoenix.Socket.Transport` behaviour + are not listed. + + Each process corresponds to one connection that can have multiple channels. + + For example, when using Phoenix LiveView, the browser establishes a socket + connection when initially navigating to the page, and each live navigation + retains the same socket connection. Nested LiveViews also share the same + connection, each being a different channel. See `Phoenix.Debug.channels/1`. + + ## Examples + + iex> Phoenix.Debug.list_sockets() + [%{pid: #PID<0.123.0>, module: Phoenix.LiveView.Socket, id: nil}] + + """ + def list_sockets do + for pid <- Process.list(), dict = socket_process_dict(pid), not is_nil(dict) do + {Phoenix.Socket, mod, id} = keyfind(dict, :"$process_label") + %{pid: pid, module: mod, id: id} + end + end + + defp keyfind(list, key) do + case List.keyfind(list, key, 0) do + {^key, value} -> value + _ -> nil + end + end + + defp socket_process_dict(pid) do + # Phoenix.Socket sets the "$process_label" to {Phoenix.Socket, handler_module, id} + with info when is_list(info) <- Process.info(pid, [:dictionary]), + dictionary when not is_nil(dictionary) <- keyfind(info, :dictionary), + label when not is_nil(label) <- keyfind(dictionary, :"$process_label"), + {Phoenix.Socket, mod, id} when is_atom(mod) and (is_binary(id) or is_nil(id)) <- label do + dictionary + else + _ -> nil + end + end + + @doc """ + Returns true if the given pid is a `Phoenix.Socket` transport process. 
+ + It returns `false` for custom sockets implementing the `Phoenix.Socket.Transport` behaviour. + + ## Examples + + iex> Phoenix.Debug.list_sockets() |> Enum.at(0) |> Map.fetch!(:pid) |> socket_process?() + true + + iex> socket_process?(pid(0,456,0)) + false + + """ + def socket_process?(pid) do + not is_nil(socket_process_dict(pid)) + end + + @doc """ + Checks if the given pid is a `Phoenix.Channel` process. + + Note: this function returns false for [custom channels](https://hexdocs.pm/phoenix/Phoenix.Socket.html#module-custom-channels). + """ + def channel_process?(pid) do + # Phoenix.Channel sets the "$process_label" to {Phoenix.Socket, handler_module, id} + with info when is_list(info) <- Process.info(pid, [:dictionary]), + dictionary when not is_nil(dictionary) <- keyfind(info, :dictionary), + label when not is_nil(label) <- keyfind(dictionary, :"$process_label"), + {Phoenix.Channel, mod, topic} when is_atom(mod) and is_binary(topic) <- label do + true + else + _ -> false + end + end + + @doc """ + Returns a list of all currently connected channels for the given `Phoenix.Socket` pid. + + Each channel is represented as a map with the following keys: + + - `:pid` - the pid of the channel process + - `:status` - the status of the channel + - `:topic` - the topic of the channel + + Note that this list also contains [custom channels](https://hexdocs.pm/phoenix/Phoenix.Socket.html#module-custom-channels) + like LiveViews. You can check if a channel is a custom channel by using the `channel?/1` + function, which returns `false` for custom channels. 
+ + ## Examples + + iex> pid = Phoenix.Debug.list_sockets() |> Enum.at(0) |> Map.fetch!(:pid) + iex> Phoenix.Debug.list_channels(pid) + {:ok, + [ + %{pid: #PID<0.1702.0>, status: :joined, topic: "lv:phx-GDp9a9UZPiTxcgnE"}, + %{pid: #PID<0.1727.0>, status: :joined, topic: "lv:sidebar"} + ]} + + iex> Phoenix.Debug.list_channels(pid(0,456,0)) + {:error, :not_alive} + + """ + def list_channels(socket_pid) do + ref = make_ref() + + if Process.alive?(socket_pid) and socket_process?(socket_pid) do + send(socket_pid, {:debug_channels, ref, self()}) + + receive do + {:debug_channels, ^ref, channels} -> + {:ok, channels} + after + 5_000 -> {:error, :timeout} + end + else + {:error, :not_alive} + end + end + + @doc """ + Returns the socket of the channel process. + + Note: this only works for channels defined with `use Phoenix.Channel`. + For LiveViews, use the functions defined in `Phoenix.LiveView.Debug` instead. + + ## Examples + + iex> pid = Phoenix.Debug.list_sockets() |> Enum.at(0) |> Map.fetch!(:pid) + iex> {:ok, channels} = Phoenix.Debug.list_channels(pid) + iex> channels |> Enum.at(0) |> Map.fetch!(:pid) |> socket() + {:ok, %Phoenix.Socket{...}} + + iex> socket(pid(0,456,0)) + {:error, :not_alive_or_not_a_channel} + + """ + def socket(channel_pid) do + if channel_process?(channel_pid) do + {:ok, Phoenix.Channel.Server.socket(channel_pid)} + else + {:error, :not_alive_or_not_a_channel} + end + catch + :exit, _ -> {:error, :not_alive_or_not_a_channel} + end +end diff --git a/deps/phoenix/lib/phoenix/digester.ex b/deps/phoenix/lib/phoenix/digester.ex new file mode 100644 index 0000000..f5222bf --- /dev/null +++ b/deps/phoenix/lib/phoenix/digester.ex @@ -0,0 +1,416 @@ +defmodule Phoenix.Digester do + @manifest_version 1 + @empty_manifest %{ + "version" => @manifest_version, + "digests" => %{}, + "latest" => %{} + } + + defp now() do + :calendar.datetime_to_gregorian_seconds(:calendar.universal_time()) + end + + @moduledoc false + + @doc """ + Digests and compresses the 
static files in the given `input_path` + and saves them in the given `output_path`. + """ + @spec compile(String.t(), String.t(), boolean()) :: :ok | {:error, :invalid_path} + def compile(input_path, output_path, with_vsn?) do + if File.exists?(input_path) do + File.mkdir_p!(output_path) + + files = filter_files(input_path) + files = fixup_sourcemaps(files) + latest = generate_latest(files) + digests = load_compile_digests(output_path) + digested_files = Enum.map(files, &digested_contents(&1, latest, with_vsn?)) + + save_manifest(digested_files, latest, digests, output_path) + + digested_files + |> Task.async_stream(&write_to_disk(&1, output_path), ordered: false, timeout: :infinity) + |> Stream.run() + else + {:error, :invalid_path} + end + end + + defp filter_files(input_path) do + input_path + |> Path.join("**") + |> Path.wildcard() + |> Enum.filter(&(not (File.dir?(&1) or compiled_file?(&1)))) + |> Enum.map(&map_file(&1, input_path)) + end + + defp fixup_sourcemaps(files) when is_list(files) do + Enum.map(files, &maybe_fixup_sourcemap(&1, files)) + end + + defp maybe_fixup_sourcemap(sourcemap, files) do + if Path.extname(sourcemap.filename) == ".map" do + fixup_sourcemap(sourcemap, files) + else + sourcemap + end + end + + defp fixup_sourcemap(%{} = sourcemap, files) do + asset_path = Path.rootname(sourcemap.absolute_path, ".map") + asset = Enum.find(files, fn file -> file.absolute_path == asset_path end) + + if asset do + new_digested_filename = asset.digested_filename <> ".map" + %{sourcemap | digest: asset.digest, digested_filename: new_digested_filename} + else + sourcemap + end + end + + defp generate_latest(files) do + Map.new( + files, + &{ + manifest_join(&1.relative_path, &1.filename), + manifest_join(&1.relative_path, &1.digested_filename) + } + ) + end + + defp load_compile_digests(output_path) do + manifest = load_manifest(output_path) + manifest["digests"] + end + + defp load_manifest(output_path) do + manifest_path = Path.join(output_path, 
"cache_manifest.json") + + if File.exists?(manifest_path) do + manifest_path + |> File.read!() + |> Phoenix.json_library().decode!() + |> migrate_manifest(output_path) + else + @empty_manifest + end + end + + defp migrate_manifest(%{"version" => @manifest_version} = manifest, _output_path), do: manifest + defp migrate_manifest(_latest, _output_path), do: @empty_manifest + + defp save_manifest(files, latest, old_digests, output_path) do + old_digests_that_still_exist = + old_digests + |> Enum.filter(fn {file, _} -> File.exists?(Path.join(output_path, file)) end) + |> Map.new() + + digests = Map.merge(old_digests_that_still_exist, generate_digests(files)) + write_manifest(latest, digests, output_path) + end + + @comment "This file was auto-generated by `mix phx.digest`. Remove it and all generated artefacts with `mix phx.digest.clean --all`" + + defp write_manifest(latest, digests, output_path) do + encoder = Phoenix.json_library() + + json = """ + { + "!comment!":#{encoder.encode!(@comment)}, + "version":#{encoder.encode!(@manifest_version)}, + "latest":#{encoder.encode!(latest)}, + "digests":#{encoder.encode!(digests)} + } + """ + + File.write!(Path.join(output_path, "cache_manifest.json"), json) + end + + defp remove_manifest(output_path) do + File.rm(Path.join(output_path, "cache_manifest.json")) + end + + defp generate_digests(files) do + Map.new( + files, + &{ + manifest_join(&1.relative_path, &1.digested_filename), + build_digest(&1) + } + ) + end + + defp build_digest(file) do + %{ + logical_path: manifest_join(file.relative_path, file.filename), + mtime: now(), + size: file.size, + digest: file.digest, + sha512: Base.encode64(:crypto.hash(:sha512, file.digested_content)) + } + end + + defp manifest_join(".", filename), do: filename + defp manifest_join(path, filename), do: Path.join(path, filename) + + defp compiled_file?(file_path) do + digested_file_regex = ~r/(-[a-fA-F\d]{32})/ + + Regex.match?(digested_file_regex, Path.basename(file_path)) || + 
Path.extname(file_path) in compressed_extensions() || + Path.basename(file_path) == "cache_manifest.json" + end + + defp compressed_extensions do + compressors = Application.fetch_env!(:phoenix, :static_compressors) + Enum.flat_map(compressors, & &1.file_extensions()) + end + + defp map_file(file_path, input_path) do + stats = File.stat!(file_path) + content = File.read!(file_path) + + basename = Path.basename(file_path) + rootname = Path.rootname(basename) + extension = Path.extname(basename) + digest = Base.encode16(:erlang.md5(content), case: :lower) + + %{ + absolute_path: file_path, + relative_path: file_path |> Path.relative_to(input_path) |> Path.dirname(), + filename: basename, + size: stats.size, + content: content, + digest: digest, + digested_content: nil, + digested_filename: "#{rootname}-#{digest}#{extension}" + } + end + + defp write_to_disk(file, output_path) do + path = Path.join(output_path, file.relative_path) + File.mkdir_p!(path) + + compressors = Application.fetch_env!(:phoenix, :static_compressors) + + Enum.each(compressors, fn compressor -> + [file_extension | _] = compressor.file_extensions() + + compressed_digested_result = + compressor.compress_file(file.digested_filename, file.digested_content) + + with {:ok, compressed_digested} <- compressed_digested_result do + File.write!( + Path.join(path, file.digested_filename <> file_extension), + compressed_digested + ) + end + + compress_result = + if file.digested_content == file.content, + do: compressed_digested_result, + else: compressor.compress_file(file.filename, file.content) + + with {:ok, compressed} <- compress_result do + File.write!( + Path.join(path, file.filename <> file_extension), + compressed + ) + end + end) + + # uncompressed files + File.write!(Path.join(path, file.digested_filename), file.digested_content) + File.write!(Path.join(path, file.filename), file.content) + + file + end + + defp digested_contents(file, latest, with_vsn?) 
do + ext = Path.extname(file.filename) + + digested_content = + case ext do + ".css" -> digest_stylesheet_asset_references(file, latest, with_vsn?) + ".js" -> digest_javascript_asset_references(file, latest) + ".map" -> digest_javascript_map_asset_references(file, latest) + _ -> file.content + end + + %{file | digested_content: digested_content} + end + + defp digest_stylesheet_asset_references(file, latest, with_vsn?) do + stylesheet_url_regex = ~r{(url\(\s*)(\S+?)(\s*\))} + quoted_text_regex = ~r{\A(['"])(.+)\1\z} + + Regex.replace(stylesheet_url_regex, file.content, fn _, open, url, close -> + case Regex.run(quoted_text_regex, url) do + [_, quote_symbol, url] -> + open <> + quote_symbol <> digested_url(url, file, latest, with_vsn?) <> quote_symbol <> close + + nil -> + open <> digested_url(url, file, latest, with_vsn?) <> close + end + end) + end + + defp digest_javascript_asset_references(file, latest) do + javascript_source_map_regex = ~r{(//#\s*sourceMappingURL=\s*)(\S+)} + + Regex.replace(javascript_source_map_regex, file.content, fn _, source_map_text, url -> + source_map_text <> digested_url(url, file, latest, false) + end) + end + + defp digest_javascript_map_asset_references(file, latest) do + javascript_map_file_regex = ~r{(['"]file['"]:['"])([^,"']+)(['"])} + + Regex.replace(javascript_map_file_regex, file.content, fn _, open_text, url, close_text -> + open_text <> digested_url(url, file, latest, false) <> close_text + end) + end + + defp digested_url("/" <> relative_path, _file, latest, with_vsn?) do + case Map.fetch(latest, relative_path) do + {:ok, digested_path} -> relative_digested_path(digested_path, with_vsn?) + :error -> "/" <> relative_path + end + end + + defp digested_url(url, file, latest, with_vsn?) 
do + case URI.parse(url) do + %URI{scheme: nil, host: nil} -> + manifest_path = + file.relative_path + |> Path.join(url) + |> Path.expand() + |> Path.relative_to_cwd() + + case Map.fetch(latest, manifest_path) do + {:ok, digested_path} -> + absolute_digested_url(url, digested_path, with_vsn?) + + :error -> + url + end + + _ -> + url + end + end + + defp relative_digested_path(digested_path, true), + do: relative_digested_path(digested_path) <> "?vsn=d" + + defp relative_digested_path(digested_path, false), + do: relative_digested_path(digested_path) + + defp relative_digested_path(digested_path), + do: "/" <> digested_path + + defp absolute_digested_url(url, digested_path, true), + do: absolute_digested_url(url, digested_path) <> "?vsn=d" + + defp absolute_digested_url(url, digested_path, false), + do: absolute_digested_url(url, digested_path) + + defp absolute_digested_url(url, digested_path), + do: url |> Path.dirname() |> Path.join(Path.basename(digested_path)) + + @doc """ + Deletes compiled/compressed asset files that are no longer in use based on + the specified criteria. + + ## Arguments + + * `path` - The path where the compiled/compressed files are saved + * `age` - The max age of assets to keep in seconds + * `keep` - The number of old versions to keep + + """ + @spec clean(String.t(), integer, integer, integer) :: :ok | {:error, :invalid_path} + def clean(path, age, keep, now \\ now()) do + if File.exists?(path) do + %{"latest" => latest, "digests" => digests} = load_manifest(path) + files = files_to_clean(latest, digests, now - age, keep) + remove_files(files, path) + write_manifest(latest, Map.drop(digests, files), path) + :ok + else + {:error, :invalid_path} + end + end + + @doc """ + Deletes compiled/compressed asset files, including the cache manifest. 
+ + ## Arguments + + * `path` - The path where the compiled/compressed files are saved + + """ + @spec clean_all(String.t()) :: :ok | {:error, :invalid_path} + def clean_all(path) do + if File.exists?(path) do + %{"digests" => digests} = load_manifest(path) + grouped_digests = group_by_logical_path(digests) + logical_paths = Map.keys(grouped_digests) + + files = + for {_, versions} <- grouped_digests, + file <- Enum.map(versions, fn {path, _attrs} -> path end), + do: file + + remove_files(files, path) + remove_compressed_files(logical_paths, path) + remove_manifest(path) + :ok + else + {:error, :invalid_path} + end + end + + defp files_to_clean(latest, digests, max_age, keep) do + digests = Map.drop(digests, Map.values(latest)) + + for {_, versions} <- group_by_logical_path(digests), + file <- versions_to_clean(versions, max_age, keep), + do: file + end + + defp versions_to_clean(versions, max_age, keep) do + versions + |> Enum.map(fn {path, attrs} -> Map.put(attrs, "path", path) end) + |> Enum.sort_by(& &1["mtime"], &>/2) + |> Enum.with_index(1) + |> Enum.filter(fn {version, index} -> max_age > version["mtime"] || index > keep end) + |> Enum.map(fn {version, _index} -> version["path"] end) + end + + defp group_by_logical_path(digests) do + Enum.group_by(digests, fn {_, attrs} -> attrs["logical_path"] end) + end + + defp remove_files(files, output_path) do + for file <- files do + output_path + |> Path.join(file) + |> File.rm() + + remove_compressed_file(file, output_path) + end + end + + defp remove_compressed_files(files, output_path) do + for file <- files, do: remove_compressed_file(file, output_path) + end + + defp remove_compressed_file(file, output_path) do + compressed_extensions() + |> Enum.map(fn extension -> Path.join(output_path, file <> extension) end) + |> Enum.each(&File.rm/1) + end +end diff --git a/deps/phoenix/lib/phoenix/digester/compressor.ex b/deps/phoenix/lib/phoenix/digester/compressor.ex new file mode 100644 index 0000000..16581db --- 
/dev/null +++ b/deps/phoenix/lib/phoenix/digester/compressor.ex @@ -0,0 +1,44 @@ +defmodule Phoenix.Digester.Compressor do + @moduledoc ~S""" + Defines the `Phoenix.Digester.Compressor` behaviour for + implementing static file compressors. + + A custom compressor expects 2 functions to be implemented. + + By default, Phoenix uses only `Phoenix.Digester.Gzip` to compress + static files, but additional compressors can be defined and added + to the digest process. + + ## Example + + If you wanted to compress files using an external brotli compression + library, you could define a new module implementing the behaviour and add the + module to the list of configured Phoenix static compressors. + + defmodule MyApp.BrotliCompressor do + @behaviour Phoenix.Digester.Compressor + + def compress_file(file_path, content) do + valid_extension = Path.extname(file_path) in Application.fetch_env!(:phoenix, :gzippable_exts) + {:ok, compressed_content} = :brotli.encode(content) + + if valid_extension && byte_size(compressed_content) < byte_size(content) do + {:ok, compressed_content} + else + :error + end + end + + def file_extensions do + [".br"] + end + end + + # config/config.exs + config :phoenix, + static_compressors: [Phoenix.Digester.Gzip, MyApp.BrotliCompressor], + # ... 
+ """ + @callback compress_file(Path.t(), binary()) :: {:ok, binary()} | :error + @callback file_extensions() :: nonempty_list(String.t()) +end diff --git a/deps/phoenix/lib/phoenix/digester/gzip.ex b/deps/phoenix/lib/phoenix/digester/gzip.ex new file mode 100644 index 0000000..6f86b46 --- /dev/null +++ b/deps/phoenix/lib/phoenix/digester/gzip.ex @@ -0,0 +1,18 @@ +defmodule Phoenix.Digester.Gzip do + @moduledoc ~S""" + Gzip compressor for Phoenix.Digester + """ + @behaviour Phoenix.Digester.Compressor + + def compress_file(file_path, content) do + if Path.extname(file_path) in Application.fetch_env!(:phoenix, :gzippable_exts) do + {:ok, :zlib.gzip(content)} + else + :error + end + end + + def file_extensions do + [".gz"] + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint.ex b/deps/phoenix/lib/phoenix/endpoint.ex new file mode 100644 index 0000000..089ef47 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint.ex @@ -0,0 +1,1094 @@ +defmodule Phoenix.Endpoint do + @moduledoc ~S""" + Defines a Phoenix endpoint. + + The endpoint is the boundary where all requests to your + web application start. It is also the interface your + application provides to the underlying web servers. + + Overall, an endpoint has three responsibilities: + + * to provide a wrapper for starting and stopping the + endpoint as part of a supervision tree + + * to define an initial plug pipeline for requests + to pass through + + * to host web specific configuration for your + application + + ## Endpoints + + An endpoint is simply a module defined with the help + of `Phoenix.Endpoint`. If you have used the `mix phx.new` + generator, an endpoint was automatically generated as + part of your application: + + defmodule YourAppWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :your_app + + # plug ... + # plug ... + + plug YourApp.Router + end + + Endpoints must be explicitly started as part of your application + supervision tree. 
Endpoints are added by default + to the supervision tree in generated applications. Endpoints can be + added to the supervision tree as follows: + + children = [ + YourAppWeb.Endpoint + ] + + ## Endpoint configuration + + All endpoints are configured in your application environment. + For example: + + config :your_app, YourAppWeb.Endpoint, + secret_key_base: "kjoy3o1zeidquwy1398juxzldjlksahdk3" + + Endpoint configuration is split into two categories. Compile-time + configuration means the configuration is read during compilation + and changing it at runtime has no effect. The compile-time + configuration is mostly related to error handling. + + Runtime configuration, instead, is accessed during or + after your application is started and can be read through the + `c:config/2` function: + + YourAppWeb.Endpoint.config(:port) + YourAppWeb.Endpoint.config(:some_config, :default_value) + + ### Compile-time configuration + + Compile-time configuration may be set on `config/dev.exs`, `config/prod.exs` + and so on, but has no effect on `config/runtime.exs`: + + * `:code_reloader` - when `true`, enables code reloading functionality. + For the list of code reloader configuration options see + `Phoenix.CodeReloader.reload/1`. Keep in mind code reloading is + based on the file-system, therefore it is not possible to run two + instances of the same app at the same time with code reloading in + development, as they will race each other and only one will effectively + recompile the files. In such cases, tweak your config files so code + reloading is enabled in only one of the apps or set the `MIX_BUILD_PATH` + environment variable to give them distinct build directories + + * `:debug_errors` - when `true`, uses `Plug.Debugger` functionality for + debugging failures in the application. Recommended to be set to `true` + only in development as it allows listing of the application source + code during debugging. 
Defaults to `false` + + * `:force_ssl` - ensures no data is ever sent via HTTP, always redirecting + to HTTPS. It expects a list of options which are forwarded to `Plug.SSL`. + By default it sets the "strict-transport-security" header in HTTPS requests, + forcing browsers to always use HTTPS. If an unsafe request (HTTP) is sent, + it redirects to the HTTPS version using the `:host` specified in the `:url` + configuration. To dynamically redirect to the `host` of the current request, + set `:host` in the `:force_ssl` configuration to `nil` + + ### Runtime configuration + + The configuration below may be set on `config/dev.exs`, `config/prod.exs` + and so on, as well as on `config/runtime.exs`. Typically, if you need to + configure them with system environment variables, you set them in + `config/runtime.exs`. These options may also be set when starting the + endpoint in your supervision tree, such as `{MyApp.Endpoint, options}`. + + * `:adapter` - which webserver adapter to use for serving web requests. + See the "Adapter configuration" section below + + * `:cache_static_manifest` - a path to a json manifest file that contains + static files and their digested version. This is typically set to + "priv/static/cache_manifest.json" which is the file automatically generated + by `mix phx.digest`. It can be either: a string containing a file system path + or a tuple containing the application name and the path within that application. + + * `:cache_static_manifest_latest` - a map of the static files pointing to their + digest version. This is automatically loaded from `cache_static_manifest` on + boot. However, if you have your own static handling mechanism, you may want to + set this value explicitly. This is used by projects such as `LiveView` to + detect if the client is running on the latest version of all assets. + + * `:cache_manifest_skip_vsn` - when true, skips the appended query string + "?vsn=d" when generating paths to static assets. 
This query string is used + by `Plug.Static` to set long expiry dates, therefore, you should set this + option to true only if you are not using `Plug.Static` to serve assets, + for example, if you are using a CDN. If you are setting this option, you + should also consider passing `--no-vsn` to `mix phx.digest`. Defaults to + `false`. + + * `:check_origin` - configure the default `:check_origin` setting for + transports. See `socket/3` for options. Defaults to `true`. + + * `:secret_key_base` - a secret key used as a base to generate secrets + for encrypting and signing data. For example, cookies and tokens + are signed by default, but they may also be encrypted if desired. + Defaults to `nil` as it must be set per application + + * `:server` - when `true`, starts the web server when the endpoint + supervision tree starts. Defaults to `false`. The `mix phx.server` + task automatically sets this to `true` + + * `:url` - configuration for generating URLs throughout the app. + Accepts the `:host`, `:scheme`, `:path` and `:port` options. All + keys except `:path` can be changed at runtime. Defaults to: + + [host: "localhost", path: "/"] + + The `:port` option requires either an integer or string. The `:host` + option requires a string. + + The `:scheme` option accepts `"http"` and `"https"` values. Default value + is inferred from top level `:http` or `:https` option. It is useful + when hosting Phoenix behind a load balancer or reverse proxy and + terminating SSL there. + + The `:path` option can be used to override root path. Useful when hosting + Phoenix behind a reverse proxy with URL rewrite rules + + * `:static_url` - configuration for generating URLs for static files. + It will fallback to `url` if no option is provided. Accepts the same + options as `url` + + * `:watchers` - a set of watchers to run alongside your server. It + expects a list of tuples containing the executable and its arguments. 
+ Watchers are guaranteed to run in the application directory, but only + when the server is enabled (unless `:force_watchers` configuration is + set to `true`). For example, the watcher below will run the "watch" mode + of the webpack build tool when the server starts. You can configure it + to whatever build tool or command you want: + + [ + node: [ + "node_modules/webpack/bin/webpack.js", + "--mode", + "development", + "--watch", + "--watch-options-stdin" + ] + ] + + The `:cd` and `:env` options can be given at the end of the list to customize + the watcher: + + [node: [..., cd: "assets", env: [{"TAILWIND_MODE", "watch"}]]] + + A watcher can also be a module-function-args tuple that will be invoked accordingly: + + [another: {Mod, :fun, [arg1, arg2]}] + + When `false`, watchers can be disabled. + + * `:force_watchers` - when `true`, forces your watchers to start + even when the `:server` option is set to `false`. + + * `:live_reload` - configuration for the live reload option. + Configuration requires a `:patterns` option which should be a list of + file patterns to watch. When these files change, it will trigger a reload. + + live_reload: [ + url: "ws://localhost:4000", + patterns: [ + ~r"priv/static/(?!uploads/).*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"lib/app_web/(live|views)/.*(ex)$", + ~r"lib/app_web/templates/.*(eex)$" + ] + ] + + * `:pubsub_server` - the name of the pubsub server to use in channels + and via the Endpoint broadcast functions. The PubSub server is typically + started in your supervision tree. + + * `:render_errors` - responsible for rendering templates whenever there + is a failure in the application. For example, if the application crashes + with a 500 error during a HTML request, `render("500.html", assigns)` + will be called in the view given to `:render_errors`. + A `:formats` list can be provided to specify a module per format to handle + error rendering. 
Example: + + [formats: [html: MyApp.ErrorHTML], layout: false, log: :debug] + + * `:log_access_url` - log the access url once the server boots + + Note that you can also store your own configurations in the Phoenix.Endpoint. + For example, [Phoenix LiveView](https://hexdocs.pm/phoenix_live_view) expects + its own configuration under the `:live_view` key. In such cases, you should + consult the documentation of the respective projects. + + ### Adapter configuration + + Phoenix allows you to choose which webserver adapter to use. Newly generated + applications created via the `phx.new` Mix task use the + [`Bandit`](https://github.com/mtrudel/bandit) webserver via the + `Bandit.PhoenixAdapter` adapter. If not otherwise specified via the `adapter` + option Phoenix will fall back to the `Phoenix.Endpoint.Cowboy2Adapter` for + backwards compatibility with applications generated prior to Phoenix 1.7.8. + + Both adapters can be configured in a similar manner using the following two + top-level options: + + * `:http` - the configuration for the HTTP server. It accepts all options + as defined by either [`Bandit`](https://hexdocs.pm/bandit/Bandit.html#t:options/0) + or [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/) depending on your + choice of adapter. Defaults to `false` + + * `:https` - the configuration for the HTTPS server. It accepts all options + as defined by either [`Bandit`](https://hexdocs.pm/bandit/Bandit.html#t:options/0) + or [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/) depending on your + choice of adapter. Defaults to `false` + + In addition, the connection draining can be configured for the Cowboy webserver via the following + top-level option (this is not required for Bandit as it has connection draining built-in): + + * `:drainer` - a drainer process waits for any on-going request to finish + during application shutdown. It accepts the `:shutdown` and + `:check_interval` options as defined by `Plug.Cowboy.Drainer`. 
+ Note the draining does not terminate any existing connection, it simply + waits for them to finish. Socket connections run their own drainer + before this one is invoked. That's because sockets are stateful and + can be gracefully notified, which allows us to stagger them over a + longer period of time. See the documentation for `socket/3` for more + information + + ## Endpoint API + + In the previous section, we have used the `c:config/2` function that is + automatically generated in your endpoint. Here's a list of all the functions + that are automatically defined in your endpoint: + + * for handling paths and URLs: `c:struct_url/0`, `c:url/0`, `c:path/1`, + `c:static_url/0`,`c:static_path/1`, and `c:static_integrity/1` + + * for gathering runtime information about the address and port the + endpoint is running on: `c:server_info/1` + + * for broadcasting to channels: `c:broadcast/3`, `c:broadcast!/3`, + `c:broadcast_from/4`, `c:broadcast_from!/4`, `c:local_broadcast/3`, + and `c:local_broadcast_from/4` + + * for configuration: `c:start_link/1`, `c:config/2`, and `c:config_change/2` + + * as required by the `Plug` behaviour: `c:Plug.init/1` and `c:Plug.call/2` + + """ + + @type topic :: String.t() + @type event :: String.t() + @type msg :: map | {:binary, binary} + + # Configuration + + @doc """ + Starts the endpoint supervision tree. + + Starts endpoint's configuration cache and possibly the servers for + handling requests. + """ + @callback start_link(keyword) :: Supervisor.on_start() + + @doc """ + Access the endpoint configuration given by key. + """ + @callback config(key :: atom, default :: term) :: term + + @doc """ + Reload the endpoint configuration on application upgrades. + """ + @callback config_change(changed :: term, removed :: term) :: term + + # Paths and URLs + + @doc """ + Generates the endpoint base URL, but as a `URI` struct. 
+ """ + @callback struct_url() :: URI.t() + + @doc """ + Generates the endpoint base URL without any path information. + """ + @callback url() :: String.t() + + @doc """ + Generates the path information when routing to this endpoint. + """ + @callback path(path :: String.t()) :: String.t() + + @doc """ + Generates the static URL without any path information. + """ + @callback static_url() :: String.t() + + @doc """ + Generates a route to a static file in `priv/static`. + """ + @callback static_path(path :: String.t()) :: String.t() + + @doc """ + Generates an integrity hash to a static file in `priv/static`. + """ + @callback static_integrity(path :: String.t()) :: String.t() | nil + + @doc """ + Generates a two item tuple containing the `static_path` and `static_integrity`. + """ + @callback static_lookup(path :: String.t()) :: {String.t(), String.t()} | {String.t(), nil} + + @doc """ + Returns the script name from the :url configuration. + """ + @callback script_name() :: [String.t()] + + @doc """ + Returns the host from the :url configuration. + """ + @callback host() :: String.t() + + # Server information + + @doc """ + Returns the address and port that the server is running on + """ + @callback server_info(Plug.Conn.scheme()) :: + {:ok, {:inet.ip_address(), :inet.port_number()} | :inet.returned_non_ip_address()} + | {:error, term()} + + # Channels + + @doc """ + Subscribes the caller to the given topic. + + See `Phoenix.PubSub.subscribe/3` for options. + """ + @callback subscribe(topic, opts :: Keyword.t()) :: :ok | {:error, term} + + @doc """ + Unsubscribes the caller from the given topic. + """ + @callback unsubscribe(topic) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` to all nodes. + """ + @callback broadcast(topic, event, msg) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` to all nodes. + + Raises in case of failures. 
+ """ + @callback broadcast!(topic, event, msg) :: :ok + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes. + """ + @callback broadcast_from(from :: pid, topic, event, msg) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes. + + Raises in case of failures. + """ + @callback broadcast_from!(from :: pid, topic, event, msg) :: :ok + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` within the current node. + """ + @callback local_broadcast(topic, event, msg) :: :ok + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` within the current node. + """ + @callback local_broadcast_from(from :: pid, topic, event, msg) :: :ok + + @doc false + defmacro __using__(opts) do + quote do + @behaviour Phoenix.Endpoint + + unquote(config(opts)) + unquote(pubsub()) + unquote(plug()) + unquote(server()) + end + end + + defp config(opts) do + quote do + @otp_app unquote(opts)[:otp_app] || raise("endpoint expects :otp_app to be given") + + # Compile-time configuration checking + # This ensures that if a compile-time configuration is overwritten at runtime the application won't boot. + var!(code_reloading?) = + Application.compile_env(@otp_app, [__MODULE__, :code_reloader], false) + + var!(debug_errors?) = Application.compile_env(@otp_app, [__MODULE__, :debug_errors], false) + var!(force_ssl) = Application.compile_env(@otp_app, [__MODULE__, :force_ssl]) + + # Avoid unused variable warnings + _ = var!(code_reloading?) 
+ _ = var!(force_ssl) + end + end + + defp pubsub() do + quote generated: true do + def subscribe(topic, opts \\ []) when is_binary(topic) do + Phoenix.PubSub.subscribe(pubsub_server!(), topic, opts) + end + + def unsubscribe(topic) do + Phoenix.PubSub.unsubscribe(pubsub_server!(), topic) + end + + def broadcast_from(from, topic, event, msg) do + Phoenix.Channel.Server.broadcast_from(pubsub_server!(), from, topic, event, msg) + end + + def broadcast_from!(from, topic, event, msg) do + Phoenix.Channel.Server.broadcast_from!(pubsub_server!(), from, topic, event, msg) + end + + def broadcast(topic, event, msg) do + Phoenix.Channel.Server.broadcast(pubsub_server!(), topic, event, msg) + end + + def broadcast!(topic, event, msg) do + Phoenix.Channel.Server.broadcast!(pubsub_server!(), topic, event, msg) + end + + def local_broadcast(topic, event, msg) do + Phoenix.Channel.Server.local_broadcast(pubsub_server!(), topic, event, msg) + end + + def local_broadcast_from(from, topic, event, msg) do + Phoenix.Channel.Server.local_broadcast_from(pubsub_server!(), from, topic, event, msg) + end + + defp pubsub_server! do + config(:pubsub_server) || + raise ArgumentError, "no :pubsub_server configured for #{inspect(__MODULE__)}" + end + end + end + + defp plug() do + quote location: :keep do + use Plug.Builder, init_mode: Phoenix.plug_init_mode() + import Phoenix.Endpoint + + Module.register_attribute(__MODULE__, :phoenix_sockets, accumulate: true) + + if force_ssl = Phoenix.Endpoint.__force_ssl__(__MODULE__, var!(force_ssl)) do + plug Plug.SSL, force_ssl + end + + if var!(debug_errors?) 
do + logo = + "data:image/svg+xml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgNzEgNDgiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgoJPHBhdGggZD0ibTI2LjM3MSAzMy40NzctLjU1Mi0uMWMtMy45Mi0uNzI5LTYuMzk3LTMuMS03LjU3LTYuODI5LS43MzMtMi4zMjQuNTk3LTQuMDM1IDMuMDM1LTQuMTQ4IDEuOTk1LS4wOTIgMy4zNjIgMS4wNTUgNC41NyAyLjM5IDEuNTU3IDEuNzIgMi45ODQgMy41NTggNC41MTQgNS4zMDUgMi4yMDIgMi41MTUgNC43OTcgNC4xMzQgOC4zNDcgMy42MzQgMy4xODMtLjQ0OCA1Ljk1OC0xLjcyNSA4LjM3MS0zLjgyOC4zNjMtLjMxNi43NjEtLjU5MiAxLjE0NC0uODg2bC0uMjQxLS4yODRjLTIuMDI3LjYzLTQuMDkzLjg0MS02LjIwNS43MzUtMy4xOTUtLjE2LTYuMjQtLjgyOC04Ljk2NC0yLjU4Mi0yLjQ4Ni0xLjYwMS00LjMxOS0zLjc0Ni01LjE5LTYuNjExLS43MDQtMi4zMTUuNzM2LTMuOTM0IDMuMTM1LTMuNi45NDguMTMzIDEuNzQ2LjU2IDIuNDYzIDEuMTY1LjU4My40OTMgMS4xNDMgMS4wMTUgMS43MzggMS40OTMgMi44IDIuMjUgNi43MTIgMi4zNzUgMTAuMjY1LS4wNjgtNS44NDItLjAyNi05LjgxNy0zLjI0LTEzLjMwOC03LjMxMy0xLjM2Ni0xLjU5NC0yLjctMy4yMTYtNC4wOTUtNC43ODUtMi42OTgtMy4wMzYtNS42OTItNS43MS05Ljc5LTYuNjIzQzEyLjgtLjYyMyA3Ljc0NS4xNCAyLjg5MyAyLjM2MSAxLjkyNiAyLjgwNC45OTcgMy4zMTkgMCA0LjE0OWMuNDk0IDAgLjc2My4wMDYgMS4wMzIgMCAyLjQ0Ni0uMDY0IDQuMjggMS4wMjMgNS42MDIgMy4wMjQuOTYyIDEuNDU3IDEuNDE1IDMuMTA0IDEuNzYxIDQuNzk4LjUxMyAyLjUxNS4yNDcgNS4wNzguNTQ0IDcuNjA1Ljc2MSA2LjQ5NCA0LjA4IDExLjAyNiAxMC4yNiAxMy4zNDYgMi4yNjcuODUyIDQuNTkxIDEuMTM1IDcuMTcyLjU1NVpNMTAuNzUxIDMuODUyYy0uOTc2LjI0Ni0xLjc1Ni0uMTQ4LTIuNTYtLjk2MiAxLjM3Ny0uMzQzIDIuNTkyLS40NzYgMy44OTctLjUyOC0uMTA3Ljg0OC0uNjA3IDEuMzA2LTEuMzM2IDEuNDlabTMyLjAwMiAzNy45MjRjLS4wODUtLjYyNi0uNjItLjkwMS0xLjA0LTEuMjI4LTEuODU3LTEuNDQ2LTQuMDMtMS45NTgtNi4zMzMtMi0xLjM3NS0uMDI2LTIuNzM1LS4xMjgtNC4wMzEtLjYxLS41OTUtLjIyLTEuMjYtLjUwNS0xLjI0NC0xLjI3Mi4wMTUtLjc4LjY5My0xIDEuMzEtMS4xODQuNTA1LS4xNSAxLjAyNi0uMjQ3IDEuNi0uMzgyLTEuNDYtLjkzNi0yLjg4Ni0xLjA2NS00Ljc4Ny0uMy0yLjk5MyAxLjIwMi01Ljk0MyAxLjA2LTguOTI2LS4wMTctMS42ODQtLjYwOC0zLjE3OS0xLjU2My00LjczNS0yLjQwOGwtLjA0My4wM2EyLjk2IDIuOTYgMCAwIDAgLjA0LS4wMjljLS4wMzgtLjExNy0uMTA3LS4xMi0uMTk3LS4wNTRsLjEyMi4xMDdjMS4yOSAyLjExNSAzLjAzNCAzLjgxNyA1LjAwNCA1LjI3MSAzLjc
5MyAyLjggNy45MzYgNC40NzEgMTIuNzg0IDMuNzNBNjYuNzE0IDY2LjcxNCAwIDAgMSAzNyA0MC44NzdjMS45OC0uMTYgMy44NjYuMzk4IDUuNzUzLjg5OVptLTkuMTQtMzAuMzQ1Yy0uMTA1LS4wNzYtLjIwNi0uMjY2LS40Mi0uMDY5IDEuNzQ1IDIuMzYgMy45ODUgNC4wOTggNi42ODMgNS4xOTMgNC4zNTQgMS43NjcgOC43NzMgMi4wNyAxMy4yOTMuNTEgMy41MS0xLjIxIDYuMDMzLS4wMjggNy4zNDMgMy4zOC4xOS0zLjk1NS0yLjEzNy02LjgzNy01Ljg0My03LjQwMS0yLjA4NC0uMzE4LTQuMDEuMzczLTUuOTYyLjk0LTUuNDM0IDEuNTc1LTEwLjQ4NS43OTgtMTUuMDk0LTIuNTUzWm0yNy4wODUgMTUuNDI1Yy43MDguMDU5IDEuNDE2LjEyMyAyLjEyNC4xODUtMS42LTEuNDA1LTMuNTUtMS41MTctNS41MjMtMS40MDQtMy4wMDMuMTctNS4xNjcgMS45MDMtNy4xNCAzLjk3Mi0xLjczOSAxLjgyNC0zLjMxIDMuODctNS45MDMgNC42MDQuMDQzLjA3OC4wNTQuMTE3LjA2Ni4xMTcuMzUuMDA1LjY5OS4wMjEgMS4wNDcuMDA1IDMuNzY4LS4xNyA3LjMxNy0uOTY1IDEwLjE0LTMuNy44OS0uODYgMS42ODUtMS44MTcgMi41NDQtMi43MS43MTYtLjc0NiAxLjU4NC0xLjE1OSAyLjY0NS0xLjA3Wm0tOC43NTMtNC42N2MtMi44MTIuMjQ2LTUuMjU0IDEuNDA5LTcuNTQ4IDIuOTQzLTEuNzY2IDEuMTgtMy42NTQgMS43MzgtNS43NzYgMS4zNy0uMzc0LS4wNjYtLjc1LS4xMTQtMS4xMjQtLjE3bC0uMDEzLjE1NmMuMTM1LjA3LjI2NS4xNTEuNDA1LjIwNy4zNTQuMTQuNzAyLjMwOCAxLjA3LjM5NSA0LjA4My45NzEgNy45OTIuNDc0IDExLjUxNi0xLjgwMyAyLjIyMS0xLjQzNSA0LjUyMS0xLjcwNyA3LjAxMy0xLjMzNi4yNTIuMDM4LjUwMy4wODMuNzU2LjEwNy4yMzQuMDIyLjQ3OS4yNTUuNzk1LjAwMy0yLjE3OS0xLjU3NC00LjUyNi0yLjA5Ni03LjA5NC0xLjg3MlptLTEwLjA0OS05LjU0NGMxLjQ3NS4wNTEgMi45NDMtLjE0MiA0LjQ4Ni0xLjA1OS0uNDUyLjA0LS42NDMuMDQtLjgyNy4wNzYtMi4xMjYuNDI0LTQuMDMzLS4wNC01LjczMy0xLjM4My0uNjIzLS40OTMtMS4yNTctLjk3NC0xLjg4OS0xLjQ1Ny0yLjUwMy0xLjkxNC01LjM3NC0yLjU1NS04LjUxNC0yLjUuMDUuMTU0LjA1NC4yNi4xMDguMzE1IDMuNDE3IDMuNDU1IDcuMzcxIDUuODM2IDEyLjM2OSA2LjAwOFptMjQuNzI3IDE3LjczMWMtMi4xMTQtMi4wOTctNC45NTItMi4zNjctNy41NzgtLjUzNyAxLjczOC4wNzggMy4wNDMuNjMyIDQuMTAxIDEuNzI4LjM3NC4zODguNzYzLjc2OCAxLjE4MiAxLjEwNiAxLjYgMS4yOSA0LjMxMSAxLjM1MiA1Ljg5Ni4xNTUtMS44NjEtLjcyNi0xLjg2MS0uNzI2LTMuNjAxLTIuNDUyWm0tMjEuMDU4IDE2LjA2Yy0xLjg1OC0zLjQ2LTQuOTgxLTQuMjQtOC41OS00LjAwOGE5LjY2NyA5LjY2NyAwIDAgMSAyLjk3NyAxLjM5Yy44NC41ODYgMS41NDcgMS4zMTEgMi4yNDMgMi4wNTUgMS4zOCAxLjQ3MyAzLjUzNCAyLjM3NiA0Ljk2MiAyLjA3LS42NTYtLjQxMi0
xLjIzOC0uODQ4LTEuNTkyLTEuNTA3Wm0xNy4yOS0xOS4zMmMwLS4wMjMuMDAxLS4wNDUuMDAzLS4wNjhsLS4wMDYuMDA2LjAwNi0uMDA2LS4wMzYtLjAwNC4wMjEuMDE4LjAxMi4wNTNabS0yMCAxNC43NDRhNy42MSA3LjYxIDAgMCAwLS4wNzItLjA0MS4xMjcuMTI3IDAgMCAwIC4wMTUuMDQzYy4wMDUuMDA4LjAzOCAwIC4wNTgtLjAwMlptLS4wNzItLjA0MS0uMDA4LS4wMzQtLjAwOC4wMS4wMDgtLjAxLS4wMjItLjAwNi4wMDUuMDI2LjAyNC4wMTRaIgogICAgICAgICAgICBmaWxsPSIjRkQ0RjAwIiAvPgo8L3N2Zz4K" + + use Plug.Debugger, + otp_app: @otp_app, + banner: {Phoenix.Endpoint.RenderErrors, :__debugger_banner__, []}, + style: [ + primary: "#EB532D", + dark: [ + primary: "#FF6B4A", + logo: logo + ], + logo: logo + ] + end + + plug :socket_dispatch + + # Compile after the debugger so we properly wrap it. + @before_compile Phoenix.Endpoint + end + end + + defp server() do + quote location: :keep, unquote: false do + @doc """ + Returns the child specification to start the endpoint + under a supervision tree. + """ + def child_spec(opts) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]}, + type: :supervisor + } + end + + @doc """ + Starts the endpoint supervision tree. + + All other options are merged into the endpoint configuration. + """ + def start_link(opts \\ []) do + Phoenix.Endpoint.Supervisor.start_link(@otp_app, __MODULE__, opts) + end + + @doc """ + Returns the endpoint configuration for `key` + + Returns `default` if the key does not exist. + """ + def config(key, default \\ nil) do + case :ets.lookup(__MODULE__, key) do + [{^key, val}] -> val + [] -> default + end + end + + @doc """ + Reloads the configuration given the application environment changes. + """ + def config_change(changed, removed) do + Phoenix.Endpoint.Supervisor.config_change(__MODULE__, changed, removed) + end + + defp persistent!() do + :persistent_term.get({Phoenix.Endpoint, __MODULE__}, nil) || + raise "could not find persistent term for endpoint #{inspect(__MODULE__)}. 
Make sure your endpoint is started and note you cannot access endpoint functions at compile-time" + end + + @doc """ + Generates the endpoint base URL without any path information. + + It uses the configuration under `:url` to generate such. + """ + def url, do: persistent!().url + + @doc """ + Generates the static URL without any path information. + + It uses the configuration under `:static_url` to generate + such. It falls back to `:url` if `:static_url` is not set. + """ + def static_url, do: persistent!().static_url + + @doc """ + Generates the endpoint base URL but as a `URI` struct. + + It uses the configuration under `:url` to generate such. + Useful for manipulating the URL data and passing it to + URL helpers. + """ + def struct_url, do: persistent!().struct_url + + @doc """ + Returns the host for the given endpoint. + """ + def host, do: persistent!().host + + @doc """ + Generates the path information when routing to this endpoint. + """ + def path(path), do: persistent!().path <> path + + @doc """ + Generates the script name. + """ + def script_name, do: persistent!().script_name + + @doc """ + Generates a route to a static file in `priv/static`. + """ + def static_path(path) do + prefix = persistent!().static_path + + case :binary.split(path, "#") do + [path, fragment] -> prefix <> elem(static_lookup(path), 0) <> "#" <> fragment + [path] -> prefix <> elem(static_lookup(path), 0) + end + end + + @doc """ + Generates a base64-encoded cryptographic hash (sha512) to a static file + in `priv/static`. Meant to be used for Subresource Integrity with CDNs. + """ + def static_integrity(path), do: elem(static_lookup(path), 1) + + @doc """ + Returns a two item tuple with the first item being the `static_path` + and the second item being the `static_integrity`. 
+ """ + def static_lookup(path) do + Phoenix.Config.cache( + __MODULE__, + {:__phoenix_static__, path}, + &Phoenix.Endpoint.Supervisor.static_lookup(&1, path) + ) + end + + @doc """ + Returns the address and port that the server is running on + """ + def server_info(scheme), do: config(:adapter).server_info(__MODULE__, scheme) + end + end + + @doc false + def __force_ssl__(module, force_ssl) do + if force_ssl do + Keyword.put_new(force_ssl, :host, {module, :host, []}) + end + end + + @doc false + defmacro __before_compile__(%{module: module}) do + sockets = Module.get_attribute(module, :phoenix_sockets) + + dispatches = + for {path, socket, socket_opts} <- sockets, + {path, plug, conn_ast, plug_opts} <- socket_paths(module, path, socket, socket_opts) do + quote do + defp do_socket_dispatch(unquote(path), conn) do + halt(unquote(plug).call(unquote(conn_ast), unquote(Macro.escape(plug_opts)))) + end + end + end + + quote do + defoverridable call: 2 + + # Inline render errors so we set the endpoint before calling it. 
+ def call(conn, opts) do + conn = %{conn | script_name: script_name(), secret_key_base: config(:secret_key_base)} + conn = Plug.Conn.put_private(conn, :phoenix_endpoint, __MODULE__) + + try do + super(conn, opts) + rescue + e in Plug.Conn.WrapperError -> + %{conn: conn, kind: kind, reason: reason, stack: stack} = e + + Phoenix.Endpoint.RenderErrors.__catch__( + conn, + kind, + reason, + stack, + config(:render_errors) + ) + catch + kind, reason -> + stack = __STACKTRACE__ + + Phoenix.Endpoint.RenderErrors.__catch__( + conn, + kind, + reason, + stack, + config(:render_errors) + ) + end + end + + @doc false + def __sockets__, do: unquote(Macro.escape(sockets)) + + @doc false + def socket_dispatch(%{path_info: path} = conn, _opts), do: do_socket_dispatch(path, conn) + unquote(dispatches) + defp do_socket_dispatch(_path, conn), do: conn + end + end + + defp socket_paths(endpoint, path, socket, opts) do + paths = [] + + common_config = [ + :path, + :serializer, + :transport_log, + :check_origin, + :check_csrf, + :code_reloader, + :connect_info, + :auth_token, + :log + ] + + websocket = + opts + |> Keyword.get(:websocket, true) + |> maybe_validate_keys( + common_config ++ + [ + :timeout, + :max_frame_size, + :fullsweep_after, + :compress, + :subprotocols, + :error_handler + ] + ) + + longpoll = + opts + |> Keyword.get(:longpoll, false) + |> maybe_validate_keys( + common_config ++ + [ + :window_ms, + :pubsub_timeout_ms, + :crypto + ] + ) + + paths = + if websocket do + websocket = put_auth_token(websocket, opts[:auth_token]) + config = Phoenix.Socket.Transport.load_config(websocket, Phoenix.Transports.WebSocket) + plug_init = {endpoint, socket, config} + {conn_ast, match_path} = socket_path(path, config) + [{match_path, Phoenix.Transports.WebSocket, conn_ast, plug_init} | paths] + else + paths + end + + paths = + if longpoll do + longpoll = put_auth_token(longpoll, opts[:auth_token]) + config = Phoenix.Socket.Transport.load_config(longpoll, Phoenix.Transports.LongPoll) + 
plug_init = {endpoint, socket, config} + {conn_ast, match_path} = socket_path(path, config) + [{match_path, Phoenix.Transports.LongPoll, conn_ast, plug_init} | paths] + else + paths + end + + paths + end + + defp put_auth_token(true, enabled), do: [auth_token: enabled] + defp put_auth_token(opts, enabled), do: Keyword.put(opts, :auth_token, enabled) + + defp socket_path(path, config) do + end_path_fragment = Keyword.fetch!(config, :path) + + {vars, path} = + String.split(path <> "/" <> end_path_fragment, "/", trim: true) + |> Enum.join("/") + |> Plug.Router.Utils.build_path_match() + + conn_ast = + if vars == [] do + quote do + conn + end + else + params = + for var <- vars, + param = Atom.to_string(var), + not match?("_" <> _, param), + do: {param, Macro.var(var, nil)} + + quote do + params = %{unquote_splicing(params)} + %{conn | path_params: params, params: params} + end + end + + {conn_ast, path} + end + + defp maybe_validate_keys(opts, keys) when is_list(opts), do: Keyword.validate!(opts, keys) + defp maybe_validate_keys(other, _), do: other + + ## API + + @doc """ + Defines a websocket/longpoll mount-point for a `socket`. + + It expects a `path`, a `socket` module, and a set of options. + The socket module is typically defined with `Phoenix.Socket`. + + Both websocket and longpolling connections are supported out + of the box. + + ## Options + + * `:websocket` - controls the websocket configuration. + Defaults to `true`. May be false or a keyword list + of options. See ["Common configuration"](#socket/3-common-configuration) + and ["WebSocket configuration"](#socket/3-websocket-configuration) + for the whole list + + * `:longpoll` - controls the longpoll configuration. + Defaults to `false`. May be true or a keyword list + of options. 
See ["Common configuration"](#socket/3-common-configuration) + and ["Longpoll configuration"](#socket/3-longpoll-configuration) + for the whole list + + * `:drainer` - a keyword list or a custom MFA function returning a keyword list, for example: + + {MyAppWeb.Socket, :drainer_configuration, []} + + configuring how to drain sockets on application shutdown. + The goal is to notify all channels (and + LiveViews) clients to reconnect. The supported options are: + + * `:batch_size` - How many clients to notify at once in a given batch. + Defaults to 10000. + * `:batch_interval` - The amount of time in milliseconds given for a + batch to terminate. Defaults to 2000ms. + * `:shutdown` - The maximum amount of time in milliseconds allowed + to drain all batches. Defaults to 30000ms. + * `:log` - the log level for drain actions. Defaults the `:log` option + passed to `use Phoenix.Socket` or `:info`. Set it to `false` to disable logging. + + For example, if you have 150k connections, the default values will + split them into 15 batches of 10k connections. Each batch takes + 2000ms before the next batch starts. In this case, we will do everything + right under the maximum shutdown time of 30000ms. Therefore, as + you increase the number of connections, remember to adjust the shutdown + accordingly. Finally, after the socket drainer runs, the lower level + HTTP/HTTPS connection drainer will still run, and apply to all connections. + Set it to `false` to disable draining. + + * `auth_token` - a boolean that enables the use of the channels client's auth_token option. + The exact token exchange mechanism depends on the transport: + + * the websocket transport, this enables a token to be passed through the `Sec-WebSocket-Protocol` header. + * the longpoll transport, this allows the token to be passed through the `Authorization` header. + + The token is available in the `connect_info` as `:auth_token`. + + Custom transports might implement their own mechanism. 
+ + You can also pass the options below on `use Phoenix.Socket`. + The values specified here override the value in `use Phoenix.Socket`. + + ## Examples + + socket "/ws", MyApp.UserSocket + + socket "/ws/admin", MyApp.AdminUserSocket, + longpoll: true, + websocket: [compress: true] + + ## Path params + + It is possible to include variables in the path, these will be + available in the `params` that are passed to the socket. + + socket "/ws/:user_id", MyApp.UserSocket, + websocket: [path: "/project/:project_id"] + + ## Common configuration + + The configuration below can be given to both `:websocket` and + `:longpoll` keys: + + * `:path` - the path to use for the transport. Will default + to the transport name ("/websocket" or "/longpoll") + + * `:serializer` - a list of serializers for messages. See + `Phoenix.Socket` for more information + + * `:transport_log` - if the transport layer itself should log and, + if so, the level + + * `:check_origin` - if the transport should check the origin of requests when + the `origin` header is present. May be `true`, `false`, a list of URIs that + are allowed, or a function provided as MFA tuple. Defaults to `:check_origin` + setting at endpoint configuration. + + If `true`, the header is checked against `:host` in `YourAppWeb.Endpoint.config(:url)[:host]`. + + If `false` and you do not validate the session in your socket, your app + is vulnerable to Cross-Site WebSocket Hijacking (CSWSH) attacks. + Only use in development, when the host is truly unknown or when + serving clients that do not send the `origin` header, such as mobile apps. + + You can also specify a list of explicitly allowed origins. Each origin may include + scheme, host, and port. Wildcards are supported. 
+ + check_origin: [ + "https://example.com", + "//another.com:888", + "//*.other.com" + ] + + Or to accept any origin matching the request connection's host, port, and scheme: + + check_origin: :conn + + Or a custom MFA function: + + check_origin: {MyAppWeb.Auth, :my_check_origin?, []} + + The MFA is invoked with the request `%URI{}` as the first argument, + followed by arguments in the MFA list, and must return a boolean. + + * `:check_csrf` - if the transport should perform CSRF check. To avoid + "Cross-Site WebSocket Hijacking", you must have at least one of + `check_origin` and `check_csrf` enabled. If you set both to `false`, + Phoenix will raise, but it is still possible to disable both by passing + a custom MFA to `check_origin`. In such cases, it is your responsibility + to ensure at least one of them is enabled. Defaults to `true` + + * `:code_reloader` - enable or disable the code reloader. Defaults to your + endpoint configuration + + * `:connect_info` - a list of keys that represent data to be copied from + the transport to be made available in the user socket `connect/3` callback. + See the "Connect info" subsection for valid keys + + ### Connect info + + The valid keys are: + + * `:peer_data` - the result of `Plug.Conn.get_peer_data/1` + + * `:trace_context_headers` - a list of all trace context headers. Supported + headers are defined by the [W3C Trace Context Specification](https://www.w3.org/TR/trace-context-1/). + These headers are necessary for libraries such as [OpenTelemetry](https://opentelemetry.io/) + to extract trace propagation information to know this request is part of a + larger trace in progress. + + * `:x_headers` - all request headers that have an "x-" prefix + + * `:uri` - a `%URI{}` with information from the conn + + * `:user_agent` - the value of the "user-agent" request header + + * `{:session, session_config}` - the session information from `Plug.Conn`. 
+ The `session_config` is typically an exact copy of the arguments given + to `Plug.Session`. In order to validate the session, the "_csrf_token" + must be given as request parameter when connecting the socket with the + value of `URI.encode_www_form(Plug.CSRFProtection.get_csrf_token())`. + The CSRF token request parameter can be modified via the `:csrf_token_key` + option. + + Additionally, `session_config` may be a MFA, such as + `{MyAppWeb.Auth, :get_session_config, []}`, to allow loading config in + runtime. + + Arbitrary keywords may also appear following the above valid keys, which + is useful for passing custom connection information to the socket. + + For example: + + ``` + socket "/socket", AppWeb.UserSocket, + websocket: [ + connect_info: [:peer_data, :trace_context_headers, :x_headers, :uri, session: [store: :cookie]] + ] + ``` + + With arbitrary keywords: + + ``` + socket "/socket", AppWeb.UserSocket, + websocket: [ + connect_info: [:uri, custom_value: "abcdef"] + ] + ``` + + > #### Where are my headers? {: .tip} + > + > Phoenix only gives you limited access to the connection headers for security + > reasons. WebSockets are cross-domain, which means that, when a user "John Doe" + > visits a malicious website, the malicious website can open up a WebSocket + > connection to your application, and the browser will gladly submit John Doe's + > authentication/cookie information. If you were to accept this information as is, + > the malicious website would have full control of a WebSocket connection to your + > application, authenticated on John Doe's behalf. + > + > To safe-guard your application, Phoenix limits and validates the connection + > information your socket can access. This means your application is safe from + > these attacks, but you can't access cookies and other headers in your socket. + > You may access the session stored in the connection via the `:connect_info` + > option, provided you also pass a csrf token when connecting over WebSocket. 
+ + ## Websocket configuration + + The following configuration applies only to `:websocket`. + + * `:timeout` - the timeout for keeping websocket connections + open after it last received data, defaults to 60_000ms + + * `:max_frame_size` - the maximum allowed frame size in bytes, + defaults to "infinity" + + * `:fullsweep_after` - the maximum number of garbage collections + before forcing a fullsweep for the socket process. You can set + it to `0` to force more frequent cleanups of your websocket + transport processes. Setting this option requires Erlang/OTP 24 + + * `:compress` - whether to enable per message compression on + all data frames, defaults to false + + * `:subprotocols` - a list of supported websocket subprotocols. + Used for handshake `Sec-WebSocket-Protocol` response header, defaults to nil. + + For example: + + subprotocols: ["sip", "mqtt"] + + * `:error_handler` - custom error handler for connection errors. + If `c:Phoenix.Socket.connect/3` returns an `{:error, reason}` tuple, + the error handler will be called with the error reason. For WebSockets, + the error handler must be a MFA tuple that receives a `Plug.Conn`, the + error reason, and returns a `Plug.Conn` with a response. For example: + + socket "/socket", MySocket, + websocket: [ + error_handler: {MySocket, :handle_error, []} + ] + + and a `{:error, :rate_limit}` return may be handled on `MySocket` as: + + def handle_error(conn, :rate_limit), do: Plug.Conn.send_resp(conn, 429, "Too many requests") + + ## Longpoll configuration + + The following configuration applies only to `:longpoll`: + + * `:window_ms` - how long the client can wait for new messages + in its poll request in milliseconds (ms). Defaults to `10_000`. + + * `:pubsub_timeout_ms` - how long a request can wait for the + pubsub layer to respond in milliseconds (ms). Defaults to `2000`. + + * `:crypto` - options for verifying and signing the token, accepted + by `Phoenix.Token`. 
By default tokens are valid for 2 weeks + + """ + defmacro socket(path, module, opts \\ []) do + module = Macro.expand(module, %{__CALLER__ | function: {:socket_dispatch, 2}}) + + quote do + @phoenix_sockets {unquote(path), unquote(module), unquote(opts)} + end + end + + @doc false + @deprecated "Phoenix.Endpoint.instrument/4 is deprecated and has no effect. Use :telemetry instead" + defmacro instrument(_endpoint_or_conn_or_socket, _event, _runtime, _fun) do + :ok + end + + @doc """ + Checks if Endpoint's web server has been configured to start. + + * `otp_app` - The OTP app running the endpoint, for example `:my_app` + * `endpoint` - The endpoint module, for example `MyAppWeb.Endpoint` + + ## Examples + + iex> Phoenix.Endpoint.server?(:my_app, MyAppWeb.Endpoint) + true + + """ + def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do + Phoenix.Endpoint.Supervisor.server?(otp_app, endpoint) + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex b/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex new file mode 100644 index 0000000..96482b9 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex @@ -0,0 +1,155 @@ +defmodule Phoenix.Endpoint.Cowboy2Adapter do + @moduledoc """ + The Cowboy2 adapter for Phoenix. + + ## Endpoint configuration + + This adapter uses the following endpoint configuration: + + * `:http` - the configuration for the HTTP server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:https` - the configuration for the HTTPS server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:drainer` - a drainer process that triggers when your application is + shutting down to wait for any on-going request to finish. It accepts all + options as defined by [`Plug.Cowboy.Drainer`](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.Drainer.html). 
+ Defaults to `[]`, which will start a drainer process for each configured endpoint, + but can be disabled by setting it to `false`. + + ## Custom dispatch options + + You can provide custom dispatch options in order to use Phoenix's + builtin Cowboy server with custom handlers. For example, to handle + raw WebSockets [as shown in Cowboy's docs](https://github.com/ninenines/cowboy/tree/master/examples)). + + The options are passed to both `:http` and `:https` keys in the + endpoint configuration. However, once you pass your custom dispatch + options, you will need to manually wire the Phoenix endpoint by + adding the following rule: + + {:_, Plug.Cowboy.Handler, {MyAppWeb.Endpoint, []}} + + For example: + + config :myapp, MyAppWeb.Endpoint, + http: [dispatch: [ + {:_, [ + {"/foo", MyAppWeb.CustomHandler, []}, + {:_, Plug.Cowboy.Handler, {MyAppWeb.Endpoint, []}} + ]}]] + + It is also important to specify your handlers first, otherwise + Phoenix will intercept the requests before they get to your handler. + """ + + require Logger + + @doc false + def child_specs(endpoint, config) do + otp_app = Keyword.fetch!(config, :otp_app) + + refs_and_specs = + for {scheme, port} <- [http: 4000, https: 4040], opts = config[scheme] do + port = :proplists.get_value(:port, opts, port) + + unless port do + Logger.error(":port for #{scheme} config is nil, cannot start server") + raise "aborting due to nil port" + end + + # Ranch options are read from the top, so we keep the user opts first. + opts = :proplists.delete(:port, opts) ++ [port: port_to_integer(port), otp_app: otp_app] + child_spec(scheme, endpoint, opts, config[:code_reloader]) + end + + {refs, child_specs} = Enum.unzip(refs_and_specs) + + if drainer = refs != [] && Keyword.get(config, :drainer, []) do + child_specs ++ [{Plug.Cowboy.Drainer, Keyword.put_new(drainer, :refs, refs)}] + else + child_specs + end + end + + defp child_spec(scheme, endpoint, config, code_reloader?) 
do + if scheme == :https do + Application.ensure_all_started(:ssl) + end + + ref = make_ref(endpoint, scheme) + + plug = + if code_reloader? do + {Phoenix.Endpoint.SyncCodeReloadPlug, {endpoint, []}} + else + {endpoint, []} + end + + spec = Plug.Cowboy.child_spec(ref: ref, scheme: scheme, plug: plug, options: config) + spec = update_in(spec.start, &{__MODULE__, :start_link, [scheme, endpoint, &1]}) + {ref, spec} + end + + @doc false + def start_link(scheme, endpoint, {m, f, [ref | _] = a}) do + # ref is used by Ranch to identify its listeners, defaulting + # to plug.HTTP and plug.HTTPS and overridable by users. + case apply(m, f, a) do + {:ok, pid} -> + Logger.info(info(scheme, endpoint, ref)) + {:ok, pid} + + {:error, {:shutdown, {_, _, {:listen_error, _, :eaddrinuse}}}} = error -> + Logger.error([info(scheme, endpoint, ref), " failed, port already in use"]) + error + + {:error, {:shutdown, {_, _, {{_, {:error, :eaddrinuse}}, _}}}} = error -> + Logger.error([info(scheme, endpoint, ref), " failed, port already in use"]) + error + + {:error, _} = error -> + error + end + end + + defp info(scheme, endpoint, ref) do + server = "cowboy #{Application.spec(:cowboy)[:vsn]}" + "Running #{inspect(endpoint)} with #{server} at #{bound_address(scheme, ref)}" + end + + defp bound_address(scheme, ref) do + case :ranch.get_addr(ref) do + {:local, unix_path} -> + "#{unix_path} (#{scheme}+unix)" + + {addr, port} -> + "#{:inet.ntoa(addr)}:#{port} (#{scheme})" + end + rescue + _ -> scheme + end + + # TODO: Remove this once {:system, env_var} deprecation is removed + defp port_to_integer({:system, env_var}), do: port_to_integer(System.get_env(env_var)) + defp port_to_integer(port) when is_binary(port), do: String.to_integer(port) + defp port_to_integer(port) when is_integer(port), do: port + + def server_info(endpoint, scheme) do + address = + endpoint + |> make_ref(scheme) + |> :ranch.get_addr() + + {:ok, address} + rescue + e -> {:error, Exception.message(e)} + end + + defp 
make_ref(endpoint, scheme) do + Module.concat(endpoint, scheme |> Atom.to_string() |> String.upcase()) + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/render_errors.ex b/deps/phoenix/lib/phoenix/endpoint/render_errors.ex new file mode 100644 index 0000000..72c18a5 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/render_errors.ex @@ -0,0 +1,195 @@ +defmodule Phoenix.Endpoint.RenderErrors do + # This module is used to catch failures and render them using a view. + # + # This module is automatically used in `Phoenix.Endpoint` where it + # overrides `call/2` to provide rendering. Once the error is + # rendered, the error is reraised unless it is a NoRouteError. + # + # ## Options + # + # * `:formats` - the format to use when none is available from the request + # * `:log` - the `t:Logger.level/0` or `false` to disable logging rendered errors + # + @moduledoc false + + import Plug.Conn + + require Logger + + alias Phoenix.Router.NoRouteError + alias Phoenix.Controller + + @already_sent {:plug_conn, :sent} + + @doc false + defmacro __using__(opts) do + quote do + @before_compile Phoenix.Endpoint.RenderErrors + @phoenix_render_errors unquote(opts) + end + end + + @doc false + defmacro __before_compile__(_) do + quote location: :keep do + defoverridable call: 2 + + def call(conn, opts) do + try do + super(conn, opts) + rescue + e in Plug.Conn.WrapperError -> + %{conn: conn, kind: kind, reason: reason, stack: stack} = e + unquote(__MODULE__).__catch__(conn, kind, reason, stack, @phoenix_render_errors) + catch + kind, reason -> + stack = __STACKTRACE__ + unquote(__MODULE__).__catch__(conn, kind, reason, stack, @phoenix_render_errors) + end + end + end + end + + @doc false + def __catch__(%Plug.Conn{} = conn, kind, reason, stack, opts) do + conn = + receive do + @already_sent -> + send(self(), @already_sent) + %{conn | state: :sent} + after + 0 -> + instrument_render_and_send(conn, kind, reason, stack, opts) + end + + maybe_raise(kind, reason, stack) + conn + 
end + + defp instrument_render_and_send(conn, kind, reason, stack, opts) do + level = Keyword.get(opts, :log, :debug) + status = status(kind, reason) + conn = error_conn(conn, kind, reason) + start = System.monotonic_time() + + metadata = %{ + conn: conn, + status: status, + kind: kind, + reason: reason, + stacktrace: stack, + log: level + } + + try do + render(conn, status, kind, reason, stack, opts) + after + duration = System.monotonic_time() - start + :telemetry.execute([:phoenix, :error_rendered], %{duration: duration}, metadata) + end + end + + defp error_conn(_conn, :error, %NoRouteError{conn: conn}), do: conn + defp error_conn(conn, _kind, _reason), do: conn + + defp maybe_raise(:error, %NoRouteError{}, _stack), do: :ok + defp maybe_raise(kind, reason, stack), do: :erlang.raise(kind, reason, stack) + + ## Rendering + + @doc false + def __debugger_banner__(_conn, _status, _kind, %NoRouteError{router: router}, _stack) do + """ +

Available routes

+
#{Phoenix.Router.ConsoleFormatter.format(router)}
+ """ + end + + def __debugger_banner__(_conn, _status, _kind, _reason, _stack), do: nil + + defp render(conn, status, kind, reason, stack, opts) do + conn = + conn + |> maybe_fetch_query_params() + |> fetch_view_format(opts) + |> Plug.Conn.put_status(status) + |> Controller.put_root_layout(opts[:root_layout] || false) + |> Controller.put_layout(opts[:layout] || false) + + format = Controller.get_format(conn) + reason = Exception.normalize(kind, reason, stack) + template = "#{conn.status}.#{format}" + assigns = %{kind: kind, reason: reason, stack: stack, status: conn.status, __changed__: nil} + + Controller.render(conn, template, assigns) + end + + defp maybe_fetch_query_params(%Plug.Conn{} = conn) do + fetch_query_params(conn) + rescue + Plug.Conn.InvalidQueryError -> + case conn.params do + %Plug.Conn.Unfetched{} -> %{conn | query_params: %{}, params: %{}} + params -> %{conn | query_params: %{}, params: params} + end + end + + defp fetch_view_format(conn, opts) do + # We ignore params["_format"] although we respect any already stored. 
+ view = opts[:view] + formats = opts[:formats] + accepts = opts[:accepts] + + cond do + formats -> + put_formats(conn, Enum.map(formats, fn {k, v} -> {Atom.to_string(k), v} end)) + + view && accepts -> + put_formats(conn, Enum.map(accepts, &{&1, view})) + + true -> + raise ArgumentError, + "expected :render_errors to have :formats or :view/:accepts, but got: #{inspect(opts)}" + end + end + + defp put_formats(conn, formats) do + [{fallback_format, fallback_view} | _] = formats + + try do + conn = + case conn.private do + %{phoenix_format: format} when is_binary(format) -> conn + _ -> Controller.accepts(conn, Enum.map(formats, &elem(&1, 0))) + end + + format = Phoenix.Controller.get_format(conn) + + case List.keyfind(formats, format, 0) do + {_, view} -> + Controller.put_view(conn, view) + + nil -> + conn + |> Controller.put_format(fallback_format) + |> Controller.put_view(fallback_view) + end + rescue + e in Phoenix.NotAcceptableError -> + Logger.debug( + "Could not render errors due to #{Exception.message(e)}. " <> + "Errors will be rendered using the first accepted format #{inspect(fallback_format)} as fallback. " <> + "Please customize the :formats option under the :render_errors configuration " <> + "in your endpoint if you want to support other formats or choose another fallback" + ) + + conn + |> Controller.put_format(fallback_format) + |> Controller.put_view(fallback_view) + end + end + + defp status(:error, error), do: Plug.Exception.status(error) + defp status(:throw, _throw), do: 500 + defp status(:exit, _exit), do: 500 +end diff --git a/deps/phoenix/lib/phoenix/endpoint/supervisor.ex b/deps/phoenix/lib/phoenix/endpoint/supervisor.ex new file mode 100644 index 0000000..9c5f190 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/supervisor.ex @@ -0,0 +1,488 @@ +defmodule Phoenix.Endpoint.Supervisor do + # This module contains the logic used by most functions in Phoenix.Endpoint + # as well the supervisor for sockets, adapters, watchers, etc. 
+ @moduledoc false + + require Logger + use Supervisor + + @doc """ + Starts the endpoint supervision tree. + """ + def start_link(otp_app, mod, opts \\ []) do + with {:ok, pid} = ok <- Supervisor.start_link(__MODULE__, {otp_app, mod, opts}, name: mod) do + # We don't use the defaults in the checks below + conf = Keyword.merge(Application.get_env(otp_app, mod, []), opts) + log_access_url(mod, conf) + browser_open(mod, conf) + + measurements = %{system_time: System.system_time()} + metadata = %{pid: pid, config: conf, module: mod, otp_app: otp_app} + :telemetry.execute([:phoenix, :endpoint, :init], measurements, metadata) + + ok + end + end + + @doc false + def init({otp_app, mod, opts}) do + default_conf = Phoenix.Config.merge(defaults(otp_app, mod), opts) + env_conf = Phoenix.Config.from_env(otp_app, mod, default_conf) + + secret_conf = + cond do + Code.ensure_loaded?(mod) and function_exported?(mod, :init, 2) -> + IO.warn( + """ + your #{inspect(mod)} defines a init/2 callback, which is now deprecated. \ + This callback is invoked when your endpoint is initialized as part of your supervision tree. \ + Instead, you should either: + + 1. Move all dynamic configuration to config/runtime.exs (preferred). For example: + + # config/runtime.exs + import Config + + if config_env() == :prod do + config #{inspect(otp_app)}, #{inspect(mod)}, + http: [:inet6, port: System.fetch_env!("PORT")] + end + + 2. Pass the configuration you returned from the `init/2` callback \ + as additional options when starting the endpoint in your supervision tree. 
\ + For example: {#{inspect(mod)}, some_extra_options: true} + """, + [] + ) + + {:ok, init_conf} = mod.init(:supervisor, env_conf) + init_conf + + is_nil(Application.get_env(otp_app, mod)) -> + Logger.warning( + "no configuration found for otp_app #{inspect(otp_app)} and module #{inspect(mod)}" + ) + + env_conf + + true -> + env_conf + end + + extra_conf = [ + endpoint_id: :crypto.strong_rand_bytes(16) |> Base.encode64(padding: false), + # TODO: Remove this once :pubsub is removed + pubsub_server: secret_conf[:pubsub_server] || secret_conf[:pubsub][:name] + ] + + secret_conf = extra_conf ++ secret_conf + default_conf = extra_conf ++ default_conf + + # Drop all secrets from secret_conf before passing it around + conf = Keyword.drop(secret_conf, [:secret_key_base]) + server? = server?(conf) + + if conf[:instrumenters] do + Logger.warning( + ":instrumenters configuration for #{inspect(mod)} is deprecated and has no effect" + ) + end + + if server? and conf[:code_reloader] do + Phoenix.CodeReloader.Server.check_symlinks() + end + + # TODO: Remove this once {:system, env_var} tuples are removed + warn_on_deprecated_system_env_tuples(otp_app, mod, conf, :http) + warn_on_deprecated_system_env_tuples(otp_app, mod, conf, :https) + warn_on_deprecated_system_env_tuples(otp_app, mod, conf, :url) + warn_on_deprecated_system_env_tuples(otp_app, mod, conf, :static_url) + + children = + config_children(mod, secret_conf, default_conf) ++ + warmup_children(mod) ++ + pubsub_children(mod, conf) ++ + socket_children(mod, conf, :child_spec) ++ + server_children(mod, conf, server?) ++ + socket_children(mod, conf, :drainer_spec) ++ + watcher_children(mod, conf, server?) + + Supervisor.init(children, strategy: :one_for_one) + end + + defp pubsub_children(mod, conf) do + pub_conf = conf[:pubsub] + + if pub_conf do + Logger.warning(""" + The :pubsub key in your #{inspect(mod)} is deprecated. + + You must now start the pubsub in your application supervision tree. 
+ Go to lib/my_app/application.ex and add the following: + + {Phoenix.PubSub, #{inspect(pub_conf)}} + + Now, back in your config files in config/*, you can remove the :pubsub + key and add the :pubsub_server key, with the PubSub name: + + pubsub_server: #{inspect(pub_conf[:name])} + """) + end + + if pub_conf[:adapter] do + [{Phoenix.PubSub, pub_conf}] + else + [] + end + end + + defp socket_children(endpoint, conf, fun) do + for {_, socket, opts} <- Enum.uniq_by(endpoint.__sockets__(), &elem(&1, 1)), + _ = check_origin_or_csrf_checked!(conf, opts), + spec = apply_or_ignore(socket, fun, [[endpoint: endpoint] ++ opts]), + spec != :ignore do + spec + end + end + + defp apply_or_ignore(socket, fun, args) do + # If the module is not loaded, we want to invoke and crash + if not Code.ensure_loaded?(socket) or function_exported?(socket, fun, length(args)) do + apply(socket, fun, args) + else + :ignore + end + end + + defp check_origin_or_csrf_checked!(endpoint_conf, socket_opts) do + check_origin = endpoint_conf[:check_origin] + + for {transport, transport_opts} <- socket_opts, is_list(transport_opts) do + check_origin = Keyword.get(transport_opts, :check_origin, check_origin) + + check_csrf = transport_opts[:check_csrf] + + if check_origin == false and check_csrf == false do + raise ArgumentError, + "one of :check_origin and :check_csrf must be set to non-false value for " <> + "transport #{inspect(transport)}" + end + end + end + + defp config_children(mod, conf, default_conf) do + args = {mod, conf, default_conf, name: Module.concat(mod, "Config")} + [{Phoenix.Config, args}] + end + + defp warmup_children(mod) do + [%{id: :warmup, start: {__MODULE__, :warmup, [mod]}}] + end + + defp server_children(mod, config, server?) do + cond do + server? 
-> + adapter = config[:adapter] + adapter.child_specs(mod, config) + + config[:http] || config[:https] -> + if System.get_env("RELEASE_NAME") do + Logger.info( + "Configuration :server was not enabled for #{inspect(mod)}, http/https services won't start" + ) + end + + [] + + true -> + [] + end + end + + defp watcher_children(_mod, conf, server?) do + watchers = conf[:watchers] || [] + + if server? || conf[:force_watchers] do + Enum.map(watchers, &{Phoenix.Endpoint.Watcher, &1}) + else + [] + end + end + + @doc """ + Checks if Endpoint's web server has been configured to start. + """ + def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do + server?(Application.get_env(otp_app, endpoint, [])) + end + + defp server?(conf) when is_list(conf) do + Keyword.get_lazy(conf, :server, fn -> + Application.get_env(:phoenix, :serve_endpoints, false) + end) + end + + defp defaults(otp_app, module) do + [ + otp_app: otp_app, + + # Compile-time config + code_reloader: false, + debug_errors: false, + render_errors: [view: render_errors(module), accepts: ~w(html), layout: false], + + # Runtime config + + # Even though Bandit is the default in apps generated via the installer, + # we continue to use Cowboy as the default if not explicitly specified for + # backwards compatibility. TODO: Change this to default to Bandit in 2.0 + adapter: Phoenix.Endpoint.Cowboy2Adapter, + cache_static_manifest: nil, + check_origin: true, + http: false, + https: false, + reloadable_apps: nil, + # TODO: Gettext had a compiler in earlier versions, + # but not since v0.20, so we can remove it here eventually. 
+ reloadable_compilers: [:phoenix_live_view, :gettext, :elixir, :app], + secret_key_base: nil, + static_url: nil, + url: [host: "localhost", path: "/"], + cache_manifest_skip_vsn: false, + + # Supervisor config + watchers: [], + force_watchers: false + ] + end + + defp render_errors(module) do + module + |> Module.split() + |> Enum.at(0) + |> Module.concat("ErrorView") + end + + @doc """ + Callback that changes the configuration from the app callback. + """ + def config_change(endpoint, changed, removed) do + res = Phoenix.Config.config_change(endpoint, changed, removed) + warmup(endpoint) + res + end + + @doc """ + Returns a two item tuple with the first element containing the + static path of a file in the static root directory + and the second element containing the sha512 of that file (for SRI). + + When the file exists, it includes a timestamp. When it doesn't exist, + just the static path is returned. + + The result is wrapped in a `{:cache | :nocache, value}` tuple so + the `Phoenix.Config` layer knows how to cache it. 
+ """ + @invalid_local_url_chars ["\\"] + + def static_lookup(_endpoint, "//" <> _ = path) do + raise_invalid_path(path) + end + + def static_lookup(_endpoint, "/" <> _ = path) do + if String.contains?(path, @invalid_local_url_chars) do + raise ArgumentError, "unsafe characters detected for path #{inspect(path)}" + else + {:nocache, {path, nil}} + end + end + + def static_lookup(_endpoint, path) when is_binary(path) do + raise_invalid_path(path) + end + + defp raise_invalid_path(path) do + raise ArgumentError, "expected a path starting with a single / but got #{inspect(path)}" + end + + # TODO: Remove the first function clause once {:system, env_var} tuples are removed + defp host_to_binary({:system, env_var}), do: host_to_binary(System.get_env(env_var)) + defp host_to_binary(host), do: host + + # TODO: Remove the first function clause once {:system, env_var} tuples are removed + defp port_to_integer({:system, env_var}), do: port_to_integer(System.get_env(env_var)) + defp port_to_integer(port) when is_binary(port), do: String.to_integer(port) + defp port_to_integer(port) when is_integer(port), do: port + + defp warn_on_deprecated_system_env_tuples(otp_app, mod, conf, key) do + deprecated_configs = Enum.filter(conf[key] || [], &match?({_, {:system, _}}, &1)) + + if Enum.any?(deprecated_configs) do + deprecated_config_lines = for {k, v} <- deprecated_configs, do: "#{k}: #{inspect(v)}" + + runtime_exs_config_lines = + for {key, {:system, env_var}} <- deprecated_configs, + do: ~s|#{key}: System.get_env("#{env_var}")| + + Logger.warning(""" + #{inspect(key)} configuration containing {:system, env_var} tuples for #{inspect(mod)} is deprecated. 
+ + Configuration with deprecated values: + + config #{inspect(otp_app)}, #{inspect(mod)}, + #{key}: [ + #{deprecated_config_lines |> Enum.join(",\r\n ")} + ] + + Move this configuration into config/runtime.exs and replace the {:system, env_var} tuples + with System.get_env/1 function calls: + + config #{inspect(otp_app)}, #{inspect(mod)}, + #{key}: [ + #{runtime_exs_config_lines |> Enum.join(",\r\n ")} + ] + """) + end + end + + @doc """ + Invoked to warm up caches on start and config change. + """ + def warmup(endpoint) do + warmup_persistent(endpoint) + + try do + if manifest = cache_static_manifest(endpoint) do + warmup_static(endpoint, manifest) + end + rescue + e -> Logger.error("Could not warm up static assets: #{Exception.message(e)}") + end + + # To prevent a race condition where the socket listener is already started + # but the config not warmed up, we run warmup/1 as a child in the supervision + # tree. As we don't actually want to start a process, we return :ignore here. + :ignore + end + + defp warmup_persistent(endpoint) do + url_config = endpoint.config(:url) + static_url_config = endpoint.config(:static_url) || url_config + + struct_url = build_url(endpoint, url_config) + host = host_to_binary(url_config[:host] || "localhost") + path = empty_string_if_root(url_config[:path] || "/") + script_name = String.split(path, "/", trim: true) + + static_url = build_url(endpoint, static_url_config) |> String.Chars.URI.to_string() + static_path = empty_string_if_root(static_url_config[:path] || "/") + + :persistent_term.put({Phoenix.Endpoint, endpoint}, %{ + struct_url: struct_url, + url: String.Chars.URI.to_string(struct_url), + host: host, + path: path, + script_name: script_name, + static_path: static_path, + static_url: static_url + }) + end + + defp empty_string_if_root("/"), do: "" + defp empty_string_if_root(other), do: other + + defp build_url(endpoint, url) do + https = endpoint.config(:https) + http = endpoint.config(:http) + + {scheme, port} = + 
cond do + https -> {"https", https[:port] || 443} + http -> {"http", http[:port] || 80} + true -> {"http", 80} + end + + scheme = url[:scheme] || scheme + host = host_to_binary(url[:host] || "localhost") + port = port_to_integer(url[:port] || port) + + if host =~ ~r"[^:]:\d" do + Logger.warning( + "url: [host: ...] configuration value #{inspect(host)} for #{inspect(endpoint)} is invalid" + ) + end + + %URI{scheme: scheme, port: port, host: host} + end + + defp warmup_static(endpoint, %{"latest" => latest, "digests" => digests}) do + Phoenix.Config.put(endpoint, :cache_static_manifest_latest, latest) + with_vsn? = !endpoint.config(:cache_manifest_skip_vsn) + + Enum.each(latest, fn {key, _} -> + Phoenix.Config.cache(endpoint, {:__phoenix_static__, "/" <> key}, fn _ -> + {:cache, static_cache(digests, Map.get(latest, key), with_vsn?)} + end) + end) + end + + defp warmup_static(_endpoint, _manifest) do + raise ArgumentError, "expected cache manifest to include 'latest' and 'digests' keys" + end + + defp static_cache(digests, value, true) do + {"/#{value}?vsn=d", static_integrity(digests[value]["sha512"])} + end + + defp static_cache(digests, value, false) do + {"/#{value}", static_integrity(digests[value]["sha512"])} + end + + defp static_integrity(nil), do: nil + defp static_integrity(sha), do: "sha512-#{sha}" + + defp cache_static_manifest(endpoint) do + if inner = endpoint.config(:cache_static_manifest) do + {app, inner} = + case inner do + {_, _} = inner -> inner + inner when is_binary(inner) -> {endpoint.config(:otp_app), inner} + _ -> raise ArgumentError, ":cache_static_manifest must be a binary or a tuple" + end + + outer = Application.app_dir(app, inner) + + if File.exists?(outer) do + outer |> File.read!() |> Phoenix.json_library().decode!() + else + raise ArgumentError, + "could not find static manifest at #{inspect(outer)}. 
" <> + "Run \"mix phx.digest\" after building your static files " <> + "or remove the \"cache_static_manifest\" configuration from your config files." + end + else + nil + end + end + + defp log_access_url(endpoint, conf) do + if Keyword.get(conf, :log_access_url, true) && server?(conf) do + Logger.info("Access #{inspect(endpoint)} at #{endpoint.url()}") + end + end + + defp browser_open(endpoint, conf) do + if Application.get_env(:phoenix, :browser_open, false) && server?(conf) do + url = endpoint.url() + + {cmd, args} = + case :os.type() do + {:win32, _} -> {"cmd", ["/c", "start", url]} + {:unix, :darwin} -> {"open", [url]} + {:unix, _} -> {"xdg-open", [url]} + end + + System.cmd(cmd, args) + end + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/sync_code_reload_plug.ex b/deps/phoenix/lib/phoenix/endpoint/sync_code_reload_plug.ex new file mode 100644 index 0000000..642cdf3 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/sync_code_reload_plug.ex @@ -0,0 +1,36 @@ +defmodule Phoenix.Endpoint.SyncCodeReloadPlug do + @moduledoc ~S""" + Wraps an Endpoint, attempting to sync with Phoenix's code reloader if + an exception is raised which indicates that we may be in the middle of a reload. + + We detect this by looking at the raised exception and seeing if it indicates + that the endpoint is not defined. This indicates that the code reloader may be + midway through a compile, and that we should attempt to retry the request + after the compile has completed. This is also why this must be implemented in + a separate module (one that is not recompiled in a typical code reload cycle), + since otherwise it may be the case that the endpoint itself is not defined. + """ + + @behaviour Plug + + def init({endpoint, opts}), do: {endpoint, endpoint.init(opts)} + + def call(conn, {endpoint, opts}), do: do_call(conn, endpoint, opts, true) + + defp do_call(conn, endpoint, opts, retry?) 
do + try do + endpoint.call(conn, opts) + rescue + exception in [UndefinedFunctionError] -> + case exception do + %UndefinedFunctionError{module: ^endpoint} when retry? -> + # Sync with the code reloader and retry once + Phoenix.CodeReloader.sync() + do_call(conn, endpoint, opts, false) + + exception -> + reraise(exception, __STACKTRACE__) + end + end + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/watcher.ex b/deps/phoenix/lib/phoenix/endpoint/watcher.ex new file mode 100644 index 0000000..7f6fe4c --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/watcher.ex @@ -0,0 +1,60 @@ +defmodule Phoenix.Endpoint.Watcher do + @moduledoc false + require Logger + + def child_spec(args) do + %{ + id: make_ref(), + start: {__MODULE__, :start_link, [args]}, + restart: :transient + } + end + + def start_link({cmd, args}) do + Task.start_link(__MODULE__, :watch, [to_string(cmd), args]) + end + + def watch(_cmd, {mod, fun, args}) do + try do + apply(mod, fun, args) + catch + kind, reason -> + # The function returned a non-zero exit code. + # Sleep for a couple seconds before exiting to + # ensure this doesn't hit the supervisor's + # max_restarts/max_seconds limit. 
+ Process.sleep(2000) + :erlang.raise(kind, reason, __STACKTRACE__) + end + end + + def watch(cmd, args) when is_list(args) do + {args, opts} = Enum.split_while(args, &is_binary(&1)) + opts = Keyword.merge([into: IO.stream(:stdio, :line), stderr_to_stdout: true], opts) + + try do + System.cmd(cmd, args, opts) + catch + :error, :enoent -> + relative = Path.relative_to_cwd(cmd) + + Logger.error( + "Could not start watcher #{inspect(relative)} from #{inspect(cd(opts))}, executable does not exist" + ) + + exit(:shutdown) + else + {_, 0} -> + :ok + + {_, _} -> + # System.cmd returned a non-zero exit code + # sleep for a couple seconds before exiting to ensure this doesn't + # hit the supervisor's max_restarts / max_seconds limit + Process.sleep(2000) + exit(:watcher_command_error) + end + end + + defp cd(opts), do: opts[:cd] || File.cwd!() +end diff --git a/deps/phoenix/lib/phoenix/exceptions.ex b/deps/phoenix/lib/phoenix/exceptions.ex new file mode 100644 index 0000000..6e091f1 --- /dev/null +++ b/deps/phoenix/lib/phoenix/exceptions.ex @@ -0,0 +1,72 @@ +defmodule Phoenix.NotAcceptableError do + @moduledoc """ + Raised when one of the `accept*` headers is not accepted by the server. + + This exception is commonly raised by `Phoenix.Controller.accepts/2` + which negotiates the media types the server is able to serve with + the contents the client is able to render. + + If you are seeing this error, you should check if you are listing + the desired formats in your `:accepts` plug or if you are setting + the proper accept header in the client. The exception contains the + acceptable mime types in the `accepts` field. + """ + + defexception message: nil, accepts: [], plug_status: 406 +end + +defmodule Phoenix.MissingParamError do + @moduledoc """ + Raised when a key is expected to be present in the request parameters, + but is not. 
+ + This exception is raised by `Phoenix.Controller.scrub_params/2` which: + + * Checks to see if the required_key is present (can be empty) + * Changes all empty parameters to nils ("" -> nil) + + If you are seeing this error, you should handle the error and surface it + to the end user. It means that there is a parameter missing from the request. + """ + + defexception [:message, plug_status: 400] + + def exception([key: value]) do + msg = "expected key #{inspect value} to be present in params, " <> + "please send the expected key or adapt your scrub_params/2 call" + %Phoenix.MissingParamError{message: msg} + end +end + +defmodule Phoenix.ActionClauseError do + exception_keys = + FunctionClauseError.__struct__() + |> Map.keys() + |> Kernel.--([:__exception__, :__struct__]) + + defexception exception_keys + + @impl true + def message(exception) do + exception + |> Map.put(:__struct__, FunctionClauseError) + |> FunctionClauseError.message() + end + + @impl true + def blame(exception, stacktrace) do + {exception, stacktrace} = + exception + |> Map.put(:__struct__, FunctionClauseError) + |> FunctionClauseError.blame(stacktrace) + + exception = Map.put(exception, :__struct__, __MODULE__) + + {exception, stacktrace} + end +end + +defimpl Plug.Exception, for: Phoenix.ActionClauseError do + def status(_), do: 400 + def actions(_), do: [] +end diff --git a/deps/phoenix/lib/phoenix/flash.ex b/deps/phoenix/lib/phoenix/flash.ex new file mode 100644 index 0000000..591c9a5 --- /dev/null +++ b/deps/phoenix/lib/phoenix/flash.ex @@ -0,0 +1,29 @@ +defmodule Phoenix.Flash do + @moduledoc """ + Provides shared flash access. + """ + + @doc """ + Gets the key from the map of flash data. + + ## Examples + + ```heex +
<%= Phoenix.Flash.get(@flash, :info) %>
+
<%= Phoenix.Flash.get(@flash, :error) %>
+ ``` + """ + def get(%mod{}, key) when is_atom(key) or is_binary(key) do + raise ArgumentError, """ + expected a map of flash data, but got a %#{inspect(mod)}{} + + Use the @flash assign set by the :fetch_flash plug instead: + + <%= Phoenix.Flash.get(@flash, :#{key}) %> + """ + end + + def get(%{} = flash, key) when is_atom(key) or is_binary(key) do + Map.get(flash, to_string(key)) + end +end diff --git a/deps/phoenix/lib/phoenix/logger.ex b/deps/phoenix/lib/phoenix/logger.ex new file mode 100644 index 0000000..f561b7c --- /dev/null +++ b/deps/phoenix/lib/phoenix/logger.ex @@ -0,0 +1,442 @@ +defmodule Phoenix.Logger do + @moduledoc """ + Instrumenter to handle logging of various instrumentation events. + + ## Instrumentation + + Phoenix uses the `:telemetry` library for instrumentation. The following events + are published by Phoenix with the following measurements and metadata: + + * `[:phoenix, :endpoint, :init]` - dispatched by `Phoenix.Endpoint` after your + Endpoint supervision tree successfully starts + * Measurement: `%{system_time: system_time}` + * Metadata: `%{pid: pid(), config: Keyword.t(), module: module(), otp_app: atom()}` + * Disable logging: This event is not logged + + * `[:phoenix, :endpoint, :start]` - dispatched by `Plug.Telemetry` in your endpoint, + usually after code reloading + * Measurement: `%{system_time: system_time}` + * Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}` + * Options: `%{log: Logger.level | false}` + * Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | false` + * Configure log level dynamically: `plug Plug.Telemetry, ..., log: {Mod, Fun, Args}` + + * `[:phoenix, :endpoint, :stop]` - dispatched by `Plug.Telemetry` in your + endpoint whenever the response is sent + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}` + * Options: `%{log: Logger.level | false}` + * Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | 
false` + * Configure log level dynamically: `plug Plug.Telemetry, ..., log: {Mod, Fun, Args}` + + * `[:phoenix, :router_dispatch, :start]` - dispatched by `Phoenix.Router` + before dispatching to a matched route + * Measurement: `%{system_time: System.system_time}` + * Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}` + * Disable logging: Pass `log: false` to the router macro, for example: `get("/page", PageController, :index, log: false)` + * Configure log level dynamically: `get("/page", PageController, :index, log: {Mod, Fun, Args})` + + * `[:phoenix, :router_dispatch, :exception]` - dispatched by `Phoenix.Router` + after exceptions on dispatching a route + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, kind: :throw | :error | :exit, reason: term(), stacktrace: Exception.stacktrace()}` + * Disable logging: This event is not logged + + * `[:phoenix, :router_dispatch, :stop]` - dispatched by `Phoenix.Router` + after successfully dispatching a matched route + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}` + * Disable logging: This event is not logged + + * `[:phoenix, :error_rendered]` - dispatched at the end of an error view being rendered + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, status: Plug.Conn.status, kind: Exception.kind, reason: term, stacktrace: Exception.stacktrace}` + * Disable logging: Set `render_errors: [log: false]` on your endpoint configuration + + * `[:phoenix, :socket_connected]` - dispatched by `Phoenix.Socket`, at the end of a socket connection + * Measurement: `%{duration: native_time}` + * Metadata: `%{endpoint: atom, transport: atom, params: term, connect_info: map, vsn: binary, user_socket: atom, result: :ok | :error, serializer: atom, 
log: Logger.level | false}` + * Disable logging: `use Phoenix.Socket, log: false` or `socket "/foo", MySocket, websocket: [log: false]` in your endpoint + + * `[:phoenix, :socket_drain]` - dispatched by `Phoenix.Socket` when using the `:drainer` option + * Measurement: `%{count: integer, total: integer, index: integer, rounds: integer}` + * Metadata: `%{endpoint: atom, socket: atom, intervasl: integer, log: Logger.level | false}` + * Disable logging: `use Phoenix.Socket, log: false` in your endpoint or pass `:log` option in the `:drainer` option + + * `[:phoenix, :channel_joined]` - dispatched at the end of a channel join + * Measurement: `%{duration: native_time}` + * Metadata: `%{result: :ok | :error, params: term, socket: Phoenix.Socket.t}` + * Disable logging: This event cannot be disabled + + * `[:phoenix, :channel_handled_in]` - dispatched at the end of a channel handle in + * Measurement: `%{duration: native_time}` + * Metadata: `%{event: binary, params: term, socket: Phoenix.Socket.t}` + * Disable logging: This event cannot be disabled + + To see an example of how Phoenix LiveDashboard uses these events to create + metrics, visit . + + ## Parameter filtering + + When logging parameters, Phoenix can filter out sensitive parameters + such as passwords and tokens. Parameters to be filtered can be + added via the `:filter_parameters` option: + + config :phoenix, :filter_parameters, ["password", "secret"] + + With the configuration above, Phoenix will filter any parameter + that contains the terms `password` or `secret`. The match is + case sensitive. + + Phoenix's default is `["password"]`. + + Phoenix can filter all parameters by default and selectively keep + parameters. This can be configured like so: + + config :phoenix, :filter_parameters, {:keep, ["id", "order"]} + + With the configuration above, Phoenix will filter all parameters, + except those that match exactly `id` or `order`. 
[`Logger.level()`](`t:Logger.level/0`)
Integer.to_string(), "ms"] + else + [Integer.to_string(duration), "µs"] + end + end + + @doc false + def compile_filter({:compiled, _key, _value} = filter), do: filter + def compile_filter({:discard, params}), do: compile_discard(params) + def compile_filter({:keep, params}), do: {:keep, params} + def compile_filter(params), do: compile_discard(params) + + defp compile_discard([]) do + {:compiled, [], []} + end + + defp compile_discard(params) when is_list(params) or is_binary(params) do + key_match = :binary.compile_pattern(params) + value_match = params |> List.wrap() |> Enum.map(&(&1 <> "=")) |> :binary.compile_pattern() + {:compiled, key_match, value_match} + end + + @doc false + def filter_values(values, filter \\ Application.get_env(:phoenix, :filter_parameters, [])) do + case compile_filter(filter) do + {:compiled, key_match, value_match} -> discard_values(values, key_match, value_match) + {:keep, match} -> keep_values(values, match) + end + end + + defp discard_values(%{__struct__: mod} = struct, _key_match, _value_match) when is_atom(mod) do + struct + end + + defp discard_values(%{} = map, key_match, value_match) do + Enum.into(map, %{}, fn {k, v} -> + cond do + is_binary(k) and String.contains?(k, key_match) -> + {k, "[FILTERED]"} + + is_binary(v) and String.contains?(v, value_match) -> + {k, "[FILTERED]"} + + true -> + {k, discard_values(v, key_match, value_match)} + end + end) + end + + defp discard_values([_ | _] = list, key_match, value_match) do + Enum.map(list, &discard_values(&1, key_match, value_match)) + end + + defp discard_values(other, _key_match, _value_match), do: other + + defp keep_values(%{__struct__: mod}, _match) when is_atom(mod), do: "[FILTERED]" + + defp keep_values(%{} = map, match) do + Enum.into(map, %{}, fn {k, v} -> + if is_binary(k) and k in match do + {k, v} + else + {k, keep_values(v, match)} + end + end) + end + + defp keep_values([_ | _] = list, match) do + Enum.map(list, &keep_values(&1, match)) + end + + defp 
keep_values(_other, _match), do: "[FILTERED]" + + defp log_level(nil, _conn), do: :info + defp log_level(level, _conn) when is_atom(level), do: level + + defp log_level({mod, fun, args}, conn) when is_atom(mod) and is_atom(fun) and is_list(args) do + apply(mod, fun, [conn | args]) + end + + ## Event: [:phoenix, :endpoint, *] + + @doc false + def phoenix_endpoint_start(_, _, %{conn: conn} = metadata, _) do + case log_level(metadata[:options][:log], conn) do + false -> + :ok + + level -> + Logger.log(level, fn -> + %{method: method, request_path: request_path} = conn + [method, ?\s, request_path] + end) + end + end + + @doc false + def phoenix_endpoint_stop(_, %{duration: duration}, %{conn: conn} = metadata, _) do + case log_level(metadata[:options][:log], conn) do + false -> + :ok + + level -> + Logger.log(level, fn -> + %{status: status, state: state} = conn + status = status_to_string(status) + [connection_type(state), ?\s, status, " in ", duration(duration)] + end) + end + end + + defp connection_type(:set_chunked), do: "Chunked" + defp connection_type(_), do: "Sent" + + ## Event: [:phoenix, :error_rendered] + + @doc false + def phoenix_error_rendered(_, _, %{log: false}, _), do: :ok + + def phoenix_error_rendered(_, _, %{log: level, status: status, kind: kind, reason: reason}, _) do + Logger.log(level, fn -> + [ + "Converted ", + Atom.to_string(kind), + ?\s, + error_banner(kind, reason), + " to ", + status_to_string(status), + " response" + ] + end) + end + + defp status_to_string(status) do + status |> Plug.Conn.Status.code() |> Integer.to_string() + end + + defp error_banner(:error, %type{}), do: inspect(type) + defp error_banner(_kind, reason), do: inspect(reason) + + ## Event: [:phoenix, :router_dispatch, :start] + + @doc false + def phoenix_router_dispatch_start(_, _, %{log: false}, _), do: :ok + + def phoenix_router_dispatch_start(_, _, metadata, _) do + %{log: level, conn: conn, plug: plug} = metadata + level = log_level(level, conn) + + Logger.log(level, 
fn -> + %{ + pipe_through: pipe_through, + plug_opts: plug_opts + } = metadata + + log_mfa = + case metadata[:mfa] do + {mod, fun, arity} -> mfa(mod, fun, arity) + _ when is_atom(plug_opts) -> mfa(plug, plug_opts, 2) + _ -> inspect(plug) + end + + [ + "Processing with ", + log_mfa, + ?\n, + " Parameters: ", + params(conn.params), + ?\n, + " Pipelines: ", + inspect(pipe_through) + ] + end) + end + + defp mfa(mod, fun, arity), + do: [inspect(mod), ?., Atom.to_string(fun), ?/, arity + ?0] + + defp params(%Plug.Conn.Unfetched{}), do: "[UNFETCHED]" + defp params(params), do: params |> filter_values() |> inspect() + + ## Event: [:phoenix, :socket_connected] + + @doc false + def phoenix_socket_connected(_, _, %{log: false}, _), do: :ok + + def phoenix_socket_connected(_, %{duration: duration}, %{log: level} = meta, _) do + Logger.log(level, fn -> + %{ + transport: transport, + params: params, + user_socket: user_socket, + result: result, + serializer: serializer + } = meta + + [ + connect_result(result), + inspect(user_socket), + " in ", + duration(duration), + "\n Transport: ", + inspect(transport), + "\n Serializer: ", + inspect(serializer), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp connect_result(:ok), do: "CONNECTED TO " + defp connect_result(:error), do: "REFUSED CONNECTION TO " + + @doc false + def phoenix_socket_drain(_, _, %{log: false}, _), do: :ok + + def phoenix_socket_drain( + _, + %{count: count, total: total, index: index, rounds: rounds}, + %{log: level} = meta, + _ + ) do + Logger.log(level, fn -> + %{socket: socket, interval: interval} = meta + + [ + "DRAINING #{count} of #{total} total connection(s) for socket ", + inspect(socket), + " every #{interval}ms - ", + "round #{index} of #{rounds}" + ] + end) + end + + ## Event: [:phoenix, :channel_joined] + + @doc false + def phoenix_channel_joined(_, %{duration: duration}, %{socket: socket} = metadata, _) do + channel_log(:log_join, socket, fn -> + %{result: result, 
params: params} = metadata + + [ + join_result(result), + socket.topic, + " in ", + duration(duration), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp join_result(:ok), do: "JOINED " + defp join_result(:error), do: "REFUSED JOIN " + + ## Event: [:phoenix, :channel_handle_in] + + @doc false + def phoenix_channel_handled_in(_, %{duration: duration}, %{socket: socket} = metadata, _) do + channel_log(:log_handle_in, socket, fn -> + %{event: event, params: params} = metadata + + [ + "HANDLED ", + event, + " INCOMING ON ", + socket.topic, + " (", + inspect(socket.channel), + ") in ", + duration(duration), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp channel_log(_log_option, %{topic: "phoenix" <> _}, _fun), do: :ok + + defp channel_log(log_option, %{private: private}, fun) do + if level = Map.get(private, log_option) do + Logger.log(level, fun) + end + end +end diff --git a/deps/phoenix/lib/phoenix/naming.ex b/deps/phoenix/lib/phoenix/naming.ex new file mode 100644 index 0000000..174ebb1 --- /dev/null +++ b/deps/phoenix/lib/phoenix/naming.ex @@ -0,0 +1,132 @@ +defmodule Phoenix.Naming do + @moduledoc """ + Conveniences for inflecting and working with names in Phoenix. + """ + + @doc """ + Extracts the resource name from an alias. + + ## Examples + + iex> Phoenix.Naming.resource_name(MyApp.User) + "user" + + iex> Phoenix.Naming.resource_name(MyApp.UserView, "View") + "user" + + """ + @spec resource_name(String.Chars.t, String.t) :: String.t + def resource_name(alias, suffix \\ "") do + alias + |> to_string() + |> Module.split() + |> List.last() + |> unsuffix(suffix) + |> underscore() + end + + @doc """ + Removes the given suffix from the name if it exists. 
+ + ## Examples + + iex> Phoenix.Naming.unsuffix("MyApp.User", "View") + "MyApp.User" + + iex> Phoenix.Naming.unsuffix("MyApp.UserView", "View") + "MyApp.User" + + """ + @spec unsuffix(String.t, String.t) :: String.t + def unsuffix(value, suffix) do + string = to_string(value) + suffix_size = byte_size(suffix) + prefix_size = byte_size(string) - suffix_size + case string do + <> -> prefix + _ -> string + end + end + + @doc """ + Converts a string to underscore case. + + ## Examples + + iex> Phoenix.Naming.underscore("MyApp") + "my_app" + + In general, `underscore` can be thought of as the reverse of + `camelize`, however, in some cases formatting may be lost: + + Phoenix.Naming.underscore "SAPExample" #=> "sap_example" + Phoenix.Naming.camelize "sap_example" #=> "SapExample" + + """ + @spec underscore(String.t) :: String.t + + def underscore(value), do: Macro.underscore(value) + + defp to_lower_char(char) when char in ?A..?Z, do: char + 32 + defp to_lower_char(char), do: char + + @doc """ + Converts a string to camel case. + + Takes an optional `:lower` flag to return lowerCamelCase. + + ## Examples + + iex> Phoenix.Naming.camelize("my_app") + "MyApp" + + iex> Phoenix.Naming.camelize("my_app", :lower) + "myApp" + + In general, `camelize` can be thought of as the reverse of + `underscore`, however, in some cases formatting may be lost: + + Phoenix.Naming.underscore "SAPExample" #=> "sap_example" + Phoenix.Naming.camelize "sap_example" #=> "SapExample" + + """ + @spec camelize(String.t) :: String.t + def camelize(value), do: Macro.camelize(value) + + @spec camelize(String.t, :lower) :: String.t + def camelize("", :lower), do: "" + def camelize(<>, :lower) do + camelize(t, :lower) + end + def camelize(<> = value, :lower) do + <<_first, rest :: binary>> = camelize(value) + <> <> rest + end + + @doc """ + Converts an attribute/form field into its humanize version. 
+ + ## Examples + + iex> Phoenix.Naming.humanize(:username) + "Username" + iex> Phoenix.Naming.humanize(:created_at) + "Created at" + iex> Phoenix.Naming.humanize("user_id") + "User" + + """ + @spec humanize(atom | String.t) :: String.t + def humanize(atom) when is_atom(atom), + do: humanize(Atom.to_string(atom)) + def humanize(bin) when is_binary(bin) do + bin = + if String.ends_with?(bin, "_id") do + binary_part(bin, 0, byte_size(bin) - 3) + else + bin + end + + bin |> String.replace("_", " ") |> String.capitalize + end +end diff --git a/deps/phoenix/lib/phoenix/param.ex b/deps/phoenix/lib/phoenix/param.ex new file mode 100644 index 0000000..34a5d70 --- /dev/null +++ b/deps/phoenix/lib/phoenix/param.ex @@ -0,0 +1,139 @@ +defprotocol Phoenix.Param do + @moduledoc ~S""" + A protocol that converts data structures into URL parameters. + + This protocol is used by `Phoenix.VerifiedRoutes` and other parts of the + Phoenix stack. For example, when you write: + + ~p"/user/#{@user}/edit" + + Phoenix knows how to extract the `:id` from `@user` thanks + to this protocol. + + (Deprecated URL helpers, e.g. `user_path(conn, :edit, @user)`, work the + same way.) + + By default, Phoenix implements this protocol for integers, binaries, atoms, + and structs. For structs, a key `:id` is assumed, but you may provide a + specific implementation. + + The term `nil` cannot be converted to param. + + ## Custom parameters + + In order to customize the parameter for any struct, + one can simply implement this protocol. For example for a `Date` struct: + + defimpl Phoenix.Param, for: Date do + def to_param(date) do + Date.to_string(date) + end + end + + However, for convenience, this protocol can also be + derivable. For example: + + defmodule User do + @derive Phoenix.Param + defstruct [:id, :username] + end + + By default, the derived implementation will also use + the `:id` key. 
In case the user does not contain an + `:id` key, the key can be specified with an option: + + defmodule User do + @derive {Phoenix.Param, key: :username} + defstruct [:username] + end + + will automatically use `:username` in URLs. + + When using Ecto, you must call `@derive` before + your `schema` call: + + @derive {Phoenix.Param, key: :username} + schema "users" do + + """ + + @fallback_to_any true + + @spec to_param(term) :: String.t() + def to_param(term) +end + +defimpl Phoenix.Param, for: Integer do + def to_param(int), do: Integer.to_string(int) +end + +defimpl Phoenix.Param, for: Float do + def to_param(float), do: Float.to_string(float) +end + +defimpl Phoenix.Param, for: BitString do + def to_param(bin) when is_binary(bin), do: bin +end + +defimpl Phoenix.Param, for: Atom do + def to_param(nil) do + raise ArgumentError, "cannot convert nil to param" + end + + def to_param(atom) do + Atom.to_string(atom) + end +end + +defimpl Phoenix.Param, for: Map do + def to_param(map) do + raise ArgumentError, + "maps cannot be converted to_param. A struct was expected, got: #{inspect(map)}" + end +end + +defimpl Phoenix.Param, for: Any do + defmacro __deriving__(module, struct, options) do + key = Keyword.get(options, :key, :id) + + unless Map.has_key?(struct, key) do + raise ArgumentError, + "cannot derive Phoenix.Param for struct #{inspect(module)} " <> + "because it does not have key #{inspect(key)}. 
Please pass " <> + "the :key option when deriving" + end + + quote do + defimpl Phoenix.Param, for: unquote(module) do + def to_param(%{unquote(key) => nil}) do + raise ArgumentError, + "cannot convert #{inspect(unquote(module))} to param, " <> + "key #{inspect(unquote(key))} contains a nil value" + end + + def to_param(%{unquote(key) => key}) when is_integer(key), do: Integer.to_string(key) + def to_param(%{unquote(key) => key}) when is_binary(key), do: key + def to_param(%{unquote(key) => key}), do: Phoenix.Param.to_param(key) + end + end + end + + def to_param(%{id: nil}) do + raise ArgumentError, "cannot convert struct to param, key :id contains a nil value" + end + + def to_param(%{id: id}) when is_integer(id), do: Integer.to_string(id) + def to_param(%{id: id}) when is_binary(id), do: id + def to_param(%{id: id}), do: Phoenix.Param.to_param(id) + + def to_param(map) when is_map(map) do + raise ArgumentError, + "structs expect an :id key when converting to_param or a custom implementation " <> + "of the Phoenix.Param protocol (read Phoenix.Param docs for more information), " <> + "got: #{inspect(map)}" + end + + def to_param(data) do + raise Protocol.UndefinedError, protocol: @protocol, value: data + end +end diff --git a/deps/phoenix/lib/phoenix/presence.ex b/deps/phoenix/lib/phoenix/presence.ex new file mode 100644 index 0000000..a936ece --- /dev/null +++ b/deps/phoenix/lib/phoenix/presence.ex @@ -0,0 +1,730 @@ +defmodule Phoenix.Presence do + @moduledoc """ + Provides Presence tracking to processes and channels. + + This behaviour provides presence features such as fetching + presences for a given topic, as well as handling diffs of + join and leave events as they occur in real-time. Using this + module defines a supervisor and a module that implements the + `Phoenix.Tracker` behaviour that uses `Phoenix.PubSub` to + broadcast presence updates. 
+ + In case you want to use only a subset of the functionality + provided by `Phoenix.Presence`, such as tracking processes + but without broadcasting updates, we recommend that you look + at the `Phoenix.Tracker` functionality from the `phoenix_pubsub` + project. + + ## Example Usage + + Start by defining a presence module within your application + which uses `Phoenix.Presence` and provide the `:otp_app` which + holds your configuration, as well as the `:pubsub_server`. + + defmodule MyAppWeb.Presence do + use Phoenix.Presence, + otp_app: :my_app, + pubsub_server: MyApp.PubSub + end + + The `:pubsub_server` must point to an existing pubsub server + running in your application, which is included by default as + `MyApp.PubSub` for new applications. + + Next, add the new supervisor to your supervision tree in + `lib/my_app/application.ex`. It must be after the PubSub child + and before the endpoint: + + children = [ + ... + {Phoenix.PubSub, name: MyApp.PubSub}, + MyAppWeb.Presence, + MyAppWeb.Endpoint + ] + + Once added, presences can be tracked in your channel after joining: + + defmodule MyAppWeb.MyChannel do + use MyAppWeb, :channel + alias MyAppWeb.Presence + + def join("some:topic", _params, socket) do + send(self(), :after_join) + {:ok, assign(socket, :user_id, ...)} + end + + def handle_info(:after_join, socket) do + {:ok, _} = Presence.track(socket, socket.assigns.user_id, %{ + online_at: inspect(System.system_time(:second)) + }) + + push(socket, "presence_state", Presence.list(socket)) + {:noreply, socket} + end + end + + In the example above, `Presence.track` is used to register this channel's process as a + presence for the socket's user ID, with a map of metadata. + Next, the current presence information for + the socket's topic is pushed to the client as a `"presence_state"` event. + + Finally, a diff of presence join and leave events will be sent to the + client as they happen in real-time with the "presence_diff" event. 
+ The diff structure will be a map of `:joins` and `:leaves` of the form: + + %{ + joins: %{"123" => %{metas: [%{status: "away", phx_ref: ...}]}}, + leaves: %{"456" => %{metas: [%{status: "online", phx_ref: ...}]}} + }, + + See `c:list/1` for more information on the presence data structure. + + ## Custom dispatcher + + It's possible to customize the dispatcher module used to broadcast. + By default, `Phoenix.Channel.Server` is used, which is the same dispatcher + used by channels. To customize the dispatcher, pass the `:dispatcher` option + when using `Phoenix.Presence`: + + use Phoenix.Presence, + otp_app: :my_app, + pubsub_server: MyApp.PubSub, + dispatcher: MyApp.CustomDispatcher + + See `m:Phoenix.PubSub#module-custom-dispatching` for more information on + custom dispatchers. + + ## Fetching Presence Information + + Presence metadata should be minimized and used to store small, + ephemeral state, such as a user's "online" or "away" status. + More detailed information, such as user details that need to be fetched + from the database, can be achieved by overriding the `c:fetch/2` function. + + The `c:fetch/2` callback is triggered when using `c:list/1` and on + every update, and it serves as a mechanism to fetch presence information + a single time, before broadcasting the information to all channel subscribers. + This prevents N query problems and gives you a single place to group + isolated data fetching to extend presence metadata. + + The function must return a map of data matching the outlined Presence + data structure, including the `:metas` key, but can extend the map of + information to include any additional information. 
For example: + + def fetch(_topic, presences) do + users = presences |> Map.keys() |> Accounts.get_users_map() + + for {key, %{metas: metas}} <- presences, into: %{} do + {key, %{metas: metas, user: users[String.to_integer(key)]}} + end + end + + Where `Account.get_users_map/1` could be implemented like: + + def get_users_map(ids) do + query = + from u in User, + where: u.id in ^ids, + select: {u.id, u} + + query |> Repo.all() |> Enum.into(%{}) + end + + The `fetch/2` function above fetches all users from the database who + have registered presences for the given topic. The presences + information is then extended with a `:user` key of the user's + information, while maintaining the required `:metas` field from the + original presence data. + + ## Using Elixir as a Presence Client + + Presence is great for external clients, such as JavaScript applications, but + it can also be used from an Elixir client process to keep track of presence + changes as they happen on the server. This can be accomplished by implementing + the optional [`init/1`](`c:init/1`) and [`handle_metas/4`](`c:handle_metas/4`) + callbacks on your presence module. 
For example, the following callback + receives presence metadata changes, and broadcasts to other Elixir processes + about users joining and leaving: + + defmodule MyApp.Presence do + use Phoenix.Presence, + otp_app: :my_app, + pubsub_server: MyApp.PubSub + + def init(_opts) do + {:ok, %{}} # user-land state + end + + def handle_metas(topic, %{joins: joins, leaves: leaves}, presences, state) do + # fetch existing presence information for the joined users and broadcast the + # event to all subscribers + for {user_id, presence} <- joins do + user_data = %{user: presence.user, metas: Map.fetch!(presences, user_id)} + msg = {MyApp.PresenceClient, {:join, user_data}} + Phoenix.PubSub.local_broadcast(MyApp.PubSub, topic, msg) + end + + # fetch existing presence information for the left users and broadcast the + # event to all subscribers + for {user_id, presence} <- leaves do + metas = + case Map.fetch(presences, user_id) do + {:ok, presence_metas} -> presence_metas + :error -> [] + end + + user_data = %{user: presence.user, metas: metas} + msg = {MyApp.PresenceClient, {:leave, user_data}} + Phoenix.PubSub.local_broadcast(MyApp.PubSub, topic, msg) + end + + {:ok, state} + end + end + + The `handle_metas/4` callback receives the topic, presence diff, current presences + for the topic with their metadata, and any user-land state accumulated from init and + subsequent `handle_metas/4` calls. In our example implementation, we walk the `:joins` and + `:leaves` in the diff, and populate a complete presence from our known presence information. + Then we broadcast to the local node subscribers about user joins and leaves. + + ## Testing with Presence + + Every time the `fetch` callback is invoked, it is done from a separate + process. Given those processes run asynchronously, it is often necessary + to guarantee they have been shutdown at the end of every test. 
This can + be done by using ExUnit's `on_exit` hook plus `fetchers_pids` function: + + on_exit(fn -> + for pid <- MyAppWeb.Presence.fetchers_pids() do + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, _, _, _}, 1000 + end + end) + + """ + + @type presences :: %{String.t() => %{metas: [map()]}} + @type presence :: %{key: String.t(), meta: map()} + @type topic :: String.t() + + @doc """ + Track a channel's process as a presence. + + Tracked presences are grouped by `key`, cast as a string. For example, to + group each user's channels together, use user IDs as keys. Each presence can + be associated with a map of metadata to store small, ephemeral state, such as + a user's online status. To store detailed information, see `c:fetch/2`. + + ## Example + + alias MyApp.Presence + def handle_info(:after_join, socket) do + {:ok, _} = Presence.track(socket, socket.assigns.user_id, %{ + online_at: inspect(System.system_time(:second)) + }) + {:noreply, socket} + end + + """ + @callback track(socket :: Phoenix.Socket.t(), key :: String.t(), meta :: map()) :: + {:ok, ref :: binary()} + | {:error, reason :: term()} + + @doc """ + Track an arbitrary process as a presence. + + Same with `track/3`, except track any process by `topic` and `key`. + """ + @callback track(pid, topic, key :: String.t(), meta :: map()) :: + {:ok, ref :: binary()} + | {:error, reason :: term()} + + @doc """ + Stop tracking a channel's process. + """ + @callback untrack(socket :: Phoenix.Socket.t(), key :: String.t()) :: :ok + + @doc """ + Stop tracking a process. + """ + @callback untrack(pid, topic, key :: String.t()) :: :ok + + @doc """ + Update a channel presence's metadata. + + Replace a presence's metadata by passing a new map or a function that takes + the current map and returns a new one. 
+ """ + @callback update( + socket :: Phoenix.Socket.t(), + key :: String.t(), + meta :: map() | (map() -> map()) + ) :: + {:ok, ref :: binary()} + | {:error, reason :: term()} + + @doc """ + Update a process presence's metadata. + + Same as `update/3`, but with an arbitrary process. + """ + @callback update(pid, topic, key :: String.t(), meta :: map() | (map() -> map())) :: + {:ok, ref :: binary()} + | {:error, reason :: term()} + + @doc """ + Returns presences for a socket/topic. + + ## Presence data structure + + The presence information is returned as a map with presences grouped + by key, cast as a string, and accumulated metadata, with the following form: + + %{key => %{metas: [%{phx_ref: ..., ...}, ...]}} + + For example, imagine a user with id `123` online from two + different devices, as well as a user with id `456` online from + just one device. The following presence information might be returned: + + %{"123" => %{metas: [%{status: "away", phx_ref: ...}, + %{status: "online", phx_ref: ...}]}, + "456" => %{metas: [%{status: "online", phx_ref: ...}]}} + + The keys of the map will usually point to a resource ID. The value + will contain a map with a `:metas` key containing a list of metadata + for each resource. Additionally, every metadata entry will contain a + `:phx_ref` key which can be used to uniquely identify metadata for a + given key. In the event that the metadata was previously updated, + a `:phx_ref_prev` key will be present containing the previous + `:phx_ref` value. + """ + @callback list(socket_or_topic :: Phoenix.Socket.t() | topic) :: presences + + @doc """ + Returns the map of presence metadata for a socket/topic-key pair. + + ## Examples + + Uses the same data format as each presence in `c:list/1`, but only + returns metadata for the presences under a topic and key pair. 
For example, + a user with key `"user1"`, connected to the same chat room `"room:1"` from two + devices, could return: + + iex> MyPresence.get_by_key("room:1", "user1") + [%{name: "User 1", metas: [%{device: "Desktop"}, %{device: "Mobile"}]}] + + Like `c:list/1`, the presence metadata is passed to the `fetch` + callback of your presence module to fetch any additional information. + """ + @callback get_by_key(Phoenix.Socket.t() | topic, key :: String.t()) :: [presence] + + @doc """ + Extend presence information with additional data. + + When `c:list/1` is used to list all presences of the given `topic`, this + callback is triggered once to modify the result before it is broadcasted to + all channel subscribers. This avoids N query problems and provides a single + place to extend presence metadata. You must return a map of data matching the + original result, including the `:metas` key, but can extend the map to include + any additional information. + + The default implementation simply passes `presences` through unchanged. + + ## Example + + def fetch(_topic, presences) do + query = + from u in User, + where: u.id in ^Map.keys(presences), + select: {u.id, u} + + users = query |> Repo.all() |> Enum.into(%{}) + for {key, %{metas: metas}} <- presences, into: %{} do + {key, %{metas: metas, user: users[key]}} + end + end + + """ + @callback fetch(topic, presences) :: presences + + @doc """ + Initializes the presence client state. + + Invoked when your presence module starts, allows dynamically + providing initial state for handling presence metadata. + """ + @callback init(state :: term) :: {:ok, new_state :: term} + + @doc """ + Receives presence metadata changes. 
+ """ + @callback handle_metas(topic :: String.t(), diff :: map(), presences :: map(), state :: term) :: + {:ok, term} + + @optional_callbacks init: 1, handle_metas: 4 + + defmacro __using__(opts) do + quote location: :keep, bind_quoted: [opts: opts] do + @behaviour Phoenix.Presence + @opts opts + @task_supervisor Module.concat(__MODULE__, "TaskSupervisor") + + _ = opts[:otp_app] || raise "use Phoenix.Presence expects :otp_app to be given" + + # User defined + def fetch(_topic, presences), do: presences + defoverridable fetch: 2 + + # Private + + def child_spec(opts) do + opts = Keyword.merge(@opts, opts) + + %{ + id: __MODULE__, + start: {Phoenix.Presence, :start_link, [__MODULE__, @task_supervisor, opts]}, + type: :supervisor + } + end + + # API + + def track(%Phoenix.Socket{} = socket, key, meta) do + track(socket.channel_pid, socket.topic, key, meta) + end + + def track(pid, topic, key, meta) do + Phoenix.Tracker.track(__MODULE__, pid, topic, key, meta) + end + + def untrack(%Phoenix.Socket{} = socket, key) do + untrack(socket.channel_pid, socket.topic, key) + end + + def untrack(pid, topic, key) do + Phoenix.Tracker.untrack(__MODULE__, pid, topic, key) + end + + def update(%Phoenix.Socket{} = socket, key, meta) do + update(socket.channel_pid, socket.topic, key, meta) + end + + def update(pid, topic, key, meta) do + Phoenix.Tracker.update(__MODULE__, pid, topic, key, meta) + end + + def list(%Phoenix.Socket{topic: topic}), do: list(topic) + def list(topic), do: Phoenix.Presence.list(__MODULE__, topic) + + def get_by_key(%Phoenix.Socket{topic: topic}, key), do: get_by_key(topic, key) + def get_by_key(topic, key), do: Phoenix.Presence.get_by_key(__MODULE__, topic, key) + + def fetchers_pids(), do: Task.Supervisor.children(@task_supervisor) + end + end + + defmodule Tracker do + @moduledoc false + use Phoenix.Tracker + + def start_link({module, task_supervisor, opts}) do + pubsub_server = + opts[:pubsub_server] || raise "use Phoenix.Presence expects :pubsub_server 
to be given" + + dispatcher = opts[:dispatcher] || Phoenix.Channel.Server + + Phoenix.Tracker.start_link( + __MODULE__, + {module, task_supervisor, pubsub_server, dispatcher}, + opts + ) + end + + def init(state), do: Phoenix.Presence.init(state) + + def handle_diff(diff, state), do: Phoenix.Presence.handle_diff(diff, state) + + def handle_info(msg, state), + do: Phoenix.Presence.handle_info(msg, state) + end + + @doc false + def start_link(module, task_supervisor, opts) do + otp_app = opts[:otp_app] + + opts = + opts + |> Keyword.merge(Application.get_env(otp_app, module, [])) + |> Keyword.put(:name, module) + + children = [ + {Task.Supervisor, name: task_supervisor}, + {Tracker, {module, task_supervisor, opts}} + ] + + sup_opts = [ + strategy: :rest_for_one, + name: Module.concat(module, "Supervisor") + ] + + Supervisor.start_link(children, sup_opts) + end + + @doc false + def init({module, task_supervisor, pubsub_server, dispatcher}) do + state = %{ + module: module, + task_supervisor: task_supervisor, + pubsub_server: pubsub_server, + topics: %{}, + tasks: :queue.new(), + current_task: nil, + client_state: nil, + dispatcher: dispatcher + } + + client_state = + if function_exported?(module, :handle_metas, 4) do + unless function_exported?(module, :init, 1) do + raise ArgumentError, """ + missing #{inspect(module)}.init/1 callback for client state + + When you implement the handle_metas/4 callback, you must also + implement init/1. 
For example, add the following to + #{inspect(module)}: + + def init(_opts), do: {:ok, %{}} + + """ + end + + case module.init(%{}) do + {:ok, client_state} -> + client_state + + other -> + raise ArgumentError, """ + expected #{inspect(module)}.init/1 to return {:ok, state}, got: #{inspect(other)} + """ + end + end + + {:ok, %{state | client_state: client_state}} + end + + @doc false + def handle_diff(diff, state) do + {:ok, async_merge(state, diff)} + end + + @doc false + def handle_info({task_ref, {:phoenix, ref, computed_diffs}}, state) do + %{current_task: current_task} = state + {^ref, %Task{ref: ^task_ref} = task} = current_task + Task.shutdown(task) + + Enum.each(computed_diffs, fn {topic, presence_diff} -> + broadcast = %Phoenix.Socket.Broadcast{ + topic: topic, + event: "presence_diff", + payload: presence_diff + } + + Phoenix.PubSub.local_broadcast(state.pubsub_server, topic, broadcast, state.dispatcher) + end) + + new_state = + if function_exported?(state.module, :handle_metas, 4) do + do_handle_metas(state, computed_diffs) + else + state + end + + {:noreply, next_task(new_state)} + end + + @doc false + def list(module, topic) do + grouped = + module + |> Phoenix.Tracker.list(topic) + |> group() + + module.fetch(topic, grouped) + end + + @doc false + def get_by_key(module, topic, key) do + string_key = to_string(key) + + case Phoenix.Tracker.get_by_key(module, topic, key) do + [] -> + [] + + [_ | _] = pid_metas -> + metas = Enum.map(pid_metas, fn {_pid, meta} -> meta end) + %{^string_key => fetched_metas} = module.fetch(topic, %{string_key => %{metas: metas}}) + fetched_metas + end + end + + @doc false + def group(presences) do + presences + |> Enum.reverse() + |> Enum.reduce(%{}, fn {key, meta}, acc -> + Map.update(acc, to_string(key), %{metas: [meta]}, fn %{metas: metas} -> + %{metas: [meta | metas]} + end) + end) + end + + defp send_continue(%Task{} = task, ref), do: send(task.pid, {ref, :continue}) + + defp next_task(state) do + case 
:queue.out(state.tasks) do + {{:value, {ref, %Task{} = next}}, remaining_tasks} -> + send_continue(next, ref) + %{state | current_task: {ref, next}, tasks: remaining_tasks} + + {:empty, _} -> + %{state | current_task: nil, tasks: :queue.new()} + end + end + + defp do_handle_metas(state, computed_diffs) do + Enum.reduce(computed_diffs, state, fn {topic, presence_diff}, acc -> + updated_topics = merge_diff(acc.topics, topic, presence_diff) + + topic_presences = + case Map.fetch(updated_topics, topic) do + {:ok, presences} -> presences + :error -> %{} + end + + case acc.module.handle_metas(topic, presence_diff, topic_presences, acc.client_state) do + {:ok, updated_client_state} -> + %{acc | topics: updated_topics, client_state: updated_client_state} + + other -> + raise ArgumentError, """ + expected #{inspect(acc.module)}.handle_metas/4 to return {:ok, new_state}. + + got: #{inspect(other)} + """ + end + end) + end + + defp async_merge(state, diff) do + %{module: module} = state + ref = make_ref() + + new_task = + Task.Supervisor.async(state.task_supervisor, fn -> + computed_diffs = + Enum.map(diff, fn {topic, {joins, leaves}} -> + joins = module.fetch(topic, Phoenix.Presence.group(joins)) + leaves = module.fetch(topic, Phoenix.Presence.group(leaves)) + {topic, %{joins: joins, leaves: leaves}} + end) + + receive do + {^ref, :continue} -> {:phoenix, ref, computed_diffs} + end + end) + + if state.current_task do + %{state | tasks: :queue.in({ref, new_task}, state.tasks)} + else + send_continue(new_task, ref) + %{state | current_task: {ref, new_task}} + end + end + + defp merge_diff(topics, topic, %{leaves: leaves, joins: joins} = _diff) do + # add new topic if needed + updated_topics = + if Map.has_key?(topics, topic) do + topics + else + add_new_topic(topics, topic) + end + + # merge diff into topics + {updated_topics, _topic} = Enum.reduce(joins, {updated_topics, topic}, &handle_join/2) + {updated_topics, _topic} = Enum.reduce(leaves, {updated_topics, topic}, 
&handle_leave/2) + + # if no more presences for given topic, remove topic + if topic_presences_count(updated_topics, topic) == 0 do + remove_topic(updated_topics, topic) + else + updated_topics + end + end + + defp handle_join({joined_key, presence}, {topics, topic}) do + joined_metas = Map.get(presence, :metas, []) + {add_new_presence_or_metas(topics, topic, joined_key, joined_metas), topic} + end + + defp handle_leave({left_key, presence}, {topics, topic}) do + {remove_presence_or_metas(topics, topic, left_key, presence), topic} + end + + defp add_new_presence_or_metas( + topics, + topic, + key, + new_metas + ) do + topic_presences = topics[topic] + + updated_topic = + case Map.fetch(topic_presences, key) do + # existing presence, add new metas + {:ok, existing_metas} -> + remaining_metas = new_metas -- existing_metas + updated_metas = existing_metas ++ remaining_metas + Map.put(topic_presences, key, updated_metas) + + # there are no presences for that key + :error -> + Map.put_new(topic_presences, key, new_metas) + end + + Map.put(topics, topic, updated_topic) + end + + defp remove_presence_or_metas( + topics, + topic, + key, + deleted_metas + ) do + topic_presences = topics[topic] + presence_metas = Map.get(topic_presences, key, []) + remaining_metas = presence_metas -- Map.get(deleted_metas, :metas, []) + + updated_topic = + case remaining_metas do + [] -> Map.delete(topic_presences, key) + _ -> Map.put(topic_presences, key, remaining_metas) + end + + Map.put(topics, topic, updated_topic) + end + + defp add_new_topic(topics, topic) do + Map.put_new(topics, topic, %{}) + end + + defp remove_topic(topics, topic) do + Map.delete(topics, topic) + end + + defp topic_presences_count(topics, topic) do + map_size(topics[topic]) + end +end diff --git a/deps/phoenix/lib/phoenix/router.ex b/deps/phoenix/lib/phoenix/router.ex new file mode 100644 index 0000000..763d45d --- /dev/null +++ b/deps/phoenix/lib/phoenix/router.ex @@ -0,0 +1,1334 @@ +defmodule Phoenix.Router do + 
defmodule NoRouteError do + @moduledoc """ + Exception raised when no route is found. + """ + defexception plug_status: 404, message: "no route found", conn: nil, router: nil + + def exception(opts) do + conn = Keyword.fetch!(opts, :conn) + router = Keyword.fetch!(opts, :router) + path = "/" <> Enum.join(conn.path_info, "/") + + %NoRouteError{ + message: "no route found for #{conn.method} #{path} (#{inspect(router)})", + conn: conn, + router: router + } + end + end + + defmodule MalformedURIError do + @moduledoc """ + Exception raised when the URI is malformed on matching. + """ + defexception [:message, plug_status: 400] + end + + @moduledoc """ + Defines a Phoenix router. + + The router provides a set of macros for generating routes + that dispatch to specific controllers and actions. Those + macros are named after HTTP verbs. For example: + + defmodule MyAppWeb.Router do + use Phoenix.Router + + get "/pages/:page", PageController, :show + end + + The `get/3` macro above accepts a request to `/pages/hello` and dispatches + it to `PageController`'s `show` action with `%{"page" => "hello"}` in + `params`. + + Phoenix's router is extremely efficient, as it relies on Elixir + pattern matching for matching routes and serving requests. + + ## Routing + + `get/3`, `post/3`, `put/3`, and other macros named after HTTP verbs are used + to create routes. + + The route: + + get "/pages", PageController, :index + + matches a `GET` request to `/pages` and dispatches it to the `index` action in + `PageController`. + + get "/pages/:page", PageController, :show + + matches `/pages/hello` and dispatches to the `show` action with + `%{"page" => "hello"}` in `params`. + + defmodule PageController do + def show(conn, params) do + # %{"page" => "hello"} == params + end + end + + Partial and multiple segments can be matched. For example: + + get "/api/v:version/pages/:id", PageController, :show + + matches `/api/v1/pages/2` and puts `%{"version" => "1", "id" => "2"}` in + `params`. 
Only the trailing part of a segment can be captured. + + Routes are matched from top to bottom. The second route here: + + get "/pages/:page", PageController, :show + get "/pages/hello", PageController, :hello + + will never match `/pages/hello` because `/pages/:page` matches that first. + + Routes can use glob-like patterns to match trailing segments. + + get "/pages/*page", PageController, :show + + matches `/pages/hello/world` and puts the globbed segments in `params["page"]`. + + GET /pages/hello/world + %{"page" => ["hello", "world"]} = params + + Globs cannot have prefixes nor suffixes, but can be mixed with variables: + + get "/pages/he:page/*rest", PageController, :show + + matches + + GET /pages/hello + %{"page" => "llo", "rest" => []} = params + + GET /pages/hey/there/world + %{"page" => "y", "rest" => ["there" "world"]} = params + + > #### Why the macros? {: .info} + > + > Phoenix does its best to keep the usage of macros low. You may have noticed, + > however, that the `Phoenix.Router` relies heavily on macros. Why is that? + > + > We use `get`, `post`, `put`, and `delete` to define your routes. We use macros + > for two purposes: + > + > * They define the routing engine, used on every request, to choose which + > controller to dispatch the request to. Thanks to macros, Phoenix compiles + > all of your routes to a single case-statement with pattern matching rules, + > which is heavily optimized by the Erlang VM + > + > * For each route you define, we also define metadata to implement `Phoenix.VerifiedRoutes`. + > As we will soon learn, verified routes allows to us to reference any route + > as if it is a plain looking string, except it is verified by the compiler + > to be valid (making it much harder to ship broken links, forms, mails, etc + > to production) + > + > In other words, the router relies on macros to build applications that are + > faster and safer. 
Also remember that macros in Elixir are compile-time only, + > which gives plenty of stability after the code is compiled. Phoenix also provides + > introspection for all defined routes via `mix phx.routes`. + + ## Generating routes + + For generating routes inside your application, see the `Phoenix.VerifiedRoutes` + documentation for `~p` based route generation which is the preferred way to + generate route paths and URLs with compile-time verification. + + Phoenix also supports generating function helpers, which was the default + mechanism in Phoenix v1.6 and earlier. We will explore it next. + + ### Helpers (deprecated) + + Phoenix generates a module `Helpers` inside your router by default, which contains + named helpers to help developers generate and keep their routes up to date. + Helpers can be disabled by passing `helpers: false` to `use Phoenix.Router`. + + Helpers are automatically generated based on the controller name. + For example, the route: + + get "/pages/:page", PageController, :show + + will generate the following named helper: + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, "hello") + "/pages/hello" + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, "hello", some: "query") + "/pages/hello?some=query" + + MyAppWeb.Router.Helpers.page_url(conn_or_endpoint, :show, "hello") + "http://example.com/pages/hello" + + MyAppWeb.Router.Helpers.page_url(conn_or_endpoint, :show, "hello", some: "query") + "http://example.com/pages/hello?some=query" + + If the route contains glob-like patterns, parameters for those have to be given as + list: + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, ["hello", "world"]) + "/pages/hello/world" + + The URL generated in the named URL helpers is based on the configuration for + `:url`, `:http` and `:https`. 
However, if for some reason you need to manually + control the URL generation, the url helpers also allow you to pass in a `URI` + struct: + + uri = %URI{scheme: "https", host: "other.example.com"} + MyAppWeb.Router.Helpers.page_url(uri, :show, "hello") + "https://other.example.com/pages/hello" + + The named helper can also be customized with the `:as` option. Given + the route: + + get "/pages/:page", PageController, :show, as: :special_page + + the named helper will be: + + MyAppWeb.Router.Helpers.special_page_path(conn, :show, "hello") + "/pages/hello" + + ## Scopes and Resources + + It is very common in Phoenix applications to namespace all of your + routes under the application scope: + + scope "/", MyAppWeb do + get "/pages/:id", PageController, :show + end + + The route above will dispatch to `MyAppWeb.PageController`. This syntax + is convenient for developers, since we don't have to repeat `MyAppWeb.` + prefix on all routes + + Like all paths, you can define dynamic segments that will be applied as + parameters in the controller: + + scope "/api/:version", MyAppWeb do + get "/pages/:id", PageController, :show + end + + For example, the route above will match on the path `"/api/v1/pages/1"` + and in the controller the `params` argument will have a map with the + key `:version` with the value `"v1"`. + + Phoenix also provides a `resources/4` macro that allows developers + to generate "RESTful" routes to a given resource: + + defmodule MyAppWeb.Router do + use Phoenix.Router, helpers: false + + resources "/pages", PageController, only: [:show] + resources "/users", UserController, except: [:delete] + end + + Finally, Phoenix ships with a `mix phx.routes` task that nicely + formats all routes in a given router. 
We can use it to verify all + routes included in the router above: + + $ mix phx.routes + GET /pages/:id PageController.show/2 + GET /users UserController.index/2 + GET /users/:id/edit UserController.edit/2 + GET /users/new UserController.new/2 + GET /users/:id UserController.show/2 + POST /users UserController.create/2 + PATCH /users/:id UserController.update/2 + PUT /users/:id UserController.update/2 + + One can also pass a router explicitly as an argument to the task: + + $ mix phx.routes MyAppWeb.Router + + Check `scope/2` and `resources/4` for more information. + + ## Pipelines and plugs + + Once a request arrives at the Phoenix router, it performs + a series of transformations through pipelines until the + request is dispatched to a desired route. + + Such transformations are defined via plugs, as defined + in the [Plug](https://github.com/elixir-lang/plug) specification. + Once a pipeline is defined, it can be piped through per scope. + + For example: + + defmodule MyAppWeb.Router do + use Phoenix.Router + + pipeline :browser do + plug :fetch_session + plug :accepts, ["html"] + end + + scope "/" do + pipe_through :browser + + # browser related routes and resources + end + end + + `Phoenix.Router` imports functions from both `Plug.Conn` and `Phoenix.Controller` + to help define plugs. In the example above, `fetch_session/2` + comes from `Plug.Conn` while `accepts/2` comes from `Phoenix.Controller`. + + Note that router pipelines are only invoked after a route is found. + No plug is invoked in case no matches were found. + + ## Learn more + + See the [Routing](routing.md) guide for more information and examples + within an actual Phoenix application. 
+ """ + + alias Phoenix.Router.{Resource, Scope, Route, Helpers} + + @http_methods [:get, :post, :put, :patch, :delete, :options, :connect, :trace, :head] + + @doc false + defmacro __using__(opts) do + quote do + unquote(prelude(opts)) + unquote(defs()) + unquote(match_dispatch()) + unquote(verified_routes()) + end + end + + defp prelude(opts) do + quote do + Module.register_attribute(__MODULE__, :phoenix_routes, accumulate: true) + # TODO: Require :helpers to be explicit given + @phoenix_helpers Keyword.get(unquote(opts), :helpers, true) + + import Phoenix.Router + + # TODO v2: No longer automatically import dependencies + import Plug.Conn + import Phoenix.Controller + + # Set up initial scope + @phoenix_pipeline nil + Phoenix.Router.Scope.init(__MODULE__) + @before_compile unquote(__MODULE__) + end + end + + # Because those macros are executed multiple times, + # we end-up generating a huge scope that drastically + # affects compilation. We work around it by defining + # those functions only once and calling it over and + # over again. 
+ defp defs() do + quote unquote: false do + var!(add_resources, Phoenix.Router) = fn resource -> + path = resource.path + ctrl = resource.controller + opts = resource.route + + if resource.singleton do + Enum.each(resource.actions, fn + :show -> + get path, ctrl, :show, opts + + :new -> + get path <> "/new", ctrl, :new, opts + + :edit -> + get path <> "/edit", ctrl, :edit, opts + + :create -> + post path, ctrl, :create, opts + + :delete -> + delete path, ctrl, :delete, opts + + :update -> + patch path, ctrl, :update, opts + put path, ctrl, :update, Keyword.put(opts, :as, nil) + end) + else + param = resource.param + + Enum.each(resource.actions, fn + :index -> + get path, ctrl, :index, opts + + :show -> + get path <> "/:" <> param, ctrl, :show, opts + + :new -> + get path <> "/new", ctrl, :new, opts + + :edit -> + get path <> "/:" <> param <> "/edit", ctrl, :edit, opts + + :create -> + post path, ctrl, :create, opts + + :delete -> + delete path <> "/:" <> param, ctrl, :delete, opts + + :update -> + patch path <> "/:" <> param, ctrl, :update, opts + put path <> "/:" <> param, ctrl, :update, Keyword.put(opts, :as, nil) + end) + end + end + end + end + + @doc false + def __call__( + %{private: %{phoenix_router: router, phoenix_bypass: {router, pipes}}} = conn, + metadata, + prepare, + pipeline, + _ + ) do + conn = prepare.(conn, metadata) + + case pipes do + :current -> pipeline.(conn) + _ -> Enum.reduce(pipes, conn, fn pipe, acc -> apply(router, pipe, [acc, []]) end) + end + end + + def __call__(%{private: %{phoenix_bypass: :all}} = conn, metadata, prepare, _, _) do + prepare.(conn, metadata) + end + + def __call__(conn, metadata, prepare, pipeline, {plug, opts}) do + conn = prepare.(conn, metadata) + start = System.monotonic_time() + measurements = %{system_time: System.system_time()} + metadata = %{metadata | conn: conn} + :telemetry.execute([:phoenix, :router_dispatch, :start], measurements, metadata) + + case pipeline.(conn) do + %Plug.Conn{halted: true} = 
halted_conn -> + measurements = %{duration: System.monotonic_time() - start} + metadata = %{metadata | conn: halted_conn} + :telemetry.execute([:phoenix, :router_dispatch, :stop], measurements, metadata) + halted_conn + + %Plug.Conn{} = piped_conn -> + try do + plug.call(piped_conn, plug.init(opts)) + else + conn -> + measurements = %{duration: System.monotonic_time() - start} + metadata = %{metadata | conn: conn} + :telemetry.execute([:phoenix, :router_dispatch, :stop], measurements, metadata) + conn + rescue + e in Plug.Conn.WrapperError -> + measurements = %{duration: System.monotonic_time() - start} + new_metadata = %{conn: conn, kind: :error, reason: e, stacktrace: __STACKTRACE__} + metadata = Map.merge(metadata, new_metadata) + :telemetry.execute([:phoenix, :router_dispatch, :exception], measurements, metadata) + Plug.Conn.WrapperError.reraise(e) + catch + kind, reason -> + measurements = %{duration: System.monotonic_time() - start} + new_metadata = %{conn: conn, kind: kind, reason: reason, stacktrace: __STACKTRACE__} + metadata = Map.merge(metadata, new_metadata) + :telemetry.execute([:phoenix, :router_dispatch, :exception], measurements, metadata) + Plug.Conn.WrapperError.reraise(piped_conn, kind, reason, __STACKTRACE__) + end + end + end + + defp match_dispatch() do + quote location: :keep, generated: true do + @behaviour Plug + + @doc """ + Callback required by Plug that initializes the router + for serving web requests. + """ + def init(opts) do + opts + end + + @doc """ + Callback invoked by Plug on every request. 
+ """ + def call(conn, _opts) do + %{method: method, path_info: path_info, host: host} = conn = prepare(conn) + decoded = Enum.map(path_info, &URI.decode/1) + + case __match_route__(decoded, method, host) do + {metadata, prepare, pipeline, plug_opts} -> + Phoenix.Router.__call__(conn, metadata, prepare, pipeline, plug_opts) + + :error -> + raise NoRouteError, conn: conn, router: __MODULE__ + end + end + + defoverridable init: 1, call: 2 + end + end + + defp verified_routes() do + quote location: :keep, generated: true do + @behaviour Phoenix.VerifiedRoutes + + def formatted_routes(_) do + Phoenix.Router.__formatted_routes__(__MODULE__) + end + + def verified_route?(_, split_path) do + Phoenix.Router.__verified_route__?(__MODULE__, split_path) + end + end + end + + @doc false + defmacro __before_compile__(env) do + routes = env.module |> Module.get_attribute(:phoenix_routes) |> Enum.reverse() + routes_with_exprs = Enum.map(routes, &{&1, Route.exprs(&1)}) + + helpers = + if Module.get_attribute(env.module, :phoenix_helpers) do + Helpers.define(env, routes_with_exprs) + end + + {matches, {pipelines, _}} = + Enum.map_reduce(routes_with_exprs, {[], %{}}, &build_match/2) + + routes_per_path = + routes_with_exprs + |> Enum.group_by(&elem(&1, 1).path, &elem(&1, 0)) + + verifies = + routes_with_exprs + |> Enum.map(&elem(&1, 1).path) + |> Enum.uniq() + |> Enum.map(&build_verify(&1, routes_per_path)) + + verify_catch_all = + quote generated: true do + @doc false + def __verify_route__(_path_info) do + :error + end + end + + match_catch_all = + quote generated: true do + @doc false + def __match_route__(_path_info, _verb, _host) do + :error + end + end + + forward_catch_all = + quote generated: true do + @doc false + def __forward__(_), do: nil + end + + checks = + routes + |> Enum.map(fn %{line: line, metadata: metadata, plug: plug} -> + {line, Map.get(metadata, :mfa, {plug, :init, 1})} + end) + |> Enum.uniq() + |> Enum.map(fn {line, {module, function, arity}} -> + quote 
line: line, do: _ = &(unquote(module).unquote(function) / unquote(arity)) + end) + + keys = [:verb, :path, :plug, :plug_opts, :helper, :metadata] + routes = Enum.map(routes, &Map.take(&1, keys)) + + quote do + @doc false + def __routes__, do: unquote(Macro.escape(routes)) + + @doc false + def __checks__, do: unquote({:__block__, [], checks}) + + @doc false + def __helpers__, do: unquote(helpers) + + defp prepare(conn) do + merge_private(conn, [{:phoenix_router, __MODULE__}, {__MODULE__, conn.script_name}]) + end + + unquote(pipelines) + unquote(verifies) + unquote(verify_catch_all) + unquote(matches) + unquote(match_catch_all) + unquote(forward_catch_all) + end + end + + defp build_verify(path, routes_per_path) do + routes = Map.get(routes_per_path, path) + warn_on_verify? = Enum.all?(routes, & &1.warn_on_verify?) + + case Enum.find(routes, &(&1.kind == :forward)) do + %{metadata: %{forward: forward}, plug: plug, plug_opts: plug_opts} -> + quote generated: true do + def __forward__(unquote(plug)) do + unquote(forward) + end + + def __verify_route__(unquote(path)) do + {{unquote(plug), unquote(forward), unquote(Macro.escape(plug_opts))}, + unquote(warn_on_verify?)} + end + end + + _ -> + quote generated: true do + def __verify_route__(unquote(path)) do + {nil, unquote(warn_on_verify?)} + end + end + end + end + + defp build_match({route, expr}, {acc_pipes, known_pipes}) do + {pipe_name, acc_pipes, known_pipes} = build_match_pipes(route, acc_pipes, known_pipes) + + %{ + prepare: prepare, + dispatch: dispatch, + verb_match: verb_match, + path_params: path_params, + hosts: hosts, + path: path + } = expr + + clauses = + for host <- hosts do + quote line: route.line do + def __match_route__(unquote(path), unquote(verb_match), unquote(host)) do + {unquote(build_metadata(route, path_params)), + fn var!(conn, :conn), %{path_params: var!(path_params, :conn)} -> + unquote(prepare) + end, &(unquote(Macro.var(pipe_name, __MODULE__)) / 1), unquote(dispatch)} + end + end + end + 
+ {clauses, {acc_pipes, known_pipes}} + end + + defp build_match_pipes(route, acc_pipes, known_pipes) do + %{pipe_through: pipe_through} = route + + case known_pipes do + %{^pipe_through => name} -> + {name, acc_pipes, known_pipes} + + %{} -> + name = :"__pipe_through#{map_size(known_pipes)}__" + acc_pipes = [build_pipes(name, pipe_through) | acc_pipes] + known_pipes = Map.put(known_pipes, pipe_through, name) + {name, acc_pipes, known_pipes} + end + end + + defp build_metadata(route, path_params) do + %{ + path: path, + plug: plug, + plug_opts: plug_opts, + pipe_through: pipe_through, + metadata: metadata + } = route + + pairs = [ + conn: nil, + route: path, + plug: plug, + plug_opts: Macro.escape(plug_opts), + path_params: path_params, + pipe_through: pipe_through + ] + + {:%{}, [], pairs ++ Macro.escape(Map.to_list(metadata))} + end + + defp build_pipes(name, []) do + quote do + defp unquote(name)(conn), do: conn + end + end + + defp build_pipes(name, pipe_through) do + plugs = pipe_through |> Enum.reverse() |> Enum.map(&{&1, [], true}) + opts = [init_mode: Phoenix.plug_init_mode(), log_on_halt: :debug] + {conn, body} = Plug.Builder.compile(__ENV__, plugs, opts) + + quote do + defp unquote(name)(unquote(conn)), do: unquote(body) + end + end + + @doc """ + Generates a route match based on an arbitrary HTTP method. + + Useful for defining routes not included in the built-in macros. + + The catch-all verb, `:*`, may also be used to match all HTTP methods. + + ## Options + + * `:as` - configures the named helper. If `nil`, does not generate + a helper. Has no effect when using verified routes exclusively + * `:alias` - configure if the scope alias should be applied to the route. + Defaults to true, disables scoping if false. + * `:log` - the level to log the route dispatching under, may be set to false. Defaults to + `:debug`. 
Route dispatching contains information about how the route is handled (which controller + action is called, what parameters are available and which pipelines are used) and is separate from + the plug level logging. To alter the plug log level, please see + https://hexdocs.pm/phoenix/Phoenix.Logger.html#module-dynamic-log-level. + * `:private` - a map of private data to merge into the connection + when a route matches + * `:assigns` - a map of data to merge into the connection when a route matches + * `:metadata` - a map of metadata used by the telemetry events and returned by + `route_info/4`. The `:mfa` field is used by telemetry to print logs and by the + router to emit compile time checks. Custom fields may be added. + * `:warn_on_verify` - the boolean for whether matches to this route trigger + an unmatched route warning for `Phoenix.VerifiedRoutes`. It is useful to ignore + an otherwise catch-all route definition from being matched when verifying routes. + Defaults `false`. + + ## Examples + + match(:move, "/events/:id", EventController, :move) + + match(:*, "/any", SomeController, :any) + + """ + defmacro match(verb, path, plug, plug_opts, options \\ []) do + add_route(:match, verb, path, expand_alias(plug, __CALLER__), plug_opts, options) + end + + for verb <- @http_methods do + @doc """ + Generates a route to handle a #{verb} request to the given path. + + #{verb}("/events/:id", EventController, :action) + + See `match/5` for options. + + #{if verb == :head do + """ + ## Compatibility with `Plug.Head` + + By default, Phoenix applications include `Plug.Head` in their endpoint, + which converts HEAD requests into regular GET requests. Therefore, if + you intend to use `head/4` in your router, you need to move `Plug.Head` + to inside your router in a way it does not conflict with the paths given + to `head/4`. 
+ """ + end} + """ + defmacro unquote(verb)(path, plug, plug_opts, options \\ []) do + add_route(:match, unquote(verb), path, expand_alias(plug, __CALLER__), plug_opts, options) + end + end + + defp add_route(kind, verb, path, plug, plug_opts, options) do + quote do + @phoenix_routes Scope.route( + __ENV__.line, + __ENV__.module, + unquote(kind), + unquote(verb), + unquote(path), + unquote(plug), + unquote(plug_opts), + unquote(options) + ) + end + end + + @doc """ + Defines a plug pipeline. + + Pipelines are defined at the router root and can be used + from any scope. + + ## Examples + + pipeline :api do + plug :token_authentication + plug :dispatch + end + + A scope may then use this pipeline as: + + scope "/" do + pipe_through :api + end + + Every time `pipe_through/1` is called, the new pipelines + are appended to the ones previously given. + """ + defmacro pipeline(plug, do: block) do + with true <- is_atom(plug), + imports = __CALLER__.macros ++ __CALLER__.functions, + {mod, _} <- Enum.find(imports, fn {_, imports} -> {plug, 2} in imports end) do + raise ArgumentError, + "cannot define pipeline named #{inspect(plug)} " <> + "because there is an import from #{inspect(mod)} with the same name" + end + + block = + quote do + plug = unquote(plug) + @phoenix_pipeline [] + unquote(block) + end + + compiler = + quote unquote: false do + Scope.pipeline(__MODULE__, plug) + + {conn, body} = + Plug.Builder.compile(__ENV__, @phoenix_pipeline, init_mode: Phoenix.plug_init_mode()) + + def unquote(plug)(unquote(conn), _) do + try do + unquote(body) + rescue + e in Plug.Conn.WrapperError -> + Plug.Conn.WrapperError.reraise(e) + catch + :error, reason -> + Plug.Conn.WrapperError.reraise(unquote(conn), :error, reason, __STACKTRACE__) + end + end + + @phoenix_pipeline nil + end + + quote do + try do + unquote(block) + unquote(compiler) + after + :ok + end + end + end + + @doc """ + Defines a plug inside a pipeline. + + See `pipeline/2` for more information. 
+ """ + defmacro plug(plug, opts \\ []) do + {plug, opts} = expand_plug_and_opts(plug, opts, __CALLER__) + + quote do + if pipeline = @phoenix_pipeline do + @phoenix_pipeline [{unquote(plug), unquote(opts), true} | pipeline] + else + raise "cannot define plug at the router level, plug must be defined inside a pipeline" + end + end + end + + defp expand_plug_and_opts(plug, opts, caller) do + runtime? = Phoenix.plug_init_mode() == :runtime + + plug = + if runtime? do + expand_alias(plug, caller) + else + plug + end + + opts = + if runtime? and Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, caller)) + else + opts + end + + {plug, opts} + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:init, 1}}) + + defp expand_alias(other, _env), do: other + + @doc """ + Defines a list of plugs (and pipelines) to send the connection through. + + Plugs are specified using the atom name of any imported 2-arity function + which takes a `Plug.Conn` and options and returns a `Plug.Conn`. For + example, `:require_authenticated_user`. + + Pipelines are defined in the router, see `pipeline/2` for more information. + + pipe_through [:require_authenticated_user, :my_browser_pipeline] + + ## Multiple invocations + + `pipe_through/1` can be invoked multiple times within the same scope. Each + invocation appends new plugs and pipelines to run, which are applied to all + routes **after** the `pipe_through/1` invocation. For example: + + scope "/" do + pipe_through [:browser] + get "/", HomeController, :index + + pipe_through [:require_authenticated_user] + get "/settings", UserController, :edit + end + + In the example above, `/` pipes through `browser` only, while `/settings` pipes + through both `browser` and `require_authenticated_user`. 
Therefore, to avoid + confusion, we recommend a single `pipe_through` at the top of each scope: + + scope "/" do + pipe_through [:browser] + get "/", HomeController, :index + end + + scope "/" do + pipe_through [:browser, :require_authenticated_user] + get "/settings", UserController, :edit + end + """ + defmacro pipe_through(pipes) do + pipes = + if Phoenix.plug_init_mode() == :runtime and Macro.quoted_literal?(pipes) do + Macro.prewalk(pipes, &expand_alias(&1, __CALLER__)) + else + pipes + end + + quote do + if pipeline = @phoenix_pipeline do + raise "cannot pipe_through inside a pipeline" + else + Scope.pipe_through(__MODULE__, unquote(pipes)) + end + end + end + + @doc """ + Defines "RESTful" routes for a resource. + + The given definition: + + resources "/users", UserController + + will include routes to the following actions: + + * `GET /users` => `:index` + * `GET /users/new` => `:new` + * `POST /users` => `:create` + * `GET /users/:id` => `:show` + * `GET /users/:id/edit` => `:edit` + * `PATCH /users/:id` => `:update` + * `PUT /users/:id` => `:update` + * `DELETE /users/:id` => `:delete` + + ## Options + + This macro accepts a set of options: + + * `:only` - a list of actions to generate routes for, for example: `[:show, :edit]` + * `:except` - a list of actions to exclude generated routes from, for example: `[:delete]` + * `:param` - the name of the parameter for this resource, defaults to `"id"` + * `:name` - the prefix for this resource. This is used for the named helper + and as the prefix for the parameter in nested resources. The default value + is automatically derived from the controller name, i.e. `UserController` will + have name `"user"` + * `:as` - configures the named helper. If `nil`, does not generate + a helper. Has no effect when using verified routes exclusively + * `:singleton` - defines routes for a singleton resource that is looked up by + the client without referencing an ID. 
Read below for more information + + ## Singleton resources + + When a resource needs to be looked up without referencing an ID, because + it contains only a single entry in the given context, the `:singleton` + option can be used to generate a set of routes that are specific to + such single resource: + + * `GET /user` => `:show` + * `GET /user/new` => `:new` + * `POST /user` => `:create` + * `GET /user/edit` => `:edit` + * `PATCH /user` => `:update` + * `PUT /user` => `:update` + * `DELETE /user` => `:delete` + + Usage example: + + resources "/account", AccountController, only: [:show], singleton: true + + ## Nested Resources + + This macro also supports passing a nested block of route definitions. + This is helpful for nesting children resources within their parents to + generate nested routes. + + The given definition: + + resources "/users", UserController do + resources "/posts", PostController + end + + will include the following routes: + + ```console + user_post_path GET /users/:user_id/posts PostController :index + user_post_path GET /users/:user_id/posts/:id/edit PostController :edit + user_post_path GET /users/:user_id/posts/new PostController :new + user_post_path GET /users/:user_id/posts/:id PostController :show + user_post_path POST /users/:user_id/posts PostController :create + user_post_path PATCH /users/:user_id/posts/:id PostController :update + PUT /users/:user_id/posts/:id PostController :update + user_post_path DELETE /users/:user_id/posts/:id PostController :delete + ``` + """ + defmacro resources(path, controller, opts, do: nested_context) do + add_resources(path, controller, opts, do: nested_context) + end + + @doc """ + See `resources/4`. + """ + defmacro resources(path, controller, do: nested_context) do + add_resources(path, controller, [], do: nested_context) + end + + defmacro resources(path, controller, opts) do + add_resources(path, controller, opts, do: nil) + end + + @doc """ + See `resources/4`. 
+ """ + defmacro resources(path, controller) do + add_resources(path, controller, [], do: nil) + end + + defp add_resources(path, controller, options, do: context) do + scope = + if context do + quote do + scope(resource.member, do: unquote(context)) + end + end + + quote do + resource = Resource.build(unquote(path), unquote(controller), unquote(options)) + var!(add_resources, Phoenix.Router).(resource) + unquote(scope) + end + end + + @doc """ + Defines a scope in which routes can be nested. + + ## Examples + + scope path: "/api/v1", alias: API.V1 do + get "/pages/:id", PageController, :show + end + + The generated route above will match on the path `"/api/v1/pages/:id"` + and will dispatch to `:show` action in `API.V1.PageController`. A named + helper `api_v1_page_path` will also be generated. + + ## Options + + The supported options are: + + * `:path` - a string containing the path scope. + * `:as` - a string or atom containing the named helper scope. When set to + false, it resets the nested helper scopes. Has no effect when using verified + routes exclusively + * `:alias` - an alias (atom) containing the controller scope. When set to + false, it resets all nested aliases. + * `:host` - a string or list of strings containing the host scope, or prefix host scope, + ie `"foo.bar.com"`, `"foo."` + * `:private` - a map of private data to merge into the connection when a route matches + * `:assigns` - a map of data to merge into the connection when a route matches + * `:log` - the level to log the route dispatching under, may be set to false. Defaults to + `:debug`. Route dispatching contains information about how the route is handled (which controller + action is called, what parameters are available and which pipelines are used) and is separate from + the plug level logging. To alter the plug log level, please see + https://hexdocs.pm/phoenix/Phoenix.Logger.html#module-dynamic-log-level. 
+ + """ + defmacro scope(options, do: context) do + options = + if Macro.quoted_literal?(options) do + Macro.prewalk(options, &expand_alias(&1, __CALLER__)) + else + options + end + + do_scope(options, context) + end + + @doc """ + Define a scope with the given path. + + This function is a shortcut for: + + scope path: path do + ... + end + + ## Examples + + scope "/v1", host: "api." do + get "/pages/:id", PageController, :show + end + + """ + defmacro scope(path, options, do: context) do + options = + if Macro.quoted_literal?(options) do + Macro.prewalk(options, &expand_alias(&1, __CALLER__)) + else + options + end + + options = + quote do + path = unquote(path) + + case unquote(options) do + alias when is_atom(alias) -> [path: path, alias: alias] + options when is_list(options) -> Keyword.put(options, :path, path) + end + end + + do_scope(options, context) + end + + @doc """ + Defines a scope with the given path and alias. + + This function is a shortcut for: + + scope path: path, alias: alias do + ... + end + + ## Examples + + scope "/v1", API.V1, host: "api." do + get "/pages/:id", PageController, :show + end + + """ + defmacro scope(path, alias, options, do: context) do + alias = expand_alias(alias, __CALLER__) + + options = + quote do + unquote(options) + |> Keyword.put(:path, unquote(path)) + |> Keyword.put(:alias, unquote(alias)) + end + + do_scope(options, context) + end + + defp do_scope(options, context) do + quote do + Scope.push(__MODULE__, unquote(options)) + + try do + unquote(context) + after + Scope.pop(__MODULE__) + end + end + end + + @doc """ + Returns the full alias with the current scope's aliased prefix. + + Useful for applying the same short-hand alias handling to + other values besides the second argument in route definitions. 
+ + ## Examples + + scope "/", MyPrefix do + get "/", ProxyPlug, controller: scoped_alias(__MODULE__, MyController) + end + """ + @doc type: :reflection + def scoped_alias(router_module, alias) do + Scope.expand_alias(router_module, alias) + end + + @doc """ + Returns the full path with the current scope's path prefix. + """ + @doc type: :reflection + def scoped_path(router_module, path) do + Scope.full_path(router_module, path) + end + + @doc """ + Forwards a request at the given path to a plug. + + This is commonly used to forward all subroutes to another Plug. + For example: + + forward "/admin", SomeLib.AdminDashboard + + The above will allow `SomeLib.AdminDashboard` to handle `/admin`, + `/admin/foo`, `/admin/bar/baz`, and so on. Furthermore, + `SomeLib.AdminDashboard` does not to be aware of the prefix it + is mounted in. From its point of view, the routes above are simply + handled as `/`, `/foo`, and `/bar/baz`. + + A common use case for `forward` is for sharing a router between + applications or even breaking a big router into smaller ones. + However, in other for route generation to route accordingly, you + can only forward to a given `Phoenix.Router` once. + + The router pipelines will be invoked prior to forwarding the + connection. + + ## Examples + + scope "/", MyApp do + pipe_through [:browser, :admin] + + forward "/admin", SomeLib.AdminDashboard + forward "/api", ApiRouter + end + + """ + defmacro forward(path, plug, plug_opts \\ [], router_opts \\ []) do + {plug, plug_opts} = expand_plug_and_opts(plug, plug_opts, __CALLER__) + router_opts = Keyword.put(router_opts, :as, nil) + + quote unquote: true, bind_quoted: [path: path, plug: plug] do + unquote(add_route(:forward, :*, path, plug, plug_opts, router_opts)) + end + end + + @doc """ + Returns all routes information from the given router. + """ + def routes(router) do + router.__routes__() + end + + @doc """ + Returns the compile-time route info and runtime path params for a request. 
+ + The `path` can be either a string or the `path_info` segments. + + A map of metadata is returned with the following keys: + + * `:log` - the configured log level. For example `:debug` + * `:path_params` - the map of runtime path params + * `:pipe_through` - the list of pipelines for the route's scope, for example `[:browser]` + * `:plug` - the plug to dispatch the route to, for example `AppWeb.PostController` + * `:plug_opts` - the options to pass when calling the plug, for example: `:index` + * `:route` - the string route pattern, such as `"/posts/:id"` + + ## Examples + + iex> Phoenix.Router.route_info(AppWeb.Router, "GET", "/posts/123", "myhost") + %{ + log: :debug, + path_params: %{"id" => "123"}, + pipe_through: [:browser], + plug: AppWeb.PostController, + plug_opts: :show, + route: "/posts/:id", + } + + iex> Phoenix.Router.route_info(MyRouter, "GET", "/not-exists", "myhost") + :error + """ + @doc type: :reflection + def route_info(router, method, path, host) when is_binary(path) do + split_path = for segment <- String.split(path, "/"), segment != "", do: segment + route_info(router, method, split_path, host) + end + + def route_info(router, method, split_path, host) when is_list(split_path) do + with {metadata, _prepare, _pipeline, {_plug, _opts}} <- + router.__match_route__(split_path, method, host) do + Map.delete(metadata, :conn) + end + end + + @doc false + def __formatted_routes__(router) do + Enum.flat_map(router.__routes__(), fn route -> + Code.ensure_loaded(route.plug) + + if function_exported?(route.plug, :formatted_routes, 1) do + route.plug_opts + |> route.plug.formatted_routes() + |> Enum.map(fn nested_route -> + route = %{ + route + | path: Path.join(route.path, nested_route.path), + verb: nested_route.verb + } + + Map.put(route, :label, nested_route.label) + end) + else + plug = + case route.metadata[:mfa] do + {module, _, _} -> module + _ -> route.plug + end + + label = "#{inspect(plug)} #{inspect(route.plug_opts)}" + + [ + %{ + helper: 
route.helper, + verb: route.verb, + path: route.path, + label: label + } + ] + end + end) + end + + @doc false + def __verified_route__?(router, split_path) do + case router.__verify_route__(split_path) do + {_forward_plug, true = _warn_on_verify?} -> + false + + {nil = _forward_plug, false = _warn_on_verify?} -> + true + + {{router, script_name, plug_opts}, false = _warn_on_verify?} -> + Code.ensure_loaded(router) + + if function_exported?(router, :verified_route?, 2) do + router.verified_route?(plug_opts, split_path -- script_name) + else + true + end + + :error -> + false + end + end +end diff --git a/deps/phoenix/lib/phoenix/router/console_formatter.ex b/deps/phoenix/lib/phoenix/router/console_formatter.ex new file mode 100644 index 0000000..f808509 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/console_formatter.ex @@ -0,0 +1,143 @@ +defmodule Phoenix.Router.ConsoleFormatter do + @moduledoc false + + @doc """ + Format the routes for printing. + """ + + @socket_verb "WS" + + @longpoll_verbs ["GET", "POST"] + + def format(router, endpoint \\ nil) do + routes = router.formatted_routes([]) + + column_widths = calculate_column_widths(router, routes, endpoint) + + IO.iodata_to_binary([ + Enum.map(routes, &format_route(&1, router, column_widths)), + format_endpoint(endpoint, column_widths) + ]) + end + + defp format_endpoint(nil, _router), do: "" + + defp format_endpoint(endpoint, widths) do + case endpoint.__sockets__() do + [] -> + "" + + sockets -> + Enum.map(sockets, fn socket -> + [format_websocket(socket, widths), format_longpoll(socket, widths)] + end) + end + end + + defp format_websocket({_path, Phoenix.LiveReloader.Socket, _opts}, _), do: "" + + defp format_websocket({path, module, opts}, widths) do + if opts[:websocket] != false do + {verb_len, path_len, route_name_len} = widths + + String.duplicate(" ", route_name_len) <> + " " <> + String.pad_trailing(@socket_verb, verb_len) <> + " " <> + String.pad_trailing(path <> "/websocket", path_len) <> + " " 
<> + inspect(module) <> + "\n" + else + "" + end + end + + defp format_longpoll({_path, Phoenix.LiveReloader.Socket, _opts}, _), do: "" + + defp format_longpoll({path, module, opts}, widths) do + if opts[:longpoll] != false do + for method <- @longpoll_verbs, into: "" do + {verb_len, path_len, route_name_len} = widths + + String.duplicate(" ", route_name_len) <> + " " <> + String.pad_trailing(method, verb_len) <> + " " <> + String.pad_trailing(path <> "/longpoll", path_len) <> + " " <> + inspect(module) <> + "\n" + end + else + "" + end + end + + defp calculate_column_widths(router, routes, endpoint) do + sockets = (endpoint && endpoint.__sockets__()) || [] + + widths = + Enum.reduce(routes, {0, 0, 0}, fn route, acc -> + %{verb: verb, path: path, helper: helper} = route + verb = verb_name(verb) + {verb_len, path_len, route_name_len} = acc + route_name = route_name(router, helper) + + {max(verb_len, String.length(verb)), max(path_len, String.length(path)), + max(route_name_len, String.length(route_name))} + end) + + Enum.reduce(sockets, widths, fn {path, _mod, opts}, acc -> + {verb_len, path_len, route_name_len} = acc + + verb_length = + socket_verbs(opts) + |> Enum.map(&String.length/1) + |> Enum.max(&>=/2, fn -> 0 end) + + {max(verb_len, verb_length), max(path_len, String.length(path <> "/websocket")), + route_name_len} + end) + end + + defp format_route(route, router, column_widths) do + %{ + verb: verb, + path: path, + label: label + } = route + + verb = verb_name(verb) + route_name = route_name(router, Map.get(route, :helper)) + {verb_len, path_len, route_name_len} = column_widths + + String.pad_leading(route_name, route_name_len) <> + " " <> + String.pad_trailing(verb, verb_len) <> + " " <> + String.pad_trailing(path, path_len) <> + " " <> + label <> "\n" + end + + defp route_name(_router, nil), do: "" + + defp route_name(router, name) do + if router.__helpers__() do + name <> "_path" + else + "" + end + end + + defp verb_name(verb), do: verb |> to_string() |> 
String.upcase() + + defp socket_verbs(socket_opts) do + if socket_opts[:longpoll] != false do + [@socket_verb | @longpoll_verbs] + else + [@socket_verb] + end + end +end diff --git a/deps/phoenix/lib/phoenix/router/helpers.ex b/deps/phoenix/lib/phoenix/router/helpers.ex new file mode 100644 index 0000000..7ebceea --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/helpers.ex @@ -0,0 +1,382 @@ +defmodule Phoenix.Router.Helpers do + # Module that generates the routing helpers. + @moduledoc false + + alias Phoenix.Router.Route + alias Plug.Conn + + @doc """ + Generates the helper module for the given environment and routes. + """ + def define(env, routes) do + # Ignore any route without helper or forwards. + routes = + Enum.reject(routes, fn {route, _exprs} -> + is_nil(route.helper) or route.kind == :forward + end) + + trailing_slash? = Enum.any?(routes, fn {route, _} -> route.trailing_slash? end) + groups = Enum.group_by(routes, fn {route, _exprs} -> route.helper end) + + impls = + for {_helper, helper_routes} <- groups, + {_, [{route, exprs} | _]} <- + helper_routes + |> Enum.group_by(fn {route, exprs} -> [length(exprs.binding) | route.plug_opts] end) + |> Enum.sort(), + do: defhelper(route, exprs) + + catch_all = Enum.map(groups, &defhelper_catch_all/1) + + defhelper = + quote generated: true, unquote: false do + defhelper = fn helper, vars, opts, bins, segs, trailing_slash? 
-> + def unquote(:"#{helper}_path")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars) + ) do + unquote(:"#{helper}_path")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars), + [] + ) + end + + def unquote(:"#{helper}_path")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars), + params + ) + when is_list(params) or is_map(params) do + path( + conn_or_endpoint, + segments( + unquote(segs), + params, + unquote(bins), + unquote(trailing_slash?), + {unquote(helper), unquote(Macro.escape(opts)), + unquote(Enum.map(vars, &Macro.to_string/1))} + ) + ) + end + + def unquote(:"#{helper}_url")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars) + ) do + unquote(:"#{helper}_url")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars), + [] + ) + end + + def unquote(:"#{helper}_url")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars), + params + ) + when is_list(params) or is_map(params) do + url(conn_or_endpoint) <> + unquote(:"#{helper}_path")( + conn_or_endpoint, + unquote(Macro.escape(opts)), + unquote_splicing(vars), + params + ) + end + end + end + + defcatch_all = + quote generated: true, unquote: false do + defcatch_all = fn helper, binding_lengths, params_lengths, routes -> + for length <- binding_lengths do + binding = List.duplicate({:_, [], nil}, length) + arity = length + 2 + + def unquote(:"#{helper}_path")(conn_or_endpoint, action, unquote_splicing(binding)) do + path(conn_or_endpoint, "/") + raise_route_error(unquote(helper), :path, unquote(arity), action, []) + end + + def unquote(:"#{helper}_url")(conn_or_endpoint, action, unquote_splicing(binding)) do + url(conn_or_endpoint) + raise_route_error(unquote(helper), :url, unquote(arity), action, []) + end + end + + for length <- params_lengths do + binding = List.duplicate({:_, [], nil}, length) + arity = length + 2 + + def unquote(:"#{helper}_path")( + 
conn_or_endpoint, + action, + unquote_splicing(binding), + params + ) do + path(conn_or_endpoint, "/") + raise_route_error(unquote(helper), :path, unquote(arity + 1), action, params) + end + + def unquote(:"#{helper}_url")( + conn_or_endpoint, + action, + unquote_splicing(binding), + params + ) do + url(conn_or_endpoint) + raise_route_error(unquote(helper), :url, unquote(arity + 1), action, params) + end + end + + defp raise_route_error(unquote(helper), suffix, arity, action, params) do + Phoenix.Router.Helpers.raise_route_error( + __MODULE__, + "#{unquote(helper)}_#{suffix}", + arity, + action, + unquote(Macro.escape(routes)), + params + ) + end + end + end + + # It is in general bad practice to generate large chunks of code + # inside quoted expressions. However, we can get away with this + # here for two reasons: + # + # * Helper modules are quite uncommon, typically one per project. + # + # * We inline most of the code for performance, so it is specific + # per helper module anyway. + # + code = + quote do + @moduledoc false + unquote(defhelper) + unquote(defcatch_all) + unquote_splicing(impls) + unquote_splicing(catch_all) + + @doc """ + Generates the path information including any necessary prefix. + """ + def path(data, path) do + Phoenix.VerifiedRoutes.unverified_path(data, unquote(env.module), path) + end + + @doc """ + Generates the connection/endpoint base URL without any path information. + """ + def url(data) do + Phoenix.VerifiedRoutes.unverified_url(data, "") + end + + @doc """ + Generates path to a static asset given its file path. + """ + def static_path(conn_or_endpoint_ctx, path) do + Phoenix.VerifiedRoutes.static_path(conn_or_endpoint_ctx, path) + end + + @doc """ + Generates url to a static asset given its file path. + """ + def static_url(conn_or_endpoint_ctx, path) do + Phoenix.VerifiedRoutes.static_url(conn_or_endpoint_ctx, path) + end + + @doc """ + Generates an integrity hash to a static asset given its file path. 
+ """ + def static_integrity(conn_or_endpoint_ctx, path) do + Phoenix.VerifiedRoutes.static_integrity(conn_or_endpoint_ctx, path) + end + + # Functions used by generated helpers + # Those are inlined here for performance + + defp to_param(int) when is_integer(int), do: Integer.to_string(int) + defp to_param(bin) when is_binary(bin), do: bin + defp to_param(false), do: "false" + defp to_param(true), do: "true" + defp to_param(data), do: Phoenix.Param.to_param(data) + + defp segments(segments, [], _reserved, trailing_slash?, _opts) do + maybe_append_slash(segments, trailing_slash?) + end + + defp segments(segments, query, reserved, trailing_slash?, _opts) + when is_list(query) or is_map(query) do + dict = + for {k, v} <- query, + (k = to_string(k)) not in reserved, + do: {k, v} + + case Conn.Query.encode(dict, &to_param/1) do + "" -> maybe_append_slash(segments, trailing_slash?) + o -> maybe_append_slash(segments, trailing_slash?) <> "?" <> o + end + end + + if unquote(trailing_slash?) do + defp maybe_append_slash("/", _), do: "/" + defp maybe_append_slash(path, true), do: path <> "/" + end + + defp maybe_append_slash(path, _), do: path + end + + name = Module.concat(env.module, Helpers) + Module.create(name, code, line: env.line, file: env.file) + name + end + + @doc """ + Receives a route and returns the quoted definition for its helper function. + + In case a helper name was not given, or route is forwarded, returns nil. + """ + def defhelper(%Route{} = route, exprs) do + helper = route.helper + opts = route.plug_opts + trailing_slash? = route.trailing_slash? 
+ + {bins, vars} = :lists.unzip(exprs.binding) + segs = expand_segments(exprs.path) + + quote do + defhelper.( + unquote(helper), + unquote(Macro.escape(vars)), + unquote(Macro.escape(opts)), + unquote(Macro.escape(bins)), + unquote(Macro.escape(segs)), + unquote(Macro.escape(trailing_slash?)) + ) + end + end + + def defhelper_catch_all({helper, routes_and_exprs}) do + routes = + routes_and_exprs + |> Enum.map(fn {routes, exprs} -> + {routes.plug_opts, Enum.map(exprs.binding, &elem(&1, 0))} + end) + |> Enum.sort() + + params_lengths = + routes + |> Enum.map(fn {_, bindings} -> length(bindings) end) + |> Enum.uniq() + + # Each helper defines catch all like this: + # + # def helper_path(context, action, ...binding) + # def helper_path(context, action, ...binding, params) + # + # Given the helpers are ordered by binding length, the additional + # helper with param for a helper_path/n will always override the + # binding for helper_path/n+1, so we skip those here to avoid warnings. + binding_lengths = Enum.reject(params_lengths, &((&1 - 1) in params_lengths)) + + quote do + defcatch_all.( + unquote(helper), + unquote(binding_lengths), + unquote(params_lengths), + unquote(Macro.escape(routes)) + ) + end + end + + @doc """ + Callback for generate router catch all. 
+ """ + def raise_route_error(mod, fun, arity, action, routes, params) do + cond do + is_atom(action) and not Keyword.has_key?(routes, action) -> + "no action #{inspect(action)} for #{inspect(mod)}.#{fun}/#{arity}" + |> invalid_route_error(fun, routes) + + is_list(params) or is_map(params) -> + "no function clause for #{inspect(mod)}.#{fun}/#{arity} and action #{inspect(action)}" + |> invalid_route_error(fun, routes) + + true -> + invalid_param_error(mod, fun, arity, action, routes) + end + end + + defp invalid_route_error(prelude, fun, routes) do + suggestions = + for {action, bindings} <- routes do + bindings = Enum.join([inspect(action) | bindings], ", ") + "\n #{fun}(conn_or_endpoint, #{bindings}, params \\\\ [])" + end + + raise ArgumentError, + "#{prelude}. The following actions/clauses are supported:\n#{suggestions}" + end + + defp invalid_param_error(mod, fun, arity, action, routes) do + call_vars = Keyword.fetch!(routes, action) + + raise ArgumentError, """ + #{inspect(mod)}.#{fun}/#{arity} called with invalid params. + The last argument to this function should be a keyword list or a map. + For example: + + #{fun}(#{Enum.join(["conn", ":#{action}" | call_vars], ", ")}, page: 5, per_page: 10) + + It is possible you have called this function without defining the proper + number of path segments in your router. + """ + end + + @doc """ + Callback for properly encoding parameters in routes. 
+ """ + def encode_param(str), do: URI.encode(str, &URI.char_unreserved?/1) + + defp expand_segments([]), do: "/" + + defp expand_segments(segments) when is_list(segments) do + expand_segments(segments, "") + end + + defp expand_segments(segments) do + quote(do: "/" <> Enum.map_join(unquote(segments), "/", &unquote(__MODULE__).encode_param/1)) + end + + defp expand_segments([{:|, _, [h, t]}], acc), + do: + quote( + do: + unquote(expand_segments([h], acc)) <> + "/" <> Enum.map_join(unquote(t), "/", &unquote(__MODULE__).encode_param/1) + ) + + defp expand_segments([h | t], acc) when is_binary(h), + do: expand_segments(t, quote(do: unquote(acc) <> unquote("/" <> h))) + + defp expand_segments([h | t], acc), + do: + expand_segments( + t, + quote(do: unquote(acc) <> "/" <> unquote(__MODULE__).encode_param(to_param(unquote(h)))) + ) + + defp expand_segments([], acc), + do: acc +end diff --git a/deps/phoenix/lib/phoenix/router/resource.ex b/deps/phoenix/lib/phoenix/router/resource.ex new file mode 100644 index 0000000..3d89b91 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/resource.ex @@ -0,0 +1,86 @@ +defmodule Phoenix.Router.Resource do + # This module defines the Resource struct that is used + # throughout Phoenix's router. This struct is private + # as it contains internal routing information. + @moduledoc false + + alias Phoenix.Router.Resource + + @default_param_key "id" + @actions [:index, :edit, :new, :show, :create, :update, :delete] + + @doc """ + The `Phoenix.Router.Resource` struct. 
It stores: + + * `:path` - the path as string (not normalized) + * `:param` - the param to be used in routes (not normalized) + * `:controller` - the controller as an atom + * `:actions` - a list of actions as atoms + * `:route` - the context for resource routes + * `:member` - the context for member routes + * `:collection` - the context for collection routes + + """ + defstruct [:path, :actions, :param, :route, :controller, :member, :collection, :singleton] + @type t :: %Resource{} + + @doc """ + Builds a resource struct. + """ + def build(path, controller, options) when is_atom(controller) and is_list(options) do + path = Phoenix.Router.Scope.validate_path(path) + alias = Keyword.get(options, :alias) + param = Keyword.get(options, :param, @default_param_key) + name = Keyword.get(options, :name, Phoenix.Naming.resource_name(controller, "Controller")) + as = Keyword.get(options, :as, name) + private = Keyword.get(options, :private, %{}) + assigns = Keyword.get(options, :assigns, %{}) + + singleton = Keyword.get(options, :singleton, false) + actions = extract_actions(options, singleton) + + route = [as: as, private: private, assigns: assigns] + collection = [path: path, as: as, private: private, assigns: assigns] + member_path = if singleton, do: path, else: Path.join(path, ":#{name}_#{param}") + member = [path: member_path, as: as, alias: alias, private: private, assigns: assigns] + + %Resource{path: path, actions: actions, param: param, route: route, + member: member, collection: collection, controller: controller, singleton: singleton} + end + + defp extract_actions(opts, singleton) do + only = Keyword.get(opts, :only) + except = Keyword.get(opts, :except) + + cond do + only -> + supported_actions = validate_actions(:only, singleton, only) + supported_actions -- (supported_actions -- only) + + except -> + supported_actions = validate_actions(:except, singleton, except) + supported_actions -- except + + true -> default_actions(singleton) + end + end + + defp 
validate_actions(type, singleton, actions) do + supported_actions = default_actions(singleton) + + unless actions -- supported_actions == [] do + raise ArgumentError, """ + invalid :#{type} action(s) passed to resources. + + supported#{if singleton, do: " singleton", else: ""} actions: #{inspect(supported_actions)} + + got: #{inspect(actions)} + """ + end + + supported_actions + end + + defp default_actions(true = _singleton), do: @actions -- [:index] + defp default_actions(false = _singleton), do: @actions +end diff --git a/deps/phoenix/lib/phoenix/router/route.ex b/deps/phoenix/lib/phoenix/router/route.ex new file mode 100644 index 0000000..bc2f1b7 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/route.ex @@ -0,0 +1,232 @@ +defmodule Phoenix.Router.Route do + # This module defines the Route struct that is used + # throughout Phoenix's router. This struct is private + # as it contains internal routing information. + @moduledoc false + + alias Phoenix.Router.Route + + @doc """ + The `Phoenix.Router.Route` struct. It stores: + + * `:verb` - the HTTP verb as an atom + * `:line` - the line the route was defined + * `:kind` - the kind of route, either `:match` or `:forward` + * `:path` - the normalized path as string + * `:hosts` - the list of request hosts or host prefixes + * `:plug` - the plug module + * `:plug_opts` - the plug options + * `:helper` - the name of the helper as a string (may be nil) + * `:private` - the private route info + * `:assigns` - the route info + * `:pipe_through` - the pipeline names as a list of atoms + * `:metadata` - general metadata used on telemetry events and route info + * `:trailing_slash?` - whether or not the helper functions append a trailing slash + * `:warn_on_verify?` - whether or not to warn on route verification + """ + + defstruct [ + :verb, + :line, + :kind, + :path, + :hosts, + :plug, + :plug_opts, + :helper, + :private, + :pipe_through, + :assigns, + :metadata, + :trailing_slash?, + :warn_on_verify? 
+ ] + + @type t :: %Route{} + + @doc "Used as a plug on forwarding" + def init(opts), do: opts + + @doc "Used as a plug on forwarding" + def call(%{path_info: path, script_name: script} = conn, {fwd_segments, plug, opts}) do + new_path = path -- fwd_segments + {base, ^new_path} = Enum.split(path, length(path) - length(new_path)) + conn = %{conn | path_info: new_path, script_name: script ++ base} + conn = plug.call(conn, plug.init(opts)) + %{conn | path_info: path, script_name: script} + end + + @doc """ + Receives the verb, path, plug, options and helper + and returns a `Phoenix.Router.Route` struct. + """ + @spec build( + non_neg_integer, + :match | :forward, + atom, + String.t(), + String.t() | nil, + atom, + atom, + atom | nil, + list(atom), + map, + map, + map, + boolean, + boolean + ) :: t + def build( + line, + kind, + verb, + path, + hosts, + plug, + plug_opts, + helper, + pipe_through, + private, + assigns, + metadata, + trailing_slash?, + warn_on_verify? + ) + when is_atom(verb) and is_list(hosts) and + is_atom(plug) and (is_binary(helper) or is_nil(helper)) and + is_list(pipe_through) and is_map(private) and is_map(assigns) and + is_map(metadata) and kind in [:match, :forward] and + is_boolean(trailing_slash?) do + %Route{ + kind: kind, + verb: verb, + path: path, + hosts: hosts, + private: private, + plug: plug, + plug_opts: plug_opts, + helper: helper, + pipe_through: pipe_through, + assigns: assigns, + line: line, + metadata: metadata, + trailing_slash?: trailing_slash?, + warn_on_verify?: warn_on_verify? + } + end + + @doc """ + Builds the compiled expressions used by the route. 
+ """ + def exprs(route) do + {path, binding} = build_path_and_binding(route) + + %{ + path: path, + binding: binding, + dispatch: build_dispatch(route), + hosts: build_host_match(route.hosts), + path_params: build_path_params(binding), + prepare: build_prepare(route), + verb_match: verb_match(route.verb) + } + end + + def build_host_match([]), do: [Plug.Router.Utils.build_host_match(nil)] + + def build_host_match([_ | _] = hosts) do + for host <- hosts, do: Plug.Router.Utils.build_host_match(host) + end + + defp verb_match(:*), do: Macro.var(:_verb, nil) + defp verb_match(verb), do: verb |> to_string() |> String.upcase() + + defp build_path_params(binding), do: {:%{}, [], binding} + + defp build_path_and_binding(%Route{path: path} = route) do + {_params, segments} = + case route.kind do + :forward -> Plug.Router.Utils.build_path_match(path <> "/*_forward_path_info") + :match -> Plug.Router.Utils.build_path_match(path) + end + + rewrite_segments(segments) + end + + # We rewrite segments to use consistent variable naming as we want to group routes later on. 
+ defp rewrite_segments(segments) do + {segments, {binding, _counter}} = + Macro.prewalk(segments, {[], 0}, fn + {name, _meta, nil}, {binding, counter} + when is_atom(name) and name != :_forward_path_info -> + var = Macro.var(:"arg#{counter}", __MODULE__) + {var, {[{Atom.to_string(name), var} | binding], counter + 1}} + + other, acc -> + {other, acc} + end) + + {segments, Enum.reverse(binding)} + end + + defp build_prepare(route) do + {match_params, merge_params} = build_params() + {match_private, merge_private} = build_prepare_expr(:private, route.private) + {match_assigns, merge_assigns} = build_prepare_expr(:assigns, route.assigns) + + match_all = match_params ++ match_private ++ match_assigns + merge_all = merge_params ++ merge_private ++ merge_assigns + + quote do + %{unquote_splicing(match_all)} = var!(conn, :conn) + %{var!(conn, :conn) | unquote_splicing(merge_all)} + end + end + + defp build_prepare_expr(_key, data) when data == %{}, do: {[], []} + + defp build_prepare_expr(key, data) do + var = Macro.var(key, :conn) + merge = quote(do: Map.merge(unquote(var), unquote(Macro.escape(data)))) + {[{key, var}], [{key, merge}]} + end + + defp build_dispatch(%Route{kind: :match, plug: plug, plug_opts: plug_opts}) do + quote do + {unquote(plug), unquote(Macro.escape(plug_opts))} + end + end + + defp build_dispatch(%Route{ + kind: :forward, + plug: plug, + plug_opts: plug_opts, + metadata: metadata + }) do + quote do + { + Phoenix.Router.Route, + {unquote(metadata.forward), unquote(plug), unquote(Macro.escape(plug_opts))} + } + end + end + + defp build_params() do + params = Macro.var(:params, :conn) + path_params = Macro.var(:path_params, :conn) + + merge_params = + quote(do: Phoenix.Router.Route.merge_params(unquote(params), unquote(path_params))) + + { + [{:params, params}], + [{:params, merge_params}, {:path_params, path_params}] + } + end + + @doc """ + Merges params from router. 
+ """ + def merge_params(%Plug.Conn.Unfetched{}, path_params), do: path_params + def merge_params(params, path_params), do: Map.merge(params, path_params) +end diff --git a/deps/phoenix/lib/phoenix/router/scope.ex b/deps/phoenix/lib/phoenix/router/scope.ex new file mode 100644 index 0000000..cc9b61e --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/scope.ex @@ -0,0 +1,302 @@ +defmodule Phoenix.Router.Scope do + alias Phoenix.Router.Scope + @moduledoc false + + @stack :phoenix_router_scopes + @pipes :phoenix_pipeline_scopes + @top :phoenix_top_scopes + + defstruct path: [], + alias: [], + as: [], + pipes: [], + hosts: [], + private: %{}, + assigns: %{}, + log: :debug, + trailing_slash?: false + + @doc """ + Initializes the scope. + """ + def init(module) do + Module.put_attribute(module, @stack, []) + Module.put_attribute(module, @top, %Scope{}) + Module.put_attribute(module, @pipes, MapSet.new()) + end + + @doc """ + Builds a route based on the top of the stack. + """ + def route(line, module, kind, verb, path, plug, plug_opts, opts) do + unless is_atom(plug) do + raise ArgumentError, "routes expect a module plug as second argument, got: #{inspect(plug)}" + end + + top = get_top(module) + path = validate_path(path) + private = Keyword.get(opts, :private, %{}) + assigns = Keyword.get(opts, :assigns, %{}) + as = Keyword.get_lazy(opts, :as, fn -> Phoenix.Naming.resource_name(plug, "Controller") end) + alias? = Keyword.get(opts, :alias, true) + trailing_slash? = deprecated_trailing_slash(opts, top) + warn_on_verify? 
= Keyword.get(opts, :warn_on_verify, false) + + if to_string(as) == "static" do + raise ArgumentError, + "`static` is a reserved route prefix generated from #{inspect(plug)} or `:as` option" + end + + {path, alias, as, private, assigns} = join(top, path, plug, alias?, as, private, assigns) + + metadata = + opts + |> Keyword.get(:metadata, %{}) + |> Map.put(:log, Keyword.get(opts, :log, top.log)) + + metadata = + if kind == :forward do + Map.put(metadata, :forward, validate_forward!(path, plug)) + else + metadata + end + + Phoenix.Router.Route.build( + line, + kind, + verb, + path, + top.hosts, + alias, + plug_opts, + as, + top.pipes, + private, + assigns, + metadata, + trailing_slash?, + warn_on_verify? + ) + end + + defp validate_forward!(path, plug) when is_atom(plug) do + case Plug.Router.Utils.build_path_match(path) do + {[], path_segments} -> + path_segments + + _ -> + raise ArgumentError, + "dynamic segment \"#{path}\" not allowed when forwarding. Use a static path instead" + end + end + + defp validate_forward!(_, plug) do + raise ArgumentError, "forward expects a module as the second argument, #{inspect(plug)} given" + end + + @doc """ + Validates a path is a string and contains a leading prefix. + """ + def validate_path("/" <> _ = path), do: path + + def validate_path(path) when is_binary(path) do + IO.warn("router paths should begin with a forward slash, got: #{inspect(path)}") + "/" <> path + end + + def validate_path(path) do + raise ArgumentError, "router paths must be strings, got: #{inspect(path)}" + end + + @doc """ + Defines the given pipeline. + """ + def pipeline(module, pipe) when is_atom(pipe) do + update_pipes(module, &MapSet.put(&1, pipe)) + end + + @doc """ + Appends the given pipes to the current scope pipe through. 
+ """ + def pipe_through(module, new_pipes) do + new_pipes = List.wrap(new_pipes) + %{pipes: pipes} = top = get_top(module) + + if pipe = Enum.find(new_pipes, &(&1 in pipes)) do + raise ArgumentError, + "duplicate pipe_through for #{inspect(pipe)}. " <> + "A plug may only be used once inside a scoped pipe_through" + end + + put_top(module, %{top | pipes: pipes ++ new_pipes}) + end + + @doc """ + Pushes a scope into the module stack. + """ + def push(module, path) when is_binary(path) do + push(module, path: path) + end + + def push(module, opts) when is_list(opts) do + top = get_top(module) + + path = + if path = Keyword.get(opts, :path) do + path |> validate_path() |> String.split("/", trim: true) + else + [] + end + + alias = append_unless_false(top, opts, :alias, &Atom.to_string(&1)) + as = append_unless_false(top, opts, :as, & &1) + + hosts = + case Keyword.fetch(opts, :host) do + {:ok, val} -> validate_hosts!(val) + :error -> top.hosts + end + + private = Keyword.get(opts, :private, %{}) + assigns = Keyword.get(opts, :assigns, %{}) + + update_stack(module, fn stack -> [top | stack] end) + + put_top(module, %Scope{ + path: top.path ++ path, + alias: alias, + as: as, + hosts: hosts, + pipes: top.pipes, + private: Map.merge(top.private, private), + assigns: Map.merge(top.assigns, assigns), + log: Keyword.get(opts, :log, top.log), + trailing_slash?: deprecated_trailing_slash(opts, top) + }) + end + + defp deprecated_trailing_slash(opts, top) do + case Keyword.fetch(opts, :trailing_slash) do + {:ok, value} -> + IO.warn( + "the :trailing_slash option in the router is deprecated. " <> + "If you are using Phoenix.VerifiedRoutes, it has no effect. " <> + "If you are using the generated helpers, migrate to Phoenix.VerifiedRoutes" + ) + + value == true + + :error -> + top.trailing_slash? 
+ end + end + + defp validate_hosts!(nil), do: [] + defp validate_hosts!(host) when is_binary(host), do: [host] + + defp validate_hosts!(hosts) when is_list(hosts) do + for host <- hosts do + unless is_binary(host), do: raise_invalid_host(host) + + host + end + end + + defp validate_hosts!(invalid), do: raise_invalid_host(invalid) + + defp raise_invalid_host(host) do + raise ArgumentError, + "expected router scope :host to be compile-time string or list of strings, got: #{inspect(host)}" + end + + defp append_unless_false(top, opts, key, fun) do + case opts[key] do + false -> [] + nil -> Map.fetch!(top, key) + other -> Map.fetch!(top, key) ++ [fun.(other)] + end + end + + @doc """ + Pops a scope from the module stack. + """ + def pop(module) do + update_stack(module, fn [top | stack] -> + put_top(module, top) + stack + end) + end + + @doc """ + Expands the alias in the current router scope. + """ + def expand_alias(module, alias) do + join_alias(get_top(module), alias) + end + + @doc """ + Returns the full path in the current router scope. + """ + def full_path(module, path) do + split_path = String.split(path, "/", trim: true) + prefix = get_top(module).path + + cond do + prefix == [] -> path + split_path == [] -> "/" <> Enum.join(prefix, "/") + true -> "/" <> Path.join(get_top(module).path ++ split_path) + end + end + + defp join(top, path, alias, alias?, as, private, assigns) do + joined_alias = + if alias? 
do + join_alias(top, alias) + else + alias + end + + {join_path(top, path), joined_alias, join_as(top, as), Map.merge(top.private, private), + Map.merge(top.assigns, assigns)} + end + + defp join_path(top, path) do + "/" <> Enum.join(top.path ++ String.split(path, "/", trim: true), "/") + end + + defp join_alias(top, alias) when is_atom(alias) do + case Atom.to_string(alias) do + <> when head in ?a..?z -> alias + alias -> Module.concat(top.alias ++ [alias]) + end + end + + defp join_as(_top, nil), do: nil + defp join_as(top, as) when is_atom(as) or is_binary(as), do: Enum.join(top.as ++ [as], "_") + + defp get_top(module) do + get_attribute(module, @top) + end + + defp update_stack(module, fun) do + update_attribute(module, @stack, fun) + end + + defp update_pipes(module, fun) do + update_attribute(module, @pipes, fun) + end + + defp put_top(module, value) do + Module.put_attribute(module, @top, value) + value + end + + defp get_attribute(module, attr) do + Module.get_attribute(module, attr) || + raise "Phoenix router scope was not initialized" + end + + defp update_attribute(module, attr, fun) do + Module.put_attribute(module, attr, fun.(get_attribute(module, attr))) + end +end diff --git a/deps/phoenix/lib/phoenix/socket.ex b/deps/phoenix/lib/phoenix/socket.ex new file mode 100644 index 0000000..18a4c9e --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket.ex @@ -0,0 +1,889 @@ +defmodule Phoenix.Socket do + @moduledoc ~S""" + A socket implementation that multiplexes messages over channels. + + `Phoenix.Socket` is used as a module for establishing a connection + between client and server. Once the connection is established, + the initial state is stored in the `Phoenix.Socket` struct. + + The same socket can be used to receive events from different transports. + Phoenix supports `websocket` and `longpoll` options when invoking + `Phoenix.Endpoint.socket/3` in your endpoint. `websocket` is set by default + and `longpoll` can also be configured explicitly. 
+ + socket "/socket", MyAppWeb.Socket, websocket: true, longpoll: false + + The command above means incoming socket connections can be made via + a WebSocket connection. Incoming and outgoing events are routed to + channels by topic: + + channel "room:lobby", MyAppWeb.LobbyChannel + + See `Phoenix.Channel` for more information on channels. + + ## Socket Behaviour + + Socket handlers are mounted in Endpoints and must define two callbacks: + + * `connect/3` - receives the socket params, connection info if any, and + authenticates the connection. Must return a `Phoenix.Socket` struct, + often with custom assigns + + * `id/1` - receives the socket returned by `connect/3` and returns the + id of this connection as a string. The `id` is used to identify socket + connections, often to a particular user, allowing us to force disconnections. + For sockets requiring no authentication, `nil` can be returned + + ## Examples + + defmodule MyAppWeb.UserSocket do + use Phoenix.Socket + + channel "room:*", MyAppWeb.RoomChannel + + def connect(params, socket, _connect_info) do + {:ok, assign(socket, :user_id, params["user_id"])} + end + + def id(socket), do: "users_socket:#{socket.assigns.user_id}" + end + + # Disconnect all user's socket connections and their multiplexed channels + MyAppWeb.Endpoint.broadcast("users_socket:" <> user.id, "disconnect", %{}) + + ## Socket fields + + * `:id` - The string id of the socket + * `:assigns` - The map of socket assigns, default: `%{}` + * `:channel` - The current channel module + * `:channel_pid` - The channel pid + * `:endpoint` - The endpoint module where this socket originated, for example: `MyAppWeb.Endpoint` + * `:handler` - The socket module where this socket originated, for example: `MyAppWeb.UserSocket` + * `:joined` - If the socket has effectively joined the channel + * `:join_ref` - The ref sent by the client when joining + * `:ref` - The latest ref sent by the client + * `:pubsub_server` - The registered name of the socket's 
pubsub server + * `:topic` - The string topic, for example `"room:123"` + * `:transport` - An identifier for the transport, used for logging + * `:transport_pid` - The pid of the socket's transport process + * `:serializer` - The serializer for socket messages + + ## Using options + + On `use Phoenix.Socket`, the following options are accepted: + + * `:log` - the default level to log socket actions. Defaults + to `:info`. May be set to `false` to disable it + + * `:partitions` - each channel is spawned under a supervisor. + This option controls how many supervisors will be spawned + to handle channels. Defaults to the number of cores. + + ## Garbage collection + + It's possible to force garbage collection in the transport process after + processing large messages. For example, to trigger such from your channels, + run: + + send(socket.transport_pid, :garbage_collect) + + Alternatively, you can configure your endpoint socket to trigger more + fullsweep garbage collections more frequently, by setting the `:fullsweep_after` + option for websockets. See `Phoenix.Endpoint.socket/3` for more info. + + ## Client-server communication + + The encoding of server data and the decoding of client data is done + according to a serializer, defined in `Phoenix.Socket.Serializer`. + By default, JSON encoding is used to broker messages to and from clients. + + The serializer `decode!` function must return a `Phoenix.Socket.Message` + which is forwarded to channels except: + + * `"heartbeat"` events in the "phoenix" topic - should just emit an OK reply + * `"phx_join"` on any topic - should join the topic + * `"phx_leave"` on any topic - should leave the topic + + Each message also has a `ref` field which is used to track responses. + + The server may send messages or replies back. For messages, the + ref uniquely identifies the message. For replies, the ref matches + the original message. 
Both data-types also include a join_ref that + uniquely identifies the currently joined channel. + + The `Phoenix.Socket` implementation may also send special messages + and replies: + + * `"phx_error"` - in case of errors, such as a channel process + crashing, or when attempting to join an already joined channel + + * `"phx_close"` - the channel was gracefully closed + + Phoenix ships with a JavaScript implementation of both websocket + and long polling that interacts with Phoenix.Socket and can be + used as reference for those interested in implementing custom clients. + + ## Custom sockets and transports + + See the `Phoenix.Socket.Transport` documentation for more information on + writing your own socket that does not leverage channels or for writing + your own transports that interacts with other sockets. + + ## Custom channels + + You can list any module as a channel as long as it implements + a `child_spec/1` function. The `child_spec/1` function receives + the caller as argument and it must return a child spec that + initializes a process. + + Once the process is initialized, it will receive the following + message: + + {Phoenix.Channel, auth_payload, from, socket} + + A custom channel implementation MUST invoke + `GenServer.reply(from, {:ok | :error, reply_payload})` during its + initialization with a custom `reply_payload` that will be sent as + a reply to the client. Failing to do so will block the socket forever. + + A custom channel receives `Phoenix.Socket.Message` structs as regular + messages from the transport. Replies to those messages and custom + messages can be sent to the socket at any moment by building an + appropriate `Phoenix.Socket.Reply` and `Phoenix.Socket.Message` + structs, encoding them with the serializer and dispatching the + serialized result to the transport. 
+ + For example, to handle "phx_leave" messages, which is recommended + to be handled by all channel implementations, one may do: + + def handle_info( + %Message{topic: topic, event: "phx_leave"} = message, + %{topic: topic, serializer: serializer, transport_pid: transport_pid} = socket + ) do + send transport_pid, serializer.encode!(build_leave_reply(message)) + {:stop, {:shutdown, :left}, socket} + end + + A special message delivered to all channels is a Broadcast with + event "phx_drain", which is sent when draining the socket during + application shutdown. Typically it is handled by sending a drain + message to the transport, causing it to shutdown: + + def handle_info( + %Broadcast{event: "phx_drain"}, + %{transport_pid: transport_pid} = socket + ) do + send(transport_pid, :socket_drain) + {:stop, {:shutdown, :draining}, socket} + end + + We also recommend all channels to monitor the `transport_pid` + on `init` and exit if the transport exits. We also advise to rewrite + `:normal` exit reasons (usually due to the socket being closed) + to the `{:shutdown, :closed}` to guarantee links are broken on + the channel exit (as a `:normal` exit does not break links): + + def handle_info({:DOWN, _, _, transport_pid, reason}, %{transport_pid: transport_pid} = socket) do + reason = if reason == :normal, do: {:shutdown, :closed}, else: reason + {:stop, reason, socket} + end + + Any process exit is treated as an error by the socket layer unless + a `{:socket_close, pid, reason}` message is sent to the socket before + shutdown. + + Custom channel implementations cannot be tested with `Phoenix.ChannelTest`. + """ + + require Logger + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply} + + @doc """ + Receives the socket params and authenticates the connection. + + ## Socket params and assigns + + Socket params are passed from the client and can + be used to verify and authenticate a user. 
After + verification, you can put default assigns into + the socket that will be set for all channels, ie + + {:ok, assign(socket, :user_id, verified_user_id)} + + To deny connection, return `:error` or `{:error, term}`. To control the + response the client receives in that case, [define an error handler in the + websocket + configuration](https://hexdocs.pm/phoenix/Phoenix.Endpoint.html#socket/3-websocket-configuration). + + See `Phoenix.Token` documentation for examples in + performing token verification on connect. + """ + @callback connect(params :: map, Socket.t(), connect_info :: map) :: + {:ok, Socket.t()} | {:error, term} | :error + + @doc """ + Shortcut version of `connect/3` which does not receive `connect_info`. + + Provided for backwards compatibility. + """ + @callback connect(params :: map, Socket.t()) :: {:ok, Socket.t()} | {:error, term} | :error + + @doc ~S""" + Identifies the socket connection. + + Socket IDs are topics that allow you to identify all sockets for a given user: + + def id(socket), do: "users_socket:#{socket.assigns.user_id}" + + Would allow you to broadcast a `"disconnect"` event and terminate + all active sockets and channels for a given user: + + MyAppWeb.Endpoint.broadcast("users_socket:" <> user.id, "disconnect", %{}) + + Returning `nil` makes this socket anonymous. + """ + @callback id(Socket.t()) :: String.t() | nil + + @optional_callbacks connect: 2, connect: 3 + + defmodule InvalidMessageError do + @moduledoc """ + Raised when the socket message is invalid. 
+ """ + defexception [:message] + end + + defstruct assigns: %{}, + channel: nil, + channel_pid: nil, + endpoint: nil, + handler: nil, + id: nil, + joined: false, + join_ref: nil, + private: %{}, + pubsub_server: nil, + ref: nil, + serializer: nil, + topic: nil, + transport: nil, + transport_pid: nil + + @type t :: %Socket{ + assigns: map, + channel: atom, + channel_pid: pid, + endpoint: atom, + handler: atom, + id: String.t() | nil, + joined: boolean, + ref: term, + private: map, + pubsub_server: atom, + serializer: atom, + topic: String.t(), + transport: atom, + transport_pid: pid + } + + defmacro __using__(opts) do + quote do + ## User API + + import Phoenix.Socket + @behaviour Phoenix.Socket + @before_compile Phoenix.Socket + Module.register_attribute(__MODULE__, :phoenix_channels, accumulate: true) + @phoenix_socket_options unquote(opts) + + ## Callbacks + + @behaviour Phoenix.Socket.Transport + + @doc false + def child_spec(opts) do + Phoenix.Socket.__child_spec__(__MODULE__, opts, @phoenix_socket_options) + end + + @doc false + def drainer_spec(opts) do + Phoenix.Socket.__drainer_spec__(__MODULE__, opts, @phoenix_socket_options) + end + + @doc false + def connect(map), do: Phoenix.Socket.__connect__(__MODULE__, map, @phoenix_socket_options) + + @doc false + def init(state), do: Phoenix.Socket.__init__(state) + + @doc false + def handle_in(message, state), do: Phoenix.Socket.__in__(message, state) + + @doc false + def handle_info(message, state), do: Phoenix.Socket.__info__(message, state) + + @doc false + def terminate(reason, state), do: Phoenix.Socket.__terminate__(reason, state) + end + end + + ## USER API + + @doc """ + Adds a `key`/`value` pair to `socket` assigns. + + See also `assign/2` to add multiple key/value pairs. + + ## Examples + + iex> assign(socket, :name, "Elixir") + """ + def assign(%Socket{} = socket, key, value) do + assign(socket, [{key, value}]) + end + + @doc """ + Adds key/value pairs to socket assigns. 
+ Accepts a keyword list, a map, or a single-argument function. + + When a keyword list or map is provided, it will be merged into the existing assigns. + + If a function is given, it takes the current assigns as an argument and its return + value will be merged into the current assigns. + + ## Examples + + iex> assign(socket, name: "Elixir", logo: "💧") + iex> assign(socket, %{name: "Elixir"}) + iex> assign(socket, fn %{name: name, logo: logo} -> %{title: Enum.join([name, logo], " | ")} end) + + """ + def assign(%Socket{} = socket, keyword_or_map) + when is_map(keyword_or_map) or is_list(keyword_or_map) do + %{socket | assigns: Map.merge(socket.assigns, Map.new(keyword_or_map))} + end + + def assign(%Socket{} = socket, fun) when is_function(fun, 1) do + assign(socket, fun.(socket.assigns)) + end + + @doc """ + Defines a channel matching the given topic and transports. + + * `topic_pattern` - The string pattern, for example `"room:*"`, `"users:*"`, + or `"system"` + * `module` - The channel module handler, for example `MyAppWeb.RoomChannel` + * `opts` - The optional list of options, see below + + ## Options + + * `:assigns` - the map of socket assigns to merge into the socket on join + + ## Examples + + channel "topic1:*", MyChannel + + ## Topic Patterns + + The `channel` macro accepts topic patterns in two flavors. A splat (the `*` + character) argument can be provided as the last character to indicate a + `"topic:subtopic"` match. If a plain string is provided, only that topic will + match the channel handler. Most use-cases will use the `"topic:*"` pattern to + allow more versatile topic scoping. 
+ + See `Phoenix.Channel` for more information + """ + defmacro channel(topic_pattern, module, opts \\ []) do + module = expand_alias(module, __CALLER__) + + opts = + if Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, __CALLER__)) + else + opts + end + + quote do + @phoenix_channels {unquote(topic_pattern), unquote(module), unquote(opts)} + end + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:channel, 3}}) + + defp expand_alias(other, _env), do: other + + @doc false + @deprecated "transport/3 in Phoenix.Socket is deprecated and has no effect" + defmacro transport(_name, _module, _config \\ []) do + :ok + end + + defmacro __before_compile__(env) do + channels = + env.module + |> Module.get_attribute(:phoenix_channels, []) + |> Enum.reverse() + + channel_defs = + for {topic_pattern, module, opts} <- channels do + topic_pattern + |> to_topic_match() + |> defchannel(module, opts) + end + + quote do + unquote(channel_defs) + def __channel__(_topic), do: nil + end + end + + defp to_topic_match(topic_pattern) do + case String.split(topic_pattern, "*") do + [prefix, ""] -> quote do: < _rest>> + [bare_topic] -> bare_topic + _ -> raise ArgumentError, "channels using splat patterns must end with *" + end + end + + defp defchannel(topic_match, channel_module, opts) do + quote do + def __channel__(unquote(topic_match)), do: unquote({channel_module, Macro.escape(opts)}) + end + end + + ## CALLBACKS IMPLEMENTATION + + def __child_spec__(handler, opts, socket_options) do + endpoint = Keyword.fetch!(opts, :endpoint) + opts = Keyword.merge(socket_options, opts) + partitions = Keyword.get(opts, :partitions, System.schedulers_online()) + args = {endpoint, handler, partitions} + Supervisor.child_spec({Phoenix.Socket.PoolSupervisor, args}, id: handler) + end + + def __drainer_spec__(handler, opts, socket_options) do + endpoint = Keyword.fetch!(opts, :endpoint) + opts = Keyword.merge(socket_options, opts) 
+ + if drainer = Keyword.get(opts, :drainer, []) do + drainer = + case drainer do + {module, function, arguments} -> + apply(module, function, arguments) + + _ -> + drainer + end + + opts = Keyword.merge(opts, drainer: drainer) + + {Phoenix.Socket.PoolDrainer, {endpoint, handler, opts}} + else + :ignore + end + end + + def __connect__(user_socket, map, socket_options) do + %{ + endpoint: endpoint, + options: options, + transport: transport, + params: params, + connect_info: connect_info + } = map + + vsn = params["vsn"] || "1.0.0" + + options = Keyword.merge(socket_options, options) + start = System.monotonic_time() + + case negotiate_serializer(Keyword.fetch!(options, :serializer), vsn) do + {:ok, serializer} -> + result = user_connect(user_socket, endpoint, transport, serializer, params, connect_info) + + metadata = %{ + endpoint: endpoint, + transport: transport, + params: params, + connect_info: connect_info, + vsn: vsn, + user_socket: user_socket, + log: Keyword.get(options, :log, :info), + result: result(result), + serializer: serializer + } + + duration = System.monotonic_time() - start + :telemetry.execute([:phoenix, :socket_connected], %{duration: duration}, metadata) + result + + :error -> + :error + end + end + + defp result({:ok, _}), do: :ok + defp result(:error), do: :error + defp result({:error, _}), do: :error + + defp set_label(socket) do + # TODO: replace with Process.put_label/2 when we require Elixir 1.17 + Process.put(:"$process_label", {Phoenix.Socket, socket.handler, socket.id}) + end + + def __init__({state, %{id: id, endpoint: endpoint} = socket}) do + set_label(socket) + _ = id && endpoint.subscribe(id) + {:ok, {state, %{socket | transport_pid: self()}}} + end + + def __in__({payload, opts}, {state, socket}) do + %{topic: topic} = message = socket.serializer.decode!(payload, opts) + handle_in(Map.get(state.channels, topic), message, state, socket) + end + + def __info__({:DOWN, ref, _, pid, reason}, {state, socket}) do + case 
state.channels_inverse do + %{^pid => {topic, join_ref}} -> + state = delete_channel(state, pid, topic, ref) + {:push, encode_on_exit(socket, topic, join_ref, reason), {state, socket}} + + %{} -> + {:ok, {state, socket}} + end + end + + def __info__(%Broadcast{event: "disconnect"}, state) do + {:stop, {:shutdown, :disconnected}, state} + end + + def __info__(:socket_drain, state) do + # downstream websock_adapter's will close with 1012 Service Restart + {:stop, {:shutdown, :restart}, state} + end + + def __info__({:socket_push, opcode, payload}, state) do + {:push, {opcode, payload}, state} + end + + def __info__({:socket_close, pid, _reason}, state) do + socket_close(pid, state) + end + + def __info__(:garbage_collect, state) do + :erlang.garbage_collect(self()) + {:ok, state} + end + + def __info__({:debug_channels, ref, reply_to}, {state, socket}) do + channels = + Enum.map(state.channels, fn {topic, {pid, _monitor_ref, status}} -> + %{topic: topic, pid: pid, status: status} + end) + + send(reply_to, {:debug_channels, ref, channels}) + {:ok, {state, socket}} + end + + def __info__(_, state) do + {:ok, state} + end + + def __terminate__(_reason, _state_socket) do + :ok + end + + defp negotiate_serializer(serializers, vsn) when is_list(serializers) and is_binary(vsn) do + case Version.parse(vsn) do + {:ok, vsn} -> + serializers + |> Enum.find(:error, fn {_serializer, vsn_req} -> Version.match?(vsn, vsn_req) end) + |> case do + {serializer, _vsn_req} -> + {:ok, serializer} + + :error -> + Logger.warning( + "The client's requested transport version \"#{vsn}\" " <> + "does not match server's version requirements of #{inspect(serializers)}" + ) + + :error + end + + :error -> + Logger.warning("Client sent invalid transport version \"#{vsn}\"") + :error + end + end + + defp negotiate_serializer(_serializer, vsn) do + Logger.warning("Client sent invalid transport version \"#{vsn}\"") + :error + end + + defp user_connect(handler, endpoint, transport, serializer, params, 
connect_info) do + # The information in the Phoenix.Socket goes to userland and channels. + socket = %Socket{ + handler: handler, + endpoint: endpoint, + pubsub_server: endpoint.config(:pubsub_server), + serializer: serializer, + transport: transport + } + + # The information in the state is kept only inside the socket process. + state = %{ + channels: %{}, + channels_inverse: %{} + } + + connect_result = + if function_exported?(handler, :connect, 3) do + handler.connect(params, socket, connect_info) + else + handler.connect(params, socket) + end + + case connect_result do + {:ok, %Socket{} = socket} -> + case handler.id(socket) do + nil -> + {:ok, {state, socket}} + + id when is_binary(id) -> + # update the process label + set_label(socket) + {:ok, {state, %{socket | id: id}}} + + invalid -> + Logger.warning( + "#{inspect(handler)}.id/1 returned invalid identifier " <> + "#{inspect(invalid)}. Expected nil or a string." + ) + + :error + end + + :error -> + :error + + {:error, _reason} = err -> + err + + invalid -> + connect_arity = + if function_exported?(handler, :connect, 3), do: "connect/3", else: "connect/2" + + Logger.error( + "#{inspect(handler)}. #{connect_arity} returned invalid value #{inspect(invalid)}. 
" <> + "Expected {:ok, socket}, {:error, reason} or :error" + ) + + :error + end + end + + defp handle_in(_, %{ref: ref, topic: "phoenix", event: "heartbeat"}, state, socket) do + reply = %Reply{ + ref: ref, + topic: "phoenix", + status: :ok, + payload: %{} + } + + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + end + + defp handle_in( + nil, + %{event: "phx_join", topic: topic, ref: ref, join_ref: join_ref} = message, + state, + socket + ) do + case socket.handler.__channel__(topic) do + {channel, opts} -> + case Phoenix.Channel.Server.join(socket, channel, message, opts) do + {:ok, reply, pid} -> + reply = %Reply{ + join_ref: join_ref, + ref: ref, + topic: topic, + status: :ok, + payload: reply + } + + state = put_channel(state, pid, topic, join_ref) + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + + {:error, reply} -> + reply = %Reply{ + join_ref: join_ref, + ref: ref, + topic: topic, + status: :error, + payload: reply + } + + {:reply, :error, encode_reply(socket, reply), {state, socket}} + end + + _ -> + Logger.warning("Ignoring unmatched topic \"#{topic}\" in #{inspect(socket.handler)}") + {:reply, :error, encode_ignore(socket, message), {state, socket}} + end + end + + defp handle_in({pid, _ref, status}, %{event: "phx_join", topic: topic} = message, state, socket) do + receive do + {:socket_close, ^pid, _reason} -> :ok + after + 0 -> + if status != :leaving do + Logger.debug(fn -> + "Duplicate channel join for topic \"#{topic}\" in #{inspect(socket.handler)}. " <> + "Closing existing channel for new join." 
+ end) + end + end + + :ok = shutdown_duplicate_channel(pid) + {:push, {opcode, payload}, {new_state, new_socket}} = socket_close(pid, {state, socket}) + send(self(), {:socket_push, opcode, payload}) + handle_in(nil, message, new_state, new_socket) + end + + defp handle_in({pid, _ref, _status}, %{event: "phx_leave"} = msg, state, socket) do + %{topic: topic, join_ref: join_ref} = msg + + case state.channels_inverse do + # we need to match on nil to handle v1 protocol + %{^pid => {^topic, existing_join_ref}} when existing_join_ref in [join_ref, nil] -> + send(pid, msg) + {:ok, {update_channel_status(state, pid, topic, :leaving), socket}} + + # the client has raced a server close. No need to reply since we already sent close + %{^pid => {^topic, _old_join_ref}} -> + {:ok, {state, socket}} + end + end + + defp handle_in({pid, _ref, _status}, msg, state, socket) do + %{topic: topic, join_ref: join_ref} = msg + + case state.channels_inverse do + # we need to match on nil to handle v1 protocol + %{^pid => {^topic, existing_join_ref}} when existing_join_ref in [join_ref, nil] -> + send(pid, msg) + {:ok, {state, socket}} + + # the client has sent a stale message to a previous join_ref, ignore + %{^pid => {^topic, _old_join_ref}} -> + {:ok, {state, socket}} + end + end + + defp handle_in( + nil, + %{event: "phx_leave", ref: ref, topic: topic, join_ref: join_ref}, + state, + socket + ) do + reply = %Reply{ + ref: ref, + join_ref: join_ref, + topic: topic, + status: :ok, + payload: %{} + } + + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + end + + defp handle_in(nil, message, state, socket) do + # This clause can happen if the server drops the channel + # and the client sends a message meanwhile + {:reply, :error, encode_ignore(socket, message), {state, socket}} + end + + defp put_channel(state, pid, topic, join_ref) do + %{channels: channels, channels_inverse: channels_inverse} = state + monitor_ref = Process.monitor(pid) + + %{ + state + | channels: 
Map.put(channels, topic, {pid, monitor_ref, :joined}), + channels_inverse: Map.put(channels_inverse, pid, {topic, join_ref}) + } + end + + defp delete_channel(state, pid, topic, monitor_ref) do + %{channels: channels, channels_inverse: channels_inverse} = state + Process.demonitor(monitor_ref, [:flush]) + + %{ + state + | channels: Map.delete(channels, topic), + channels_inverse: Map.delete(channels_inverse, pid) + } + end + + defp encode_on_exit(socket, topic, ref, _reason) do + message = %Message{join_ref: ref, ref: ref, topic: topic, event: "phx_error", payload: %{}} + encode_reply(socket, message) + end + + defp encode_ignore(socket, %{ref: ref, topic: topic}) do + reply = %Reply{ref: ref, topic: topic, status: :error, payload: %{reason: "unmatched topic"}} + encode_reply(socket, reply) + end + + defp encode_reply(%{serializer: serializer}, message) do + {:socket_push, opcode, payload} = serializer.encode!(message) + {opcode, payload} + end + + defp encode_close(socket, topic, join_ref) do + message = %Message{ + join_ref: join_ref, + ref: join_ref, + topic: topic, + event: "phx_close", + payload: %{} + } + + encode_reply(socket, message) + end + + defp shutdown_duplicate_channel(pid) do + ref = Process.monitor(pid) + Process.exit(pid, {:shutdown, :duplicate_join}) + + receive do + {:DOWN, ^ref, _, _, _} -> :ok + after + 5_000 -> + Process.exit(pid, :kill) + receive do: ({:DOWN, ^ref, _, _, _} -> :ok) + end + end + + defp socket_close(pid, {state, socket}) do + case state.channels_inverse do + %{^pid => {topic, join_ref}} -> + {^pid, monitor_ref, _status} = Map.fetch!(state.channels, topic) + state = delete_channel(state, pid, topic, monitor_ref) + {:push, encode_close(socket, topic, join_ref), {state, socket}} + + %{} -> + {:ok, {state, socket}} + end + end + + defp update_channel_status(state, pid, topic, status) do + new_channels = Map.update!(state.channels, topic, fn {^pid, ref, _} -> {pid, ref, status} end) + %{state | channels: new_channels} + end +end 
diff --git a/deps/phoenix/lib/phoenix/socket/message.ex b/deps/phoenix/lib/phoenix/socket/message.ex new file mode 100644 index 0000000..b35f15b --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/message.ex @@ -0,0 +1,85 @@ +defmodule Phoenix.Socket.Message do + @moduledoc """ + Defines a message dispatched over transport to channels and vice-versa. + + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for + example "messages", "messages:123" + * `:event`- The string event name, for example "phx_join" + * `:payload` - The message payload + * `:ref` - The unique string ref + * `:join_ref` - The unique string ref when joining + + """ + + @type t :: %Phoenix.Socket.Message{} + defstruct topic: nil, event: nil, payload: nil, ref: nil, join_ref: nil + + @doc """ + Converts a map with string keys into a message struct. + + Raises `Phoenix.Socket.InvalidMessageError` if not valid. + """ + def from_map!(map) when is_map(map) do + try do + %Phoenix.Socket.Message{ + topic: Map.fetch!(map, "topic"), + event: Map.fetch!(map, "event"), + payload: Map.fetch!(map, "payload"), + ref: Map.fetch!(map, "ref"), + join_ref: Map.get(map, "join_ref") + } + rescue + err in [KeyError] -> + raise Phoenix.Socket.InvalidMessageError, "missing key #{inspect(err.key)}" + end + end + + defimpl Inspect do + def inspect(%Phoenix.Socket.Message{} = msg, opts) do + processed_msg = process_message(msg) + Inspect.Any.inspect(processed_msg, opts) + end + + defp process_message(%{payload: payload} = msg) when is_map(payload) do + %{msg | payload: Phoenix.Logger.filter_values(payload)} + end + + defp process_message(msg), do: msg + end +end + +defmodule Phoenix.Socket.Reply do + @moduledoc """ + Defines a reply sent from channels to transports. 
+ + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123" + * `:status` - The reply status as an atom + * `:payload` - The reply payload + * `:ref` - The unique string ref + * `:join_ref` - The unique string ref when joining + + """ + + @type t :: %Phoenix.Socket.Reply{} + defstruct topic: nil, status: nil, payload: nil, ref: nil, join_ref: nil +end + +defmodule Phoenix.Socket.Broadcast do + @moduledoc """ + Defines a message sent from pubsub to channels and vice-versa. + + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123" + * `:event`- The string event name, for example "phx_join" + * `:payload` - The message payload + + """ + + @type t :: %Phoenix.Socket.Broadcast{} + defstruct topic: nil, event: nil, payload: nil +end diff --git a/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex b/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex new file mode 100644 index 0000000..4f2d304 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex @@ -0,0 +1,138 @@ +defmodule Phoenix.Socket.PoolSupervisor do + @moduledoc false + use Supervisor + + def start_link({endpoint, name, partitions}) do + Supervisor.start_link( + __MODULE__, + {endpoint, name, partitions}, + name: Module.concat(endpoint, name) + ) + end + + def start_child(socket, key, spec) do + %{endpoint: endpoint, handler: name} = socket + + case endpoint.config({:socket, name}) do + ets when not is_nil(ets) -> + partitions = :ets.lookup_element(ets, :partitions, 2) + sup = :ets.lookup_element(ets, :erlang.phash2(key, partitions), 2) + DynamicSupervisor.start_child(sup, spec) + + nil -> + raise ArgumentError, """ + no socket supervision tree found for #{inspect(name)}. 
+ + Ensure your #{inspect(endpoint)} contains a socket mount, for example: + + socket "/socket", #{inspect(name)}, + websocket: true, + longpoll: true + """ + end + end + + def start_pooled(ref, i) do + case DynamicSupervisor.start_link(strategy: :one_for_one) do + {:ok, pid} -> + :ets.insert(ref, {i, pid}) + {:ok, pid} + + {:error, reason} -> + {:error, reason} + end + end + + @impl true + def init({endpoint, name, partitions}) do + ref = :ets.new(name, [:public, read_concurrency: true]) + :ets.insert(ref, {:partitions, partitions}) + Phoenix.Config.permanent(endpoint, {:socket, name}, ref) + + children = + for i <- 0..(partitions - 1) do + %{ + id: i, + start: {__MODULE__, :start_pooled, [ref, i]}, + type: :supervisor, + shutdown: :infinity + } + end + + Supervisor.init(children, strategy: :one_for_one) + end +end + +defmodule Phoenix.Socket.PoolDrainer do + @moduledoc false + use GenServer + + def child_spec({_endpoint, name, opts} = tuple) do + # The process should terminate within shutdown but, + # in case it doesn't, we will be killed if we exceed + # double of that + %{ + id: {:terminator, name}, + start: {__MODULE__, :start_link, [tuple]}, + shutdown: Keyword.get(opts[:drainer], :shutdown, 30_000) + } + end + + def start_link(tuple) do + GenServer.start_link(__MODULE__, tuple) + end + + @impl true + def init({endpoint, name, opts}) do + Process.flag(:trap_exit, true) + size = Keyword.get(opts[:drainer], :batch_size, 10_000) + interval = Keyword.get(opts[:drainer], :batch_interval, 2_000) + log_level = Keyword.get(opts[:drainer], :log, opts[:log] || :info) + {:ok, {endpoint, name, size, interval, log_level}} + end + + @impl true + def terminate(_reason, {endpoint, name, size, interval, log_level}) do + ets = endpoint.config({:socket, name}) + partitions = :ets.lookup_element(ets, :partitions, 2) + + {collection, total} = + Enum.map_reduce(0..(partitions - 1), 0, fn index, total -> + try do + sup = :ets.lookup_element(ets, index, 2) + children = 
DynamicSupervisor.which_children(sup) + {Enum.map(children, &elem(&1, 1)), total + length(children)} + catch + _, _ -> {[], total} + end + end) + + rounds = div(total, size) + 1 + + for {pids, index} <- + collection |> Stream.concat() |> Stream.chunk_every(size) |> Stream.with_index(1) do + count = if index == rounds, do: length(pids), else: size + + :telemetry.execute( + [:phoenix, :socket_drain], + %{count: count, total: total, index: index, rounds: rounds}, + %{ + endpoint: endpoint, + socket: name, + interval: interval, + log: log_level + } + ) + + spawn(fn -> + for pid <- pids do + send(pid, %Phoenix.Socket.Broadcast{event: "phx_drain"}) + end + end) + + if index < rounds do + Process.sleep(interval) + end + end + end +end diff --git a/deps/phoenix/lib/phoenix/socket/serializer.ex b/deps/phoenix/lib/phoenix/socket/serializer.ex new file mode 100644 index 0000000..a8deb15 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializer.ex @@ -0,0 +1,29 @@ +defmodule Phoenix.Socket.Serializer do + @moduledoc """ + A behaviour that serializes incoming and outgoing socket messages. + + By default Phoenix provides a serializer that encodes to JSON and + decodes JSON messages. + + Custom serializers may be configured in the socket. + """ + + @doc """ + Encodes a `Phoenix.Socket.Broadcast` struct to fastlane format. + """ + @callback fastlane!(Phoenix.Socket.Broadcast.t()) :: + {:socket_push, :text, iodata()} + | {:socket_push, :binary, iodata()} + + @doc """ + Encodes `Phoenix.Socket.Message` and `Phoenix.Socket.Reply` structs to push format. + """ + @callback encode!(Phoenix.Socket.Message.t() | Phoenix.Socket.Reply.t()) :: + {:socket_push, :text, iodata()} + | {:socket_push, :binary, iodata()} + + @doc """ + Decodes iodata into `Phoenix.Socket.Message` struct. 
+ """ + @callback decode!(iodata, options :: Keyword.t()) :: Phoenix.Socket.Message.t() +end diff --git a/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex b/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex new file mode 100644 index 0000000..cd96fe8 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex @@ -0,0 +1,47 @@ +defmodule Phoenix.Socket.V1.JSONSerializer do + @moduledoc false + @behaviour Phoenix.Socket.Serializer + + alias Phoenix.Socket.{Broadcast, Message, Reply} + + @impl true + def fastlane!(%Broadcast{} = msg) do + map = %Message{topic: msg.topic, event: msg.event, payload: msg.payload} + {:socket_push, :text, encode_v1_fields_only(map)} + end + + @impl true + def encode!(%Reply{} = reply) do + map = %Message{ + topic: reply.topic, + event: "phx_reply", + ref: reply.ref, + payload: %{status: reply.status, response: reply.payload} + } + + {:socket_push, :text, encode_v1_fields_only(map)} + end + + def encode!(%Message{} = map) do + {:socket_push, :text, encode_v1_fields_only(map)} + end + + @impl true + def decode!(message, _opts) do + payload = Phoenix.json_library().decode!(message) + + case payload do + %{} -> + Phoenix.Socket.Message.from_map!(payload) + + other -> + raise "V1 JSON Serializer expected a map, got #{inspect(other)}" + end + end + + defp encode_v1_fields_only(%Message{} = msg) do + msg + |> Map.take([:topic, :event, :payload, :ref]) + |> Phoenix.json_library().encode_to_iodata!() + end +end diff --git a/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex b/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex new file mode 100644 index 0000000..05e7b4c --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex @@ -0,0 +1,160 @@ +defmodule Phoenix.Socket.V2.JSONSerializer do + @moduledoc false + @behaviour Phoenix.Socket.Serializer + + @push 0 + @reply 1 + @broadcast 2 + + alias Phoenix.Socket.{Broadcast, Message, 
Reply} + + @impl true + def fastlane!(%Broadcast{payload: {:binary, data}} = msg) do + topic_size = byte_size!(msg.topic, :topic, 255) + event_size = byte_size!(msg.event, :event, 255) + + bin = << + @broadcast::size(8), + topic_size::size(8), + event_size::size(8), + msg.topic::binary-size(topic_size), + msg.event::binary-size(event_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def fastlane!(%Broadcast{payload: %{}} = msg) do + data = Phoenix.json_library().encode_to_iodata!([nil, nil, msg.topic, msg.event, msg.payload]) + {:socket_push, :text, data} + end + + def fastlane!(%Broadcast{payload: invalid}) do + raise ArgumentError, "expected broadcasted payload to be a map, got: #{inspect(invalid)}" + end + + @impl true + def encode!(%Reply{payload: {:binary, data}} = reply) do + status = to_string(reply.status) + join_ref = to_string(reply.join_ref) + ref = to_string(reply.ref) + join_ref_size = byte_size!(join_ref, :join_ref, 255) + ref_size = byte_size!(ref, :ref, 255) + topic_size = byte_size!(reply.topic, :topic, 255) + status_size = byte_size!(status, :status, 255) + + bin = << + @reply::size(8), + join_ref_size::size(8), + ref_size::size(8), + topic_size::size(8), + status_size::size(8), + join_ref::binary-size(join_ref_size), + ref::binary-size(ref_size), + reply.topic::binary-size(topic_size), + status::binary-size(status_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def encode!(%Reply{} = reply) do + data = [ + reply.join_ref, + reply.ref, + reply.topic, + "phx_reply", + %{status: reply.status, response: reply.payload} + ] + + {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)} + end + + def encode!(%Message{payload: {:binary, data}} = msg) do + join_ref = to_string(msg.join_ref) + join_ref_size = byte_size!(join_ref, :join_ref, 255) + topic_size = byte_size!(msg.topic, :topic, 255) + event_size = byte_size!(msg.event, :event, 255) + + bin = << + @push::size(8), + join_ref_size::size(8), + 
topic_size::size(8), + event_size::size(8), + join_ref::binary-size(join_ref_size), + msg.topic::binary-size(topic_size), + msg.event::binary-size(event_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def encode!(%Message{payload: %{}} = msg) do + data = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload] + {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)} + end + + def encode!(%Message{payload: invalid}) do + raise ArgumentError, "expected payload to be a map, got: #{inspect(invalid)}" + end + + @impl true + def decode!(raw_message, opts) do + case Keyword.fetch(opts, :opcode) do + {:ok, :text} -> decode_text(raw_message) + {:ok, :binary} -> decode_binary(raw_message) + end + end + + defp decode_text(raw_message) do + [join_ref, ref, topic, event, payload | _] = Phoenix.json_library().decode!(raw_message) + + %Message{ + topic: topic, + event: event, + payload: payload, + ref: ref, + join_ref: join_ref + } + end + + defp decode_binary(<< + @push::size(8), + join_ref_size::size(8), + ref_size::size(8), + topic_size::size(8), + event_size::size(8), + join_ref::binary-size(join_ref_size), + ref::binary-size(ref_size), + topic::binary-size(topic_size), + event::binary-size(event_size), + data::binary + >>) do + %Message{ + topic: topic, + event: event, + payload: {:binary, data}, + ref: ref, + join_ref: join_ref + } + end + + defp byte_size!(bin, kind, max) do + case byte_size(bin) do + size when size <= max -> + size + + oversized -> + raise ArgumentError, """ + unable to convert #{kind} to binary. + + #{inspect(bin)} + + must be less than or equal to #{max} bytes, but is #{oversized} bytes. 
+ """ + end + end +end diff --git a/deps/phoenix/lib/phoenix/socket/transport.ex b/deps/phoenix/lib/phoenix/socket/transport.ex new file mode 100644 index 0000000..a09c778 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/transport.ex @@ -0,0 +1,654 @@ +defmodule Phoenix.Socket.Transport do + @moduledoc """ + Outlines the Socket <-> Transport communication. + + Each transport, such as websockets and longpolling, must interact + with a socket. This module defines the functions a transport will + invoke on a given socket implementation. + + `Phoenix.Socket` is just one possible implementation of a socket + that multiplexes events over multiple channels. If you implement + this behaviour, then existing transports can use your new socket + implementation, without passing through channels. + + This module also provides guidelines and convenience functions for + implementing transports. Albeit its primary goal is to aid in the + definition of custom sockets. + + ## Example + + Here is a simple echo socket implementation: + + defmodule EchoSocket do + @behaviour Phoenix.Socket.Transport + + def child_spec(opts) do + # We won't spawn any process, so let's ignore the child spec + :ignore + end + + def connect(state) do + # Callback to retrieve relevant data from the connection. + # The map contains options, params, transport and endpoint keys. + {:ok, state} + end + + def init(state) do + # Now we are effectively inside the process that maintains the socket. + {:ok, state} + end + + def handle_in({text, _opts}, state) do + {:reply, :ok, {:text, text}, state} + end + + def handle_info(_, state) do + {:ok, state} + end + + def terminate(_reason, _state) do + :ok + end + end + + It can be mounted in your endpoint like any other socket: + + socket "/socket", EchoSocket, websocket: true, longpoll: true + + You can now interact with the socket under `/socket/websocket` + and `/socket/longpoll`. + + ## Custom transports + + Sockets are operated by a transport. 
When a transport is defined, + it usually receives a socket module and the module will be invoked + when certain events happen at the transport level. The functions + a transport can invoke are the callbacks defined in this module. + + Whenever the transport receives a new connection, it should invoke + the `c:connect/1` callback with a map of metadata. Different sockets + may require different metadata. + + If the connection is accepted, the transport can move the connection + to another process, if so desires, or keep using the same process. The + process responsible for managing the socket should then call `c:init/1`. + + For each message received from the client, the transport must call + `c:handle_in/2` on the socket. For each informational message the + transport receives, it should call `c:handle_info/2` on the socket. + + Transports can optionally implement `c:handle_control/2` for handling + control frames such as `:ping` and `:pong`. + + On termination, `c:terminate/2` must be called. A special atom with + reason `:closed` can be used to specify that the client terminated + the connection. + + ### Booting + + When you list a socket under `Phoenix.Endpoint.socket/3`, Phoenix + will automatically start the socket module under its supervision tree, + however Phoenix does not manage any transport. + + Whenever your endpoint starts, Phoenix invokes the `child_spec/1` on + each listed socket and start that specification under the endpoint + supervisor. Since the socket supervision tree is started by the endpoint, + any custom transport must be started after the endpoint. + """ + + @type state :: term() + + @doc """ + Returns a child specification for socket management. + + This is invoked only once per socket regardless of + the number of transports and should be responsible + for setting up any process structure used exclusively + by the socket regardless of transports. 
+ + Each socket connection is started by the transport + and the process that controls the socket likely + belongs to the transport. However, some sockets spawn + new processes, such as `Phoenix.Socket` which spawns + channels, and this gives the ability to start a + supervision tree associated to the socket. + + It receives the socket options from the endpoint, + for example: + + socket "/my_app", MyApp.Socket, shutdown: 5000 + + means `child_spec([shutdown: 5000])` will be invoked. + + `:ignore` means no child spec is necessary for this socket. + """ + @callback child_spec(keyword) :: :supervisor.child_spec() | :ignore + + @doc """ + Returns a child specification for terminating the socket. + + This is a process that is started late in the supervision + tree with the specific goal of draining connections on + application shutdown. + + Similar to `child_spec/1`, it receives the socket options + from the endpoint. + """ + @callback drainer_spec(keyword) :: :supervisor.child_spec() | :ignore + + @doc """ + Connects to the socket. + + The transport passes a map of metadata and the socket + returns `{:ok, state}`, `{:error, reason}` or `:error`. + The state must be stored by the transport and returned + in all future operations. When `{:error, reason}` is + returned, some transports - such as WebSockets - allow + customizing the response based on `reason` via a custom + `:error_handler`. + + This function is used for authorization purposes and it + may be invoked outside of the process that effectively + runs the socket. + + In the default `Phoenix.Socket` implementation, the + metadata expects the following keys: + + * `:endpoint` - the application endpoint + * `:transport` - the transport name + * `:params` - the connection parameters + * `:options` - a keyword list of transport options, often + given by developers when configuring the transport. 
+ It must include a `:serializer` field with the list of + serializers and their requirements + + """ + @callback connect(transport_info :: map) :: {:ok, state} | {:error, term()} | :error + + @doc """ + Initializes the socket state. + + This must be executed from the process that will effectively + operate the socket. + """ + @callback init(state) :: {:ok, state} + + @doc """ + Handles incoming socket messages. + + The message is represented as `{payload, options}`. It must + return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:reply, status, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + The `reply` is a tuple contain an `opcode` atom and a message that can + be any term. The built-in websocket transport supports both `:text` and + `:binary` opcode and the message must be always iodata. Long polling only + supports text opcode. + """ + @callback handle_in({message :: term, opts :: keyword}, state) :: + {:ok, state} + | {:reply, :ok | :error, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Handles incoming control frames. + + The message is represented as `{payload, options}`. It must + return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:reply, status, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + Control frames are only supported when using websockets. + + The `options` contains an `opcode` key, this will be either `:ping` or + `:pong`. + + If a control frame doesn't have a payload, then the payload value + will be `nil`. + """ + @callback handle_control({message :: term, opts :: keyword}, state) :: + {:ok, state} + | {:reply, :ok | :error, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Handles info messages. + + The message is a term. 
It must return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:push, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + The `reply` is a tuple contain an `opcode` atom and a message that can + be any term. The built-in websocket transport supports both `:text` and + `:binary` opcode and the message must be always iodata. Long polling only + supports text opcode. + """ + @callback handle_info(message :: term, state) :: + {:ok, state} + | {:push, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Invoked on termination. + + If `reason` is `:closed`, it means the client closed the socket. This is + considered a `:normal` exit signal, so linked process will not automatically + exit. See `Process.exit/2` for more details on exit signals. + """ + @callback terminate(reason :: term, state) :: :ok + + @optional_callbacks handle_control: 2, drainer_spec: 1 + + require Logger + + @doc false + def load_config(true, module), + do: module.default_config() + + def load_config(config, module), + do: module.default_config() |> Keyword.merge(config) |> load_config() + + @doc false + def load_config(config) do + {connect_info, config} = Keyword.pop(config, :connect_info, []) + + connect_info = + if config[:auth_token] do + # auth_token is included by default when enabled + [:auth_token | connect_info] + else + connect_info + end + + connect_info = + Enum.map(connect_info, fn + key when key in [:peer_data, :trace_context_headers, :uri, :user_agent, :x_headers, :sec_websocket_headers, :auth_token] -> + key + + {:session, session} -> + {:session, init_session(session)} + + {_, _} = pair -> + pair + + other -> + raise ArgumentError, + ":connect_info keys are expected to be one of :peer_data, :trace_context_headers, :x_headers, :user_agent, :sec_websocket_headers, :uri, or {:session, config}, " <> + "optionally followed by custom keyword pairs, got: #{inspect(other)}" + 
end) + + [connect_info: connect_info] ++ config + end + + # The original session_config is returned in addition to init value so we can + # access special config like :csrf_token_key downstream. + defp init_session(session_config) when is_list(session_config) do + key = Keyword.fetch!(session_config, :key) + store = Plug.Session.Store.get(Keyword.fetch!(session_config, :store)) + init = store.init(Keyword.drop(session_config, [:store, :key])) + csrf_token_key = Keyword.get(session_config, :csrf_token_key, "_csrf_token") + {key, store, {csrf_token_key, init}} + end + + defp init_session({_, _, _} = mfa) do + {:mfa, mfa} + end + + @doc """ + Runs the code reloader if enabled. + """ + def code_reload(conn, endpoint, opts) do + if Keyword.get(opts, :code_reloader, endpoint.config(:code_reloader)) do + Phoenix.CodeReloader.reload(endpoint) + end + + conn + end + + @doc """ + Logs the transport request. + + Available for transports that generate a connection. + """ + def transport_log(conn, level) do + if level do + Plug.Logger.call(conn, Plug.Logger.init(log: level)) + else + conn + end + end + + @doc """ + Checks the origin request header against the list of allowed origins. + + Should be called by transports before connecting when appropriate. + If the origin header matches the allowed origins, no origin header was + sent or no origin was configured, it will return the given connection. + + Otherwise a 403 Forbidden response will be sent and the connection halted. + It is a noop if the connection has been halted. 
+ """ + def check_origin(conn, handler, endpoint, opts, sender \\ &Plug.Conn.send_resp/1) + + def check_origin(%Plug.Conn{halted: true} = conn, _handler, _endpoint, _opts, _sender), + do: conn + + def check_origin(conn, handler, endpoint, opts, sender) do + import Plug.Conn + origin = conn |> get_req_header("origin") |> List.first() + check_origin = check_origin_config(handler, endpoint, opts) + + cond do + is_nil(origin) or check_origin == false -> + conn + + origin_allowed?(check_origin, URI.parse(origin), endpoint, conn) -> + conn + + true -> + Logger.error(""" + Could not check origin for Phoenix.Socket transport. + + Origin of the request: #{origin} + + This happens when you are attempting a socket connection to + a different host than the one configured in your config/ + files. For example, in development the host is configured + to "localhost" but you may be trying to access it from + "127.0.0.1". To fix this issue, you may either: + + 1. update [url: [host: ...]] to your actual host in the + config file for your current environment (recommended) + + 2. pass the :check_origin option when configuring your + endpoint or when configuring the transport in your + UserSocket module, explicitly outlining which origins + are allowed: + + check_origin: ["https://example.com", + "//another.com:888", "//other.com"] + + """) + + resp(conn, :forbidden, "") + |> sender.() + |> halt() + end + end + + @doc """ + Checks the Websocket subprotocols request header against the allowed subprotocols. + + Should be called by transports before connecting when appropriate. + If the sec-websocket-protocol header matches the allowed subprotocols, + it will put sec-websocket-protocol response header and return the given connection. + If no sec-websocket-protocol header was sent it will return the given connection. + + Otherwise a 403 Forbidden response will be sent and the connection halted. + It is a noop if the connection has been halted. 
+ """ + def check_subprotocols(conn, subprotocols) + + def check_subprotocols(%Plug.Conn{halted: true} = conn, _subprotocols), do: conn + def check_subprotocols(conn, nil), do: conn + + def check_subprotocols(conn, subprotocols) when is_list(subprotocols) do + case Plug.Conn.get_req_header(conn, "sec-websocket-protocol") do + [] -> + conn + + [subprotocols_header | _] -> + request_subprotocols = subprotocols_header |> Plug.Conn.Utils.list() + + subprotocol = + Enum.find(subprotocols, fn elem -> Enum.find(request_subprotocols, &(&1 == elem)) end) + + if subprotocol do + Plug.Conn.put_resp_header(conn, "sec-websocket-protocol", subprotocol) + else + subprotocols_error_response(conn, subprotocols) + end + end + end + + def check_subprotocols(conn, subprotocols), do: subprotocols_error_response(conn, subprotocols) + + defp subprotocols_error_response(conn, subprotocols) do + import Plug.Conn + request_headers = get_req_header(conn, "sec-websocket-protocol") + + Logger.error(""" + Could not check Websocket subprotocols for Phoenix.Socket transport. + + Subprotocols of the request: #{inspect(request_headers)} + Configured supported subprotocols: #{inspect(subprotocols)} + + This happens when you are attempting a socket connection to + a different subprotocols than the one configured in your endpoint + or when you incorrectly configured supported subprotocols. + + To fix this issue, you may either: + + 1. update websocket: [subprotocols: [..]] to your actual subprotocols + in your endpoint socket configuration. + + 2. check the correctness of the `sec-websocket-protocol` request header + sent from the client. + + 3. remove `websocket` option from your endpoint socket configuration + if you don't use Websocket subprotocols. + """) + + resp(conn, :forbidden, "") + |> send_resp() + |> halt() + end + + @doc """ + Extracts connection information from `conn` and returns a map. + + Keys are retrieved from the optional transport option `:connect_info`. 
+ This functionality is transport specific. Please refer to your transports' + documentation for more information. + + The supported keys are: + + * `:peer_data` - the result of `Plug.Conn.get_peer_data/1` + + * `:trace_context_headers` - a list of all trace context headers + + * `:x_headers` - a list of all request headers that have an "x-" prefix + + * `:uri` - a `%URI{}` derived from the conn + + * `:user_agent` - the value of the "user-agent" request header + + * `:sec_websocket_headers` - a list of all request headers that have a "sec-websocket-" prefix + + The CSRF check can be disabled by setting the `:check_csrf` option to `false`. + """ + def connect_info(conn, endpoint, keys, opts \\ []) do + for key <- keys, into: %{} do + case key do + :peer_data -> + {:peer_data, Plug.Conn.get_peer_data(conn)} + + :trace_context_headers -> + {:trace_context_headers, fetch_trace_context_headers(conn)} + + :x_headers -> + {:x_headers, fetch_headers(conn, "x-")} + + :uri -> + {:uri, fetch_uri(conn)} + + :user_agent -> + {:user_agent, fetch_user_agent(conn)} + + :sec_websocket_headers -> + {:sec_websocket_headers, fetch_headers(conn, "sec-websocket-")} + + {:session, session} -> + {:session, connect_session(conn, endpoint, session, opts)} + + :auth_token -> + {:auth_token, conn.private[:phoenix_transport_auth_token]} + + {key, val} -> + {key, val} + end + end + end + + defp connect_session(conn, endpoint, {key, store, {csrf_token_key, init}}, opts) do + conn = Plug.Conn.fetch_cookies(conn) + check_csrf = Keyword.get(opts, :check_csrf, true) + + with cookie when is_binary(cookie) <- conn.cookies[key], + conn = put_in(conn.secret_key_base, endpoint.config(:secret_key_base)), + {_, session} <- store.get(conn, cookie, init), + true <- not check_csrf or csrf_token_valid?(conn, session, csrf_token_key) do + session + else + _ -> nil + end + end + + defp connect_session(conn, endpoint, {:mfa, {module, function, args}}, opts) do + case apply(module, function, args) do + 
session_config when is_list(session_config) -> + connect_session(conn, endpoint, init_session(session_config), opts) + + other -> + raise ArgumentError, + "the MFA given to `session_config` must return a keyword list, got: #{inspect(other)}" + end + end + + defp fetch_headers(conn, prefix) do + for {header, _} = pair <- conn.req_headers, + String.starts_with?(header, prefix), + do: pair + end + + defp fetch_trace_context_headers(conn) do + for {header, _} = pair <- conn.req_headers, + header in ["traceparent", "tracestate"], + do: pair + end + + defp fetch_uri(conn) do + %URI{ + scheme: to_string(conn.scheme), + query: conn.query_string, + port: conn.port, + host: conn.host, + authority: conn.host, + path: conn.request_path + } + end + + defp fetch_user_agent(conn) do + with {_, value} <- List.keyfind(conn.req_headers, "user-agent", 0) do + value + end + end + + defp csrf_token_valid?(conn, session, csrf_token_key) do + with csrf_token when is_binary(csrf_token) <- conn.params["_csrf_token"], + csrf_state when is_binary(csrf_state) <- + Plug.CSRFProtection.dump_state_from_session(session[csrf_token_key]) do + Plug.CSRFProtection.valid_state_and_csrf_token?(csrf_state, csrf_token) + end + end + + defp check_origin_config(handler, endpoint, opts) do + Phoenix.Config.cache(endpoint, {:check_origin, handler}, fn _ -> + check_origin = + case Keyword.get(opts, :check_origin, endpoint.config(:check_origin)) do + origins when is_list(origins) -> + Enum.map(origins, &parse_origin/1) + + boolean when is_boolean(boolean) -> + boolean + + {module, function, arguments} -> + {module, function, arguments} + + :conn -> + :conn + + invalid -> + raise ArgumentError, + ":check_origin expects a boolean, list of hosts, :conn, or MFA tuple, got: #{inspect(invalid)}" + end + + {:cache, check_origin} + end) + end + + defp parse_origin(origin) do + case URI.parse(origin) do + %{host: nil} -> + raise ArgumentError, + "invalid :check_origin option: #{inspect(origin)}. 
" <> + "Expected an origin with a host that is parsable by URI.parse/1. For example: " <> + "[\"https://example.com\", \"//another.com:888\", \"//other.com\"]" + + %{scheme: scheme, port: port, host: host} -> + {scheme, host, port} + end + end + + defp origin_allowed?({module, function, arguments}, uri, _endpoint, _conn), + do: apply(module, function, [uri | arguments]) + + defp origin_allowed?(:conn, uri, _endpoint, %Plug.Conn{} = conn) do + uri.host == conn.host and + uri.scheme == Atom.to_string(conn.scheme) and + uri.port == conn.port + end + + defp origin_allowed?(_check_origin, %{host: nil}, _endpoint, _conn), + do: false + + defp origin_allowed?(true, uri, endpoint, _conn), + do: compare?(uri.host, host_to_binary(endpoint.config(:url)[:host])) + + defp origin_allowed?(check_origin, uri, _endpoint, _conn) when is_list(check_origin), + do: origin_allowed?(uri, check_origin) + + defp origin_allowed?(uri, allowed_origins) do + %{scheme: origin_scheme, host: origin_host, port: origin_port} = uri + + Enum.any?(allowed_origins, fn {allowed_scheme, allowed_host, allowed_port} -> + compare?(origin_scheme, allowed_scheme) and + compare?(origin_port, allowed_port) and + compare_host?(origin_host, allowed_host) + end) + end + + defp compare?(request_val, allowed_val) do + is_nil(allowed_val) or request_val == allowed_val + end + + defp compare_host?(_request_host, nil), + do: true + + defp compare_host?(request_host, "*." <> allowed_host), + do: request_host == allowed_host or String.ends_with?(request_host, "." 
<> allowed_host) + + defp compare_host?(request_host, allowed_host), + do: request_host == allowed_host + + # TODO: Remove this once {:system, env_var} deprecation is removed + defp host_to_binary({:system, env_var}), do: host_to_binary(System.get_env(env_var)) + defp host_to_binary(host), do: host +end diff --git a/deps/phoenix/lib/phoenix/test/channel_test.ex b/deps/phoenix/lib/phoenix/test/channel_test.ex new file mode 100644 index 0000000..9897e9a --- /dev/null +++ b/deps/phoenix/lib/phoenix/test/channel_test.ex @@ -0,0 +1,748 @@ +defmodule Phoenix.ChannelTest do + @moduledoc """ + Conveniences for testing Phoenix channels. + + In channel tests, we interact with channels via process + communication, sending and receiving messages. It is also + common to subscribe to the same topic the channel subscribes + to, allowing us to assert if a given message was broadcast + or not. + + ## Channel testing + + To get started, define the module attribute `@endpoint` + in your test case pointing to your application endpoint. + + Then you can directly create a socket and + `subscribe_and_join/4` topics and channels: + + {:ok, _, socket} = + socket(UserSocket, "user:id", %{some_assigns: 1}) + |> subscribe_and_join(RoomChannel, "room:lobby", %{"id" => 3}) + + You usually want to set the same ID and assigns your + `UserSocket.connect/3` callback would set. Alternatively, + you can use the `connect/3` helper to call your `UserSocket.connect/3` + callback and initialize the socket with the socket id: + + {:ok, socket} = connect(UserSocket, %{"some" => "params"}, %{}) + {:ok, _, socket} = subscribe_and_join(socket, "room:lobby", %{"id" => 3}) + + Once called, `subscribe_and_join/4` will subscribe the + current test process to the "room:lobby" topic and start a + channel in another process. It returns `{:ok, reply, socket}` + or `{:error, reply}`. + + Now, in the same way the channel has a socket representing + communication it will push to the client. 
Our test has a + socket representing communication to be pushed to the server. + + For example, we can use the `push/3` function in the test + to push messages to the channel (it will invoke `handle_in/3`): + + push(socket, "my_event", %{"some" => "data"}) + + Similarly, we can broadcast messages from the test itself + on the topic that both test and channel are subscribed to, + triggering `handle_out/3` on the channel: + + broadcast_from(socket, "my_event", %{"some" => "data"}) + + > Note only `broadcast_from/3` and `broadcast_from!/3` are + available in tests to avoid broadcast messages to be resent + to the test process. + + While the functions above are pushing data to the channel + (server) we can use `assert_push/3` to verify the channel + pushed a message to the client: + + assert_push "my_event", %{"some" => "data"} + + Or even assert something was broadcast into pubsub: + + assert_broadcast "my_event", %{"some" => "data"} + + Finally, every time a message is pushed to the channel, + a reference is returned. We can use this reference to + assert a particular reply was sent from the server: + + ref = push(socket, "counter", %{}) + assert_reply ref, :ok, %{"counter" => 1} + + ## Checking side-effects + + Often one may want to do side-effects inside channels, + like writing to the database, and verify those side-effects + during their tests. + + Imagine the following `handle_in/3` inside a channel: + + def handle_in("publish", %{"id" => id}, socket) do + Repo.get!(Post, id) |> Post.publish() |> Repo.update!() + {:noreply, socket} + end + + Because the whole communication is asynchronous, the + following test would be very brittle: + + push(socket, "publish", %{"id" => 3}) + assert Repo.get_by(Post, id: 3, published: true) + + The issue is that we have no guarantees the channel has + done processing our message after calling `push/3`. The + best solution is to assert the channel sent us a reply + before doing any other assertion. 
First change the + channel to send replies: + + def handle_in("publish", %{"id" => id}, socket) do + Repo.get!(Post, id) |> Post.publish() |> Repo.update!() + {:reply, :ok, socket} + end + + Then expect them in the test: + + ref = push(socket, "publish", %{"id" => 3}) + assert_reply ref, :ok + assert Repo.get_by(Post, id: 3, published: true) + + ## Leave and close + + This module also provides functions to simulate leaving + and closing a channel. Once you leave or close a channel, + because the channel is linked to the test process on join, + it will crash the test process: + + leave(socket) + ** (EXIT from #PID<...>) {:shutdown, :leave} + + You can avoid this by unlinking the channel process in + the test: + + Process.unlink(socket.channel_pid) + + Notice `leave/1` is async, so it will also return a + reference which you can use to check for a reply: + + ref = leave(socket) + assert_reply ref, :ok + + On the other hand, close is always sync and it will + return only after the channel process is guaranteed to + have been terminated: + + :ok = close(socket) + + This mimics the behaviour existing in clients. + + To assert that your channel closes or errors asynchronously, + you can monitor the channel process with the tools provided + by Elixir, and wait for the `:DOWN` message. 
+ Imagine an implementation of the `handle_info/2` function + that closes the channel when it receives `:some_message`: + + def handle_info(:some_message, socket) do + {:stop, :normal, socket} + end + + In your test, you can assert that the close happened by: + + Process.monitor(socket.channel_pid) + send(socket.channel_pid, :some_message) + assert_receive {:DOWN, _, _, _, :normal} + + """ + + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply} + alias Phoenix.Channel.Server + + defmodule NoopSerializer do + @behaviour Phoenix.Socket.Serializer + @moduledoc false + + def fastlane!(%Broadcast{} = msg) do + %Message{ + topic: msg.topic, + event: msg.event, + payload: msg.payload + } + end + + def encode!(%Reply{} = reply), do: reply + def encode!(%Message{} = msg), do: msg + def decode!(message, _opts), do: message + end + + @doc false + defmacro __using__(_) do + IO.warn """ + Using Phoenix.ChannelTest is deprecated, instead of: + + use Phoenix.ChannelTest + + do: + + import Phoenix.ChannelTest + """, Macro.Env.stacktrace(__CALLER__) + + quote do + import Phoenix.ChannelTest + end + end + + @doc """ + Builds a socket for the given `socket_module`. + + The socket is then used to subscribe and join channels. + Use this function when you want to create a blank socket + to pass to functions like `UserSocket.connect/3`. + + Otherwise, use `socket/4` if you want to build a socket with + existing id and assigns. + + ## Examples + + socket(MyApp.UserSocket) + + """ + defmacro socket(socket_module) do + socket(socket_module, nil, [], [], __CALLER__) + end + + @doc """ + Builds a socket for the given `socket_module` with given id and assigns. + + ## Examples + + socket(MyApp.UserSocket, "user_id", %{some: :assign}) + + If you need to access the socket in another process than the test process, + you can give the `pid` of the test process in the 4th argument. 
+ + ## Examples + + test "connect in a task" do + pid = self() + task = Task.async(fn -> + socket = socket(MyApp.UserSocket, "user_id", %{some: :assign}, test_process: pid) + broadcast_from!(socket, "default", %{"foo" => "bar"}) + assert_push "default", %{"foo" => "bar"} + end) + Task.await(task) + end + + """ + defmacro socket(socket_module, socket_id, socket_assigns, options \\ []) do + socket(socket_module, socket_id, socket_assigns, options, __CALLER__) + end + + defp socket(module, id, assigns, options, caller) do + if endpoint = Module.get_attribute(caller.module, :endpoint) do + quote do + unquote(__MODULE__).__socket__( + unquote(module), + unquote(id), + unquote(assigns), + unquote(endpoint), + unquote(options) + ) + end + else + raise "module attribute @endpoint not set for socket/2" + end + end + + @doc false + def __socket__(socket, id, assigns, endpoint, options) do + %Socket{ + assigns: Enum.into(assigns, %{}), + endpoint: endpoint, + handler: socket || first_socket!(endpoint), + id: id, + pubsub_server: endpoint.config(:pubsub_server), + serializer: NoopSerializer, + transport: {__MODULE__, fetch_test_supervisor!(options)}, + transport_pid: self() + } + end + + defp first_socket!(endpoint) do + case endpoint.__sockets__ do + [] -> raise ArgumentError, "#{inspect endpoint} has no socket declaration" + [{_, socket, _} | _] -> socket + end + end + + defp fetch_test_supervisor!(options) do + case ExUnit.OnExitHandler.get_supervisor(Keyword.get(options, :test_process, self())) do + {:ok, nil} -> + opts = [strategy: :one_for_one, max_restarts: 1_000_000, max_seconds: 1] + {:ok, sup} = Supervisor.start_link([], opts) + ExUnit.OnExitHandler.put_supervisor(self(), sup) + sup + + {:ok, sup} -> + sup + + :error -> + raise ArgumentError, "socket/1-3 can only be invoked from the test process" + end + end + + @doc false + @deprecated "Phoenix.ChannelTest.socket/0 is deprecated, please call socket/1 instead" + defmacro socket() do + socket(nil, nil, [], [], 
__CALLER__) + end + + @doc false + @deprecated "Phoenix.ChannelTest.socket/2 is deprecated, please call socket/4 instead" + defmacro socket(id, assigns) do + socket(nil, id, assigns, [], __CALLER__) + end + + @doc """ + Initiates a transport connection for the socket handler. + + Useful for testing UserSocket authentication. Returns + the result of the handler's `connect/3` callback. + """ + defmacro connect(handler, params, options \\ quote(do: [])) do + if endpoint = Module.get_attribute(__CALLER__.module, :endpoint) do + quote do + unquote(__MODULE__).__connect__( + unquote(endpoint), + unquote(handler), + unquote(params), + unquote(options) + ) + end + else + raise "module attribute @endpoint not set for socket/2" + end + end + + @doc false + def __connect__(endpoint, handler, params, options) do + {connect_info, options} = + if is_map(options) do + IO.warn( + "Passing \"connect_info\" directly to connect/3 is deprecated, please pass \"connect_info: ...\" as an option instead" + ) + + {options, []} + else + Keyword.pop(options, :connect_info, %{}) + end + + map = %{ + endpoint: endpoint, + transport: {__MODULE__, fetch_test_supervisor!(options)}, + options: [serializer: [{NoopSerializer, "~> 1.0.0"}]], + params: __stringify__(params), + connect_info: connect_info + } + + with {:ok, state} <- handler.connect(map), + {:ok, {_, socket}} = handler.init(state), + do: {:ok, socket} + end + + @doc "See `subscribe_and_join!/4`." + def subscribe_and_join!(%Socket{} = socket, topic) when is_binary(topic) do + subscribe_and_join!(socket, nil, topic, %{}) + end + + @doc "See `subscribe_and_join!/4`." + def subscribe_and_join!(%Socket{} = socket, topic, payload) + when is_binary(topic) and is_map(payload) do + subscribe_and_join!(socket, nil, topic, payload) + end + + @doc """ + Same as `subscribe_and_join/4`, but returns either the socket + or throws an error. + + This is helpful when you are not testing joining the channel + and just need the socket. 
+ """ + def subscribe_and_join!(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + case subscribe_and_join(socket, channel, topic, payload) do + {:ok, _, socket} -> socket + {:error, error} -> raise "could not join channel, got error: #{inspect(error)}" + end + end + + @doc "See `subscribe_and_join/4`." + def subscribe_and_join(%Socket{} = socket, topic) when is_binary(topic) do + subscribe_and_join(socket, nil, topic, %{}) + end + + @doc "See `subscribe_and_join/4`." + def subscribe_and_join(%Socket{} = socket, topic, payload) + when is_binary(topic) and is_map(payload) do + subscribe_and_join(socket, nil, topic, payload) + end + + @doc """ + Subscribes to the given topic and joins the channel + under the given topic and payload. + + By subscribing to the topic, we can use `assert_broadcast/3` + to verify a message has been sent through the pubsub layer. + + By joining the channel, we can interact with it directly. + The given channel is joined in a separate process which is + linked to the test process. + + If no channel module is provided, the socket's handler is used to + lookup the matching channel for the given topic. + + It returns `{:ok, reply, socket}` or `{:error, reply}`. + """ + def subscribe_and_join(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + socket.endpoint.subscribe(topic) + join(socket, channel, topic, payload) + end + + @doc "See `join/4`." + def join(%Socket{} = socket, topic) when is_binary(topic) do + join(socket, nil, topic, %{}) + end + + @doc "See `join/4`." + def join(%Socket{} = socket, topic, payload) when is_binary(topic) and is_map(payload) do + join(socket, nil, topic, payload) + end + + @doc """ + Joins the channel under the given topic and payload. + + The given channel is joined in a separate process + which is linked to the test process. 
+ + It returns `{:ok, reply, socket}` or `{:error, reply}`. + """ + def join(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + message = %Message{ + event: "phx_join", + payload: __stringify__(payload), + topic: topic, + ref: System.unique_integer([:positive]) + } + + {channel, opts} = + if channel do + {channel, []} + else + match_topic_to_channel!(socket, topic) + end + + %Socket{transport: {__MODULE__, sup}} = socket + + starter = + fn _, _, spec -> + Supervisor.start_child(sup, %{spec | id: make_ref()}) + end + + case Server.join(socket, channel, message, [starter: starter] ++ opts) do + {:ok, reply, pid} -> + Process.link(pid) + {:ok, reply, Server.socket(pid)} + {:error, _} = error -> + error + end + end + + @doc """ + Pushes a message into the channel. + + The triggers the `handle_in/3` callback in the channel. + + ## Examples + + iex> push(socket, "new_message", %{id: 1, content: "hello"}) + reference + + """ + @spec push(Socket.t, String.t, map()) :: reference() + def push(%Socket{} = socket, event, payload \\ %{}) do + ref = make_ref() + send(socket.channel_pid, + %Message{event: event, topic: socket.topic, ref: ref, payload: __stringify__(payload)}) + ref + end + + @doc """ + Emulates the client leaving the channel. + + By default this will crash the test process. Run + `Process.unlink(socket.channel_pid)` before this to prevent + this from happening. See [Leave and close](#module-leave-and-close). + """ + @spec leave(Socket.t) :: reference() + def leave(%Socket{} = socket) do + push(socket, "phx_leave", %{}) + end + + @doc """ + Emulates the client closing the socket. + + By default this will crash the test process. Run + `Process.unlink(socket.channel_pid)` before this to prevent + this from happening. See [Leave and close](#module-leave-and-close). + + Closing socket is synchronous and has a default timeout + of 5000 milliseconds. 
+ """ + def close(%Socket{} = socket, timeout \\ 5000) do + Server.close(socket.channel_pid, timeout) + end + + @doc """ + Broadcast event from pid to all subscribers of the socket topic. + + The test process will not receive the published message. This triggers + the `handle_out/3` callback in the channel. + + ## Examples + + iex> broadcast_from(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + """ + def broadcast_from(%Socket{} = socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, transport_pid: transport_pid} = socket + Server.broadcast_from pubsub_server, transport_pid, topic, event, message + end + + @doc """ + Same as `broadcast_from/3`, but raises if broadcast fails. + """ + def broadcast_from!(%Socket{} = socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, transport_pid: transport_pid} = socket + Server.broadcast_from! pubsub_server, transport_pid, topic, event, message + end + + @doc """ + Asserts the channel has pushed a message back to the client + with the given event and payload within `timeout`. + + Notice event and payload are patterns. This means one can write: + + assert_push "some_event", %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was sent. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + + **NOTE:** Because event and payload are patterns, they will be matched. This + means that if you wish to assert that the received payload is equivalent to + an existing variable, you need to pin the variable in the assertion + expression. + + Good: + + expected_payload = %{foo: "bar"} + assert_push "some_event", ^expected_payload + + Bad: + + expected_payload = %{foo: "bar"} + assert_push "some_event", expected_payload + # The code above does not assert the payload matches the described map. 
+ + Guards can also be given to the payload pattern: + + assert_push "some_event", %{"counter" => c} when c > 0 + """ + defmacro assert_push(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + pattern = extract_pattern_and_apply_guard(event, payload, Phoenix.Socket.Message) + + quote do + assert_receive unquote(pattern), unquote(timeout) + end + end + + @doc """ + Asserts the channel has not pushed a message to the client + matching the given event and payload within `timeout`. + + Like `assert_push`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. + """ + defmacro refute_push(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + + quote do + refute_receive %Phoenix.Socket.Message{ + event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has replied to the given message within + `timeout`. + + Notice status and payload are patterns. This means one can write: + + ref = push(channel, "some_event") + assert_reply ref, :ok, %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was replied. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). 
+ + Guards can also be given to the payload pattern: + + ref = push(channel, "some_event") + assert_reply ref, :ok, %{"counter" => c} when c > 0 + """ + defmacro assert_reply(ref, status, payload \\ Macro.escape(%{}), timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + {payload, guard} = extract_guard(payload) + + pattern = quote do + %Phoenix.Socket.Reply{ + ref: ^ref, + status: unquote(status), + payload: unquote(payload)} + end + + struct_pattern = apply_guard(pattern, guard) + + quote do + ref = unquote(ref) + assert_receive unquote(struct_pattern), unquote(timeout) + end + end + + @doc """ + Asserts the channel has not replied with a matching payload within + `timeout`. + + Like `assert_reply`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. + """ + defmacro refute_reply(ref, status, payload \\ Macro.escape(%{}), timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + quote do + ref = unquote(ref) + refute_receive %Phoenix.Socket.Reply{ + ref: ^ref, + status: unquote(status), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has broadcast a message within `timeout`. + + Before asserting anything was broadcast, we must first + subscribe to the topic of the channel in the test process: + + @endpoint.subscribe("foo:ok") + + Now we can match on event and payload as patterns: + + assert_broadcast "some_event", %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was sent. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). 
+ + Guards can also be given to the payload pattern: + + assert_broadcast "some_event", %{"counter" => c} when c > 0 + """ + defmacro assert_broadcast(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + pattern = extract_pattern_and_apply_guard(event, payload, Phoenix.Socket.Broadcast) + + quote do + assert_receive unquote(pattern), unquote(timeout) + end + end + + @doc """ + Asserts the channel has not broadcast a message within `timeout`. + + Like `assert_broadcast`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. + """ + defmacro refute_broadcast(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + quote do + refute_receive %Phoenix.Socket.Broadcast{event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + defp match_topic_to_channel!(socket, topic) do + unless socket.handler do + raise """ + no socket handler found to lookup channel for topic #{inspect topic}. 
+ Use connect/3 when calling subscribe_and_join/* (or subscribe_and_join!/*) + without a channel, for example: + + {:ok, socket} = connect(UserSocket, %{}, %{}) + socket = subscribe_and_join!(socket, "foo:bar", %{}) + + """ + end + + case socket.handler.__channel__(topic) do + {channel, opts} when is_atom(channel) -> {channel, opts} + _ -> raise "no channel found for topic #{inspect topic} in #{inspect socket.handler}" + end + end + + defp extract_guard({:when, _, [payload, guard]}), do: {payload, guard} + defp extract_guard(payload), do: {payload, nil} + + defp apply_guard(pattern, nil), do: pattern + defp apply_guard(pattern, guard), do: {:when, [], [pattern, guard]} + + defp extract_pattern_and_apply_guard(event, payload, struct_module) do + {payload, guard} = extract_guard(payload) + + pattern_struct = quote do + %{__struct__: unquote(struct_module), event: unquote(event), payload: unquote(payload)} + end + + apply_guard(pattern_struct, guard) + end + + @doc false + def __stringify__(%{__struct__: _} = struct), + do: struct + def __stringify__(%{} = params), + do: Enum.into(params, %{}, &stringify_kv/1) + def __stringify__(params) when is_list(params), + do: Enum.map(params, &__stringify__/1) + def __stringify__(other), + do: other + + defp stringify_kv({k, v}), + do: {to_string(k), __stringify__(v)} +end diff --git a/deps/phoenix/lib/phoenix/test/conn_test.ex b/deps/phoenix/lib/phoenix/test/conn_test.ex new file mode 100644 index 0000000..9def147 --- /dev/null +++ b/deps/phoenix/lib/phoenix/test/conn_test.ex @@ -0,0 +1,733 @@ +defmodule Phoenix.ConnTest do + @moduledoc """ + Conveniences for testing Phoenix endpoints and connection related helpers. + + You likely want to use this module or make it part of your `ExUnit.CaseTemplate`. + Once used, this module automatically imports all functions defined here as + well as the functions in `Plug.Conn`. + + ## Endpoint testing + + `Phoenix.ConnTest` typically works against endpoints. 
That's the preferred way + to test anything that your router dispatches to: + + @endpoint MyAppWeb.Endpoint + + test "says welcome on the home page" do + conn = get(build_conn(), "/") + assert conn.resp_body =~ "Welcome!" + end + + test "logs in" do + conn = post(build_conn(), "/login", [username: "john", password: "doe"]) + assert conn.resp_body =~ "Logged in!" + end + + The `@endpoint` module attribute contains the endpoint under testing, + most commonly your application endpoint itself. If you are using the + MyApp.ConnCase generated by Phoenix, it is automatically set for you. + + As in your router and controllers, the connection is the main abstraction + in testing. `build_conn()` returns a new connection and functions in this + module can be used to manipulate the connection before dispatching + to the endpoint. + + For example, one could set the accepts header for json requests as + follows: + + build_conn() + |> put_req_header("accept", "application/json") + |> get("/") + + You can also create your own helpers, such as `json_conn()` that uses + `build_conn/0` and `put_req_header/3`, so you avoid repeating the connection + setup throughout your tests. + + ## Controller testing + + The functions in this module can also be used for controller testing. + While endpoint testing is preferred over controller testing, especially + since the controller in Phoenix plays an integration role between your + domain and your views, unit testing controllers may be helpful in some + situations. + + For such cases, you need to set the `@endpoint` attribute to your controller + and pass an atom representing the action to dispatch: + + @endpoint MyAppWeb.HomeController + + test "says welcome on the home page" do + conn = get(build_conn(), :index) + assert conn.resp_body =~ "Welcome!" + end + + Keep in mind that, once the `@endpoint` variable is set, all tests after + setting it will be affected. 
+ + ## Views testing + + Under other circumstances, you may be testing a view or another layer that + requires a connection for processing. For such cases, a connection can be + created using the `build_conn/3` helper: + + MyApp.UserView.render("hello.html", conn: build_conn(:get, "/")) + + While `build_conn/0` returns a connection with no request information to it, + `build_conn/3` returns a connection with the given request information already + filled in. + + ## Recycling + + Browsers implement a storage by using cookies. When a cookie is set in the + response, the browser stores it and sends it in the next request. + + To emulate this behaviour, this module provides the idea of recycling. + The `recycle/1` function receives a connection and returns a new connection, + similar to the one returned by `build_conn/0` with all the response cookies + from the previous connection defined as request headers. This is useful when + testing multiple routes that require cookies or session to work. + + Keep in mind Phoenix will automatically recycle the connection between + dispatches. This usually works out well most times, but it may discard + information if you are modifying the connection before the next dispatch: + + # No recycling as the connection is fresh + conn = get(build_conn(), "/") + + # The connection is recycled, creating a new one behind the scenes + conn = post(conn, "/login") + + # We can also recycle manually in case we want custom headers + conn = + conn + |> recycle() + |> put_req_header("x-special", "nice") + + # No recycling as we did it explicitly + conn = delete(conn, "/logout") + + Recycling also recycles the "accept" and "authorization" headers, + as well as peer data information. 
+ """ + + @doc false + defmacro __using__(_) do + IO.warn """ + Using Phoenix.ConnTest is deprecated, instead of: + + use Phoenix.ConnTest + + do: + + import Plug.Conn + import Phoenix.ConnTest + """, Macro.Env.stacktrace(__CALLER__) + + quote do + import Plug.Conn + import Phoenix.ConnTest + end + end + + alias Plug.Conn + import ExUnit.Assertions, only: [flunk: 1] + + @doc """ + Creates a connection to be used in upcoming requests. + """ + @spec build_conn() :: Conn.t + def build_conn() do + build_conn(:get, "/", nil) + end + + @doc """ + Creates a connection to be used in upcoming requests + with a preset method, path and body. + + This is useful when a specific connection is required + for testing a plug or a particular function. + """ + @spec build_conn(atom | binary, binary, binary | list | map | nil) :: Conn.t + def build_conn(method, path, params_or_body \\ nil) do + Plug.Adapters.Test.Conn.conn(%Conn{}, method, path, params_or_body) + |> Conn.put_private(:plug_skip_csrf_protection, true) + |> Conn.put_private(:phoenix_recycled, true) + end + + @http_methods [:get, :post, :put, :patch, :delete, :options, :connect, :trace, :head] + + for method <- @http_methods do + @doc """ + Dispatches to the current endpoint. + + See `dispatch/5` for more information. + """ + defmacro unquote(method)(conn, path_or_action, params_or_body \\ nil) do + method = unquote(method) + quote do + Phoenix.ConnTest.dispatch(unquote(conn), @endpoint, unquote(method), + unquote(path_or_action), unquote(params_or_body)) + end + end + end + + @doc """ + Dispatches the connection to the given endpoint. + + When invoked via `get/3`, `post/3` and friends, the endpoint + is automatically retrieved from the `@endpoint` module + attribute, otherwise it must be given as an argument. + + The connection will be configured with the given `method`, + `path_or_action` and `params_or_body`. 
+ + If `path_or_action` is a string, it is considered to be the + request path and stored as so in the connection. If an atom, + it is assumed to be an action and the connection is dispatched + to the given action. + + ## Parameters and body + + This function, as well as `get/3`, `post/3` and friends, accepts the + request body or parameters as last argument: + + get(build_conn(), "/", some: "param") + get(build_conn(), "/", "some=param&url=encoded") + + The allowed values are: + + * `nil` - meaning there is no body + + * a binary - containing a request body. For such cases, `:headers` + must be given as option with a content-type + + * a map or list - containing the parameters which will automatically + set the content-type to multipart. The map or list may contain + other lists or maps and all entries will be normalized to string + keys + + * a struct - unlike other maps, a struct will be passed through as-is + without normalizing its entries + """ + def dispatch(conn, endpoint, method, path_or_action, params_or_body \\ nil) + def dispatch(%Plug.Conn{} = conn, endpoint, method, path_or_action, params_or_body) do + if is_nil(endpoint) do + raise "no @endpoint set in test case" + end + + if is_binary(params_or_body) and is_nil(List.keyfind(conn.req_headers, "content-type", 0)) do + raise ArgumentError, "a content-type header is required when setting " <> + "a binary body in a test connection" + end + + conn + |> ensure_recycled() + |> dispatch_endpoint(endpoint, method, path_or_action, params_or_body) + |> Conn.put_private(:phoenix_recycled, false) + |> from_set_to_sent() + end + def dispatch(conn, _endpoint, method, _path_or_action, _params_or_body) do + raise ArgumentError, "expected first argument to #{method} to be a " <> + "%Plug.Conn{}, got #{inspect conn}" + end + + defp dispatch_endpoint(conn, endpoint, method, path, params_or_body) when is_binary(path) do + conn + |> Plug.Adapters.Test.Conn.conn(method, path, params_or_body) + |> 
endpoint.call(endpoint.init([])) + end + + defp dispatch_endpoint(conn, endpoint, method, action, params_or_body) when is_atom(action) do + conn + |> Plug.Adapters.Test.Conn.conn(method, "/", params_or_body) + |> endpoint.call(endpoint.init(action)) + end + + defp from_set_to_sent(%Conn{state: :set} = conn), do: Conn.send_resp(conn) + defp from_set_to_sent(conn), do: conn + + @doc """ + Inits a session used exclusively for testing. + """ + @spec init_test_session(Conn.t, map | keyword) :: Conn.t + defdelegate init_test_session(conn, session), to: Plug.Test + + @doc """ + Puts a request cookie. + """ + @spec put_req_cookie(Conn.t, binary, binary) :: Conn.t + defdelegate put_req_cookie(conn, key, value), to: Plug.Test + + @doc """ + Deletes a request cookie. + """ + @spec delete_req_cookie(Conn.t, binary) :: Conn.t + defdelegate delete_req_cookie(conn, key), to: Plug.Test + + @doc """ + Fetches the flash storage. + """ + @spec fetch_flash(Conn.t) :: Conn.t + defdelegate fetch_flash(conn), to: Phoenix.Controller + + @doc """ + Gets the whole flash storage. + """ + @spec get_flash(Conn.t) :: map + @deprecated "get_flash/1 is deprecated. Use conn.assigns.flash instead" + def get_flash(conn), do: conn.assigns.flash + + @doc """ + Gets the given key from the flash storage. + """ + @spec get_flash(Conn.t, term) :: term + @deprecated "get_flash/2 is deprecated. Use Phoenix.Flash.get/2 instead" + def get_flash(conn, key) do + Phoenix.Flash.get(conn.assigns.flash, key) + end + + @doc """ + Puts the given value under key in the flash storage. + """ + @spec put_flash(Conn.t, term, term) :: Conn.t + defdelegate put_flash(conn, key, value), to: Phoenix.Controller + + @doc """ + Clears up the flash storage. + """ + @spec clear_flash(Conn.t) :: Conn.t + defdelegate clear_flash(conn), to: Phoenix.Controller + + @doc """ + Returns the content type as long as it matches the given format. 
+ + ## Examples + + # Assert we have an html response with utf-8 charset + assert response_content_type(conn, :html) =~ "charset=utf-8" + + """ + @spec response_content_type(Conn.t, atom) :: String.t + def response_content_type(conn, format) when is_atom(format) do + case Conn.get_resp_header(conn, "content-type") do + [] -> + raise "no content-type was set, expected a #{format} response" + [h] -> + if response_content_type?(h, format) do + h + else + raise "expected content-type for #{format}, got: #{inspect h}" + end + [_|_] -> + raise "more than one content-type was set, expected a #{format} response" + end + end + + defp response_content_type?(header, format) do + case parse_content_type(header) do + {part, subpart} -> + format = Atom.to_string(format) + format in MIME.extensions(part <> "/" <> subpart) or + format == subpart or String.ends_with?(subpart, "+" <> format) + _ -> + false + end + end + + defp parse_content_type(header) do + case Plug.Conn.Utils.content_type(header) do + {:ok, part, subpart, _params} -> + {part, subpart} + _ -> + false + end + end + + @doc """ + Asserts the given status code and returns the response body + if one was set or sent. + + ## Examples + + conn = get(build_conn(), "/") + assert response(conn, 200) =~ "hello world" + + """ + @spec response(Conn.t, status :: integer | atom) :: binary + def response(%Conn{state: :unset}, _status) do + raise """ + expected connection to have a response but no response was set/sent. + Please verify that you assign to "conn" after a request: + + conn = get(conn, "/") + assert html_response(conn) =~ "Hello" + """ + end + + def response(%Conn{status: status, resp_body: body}, given) do + given = Plug.Conn.Status.code(given) + + if given == status do + body + else + raise "expected response with status #{given}, got: #{status}, with body:\n#{inspect(body)}" + end + end + + @doc """ + Asserts the given status code, that we have an html response and + returns the response body if one was set or sent. 
+ + ## Examples + + assert html_response(conn, 200) =~ "<html>" + """ + @spec html_response(Conn.t, status :: integer | atom) :: String.t + def html_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :html) + body + end + + @doc """ + Asserts the given status code, that we have a text response and + returns the response body if one was set or sent. + + ## Examples + + assert text_response(conn, 200) =~ "hello" + """ + @spec text_response(Conn.t, status :: integer | atom) :: String.t + def text_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :text) + body + end + + @doc """ + Asserts the given status code, that we have a json response and + returns the decoded JSON response if one was set or sent. + + ## Examples + + body = json_response(conn, 200) + assert "can't be blank" in body["errors"] + + """ + @spec json_response(Conn.t, status :: integer | atom) :: term + def json_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :json) + + Phoenix.json_library().decode!(body) + end + + @doc """ + Returns the location header from the given redirect response. + + Raises if the response does not match the redirect status code + (defaults to 302).
+ + ## Examples + + assert redirected_to(conn) =~ "/foo/bar" + assert redirected_to(conn, 301) =~ "/foo/bar" + assert redirected_to(conn, :moved_permanently) =~ "/foo/bar" + """ + @spec redirected_to(Conn.t, status :: non_neg_integer) :: String.t + def redirected_to(conn, status \\ 302) + + def redirected_to(%Conn{state: :unset}, _status) do + raise "expected connection to have redirected but no response was set/sent" + end + + def redirected_to(conn, status) when is_atom(status) do + redirected_to(conn, Plug.Conn.Status.code(status)) + end + + def redirected_to(%Conn{status: status} = conn, status) do + location = Conn.get_resp_header(conn, "location") |> List.first + location || raise "no location header was set on redirected_to" + end + + def redirected_to(conn, status) do + raise "expected redirection with status #{status}, got: #{conn.status}" + end + + @doc """ + Recycles the connection. + + Recycling receives a connection and returns a new connection, + containing cookies and relevant information from the given one. + + This emulates behaviour performed by browsers where cookies + returned in the response are available in following requests. + + By default, only the headers "accept", "accept-language", and + "authorization" are recycled. However, a custom set of headers + can be specified by passing a list of strings representing its + names as the second argument of the function. + + Note `recycle/1` is automatically invoked when dispatching + to the endpoint, unless the connection has already been + recycled. 
+ """ + @spec recycle(Conn.t, [String.t]) :: Conn.t + def recycle(conn, headers \\ ~w(accept accept-language authorization)) do + build_conn() + |> Map.put(:host, conn.host) + |> Map.put(:remote_ip, conn.remote_ip) + |> Plug.Test.recycle_cookies(conn) + |> Plug.Test.put_peer_data(Plug.Conn.get_peer_data(conn)) + |> copy_headers(conn.req_headers, headers) + end + + defp copy_headers(conn, headers, copy) do + headers = for {k, v} <- headers, k in copy, do: {k, v} + %{conn | req_headers: headers ++ conn.req_headers} + end + + @doc """ + Ensures the connection is recycled if it wasn't already. + + See `recycle/1` for more information. + """ + @spec ensure_recycled(Conn.t) :: Conn.t + def ensure_recycled(conn) do + if conn.private[:phoenix_recycled] do + conn + else + recycle(conn) + end + end + + @doc """ + Calls the Endpoint and Router pipelines. + + Useful for unit testing Plugs where Endpoint and/or router pipeline + plugs are required for proper setup. + + Note the use of `get("/")` following `bypass_through` in the examples below. + To execute the plug pipelines, you must issue a request against the router. + Most often, you can simply send a GET request against the root path, but you + may also specify a different method or path which your pipelines may operate + against. + + ## Examples + + For example, imagine you are testing an authentication plug in + isolation, but you need to invoke the Endpoint plugs and router + pipelines to set up session and flash related dependencies. + One option is to invoke an existing route that uses the proper + pipelines. 
You can do so by passing the connection and the + router name to `bypass_through`: + + conn = + conn + |> bypass_through(MyAppWeb.Router) + |> get("/some_url") + |> MyApp.RequireAuthentication.call([]) + assert conn.halted + + You can also specify which pipelines you want to run: + + conn = + conn + |> bypass_through(MyAppWeb.Router, [:browser]) + |> get("/") + |> MyApp.RequireAuthentication.call([]) + assert conn.halted + + Alternatively, you could only invoke the Endpoint's plugs: + + conn = + conn + |> bypass_through() + |> get("/") + |> MyApp.RequireAuthentication.call([]) + + assert conn.halted + """ + @spec bypass_through(Conn.t) :: Conn.t + def bypass_through(conn) do + Plug.Conn.put_private(conn, :phoenix_bypass, :all) + end + + @doc """ + Calls the Endpoint and Router pipelines for the current route. + + See `bypass_through/1`. + """ + @spec bypass_through(Conn.t, module) :: Conn.t + def bypass_through(conn, router) do + Plug.Conn.put_private(conn, :phoenix_bypass, {router, :current}) + end + + @doc """ + Calls the Endpoint and the given Router pipelines. + + See `bypass_through/1`. + """ + @spec bypass_through(Conn.t, module, atom | list) :: Conn.t + def bypass_through(conn, router, pipelines) do + Plug.Conn.put_private(conn, :phoenix_bypass, {router, List.wrap(pipelines)}) + end + + @doc """ + Returns the matched params from the URL the connection was redirected to. + + Uses the provided `%Plug.Conn{}`s router matched in the previous request. + Raises if the response's location header is not set or if the response does + not match the redirect status code (defaults to 302). 
+ + ## Examples + + assert redirected_to(conn) =~ "/posts/123" + assert %{id: "123"} = redirected_params(conn) + assert %{id: "123"} = redirected_params(conn, 303) + """ + @spec redirected_params(Conn.t, status :: non_neg_integer) :: map + def redirected_params(%Plug.Conn{} = conn, status \\ 302) do + router = Phoenix.Controller.router_module(conn) + %URI{path: path, host: host} = conn |> redirected_to(status) |> URI.parse() + path = remove_script_name(conn, router, path) + + case Phoenix.Router.route_info(router, "GET", path, host || conn.host) do + :error -> + raise Phoenix.Router.NoRouteError, conn: conn, router: router + %{path_params: path_params} -> + Enum.into(path_params, %{}, fn {key, val} -> {String.to_atom(key), val} end) + end + end + + defp remove_script_name(conn, router, path) do + case conn.private[router] do + [_ | _] = list -> + script_name = "/" <> Enum.join(list, ",") + String.replace_leading(path, script_name, "") + + _ -> + path + end + end + + @doc """ + Returns the matched params of the URL for the `%Plug.Conn{}`'s router. + + Useful for extracting path params out of returned URLs, such as those + returned by `Phoenix.LiveViewTest`'s redirected results. + + ## Examples + + assert {:error, {:redirect, %{to: "/posts/123" = to}}} = live(conn, "/path") + assert %{id: "123"} = path_params(conn, to) + """ + @spec path_params(Conn.t, String.t) :: map + def path_params(%Plug.Conn{} = conn, to) when is_binary(to) do + router = Phoenix.Controller.router_module(conn) + + case Phoenix.Router.route_info(router, "GET", to, conn.host) do + %{path_params: path_params} -> + Enum.into(path_params, %{}, fn {key, val} -> {String.to_atom(key), val} end) + + :error -> + raise Phoenix.Router.NoRouteError, conn: conn, router: router + end + end + + @doc """ + Asserts an error was wrapped and sent with the given status. 
+ + Useful for testing actions that you expect raise an error and have + the response wrapped in an HTTP status, with content usually rendered + by your MyAppWeb.ErrorHTML view. + + The function accepts a status either as an integer HTTP status or + atom, such as `500` or `:internal_server_error`. The list of allowed atoms is available + in `Plug.Conn.Status`. If an error is raised, a 3-tuple of the wrapped + response is returned matching the status, headers, and body of the response: + + {500, [{"content-type", "text/html"} | _], "Internal Server Error"} + + ## Examples + + assert_error_sent :internal_server_error, fn -> + get(build_conn(), "/broken/route") + end + + response = assert_error_sent 500, fn -> + get(build_conn(), "/broken/route") + end + assert {500, [_h | _t], "Internal Server Error"} = response + + This can also be used to test a route resulted in an error that was translated to a + specific response by the `Plug.Status` protocol, such as `Ecto.NoResultsError`: + + assert_error_sent :not_found, fn -> + get(build_conn(), "/something-that-raises-no-results-error") + end + + *Note*: for routes that don't raise an error, but instead return a status, you should test the + response directly: + + conn = get(build_conn(), "/users/not-found") + assert response(conn, 404) + """ + @spec assert_error_sent(integer | atom, function) :: {integer, list, term} + def assert_error_sent(status_int_or_atom, func) do + expected_status = Plug.Conn.Status.code(status_int_or_atom) + discard_previously_sent() + result = + func + |> wrap_request() + |> receive_response(expected_status) + + discard_previously_sent() + result + end + + defp receive_response({:ok, conn}, expected_status) do + if conn.state == :sent do + flunk "expected error to be sent as #{expected_status} status, but response sent #{conn.status} without error" + else + flunk "expected error to be sent as #{expected_status} status, but no error happened" + end + end + defp receive_response({:error, {_kind, 
exception, stack}}, expected_status) do + receive do + {ref, {^expected_status, headers, body}} when is_reference(ref) -> + {expected_status, headers, body} + + {ref, {sent_status, _headers, _body}} when is_reference(ref) -> + reraise ExUnit.AssertionError.exception(""" + expected error to be sent as #{expected_status} status, but got #{sent_status} from: + + #{Exception.format_banner(:error, exception)} + """), stack + + after 0 -> + reraise ExUnit.AssertionError.exception(""" + expected error to be sent as #{expected_status} status, but got an error with no response from: + + #{Exception.format_banner(:error, exception)} + """), stack + end + end + + defp discard_previously_sent() do + receive do + {ref, {_, _, _}} when is_reference(ref) -> discard_previously_sent() + {:plug_conn, :sent} -> discard_previously_sent() + after + 0 -> :ok + end + end + + defp wrap_request(func) do + try do + {:ok, func.()} + catch + kind, error -> {:error, {kind, error, __STACKTRACE__}} + end + end +end diff --git a/deps/phoenix/lib/phoenix/token.ex b/deps/phoenix/lib/phoenix/token.ex new file mode 100644 index 0000000..8c2d4d1 --- /dev/null +++ b/deps/phoenix/lib/phoenix/token.ex @@ -0,0 +1,275 @@ +defmodule Phoenix.Token do + @moduledoc """ + Conveniences to sign/encrypt data inside tokens + for use in Channels, API authentication, and more. + + The data stored in the token is signed to prevent tampering, and is + optionally encrypted. This means that, so long as the + key (see below) remains secret, you can be assured that the data + stored in the token has not been tampered with by a third party. + However, unless the token is encrypted, it is not safe to use this + token to store private information, such as a user's sensitive + identification data, as it can be trivially decoded. 
If the + token is encrypted, its contents will be kept secret from the + client, but it is still a best practice to encode as little secret + information as possible, to minimize the impact of key leakage. + + ## Example + + When generating a unique token for use in an API or Channel + it is advised to use a unique identifier for the user, typically + the id from a database. For example: + + iex> user_id = 1 + iex> token = Phoenix.Token.sign(MyAppWeb.Endpoint, "user auth", user_id) + iex> Phoenix.Token.verify(MyAppWeb.Endpoint, "user auth", token, max_age: 86400) + {:ok, 1} + + In that example we have a user's id, we generate a token and + verify it using the secret key base configured in the given + `endpoint`. We guarantee the token will only be valid for one day + by setting a max age (recommended). + + The first argument to `sign/4`, `verify/4`, `encrypt/4`, and + `decrypt/4` can be one of: + + * the module name of a Phoenix endpoint (shown above) - where + the secret key base is extracted from the endpoint + * `Plug.Conn` - where the secret key base is extracted from the + endpoint stored in the connection + * `Phoenix.Socket` or `Phoenix.LiveView.Socket` - where the secret + key base is extracted from the endpoint stored in the socket + * a string, representing the secret key base itself. A key base + with at least 20 randomly generated characters should be used + to provide adequate entropy + + The second argument is a [cryptographic salt](https://en.wikipedia.org/wiki/Salt_(cryptography)) + which must be the same in both calls to `sign/4` and `verify/4`, or + both calls to `encrypt/4` and `decrypt/4`. For instance, it may be + called "user auth" and treated as namespace when generating a token + that will be used to authenticate users on channels or on your APIs. + + The third argument can be any term (string, int, list, etc.) + that you wish to codify into the token. Upon valid verification, + this same term will be extracted from the token. 
+ + ## Usage + + Once a token is signed, we can send it to the client in multiple ways. + + One is via the meta tag: + + ```heex + <meta name="channel_token" content={Phoenix.Token.sign(@conn, "user auth", @current_user.id)} /> + ``` + + Or an endpoint that returns it: + + def create(conn, params) do + user = User.create(params) + render(conn, "user.json", + %{token: Phoenix.Token.sign(conn, "user auth", user.id), user: user}) + end + + Once the token is sent, the client may now send it back to the server + as an authentication mechanism. For example, we can use it to authenticate + a user on a Phoenix channel: + + defmodule MyApp.UserSocket do + use Phoenix.Socket + + def connect(%{"token" => token}, socket, _connect_info) do + case Phoenix.Token.verify(socket, "user auth", token, max_age: 86400) do + {:ok, user_id} -> + socket = assign(socket, :user, Repo.get!(User, user_id)) + {:ok, socket} + {:error, _} -> + :error + end + end + + def connect(_params, _socket, _connect_info), do: :error + end + + In this example, the phoenix.js client will send the token in the + `connect` command which is then validated by the server. + + `Phoenix.Token` can also be used for validating APIs, handling + password resets, e-mail confirmation and more. + """ + + @type context :: + Plug.Conn.t() + | %{required(:endpoint) => atom, optional(atom()) => any()} + | atom + | binary + + @type shared_opt :: + {:key_iterations, pos_integer} + | {:key_length, pos_integer} + | {:key_digest, :sha256 | :sha384 | :sha512} + + @type max_age_opt :: {:max_age, pos_integer | :infinity} + @type signed_at_opt :: {:signed_at, pos_integer} + + @doc """ + Encodes and signs data into a token you can send to clients. + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys.
Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:signed_at` - set the timestamp of the token in *seconds*. + If no value is provided, it will be set to the current time in milliseconds. + * `:max_age` - the default maximum age in **seconds** of the token. Defaults to + 86400 seconds (1 day) and it may be overridden on `verify/4`. + + """ + @spec sign(context, binary, term, [shared_opt | max_age_opt | signed_at_opt]) :: binary + def sign(context, salt, data, opts \\ []) when is_binary(salt) do + context + |> get_key_base() + |> Plug.Crypto.sign(salt, data, opts) + end + + @doc """ + Encodes, encrypts, and signs data into a token you can send to + clients. Its usage is identical to that of `sign/4`, but the data + is extracted using `decrypt/4`, rather than `verify/4`. + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:signed_at` - set the timestamp of the token in **seconds**. + If no value is provided, it will be set to the current time in milliseconds. + * `:max_age` - the default maximum age in **seconds** of the token. Defaults to + 86400 seconds (1 day) and it may be overridden on `decrypt/4`. + + """ + @spec encrypt(context, binary, term, [shared_opt | max_age_opt | signed_at_opt]) :: binary + def encrypt(context, secret, data, opts \\ []) when is_binary(secret) do + context + |> get_key_base() + |> Plug.Crypto.encrypt(secret, data, opts) + end + + @doc """ + Decodes the original data from the token and verifies its integrity. 
+ + ## Examples + + In this scenario we will create a token, sign it, then provide it to a client + application. The client will then use this token to authenticate requests for + resources from the server. See `Phoenix.Token` summary for more info about + creating tokens. + + iex> user_id = 99 + iex> secret = "kjoy3o1zeidquwy1398juxzldjlksahdk3" + iex> namespace = "user auth" + iex> token = Phoenix.Token.sign(secret, namespace, user_id) + + The mechanism for passing the token to the client is typically through a + cookie, a JSON response body, or HTTP header. For now, assume the client has + received a token it can use to validate requests for protected resources. + + When the server receives a request, it can use `verify/4` to determine if it + should provide the requested resources to the client: + + iex> Phoenix.Token.verify(secret, namespace, token, max_age: 86400) + {:ok, 99} + + In this example, we know the client sent a valid token because `verify/4` + returned a tuple of type `{:ok, user_id}`. The server can now proceed with + the request. + + However, if the client had sent an expired token, an invalid token, or `nil`, + `verify/4` would have returned an error instead: + + iex> Phoenix.Token.verify(secret, namespace, expired, max_age: 86400) + {:error, :expired} + + iex> Phoenix.Token.verify(secret, namespace, invalid, max_age: 86400) + {:error, :invalid} + + iex> Phoenix.Token.verify(secret, namespace, nil, max_age: 86400) + {:error, :missing} + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:max_age` - verifies the token only if it has been generated + "max age" ago in seconds. 
Defaults to the max age signed in the + token by `sign/4`. + """ + @spec verify(context, binary, binary, [shared_opt | max_age_opt]) :: + {:ok, term} | {:error, :expired | :invalid | :missing} + def verify(context, salt, token, opts \\ []) when is_binary(salt) do + context + |> get_key_base() + |> Plug.Crypto.verify(salt, token, opts) + end + + @doc """ + Decrypts the original data from the token and verifies its integrity. + + Its usage is identical to `verify/4` but for encrypted tokens. + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:max_age` - verifies the token only if it has been generated + "max age" ago in seconds. Defaults to the max age signed in the + token by `encrypt/4`. + """ + @spec decrypt(context, binary, binary, [shared_opt | max_age_opt]) :: term() + def decrypt(context, secret, token, opts \\ []) when is_binary(secret) do + context + |> get_key_base() + |> Plug.Crypto.decrypt(secret, token, opts) + end + + ## Helpers + + defp get_key_base(%Plug.Conn{} = conn), + do: conn |> Phoenix.Controller.endpoint_module() |> get_endpoint_key_base() + + defp get_key_base(%_{endpoint: endpoint}), + do: get_endpoint_key_base(endpoint) + + defp get_key_base(endpoint) when is_atom(endpoint), + do: get_endpoint_key_base(endpoint) + + defp get_key_base(string) when is_binary(string) and byte_size(string) >= 20, + do: string + + defp get_endpoint_key_base(endpoint) do + endpoint.config(:secret_key_base) || + raise """ + no :secret_key_base configuration found in #{inspect(endpoint)}. + Ensure your environment has the necessary mix configuration. 
For example: + + config :my_app, MyAppWeb.Endpoint, + secret_key_base: ... + + """ + end +end diff --git a/deps/phoenix/lib/phoenix/transports/long_poll.ex b/deps/phoenix/lib/phoenix/transports/long_poll.ex new file mode 100644 index 0000000..fc84140 --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/long_poll.ex @@ -0,0 +1,296 @@ +defmodule Phoenix.Transports.LongPoll do + @moduledoc false + @behaviour Plug + + # The maximum is 10MB but read_body will cap the whole request at ~8MB, + # so this acts as a secondary protection mechanism. + @max_base64_size 10_000_000 + # TODO: enforce batch size on the server in the next release + # @max_poll_batch_size 100 + @connect_info_opts [:check_csrf] + + import Plug.Conn + alias Phoenix.Socket.{V1, V2, Transport} + + def default_config() do + [ + window_ms: 10_000, + path: "/longpoll", + pubsub_timeout_ms: 2_000, + serializer: [{V1.JSONSerializer, "~> 1.0.0"}, {V2.JSONSerializer, "~> 2.0.0"}], + transport_log: false, + crypto: [max_age: 1_209_600] + ] + end + + def init(opts), do: opts + + def call(conn, {endpoint, handler, opts}) do + conn + |> fetch_query_params() + |> put_resp_header("access-control-allow-origin", "*") + |> Transport.code_reload(endpoint, opts) + |> Transport.transport_log(opts[:transport_log]) + |> Transport.check_origin(handler, endpoint, opts, &status_json/1) + |> dispatch(endpoint, handler, opts) + end + + defp dispatch(%{halted: true} = conn, _, _, _) do + conn + end + + # Responds to pre-flight CORS requests with Allow-Origin-* headers. + # We allow cross-origin requests as we always validate the Origin header. 
+ defp dispatch(%{method: "OPTIONS"} = conn, _, _, _) do + headers = get_req_header(conn, "access-control-request-headers") |> Enum.join(", ") + + conn + |> put_resp_header("access-control-allow-headers", headers) + |> put_resp_header("access-control-allow-methods", "get, post, options") + |> put_resp_header("access-control-max-age", "3600") + |> send_resp(:ok, "") + end + + # Starts a new session or listen to a message if one already exists. + defp dispatch(%{method: "GET"} = conn, endpoint, handler, opts) do + case resume_session(conn, conn.params, endpoint, opts) do + {:ok, new_conn, server_ref} -> + listen(new_conn, server_ref, endpoint, opts) + + :error -> + new_session(conn, endpoint, handler, opts) + end + end + + # Publish the message. + defp dispatch(%{method: "POST"} = conn, endpoint, _, opts) do + case resume_session(conn, conn.params, endpoint, opts) do + {:ok, new_conn, server_ref} -> + publish(new_conn, server_ref, endpoint, opts) + + :error -> + conn |> put_status(:gone) |> status_json() + end + end + + # All other requests should fail. 
+ defp dispatch(conn, _, _, _) do + send_resp(conn, :bad_request, "") + end + + defp publish(conn, server_ref, endpoint, opts) do + case read_body(conn, []) do + {:ok, body, conn} -> + # We need to match on both v1 and v2 protocol, as well as wrap for backwards compat + status = + case get_req_header(conn, "content-type") do + ["application/x-ndjson"] -> + body + |> String.splitter(["\n", "\r\n"]) + # |> Stream.take(@max_poll_batch_size) + |> Enum.find_value(fn part -> + msg = + case part do + "[" <> _ = txt -> {txt, :text} + base64 -> {safe_decode64!(base64), :binary} + end + + transport_dispatch(endpoint, server_ref, msg, opts) + end) + + _ -> + transport_dispatch(endpoint, server_ref, {body, :text}, opts) + end + + conn |> put_status(status || :ok) |> status_json() + + _ -> + raise Plug.BadRequestError + end + end + + defp safe_decode64!(base64) do + if byte_size(base64) <= @max_base64_size do + Base.decode64!(base64) + else + raise Plug.BadRequestError + end + end + + defp transport_dispatch(endpoint, server_ref, body, opts) do + ref = make_ref() + broadcast_from!(endpoint, server_ref, {:dispatch, client_ref(server_ref), body, ref}) + + receive do + {:ok, ^ref} -> nil + {:error, ^ref} -> nil + after + opts[:window_ms] -> :request_timeout + end + end + + ## Session handling + + defp new_session(conn, endpoint, handler, opts) do + priv_topic = + "phx:lp:" <> + Base.encode64(:crypto.strong_rand_bytes(16)) <> + (System.system_time(:millisecond) |> Integer.to_string()) + + keys = Keyword.get(opts, :connect_info, []) + + conn = maybe_auth_token_from_header(conn, opts[:auth_token]) + + connect_info = + Transport.connect_info(conn, endpoint, keys, Keyword.take(opts, @connect_info_opts)) + + arg = {endpoint, handler, opts, conn.params, priv_topic, connect_info} + spec = {Phoenix.Transports.LongPoll.Server, arg} + + case DynamicSupervisor.start_child(Phoenix.Transports.LongPoll.Supervisor, spec) do + :ignore -> + conn |> put_status(:forbidden) |> status_json() + + {:ok, 
server_pid} -> + data = {:v1, endpoint.config(:endpoint_id), server_pid, priv_topic} + token = sign_token(endpoint, data, opts) + conn |> put_status(:gone) |> status_token_messages_json(token, []) + end + end + + defp listen(conn, server_ref, endpoint, opts) do + ref = make_ref() + client_ref = client_ref(server_ref) + broadcast_from!(endpoint, server_ref, {:flush, client_ref, ref}) + + {status, messages} = + receive do + {:messages, messages, ^ref} -> + {:ok, messages} + + {:now_available, ^ref} -> + broadcast_from!(endpoint, server_ref, {:flush, client_ref, ref}) + + receive do + {:messages, messages, ^ref} -> {:ok, messages} + after + opts[:window_ms] -> + broadcast_from!(endpoint, server_ref, {:expired, client_ref, ref}) + {:no_content, []} + end + after + opts[:window_ms] -> + broadcast_from!(endpoint, server_ref, {:expired, client_ref, ref}) + {:no_content, []} + end + + conn + |> put_status(status) + |> status_token_messages_json(conn.params["token"], messages) + end + + # Retrieves the serialized `Phoenix.LongPoll.Server` pid + # by publishing a message in the encrypted private topic. 
+ defp resume_session(%Plug.Conn{} = conn, %{"token" => token}, endpoint, opts) do + case verify_token(endpoint, token, opts) do + {:ok, {:v1, id, pid, priv_topic}} -> + server_ref = server_ref(endpoint.config(:endpoint_id), id, pid, priv_topic) + + new_conn = + Plug.Conn.register_before_send(conn, fn conn -> + unsubscribe(endpoint, server_ref) + conn + end) + + ref = make_ref() + :ok = subscribe(endpoint, server_ref) + broadcast_from!(endpoint, server_ref, {:subscribe, client_ref(server_ref), ref}) + + receive do + {:subscribe, ^ref} -> {:ok, new_conn, server_ref} + after + opts[:pubsub_timeout_ms] -> :error + end + + _ -> + :error + end + end + + defp resume_session(%Plug.Conn{}, _params, _endpoint, _opts), do: :error + + ## Helpers + + defp server_ref(endpoint_id, id, pid, topic) when is_pid(pid) do + cond do + node(pid) in Node.list() -> pid + endpoint_id == id and Process.alive?(pid) -> pid + true -> topic + end + end + + defp client_ref(topic) when is_binary(topic), do: topic + defp client_ref(pid) when is_pid(pid), do: self() + + defp subscribe(endpoint, topic) when is_binary(topic), + do: Phoenix.PubSub.subscribe(endpoint.config(:pubsub_server), topic) + + defp subscribe(_endpoint, pid) when is_pid(pid), + do: :ok + + defp unsubscribe(endpoint, topic) when is_binary(topic), + do: Phoenix.PubSub.unsubscribe(endpoint.config(:pubsub_server), topic) + + defp unsubscribe(_endpoint, pid) when is_pid(pid), + do: :ok + + defp broadcast_from!(endpoint, topic, msg) when is_binary(topic), + do: Phoenix.PubSub.broadcast_from!(endpoint.config(:pubsub_server), self(), topic, msg) + + defp broadcast_from!(_endpoint, pid, msg) when is_pid(pid), + do: send(pid, msg) + + defp sign_token(endpoint, data, opts) do + Phoenix.Token.sign( + endpoint, + Atom.to_string(endpoint.config(:pubsub_server)), + data, + opts[:crypto] + ) + end + + defp verify_token(endpoint, signed, opts) do + Phoenix.Token.verify( + endpoint, + Atom.to_string(endpoint.config(:pubsub_server)), + signed, + 
opts[:crypto] + ) + end + + defp maybe_auth_token_from_header(conn, true) do + case Plug.Conn.get_req_header(conn, "x-phoenix-authtoken") do + [] -> + conn + + [token | _] -> + Plug.Conn.put_private(conn, :phoenix_transport_auth_token, token) + end + end + + defp maybe_auth_token_from_header(conn, _), do: conn + + defp status_json(conn) do + send_json(conn, %{"status" => conn.status || 200}) + end + + defp status_token_messages_json(conn, token, messages) do + send_json(conn, %{"status" => conn.status || 200, "token" => token, "messages" => messages}) + end + + defp send_json(conn, data) do + conn + |> put_resp_header("content-type", "application/json; charset=utf-8") + |> send_resp(200, Phoenix.json_library().encode_to_iodata!(data)) + end +end diff --git a/deps/phoenix/lib/phoenix/transports/long_poll_server.ex b/deps/phoenix/lib/phoenix/transports/long_poll_server.ex new file mode 100644 index 0000000..e3a5f23 --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/long_poll_server.ex @@ -0,0 +1,161 @@ +defmodule Phoenix.Transports.LongPoll.Server do + @moduledoc false + + use GenServer, restart: :temporary + alias Phoenix.PubSub + + def start_link(arg) do + GenServer.start_link(__MODULE__, arg) + end + + def init({endpoint, handler, options, params, priv_topic, connect_info}) do + config = %{ + endpoint: endpoint, + transport: :longpoll, + options: options, + params: params, + connect_info: connect_info + } + + window_ms = Keyword.fetch!(options, :window_ms) + + case handler.connect(config) do + {:ok, handler_state} -> + {:ok, handler_state} = handler.init(handler_state) + + state = %{ + buffer: [], + handler: {handler, handler_state}, + window_ms: trunc(window_ms * 1.5), + pubsub_server: endpoint.config(:pubsub_server), + priv_topic: priv_topic, + last_client_poll: now_ms(), + client_ref: nil + } + + :ok = PubSub.subscribe(state.pubsub_server, priv_topic) + schedule_inactive_shutdown(state.window_ms) + {:ok, state} + + :error -> + :ignore + + {:error, _reason} 
-> + :ignore + end + end + + def handle_info({:dispatch, client_ref, {body, opcode}, ref}, state) do + %{handler: {handler, handler_state}} = state + + case handler.handle_in({body, opcode: opcode}, handler_state) do + {:reply, status, {_, reply}, handler_state} -> + state = %{state | handler: {handler, handler_state}} + status = if status == :ok, do: :ok, else: :error + broadcast_from!(state, client_ref, {status, ref}) + publish_reply(state, reply) + + {:ok, handler_state} -> + state = %{state | handler: {handler, handler_state}} + broadcast_from!(state, client_ref, {:ok, ref}) + {:noreply, state} + + {:stop, reason, handler_state} -> + state = %{state | handler: {handler, handler_state}} + broadcast_from!(state, client_ref, {:error, ref}) + {:stop, reason, state} + end + end + + def handle_info({:subscribe, client_ref, ref}, state) do + broadcast_from!(state, client_ref, {:subscribe, ref}) + {:noreply, state} + end + + def handle_info({:flush, client_ref, ref}, state) do + case state.buffer do + [] -> + {:noreply, %{state | client_ref: {client_ref, ref}, last_client_poll: now_ms()}} + + buffer -> + broadcast_from!(state, client_ref, {:messages, Enum.reverse(buffer), ref}) + {:noreply, %{state | client_ref: nil, last_client_poll: now_ms(), buffer: []}} + end + end + + def handle_info({:expired, client_ref, ref}, state) do + case state.client_ref do + {^client_ref, ^ref} -> + {:noreply, %{state | client_ref: nil}} + + _ -> + {:noreply, state} + end + end + + def handle_info(:shutdown_if_inactive, state) do + if now_ms() - state.last_client_poll > state.window_ms do + {:stop, {:shutdown, :inactive}, state} + else + schedule_inactive_shutdown(state.window_ms) + {:noreply, state} + end + end + + def handle_info(message, state) do + %{handler: {handler, handler_state}} = state + + case handler.handle_info(message, handler_state) do + {:push, {_, reply}, handler_state} -> + state = %{state | handler: {handler, handler_state}} + publish_reply(state, reply) + + {:ok, 
handler_state} -> + state = %{state | handler: {handler, handler_state}} + {:noreply, state} + + {:stop, reason, handler_state} -> + state = %{state | handler: {handler, handler_state}} + {:stop, reason, state} + end + end + + def terminate(reason, state) do + %{handler: {handler, handler_state}} = state + handler.terminate(reason, handler_state) + :ok + end + + defp broadcast_from!(state, client_ref, msg) when is_binary(client_ref), + do: PubSub.broadcast_from!(state.pubsub_server, self(), client_ref, msg) + + defp broadcast_from!(_state, client_ref, msg) when is_pid(client_ref), + do: send(client_ref, msg) + + defp publish_reply(state, reply) when is_map(reply) do + IO.warn( + "Returning a map from the LongPolling serializer is deprecated. " <> + "Please return JSON encoded data instead (see Phoenix.Socket.Serializer)" + ) + + publish_reply(state, Phoenix.json_library().encode_to_iodata!(reply)) + end + + defp publish_reply(state, reply) do + notify_client_now_available(state) + {:noreply, update_in(state.buffer, &[IO.iodata_to_binary(reply) | &1])} + end + + defp notify_client_now_available(state) do + case state.client_ref do + {client_ref, ref} -> broadcast_from!(state, client_ref, {:now_available, ref}) + nil -> :ok + end + end + + defp now_ms, do: System.system_time(:millisecond) + + defp schedule_inactive_shutdown(window_ms) do + Process.send_after(self(), :shutdown_if_inactive, window_ms) + end +end diff --git a/deps/phoenix/lib/phoenix/transports/websocket.ex b/deps/phoenix/lib/phoenix/transports/websocket.ex new file mode 100644 index 0000000..e942257 --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/websocket.ex @@ -0,0 +1,130 @@ +defmodule Phoenix.Transports.WebSocket do + @moduledoc false + # + # How WebSockets Work In Phoenix + # + # WebSocket support in Phoenix is implemented on top of the `WebSockAdapter` library. Upgrade + # requests from clients originate as regular HTTP requests that get routed to this module via + # Plug. 
These requests are then upgraded to WebSocket connections via + # `WebSockAdapter.upgrade/4`, which takes as an argument the handler for a given socket endpoint + # as configured in the application's Endpoint. This handler module must implement the + # transport-agnostic `Phoenix.Socket.Transport` behaviour (this same behaviour is also used for + # other transports such as long polling). Because this behaviour is a superset of the `WebSock` + # behaviour, the `WebSock` library is able to use the callbacks in the `WebSock` behaviour to + # call this handler module directly for the rest of the WebSocket connection's lifetime. + # + @behaviour Plug + + @connect_info_opts [:check_csrf] + + @auth_token_prefix "base64url.bearer.phx." + + import Plug.Conn + + alias Phoenix.Socket.{V1, V2, Transport} + + def default_config() do + [ + path: "/websocket", + serializer: [{V1.JSONSerializer, "~> 1.0.0"}, {V2.JSONSerializer, "~> 2.0.0"}], + error_handler: {__MODULE__, :handle_error, []}, + timeout: 60_000, + transport_log: false, + compress: false + ] + end + + def init(opts), do: opts + + def call(%{method: "GET"} = conn, {endpoint, handler, opts}) do + subprotocols = + if opts[:auth_token] do + # when using Sec-WebSocket-Protocol for passing an auth token + # the server must reply with one of the subprotocols in the request; + # therefore we include "phoenix" as allowed subprotocol and include it on the client + ["phoenix" | Keyword.get(opts, :subprotocols, [])] + else + opts[:subprotocols] + end + + conn + |> fetch_query_params() + |> Transport.code_reload(endpoint, opts) + |> Transport.transport_log(opts[:transport_log]) + |> Transport.check_origin(handler, endpoint, opts) + |> maybe_auth_token_from_header(opts[:auth_token]) + |> Transport.check_subprotocols(subprotocols) + |> case do + %{halted: true} = conn -> + conn + + %{params: params} = conn -> + keys = Keyword.get(opts, :connect_info, []) + + connect_info = + Transport.connect_info(conn, endpoint, keys, 
Keyword.take(opts, @connect_info_opts)) + + config = %{ + endpoint: endpoint, + transport: :websocket, + options: opts, + params: params, + connect_info: connect_info + } + + case handler.connect(config) do + {:ok, arg} -> + try do + conn + |> WebSockAdapter.upgrade(handler, arg, opts) + |> halt() + rescue + e in WebSockAdapter.UpgradeError -> send_resp(conn, 400, e.message) + end + + :error -> + send_resp(conn, 403, "") + + {:error, reason} -> + {m, f, args} = opts[:error_handler] + apply(m, f, [conn, reason | args]) + end + end + end + + def call(conn, _), do: send_resp(conn, 400, "") + + def handle_error(conn, _reason), do: send_resp(conn, 403, "") + + defp maybe_auth_token_from_header(conn, true) do + case get_req_header(conn, "sec-websocket-protocol") do + [] -> + conn + + [subprotocols_header | _] -> + request_subprotocols = + subprotocols_header + |> Plug.Conn.Utils.list() + |> Enum.split_with(&String.starts_with?(&1, @auth_token_prefix)) + + case request_subprotocols do + {[@auth_token_prefix <> encoded_token], actual_subprotocols} -> + token = Base.decode64!(encoded_token, padding: false) + + conn + |> put_private(:phoenix_transport_auth_token, token) + |> set_actual_subprotocols(actual_subprotocols) + + _ -> + conn + end + end + end + + defp maybe_auth_token_from_header(conn, _), do: conn + + defp set_actual_subprotocols(conn, []), do: delete_req_header(conn, "sec-websocket-protocol") + + defp set_actual_subprotocols(conn, subprotocols), + do: put_req_header(conn, "sec-websocket-protocol", Enum.join(subprotocols, ", ")) +end diff --git a/deps/phoenix/lib/phoenix/verified_routes.ex b/deps/phoenix/lib/phoenix/verified_routes.ex new file mode 100644 index 0000000..0585bde --- /dev/null +++ b/deps/phoenix/lib/phoenix/verified_routes.ex @@ -0,0 +1,1069 @@ +defmodule Phoenix.VerifiedRoutes do + @moduledoc ~S''' + Provides route generation with compile-time verification. 
+ + Use of the `sigil_p` macro allows paths and URLs throughout your + application to be compile-time verified against your Phoenix router(s). + For example, the following path and URL usages: + + ~H""" + Log in + """ + + redirect(to: url(~p"/posts/#{post}")) + + Will be verified against your standard `Phoenix.Router` definitions: + + get "/posts/:post_id", PostController, :show + post "/sessions/new", SessionController, :create + + Unmatched routes will issue compiler warnings: + + ```console + warning: no route path for AppWeb.Router matches "/postz/#{post}" + lib/app_web/controllers/post_controller.ex:100: AppWeb.PostController.show/2 + ``` + + Additionally, interpolated ~p values are encoded via the `Phoenix.Param` protocol. + For example, a `%Post{}` struct in your application may derive the `Phoenix.Param` + protocol to generate slug-based paths rather than ID based ones. This allows you to + use `~p"/posts/#{post}"` rather than `~p"/posts/#{post.slug}"` throughout your + application. See the `Phoenix.Param` documentation for more details. + + Finally, query strings are also supported in verified routes, either in traditional form: + + ~p"/posts?page=#{page}" + + Or as a keyword list or map of values: + + params = %{page: 1, direction: "asc"} + ~p"/posts?#{params}" + + Like path segments, query strings params are proper URL encoded and may be interpolated + directly into the ~p string. + + To ease url comparisons during tests (e.g. when using `assert_redirect/3`) query params + will be sorted. This is controlled by the `phoenix: [sort_verified_routes_query_params: true]` + configuration option. + + ## What about named routes? + + Many web frameworks, and early versions of Phoenix, provided a feature called "named routes". + The idea is that, when you define routes in your web applications, you could give them names + too. 
In Phoenix that was done as follows: + + get "/login", SessionController, :create, as: :login + + And now you could generate the route using the `login_path` function. + + Named routes exist to avoid hardcoding routes in your templates, if you wrote `` + and then changed your router, the link would point to a page that no longer exist. By using + `login_path`, we make sure it always points to a valid URL in our router. However, named routes + come with the downsides of indirection: when you look at the code, it is not immediately clear + which URL will be generated. Furthermore, if you have an existing URL and you want to add it + to a template, you need to do a reverse lookup and find its name in the router. At the end of + the day, named routes are arbitrary names that need to be memorized by developers, adding + cognitive overhead. + + Verified routes tackle this problem by allowing the routes to be written as we would read them + in a browser, but using the `~p` sigil to guarantee they actually exist at compilation time. + They remove the indirection of named routes while keeping their guarantees. + + In any case, if part of your application requires features similar to named routes, then + remember you can still leverage Elixir features to achieve the same result. For example, + you can define several functions as named routes to be reused across modules: + + def login_path, do: ~p"/login" + def user_home_path(user), do: ~p"/users/#{user.username}" + + ## Options + + To verify routes in your application modules, such as controller, templates, and views, + `use Phoenix.VerifiedRoutes`, which supports the following options: + + * `:router` - The required router to verify `~p` paths against + * `:endpoint` - Optional endpoint for URL generation + * `:statics` - Optional list of static directories to treat as verified paths + * `:path_prefixes` - Optional list of path prefixes to be added to every generated path. 
+ See "Path prefixes" for more information + + For example: + + use Phoenix.VerifiedRoutes, + router: AppWeb.Router, + endpoint: AppWeb.Endpoint, + statics: ~w(images) + + ## Connection/socket-based route generation + + The majority of path and URL generation needs your application will be met + with `~p` and `url/1`, where all information necessary to construct the path + or URL is provided by the compile-time information stored in the Endpoint + and Router passed to `use Phoenix.VerifiedRoutes`. + + That said, there are some circumstances where `path/2`, `path/3`, `url/2`, and `url/3` + are required: + + * When the runtime values of the `%Plug.Conn{}`, `%Phoenix.LiveSocket{}`, or a `%URI{}` + dictate the formation of the path or URL, which happens under the following scenarios: + + - `Phoenix.Controller.put_router_url/2` is used to override the endpoint's URL + - `Phoenix.Controller.put_static_url/2` is used to override the endpoint's static URL + + * When the Router module differs from the one passed to `use Phoenix.VerifiedRoutes`, + such as library code, or application code that relies on multiple routers. In such cases, + the router module can be provided explicitly to `path/3` and `url/3`. + + ## Tracking warnings + + All static path segments must start with forward slash, and you must have a static segment + between dynamic interpolations in order for a route to be verified without warnings. + For example, imagine you have these two routes: + + get "/media/posts/:id" + get "/media/images/:id" + + The following route will be verified and emit a warning as it does not match the router: + + ~p"/media/post/#{post}" + + However the one below will not, the "post" segment is dynamic: + + type = "post" + ~p"/media/#{type}/#{post}" + + If you find yourself needing to generate dynamic URLs which are defined statically + in the router, that's a good indicator you should refactor it into one or more + function, such as `posts_path/1` and `images_path/1`. 
+ + Like any other compilation warning, the Elixir compiler will warn any time the file + that a `~p` resides in changes, or if the router is changed. + + ## Localized routes and path prefixes + + Applications that need to support internationalization (i18n) and localization (l10n) + often do so at the URL level. In such cases, there are different approaches one can + choose. + + One option is to perform i18n at the domain level. You can have `example.com` (in which + you would detect the locale based on the "Accept-Language" HTTP header), `en.example.com`, + `en-GB.example.com` and so forth. In this case, you would have a plug that looks at the + host and at HTTP headers and calls `Gettext.get_locale/1` accordingly. The biggest benefit + of this approach is that you don't have to change the routes in your application and + verified routes works as is. + + Some applications, however, like to add the locale as part of the URL prefix: + + scope "/:locale" do + get "/posts" + get "/images" + end + + For such cases, VerifiedRoutes allow you to configure a `path_prefixes` option, which + is a list of segments to prepend to the URL. For example: + + use Phoenix.VerifiedRoutes, + router: AppWeb.Router, + endpoint: AppWeb.Endpoint, + path_prefixes: [{Gettext, :get_locale, []}] + + The above will prepend `"/#{Gettext.get_locale()}"` to every path and url generated with + `~p`. 
If your website has a handful of URLs that do not require the locale prefix, then + we suggest defining them in a separate module, where you use `Phoenix.VerifiedRoutes` + without the prefix option: + + defmodule UnlocalizedRoutes do + use Phoenix.VerifiedRoutes, + router: AppWeb.Router, + endpoint: AppWeb.Endpoint + + # Since :path_prefixes was not declared, + # the code below won't prepend the locale and still be verified + def root, do: ~p"/" + end + + Finally, for even more complex use cases, where the whole URL needs to localized, + see projects such as [`routex`](https://hex.pm/packages/routex) and + [`ex_cldr_routes`](https://hex.pm/packages/ex_cldr_routes). + + ## Usage with custom plugs + + Sometimes, when we want to do dynamic routing, we will forward to custom plugs. + It is possible to make these dynamic routers support `mix phx.routes` and verified + routes at compile time by adopting the `Phoenix.VerifiedRoutes` behaviour. + For example: + + defmodule MyApp.LocaleRouter do + use Plug.Router + @behaviour Phoenix.VerifiedRoutes + + # custom routing rules + + # for displaying in `mix phx.routes` + def formatted_routes(plug_opts) do + for locale <- supported_locales(plug_opts) do + %{verb: "GET", path: "/#{locale}/*subpath"} + end + end + + def verified_route?(plug_opts, path) do + plug_opts + |> supported_locales() + |> Enum.any?(fn locale -> + Enum.at(path, 0) == locale + end) + end + end + ''' + @doc false + defstruct router: nil, + route: nil, + inspected_route: nil, + warn_location: nil, + test_path: nil + + defmacro __using__(opts) do + opts = + if Keyword.keyword?(opts) do + for {k, v} <- opts do + if Macro.quoted_literal?(v) do + {k, Macro.prewalk(v, &expand_alias(&1, __CALLER__))} + else + {k, v} + end + end + else + opts + end + + quote do + unquote(__MODULE__).__using__(__MODULE__, unquote(opts)) + import unquote(__MODULE__) + end + end + + @doc false + def __using__(mod, opts) do + Module.register_attribute(mod, :phoenix_verified_routes, 
accumulate: true) + Module.put_attribute(mod, :before_compile, __MODULE__) + Module.put_attribute(mod, :router, Keyword.fetch!(opts, :router)) + Module.put_attribute(mod, :endpoint, Keyword.get(opts, :endpoint)) + + statics = + case Keyword.get(opts, :statics, []) do + list when is_list(list) -> list + other -> raise ArgumentError, "expected statics to be a list, got: #{inspect(other)}" + end + + path_prefixes = + case Keyword.get(opts, :path_prefixes, []) do + list when is_list(list) -> + list + + other -> + raise ArgumentError, + "expected path_prefixes to be a list of zero-arity functions, got: #{inspect(other)}" + end + + if Module.get_attribute(mod, :phoenix_verified_config) do + raise "duplicate call to \"use Phoenix.VerifiedRoutes\" found, make sure it is used only once per module" + end + + Module.put_attribute(mod, :phoenix_verified_config, %{ + statics: statics, + path_prefixes: path_prefixes + }) + end + + @type plug_opts :: any() + @type formatted_route :: %{ + required(:verb) => String.t(), + required(:path) => String.t(), + required(:label) => String.t() + } + + @doc """ + Returns the necessary information about routes for display in `mix phx.routes`. + + The `plug_opts` is typically only passed when the router is mounted within + a `Phoenix.Router`. Otherwise it defaults to `[]`. + """ + @callback formatted_routes(plug_opts()) :: [formatted_route()] + + @doc """ + Returns `true` if the path is verified, and false if not. + + The `plug_opts` is typically only passed when the router is mounted within + a `Phoenix.Router`. Otherwise it defaults to `[]`. 
+ """ + @callback verified_route?(plug_opts(), [String.t()]) :: boolean() + + defmacro __before_compile__(_env) do + quote do + @after_verify {__MODULE__, :__phoenix_verify_routes__} + + @doc false + def __phoenix_verify_routes__(_module) do + unquote(__MODULE__).__verify__(@phoenix_verified_routes) + end + end + end + + @doc false + def __verify__(routes) when is_list(routes) do + Enum.each(routes, fn %__MODULE__{} = route -> + test_path = split_test_path(route.test_path) + + unless route.router.verified_route?([], test_path) do + IO.warn( + "no route path for #{inspect(route.router)} matches #{route.inspected_route}", + route.warn_location + ) + end + end) + end + + defp split_test_path(test_path) do + test_path + |> String.split("#") + |> Enum.at(0) + |> String.split("/") + |> Enum.filter(fn segment -> segment != "" end) + |> Enum.map(&URI.decode/1) + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:path, 2}}) + + defp expand_alias(other, _env), do: other + + @doc ~S''' + Generates the router path with route verification. + + Interpolated named parameters are encoded via the `Phoenix.Param` protocol. + + Warns when the provided path does not match against the router specified + in `use Phoenix.VerifiedRoutes` or the `@router` module attribute. 
+ + ## Examples + + use Phoenix.VerifiedRoutes, endpoint: MyAppWeb.Endpoint, router: MyAppWeb.Router + + redirect(to: ~p"/users/top") + + redirect(to: ~p"/users/#{@user}") + + ~H""" + <.link href={~p"/users?page=#{@page}"}>profile + + <.link href={~p"/users?#{@params}"}>profile + """ + ''' + defmacro sigil_p({:<<>>, _meta, _segments} = route, extra) do + validate_sigil_p!(extra) + endpoint = attr!(__CALLER__, :endpoint) + router = attr!(__CALLER__, :router) + + route + |> build_route(route, __CALLER__, endpoint, router) + |> inject_path(__CALLER__) + end + + defp inject_path( + {%__MODULE__{} = route, static?, _endpoint_ctx, _route_ast, path_ast, static_ast}, + env + ) do + if static? do + static_ast + else + Module.put_attribute(env.module, :phoenix_verified_routes, route) + path_ast + end + end + + defp inject_url( + {%__MODULE__{} = route, static?, endpoint_ctx, route_ast, path_ast, _static_ast}, + env + ) do + if static? do + quote do + unquote(__MODULE__).static_url(unquote_splicing([endpoint_ctx, route_ast])) + end + else + Module.put_attribute(env.module, :phoenix_verified_routes, route) + + quote do + unquote(__MODULE__).unverified_url(unquote_splicing([endpoint_ctx, path_ast])) + end + end + end + + defp validate_sigil_p!([]), do: :ok + + defp validate_sigil_p!(extra) do + raise ArgumentError, "~p does not support modifiers after closing, got: #{extra}" + end + + defp raise_invalid_route(ast) do + raise ArgumentError, + "expected compile-time ~p path string, got: #{Macro.to_string(ast)}\n" <> + "Use unverified_path/2 and unverified_url/2 if you need to build an arbitrary path." + end + + @doc ~S''' + Generates the router path with route verification. + + See `sigil_p/2` for more information. + + Warns when the provided path does not match against the router specified + in the router argument. 
+ + ## Examples + + import Phoenix.VerifiedRoutes + + redirect(to: path(conn, MyAppWeb.Router, ~p"/users/top")) + + redirect(to: path(conn, MyAppWeb.Router, ~p"/users/#{@user}")) + + ~H""" + <.link href={path(@uri, MyAppWeb.Router, "/users?page=#{@page}")}>profile + <.link href={path(@uri, MyAppWeb.Router, "/users?#{@params}")}>profile + """ + ''' + defmacro path( + conn_or_socket_or_endpoint_or_uri, + router, + {:sigil_p, _, [{:<<>>, _meta, _segments} = route, extra]} = sigil_p + ) do + validate_sigil_p!(extra) + + route + |> build_route(sigil_p, __CALLER__, conn_or_socket_or_endpoint_or_uri, router) + |> inject_path(__CALLER__) + end + + defmacro path(_endpoint, _router, other), do: raise_invalid_route(other) + + @doc ~S''' + Generates the router path with route verification. + + See `sigil_p/2` for more information. + + Warns when the provided path does not match against the router specified + in `use Phoenix.VerifiedRoutes` or the `@router` module attribute. + + ## Examples + + import Phoenix.VerifiedRoutes + + redirect(to: path(conn, ~p"/users/top")) + + redirect(to: path(conn, ~p"/users/#{@user}")) + + ~H""" + <.link href={path(@uri, "/users?page=#{@page}")}>profile + <.link href={path(@uri, "/users?#{@params}")}>profile + """ + ''' + defmacro path( + conn_or_socket_or_endpoint_or_uri, + {:sigil_p, _, [{:<<>>, _meta, _segments} = route, extra]} = sigil_p + ) do + validate_sigil_p!(extra) + router = attr!(__CALLER__, :router) + + route + |> build_route(sigil_p, __CALLER__, conn_or_socket_or_endpoint_or_uri, router) + |> inject_path(__CALLER__) + end + + defmacro path(_conn_or_socket_or_endpoint_or_uri, other), do: raise_invalid_route(other) + + @doc ~S''' + Generates the router url with route verification. + + See `sigil_p/2` for more information. + + Warns when the provided path does not match against the router specified + in `use Phoenix.VerifiedRoutes` or the `@router` module attribute. 
+ + ## Examples + + use Phoenix.VerifiedRoutes, endpoint: MyAppWeb.Endpoint, router: MyAppWeb.Router + + redirect(to: url(conn, ~p"/users/top")) + + redirect(to: url(conn, ~p"/users/#{@user}")) + + ~H""" + <.link href={url(@uri, "/users?#{[page: @page]}")}>profile + """ + + The router may also be provided in cases where you want to verify routes for a + router other than the one passed to `use Phoenix.VerifiedRoutes`: + + redirect(to: url(conn, OtherRouter, ~p"/users")) + + Forwarded routes are also resolved automatically. For example, imagine you + have a forward path to an admin router in your main router: + + defmodule AppWeb.Router do + ... + forward "/admin", AppWeb.AdminRouter + end + + defmodule AppWeb.AdminRouter do + ... + get "/users", AppWeb.Admin.UserController + end + + Forwarded paths in your main application router will be verified as usual, + such as `~p"/admin/users"`. + ''' + defmacro url({:sigil_p, _, [{:<<>>, _meta, _segments} = route, _]} = sigil_p) do + endpoint = attr!(__CALLER__, :endpoint) + router = attr!(__CALLER__, :router) + + route + |> build_route(sigil_p, __CALLER__, endpoint, router) + |> inject_url(__CALLER__) + end + + defmacro url(other), do: raise_invalid_route(other) + + @doc """ + Generates the router url with route verification from the connection, socket, or URI. + + See `url/1` for more information. + """ + defmacro url( + conn_or_socket_or_endpoint_or_uri, + {:sigil_p, _, [{:<<>>, _meta, _segments} = route, _]} = sigil_p + ) do + router = attr!(__CALLER__, :router) + + route + |> build_route(sigil_p, __CALLER__, conn_or_socket_or_endpoint_or_uri, router) + |> inject_url(__CALLER__) + end + + defmacro url(_conn_or_socket_or_endpoint_or_uri, other), do: raise_invalid_route(other) + + @doc """ + Generates the url with route verification from the connection, socket, or URI and router. + + See `url/1` for more information. 
+ """ + defmacro url( + conn_or_socket_or_endpoint_or_uri, + router, + {:sigil_p, _, [{:<<>>, _meta, _segments} = route, _]} = sigil_p + ) do + router = Macro.expand(router, __CALLER__) + + route + |> build_route(sigil_p, __CALLER__, conn_or_socket_or_endpoint_or_uri, router) + |> inject_url(__CALLER__) + end + + defmacro url(_conn_or_socket_or_endpoint_or_uri, _router, other), do: raise_invalid_route(other) + + @doc """ + Generates url to a static asset given its file path. + + See `c:Phoenix.Endpoint.static_url/0` and `c:Phoenix.Endpoint.static_path/1` for more information. + + ## Examples + + iex> static_url(conn, "/assets/js/app.js") + "https://example.com/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + + iex> static_url(socket, "/assets/js/app.js") + "https://example.com/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + + iex> static_url(AppWeb.Endpoint, "/assets/js/app.js") + "https://example.com/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + """ + def static_url(conn_or_socket_or_endpoint, path) + + def static_url(%Plug.Conn{private: private}, path) do + case private do + %{phoenix_static_url: static_url} -> concat_url(static_url, path) + %{phoenix_endpoint: endpoint} -> static_url(endpoint, path) + end + end + + def static_url(%_{endpoint: endpoint}, path) do + static_url(endpoint, path) + end + + def static_url(endpoint, path) when is_atom(endpoint) do + endpoint.static_url() <> endpoint.static_path(path) + end + + def static_url(other, path) do + raise ArgumentError, + "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a struct with an :endpoint key, " <> + "or a Phoenix.Endpoint when building static url for #{path}, got: #{inspect(other)}" + end + + @doc """ + Returns the URL for the endpoint from the path without verification. 
+ + ## Examples + + iex> unverified_url(conn, "/posts") + "https://example.com/posts" + + iex> unverified_url(conn, "/posts", page: 1) + "https://example.com/posts?page=1" + """ + def unverified_url(conn_or_socket_or_endpoint_or_uri, path, params \\ %{}) + when (is_map(params) or is_list(params)) and is_binary(path) do + guarded_unverified_url(conn_or_socket_or_endpoint_or_uri, path, params) + end + + defp guarded_unverified_url(%Plug.Conn{private: private}, path, params) do + case private do + %{phoenix_router_url: url} when is_binary(url) -> concat_url(url, path, params) + %{phoenix_endpoint: endpoint} -> concat_url(endpoint.url(), path, params) + end + end + + defp guarded_unverified_url(%_{endpoint: endpoint}, path, params) do + concat_url(endpoint.url(), path, params) + end + + defp guarded_unverified_url(%URI{} = uri, path, params) do + append_params(URI.to_string(%{uri | path: path}), params) + end + + defp guarded_unverified_url(endpoint, path, params) when is_atom(endpoint) do + concat_url(endpoint.url(), path, params) + end + + defp guarded_unverified_url(other, path, _params) do + raise ArgumentError, + "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <> + "or a Phoenix.Endpoint when building url at #{path}, got: #{inspect(other)}" + end + + defp concat_url(url, path) when is_binary(path), do: url <> path + + defp concat_url(url, path, params) when is_binary(path) do + append_params(url <> path, params) + end + + @doc """ + Generates path to a static asset given its file path. + + See `c:Phoenix.Endpoint.static_path/1` for more information. 
+ + ## Examples + + iex> static_path(conn, "/assets/js/app.js") + "/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + + iex> static_path(socket, "assets/js/app.js") + "/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + + iex> static_path(AppWeb.Endpoint, "assets/js/app.js") + "/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + + iex> static_path(%URI{path: "/subresource"}, "/assets/js/app.js") + "/subresource/assets/js/app-813dfe33b5c7f8388bccaaa38eec8382.js" + """ + def static_path(conn_or_socket_or_endpoint_or_uri, path) + + def static_path(%Plug.Conn{private: private}, path) do + case private do + %{phoenix_static_url: _} -> path + %{phoenix_endpoint: endpoint} -> endpoint.static_path(path) + end + end + + def static_path(%URI{} = uri, path) do + (uri.path || "") <> path + end + + def static_path(%_{endpoint: endpoint}, path) do + static_path(endpoint, path) + end + + def static_path(endpoint, path) when is_atom(endpoint) do + endpoint.static_path(path) + end + + @doc """ + Returns the path with relevant script name prefixes without verification. 
+ + ## Examples + + iex> unverified_path(conn, AppWeb.Router, "/posts") + "/posts" + + iex> unverified_path(conn, AppWeb.Router, "/posts", page: 1) + "/posts?page=1" + """ + def unverified_path(conn_or_socket_or_endpoint_or_uri, router, path, params \\ %{}) + + def unverified_path(%Plug.Conn{} = conn, router, path, params) do + conn + |> build_own_forward_path(router, path) + |> Kernel.||(build_conn_forward_path(conn, router, path)) + |> Kernel.||(path_with_script(path, conn.script_name)) + |> append_params(params) + end + + def unverified_path(%URI{} = uri, _router, path, params) do + append_params((uri.path || "") <> path, params) + end + + def unverified_path(%_{endpoint: endpoint}, router, path, params) do + unverified_path(endpoint, router, path, params) + end + + def unverified_path(endpoint, _router, path, params) when is_atom(endpoint) do + append_params(endpoint.path(path), params) + end + + def unverified_path(other, router, path, _params) do + raise ArgumentError, + "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <> + "or a Phoenix.Endpoint when building path for #{inspect(router)} at #{path}, got: #{inspect(other)}" + end + + defp append_params(path, params) when params == %{} or params == [], do: path + + defp append_params(path, params) when is_map(params) or is_list(params) do + path <> "?" 
<> __encode_query__(params) + end + + @doc false + def __encode_segment__(data) do + case data do + [] -> "" + [str | _] when is_binary(str) -> Enum.map_join(data, "/", &encode_segment/1) + _ -> encode_segment(data) + end + end + + defp encode_segment(data) do + data + |> Phoenix.Param.to_param() + |> URI.encode(&URI.char_unreserved?/1) + end + + # Segments must always start with / + defp verify_segment(["/" <> _ | _] = segments, route), do: verify_segment(segments, route, []) + + defp verify_segment(_, route) do + raise ArgumentError, "paths must begin with /, got: #{Macro.to_string(route)}" + end + + # separator followed by dynamic + defp verify_segment(["/" | rest], route, acc), do: verify_segment(rest, route, ["/" | acc]) + + # we've found a static segment, return to caller with rewritten query if found + defp verify_segment(["/" <> _ = segment | rest], route, acc) do + case {String.split(segment, "?"), rest} do + {[segment], _} -> + verify_segment(rest, route, [URI.encode(segment) | acc]) + + {[segment, static_query], dynamic_query} -> + {Enum.reverse([URI.encode(segment) | acc]), + verify_query(dynamic_query, route, [static_query])} + end + end + + # we reached the static query string, return to caller + defp verify_segment(["?" <> query], _route, acc) do + {Enum.reverse(acc), [query]} + end + + # we reached the dynamic query string, return to call with rewritten query + defp verify_segment(["?" 
<> static_query_segment | rest], route, acc) do + {Enum.reverse(acc), verify_query(rest, route, [static_query_segment])} + end + + defp verify_segment([segment | _], route, _acc) when is_binary(segment) do + raise ArgumentError, + "path segments after interpolation must begin with /, got: #{inspect(segment)} in #{Macro.to_string(route)}" + end + + defp verify_segment( + [ + {:"::", m1, [{{:., m2, [Kernel, :to_string]}, m3, [dynamic]}, {:binary, _, _} = bin]} + | rest + ], + route, + [prev | _] = acc + ) + when is_binary(prev) do + rewrite = {:"::", m1, [{{:., m2, [__MODULE__, :__encode_segment__]}, m3, [dynamic]}, bin]} + verify_segment(rest, route, [rewrite | acc]) + end + + defp verify_segment([_ | _], route, _acc) do + raise ArgumentError, + "a dynamic ~p interpolation must follow a static segment, got: #{Macro.to_string(route)}" + end + + # we've reached the end of the path without finding query, return to caller + defp verify_segment([], _route, acc), do: {Enum.reverse(acc), _query = []} + + defp verify_query( + [ + {:"::", m1, [{{:., m2, [Kernel, :to_string]}, m3, [arg]}, {:binary, _, _} = bin]} + | rest + ], + route, + acc + ) do + unless is_binary(hd(acc)) do + raise ArgumentError, + "interpolated query string params must be separated by &, got: #{Macro.to_string(route)}" + end + + sort_params? 
= Application.get_env(:phoenix, :sort_verified_routes_query_params, false) + + rewrite = + {:"::", m1, [{{:., m2, [__MODULE__, :__encode_query__]}, m3, [arg, sort_params?]}, bin]} + + verify_query(rest, route, [rewrite | acc]) + end + + defp verify_query([], _route, acc), do: Enum.reverse(acc) + + defp verify_query(["=" | rest], route, acc) do + verify_query(rest, route, ["=" | acc]) + end + + defp verify_query(["&" <> _ = param | rest], route, acc) do + unless String.contains?(param, "=") do + raise ArgumentError, + "expected query string param key to end with = or declare a static key value pair, got: #{inspect(param)}" + end + + verify_query(rest, route, [param | acc]) + end + + defp verify_query(_other, route, _acc) do + raise_invalid_query(route) + end + + defp raise_invalid_query(route) do + raise ArgumentError, + "expected query string param to be compile-time map or keyword list, got: #{Macro.to_string(route)}" + end + + @doc """ + Generates an integrity hash to a static asset given its file path. + + See `c:Phoenix.Endpoint.static_integrity/1` for more information. + + ## Examples + + iex> static_integrity(conn, "/assets/js/app.js") + "813dfe33b5c7f8388bccaaa38eec8382" + + iex> static_integrity(socket, "/assets/js/app.js") + "813dfe33b5c7f8388bccaaa38eec8382" + + iex> static_integrity(AppWeb.Endpoint, "/assets/js/app.js") + "813dfe33b5c7f8388bccaaa38eec8382" + """ + def static_integrity(conn_or_socket_or_endpoint, path) + + def static_integrity(%Plug.Conn{private: %{phoenix_endpoint: endpoint}}, path) do + static_integrity(endpoint, path) + end + + def static_integrity(%_{endpoint: endpoint}, path) do + static_integrity(endpoint, path) + end + + def static_integrity(endpoint, path) when is_atom(endpoint) do + endpoint.static_integrity(path) + end + + @doc false + def __encode_query__(dict, sort? \\ false) + + def __encode_query__(dict, sort?) 
when is_map(dict) and not is_struct(dict) do + case Plug.Conn.Query.encode(dict, &to_param/1) do + "" -> "" + query_str -> maybe_sort_query(query_str, sort?) + end + end + + def __encode_query__(dict, sort?) when is_list(dict) do + if dict == [] or match?([{_, _} | _], dict) do + case Plug.Conn.Query.encode(dict, &to_param/1) do + "" -> "" + query_str -> maybe_sort_query(query_str, sort?) + end + else + raise ArgumentError, + "expected a keyword list or map for query string encoding, got: #{inspect(dict)}. " <> + "Use the full query string syntax instead, such as ~p\"/path?#\{[key: #{inspect(dict)}]}\"" + end + end + + def __encode_query__(val, _sort?), do: val |> to_param() |> URI.encode_www_form() + + defp maybe_sort_query(query_str, false), do: query_str + + defp maybe_sort_query(query, true), + do: query |> String.split("&") |> Enum.sort() |> Enum.join("&") + + defp to_param(int) when is_integer(int), do: Integer.to_string(int) + defp to_param(bin) when is_binary(bin), do: bin + defp to_param(false), do: "false" + defp to_param(true), do: "true" + defp to_param(data), do: Phoenix.Param.to_param(data) + + defp build_route(route_ast, sigil_p, env, endpoint_ctx, router) do + config = Module.get_attribute(env.module, :phoenix_verified_config, []) + + router = + case Macro.expand(router, env) do + mod when is_atom(mod) -> + mod + + other -> + raise ArgumentError, """ + expected router to be to module, got: #{inspect(other)} + + If your router is not defined at compile-time, use unverified_path/3 instead. 
+ """ + end + + {static?, meta, test_path, path_ast, static_ast} = + rewrite_path(route_ast, endpoint_ctx, router, config) + + route = %__MODULE__{ + router: router, + warn_location: warn_location(meta, env), + inspected_route: Macro.to_string(sigil_p), + test_path: test_path + } + + {route, static?, endpoint_ctx, route_ast, path_ast, static_ast} + end + + defp warn_location(meta, %{line: line, file: file, function: function, module: module}) do + column = if column = meta[:column], do: column + 2 + [line: line, function: function, module: module, file: file, column: column] + end + + defp rewrite_path(route, endpoint, router, config) do + {:<<>>, meta, segments} = route + {path_rewrite, query_rewrite} = verify_segment(segments, route) + + path_rewrite = + if config.path_prefixes != [] and + static_path?(path_rewrite |> Enum.slice(0, 1) |> materialize_path(), config.statics) do + path_rewrite + else + compile_prefixes(config.path_prefixes, meta) ++ path_rewrite + end + + rewrite_route = + if query_rewrite == [] do + {:<<>>, meta, path_rewrite} + else + quote generated: true do + query_str = unquote({:<<>>, meta, query_rewrite}) + path_str = unquote({:<<>>, meta, path_rewrite}) + + if query_str == "" do + path_str + else + path_str <> "?" <> query_str + end + end + end + + test_path = materialize_path(path_rewrite) + static? 
= static_path?(test_path, config.statics) + + path_ast = + quote generated: true do + unquote(__MODULE__).unverified_path(unquote_splicing([endpoint, router, rewrite_route])) + end + + static_ast = + quote generated: true do + unquote(__MODULE__).static_path(unquote_splicing([endpoint, rewrite_route])) + end + + {static?, meta, test_path, path_ast, static_ast} + end + + defp materialize_path(path) do + Enum.map_join(path, &if(is_binary(&1), do: &1, else: "1")) + end + + defp compile_prefixes(path_prefixes, meta) do + Enum.flat_map(path_prefixes, fn + {module, fun, args} when is_atom(module) and is_atom(fun) and is_list(args) -> + [ + "/", + {:"::", meta, + [{{:., meta, [module, fun]}, meta, Macro.escape(args)}, {:binary, meta, nil}]} + ] + + other -> + raise ArgumentError, + ":path_prefixes option in VerifiedRoutes must be a {mod, fun, args} and return a string, got: #{inspect(other)}" + end) + end + + defp attr!(%{function: nil}, _) do + raise "Phoenix.VerifiedRoutes can only be used inside functions, please move your usage of ~p to functions" + end + + defp attr!(env, :endpoint) do + Module.get_attribute(env.module, :endpoint) || + raise """ + expected @endpoint to be set. For dynamic endpoint resolution, use path/2 instead. 
+ + for example: + + path(conn_or_socket, ~p"/my-path") + """ + end + + defp attr!(env, name) do + Module.get_attribute(env.module, name) || raise "expected @#{name} module attribute to be set" + end + + defp static_path?(path, statics) do + Enum.find(statics, &String.starts_with?(path, "/" <> &1)) + end + + defp build_own_forward_path(conn, router, path) do + case conn.private do + %{^router => local_script} when is_list(local_script) -> + path_with_script(path, local_script) + + %{} -> + nil + end + end + + defp build_conn_forward_path(%Plug.Conn{} = conn, router, path) do + with %{phoenix_router: phx_router} <- conn.private, + %{^phx_router => script_name} when is_list(script_name) <- conn.private, + local_script when is_list(local_script) <- phx_router.__forward__(router) do + path_with_script(path, script_name ++ local_script) + else + _ -> nil + end + end + + defp path_with_script(path, []), do: path + defp path_with_script(path, script), do: "/" <> Enum.join(script, "/") <> path +end diff --git a/deps/phoenix/mix.exs b/deps/phoenix/mix.exs new file mode 100644 index 0000000..30ea8a9 --- /dev/null +++ b/deps/phoenix/mix.exs @@ -0,0 +1,308 @@ +defmodule Phoenix.MixProject do + use Mix.Project + + if Mix.env() != :prod do + for path <- :code.get_path(), + Regex.match?(~r/phx_new-[\w\.\-]+\/ebin$/, List.to_string(path)) do + Code.delete_path(path) + end + end + + @version "1.8.7" + @scm_url "https://github.com/phoenixframework/phoenix" + + # If the elixir requirement is updated, we need to make the installer + # use at least the minimum requirement used here. Although often the + # installer is ahead of Phoenix itself. 
+ @elixir_requirement "~> 1.15" + + def project do + [ + app: :phoenix, + version: @version, + elixir: @elixir_requirement, + deps: deps(), + package: package(), + consolidate_protocols: Mix.env() != :test, + xref: [ + exclude: [ + {IEx, :started?, 0}, + Ecto.Type, + :ranch, + :cowboy_req, + Plug.Cowboy.Conn, + Plug.Cowboy, + :httpc, + :public_key + ] + ], + elixirc_paths: elixirc_paths(Mix.env()), + name: "Phoenix", + docs: &docs/0, + aliases: aliases(), + source_url: @scm_url, + homepage_url: "https://www.phoenixframework.org", + description: "Peace of mind from prototype to production", + test_ignore_filters: [ + &String.starts_with?(&1, "test/fixtures/"), + &String.starts_with?(&1, "test/support/") + ] + ] + end + + def cli do + [ + preferred_envs: [docs: :docs] + ] + end + + defp elixirc_paths(:docs), do: ["lib", "installer/lib"] + defp elixirc_paths(_), do: ["lib"] + + defp extra_applications(:test), do: [:inets] + defp extra_applications(_), do: [] + + def application do + [ + mod: {Phoenix, []}, + extra_applications: extra_applications(Mix.env()) ++ [:logger, :eex, :crypto, :public_key], + env: [ + logger: true, + stacktrace_depth: nil, + filter_parameters: ["password", "token"], + serve_endpoints: false, + gzippable_exts: ~w(.js .map .css .txt .text .html .json .svg .eot .ttf), + static_compressors: [Phoenix.Digester.Gzip] + ] + ] + end + + defp deps do + [ + {:plug, "~> 1.14"}, + {:plug_crypto, "~> 1.2 or ~> 2.0"}, + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:phoenix_pubsub, "~> 2.1"}, + {:phoenix_template, "~> 1.0"}, + {:websock_adapter, "~> 0.5.3"}, + + # TODO Drop phoenix_view as an optional dependency in Phoenix v2.0 + {:phoenix_view, "~> 2.0", optional: true}, + + # Optional deps + {:plug_cowboy, "~> 2.7", optional: true}, + {:bandit, "~> 1.0", optional: true}, + {:jason, "~> 1.0", optional: true}, + + # Docs dependencies (some for cross references) + {:ex_doc, "~> 0.38", only: :docs}, + {:ecto, "~> 3.0", only: :docs}, + {:ecto_sql, "~> 3.10", only: 
:docs}, + {:gettext, "~> 1.0", only: :docs}, + {:telemetry_poller, "~> 1.0", only: :docs}, + {:telemetry_metrics, "~> 1.0", only: :docs}, + {:makeup_elixir, "~> 1.0.1 or ~> 1.1", only: :docs}, + {:makeup_eex, "~> 2.0", only: :docs}, + {:makeup_syntect, "~> 0.1.0", only: :docs}, + # Test dependencies + {:phoenix_html, "~> 4.0", only: [:docs, :test]}, + {:phx_new, path: "./installer", only: [:docs, :test]}, + {:mint, "~> 1.4", only: :test}, + {:mint_web_socket, "~> 1.0.0", only: :test}, + + # Dev dependencies + {:esbuild, "~> 0.8", only: :dev} + ] + end + + defp package do + [ + maintainers: ["Chris McCord", "José Valim", "Gary Rennie", "Jason Stiebs"], + licenses: ["MIT"], + links: %{ + "GitHub" => @scm_url, + "Changelog" => "https://hexdocs.pm/phoenix/changelog.html" + }, + files: ~w( + assets/js lib priv usage-rules CHANGELOG.md LICENSE.md mix.exs package.json README.md .formatter.exs + installer/templates/phx_web/components/core_components.ex.eex + ) + ] + end + + defp docs do + [ + search: [ + %{ + name: "Latest", + help: + "Search latest versions of Plug, Phoenix, Phoenix.{HTML, LiveView, PubSub, Template}", + packages: [ + :plug, + :phoenix, + :phoenix_html, + :phoenix_live_view, + :phoenix_pubsub, + :phoenix_template + ] + }, + %{ + name: "Current version", + help: "Search only this project" + } + ], + source_ref: "v#{@version}", + main: "overview", + logo: "logo.png", + extra_section: "GUIDES", + assets: %{"guides/assets" => "assets"}, + formatters: ["html", "epub"], + groups_for_modules: groups_for_modules(), + extras: extras(), + groups_for_extras: groups_for_extras(), + groups_for_docs: [ + Reflection: &(&1[:type] == :reflection) + ], + skip_undefined_reference_warnings_on: ["CHANGELOG.md"] + ] + end + + defp extras do + [ + "guides/introduction/overview.md", + "guides/introduction/installation.md", + "guides/introduction/up_and_running.md", + "guides/introduction/community.md", + "guides/introduction/packages_glossary.md", + 
"guides/directory_structure.md", + "guides/request_lifecycle.md", + "guides/plug.md", + "guides/routing.md", + "guides/controllers.md", + "guides/components.md", + "guides/ecto.md", + "guides/json_and_apis.md", + "guides/live_view.md", + "guides/asset_management.md", + "guides/telemetry.md", + "guides/security.md", + "guides/authn_authz/authn_authz.md", + "guides/authn_authz/mix_phx_gen_auth.md", + "guides/authn_authz/scopes.md", + "guides/authn_authz/api_authentication.md", + "guides/data_modelling/contexts.md", + "guides/data_modelling/your_first_context.md", + "guides/data_modelling/in_context_relationships.md", + "guides/data_modelling/cross_context_boundaries.md", + "guides/data_modelling/more_examples.md", + "guides/data_modelling/faq.md", + "guides/real_time/channels.md", + "guides/real_time/presence.md", + "guides/testing/testing.md", + "guides/testing/testing_contexts.md", + "guides/testing/testing_controllers.md", + "guides/testing/testing_channels.md", + "guides/deployment/deployment.md", + "guides/deployment/releases.md", + "guides/deployment/fly.md", + "guides/deployment/gigalixir.md", + "guides/deployment/heroku.md", + "guides/howto/custom_error_pages.md", + "guides/howto/file_uploads.md", + "guides/howto/swapping_databases.md", + "guides/howto/using_ssl.md", + "guides/howto/writing_a_channels_client.md", + "guides/cheatsheets/router.cheatmd", + "CHANGELOG.md", + "JS Documentation": [url: "js/index.html"] + ] + end + + defp groups_for_extras do + [ + Introduction: ~r/guides\/introduction\/.?/, + "Core Concepts": ~r/guides\/[^\/]+\.md/, + "Data Modelling": ~r/guides\/data_modelling\/.?/, + "Authn and Authz": ~r/guides\/authn_authz\/.?/, + "Real-time": ~r/guides\/real_time\/.?/, + Testing: ~r/guides\/testing\/.?/, + Deployment: ~r/guides\/deployment\/.?/, + Cheatsheets: ~r/guides\/cheatsheets\/.?/, + "How-to's": ~r/guides\/howto\/.?/ + ] + end + + defp groups_for_modules do + # Ungrouped Modules: + # + # Phoenix + # Phoenix.Channel + # 
Phoenix.Controller + # Phoenix.Endpoint + # Phoenix.Naming + # Phoenix.Logger + # Phoenix.Param + # Phoenix.Presence + # Phoenix.Router + # Phoenix.Socket + # Phoenix.Token + # Phoenix.VerifiedRoutes + + [ + Testing: [ + Phoenix.ChannelTest, + Phoenix.ConnTest + ], + "Adapters and Plugs": [ + Phoenix.CodeReloader, + Phoenix.Endpoint.Cowboy2Adapter, + Phoenix.Endpoint.SyncCodeReloadPlug + ], + Digester: [ + Phoenix.Digester.Compressor, + Phoenix.Digester.Gzip + ], + Socket: [ + Phoenix.Socket.Broadcast, + Phoenix.Socket.Message, + Phoenix.Socket.Reply, + Phoenix.Socket.Serializer, + Phoenix.Socket.Transport + ] + ] + end + + defp aliases do + [ + docs: ["docs", &generate_js_docs/1], + "assets.build": ["esbuild module", "esbuild cdn", "esbuild cdn_min", "esbuild main"], + "assets.watch": "esbuild module --watch", + "archive.build": &raise_on_archive_build/1, + # copy core_components before compiling / publishing + compile: [©_core_components/1, "compile"], + "hex.publish": [©_core_components/1, "hex.publish"] + ] + end + + defp generate_js_docs(_) do + Mix.Task.run("app.start") + {_, 0} = System.cmd("npm", ["install"], into: IO.stream()) + {_, 0} = System.cmd("npm", ["run", "docs"], into: IO.stream()) + end + + defp raise_on_archive_build(_) do + Mix.raise(""" + You are trying to install "phoenix" as an archive, which is not supported. 
\ + You probably meant to install "phx_new" instead + """) + end + + defp copy_core_components(_) do + source = + Path.join(__DIR__, "installer/templates/phx_web/components/core_components.ex.eex") + + destination_dir = Path.join([__DIR__, "priv", "templates", "phx.gen.live"]) + destination = Path.join(destination_dir, "core_components.ex.eex") + File.cp!(source, destination) + end +end diff --git a/deps/phoenix/package.json b/deps/phoenix/package.json new file mode 100644 index 0000000..cf9ddf6 --- /dev/null +++ b/deps/phoenix/package.json @@ -0,0 +1,46 @@ +{ + "name": "phoenix", + "version": "1.8.7", + "description": "The official JavaScript client for the Phoenix web framework.", + "license": "MIT", + "module": "./priv/static/phoenix.mjs", + "main": "./priv/static/phoenix.cjs.js", + "unpkg": "./priv/static/phoenix.min.js", + "jsdelivr": "./priv/static/phoenix.min.js", + "exports": { + "import": "./priv/static/phoenix.mjs", + "require": "./priv/static/phoenix.cjs.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/phoenixframework/phoenix.git" + }, + "author": "Chris McCord (https://www.phoenixframework.org)", + "files": [ + "README.md", + "LICENSE.md", + "package.json", + "priv/static/*", + "assets/js/phoenix/*" + ], + "devDependencies": { + "@babel/cli": "7.28.6", + "@babel/core": "7.29.0", + "@babel/preset-env": "7.29.3", + "@eslint/js": "^10.0.1", + "@stylistic/eslint-plugin": "^5.0.0", + "documentation": "^14.0.3", + "eslint": "10.2.1", + "eslint-plugin-jest": "29.15.2", + "jest": "^30.0.0", + "jest-environment-jsdom": "^30.0.0", + "jsdom": "^29.0.1", + "mock-socket": "^9.3.1" + }, + "scripts": { + "test": "jest", + "test.coverage": "jest --coverage", + "test.watch": "jest --watch", + "docs": "documentation build assets/js/phoenix/index.js -f html -o doc/js" + } +} diff --git a/deps/phoenix/priv/static/favicon.ico b/deps/phoenix/priv/static/favicon.ico new file mode 100644 index 0000000..7f372bf Binary files /dev/null and 
b/deps/phoenix/priv/static/favicon.ico differ diff --git a/deps/phoenix/priv/static/phoenix-orange.png b/deps/phoenix/priv/static/phoenix-orange.png new file mode 100644 index 0000000..996c77d Binary files /dev/null and b/deps/phoenix/priv/static/phoenix-orange.png differ diff --git a/deps/phoenix/priv/static/phoenix.cjs.js b/deps/phoenix/priv/static/phoenix.cjs.js new file mode 100644 index 0000000..a2ab42a --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.cjs.js @@ -0,0 +1,1667 @@ +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); + +// js/phoenix/index.js +var phoenix_exports = {}; +__export(phoenix_exports, { + Channel: () => Channel, + LongPoll: () => LongPoll, + Presence: () => Presence, + Serializer: () => serializer_default, + Socket: () => Socket +}); +module.exports = __toCommonJS(phoenix_exports); + +// js/phoenix/utils.js +var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } +}; + +// js/phoenix/constants.js +var globalSelf = typeof self !== "undefined" ? self : null; +var phxWindow = typeof window !== "undefined" ? 
window : null; +var global = globalSelf || phxWindow || globalThis; +var DEFAULT_VSN = "2.0.0"; +var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; +var MAX_LONGPOLL_BATCH_SIZE = 100; +var DEFAULT_TIMEOUT = 1e4; +var WS_CLOSE_NORMAL = 1e3; +var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" +}; +var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +}; +var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +}; +var XHR_STATES = { + complete: 4 +}; +var AUTH_TOKEN_PREFIX = "base64url.bearer.phx."; + +// js/phoenix/push.js +var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + /** + * + * @param {number} timeout + */ + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + /** + * + */ + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + /** + * + * @param {*} status + * @param {*} callback + */ + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + /** + * @private + */ + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + /** + * @private + */ + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + /** + * 
@private + */ + cancelRefEvent() { + if (!this.refEvent) { + return; + } + this.channel.off(this.refEvent); + } + /** + * @private + */ + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + /** + * @private + */ + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + /** + * @private + */ + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + /** + * @private + */ + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } +}; + +// js/phoenix/timer.js +var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + clearTimeout(this.timer); + } + /** + * Cancels any previous scheduleTimeout and schedules callback + */ + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } +}; + +// js/phoenix/channel.js +var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, 
this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push( + this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + }) + ); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + /** + * Join the channel + * @param {integer} timeout + * @returns {Push} + */ + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 
'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + /** + * Hook into channel close + * @param {Function} callback + */ + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + /** + * Hook into channel errors + * @param {Function} callback + */ + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + /** + * Subscribes on channel events + * + * Subscription returns a ref counter, which can be used later to + * unsubscribe the exact event listener + * + * @example + * const ref1 = channel.on("event", do_stuff) + * const ref2 = channel.on("event", do_other_stuff) + * channel.off("event", ref1) + * // Since unsubscription, do_stuff won't fire, + * // while do_other_stuff will keep firing on the "event" + * + * @param {string} event + * @param {Function} callback + * @returns {integer} ref + */ + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + /** + * Unsubscribes off of channel events + * + * Use the ref returned from a channel.on() to unsubscribe one + * handler, or pass nothing for the ref to unsubscribe all + * handlers for the given event. + * + * @example + * // Unsubscribe the do_stuff handler + * const ref1 = channel.on("event", do_stuff) + * channel.off("event", ref1) + * + * // Unsubscribe all handlers from event + * channel.off("event") + * + * @param {string} event + * @param {integer} ref + */ + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + /** + * @private + */ + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + /** + * Sends a message `event` to phoenix with the payload `payload`. + * Phoenix receives this in the `handle_in(event, payload, socket)` + * function. 
if phoenix replies or it times out (default 10000ms), + * then optionally the reply can be received. + * + * @example + * channel.push("event") + * .receive("ok", payload => console.log("phoenix replied:", payload)) + * .receive("error", err => console.log("phoenix errored", err)) + * .receive("timeout", () => console.log("timed out pushing")) + * @param {string} event + * @param {Object} payload + * @param {number} [timeout] + * @returns {Push} + */ + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + /** Leaves the channel + * + * Unsubscribes from server events, and + * instructs channel to terminate on server + * + * Triggers onClose() hooks + * + * To receive leave acknowledgements, use the `receive` + * hook to bind to the server ack, ie: + * + * @example + * channel.leave().receive("ok", () => alert("left!") ) + * + * @param {integer} timeout + * @returns {Push} + */ + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + /** + * Overridable message hook + * + * Receives all events for specialized message handling + * before dispatching to the channel callbacks. 
+ * + * Must return the payload, modified or unmodified + * @param {string} event + * @param {Object} payload + * @param {integer} ref + * @returns {Object} + */ + onMessage(_event, payload, _ref) { + return payload; + } + /** + * @private + */ + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + /** + * @private + */ + joinRef() { + return this.joinPush.ref; + } + /** + * @private + */ + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + /** + * @private + */ + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + /** + * @private + */ + replyEventName(ref) { + return `chan_reply_${ref}`; + } + /** + * @private + */ + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + /** + * @private + */ + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + /** + * @private + */ + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + /** + * @private + */ + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + /** + * @private + */ + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } +}; + +// js/phoenix/ajax.js +var Ajax = class { + static request(method, endPoint, headers, body, timeout, 
ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else if (global.XMLHttpRequest) { + let req = new global.XMLHttpRequest(); + return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback); + } else if (global.fetch && global.AbortController) { + return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback); + } else { + throw new Error("No suitable XMLHttpRequest implementation found"); + } + } + static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback) { + let options = { + method, + headers, + body + }; + let controller = null; + if (timeout) { + controller = new AbortController(); + const _timeoutId = setTimeout(() => controller.abort(), timeout); + options.signal = controller.signal; + } + global.fetch(endPoint, options).then((response) => response.text()).then((data) => this.parseJSON(data)).then((data) => callback && callback(data)).catch((err) => { + if (err.name === "AbortError" && ontimeout) { + ontimeout(); + } else { + callback && callback(null); + } + }); + return controller; + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + for (let [key, value] of Object.entries(headers)) { + req.setRequestHeader(key, value); + } + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + let 
response = this.parseJSON(req.responseText); + callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? "&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } +}; + +// js/phoenix/longpoll.js +var arrayBufferToBase64 = (buffer) => { + let binary = ""; + let bytes = new Uint8Array(buffer); + let len = bytes.byteLength; + for (let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); +}; +var LongPoll = class { + constructor(endPoint, protocols) { + if (protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)) { + this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length)); + } + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.awaitingBatchAck = false; + this.currentBatch = null; + this.currentBatchTimer = null; + this.batchBuffer = []; + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState = 
SOCKET_STATES.connecting; + setTimeout(() => this.poll(), 0); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + const headers = { "Accept": "application/json" }; + if (this.authToken) { + headers["X-Phoenix-AuthToken"] = this.authToken; + } + this.ajax("GET", headers, null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + if (status === 410 && this.token !== null) { + this.onerror(410); + this.closeAndRetry(3410, "session_gone", false); + return; + } + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + // we collect all pushes within the current event loop by + // setTimeout 0, which optimizes back-to-back procedural + // pushes against an empty buffer + send(body) { + if (typeof body !== "string") { + body = arrayBufferToBase64(body); + } + if (this.currentBatch) { + this.currentBatch.push(body); + } 
else if (this.awaitingBatchAck) { + this.batchBuffer.push(body); + } else { + this.currentBatch = [body]; + this.currentBatchTimer = setTimeout(() => { + this.batchSend(this.currentBatch); + this.currentBatch = null; + }, 0); + } + } + batchSend(messages, offset = 0) { + this.awaitingBatchAck = true; + const next = offset + MAX_LONGPOLL_BATCH_SIZE; + const batch = messages.slice(offset, next); + this.ajax("POST", { "Content-Type": "application/x-ndjson" }, batch.join("\n"), () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.awaitingBatchAck = false; + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } else if (next < messages.length) { + this.batchSend(messages, next); + } else if (this.batchBuffer.length > 0) { + this.batchSend(this.batchBuffer); + this.batchBuffer = []; + } else { + this.awaitingBatchAck = false; + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + this.batchBuffer = []; + clearTimeout(this.currentBatchTimer); + this.currentBatchTimer = null; + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, headers, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } +}; + +// js/phoenix/presence.js +var Presence = class _Presence { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + 
this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = _Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = []; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return _Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + // lower-level public static API + /** + * Used to sync the list of presences on the server + * with the client's state. An optional `onJoin` and `onLeave` callback can + * be provided to react to changes in the client's local presences across + * disconnects and reconnects with the server. 
+ * + * @returns {Presence} + */ + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + /** + * + * Used to sync a diff of presence join and leave + * events from the server, as they happen. Like `syncState`, `syncDiff` + * accepts optional `onJoin` and `onLeave` callbacks to react to a user + * joining or leaving from a device. 
+ * + * @returns {Presence} + */ + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + /** + * Returns the array of presences, with selected metadata. 
+ * + * @param {Object} presences + * @param {Function} chooser + * + * @returns {Presence} + */ + static list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + // private + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } +}; + +// js/phoenix/serializer.js +var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + // private + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let encoder = new TextEncoder(); + let joinRefBytes = encoder.encode(join_ref); + let refBytes = encoder.encode(ref); + let topicBytes = encoder.encode(topic); + let eventBytes = encoder.encode(event); + this.assertFieldSize(joinRefBytes.byteLength, "join_ref"); + this.assertFieldSize(refBytes.byteLength, "ref"); + this.assertFieldSize(topicBytes.byteLength, "topic"); + this.assertFieldSize(eventBytes.byteLength, "event"); + let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let headerBytes = new Uint8Array(header); + let view = new DataView(header); + let offset = 0; + 
view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, joinRefBytes.byteLength); + view.setUint8(offset++, refBytes.byteLength); + view.setUint8(offset++, topicBytes.byteLength); + view.setUint8(offset++, eventBytes.byteLength); + headerBytes.set(joinRefBytes, offset); + offset += joinRefBytes.byteLength; + headerBytes.set(refBytes, offset); + offset += refBytes.byteLength; + headerBytes.set(topicBytes, offset); + offset += topicBytes.byteLength; + headerBytes.set(eventBytes, offset); + offset += eventBytes.byteLength; + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(headerBytes, 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + assertFieldSize(size, name) { + if (size > 255) { + throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`); + } + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + let 
joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } +}; + +// js/phoenix/socket.js +var Socket = class { + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.fallbackRef = null; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.primaryPassedHealthCheck = false; + this.longPollFallbackMs = opts.longPollFallbackMs; + this.fallbackTimer = null; + this.sessionStore = opts.sessionStorage || global && global.sessionStorage; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder = 
serializer_default.decode.bind(serializer_default); + this.closeWasClean = true; + this.disconnecting = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + this.pageHidden = false; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + phxWindow.addEventListener("visibilitychange", () => { + if (document.visibilityState === "hidden") { + this.pageHidden = true; + } else { + this.pageHidden = false; + if (!this.isConnected() && !this.closeWasClean) { + this.teardown(() => this.connect()); + } + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + if (!this.logger && opts.debug) { + this.logger = (kind, msg, data) => { + console.log(`${kind}: ${msg}`, data); + }; + } + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimeoutTimer = null; + this.heartbeatTimer = null; 
+ this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + if (this.pageHidden) { + this.log("Not reconnecting as page is hidden!"); + this.teardown(); + return; + } + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + this.authToken = opts.authToken; + } + /** + * Returns the LongPoll transport reference + */ + getLongPollTransport() { + return LongPoll; + } + /** + * Disconnects and replaces the active transport + * + * @param {Function} newTransport - The new transport class to instantiate + * + */ + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + /** + * Returns the socket protocol + * + * @returns {string} + */ + protocol() { + return location.protocol.match(/^https/) ? "wss" : "ws"; + } + /** + * The fully qualified socket url + * + * @returns {string} + */ + endPointURL() { + let uri = Ajax.appendParams( + Ajax.appendParams(this.endPoint, this.params()), + { vsn: this.vsn } + ); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + /** + * Disconnects the socket + * + * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes. + * + * @param {Function} callback - Optional callback which is called after socket is disconnected. + * @param {integer} code - A status code for disconnection (Optional). + * @param {string} reason - A textual description of the reason to disconnect. 
(Optional) + */ + disconnect(callback, code, reason) { + this.connectClock++; + this.disconnecting = true; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + this.teardown(() => { + this.disconnecting = false; + callback && callback(); + }, code, reason); + } + /** + * + * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}` + * + * Passing params to connect is deprecated; pass them in the Socket constructor instead: + * `new Socket("/socket", {params: {user_id: userToken}})`. + */ + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn && !this.disconnecting) { + return; + } + if (this.longPollFallbackMs && this.transport !== LongPoll) { + this.connectWithFallback(LongPoll, this.longPollFallbackMs); + } else { + this.transportConnect(); + } + } + /** + * Logs the message. Override `this.logger` for specialized logging. noops by default + * @param {string} kind + * @param {string} msg + * @param {Object} data + */ + log(kind, msg, data) { + this.logger && this.logger(kind, msg, data); + } + /** + * Returns true if a logger has been set on this socket. 
+ */ + hasLogger() { + return this.logger !== null; + } + /** + * Registers callbacks for connection open events + * + * @example socket.onOpen(function(){ console.info("the socket was opened") }) + * + * @param {Function} callback + */ + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection close events + * @param {Function} callback + */ + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection error events + * + * @example socket.onError(function(error){ alert("An error occurred") }) + * + * @param {Function} callback + */ + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection message events + * @param {Function} callback + */ + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + /** + * Pings the server and invokes the callback with the RTT in milliseconds + * @param {Function} callback + * + * Returns true if the ping was pushed or false if unable to be pushed. 
+ */ + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + /** + * @private + * + * @param {Function} + */ + transportName(transport) { + switch (transport) { + case LongPoll: + return "LongPoll"; + default: + return transport.name; + } + } + /** + * @private + */ + transportConnect() { + this.connectClock++; + this.closeWasClean = false; + let protocols = void 0; + if (this.authToken) { + protocols = ["phoenix", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, "")}`]; + } + this.conn = new this.transport(this.endPointURL(), protocols); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + getSession(key) { + return this.sessionStore && this.sessionStore.getItem(key); + } + storeSession(key, val) { + this.sessionStore && this.sessionStore.setItem(key, val); + } + connectWithFallback(fallbackTransport, fallbackThreshold = 2500) { + clearTimeout(this.fallbackTimer); + let established = false; + let primaryTransport = true; + let openRef, errorRef; + let fallbackTransportName = this.transportName(fallbackTransport); + let fallback = (reason) => { + this.log("transport", `falling back to ${fallbackTransportName}...`, reason); + this.off([openRef, errorRef]); + primaryTransport = false; + this.replaceTransport(fallbackTransport); + this.transportConnect(); + }; + if (this.getSession(`phx:fallback:${fallbackTransportName}`)) { + return fallback("memorized"); + } + this.fallbackTimer = setTimeout(fallback, 
fallbackThreshold); + errorRef = this.onError((reason) => { + this.log("transport", "error", reason); + if (primaryTransport && !established) { + clearTimeout(this.fallbackTimer); + fallback(reason); + } + }); + if (this.fallbackRef) { + this.off([this.fallbackRef]); + } + this.fallbackRef = this.onOpen(() => { + established = true; + if (!primaryTransport) { + let fallbackTransportName2 = this.transportName(fallbackTransport); + if (!this.primaryPassedHealthCheck) { + this.storeSession(`phx:fallback:${fallbackTransportName2}`, "true"); + } + return this.log("transport", `established ${fallbackTransportName2} fallback`); + } + clearTimeout(this.fallbackTimer); + this.fallbackTimer = setTimeout(fallback, fallbackThreshold); + this.ping((rtt) => { + this.log("transport", "connected to primary after", rtt); + this.primaryPassedHealthCheck = true; + clearTimeout(this.fallbackTimer); + }); + }); + this.transportConnect(); + } + clearHeartbeats() { + clearTimeout(this.heartbeatTimer); + clearTimeout(this.heartbeatTimeoutTimer); + } + onConnOpen() { + if (this.hasLogger()) this.log("transport", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.disconnecting = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + /** + * @private + */ + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.triggerChanError(); + this.closeWasClean = false; + this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, "heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + this.clearHeartbeats(); + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + const connToClose = this.conn; + this.waitForBufferDone(connToClose, () => { + if (code) { + connToClose.close(code, reason || ""); + } else { + connToClose.close(); + } + this.waitForSocketClosed(connToClose, () => { + if (this.conn === connToClose) { + this.conn.onopen = function() { + }; + this.conn.onerror = function() { + }; + this.conn.onmessage = function() { + }; + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(conn, callback, tries = 1) { + if (tries === 5 || !conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(conn, callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(conn, callback, tries = 1) { + if (tries === 5 || conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(conn, callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + if (this.conn) this.conn.onclose = () => { + }; + let closeCode = event && event.code; + if (this.hasLogger()) this.log("transport", "close", event); + this.triggerChanError(); + this.clearHeartbeats(); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + /** + * @private + */ + onConnError(error) { + if (this.hasLogger()) this.log("transport", "error", error); 
+ let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + /** + * @private + */ + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + /** + * @returns {string} + */ + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + /** + * @returns {boolean} + */ + isConnected() { + return this.connectionState() === "open"; + } + /** + * @private + * + * @param {Channel} + */ + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c !== channel); + } + /** + * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations. 
+ * + * @param {refs} - list of refs returned by calls to + * `onOpen`, `onClose`, `onError,` and `onMessage` + */ + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + /** + * Initiates a new channel for the given topic + * + * @param {string} topic + * @param {Object} chanParams - Parameters for the channel + * @returns {Channel} + */ + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + /** + * @param {Object} data + */ + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + /** + * Return the next message ref, accounting for overflows + * @returns {string} + */ + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + this.clearHeartbeats(); + this.pendingHeartbeatRef = 
null; + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } +}; +//# sourceMappingURL=phoenix.cjs.js.map diff --git a/deps/phoenix/priv/static/phoenix.cjs.js.map b/deps/phoenix/priv/static/phoenix.cjs.js.map new file mode 100644 index 0000000..f6d4a94 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.cjs.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["../../assets/js/phoenix/index.js", "../../assets/js/phoenix/utils.js", "../../assets/js/phoenix/constants.js", "../../assets/js/phoenix/push.js", "../../assets/js/phoenix/timer.js", "../../assets/js/phoenix/channel.js", "../../assets/js/phoenix/ajax.js", "../../assets/js/phoenix/longpoll.js", "../../assets/js/phoenix/presence.js", "../../assets/js/phoenix/serializer.js", "../../assets/js/phoenix/socket.js"], + "sourcesContent": ["/**\n * Phoenix Channels JavaScript client\n *\n * ## Socket Connection\n *\n * A single connection is established to the server and\n * channels are multiplexed over the connection.\n * Connect to the server using the `Socket` class:\n *\n * ```javascript\n * let socket = new Socket(\"/socket\", {params: {userToken: \"123\"}})\n * socket.connect()\n * ```\n *\n * The `Socket` 
constructor takes the mount point of the socket,\n * the authentication params, as well as options that can be found in\n * the Socket docs, such as configuring the `LongPoll` transport, and\n * heartbeat.\n *\n * ## Channels\n *\n * Channels are isolated, concurrent processes on the server that\n * subscribe to topics and broker events between the client and server.\n * To join a channel, you must provide the topic, and channel params for\n * authorization. Here's an example chat room example where `\"new_msg\"`\n * events are listened for, messages are pushed to the server, and\n * the channel is joined with ok/error/timeout matches:\n *\n * ```\n * let channel = socket.channel(\"room:123\", {token: roomToken})\n * channel.on(\"new_msg\", msg => console.log(\"Got message\", msg) )\n * $input.onEnter( e => {\n * channel.push(\"new_msg\", {body: e.target.val}, 10000)\n * .receive(\"ok\", (msg) => console.log(\"created message\", msg) )\n * .receive(\"error\", (reasons) => console.log(\"create failed\", reasons) )\n * .receive(\"timeout\", () => console.log(\"Networking issue...\") )\n * })\n *\n * channel.join()\n * .receive(\"ok\", ({messages}) => console.log(\"catching up\", messages) )\n * .receive(\"error\", ({reason}) => console.log(\"failed join\", reason) )\n * .receive(\"timeout\", () => console.log(\"Networking issue. Still waiting...\"))\n *```\n *\n * ## Joining\n *\n * Creating a channel with `socket.channel(topic, params)`, binds the params to\n * `channel.params`, which are sent up on `channel.join()`.\n * Subsequent rejoins will send up the modified params for\n * updating authorization params, or passing up last_message_id information.\n * Successful joins receive an \"ok\" status, while unsuccessful joins\n * receive \"error\".\n *\n * With the default serializers and WebSocket transport, JSON text frames are\n * used for pushing a JSON object literal. 
If an `ArrayBuffer` instance is provided,\n * binary encoding will be used and the message will be sent with the binary\n * opcode.\n *\n * *Note*: binary messages are only supported on the WebSocket transport.\n *\n * ## Duplicate Join Subscriptions\n *\n * While the client may join any number of topics on any number of channels,\n * the client may only hold a single subscription for each unique topic at any\n * given time. When attempting to create a duplicate subscription,\n * the server will close the existing channel, log a warning, and\n * spawn a new channel for the topic. The client will have their\n * `channel.onClose` callbacks fired for the existing channel, and the new\n * channel join will have its receive hooks processed as normal.\n *\n * ## Pushing Messages\n *\n * From the previous example, we can see that pushing messages to the server\n * can be done with `channel.push(eventName, payload)` and we can optionally\n * receive responses from the push. Additionally, we can use\n * `receive(\"timeout\", callback)` to abort waiting for our other `receive` hooks\n * and take action after some period of waiting. The default timeout is 10000ms.\n *\n *\n * ## Socket Hooks\n *\n * Lifecycle events of the multiplexed connection can be hooked into via\n * `socket.onError()` and `socket.onClose()` events, ie:\n *\n * ```\n * socket.onError( () => console.log(\"there was an error with the connection!\") )\n * socket.onClose( () => console.log(\"the connection dropped\") )\n * ```\n *\n *\n * ## Channel Hooks\n *\n * For each joined channel, you can bind to `onError` and `onClose` events\n * to monitor the channel lifecycle, ie:\n *\n * ```\n * channel.onError( () => console.log(\"there was an error!\") )\n * channel.onClose( () => console.log(\"the channel has gone away gracefully\") )\n * ```\n *\n * ### onError hooks\n *\n * `onError` hooks are invoked if the socket connection drops, or the channel\n * crashes on the server. 
In either case, a channel rejoin is attempted\n * automatically in an exponential backoff manner.\n *\n * ### onClose hooks\n *\n * `onClose` hooks are invoked only in two cases. 1) the channel explicitly\n * closed on the server, or 2). The client explicitly closed, by calling\n * `channel.leave()`\n *\n *\n * ## Presence\n *\n * The `Presence` object provides features for syncing presence information\n * from the server with the client and handling presences joining and leaving.\n *\n * ### Syncing state from the server\n *\n * To sync presence state from the server, first instantiate an object and\n * pass your channel in to track lifecycle events:\n *\n * ```\n * let channel = socket.channel(\"some:topic\")\n * let presence = new Presence(channel)\n * ```\n *\n * Next, use the `presence.onSync` callback to react to state changes\n * from the server. For example, to render the list of users every time\n * the list changes, you could write:\n *\n * ```\n * presence.onSync(() => {\n * myRenderUsersFunction(presence.list())\n * })\n * ```\n *\n * ### Listing Presences\n *\n * `presence.list` is used to return a list of presence information\n * based on the local state of metadata. By default, all presence\n * metadata is returned, but a `listBy` function can be supplied to\n * allow the client to select which metadata to use for a given presence.\n * For example, you may have a user online from different devices with\n * a metadata status of \"online\", but they have set themselves to \"away\"\n * on another device. In this case, the app may choose to use the \"away\"\n * status for what appears on the UI. The example below defines a `listBy`\n * function which prioritizes the first metadata which was registered for\n * each user. 
This could be the first tab they opened, or the first device\n * they came online from:\n *\n * ```\n * let listBy = (id, {metas: [first, ...rest]}) => {\n * first.count = rest.length + 1 // count of this user's presences\n * first.id = id\n * return first\n * }\n * let onlineUsers = presence.list(listBy)\n * ```\n *\n * ### Handling individual presence join and leave events\n *\n * The `presence.onJoin` and `presence.onLeave` callbacks can be used to\n * react to individual presences joining and leaving the app. For example:\n *\n * ```\n * let presence = new Presence(channel)\n *\n * // detect if user has joined for the 1st time or from another tab/device\n * presence.onJoin((id, current, newPres) => {\n * if(!current){\n * console.log(\"user has entered for the first time\", newPres)\n * } else {\n * console.log(\"user additional presence\", newPres)\n * }\n * })\n *\n * // detect if user has left from all tabs/devices, or is still present\n * presence.onLeave((id, current, leftPres) => {\n * if(current.metas.length === 0){\n * console.log(\"user has left from all devices\", leftPres)\n * } else {\n * console.log(\"user left from a device\", leftPres)\n * }\n * })\n * // receive presence data from server\n * presence.onSync(() => {\n * displayUsers(presence.list())\n * })\n * ```\n * @module phoenix\n */\n\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Presence from \"./presence\"\nimport Serializer from \"./serializer\"\nimport Socket from \"./socket\"\n\nexport {\n Channel,\n LongPoll,\n Presence,\n Serializer,\n Socket\n}\n", "// wraps value in closure or returns closure\nexport let closure = (value) => {\n if(typeof value === \"function\"){\n return value\n } else {\n let closure = function (){ return value }\n return closure\n }\n}\n", "export const globalSelf = typeof self !== \"undefined\" ? self : null\nexport const phxWindow = typeof window !== \"undefined\" ? 
window : null\nexport const global = globalSelf || phxWindow || globalThis\nexport const DEFAULT_VSN = \"2.0.0\"\nexport const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3}\nexport const MAX_LONGPOLL_BATCH_SIZE = 100;\nexport const DEFAULT_TIMEOUT = 10000\nexport const WS_CLOSE_NORMAL = 1000\nexport const CHANNEL_STATES = {\n closed: \"closed\",\n errored: \"errored\",\n joined: \"joined\",\n joining: \"joining\",\n leaving: \"leaving\",\n}\nexport const CHANNEL_EVENTS = {\n close: \"phx_close\",\n error: \"phx_error\",\n join: \"phx_join\",\n reply: \"phx_reply\",\n leave: \"phx_leave\"\n}\n\nexport const TRANSPORTS = {\n longpoll: \"longpoll\",\n websocket: \"websocket\"\n}\nexport const XHR_STATES = {\n complete: 4\n}\nexport const AUTH_TOKEN_PREFIX = \"base64url.bearer.phx.\"\n", "/**\n * Initializes the Push\n * @param {Channel} channel - The Channel\n * @param {string} event - The event, for example `\"phx_join\"`\n * @param {Object} payload - The payload, for example `{user_id: 123}`\n * @param {number} timeout - The push timeout in milliseconds\n */\nexport default class Push {\n constructor(channel, event, payload, timeout){\n this.channel = channel\n this.event = event\n this.payload = payload || function (){ return {} }\n this.receivedResp = null\n this.timeout = timeout\n this.timeoutTimer = null\n this.recHooks = []\n this.sent = false\n }\n\n /**\n *\n * @param {number} timeout\n */\n resend(timeout){\n this.timeout = timeout\n this.reset()\n this.send()\n }\n\n /**\n *\n */\n send(){\n if(this.hasReceived(\"timeout\")){ return }\n this.startTimeout()\n this.sent = true\n this.channel.socket.push({\n topic: this.channel.topic,\n event: this.event,\n payload: this.payload(),\n ref: this.ref,\n join_ref: this.channel.joinRef()\n })\n }\n\n /**\n *\n * @param {*} status\n * @param {*} callback\n */\n receive(status, callback){\n if(this.hasReceived(status)){\n callback(this.receivedResp.response)\n }\n\n this.recHooks.push({status, 
callback})\n return this\n }\n\n /**\n * @private\n */\n reset(){\n this.cancelRefEvent()\n this.ref = null\n this.refEvent = null\n this.receivedResp = null\n this.sent = false\n }\n\n /**\n * @private\n */\n matchReceive({status, response, _ref}){\n this.recHooks.filter(h => h.status === status)\n .forEach(h => h.callback(response))\n }\n\n /**\n * @private\n */\n cancelRefEvent(){\n if(!this.refEvent){ return }\n this.channel.off(this.refEvent)\n }\n\n /**\n * @private\n */\n cancelTimeout(){\n clearTimeout(this.timeoutTimer)\n this.timeoutTimer = null\n }\n\n /**\n * @private\n */\n startTimeout(){\n if(this.timeoutTimer){ this.cancelTimeout() }\n this.ref = this.channel.socket.makeRef()\n this.refEvent = this.channel.replyEventName(this.ref)\n\n this.channel.on(this.refEvent, payload => {\n this.cancelRefEvent()\n this.cancelTimeout()\n this.receivedResp = payload\n this.matchReceive(payload)\n })\n\n this.timeoutTimer = setTimeout(() => {\n this.trigger(\"timeout\", {})\n }, this.timeout)\n }\n\n /**\n * @private\n */\n hasReceived(status){\n return this.receivedResp && this.receivedResp.status === status\n }\n\n /**\n * @private\n */\n trigger(status, response){\n this.channel.trigger(this.refEvent, {status, response})\n }\n}\n", "/**\n *\n * Creates a timer that accepts a `timerCalc` function to perform\n * calculated timeout retries, such as exponential backoff.\n *\n * @example\n * let reconnectTimer = new Timer(() => this.connect(), function(tries){\n * return [1000, 5000, 10000][tries - 1] || 10000\n * })\n * reconnectTimer.scheduleTimeout() // fires after 1000\n * reconnectTimer.scheduleTimeout() // fires after 5000\n * reconnectTimer.reset()\n * reconnectTimer.scheduleTimeout() // fires after 1000\n *\n * @param {Function} callback\n * @param {Function} timerCalc\n */\nexport default class Timer {\n constructor(callback, timerCalc){\n this.callback = callback\n this.timerCalc = timerCalc\n this.timer = null\n this.tries = 0\n }\n\n reset(){\n 
this.tries = 0\n clearTimeout(this.timer)\n }\n\n /**\n * Cancels any previous scheduleTimeout and schedules callback\n */\n scheduleTimeout(){\n clearTimeout(this.timer)\n\n this.timer = setTimeout(() => {\n this.tries = this.tries + 1\n this.callback()\n }, this.timerCalc(this.tries + 1))\n }\n}\n", "import {closure} from \"./utils\"\nimport {\n CHANNEL_EVENTS,\n CHANNEL_STATES,\n} from \"./constants\"\n\nimport Push from \"./push\"\nimport Timer from \"./timer\"\n\n/**\n *\n * @param {string} topic\n * @param {(Object|function)} params\n * @param {Socket} socket\n */\nexport default class Channel {\n constructor(topic, params, socket){\n this.state = CHANNEL_STATES.closed\n this.topic = topic\n this.params = closure(params || {})\n this.socket = socket\n this.bindings = []\n this.bindingRef = 0\n this.timeout = this.socket.timeout\n this.joinedOnce = false\n this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout)\n this.pushBuffer = []\n this.stateChangeRefs = []\n\n this.rejoinTimer = new Timer(() => {\n if(this.socket.isConnected()){ this.rejoin() }\n }, this.socket.rejoinAfterMs)\n this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset()))\n this.stateChangeRefs.push(this.socket.onOpen(() => {\n this.rejoinTimer.reset()\n if(this.isErrored()){ this.rejoin() }\n })\n )\n this.joinPush.receive(\"ok\", () => {\n this.state = CHANNEL_STATES.joined\n this.rejoinTimer.reset()\n this.pushBuffer.forEach(pushEvent => pushEvent.send())\n this.pushBuffer = []\n })\n this.joinPush.receive(\"error\", () => {\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.onClose(() => {\n this.rejoinTimer.reset()\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `close ${this.topic} ${this.joinRef()}`)\n this.state = CHANNEL_STATES.closed\n this.socket.remove(this)\n })\n this.onError(reason => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `error 
${this.topic}`, reason)\n if(this.isJoining()){ this.joinPush.reset() }\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.joinPush.receive(\"timeout\", () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout)\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout)\n leavePush.send()\n this.state = CHANNEL_STATES.errored\n this.joinPush.reset()\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.on(CHANNEL_EVENTS.reply, (payload, ref) => {\n this.trigger(this.replyEventName(ref), payload)\n })\n }\n\n /**\n * Join the channel\n * @param {integer} timeout\n * @returns {Push}\n */\n join(timeout = this.timeout){\n if(this.joinedOnce){\n throw new Error(\"tried to join multiple times. 'join' can only be called a single time per channel instance\")\n } else {\n this.timeout = timeout\n this.joinedOnce = true\n this.rejoin()\n return this.joinPush\n }\n }\n\n /**\n * Hook into channel close\n * @param {Function} callback\n */\n onClose(callback){\n this.on(CHANNEL_EVENTS.close, callback)\n }\n\n /**\n * Hook into channel errors\n * @param {Function} callback\n */\n onError(callback){\n return this.on(CHANNEL_EVENTS.error, reason => callback(reason))\n }\n\n /**\n * Subscribes on channel events\n *\n * Subscription returns a ref counter, which can be used later to\n * unsubscribe the exact event listener\n *\n * @example\n * const ref1 = channel.on(\"event\", do_stuff)\n * const ref2 = channel.on(\"event\", do_other_stuff)\n * channel.off(\"event\", ref1)\n * // Since unsubscription, do_stuff won't fire,\n * // while do_other_stuff will keep firing on the \"event\"\n *\n * @param {string} event\n * @param {Function} callback\n * @returns {integer} ref\n */\n on(event, callback){\n let ref = this.bindingRef++\n this.bindings.push({event, ref, callback})\n return ref\n 
}\n\n /**\n * Unsubscribes off of channel events\n *\n * Use the ref returned from a channel.on() to unsubscribe one\n * handler, or pass nothing for the ref to unsubscribe all\n * handlers for the given event.\n *\n * @example\n * // Unsubscribe the do_stuff handler\n * const ref1 = channel.on(\"event\", do_stuff)\n * channel.off(\"event\", ref1)\n *\n * // Unsubscribe all handlers from event\n * channel.off(\"event\")\n *\n * @param {string} event\n * @param {integer} ref\n */\n off(event, ref){\n this.bindings = this.bindings.filter((bind) => {\n return !(bind.event === event && (typeof ref === \"undefined\" || ref === bind.ref))\n })\n }\n\n /**\n * @private\n */\n canPush(){ return this.socket.isConnected() && this.isJoined() }\n\n /**\n * Sends a message `event` to phoenix with the payload `payload`.\n * Phoenix receives this in the `handle_in(event, payload, socket)`\n * function. if phoenix replies or it times out (default 10000ms),\n * then optionally the reply can be received.\n *\n * @example\n * channel.push(\"event\")\n * .receive(\"ok\", payload => console.log(\"phoenix replied:\", payload))\n * .receive(\"error\", err => console.log(\"phoenix errored\", err))\n * .receive(\"timeout\", () => console.log(\"timed out pushing\"))\n * @param {string} event\n * @param {Object} payload\n * @param {number} [timeout]\n * @returns {Push}\n */\n push(event, payload, timeout = this.timeout){\n payload = payload || {}\n if(!this.joinedOnce){\n throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`)\n }\n let pushEvent = new Push(this, event, function (){ return payload }, timeout)\n if(this.canPush()){\n pushEvent.send()\n } else {\n pushEvent.startTimeout()\n this.pushBuffer.push(pushEvent)\n }\n\n return pushEvent\n }\n\n /** Leaves the channel\n *\n * Unsubscribes from server events, and\n * instructs channel to terminate on server\n *\n * Triggers onClose() hooks\n *\n * To receive leave acknowledgements, use the `receive`\n * hook to bind to the server ack, ie:\n *\n * @example\n * channel.leave().receive(\"ok\", () => alert(\"left!\") )\n *\n * @param {integer} timeout\n * @returns {Push}\n */\n leave(timeout = this.timeout){\n this.rejoinTimer.reset()\n this.joinPush.cancelTimeout()\n\n this.state = CHANNEL_STATES.leaving\n let onClose = () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `leave ${this.topic}`)\n this.trigger(CHANNEL_EVENTS.close, \"leave\")\n }\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout)\n leavePush.receive(\"ok\", () => onClose())\n .receive(\"timeout\", () => onClose())\n leavePush.send()\n if(!this.canPush()){ leavePush.trigger(\"ok\", {}) }\n\n return leavePush\n }\n\n /**\n * Overridable message hook\n *\n * Receives all events for specialized message handling\n * before dispatching to the channel callbacks.\n *\n * Must return the payload, modified or unmodified\n * @param {string} event\n * @param {Object} payload\n * @param {integer} ref\n * @returns {Object}\n */\n onMessage(_event, payload, _ref){ return payload }\n\n /**\n * @private\n */\n isMember(topic, event, payload, joinRef){\n if(this.topic !== topic){ return false }\n\n if(joinRef && joinRef !== this.joinRef()){\n if(this.socket.hasLogger()) this.socket.log(\"channel\", \"dropping outdated message\", {topic, event, payload, joinRef})\n return false\n } else {\n return true\n }\n }\n\n /**\n * @private\n */\n joinRef(){ return this.joinPush.ref }\n\n /**\n * @private\n */\n 
rejoin(timeout = this.timeout){\n if(this.isLeaving()){ return }\n this.socket.leaveOpenTopic(this.topic)\n this.state = CHANNEL_STATES.joining\n this.joinPush.resend(timeout)\n }\n\n /**\n * @private\n */\n trigger(event, payload, ref, joinRef){\n let handledPayload = this.onMessage(event, payload, ref, joinRef)\n if(payload && !handledPayload){ throw new Error(\"channel onMessage callbacks must return the payload, modified or unmodified\") }\n\n let eventBindings = this.bindings.filter(bind => bind.event === event)\n\n for(let i = 0; i < eventBindings.length; i++){\n let bind = eventBindings[i]\n bind.callback(handledPayload, ref, joinRef || this.joinRef())\n }\n }\n\n /**\n * @private\n */\n replyEventName(ref){ return `chan_reply_${ref}` }\n\n /**\n * @private\n */\n isClosed(){ return this.state === CHANNEL_STATES.closed }\n\n /**\n * @private\n */\n isErrored(){ return this.state === CHANNEL_STATES.errored }\n\n /**\n * @private\n */\n isJoined(){ return this.state === CHANNEL_STATES.joined }\n\n /**\n * @private\n */\n isJoining(){ return this.state === CHANNEL_STATES.joining }\n\n /**\n * @private\n */\n isLeaving(){ return this.state === CHANNEL_STATES.leaving }\n}\n", "import {\n global,\n XHR_STATES\n} from \"./constants\"\n\nexport default class Ajax {\n\n static request(method, endPoint, headers, body, timeout, ontimeout, callback){\n if(global.XDomainRequest){\n let req = new global.XDomainRequest() // IE8, IE9\n return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback)\n } else if(global.XMLHttpRequest){\n let req = new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari\n return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback)\n } else if(global.fetch && global.AbortController){\n // Fetch with AbortController for modern browsers\n return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback)\n } else {\n throw new Error(\"No suitable XMLHttpRequest 
implementation found\")\n }\n }\n\n static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback){\n let options = {\n method,\n headers,\n body,\n }\n let controller = null\n if(timeout){\n controller = new AbortController()\n const _timeoutId = setTimeout(() => controller.abort(), timeout)\n options.signal = controller.signal\n }\n global.fetch(endPoint, options)\n .then(response => response.text())\n .then(data => this.parseJSON(data))\n .then(data => callback && callback(data))\n .catch(err => {\n if(err.name === \"AbortError\" && ontimeout){\n ontimeout()\n } else {\n callback && callback(null)\n }\n })\n return controller\n }\n\n static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){\n req.timeout = timeout\n req.open(method, endPoint)\n req.onload = () => {\n let response = this.parseJSON(req.responseText)\n callback && callback(response)\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n // Work around bug in IE9 that requires an attached onprogress handler\n req.onprogress = () => { }\n\n req.send(body)\n return req\n }\n\n static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback){\n req.open(method, endPoint, true)\n req.timeout = timeout\n for(let [key, value] of Object.entries(headers)){\n req.setRequestHeader(key, value)\n }\n req.onerror = () => callback && callback(null)\n req.onreadystatechange = () => {\n if(req.readyState === XHR_STATES.complete && callback){\n let response = this.parseJSON(req.responseText)\n callback(response)\n }\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n req.send(body)\n return req\n }\n\n static parseJSON(resp){\n if(!resp || resp === \"\"){ return null }\n\n try {\n return JSON.parse(resp)\n } catch {\n console && console.log(\"failed to parse JSON response\", resp)\n return null\n }\n }\n\n static serialize(obj, parentKey){\n let queryStr = []\n for(var key in obj){\n if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue }\n let 
paramKey = parentKey ? `${parentKey}[${key}]` : key\n let paramVal = obj[key]\n if(typeof paramVal === \"object\"){\n queryStr.push(this.serialize(paramVal, paramKey))\n } else {\n queryStr.push(encodeURIComponent(paramKey) + \"=\" + encodeURIComponent(paramVal))\n }\n }\n return queryStr.join(\"&\")\n }\n\n static appendParams(url, params){\n if(Object.keys(params).length === 0){ return url }\n\n let prefix = url.match(/\\?/) ? \"&\" : \"?\"\n return `${url}${prefix}${this.serialize(params)}`\n }\n}\n", "import {\n SOCKET_STATES,\n TRANSPORTS,\n AUTH_TOKEN_PREFIX,\n MAX_LONGPOLL_BATCH_SIZE\n} from \"./constants\"\n\nimport Ajax from \"./ajax\"\n\nlet arrayBufferToBase64 = (buffer) => {\n let binary = \"\"\n let bytes = new Uint8Array(buffer)\n let len = bytes.byteLength\n for(let i = 0; i < len; i++){ binary += String.fromCharCode(bytes[i]) }\n return btoa(binary)\n}\n\nexport default class LongPoll {\n\n constructor(endPoint, protocols){\n // we only support subprotocols for authToken\n // [\"phoenix\", \"base64url.bearer.phx.BASE64_ENCODED_TOKEN\"]\n if(protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)){\n this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length))\n }\n this.endPoint = null\n this.token = null\n this.skipHeartbeat = true\n this.reqs = new Set()\n this.awaitingBatchAck = false\n this.currentBatch = null\n this.currentBatchTimer = null\n this.batchBuffer = []\n this.onopen = function (){ } // noop\n this.onerror = function (){ } // noop\n this.onmessage = function (){ } // noop\n this.onclose = function (){ } // noop\n this.pollEndpoint = this.normalizeEndpoint(endPoint)\n this.readyState = SOCKET_STATES.connecting\n // we must wait for the caller to finish setting up our callbacks and timeout properties\n setTimeout(() => this.poll(), 0)\n }\n\n normalizeEndpoint(endPoint){\n return (endPoint\n .replace(\"ws://\", \"http://\")\n .replace(\"wss://\", \"https://\")\n .replace(new RegExp(\"(.*)\\/\" + 
TRANSPORTS.websocket), \"$1/\" + TRANSPORTS.longpoll))\n }\n\n endpointURL(){\n return Ajax.appendParams(this.pollEndpoint, {token: this.token})\n }\n\n closeAndRetry(code, reason, wasClean){\n this.close(code, reason, wasClean)\n this.readyState = SOCKET_STATES.connecting\n }\n\n ontimeout(){\n this.onerror(\"timeout\")\n this.closeAndRetry(1005, \"timeout\", false)\n }\n\n isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting }\n\n poll(){\n const headers = {\"Accept\": \"application/json\"}\n if(this.authToken){\n headers[\"X-Phoenix-AuthToken\"] = this.authToken\n }\n this.ajax(\"GET\", headers, null, () => this.ontimeout(), resp => {\n if(resp){\n var {status, token, messages} = resp\n if(status === 410 && this.token !== null){\n // In case we already have a token, this means that our existing session\n // is gone. We fail so that the client rejoins its channels.\n this.onerror(410)\n this.closeAndRetry(3410, \"session_gone\", false)\n return\n }\n this.token = token\n } else {\n status = 0\n }\n\n switch(status){\n case 200:\n messages.forEach(msg => {\n // Tasks are what things like event handlers, setTimeout callbacks,\n // promise resolves and more are run within.\n // In modern browsers, there are two different kinds of tasks,\n // microtasks and macrotasks.\n // Microtasks are mainly used for Promises, while macrotasks are\n // used for everything else.\n // Microtasks always have priority over macrotasks. If the JS engine\n // is looking for a task to run, it will always try to empty the\n // microtask queue before attempting to run anything from the\n // macrotask queue.\n //\n // For the WebSocket transport, messages always arrive in their own\n // event. 
This means that if any promises are resolved from within,\n // their callbacks will always finish execution by the time the\n // next message event handler is run.\n //\n // In order to emulate this behaviour, we need to make sure each\n // onmessage handler is run within its own macrotask.\n setTimeout(() => this.onmessage({data: msg}), 0)\n })\n this.poll()\n break\n case 204:\n this.poll()\n break\n case 410:\n this.readyState = SOCKET_STATES.open\n this.onopen({})\n this.poll()\n break\n case 403:\n this.onerror(403)\n this.close(1008, \"forbidden\", false)\n break\n case 0:\n case 500:\n this.onerror(500)\n this.closeAndRetry(1011, \"internal server error\", 500)\n break\n default: throw new Error(`unhandled poll status ${status}`)\n }\n })\n }\n\n // we collect all pushes within the current event loop by\n // setTimeout 0, which optimizes back-to-back procedural\n // pushes against an empty buffer\n\n send(body){\n if(typeof(body) !== \"string\"){ body = arrayBufferToBase64(body) }\n if(this.currentBatch){\n this.currentBatch.push(body)\n } else if(this.awaitingBatchAck){\n this.batchBuffer.push(body)\n } else {\n this.currentBatch = [body]\n this.currentBatchTimer = setTimeout(() => {\n this.batchSend(this.currentBatch)\n this.currentBatch = null\n }, 0)\n }\n }\n\n batchSend(messages, offset = 0){\n this.awaitingBatchAck = true\n const next = offset + MAX_LONGPOLL_BATCH_SIZE\n const batch = messages.slice(offset, next)\n this.ajax(\"POST\", {\"Content-Type\": \"application/x-ndjson\"}, batch.join(\"\\n\"), () => this.onerror(\"timeout\"), resp => {\n if(!resp || resp.status !== 200){\n this.awaitingBatchAck = false\n this.onerror(resp && resp.status)\n this.closeAndRetry(1011, \"internal server error\", false)\n } else if(next < messages.length){\n this.batchSend(messages, next)\n } else if(this.batchBuffer.length > 0){\n this.batchSend(this.batchBuffer)\n this.batchBuffer = []\n } else {\n this.awaitingBatchAck = false\n }\n })\n }\n\n close(code, reason, 
wasClean){\n for(let req of this.reqs){ req.abort() }\n this.readyState = SOCKET_STATES.closed\n let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean})\n this.batchBuffer = []\n clearTimeout(this.currentBatchTimer)\n this.currentBatchTimer = null\n if(typeof(CloseEvent) !== \"undefined\"){\n this.onclose(new CloseEvent(\"close\", opts))\n } else {\n this.onclose(opts)\n }\n }\n\n ajax(method, headers, body, onCallerTimeout, callback){\n let req\n let ontimeout = () => {\n this.reqs.delete(req)\n onCallerTimeout()\n }\n req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, resp => {\n this.reqs.delete(req)\n if(this.isActive()){ callback(resp) }\n })\n this.reqs.add(req)\n }\n}\n", "/**\n * Initializes the Presence\n * @param {Channel} channel - The Channel\n * @param {Object} opts - The options,\n * for example `{events: {state: \"state\", diff: \"diff\"}}`\n */\nexport default class Presence {\n\n constructor(channel, opts = {}){\n let events = opts.events || {state: \"presence_state\", diff: \"presence_diff\"}\n this.state = {}\n this.pendingDiffs = []\n this.channel = channel\n this.joinRef = null\n this.caller = {\n onJoin: function (){ },\n onLeave: function (){ },\n onSync: function (){ }\n }\n\n this.channel.on(events.state, newState => {\n let {onJoin, onLeave, onSync} = this.caller\n\n this.joinRef = this.channel.joinRef()\n this.state = Presence.syncState(this.state, newState, onJoin, onLeave)\n\n this.pendingDiffs.forEach(diff => {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n })\n this.pendingDiffs = []\n onSync()\n })\n\n this.channel.on(events.diff, diff => {\n let {onJoin, onLeave, onSync} = this.caller\n\n if(this.inPendingSyncState()){\n this.pendingDiffs.push(diff)\n } else {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n onSync()\n }\n })\n }\n\n onJoin(callback){ this.caller.onJoin = callback }\n\n onLeave(callback){ 
this.caller.onLeave = callback }\n\n onSync(callback){ this.caller.onSync = callback }\n\n list(by){ return Presence.list(this.state, by) }\n\n inPendingSyncState(){\n return !this.joinRef || (this.joinRef !== this.channel.joinRef())\n }\n\n // lower-level public static API\n\n /**\n * Used to sync the list of presences on the server\n * with the client's state. An optional `onJoin` and `onLeave` callback can\n * be provided to react to changes in the client's local presences across\n * disconnects and reconnects with the server.\n *\n * @returns {Presence}\n */\n static syncState(currentState, newState, onJoin, onLeave){\n let state = this.clone(currentState)\n let joins = {}\n let leaves = {}\n\n this.map(state, (key, presence) => {\n if(!newState[key]){\n leaves[key] = presence\n }\n })\n this.map(newState, (key, newPresence) => {\n let currentPresence = state[key]\n if(currentPresence){\n let newRefs = newPresence.metas.map(m => m.phx_ref)\n let curRefs = currentPresence.metas.map(m => m.phx_ref)\n let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0)\n let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0)\n if(joinedMetas.length > 0){\n joins[key] = newPresence\n joins[key].metas = joinedMetas\n }\n if(leftMetas.length > 0){\n leaves[key] = this.clone(currentPresence)\n leaves[key].metas = leftMetas\n }\n } else {\n joins[key] = newPresence\n }\n })\n return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave)\n }\n\n /**\n *\n * Used to sync a diff of presence join and leave\n * events from the server, as they happen. 
Like `syncState`, `syncDiff`\n * accepts optional `onJoin` and `onLeave` callbacks to react to a user\n * joining or leaving from a device.\n *\n * @returns {Presence}\n */\n static syncDiff(state, diff, onJoin, onLeave){\n let {joins, leaves} = this.clone(diff)\n if(!onJoin){ onJoin = function (){ } }\n if(!onLeave){ onLeave = function (){ } }\n\n this.map(joins, (key, newPresence) => {\n let currentPresence = state[key]\n state[key] = this.clone(newPresence)\n if(currentPresence){\n let joinedRefs = state[key].metas.map(m => m.phx_ref)\n let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0)\n state[key].metas.unshift(...curMetas)\n }\n onJoin(key, currentPresence, newPresence)\n })\n this.map(leaves, (key, leftPresence) => {\n let currentPresence = state[key]\n if(!currentPresence){ return }\n let refsToRemove = leftPresence.metas.map(m => m.phx_ref)\n currentPresence.metas = currentPresence.metas.filter(p => {\n return refsToRemove.indexOf(p.phx_ref) < 0\n })\n onLeave(key, currentPresence, leftPresence)\n if(currentPresence.metas.length === 0){\n delete state[key]\n }\n })\n return state\n }\n\n /**\n * Returns the array of presences, with selected metadata.\n *\n * @param {Object} presences\n * @param {Function} chooser\n *\n * @returns {Presence}\n */\n static list(presences, chooser){\n if(!chooser){ chooser = function (key, pres){ return pres } }\n\n return this.map(presences, (key, presence) => {\n return chooser(key, presence)\n })\n }\n\n // private\n\n static map(obj, func){\n return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key]))\n }\n\n static clone(obj){ return JSON.parse(JSON.stringify(obj)) }\n}\n", "/* The default serializer for encoding and decoding messages */\nimport {\n CHANNEL_EVENTS\n} from \"./constants\"\n\nexport default {\n HEADER_LENGTH: 1,\n META_LENGTH: 4,\n KINDS: {push: 0, reply: 1, broadcast: 2},\n\n encode(msg, callback){\n if(msg.payload.constructor === ArrayBuffer){\n return 
callback(this.binaryEncode(msg))\n } else {\n let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]\n return callback(JSON.stringify(payload))\n }\n },\n\n decode(rawPayload, callback){\n if(rawPayload.constructor === ArrayBuffer){\n return callback(this.binaryDecode(rawPayload))\n } else {\n let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload)\n return callback({join_ref, ref, topic, event, payload})\n }\n },\n\n // private\n\n binaryEncode(message){\n let {join_ref, ref, event, topic, payload} = message\n let encoder = new TextEncoder()\n let joinRefBytes = encoder.encode(join_ref)\n let refBytes = encoder.encode(ref)\n let topicBytes = encoder.encode(topic)\n let eventBytes = encoder.encode(event)\n\n this.assertFieldSize(joinRefBytes.byteLength, \"join_ref\")\n this.assertFieldSize(refBytes.byteLength, \"ref\")\n this.assertFieldSize(topicBytes.byteLength, \"topic\")\n this.assertFieldSize(eventBytes.byteLength, \"event\")\n\n let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength\n let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength)\n let headerBytes = new Uint8Array(header)\n let view = new DataView(header)\n let offset = 0\n\n view.setUint8(offset++, this.KINDS.push) // kind\n view.setUint8(offset++, joinRefBytes.byteLength)\n view.setUint8(offset++, refBytes.byteLength)\n view.setUint8(offset++, topicBytes.byteLength)\n view.setUint8(offset++, eventBytes.byteLength)\n headerBytes.set(joinRefBytes, offset); offset += joinRefBytes.byteLength\n headerBytes.set(refBytes, offset); offset += refBytes.byteLength\n headerBytes.set(topicBytes, offset); offset += topicBytes.byteLength\n headerBytes.set(eventBytes, offset); offset += eventBytes.byteLength\n\n var combined = new Uint8Array(header.byteLength + payload.byteLength)\n combined.set(headerBytes, 0)\n combined.set(new Uint8Array(payload), header.byteLength)\n\n return combined.buffer\n },\n\n 
assertFieldSize(size, name){\n if(size > 255){\n throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`)\n }\n },\n\n binaryDecode(buffer){\n let view = new DataView(buffer)\n let kind = view.getUint8(0)\n let decoder = new TextDecoder()\n switch(kind){\n case this.KINDS.push: return this.decodePush(buffer, view, decoder)\n case this.KINDS.reply: return this.decodeReply(buffer, view, decoder)\n case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder)\n }\n },\n\n decodePush(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let topicSize = view.getUint8(2)\n let eventSize = view.getUint8(3)\n let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data}\n },\n\n decodeReply(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let refSize = view.getUint8(2)\n let topicSize = view.getUint8(3)\n let eventSize = view.getUint8(4)\n let offset = this.HEADER_LENGTH + this.META_LENGTH\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let ref = decoder.decode(buffer.slice(offset, offset + refSize))\n offset = offset + refSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n let payload = {status: event, response: data}\n return {join_ref: joinRef, ref: ref, 
topic: topic, event: CHANNEL_EVENTS.reply, payload: payload}\n },\n\n decodeBroadcast(buffer, view, decoder){\n let topicSize = view.getUint8(1)\n let eventSize = view.getUint8(2)\n let offset = this.HEADER_LENGTH + 2\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n\n return {join_ref: null, ref: null, topic: topic, event: event, payload: data}\n }\n}\n", "import {\n global,\n phxWindow,\n CHANNEL_EVENTS,\n DEFAULT_TIMEOUT,\n DEFAULT_VSN,\n SOCKET_STATES,\n TRANSPORTS,\n WS_CLOSE_NORMAL,\n AUTH_TOKEN_PREFIX\n} from \"./constants\"\n\nimport {\n closure\n} from \"./utils\"\n\nimport Ajax from \"./ajax\"\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Serializer from \"./serializer\"\nimport Timer from \"./timer\"\n\n/** Initializes the Socket *\n *\n * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim)\n *\n * @param {string} endPoint - The string WebSocket endpoint, ie, `\"ws://example.com/socket\"`,\n * `\"wss://example.com\"`\n * `\"/socket\"` (inherited host & protocol)\n * @param {Object} [opts] - Optional configuration\n * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll.\n *\n * Defaults to WebSocket with automatic LongPoll fallback if WebSocket is not defined.\n * To fallback to LongPoll when WebSocket attempts fail, use `longPollFallbackMs: 2500`.\n *\n * @param {number} [opts.longPollFallbackMs] - The millisecond time to attempt the primary transport\n * before falling back to the LongPoll transport. Disabled by default.\n *\n * @param {boolean} [opts.debug] - When true, enables debug logging. 
Default false.\n *\n * @param {Function} [opts.encode] - The function to encode outgoing messages.\n *\n * Defaults to JSON encoder.\n *\n * @param {Function} [opts.decode] - The function to decode incoming messages.\n *\n * Defaults to JSON:\n *\n * ```javascript\n * (payload, callback) => callback(JSON.parse(payload))\n * ```\n *\n * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts.\n *\n * Defaults `DEFAULT_TIMEOUT`\n * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message\n * @param {Function} [opts.reconnectAfterMs] - The optional function that returns the\n * socket reconnect interval, in milliseconds.\n *\n * Defaults to stepped backoff of:\n *\n * ```javascript\n * function(tries){\n * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n * }\n * ````\n *\n * @param {Function} [opts.rejoinAfterMs] - The optional function that returns the millisec\n * rejoin interval for individual channels.\n *\n * ```javascript\n * function(tries){\n * return [1000, 2000, 5000][tries - 1] || 10000\n * }\n * ````\n *\n * @param {Function} [opts.logger] - The optional function for specialized logging, ie:\n *\n * ```javascript\n * function(kind, msg, data) {\n * console.log(`${kind}: ${msg}`, data)\n * }\n * ```\n *\n * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request.\n *\n * Defaults to 20s (double the server long poll timer).\n *\n * @param {(Object|function)} [opts.params] - The optional params to pass when connecting\n * @param {string} [opts.authToken] - the optional authentication token to be exposed on the server\n * under the `:auth_token` connect_info key.\n * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames.\n *\n * Defaults to \"arraybuffer\"\n *\n * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect.\n *\n * Defaults to DEFAULT_VSN.\n *\n * @param 
{Object} [opts.sessionStorage] - An optional Storage compatible object\n * Phoenix uses sessionStorage for longpoll fallback history. Overriding the store is\n * useful when Phoenix won't have access to `sessionStorage`. For example, This could\n * happen if a site loads a cross-domain channel in an iframe. Example usage:\n *\n * class InMemoryStorage {\n * constructor() { this.storage = {} }\n * getItem(keyName) { return this.storage[keyName] || null }\n * removeItem(keyName) { delete this.storage[keyName] }\n * setItem(keyName, keyValue) { this.storage[keyName] = keyValue }\n * }\n *\n*/\nexport default class Socket {\n constructor(endPoint, opts = {}){\n this.stateChangeCallbacks = {open: [], close: [], error: [], message: []}\n this.channels = []\n this.sendBuffer = []\n this.ref = 0\n this.fallbackRef = null\n this.timeout = opts.timeout || DEFAULT_TIMEOUT\n this.transport = opts.transport || global.WebSocket || LongPoll\n this.primaryPassedHealthCheck = false\n this.longPollFallbackMs = opts.longPollFallbackMs\n this.fallbackTimer = null\n this.sessionStore = opts.sessionStorage || (global && global.sessionStorage)\n this.establishedConnections = 0\n this.defaultEncoder = Serializer.encode.bind(Serializer)\n this.defaultDecoder = Serializer.decode.bind(Serializer)\n // We start with closeWasClean true to avoid the visibility change\n // logic from connecting if the socket was never connected in the first place.\n // transportConnect sets it to false on open.\n this.closeWasClean = true\n this.disconnecting = false\n this.binaryType = opts.binaryType || \"arraybuffer\"\n this.connectClock = 1\n this.pageHidden = false\n if(this.transport !== LongPoll){\n this.encode = opts.encode || this.defaultEncoder\n this.decode = opts.decode || this.defaultDecoder\n } else {\n this.encode = this.defaultEncoder\n this.decode = this.defaultDecoder\n }\n let awaitingConnectionOnPageShow = null\n if(phxWindow && phxWindow.addEventListener){\n 
phxWindow.addEventListener(\"pagehide\", _e => {\n if(this.conn){\n this.disconnect()\n awaitingConnectionOnPageShow = this.connectClock\n }\n })\n phxWindow.addEventListener(\"pageshow\", _e => {\n if(awaitingConnectionOnPageShow === this.connectClock){\n awaitingConnectionOnPageShow = null\n this.connect()\n }\n })\n phxWindow.addEventListener(\"visibilitychange\", () => {\n if(document.visibilityState === \"hidden\"){\n this.pageHidden = true\n } else {\n this.pageHidden = false\n // reconnect immediately\n if(!this.isConnected() && !this.closeWasClean){\n this.teardown(() => this.connect())\n }\n }\n })\n }\n this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000\n this.rejoinAfterMs = (tries) => {\n if(opts.rejoinAfterMs){\n return opts.rejoinAfterMs(tries)\n } else {\n return [1000, 2000, 5000][tries - 1] || 10000\n }\n }\n this.reconnectAfterMs = (tries) => {\n if(opts.reconnectAfterMs){\n return opts.reconnectAfterMs(tries)\n } else {\n return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n }\n }\n this.logger = opts.logger || null\n if(!this.logger && opts.debug){\n this.logger = (kind, msg, data) => { console.log(`${kind}: ${msg}`, data) }\n }\n this.longpollerTimeout = opts.longpollerTimeout || 20000\n this.params = closure(opts.params || {})\n this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`\n this.vsn = opts.vsn || DEFAULT_VSN\n this.heartbeatTimeoutTimer = null\n this.heartbeatTimer = null\n this.pendingHeartbeatRef = null\n this.reconnectTimer = new Timer(() => {\n if(this.pageHidden){\n this.log(\"Not reconnecting as page is hidden!\")\n this.teardown()\n return\n }\n this.teardown(() => this.connect())\n }, this.reconnectAfterMs)\n this.authToken = opts.authToken\n }\n\n /**\n * Returns the LongPoll transport reference\n */\n getLongPollTransport(){ return LongPoll }\n\n /**\n * Disconnects and replaces the active transport\n *\n * @param {Function} newTransport - The new transport class to instantiate\n *\n */\n 
replaceTransport(newTransport){\n this.connectClock++\n this.closeWasClean = true\n clearTimeout(this.fallbackTimer)\n this.reconnectTimer.reset()\n if(this.conn){\n this.conn.close()\n this.conn = null\n }\n this.transport = newTransport\n }\n\n /**\n * Returns the socket protocol\n *\n * @returns {string}\n */\n protocol(){ return location.protocol.match(/^https/) ? \"wss\" : \"ws\" }\n\n /**\n * The fully qualified socket url\n *\n * @returns {string}\n */\n endPointURL(){\n let uri = Ajax.appendParams(\n Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn})\n if(uri.charAt(0) !== \"/\"){ return uri }\n if(uri.charAt(1) === \"/\"){ return `${this.protocol()}:${uri}` }\n\n return `${this.protocol()}://${location.host}${uri}`\n }\n\n /**\n * Disconnects the socket\n *\n * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes.\n *\n * @param {Function} callback - Optional callback which is called after socket is disconnected.\n * @param {integer} code - A status code for disconnection (Optional).\n * @param {string} reason - A textual description of the reason to disconnect. (Optional)\n */\n disconnect(callback, code, reason){\n this.connectClock++\n this.disconnecting = true\n this.closeWasClean = true\n clearTimeout(this.fallbackTimer)\n this.reconnectTimer.reset()\n this.teardown(() => {\n this.disconnecting = false\n callback && callback()\n }, code, reason)\n }\n\n /**\n *\n * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}`\n *\n * Passing params to connect is deprecated; pass them in the Socket constructor instead:\n * `new Socket(\"/socket\", {params: {user_id: userToken}})`.\n */\n connect(params){\n if(params){\n console && console.log(\"passing params to connect is deprecated. 
Instead pass :params to the Socket constructor\")\n this.params = closure(params)\n }\n if(this.conn && !this.disconnecting){ return }\n if(this.longPollFallbackMs && this.transport !== LongPoll){\n this.connectWithFallback(LongPoll, this.longPollFallbackMs)\n } else {\n this.transportConnect()\n }\n }\n\n /**\n * Logs the message. Override `this.logger` for specialized logging. noops by default\n * @param {string} kind\n * @param {string} msg\n * @param {Object} data\n */\n log(kind, msg, data){ this.logger && this.logger(kind, msg, data) }\n\n /**\n * Returns true if a logger has been set on this socket.\n */\n hasLogger(){ return this.logger !== null }\n\n /**\n * Registers callbacks for connection open events\n *\n * @example socket.onOpen(function(){ console.info(\"the socket was opened\") })\n *\n * @param {Function} callback\n */\n onOpen(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.open.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection close events\n * @param {Function} callback\n */\n onClose(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.close.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection error events\n *\n * @example socket.onError(function(error){ alert(\"An error occurred\") })\n *\n * @param {Function} callback\n */\n onError(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.error.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection message events\n * @param {Function} callback\n */\n onMessage(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.message.push([ref, callback])\n return ref\n }\n\n /**\n * Pings the server and invokes the callback with the RTT in milliseconds\n * @param {Function} callback\n *\n * Returns true if the ping was pushed or false if unable to be pushed.\n */\n ping(callback){\n if(!this.isConnected()){ return false }\n let ref = this.makeRef()\n let 
startTime = Date.now()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: ref})\n let onMsgRef = this.onMessage(msg => {\n if(msg.ref === ref){\n this.off([onMsgRef])\n callback(Date.now() - startTime)\n }\n })\n return true\n }\n\n /**\n * @private\n *\n * @param {Function}\n */\n transportName(transport){\n // JavaScript minification, enabled by default in production in Phoenix\n // projects, renames symbols to reduce code size.\n // See https://esbuild.github.io/api/#keep-names.\n // This helper ensures we return the correct name for the LongPoll transport\n // even after minification. The other common transport is WebSocket, which\n // is native to browsers and does not need special handling.\n switch(transport){\n case LongPoll: return \"LongPoll\"\n default: return transport.name\n }\n }\n\n /**\n * @private\n */\n transportConnect(){\n this.connectClock++\n this.closeWasClean = false\n let protocols = undefined\n // Sec-WebSocket-Protocol based token\n // (longpoll uses Authorization header instead)\n if(this.authToken){\n protocols = [\"phoenix\", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, \"\")}`]\n }\n this.conn = new this.transport(this.endPointURL(), protocols)\n this.conn.binaryType = this.binaryType\n this.conn.timeout = this.longpollerTimeout\n this.conn.onopen = () => this.onConnOpen()\n this.conn.onerror = error => this.onConnError(error)\n this.conn.onmessage = event => this.onConnMessage(event)\n this.conn.onclose = event => this.onConnClose(event)\n }\n\n getSession(key){ return this.sessionStore && this.sessionStore.getItem(key) }\n\n storeSession(key, val){ this.sessionStore && this.sessionStore.setItem(key, val) }\n\n connectWithFallback(fallbackTransport, fallbackThreshold = 2500){\n clearTimeout(this.fallbackTimer)\n let established = false\n let primaryTransport = true\n let openRef, errorRef\n let fallbackTransportName = this.transportName(fallbackTransport)\n let fallback = (reason) => {\n 
this.log(\"transport\", `falling back to ${fallbackTransportName}...`, reason)\n this.off([openRef, errorRef])\n primaryTransport = false\n this.replaceTransport(fallbackTransport)\n this.transportConnect()\n }\n if(this.getSession(`phx:fallback:${fallbackTransportName}`)){ return fallback(\"memorized\") }\n\n this.fallbackTimer = setTimeout(fallback, fallbackThreshold)\n\n errorRef = this.onError(reason => {\n this.log(\"transport\", \"error\", reason)\n if(primaryTransport && !established){\n clearTimeout(this.fallbackTimer)\n fallback(reason)\n }\n })\n if(this.fallbackRef){\n this.off([this.fallbackRef])\n }\n this.fallbackRef = this.onOpen(() => {\n established = true\n if(!primaryTransport){\n let fallbackTransportName = this.transportName(fallbackTransport)\n // only memorize LP if we never connected to primary\n if(!this.primaryPassedHealthCheck){ this.storeSession(`phx:fallback:${fallbackTransportName}`, \"true\") }\n return this.log(\"transport\", `established ${fallbackTransportName} fallback`)\n }\n // if we've established primary, give the fallback a new period to attempt ping\n clearTimeout(this.fallbackTimer)\n this.fallbackTimer = setTimeout(fallback, fallbackThreshold)\n this.ping(rtt => {\n this.log(\"transport\", \"connected to primary after\", rtt)\n this.primaryPassedHealthCheck = true\n clearTimeout(this.fallbackTimer)\n })\n })\n this.transportConnect()\n }\n\n clearHeartbeats(){\n clearTimeout(this.heartbeatTimer)\n clearTimeout(this.heartbeatTimeoutTimer)\n }\n\n onConnOpen(){\n if(this.hasLogger()) this.log(\"transport\", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`)\n this.closeWasClean = false\n this.disconnecting = false\n this.establishedConnections++\n this.flushSendBuffer()\n this.reconnectTimer.reset()\n this.resetHeartbeat()\n this.stateChangeCallbacks.open.forEach(([, callback]) => callback())\n }\n\n /**\n * @private\n */\n\n heartbeatTimeout(){\n if(this.pendingHeartbeatRef){\n 
this.pendingHeartbeatRef = null\n if(this.hasLogger()){ this.log(\"transport\", \"heartbeat timeout. Attempting to re-establish connection\") }\n this.triggerChanError()\n this.closeWasClean = false\n this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, \"heartbeat timeout\")\n }\n }\n\n resetHeartbeat(){\n if(this.conn && this.conn.skipHeartbeat){ return }\n this.pendingHeartbeatRef = null\n this.clearHeartbeats()\n this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n teardown(callback, code, reason){\n if(!this.conn){\n return callback && callback()\n }\n\n // If someone calls connect before we finish tearing down,\n // we create a new connection, but we still want to finish tearing down the old one.\n const connToClose = this.conn\n\n this.waitForBufferDone(connToClose, () => {\n if(code){ connToClose.close(code, reason || \"\") } else { connToClose.close() }\n\n this.waitForSocketClosed(connToClose, () => {\n if(this.conn === connToClose){\n this.conn.onopen = function (){ } // noop\n this.conn.onerror = function (){ } // noop\n this.conn.onmessage = function (){ } // noop\n this.conn.onclose = function (){ } // noop\n this.conn = null\n }\n\n callback && callback()\n })\n })\n }\n\n waitForBufferDone(conn, callback, tries = 1){\n if(tries === 5 || !conn.bufferedAmount){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForBufferDone(conn, callback, tries + 1)\n }, 150 * tries)\n }\n\n waitForSocketClosed(conn, callback, tries = 1){\n if(tries === 5 || conn.readyState === SOCKET_STATES.closed){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForSocketClosed(conn, callback, tries + 1)\n }, 150 * tries)\n }\n\n onConnClose(event){\n if(this.conn) this.conn.onclose = () => {} // noop to prevent recursive calls in teardown\n let closeCode = event && event.code\n if(this.hasLogger()) this.log(\"transport\", \"close\", event)\n this.triggerChanError()\n this.clearHeartbeats()\n 
if(!this.closeWasClean && closeCode !== 1000){\n this.reconnectTimer.scheduleTimeout()\n }\n this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event))\n }\n\n /**\n * @private\n */\n onConnError(error){\n if(this.hasLogger()) this.log(\"transport\", \"error\", error)\n let transportBefore = this.transport\n let establishedBefore = this.establishedConnections\n this.stateChangeCallbacks.error.forEach(([, callback]) => {\n callback(error, transportBefore, establishedBefore)\n })\n if(transportBefore === this.transport || establishedBefore > 0){\n this.triggerChanError()\n }\n }\n\n /**\n * @private\n */\n triggerChanError(){\n this.channels.forEach(channel => {\n if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){\n channel.trigger(CHANNEL_EVENTS.error)\n }\n })\n }\n\n /**\n * @returns {string}\n */\n connectionState(){\n switch(this.conn && this.conn.readyState){\n case SOCKET_STATES.connecting: return \"connecting\"\n case SOCKET_STATES.open: return \"open\"\n case SOCKET_STATES.closing: return \"closing\"\n default: return \"closed\"\n }\n }\n\n /**\n * @returns {boolean}\n */\n isConnected(){ return this.connectionState() === \"open\" }\n\n /**\n * @private\n *\n * @param {Channel}\n */\n remove(channel){\n this.off(channel.stateChangeRefs)\n this.channels = this.channels.filter(c => c !== channel)\n }\n\n /**\n * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations.\n *\n * @param {refs} - list of refs returned by calls to\n * `onOpen`, `onClose`, `onError,` and `onMessage`\n */\n off(refs){\n for(let key in this.stateChangeCallbacks){\n this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => {\n return refs.indexOf(ref) === -1\n })\n }\n }\n\n /**\n * Initiates a new channel for the given topic\n *\n * @param {string} topic\n * @param {Object} chanParams - Parameters for the channel\n * @returns {Channel}\n */\n channel(topic, chanParams = {}){\n let chan = new Channel(topic, 
chanParams, this)\n this.channels.push(chan)\n return chan\n }\n\n /**\n * @param {Object} data\n */\n push(data){\n if(this.hasLogger()){\n let {topic, event, payload, ref, join_ref} = data\n this.log(\"push\", `${topic} ${event} (${join_ref}, ${ref})`, payload)\n }\n\n if(this.isConnected()){\n this.encode(data, result => this.conn.send(result))\n } else {\n this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result)))\n }\n }\n\n /**\n * Return the next message ref, accounting for overflows\n * @returns {string}\n */\n makeRef(){\n let newRef = this.ref + 1\n if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef }\n\n return this.ref.toString()\n }\n\n sendHeartbeat(){\n if(this.pendingHeartbeatRef && !this.isConnected()){ return }\n this.pendingHeartbeatRef = this.makeRef()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: this.pendingHeartbeatRef})\n this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs)\n }\n\n flushSendBuffer(){\n if(this.isConnected() && this.sendBuffer.length > 0){\n this.sendBuffer.forEach(callback => callback())\n this.sendBuffer = []\n }\n }\n\n onConnMessage(rawMessage){\n this.decode(rawMessage.data, msg => {\n let {topic, event, payload, ref, join_ref} = msg\n if(ref && ref === this.pendingHeartbeatRef){\n this.clearHeartbeats()\n this.pendingHeartbeatRef = null\n this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n if(this.hasLogger()) this.log(\"receive\", `${payload.status || \"\"} ${topic} ${event} ${ref && \"(\" + ref + \")\" || \"\"}`, payload)\n\n for(let i = 0; i < this.channels.length; i++){\n const channel = this.channels[i]\n if(!channel.isMember(topic, event, payload, join_ref)){ continue }\n channel.trigger(event, payload, ref, join_ref)\n }\n\n for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){\n let [, callback] = this.stateChangeCallbacks.message[i]\n callback(msg)\n 
}\n })\n }\n\n leaveOpenTopic(topic){\n let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining()))\n if(dupChannel){\n if(this.hasLogger()) this.log(\"transport\", `leaving duplicate topic \"${topic}\"`)\n dupChannel.leave()\n }\n }\n}\n"], + "mappings": ";;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACCO,IAAI,UAAU,CAAC,UAAU;AAC9B,MAAG,OAAO,UAAU,YAAW;AAC7B,WAAO;AAAA,EACT,OAAO;AACL,QAAIA,WAAU,WAAW;AAAE,aAAO;AAAA,IAAM;AACxC,WAAOA;AAAA,EACT;AACF;;;ACRO,IAAM,aAAa,OAAO,SAAS,cAAc,OAAO;AACxD,IAAM,YAAY,OAAO,WAAW,cAAc,SAAS;AAC3D,IAAM,SAAS,cAAc,aAAa;AAC1C,IAAM,cAAc;AACpB,IAAM,gBAAgB,EAAC,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,EAAC;AACpE,IAAM,0BAA0B;AAChC,IAAM,kBAAkB;AACxB,IAAM,kBAAkB;AACxB,IAAM,iBAAiB;AAAA,EAC5B,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,SAAS;AACX;AACO,IAAM,iBAAiB;AAAA,EAC5B,OAAO;AAAA,EACP,OAAO;AAAA,EACP,MAAM;AAAA,EACN,OAAO;AAAA,EACP,OAAO;AACT;AAEO,IAAM,aAAa;AAAA,EACxB,UAAU;AAAA,EACV,WAAW;AACb;AACO,IAAM,aAAa;AAAA,EACxB,UAAU;AACZ;AACO,IAAM,oBAAoB;;;ACvBjC,IAAqB,OAArB,MAA0B;AAAA,EACxB,YAAY,SAAS,OAAO,SAAS,SAAQ;AAC3C,SAAK,UAAU;AACf,SAAK,QAAQ;AACb,SAAK,UAAU,WAAW,WAAW;AAAE,aAAO,CAAC;AAAA,IAAE;AACjD,SAAK,eAAe;AACpB,SAAK,UAAU;AACf,SAAK,eAAe;AACpB,SAAK,WAAW,CAAC;AACjB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,OAAO,SAAQ;AACb,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,KAAK;AAAA,EACZ;AAAA;AAAA;AAAA;AAAA,EAKA,OAAM;AACJ,QAAG,KAAK,YAAY,SAAS,GAAE;AAAE;AAAA,IAAO;AACxC,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,QAAQ,OAAO,KAAK;AAAA,MACvB,OAAO,KAAK,QAAQ;AAAA,MACpB,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,QAAQ;AAAA,MACtB,KAAK,KAAK;AAAA,MACV,UAAU,KAAK,QAAQ,QAAQ;AAAA,IACjC,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QAAQ,QAAQ,UAAS;AACvB,QAAG,KAAK,YAAY,MAAM,GAAE;AAC1B,eAAS,KAAK,aAAa,QAAQ;AAAA,IACrC;AAEA,SAAK,SAAS,KAAK,EAAC,QAAQ,SAAQ,CAAC;AACrC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,QAAO;AACL,SAAK,eAAe;AACpB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,eAAe;AACpB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,aAAa,EAAC,QAAQ,UAAU,KAAI,GAA
E;AACpC,SAAK,SAAS,OAAO,OAAK,EAAE,WAAW,MAAM,EAC1C,QAAQ,OAAK,EAAE,SAAS,QAAQ,CAAC;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA,EAKA,iBAAgB;AACd,QAAG,CAAC,KAAK,UAAS;AAAE;AAAA,IAAO;AAC3B,SAAK,QAAQ,IAAI,KAAK,QAAQ;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA,EAKA,gBAAe;AACb,iBAAa,KAAK,YAAY;AAC9B,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA,EAKA,eAAc;AACZ,QAAG,KAAK,cAAa;AAAE,WAAK,cAAc;AAAA,IAAE;AAC5C,SAAK,MAAM,KAAK,QAAQ,OAAO,QAAQ;AACvC,SAAK,WAAW,KAAK,QAAQ,eAAe,KAAK,GAAG;AAEpD,SAAK,QAAQ,GAAG,KAAK,UAAU,aAAW;AACxC,WAAK,eAAe;AACpB,WAAK,cAAc;AACnB,WAAK,eAAe;AACpB,WAAK,aAAa,OAAO;AAAA,IAC3B,CAAC;AAED,SAAK,eAAe,WAAW,MAAM;AACnC,WAAK,QAAQ,WAAW,CAAC,CAAC;AAAA,IAC5B,GAAG,KAAK,OAAO;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA,EAKA,YAAY,QAAO;AACjB,WAAO,KAAK,gBAAgB,KAAK,aAAa,WAAW;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA,EAKA,QAAQ,QAAQ,UAAS;AACvB,SAAK,QAAQ,QAAQ,KAAK,UAAU,EAAC,QAAQ,SAAQ,CAAC;AAAA,EACxD;AACF;;;AC9GA,IAAqB,QAArB,MAA2B;AAAA,EACzB,YAAY,UAAU,WAAU;AAC9B,SAAK,WAAW;AAChB,SAAK,YAAY;AACjB,SAAK,QAAQ;AACb,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,QAAO;AACL,SAAK,QAAQ;AACb,iBAAa,KAAK,KAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA,EAKA,kBAAiB;AACf,iBAAa,KAAK,KAAK;AAEvB,SAAK,QAAQ,WAAW,MAAM;AAC5B,WAAK,QAAQ,KAAK,QAAQ;AAC1B,WAAK,SAAS;AAAA,IAChB,GAAG,KAAK,UAAU,KAAK,QAAQ,CAAC,CAAC;AAAA,EACnC;AACF;;;AC1BA,IAAqB,UAArB,MAA6B;AAAA,EAC3B,YAAY,OAAO,QAAQ,QAAO;AAChC,SAAK,QAAQ,eAAe;AAC5B,SAAK,QAAQ;AACb,SAAK,SAAS,QAAQ,UAAU,CAAC,CAAC;AAClC,SAAK,SAAS;AACd,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa;AAClB,SAAK,UAAU,KAAK,OAAO;AAC3B,SAAK,aAAa;AAClB,SAAK,WAAW,IAAI,KAAK,MAAM,eAAe,MAAM,KAAK,QAAQ,KAAK,OAAO;AAC7E,SAAK,aAAa,CAAC;AACnB,SAAK,kBAAkB,CAAC;AAExB,SAAK,cAAc,IAAI,MAAM,MAAM;AACjC,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IAC/C,GAAG,KAAK,OAAO,aAAa;AAC5B,SAAK,gBAAgB,KAAK,KAAK,OAAO,QAAQ,MAAM,KAAK,YAAY,MAAM,CAAC,CAAC;AAC7E,SAAK,gBAAgB;AAAA,MAAK,KAAK,OAAO,OAAO,MAAM;AACjD,aAAK,YAAY,MAAM;AACvB,YAAG,KAAK,UAAU,GAAE;AAAE,eAAK,OAAO;AAAA,QAAE;AAAA,MACtC,CAAC;AAAA,IACD;AACA,SAAK,SAAS,QAAQ,MAAM,MAAM;AAChC,WAAK,QAAQ,eAAe;AAC5B,WAAK,YAAY,MAAM;AACvB,WAAK,WAAW,QAAQ,eAAa,UAAU,KAAK,CAAC;AACrD,WAAK,aAAa,C
AAC;AAAA,IACrB,CAAC;AACD,SAAK,SAAS,QAAQ,SAAS,MAAM;AACnC,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,QAAQ,MAAM;AACjB,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,IAAI,KAAK,QAAQ,CAAC,EAAE;AAC9F,WAAK,QAAQ,eAAe;AAC5B,WAAK,OAAO,OAAO,IAAI;AAAA,IACzB,CAAC;AACD,SAAK,QAAQ,YAAU;AACrB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,IAAI,MAAM;AACpF,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,SAAS,MAAM;AAAA,MAAE;AAC5C,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,SAAS,QAAQ,WAAW,MAAM;AACrC,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,WAAW,KAAK,KAAK,KAAK,KAAK,QAAQ,CAAC,KAAK,KAAK,SAAS,OAAO;AACzH,UAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,KAAK,OAAO;AAC9E,gBAAU,KAAK;AACf,WAAK,QAAQ,eAAe;AAC5B,WAAK,SAAS,MAAM;AACpB,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,GAAG,eAAe,OAAO,CAAC,SAAS,QAAQ;AAC9C,WAAK,QAAQ,KAAK,eAAe,GAAG,GAAG,OAAO;AAAA,IAChD,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,KAAK,UAAU,KAAK,SAAQ;AAC1B,QAAG,KAAK,YAAW;AACjB,YAAM,IAAI,MAAM,4FAA4F;AAAA,IAC9G,OAAO;AACL,WAAK,UAAU;AACf,WAAK,aAAa;AAClB,WAAK,OAAO;AACZ,aAAO,KAAK;AAAA,IACd;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,SAAK,GAAG,eAAe,OAAO,QAAQ;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,WAAO,KAAK,GAAG,eAAe,OAAO,YAAU,SAAS,MAAM,CAAC;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmBA,GAAG,OAAO,UAAS;AACjB,QAAI,MAAM,KAAK;AACf,SAAK,SAAS,KAAK,EAAC,OAAO,KAAK,SAAQ,CAAC;AACzC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,IAAI,OAAO,KAAI;AACb,SAAK,WAAW,KAAK,SAAS,OAAO,CAAC,SAAS;AAC7C,aAAO,EAAE,KAAK,UAAU,UAAU,OAAO,QAAQ,eAAe,QAAQ,KAAK;AAAA,IAC/E,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,OAAO,YAAY,KAAK,KAAK,SAAS;AAAA,EAAE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkB/D,KAAK,OAAO,SAAS,UAAU,KAAK,SAAQ;AAC1C,cAAU,WAAW,CAAC;AACtB,QAAG,CAAC,KAAK,YAAW;AAClB,YAAM,IAAI,MAAM,kBAAkB,KAAK,SAAS,KAAK,KAAK,4DAA4D;AAAA,IACxH;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,OAAO,WAAW;AAAE,aAAO;AAAA,IAAQ,GAAG,OAAO;AAC5E,QAAG,KAAK,QAAQ,GAAE;AAChB,gBAAU,KAAK;AAAA,IACjB,OAAO;AACL,gBAAU,aAAa;AACvB,WAAK,WAAW,KAAK,SAAS;AAAA,IAChC;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,MAAM,UAAU,KAAK,SAAQ;AAC3B,SAAK,YAAY,MAAM;AACvB,SAAK,SAAS,cAAc;AAE5B,SAAK,QAAQ,eAAe;AAC5B,QAAI,UAAU,MAAM;AAClB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,EAAE;AAC5E,WAAK,QAAQ,eAAe,OAAO,OAAO;AAAA,IAC5C;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,OAAO;AACzE,cAAU,QAAQ,MAAM,MAAM,QAAQ,CAAC,EACpC,QAAQ,WAAW,MAAM,QAAQ,CAAC;AACrC,cAAU,KAAK;AACf,QAAG,CAAC,KAAK,QAAQ,GAAE;AAAE,gBAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,IAAE;AAEjD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,UAAU,QAAQ,SAAS,MAAK;AAAE,WAAO;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAKjD,SAAS,OAAO,OAAO,SAAS,SAAQ;AACtC,QAAG,KAAK,UAAU,OAAM;AAAE,aAAO;AAAA,IAAM;AAEvC,QAAG,WAAW,YAAY,KAAK,QAAQ,GAAE;AACvC,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,6BAA6B,EAAC,OAAO,OAAO,SAAS,QAAO,CAAC;AACpH,aAAO;AAAA,IACT,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,SAAS;AAAA,EAAI;AAAA;AAAA;AAAA;AAAA,EAKpC,OAAO,UAAU,KAAK,SAAQ;AAC5B,QAAG,KAAK,UAAU,GAAE;AAAE;AAAA,IAAO;AAC7B,SAAK,OAAO,eAAe,KAAK,KAAK;AACrC,SAAK,QAAQ,eAAe;AAC5B,SAAK,SAAS,OAAO,OAAO;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA,EAKA,QAAQ,OAAO,SAAS,KAAK,SAAQ;AACnC,QAAI,iBAAiB,KAAK,UAAU,OAAO,SAAS,KAAK,OAAO;AAChE,QAAG,WAAW,CAAC,gBAAe;AAAE,YAAM,IAAI,MAAM,6EAA6E;AAAA,IAAE;AAE/H,QAAI,gBAAgB,KAAK,SAAS,OAAO,UAAQ,KAAK,UAAU,KAAK;AAErE,aAAQ,IAAI,GAAG,IAAI,cAAc,QAAQ,KAAI;AAC3C,UAAI,OAAO,cAAc,CAAC;AAC1B,WAAK,SAAS,gBAAgB,KAAK,WAAW,KAAK,QAAQ,CAAC;AAAA,IAC9D;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,eAAe,KAAI;AAAE,WAAO,cAAc,GAAG;AAAA,EAAG;AAAA;AAAA;AAAA;AAAA,EAKhD,WAA
U;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAK1D,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAK1D,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAC5D;;;ACjTA,IAAqB,OAArB,MAA0B;AAAA,EAExB,OAAO,QAAQ,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AAC3E,QAAG,OAAO,gBAAe;AACvB,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,QAAQ;AAAA,IACtF,WAAU,OAAO,gBAAe;AAC9B,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,WAAW,KAAK,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,QAAQ;AAAA,IAC3F,WAAU,OAAO,SAAS,OAAO,iBAAgB;AAE/C,aAAO,KAAK,aAAa,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,QAAQ;AAAA,IACxF,OAAO;AACL,YAAM,IAAI,MAAM,iDAAiD;AAAA,IACnE;AAAA,EACF;AAAA,EAEA,OAAO,aAAa,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AAChF,QAAI,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,aAAa;AACjB,QAAG,SAAQ;AACT,mBAAa,IAAI,gBAAgB;AACjC,YAAM,aAAa,WAAW,MAAM,WAAW,MAAM,GAAG,OAAO;AAC/D,cAAQ,SAAS,WAAW;AAAA,IAC9B;AACA,WAAO,MAAM,UAAU,OAAO,EAC3B,KAAK,cAAY,SAAS,KAAK,CAAC,EAChC,KAAK,UAAQ,KAAK,UAAU,IAAI,CAAC,EACjC,KAAK,UAAQ,YAAY,SAAS,IAAI,CAAC,EACvC,MAAM,SAAO;AACZ,UAAG,IAAI,SAAS,gBAAgB,WAAU;AACxC,kBAAU;AAAA,MACZ,OAAO;AACL,oBAAY,SAAS,IAAI;AAAA,MAC3B;AAAA,IACF,CAAC;AACH,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,UAAS;AAC9E,QAAI,UAAU;AACd,QAAI,KAAK,QAAQ,QAAQ;AACzB,QAAI,SAAS,MAAM;AACjB,UAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,kBAAY,SAAS,QAAQ;AAAA,IAC/B;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAGzC,QAAI,aAAa,MAAM;AAAA,IAAE;AAEzB,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,WAAW,KAAK,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AACnF,QAAI,KAAK,QAAQ,UAAU,IAAI;AAC/B,QAAI,UAAU;AACd,aAAQ,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,OAAO,GAAE;AAC9C,UAAI,iBAAiB,KAAK,KAAK;AAAA,IACjC;AACA,QAAI,UAAU,MAAM,YAAY,SAAS,IAAI;AAC7C,QAAI,qBAAqB,MAAM;AAC7B,UAAG,IAAI,eAAe,WAAW,YAAY,UAAS;AACpD,YAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,iBAAS,QAAQ;AAAA,MACnB;AAAA,IACF;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAEzC,QAAI
,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,UAAU,MAAK;AACpB,QAAG,CAAC,QAAQ,SAAS,IAAG;AAAE,aAAO;AAAA,IAAK;AAEtC,QAAI;AACF,aAAO,KAAK,MAAM,IAAI;AAAA,IACxB,QAAQ;AACN,iBAAW,QAAQ,IAAI,iCAAiC,IAAI;AAC5D,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,OAAO,UAAU,KAAK,WAAU;AAC9B,QAAI,WAAW,CAAC;AAChB,aAAQ,OAAO,KAAI;AACjB,UAAG,CAAC,OAAO,UAAU,eAAe,KAAK,KAAK,GAAG,GAAE;AAAE;AAAA,MAAS;AAC9D,UAAI,WAAW,YAAY,GAAG,SAAS,IAAI,GAAG,MAAM;AACpD,UAAI,WAAW,IAAI,GAAG;AACtB,UAAG,OAAO,aAAa,UAAS;AAC9B,iBAAS,KAAK,KAAK,UAAU,UAAU,QAAQ,CAAC;AAAA,MAClD,OAAO;AACL,iBAAS,KAAK,mBAAmB,QAAQ,IAAI,MAAM,mBAAmB,QAAQ,CAAC;AAAA,MACjF;AAAA,IACF;AACA,WAAO,SAAS,KAAK,GAAG;AAAA,EAC1B;AAAA,EAEA,OAAO,aAAa,KAAK,QAAO;AAC9B,QAAG,OAAO,KAAK,MAAM,EAAE,WAAW,GAAE;AAAE,aAAO;AAAA,IAAI;AAEjD,QAAI,SAAS,IAAI,MAAM,IAAI,IAAI,MAAM;AACrC,WAAO,GAAG,GAAG,GAAG,MAAM,GAAG,KAAK,UAAU,MAAM,CAAC;AAAA,EACjD;AACF;;;AC1GA,IAAI,sBAAsB,CAAC,WAAW;AACpC,MAAI,SAAS;AACb,MAAI,QAAQ,IAAI,WAAW,MAAM;AACjC,MAAI,MAAM,MAAM;AAChB,WAAQ,IAAI,GAAG,IAAI,KAAK,KAAI;AAAE,cAAU,OAAO,aAAa,MAAM,CAAC,CAAC;AAAA,EAAE;AACtE,SAAO,KAAK,MAAM;AACpB;AAEA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,UAAU,WAAU;AAG9B,QAAG,aAAa,UAAU,WAAW,KAAK,UAAU,CAAC,EAAE,WAAW,iBAAiB,GAAE;AACnF,WAAK,YAAY,KAAK,UAAU,CAAC,EAAE,MAAM,kBAAkB,MAAM,CAAC;AAAA,IACpE;AACA,SAAK,WAAW;AAChB,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,OAAO,oBAAI,IAAI;AACpB,SAAK,mBAAmB;AACxB,SAAK,eAAe;AACpB,SAAK,oBAAoB;AACzB,SAAK,cAAc,CAAC;AACpB,SAAK,SAAS,WAAW;AAAA,IAAE;AAC3B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,YAAY,WAAW;AAAA,IAAE;AAC9B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,eAAe,KAAK,kBAAkB,QAAQ;AACnD,SAAK,aAAa,cAAc;AAEhC,eAAW,MAAM,KAAK,KAAK,GAAG,CAAC;AAAA,EACjC;AAAA,EAEA,kBAAkB,UAAS;AACzB,WAAQ,SACL,QAAQ,SAAS,SAAS,EAC1B,QAAQ,UAAU,UAAU,EAC5B,QAAQ,IAAI,OAAO,UAAW,WAAW,SAAS,GAAG,QAAQ,WAAW,QAAQ;AAAA,EACrF;AAAA,EAEA,cAAa;AACX,WAAO,KAAK,aAAa,KAAK,cAAc,EAAC,OAAO,KAAK,MAAK,CAAC;AAAA,EACjE;AAAA,EAEA,cAAc,MAAM,QAAQ,UAAS;AACnC,SAAK,MAAM,MAAM,QAAQ,QAAQ;AACjC,SAAK,aAAa,cAAc;AAAA,EAClC;AAAA,EAEA,YAAW;AACT,SAAK,QAAQ,SAAS;AACtB,SAAK,cAAc,MAAM,WAAW,KAAK;AAAA,EAC3C;AAAA,EAEA,WAAU;A
AAE,WAAO,KAAK,eAAe,cAAc,QAAQ,KAAK,eAAe,cAAc;AAAA,EAAW;AAAA,EAE1G,OAAM;AACJ,UAAM,UAAU,EAAC,UAAU,mBAAkB;AAC7C,QAAG,KAAK,WAAU;AAChB,cAAQ,qBAAqB,IAAI,KAAK;AAAA,IACxC;AACA,SAAK,KAAK,OAAO,SAAS,MAAM,MAAM,KAAK,UAAU,GAAG,UAAQ;AAC9D,UAAG,MAAK;AACN,YAAI,EAAC,QAAQ,OAAO,SAAQ,IAAI;AAChC,YAAG,WAAW,OAAO,KAAK,UAAU,MAAK;AAGvC,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,gBAAgB,KAAK;AAC9C;AAAA,QACF;AACA,aAAK,QAAQ;AAAA,MACf,OAAO;AACL,iBAAS;AAAA,MACX;AAEA,cAAO,QAAO;AAAA,QACZ,KAAK;AACH,mBAAS,QAAQ,SAAO;AAmBtB,uBAAW,MAAM,KAAK,UAAU,EAAC,MAAM,IAAG,CAAC,GAAG,CAAC;AAAA,UACjD,CAAC;AACD,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,aAAa,cAAc;AAChC,eAAK,OAAO,CAAC,CAAC;AACd,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,MAAM,MAAM,aAAa,KAAK;AACnC;AAAA,QACF,KAAK;AAAA,QACL,KAAK;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,yBAAyB,GAAG;AACrD;AAAA,QACF;AAAS,gBAAM,IAAI,MAAM,yBAAyB,MAAM,EAAE;AAAA,MAC5D;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAMA,KAAK,MAAK;AACR,QAAG,OAAO,SAAU,UAAS;AAAE,aAAO,oBAAoB,IAAI;AAAA,IAAE;AAChE,QAAG,KAAK,cAAa;AACnB,WAAK,aAAa,KAAK,IAAI;AAAA,IAC7B,WAAU,KAAK,kBAAiB;AAC9B,WAAK,YAAY,KAAK,IAAI;AAAA,IAC5B,OAAO;AACL,WAAK,eAAe,CAAC,IAAI;AACzB,WAAK,oBAAoB,WAAW,MAAM;AACxC,aAAK,UAAU,KAAK,YAAY;AAChC,aAAK,eAAe;AAAA,MACtB,GAAG,CAAC;AAAA,IACN;AAAA,EACF;AAAA,EAEA,UAAU,UAAU,SAAS,GAAE;AAC7B,SAAK,mBAAmB;AACxB,UAAM,OAAO,SAAS;AACtB,UAAM,QAAQ,SAAS,MAAM,QAAQ,IAAI;AACzC,SAAK,KAAK,QAAQ,EAAC,gBAAgB,uBAAsB,GAAG,MAAM,KAAK,IAAI,GAAG,MAAM,KAAK,QAAQ,SAAS,GAAG,UAAQ;AACnH,UAAG,CAAC,QAAQ,KAAK,WAAW,KAAI;AAC9B,aAAK,mBAAmB;AACxB,aAAK,QAAQ,QAAQ,KAAK,MAAM;AAChC,aAAK,cAAc,MAAM,yBAAyB,KAAK;AAAA,MACzD,WAAU,OAAO,SAAS,QAAO;AAC/B,aAAK,UAAU,UAAU,IAAI;AAAA,MAC/B,WAAU,KAAK,YAAY,SAAS,GAAE;AACpC,aAAK,UAAU,KAAK,WAAW;AAC/B,aAAK,cAAc,CAAC;AAAA,MACtB,OAAO;AACL,aAAK,mBAAmB;AAAA,MAC1B;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,MAAM,QAAQ,UAAS;AAC3B,aAAQ,OAAO,KAAK,MAAK;AAAE,UAAI,MAAM;AAAA,IAAE;AACvC,SAAK,aAAa,cAAc;AAChC,QAAI,OAAO,OAAO,OAAO,EAAC,MAAM,KAAM,QAAQ,QAAW,UAAU,KAAI,GAAG,EAAC,MAAM,QAAQ,SAAQ,CAAC;AACl
G,SAAK,cAAc,CAAC;AACpB,iBAAa,KAAK,iBAAiB;AACnC,SAAK,oBAAoB;AACzB,QAAG,OAAO,eAAgB,aAAY;AACpC,WAAK,QAAQ,IAAI,WAAW,SAAS,IAAI,CAAC;AAAA,IAC5C,OAAO;AACL,WAAK,QAAQ,IAAI;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,KAAK,QAAQ,SAAS,MAAM,iBAAiB,UAAS;AACpD,QAAI;AACJ,QAAI,YAAY,MAAM;AACpB,WAAK,KAAK,OAAO,GAAG;AACpB,sBAAgB;AAAA,IAClB;AACA,UAAM,KAAK,QAAQ,QAAQ,KAAK,YAAY,GAAG,SAAS,MAAM,KAAK,SAAS,WAAW,UAAQ;AAC7F,WAAK,KAAK,OAAO,GAAG;AACpB,UAAG,KAAK,SAAS,GAAE;AAAE,iBAAS,IAAI;AAAA,MAAE;AAAA,IACtC,CAAC;AACD,SAAK,KAAK,IAAI,GAAG;AAAA,EACnB;AACF;;;AChMA,IAAqB,WAArB,MAAqB,UAAS;AAAA,EAE5B,YAAY,SAAS,OAAO,CAAC,GAAE;AAC7B,QAAI,SAAS,KAAK,UAAU,EAAC,OAAO,kBAAkB,MAAM,gBAAe;AAC3E,SAAK,QAAQ,CAAC;AACd,SAAK,eAAe,CAAC;AACrB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,MACZ,QAAQ,WAAW;AAAA,MAAE;AAAA,MACrB,SAAS,WAAW;AAAA,MAAE;AAAA,MACtB,QAAQ,WAAW;AAAA,MAAE;AAAA,IACvB;AAEA,SAAK,QAAQ,GAAG,OAAO,OAAO,cAAY;AACxC,UAAI,EAAC,QAAQ,SAAS,OAAM,IAAI,KAAK;AAErC,WAAK,UAAU,KAAK,QAAQ,QAAQ;AACpC,WAAK,QAAQ,UAAS,UAAU,KAAK,OAAO,UAAU,QAAQ,OAAO;AAErE,WAAK,aAAa,QAAQ,UAAQ;AAChC,aAAK,QAAQ,UAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAAA,MAClE,CAAC;AACD,WAAK,eAAe,CAAC;AACrB,aAAO;AAAA,IACT,CAAC;AAED,SAAK,QAAQ,GAAG,OAAO,MAAM,UAAQ;AACnC,UAAI,EAAC,QAAQ,SAAS,OAAM,IAAI,KAAK;AAErC,UAAG,KAAK,mBAAmB,GAAE;AAC3B,aAAK,aAAa,KAAK,IAAI;AAAA,MAC7B,OAAO;AACL,aAAK,QAAQ,UAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAChE,eAAO;AAAA,MACT;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,QAAQ,UAAS;AAAE,SAAK,OAAO,UAAU;AAAA,EAAS;AAAA,EAElD,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,KAAK,IAAG;AAAE,WAAO,UAAS,KAAK,KAAK,OAAO,EAAE;AAAA,EAAE;AAAA,EAE/C,qBAAoB;AAClB,WAAO,CAAC,KAAK,WAAY,KAAK,YAAY,KAAK,QAAQ,QAAQ;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,OAAO,UAAU,cAAc,UAAU,QAAQ,SAAQ;AACvD,QAAI,QAAQ,KAAK,MAAM,YAAY;AACnC,QAAI,QAAQ,CAAC;AACb,QAAI,SAAS,CAAC;AAEd,SAAK,IAAI,OAAO,CAAC,KAAK,aAAa;AACjC,UAAG,CAAC,SAAS,GAAG,GAAE;AAChB,eAAO,GAAG,IAAI;AAAA,MAChB;AAAA,IACF,CAAC;AACD,SAAK,IAAI,UAAU,CAAC,KAAK,gBAAgB;AACvC,UAAI,kBAAkB,MAAM,GAAG;AAC/B
,UAAG,iBAAgB;AACjB,YAAI,UAAU,YAAY,MAAM,IAAI,OAAK,EAAE,OAAO;AAClD,YAAI,UAAU,gBAAgB,MAAM,IAAI,OAAK,EAAE,OAAO;AACtD,YAAI,cAAc,YAAY,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAC9E,YAAI,YAAY,gBAAgB,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAChF,YAAG,YAAY,SAAS,GAAE;AACxB,gBAAM,GAAG,IAAI;AACb,gBAAM,GAAG,EAAE,QAAQ;AAAA,QACrB;AACA,YAAG,UAAU,SAAS,GAAE;AACtB,iBAAO,GAAG,IAAI,KAAK,MAAM,eAAe;AACxC,iBAAO,GAAG,EAAE,QAAQ;AAAA,QACtB;AAAA,MACF,OAAO;AACL,cAAM,GAAG,IAAI;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO,KAAK,SAAS,OAAO,EAAC,OAAc,OAAc,GAAG,QAAQ,OAAO;AAAA,EAC7E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,SAAS,OAAO,MAAM,QAAQ,SAAQ;AAC3C,QAAI,EAAC,OAAO,OAAM,IAAI,KAAK,MAAM,IAAI;AACrC,QAAG,CAAC,QAAO;AAAE,eAAS,WAAW;AAAA,MAAE;AAAA,IAAE;AACrC,QAAG,CAAC,SAAQ;AAAE,gBAAU,WAAW;AAAA,MAAE;AAAA,IAAE;AAEvC,SAAK,IAAI,OAAO,CAAC,KAAK,gBAAgB;AACpC,UAAI,kBAAkB,MAAM,GAAG;AAC/B,YAAM,GAAG,IAAI,KAAK,MAAM,WAAW;AACnC,UAAG,iBAAgB;AACjB,YAAI,aAAa,MAAM,GAAG,EAAE,MAAM,IAAI,OAAK,EAAE,OAAO;AACpD,YAAI,WAAW,gBAAgB,MAAM,OAAO,OAAK,WAAW,QAAQ,EAAE,OAAO,IAAI,CAAC;AAClF,cAAM,GAAG,EAAE,MAAM,QAAQ,GAAG,QAAQ;AAAA,MACtC;AACA,aAAO,KAAK,iBAAiB,WAAW;AAAA,IAC1C,CAAC;AACD,SAAK,IAAI,QAAQ,CAAC,KAAK,iBAAiB;AACtC,UAAI,kBAAkB,MAAM,GAAG;AAC/B,UAAG,CAAC,iBAAgB;AAAE;AAAA,MAAO;AAC7B,UAAI,eAAe,aAAa,MAAM,IAAI,OAAK,EAAE,OAAO;AACxD,sBAAgB,QAAQ,gBAAgB,MAAM,OAAO,OAAK;AACxD,eAAO,aAAa,QAAQ,EAAE,OAAO,IAAI;AAAA,MAC3C,CAAC;AACD,cAAQ,KAAK,iBAAiB,YAAY;AAC1C,UAAG,gBAAgB,MAAM,WAAW,GAAE;AACpC,eAAO,MAAM,GAAG;AAAA,MAClB;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,OAAO,KAAK,WAAW,SAAQ;AAC7B,QAAG,CAAC,SAAQ;AAAE,gBAAU,SAAU,KAAK,MAAK;AAAE,eAAO;AAAA,MAAK;AAAA,IAAE;AAE5D,WAAO,KAAK,IAAI,WAAW,CAAC,KAAK,aAAa;AAC5C,aAAO,QAAQ,KAAK,QAAQ;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA;AAAA,EAIA,OAAO,IAAI,KAAK,MAAK;AACnB,WAAO,OAAO,oBAAoB,GAAG,EAAE,IAAI,SAAO,KAAK,KAAK,IAAI,GAAG,CAAC,CAAC;AAAA,EACvE;AAAA,EAEA,OAAO,MAAM,KAAI;AAAE,WAAO,KAAK,MAAM,KAAK,UAAU,GAAG,CAAC;AAAA,EAAE;AAC5D;;;AC5JA,IAAO,qBAAQ;AAAA,EACb,eAAe;AAAA,EACf,aAAa;AAAA,EAC
b,OAAO,EAAC,MAAM,GAAG,OAAO,GAAG,WAAW,EAAC;AAAA,EAEvC,OAAO,KAAK,UAAS;AACnB,QAAG,IAAI,QAAQ,gBAAgB,aAAY;AACzC,aAAO,SAAS,KAAK,aAAa,GAAG,CAAC;AAAA,IACxC,OAAO;AACL,UAAI,UAAU,CAAC,IAAI,UAAU,IAAI,KAAK,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO;AACvE,aAAO,SAAS,KAAK,UAAU,OAAO,CAAC;AAAA,IACzC;AAAA,EACF;AAAA,EAEA,OAAO,YAAY,UAAS;AAC1B,QAAG,WAAW,gBAAgB,aAAY;AACxC,aAAO,SAAS,KAAK,aAAa,UAAU,CAAC;AAAA,IAC/C,OAAO;AACL,UAAI,CAAC,UAAU,KAAK,OAAO,OAAO,OAAO,IAAI,KAAK,MAAM,UAAU;AAClE,aAAO,SAAS,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,CAAC;AAAA,IACxD;AAAA,EACF;AAAA;AAAA,EAIA,aAAa,SAAQ;AACnB,QAAI,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,IAAI;AAC7C,QAAI,UAAU,IAAI,YAAY;AAC9B,QAAI,eAAe,QAAQ,OAAO,QAAQ;AAC1C,QAAI,WAAW,QAAQ,OAAO,GAAG;AACjC,QAAI,aAAa,QAAQ,OAAO,KAAK;AACrC,QAAI,aAAa,QAAQ,OAAO,KAAK;AAErC,SAAK,gBAAgB,aAAa,YAAY,UAAU;AACxD,SAAK,gBAAgB,SAAS,YAAY,KAAK;AAC/C,SAAK,gBAAgB,WAAW,YAAY,OAAO;AACnD,SAAK,gBAAgB,WAAW,YAAY,OAAO;AAEnD,QAAI,aAAa,KAAK,cAAc,aAAa,aAAa,SAAS,aAAa,WAAW,aAAa,WAAW;AACvH,QAAI,SAAS,IAAI,YAAY,KAAK,gBAAgB,UAAU;AAC5D,QAAI,cAAc,IAAI,WAAW,MAAM;AACvC,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,SAAS;AAEb,SAAK,SAAS,UAAU,KAAK,MAAM,IAAI;AACvC,SAAK,SAAS,UAAU,aAAa,UAAU;AAC/C,SAAK,SAAS,UAAU,SAAS,UAAU;AAC3C,SAAK,SAAS,UAAU,WAAW,UAAU;AAC7C,SAAK,SAAS,UAAU,WAAW,UAAU;AAC7C,gBAAY,IAAI,cAAc,MAAM;AAAG,cAAU,aAAa;AAC9D,gBAAY,IAAI,UAAU,MAAM;AAAG,cAAU,SAAS;AACtD,gBAAY,IAAI,YAAY,MAAM;AAAG,cAAU,WAAW;AAC1D,gBAAY,IAAI,YAAY,MAAM;AAAG,cAAU,WAAW;AAE1D,QAAI,WAAW,IAAI,WAAW,OAAO,aAAa,QAAQ,UAAU;AACpE,aAAS,IAAI,aAAa,CAAC;AAC3B,aAAS,IAAI,IAAI,WAAW,OAAO,GAAG,OAAO,UAAU;AAEvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,gBAAgB,MAAM,MAAK;AACzB,QAAG,OAAO,KAAI;AACZ,YAAM,IAAI,MAAM,qBAAqB,IAAI,+DAA+D,IAAI,QAAQ;AAAA,IACtH;AAAA,EACF;AAAA,EAEA,aAAa,QAAO;AAClB,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,OAAO,KAAK,SAAS,CAAC;AAC1B,QAAI,UAAU,IAAI,YAAY;AAC9B,YAAO,MAAK;AAAA,MACV,KAAK,KAAK,MAAM;AAAM,eAAO,KAAK,WAAW,QAAQ,MAAM,OAAO;AAAA,MAClE,KAAK,KAAK,MAAM;AAAO,eAAO,KAAK,YAAY,QAAQ,MAAM,OAAO;AAAA,MACpE,KAAK,KAAK,MAAM;AAAW,eAAO,KAAK,gBAAgB,QAAQ,MAAM,OAAO;AAAA,IAC9E;AAAA,EACF;AAAA,EAEA,WAAW,QAAQ,MAAM,SAAQ
;AAC/B,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK,cAAc;AACrD,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,WAAO,EAAC,UAAU,SAAS,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EACjF;AAAA,EAEA,YAAY,QAAQ,MAAM,SAAQ;AAChC,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,UAAU,KAAK,SAAS,CAAC;AAC7B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK;AACvC,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,MAAM,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,OAAO,CAAC;AAC/D,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,QAAI,UAAU,EAAC,QAAQ,OAAO,UAAU,KAAI;AAC5C,WAAO,EAAC,UAAU,SAAS,KAAU,OAAc,OAAO,eAAe,OAAO,QAAgB;AAAA,EAClG;AAAA,EAEA,gBAAgB,QAAQ,MAAM,SAAQ;AACpC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB;AAClC,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AAEjD,WAAO,EAAC,UAAU,MAAM,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EAC9E;AACF;;;ACjBA,IAAqB,SAArB,MAA4B;AAAA,EAC1B,YAAY,UAAU,OAAO,CAAC,GAAE;AAC9B,SAAK,uBAAuB,EAAC,MAAM,CAAC,GAAG,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,SAAS,CAAC,EAAC;AACxE,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa,CAAC;AACnB,SAAK,MAAM;AACX,SAAK,cAAc;AACnB,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,YAAY,KAAK,aAAa,OAAO,aAAa;AACvD,SAAK,2BAA2B;AAChC,SAAK,qBAAqB,KAAK;AAC/B,SAAK,gBAAgB;AACrB,SAAK,eAAe,KAAK,kBAAmB,UAAU,OAAO;AAC7D,SAAK,yBAAyB;AAC9B,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AAIvD,SAAK,gBAAgB;AACrB,SAAK,gBAAgB;AACrB,SAAK,aAAa,KAAK,cAAc;AAC
rC,SAAK,eAAe;AACpB,SAAK,aAAa;AAClB,QAAG,KAAK,cAAc,UAAS;AAC7B,WAAK,SAAS,KAAK,UAAU,KAAK;AAClC,WAAK,SAAS,KAAK,UAAU,KAAK;AAAA,IACpC,OAAO;AACL,WAAK,SAAS,KAAK;AACnB,WAAK,SAAS,KAAK;AAAA,IACrB;AACA,QAAI,+BAA+B;AACnC,QAAG,aAAa,UAAU,kBAAiB;AACzC,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,KAAK,MAAK;AACX,eAAK,WAAW;AAChB,yCAA+B,KAAK;AAAA,QACtC;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,iCAAiC,KAAK,cAAa;AACpD,yCAA+B;AAC/B,eAAK,QAAQ;AAAA,QACf;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,oBAAoB,MAAM;AACnD,YAAG,SAAS,oBAAoB,UAAS;AACvC,eAAK,aAAa;AAAA,QACpB,OAAO;AACL,eAAK,aAAa;AAElB,cAAG,CAAC,KAAK,YAAY,KAAK,CAAC,KAAK,eAAc;AAC5C,iBAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,UACpC;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AACA,SAAK,sBAAsB,KAAK,uBAAuB;AACvD,SAAK,gBAAgB,CAAC,UAAU;AAC9B,UAAG,KAAK,eAAc;AACpB,eAAO,KAAK,cAAc,KAAK;AAAA,MACjC,OAAO;AACL,eAAO,CAAC,KAAM,KAAM,GAAI,EAAE,QAAQ,CAAC,KAAK;AAAA,MAC1C;AAAA,IACF;AACA,SAAK,mBAAmB,CAAC,UAAU;AACjC,UAAG,KAAK,kBAAiB;AACvB,eAAO,KAAK,iBAAiB,KAAK;AAAA,MACpC,OAAO;AACL,eAAO,CAAC,IAAI,IAAI,KAAK,KAAK,KAAK,KAAK,KAAK,KAAM,GAAI,EAAE,QAAQ,CAAC,KAAK;AAAA,MACrE;AAAA,IACF;AACA,SAAK,SAAS,KAAK,UAAU;AAC7B,QAAG,CAAC,KAAK,UAAU,KAAK,OAAM;AAC5B,WAAK,SAAS,CAAC,MAAM,KAAK,SAAS;AAAE,gBAAQ,IAAI,GAAG,IAAI,KAAK,GAAG,IAAI,IAAI;AAAA,MAAE;AAAA,IAC5E;AACA,SAAK,oBAAoB,KAAK,qBAAqB;AACnD,SAAK,SAAS,QAAQ,KAAK,UAAU,CAAC,CAAC;AACvC,SAAK,WAAW,GAAG,QAAQ,IAAI,WAAW,SAAS;AACnD,SAAK,MAAM,KAAK,OAAO;AACvB,SAAK,wBAAwB;AAC7B,SAAK,iBAAiB;AACtB,SAAK,sBAAsB;AAC3B,SAAK,iBAAiB,IAAI,MAAM,MAAM;AACpC,UAAG,KAAK,YAAW;AACjB,aAAK,IAAI,qCAAqC;AAC9C,aAAK,SAAS;AACd;AAAA,MACF;AACA,WAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,IACpC,GAAG,KAAK,gBAAgB;AACxB,SAAK,YAAY,KAAK;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,uBAAsB;AAAE,WAAO;AAAA,EAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQxC,iBAAiB,cAAa;AAC5B,SAAK;AACL,SAAK,gBAAgB;AACrB,iBAAa,KAAK,aAAa;AAC/B,SAAK,eAAe,MAAM;AAC1B,QAAG,KAAK,MAAK;AACX,WAAK,KAAK,MAAM;AAChB,WAAK,OAAO;AAAA,IACd;AACA,SAAK,YAAY;AAAA,EACnB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,WAAU;AAAE,WAAO,SAAS,SAAS,MAAM,QAAQ,IAAI,QAAQ;AAAA,EAAK;AAAA;AAAA;AAAA
;AAAA;AAAA;AAAA,EAOpE,cAAa;AACX,QAAI,MAAM,KAAK;AAAA,MACb,KAAK,aAAa,KAAK,UAAU,KAAK,OAAO,CAAC;AAAA,MAAG,EAAC,KAAK,KAAK,IAAG;AAAA,IAAC;AAClE,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO;AAAA,IAAI;AACtC,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO,GAAG,KAAK,SAAS,CAAC,IAAI,GAAG;AAAA,IAAG;AAE9D,WAAO,GAAG,KAAK,SAAS,CAAC,MAAM,SAAS,IAAI,GAAG,GAAG;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,WAAW,UAAU,MAAM,QAAO;AAChC,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,gBAAgB;AACrB,iBAAa,KAAK,aAAa;AAC/B,SAAK,eAAe,MAAM;AAC1B,SAAK,SAAS,MAAM;AAClB,WAAK,gBAAgB;AACrB,kBAAY,SAAS;AAAA,IACvB,GAAG,MAAM,MAAM;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,QAAO;AACb,QAAG,QAAO;AACR,iBAAW,QAAQ,IAAI,yFAAyF;AAChH,WAAK,SAAS,QAAQ,MAAM;AAAA,IAC9B;AACA,QAAG,KAAK,QAAQ,CAAC,KAAK,eAAc;AAAE;AAAA,IAAO;AAC7C,QAAG,KAAK,sBAAsB,KAAK,cAAc,UAAS;AACxD,WAAK,oBAAoB,UAAU,KAAK,kBAAkB;AAAA,IAC5D,OAAO;AACL,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,IAAI,MAAM,KAAK,MAAK;AAAE,SAAK,UAAU,KAAK,OAAO,MAAM,KAAK,IAAI;AAAA,EAAE;AAAA;AAAA;AAAA;AAAA,EAKlE,YAAW;AAAE,WAAO,KAAK,WAAW;AAAA,EAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASzC,OAAO,UAAS;AACd,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,KAAK,KAAK,CAAC,KAAK,QAAQ,CAAC;AACnD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU,UAAS;AACjB,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,QAAQ,KAAK,CAAC,KAAK,QAAQ,CAAC;AACtD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,KAAK,UAAS;AACZ,QAAG,CAAC,KAAK,YAAY,GAAE;AAAE,aAAO;AAAA,IAAM;AACtC,QAAI,MAAM,KAAK,QAAQ;AACvB,QAAI,YAAY,KAAK,IAAI;AACzB,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,IAAQ,CAAC;AACvE,QAAI,WAAW,KAAK,UAAU,SAAO;AACnC,UAAG,IAAI,QAAQ,KAAI;AACjB,aAAK,IAAI,CAAC,QAAQ,CAAC;AACnB,iBAAS,KAAK,IAAI,IAAI,SAAS;AAAA,MACjC;AA
AA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,cAAc,WAAU;AAOtB,YAAO,WAAU;AAAA,MACf,KAAK;AAAU,eAAO;AAAA,MACtB;AAAS,eAAO,UAAU;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,mBAAkB;AAChB,SAAK;AACL,SAAK,gBAAgB;AACrB,QAAI,YAAY;AAGhB,QAAG,KAAK,WAAU;AAChB,kBAAY,CAAC,WAAW,GAAG,iBAAiB,GAAG,KAAK,KAAK,SAAS,EAAE,QAAQ,MAAM,EAAE,CAAC,EAAE;AAAA,IACzF;AACA,SAAK,OAAO,IAAI,KAAK,UAAU,KAAK,YAAY,GAAG,SAAS;AAC5D,SAAK,KAAK,aAAa,KAAK;AAC5B,SAAK,KAAK,UAAU,KAAK;AACzB,SAAK,KAAK,SAAS,MAAM,KAAK,WAAW;AACzC,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AACnD,SAAK,KAAK,YAAY,WAAS,KAAK,cAAc,KAAK;AACvD,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AAAA,EACrD;AAAA,EAEA,WAAW,KAAI;AAAE,WAAO,KAAK,gBAAgB,KAAK,aAAa,QAAQ,GAAG;AAAA,EAAE;AAAA,EAE5E,aAAa,KAAK,KAAI;AAAE,SAAK,gBAAgB,KAAK,aAAa,QAAQ,KAAK,GAAG;AAAA,EAAE;AAAA,EAEjF,oBAAoB,mBAAmB,oBAAoB,MAAK;AAC9D,iBAAa,KAAK,aAAa;AAC/B,QAAI,cAAc;AAClB,QAAI,mBAAmB;AACvB,QAAI,SAAS;AACb,QAAI,wBAAwB,KAAK,cAAc,iBAAiB;AAChE,QAAI,WAAW,CAAC,WAAW;AACzB,WAAK,IAAI,aAAa,mBAAmB,qBAAqB,OAAO,MAAM;AAC3E,WAAK,IAAI,CAAC,SAAS,QAAQ,CAAC;AAC5B,yBAAmB;AACnB,WAAK,iBAAiB,iBAAiB;AACvC,WAAK,iBAAiB;AAAA,IACxB;AACA,QAAG,KAAK,WAAW,gBAAgB,qBAAqB,EAAE,GAAE;AAAE,aAAO,SAAS,WAAW;AAAA,IAAE;AAE3F,SAAK,gBAAgB,WAAW,UAAU,iBAAiB;AAE3D,eAAW,KAAK,QAAQ,YAAU;AAChC,WAAK,IAAI,aAAa,SAAS,MAAM;AACrC,UAAG,oBAAoB,CAAC,aAAY;AAClC,qBAAa,KAAK,aAAa;AAC/B,iBAAS,MAAM;AAAA,MACjB;AAAA,IACF,CAAC;AACD,QAAG,KAAK,aAAY;AAClB,WAAK,IAAI,CAAC,KAAK,WAAW,CAAC;AAAA,IAC7B;AACA,SAAK,cAAc,KAAK,OAAO,MAAM;AACnC,oBAAc;AACd,UAAG,CAAC,kBAAiB;AACnB,YAAIC,yBAAwB,KAAK,cAAc,iBAAiB;AAEhE,YAAG,CAAC,KAAK,0BAAyB;AAAE,eAAK,aAAa,gBAAgBA,sBAAqB,IAAI,MAAM;AAAA,QAAE;AACvG,eAAO,KAAK,IAAI,aAAa,eAAeA,sBAAqB,WAAW;AAAA,MAC9E;AAEA,mBAAa,KAAK,aAAa;AAC/B,WAAK,gBAAgB,WAAW,UAAU,iBAAiB;AAC3D,WAAK,KAAK,SAAO;AACf,aAAK,IAAI,aAAa,8BAA8B,GAAG;AACvD,aAAK,2BAA2B;AAChC,qBAAa,KAAK,aAAa;AAAA,MACjC,CAAC;AAAA,IACH,CAAC;AACD,SAAK,iBAAiB;AAAA,EACxB;AAAA,EAEA,kBAAiB;AACf,iBAAa,KAAK,cAAc;AAChC,iBAAa,KAAK,qBAAqB;AAAA,EACzC;AAAA,EAEA,aAAY;AACV,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,GAAG,KAA
K,cAAc,KAAK,SAAS,CAAC,iBAAiB,KAAK,YAAY,CAAC,EAAE;AACrH,SAAK,gBAAgB;AACrB,SAAK,gBAAgB;AACrB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,eAAe;AACpB,SAAK,qBAAqB,KAAK,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM,SAAS,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA,EAMA,mBAAkB;AAChB,QAAG,KAAK,qBAAoB;AAC1B,WAAK,sBAAsB;AAC3B,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,IAAI,aAAa,0DAA0D;AAAA,MAAE;AACxG,WAAK,iBAAiB;AACtB,WAAK,gBAAgB;AACrB,WAAK,SAAS,MAAM,KAAK,eAAe,gBAAgB,GAAG,iBAAiB,mBAAmB;AAAA,IACjG;AAAA,EACF;AAAA,EAEA,iBAAgB;AACd,QAAG,KAAK,QAAQ,KAAK,KAAK,eAAc;AAAE;AAAA,IAAO;AACjD,SAAK,sBAAsB;AAC3B,SAAK,gBAAgB;AACrB,SAAK,iBAAiB,WAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,EACvF;AAAA,EAEA,SAAS,UAAU,MAAM,QAAO;AAC9B,QAAG,CAAC,KAAK,MAAK;AACZ,aAAO,YAAY,SAAS;AAAA,IAC9B;AAIA,UAAM,cAAc,KAAK;AAEzB,SAAK,kBAAkB,aAAa,MAAM;AACxC,UAAG,MAAK;AAAE,oBAAY,MAAM,MAAM,UAAU,EAAE;AAAA,MAAE,OAAO;AAAE,oBAAY,MAAM;AAAA,MAAE;AAE7E,WAAK,oBAAoB,aAAa,MAAM;AAC1C,YAAG,KAAK,SAAS,aAAY;AAC3B,eAAK,KAAK,SAAS,WAAW;AAAA,UAAE;AAChC,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,KAAK,YAAY,WAAW;AAAA,UAAE;AACnC,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,OAAO;AAAA,QACd;AAEA,oBAAY,SAAS;AAAA,MACvB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEA,kBAAkB,MAAM,UAAU,QAAQ,GAAE;AAC1C,QAAG,UAAU,KAAK,CAAC,KAAK,gBAAe;AACrC,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,kBAAkB,MAAM,UAAU,QAAQ,CAAC;AAAA,IAClD,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,oBAAoB,MAAM,UAAU,QAAQ,GAAE;AAC5C,QAAG,UAAU,KAAK,KAAK,eAAe,cAAc,QAAO;AACzD,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,oBAAoB,MAAM,UAAU,QAAQ,CAAC;AAAA,IACpD,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,YAAY,OAAM;AAChB,QAAG,KAAK,KAAM,MAAK,KAAK,UAAU,MAAM;AAAA,IAAC;AACzC,QAAI,YAAY,SAAS,MAAM;AAC/B,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,SAAS,KAAK;AACzD,SAAK,iBAAiB;AACtB,SAAK,gBAAgB;AACrB,QAAG,CAAC,KAAK,iBAAiB,cAAc,KAAK;AAC3C,WAAK,eAAe,gBAAgB;AAAA,IACtC;AACA,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM,SAAS,KAAK,CAAC;AAAA,EAC3E;AAAA;AAAA;AAAA;AAAA,EAKA,YAAY,OAAM;AAChB,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,SAAS,KAAK;AACzD,QAAI,kBAAkB,KAAK;AAC3B,QAAI,oBAAoB,KAAK;AAC7B
,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM;AACxD,eAAS,OAAO,iBAAiB,iBAAiB;AAAA,IACpD,CAAC;AACD,QAAG,oBAAoB,KAAK,aAAa,oBAAoB,GAAE;AAC7D,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,mBAAkB;AAChB,SAAK,SAAS,QAAQ,aAAW;AAC/B,UAAG,EAAE,QAAQ,UAAU,KAAK,QAAQ,UAAU,KAAK,QAAQ,SAAS,IAAG;AACrE,gBAAQ,QAAQ,eAAe,KAAK;AAAA,MACtC;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,kBAAiB;AACf,YAAO,KAAK,QAAQ,KAAK,KAAK,YAAW;AAAA,MACvC,KAAK,cAAc;AAAY,eAAO;AAAA,MACtC,KAAK,cAAc;AAAM,eAAO;AAAA,MAChC,KAAK,cAAc;AAAS,eAAO;AAAA,MACnC;AAAS,eAAO;AAAA,IAClB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,cAAa;AAAE,WAAO,KAAK,gBAAgB,MAAM;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxD,OAAO,SAAQ;AACb,SAAK,IAAI,QAAQ,eAAe;AAChC,SAAK,WAAW,KAAK,SAAS,OAAO,OAAK,MAAM,OAAO;AAAA,EACzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,IAAI,MAAK;AACP,aAAQ,OAAO,KAAK,sBAAqB;AACvC,WAAK,qBAAqB,GAAG,IAAI,KAAK,qBAAqB,GAAG,EAAE,OAAO,CAAC,CAAC,GAAG,MAAM;AAChF,eAAO,KAAK,QAAQ,GAAG,MAAM;AAAA,MAC/B,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAO,aAAa,CAAC,GAAE;AAC7B,QAAI,OAAO,IAAI,QAAQ,OAAO,YAAY,IAAI;AAC9C,SAAK,SAAS,KAAK,IAAI;AACvB,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,MAAK;AACR,QAAG,KAAK,UAAU,GAAE;AAClB,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,SAAQ,IAAI;AAC7C,WAAK,IAAI,QAAQ,GAAG,KAAK,IAAI,KAAK,KAAK,QAAQ,KAAK,GAAG,KAAK,OAAO;AAAA,IACrE;AAEA,QAAG,KAAK,YAAY,GAAE;AACpB,WAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC;AAAA,IACpD,OAAO;AACL,WAAK,WAAW,KAAK,MAAM,KAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC,CAAC;AAAA,IAChF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAS;AACP,QAAI,SAAS,KAAK,MAAM;AACxB,QAAG,WAAW,KAAK,KAAI;AAAE,WAAK,MAAM;AAAA,IAAE,OAAO;AAAE,WAAK,MAAM;AAAA,IAAO;AAEjE,WAAO,KAAK,IAAI,SAAS;AAAA,EAC3B;AAAA,EAEA,gBAAe;AACb,QAAG,KAAK,uBAAuB,CAAC,KAAK,YAAY,GAAE;AAAE;AAAA,IAAO;AAC5D,SAAK,sBAAsB,KAAK,QAAQ;AACxC,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,KAAK,KAAK,oBAAmB,CAAC;AAC5F,SAAK,wBAAwB,WAAW,MAAM,KAAK,iBAAiB,GAAG,KAAK,mBAAmB;AAAA,EACjG;AAAA,EAEA,kBAAiB;AACf,QAAG,KAAK,YAAY,KAAK,KAAK,WAAW,SAAS,GAAE;AAClD,WAA
K,WAAW,QAAQ,cAAY,SAAS,CAAC;AAC9C,WAAK,aAAa,CAAC;AAAA,IACrB;AAAA,EACF;AAAA,EAEA,cAAc,YAAW;AACvB,SAAK,OAAO,WAAW,MAAM,SAAO;AAClC,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,SAAQ,IAAI;AAC7C,UAAG,OAAO,QAAQ,KAAK,qBAAoB;AACzC,aAAK,gBAAgB;AACrB,aAAK,sBAAsB;AAC3B,aAAK,iBAAiB,WAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,MACvF;AAEA,UAAG,KAAK,UAAU,EAAG,MAAK,IAAI,WAAW,GAAG,QAAQ,UAAU,EAAE,IAAI,KAAK,IAAI,KAAK,IAAI,OAAO,MAAM,MAAM,OAAO,EAAE,IAAI,OAAO;AAE7H,eAAQ,IAAI,GAAG,IAAI,KAAK,SAAS,QAAQ,KAAI;AAC3C,cAAM,UAAU,KAAK,SAAS,CAAC;AAC/B,YAAG,CAAC,QAAQ,SAAS,OAAO,OAAO,SAAS,QAAQ,GAAE;AAAE;AAAA,QAAS;AACjE,gBAAQ,QAAQ,OAAO,SAAS,KAAK,QAAQ;AAAA,MAC/C;AAEA,eAAQ,IAAI,GAAG,IAAI,KAAK,qBAAqB,QAAQ,QAAQ,KAAI;AAC/D,YAAI,CAAC,EAAE,QAAQ,IAAI,KAAK,qBAAqB,QAAQ,CAAC;AACtD,iBAAS,GAAG;AAAA,MACd;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,eAAe,OAAM;AACnB,QAAI,aAAa,KAAK,SAAS,KAAK,OAAK,EAAE,UAAU,UAAU,EAAE,SAAS,KAAK,EAAE,UAAU,EAAE;AAC7F,QAAG,YAAW;AACZ,UAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,4BAA4B,KAAK,GAAG;AAC/E,iBAAW,MAAM;AAAA,IACnB;AAAA,EACF;AACF;", + "names": ["closure", "fallbackTransportName"] +} diff --git a/deps/phoenix/priv/static/phoenix.js b/deps/phoenix/priv/static/phoenix.js new file mode 100644 index 0000000..455b902 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.js @@ -0,0 +1,1668 @@ +var Phoenix = (() => { + var __defProp = Object.defineProperty; + var __getOwnPropDesc = Object.getOwnPropertyDescriptor; + var __getOwnPropNames = Object.getOwnPropertyNames; + var __hasOwnProp = Object.prototype.hasOwnProperty; + var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); + }; + var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; + }; + var __toCommonJS = (mod) => 
__copyProps(__defProp({}, "__esModule", { value: true }), mod); + + // js/phoenix/index.js + var phoenix_exports = {}; + __export(phoenix_exports, { + Channel: () => Channel, + LongPoll: () => LongPoll, + Presence: () => Presence, + Serializer: () => serializer_default, + Socket: () => Socket + }); + + // js/phoenix/utils.js + var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } + }; + + // js/phoenix/constants.js + var globalSelf = typeof self !== "undefined" ? self : null; + var phxWindow = typeof window !== "undefined" ? window : null; + var global = globalSelf || phxWindow || globalThis; + var DEFAULT_VSN = "2.0.0"; + var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; + var MAX_LONGPOLL_BATCH_SIZE = 100; + var DEFAULT_TIMEOUT = 1e4; + var WS_CLOSE_NORMAL = 1e3; + var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" + }; + var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" + }; + var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" + }; + var XHR_STATES = { + complete: 4 + }; + var AUTH_TOKEN_PREFIX = "base64url.bearer.phx."; + + // js/phoenix/push.js + var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + /** + * + * @param {number} timeout + */ + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + /** + * + */ + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: 
this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + /** + * + * @param {*} status + * @param {*} callback + */ + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + /** + * @private + */ + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + /** + * @private + */ + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + /** + * @private + */ + cancelRefEvent() { + if (!this.refEvent) { + return; + } + this.channel.off(this.refEvent); + } + /** + * @private + */ + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + /** + * @private + */ + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + /** + * @private + */ + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + /** + * @private + */ + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } + }; + + // js/phoenix/timer.js + var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + clearTimeout(this.timer); + } + /** + * Cancels any previous scheduleTimeout and schedules callback + */ + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + 
this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } + }; + + // js/phoenix/channel.js + var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push( + this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + }) + ); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, 
CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + /** + * Join the channel + * @param {integer} timeout + * @returns {Push} + */ + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + /** + * Hook into channel close + * @param {Function} callback + */ + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + /** + * Hook into channel errors + * @param {Function} callback + */ + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + /** + * Subscribes on channel events + * + * Subscription returns a ref counter, which can be used later to + * unsubscribe the exact event listener + * + * @example + * const ref1 = channel.on("event", do_stuff) + * const ref2 = channel.on("event", do_other_stuff) + * channel.off("event", ref1) + * // Since unsubscription, do_stuff won't fire, + * // while do_other_stuff will keep firing on the "event" + * + * @param {string} event + * @param {Function} callback + * @returns {integer} ref + */ + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + /** + * Unsubscribes off of channel events + * + * Use the ref returned from a channel.on() to unsubscribe one + * handler, or pass nothing for the ref to unsubscribe all + * handlers for the given event. 
+ * + * @example + * // Unsubscribe the do_stuff handler + * const ref1 = channel.on("event", do_stuff) + * channel.off("event", ref1) + * + * // Unsubscribe all handlers from event + * channel.off("event") + * + * @param {string} event + * @param {integer} ref + */ + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + /** + * @private + */ + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + /** + * Sends a message `event` to phoenix with the payload `payload`. + * Phoenix receives this in the `handle_in(event, payload, socket)` + * function. if phoenix replies or it times out (default 10000ms), + * then optionally the reply can be received. + * + * @example + * channel.push("event") + * .receive("ok", payload => console.log("phoenix replied:", payload)) + * .receive("error", err => console.log("phoenix errored", err)) + * .receive("timeout", () => console.log("timed out pushing")) + * @param {string} event + * @param {Object} payload + * @param {number} [timeout] + * @returns {Push} + */ + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + /** Leaves the channel + * + * Unsubscribes from server events, and + * instructs channel to terminate on server + * + * Triggers onClose() hooks + * + * To receive leave acknowledgements, use the `receive` + * hook to bind to the server ack, ie: + * + * @example + * channel.leave().receive("ok", () => alert("left!") ) + * + * @param {integer} timeout + * @returns {Push} + */ + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + /** + * Overridable message hook + * + * Receives all events for specialized message handling + * before dispatching to the channel callbacks. 
+ * + * Must return the payload, modified or unmodified + * @param {string} event + * @param {Object} payload + * @param {integer} ref + * @returns {Object} + */ + onMessage(_event, payload, _ref) { + return payload; + } + /** + * @private + */ + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + /** + * @private + */ + joinRef() { + return this.joinPush.ref; + } + /** + * @private + */ + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + /** + * @private + */ + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + /** + * @private + */ + replyEventName(ref) { + return `chan_reply_${ref}`; + } + /** + * @private + */ + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + /** + * @private + */ + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + /** + * @private + */ + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + /** + * @private + */ + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + /** + * @private + */ + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } + }; + + // js/phoenix/ajax.js + var Ajax = class { + static request(method, endPoint, headers, body, 
timeout, ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else if (global.XMLHttpRequest) { + let req = new global.XMLHttpRequest(); + return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback); + } else if (global.fetch && global.AbortController) { + return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback); + } else { + throw new Error("No suitable XMLHttpRequest implementation found"); + } + } + static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback) { + let options = { + method, + headers, + body + }; + let controller = null; + if (timeout) { + controller = new AbortController(); + const _timeoutId = setTimeout(() => controller.abort(), timeout); + options.signal = controller.signal; + } + global.fetch(endPoint, options).then((response) => response.text()).then((data) => this.parseJSON(data)).then((data) => callback && callback(data)).catch((err) => { + if (err.name === "AbortError" && ontimeout) { + ontimeout(); + } else { + callback && callback(null); + } + }); + return controller; + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + for (let [key, value] of Object.entries(headers)) { + req.setRequestHeader(key, value); + } + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + 
let response = this.parseJSON(req.responseText); + callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch (e) { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? "&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } + }; + + // js/phoenix/longpoll.js + var arrayBufferToBase64 = (buffer) => { + let binary = ""; + let bytes = new Uint8Array(buffer); + let len = bytes.byteLength; + for (let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); + }; + var LongPoll = class { + constructor(endPoint, protocols) { + if (protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)) { + this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length)); + } + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.awaitingBatchAck = false; + this.currentBatch = null; + this.currentBatchTimer = null; + this.batchBuffer = []; + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState 
= SOCKET_STATES.connecting; + setTimeout(() => this.poll(), 0); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + const headers = { "Accept": "application/json" }; + if (this.authToken) { + headers["X-Phoenix-AuthToken"] = this.authToken; + } + this.ajax("GET", headers, null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + if (status === 410 && this.token !== null) { + this.onerror(410); + this.closeAndRetry(3410, "session_gone", false); + return; + } + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + // we collect all pushes within the current event loop by + // setTimeout 0, which optimizes back-to-back procedural + // pushes against an empty buffer + send(body) { + if (typeof body !== "string") { + body = arrayBufferToBase64(body); + } + if (this.currentBatch) { + this.currentBatch.push(body); + } 
else if (this.awaitingBatchAck) { + this.batchBuffer.push(body); + } else { + this.currentBatch = [body]; + this.currentBatchTimer = setTimeout(() => { + this.batchSend(this.currentBatch); + this.currentBatch = null; + }, 0); + } + } + batchSend(messages, offset = 0) { + this.awaitingBatchAck = true; + const next = offset + MAX_LONGPOLL_BATCH_SIZE; + const batch = messages.slice(offset, next); + this.ajax("POST", { "Content-Type": "application/x-ndjson" }, batch.join("\n"), () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.awaitingBatchAck = false; + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } else if (next < messages.length) { + this.batchSend(messages, next); + } else if (this.batchBuffer.length > 0) { + this.batchSend(this.batchBuffer); + this.batchBuffer = []; + } else { + this.awaitingBatchAck = false; + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + this.batchBuffer = []; + clearTimeout(this.currentBatchTimer); + this.currentBatchTimer = null; + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, headers, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } + }; + + // js/phoenix/presence.js + var Presence = class _Presence { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + 
this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = _Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = []; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return _Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + // lower-level public static API + /** + * Used to sync the list of presences on the server + * with the client's state. An optional `onJoin` and `onLeave` callback can + * be provided to react to changes in the client's local presences across + * disconnects and reconnects with the server. 
+ * + * @returns {Presence} + */ + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + /** + * + * Used to sync a diff of presence join and leave + * events from the server, as they happen. Like `syncState`, `syncDiff` + * accepts optional `onJoin` and `onLeave` callbacks to react to a user + * joining or leaving from a device. 
+ * + * @returns {Presence} + */ + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + /** + * Returns the array of presences, with selected metadata. 
+ * + * @param {Object} presences + * @param {Function} chooser + * + * @returns {Presence} + */ + static list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + // private + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } + }; + + // js/phoenix/serializer.js + var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + // private + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let encoder = new TextEncoder(); + let joinRefBytes = encoder.encode(join_ref); + let refBytes = encoder.encode(ref); + let topicBytes = encoder.encode(topic); + let eventBytes = encoder.encode(event); + this.assertFieldSize(joinRefBytes.byteLength, "join_ref"); + this.assertFieldSize(refBytes.byteLength, "ref"); + this.assertFieldSize(topicBytes.byteLength, "topic"); + this.assertFieldSize(eventBytes.byteLength, "event"); + let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let headerBytes = new Uint8Array(header); + let view = new DataView(header); + let offset = 
0; + view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, joinRefBytes.byteLength); + view.setUint8(offset++, refBytes.byteLength); + view.setUint8(offset++, topicBytes.byteLength); + view.setUint8(offset++, eventBytes.byteLength); + headerBytes.set(joinRefBytes, offset); + offset += joinRefBytes.byteLength; + headerBytes.set(refBytes, offset); + offset += refBytes.byteLength; + headerBytes.set(topicBytes, offset); + offset += topicBytes.byteLength; + headerBytes.set(eventBytes, offset); + offset += eventBytes.byteLength; + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(headerBytes, 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + assertFieldSize(size, name) { + if (size > 255) { + throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`); + } + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + 
let joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } + }; + + // js/phoenix/socket.js + var Socket = class { + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.fallbackRef = null; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.primaryPassedHealthCheck = false; + this.longPollFallbackMs = opts.longPollFallbackMs; + this.fallbackTimer = null; + this.sessionStore = opts.sessionStorage || global && global.sessionStorage; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder 
= serializer_default.decode.bind(serializer_default); + this.closeWasClean = true; + this.disconnecting = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + this.pageHidden = false; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + phxWindow.addEventListener("visibilitychange", () => { + if (document.visibilityState === "hidden") { + this.pageHidden = true; + } else { + this.pageHidden = false; + if (!this.isConnected() && !this.closeWasClean) { + this.teardown(() => this.connect()); + } + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + if (!this.logger && opts.debug) { + this.logger = (kind, msg, data) => { + console.log(`${kind}: ${msg}`, data); + }; + } + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimeoutTimer = null; + this.heartbeatTimer = 
null; + this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + if (this.pageHidden) { + this.log("Not reconnecting as page is hidden!"); + this.teardown(); + return; + } + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + this.authToken = opts.authToken; + } + /** + * Returns the LongPoll transport reference + */ + getLongPollTransport() { + return LongPoll; + } + /** + * Disconnects and replaces the active transport + * + * @param {Function} newTransport - The new transport class to instantiate + * + */ + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + /** + * Returns the socket protocol + * + * @returns {string} + */ + protocol() { + return location.protocol.match(/^https/) ? "wss" : "ws"; + } + /** + * The fully qualified socket url + * + * @returns {string} + */ + endPointURL() { + let uri = Ajax.appendParams( + Ajax.appendParams(this.endPoint, this.params()), + { vsn: this.vsn } + ); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + /** + * Disconnects the socket + * + * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes. + * + * @param {Function} callback - Optional callback which is called after socket is disconnected. + * @param {integer} code - A status code for disconnection (Optional). + * @param {string} reason - A textual description of the reason to disconnect. 
(Optional) + */ + disconnect(callback, code, reason) { + this.connectClock++; + this.disconnecting = true; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + this.teardown(() => { + this.disconnecting = false; + callback && callback(); + }, code, reason); + } + /** + * + * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}` + * + * Passing params to connect is deprecated; pass them in the Socket constructor instead: + * `new Socket("/socket", {params: {user_id: userToken}})`. + */ + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn && !this.disconnecting) { + return; + } + if (this.longPollFallbackMs && this.transport !== LongPoll) { + this.connectWithFallback(LongPoll, this.longPollFallbackMs); + } else { + this.transportConnect(); + } + } + /** + * Logs the message. Override `this.logger` for specialized logging. noops by default + * @param {string} kind + * @param {string} msg + * @param {Object} data + */ + log(kind, msg, data) { + this.logger && this.logger(kind, msg, data); + } + /** + * Returns true if a logger has been set on this socket. 
+ */ + hasLogger() { + return this.logger !== null; + } + /** + * Registers callbacks for connection open events + * + * @example socket.onOpen(function(){ console.info("the socket was opened") }) + * + * @param {Function} callback + */ + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection close events + * @param {Function} callback + */ + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection error events + * + * @example socket.onError(function(error){ alert("An error occurred") }) + * + * @param {Function} callback + */ + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection message events + * @param {Function} callback + */ + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + /** + * Pings the server and invokes the callback with the RTT in milliseconds + * @param {Function} callback + * + * Returns true if the ping was pushed or false if unable to be pushed. 
+ */ + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + /** + * @private + * + * @param {Function} + */ + transportName(transport) { + switch (transport) { + case LongPoll: + return "LongPoll"; + default: + return transport.name; + } + } + /** + * @private + */ + transportConnect() { + this.connectClock++; + this.closeWasClean = false; + let protocols = void 0; + if (this.authToken) { + protocols = ["phoenix", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, "")}`]; + } + this.conn = new this.transport(this.endPointURL(), protocols); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + getSession(key) { + return this.sessionStore && this.sessionStore.getItem(key); + } + storeSession(key, val) { + this.sessionStore && this.sessionStore.setItem(key, val); + } + connectWithFallback(fallbackTransport, fallbackThreshold = 2500) { + clearTimeout(this.fallbackTimer); + let established = false; + let primaryTransport = true; + let openRef, errorRef; + let fallbackTransportName = this.transportName(fallbackTransport); + let fallback = (reason) => { + this.log("transport", `falling back to ${fallbackTransportName}...`, reason); + this.off([openRef, errorRef]); + primaryTransport = false; + this.replaceTransport(fallbackTransport); + this.transportConnect(); + }; + if (this.getSession(`phx:fallback:${fallbackTransportName}`)) { + return fallback("memorized"); + } + this.fallbackTimer = setTimeout(fallback, 
fallbackThreshold); + errorRef = this.onError((reason) => { + this.log("transport", "error", reason); + if (primaryTransport && !established) { + clearTimeout(this.fallbackTimer); + fallback(reason); + } + }); + if (this.fallbackRef) { + this.off([this.fallbackRef]); + } + this.fallbackRef = this.onOpen(() => { + established = true; + if (!primaryTransport) { + let fallbackTransportName2 = this.transportName(fallbackTransport); + if (!this.primaryPassedHealthCheck) { + this.storeSession(`phx:fallback:${fallbackTransportName2}`, "true"); + } + return this.log("transport", `established ${fallbackTransportName2} fallback`); + } + clearTimeout(this.fallbackTimer); + this.fallbackTimer = setTimeout(fallback, fallbackThreshold); + this.ping((rtt) => { + this.log("transport", "connected to primary after", rtt); + this.primaryPassedHealthCheck = true; + clearTimeout(this.fallbackTimer); + }); + }); + this.transportConnect(); + } + clearHeartbeats() { + clearTimeout(this.heartbeatTimer); + clearTimeout(this.heartbeatTimeoutTimer); + } + onConnOpen() { + if (this.hasLogger()) this.log("transport", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.disconnecting = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + /** + * @private + */ + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.triggerChanError(); + this.closeWasClean = false; + this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, "heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + this.clearHeartbeats(); + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + const connToClose = this.conn; + this.waitForBufferDone(connToClose, () => { + if (code) { + connToClose.close(code, reason || ""); + } else { + connToClose.close(); + } + this.waitForSocketClosed(connToClose, () => { + if (this.conn === connToClose) { + this.conn.onopen = function() { + }; + this.conn.onerror = function() { + }; + this.conn.onmessage = function() { + }; + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(conn, callback, tries = 1) { + if (tries === 5 || !conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(conn, callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(conn, callback, tries = 1) { + if (tries === 5 || conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(conn, callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + if (this.conn) this.conn.onclose = () => { + }; + let closeCode = event && event.code; + if (this.hasLogger()) this.log("transport", "close", event); + this.triggerChanError(); + this.clearHeartbeats(); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + /** + * @private + */ + onConnError(error) { + if (this.hasLogger()) this.log("transport", "error", error); 
+ let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + /** + * @private + */ + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + /** + * @returns {string} + */ + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + /** + * @returns {boolean} + */ + isConnected() { + return this.connectionState() === "open"; + } + /** + * @private + * + * @param {Channel} + */ + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c !== channel); + } + /** + * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations. 
+ * + * @param {refs} - list of refs returned by calls to + * `onOpen`, `onClose`, `onError,` and `onMessage` + */ + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + /** + * Initiates a new channel for the given topic + * + * @param {string} topic + * @param {Object} chanParams - Parameters for the channel + * @returns {Channel} + */ + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + /** + * @param {Object} data + */ + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + /** + * Return the next message ref, accounting for overflows + * @returns {string} + */ + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + this.clearHeartbeats(); + this.pendingHeartbeatRef = 
null; + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } + }; + return __toCommonJS(phoenix_exports); +})(); diff --git a/deps/phoenix/priv/static/phoenix.min.js b/deps/phoenix/priv/static/phoenix.min.js new file mode 100644 index 0000000..bb4d483 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.min.js @@ -0,0 +1,2 @@ +var Phoenix=(()=>{var H=Object.defineProperty;var D=Object.getOwnPropertyDescriptor;var U=Object.getOwnPropertyNames;var F=Object.prototype.hasOwnProperty;var I=(h,e)=>{for(var t in e)H(h,t,{get:e[t],enumerable:!0})},z=(h,e,t,i)=>{if(e&&typeof e=="object"||typeof e=="function")for(let s of U(e))!F.call(h,s)&&s!==t&&H(h,s,{get:()=>e[s],enumerable:!(i=D(e,s))||i.enumerable});return h};var J=h=>z(H({},"__esModule",{value:!0}),h);var G={};I(G,{Channel:()=>j,LongPoll:()=>T,Presence:()=>_,Serializer:()=>L,Socket:()=>x});var R=h=>typeof h=="function"?h:function(){return h};var W=typeof self!="undefined"?self:null,v=typeof 
window!="undefined"?window:null,p=W||v||globalThis,O="2.0.0",g={connecting:0,open:1,closing:2,closed:3},B=100,P=1e4,$=1e3,u={closed:"closed",errored:"errored",joined:"joined",joining:"joining",leaving:"leaving"},m={close:"phx_close",error:"phx_error",join:"phx_join",reply:"phx_reply",leave:"phx_leave"},A={longpoll:"longpoll",websocket:"websocket"},M={complete:4},w="base64url.bearer.phx.";var y=class{constructor(e,t,i,s){this.channel=e,this.event=t,this.payload=i||function(){return{}},this.receivedResp=null,this.timeout=s,this.timeoutTimer=null,this.recHooks=[],this.sent=!1}resend(e){this.timeout=e,this.reset(),this.send()}send(){this.hasReceived("timeout")||(this.startTimeout(),this.sent=!0,this.channel.socket.push({topic:this.channel.topic,event:this.event,payload:this.payload(),ref:this.ref,join_ref:this.channel.joinRef()}))}receive(e,t){return this.hasReceived(e)&&t(this.receivedResp.response),this.recHooks.push({status:e,callback:t}),this}reset(){this.cancelRefEvent(),this.ref=null,this.refEvent=null,this.receivedResp=null,this.sent=!1}matchReceive({status:e,response:t,_ref:i}){this.recHooks.filter(s=>s.status===e).forEach(s=>s.callback(t))}cancelRefEvent(){this.refEvent&&this.channel.off(this.refEvent)}cancelTimeout(){clearTimeout(this.timeoutTimer),this.timeoutTimer=null}startTimeout(){this.timeoutTimer&&this.cancelTimeout(),this.ref=this.channel.socket.makeRef(),this.refEvent=this.channel.replyEventName(this.ref),this.channel.on(this.refEvent,e=>{this.cancelRefEvent(),this.cancelTimeout(),this.receivedResp=e,this.matchReceive(e)}),this.timeoutTimer=setTimeout(()=>{this.trigger("timeout",{})},this.timeout)}hasReceived(e){return this.receivedResp&&this.receivedResp.status===e}trigger(e,t){this.channel.trigger(this.refEvent,{status:e,response:t})}};var 
k=class{constructor(e,t){this.callback=e,this.timerCalc=t,this.timer=null,this.tries=0}reset(){this.tries=0,clearTimeout(this.timer)}scheduleTimeout(){clearTimeout(this.timer),this.timer=setTimeout(()=>{this.tries=this.tries+1,this.callback()},this.timerCalc(this.tries+1))}};var j=class{constructor(e,t,i){this.state=u.closed,this.topic=e,this.params=R(t||{}),this.socket=i,this.bindings=[],this.bindingRef=0,this.timeout=this.socket.timeout,this.joinedOnce=!1,this.joinPush=new y(this,m.join,this.params,this.timeout),this.pushBuffer=[],this.stateChangeRefs=[],this.rejoinTimer=new k(()=>{this.socket.isConnected()&&this.rejoin()},this.socket.rejoinAfterMs),this.stateChangeRefs.push(this.socket.onError(()=>this.rejoinTimer.reset())),this.stateChangeRefs.push(this.socket.onOpen(()=>{this.rejoinTimer.reset(),this.isErrored()&&this.rejoin()})),this.joinPush.receive("ok",()=>{this.state=u.joined,this.rejoinTimer.reset(),this.pushBuffer.forEach(s=>s.send()),this.pushBuffer=[]}),this.joinPush.receive("error",()=>{this.state=u.errored,this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.onClose(()=>{this.rejoinTimer.reset(),this.socket.hasLogger()&&this.socket.log("channel",`close ${this.topic} ${this.joinRef()}`),this.state=u.closed,this.socket.remove(this)}),this.onError(s=>{this.socket.hasLogger()&&this.socket.log("channel",`error ${this.topic}`,s),this.isJoining()&&this.joinPush.reset(),this.state=u.errored,this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.joinPush.receive("timeout",()=>{this.socket.hasLogger()&&this.socket.log("channel",`timeout ${this.topic} (${this.joinRef()})`,this.joinPush.timeout),new y(this,m.leave,R({}),this.timeout).send(),this.state=u.errored,this.joinPush.reset(),this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.on(m.reply,(s,o)=>{this.trigger(this.replyEventName(o),s)})}join(e=this.timeout){if(this.joinedOnce)throw new Error("tried to join multiple times. 
'join' can only be called a single time per channel instance");return this.timeout=e,this.joinedOnce=!0,this.rejoin(),this.joinPush}onClose(e){this.on(m.close,e)}onError(e){return this.on(m.error,t=>e(t))}on(e,t){let i=this.bindingRef++;return this.bindings.push({event:e,ref:i,callback:t}),i}off(e,t){this.bindings=this.bindings.filter(i=>!(i.event===e&&(typeof t=="undefined"||t===i.ref)))}canPush(){return this.socket.isConnected()&&this.isJoined()}push(e,t,i=this.timeout){if(t=t||{},!this.joinedOnce)throw new Error(`tried to push '${e}' to '${this.topic}' before joining. Use channel.join() before pushing events`);let s=new y(this,e,function(){return t},i);return this.canPush()?s.send():(s.startTimeout(),this.pushBuffer.push(s)),s}leave(e=this.timeout){this.rejoinTimer.reset(),this.joinPush.cancelTimeout(),this.state=u.leaving;let t=()=>{this.socket.hasLogger()&&this.socket.log("channel",`leave ${this.topic}`),this.trigger(m.close,"leave")},i=new y(this,m.leave,R({}),e);return i.receive("ok",()=>t()).receive("timeout",()=>t()),i.send(),this.canPush()||i.trigger("ok",{}),i}onMessage(e,t,i){return t}isMember(e,t,i,s){return this.topic!==e?!1:s&&s!==this.joinRef()?(this.socket.hasLogger()&&this.socket.log("channel","dropping outdated message",{topic:e,event:t,payload:i,joinRef:s}),!1):!0}joinRef(){return this.joinPush.ref}rejoin(e=this.timeout){this.isLeaving()||(this.socket.leaveOpenTopic(this.topic),this.state=u.joining,this.joinPush.resend(e))}trigger(e,t,i,s){let o=this.onMessage(e,t,i,s);if(t&&!o)throw new Error("channel onMessage callbacks must return the payload, modified or unmodified");let n=this.bindings.filter(r=>r.event===e);for(let r=0;rl.abort(),o);a.signal=l.signal}return p.fetch(t,a).then(c=>c.text()).then(c=>this.parseJSON(c)).then(c=>r&&r(c)).catch(c=>{c.name==="AbortError"&&n?n():r&&r(null)}),l}static xdomainRequest(e,t,i,s,o,n,r){return e.timeout=o,e.open(t,i),e.onload=()=>{let 
a=this.parseJSON(e.responseText);r&&r(a)},n&&(e.ontimeout=n),e.onprogress=()=>{},e.send(s),e}static xhrRequest(e,t,i,s,o,n,r,a){e.open(t,i,!0),e.timeout=n;for(let[l,c]of Object.entries(s))e.setRequestHeader(l,c);return e.onerror=()=>a&&a(null),e.onreadystatechange=()=>{if(e.readyState===M.complete&&a){let l=this.parseJSON(e.responseText);a(l)}},r&&(e.ontimeout=r),e.send(o),e}static parseJSON(e){if(!e||e==="")return null;try{return JSON.parse(e)}catch(t){return console&&console.log("failed to parse JSON response",e),null}}static serialize(e,t){let i=[];for(var s in e){if(!Object.prototype.hasOwnProperty.call(e,s))continue;let o=t?`${t}[${s}]`:s,n=e[s];typeof n=="object"?i.push(this.serialize(n,o)):i.push(encodeURIComponent(o)+"="+encodeURIComponent(n))}return i.join("&")}static appendParams(e,t){if(Object.keys(t).length===0)return e;let i=e.match(/\?/)?"&":"?";return`${e}${i}${this.serialize(t)}`}};var X=h=>{let e="",t=new Uint8Array(h),i=t.byteLength;for(let s=0;sthis.poll(),0)}normalizeEndpoint(e){return e.replace("ws://","http://").replace("wss://","https://").replace(new RegExp("(.*)/"+A.websocket),"$1/"+A.longpoll)}endpointURL(){return C.appendParams(this.pollEndpoint,{token:this.token})}closeAndRetry(e,t,i){this.close(e,t,i),this.readyState=g.connecting}ontimeout(){this.onerror("timeout"),this.closeAndRetry(1005,"timeout",!1)}isActive(){return this.readyState===g.open||this.readyState===g.connecting}poll(){let e={Accept:"application/json"};this.authToken&&(e["X-Phoenix-AuthToken"]=this.authToken),this.ajax("GET",e,null,()=>this.ontimeout(),t=>{if(t){var{status:i,token:s,messages:o}=t;if(i===410&&this.token!==null){this.onerror(410),this.closeAndRetry(3410,"session_gone",!1);return}this.token=s}else i=0;switch(i){case 200:o.forEach(n=>{setTimeout(()=>this.onmessage({data:n}),0)}),this.poll();break;case 204:this.poll();break;case 410:this.readyState=g.open,this.onopen({}),this.poll();break;case 403:this.onerror(403),this.close(1008,"forbidden",!1);break;case 
0:case 500:this.onerror(500),this.closeAndRetry(1011,"internal server error",500);break;default:throw new Error(`unhandled poll status ${i}`)}})}send(e){typeof e!="string"&&(e=X(e)),this.currentBatch?this.currentBatch.push(e):this.awaitingBatchAck?this.batchBuffer.push(e):(this.currentBatch=[e],this.currentBatchTimer=setTimeout(()=>{this.batchSend(this.currentBatch),this.currentBatch=null},0))}batchSend(e,t=0){this.awaitingBatchAck=!0;let i=t+B,s=e.slice(t,i);this.ajax("POST",{"Content-Type":"application/x-ndjson"},s.join(` +`),()=>this.onerror("timeout"),o=>{!o||o.status!==200?(this.awaitingBatchAck=!1,this.onerror(o&&o.status),this.closeAndRetry(1011,"internal server error",!1)):i0?(this.batchSend(this.batchBuffer),this.batchBuffer=[]):this.awaitingBatchAck=!1})}close(e,t,i){for(let o of this.reqs)o.abort();this.readyState=g.closed;let s=Object.assign({code:1e3,reason:void 0,wasClean:!0},{code:e,reason:t,wasClean:i});this.batchBuffer=[],clearTimeout(this.currentBatchTimer),this.currentBatchTimer=null,typeof CloseEvent!="undefined"?this.onclose(new CloseEvent("close",s)):this.onclose(s)}ajax(e,t,i,s,o){let n,r=()=>{this.reqs.delete(n),s()};n=C.request(e,this.endpointURL(),t,i,this.timeout,r,a=>{this.reqs.delete(n),this.isActive()&&o(a)}),this.reqs.add(n)}};var _=class h{constructor(e,t={}){let 
i=t.events||{state:"presence_state",diff:"presence_diff"};this.state={},this.pendingDiffs=[],this.channel=e,this.joinRef=null,this.caller={onJoin:function(){},onLeave:function(){},onSync:function(){}},this.channel.on(i.state,s=>{let{onJoin:o,onLeave:n,onSync:r}=this.caller;this.joinRef=this.channel.joinRef(),this.state=h.syncState(this.state,s,o,n),this.pendingDiffs.forEach(a=>{this.state=h.syncDiff(this.state,a,o,n)}),this.pendingDiffs=[],r()}),this.channel.on(i.diff,s=>{let{onJoin:o,onLeave:n,onSync:r}=this.caller;this.inPendingSyncState()?this.pendingDiffs.push(s):(this.state=h.syncDiff(this.state,s,o,n),r())})}onJoin(e){this.caller.onJoin=e}onLeave(e){this.caller.onLeave=e}onSync(e){this.caller.onSync=e}list(e){return h.list(this.state,e)}inPendingSyncState(){return!this.joinRef||this.joinRef!==this.channel.joinRef()}static syncState(e,t,i,s){let o=this.clone(e),n={},r={};return this.map(o,(a,l)=>{t[a]||(r[a]=l)}),this.map(t,(a,l)=>{let c=o[a];if(c){let b=l.metas.map(f=>f.phx_ref),d=c.metas.map(f=>f.phx_ref),E=l.metas.filter(f=>d.indexOf(f.phx_ref)<0),S=c.metas.filter(f=>b.indexOf(f.phx_ref)<0);E.length>0&&(n[a]=l,n[a].metas=E),S.length>0&&(r[a]=this.clone(c),r[a].metas=S)}else n[a]=l}),this.syncDiff(o,{joins:n,leaves:r},i,s)}static syncDiff(e,t,i,s){let{joins:o,leaves:n}=this.clone(t);return i||(i=function(){}),s||(s=function(){}),this.map(o,(r,a)=>{let l=e[r];if(e[r]=this.clone(a),l){let c=e[r].metas.map(d=>d.phx_ref),b=l.metas.filter(d=>c.indexOf(d.phx_ref)<0);e[r].metas.unshift(...b)}i(r,l,a)}),this.map(n,(r,a)=>{let l=e[r];if(!l)return;let c=a.metas.map(b=>b.phx_ref);l.metas=l.metas.filter(b=>c.indexOf(b.phx_ref)<0),s(r,l,a),l.metas.length===0&&delete e[r]}),e}static list(e,t){return t||(t=function(i,s){return s}),this.map(e,(i,s)=>t(i,s))}static map(e,t){return Object.getOwnPropertyNames(e).map(i=>t(i,e[i]))}static clone(e){return JSON.parse(JSON.stringify(e))}};var 
L={HEADER_LENGTH:1,META_LENGTH:4,KINDS:{push:0,reply:1,broadcast:2},encode(h,e){if(h.payload.constructor===ArrayBuffer)return e(this.binaryEncode(h));{let t=[h.join_ref,h.ref,h.topic,h.event,h.payload];return e(JSON.stringify(t))}},decode(h,e){if(h.constructor===ArrayBuffer)return e(this.binaryDecode(h));{let[t,i,s,o,n]=JSON.parse(h);return e({join_ref:t,ref:i,topic:s,event:o,payload:n})}},binaryEncode(h){let{join_ref:e,ref:t,event:i,topic:s,payload:o}=h,n=new TextEncoder,r=n.encode(e),a=n.encode(t),l=n.encode(s),c=n.encode(i);this.assertFieldSize(r.byteLength,"join_ref"),this.assertFieldSize(a.byteLength,"ref"),this.assertFieldSize(l.byteLength,"topic"),this.assertFieldSize(c.byteLength,"event");let b=this.META_LENGTH+r.byteLength+a.byteLength+l.byteLength+c.byteLength,d=new ArrayBuffer(this.HEADER_LENGTH+b),E=new Uint8Array(d),S=new DataView(d),f=0;S.setUint8(f++,this.KINDS.push),S.setUint8(f++,r.byteLength),S.setUint8(f++,a.byteLength),S.setUint8(f++,l.byteLength),S.setUint8(f++,c.byteLength),E.set(r,f),f+=r.byteLength,E.set(a,f),f+=a.byteLength,E.set(l,f),f+=l.byteLength,E.set(c,f),f+=c.byteLength;var N=new Uint8Array(d.byteLength+o.byteLength);return N.set(E,0),N.set(new Uint8Array(o),d.byteLength),N.buffer},assertFieldSize(h,e){if(h>255)throw new Error(`unable to convert ${e} to binary: must be less than or equal to 255 bytes, but is ${h} bytes`)},binaryDecode(h){let e=new DataView(h),t=e.getUint8(0),i=new TextDecoder;switch(t){case this.KINDS.push:return this.decodePush(h,e,i);case this.KINDS.reply:return this.decodeReply(h,e,i);case this.KINDS.broadcast:return this.decodeBroadcast(h,e,i)}},decodePush(h,e,t){let i=e.getUint8(1),s=e.getUint8(2),o=e.getUint8(3),n=this.HEADER_LENGTH+this.META_LENGTH-1,r=t.decode(h.slice(n,n+i));n=n+i;let a=t.decode(h.slice(n,n+s));n=n+s;let l=t.decode(h.slice(n,n+o));n=n+o;let c=h.slice(n,h.byteLength);return{join_ref:r,ref:null,topic:a,event:l,payload:c}},decodeReply(h,e,t){let 
i=e.getUint8(1),s=e.getUint8(2),o=e.getUint8(3),n=e.getUint8(4),r=this.HEADER_LENGTH+this.META_LENGTH,a=t.decode(h.slice(r,r+i));r=r+i;let l=t.decode(h.slice(r,r+s));r=r+s;let c=t.decode(h.slice(r,r+o));r=r+o;let b=t.decode(h.slice(r,r+n));r=r+n;let d=h.slice(r,h.byteLength),E={status:b,response:d};return{join_ref:a,ref:l,topic:c,event:m.reply,payload:E}},decodeBroadcast(h,e,t){let i=e.getUint8(1),s=e.getUint8(2),o=this.HEADER_LENGTH+2,n=t.decode(h.slice(o,o+i));o=o+i;let r=t.decode(h.slice(o,o+s));o=o+s;let a=h.slice(o,h.byteLength);return{join_ref:null,ref:null,topic:n,event:r,payload:a}}};var x=class{constructor(e,t={}){this.stateChangeCallbacks={open:[],close:[],error:[],message:[]},this.channels=[],this.sendBuffer=[],this.ref=0,this.fallbackRef=null,this.timeout=t.timeout||P,this.transport=t.transport||p.WebSocket||T,this.primaryPassedHealthCheck=!1,this.longPollFallbackMs=t.longPollFallbackMs,this.fallbackTimer=null,this.sessionStore=t.sessionStorage||p&&p.sessionStorage,this.establishedConnections=0,this.defaultEncoder=L.encode.bind(L),this.defaultDecoder=L.decode.bind(L),this.closeWasClean=!0,this.disconnecting=!1,this.binaryType=t.binaryType||"arraybuffer",this.connectClock=1,this.pageHidden=!1,this.transport!==T?(this.encode=t.encode||this.defaultEncoder,this.decode=t.decode||this.defaultDecoder):(this.encode=this.defaultEncoder,this.decode=this.defaultDecoder);let 
i=null;v&&v.addEventListener&&(v.addEventListener("pagehide",s=>{this.conn&&(this.disconnect(),i=this.connectClock)}),v.addEventListener("pageshow",s=>{i===this.connectClock&&(i=null,this.connect())}),v.addEventListener("visibilitychange",()=>{document.visibilityState==="hidden"?this.pageHidden=!0:(this.pageHidden=!1,!this.isConnected()&&!this.closeWasClean&&this.teardown(()=>this.connect()))})),this.heartbeatIntervalMs=t.heartbeatIntervalMs||3e4,this.rejoinAfterMs=s=>t.rejoinAfterMs?t.rejoinAfterMs(s):[1e3,2e3,5e3][s-1]||1e4,this.reconnectAfterMs=s=>t.reconnectAfterMs?t.reconnectAfterMs(s):[10,50,100,150,200,250,500,1e3,2e3][s-1]||5e3,this.logger=t.logger||null,!this.logger&&t.debug&&(this.logger=(s,o,n)=>{console.log(`${s}: ${o}`,n)}),this.longpollerTimeout=t.longpollerTimeout||2e4,this.params=R(t.params||{}),this.endPoint=`${e}/${A.websocket}`,this.vsn=t.vsn||O,this.heartbeatTimeoutTimer=null,this.heartbeatTimer=null,this.pendingHeartbeatRef=null,this.reconnectTimer=new k(()=>{if(this.pageHidden){this.log("Not reconnecting as page is hidden!"),this.teardown();return}this.teardown(()=>this.connect())},this.reconnectAfterMs),this.authToken=t.authToken}getLongPollTransport(){return T}replaceTransport(e){this.connectClock++,this.closeWasClean=!0,clearTimeout(this.fallbackTimer),this.reconnectTimer.reset(),this.conn&&(this.conn.close(),this.conn=null),this.transport=e}protocol(){return location.protocol.match(/^https/)?"wss":"ws"}endPointURL(){let e=C.appendParams(C.appendParams(this.endPoint,this.params()),{vsn:this.vsn});return e.charAt(0)!=="/"?e:e.charAt(1)==="/"?`${this.protocol()}:${e}`:`${this.protocol()}://${location.host}${e}`}disconnect(e,t,i){this.connectClock++,this.disconnecting=!0,this.closeWasClean=!0,clearTimeout(this.fallbackTimer),this.reconnectTimer.reset(),this.teardown(()=>{this.disconnecting=!1,e&&e()},t,i)}connect(e){e&&(console&&console.log("passing params to connect is deprecated. 
Instead pass :params to the Socket constructor"),this.params=R(e)),!(this.conn&&!this.disconnecting)&&(this.longPollFallbackMs&&this.transport!==T?this.connectWithFallback(T,this.longPollFallbackMs):this.transportConnect())}log(e,t,i){this.logger&&this.logger(e,t,i)}hasLogger(){return this.logger!==null}onOpen(e){let t=this.makeRef();return this.stateChangeCallbacks.open.push([t,e]),t}onClose(e){let t=this.makeRef();return this.stateChangeCallbacks.close.push([t,e]),t}onError(e){let t=this.makeRef();return this.stateChangeCallbacks.error.push([t,e]),t}onMessage(e){let t=this.makeRef();return this.stateChangeCallbacks.message.push([t,e]),t}ping(e){if(!this.isConnected())return!1;let t=this.makeRef(),i=Date.now();this.push({topic:"phoenix",event:"heartbeat",payload:{},ref:t});let s=this.onMessage(o=>{o.ref===t&&(this.off([s]),e(Date.now()-i))});return!0}transportName(e){switch(e){case T:return"LongPoll";default:return e.name}}transportConnect(){this.connectClock++,this.closeWasClean=!1;let e;this.authToken&&(e=["phoenix",`${w}${btoa(this.authToken).replace(/=/g,"")}`]),this.conn=new this.transport(this.endPointURL(),e),this.conn.binaryType=this.binaryType,this.conn.timeout=this.longpollerTimeout,this.conn.onopen=()=>this.onConnOpen(),this.conn.onerror=t=>this.onConnError(t),this.conn.onmessage=t=>this.onConnMessage(t),this.conn.onclose=t=>this.onConnClose(t)}getSession(e){return this.sessionStore&&this.sessionStore.getItem(e)}storeSession(e,t){this.sessionStore&&this.sessionStore.setItem(e,t)}connectWithFallback(e,t=2500){clearTimeout(this.fallbackTimer);let i=!1,s=!0,o,n,r=this.transportName(e),a=l=>{this.log("transport",`falling back to ${r}...`,l),this.off([o,n]),s=!1,this.replaceTransport(e),this.transportConnect()};if(this.getSession(`phx:fallback:${r}`))return 
a("memorized");this.fallbackTimer=setTimeout(a,t),n=this.onError(l=>{this.log("transport","error",l),s&&!i&&(clearTimeout(this.fallbackTimer),a(l))}),this.fallbackRef&&this.off([this.fallbackRef]),this.fallbackRef=this.onOpen(()=>{if(i=!0,!s){let l=this.transportName(e);return this.primaryPassedHealthCheck||this.storeSession(`phx:fallback:${l}`,"true"),this.log("transport",`established ${l} fallback`)}clearTimeout(this.fallbackTimer),this.fallbackTimer=setTimeout(a,t),this.ping(l=>{this.log("transport","connected to primary after",l),this.primaryPassedHealthCheck=!0,clearTimeout(this.fallbackTimer)})}),this.transportConnect()}clearHeartbeats(){clearTimeout(this.heartbeatTimer),clearTimeout(this.heartbeatTimeoutTimer)}onConnOpen(){this.hasLogger()&&this.log("transport",`${this.transportName(this.transport)} connected to ${this.endPointURL()}`),this.closeWasClean=!1,this.disconnecting=!1,this.establishedConnections++,this.flushSendBuffer(),this.reconnectTimer.reset(),this.resetHeartbeat(),this.stateChangeCallbacks.open.forEach(([,e])=>e())}heartbeatTimeout(){this.pendingHeartbeatRef&&(this.pendingHeartbeatRef=null,this.hasLogger()&&this.log("transport","heartbeat timeout. 
Attempting to re-establish connection"),this.triggerChanError(),this.closeWasClean=!1,this.teardown(()=>this.reconnectTimer.scheduleTimeout(),$,"heartbeat timeout"))}resetHeartbeat(){this.conn&&this.conn.skipHeartbeat||(this.pendingHeartbeatRef=null,this.clearHeartbeats(),this.heartbeatTimer=setTimeout(()=>this.sendHeartbeat(),this.heartbeatIntervalMs))}teardown(e,t,i){if(!this.conn)return e&&e();let s=this.conn;this.waitForBufferDone(s,()=>{t?s.close(t,i||""):s.close(),this.waitForSocketClosed(s,()=>{this.conn===s&&(this.conn.onopen=function(){},this.conn.onerror=function(){},this.conn.onmessage=function(){},this.conn.onclose=function(){},this.conn=null),e&&e()})})}waitForBufferDone(e,t,i=1){if(i===5||!e.bufferedAmount){t();return}setTimeout(()=>{this.waitForBufferDone(e,t,i+1)},150*i)}waitForSocketClosed(e,t,i=1){if(i===5||e.readyState===g.closed){t();return}setTimeout(()=>{this.waitForSocketClosed(e,t,i+1)},150*i)}onConnClose(e){this.conn&&(this.conn.onclose=()=>{});let t=e&&e.code;this.hasLogger()&&this.log("transport","close",e),this.triggerChanError(),this.clearHeartbeats(),!this.closeWasClean&&t!==1e3&&this.reconnectTimer.scheduleTimeout(),this.stateChangeCallbacks.close.forEach(([,i])=>i(e))}onConnError(e){this.hasLogger()&&this.log("transport","error",e);let t=this.transport,i=this.establishedConnections;this.stateChangeCallbacks.error.forEach(([,s])=>{s(e,t,i)}),(t===this.transport||i>0)&&this.triggerChanError()}triggerChanError(){this.channels.forEach(e=>{e.isErrored()||e.isLeaving()||e.isClosed()||e.trigger(m.error)})}connectionState(){switch(this.conn&&this.conn.readyState){case g.connecting:return"connecting";case g.open:return"open";case g.closing:return"closing";default:return"closed"}}isConnected(){return this.connectionState()==="open"}remove(e){this.off(e.stateChangeRefs),this.channels=this.channels.filter(t=>t!==e)}off(e){for(let t in 
this.stateChangeCallbacks)this.stateChangeCallbacks[t]=this.stateChangeCallbacks[t].filter(([i])=>e.indexOf(i)===-1)}channel(e,t={}){let i=new j(e,t,this);return this.channels.push(i),i}push(e){if(this.hasLogger()){let{topic:t,event:i,payload:s,ref:o,join_ref:n}=e;this.log("push",`${t} ${i} (${n}, ${o})`,s)}this.isConnected()?this.encode(e,t=>this.conn.send(t)):this.sendBuffer.push(()=>this.encode(e,t=>this.conn.send(t)))}makeRef(){let e=this.ref+1;return e===this.ref?this.ref=0:this.ref=e,this.ref.toString()}sendHeartbeat(){this.pendingHeartbeatRef&&!this.isConnected()||(this.pendingHeartbeatRef=this.makeRef(),this.push({topic:"phoenix",event:"heartbeat",payload:{},ref:this.pendingHeartbeatRef}),this.heartbeatTimeoutTimer=setTimeout(()=>this.heartbeatTimeout(),this.heartbeatIntervalMs))}flushSendBuffer(){this.isConnected()&&this.sendBuffer.length>0&&(this.sendBuffer.forEach(e=>e()),this.sendBuffer=[])}onConnMessage(e){this.decode(e.data,t=>{let{topic:i,event:s,payload:o,ref:n,join_ref:r}=t;n&&n===this.pendingHeartbeatRef&&(this.clearHeartbeats(),this.pendingHeartbeatRef=null,this.heartbeatTimer=setTimeout(()=>this.sendHeartbeat(),this.heartbeatIntervalMs)),this.hasLogger()&&this.log("receive",`${o.status||""} ${i} ${s} ${n&&"("+n+")"||""}`,o);for(let a=0;ai.topic===e&&(i.isJoined()||i.isJoining()));t&&(this.hasLogger()&&this.log("transport",`leaving duplicate topic "${e}"`),t.leave())}};return J(G);})(); diff --git a/deps/phoenix/priv/static/phoenix.mjs b/deps/phoenix/priv/static/phoenix.mjs new file mode 100644 index 0000000..aa10a66 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.mjs @@ -0,0 +1,1645 @@ +// js/phoenix/utils.js +var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } +}; + +// js/phoenix/constants.js +var globalSelf = typeof self !== "undefined" ? self : null; +var phxWindow = typeof window !== "undefined" ? 
window : null; +var global = globalSelf || phxWindow || globalThis; +var DEFAULT_VSN = "2.0.0"; +var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; +var MAX_LONGPOLL_BATCH_SIZE = 100; +var DEFAULT_TIMEOUT = 1e4; +var WS_CLOSE_NORMAL = 1e3; +var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" +}; +var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +}; +var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +}; +var XHR_STATES = { + complete: 4 +}; +var AUTH_TOKEN_PREFIX = "base64url.bearer.phx."; + +// js/phoenix/push.js +var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + /** + * + * @param {number} timeout + */ + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + /** + * + */ + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + /** + * + * @param {*} status + * @param {*} callback + */ + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + /** + * @private + */ + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + /** + * @private + */ + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + /** + * 
@private + */ + cancelRefEvent() { + if (!this.refEvent) { + return; + } + this.channel.off(this.refEvent); + } + /** + * @private + */ + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + /** + * @private + */ + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + /** + * @private + */ + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + /** + * @private + */ + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } +}; + +// js/phoenix/timer.js +var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + clearTimeout(this.timer); + } + /** + * Cancels any previous scheduleTimeout and schedules callback + */ + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } +}; + +// js/phoenix/channel.js +var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, 
this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push( + this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + }) + ); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + /** + * Join the channel + * @param {integer} timeout + * @returns {Push} + */ + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 
'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + /** + * Hook into channel close + * @param {Function} callback + */ + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + /** + * Hook into channel errors + * @param {Function} callback + */ + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + /** + * Subscribes on channel events + * + * Subscription returns a ref counter, which can be used later to + * unsubscribe the exact event listener + * + * @example + * const ref1 = channel.on("event", do_stuff) + * const ref2 = channel.on("event", do_other_stuff) + * channel.off("event", ref1) + * // Since unsubscription, do_stuff won't fire, + * // while do_other_stuff will keep firing on the "event" + * + * @param {string} event + * @param {Function} callback + * @returns {integer} ref + */ + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + /** + * Unsubscribes off of channel events + * + * Use the ref returned from a channel.on() to unsubscribe one + * handler, or pass nothing for the ref to unsubscribe all + * handlers for the given event. + * + * @example + * // Unsubscribe the do_stuff handler + * const ref1 = channel.on("event", do_stuff) + * channel.off("event", ref1) + * + * // Unsubscribe all handlers from event + * channel.off("event") + * + * @param {string} event + * @param {integer} ref + */ + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + /** + * @private + */ + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + /** + * Sends a message `event` to phoenix with the payload `payload`. + * Phoenix receives this in the `handle_in(event, payload, socket)` + * function. 
if phoenix replies or it times out (default 10000ms), + * then optionally the reply can be received. + * + * @example + * channel.push("event") + * .receive("ok", payload => console.log("phoenix replied:", payload)) + * .receive("error", err => console.log("phoenix errored", err)) + * .receive("timeout", () => console.log("timed out pushing")) + * @param {string} event + * @param {Object} payload + * @param {number} [timeout] + * @returns {Push} + */ + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + /** Leaves the channel + * + * Unsubscribes from server events, and + * instructs channel to terminate on server + * + * Triggers onClose() hooks + * + * To receive leave acknowledgements, use the `receive` + * hook to bind to the server ack, ie: + * + * @example + * channel.leave().receive("ok", () => alert("left!") ) + * + * @param {integer} timeout + * @returns {Push} + */ + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + /** + * Overridable message hook + * + * Receives all events for specialized message handling + * before dispatching to the channel callbacks. 
+ * + * Must return the payload, modified or unmodified + * @param {string} event + * @param {Object} payload + * @param {integer} ref + * @returns {Object} + */ + onMessage(_event, payload, _ref) { + return payload; + } + /** + * @private + */ + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + /** + * @private + */ + joinRef() { + return this.joinPush.ref; + } + /** + * @private + */ + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + /** + * @private + */ + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + /** + * @private + */ + replyEventName(ref) { + return `chan_reply_${ref}`; + } + /** + * @private + */ + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + /** + * @private + */ + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + /** + * @private + */ + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + /** + * @private + */ + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + /** + * @private + */ + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } +}; + +// js/phoenix/ajax.js +var Ajax = class { + static request(method, endPoint, headers, body, timeout, 
ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else if (global.XMLHttpRequest) { + let req = new global.XMLHttpRequest(); + return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback); + } else if (global.fetch && global.AbortController) { + return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback); + } else { + throw new Error("No suitable XMLHttpRequest implementation found"); + } + } + static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback) { + let options = { + method, + headers, + body + }; + let controller = null; + if (timeout) { + controller = new AbortController(); + const _timeoutId = setTimeout(() => controller.abort(), timeout); + options.signal = controller.signal; + } + global.fetch(endPoint, options).then((response) => response.text()).then((data) => this.parseJSON(data)).then((data) => callback && callback(data)).catch((err) => { + if (err.name === "AbortError" && ontimeout) { + ontimeout(); + } else { + callback && callback(null); + } + }); + return controller; + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + for (let [key, value] of Object.entries(headers)) { + req.setRequestHeader(key, value); + } + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + let 
response = this.parseJSON(req.responseText); + callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? "&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } +}; + +// js/phoenix/longpoll.js +var arrayBufferToBase64 = (buffer) => { + let binary = ""; + let bytes = new Uint8Array(buffer); + let len = bytes.byteLength; + for (let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); +}; +var LongPoll = class { + constructor(endPoint, protocols) { + if (protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)) { + this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length)); + } + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.awaitingBatchAck = false; + this.currentBatch = null; + this.currentBatchTimer = null; + this.batchBuffer = []; + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState = 
SOCKET_STATES.connecting; + setTimeout(() => this.poll(), 0); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + const headers = { "Accept": "application/json" }; + if (this.authToken) { + headers["X-Phoenix-AuthToken"] = this.authToken; + } + this.ajax("GET", headers, null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + if (status === 410 && this.token !== null) { + this.onerror(410); + this.closeAndRetry(3410, "session_gone", false); + return; + } + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + // we collect all pushes within the current event loop by + // setTimeout 0, which optimizes back-to-back procedural + // pushes against an empty buffer + send(body) { + if (typeof body !== "string") { + body = arrayBufferToBase64(body); + } + if (this.currentBatch) { + this.currentBatch.push(body); + } 
else if (this.awaitingBatchAck) { + this.batchBuffer.push(body); + } else { + this.currentBatch = [body]; + this.currentBatchTimer = setTimeout(() => { + this.batchSend(this.currentBatch); + this.currentBatch = null; + }, 0); + } + } + batchSend(messages, offset = 0) { + this.awaitingBatchAck = true; + const next = offset + MAX_LONGPOLL_BATCH_SIZE; + const batch = messages.slice(offset, next); + this.ajax("POST", { "Content-Type": "application/x-ndjson" }, batch.join("\n"), () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.awaitingBatchAck = false; + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } else if (next < messages.length) { + this.batchSend(messages, next); + } else if (this.batchBuffer.length > 0) { + this.batchSend(this.batchBuffer); + this.batchBuffer = []; + } else { + this.awaitingBatchAck = false; + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + this.batchBuffer = []; + clearTimeout(this.currentBatchTimer); + this.currentBatchTimer = null; + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, headers, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } +}; + +// js/phoenix/presence.js +var Presence = class _Presence { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + 
this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = _Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = []; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = _Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return _Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + // lower-level public static API + /** + * Used to sync the list of presences on the server + * with the client's state. An optional `onJoin` and `onLeave` callback can + * be provided to react to changes in the client's local presences across + * disconnects and reconnects with the server. 
+ * + * @returns {Presence} + */ + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + /** + * + * Used to sync a diff of presence join and leave + * events from the server, as they happen. Like `syncState`, `syncDiff` + * accepts optional `onJoin` and `onLeave` callbacks to react to a user + * joining or leaving from a device. 
+ * + * @returns {Presence} + */ + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + /** + * Returns the array of presences, with selected metadata. 
+ * + * @param {Object} presences + * @param {Function} chooser + * + * @returns {Presence} + */ + static list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + // private + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } +}; + +// js/phoenix/serializer.js +var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + // private + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let encoder = new TextEncoder(); + let joinRefBytes = encoder.encode(join_ref); + let refBytes = encoder.encode(ref); + let topicBytes = encoder.encode(topic); + let eventBytes = encoder.encode(event); + this.assertFieldSize(joinRefBytes.byteLength, "join_ref"); + this.assertFieldSize(refBytes.byteLength, "ref"); + this.assertFieldSize(topicBytes.byteLength, "topic"); + this.assertFieldSize(eventBytes.byteLength, "event"); + let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let headerBytes = new Uint8Array(header); + let view = new DataView(header); + let offset = 0; + 
view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, joinRefBytes.byteLength); + view.setUint8(offset++, refBytes.byteLength); + view.setUint8(offset++, topicBytes.byteLength); + view.setUint8(offset++, eventBytes.byteLength); + headerBytes.set(joinRefBytes, offset); + offset += joinRefBytes.byteLength; + headerBytes.set(refBytes, offset); + offset += refBytes.byteLength; + headerBytes.set(topicBytes, offset); + offset += topicBytes.byteLength; + headerBytes.set(eventBytes, offset); + offset += eventBytes.byteLength; + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(headerBytes, 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + assertFieldSize(size, name) { + if (size > 255) { + throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`); + } + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + let 
joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } +}; + +// js/phoenix/socket.js +var Socket = class { + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.fallbackRef = null; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.primaryPassedHealthCheck = false; + this.longPollFallbackMs = opts.longPollFallbackMs; + this.fallbackTimer = null; + this.sessionStore = opts.sessionStorage || global && global.sessionStorage; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder = 
serializer_default.decode.bind(serializer_default); + this.closeWasClean = true; + this.disconnecting = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + this.pageHidden = false; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + phxWindow.addEventListener("visibilitychange", () => { + if (document.visibilityState === "hidden") { + this.pageHidden = true; + } else { + this.pageHidden = false; + if (!this.isConnected() && !this.closeWasClean) { + this.teardown(() => this.connect()); + } + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + if (!this.logger && opts.debug) { + this.logger = (kind, msg, data) => { + console.log(`${kind}: ${msg}`, data); + }; + } + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimeoutTimer = null; + this.heartbeatTimer = null; 
+ this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + if (this.pageHidden) { + this.log("Not reconnecting as page is hidden!"); + this.teardown(); + return; + } + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + this.authToken = opts.authToken; + } + /** + * Returns the LongPoll transport reference + */ + getLongPollTransport() { + return LongPoll; + } + /** + * Disconnects and replaces the active transport + * + * @param {Function} newTransport - The new transport class to instantiate + * + */ + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + /** + * Returns the socket protocol + * + * @returns {string} + */ + protocol() { + return location.protocol.match(/^https/) ? "wss" : "ws"; + } + /** + * The fully qualified socket url + * + * @returns {string} + */ + endPointURL() { + let uri = Ajax.appendParams( + Ajax.appendParams(this.endPoint, this.params()), + { vsn: this.vsn } + ); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + /** + * Disconnects the socket + * + * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes. + * + * @param {Function} callback - Optional callback which is called after socket is disconnected. + * @param {integer} code - A status code for disconnection (Optional). + * @param {string} reason - A textual description of the reason to disconnect. 
(Optional) + */ + disconnect(callback, code, reason) { + this.connectClock++; + this.disconnecting = true; + this.closeWasClean = true; + clearTimeout(this.fallbackTimer); + this.reconnectTimer.reset(); + this.teardown(() => { + this.disconnecting = false; + callback && callback(); + }, code, reason); + } + /** + * + * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}` + * + * Passing params to connect is deprecated; pass them in the Socket constructor instead: + * `new Socket("/socket", {params: {user_id: userToken}})`. + */ + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn && !this.disconnecting) { + return; + } + if (this.longPollFallbackMs && this.transport !== LongPoll) { + this.connectWithFallback(LongPoll, this.longPollFallbackMs); + } else { + this.transportConnect(); + } + } + /** + * Logs the message. Override `this.logger` for specialized logging. noops by default + * @param {string} kind + * @param {string} msg + * @param {Object} data + */ + log(kind, msg, data) { + this.logger && this.logger(kind, msg, data); + } + /** + * Returns true if a logger has been set on this socket. 
+ */ + hasLogger() { + return this.logger !== null; + } + /** + * Registers callbacks for connection open events + * + * @example socket.onOpen(function(){ console.info("the socket was opened") }) + * + * @param {Function} callback + */ + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection close events + * @param {Function} callback + */ + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection error events + * + * @example socket.onError(function(error){ alert("An error occurred") }) + * + * @param {Function} callback + */ + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + /** + * Registers callbacks for connection message events + * @param {Function} callback + */ + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + /** + * Pings the server and invokes the callback with the RTT in milliseconds + * @param {Function} callback + * + * Returns true if the ping was pushed or false if unable to be pushed. 
+ */ + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + /** + * @private + * + * @param {Function} + */ + transportName(transport) { + switch (transport) { + case LongPoll: + return "LongPoll"; + default: + return transport.name; + } + } + /** + * @private + */ + transportConnect() { + this.connectClock++; + this.closeWasClean = false; + let protocols = void 0; + if (this.authToken) { + protocols = ["phoenix", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, "")}`]; + } + this.conn = new this.transport(this.endPointURL(), protocols); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + getSession(key) { + return this.sessionStore && this.sessionStore.getItem(key); + } + storeSession(key, val) { + this.sessionStore && this.sessionStore.setItem(key, val); + } + connectWithFallback(fallbackTransport, fallbackThreshold = 2500) { + clearTimeout(this.fallbackTimer); + let established = false; + let primaryTransport = true; + let openRef, errorRef; + let fallbackTransportName = this.transportName(fallbackTransport); + let fallback = (reason) => { + this.log("transport", `falling back to ${fallbackTransportName}...`, reason); + this.off([openRef, errorRef]); + primaryTransport = false; + this.replaceTransport(fallbackTransport); + this.transportConnect(); + }; + if (this.getSession(`phx:fallback:${fallbackTransportName}`)) { + return fallback("memorized"); + } + this.fallbackTimer = setTimeout(fallback, 
fallbackThreshold); + errorRef = this.onError((reason) => { + this.log("transport", "error", reason); + if (primaryTransport && !established) { + clearTimeout(this.fallbackTimer); + fallback(reason); + } + }); + if (this.fallbackRef) { + this.off([this.fallbackRef]); + } + this.fallbackRef = this.onOpen(() => { + established = true; + if (!primaryTransport) { + let fallbackTransportName2 = this.transportName(fallbackTransport); + if (!this.primaryPassedHealthCheck) { + this.storeSession(`phx:fallback:${fallbackTransportName2}`, "true"); + } + return this.log("transport", `established ${fallbackTransportName2} fallback`); + } + clearTimeout(this.fallbackTimer); + this.fallbackTimer = setTimeout(fallback, fallbackThreshold); + this.ping((rtt) => { + this.log("transport", "connected to primary after", rtt); + this.primaryPassedHealthCheck = true; + clearTimeout(this.fallbackTimer); + }); + }); + this.transportConnect(); + } + clearHeartbeats() { + clearTimeout(this.heartbeatTimer); + clearTimeout(this.heartbeatTimeoutTimer); + } + onConnOpen() { + if (this.hasLogger()) this.log("transport", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.disconnecting = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + /** + * @private + */ + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.triggerChanError(); + this.closeWasClean = false; + this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, "heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + this.clearHeartbeats(); + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + const connToClose = this.conn; + this.waitForBufferDone(connToClose, () => { + if (code) { + connToClose.close(code, reason || ""); + } else { + connToClose.close(); + } + this.waitForSocketClosed(connToClose, () => { + if (this.conn === connToClose) { + this.conn.onopen = function() { + }; + this.conn.onerror = function() { + }; + this.conn.onmessage = function() { + }; + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(conn, callback, tries = 1) { + if (tries === 5 || !conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(conn, callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(conn, callback, tries = 1) { + if (tries === 5 || conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(conn, callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + if (this.conn) this.conn.onclose = () => { + }; + let closeCode = event && event.code; + if (this.hasLogger()) this.log("transport", "close", event); + this.triggerChanError(); + this.clearHeartbeats(); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + /** + * @private + */ + onConnError(error) { + if (this.hasLogger()) this.log("transport", "error", error); 
+ let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + /** + * @private + */ + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + /** + * @returns {string} + */ + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + /** + * @returns {boolean} + */ + isConnected() { + return this.connectionState() === "open"; + } + /** + * @private + * + * @param {Channel} + */ + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c !== channel); + } + /** + * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations. 
+ * + * @param {refs} - list of refs returned by calls to + * `onOpen`, `onClose`, `onError,` and `onMessage` + */ + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + /** + * Initiates a new channel for the given topic + * + * @param {string} topic + * @param {Object} chanParams - Parameters for the channel + * @returns {Channel} + */ + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + /** + * @param {Object} data + */ + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + /** + * Return the next message ref, accounting for overflows + * @returns {string} + */ + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + this.clearHeartbeats(); + this.pendingHeartbeatRef = 
null; + this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } +}; +export { + Channel, + LongPoll, + Presence, + serializer_default as Serializer, + Socket +}; +//# sourceMappingURL=phoenix.mjs.map diff --git a/deps/phoenix/priv/static/phoenix.mjs.map b/deps/phoenix/priv/static/phoenix.mjs.map new file mode 100644 index 0000000..e348b41 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.mjs.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["../../assets/js/phoenix/utils.js", "../../assets/js/phoenix/constants.js", "../../assets/js/phoenix/push.js", "../../assets/js/phoenix/timer.js", "../../assets/js/phoenix/channel.js", "../../assets/js/phoenix/ajax.js", "../../assets/js/phoenix/longpoll.js", "../../assets/js/phoenix/presence.js", "../../assets/js/phoenix/serializer.js", "../../assets/js/phoenix/socket.js"], + "sourcesContent": ["// wraps value in closure or returns closure\nexport let closure = (value) => {\n if(typeof value === \"function\"){\n return value\n } else {\n let closure = function (){ return value }\n return closure\n }\n}\n", "export const globalSelf = typeof self !== \"undefined\" ? 
self : null\nexport const phxWindow = typeof window !== \"undefined\" ? window : null\nexport const global = globalSelf || phxWindow || globalThis\nexport const DEFAULT_VSN = \"2.0.0\"\nexport const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3}\nexport const MAX_LONGPOLL_BATCH_SIZE = 100;\nexport const DEFAULT_TIMEOUT = 10000\nexport const WS_CLOSE_NORMAL = 1000\nexport const CHANNEL_STATES = {\n closed: \"closed\",\n errored: \"errored\",\n joined: \"joined\",\n joining: \"joining\",\n leaving: \"leaving\",\n}\nexport const CHANNEL_EVENTS = {\n close: \"phx_close\",\n error: \"phx_error\",\n join: \"phx_join\",\n reply: \"phx_reply\",\n leave: \"phx_leave\"\n}\n\nexport const TRANSPORTS = {\n longpoll: \"longpoll\",\n websocket: \"websocket\"\n}\nexport const XHR_STATES = {\n complete: 4\n}\nexport const AUTH_TOKEN_PREFIX = \"base64url.bearer.phx.\"\n", "/**\n * Initializes the Push\n * @param {Channel} channel - The Channel\n * @param {string} event - The event, for example `\"phx_join\"`\n * @param {Object} payload - The payload, for example `{user_id: 123}`\n * @param {number} timeout - The push timeout in milliseconds\n */\nexport default class Push {\n constructor(channel, event, payload, timeout){\n this.channel = channel\n this.event = event\n this.payload = payload || function (){ return {} }\n this.receivedResp = null\n this.timeout = timeout\n this.timeoutTimer = null\n this.recHooks = []\n this.sent = false\n }\n\n /**\n *\n * @param {number} timeout\n */\n resend(timeout){\n this.timeout = timeout\n this.reset()\n this.send()\n }\n\n /**\n *\n */\n send(){\n if(this.hasReceived(\"timeout\")){ return }\n this.startTimeout()\n this.sent = true\n this.channel.socket.push({\n topic: this.channel.topic,\n event: this.event,\n payload: this.payload(),\n ref: this.ref,\n join_ref: this.channel.joinRef()\n })\n }\n\n /**\n *\n * @param {*} status\n * @param {*} callback\n */\n receive(status, callback){\n if(this.hasReceived(status)){\n 
callback(this.receivedResp.response)\n }\n\n this.recHooks.push({status, callback})\n return this\n }\n\n /**\n * @private\n */\n reset(){\n this.cancelRefEvent()\n this.ref = null\n this.refEvent = null\n this.receivedResp = null\n this.sent = false\n }\n\n /**\n * @private\n */\n matchReceive({status, response, _ref}){\n this.recHooks.filter(h => h.status === status)\n .forEach(h => h.callback(response))\n }\n\n /**\n * @private\n */\n cancelRefEvent(){\n if(!this.refEvent){ return }\n this.channel.off(this.refEvent)\n }\n\n /**\n * @private\n */\n cancelTimeout(){\n clearTimeout(this.timeoutTimer)\n this.timeoutTimer = null\n }\n\n /**\n * @private\n */\n startTimeout(){\n if(this.timeoutTimer){ this.cancelTimeout() }\n this.ref = this.channel.socket.makeRef()\n this.refEvent = this.channel.replyEventName(this.ref)\n\n this.channel.on(this.refEvent, payload => {\n this.cancelRefEvent()\n this.cancelTimeout()\n this.receivedResp = payload\n this.matchReceive(payload)\n })\n\n this.timeoutTimer = setTimeout(() => {\n this.trigger(\"timeout\", {})\n }, this.timeout)\n }\n\n /**\n * @private\n */\n hasReceived(status){\n return this.receivedResp && this.receivedResp.status === status\n }\n\n /**\n * @private\n */\n trigger(status, response){\n this.channel.trigger(this.refEvent, {status, response})\n }\n}\n", "/**\n *\n * Creates a timer that accepts a `timerCalc` function to perform\n * calculated timeout retries, such as exponential backoff.\n *\n * @example\n * let reconnectTimer = new Timer(() => this.connect(), function(tries){\n * return [1000, 5000, 10000][tries - 1] || 10000\n * })\n * reconnectTimer.scheduleTimeout() // fires after 1000\n * reconnectTimer.scheduleTimeout() // fires after 5000\n * reconnectTimer.reset()\n * reconnectTimer.scheduleTimeout() // fires after 1000\n *\n * @param {Function} callback\n * @param {Function} timerCalc\n */\nexport default class Timer {\n constructor(callback, timerCalc){\n this.callback = callback\n this.timerCalc = 
timerCalc\n this.timer = null\n this.tries = 0\n }\n\n reset(){\n this.tries = 0\n clearTimeout(this.timer)\n }\n\n /**\n * Cancels any previous scheduleTimeout and schedules callback\n */\n scheduleTimeout(){\n clearTimeout(this.timer)\n\n this.timer = setTimeout(() => {\n this.tries = this.tries + 1\n this.callback()\n }, this.timerCalc(this.tries + 1))\n }\n}\n", "import {closure} from \"./utils\"\nimport {\n CHANNEL_EVENTS,\n CHANNEL_STATES,\n} from \"./constants\"\n\nimport Push from \"./push\"\nimport Timer from \"./timer\"\n\n/**\n *\n * @param {string} topic\n * @param {(Object|function)} params\n * @param {Socket} socket\n */\nexport default class Channel {\n constructor(topic, params, socket){\n this.state = CHANNEL_STATES.closed\n this.topic = topic\n this.params = closure(params || {})\n this.socket = socket\n this.bindings = []\n this.bindingRef = 0\n this.timeout = this.socket.timeout\n this.joinedOnce = false\n this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout)\n this.pushBuffer = []\n this.stateChangeRefs = []\n\n this.rejoinTimer = new Timer(() => {\n if(this.socket.isConnected()){ this.rejoin() }\n }, this.socket.rejoinAfterMs)\n this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset()))\n this.stateChangeRefs.push(this.socket.onOpen(() => {\n this.rejoinTimer.reset()\n if(this.isErrored()){ this.rejoin() }\n })\n )\n this.joinPush.receive(\"ok\", () => {\n this.state = CHANNEL_STATES.joined\n this.rejoinTimer.reset()\n this.pushBuffer.forEach(pushEvent => pushEvent.send())\n this.pushBuffer = []\n })\n this.joinPush.receive(\"error\", () => {\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.onClose(() => {\n this.rejoinTimer.reset()\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `close ${this.topic} ${this.joinRef()}`)\n this.state = CHANNEL_STATES.closed\n this.socket.remove(this)\n })\n this.onError(reason => {\n 
if(this.socket.hasLogger()) this.socket.log(\"channel\", `error ${this.topic}`, reason)\n if(this.isJoining()){ this.joinPush.reset() }\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.joinPush.receive(\"timeout\", () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout)\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout)\n leavePush.send()\n this.state = CHANNEL_STATES.errored\n this.joinPush.reset()\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.on(CHANNEL_EVENTS.reply, (payload, ref) => {\n this.trigger(this.replyEventName(ref), payload)\n })\n }\n\n /**\n * Join the channel\n * @param {integer} timeout\n * @returns {Push}\n */\n join(timeout = this.timeout){\n if(this.joinedOnce){\n throw new Error(\"tried to join multiple times. 'join' can only be called a single time per channel instance\")\n } else {\n this.timeout = timeout\n this.joinedOnce = true\n this.rejoin()\n return this.joinPush\n }\n }\n\n /**\n * Hook into channel close\n * @param {Function} callback\n */\n onClose(callback){\n this.on(CHANNEL_EVENTS.close, callback)\n }\n\n /**\n * Hook into channel errors\n * @param {Function} callback\n */\n onError(callback){\n return this.on(CHANNEL_EVENTS.error, reason => callback(reason))\n }\n\n /**\n * Subscribes on channel events\n *\n * Subscription returns a ref counter, which can be used later to\n * unsubscribe the exact event listener\n *\n * @example\n * const ref1 = channel.on(\"event\", do_stuff)\n * const ref2 = channel.on(\"event\", do_other_stuff)\n * channel.off(\"event\", ref1)\n * // Since unsubscription, do_stuff won't fire,\n * // while do_other_stuff will keep firing on the \"event\"\n *\n * @param {string} event\n * @param {Function} callback\n * @returns {integer} ref\n */\n on(event, callback){\n let ref = 
this.bindingRef++\n this.bindings.push({event, ref, callback})\n return ref\n }\n\n /**\n * Unsubscribes off of channel events\n *\n * Use the ref returned from a channel.on() to unsubscribe one\n * handler, or pass nothing for the ref to unsubscribe all\n * handlers for the given event.\n *\n * @example\n * // Unsubscribe the do_stuff handler\n * const ref1 = channel.on(\"event\", do_stuff)\n * channel.off(\"event\", ref1)\n *\n * // Unsubscribe all handlers from event\n * channel.off(\"event\")\n *\n * @param {string} event\n * @param {integer} ref\n */\n off(event, ref){\n this.bindings = this.bindings.filter((bind) => {\n return !(bind.event === event && (typeof ref === \"undefined\" || ref === bind.ref))\n })\n }\n\n /**\n * @private\n */\n canPush(){ return this.socket.isConnected() && this.isJoined() }\n\n /**\n * Sends a message `event` to phoenix with the payload `payload`.\n * Phoenix receives this in the `handle_in(event, payload, socket)`\n * function. if phoenix replies or it times out (default 10000ms),\n * then optionally the reply can be received.\n *\n * @example\n * channel.push(\"event\")\n * .receive(\"ok\", payload => console.log(\"phoenix replied:\", payload))\n * .receive(\"error\", err => console.log(\"phoenix errored\", err))\n * .receive(\"timeout\", () => console.log(\"timed out pushing\"))\n * @param {string} event\n * @param {Object} payload\n * @param {number} [timeout]\n * @returns {Push}\n */\n push(event, payload, timeout = this.timeout){\n payload = payload || {}\n if(!this.joinedOnce){\n throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`)\n }\n let pushEvent = new Push(this, event, function (){ return payload }, timeout)\n if(this.canPush()){\n pushEvent.send()\n } else {\n pushEvent.startTimeout()\n this.pushBuffer.push(pushEvent)\n }\n\n return pushEvent\n }\n\n /** Leaves the channel\n *\n * Unsubscribes from server events, and\n * instructs channel to terminate on server\n *\n * Triggers onClose() hooks\n *\n * To receive leave acknowledgements, use the `receive`\n * hook to bind to the server ack, ie:\n *\n * @example\n * channel.leave().receive(\"ok\", () => alert(\"left!\") )\n *\n * @param {integer} timeout\n * @returns {Push}\n */\n leave(timeout = this.timeout){\n this.rejoinTimer.reset()\n this.joinPush.cancelTimeout()\n\n this.state = CHANNEL_STATES.leaving\n let onClose = () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `leave ${this.topic}`)\n this.trigger(CHANNEL_EVENTS.close, \"leave\")\n }\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout)\n leavePush.receive(\"ok\", () => onClose())\n .receive(\"timeout\", () => onClose())\n leavePush.send()\n if(!this.canPush()){ leavePush.trigger(\"ok\", {}) }\n\n return leavePush\n }\n\n /**\n * Overridable message hook\n *\n * Receives all events for specialized message handling\n * before dispatching to the channel callbacks.\n *\n * Must return the payload, modified or unmodified\n * @param {string} event\n * @param {Object} payload\n * @param {integer} ref\n * @returns {Object}\n */\n onMessage(_event, payload, _ref){ return payload }\n\n /**\n * @private\n */\n isMember(topic, event, payload, joinRef){\n if(this.topic !== topic){ return false }\n\n if(joinRef && joinRef !== this.joinRef()){\n if(this.socket.hasLogger()) this.socket.log(\"channel\", \"dropping outdated message\", {topic, event, payload, joinRef})\n return false\n } else {\n return true\n }\n }\n\n /**\n * @private\n */\n joinRef(){ return this.joinPush.ref }\n\n /**\n * @private\n */\n 
rejoin(timeout = this.timeout){\n if(this.isLeaving()){ return }\n this.socket.leaveOpenTopic(this.topic)\n this.state = CHANNEL_STATES.joining\n this.joinPush.resend(timeout)\n }\n\n /**\n * @private\n */\n trigger(event, payload, ref, joinRef){\n let handledPayload = this.onMessage(event, payload, ref, joinRef)\n if(payload && !handledPayload){ throw new Error(\"channel onMessage callbacks must return the payload, modified or unmodified\") }\n\n let eventBindings = this.bindings.filter(bind => bind.event === event)\n\n for(let i = 0; i < eventBindings.length; i++){\n let bind = eventBindings[i]\n bind.callback(handledPayload, ref, joinRef || this.joinRef())\n }\n }\n\n /**\n * @private\n */\n replyEventName(ref){ return `chan_reply_${ref}` }\n\n /**\n * @private\n */\n isClosed(){ return this.state === CHANNEL_STATES.closed }\n\n /**\n * @private\n */\n isErrored(){ return this.state === CHANNEL_STATES.errored }\n\n /**\n * @private\n */\n isJoined(){ return this.state === CHANNEL_STATES.joined }\n\n /**\n * @private\n */\n isJoining(){ return this.state === CHANNEL_STATES.joining }\n\n /**\n * @private\n */\n isLeaving(){ return this.state === CHANNEL_STATES.leaving }\n}\n", "import {\n global,\n XHR_STATES\n} from \"./constants\"\n\nexport default class Ajax {\n\n static request(method, endPoint, headers, body, timeout, ontimeout, callback){\n if(global.XDomainRequest){\n let req = new global.XDomainRequest() // IE8, IE9\n return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback)\n } else if(global.XMLHttpRequest){\n let req = new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari\n return this.xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback)\n } else if(global.fetch && global.AbortController){\n // Fetch with AbortController for modern browsers\n return this.fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback)\n } else {\n throw new Error(\"No suitable XMLHttpRequest 
implementation found\")\n }\n }\n\n static fetchRequest(method, endPoint, headers, body, timeout, ontimeout, callback){\n let options = {\n method,\n headers,\n body,\n }\n let controller = null\n if(timeout){\n controller = new AbortController()\n const _timeoutId = setTimeout(() => controller.abort(), timeout)\n options.signal = controller.signal\n }\n global.fetch(endPoint, options)\n .then(response => response.text())\n .then(data => this.parseJSON(data))\n .then(data => callback && callback(data))\n .catch(err => {\n if(err.name === \"AbortError\" && ontimeout){\n ontimeout()\n } else {\n callback && callback(null)\n }\n })\n return controller\n }\n\n static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){\n req.timeout = timeout\n req.open(method, endPoint)\n req.onload = () => {\n let response = this.parseJSON(req.responseText)\n callback && callback(response)\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n // Work around bug in IE9 that requires an attached onprogress handler\n req.onprogress = () => { }\n\n req.send(body)\n return req\n }\n\n static xhrRequest(req, method, endPoint, headers, body, timeout, ontimeout, callback){\n req.open(method, endPoint, true)\n req.timeout = timeout\n for(let [key, value] of Object.entries(headers)){\n req.setRequestHeader(key, value)\n }\n req.onerror = () => callback && callback(null)\n req.onreadystatechange = () => {\n if(req.readyState === XHR_STATES.complete && callback){\n let response = this.parseJSON(req.responseText)\n callback(response)\n }\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n req.send(body)\n return req\n }\n\n static parseJSON(resp){\n if(!resp || resp === \"\"){ return null }\n\n try {\n return JSON.parse(resp)\n } catch {\n console && console.log(\"failed to parse JSON response\", resp)\n return null\n }\n }\n\n static serialize(obj, parentKey){\n let queryStr = []\n for(var key in obj){\n if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue }\n let 
paramKey = parentKey ? `${parentKey}[${key}]` : key\n let paramVal = obj[key]\n if(typeof paramVal === \"object\"){\n queryStr.push(this.serialize(paramVal, paramKey))\n } else {\n queryStr.push(encodeURIComponent(paramKey) + \"=\" + encodeURIComponent(paramVal))\n }\n }\n return queryStr.join(\"&\")\n }\n\n static appendParams(url, params){\n if(Object.keys(params).length === 0){ return url }\n\n let prefix = url.match(/\\?/) ? \"&\" : \"?\"\n return `${url}${prefix}${this.serialize(params)}`\n }\n}\n", "import {\n SOCKET_STATES,\n TRANSPORTS,\n AUTH_TOKEN_PREFIX,\n MAX_LONGPOLL_BATCH_SIZE\n} from \"./constants\"\n\nimport Ajax from \"./ajax\"\n\nlet arrayBufferToBase64 = (buffer) => {\n let binary = \"\"\n let bytes = new Uint8Array(buffer)\n let len = bytes.byteLength\n for(let i = 0; i < len; i++){ binary += String.fromCharCode(bytes[i]) }\n return btoa(binary)\n}\n\nexport default class LongPoll {\n\n constructor(endPoint, protocols){\n // we only support subprotocols for authToken\n // [\"phoenix\", \"base64url.bearer.phx.BASE64_ENCODED_TOKEN\"]\n if(protocols && protocols.length === 2 && protocols[1].startsWith(AUTH_TOKEN_PREFIX)){\n this.authToken = atob(protocols[1].slice(AUTH_TOKEN_PREFIX.length))\n }\n this.endPoint = null\n this.token = null\n this.skipHeartbeat = true\n this.reqs = new Set()\n this.awaitingBatchAck = false\n this.currentBatch = null\n this.currentBatchTimer = null\n this.batchBuffer = []\n this.onopen = function (){ } // noop\n this.onerror = function (){ } // noop\n this.onmessage = function (){ } // noop\n this.onclose = function (){ } // noop\n this.pollEndpoint = this.normalizeEndpoint(endPoint)\n this.readyState = SOCKET_STATES.connecting\n // we must wait for the caller to finish setting up our callbacks and timeout properties\n setTimeout(() => this.poll(), 0)\n }\n\n normalizeEndpoint(endPoint){\n return (endPoint\n .replace(\"ws://\", \"http://\")\n .replace(\"wss://\", \"https://\")\n .replace(new RegExp(\"(.*)\\/\" + 
TRANSPORTS.websocket), \"$1/\" + TRANSPORTS.longpoll))\n }\n\n endpointURL(){\n return Ajax.appendParams(this.pollEndpoint, {token: this.token})\n }\n\n closeAndRetry(code, reason, wasClean){\n this.close(code, reason, wasClean)\n this.readyState = SOCKET_STATES.connecting\n }\n\n ontimeout(){\n this.onerror(\"timeout\")\n this.closeAndRetry(1005, \"timeout\", false)\n }\n\n isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting }\n\n poll(){\n const headers = {\"Accept\": \"application/json\"}\n if(this.authToken){\n headers[\"X-Phoenix-AuthToken\"] = this.authToken\n }\n this.ajax(\"GET\", headers, null, () => this.ontimeout(), resp => {\n if(resp){\n var {status, token, messages} = resp\n if(status === 410 && this.token !== null){\n // In case we already have a token, this means that our existing session\n // is gone. We fail so that the client rejoins its channels.\n this.onerror(410)\n this.closeAndRetry(3410, \"session_gone\", false)\n return\n }\n this.token = token\n } else {\n status = 0\n }\n\n switch(status){\n case 200:\n messages.forEach(msg => {\n // Tasks are what things like event handlers, setTimeout callbacks,\n // promise resolves and more are run within.\n // In modern browsers, there are two different kinds of tasks,\n // microtasks and macrotasks.\n // Microtasks are mainly used for Promises, while macrotasks are\n // used for everything else.\n // Microtasks always have priority over macrotasks. If the JS engine\n // is looking for a task to run, it will always try to empty the\n // microtask queue before attempting to run anything from the\n // macrotask queue.\n //\n // For the WebSocket transport, messages always arrive in their own\n // event. 
This means that if any promises are resolved from within,\n // their callbacks will always finish execution by the time the\n // next message event handler is run.\n //\n // In order to emulate this behaviour, we need to make sure each\n // onmessage handler is run within its own macrotask.\n setTimeout(() => this.onmessage({data: msg}), 0)\n })\n this.poll()\n break\n case 204:\n this.poll()\n break\n case 410:\n this.readyState = SOCKET_STATES.open\n this.onopen({})\n this.poll()\n break\n case 403:\n this.onerror(403)\n this.close(1008, \"forbidden\", false)\n break\n case 0:\n case 500:\n this.onerror(500)\n this.closeAndRetry(1011, \"internal server error\", 500)\n break\n default: throw new Error(`unhandled poll status ${status}`)\n }\n })\n }\n\n // we collect all pushes within the current event loop by\n // setTimeout 0, which optimizes back-to-back procedural\n // pushes against an empty buffer\n\n send(body){\n if(typeof(body) !== \"string\"){ body = arrayBufferToBase64(body) }\n if(this.currentBatch){\n this.currentBatch.push(body)\n } else if(this.awaitingBatchAck){\n this.batchBuffer.push(body)\n } else {\n this.currentBatch = [body]\n this.currentBatchTimer = setTimeout(() => {\n this.batchSend(this.currentBatch)\n this.currentBatch = null\n }, 0)\n }\n }\n\n batchSend(messages, offset = 0){\n this.awaitingBatchAck = true\n const next = offset + MAX_LONGPOLL_BATCH_SIZE\n const batch = messages.slice(offset, next)\n this.ajax(\"POST\", {\"Content-Type\": \"application/x-ndjson\"}, batch.join(\"\\n\"), () => this.onerror(\"timeout\"), resp => {\n if(!resp || resp.status !== 200){\n this.awaitingBatchAck = false\n this.onerror(resp && resp.status)\n this.closeAndRetry(1011, \"internal server error\", false)\n } else if(next < messages.length){\n this.batchSend(messages, next)\n } else if(this.batchBuffer.length > 0){\n this.batchSend(this.batchBuffer)\n this.batchBuffer = []\n } else {\n this.awaitingBatchAck = false\n }\n })\n }\n\n close(code, reason, 
wasClean){\n for(let req of this.reqs){ req.abort() }\n this.readyState = SOCKET_STATES.closed\n let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean})\n this.batchBuffer = []\n clearTimeout(this.currentBatchTimer)\n this.currentBatchTimer = null\n if(typeof(CloseEvent) !== \"undefined\"){\n this.onclose(new CloseEvent(\"close\", opts))\n } else {\n this.onclose(opts)\n }\n }\n\n ajax(method, headers, body, onCallerTimeout, callback){\n let req\n let ontimeout = () => {\n this.reqs.delete(req)\n onCallerTimeout()\n }\n req = Ajax.request(method, this.endpointURL(), headers, body, this.timeout, ontimeout, resp => {\n this.reqs.delete(req)\n if(this.isActive()){ callback(resp) }\n })\n this.reqs.add(req)\n }\n}\n", "/**\n * Initializes the Presence\n * @param {Channel} channel - The Channel\n * @param {Object} opts - The options,\n * for example `{events: {state: \"state\", diff: \"diff\"}}`\n */\nexport default class Presence {\n\n constructor(channel, opts = {}){\n let events = opts.events || {state: \"presence_state\", diff: \"presence_diff\"}\n this.state = {}\n this.pendingDiffs = []\n this.channel = channel\n this.joinRef = null\n this.caller = {\n onJoin: function (){ },\n onLeave: function (){ },\n onSync: function (){ }\n }\n\n this.channel.on(events.state, newState => {\n let {onJoin, onLeave, onSync} = this.caller\n\n this.joinRef = this.channel.joinRef()\n this.state = Presence.syncState(this.state, newState, onJoin, onLeave)\n\n this.pendingDiffs.forEach(diff => {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n })\n this.pendingDiffs = []\n onSync()\n })\n\n this.channel.on(events.diff, diff => {\n let {onJoin, onLeave, onSync} = this.caller\n\n if(this.inPendingSyncState()){\n this.pendingDiffs.push(diff)\n } else {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n onSync()\n }\n })\n }\n\n onJoin(callback){ this.caller.onJoin = callback }\n\n onLeave(callback){ 
this.caller.onLeave = callback }\n\n onSync(callback){ this.caller.onSync = callback }\n\n list(by){ return Presence.list(this.state, by) }\n\n inPendingSyncState(){\n return !this.joinRef || (this.joinRef !== this.channel.joinRef())\n }\n\n // lower-level public static API\n\n /**\n * Used to sync the list of presences on the server\n * with the client's state. An optional `onJoin` and `onLeave` callback can\n * be provided to react to changes in the client's local presences across\n * disconnects and reconnects with the server.\n *\n * @returns {Presence}\n */\n static syncState(currentState, newState, onJoin, onLeave){\n let state = this.clone(currentState)\n let joins = {}\n let leaves = {}\n\n this.map(state, (key, presence) => {\n if(!newState[key]){\n leaves[key] = presence\n }\n })\n this.map(newState, (key, newPresence) => {\n let currentPresence = state[key]\n if(currentPresence){\n let newRefs = newPresence.metas.map(m => m.phx_ref)\n let curRefs = currentPresence.metas.map(m => m.phx_ref)\n let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0)\n let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0)\n if(joinedMetas.length > 0){\n joins[key] = newPresence\n joins[key].metas = joinedMetas\n }\n if(leftMetas.length > 0){\n leaves[key] = this.clone(currentPresence)\n leaves[key].metas = leftMetas\n }\n } else {\n joins[key] = newPresence\n }\n })\n return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave)\n }\n\n /**\n *\n * Used to sync a diff of presence join and leave\n * events from the server, as they happen. 
Like `syncState`, `syncDiff`\n * accepts optional `onJoin` and `onLeave` callbacks to react to a user\n * joining or leaving from a device.\n *\n * @returns {Presence}\n */\n static syncDiff(state, diff, onJoin, onLeave){\n let {joins, leaves} = this.clone(diff)\n if(!onJoin){ onJoin = function (){ } }\n if(!onLeave){ onLeave = function (){ } }\n\n this.map(joins, (key, newPresence) => {\n let currentPresence = state[key]\n state[key] = this.clone(newPresence)\n if(currentPresence){\n let joinedRefs = state[key].metas.map(m => m.phx_ref)\n let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0)\n state[key].metas.unshift(...curMetas)\n }\n onJoin(key, currentPresence, newPresence)\n })\n this.map(leaves, (key, leftPresence) => {\n let currentPresence = state[key]\n if(!currentPresence){ return }\n let refsToRemove = leftPresence.metas.map(m => m.phx_ref)\n currentPresence.metas = currentPresence.metas.filter(p => {\n return refsToRemove.indexOf(p.phx_ref) < 0\n })\n onLeave(key, currentPresence, leftPresence)\n if(currentPresence.metas.length === 0){\n delete state[key]\n }\n })\n return state\n }\n\n /**\n * Returns the array of presences, with selected metadata.\n *\n * @param {Object} presences\n * @param {Function} chooser\n *\n * @returns {Presence}\n */\n static list(presences, chooser){\n if(!chooser){ chooser = function (key, pres){ return pres } }\n\n return this.map(presences, (key, presence) => {\n return chooser(key, presence)\n })\n }\n\n // private\n\n static map(obj, func){\n return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key]))\n }\n\n static clone(obj){ return JSON.parse(JSON.stringify(obj)) }\n}\n", "/* The default serializer for encoding and decoding messages */\nimport {\n CHANNEL_EVENTS\n} from \"./constants\"\n\nexport default {\n HEADER_LENGTH: 1,\n META_LENGTH: 4,\n KINDS: {push: 0, reply: 1, broadcast: 2},\n\n encode(msg, callback){\n if(msg.payload.constructor === ArrayBuffer){\n return 
callback(this.binaryEncode(msg))\n } else {\n let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]\n return callback(JSON.stringify(payload))\n }\n },\n\n decode(rawPayload, callback){\n if(rawPayload.constructor === ArrayBuffer){\n return callback(this.binaryDecode(rawPayload))\n } else {\n let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload)\n return callback({join_ref, ref, topic, event, payload})\n }\n },\n\n // private\n\n binaryEncode(message){\n let {join_ref, ref, event, topic, payload} = message\n let encoder = new TextEncoder()\n let joinRefBytes = encoder.encode(join_ref)\n let refBytes = encoder.encode(ref)\n let topicBytes = encoder.encode(topic)\n let eventBytes = encoder.encode(event)\n\n this.assertFieldSize(joinRefBytes.byteLength, \"join_ref\")\n this.assertFieldSize(refBytes.byteLength, \"ref\")\n this.assertFieldSize(topicBytes.byteLength, \"topic\")\n this.assertFieldSize(eventBytes.byteLength, \"event\")\n\n let metaLength = this.META_LENGTH + joinRefBytes.byteLength + refBytes.byteLength + topicBytes.byteLength + eventBytes.byteLength\n let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength)\n let headerBytes = new Uint8Array(header)\n let view = new DataView(header)\n let offset = 0\n\n view.setUint8(offset++, this.KINDS.push) // kind\n view.setUint8(offset++, joinRefBytes.byteLength)\n view.setUint8(offset++, refBytes.byteLength)\n view.setUint8(offset++, topicBytes.byteLength)\n view.setUint8(offset++, eventBytes.byteLength)\n headerBytes.set(joinRefBytes, offset); offset += joinRefBytes.byteLength\n headerBytes.set(refBytes, offset); offset += refBytes.byteLength\n headerBytes.set(topicBytes, offset); offset += topicBytes.byteLength\n headerBytes.set(eventBytes, offset); offset += eventBytes.byteLength\n\n var combined = new Uint8Array(header.byteLength + payload.byteLength)\n combined.set(headerBytes, 0)\n combined.set(new Uint8Array(payload), header.byteLength)\n\n return combined.buffer\n },\n\n 
assertFieldSize(size, name){\n if(size > 255){\n throw new Error(`unable to convert ${name} to binary: must be less than or equal to 255 bytes, but is ${size} bytes`)\n }\n },\n\n binaryDecode(buffer){\n let view = new DataView(buffer)\n let kind = view.getUint8(0)\n let decoder = new TextDecoder()\n switch(kind){\n case this.KINDS.push: return this.decodePush(buffer, view, decoder)\n case this.KINDS.reply: return this.decodeReply(buffer, view, decoder)\n case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder)\n }\n },\n\n decodePush(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let topicSize = view.getUint8(2)\n let eventSize = view.getUint8(3)\n let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data}\n },\n\n decodeReply(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let refSize = view.getUint8(2)\n let topicSize = view.getUint8(3)\n let eventSize = view.getUint8(4)\n let offset = this.HEADER_LENGTH + this.META_LENGTH\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let ref = decoder.decode(buffer.slice(offset, offset + refSize))\n offset = offset + refSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n let payload = {status: event, response: data}\n return {join_ref: joinRef, ref: ref, 
topic: topic, event: CHANNEL_EVENTS.reply, payload: payload}\n },\n\n decodeBroadcast(buffer, view, decoder){\n let topicSize = view.getUint8(1)\n let eventSize = view.getUint8(2)\n let offset = this.HEADER_LENGTH + 2\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n\n return {join_ref: null, ref: null, topic: topic, event: event, payload: data}\n }\n}\n", "import {\n global,\n phxWindow,\n CHANNEL_EVENTS,\n DEFAULT_TIMEOUT,\n DEFAULT_VSN,\n SOCKET_STATES,\n TRANSPORTS,\n WS_CLOSE_NORMAL,\n AUTH_TOKEN_PREFIX\n} from \"./constants\"\n\nimport {\n closure\n} from \"./utils\"\n\nimport Ajax from \"./ajax\"\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Serializer from \"./serializer\"\nimport Timer from \"./timer\"\n\n/** Initializes the Socket *\n *\n * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim)\n *\n * @param {string} endPoint - The string WebSocket endpoint, ie, `\"ws://example.com/socket\"`,\n * `\"wss://example.com\"`\n * `\"/socket\"` (inherited host & protocol)\n * @param {Object} [opts] - Optional configuration\n * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll.\n *\n * Defaults to WebSocket with automatic LongPoll fallback if WebSocket is not defined.\n * To fallback to LongPoll when WebSocket attempts fail, use `longPollFallbackMs: 2500`.\n *\n * @param {number} [opts.longPollFallbackMs] - The millisecond time to attempt the primary transport\n * before falling back to the LongPoll transport. Disabled by default.\n *\n * @param {boolean} [opts.debug] - When true, enables debug logging. 
Default false.\n *\n * @param {Function} [opts.encode] - The function to encode outgoing messages.\n *\n * Defaults to JSON encoder.\n *\n * @param {Function} [opts.decode] - The function to decode incoming messages.\n *\n * Defaults to JSON:\n *\n * ```javascript\n * (payload, callback) => callback(JSON.parse(payload))\n * ```\n *\n * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts.\n *\n * Defaults `DEFAULT_TIMEOUT`\n * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message\n * @param {Function} [opts.reconnectAfterMs] - The optional function that returns the\n * socket reconnect interval, in milliseconds.\n *\n * Defaults to stepped backoff of:\n *\n * ```javascript\n * function(tries){\n * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n * }\n * ````\n *\n * @param {Function} [opts.rejoinAfterMs] - The optional function that returns the millisec\n * rejoin interval for individual channels.\n *\n * ```javascript\n * function(tries){\n * return [1000, 2000, 5000][tries - 1] || 10000\n * }\n * ````\n *\n * @param {Function} [opts.logger] - The optional function for specialized logging, ie:\n *\n * ```javascript\n * function(kind, msg, data) {\n * console.log(`${kind}: ${msg}`, data)\n * }\n * ```\n *\n * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request.\n *\n * Defaults to 20s (double the server long poll timer).\n *\n * @param {(Object|function)} [opts.params] - The optional params to pass when connecting\n * @param {string} [opts.authToken] - the optional authentication token to be exposed on the server\n * under the `:auth_token` connect_info key.\n * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames.\n *\n * Defaults to \"arraybuffer\"\n *\n * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect.\n *\n * Defaults to DEFAULT_VSN.\n *\n * @param 
{Object} [opts.sessionStorage] - An optional Storage compatible object\n * Phoenix uses sessionStorage for longpoll fallback history. Overriding the store is\n * useful when Phoenix won't have access to `sessionStorage`. For example, This could\n * happen if a site loads a cross-domain channel in an iframe. Example usage:\n *\n * class InMemoryStorage {\n * constructor() { this.storage = {} }\n * getItem(keyName) { return this.storage[keyName] || null }\n * removeItem(keyName) { delete this.storage[keyName] }\n * setItem(keyName, keyValue) { this.storage[keyName] = keyValue }\n * }\n *\n*/\nexport default class Socket {\n constructor(endPoint, opts = {}){\n this.stateChangeCallbacks = {open: [], close: [], error: [], message: []}\n this.channels = []\n this.sendBuffer = []\n this.ref = 0\n this.fallbackRef = null\n this.timeout = opts.timeout || DEFAULT_TIMEOUT\n this.transport = opts.transport || global.WebSocket || LongPoll\n this.primaryPassedHealthCheck = false\n this.longPollFallbackMs = opts.longPollFallbackMs\n this.fallbackTimer = null\n this.sessionStore = opts.sessionStorage || (global && global.sessionStorage)\n this.establishedConnections = 0\n this.defaultEncoder = Serializer.encode.bind(Serializer)\n this.defaultDecoder = Serializer.decode.bind(Serializer)\n // We start with closeWasClean true to avoid the visibility change\n // logic from connecting if the socket was never connected in the first place.\n // transportConnect sets it to false on open.\n this.closeWasClean = true\n this.disconnecting = false\n this.binaryType = opts.binaryType || \"arraybuffer\"\n this.connectClock = 1\n this.pageHidden = false\n if(this.transport !== LongPoll){\n this.encode = opts.encode || this.defaultEncoder\n this.decode = opts.decode || this.defaultDecoder\n } else {\n this.encode = this.defaultEncoder\n this.decode = this.defaultDecoder\n }\n let awaitingConnectionOnPageShow = null\n if(phxWindow && phxWindow.addEventListener){\n 
phxWindow.addEventListener(\"pagehide\", _e => {\n if(this.conn){\n this.disconnect()\n awaitingConnectionOnPageShow = this.connectClock\n }\n })\n phxWindow.addEventListener(\"pageshow\", _e => {\n if(awaitingConnectionOnPageShow === this.connectClock){\n awaitingConnectionOnPageShow = null\n this.connect()\n }\n })\n phxWindow.addEventListener(\"visibilitychange\", () => {\n if(document.visibilityState === \"hidden\"){\n this.pageHidden = true\n } else {\n this.pageHidden = false\n // reconnect immediately\n if(!this.isConnected() && !this.closeWasClean){\n this.teardown(() => this.connect())\n }\n }\n })\n }\n this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000\n this.rejoinAfterMs = (tries) => {\n if(opts.rejoinAfterMs){\n return opts.rejoinAfterMs(tries)\n } else {\n return [1000, 2000, 5000][tries - 1] || 10000\n }\n }\n this.reconnectAfterMs = (tries) => {\n if(opts.reconnectAfterMs){\n return opts.reconnectAfterMs(tries)\n } else {\n return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n }\n }\n this.logger = opts.logger || null\n if(!this.logger && opts.debug){\n this.logger = (kind, msg, data) => { console.log(`${kind}: ${msg}`, data) }\n }\n this.longpollerTimeout = opts.longpollerTimeout || 20000\n this.params = closure(opts.params || {})\n this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`\n this.vsn = opts.vsn || DEFAULT_VSN\n this.heartbeatTimeoutTimer = null\n this.heartbeatTimer = null\n this.pendingHeartbeatRef = null\n this.reconnectTimer = new Timer(() => {\n if(this.pageHidden){\n this.log(\"Not reconnecting as page is hidden!\")\n this.teardown()\n return\n }\n this.teardown(() => this.connect())\n }, this.reconnectAfterMs)\n this.authToken = opts.authToken\n }\n\n /**\n * Returns the LongPoll transport reference\n */\n getLongPollTransport(){ return LongPoll }\n\n /**\n * Disconnects and replaces the active transport\n *\n * @param {Function} newTransport - The new transport class to instantiate\n *\n */\n 
replaceTransport(newTransport){\n this.connectClock++\n this.closeWasClean = true\n clearTimeout(this.fallbackTimer)\n this.reconnectTimer.reset()\n if(this.conn){\n this.conn.close()\n this.conn = null\n }\n this.transport = newTransport\n }\n\n /**\n * Returns the socket protocol\n *\n * @returns {string}\n */\n protocol(){ return location.protocol.match(/^https/) ? \"wss\" : \"ws\" }\n\n /**\n * The fully qualified socket url\n *\n * @returns {string}\n */\n endPointURL(){\n let uri = Ajax.appendParams(\n Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn})\n if(uri.charAt(0) !== \"/\"){ return uri }\n if(uri.charAt(1) === \"/\"){ return `${this.protocol()}:${uri}` }\n\n return `${this.protocol()}://${location.host}${uri}`\n }\n\n /**\n * Disconnects the socket\n *\n * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes.\n *\n * @param {Function} callback - Optional callback which is called after socket is disconnected.\n * @param {integer} code - A status code for disconnection (Optional).\n * @param {string} reason - A textual description of the reason to disconnect. (Optional)\n */\n disconnect(callback, code, reason){\n this.connectClock++\n this.disconnecting = true\n this.closeWasClean = true\n clearTimeout(this.fallbackTimer)\n this.reconnectTimer.reset()\n this.teardown(() => {\n this.disconnecting = false\n callback && callback()\n }, code, reason)\n }\n\n /**\n *\n * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}`\n *\n * Passing params to connect is deprecated; pass them in the Socket constructor instead:\n * `new Socket(\"/socket\", {params: {user_id: userToken}})`.\n */\n connect(params){\n if(params){\n console && console.log(\"passing params to connect is deprecated. 
Instead pass :params to the Socket constructor\")\n this.params = closure(params)\n }\n if(this.conn && !this.disconnecting){ return }\n if(this.longPollFallbackMs && this.transport !== LongPoll){\n this.connectWithFallback(LongPoll, this.longPollFallbackMs)\n } else {\n this.transportConnect()\n }\n }\n\n /**\n * Logs the message. Override `this.logger` for specialized logging. noops by default\n * @param {string} kind\n * @param {string} msg\n * @param {Object} data\n */\n log(kind, msg, data){ this.logger && this.logger(kind, msg, data) }\n\n /**\n * Returns true if a logger has been set on this socket.\n */\n hasLogger(){ return this.logger !== null }\n\n /**\n * Registers callbacks for connection open events\n *\n * @example socket.onOpen(function(){ console.info(\"the socket was opened\") })\n *\n * @param {Function} callback\n */\n onOpen(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.open.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection close events\n * @param {Function} callback\n */\n onClose(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.close.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection error events\n *\n * @example socket.onError(function(error){ alert(\"An error occurred\") })\n *\n * @param {Function} callback\n */\n onError(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.error.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection message events\n * @param {Function} callback\n */\n onMessage(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.message.push([ref, callback])\n return ref\n }\n\n /**\n * Pings the server and invokes the callback with the RTT in milliseconds\n * @param {Function} callback\n *\n * Returns true if the ping was pushed or false if unable to be pushed.\n */\n ping(callback){\n if(!this.isConnected()){ return false }\n let ref = this.makeRef()\n let 
startTime = Date.now()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: ref})\n let onMsgRef = this.onMessage(msg => {\n if(msg.ref === ref){\n this.off([onMsgRef])\n callback(Date.now() - startTime)\n }\n })\n return true\n }\n\n /**\n * @private\n *\n * @param {Function}\n */\n transportName(transport){\n // JavaScript minification, enabled by default in production in Phoenix\n // projects, renames symbols to reduce code size.\n // See https://esbuild.github.io/api/#keep-names.\n // This helper ensures we return the correct name for the LongPoll transport\n // even after minification. The other common transport is WebSocket, which\n // is native to browsers and does not need special handling.\n switch(transport){\n case LongPoll: return \"LongPoll\"\n default: return transport.name\n }\n }\n\n /**\n * @private\n */\n transportConnect(){\n this.connectClock++\n this.closeWasClean = false\n let protocols = undefined\n // Sec-WebSocket-Protocol based token\n // (longpoll uses Authorization header instead)\n if(this.authToken){\n protocols = [\"phoenix\", `${AUTH_TOKEN_PREFIX}${btoa(this.authToken).replace(/=/g, \"\")}`]\n }\n this.conn = new this.transport(this.endPointURL(), protocols)\n this.conn.binaryType = this.binaryType\n this.conn.timeout = this.longpollerTimeout\n this.conn.onopen = () => this.onConnOpen()\n this.conn.onerror = error => this.onConnError(error)\n this.conn.onmessage = event => this.onConnMessage(event)\n this.conn.onclose = event => this.onConnClose(event)\n }\n\n getSession(key){ return this.sessionStore && this.sessionStore.getItem(key) }\n\n storeSession(key, val){ this.sessionStore && this.sessionStore.setItem(key, val) }\n\n connectWithFallback(fallbackTransport, fallbackThreshold = 2500){\n clearTimeout(this.fallbackTimer)\n let established = false\n let primaryTransport = true\n let openRef, errorRef\n let fallbackTransportName = this.transportName(fallbackTransport)\n let fallback = (reason) => {\n 
this.log(\"transport\", `falling back to ${fallbackTransportName}...`, reason)\n this.off([openRef, errorRef])\n primaryTransport = false\n this.replaceTransport(fallbackTransport)\n this.transportConnect()\n }\n if(this.getSession(`phx:fallback:${fallbackTransportName}`)){ return fallback(\"memorized\") }\n\n this.fallbackTimer = setTimeout(fallback, fallbackThreshold)\n\n errorRef = this.onError(reason => {\n this.log(\"transport\", \"error\", reason)\n if(primaryTransport && !established){\n clearTimeout(this.fallbackTimer)\n fallback(reason)\n }\n })\n if(this.fallbackRef){\n this.off([this.fallbackRef])\n }\n this.fallbackRef = this.onOpen(() => {\n established = true\n if(!primaryTransport){\n let fallbackTransportName = this.transportName(fallbackTransport)\n // only memorize LP if we never connected to primary\n if(!this.primaryPassedHealthCheck){ this.storeSession(`phx:fallback:${fallbackTransportName}`, \"true\") }\n return this.log(\"transport\", `established ${fallbackTransportName} fallback`)\n }\n // if we've established primary, give the fallback a new period to attempt ping\n clearTimeout(this.fallbackTimer)\n this.fallbackTimer = setTimeout(fallback, fallbackThreshold)\n this.ping(rtt => {\n this.log(\"transport\", \"connected to primary after\", rtt)\n this.primaryPassedHealthCheck = true\n clearTimeout(this.fallbackTimer)\n })\n })\n this.transportConnect()\n }\n\n clearHeartbeats(){\n clearTimeout(this.heartbeatTimer)\n clearTimeout(this.heartbeatTimeoutTimer)\n }\n\n onConnOpen(){\n if(this.hasLogger()) this.log(\"transport\", `${this.transportName(this.transport)} connected to ${this.endPointURL()}`)\n this.closeWasClean = false\n this.disconnecting = false\n this.establishedConnections++\n this.flushSendBuffer()\n this.reconnectTimer.reset()\n this.resetHeartbeat()\n this.stateChangeCallbacks.open.forEach(([, callback]) => callback())\n }\n\n /**\n * @private\n */\n\n heartbeatTimeout(){\n if(this.pendingHeartbeatRef){\n 
this.pendingHeartbeatRef = null\n if(this.hasLogger()){ this.log(\"transport\", \"heartbeat timeout. Attempting to re-establish connection\") }\n this.triggerChanError()\n this.closeWasClean = false\n this.teardown(() => this.reconnectTimer.scheduleTimeout(), WS_CLOSE_NORMAL, \"heartbeat timeout\")\n }\n }\n\n resetHeartbeat(){\n if(this.conn && this.conn.skipHeartbeat){ return }\n this.pendingHeartbeatRef = null\n this.clearHeartbeats()\n this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n teardown(callback, code, reason){\n if(!this.conn){\n return callback && callback()\n }\n\n // If someone calls connect before we finish tearing down,\n // we create a new connection, but we still want to finish tearing down the old one.\n const connToClose = this.conn\n\n this.waitForBufferDone(connToClose, () => {\n if(code){ connToClose.close(code, reason || \"\") } else { connToClose.close() }\n\n this.waitForSocketClosed(connToClose, () => {\n if(this.conn === connToClose){\n this.conn.onopen = function (){ } // noop\n this.conn.onerror = function (){ } // noop\n this.conn.onmessage = function (){ } // noop\n this.conn.onclose = function (){ } // noop\n this.conn = null\n }\n\n callback && callback()\n })\n })\n }\n\n waitForBufferDone(conn, callback, tries = 1){\n if(tries === 5 || !conn.bufferedAmount){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForBufferDone(conn, callback, tries + 1)\n }, 150 * tries)\n }\n\n waitForSocketClosed(conn, callback, tries = 1){\n if(tries === 5 || conn.readyState === SOCKET_STATES.closed){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForSocketClosed(conn, callback, tries + 1)\n }, 150 * tries)\n }\n\n onConnClose(event){\n if(this.conn) this.conn.onclose = () => {} // noop to prevent recursive calls in teardown\n let closeCode = event && event.code\n if(this.hasLogger()) this.log(\"transport\", \"close\", event)\n this.triggerChanError()\n this.clearHeartbeats()\n 
if(!this.closeWasClean && closeCode !== 1000){\n this.reconnectTimer.scheduleTimeout()\n }\n this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event))\n }\n\n /**\n * @private\n */\n onConnError(error){\n if(this.hasLogger()) this.log(\"transport\", \"error\", error)\n let transportBefore = this.transport\n let establishedBefore = this.establishedConnections\n this.stateChangeCallbacks.error.forEach(([, callback]) => {\n callback(error, transportBefore, establishedBefore)\n })\n if(transportBefore === this.transport || establishedBefore > 0){\n this.triggerChanError()\n }\n }\n\n /**\n * @private\n */\n triggerChanError(){\n this.channels.forEach(channel => {\n if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){\n channel.trigger(CHANNEL_EVENTS.error)\n }\n })\n }\n\n /**\n * @returns {string}\n */\n connectionState(){\n switch(this.conn && this.conn.readyState){\n case SOCKET_STATES.connecting: return \"connecting\"\n case SOCKET_STATES.open: return \"open\"\n case SOCKET_STATES.closing: return \"closing\"\n default: return \"closed\"\n }\n }\n\n /**\n * @returns {boolean}\n */\n isConnected(){ return this.connectionState() === \"open\" }\n\n /**\n * @private\n *\n * @param {Channel}\n */\n remove(channel){\n this.off(channel.stateChangeRefs)\n this.channels = this.channels.filter(c => c !== channel)\n }\n\n /**\n * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations.\n *\n * @param {refs} - list of refs returned by calls to\n * `onOpen`, `onClose`, `onError,` and `onMessage`\n */\n off(refs){\n for(let key in this.stateChangeCallbacks){\n this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => {\n return refs.indexOf(ref) === -1\n })\n }\n }\n\n /**\n * Initiates a new channel for the given topic\n *\n * @param {string} topic\n * @param {Object} chanParams - Parameters for the channel\n * @returns {Channel}\n */\n channel(topic, chanParams = {}){\n let chan = new Channel(topic, 
chanParams, this)\n this.channels.push(chan)\n return chan\n }\n\n /**\n * @param {Object} data\n */\n push(data){\n if(this.hasLogger()){\n let {topic, event, payload, ref, join_ref} = data\n this.log(\"push\", `${topic} ${event} (${join_ref}, ${ref})`, payload)\n }\n\n if(this.isConnected()){\n this.encode(data, result => this.conn.send(result))\n } else {\n this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result)))\n }\n }\n\n /**\n * Return the next message ref, accounting for overflows\n * @returns {string}\n */\n makeRef(){\n let newRef = this.ref + 1\n if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef }\n\n return this.ref.toString()\n }\n\n sendHeartbeat(){\n if(this.pendingHeartbeatRef && !this.isConnected()){ return }\n this.pendingHeartbeatRef = this.makeRef()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: this.pendingHeartbeatRef})\n this.heartbeatTimeoutTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs)\n }\n\n flushSendBuffer(){\n if(this.isConnected() && this.sendBuffer.length > 0){\n this.sendBuffer.forEach(callback => callback())\n this.sendBuffer = []\n }\n }\n\n onConnMessage(rawMessage){\n this.decode(rawMessage.data, msg => {\n let {topic, event, payload, ref, join_ref} = msg\n if(ref && ref === this.pendingHeartbeatRef){\n this.clearHeartbeats()\n this.pendingHeartbeatRef = null\n this.heartbeatTimer = setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n if(this.hasLogger()) this.log(\"receive\", `${payload.status || \"\"} ${topic} ${event} ${ref && \"(\" + ref + \")\" || \"\"}`, payload)\n\n for(let i = 0; i < this.channels.length; i++){\n const channel = this.channels[i]\n if(!channel.isMember(topic, event, payload, join_ref)){ continue }\n channel.trigger(event, payload, ref, join_ref)\n }\n\n for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){\n let [, callback] = this.stateChangeCallbacks.message[i]\n callback(msg)\n 
}\n })\n }\n\n leaveOpenTopic(topic){\n let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining()))\n if(dupChannel){\n if(this.hasLogger()) this.log(\"transport\", `leaving duplicate topic \"${topic}\"`)\n dupChannel.leave()\n }\n }\n}\n"], + "mappings": ";AACO,IAAI,UAAU,CAAC,UAAU;AAC9B,MAAG,OAAO,UAAU,YAAW;AAC7B,WAAO;AAAA,EACT,OAAO;AACL,QAAIA,WAAU,WAAW;AAAE,aAAO;AAAA,IAAM;AACxC,WAAOA;AAAA,EACT;AACF;;;ACRO,IAAM,aAAa,OAAO,SAAS,cAAc,OAAO;AACxD,IAAM,YAAY,OAAO,WAAW,cAAc,SAAS;AAC3D,IAAM,SAAS,cAAc,aAAa;AAC1C,IAAM,cAAc;AACpB,IAAM,gBAAgB,EAAC,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,EAAC;AACpE,IAAM,0BAA0B;AAChC,IAAM,kBAAkB;AACxB,IAAM,kBAAkB;AACxB,IAAM,iBAAiB;AAAA,EAC5B,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,SAAS;AACX;AACO,IAAM,iBAAiB;AAAA,EAC5B,OAAO;AAAA,EACP,OAAO;AAAA,EACP,MAAM;AAAA,EACN,OAAO;AAAA,EACP,OAAO;AACT;AAEO,IAAM,aAAa;AAAA,EACxB,UAAU;AAAA,EACV,WAAW;AACb;AACO,IAAM,aAAa;AAAA,EACxB,UAAU;AACZ;AACO,IAAM,oBAAoB;;;ACvBjC,IAAqB,OAArB,MAA0B;AAAA,EACxB,YAAY,SAAS,OAAO,SAAS,SAAQ;AAC3C,SAAK,UAAU;AACf,SAAK,QAAQ;AACb,SAAK,UAAU,WAAW,WAAW;AAAE,aAAO,CAAC;AAAA,IAAE;AACjD,SAAK,eAAe;AACpB,SAAK,UAAU;AACf,SAAK,eAAe;AACpB,SAAK,WAAW,CAAC;AACjB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,OAAO,SAAQ;AACb,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,KAAK;AAAA,EACZ;AAAA;AAAA;AAAA;AAAA,EAKA,OAAM;AACJ,QAAG,KAAK,YAAY,SAAS,GAAE;AAAE;AAAA,IAAO;AACxC,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,QAAQ,OAAO,KAAK;AAAA,MACvB,OAAO,KAAK,QAAQ;AAAA,MACpB,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,QAAQ;AAAA,MACtB,KAAK,KAAK;AAAA,MACV,UAAU,KAAK,QAAQ,QAAQ;AAAA,IACjC,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QAAQ,QAAQ,UAAS;AACvB,QAAG,KAAK,YAAY,MAAM,GAAE;AAC1B,eAAS,KAAK,aAAa,QAAQ;AAAA,IACrC;AAEA,SAAK,SAAS,KAAK,EAAC,QAAQ,SAAQ,CAAC;AACrC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,QAAO;AACL,SAAK,eAAe;AACpB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,eAAe;AACpB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,aAAa,EAAC,QAAQ,UAAU,KAAI,GAAE;AACpC,SAAK,SAAS,OAAO,OAAK,EAAE,WAAW,MAAM,EAC1C,QAAQ,OAAK,EAAE,S
AAS,QAAQ,CAAC;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA,EAKA,iBAAgB;AACd,QAAG,CAAC,KAAK,UAAS;AAAE;AAAA,IAAO;AAC3B,SAAK,QAAQ,IAAI,KAAK,QAAQ;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA,EAKA,gBAAe;AACb,iBAAa,KAAK,YAAY;AAC9B,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA,EAKA,eAAc;AACZ,QAAG,KAAK,cAAa;AAAE,WAAK,cAAc;AAAA,IAAE;AAC5C,SAAK,MAAM,KAAK,QAAQ,OAAO,QAAQ;AACvC,SAAK,WAAW,KAAK,QAAQ,eAAe,KAAK,GAAG;AAEpD,SAAK,QAAQ,GAAG,KAAK,UAAU,aAAW;AACxC,WAAK,eAAe;AACpB,WAAK,cAAc;AACnB,WAAK,eAAe;AACpB,WAAK,aAAa,OAAO;AAAA,IAC3B,CAAC;AAED,SAAK,eAAe,WAAW,MAAM;AACnC,WAAK,QAAQ,WAAW,CAAC,CAAC;AAAA,IAC5B,GAAG,KAAK,OAAO;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA,EAKA,YAAY,QAAO;AACjB,WAAO,KAAK,gBAAgB,KAAK,aAAa,WAAW;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA,EAKA,QAAQ,QAAQ,UAAS;AACvB,SAAK,QAAQ,QAAQ,KAAK,UAAU,EAAC,QAAQ,SAAQ,CAAC;AAAA,EACxD;AACF;;;AC9GA,IAAqB,QAArB,MAA2B;AAAA,EACzB,YAAY,UAAU,WAAU;AAC9B,SAAK,WAAW;AAChB,SAAK,YAAY;AACjB,SAAK,QAAQ;AACb,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,QAAO;AACL,SAAK,QAAQ;AACb,iBAAa,KAAK,KAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA,EAKA,kBAAiB;AACf,iBAAa,KAAK,KAAK;AAEvB,SAAK,QAAQ,WAAW,MAAM;AAC5B,WAAK,QAAQ,KAAK,QAAQ;AAC1B,WAAK,SAAS;AAAA,IAChB,GAAG,KAAK,UAAU,KAAK,QAAQ,CAAC,CAAC;AAAA,EACnC;AACF;;;AC1BA,IAAqB,UAArB,MAA6B;AAAA,EAC3B,YAAY,OAAO,QAAQ,QAAO;AAChC,SAAK,QAAQ,eAAe;AAC5B,SAAK,QAAQ;AACb,SAAK,SAAS,QAAQ,UAAU,CAAC,CAAC;AAClC,SAAK,SAAS;AACd,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa;AAClB,SAAK,UAAU,KAAK,OAAO;AAC3B,SAAK,aAAa;AAClB,SAAK,WAAW,IAAI,KAAK,MAAM,eAAe,MAAM,KAAK,QAAQ,KAAK,OAAO;AAC7E,SAAK,aAAa,CAAC;AACnB,SAAK,kBAAkB,CAAC;AAExB,SAAK,cAAc,IAAI,MAAM,MAAM;AACjC,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IAC/C,GAAG,KAAK,OAAO,aAAa;AAC5B,SAAK,gBAAgB,KAAK,KAAK,OAAO,QAAQ,MAAM,KAAK,YAAY,MAAM,CAAC,CAAC;AAC7E,SAAK,gBAAgB;AAAA,MAAK,KAAK,OAAO,OAAO,MAAM;AACjD,aAAK,YAAY,MAAM;AACvB,YAAG,KAAK,UAAU,GAAE;AAAE,eAAK,OAAO;AAAA,QAAE;AAAA,MACtC,CAAC;AAAA,IACD;AACA,SAAK,SAAS,QAAQ,MAAM,MAAM;AAChC,WAAK,QAAQ,eAAe;AAC5B,WAAK,YAAY,MAAM;AACvB,WAAK,WAAW,QAAQ,eAAa,UAAU,KAAK,CAAC;AACrD,WAAK,aAAa,CAAC;AAAA,IACrB,CAAC;AACD,SAAK,SAAS,QAAQ,SAAS,MAAM;AACnC,WAAK,QAAQ
,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,QAAQ,MAAM;AACjB,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,IAAI,KAAK,QAAQ,CAAC,EAAE;AAC9F,WAAK,QAAQ,eAAe;AAC5B,WAAK,OAAO,OAAO,IAAI;AAAA,IACzB,CAAC;AACD,SAAK,QAAQ,YAAU;AACrB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,IAAI,MAAM;AACpF,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,SAAS,MAAM;AAAA,MAAE;AAC5C,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,SAAS,QAAQ,WAAW,MAAM;AACrC,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,WAAW,KAAK,KAAK,KAAK,KAAK,QAAQ,CAAC,KAAK,KAAK,SAAS,OAAO;AACzH,UAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,KAAK,OAAO;AAC9E,gBAAU,KAAK;AACf,WAAK,QAAQ,eAAe;AAC5B,WAAK,SAAS,MAAM;AACpB,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,GAAG,eAAe,OAAO,CAAC,SAAS,QAAQ;AAC9C,WAAK,QAAQ,KAAK,eAAe,GAAG,GAAG,OAAO;AAAA,IAChD,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,KAAK,UAAU,KAAK,SAAQ;AAC1B,QAAG,KAAK,YAAW;AACjB,YAAM,IAAI,MAAM,4FAA4F;AAAA,IAC9G,OAAO;AACL,WAAK,UAAU;AACf,WAAK,aAAa;AAClB,WAAK,OAAO;AACZ,aAAO,KAAK;AAAA,IACd;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,SAAK,GAAG,eAAe,OAAO,QAAQ;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,WAAO,KAAK,GAAG,eAAe,OAAO,YAAU,SAAS,MAAM,CAAC;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmBA,GAAG,OAAO,UAAS;AACjB,QAAI,MAAM,KAAK;AACf,SAAK,SAAS,KAAK,EAAC,OAAO,KAAK,SAAQ,CAAC;AACzC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,IAAI,OAAO,KAAI;AACb,SAAK,WAAW,KAAK,SAAS,OAAO,CAAC,SAAS;AAC7C,aAAO,EAAE,KAAK,UAAU,UAAU,OAAO,QAAQ,eAAe,QAAQ,KAAK;AAAA,IAC/E,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,OAAO,YAAY,KAAK,KAAK,SAAS;AAAA,EAAE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkB/D,KAAK,OAAO,SAAS,UAAU,KAAK,S
AAQ;AAC1C,cAAU,WAAW,CAAC;AACtB,QAAG,CAAC,KAAK,YAAW;AAClB,YAAM,IAAI,MAAM,kBAAkB,KAAK,SAAS,KAAK,KAAK,4DAA4D;AAAA,IACxH;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,OAAO,WAAW;AAAE,aAAO;AAAA,IAAQ,GAAG,OAAO;AAC5E,QAAG,KAAK,QAAQ,GAAE;AAChB,gBAAU,KAAK;AAAA,IACjB,OAAO;AACL,gBAAU,aAAa;AACvB,WAAK,WAAW,KAAK,SAAS;AAAA,IAChC;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,MAAM,UAAU,KAAK,SAAQ;AAC3B,SAAK,YAAY,MAAM;AACvB,SAAK,SAAS,cAAc;AAE5B,SAAK,QAAQ,eAAe;AAC5B,QAAI,UAAU,MAAM;AAClB,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,SAAS,KAAK,KAAK,EAAE;AAC5E,WAAK,QAAQ,eAAe,OAAO,OAAO;AAAA,IAC5C;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,OAAO;AACzE,cAAU,QAAQ,MAAM,MAAM,QAAQ,CAAC,EACpC,QAAQ,WAAW,MAAM,QAAQ,CAAC;AACrC,cAAU,KAAK;AACf,QAAG,CAAC,KAAK,QAAQ,GAAE;AAAE,gBAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,IAAE;AAEjD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,UAAU,QAAQ,SAAS,MAAK;AAAE,WAAO;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAKjD,SAAS,OAAO,OAAO,SAAS,SAAQ;AACtC,QAAG,KAAK,UAAU,OAAM;AAAE,aAAO;AAAA,IAAM;AAEvC,QAAG,WAAW,YAAY,KAAK,QAAQ,GAAE;AACvC,UAAG,KAAK,OAAO,UAAU,EAAG,MAAK,OAAO,IAAI,WAAW,6BAA6B,EAAC,OAAO,OAAO,SAAS,QAAO,CAAC;AACpH,aAAO;AAAA,IACT,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,SAAS;AAAA,EAAI;AAAA;AAAA;AAAA;AAAA,EAKpC,OAAO,UAAU,KAAK,SAAQ;AAC5B,QAAG,KAAK,UAAU,GAAE;AAAE;AAAA,IAAO;AAC7B,SAAK,OAAO,eAAe,KAAK,KAAK;AACrC,SAAK,QAAQ,eAAe;AAC5B,SAAK,SAAS,OAAO,OAAO;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA,EAKA,QAAQ,OAAO,SAAS,KAAK,SAAQ;AACnC,QAAI,iBAAiB,KAAK,UAAU,OAAO,SAAS,KAAK,OAAO;AAChE,QAAG,WAAW,CAAC,gBAAe;AAAE,YAAM,IAAI,MAAM,6EAA6E;AAAA,IAAE;AAE/H,QAAI,gBAAgB,KAAK,SAAS,OAAO,UAAQ,KAAK,UAAU,KAAK;AAErE,aAAQ,IAAI,GAAG,IAAI,cAAc,QAAQ,KAAI;AAC3C,UAAI,OAAO,cAAc,CAAC;AAC1B,WAAK,SAAS,gBAAgB,KAAK,WAAW,KAAK,QAAQ,CAAC;AAAA,IAC9D;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,eAAe,KAAI;AAAE,WAAO,cAAc,GAAG;AAAA,EAAG;AAAA;AAAA;AAAA;AAAA,EAKhD,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA,EAKxD,YA
AW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAK1D,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA;AAAA;AAAA;AAAA,EAK1D,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAC5D;;;ACjTA,IAAqB,OAArB,MAA0B;AAAA,EAExB,OAAO,QAAQ,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AAC3E,QAAG,OAAO,gBAAe;AACvB,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,QAAQ;AAAA,IACtF,WAAU,OAAO,gBAAe;AAC9B,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,WAAW,KAAK,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,QAAQ;AAAA,IAC3F,WAAU,OAAO,SAAS,OAAO,iBAAgB;AAE/C,aAAO,KAAK,aAAa,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,QAAQ;AAAA,IACxF,OAAO;AACL,YAAM,IAAI,MAAM,iDAAiD;AAAA,IACnE;AAAA,EACF;AAAA,EAEA,OAAO,aAAa,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AAChF,QAAI,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,aAAa;AACjB,QAAG,SAAQ;AACT,mBAAa,IAAI,gBAAgB;AACjC,YAAM,aAAa,WAAW,MAAM,WAAW,MAAM,GAAG,OAAO;AAC/D,cAAQ,SAAS,WAAW;AAAA,IAC9B;AACA,WAAO,MAAM,UAAU,OAAO,EAC3B,KAAK,cAAY,SAAS,KAAK,CAAC,EAChC,KAAK,UAAQ,KAAK,UAAU,IAAI,CAAC,EACjC,KAAK,UAAQ,YAAY,SAAS,IAAI,CAAC,EACvC,MAAM,SAAO;AACZ,UAAG,IAAI,SAAS,gBAAgB,WAAU;AACxC,kBAAU;AAAA,MACZ,OAAO;AACL,oBAAY,SAAS,IAAI;AAAA,MAC3B;AAAA,IACF,CAAC;AACH,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,UAAS;AAC9E,QAAI,UAAU;AACd,QAAI,KAAK,QAAQ,QAAQ;AACzB,QAAI,SAAS,MAAM;AACjB,UAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,kBAAY,SAAS,QAAQ;AAAA,IAC/B;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAGzC,QAAI,aAAa,MAAM;AAAA,IAAE;AAEzB,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,WAAW,KAAK,QAAQ,UAAU,SAAS,MAAM,SAAS,WAAW,UAAS;AACnF,QAAI,KAAK,QAAQ,UAAU,IAAI;AAC/B,QAAI,UAAU;AACd,aAAQ,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,OAAO,GAAE;AAC9C,UAAI,iBAAiB,KAAK,KAAK;AAAA,IACjC;AACA,QAAI,UAAU,MAAM,YAAY,SAAS,IAAI;AAC7C,QAAI,qBAAqB,MAAM;AAC7B,UAAG,IAAI,eAAe,WAAW,YAAY,UAAS;AACpD,YAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,iBAAS,QAAQ;AAAA,MACnB;AAAA,IACF;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAEzC,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,UAAU,MAAK;AACpB,QAA
G,CAAC,QAAQ,SAAS,IAAG;AAAE,aAAO;AAAA,IAAK;AAEtC,QAAI;AACF,aAAO,KAAK,MAAM,IAAI;AAAA,IACxB,QAAQ;AACN,iBAAW,QAAQ,IAAI,iCAAiC,IAAI;AAC5D,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,OAAO,UAAU,KAAK,WAAU;AAC9B,QAAI,WAAW,CAAC;AAChB,aAAQ,OAAO,KAAI;AACjB,UAAG,CAAC,OAAO,UAAU,eAAe,KAAK,KAAK,GAAG,GAAE;AAAE;AAAA,MAAS;AAC9D,UAAI,WAAW,YAAY,GAAG,SAAS,IAAI,GAAG,MAAM;AACpD,UAAI,WAAW,IAAI,GAAG;AACtB,UAAG,OAAO,aAAa,UAAS;AAC9B,iBAAS,KAAK,KAAK,UAAU,UAAU,QAAQ,CAAC;AAAA,MAClD,OAAO;AACL,iBAAS,KAAK,mBAAmB,QAAQ,IAAI,MAAM,mBAAmB,QAAQ,CAAC;AAAA,MACjF;AAAA,IACF;AACA,WAAO,SAAS,KAAK,GAAG;AAAA,EAC1B;AAAA,EAEA,OAAO,aAAa,KAAK,QAAO;AAC9B,QAAG,OAAO,KAAK,MAAM,EAAE,WAAW,GAAE;AAAE,aAAO;AAAA,IAAI;AAEjD,QAAI,SAAS,IAAI,MAAM,IAAI,IAAI,MAAM;AACrC,WAAO,GAAG,GAAG,GAAG,MAAM,GAAG,KAAK,UAAU,MAAM,CAAC;AAAA,EACjD;AACF;;;AC1GA,IAAI,sBAAsB,CAAC,WAAW;AACpC,MAAI,SAAS;AACb,MAAI,QAAQ,IAAI,WAAW,MAAM;AACjC,MAAI,MAAM,MAAM;AAChB,WAAQ,IAAI,GAAG,IAAI,KAAK,KAAI;AAAE,cAAU,OAAO,aAAa,MAAM,CAAC,CAAC;AAAA,EAAE;AACtE,SAAO,KAAK,MAAM;AACpB;AAEA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,UAAU,WAAU;AAG9B,QAAG,aAAa,UAAU,WAAW,KAAK,UAAU,CAAC,EAAE,WAAW,iBAAiB,GAAE;AACnF,WAAK,YAAY,KAAK,UAAU,CAAC,EAAE,MAAM,kBAAkB,MAAM,CAAC;AAAA,IACpE;AACA,SAAK,WAAW;AAChB,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,OAAO,oBAAI,IAAI;AACpB,SAAK,mBAAmB;AACxB,SAAK,eAAe;AACpB,SAAK,oBAAoB;AACzB,SAAK,cAAc,CAAC;AACpB,SAAK,SAAS,WAAW;AAAA,IAAE;AAC3B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,YAAY,WAAW;AAAA,IAAE;AAC9B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,eAAe,KAAK,kBAAkB,QAAQ;AACnD,SAAK,aAAa,cAAc;AAEhC,eAAW,MAAM,KAAK,KAAK,GAAG,CAAC;AAAA,EACjC;AAAA,EAEA,kBAAkB,UAAS;AACzB,WAAQ,SACL,QAAQ,SAAS,SAAS,EAC1B,QAAQ,UAAU,UAAU,EAC5B,QAAQ,IAAI,OAAO,UAAW,WAAW,SAAS,GAAG,QAAQ,WAAW,QAAQ;AAAA,EACrF;AAAA,EAEA,cAAa;AACX,WAAO,KAAK,aAAa,KAAK,cAAc,EAAC,OAAO,KAAK,MAAK,CAAC;AAAA,EACjE;AAAA,EAEA,cAAc,MAAM,QAAQ,UAAS;AACnC,SAAK,MAAM,MAAM,QAAQ,QAAQ;AACjC,SAAK,aAAa,cAAc;AAAA,EAClC;AAAA,EAEA,YAAW;AACT,SAAK,QAAQ,SAAS;AACtB,SAAK,cAAc,MAAM,WAAW,KAAK;AAAA,EAC3C;AAAA,EAEA,WAAU;AAAE,WAAO,KAAK,eAAe,cAAc,QAAQ,KAAK,eAAe,cAAc;AAAA,EAAW;AAAA,EAE1G,
OAAM;AACJ,UAAM,UAAU,EAAC,UAAU,mBAAkB;AAC7C,QAAG,KAAK,WAAU;AAChB,cAAQ,qBAAqB,IAAI,KAAK;AAAA,IACxC;AACA,SAAK,KAAK,OAAO,SAAS,MAAM,MAAM,KAAK,UAAU,GAAG,UAAQ;AAC9D,UAAG,MAAK;AACN,YAAI,EAAC,QAAQ,OAAO,SAAQ,IAAI;AAChC,YAAG,WAAW,OAAO,KAAK,UAAU,MAAK;AAGvC,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,gBAAgB,KAAK;AAC9C;AAAA,QACF;AACA,aAAK,QAAQ;AAAA,MACf,OAAO;AACL,iBAAS;AAAA,MACX;AAEA,cAAO,QAAO;AAAA,QACZ,KAAK;AACH,mBAAS,QAAQ,SAAO;AAmBtB,uBAAW,MAAM,KAAK,UAAU,EAAC,MAAM,IAAG,CAAC,GAAG,CAAC;AAAA,UACjD,CAAC;AACD,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,aAAa,cAAc;AAChC,eAAK,OAAO,CAAC,CAAC;AACd,eAAK,KAAK;AACV;AAAA,QACF,KAAK;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,MAAM,MAAM,aAAa,KAAK;AACnC;AAAA,QACF,KAAK;AAAA,QACL,KAAK;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,yBAAyB,GAAG;AACrD;AAAA,QACF;AAAS,gBAAM,IAAI,MAAM,yBAAyB,MAAM,EAAE;AAAA,MAC5D;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAMA,KAAK,MAAK;AACR,QAAG,OAAO,SAAU,UAAS;AAAE,aAAO,oBAAoB,IAAI;AAAA,IAAE;AAChE,QAAG,KAAK,cAAa;AACnB,WAAK,aAAa,KAAK,IAAI;AAAA,IAC7B,WAAU,KAAK,kBAAiB;AAC9B,WAAK,YAAY,KAAK,IAAI;AAAA,IAC5B,OAAO;AACL,WAAK,eAAe,CAAC,IAAI;AACzB,WAAK,oBAAoB,WAAW,MAAM;AACxC,aAAK,UAAU,KAAK,YAAY;AAChC,aAAK,eAAe;AAAA,MACtB,GAAG,CAAC;AAAA,IACN;AAAA,EACF;AAAA,EAEA,UAAU,UAAU,SAAS,GAAE;AAC7B,SAAK,mBAAmB;AACxB,UAAM,OAAO,SAAS;AACtB,UAAM,QAAQ,SAAS,MAAM,QAAQ,IAAI;AACzC,SAAK,KAAK,QAAQ,EAAC,gBAAgB,uBAAsB,GAAG,MAAM,KAAK,IAAI,GAAG,MAAM,KAAK,QAAQ,SAAS,GAAG,UAAQ;AACnH,UAAG,CAAC,QAAQ,KAAK,WAAW,KAAI;AAC9B,aAAK,mBAAmB;AACxB,aAAK,QAAQ,QAAQ,KAAK,MAAM;AAChC,aAAK,cAAc,MAAM,yBAAyB,KAAK;AAAA,MACzD,WAAU,OAAO,SAAS,QAAO;AAC/B,aAAK,UAAU,UAAU,IAAI;AAAA,MAC/B,WAAU,KAAK,YAAY,SAAS,GAAE;AACpC,aAAK,UAAU,KAAK,WAAW;AAC/B,aAAK,cAAc,CAAC;AAAA,MACtB,OAAO;AACL,aAAK,mBAAmB;AAAA,MAC1B;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,MAAM,QAAQ,UAAS;AAC3B,aAAQ,OAAO,KAAK,MAAK;AAAE,UAAI,MAAM;AAAA,IAAE;AACvC,SAAK,aAAa,cAAc;AAChC,QAAI,OAAO,OAAO,OAAO,EAAC,MAAM,KAAM,QAAQ,QAAW,UAAU,KAAI,GAAG,EAAC,MAAM,QAAQ,SAAQ,CAAC;AAClG,SAAK,cAAc,CAAC;AACpB,iBAAa,KAAK,iBAAiB;AACnC,SAAK,oBAAoB;AACzB,
QAAG,OAAO,eAAgB,aAAY;AACpC,WAAK,QAAQ,IAAI,WAAW,SAAS,IAAI,CAAC;AAAA,IAC5C,OAAO;AACL,WAAK,QAAQ,IAAI;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,KAAK,QAAQ,SAAS,MAAM,iBAAiB,UAAS;AACpD,QAAI;AACJ,QAAI,YAAY,MAAM;AACpB,WAAK,KAAK,OAAO,GAAG;AACpB,sBAAgB;AAAA,IAClB;AACA,UAAM,KAAK,QAAQ,QAAQ,KAAK,YAAY,GAAG,SAAS,MAAM,KAAK,SAAS,WAAW,UAAQ;AAC7F,WAAK,KAAK,OAAO,GAAG;AACpB,UAAG,KAAK,SAAS,GAAE;AAAE,iBAAS,IAAI;AAAA,MAAE;AAAA,IACtC,CAAC;AACD,SAAK,KAAK,IAAI,GAAG;AAAA,EACnB;AACF;;;AChMA,IAAqB,WAArB,MAAqB,UAAS;AAAA,EAE5B,YAAY,SAAS,OAAO,CAAC,GAAE;AAC7B,QAAI,SAAS,KAAK,UAAU,EAAC,OAAO,kBAAkB,MAAM,gBAAe;AAC3E,SAAK,QAAQ,CAAC;AACd,SAAK,eAAe,CAAC;AACrB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,MACZ,QAAQ,WAAW;AAAA,MAAE;AAAA,MACrB,SAAS,WAAW;AAAA,MAAE;AAAA,MACtB,QAAQ,WAAW;AAAA,MAAE;AAAA,IACvB;AAEA,SAAK,QAAQ,GAAG,OAAO,OAAO,cAAY;AACxC,UAAI,EAAC,QAAQ,SAAS,OAAM,IAAI,KAAK;AAErC,WAAK,UAAU,KAAK,QAAQ,QAAQ;AACpC,WAAK,QAAQ,UAAS,UAAU,KAAK,OAAO,UAAU,QAAQ,OAAO;AAErE,WAAK,aAAa,QAAQ,UAAQ;AAChC,aAAK,QAAQ,UAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAAA,MAClE,CAAC;AACD,WAAK,eAAe,CAAC;AACrB,aAAO;AAAA,IACT,CAAC;AAED,SAAK,QAAQ,GAAG,OAAO,MAAM,UAAQ;AACnC,UAAI,EAAC,QAAQ,SAAS,OAAM,IAAI,KAAK;AAErC,UAAG,KAAK,mBAAmB,GAAE;AAC3B,aAAK,aAAa,KAAK,IAAI;AAAA,MAC7B,OAAO;AACL,aAAK,QAAQ,UAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAChE,eAAO;AAAA,MACT;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,QAAQ,UAAS;AAAE,SAAK,OAAO,UAAU;AAAA,EAAS;AAAA,EAElD,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,KAAK,IAAG;AAAE,WAAO,UAAS,KAAK,KAAK,OAAO,EAAE;AAAA,EAAE;AAAA,EAE/C,qBAAoB;AAClB,WAAO,CAAC,KAAK,WAAY,KAAK,YAAY,KAAK,QAAQ,QAAQ;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,OAAO,UAAU,cAAc,UAAU,QAAQ,SAAQ;AACvD,QAAI,QAAQ,KAAK,MAAM,YAAY;AACnC,QAAI,QAAQ,CAAC;AACb,QAAI,SAAS,CAAC;AAEd,SAAK,IAAI,OAAO,CAAC,KAAK,aAAa;AACjC,UAAG,CAAC,SAAS,GAAG,GAAE;AAChB,eAAO,GAAG,IAAI;AAAA,MAChB;AAAA,IACF,CAAC;AACD,SAAK,IAAI,UAAU,CAAC,KAAK,gBAAgB;AACvC,UAAI,kBAAkB,MAAM,GAAG;AAC/B,UAAG,iBAAgB;AACjB,YAAI,UAAU,YAAY,MAAM,IAAI,OAAK,EAAE,OAAO;AAClD,
YAAI,UAAU,gBAAgB,MAAM,IAAI,OAAK,EAAE,OAAO;AACtD,YAAI,cAAc,YAAY,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAC9E,YAAI,YAAY,gBAAgB,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAChF,YAAG,YAAY,SAAS,GAAE;AACxB,gBAAM,GAAG,IAAI;AACb,gBAAM,GAAG,EAAE,QAAQ;AAAA,QACrB;AACA,YAAG,UAAU,SAAS,GAAE;AACtB,iBAAO,GAAG,IAAI,KAAK,MAAM,eAAe;AACxC,iBAAO,GAAG,EAAE,QAAQ;AAAA,QACtB;AAAA,MACF,OAAO;AACL,cAAM,GAAG,IAAI;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO,KAAK,SAAS,OAAO,EAAC,OAAc,OAAc,GAAG,QAAQ,OAAO;AAAA,EAC7E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,SAAS,OAAO,MAAM,QAAQ,SAAQ;AAC3C,QAAI,EAAC,OAAO,OAAM,IAAI,KAAK,MAAM,IAAI;AACrC,QAAG,CAAC,QAAO;AAAE,eAAS,WAAW;AAAA,MAAE;AAAA,IAAE;AACrC,QAAG,CAAC,SAAQ;AAAE,gBAAU,WAAW;AAAA,MAAE;AAAA,IAAE;AAEvC,SAAK,IAAI,OAAO,CAAC,KAAK,gBAAgB;AACpC,UAAI,kBAAkB,MAAM,GAAG;AAC/B,YAAM,GAAG,IAAI,KAAK,MAAM,WAAW;AACnC,UAAG,iBAAgB;AACjB,YAAI,aAAa,MAAM,GAAG,EAAE,MAAM,IAAI,OAAK,EAAE,OAAO;AACpD,YAAI,WAAW,gBAAgB,MAAM,OAAO,OAAK,WAAW,QAAQ,EAAE,OAAO,IAAI,CAAC;AAClF,cAAM,GAAG,EAAE,MAAM,QAAQ,GAAG,QAAQ;AAAA,MACtC;AACA,aAAO,KAAK,iBAAiB,WAAW;AAAA,IAC1C,CAAC;AACD,SAAK,IAAI,QAAQ,CAAC,KAAK,iBAAiB;AACtC,UAAI,kBAAkB,MAAM,GAAG;AAC/B,UAAG,CAAC,iBAAgB;AAAE;AAAA,MAAO;AAC7B,UAAI,eAAe,aAAa,MAAM,IAAI,OAAK,EAAE,OAAO;AACxD,sBAAgB,QAAQ,gBAAgB,MAAM,OAAO,OAAK;AACxD,eAAO,aAAa,QAAQ,EAAE,OAAO,IAAI;AAAA,MAC3C,CAAC;AACD,cAAQ,KAAK,iBAAiB,YAAY;AAC1C,UAAG,gBAAgB,MAAM,WAAW,GAAE;AACpC,eAAO,MAAM,GAAG;AAAA,MAClB;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,OAAO,KAAK,WAAW,SAAQ;AAC7B,QAAG,CAAC,SAAQ;AAAE,gBAAU,SAAU,KAAK,MAAK;AAAE,eAAO;AAAA,MAAK;AAAA,IAAE;AAE5D,WAAO,KAAK,IAAI,WAAW,CAAC,KAAK,aAAa;AAC5C,aAAO,QAAQ,KAAK,QAAQ;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA;AAAA,EAIA,OAAO,IAAI,KAAK,MAAK;AACnB,WAAO,OAAO,oBAAoB,GAAG,EAAE,IAAI,SAAO,KAAK,KAAK,IAAI,GAAG,CAAC,CAAC;AAAA,EACvE;AAAA,EAEA,OAAO,MAAM,KAAI;AAAE,WAAO,KAAK,MAAM,KAAK,UAAU,GAAG,CAAC;AAAA,EAAE;AAC5D;;;AC5JA,IAAO,qBAAQ;AAAA,EACb,eAAe;AAAA,EACf,aAAa;AAAA,EACb,OAAO,EAAC,MAAM,GAAG,OAAO,GAAG,WAAW,EAAC;AAAA,EAEvC,OAAO,KAAK,UA
AS;AACnB,QAAG,IAAI,QAAQ,gBAAgB,aAAY;AACzC,aAAO,SAAS,KAAK,aAAa,GAAG,CAAC;AAAA,IACxC,OAAO;AACL,UAAI,UAAU,CAAC,IAAI,UAAU,IAAI,KAAK,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO;AACvE,aAAO,SAAS,KAAK,UAAU,OAAO,CAAC;AAAA,IACzC;AAAA,EACF;AAAA,EAEA,OAAO,YAAY,UAAS;AAC1B,QAAG,WAAW,gBAAgB,aAAY;AACxC,aAAO,SAAS,KAAK,aAAa,UAAU,CAAC;AAAA,IAC/C,OAAO;AACL,UAAI,CAAC,UAAU,KAAK,OAAO,OAAO,OAAO,IAAI,KAAK,MAAM,UAAU;AAClE,aAAO,SAAS,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,CAAC;AAAA,IACxD;AAAA,EACF;AAAA;AAAA,EAIA,aAAa,SAAQ;AACnB,QAAI,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,IAAI;AAC7C,QAAI,UAAU,IAAI,YAAY;AAC9B,QAAI,eAAe,QAAQ,OAAO,QAAQ;AAC1C,QAAI,WAAW,QAAQ,OAAO,GAAG;AACjC,QAAI,aAAa,QAAQ,OAAO,KAAK;AACrC,QAAI,aAAa,QAAQ,OAAO,KAAK;AAErC,SAAK,gBAAgB,aAAa,YAAY,UAAU;AACxD,SAAK,gBAAgB,SAAS,YAAY,KAAK;AAC/C,SAAK,gBAAgB,WAAW,YAAY,OAAO;AACnD,SAAK,gBAAgB,WAAW,YAAY,OAAO;AAEnD,QAAI,aAAa,KAAK,cAAc,aAAa,aAAa,SAAS,aAAa,WAAW,aAAa,WAAW;AACvH,QAAI,SAAS,IAAI,YAAY,KAAK,gBAAgB,UAAU;AAC5D,QAAI,cAAc,IAAI,WAAW,MAAM;AACvC,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,SAAS;AAEb,SAAK,SAAS,UAAU,KAAK,MAAM,IAAI;AACvC,SAAK,SAAS,UAAU,aAAa,UAAU;AAC/C,SAAK,SAAS,UAAU,SAAS,UAAU;AAC3C,SAAK,SAAS,UAAU,WAAW,UAAU;AAC7C,SAAK,SAAS,UAAU,WAAW,UAAU;AAC7C,gBAAY,IAAI,cAAc,MAAM;AAAG,cAAU,aAAa;AAC9D,gBAAY,IAAI,UAAU,MAAM;AAAG,cAAU,SAAS;AACtD,gBAAY,IAAI,YAAY,MAAM;AAAG,cAAU,WAAW;AAC1D,gBAAY,IAAI,YAAY,MAAM;AAAG,cAAU,WAAW;AAE1D,QAAI,WAAW,IAAI,WAAW,OAAO,aAAa,QAAQ,UAAU;AACpE,aAAS,IAAI,aAAa,CAAC;AAC3B,aAAS,IAAI,IAAI,WAAW,OAAO,GAAG,OAAO,UAAU;AAEvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,gBAAgB,MAAM,MAAK;AACzB,QAAG,OAAO,KAAI;AACZ,YAAM,IAAI,MAAM,qBAAqB,IAAI,+DAA+D,IAAI,QAAQ;AAAA,IACtH;AAAA,EACF;AAAA,EAEA,aAAa,QAAO;AAClB,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,OAAO,KAAK,SAAS,CAAC;AAC1B,QAAI,UAAU,IAAI,YAAY;AAC9B,YAAO,MAAK;AAAA,MACV,KAAK,KAAK,MAAM;AAAM,eAAO,KAAK,WAAW,QAAQ,MAAM,OAAO;AAAA,MAClE,KAAK,KAAK,MAAM;AAAO,eAAO,KAAK,YAAY,QAAQ,MAAM,OAAO;AAAA,MACpE,KAAK,KAAK,MAAM;AAAW,eAAO,KAAK,gBAAgB,QAAQ,MAAM,OAAO;AAAA,IAC9E;AAAA,EACF;AAAA,EAEA,WAAW,QAAQ,MAAM,SAAQ;AAC/B,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,YAAY,KAAK,SAAS,CAAC;AA
C/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK,cAAc;AACrD,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,WAAO,EAAC,UAAU,SAAS,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EACjF;AAAA,EAEA,YAAY,QAAQ,MAAM,SAAQ;AAChC,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,UAAU,KAAK,SAAS,CAAC;AAC7B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK;AACvC,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,MAAM,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,OAAO,CAAC;AAC/D,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,QAAI,UAAU,EAAC,QAAQ,OAAO,UAAU,KAAI;AAC5C,WAAO,EAAC,UAAU,SAAS,KAAU,OAAc,OAAO,eAAe,OAAO,QAAgB;AAAA,EAClG;AAAA,EAEA,gBAAgB,QAAQ,MAAM,SAAQ;AACpC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB;AAClC,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AAEjD,WAAO,EAAC,UAAU,MAAM,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EAC9E;AACF;;;ACjBA,IAAqB,SAArB,MAA4B;AAAA,EAC1B,YAAY,UAAU,OAAO,CAAC,GAAE;AAC9B,SAAK,uBAAuB,EAAC,MAAM,CAAC,GAAG,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,SAAS,CAAC,EAAC;AACxE,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa,CAAC;AACnB,SAAK,MAAM;AACX,SAAK,cAAc;AACnB,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,YAAY,KAAK,aAAa,OAAO,aAAa;AACvD,SAAK,2BAA2B;AAChC,SAAK,qBAAqB,KAAK;AAC/B,SAAK,gBAAgB;AACrB,SAAK,eAAe,KAAK,kBAAmB,UAAU,OAAO;AAC7D,SAAK,yBAAyB;AAC9B,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AAIvD,SAAK,gBAAgB;AACrB,SAAK,gBAAgB;AACrB,SAAK,aAAa,KAAK,cAAc;AACrC,SAAK,eAAe;AACpB,SAAK,aAAa;AAClB,QAAG,KAAK,cAAc,UAAS;AAC7B,WAAK
,SAAS,KAAK,UAAU,KAAK;AAClC,WAAK,SAAS,KAAK,UAAU,KAAK;AAAA,IACpC,OAAO;AACL,WAAK,SAAS,KAAK;AACnB,WAAK,SAAS,KAAK;AAAA,IACrB;AACA,QAAI,+BAA+B;AACnC,QAAG,aAAa,UAAU,kBAAiB;AACzC,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,KAAK,MAAK;AACX,eAAK,WAAW;AAChB,yCAA+B,KAAK;AAAA,QACtC;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,iCAAiC,KAAK,cAAa;AACpD,yCAA+B;AAC/B,eAAK,QAAQ;AAAA,QACf;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,oBAAoB,MAAM;AACnD,YAAG,SAAS,oBAAoB,UAAS;AACvC,eAAK,aAAa;AAAA,QACpB,OAAO;AACL,eAAK,aAAa;AAElB,cAAG,CAAC,KAAK,YAAY,KAAK,CAAC,KAAK,eAAc;AAC5C,iBAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,UACpC;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH;AACA,SAAK,sBAAsB,KAAK,uBAAuB;AACvD,SAAK,gBAAgB,CAAC,UAAU;AAC9B,UAAG,KAAK,eAAc;AACpB,eAAO,KAAK,cAAc,KAAK;AAAA,MACjC,OAAO;AACL,eAAO,CAAC,KAAM,KAAM,GAAI,EAAE,QAAQ,CAAC,KAAK;AAAA,MAC1C;AAAA,IACF;AACA,SAAK,mBAAmB,CAAC,UAAU;AACjC,UAAG,KAAK,kBAAiB;AACvB,eAAO,KAAK,iBAAiB,KAAK;AAAA,MACpC,OAAO;AACL,eAAO,CAAC,IAAI,IAAI,KAAK,KAAK,KAAK,KAAK,KAAK,KAAM,GAAI,EAAE,QAAQ,CAAC,KAAK;AAAA,MACrE;AAAA,IACF;AACA,SAAK,SAAS,KAAK,UAAU;AAC7B,QAAG,CAAC,KAAK,UAAU,KAAK,OAAM;AAC5B,WAAK,SAAS,CAAC,MAAM,KAAK,SAAS;AAAE,gBAAQ,IAAI,GAAG,IAAI,KAAK,GAAG,IAAI,IAAI;AAAA,MAAE;AAAA,IAC5E;AACA,SAAK,oBAAoB,KAAK,qBAAqB;AACnD,SAAK,SAAS,QAAQ,KAAK,UAAU,CAAC,CAAC;AACvC,SAAK,WAAW,GAAG,QAAQ,IAAI,WAAW,SAAS;AACnD,SAAK,MAAM,KAAK,OAAO;AACvB,SAAK,wBAAwB;AAC7B,SAAK,iBAAiB;AACtB,SAAK,sBAAsB;AAC3B,SAAK,iBAAiB,IAAI,MAAM,MAAM;AACpC,UAAG,KAAK,YAAW;AACjB,aAAK,IAAI,qCAAqC;AAC9C,aAAK,SAAS;AACd;AAAA,MACF;AACA,WAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,IACpC,GAAG,KAAK,gBAAgB;AACxB,SAAK,YAAY,KAAK;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,uBAAsB;AAAE,WAAO;AAAA,EAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQxC,iBAAiB,cAAa;AAC5B,SAAK;AACL,SAAK,gBAAgB;AACrB,iBAAa,KAAK,aAAa;AAC/B,SAAK,eAAe,MAAM;AAC1B,QAAG,KAAK,MAAK;AACX,WAAK,KAAK,MAAM;AAChB,WAAK,OAAO;AAAA,IACd;AACA,SAAK,YAAY;AAAA,EACnB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,WAAU;AAAE,WAAO,SAAS,SAAS,MAAM,QAAQ,IAAI,QAAQ;AAAA,EAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpE,cAAa;AACX,QAAI,MAAM,KAAK;AAAA,MACb,KAAK,aAA
a,KAAK,UAAU,KAAK,OAAO,CAAC;AAAA,MAAG,EAAC,KAAK,KAAK,IAAG;AAAA,IAAC;AAClE,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO;AAAA,IAAI;AACtC,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO,GAAG,KAAK,SAAS,CAAC,IAAI,GAAG;AAAA,IAAG;AAE9D,WAAO,GAAG,KAAK,SAAS,CAAC,MAAM,SAAS,IAAI,GAAG,GAAG;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,WAAW,UAAU,MAAM,QAAO;AAChC,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,gBAAgB;AACrB,iBAAa,KAAK,aAAa;AAC/B,SAAK,eAAe,MAAM;AAC1B,SAAK,SAAS,MAAM;AAClB,WAAK,gBAAgB;AACrB,kBAAY,SAAS;AAAA,IACvB,GAAG,MAAM,MAAM;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,QAAO;AACb,QAAG,QAAO;AACR,iBAAW,QAAQ,IAAI,yFAAyF;AAChH,WAAK,SAAS,QAAQ,MAAM;AAAA,IAC9B;AACA,QAAG,KAAK,QAAQ,CAAC,KAAK,eAAc;AAAE;AAAA,IAAO;AAC7C,QAAG,KAAK,sBAAsB,KAAK,cAAc,UAAS;AACxD,WAAK,oBAAoB,UAAU,KAAK,kBAAkB;AAAA,IAC5D,OAAO;AACL,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,IAAI,MAAM,KAAK,MAAK;AAAE,SAAK,UAAU,KAAK,OAAO,MAAM,KAAK,IAAI;AAAA,EAAE;AAAA;AAAA;AAAA;AAAA,EAKlE,YAAW;AAAE,WAAO,KAAK,WAAW;AAAA,EAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASzC,OAAO,UAAS;AACd,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,KAAK,KAAK,CAAC,KAAK,QAAQ,CAAC;AACnD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU,UAAS;AACjB,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,QAAQ,KAAK,CAAC,KAAK,QAAQ,CAAC;AACtD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,KAAK,UAAS;AACZ,QAAG,CAAC,KAAK,YAAY,GAAE;AAAE,aAAO;AAAA,IAAM;AACtC,QAAI,MAAM,KAAK,QAAQ;AACvB,QAAI,YAAY,KAAK,IAAI;AACzB,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,IAAQ,CAAC;AACvE,QAAI,WAAW,KAAK,UAAU,SAAO;AACnC,UAAG,IAAI,QAAQ,KAAI;AACjB,aAAK,IAAI,CAAC,QAAQ,CAAC;AACnB,iBAAS,KAAK,IAAI,IAAI,SAAS;AAAA,MACjC;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA
OA,cAAc,WAAU;AAOtB,YAAO,WAAU;AAAA,MACf,KAAK;AAAU,eAAO;AAAA,MACtB;AAAS,eAAO,UAAU;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,mBAAkB;AAChB,SAAK;AACL,SAAK,gBAAgB;AACrB,QAAI,YAAY;AAGhB,QAAG,KAAK,WAAU;AAChB,kBAAY,CAAC,WAAW,GAAG,iBAAiB,GAAG,KAAK,KAAK,SAAS,EAAE,QAAQ,MAAM,EAAE,CAAC,EAAE;AAAA,IACzF;AACA,SAAK,OAAO,IAAI,KAAK,UAAU,KAAK,YAAY,GAAG,SAAS;AAC5D,SAAK,KAAK,aAAa,KAAK;AAC5B,SAAK,KAAK,UAAU,KAAK;AACzB,SAAK,KAAK,SAAS,MAAM,KAAK,WAAW;AACzC,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AACnD,SAAK,KAAK,YAAY,WAAS,KAAK,cAAc,KAAK;AACvD,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AAAA,EACrD;AAAA,EAEA,WAAW,KAAI;AAAE,WAAO,KAAK,gBAAgB,KAAK,aAAa,QAAQ,GAAG;AAAA,EAAE;AAAA,EAE5E,aAAa,KAAK,KAAI;AAAE,SAAK,gBAAgB,KAAK,aAAa,QAAQ,KAAK,GAAG;AAAA,EAAE;AAAA,EAEjF,oBAAoB,mBAAmB,oBAAoB,MAAK;AAC9D,iBAAa,KAAK,aAAa;AAC/B,QAAI,cAAc;AAClB,QAAI,mBAAmB;AACvB,QAAI,SAAS;AACb,QAAI,wBAAwB,KAAK,cAAc,iBAAiB;AAChE,QAAI,WAAW,CAAC,WAAW;AACzB,WAAK,IAAI,aAAa,mBAAmB,qBAAqB,OAAO,MAAM;AAC3E,WAAK,IAAI,CAAC,SAAS,QAAQ,CAAC;AAC5B,yBAAmB;AACnB,WAAK,iBAAiB,iBAAiB;AACvC,WAAK,iBAAiB;AAAA,IACxB;AACA,QAAG,KAAK,WAAW,gBAAgB,qBAAqB,EAAE,GAAE;AAAE,aAAO,SAAS,WAAW;AAAA,IAAE;AAE3F,SAAK,gBAAgB,WAAW,UAAU,iBAAiB;AAE3D,eAAW,KAAK,QAAQ,YAAU;AAChC,WAAK,IAAI,aAAa,SAAS,MAAM;AACrC,UAAG,oBAAoB,CAAC,aAAY;AAClC,qBAAa,KAAK,aAAa;AAC/B,iBAAS,MAAM;AAAA,MACjB;AAAA,IACF,CAAC;AACD,QAAG,KAAK,aAAY;AAClB,WAAK,IAAI,CAAC,KAAK,WAAW,CAAC;AAAA,IAC7B;AACA,SAAK,cAAc,KAAK,OAAO,MAAM;AACnC,oBAAc;AACd,UAAG,CAAC,kBAAiB;AACnB,YAAIC,yBAAwB,KAAK,cAAc,iBAAiB;AAEhE,YAAG,CAAC,KAAK,0BAAyB;AAAE,eAAK,aAAa,gBAAgBA,sBAAqB,IAAI,MAAM;AAAA,QAAE;AACvG,eAAO,KAAK,IAAI,aAAa,eAAeA,sBAAqB,WAAW;AAAA,MAC9E;AAEA,mBAAa,KAAK,aAAa;AAC/B,WAAK,gBAAgB,WAAW,UAAU,iBAAiB;AAC3D,WAAK,KAAK,SAAO;AACf,aAAK,IAAI,aAAa,8BAA8B,GAAG;AACvD,aAAK,2BAA2B;AAChC,qBAAa,KAAK,aAAa;AAAA,MACjC,CAAC;AAAA,IACH,CAAC;AACD,SAAK,iBAAiB;AAAA,EACxB;AAAA,EAEA,kBAAiB;AACf,iBAAa,KAAK,cAAc;AAChC,iBAAa,KAAK,qBAAqB;AAAA,EACzC;AAAA,EAEA,aAAY;AACV,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,GAAG,KAAK,cAAc,KAAK,SAAS,CAAC,iBAAiB,KAAK,YAAY,CAAC,EAAE;AACrH,SAAK,gBAAg
B;AACrB,SAAK,gBAAgB;AACrB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,eAAe;AACpB,SAAK,qBAAqB,KAAK,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM,SAAS,CAAC;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA,EAMA,mBAAkB;AAChB,QAAG,KAAK,qBAAoB;AAC1B,WAAK,sBAAsB;AAC3B,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,IAAI,aAAa,0DAA0D;AAAA,MAAE;AACxG,WAAK,iBAAiB;AACtB,WAAK,gBAAgB;AACrB,WAAK,SAAS,MAAM,KAAK,eAAe,gBAAgB,GAAG,iBAAiB,mBAAmB;AAAA,IACjG;AAAA,EACF;AAAA,EAEA,iBAAgB;AACd,QAAG,KAAK,QAAQ,KAAK,KAAK,eAAc;AAAE;AAAA,IAAO;AACjD,SAAK,sBAAsB;AAC3B,SAAK,gBAAgB;AACrB,SAAK,iBAAiB,WAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,EACvF;AAAA,EAEA,SAAS,UAAU,MAAM,QAAO;AAC9B,QAAG,CAAC,KAAK,MAAK;AACZ,aAAO,YAAY,SAAS;AAAA,IAC9B;AAIA,UAAM,cAAc,KAAK;AAEzB,SAAK,kBAAkB,aAAa,MAAM;AACxC,UAAG,MAAK;AAAE,oBAAY,MAAM,MAAM,UAAU,EAAE;AAAA,MAAE,OAAO;AAAE,oBAAY,MAAM;AAAA,MAAE;AAE7E,WAAK,oBAAoB,aAAa,MAAM;AAC1C,YAAG,KAAK,SAAS,aAAY;AAC3B,eAAK,KAAK,SAAS,WAAW;AAAA,UAAE;AAChC,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,KAAK,YAAY,WAAW;AAAA,UAAE;AACnC,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,OAAO;AAAA,QACd;AAEA,oBAAY,SAAS;AAAA,MACvB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEA,kBAAkB,MAAM,UAAU,QAAQ,GAAE;AAC1C,QAAG,UAAU,KAAK,CAAC,KAAK,gBAAe;AACrC,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,kBAAkB,MAAM,UAAU,QAAQ,CAAC;AAAA,IAClD,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,oBAAoB,MAAM,UAAU,QAAQ,GAAE;AAC5C,QAAG,UAAU,KAAK,KAAK,eAAe,cAAc,QAAO;AACzD,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,oBAAoB,MAAM,UAAU,QAAQ,CAAC;AAAA,IACpD,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,YAAY,OAAM;AAChB,QAAG,KAAK,KAAM,MAAK,KAAK,UAAU,MAAM;AAAA,IAAC;AACzC,QAAI,YAAY,SAAS,MAAM;AAC/B,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,SAAS,KAAK;AACzD,SAAK,iBAAiB;AACtB,SAAK,gBAAgB;AACrB,QAAG,CAAC,KAAK,iBAAiB,cAAc,KAAK;AAC3C,WAAK,eAAe,gBAAgB;AAAA,IACtC;AACA,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM,SAAS,KAAK,CAAC;AAAA,EAC3E;AAAA;AAAA;AAAA;AAAA,EAKA,YAAY,OAAM;AAChB,QAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,SAAS,KAAK;AACzD,QAAI,kBAAkB,KAAK;AAC3B,QAAI,oBAAoB,KAAK;AAC7B,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,QAAQ,MAAM;AACxD,eAAS,OAAO,i
BAAiB,iBAAiB;AAAA,IACpD,CAAC;AACD,QAAG,oBAAoB,KAAK,aAAa,oBAAoB,GAAE;AAC7D,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,mBAAkB;AAChB,SAAK,SAAS,QAAQ,aAAW;AAC/B,UAAG,EAAE,QAAQ,UAAU,KAAK,QAAQ,UAAU,KAAK,QAAQ,SAAS,IAAG;AACrE,gBAAQ,QAAQ,eAAe,KAAK;AAAA,MACtC;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,kBAAiB;AACf,YAAO,KAAK,QAAQ,KAAK,KAAK,YAAW;AAAA,MACvC,KAAK,cAAc;AAAY,eAAO;AAAA,MACtC,KAAK,cAAc;AAAM,eAAO;AAAA,MAChC,KAAK,cAAc;AAAS,eAAO;AAAA,MACnC;AAAS,eAAO;AAAA,IAClB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKA,cAAa;AAAE,WAAO,KAAK,gBAAgB,MAAM;AAAA,EAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxD,OAAO,SAAQ;AACb,SAAK,IAAI,QAAQ,eAAe;AAChC,SAAK,WAAW,KAAK,SAAS,OAAO,OAAK,MAAM,OAAO;AAAA,EACzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,IAAI,MAAK;AACP,aAAQ,OAAO,KAAK,sBAAqB;AACvC,WAAK,qBAAqB,GAAG,IAAI,KAAK,qBAAqB,GAAG,EAAE,OAAO,CAAC,CAAC,GAAG,MAAM;AAChF,eAAO,KAAK,QAAQ,GAAG,MAAM;AAAA,MAC/B,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAO,aAAa,CAAC,GAAE;AAC7B,QAAI,OAAO,IAAI,QAAQ,OAAO,YAAY,IAAI;AAC9C,SAAK,SAAS,KAAK,IAAI;AACvB,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,MAAK;AACR,QAAG,KAAK,UAAU,GAAE;AAClB,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,SAAQ,IAAI;AAC7C,WAAK,IAAI,QAAQ,GAAG,KAAK,IAAI,KAAK,KAAK,QAAQ,KAAK,GAAG,KAAK,OAAO;AAAA,IACrE;AAEA,QAAG,KAAK,YAAY,GAAE;AACpB,WAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC;AAAA,IACpD,OAAO;AACL,WAAK,WAAW,KAAK,MAAM,KAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC,CAAC;AAAA,IAChF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAS;AACP,QAAI,SAAS,KAAK,MAAM;AACxB,QAAG,WAAW,KAAK,KAAI;AAAE,WAAK,MAAM;AAAA,IAAE,OAAO;AAAE,WAAK,MAAM;AAAA,IAAO;AAEjE,WAAO,KAAK,IAAI,SAAS;AAAA,EAC3B;AAAA,EAEA,gBAAe;AACb,QAAG,KAAK,uBAAuB,CAAC,KAAK,YAAY,GAAE;AAAE;AAAA,IAAO;AAC5D,SAAK,sBAAsB,KAAK,QAAQ;AACxC,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,KAAK,KAAK,oBAAmB,CAAC;AAC5F,SAAK,wBAAwB,WAAW,MAAM,KAAK,iBAAiB,GAAG,KAAK,mBAAmB;AAAA,EACjG;AAAA,EAEA,kBAAiB;AACf,QAAG,KAAK,YAAY,KAAK,KAAK,WAAW,SAAS,GAAE;AAClD,WAAK,WAAW,QAAQ,cAAY,SAAS,CAAC;AAC9C,WAAK,aAAa,CAAC;AAAA,IACrB;AAAA,E
ACF;AAAA,EAEA,cAAc,YAAW;AACvB,SAAK,OAAO,WAAW,MAAM,SAAO;AAClC,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,SAAQ,IAAI;AAC7C,UAAG,OAAO,QAAQ,KAAK,qBAAoB;AACzC,aAAK,gBAAgB;AACrB,aAAK,sBAAsB;AAC3B,aAAK,iBAAiB,WAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,MACvF;AAEA,UAAG,KAAK,UAAU,EAAG,MAAK,IAAI,WAAW,GAAG,QAAQ,UAAU,EAAE,IAAI,KAAK,IAAI,KAAK,IAAI,OAAO,MAAM,MAAM,OAAO,EAAE,IAAI,OAAO;AAE7H,eAAQ,IAAI,GAAG,IAAI,KAAK,SAAS,QAAQ,KAAI;AAC3C,cAAM,UAAU,KAAK,SAAS,CAAC;AAC/B,YAAG,CAAC,QAAQ,SAAS,OAAO,OAAO,SAAS,QAAQ,GAAE;AAAE;AAAA,QAAS;AACjE,gBAAQ,QAAQ,OAAO,SAAS,KAAK,QAAQ;AAAA,MAC/C;AAEA,eAAQ,IAAI,GAAG,IAAI,KAAK,qBAAqB,QAAQ,QAAQ,KAAI;AAC/D,YAAI,CAAC,EAAE,QAAQ,IAAI,KAAK,qBAAqB,QAAQ,CAAC;AACtD,iBAAS,GAAG;AAAA,MACd;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,eAAe,OAAM;AACnB,QAAI,aAAa,KAAK,SAAS,KAAK,OAAK,EAAE,UAAU,UAAU,EAAE,SAAS,KAAK,EAAE,UAAU,EAAE;AAC7F,QAAG,YAAW;AACZ,UAAG,KAAK,UAAU,EAAG,MAAK,IAAI,aAAa,4BAA4B,KAAK,GAAG;AAC/E,iBAAW,MAAM;AAAA,IACnB;AAAA,EACF;AACF;", + "names": ["closure", "fallbackTransportName"] +} diff --git a/deps/phoenix/priv/static/phoenix.png b/deps/phoenix/priv/static/phoenix.png new file mode 100644 index 0000000..9c81075 Binary files /dev/null and b/deps/phoenix/priv/static/phoenix.png differ diff --git a/deps/phoenix/priv/templates/phx.gen.auth/AGENTS.md.eex b/deps/phoenix/priv/templates/phx.gen.auth/AGENTS.md.eex new file mode 100644 index 0000000..c4b1a1b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/AGENTS.md.eex @@ -0,0 +1,57 @@ +## Authentication + +- **Always** handle authentication flow at the router level with proper redirects +- **Always** be mindful of where to place routes. `phx.gen.auth` creates multiple router plugs<%= if live? do %> and `live_session` scopes<% end %>: + - A plug `:fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>` that is included in the default browser pipeline + - A plug `:require_authenticated_<%= schema.singular %>` that redirects to the log in page when the <%= schema.singular %> is not authenticated<%= if live? 
do %> + - A `live_session :current_<%= schema.singular %>` scope - for routes that need the current <%= schema.singular %> but don't require authentication, similar to `:fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>` + - A `live_session :require_authenticated_<%= schema.singular %>` scope - for routes that require authentication, similar to the plug with the same name<% end %> + - In both cases, a `@<%= scope_config.scope.assign_key %>` is assigned to the Plug connection<%= if live? do %> and LiveView socket<% end %> + - A plug `redirect_if_<%= schema.singular %>_is_authenticated` that redirects to a default path in case the <%= schema.singular %> is authenticated - useful for a registration page that should only be shown to unauthenticated <%= schema.plural %> +- **Always let the user know in which router scopes<%= if live? do%>, `live_session`,<% end %> and pipeline you are placing the route, AND SAY WHY** +- `phx.gen.auth` assigns the `<%= scope_config.scope.assign_key %>` assign - it **does not assign a `current_<%= schema.singular %>` assign** +- Always pass the assign `<%= scope_config.scope.assign_key %>` to context modules as first argument. When performing queries, use `<%= scope_config.scope.assign_key %>.<%= schema.singular %>` to filter the query results +- To derive/access `current_<%= schema.singular %>` in templates, **always use the `@<%= scope_config.scope.assign_key %>.<%= schema.singular %>`**, never use **`@current_<%= schema.singular %>`** in templates<%= if live? do %> or LiveViews +- **Never** duplicate `live_session` names. 
A `live_session :current_<%= schema.singular %>` can only be defined __once__ in the router, so all routes for the `live_session :current_<%= schema.singular %>` must be grouped in a single block<% end %> +- Anytime you hit `<%= scope_config.scope.assign_key %>` errors or the logged in session isn't displaying the right content, **always double check the router and ensure you are using the correct plug<%= if live? do %> and `live_session`<% end %> as described below** + +### Routes that require authentication + +<%= if live? do %>LiveViews that require login should **always be placed inside the __existing__ `live_session :require_authenticated_<%= schema.singular %>` block**: + + scope "/", AppWeb do + pipe_through [:browser, :require_authenticated_<%= schema.singular %>] + + live_session :require_authenticated_<%= schema.singular %>, + on_mount: [{<%= inspect auth_module %>, :require_authenticated}] do + # phx.gen.auth generated routes + live "/<%= schema.plural %>/settings", <%= inspect schema.alias %>Live.Settings, :edit + live "/<%= schema.plural %>/settings/confirm-email/:token", <%= inspect schema.alias %>Live.Settings, :confirm_email + # our own routes that require logged in <%= schema.singular %> + live "/", MyLiveThatRequiresAuth, :index + end + end + +<% end %>Controller routes must be placed in a scope that sets the `:require_authenticated_<%= schema.singular %>` plug: + + scope "/", AppWeb do + pipe_through [:browser, :require_authenticated_<%= schema.singular %>] + + get "/", MyControllerThatRequiresAuth, :index + end + +### Routes that work with or without authentication + +<%= if live? 
do %>LiveViews that can work with or without authentication, **always use the __existing__ `:current_<%= schema.singular %>` scope**, ie: + + scope "/", MyAppWeb do + pipe_through [:browser] + + live_session :current_<%= schema.singular %>, + on_mount: [{<%= inspect auth_module %>, :mount_<%= scope_config.scope.assign_key %>}] do + # our own routes that work with or without authentication + live "/", PublicLive + end + end + +<% end %>Controllers automatically have the `<%= scope_config.scope.assign_key %>` available if they use the `:browser` pipeline. diff --git a/deps/phoenix/priv/templates/phx.gen.auth/auth.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/auth.ex.eex new file mode 100644 index 0000000..927ff53 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/auth.ex.eex @@ -0,0 +1,321 @@ +defmodule <%= inspect auth_module %> do + use <%= inspect context.web_module %>, :verified_routes + + import Plug.Conn + import Phoenix.Controller + + alias <%= inspect context.module %> + alias <%= inspect scope_config.scope.module %> + + # Make the remember me cookie valid for 14 days. This should match + # the session validity setting in <%= inspect schema.alias %>Token. + @max_cookie_age_in_days 14 + @remember_me_cookie "_<%= web_app_name %>_<%= schema.singular %>_remember_me" + @remember_me_options [ + sign: true, + max_age: @max_cookie_age_in_days * 24 * 60 * 60, + same_site: "Lax" + ] + + # How old the session token should be before a new one is issued. When a request is made + # with a session token older than this value, then a new session token will be created + # and the session and remember-me cookies (if set) will be updated with the new token. + # Lowering this value will result in more tokens being created by active users. Increasing + # it will result in less time before a session token expires for a user to get issued a new + # token. 
This can be set to a value greater than `@max_cookie_age_in_days` to disable + # the reissuing of tokens completely. + @session_reissue_age_in_days 7 + + @doc """ + Logs the <%= schema.singular %> in. + + Redirects to the session's `:<%= schema.singular %>_return_to` path + or falls back to the `signed_in_path/1`. + """ + def log_in_<%= schema.singular %>(conn, <%= schema.singular %>, params \\ %{}) do + <%= schema.singular %>_return_to = get_session(conn, :<%= schema.singular %>_return_to) + + conn + |> create_or_extend_session(<%= schema.singular %>, params) + |> redirect(to: <%= schema.singular %>_return_to || signed_in_path(conn)) + end + + @doc """ + Logs the <%= schema.singular %> out. + + It clears all session data for safety. See renew_session. + """ + def log_out_<%= schema.singular %>(conn) do + <%= schema.singular %>_token = get_session(conn, :<%= schema.singular %>_token) + <%= schema.singular %>_token && <%= inspect context.alias %>.delete_<%= schema.singular %>_session_token(<%= schema.singular %>_token) + + if live_socket_id = get_session(conn, :live_socket_id) do + <%= inspect(endpoint_module) %>.broadcast(live_socket_id, "disconnect", %{}) + end + + conn + |> renew_session(nil) + |> delete_resp_cookie(@remember_me_cookie, @remember_me_options) + |> redirect(to: ~p"/") + end + + @doc """ + Authenticates the <%= schema.singular %> by looking into the session and remember me token. + + Will reissue the session token if it is older than the configured age. 
+ """ + def fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>(conn, _opts) do + with {token, conn} <- ensure_<%= schema.singular %>_token(conn), + {<%= schema.singular %>, token_inserted_at} <- <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) do + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> maybe_reissue_<%= schema.singular %>_session_token(<%= schema.singular %>, token_inserted_at) + else + nil -> assign(conn, :<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(nil)) + end + end + + defp ensure_<%= schema.singular %>_token(conn) do + if token = get_session(conn, :<%= schema.singular %>_token) do + {token, conn} + else + conn = fetch_cookies(conn, signed: [@remember_me_cookie]) + + if token = conn.cookies[@remember_me_cookie] do + {token, conn |> put_token_in_session(token) |> put_session(:<%= schema.singular %>_remember_me, true)} + else + nil + end + end + end + + # Reissue the session token if it is older than the configured reissue age. + defp maybe_reissue_<%= schema.singular %>_session_token(conn, <%= schema.singular %>, token_inserted_at) do + token_age = <%= inspect datetime_module %>.diff(<%= datetime_now %>, token_inserted_at, :day) + + if token_age >= @session_reissue_age_in_days do + create_or_extend_session(conn, <%= schema.singular %>, %{}) + else + conn + end + end + + # This function is the one responsible for creating session tokens + # and storing them safely in the session and cookies. It may be called + # either when logging in, during sudo mode, or to renew a session which + # will soon expire. + # + # When the session is created, rather than extended, the renew_session + # function will clear the session to avoid fixation attacks. See the + # renew_session function to customize this behaviour. 
+ defp create_or_extend_session(conn, <%= schema.singular %>, params) do + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + remember_me = get_session(conn, :<%= schema.singular %>_remember_me) + + conn + |> renew_session(<%= schema.singular %>) + |> put_token_in_session(token) + |> maybe_write_remember_me_cookie(token, params, remember_me) + end + + # Do not renew session if the <%= schema.singular %> is already logged in + # to prevent CSRF errors or data being lost in tabs that are still open + defp renew_session(conn, <%= schema.singular %>) when conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id do + conn + end + + # This function renews the session ID and erases the whole + # session to avoid fixation attacks. If there is any data + # in the session you may want to preserve after log in/log out, + # you must explicitly fetch the session data before clearing + # and then immediately set it after clearing, for example: + # + # defp renew_session(conn, _<%= schema.singular %>) do + # delete_csrf_token() + # preferred_locale = get_session(conn, :preferred_locale) + # + # conn + # |> configure_session(renew: true) + # |> clear_session() + # |> put_session(:preferred_locale, preferred_locale) + # end + # + defp renew_session(conn, _<%= schema.singular %>) do + delete_csrf_token() + + conn + |> configure_session(renew: true) + |> clear_session() + end + + defp maybe_write_remember_me_cookie(conn, token, %{"remember_me" => "true"}, _), + do: write_remember_me_cookie(conn, token) + + defp maybe_write_remember_me_cookie(conn, token, _params, true), + do: write_remember_me_cookie(conn, token) + + defp maybe_write_remember_me_cookie(conn, _token, _params, _), do: conn + + defp write_remember_me_cookie(conn, token) do + conn + |> put_session(:<%= schema.singular %>_remember_me, true) + |> put_resp_cookie(@remember_me_cookie, token, @remember_me_options) + end + 
+ <%= if live? do %>defp put_token_in_session(conn, token) do + conn + |> put_session(:<%= schema.singular %>_token, token) + |> put_session(:live_socket_id, <%= schema.singular %>_session_topic(token)) + end + + @doc """ + Disconnects existing sockets for the given tokens. + """ + def disconnect_sessions(tokens) do + Enum.each(tokens, fn %{token: token} -> + <%= inspect endpoint_module %>.broadcast(<%= schema.singular %>_session_topic(token), "disconnect", %{}) + end) + end + + defp <%= schema.singular %>_session_topic(token), do: "<%= schema.plural %>_sessions:#{Base.url_encode64(token)}" + + @doc """ + Handles mounting and authenticating the <%= scope_config.scope.assign_key %> in LiveViews. + + ## `on_mount` arguments + + * `:mount_<%= scope_config.scope.assign_key %>` - Assigns <%= scope_config.scope.assign_key %> + to socket assigns based on <%= schema.singular %>_token, or nil if + there's no <%= schema.singular %>_token or no matching <%= schema.singular %>. + + * `:require_authenticated` - Authenticates the <%= schema.singular %> from the session, + and assigns the <%= scope_config.scope.assign_key %> to socket assigns based + on <%= schema.singular %>_token. + Redirects to login page if there's no logged <%= schema.singular %>. + + ## Examples + + Use the `on_mount` lifecycle macro in LiveViews to mount or authenticate + the `<%= scope_config.scope.assign_key %>`: + + defmodule <%= inspect context.web_module %>.PageLive do + use <%= inspect context.web_module %>, :live_view + + on_mount {<%= inspect auth_module %>, :mount_<%= scope_config.scope.assign_key %>} + ... 
+ end + + Or use the `live_session` of your router to invoke the on_mount callback: + + live_session :authenticated, on_mount: [{<%= inspect auth_module %>, :require_authenticated}] do + live "/profile", ProfileLive, :index + end + """ + def on_mount(:mount_<%= scope_config.scope.assign_key %>, _params, session, socket) do + {:cont, mount_<%= scope_config.scope.assign_key %>(socket, session)} + end + + def on_mount(:require_authenticated, _params, session, socket) do + socket = mount_<%= scope_config.scope.assign_key %>(socket, session) + + if socket.assigns.<%= scope_config.scope.assign_key %> && socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> do + {:cont, socket} + else + socket = + socket + |> Phoenix.LiveView.put_flash(:error, "You must log in to access this page.") + |> Phoenix.LiveView.redirect(to: ~p"<%= schema.route_prefix %>/log-in") + + {:halt, socket} + end + end + + def on_mount(:require_sudo_mode, _params, session, socket) do + socket = mount_<%= scope_config.scope.assign_key %>(socket, session) + + if <%= inspect context.alias %>.sudo_mode?(socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>, -10) do + {:cont, socket} + else + socket = + socket + |> Phoenix.LiveView.put_flash(:error, "You must re-authenticate to access this page.") + |> Phoenix.LiveView.redirect(to: ~p"<%= schema.route_prefix %>/log-in") + + {:halt, socket} + end + end + + defp mount_<%= scope_config.scope.assign_key %>(socket, session) do + Phoenix.Component.assign_new(socket, :<%= scope_config.scope.assign_key %>, fn -> + {<%= schema.singular %>, _} = + if <%= schema.singular %>_token = session["<%= schema.singular %>_token"] do + <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + end || {nil, nil} + + <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>) + end) + end + + @doc "Returns the path to redirect to after log in." 
+ # the <%= schema.singular %> was already logged in, redirect to settings + def signed_in_path(%Plug.Conn{assigns: %{<%= scope_config.scope.assign_key %>: %<%= inspect scope_config.scope.alias %>{<%= schema.singular %>: %<%= inspect context.alias %>.<%= inspect schema.alias %>{}}}}) do + ~p"<%= schema.route_prefix %>/settings" + end + + def signed_in_path(_), do: ~p"/" + + <% else %>defp put_token_in_session(conn, token) do + put_session(conn, :<%= schema.singular %>_token, token) + end + + @doc """ + Plug for routes that require sudo mode. + """ + def require_sudo_mode(conn, _opts) do + if <%= inspect context.alias %>.sudo_mode?(conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>, -10) do + conn + else + conn + |> put_flash(:error, "You must re-authenticate to access this page.") + |> maybe_store_return_to() + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + |> halt() + end + end + + @doc """ + Plug for routes that require the <%= schema.singular %> to not be authenticated. + """ + def redirect_if_<%= schema.singular %>_is_authenticated(conn, _opts) do + if conn.assigns.<%= scope_config.scope.assign_key %> do + conn + |> redirect(to: signed_in_path(conn)) + |> halt() + else + conn + end + end + + defp signed_in_path(_conn), do: ~p"/" + + <% end %>@doc """ + Plug for routes that require the <%= schema.singular %> to be authenticated. 
+ """ + def require_authenticated_<%= schema.singular %>(conn, _opts) do + if conn.assigns.<%= scope_config.scope.assign_key %> && conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> do + conn + else + conn + |> put_flash(:error, "You must log in to access this page.") + |> maybe_store_return_to() + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + |> halt() + end + end + + defp maybe_store_return_to(%{method: "GET"} = conn) do + put_session(conn, :<%= schema.singular %>_return_to, current_path(conn)) + end + + defp maybe_store_return_to(conn), do: conn +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs.eex new file mode 100644 index 0000000..3fef928 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs.eex @@ -0,0 +1,442 @@ +defmodule <%= inspect auth_module %>Test do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + <%= if live? 
do %>alias Phoenix.LiveView + <% end %>alias <%= inspect context.module %> + alias <%= inspect context.module %>.<%= inspect scope_config.scope.alias %> + alias <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Auth + + import <%= inspect context.module %>Fixtures + + @remember_me_cookie "_<%= web_app_name %>_<%= schema.singular %>_remember_me" + @remember_me_cookie_max_age 60 * 60 * 24 * 14 + + setup %{conn: conn} do + conn = + conn + |> Map.replace!(:secret_key_base, <%= inspect endpoint_module %>.config(:secret_key_base)) + |> init_test_session(%{}) + + %{<%= schema.singular %>: %{<%= schema.singular %>_fixture() | authenticated_at: <%= datetime_now %>}, conn: conn} + end + + describe "log_in_<%= schema.singular %>/3" do + test "stores the <%= schema.singular %> token in the session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(conn, <%= schema.singular %>) + assert token = get_session(conn, :<%= schema.singular %>_token)<%= if live? 
do %> + assert get_session(conn, :live_socket_id) == "<%= schema.plural %>_sessions:#{Base.url_encode64(token)}"<% end %> + assert redirected_to(conn) == ~p"/" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + + test "clears everything previously stored in the session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> put_session(:to_be_removed, "value") |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + refute get_session(conn, :to_be_removed) + end + + test "keeps session when re-authenticating", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> put_session(:to_be_removed, "value") + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + assert get_session(conn, :to_be_removed) + end + + test "clears session when <%= schema.singular %> does not match when re-authenticating", %{ + conn: conn, + <%= schema.singular %>: <%= schema.singular %> + } do + other_<%= schema.singular %> = <%= schema.singular %>_fixture() + + conn = + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(other_<%= schema.singular %>)) + |> put_session(:to_be_removed, "value") + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + refute get_session(conn, :to_be_removed) + end + + test "redirects to the configured path", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> put_session(:<%= schema.singular %>_return_to, "/hello") |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + assert redirected_to(conn) == "/hello" + end + + test "writes a cookie if remember_me is configured", %{conn: 
conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + assert get_session(conn, :<%= schema.singular %>_token) == conn.cookies[@remember_me_cookie] + assert get_session(conn, :<%= schema.singular %>_remember_me) == true + + assert %{value: signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert signed_token != get_session(conn, :<%= schema.singular %>_token) + assert max_age == @remember_me_cookie_max_age + end<%= if live? do %> + + test "redirects to settings when <%= schema.singular %> is already logged in", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/settings" + end<% end %> + + test "writes a cookie if remember_me was set in previous session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + assert get_session(conn, :<%= schema.singular %>_token) == conn.cookies[@remember_me_cookie] + assert get_session(conn, :<%= schema.singular %>_remember_me) == true + + conn = + conn + |> recycle() + |> Map.replace!(:secret_key_base, <%= inspect endpoint_module %>.config(:secret_key_base)) + |> fetch_cookies() + |> init_test_session(%{<%= schema.singular %>_remember_me: true}) + + # the conn is already logged in and has the remember_me cookie set, + # now we log in again and even without explicitly setting remember_me, + # the cookie should be set again + conn = conn |> <%= inspect schema.alias 
%>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{}) + assert %{value: signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert signed_token != get_session(conn, :<%= schema.singular %>_token) + assert max_age == @remember_me_cookie_max_age + assert get_session(conn, :<%= schema.singular %>_remember_me) == true + end + end + + describe "logout_<%= schema.singular %>/1" do + test "erases session and cookies", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + conn = + conn + |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) + |> put_req_cookie(@remember_me_cookie, <%= schema.singular %>_token) + |> fetch_cookies() + |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + + refute get_session(conn, :<%= schema.singular %>_token) + refute conn.cookies[@remember_me_cookie] + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == ~p"/" + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + end + + <%= if live? 
do %>test "broadcasts to the given live_socket_id", %{conn: conn} do + live_socket_id = "<%= schema.plural %>_sessions:abcdef-token" + <%= inspect(endpoint_module) %>.subscribe(live_socket_id) + + conn + |> put_session(:live_socket_id, live_socket_id) + |> <%= inspect(schema.alias) %>Auth.log_out_<%= schema.singular %>() + + assert_receive %Phoenix.Socket.Broadcast{event: "disconnect", topic: ^live_socket_id} + end + + <% end %>test "works even if <%= schema.singular %> is already logged out", %{conn: conn} do + conn = conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + refute get_session(conn, :<%= schema.singular %>_token) + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == ~p"/" + end + end + + describe "fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>/2" do + test "authenticates <%= schema.singular %> from session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + conn = + conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>([]) + + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.authenticated_at == <%= schema.singular %>.authenticated_at + assert get_session(conn, :<%= schema.singular %>_token) == <%= schema.singular %>_token + end + + test "authenticates <%= schema.singular %> from cookies", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + logged_in_conn = + conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + + <%= 
schema.singular %>_token = logged_in_conn.cookies[@remember_me_cookie] + %{value: signed_token} = logged_in_conn.resp_cookies[@remember_me_cookie] + + conn = + conn + |> put_req_cookie(@remember_me_cookie, signed_token) + |> <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>([]) + + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.authenticated_at == <%= schema.singular %>.authenticated_at + assert get_session(conn, :<%= schema.singular %>_token) == <%= schema.singular %>_token + assert get_session(conn, :<%= schema.singular %>_remember_me)<%= if live? do %> + + assert get_session(conn, :live_socket_id) == + "<%= schema.plural %>_sessions:#{Base.url_encode64(<%= schema.singular %>_token)}"<% end %> + end + + test "does not authenticate if data is missing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + _ = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + conn = <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>(conn, []) + refute get_session(conn, :<%= schema.singular %>_token) + refute conn.assigns.<%= scope_config.scope.assign_key %> + end + + test "reissues a new token after a few days and refreshes cookie", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + logged_in_conn = + conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + + token = logged_in_conn.cookies[@remember_me_cookie] + %{value: signed_token} = logged_in_conn.resp_cookies[@remember_me_cookie] + + offset_<%= schema.singular %>_token(token, -10, :day) + {<%= schema.singular %>, _} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + + conn = + conn + |> 
put_session(:<%= schema.singular %>_token, token) + |> put_session(:<%= schema.singular %>_remember_me, true) + |> put_req_cookie(@remember_me_cookie, signed_token) + |> <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>([]) + + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id + assert conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.authenticated_at == <%= schema.singular %>.authenticated_at + assert new_token = get_session(conn, :<%= schema.singular %>_token) + assert new_token != token + assert %{value: new_signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert new_signed_token != signed_token + assert max_age == @remember_me_cookie_max_age + end + end + + <%= if live? do %>describe "on_mount :mount_<%= scope_config.scope.assign_key %>" do + setup %{conn: conn} do + %{conn: <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>(conn, [])} + end + + test "assigns <%= scope_config.scope.assign_key %> based on a valid <%= schema.singular %>_token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + session = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> get_session() + + {:cont, updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:mount_<%= scope_config.scope.assign_key %>, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "assigns nil to <%= scope_config.scope.assign_key %> assign if there isn't a valid <%= schema.singular %>_token", %{conn: conn} do + <%= schema.singular %>_token = "invalid_token" + session = conn |> put_session(:<%= 
schema.singular %>_token, <%= schema.singular %>_token) |> get_session() + + {:cont, updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:mount_<%= scope_config.scope.assign_key %>, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.<%= scope_config.scope.assign_key %> == nil + end + + test "assigns nil to <%= scope_config.scope.assign_key %> assign if there isn't a <%= schema.singular %>_token", %{conn: conn} do + session = conn |> get_session() + + {:cont, updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:mount_<%= scope_config.scope.assign_key %>, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.<%= scope_config.scope.assign_key %> == nil + end + end + + describe "on_mount :require_authenticated" do + test "authenticates <%= scope_config.scope.assign_key %> based on a valid <%= schema.singular %>_token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + session = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> get_session() + + {:cont, updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:require_authenticated, %{}, session, %LiveView.Socket{}) + + assert updated_socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "redirects to login page if there isn't a valid <%= schema.singular %>_token", %{conn: conn} do + <%= schema.singular %>_token = "invalid_token" + session = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: <%= inspect context.web_module %>.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + {:halt, updated_socket} = <%= inspect schema.alias %>Auth.on_mount(:require_authenticated, %{}, session, socket) + assert updated_socket.assigns.<%= 
scope_config.scope.assign_key %> == nil + end + + test "redirects to login page if there isn't a <%= schema.singular %>_token", %{conn: conn} do + session = conn |> get_session() + + socket = %LiveView.Socket{ + endpoint: <%= inspect context.web_module %>.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + {:halt, updated_socket} = <%= inspect schema.alias %>Auth.on_mount(:require_authenticated, %{}, session, socket) + assert updated_socket.assigns.<%= scope_config.scope.assign_key %> == nil + end + end + + describe "on_mount :require_sudo_mode" do + test "allows <%= schema.plural %> that have authenticated in the last 10 minutes", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + session = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: <%= inspect(endpoint_module) %>, + assigns: %{__changed__: %{}, flash: %{}} + } + + assert {:cont, _updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:require_sudo_mode, %{}, session, socket) + end + + test "redirects when authentication is too old", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + eleven_minutes_ago = <%= datetime_now %> |> <%= inspect datetime_module %>.add(-11, :minute) + <%= schema.singular %> = %{<%= schema.singular %> | authenticated_at: eleven_minutes_ago} + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + {<%= schema.singular %>, token_inserted_at} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + assert <%= inspect datetime_module %>.compare(token_inserted_at, <%= schema.singular %>.authenticated_at) == :gt + session = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular 
%>_token) |> get_session() + + socket = %LiveView.Socket{ + endpoint: <%= inspect context.web_module %>.Endpoint, + assigns: %{__changed__: %{}, flash: %{}} + } + + assert {:halt, _updated_socket} = + <%= inspect schema.alias %>Auth.on_mount(:require_sudo_mode, %{}, session, socket) + end + end<% else %>describe "require_sudo_mode/2" do + test "allows <%= schema.plural %> that have authenticated in the last 10 minutes", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> fetch_flash() + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> <%= inspect schema.alias %>Auth.require_sudo_mode([]) + + refute conn.halted + refute conn.status + end + + test "redirects when authentication is too old", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + eleven_minutes_ago = <%= datetime_now %> |> <%= inspect datetime_module %>.add(-11, :minute) + <%= schema.singular %> = %{<%= schema.singular %> | authenticated_at: eleven_minutes_ago} + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + {<%= schema.singular %>, token_inserted_at} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + assert <%= inspect datetime_module %>.compare(token_inserted_at, <%= schema.singular %>.authenticated_at) == :gt + + conn = + conn + |> fetch_flash() + |> assign(:<%= scope_config.scope.assign_key %>, Scope.for_<%= schema.singular %>(<%= schema.singular %>)) + |> <%= inspect schema.alias %>Auth.require_sudo_mode([]) + + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "You must re-authenticate to access this page." 
+ end + end + + describe "redirect_if_<%= schema.singular %>_is_authenticated/2" do + setup %{conn: conn} do + %{conn: <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>(conn, [])} + end + + test "redirects if <%= schema.singular %> is authenticated", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> <%= inspect schema.alias %>Auth.redirect_if_<%= schema.singular %>_is_authenticated([]) + + assert conn.halted + assert redirected_to(conn) == ~p"/" + end + + test "does not redirect if <%= schema.singular %> is not authenticated", %{conn: conn} do + conn = <%= inspect schema.alias %>Auth.redirect_if_<%= schema.singular %>_is_authenticated(conn, []) + refute conn.halted + refute conn.status + end + end<% end %> + + describe "require_authenticated_<%= schema.singular %>/2" do + setup %{conn: conn} do + %{conn: <%= inspect schema.alias %>Auth.fetch_<%= scope_config.scope.assign_key %>_for_<%= schema.singular %>(conn, [])} + end + + test "redirects if <%= schema.singular %> is not authenticated", %{conn: conn} do + conn = conn |> fetch_flash() |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + assert conn.halted + + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "You must log in to access this page." 
+ end + + test "stores the path to redirect to on GET", %{conn: conn} do + halted_conn = + %{conn | path_info: ["foo"], query_string: ""} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + assert get_session(halted_conn, :<%= schema.singular %>_return_to) == "/foo" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar=baz"} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + assert get_session(halted_conn, :<%= schema.singular %>_return_to) == "/foo?bar=baz" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar", method: "POST"} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + refute get_session(halted_conn, :<%= schema.singular %>_return_to) + end + + test "does not redirect if <%= schema.singular %> is authenticated", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> assign(:<%= scope_config.scope.assign_key %>, <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>)) + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + refute conn.halted + refute conn.status + end + end<%= if live? 
do %> + + describe "disconnect_sessions/1" do + test "broadcasts disconnect messages for each token" do + tokens = [%{token: "token1"}, %{token: "token2"}] + + for %{token: token} <- tokens do + <%= inspect context.web_module %>.Endpoint.subscribe("<%= schema.plural %>_sessions:#{Base.url_encode64(token)}") + end + + <%= inspect schema.alias %>Auth.disconnect_sessions(tokens) + + assert_receive %Phoenix.Socket.Broadcast{ + event: "disconnect", + topic: "<%= schema.plural %>_sessions:dG9rZW4x" + } + + assert_receive %Phoenix.Socket.Broadcast{ + event: "disconnect", + topic: "<%= schema.plural %>_sessions:dG9rZW4y" + } + end + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live.ex.eex new file mode 100644 index 0000000..d55fc25 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live.ex.eex @@ -0,0 +1,94 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Confirmation do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + + @impl true + def render(assigns) do + ~H""" + ={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header>Welcome {@<%= schema.singular %>.email} +
+ + <.form + :if={!@<%= schema.singular %>.confirmed_at} + for={@form} + id="confirmation_form" + phx-mounted={JS.focus_first()} + phx-submit="submit" + action={~p"<%= schema.route_prefix %>/log-in?_action=confirmed"} + phx-trigger-action={@trigger_submit} + > + + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Confirming..." + class="btn btn-primary w-full" + > + Confirm and stay logged in + + <.button phx-disable-with="Confirming..." class="btn btn-primary btn-soft w-full mt-2"> + Confirm and log in only this time + + + + <.form + :if={@<%= schema.singular %>.confirmed_at} + for={@form} + id="login_form" + phx-submit="submit" + phx-mounted={JS.focus_first()} + action={~p"<%= schema.route_prefix %>/log-in"} + phx-trigger-action={@trigger_submit} + > + + <%%= if @<%= scope_config.scope.assign_key %> do %> + <.button phx-disable-with="Logging in..." class="btn btn-primary w-full"> + Log in + + <%% else %> + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Logging in..." + class="btn btn-primary w-full" + > + Keep me logged in on this device + + <.button phx-disable-with="Logging in..." class="btn btn-primary btn-soft w-full mt-2"> + Log me in only this time + + <%% end %> + + +

.confirmed_at} class="alert alert-outline mt-8"> + Tip: If you prefer passwords, you can enable them in the <%= schema.singular %> settings. +

+
+
+ """ + end + + @impl true + def mount(%{"token" => token}, _session, socket) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_magic_link_token(token) do + form = to_form(%{"token" => token}, as: "<%= schema.singular %>") + + {:ok, assign(socket, <%= schema.singular %>: <%= schema.singular %>, form: form, trigger_submit: false), + temporary_assigns: [form: nil]} + else + {:ok, + socket + |> put_flash(:error, "Magic link is invalid or it has expired.") + |> push_navigate(to: ~p"<%= schema.route_prefix %>/log-in")} + end + end + + @impl true + def handle_event("submit", %{"<%= schema.singular %>" => params}, socket) do + {:noreply, assign(socket, form: to_form(params, as: "<%= schema.singular %>"), trigger_submit: true)} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live_test.exs.eex new file mode 100644 index 0000000..f4dda1c --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_live_test.exs.eex @@ -0,0 +1,118 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.ConfirmationTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + alias <%= inspect context.module %> + + setup do + %{unconfirmed_<%= schema.singular %>: unconfirmed_<%= schema.singular %>_fixture(), confirmed_<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + describe "Confirm <%= schema.singular %>" do + test "renders confirmation page for unconfirmed <%= schema.singular %>", %{conn: conn, unconfirmed_<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, _lv, html} = live(conn, ~p"<%= 
schema.route_prefix %>/log-in/#{token}") + assert html =~ "Confirm and stay logged in" + end + + test "renders login page for confirmed <%= schema.singular %>", %{conn: conn, confirmed_<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, _lv, html} = live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + refute html =~ "Confirm my account" + assert html =~ "Keep me logged in on this device" + end + + test "renders login page for already logged in <%= schema.singular %>", %{conn: conn, confirmed_<%= schema.singular %>: <%= schema.singular %>} do + conn = log_in_<%= schema.singular %>(conn, <%= schema.singular %>) + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, _lv, html} = live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + refute html =~ "Confirm my account" + assert html =~ "Log in" + end + + test "confirms the given token once", %{conn: conn, unconfirmed_<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + + form = form(lv, "#confirmation_form", %{"<%= schema.singular %>" => %{"token" => token}}) + render_submit(form) + + conn = follow_trigger_action(form, conn) + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "<%= inspect schema.alias %> confirmed successfully" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).confirmed_at + # we are logged in now + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == ~p"/" + + # log out, new conn + conn = build_conn() + 
+ {:ok, _lv, html} = + live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + + test "logs confirmed <%= schema.singular %> in without changing confirmed_at", %{ + conn: conn, + confirmed_<%= schema.singular %>: <%= schema.singular %> + } do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + + form = form(lv, "#login_form", %{"<%= schema.singular %>" => %{"token" => token}}) + render_submit(form) + + conn = follow_trigger_action(form, conn) + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "Welcome back!" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).confirmed_at == <%= schema.singular %>.confirmed_at + + # log out, new conn + conn = build_conn() + + {:ok, _lv, html} = + live(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + + test "raises error for invalid token", %{conn: conn} do + {:ok, _lv, html} = + live(conn, ~p"<%= schema.route_prefix %>/log-in/invalid-token") + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "Magic link is invalid or it has expired" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs.eex new file mode 100644 index 0000000..de10dd7 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs.eex @@ -0,0 +1,41 @@ + + @doc """ + Setup helper that registers and logs in <%= schema.plural %>. 
+ + setup :register_and_log_in_<%= schema.singular %> + + It stores an updated connection and a registered <%= schema.singular %> in the + test context. + """ + def register_and_log_in_<%= schema.singular %>(%{conn: conn} = context) do + <%= schema.singular %> = <%= inspect context.module %>Fixtures.<%= schema.singular %>_fixture() + scope = <%= inspect scope_config.scope.module %>.for_<%= schema.singular %>(<%= schema.singular %>) + + opts = + context + |> Map.take([:token_authenticated_at]) + |> Enum.into([]) + + %{conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>, opts), <%= schema.singular %>: <%= schema.singular %>, scope: scope} + end + + @doc """ + Logs the given `<%= schema.singular %>` into the `conn`. + + It returns an updated `conn`. + """ + def log_in_<%= schema.singular %>(conn, <%= schema.singular %>, opts \\ []) do + token = <%= inspect context.module %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + maybe_set_token_authenticated_at(token, opts[:token_authenticated_at]) + + conn + |> Phoenix.ConnTest.init_test_session(%{}) + |> Plug.Conn.put_session(:<%= schema.singular %>_token, token) + end + + defp maybe_set_token_authenticated_at(_token, nil), do: nil + + defp maybe_set_token_authenticated_at(token, authenticated_at) do + <%= inspect context.module %>Fixtures.override_token_authenticated_at(token, authenticated_at) + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex.eex new file mode 100644 index 0000000..290e226 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex.eex @@ -0,0 +1,82 @@ + import Ecto.Query + + alias <%= inspect context.module %> + alias <%= inspect scope_config.scope.module %> + + def unique_<%= schema.singular %>_email, do: "<%= schema.singular %>#{System.unique_integer()}@example.com" + def valid_<%= schema.singular %>_password, do: 
"hello world!" + + def valid_<%= schema.singular %>_attributes(attrs \\ %{}) do + Enum.into(attrs, %{ + email: unique_<%= schema.singular %>_email() + }) + end + + def unconfirmed_<%= schema.singular %>_fixture(attrs \\ %{}) do + {:ok, <%= schema.singular %>} = + attrs + |> valid_<%= schema.singular %>_attributes() + |> <%= inspect context.alias %>.register_<%= schema.singular %>() + + <%= schema.singular %> + end + + def <%= schema.singular %>_fixture(attrs \\ %{}) do + <%= schema.singular %> = unconfirmed_<%= schema.singular %>_fixture(attrs) + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, {<%= schema.singular %>, _expired_tokens}} = + <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(token) + + <%= schema.singular %> + end + + def <%= schema.singular %>_scope_fixture do + <%= schema.singular %> = <%= schema.singular %>_fixture() + <%= schema.singular %>_scope_fixture(<%= schema.singular %>) + end + + def <%= schema.singular %>_scope_fixture(<%= schema.singular %>) do + <%= inspect scope_config.scope.alias %>.for_<%= schema.singular %>(<%= schema.singular %>) + end + + def set_password(<%= schema.singular %>) do + {:ok, {<%= schema.singular %>, _expired_tokens}} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, %{password: valid_<%= schema.singular %>_password()}) + + <%= schema.singular %> + end + + def extract_<%= schema.singular %>_token(fun) do + {:ok, captured_email} = fun.(&"[TOKEN]#{&1}[TOKEN]") + [_, token | _] = String.split(captured_email.text_body, "[TOKEN]") + token + end + + def override_token_authenticated_at(token, authenticated_at) when is_binary(token) do + <%= inspect schema.repo %>.update_all( + from(t in <%= inspect context.alias %>.<%= inspect schema.alias %>Token, + where: t.token == ^token + ), + set: [authenticated_at: authenticated_at] + ) + end + + def 
generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) do + {encoded_token, <%= schema.singular %>_token} = <%= inspect context.alias %>.<%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "login") + <%= inspect schema.repo %>.insert!(<%= schema.singular %>_token) + {encoded_token, <%= schema.singular %>_token.token} + end + + def offset_<%= schema.singular %>_token(token, amount_to_add, unit) do + dt = <%= inspect datetime_module %>.add(<%= datetime_now %>, amount_to_add, unit) + + <%= inspect schema.repo %>.update_all( + from(ut in <%= inspect context.alias %>.<%= inspect schema.alias %>Token, where: ut.token == ^token), + set: [inserted_at: dt, authenticated_at: dt] + ) + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex.eex new file mode 100644 index 0000000..1e96da5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex.eex @@ -0,0 +1,288 @@ + alias <%= inspect context.module %>.{<%= inspect schema.alias %>, <%= inspect schema.alias %>Token, <%= inspect schema.alias %>Notifier} + + ## Database getters + + @doc """ + Gets a <%= schema.singular %> by email. + + ## Examples + + iex> get_<%= schema.singular %>_by_email("foo@example.com") + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>_by_email("unknown@example.com") + nil + + """ + def get_<%= schema.singular %>_by_email(email) when is_binary(email) do + Repo.get_by(<%= inspect schema.alias %>, email: email) + end + + @doc """ + Gets a <%= schema.singular %> by email and password. 
+ + ## Examples + + iex> get_<%= schema.singular %>_by_email_and_password("foo@example.com", "correct_password") + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>_by_email_and_password("foo@example.com", "invalid_password") + nil + + """ + def get_<%= schema.singular %>_by_email_and_password(email, password) + when is_binary(email) and is_binary(password) do + <%= schema.singular %> = Repo.get_by(<%= inspect schema.alias %>, email: email) + if <%= inspect schema.alias %>.valid_password?(<%= schema.singular %>, password), do: <%= schema.singular %> + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises `Ecto.NoResultsError` if the <%= inspect schema.alias %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>!(456) + ** (Ecto.NoResultsError) + + """ + def get_<%= schema.singular %>!(id), do: Repo.get!(<%= inspect schema.alias %>, id) + + ## <%= schema.human_singular %> registration + + @doc """ + Registers a <%= schema.singular %>. + + ## Examples + + iex> register_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> register_<%= schema.singular %>(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def register_<%= schema.singular %>(attrs) do + %<%= inspect schema.alias %>{} + |> <%= inspect schema.alias %>.email_changeset(attrs) + |> Repo.insert() + end + + ## Settings + + @doc """ + Checks whether the <%= schema.singular %> is in sudo mode. + + The <%= schema.singular %> is in sudo mode when the last authentication was done no further + than 20 minutes ago. The limit can be given as second argument in minutes. 
+ """ + def sudo_mode?(<%= schema.singular %>, minutes \\ -20) + + def sudo_mode?(%<%= inspect schema.alias %>{authenticated_at: ts}, minutes) when is_struct(ts, <%= inspect datetime_module %>) do + <%= inspect datetime_module %>.after?(ts, <%= inspect datetime_module %>.utc_now() |> <%= inspect datetime_module %>.add(minutes, :minute)) + end + + def sudo_mode?(_<%= schema.singular %>, _minutes), do: false + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the <%= schema.singular %> email. + + See `<%= inspect context.module %>.<%= inspect schema.alias %>.email_changeset/3` for a list of supported options. + + ## Examples + + iex> change_<%= schema.singular %>_email(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>_email(<%= schema.singular %>, attrs \\ %{}, opts \\ []) do + <%= inspect schema.alias %>.email_changeset(<%= schema.singular %>, attrs, opts) + end + + @doc """ + Updates the <%= schema.singular %> email using the given token. + + If the token matches, the <%= schema.singular %> email is updated and the token is deleted. + """ + def update_<%= schema.singular %>_email(<%= schema.singular %>, token) do + context = "change:#{<%= schema.singular %>.email}" + + Repo.transact(fn -> + with {:ok, query} <- <%= inspect schema.alias %>Token.verify_change_email_token_query(token, context), + %<%= inspect schema.alias %>Token{sent_to: email} <- Repo.one(query), + {:ok, <%= schema.singular %>} <- Repo.update(<%= inspect schema.alias %>.email_changeset(<%= schema.singular %>, %{email: email})), + {_count, _result} <- + Repo.delete_all(from(<%= inspect schema.alias %>Token, where: [<%= schema.singular %>_id: ^<%= schema.singular %>.id, context: ^context])) do + {:ok, <%= schema.singular %>} + else + _ -> {:error, :transaction_aborted} + end + end) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the <%= schema.singular %> password. 
+ + See `<%= inspect context.module %>.<%= inspect schema.alias %>.password_changeset/3` for a list of supported options. + + ## Examples + + iex> change_<%= schema.singular %>_password(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>_password(<%= schema.singular %>, attrs \\ %{}, opts \\ []) do + <%= inspect schema.alias %>.password_changeset(<%= schema.singular %>, attrs, opts) + end + + @doc """ + Updates the <%= schema.singular %> password. + + Returns a tuple with the updated <%= schema.singular %>, as well as a list of expired tokens. + + ## Examples + + iex> update_<%= schema.singular %>_password(<%= schema.singular %>, %{password: ...}) + {:ok, {%<%= inspect schema.alias %>{}, [...]}} + + iex> update_<%= schema.singular %>_password(<%= schema.singular %>, %{password: "too short"}) + {:error, %Ecto.Changeset{}} + + """ + def update_<%= schema.singular %>_password(<%= schema.singular %>, attrs) do + <%= schema.singular %> + |> <%= inspect schema.alias %>.password_changeset(attrs) + |> update_<%= schema.singular %>_and_delete_all_tokens() + end + + ## Session + + @doc """ + Generates a session token. + """ + def generate_<%= schema.singular %>_session_token(<%= schema.singular %>) do + {token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_session_token(<%= schema.singular %>) + Repo.insert!(<%= schema.singular %>_token) + token + end + + @doc """ + Gets the <%= schema.singular %> with the given signed token. + + If the token is valid `{<%= schema.singular %>, token_inserted_at}` is returned, otherwise `nil` is returned. + """ + def get_<%= schema.singular %>_by_session_token(token) do + {:ok, query} = <%= inspect schema.alias %>Token.verify_session_token_query(token) + Repo.one(query) + end + + @doc """ + Gets the <%= schema.singular %> with the given magic link token. 
+ """ + def get_<%= schema.singular %>_by_magic_link_token(token) do + with {:ok, query} <- <%= inspect schema.alias %>Token.verify_magic_link_token_query(token), + {<%= schema.singular %>, _token} <- Repo.one(query) do + <%= schema.singular %> + else + _ -> nil + end + end + + @doc """ + Logs the <%= schema.singular %> in by magic link. + + There are three cases to consider: + + 1. The <%= schema.singular %> has already confirmed their email. They are logged in + and the magic link is expired. + + 2. The <%= schema.singular %> has not confirmed their email and no password is set. + In this case, the <%= schema.singular %> gets confirmed, logged in, and all tokens - + including session ones - are expired. In theory, no other tokens + exist but we delete all of them for best security practices. + + 3. The <%= schema.singular %> has not confirmed their email but a password is set. + This cannot happen in the default implementation but may be the + source of security pitfalls. See the "Mixing magic link and password registration" section of + `mix help phx.gen.auth`. + """ + def login_<%= schema.singular %>_by_magic_link(token) do + {:ok, query} = <%= inspect schema.alias %>Token.verify_magic_link_token_query(token) + + case Repo.one(query) do + # Prevent session fixation attacks by disallowing magic links for unconfirmed users with password + {%<%= inspect schema.alias %>{confirmed_at: nil, hashed_password: hash}, _token} when not is_nil(hash) -> + raise """ + magic link log in is not allowed for unconfirmed users with a password set! + + This cannot happen with the default implementation, which indicates that you + might have adapted the code to a different use case. Please make sure to read the + "Mixing magic link and password registration" section of `mix help phx.gen.auth`. 
+ """ + + {%<%= inspect schema.alias %>{confirmed_at: nil} = <%= schema.singular %>, _token} -> + <%= schema.singular %> + |> <%= inspect schema.alias %>.confirm_changeset() + |> update_<%= schema.singular %>_and_delete_all_tokens() + + {<%= schema.singular %>, token} -> + Repo.delete!(token) + {:ok, {<%= schema.singular %>, []}} + + nil -> + {:error, :not_found} + end + end + + @doc ~S""" + Delivers the update email instructions to the given <%= schema.singular %>. + + ## Examples + + iex> deliver_<%= schema.singular %>_update_email_instructions(<%= schema.singular %>, current_email, &url(~p"<%= schema.route_prefix %>/settings/confirm-email/#{&1}")) + {:ok, %{to: ..., body: ...}} + + """ + def deliver_<%= schema.singular %>_update_email_instructions(%<%= inspect schema.alias %>{} = <%= schema.singular %>, current_email, update_email_url_fun) + when is_function(update_email_url_fun, 1) do + {encoded_token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "change:#{current_email}") + + Repo.insert!(<%= schema.singular %>_token) + <%= inspect schema.alias %>Notifier.deliver_update_email_instructions(<%= schema.singular %>, update_email_url_fun.(encoded_token)) + end + + @doc """ + Delivers the magic link login instructions to the given <%= schema.singular %>. + """ + def deliver_login_instructions(%<%= inspect schema.alias %>{} = <%= schema.singular %>, magic_link_url_fun) + when is_function(magic_link_url_fun, 1) do + {encoded_token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "login") + Repo.insert!(<%= schema.singular %>_token) + <%= inspect schema.alias %>Notifier.deliver_login_instructions(<%= schema.singular %>, magic_link_url_fun.(encoded_token)) + end + + @doc """ + Deletes the signed token with the given context. 
+ """ + def delete_<%= schema.singular %>_session_token(token) do + Repo.delete_all(from(<%= inspect schema.alias %>Token, where: [token: ^token, context: "session"])) + :ok + end + + ## Token helper + + defp update_<%= schema.singular %>_and_delete_all_tokens(changeset) do + Repo.transact(fn -> + with {:ok, <%= schema.singular %>} <- Repo.update(changeset) do + tokens_to_expire = Repo.all_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + + Repo.delete_all(from(t in <%= inspect schema.alias %>Token, where: t.id in ^Enum.map(tokens_to_expire, & &1.id))) + + {:ok, {<%= schema.singular %>, tokens_to_expire}} + end + end) + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/login_live.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/login_live.ex.eex new file mode 100644 index 0000000..84afc3f --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/login_live.ex.eex @@ -0,0 +1,134 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Login do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + + @impl true + def render(assigns) do + ~H""" + ={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header> +

Log in

+ <:subtitle> + <%%= if @<%= scope_config.scope.assign_key %> do %> + You need to reauthenticate to perform sensitive actions on your account. + <%% else %> + Don't have an account? <.link + navigate={~p"<%= schema.route_prefix %>/register"} + class="font-semibold text-brand hover:underline" + phx-no-format + >Sign up for an account now. + <%% end %> + + +
+ +
+ <.icon name="hero-information-circle" class="size-6 shrink-0" /> +
+

You are running the local mail adapter.

+

+ To see sent emails, visit <.link href="/dev/mailbox" class="underline">the mailbox page. +

+
+
+ + <.form + :let={f} + for={@form} + id="login_form_magic" + action={~p"<%= schema.route_prefix %>/log-in"} + phx-submit="submit_magic" + > + <.input + readonly={!!@<%= scope_config.scope.assign_key %>} + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + phx-mounted={JS.focus()} + /> + <.button class="btn btn-primary w-full"> + Log in with email + + + +
or
+ + <.form + :let={f} + for={@form} + id="login_form_password" + action={~p"<%= schema.route_prefix %>/log-in"} + phx-submit="submit_password" + phx-trigger-action={@trigger_submit} + > + <.input + readonly={!!@<%= scope_config.scope.assign_key %>} + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + /> + <.input + field={@form[:password]} + type="password" + label="Password" + autocomplete="current-password" + spellcheck="false" + /> + <.button class="btn btn-primary w-full" name={@form[:remember_me].name} value="true"> + Log in and stay logged in + + <.button class="btn btn-primary btn-soft w-full mt-2"> + Log in only this time + + +
+
+ """ + end + + @impl true + def mount(_params, _session, socket) do + email = + Phoenix.Flash.get(socket.assigns.flash, :email) || + get_in(socket.assigns, [:<%= scope_config.scope.assign_key %>, Access.key(:<%= schema.singular %>), Access.key(:email)]) + + form = to_form(%{"email" => email}, as: "<%= schema.singular %>") + + {:ok, assign(socket, form: form, trigger_submit: false)} + end + + @impl true + def handle_event("submit_password", _params, socket) do + {:noreply, assign(socket, :trigger_submit, true)} + end + + def handle_event("submit_magic", %{"<%= schema.singular %>" => %{"email" => email}}, socket) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) do + <%= inspect context.alias %>.deliver_login_instructions( + <%= schema.singular %>, + &url(~p"<%= schema.route_prefix %>/log-in/#{&1}") + ) + end + + info = + "If your email is in our system, you will receive instructions for logging in shortly." + + {:noreply, + socket + |> put_flash(:info, info) + |> push_navigate(to: ~p"<%= schema.route_prefix %>/log-in")} + end + + defp local_mail_adapter? 
do + Application.get_env(:<%= Mix.Phoenix.otp_app() %>, <%= inspect context.base_module %>.Mailer)[:adapter] == Swoosh.Adapters.Local + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/login_live_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/login_live_test.exs.eex new file mode 100644 index 0000000..4624042 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/login_live_test.exs.eex @@ -0,0 +1,109 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.LoginTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + describe "login page" do + test "renders login page", %{conn: conn} do + {:ok, _lv, html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "Log in" + assert html =~ "Register" + assert html =~ "Log in with email" + end + end + + describe "<%= schema.singular %> login - magic link" do + test "sends magic link email when <%= schema.singular %> exists", %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + {:ok, _lv, html} = + form(lv, "#login_form_magic", <%= schema.singular %>: %{email: <%= schema.singular %>.email}) + |> render_submit() + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "If your email is in our system" + + assert <%= inspect schema.repo %>.get_by!(<%= inspect context.module %>.<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id).context == + "login" + end + + test "does not disclose if <%= schema.singular %> is registered", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + {:ok, _lv, html} = + form(lv, "#login_form_magic", <%= schema.singular %>: %{email: "idonotexist@example.com"}) + |> render_submit() + |> 
follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "If your email is in our system" + end + end + + describe "<%= schema.singular %> login - password" do + test "redirects if <%= schema.singular %> logs in with valid credentials", %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() |> set_password() + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + form = + form(lv, "#login_form_password", + <%= schema.singular %>: %{email: <%= schema.singular %>.email, password: valid_<%= schema.singular %>_password(), remember_me: true} + ) + + conn = submit_form(form, conn) + + assert redirected_to(conn) == ~p"/" + end + + test "redirects to login page with a flash error if credentials are invalid", %{ + conn: conn + } do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + form = + form(lv, "#login_form_password", <%= schema.singular %>: %{email: "test@email.com", password: "123456"}) + + render_submit(form, %{user: %{remember_me: true}}) + + conn = follow_trigger_action(form, conn) + assert Phoenix.Flash.get(conn.assigns.flash, :error) == "Invalid email or password" + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + end + end + + describe "login navigation" do + test "redirects to registration page when the Register button is clicked", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + {:ok, _login_live, login_html} = + lv + |> element("main a", "Sign up") + |> render_click() + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/register") + + assert login_html =~ "Register" + end + end + + describe "re-authentication (sudo mode)" do + setup %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{<%= schema.singular %>: <%= schema.singular %>, conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>)} + end + + test "shows login page with email filled in", %{conn: conn, <%= 
schema.singular %>: <%= schema.singular %>} do + {:ok, _lv, html} = live(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ "You need to reauthenticate" + refute html =~ "Register" + assert html =~ "Log in with email" + + assert html =~ + ~s(.Migrations.Create<%= Macro.camelize(schema.table) %>AuthTables do + use <%= inspect schema.migration_module %> + + def change do<%= if Enum.any?(migration.extensions) do %><%= for extension <- migration.extensions do %> + <%= extension %><% end %> +<% end %> + create table(:<%= schema.table %><%= if schema.binary_id do %>, primary_key: false<% end %>) do +<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true +<% end %> <%= migration.column_definitions[:email] %> + add :hashed_password, :string + add :confirmed_at, <%= inspect schema.timestamp_type %> + + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}" %>) + end + + create unique_index(:<%= schema.table %>, [:email]) + + create table(:<%= schema.table %>_tokens<%= if schema.binary_id do %>, primary_key: false<% end %>) do +<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true +<% end %> add :<%= schema.singular %>_id, references(:<%= schema.table %>, <%= if schema.binary_id do %>type: :binary_id, <% end %>on_delete: :delete_all), null: false + <%= migration.column_definitions[:token] %> + add :context, :string, null: false + add :sent_to, :string + add :authenticated_at, <%= inspect schema.timestamp_type %> + + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}, " %>updated_at: false) + end + + create index(:<%= schema.table %>_tokens, [:<%= schema.singular %>_id]) + create unique_index(:<%= schema.table %>_tokens, [:context, :token]) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex.eex new file mode 100644 index 0000000..a74a96f --- 
/dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex.eex @@ -0,0 +1,84 @@ +defmodule <%= inspect context.module %>.<%= inspect schema.alias %>Notifier do + import Swoosh.Email + + alias <%= inspect context.base_module %>.Mailer + alias <%= inspect context.module %>.<%= inspect schema.alias %> + + # Delivers the email using the application mailer. + defp deliver(recipient, subject, body) do + email = + new() + |> to(recipient) + |> from({"<%= inspect context.base_module %>", "contact@example.com"}) + |> subject(subject) + |> text_body(body) + + with {:ok, _metadata} <- Mailer.deliver(email) do + {:ok, email} + end + end + + @doc """ + Deliver instructions to update a <%= schema.singular %> email. + """ + def deliver_update_email_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Update email instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can change your email by visiting the URL below: + + #{url} + + If you didn't request this change, please ignore this. + + ============================== + """) + end + + @doc """ + Deliver instructions to log in with a magic link. + """ + def deliver_login_instructions(<%= schema.singular %>, url) do + case <%= schema.singular %> do + %<%= inspect schema.alias %>{confirmed_at: nil} -> deliver_confirmation_instructions(<%= schema.singular %>, url) + _ -> deliver_magic_link_instructions(<%= schema.singular %>, url) + end + end + + defp deliver_magic_link_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Log in instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can log into your account by visiting the URL below: + + #{url} + + If you didn't request this email, please ignore this. 
+ + ============================== + """) + end + + defp deliver_confirmation_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Confirmation instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can confirm your account by visiting the URL below: + + #{url} + + If you didn't create an account with us, please ignore this. + + ============================== + """) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex.eex new file mode 100644 index 0000000..eb361e4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex.eex @@ -0,0 +1,32 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + def new(conn, _params) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(%<%= inspect schema.alias %>{}) + render(conn, :new, changeset: changeset) + end + + def create(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params}) do + case <%= inspect context.alias %>.register_<%= schema.singular %>(<%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + {:ok, _} = + <%= inspect context.alias %>.deliver_login_instructions( + <%= schema.singular %>, + &url(~p"<%= schema.route_prefix %>/log-in/#{&1}") + ) + + conn + |> put_flash( + :info, + "An email was sent to #{<%= schema.singular %>.email}, please access it to confirm your account." 
+ ) + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + + {:error, %Ecto.Changeset{} = changeset} -> + render(conn, :new, changeset: changeset) + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs.eex new file mode 100644 index 0000000..62db2c6 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs.eex @@ -0,0 +1,50 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import <%= inspect context.module %>Fixtures + + describe "GET <%= schema.route_prefix %>/register" do + test "renders registration page", %{conn: conn} do + conn = get(conn, ~p"<%= schema.route_prefix %>/register") + response = html_response(conn, 200) + assert response =~ "Register" + assert response =~ ~p"<%= schema.route_prefix %>/log-in" + assert response =~ ~p"<%= schema.route_prefix %>/register" + end + + test "redirects if already logged in", %{conn: conn} do + conn = conn |> log_in_<%= schema.singular %>(<%= schema.singular %>_fixture()) |> get(~p"<%= schema.route_prefix %>/register") + + assert redirected_to(conn) == ~p"/" + end + end + + describe "POST <%= schema.route_prefix %>/register" do + @tag :capture_log + test "creates account but does not log in", %{conn: conn} do + email = unique_<%= schema.singular %>_email() + + conn = + post(conn, ~p"<%= schema.route_prefix %>/register", %{ + "<%= schema.singular %>" => valid_<%= schema.singular %>_attributes(email: email) + }) + + refute get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + + assert conn.assigns.flash["info"] =~ + ~r/An email was sent to .*, please access it to confirm your account/ + end + + test "render errors for invalid 
data", %{conn: conn} do + conn = + post(conn, ~p"<%= schema.route_prefix %>/register", %{ + "<%= schema.singular %>" => %{"email" => "with spaces"} + }) + + response = html_response(conn, 200) + assert response =~ "Register" + assert response =~ "must have the @ sign and no spaces" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_html.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_html.ex.eex new file mode 100644 index 0000000..72d9bd4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_html.ex.eex @@ -0,0 +1,5 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationHTML do + use <%= inspect context.web_module %>, :html + + embed_templates "<%= schema.singular %>_registration_html/*" +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_live.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_live.ex.eex new file mode 100644 index 0000000..53490c4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_live.ex.eex @@ -0,0 +1,89 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Registration do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + @impl true + def render(assigns) do + ~H""" + ={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header> + Register for an account + <:subtitle> + Already registered? + <.link navigate={~p"<%= schema.route_prefix %>/log-in"} class="font-semibold text-brand hover:underline"> + Log in + + to your account now. + + +
+ + <.form for={@form} id="registration_form" phx-submit="save" phx-change="validate"> + <.input + field={@form[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + phx-mounted={JS.focus()} + /> + + <.button phx-disable-with="Creating account..." class="btn btn-primary w-full"> + Create an account + + +
+
+ """ + end + + @impl true + def mount(_params, _session, %{assigns: %{<%= scope_config.scope.assign_key %>: %{<%= schema.singular %>: <%= schema.singular %>}}} = socket) + when not is_nil(<%= schema.singular %>) do + {:ok, redirect(socket, to: <%= inspect auth_module %>.signed_in_path(socket))} + end + + def mount(_params, _session, socket) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(%<%= inspect schema.alias %>{}, %{}, validate_unique: false) + + {:ok, assign_form(socket, changeset), temporary_assigns: [form: nil]} + end + + @impl true + def handle_event("save", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + case <%= inspect context.alias %>.register_<%= schema.singular %>(<%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + {:ok, _} = + <%= inspect context.alias %>.deliver_login_instructions( + <%= schema.singular %>, + &url(~p"<%= schema.route_prefix %>/log-in/#{&1}") + ) + + {:noreply, + socket + |> put_flash( + :info, + "An email was sent to #{<%= schema.singular %>.email}, please access it to confirm your account." 
+ ) + |> push_navigate(to: ~p"<%= schema.route_prefix %>/log-in")} + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign_form(socket, changeset)} + end + end + + def handle_event("validate", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(%<%= inspect schema.alias %>{}, <%= schema.singular %>_params, validate_unique: false) + {:noreply, assign_form(socket, Map.put(changeset, :action, :validate))} + end + + defp assign_form(socket, %Ecto.Changeset{} = changeset) do + form = to_form(changeset, as: "<%= schema.singular %>") + assign(socket, form: form) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_live_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_live_test.exs.eex new file mode 100644 index 0000000..104d82b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_live_test.exs.eex @@ -0,0 +1,82 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.RegistrationTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + describe "Registration page" do + test "renders registration page", %{conn: conn} do + {:ok, _lv, html} = live(conn, ~p"<%= schema.route_prefix %>/register") + + assert html =~ "Register" + assert html =~ "Log in" + end + + test "redirects if already logged in", %{conn: conn} do + result = + conn + |> log_in_<%= schema.singular %>(<%= schema.singular %>_fixture()) + |> live(~p"<%= schema.route_prefix %>/register") + |> follow_redirect(conn, ~p"/") + + assert {:ok, _conn} = result + end + + test "renders errors for invalid data", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/register") + + result = + lv + |> element("#registration_form") + |> render_change(<%= 
schema.singular %>: %{"email" => "with spaces"}) + + assert result =~ "Register" + assert result =~ "must have the @ sign and no spaces" + end + end + + describe "register <%= schema.singular %>" do + test "creates account but does not log in", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/register") + + email = unique_<%= schema.singular %>_email() + form = form(lv, "#registration_form", <%= schema.singular %>: valid_<%= schema.singular %>_attributes(email: email)) + + {:ok, _lv, html} = + render_submit(form) + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert html =~ + ~r/An email was sent to .*, please access it to confirm your account/ + end + + test "renders errors for duplicated email", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/register") + + <%= schema.singular %> = <%= schema.singular %>_fixture(%{email: "test@email.com"}) + + result = + lv + |> form("#registration_form", + <%= schema.singular %>: %{"email" => <%= schema.singular %>.email} + ) + |> render_submit() + + assert result =~ "has already been taken" + end + end + + describe "registration navigation" do + test "redirects to login page when the Log in button is clicked", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/register") + + {:ok, _login_live, login_html} = + lv + |> element("main a", "Log in") + |> render_click() + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert login_html =~ "Log in" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex.eex new file mode 100644 index 0000000..1bf901e --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex.eex @@ -0,0 +1,32 @@ +={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header> + Register for an account + <:subtitle> + Already registered? + <.link navigate={~p"<%= schema.route_prefix %>/log-in"} class="font-semibold text-brand hover:underline"> + Log in + + to your account now. + + +
+ + <.form :let={f} for={@changeset} action={~p"<%= schema.route_prefix %>/register"}> + <.input + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + phx-mounted={JS.focus()} + /> + + <.button phx-disable-with="Creating account..." class="btn btn-primary w-full"> + Create an account + + +
+
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/routes.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/routes.ex.eex new file mode 100644 index 0000000..9f780d9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/routes.ex.eex @@ -0,0 +1,42 @@ + + ## Authentication routes + + <%= if not live? do %>scope <%= router_scope %> do + pipe_through [:browser, :redirect_if_<%= schema.singular %>_is_authenticated] + + get "/<%= schema.plural %>/register", <%= inspect schema.alias %>RegistrationController, :new + post "/<%= schema.plural %>/register", <%= inspect schema.alias %>RegistrationController, :create + end + + <% end %>scope <%= router_scope %> do + pipe_through [:browser, :require_authenticated_<%= schema.singular %>]<%= if live? do %> + + live_session :require_authenticated_<%= schema.singular %>, + on_mount: [{<%= inspect auth_module %>, :require_authenticated}] do + live "/<%= schema.plural %>/settings", <%= inspect schema.alias %>Live.Settings, :edit + live "/<%= schema.plural %>/settings/confirm-email/:token", <%= inspect schema.alias %>Live.Settings, :confirm_email + end + + post "/<%= schema.plural %>/update-password", <%= inspect schema.alias %>SessionController, :update_password<% else %> + + get "/<%= schema.plural %>/settings", <%= inspect schema.alias %>SettingsController, :edit + put "/<%= schema.plural %>/settings", <%= inspect schema.alias %>SettingsController, :update + get "/<%= schema.plural %>/settings/confirm-email/:token", <%= inspect schema.alias %>SettingsController, :confirm_email<% end %> + end + + scope <%= router_scope %> do + pipe_through [:browser] + + <%= if live? 
do %>live_session :current_<%= schema.singular %>, + on_mount: [{<%= inspect auth_module %>, :mount_<%= scope_config.scope.assign_key %>}] do + live "/<%= schema.plural %>/register", <%= inspect schema.alias %>Live.Registration, :new + live "/<%= schema.plural %>/log-in", <%= inspect schema.alias %>Live.Login, :new + live "/<%= schema.plural %>/log-in/:token", <%= inspect schema.alias %>Live.Confirmation, :new + end + + post "/<%= schema.plural %>/log-in", <%= inspect schema.alias %>SessionController, :create + delete "/<%= schema.plural %>/log-out", <%= inspect schema.alias %>SessionController, :delete<% else %>get "/<%= schema.plural %>/log-in", <%= inspect schema.alias %>SessionController, :new + get "/<%= schema.plural %>/log-in/:token", <%= inspect schema.alias %>SessionController, :confirm + post "/<%= schema.plural %>/log-in", <%= inspect schema.alias %>SessionController, :create + delete "/<%= schema.plural %>/log-out", <%= inspect schema.alias %>SessionController, :delete<% end %> + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/schema.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/schema.ex.eex new file mode 100644 index 0000000..6efe510 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/schema.ex.eex @@ -0,0 +1,137 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset +<%= if schema.binary_id do %> + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% end %> + schema <%= inspect schema.table %> do + field :email, :string + field :password, :string, virtual: true, redact: true + field :hashed_password, :string, redact: true + field :confirmed_at, <%= inspect schema.timestamp_type %> + field :authenticated_at, <%= inspect schema.timestamp_type %>, virtual: true + + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}" %>) + end + + @doc """ + A <%= schema.singular %> changeset for registering or changing the email. 
+ + It requires the email to change otherwise an error is added. + + ## Options + + * `:validate_unique` - Set to false if you don't want to validate the + uniqueness of the email, useful when displaying live validations. + Defaults to `true`. + """ + def email_changeset(<%= schema.singular %>, attrs, opts \\ []) do + <%= schema.singular %> + |> cast(attrs, [:email]) + |> validate_email(opts) + end + + defp validate_email(changeset, opts) do + changeset = + changeset + |> validate_required([:email]) + |> validate_format(:email, ~r/^[^@,;\s]+@[^@,;\s]+$/, + message: "must have the @ sign and no spaces" + ) + |> validate_length(:email, max: 160) + + if Keyword.get(opts, :validate_unique, true) do + changeset + |> unsafe_validate_unique(:email, <%= inspect schema.repo %>) + |> unique_constraint(:email) + |> validate_email_changed() + else + changeset + end + end + + defp validate_email_changed(changeset) do + if get_field(changeset, :email) && get_change(changeset, :email) == nil do + add_error(changeset, :email, "did not change") + else + changeset + end + end + + @doc """ + A <%= schema.singular %> changeset for changing the password. + + It is important to validate the length of the password, as long passwords may + be very expensive to hash for certain algorithms. + + ## Options + + * `:hash_password` - Hashes the password so it can be stored securely + in the database and ensures the password field is cleared to prevent + leaks in the logs. If password hashing is not needed and clearing the + password field is not desired (like when using this changeset for + validations on a LiveView form), this option can be set to `false`. + Defaults to `true`. 
+ """ + def password_changeset(<%= schema.singular %>, attrs, opts \\ []) do + <%= schema.singular %> + |> cast(attrs, [:password]) + |> validate_confirmation(:password, message: "does not match password") + |> validate_password(opts) + end + + defp validate_password(changeset, opts) do + changeset + |> validate_required([:password]) + |> validate_length(:password, min: 12, max: 72) + # Examples of additional password validation: + # |> validate_format(:password, ~r/[a-z]/, message: "at least one lower case character") + # |> validate_format(:password, ~r/[A-Z]/, message: "at least one upper case character") + # |> validate_format(:password, ~r/[!?@#$%^&*_0-9]/, message: "at least one digit or punctuation character") + |> maybe_hash_password(opts) + end + + defp maybe_hash_password(changeset, opts) do + hash_password? = Keyword.get(opts, :hash_password, true) + password = get_change(changeset, :password) + + if hash_password? && password && changeset.valid? do + changeset<%= if hashing_library.name == :bcrypt do %> + # If using Bcrypt, then further validate it is at most 72 bytes long + |> validate_length(:password, max: 72, count: :bytes)<% end %> + # Hashing could be done with `Ecto.Changeset.prepare_changes/2`, but that + # would keep the database transaction open longer and hurt performance. + |> put_change(:hashed_password, <%= inspect hashing_library.module %>.hash_pwd_salt(password)) + |> delete_change(:password) + else + changeset + end + end + + @doc """ + Confirms the account by setting `confirmed_at`. + """ + def confirm_changeset(<%= schema.singular %>) do + <%= case schema.timestamp_type do %> + <% :naive_datetime -> %>now = NaiveDateTime.utc_now(:second) + <% :utc_datetime -> %>now = DateTime.utc_now(:second) + <% :utc_datetime_usec -> %>now = DateTime.utc_now(:microsecond) + <% end %>change(<%= schema.singular %>, confirmed_at: now) + end + + @doc """ + Verifies the password. 
+ + If there is no <%= schema.singular %> or the <%= schema.singular %> doesn't have a password, we call + `<%= inspect hashing_library.module %>.no_user_verify/0` to avoid timing attacks. + """ + def valid_password?(%<%= inspect schema.module %>{hashed_password: hashed_password}, password) + when is_binary(hashed_password) and byte_size(password) > 0 do + <%= inspect hashing_library.module %>.verify_pass(password, hashed_password) + end + + def valid_password?(_, _) do + <%= inspect hashing_library.module %>.no_user_verify() + false + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex.eex new file mode 100644 index 0000000..94e5923 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex.eex @@ -0,0 +1,158 @@ +defmodule <%= inspect schema.module %>Token do + use Ecto.Schema + import Ecto.Query + alias <%= inspect schema.module %>Token + + @hash_algorithm :sha256 + @rand_size 32 + + # It is very important to keep the magic link token expiry short, + # since someone with access to the email may take over the account. + @magic_link_validity_in_minutes 15 + @change_email_validity_in_days 7 + @session_validity_in_days 14 +<%= if schema.binary_id do %> + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% end %> + schema "<%= schema.table %>_tokens" do + field :token, :binary + field :context, :string + field :sent_to, :string + field :authenticated_at, <%= inspect schema.timestamp_type %> + belongs_to :<%= schema.singular %>, <%= inspect schema.module %> + + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}, " %>updated_at: false) + end + + @doc """ + Generates a token that will be stored in a signed place, + such as session or cookie. As they are signed, those + tokens do not need to be hashed. 
+ + The reason why we store session tokens in the database, even + though Phoenix already provides a session cookie, is because + Phoenix's default session cookies are not persisted, they are + simply signed and potentially encrypted. This means they are + valid indefinitely, unless you change the signing/encryption + salt. + + Therefore, storing them allows individual <%= schema.singular %> + sessions to be expired. The token system can also be extended + to store additional data, such as the device used for logging in. + You could then use this information to display all valid sessions + and devices in the UI and allow users to explicitly expire any + session they deem invalid. + """ + def build_session_token(<%= schema.singular %>) do + token = :crypto.strong_rand_bytes(@rand_size) + dt = <%= schema.singular %>.authenticated_at || <%= datetime_now %> + {token, %<%= inspect schema.alias %>Token{token: token, context: "session", <%= schema.singular %>_id: <%= schema.singular %>.id, authenticated_at: dt}} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the <%= schema.singular %> found by the token, if any, along with the token's creation time. + + The token is valid if it matches the value in the database and it has + not expired (after @session_validity_in_days). + """ + def verify_session_token_query(token) do + query = + from token in by_token_and_context_query(token, "session"), + join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>), + where: token.inserted_at > ago(@session_validity_in_days, "day"), + select: {%{<%= schema.singular %> | authenticated_at: token.authenticated_at}, token.inserted_at} + + {:ok, query} + end + + @doc """ + Builds a token and its hash to be delivered to the <%= schema.singular %>'s email. + + The non-hashed token is sent to the <%= schema.singular %> email while the + hashed part is stored in the database. 
The original token cannot be reconstructed, + which means anyone with read-only access to the database cannot directly use + the token in the application to gain access. Furthermore, if the <%= schema.singular %> changes + their email in the system, the tokens sent to the previous email are no longer + valid. + + Users can easily adapt the existing code to provide other types of delivery methods, + for example, by phone numbers. + """ + def build_email_token(<%= schema.singular %>, context) do + build_hashed_token(<%= schema.singular %>, context, <%= schema.singular %>.email) + end + + defp build_hashed_token(<%= schema.singular %>, context, sent_to) do + token = :crypto.strong_rand_bytes(@rand_size) + hashed_token = :crypto.hash(@hash_algorithm, token) + + {Base.url_encode64(token, padding: false), + %<%= inspect schema.alias %>Token{ + token: hashed_token, + context: context, + sent_to: sent_to, + <%= schema.singular %>_id: <%= schema.singular %>.id + }} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + If found, the query returns a tuple of the form `{<%= schema.singular %>, token}`. + + The given token is valid if it matches its hashed counterpart in the + database. This function also checks whether the token has expired. The context + of a magic link token is always "login". 
+ """ + def verify_magic_link_token_query(token) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + + query = + from token in by_token_and_context_query(hashed_token, "login"), + join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>), + where: token.inserted_at > ago(^@magic_link_validity_in_minutes, "minute"), + where: token.sent_to == <%= schema.singular %>.email, + select: {<%= schema.singular %>, token} + + {:ok, query} + + :error -> + :error + end + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the <%= schema.singular %>_token found by the token, if any. + + This is used to validate requests to change the <%= schema.singular %> + email. + The given token is valid if it matches its hashed counterpart in the + database and if it has not expired (after @change_email_validity_in_days). + The context must always start with "change:". + """ + def verify_change_email_token_query(token, "change:" <> _ = context) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + + query = + from token in by_token_and_context_query(hashed_token, context), + where: token.inserted_at > ago(@change_email_validity_in_days, "day") + + {:ok, query} + + :error -> + :error + end + end + + defp by_token_and_context_query(token, context) do + from <%= inspect schema.alias %>Token, where: [token: ^token, context: ^context] + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/scope.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/scope.ex.eex new file mode 100644 index 0000000..2829aa2 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/scope.ex.eex @@ -0,0 +1,33 @@ +defmodule <%= inspect scope_config.scope.module %> do + @moduledoc """ + Defines the scope of the caller to be used throughout the app. 
+ + The `<%= inspect scope_config.scope.module %>` allows public interfaces to receive + information about the caller, such as if the call is initiated from an + end-user, and if so, which user. Additionally, such a scope can carry fields + such as "super user" or other privileges for use as authorization, or to + ensure specific code paths can only be access for a given scope. + + It is useful for logging as well as for scoping pubsub subscriptions and + broadcasts when a caller subscribes to an interface or performs a particular + action. + + Feel free to extend the fields on this struct to fit the needs of + growing application requirements. + """ + + alias <%= inspect schema.module %> + + defstruct <%= schema.singular %>: nil + + @doc """ + Creates a scope for the given <%= schema.singular %>. + + Returns nil if no <%= schema.singular %> is given. + """ + def for_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + %__MODULE__{<%= schema.singular %>: <%= schema.singular %>} + end + + def for_<%= schema.singular %>(nil), do: nil +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_confirm.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.auth/session_confirm.html.heex.eex new file mode 100644 index 0000000..6d15837 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_confirm.html.heex.eex @@ -0,0 +1,59 @@ +={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header>Welcome {@<%= schema.singular %>.email} +
+ + <.form + :if={!@<%= schema.singular %>.confirmed_at} + for={@form} + id="confirmation_form" + action={~p"<%= schema.route_prefix %>/log-in?_action=confirmed"} + phx-mounted={JS.focus_first()} + > + + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Confirming..." + class="btn btn-primary w-full" + > + Confirm and stay logged in + + <.button phx-disable-with="Confirming..." class="btn btn-primary btn-soft w-full mt-2"> + Confirm and log in only this time + + + + <.form + :if={@<%= schema.singular %>.confirmed_at} + for={@form} + id="login_form" + action={~p"<%= schema.route_prefix %>/log-in"} + phx-mounted={JS.focus_first()} + > + + <%%= if @<%= scope_config.scope.assign_key %> do %> + <.button variant="primary" phx-disable-with="Logging in..." class="btn btn-primary w-full"> + Log in + + <%% else %> + <.button + name={@form[:remember_me].name} + value="true" + phx-disable-with="Logging in..." + class="btn btn-primary w-full" + > + Keep me logged in on this device + + <.button phx-disable-with="Logging in..." class="btn btn-primary btn-soft w-full mt-2"> + Log me in only this time + + <%% end %> + + +

.confirmed_at} class="alert alert-outline mt-8"> + Tip: If you prefer passwords, you can enable them in the <%= schema.singular %> settings. +

+
+
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex.eex new file mode 100644 index 0000000..67877fe --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex.eex @@ -0,0 +1,143 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect auth_module %><%= if live? do %> + + def create(conn, %{"_action" => "confirmed"} = params) do + create(conn, params, "<%= schema.human_singular %> confirmed successfully.") + end + + def create(conn, params) do + create(conn, params, "Welcome back!") + end + + # magic link login + defp create(conn, %{"<%= schema.singular %>" => %{"token" => token} = <%= schema.singular %>_params}, info) do + case <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(token) do + {:ok, {<%= schema.singular %>, tokens_to_disconnect}} -> + <%= inspect schema.alias %>Auth.disconnect_sessions(tokens_to_disconnect) + + conn + |> put_flash(:info, info) + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) + + _ -> + conn + |> put_flash(:error, "The link is invalid or it has expired.") + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + end + end + + # email + password login + defp create(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params}, info) do + %{"email" => email, "password" => password} = <%= schema.singular %>_params + + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(email, password) do + conn + |> put_flash(:info, info) + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) + else + # In order to prevent user 
enumeration attacks, don't disclose whether the email is registered. + conn + |> put_flash(:error, "Invalid email or password") + |> put_flash(:email, String.slice(email, 0, 160)) + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + end + end + + def update_password(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params) do + <%= schema.singular %> = conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + true = <%= inspect context.alias %>.sudo_mode?(<%= schema.singular %>) + {:ok, {_<%= schema.singular %>, expired_tokens}} = <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, <%= schema.singular %>_params) + + # disconnect all existing LiveViews with old sessions + <%= inspect schema.alias %>Auth.disconnect_sessions(expired_tokens) + + conn + |> put_session(:<%= schema.singular %>_return_to, ~p"<%= schema.route_prefix %>/settings") + |> create(params, "Password updated successfully!") + end<% else %> + + def new(conn, _params) do + email = get_in(conn.assigns, [:<%= scope_config.scope.assign_key %>, Access.key(:<%= schema.singular %>), Access.key(:email)]) + form = Phoenix.Component.to_form(%{"email" => email}, as: "<%= schema.singular %>") + + render(conn, :new, form: form) + end + + # magic link login + def create(conn, %{"<%= schema.singular %>" => %{"token" => token} = <%= schema.singular %>_params} = params) do + info = + case params do + %{"_action" => "confirmed"} -> "<%= schema.human_singular %> confirmed successfully." + _ -> "Welcome back!" 
+ end + + case <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(token) do + {:ok, {<%= schema.singular %>, _expired_tokens}} -> + conn + |> put_flash(:info, info) + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) + + {:error, :not_found} -> + conn + |> put_flash(:error, "The link is invalid or it has expired.") + |> render(:new, form: Phoenix.Component.to_form(%{}, as: "<%= schema.singular %>")) + end + end + + # email + password login + def create(conn, %{"<%= schema.singular %>" => %{"email" => email, "password" => password} = <%= schema.singular %>_params}) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(email, password) do + conn + |> put_flash(:info, "Welcome back!") + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) + else + form = Phoenix.Component.to_form(<%= schema.singular %>_params, as: "<%= schema.singular %>") + + # In order to prevent user enumeration attacks, don't disclose whether the email is registered. + conn + |> put_flash(:error, "Invalid email or password") + |> render(:new, form: form) + end + end + + # magic link request + def create(conn, %{"<%= schema.singular %>" => %{"email" => email}}) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) do + <%= inspect context.alias %>.deliver_login_instructions( + <%= schema.singular %>, + &url(~p"<%= schema.route_prefix %>/log-in/#{&1}") + ) + end + + info = + "If your email is in our system, you will receive instructions for logging in shortly." 
+ + conn + |> put_flash(:info, info) + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + end + + def confirm(conn, %{"token" => token}) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_magic_link_token(token) do + form = Phoenix.Component.to_form(%{"token" => token}, as: "<%= schema.singular %>") + + conn + |> assign(:<%= schema.singular %>, <%= schema.singular %>) + |> assign(:form, form) + |> render(:confirm) + else + conn + |> put_flash(:error, "Magic link is invalid or it has expired.") + |> redirect(to: ~p"<%= schema.route_prefix %>/log-in") + end + end<% end %> + + def delete(conn, _params) do + conn + |> put_flash(:info, "Logged out successfully.") + |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs.eex new file mode 100644 index 0000000..bcd8b10 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs.eex @@ -0,0 +1,224 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import <%= inspect context.module %>Fixtures + alias <%= inspect context.module %> + + setup do + %{unconfirmed_<%= schema.singular %>: unconfirmed_<%= schema.singular %>_fixture(), <%= schema.singular %>: <%= schema.singular %>_fixture()} + end<%= if not live? 
do %> + + describe "GET <%= schema.route_prefix %>/log-in" do + test "renders login page", %{conn: conn} do + conn = get(conn, ~p"<%= schema.route_prefix %>/log-in") + response = html_response(conn, 200) + assert response =~ "Log in" + assert response =~ ~p"<%= schema.route_prefix %>/register" + assert response =~ "Log in with email" + end + + test "renders login page with email filled in (sudo mode)", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + html = + conn + |> log_in_<%= schema.singular %>(<%= schema.singular %>) + |> get(~p"<%= schema.route_prefix %>/log-in") + |> html_response(200) + + assert html =~ "You need to reauthenticate" + refute html =~ "Register" + assert html =~ "Log in with email" + + assert html =~ + ~s(/log-in?mode=password") + response = html_response(conn, 200) + assert response =~ "Log in" + assert response =~ ~p"<%= schema.route_prefix %>/register" + assert response =~ "Log in with email" + end + end + + describe "GET <%= schema.route_prefix %>/log-in/:token" do + test "renders confirmation page for unconfirmed <%= schema.singular %>", %{conn: conn, unconfirmed_<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + conn = get(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + assert html_response(conn, 200) =~ "Confirm and stay logged in" + end + + test "renders login page for confirmed <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + conn = get(conn, ~p"<%= schema.route_prefix %>/log-in/#{token}") + html = html_response(conn, 200) + refute html =~ "Confirm my account" + assert html =~ "Log in" + end + + test "raises error for invalid token", %{conn: conn} do + conn = 
get(conn, ~p"<%= schema.route_prefix %>/log-in/invalid-token") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "Magic link is invalid or it has expired." + end + end<% end %> + + describe "POST <%= schema.route_prefix %>/log-in - email and password" do + test "logs the <%= schema.singular %> in", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %> = set_password(<%= schema.singular %>) + + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email, "password" => valid_<%= schema.singular %>_password()} + }) + + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == ~p"/" + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/") + response = html_response(conn, 200) + assert response =~ <%= schema.singular %>.email + assert response =~ ~p"<%= schema.route_prefix %>/settings" + assert response =~ ~p"<%= schema.route_prefix %>/log-out" + end + + test "logs the <%= schema.singular %> in with remember me", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %> = set_password(<%= schema.singular %>) + + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{ + "email" => <%= schema.singular %>.email, + "password" => valid_<%= schema.singular %>_password(), + "remember_me" => "true" + } + }) + + assert conn.resp_cookies["_<%= web_app_name %>_<%= schema.singular %>_remember_me"] + assert redirected_to(conn) == ~p"/" + end + + test "logs the <%= schema.singular %> in with return to", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %> = set_password(<%= schema.singular %>) + + conn = + conn + |> init_test_session(<%= schema.singular %>_return_to: "/foo/bar") + |> post(~p"<%= schema.route_prefix %>/log-in", %{ + 
"<%= schema.singular %>" => %{ + "email" => <%= schema.singular %>.email, + "password" => valid_<%= schema.singular %>_password() + } + }) + + assert redirected_to(conn) == "/foo/bar" + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Welcome back!" + end + + test "<%= if live?, do: "redirects to login page", else: "emits error message" %> with invalid credentials", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in?mode=password", %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email, "password" => "invalid_password"} + }) + + <%= if live? do %>assert Phoenix.Flash.get(conn.assigns.flash, :error) == "Invalid email or password" + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in"<% else %>response = html_response(conn, 200) + assert response =~ "Log in" + assert response =~ "Invalid email or password"<% end %> + end + end + + describe "POST <%= schema.route_prefix %>/log-in - magic link" do + <%= if not live? 
do %>test "sends magic link email when <%= schema.singular %> exists", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email} + }) + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "If your email is in our system" + assert <%= inspect schema.repo %>.get_by!(<%= inspect context.alias %>.<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id).context == "login" + end + + <% end %>test "logs the <%= schema.singular %> in", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {token, _hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{"token" => token} + }) + + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == ~p"/" + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/") + response = html_response(conn, 200) + assert response =~ <%= schema.singular %>.email + assert response =~ ~p"<%= schema.route_prefix %>/settings" + assert response =~ ~p"<%= schema.route_prefix %>/log-out" + end + + test "confirms unconfirmed <%= schema.singular %>", %{conn: conn, unconfirmed_<%= schema.singular %>: <%= schema.singular %>} do + {token, _hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + refute <%= schema.singular %>.confirmed_at + + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{"token" => token}, + "_action" => "confirmed" + }) + + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == ~p"/" + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "<%= schema.human_singular %> confirmed successfully." 
+ + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).confirmed_at + + # Now do a logged in request and assert on the menu + conn = get(conn, ~p"/") + response = html_response(conn, 200) + assert response =~ <%= schema.singular %>.email + assert response =~ ~p"<%= schema.route_prefix %>/settings" + assert response =~ ~p"<%= schema.route_prefix %>/log-out" + end + + test "<%= if live?, do: "redirects to login page", else: "emits error message" %> when magic link is invalid", %{conn: conn} do + conn = + post(conn, ~p"<%= schema.route_prefix %>/log-in", %{ + "<%= schema.singular %>" => %{"token" => "invalid"} + }) + + <%= if live? do %>assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "The link is invalid or it has expired." + + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in"<% else %>assert html_response(conn, 200) =~ "The link is invalid or it has expired."<% end %> + end + end + + describe "DELETE <%= schema.route_prefix %>/log-out" do + test "logs the <%= schema.singular %> out", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> log_in_<%= schema.singular %>(<%= schema.singular %>) |> delete(~p"<%= schema.route_prefix %>/log-out") + assert redirected_to(conn) == ~p"/" + refute get_session(conn, :<%= schema.singular %>_token) + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Logged out successfully" + end + + test "succeeds even if the <%= schema.singular %> is not logged in", %{conn: conn} do + conn = delete(conn, ~p"<%= schema.route_prefix %>/log-out") + assert redirected_to(conn) == ~p"/" + refute get_session(conn, :<%= schema.singular %>_token) + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ "Logged out successfully" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_html.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/session_html.ex.eex new file mode 100644 index 0000000..0a02f38 --- /dev/null +++ 
b/deps/phoenix/priv/templates/phx.gen.auth/session_html.ex.eex @@ -0,0 +1,9 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionHTML do + use <%= inspect context.web_module %>, :html + + embed_templates "<%= schema.singular %>_session_html/*" + + defp local_mail_adapter? do + Application.get_env(:<%= Mix.Phoenix.otp_app() %>, <%= inspect context.base_module %>.Mailer)[:adapter] == Swoosh.Adapters.Local + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex.eex new file mode 100644 index 0000000..28f3322 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex.eex @@ -0,0 +1,73 @@ +={@<%= scope_config.scope.assign_key %>}> +
+
+ <.header> +

Log in

+ <:subtitle> + <%%= if @<%= scope_config.scope.assign_key %> do %> + You need to reauthenticate to perform sensitive actions on your account. + <%% else %> + Don't have an account? <.link + navigate={~p"<%= schema.route_prefix %>/register"} + class="font-semibold text-brand hover:underline" + phx-no-format + >Sign up for an account now. + <%% end %> + + +
+ +
+ <.icon name="hero-information-circle" class="size-6 shrink-0" /> +
+

You are running the local mail adapter.

+

+ To see sent emails, visit <.link href="/dev/mailbox" class="underline">the mailbox page. +

+
+
+ + <.form :let={f} for={@form} as={:<%= schema.singular %>} id="login_form_magic" action={~p"<%= schema.route_prefix %>/log-in"}> + <.input + readonly={!!@<%= scope_config.scope.assign_key %>} + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + phx-mounted={JS.focus()} + /> + <.button class="btn btn-primary w-full"> + Log in with email + + + +
or
+ + <.form :let={f} for={@form} as={:<%= schema.singular %>} id="login_form_password" action={~p"<%= schema.route_prefix %>/log-in"}> + <.input + readonly={!!@<%= scope_config.scope.assign_key %>} + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + /> + <.input + field={f[:password]} + type="password" + label="Password" + autocomplete="current-password" + spellcheck="false" + /> + <.button class="btn btn-primary w-full" name={@form[:remember_me].name} value="true"> + Log in and stay logged in + + <.button class="btn btn-primary btn-soft w-full mt-2"> + Log in only this time + + +
+
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex.eex new file mode 100644 index 0000000..d45d396 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex.eex @@ -0,0 +1,77 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect auth_module %> + + import <%= inspect auth_module %>, only: [require_sudo_mode: 2] + + plug :require_sudo_mode + plug :assign_email_and_password_changesets + + def edit(conn, _params) do + render(conn, :edit) + end + + def update(conn, %{"action" => "update_email"} = params) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + + case <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>, <%= schema.singular %>_params) do + %{valid?: true} = changeset -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions( + Ecto.Changeset.apply_action!(changeset, :insert), + <%= schema.singular %>.email, + &url(~p"<%= schema.route_prefix %>/settings/confirm-email/#{&1}") + ) + + conn + |> put_flash( + :info, + "A link to confirm your email change has been sent to the new address." 
+ ) + |> redirect(to: ~p"<%= schema.route_prefix %>/settings") + + changeset -> + render(conn, :edit, email_changeset: %{changeset | action: :insert}) + end + end + + def update(conn, %{"action" => "update_password"} = params) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + + case <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, {<%= schema.singular %>, _}} -> + conn + |> put_flash(:info, "Password updated successfully.") + |> put_session(:<%= schema.singular %>_return_to, ~p"<%= schema.route_prefix %>/settings") + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + {:error, changeset} -> + render(conn, :edit, password_changeset: changeset) + end + end + + def confirm_email(conn, %{"token" => token}) do + case <%= inspect context.alias %>.update_<%= schema.singular %>_email(conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>, token) do + {:ok, _<%= schema.singular %>} -> + conn + |> put_flash(:info, "Email changed successfully.") + |> redirect(to: ~p"<%= schema.route_prefix %>/settings") + + {:error, _} -> + conn + |> put_flash(:error, "Email change link is invalid or it has expired.") + |> redirect(to: ~p"<%= schema.route_prefix %>/settings") + end + end + + defp assign_email_and_password_changesets(conn, _opts) do + <%= schema.singular %> = conn.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + + conn + |> assign(:email_changeset, <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>)) + |> assign(:password_changeset, <%= inspect context.alias %>.change_<%= schema.singular %>_password(<%= schema.singular %>)) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs.eex 
b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs.eex new file mode 100644 index 0000000..7831855 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs.eex @@ -0,0 +1,148 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + import <%= inspect context.module %>Fixtures + + setup :register_and_log_in_<%= schema.singular %> + + describe "GET <%= schema.route_prefix %>/settings" do + test "renders settings page", %{conn: conn} do + conn = get(conn, ~p"<%= schema.route_prefix %>/settings") + response = html_response(conn, 200) + assert response =~ "Settings" + end + + test "redirects if <%= schema.singular %> is not logged in" do + conn = build_conn() + conn = get(conn, ~p"<%= schema.route_prefix %>/settings") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + end + + @tag token_authenticated_at: <%= inspect datetime_module %>.add(<%= datetime_now %>, -11, :minute) + test "redirects if <%= schema.singular %> is not in sudo mode", %{conn: conn} do + conn = get(conn, ~p"<%= schema.route_prefix %>/settings") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) == + "You must re-authenticate to access this page." 
+ end + end + + describe "PUT <%= schema.route_prefix %>/settings (change password form)" do + test "updates the <%= schema.singular %> password and resets tokens", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + new_password_conn = + put(conn, ~p"<%= schema.route_prefix %>/settings", %{ + "action" => "update_password", + "<%= schema.singular %>" => %{ + "password" => "new valid password", + "password_confirmation" => "new valid password" + } + }) + + assert redirected_to(new_password_conn) == ~p"<%= schema.route_prefix %>/settings" + + assert get_session(new_password_conn, :<%= schema.singular %>_token) != get_session(conn, :<%= schema.singular %>_token) + + assert Phoenix.Flash.get(new_password_conn.assigns.flash, :info) =~ + "Password updated successfully" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "does not update password on invalid data", %{conn: conn} do + old_password_conn = + put(conn, ~p"<%= schema.route_prefix %>/settings", %{ + "action" => "update_password", + "<%= schema.singular %>" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + + response = html_response(old_password_conn, 200) + assert response =~ "Settings" + assert response =~ "should be at least 12 character(s)" + assert response =~ "does not match password" + + assert get_session(old_password_conn, :<%= schema.singular %>_token) == get_session(conn, :<%= schema.singular %>_token) + end + end + + describe "PUT <%= schema.route_prefix %>/settings (change email form)" do + @tag :capture_log + test "updates the <%= schema.singular %> email", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + put(conn, ~p"<%= schema.route_prefix %>/settings", %{ + "action" => "update_email", + "<%= schema.singular %>" => %{"email" => unique_<%= schema.singular %>_email()} + }) + + assert redirected_to(conn) == ~p"<%= 
schema.route_prefix %>/settings" + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "A link to confirm your email" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "does not update email on invalid data", %{conn: conn} do + conn = + put(conn, ~p"<%= schema.route_prefix %>/settings", %{ + "action" => "update_email", + "<%= schema.singular %>" => %{"email" => "with spaces"} + }) + + response = html_response(conn, 200) + assert response =~ "Settings" + assert response =~ "must have the @ sign and no spaces" + end + end + + describe "GET <%= schema.route_prefix %>/settings/confirm-email/:token" do + setup %{<%= schema.singular %>: <%= schema.singular %>} do + email = unique_<%= schema.singular %>_email() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions(%{<%= schema.singular %> | email: email}, <%= schema.singular %>.email, url) + end) + + %{token: token, email: email} + end + + test "updates the <%= schema.singular %> email once", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>, token: token, email: email} do + conn = get(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/settings" + + assert Phoenix.Flash.get(conn.assigns.flash, :info) =~ + "Email changed successfully" + + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) + + conn = get(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/settings" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) =~ + "Email change link is invalid or it has expired" + end + + test "does not update email with invalid token", %{conn: conn, <%= 
schema.singular %>: <%= schema.singular %>} do + conn = get(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/oops") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/settings" + + assert Phoenix.Flash.get(conn.assigns.flash, :error) =~ + "Email change link is invalid or it has expired" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "redirects if <%= schema.singular %> is not logged in", %{token: token} do + conn = build_conn() + conn = get(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + assert redirected_to(conn) == ~p"<%= schema.route_prefix %>/log-in" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex.eex new file mode 100644 index 0000000..322d0fa --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex.eex @@ -0,0 +1,49 @@ +={@<%= scope_config.scope.assign_key %>}> +
+ <.header> + Account Settings + <:subtitle>Manage your account email address and password settings + +
+ + <.form :let={f} for={@email_changeset} action={~p"<%= schema.route_prefix %>/settings"} id="update_email"> + + + <.input + field={f[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + /> + + <.button variant="primary" phx-disable-with="Changing...">Change Email + + +
+ + <.form :let={f} for={@password_changeset} action={~p"<%= schema.route_prefix %>/settings"} id="update_password"> + + + <.input + field={f[:password]} + type="password" + label="New password" + autocomplete="new-password" + spellcheck="false" + required + /> + <.input + field={f[:password_confirmation]} + type="password" + label="Confirm new password" + autocomplete="new-password" + spellcheck="false" + required + /> + <.button variant="primary" phx-disable-with="Changing..."> + Save Password + + + diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_html.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/settings_html.ex.eex new file mode 100644 index 0000000..7c8c9ea --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_html.ex.eex @@ -0,0 +1,5 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsHTML do + use <%= inspect context.web_module %>, :html + + embed_templates "<%= schema.singular %>_settings_html/*" +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_live.ex.eex b/deps/phoenix/priv/templates/phx.gen.auth/settings_live.ex.eex new file mode 100644 index 0000000..6e051f8 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_live.ex.eex @@ -0,0 +1,160 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Settings do + use <%= inspect context.web_module %>, :live_view + + on_mount {<%= inspect auth_module %>, :require_sudo_mode} + + alias <%= inspect context.module %> + + @impl true + def render(assigns) do + ~H""" + ={@<%= scope_config.scope.assign_key %>}> +
+ <.header> + Account Settings + <:subtitle>Manage your account email address and password settings + +
+ + <.form for={@email_form} id="email_form" phx-submit="update_email" phx-change="validate_email"> + <.input + field={@email_form[:email]} + type="email" + label="Email" + autocomplete="username" + spellcheck="false" + required + /> + <.button variant="primary" phx-disable-with="Changing...">Change Email + + +
+ + <.form + for={@password_form} + id="password_form" + action={~p"<%= schema.route_prefix %>/update-password"} + method="post" + phx-change="validate_password" + phx-submit="update_password" + phx-trigger-action={@trigger_submit} + > + + <.input + field={@password_form[:password]} + type="password" + label="New password" + autocomplete="new-password" + spellcheck="false" + required + /> + <.input + field={@password_form[:password_confirmation]} + type="password" + label="Confirm new password" + autocomplete="new-password" + spellcheck="false" + /> + <.button variant="primary" phx-disable-with="Saving..."> + Save Password + + + + """ + end + + @impl true + def mount(%{"token" => token}, _session, socket) do + socket = + case <%= inspect context.alias %>.update_<%= schema.singular %>_email(socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %>, token) do + {:ok, _<%= schema.singular %>} -> + put_flash(socket, :info, "Email changed successfully.") + + {:error, _} -> + put_flash(socket, :error, "Email change link is invalid or it has expired.") + end + + {:ok, push_navigate(socket, to: ~p"<%= schema.route_prefix %>/settings")} + end + + def mount(_params, _session, socket) do + <%= schema.singular %> = socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + email_changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>, %{}, validate_unique: false) + password_changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_password(<%= schema.singular %>, %{}, hash_password: false) + + socket = + socket + |> assign(:current_email, <%= schema.singular %>.email) + |> assign(:email_form, to_form(email_changeset)) + |> assign(:password_form, to_form(password_changeset)) + |> assign(:trigger_submit, false) + + {:ok, socket} + end + + @impl true + def handle_event("validate_email", params, socket) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + + 
email_form = + socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + |> <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>_params, validate_unique: false) + |> Map.put(:action, :validate) + |> to_form() + + {:noreply, assign(socket, email_form: email_form)} + end + + def handle_event("update_email", params, socket) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + true = <%= inspect context.alias %>.sudo_mode?(<%= schema.singular %>) + + case <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>, <%= schema.singular %>_params) do + %{valid?: true} = changeset -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions( + Ecto.Changeset.apply_action!(changeset, :insert), + <%= schema.singular %>.email, + &url(~p"<%= schema.route_prefix %>/settings/confirm-email/#{&1}") + ) + + info = "A link to confirm your email change has been sent to the new address." 
+ {:noreply, socket |> put_flash(:info, info)} + + changeset -> + {:noreply, assign(socket, :email_form, to_form(changeset, action: :insert))} + end + end + + def handle_event("validate_password", params, socket) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + + password_form = + socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + |> <%= inspect context.alias %>.change_<%= schema.singular %>_password(<%= schema.singular %>_params, hash_password: false) + |> Map.put(:action, :validate) + |> to_form() + + {:noreply, assign(socket, password_form: password_form)} + end + + def handle_event("update_password", params, socket) do + %{"<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = socket.assigns.<%= scope_config.scope.assign_key %>.<%= schema.singular %> + true = <%= inspect context.alias %>.sudo_mode?(<%= schema.singular %>) + + case <%= inspect context.alias %>.change_<%= schema.singular %>_password(<%= schema.singular %>, <%= schema.singular %>_params) do + %{valid?: true} = changeset -> + {:noreply, assign(socket, trigger_submit: true, password_form: to_form(changeset))} + + changeset -> + {:noreply, assign(socket, password_form: to_form(changeset, action: :insert))} + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_live_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/settings_live_test.exs.eex new file mode 100644 index 0000000..a025eec --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_live_test.exs.eex @@ -0,0 +1,212 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.SettingsTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + describe "Settings page" do + test "renders settings page", %{conn: conn} 
do + {:ok, _lv, html} = + conn + |> log_in_<%= schema.singular %>(<%= schema.singular %>_fixture()) + |> live(~p"<%= schema.route_prefix %>/settings") + + assert html =~ "Change Email" + assert html =~ "Save Password" + end + + test "redirects if <%= schema.singular %> is not logged in", %{conn: conn} do + assert {:error, redirect} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + assert {:redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"<%= schema.route_prefix %>/log-in" + assert %{"error" => "You must log in to access this page."} = flash + end + + test "redirects if <%= schema.singular %> is not in sudo mode", %{conn: conn} do + {:ok, conn} = + conn + |> log_in_<%= schema.singular %>(<%= schema.singular %>_fixture(), + token_authenticated_at: <%= inspect datetime_module %>.add(<%= datetime_now %>, -11, :minute) + ) + |> live(~p"<%= schema.route_prefix %>/settings") + |> follow_redirect(conn, ~p"<%= schema.route_prefix %>/log-in") + + assert conn.resp_body =~ "You must re-authenticate to access this page." 
+ end + end + + describe "update email form" do + setup %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>), <%= schema.singular %>: <%= schema.singular %>} + end + + test "updates the <%= schema.singular %> email", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + new_email = unique_<%= schema.singular %>_email() + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + result = + lv + |> form("#email_form", %{ + "<%= schema.singular %>" => %{"email" => new_email} + }) + |> render_submit() + + assert result =~ "A link to confirm your email" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "renders errors with invalid data (phx-change)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + result = + lv + |> element("#email_form") + |> render_change(%{ + "action" => "update_email", + "<%= schema.singular %>" => %{"email" => "with spaces"} + }) + + assert result =~ "Change Email" + assert result =~ "must have the @ sign and no spaces" + end + + test "renders errors with invalid data (phx-submit)", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + result = + lv + |> form("#email_form", %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email} + }) + |> render_submit() + + assert result =~ "Change Email" + assert result =~ "did not change" + end + end + + describe "update password form" do + setup %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>), <%= schema.singular %>: <%= schema.singular %>} + end + + test "updates the <%= schema.singular %> password", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + 
new_password = valid_<%= schema.singular %>_password() + + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + form = + form(lv, "#password_form", %{ + "<%= schema.singular %>" => %{ + "email" => <%= schema.singular %>.email, + "password" => new_password, + "password_confirmation" => new_password + } + }) + + render_submit(form) + + new_password_conn = follow_trigger_action(form, conn) + + assert redirected_to(new_password_conn) == ~p"<%= schema.route_prefix %>/settings" + + assert get_session(new_password_conn, :<%= schema.singular %>_token) != get_session(conn, :<%= schema.singular %>_token) + + assert Phoenix.Flash.get(new_password_conn.assigns.flash, :info) =~ + "Password updated successfully" + + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, new_password) + end + + test "renders errors with invalid data (phx-change)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + result = + lv + |> element("#password_form") + |> render_change(%{ + "<%= schema.singular %>" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + + assert result =~ "Save Password" + assert result =~ "should be at least 12 character(s)" + assert result =~ "does not match password" + end + + test "renders errors with invalid data (phx-submit)", %{conn: conn} do + {:ok, lv, _html} = live(conn, ~p"<%= schema.route_prefix %>/settings") + + result = + lv + |> form("#password_form", %{ + "<%= schema.singular %>" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + |> render_submit() + + assert result =~ "Save Password" + assert result =~ "should be at least 12 character(s)" + assert result =~ "does not match password" + end + end + + describe "confirm email" do + setup %{conn: conn} do + <%= schema.singular %> = <%= schema.singular %>_fixture() + email = unique_<%= schema.singular %>_email() 
+ + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions(%{<%= schema.singular %> | email: email}, <%= schema.singular %>.email, url) + end) + + %{conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>), token: token, email: email, <%= schema.singular %>: <%= schema.singular %>} + end + + test "updates the <%= schema.singular %> email once", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>, token: token, email: email} do + {:error, redirect} = live(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + + assert {:live_redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"<%= schema.route_prefix %>/settings" + assert %{"info" => message} = flash + assert message == "Email changed successfully." + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) + + # use confirm token again + {:error, redirect} = live(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + assert {:live_redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"<%= schema.route_prefix %>/settings" + assert %{"error" => message} = flash + assert message == "Email change link is invalid or it has expired." + end + + test "does not update email with invalid token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:error, redirect} = live(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/oops") + assert {:live_redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"<%= schema.route_prefix %>/settings" + assert %{"error" => message} = flash + assert message == "Email change link is invalid or it has expired." 
+ assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "redirects if <%= schema.singular %> is not logged in", %{token: token} do + conn = build_conn() + {:error, redirect} = live(conn, ~p"<%= schema.route_prefix %>/settings/confirm-email/#{token}") + assert {:redirect, %{to: path, flash: flash}} = redirect + assert path == ~p"<%= schema.route_prefix %>/log-in" + assert %{"error" => message} = flash + assert message == "You must log in to access this page." + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs.eex b/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs.eex new file mode 100644 index 0000000..80bb6ed --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs.eex @@ -0,0 +1,391 @@ + import <%= inspect context.module %>Fixtures + alias <%= inspect context.module %>.{<%= inspect schema.alias %>, <%= inspect schema.alias %>Token} + + describe "get_<%= schema.singular %>_by_email/1" do + test "does not return the <%= schema.singular %> if the email does not exist" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email("unknown@example.com") + end + + test "returns the <%= schema.singular %> if the email exists" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %<%= inspect schema.alias %>{id: ^id} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + end + + describe "get_<%= schema.singular %>_by_email_and_password/2" do + test "does not return the <%= schema.singular %> if the email does not exist" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password("unknown@example.com", "hello world!") + end + + test "does not return the <%= schema.singular %> if the password is not valid" do + <%= schema.singular %> = <%= schema.singular %>_fixture() |> set_password() + refute <%= inspect context.alias 
%>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "invalid") + end + + test "returns the <%= schema.singular %> if the email and password are valid" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() |> set_password() + + assert %<%= inspect schema.alias %>{id: ^id} = + <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, valid_<%= schema.singular %>_password()) + end + end + + describe "get_<%= schema.singular %>!/1" do + test "raises if id is invalid" do + assert_raise Ecto.NoResultsError, fn -> + <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= inspect schema.sample_id %>) + end + end + + test "returns the <%= schema.singular %> with the given id" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %<%= inspect schema.alias %>{id: ^id} = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%=schema.singular %>.id) + end + end + + describe "register_<%= schema.singular %>/1" do + test "requires email to be set" do + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{}) + + assert %{email: ["can't be blank"]} = errors_on(changeset) + end + + test "validates email when given" do + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: "not valid"}) + + assert %{email: ["must have the @ sign and no spaces"]} = errors_on(changeset) + end + + test "validates maximum values for email for security" do + too_long = String.duplicate("db", 100) + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: too_long}) + assert "should be at most 160 character(s)" in errors_on(changeset).email + end + + test "validates email uniqueness" do + %{email: email} = <%= schema.singular %>_fixture() + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: email}) + assert "has already been 
taken" in errors_on(changeset).email + + # Now try with the uppercased email too, to check that email case is ignored. + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: String.upcase(email)}) + assert "has already been taken" in errors_on(changeset).email + end + + test "registers <%= schema.plural %> without password" do + email = unique_<%= schema.singular %>_email() + {:ok, <%= schema.singular %>} = <%= inspect context.alias %>.register_<%= schema.singular %>(valid_<%= schema.singular %>_attributes(email: email)) + assert <%= schema.singular %>.email == email + assert is_nil(<%= schema.singular %>.hashed_password) + assert is_nil(<%= schema.singular %>.confirmed_at) + assert is_nil(<%= schema.singular %>.password) + end + end + + describe "sudo_mode?/2" do + test "validates the authenticated_at time" do + now = <%= inspect datetime_module %>.utc_now() + + assert <%= inspect context.alias %>.sudo_mode?(%<%= inspect schema.alias %>{authenticated_at: <%= inspect datetime_module %>.utc_now()}) + assert <%= inspect context.alias %>.sudo_mode?(%<%= inspect schema.alias %>{authenticated_at: <%= inspect datetime_module %>.add(now, -19, :minute)}) + refute <%= inspect context.alias %>.sudo_mode?(%<%= inspect schema.alias %>{authenticated_at: <%= inspect datetime_module %>.add(now, -21, :minute)}) + + # minute override + refute <%= inspect context.alias %>.sudo_mode?( + %<%= inspect schema.alias %>{authenticated_at: <%= inspect datetime_module %>.add(now, -11, :minute)}, + -10 + ) + + # not authenticated + refute <%= inspect context.alias %>.sudo_mode?(%<%= inspect schema.alias %>{}) + end + end + + describe "change_<%= schema.singular %>_email/3" do + test "returns a <%= schema.singular %> changeset" do + assert %Ecto.Changeset{} = changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(%<%= inspect schema.alias %>{}) + assert changeset.required == [:email] + end + end + + describe "deliver_<%= 
schema.singular %>_update_email_instructions/3" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "sends token through notification", %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions(<%= schema.singular %>, "current@example.com", url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: :crypto.hash(:sha256, token)) + assert <%= schema.singular %>_token.<%= schema.singular %>_id == <%= schema.singular %>.id + assert <%= schema.singular %>_token.sent_to == <%= schema.singular %>.email + assert <%= schema.singular %>_token.context == "change:current@example.com" + end + end + + describe "update_<%= schema.singular %>_email/2" do + setup do + <%= schema.singular %> = unconfirmed_<%= schema.singular %>_fixture() + email = unique_<%= schema.singular %>_email() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_update_email_instructions(%{<%= schema.singular %> | email: email}, <%= schema.singular %>.email, url) + end) + + %{<%= schema.singular %>: <%= schema.singular %>, token: token, email: email} + end + + test "updates the email with a valid token", %{<%= schema.singular %>: <%= schema.singular %>, token: token, email: email} do + assert {:ok, %{email: ^email}} = <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, token) + changed_<%= schema.singular %> = Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id) + assert changed_<%= schema.singular %>.email != <%= schema.singular %>.email + assert changed_<%= schema.singular %>.email == email + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + 
test "does not update email with invalid token", %{<%= schema.singular %>: <%= schema.singular %>} do + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, "oops") == + {:error, :transaction_aborted} + + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not update email if <%= schema.singular %> email changed", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(%{<%= schema.singular %> | email: "current@example.com"}, token) == + {:error, :transaction_aborted} + + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not update email if token expired", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, token) == + {:error, :transaction_aborted} + + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "change_<%= schema.singular %>_password/3" do + test "returns a <%= schema.singular %> changeset" do + assert %Ecto.Changeset{} = changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_password(%<%= inspect schema.alias %>{}) + assert changeset.required == [:password] + end + + test "allows fields to be set" do + changeset = + <%= inspect 
context.alias %>.change_<%= schema.singular %>_password( + %<%= inspect schema.alias %>{}, + %{ + "password" => "new valid password" + }, + hash_password: false + ) + + assert changeset.valid? + assert get_change(changeset, :password) == "new valid password" + assert is_nil(get_change(changeset, :hashed_password)) + end + end + + describe "update_<%= schema.singular %>_password/2" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "validates password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, %{ + password: "not valid", + password_confirmation: "another" + }) + + assert %{ + password: ["should be at least 12 character(s)"], + password_confirmation: ["does not match password"] + } = errors_on(changeset) + end + + test "validates maximum values for password for security", %{<%= schema.singular %>: <%= schema.singular %>} do + too_long = String.duplicate("db", 100) + + {:error, changeset} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, %{password: too_long}) + + assert "should be at most 72 character(s)" in errors_on(changeset).password + end + + test "updates the password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:ok, {<%= schema.singular %>, expired_tokens}} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, %{ + password: "new valid password" + }) + + assert expired_tokens == [] + assert is_nil(<%= schema.singular %>.password) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "deletes all tokens for the given <%= schema.singular %>", %{<%= schema.singular %>: <%= schema.singular %>} do + _ = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) 
+ + {:ok, {_, _}} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, %{ + password: "new valid password" + }) + + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "generate_<%= schema.singular %>_session_token/1" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "generates a token", %{<%= schema.singular %>: <%= schema.singular %>} do + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: token) + assert <%= schema.singular %>_token.context == "session" + assert <%= schema.singular %>_token.authenticated_at != nil + + # Creating the same token for another <%= schema.singular %> should fail + assert_raise Ecto.ConstraintError, fn -> + Repo.insert!(%<%= inspect schema.alias %>Token{ + token: <%= schema.singular %>_token.token, + <%= schema.singular %>_id: <%= schema.singular %>_fixture().id, + context: "session" + }) + end + end + + test "duplicates the authenticated_at of given <%= schema.singular %> in new token", %{<%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %> = %{<%= schema.singular %> | authenticated_at: <%= inspect datetime_module %>.add(<%= datetime_now %>, -3600)} + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: token) + assert <%= schema.singular %>_token.authenticated_at == <%= schema.singular %>.authenticated_at + assert <%= inspect datetime_module %>.compare(<%= schema.singular %>_token.inserted_at, <%= schema.singular %>.authenticated_at) == :gt + end + end + + describe "get_<%= schema.singular %>_by_session_token/1" do + setup do + <%= schema.singular %> = <%= 
schema.singular %>_fixture() + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + %{<%= schema.singular %>: <%= schema.singular %>, token: token} + end + + test "returns <%= schema.singular %> by token", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert {session_<%= schema.singular %>, token_inserted_at} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + assert session_<%= schema.singular %>.id == <%= schema.singular %>.id + assert session_<%= schema.singular %>.authenticated_at != nil + assert token_inserted_at != nil + end + + test "does not return <%= schema.singular %> for invalid token" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token("oops") + end + + test "does not return <%= schema.singular %> for expired token", %{token: token} do + dt = ~N[2020-01-01 00:00:00] + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: dt, authenticated_at: dt]) + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + end + + describe "get_<%= schema.singular %>_by_magic_link_token/1" do + setup do + <%= schema.singular %> = <%= schema.singular %>_fixture() + {encoded_token, _hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + %{<%= schema.singular %>: <%= schema.singular %>, token: encoded_token} + end + + test "returns <%= schema.singular %> by token", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert session_<%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_magic_link_token(token) + assert session_<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "does not return <%= schema.singular %> for invalid token" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_magic_link_token("oops") + end + + test "does not return <%= 
schema.singular %> for expired token", %{token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_magic_link_token(token) + end + end + + describe "login_<%= schema.singular %>_by_magic_link/1" do + test "confirms <%= schema.singular %> and expires tokens" do + <%= schema.singular %> = unconfirmed_<%= schema.singular %>_fixture() + refute <%= schema.singular %>.confirmed_at + {encoded_token, hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + + assert {:ok, {<%= schema.singular %>, [%{token: ^hashed_token}]}} = + <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(encoded_token) + + assert <%= schema.singular %>.confirmed_at + end + + test "returns <%= schema.singular %> and (deleted) token for confirmed <%= schema.singular %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert <%= schema.singular %>.confirmed_at + {encoded_token, _hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + assert {:ok, {^<%= schema.singular %>, []}} = <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(encoded_token) + # one time use only + assert {:error, :not_found} = <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(encoded_token) + end + + test "raises when unconfirmed <%= schema.singular %> has password set" do + <%= schema.singular %> = unconfirmed_<%= schema.singular %>_fixture() + {1, nil} = Repo.update_all(<%= inspect schema.alias %>, set: [hashed_password: "hashed"]) + {encoded_token, _hashed_token} = generate_<%= schema.singular %>_magic_link_token(<%= schema.singular %>) + + assert_raise RuntimeError, ~r/magic link log in is not allowed/, fn -> + <%= inspect context.alias %>.login_<%= schema.singular %>_by_magic_link(encoded_token) + end + end + end + + describe "delete_<%= 
schema.singular %>_session_token/1" do + test "deletes the token" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + assert <%= inspect context.alias %>.delete_<%= schema.singular %>_session_token(token) == :ok + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + end + + describe "deliver_login_instructions/2" do + setup do + %{<%= schema.singular %>: unconfirmed_<%= schema.singular %>_fixture()} + end + + test "sends token through notification", %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_login_instructions(<%= schema.singular %>, url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: :crypto.hash(:sha256, token)) + assert <%= schema.singular %>_token.<%= schema.singular %>_id == <%= schema.singular %>.id + assert <%= schema.singular %>_token.sent_to == <%= schema.singular %>.email + assert <%= schema.singular %>_token.context == "login" + end + end + + describe "inspect/2 for the <%= inspect schema.alias %> module" do + test "does not include password" do + refute inspect(%<%= inspect schema.alias %>{password: "123456"}) =~ "password: \"123456\"" + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel.ex.eex b/deps/phoenix/priv/templates/phx.gen.channel/channel.ex.eex new file mode 100644 index 0000000..0f7a9b5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel.ex.eex @@ -0,0 +1,32 @@ +defmodule <%= module %>Channel do + use <%= web_module %>, :channel + + @impl true + def join("<%= singular %>:lobby", payload, socket) do + if authorized?(payload) do + {:ok, socket} + else + {:error, %{reason: "unauthorized"}} + end + end + + # Channels can be 
used in a request/response fashion + # by sending replies to requests from the client + @impl true + def handle_in("ping", payload, socket) do + {:reply, {:ok, payload}, socket} + end + + # It is also common to receive messages from the client and + # broadcast to everyone in the current topic (<%= singular %>:lobby). + @impl true + def handle_in("shout", payload, socket) do + broadcast(socket, "shout", payload) + {:noreply, socket} + end + + # Add authorization logic here as required. + defp authorized?(_payload) do + true + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex.eex b/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex.eex new file mode 100644 index 0000000..f0dd5f9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex.eex @@ -0,0 +1,39 @@ +defmodule <%= web_module %>.ChannelCase do + @moduledoc """ + This module defines the test case to be used by + channel tests. + + Such tests rely on `Phoenix.ChannelTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use <%= web_module %>.ChannelCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # Import conveniences for testing with channels + import Phoenix.ChannelTest + import <%= web_module %>.ChannelCase + + # The default endpoint for testing + @endpoint <%= web_module %>.Endpoint + end + end<%= if Code.ensure_loaded?(Ecto.Adapters.SQL) do %> + + setup tags do + <%= base %>.DataCase.setup_sandbox(tags) + :ok + end<% else %> + + setup _tags do + :ok + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs.eex new file mode 100644 index 0000000..7698d87 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs.eex @@ -0,0 +1,27 @@ +defmodule <%= module %>ChannelTest do + use <%= web_module %>.ChannelCase + + setup do + {:ok, _, socket} = + <%= web_module %>.UserSocket + |> socket("user_id", %{some: :assign}) + |> subscribe_and_join(<%= module %>Channel, "<%= singular %>:lobby") + + %{socket: socket} + end + + test "ping replies with status ok", %{socket: socket} do + ref = push(socket, "ping", %{"hello" => "there"}) + assert_reply ref, :ok, %{"hello" => "there"} + end + + test "shout broadcasts to <%= singular %>:lobby", %{socket: socket} do + push(socket, "shout", %{"hello" => "all"}) + assert_broadcast "shout", %{"hello" => "all"} + end + + test "broadcasts are pushed to the client", %{socket: socket} do + broadcast_from!(socket, "broadcast", %{"some" => "data"}) + assert_push "broadcast", %{"some" => "data"} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex.eex new file mode 100644 index 0000000..e241734 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex.eex @@ -0,0 +1,89 @@ + + alias <%= inspect schema.module %> + + @doc """ + Returns the list of <%= schema.plural %>. 
+ + ## Examples + + iex> list_<%= schema.plural %>() + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %> do + raise "TODO" + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises if the <%= schema.human_singular %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + """ + def get_<%= schema.singular %>!(<%= primary_key %>), do: raise "TODO" + + @doc """ + Creates a <%= schema.singular %>. + + ## Examples + + iex> create_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(%{field: bad_value}) + {:error, ...} + + """ + def create_<%= schema.singular %>(attrs) do + raise "TODO" + end + + @doc """ + Updates a <%= schema.singular %>. + + ## Examples + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: bad_value}) + {:error, ...} + + """ + def update_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + raise "TODO" + end + + @doc """ + Deletes a <%= inspect schema.alias %>. + + ## Examples + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:error, ...} + + """ + def delete_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + raise "TODO" + end + + @doc """ + Returns a data structure for tracking <%= schema.singular %> changes. 
+ + ## Examples + + iex> change_<%= schema.singular %>(<%= schema.singular %>) + %Todo{...} + + """ + def change_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, _attrs \\ %{}) do + raise "TODO" + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/access_no_schema_scope.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema_scope.ex.eex new file mode 100644 index 0000000..eef5d57 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema_scope.ex.eex @@ -0,0 +1,97 @@ + + alias <%= inspect schema.module %> + alias <%= inspect scope.alias %> + + @doc """ + Subscribes to scoped notifications about any <%= schema.singular %> changes. + """ + def subscribe_<%= schema.singular %>(%<%= inspect scope.alias %>{} = _scope) do + raise "TODO" + end + + @doc """ + Returns the list of <%= schema.plural %>. + + ## Examples + + iex> list_<%= schema.plural %>(scope) + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %>(%<%= inspect scope.alias %>{} = _scope) do + raise "TODO" + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises if the <%= schema.human_singular %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(scope, 123) + %<%= inspect schema.alias %>{} + + """ + def get_<%= schema.singular %>!(%<%= inspect scope.alias %>{} = _scope, id), do: raise "TODO" + + @doc """ + Creates a <%= schema.singular %>. + + ## Examples + + iex> create_<%= schema.singular %>(scope, %{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(scope, %{field: bad_value}) + {:error, ...} + + """ + def create_<%= schema.singular %>(%<%= inspect scope.alias %>{} = _scope, attrs) do + raise "TODO" + end + + @doc """ + Updates a <%= schema.singular %>. 
+ + ## Examples + + iex> update_<%= schema.singular %>(scope, <%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(scope, <%= schema.singular %>, %{field: bad_value}) + {:error, ...} + + """ + def update_<%= schema.singular %>(%<%= inspect scope.alias %>{} = _scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + raise "TODO" + end + + @doc """ + Deletes a <%= inspect schema.alias %>. + + ## Examples + + iex> delete_<%= schema.singular %>(scope, <%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(scope, <%= schema.singular %>) + {:error, ...} + + """ + def delete_<%= schema.singular %>(%<%= inspect scope.alias %>{} = _scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>) do + raise "TODO" + end + + @doc """ + Returns a data structure for tracking <%= schema.singular %> changes. + + ## Examples + + iex> change_<%= schema.singular %>(scope, <%= schema.singular %>) + %Todo{...} + + """ + def change_<%= schema.singular %>(%<%= inspect scope.alias %>{} = _scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>, _attrs \\ %{}) do + raise "TODO" + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/context.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/context.ex.eex new file mode 100644 index 0000000..51f2d28 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/context.ex.eex @@ -0,0 +1,8 @@ +defmodule <%= inspect context.module %> do + @moduledoc """ + The <%= context.name %> context. 
+ """ + + import Ecto.Query, warn: false + alias <%= inspect schema.repo %><%= schema.repo_alias %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/context_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.context/context_test.exs.eex new file mode 100644 index 0000000..8958ec4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/context_test.exs.eex @@ -0,0 +1,5 @@ +defmodule <%= inspect context.module %>Test do + use <%= inspect context.base_module %>.DataCase + + alias <%= inspect context.module %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex.eex new file mode 100644 index 0000000..c44257f --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex.eex @@ -0,0 +1,23 @@ +<%= for {attr, {_function_name, function_def, _needs_impl?}} <- schema.fixture_unique_functions do %> @doc """ + Generate a unique <%= schema.singular %> <%= attr %>. + """ +<%= function_def %> +<% end %> @doc """ + Generate a <%= schema.singular %>. 
+ """ + def <%= schema.singular %>_fixture(<%= if scope do %>scope, <% end %>attrs \\ %{}) do<%= if scope do %> + attrs = + Enum.into(attrs, %{ +<%= schema.fixture_params |> Enum.map(fn {key, code} -> " #{key}: #{code}" end) |> Enum.join(",\n") %> + }) + + {:ok, <%= schema.singular %>} = <%= inspect context.module %>.create_<%= schema.singular %>(scope, attrs)<% else %> + {:ok, <%= schema.singular %>} = + attrs + |> Enum.into(%{ +<%= schema.fixture_params |> Enum.map(fn {key, code} -> " #{key}: #{code}" end) |> Enum.join(",\n") %> + }) + |> <%= inspect context.module %>.create_<%= schema.singular %>() +<% end %> + <%= schema.singular %> + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex.eex new file mode 100644 index 0000000..111acbf --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex.eex @@ -0,0 +1,6 @@ +defmodule <%= inspect context.module %>Fixtures do + @moduledoc """ + This module defines test helpers for creating + entities via the `<%= inspect context.module %>` context. + """ +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex.eex new file mode 100644 index 0000000..b55b1d9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex.eex @@ -0,0 +1,96 @@ + + alias <%= inspect schema.module %> + + @doc """ + Returns the list of <%= schema.plural %>. + + ## Examples + + iex> list_<%= schema.plural %>() + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %> do + Repo.all(<%= inspect schema.alias %>) + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises `Ecto.NoResultsError` if the <%= schema.human_singular %> does not exist. 
+ + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>!(456) + ** (Ecto.NoResultsError) + + """ + def get_<%= schema.singular %>!(<%= primary_key %>), do: Repo.get!(<%= inspect schema.alias %>, <%= primary_key %>) + + @doc """ + Creates a <%= schema.singular %>. + + ## Examples + + iex> create_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def create_<%= schema.singular %>(attrs) do + %<%= inspect schema.alias %>{} + |> <%= inspect schema.alias %>.changeset(attrs) + |> Repo.insert() + end + + @doc """ + Updates a <%= schema.singular %>. + + ## Examples + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def update_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + <%= schema.singular %> + |> <%= inspect schema.alias %>.changeset(attrs) + |> Repo.update() + end + + @doc """ + Deletes a <%= schema.singular %>. + + ## Examples + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:error, %Ecto.Changeset{}} + + """ + def delete_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + Repo.delete(<%= schema.singular %>) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for tracking <%= schema.singular %> changes. 
+ + ## Examples + + iex> change_<%= schema.singular %>(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs \\ %{}) do + <%= inspect schema.alias %>.changeset(<%= schema.singular %>, attrs) + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/schema_access_scope.ex.eex b/deps/phoenix/priv/templates/phx.gen.context/schema_access_scope.ex.eex new file mode 100644 index 0000000..0b41ca5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/schema_access_scope.ex.eex @@ -0,0 +1,139 @@ + + alias <%= inspect schema.module %> + alias <%= inspect scope.module %> + + @doc """ + Subscribes to scoped notifications about any <%= schema.singular %> changes. + + The broadcasted messages match the pattern: + + * {:created, %<%= inspect schema.alias %>{}} + * {:updated, %<%= inspect schema.alias %>{}} + * {:deleted, %<%= inspect schema.alias %>{}} + + """ + def subscribe_<%= schema.plural %>(%<%= inspect scope.alias %>{} = scope) do + key = scope.<%= Enum.join(scope.access_path, ".") %> + + Phoenix.PubSub.subscribe(<%= inspect context.base_module %>.PubSub, "<%= scope.name %>:#{key}:<%= schema.plural %>") + end + + defp broadcast_<%= schema.singular %>(%<%= inspect scope.alias %>{} = scope, message) do + key = scope.<%= Enum.join(scope.access_path, ".") %> + + Phoenix.PubSub.broadcast(<%= inspect context.base_module %>.PubSub, "<%= scope.name %>:#{key}:<%= schema.plural %>", message) + end + + @doc """ + Returns the list of <%= schema.plural %>. + + ## Examples + + iex> list_<%= schema.plural %>(scope) + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %>(%<%= inspect scope.alias %>{} = scope) do + Repo.all_by(<%= inspect schema.alias %>, <%= scope.schema_key %>: scope.<%= Enum.join(scope.access_path, ".") %>) + end + + @doc """ + Gets a single <%= schema.singular %>. 
+ + Raises `Ecto.NoResultsError` if the <%= schema.human_singular %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(scope, 123) + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>!(scope, 456) + ** (Ecto.NoResultsError) + + """ + def get_<%= schema.singular %>!(%<%= inspect scope.alias %>{} = scope, <%= primary_key %>) do + Repo.get_by!(<%= inspect schema.alias %>, <%= primary_key %>: <%= primary_key %>, <%= scope.schema_key %>: scope.<%= Enum.join(scope.access_path, ".") %>) + end + + @doc """ + Creates a <%= schema.singular %>. + + ## Examples + + iex> create_<%= schema.singular %>(scope, %{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(scope, %{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def create_<%= schema.singular %>(%<%= inspect scope.alias %>{} = scope, attrs) do + with {:ok, <%= schema.singular %> = %<%= inspect schema.alias %>{}} <- + %<%= inspect schema.alias %>{} + |> <%= inspect schema.alias %>.changeset(attrs, scope) + |> Repo.insert() do + broadcast_<%= schema.singular %>(scope, {:created, <%= schema.singular %>}) + {:ok, <%= schema.singular %>} + end + end + + @doc """ + Updates a <%= schema.singular %>. 
+ + ## Examples + + iex> update_<%= schema.singular %>(scope, <%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(scope, <%= schema.singular %>, %{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def update_<%= schema.singular %>(%<%= inspect scope.alias %>{} = scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + true = <%= schema.singular %>.<%= scope.schema_key %> == scope.<%= Enum.join(scope.access_path, ".") %> + + with {:ok, <%= schema.singular %> = %<%= inspect schema.alias %>{}} <- + <%= schema.singular %> + |> <%= inspect schema.alias %>.changeset(attrs, scope) + |> Repo.update() do + broadcast_<%= schema.singular %>(scope, {:updated, <%= schema.singular %>}) + {:ok, <%= schema.singular %>} + end + end + + @doc """ + Deletes a <%= schema.singular %>. + + ## Examples + + iex> delete_<%= schema.singular %>(scope, <%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(scope, <%= schema.singular %>) + {:error, %Ecto.Changeset{}} + + """ + def delete_<%= schema.singular %>(%<%= inspect scope.alias %>{} = scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>) do + true = <%= schema.singular %>.<%= scope.schema_key %> == scope.<%= Enum.join(scope.access_path, ".") %> + + with {:ok, <%= schema.singular %> = %<%= inspect schema.alias %>{}} <- + Repo.delete(<%= schema.singular %>) do + broadcast_<%= schema.singular %>(scope, {:deleted, <%= schema.singular %>}) + {:ok, <%= schema.singular %>} + end + end + + @doc """ + Returns an `%Ecto.Changeset{}` for tracking <%= schema.singular %> changes. 
+ + ## Examples + + iex> change_<%= schema.singular %>(scope, <%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>(%<%= inspect scope.alias %>{} = scope, %<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs \\ %{}) do + true = <%= schema.singular %>.<%= scope.schema_key %> == scope.<%= Enum.join(scope.access_path, ".") %> + + <%= inspect schema.alias %>.changeset(<%= schema.singular %>, attrs, scope) + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs.eex b/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs.eex new file mode 100644 index 0000000..edf38c9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs.eex @@ -0,0 +1,54 @@ + + describe "<%= schema.plural %>" do + alias <%= inspect schema.module %> + + import <%= inspect context.module %>Fixtures + + @invalid_attrs <%= Mix.Phoenix.to_text for {key, _} <- schema.params.create, into: %{}, do: {key, nil} %> + + test "list_<%= schema.plural %>/0 returns all <%= schema.plural %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert <%= inspect context.alias %>.list_<%= schema.plural %>() == [<%= schema.singular %>] + end + + test "get_<%= schema.singular %>!/1 returns the <%= schema.singular %> with given id" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.<%= primary_key %>) == <%= schema.singular %> + end + + test "create_<%= schema.singular %>/1 with valid data creates a <%= schema.singular %>" do + valid_attrs = <%= Mix.Phoenix.to_text schema.params.create %> + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias %>.create_<%= schema.singular %>(valid_attrs)<%= for {field, value} <- schema.params.create do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) 
%><% end %> + end + + test "create_<%= schema.singular %>/1 with invalid data returns error changeset" do + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.create_<%= schema.singular %>(@invalid_attrs) + end + + test "update_<%= schema.singular %>/2 with valid data updates the <%= schema.singular %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + update_attrs = <%= Mix.Phoenix.to_text schema.params.update%> + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, update_attrs)<%= for {field, value} <- schema.params.update do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) %><% end %> + end + + test "update_<%= schema.singular %>/2 with invalid data returns error changeset" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, @invalid_attrs) + assert <%= schema.singular %> == <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.<%= primary_key %>) + end + + test "delete_<%= schema.singular %>/1 deletes the <%= schema.singular %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert {:ok, %<%= inspect schema.alias %>{}} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= schema.singular %>) + assert_raise Ecto.NoResultsError, fn -> <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.<%= primary_key %>) end + end + + test "change_<%= schema.singular %>/1 returns a <%= schema.singular %> changeset" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %Ecto.Changeset{} = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= schema.singular %>) + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/test_cases_scope.exs.eex 
b/deps/phoenix/priv/templates/phx.gen.context/test_cases_scope.exs.eex new file mode 100644 index 0000000..457ea8e --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/test_cases_scope.exs.eex @@ -0,0 +1,86 @@ + + describe "<%= schema.plural %>" do + alias <%= inspect schema.module %> + + import <%= inspect scope.test_data_fixture %>, only: [<%= scope.name %>_scope_fixture: 0] + import <%= inspect context.module %>Fixtures + + @invalid_attrs <%= Mix.Phoenix.to_text for {key, _} <- schema.params.create, into: %{}, do: {key, nil} %> + + test "list_<%= schema.plural %>/1 returns all scoped <%= schema.plural %>" do + scope = <%= scope.name %>_scope_fixture() + other_scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + other_<%= schema.singular %> = <%= schema.singular %>_fixture(other_scope) + assert <%= inspect context.alias %>.list_<%= schema.plural %>(scope) == [<%= schema.singular %>] + assert <%= inspect context.alias %>.list_<%= schema.plural %>(other_scope) == [other_<%= schema.singular %>] + end + + test "get_<%= schema.singular %>!/2 returns the <%= schema.singular %> with given id" do + scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + other_scope = <%= scope.name %>_scope_fixture() + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(scope, <%= schema.singular %>.<%= schema.opts[:primary_key] || :id %>) == <%= schema.singular %> + assert_raise Ecto.NoResultsError, fn -> <%= inspect context.alias %>.get_<%= schema.singular %>!(other_scope, <%= schema.singular %>.<%= schema.opts[:primary_key] || :id %>) end + end + + test "create_<%= schema.singular %>/2 with valid data creates a <%= schema.singular %>" do + valid_attrs = <%= Mix.Phoenix.to_text schema.params.create %> + scope = <%= scope.name %>_scope_fixture() + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias 
%>.create_<%= schema.singular %>(scope, valid_attrs)<%= for {field, value} <- schema.params.create do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) %><% end %> + assert <%= schema.singular %>.<%= scope.schema_key %> == scope.<%= scope.access_path |> Enum.join(".") %> + end + + test "create_<%= schema.singular %>/2 with invalid data returns error changeset" do + scope = <%= scope.name %>_scope_fixture() + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.create_<%= schema.singular %>(scope, @invalid_attrs) + end + + test "update_<%= schema.singular %>/3 with valid data updates the <%= schema.singular %>" do + scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + update_attrs = <%= Mix.Phoenix.to_text schema.params.update%> + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias %>.update_<%= schema.singular %>(scope, <%= schema.singular %>, update_attrs)<%= for {field, value} <- schema.params.update do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) %><% end %> + end + + test "update_<%= schema.singular %>/3 with invalid scope raises" do + scope = <%= scope.name %>_scope_fixture() + other_scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + + assert_raise MatchError, fn -> + <%= inspect context.alias %>.update_<%= schema.singular %>(other_scope, <%= schema.singular %>, %{}) + end + end + + test "update_<%= schema.singular %>/3 with invalid data returns error changeset" do + scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.update_<%= schema.singular %>(scope, <%= schema.singular %>, @invalid_attrs) + assert <%= schema.singular %> == <%= inspect context.alias 
%>.get_<%= schema.singular %>!(scope, <%= schema.singular %>.<%= schema.opts[:primary_key] || :id %>) + end + + test "delete_<%= schema.singular %>/2 deletes the <%= schema.singular %>" do + scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + assert {:ok, %<%= inspect schema.alias %>{}} = <%= inspect context.alias %>.delete_<%= schema.singular %>(scope, <%= schema.singular %>) + assert_raise Ecto.NoResultsError, fn -> <%= inspect context.alias %>.get_<%= schema.singular %>!(scope, <%= schema.singular %>.<%= schema.opts[:primary_key] || :id %>) end + end + + test "delete_<%= schema.singular %>/2 with invalid scope raises" do + scope = <%= scope.name %>_scope_fixture() + other_scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + assert_raise MatchError, fn -> <%= inspect context.alias %>.delete_<%= schema.singular %>(other_scope, <%= schema.singular %>) end + end + + test "change_<%= schema.singular %>/2 returns a <%= schema.singular %> changeset" do + scope = <%= scope.name %>_scope_fixture() + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) + assert %Ecto.Changeset{} = <%= inspect context.alias %>.change_<%= schema.singular %>(scope, <%= schema.singular %>) + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex.eex b/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex.eex new file mode 100644 index 0000000..81320e3 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex.eex @@ -0,0 +1,17 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset + alias <%= inspect schema.module %> + + embedded_schema do <%= if !Enum.empty?(schema.types) do %> +<%= Mix.Phoenix.Schema.format_fields_for_schema(schema) %><% end %> +<%= for {_, k, _, _} <- schema.assocs do %> field <%= inspect k %>, <%= if schema.binary_id do %>:binary_id<% else %>:id<% end 
%> +<% end %> end + + @doc false + def changeset(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + <%= schema.singular %> + |> cast(attrs, [<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + |> validate_required([<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.html/controller.ex.eex new file mode 100644 index 0000000..7ef1927 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/controller.ex.eex @@ -0,0 +1,67 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Controller do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + def index(conn, _params) do + <%= schema.plural %> = <%= inspect context.alias %>.list_<%= schema.plural %>(<%= conn_scope %>) + render(conn, :index, <%= schema.collection %>: <%= schema.plural %>) + end + + def new(conn, _params) do<%= if scope do %> + changeset = + <%= inspect context.alias %>.change_<%= schema.singular %>(<%= context_scope_prefix %>%<%= inspect schema.alias %>{ + <%= scope.schema_key %>: <%= conn_scope %>.<%= Enum.join(scope.access_path, ".") %> + }) +<% else %> + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(%<%= inspect schema.alias %>{})<% end %> + render(conn, :new, changeset: changeset) + end + + def create(conn, %{<%= inspect schema.singular %> => <%= schema.singular %>_params}) do + case <%= inspect context.alias %>.create_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + conn + |> put_flash(:info, "<%= schema.human_singular %> created successfully.") + |> redirect(to: ~p"<%= scope_conn_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + + {:error, %Ecto.Changeset{} = changeset} -> + 
render(conn, :new, changeset: changeset) + end + end + + def show(conn, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + render(conn, :show, <%= schema.singular %>: <%= schema.singular %>) + end + + def edit(conn, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>) + render(conn, :edit, <%= schema.singular %>: <%= schema.singular %>, changeset: changeset) + end + + def update(conn, %{"<%= primary_key %>" => <%= primary_key %>, <%= inspect schema.singular %> => <%= schema.singular %>_params}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + + case <%= inspect context.alias %>.update_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + conn + |> put_flash(:info, "<%= schema.human_singular %> updated successfully.") + |> redirect(to: ~p"<%= scope_conn_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + + {:error, %Ecto.Changeset{} = changeset} -> + render(conn, :edit, <%= schema.singular %>: <%= schema.singular %>, changeset: changeset) + end + end + + def delete(conn, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + {:ok, _<%= schema.singular %>} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>) + + conn + |> put_flash(:info, "<%= schema.human_singular %> deleted successfully.") + |> redirect(to: 
~p"<%= scope_conn_route_prefix %><%= schema.route_prefix %>") + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs.eex new file mode 100644 index 0000000..721b708 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs.eex @@ -0,0 +1,90 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ControllerTest do + use <%= inspect context.web_module %>.ConnCase + + import <%= inspect context.module %>Fixtures + + @create_attrs <%= Mix.Phoenix.to_text schema.params.create %> + @update_attrs <%= Mix.Phoenix.to_text schema.params.update %> + @invalid_attrs <%= Mix.Phoenix.to_text (for {key, _} <- schema.params.create, into: %{}, do: {key, nil}) %><%= if scope do %> + + setup :<%= scope.test_setup_helper %><% end %> + + describe "index" do + test "lists all <%= schema.plural %>", %{conn: conn<%= test_context_scope %>} do + conn = get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + assert html_response(conn, 200) =~ "Listing <%= schema.human_plural %>" + end + end + + describe "new <%= schema.singular %>" do + test "renders form", %{conn: conn<%= test_context_scope %>} do + conn = get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/new") + assert html_response(conn, 200) =~ "New <%= schema.human_singular %>" + end + end + + describe "create <%= schema.singular %>" do + test "redirects to show when data is valid", %{conn: conn<%= test_context_scope %>} do + conn = post(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>", <%= schema.singular %>: @create_attrs) + + assert %{<%= primary_key %>: <%= primary_key %>} = redirected_params(conn) + assert redirected_to(conn) == ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= primary_key %>}" + + conn = get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= 
primary_key %>}") + assert html_response(conn, 200) =~ "<%= schema.human_singular %> #{<%= primary_key %>}" + end + + test "renders errors when data is invalid", %{conn: conn<%= test_context_scope %>} do + conn = post(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>", <%= schema.singular %>: @invalid_attrs) + assert html_response(conn, 200) =~ "New <%= schema.human_singular %>" + end + end + + describe "edit <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "renders form for editing chosen <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}/edit") + assert html_response(conn, 200) =~ "Edit <%= schema.human_singular %>" + end + end + + describe "update <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "redirects when data is valid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = put(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}", <%= schema.singular %>: @update_attrs) + assert redirected_to(conn) == ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}" + + conn = get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}")<%= if schema.string_attr do %> + assert html_response(conn, 200) =~ <%= inspect Mix.Phoenix.Schema.default_param(schema, :update) %><% else %> + assert html_response(conn, 200)<% end %> + end + + test "renders errors when data is invalid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = put(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}", <%= schema.singular %>: @invalid_attrs) + assert html_response(conn, 200) =~ "Edit <%= schema.human_singular 
%>" + end + end + + describe "delete <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "deletes chosen <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = delete(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + assert redirected_to(conn) == ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>" + + assert_error_sent 404, fn -> + get(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + end + end + end + +<%= if scope do %> defp create_<%= schema.singular %>(%{scope: scope}) do + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) +<% else %> defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() +<% end %> + %{<%= schema.singular %>: <%= schema.singular %>} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex.eex new file mode 100644 index 0000000..36f7ead --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex.eex @@ -0,0 +1,8 @@ + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + Edit <%= schema.human_singular %> {@<%= schema.singular %>.<%= primary_key %>} + <:subtitle>Use this form to manage <%= schema.singular %> records in your database. 
+ + + <.<%= schema.singular %>_form changeset={@changeset} action={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{@<%= schema.singular %>}"} return_to={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>"} /> + diff --git a/deps/phoenix/priv/templates/phx.gen.html/html.ex.eex b/deps/phoenix/priv/templates/phx.gen.html/html.ex.eex new file mode 100644 index 0000000..a8a190b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/html.ex.eex @@ -0,0 +1,17 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>HTML do + use <%= inspect context.web_module %>, :html + + embed_templates "<%= schema.singular %>_html/*" + + @doc """ + Renders a <%= schema.singular %> form. + + The form is defined in the template at + <%= schema.singular %>_html/<%= schema.singular %>_form.html.heex + """ + attr :changeset, Ecto.Changeset, required: true + attr :action, :string, required: true + attr :return_to, :string, default: nil + + def <%= schema.singular %>_form(assigns) +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/index.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.html/index.html.heex.eex new file mode 100644 index 0000000..842ec6b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/index.html.heex.eex @@ -0,0 +1,25 @@ + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + Listing <%= schema.human_plural %> + <:actions> + <.button href={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/new"}> + <.icon name="hero-plus" /> New <%= schema.human_singular %> + + + + + <.table id="<%= schema.plural %>" rows={@<%= schema.collection %>} row_click={&JS.navigate(~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{&1}")}><%= for {k, _} <- schema.attrs do %> + <:col :let={<%= schema.singular %>} label="<%= Phoenix.Naming.humanize(Atom.to_string(k)) %>">{<%= schema.singular %>.<%= k %>}<% end %> + <:action :let={<%= schema.singular 
%>}> +
+ <.link navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}"}>Show +
+ <.link navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}/edit"}>Edit + + <:action :let={<%= schema.singular %>}> + <.link href={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}"} method="delete" data-confirm="Are you sure?"> + Delete + + + +
diff --git a/deps/phoenix/priv/templates/phx.gen.html/new.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.html/new.html.heex.eex new file mode 100644 index 0000000..9dce36f --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/new.html.heex.eex @@ -0,0 +1,8 @@ + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + New <%= schema.human_singular %> + <:subtitle>Use this form to manage <%= schema.singular %> records in your database. + + + <.<%= schema.singular %>_form changeset={@changeset} action={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>"} return_to={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>"} /> + diff --git a/deps/phoenix/priv/templates/phx.gen.html/resource_form.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.html/resource_form.html.heex.eex new file mode 100644 index 0000000..c0f98e1 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/resource_form.html.heex.eex @@ -0,0 +1,7 @@ +<.form :let={f} for={@changeset} action={@action}> +<%= Mix.Tasks.Phx.Gen.Html.indent_inputs(inputs, 2) %> +
+ <.button variant="primary">Save <%= schema.human_singular %> + <.button :if={@return_to} href={@return_to}>Cancel +
+ diff --git a/deps/phoenix/priv/templates/phx.gen.html/show.html.heex.eex b/deps/phoenix/priv/templates/phx.gen.html/show.html.heex.eex new file mode 100644 index 0000000..87cf8db --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/show.html.heex.eex @@ -0,0 +1,18 @@ + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + <%= schema.human_singular %> {@<%= schema.singular %>.<%= primary_key %>} + <:subtitle>This is a <%= schema.singular %> record from your database. + <:actions> + <.button navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>"}> + <.icon name="hero-arrow-left" /> + + <.button variant="primary" navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{@<%= schema.singular %>}/edit?return_to=show"}> + <.icon name="hero-pencil-square" /> Edit <%= schema.singular %> + + + + + <.list><%= for {k, _} <- schema.attrs do %> + <:item title="<%= Phoenix.Naming.humanize(Atom.to_string(k)) %>">{@<%= schema.singular %>.<%= k %>}<% end %> + + diff --git a/deps/phoenix/priv/templates/phx.gen.json/changeset_json.ex.eex b/deps/phoenix/priv/templates/phx.gen.json/changeset_json.ex.eex new file mode 100644 index 0000000..89b1aa1 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/changeset_json.ex.eex @@ -0,0 +1,39 @@ +defmodule <%= inspect context.web_module %>.ChangesetJSON do + @doc """ + Renders changeset errors. + """<%= if core_components? do %> + def error(%{changeset: changeset}) do + # When encoded, the changeset returns its errors + # as a JSON object. So we just pass it forward. + %{errors: Ecto.Changeset.traverse_errors(changeset, &<%= inspect context.web_module %>.CoreComponents.translate_error/1)} + end<% else %> + def error(%{changeset: changeset}) do + # When encoded, the changeset returns its errors + # as a JSON object. So we just pass it forward. + %{errors: Ecto.Changeset.traverse_errors(changeset, &translate_error/1)} + end +<%= if gettext? 
do %> + defp translate_error({msg, opts}) do + # set by Ecto and indicates we should also apply plural rules. + if count = opts[:count] do + Gettext.dngettext(<%= inspect context.web_module %>.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(<%= inspect context.web_module %>.Gettext, "errors", msg, opts) + end + end +<% else %> + defp translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(<%= inspect context.web_module %>.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(<%= inspect context.web_module %>.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end<% end %><% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.json/controller.ex.eex new file mode 100644 index 0000000..ba07f57 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/controller.ex.eex @@ -0,0 +1,43 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Controller do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + action_fallback <%= inspect context.web_module %>.FallbackController + + def index(conn, _params) do + <%= schema.plural %> = <%= inspect context.alias %>.list_<%= schema.plural %>(<%= conn_scope %>) + render(conn, :index, <%= schema.plural %>: <%= schema.plural %>) + end + + def create(conn, %{<%= inspect schema.singular %> => <%= schema.singular %>_params}) do + with {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} <- <%= inspect context.alias %>.create_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>_params) do + conn + |> 
put_status(:created) + |> put_resp_header("location", ~p"<%= schema.api_route_prefix %><%= scope_conn_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + |> render(:show, <%= schema.singular %>: <%= schema.singular %>) + end + end + + def show(conn, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + render(conn, :show, <%= schema.singular %>: <%= schema.singular %>) + end + + def update(conn, %{"<%= primary_key %>" => <%= primary_key %>, <%= inspect schema.singular %> => <%= schema.singular %>_params}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + + with {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} <- <%= inspect context.alias %>.update_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>, <%= schema.singular %>_params) do + render(conn, :show, <%= schema.singular %>: <%= schema.singular %>) + end + end + + def delete(conn, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + + with {:ok, %<%= inspect schema.alias %>{}} <- <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>) do + send_resp(conn, :no_content, "") + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs.eex new file mode 100644 index 0000000..ecc0c7f --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs.eex @@ -0,0 +1,88 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ControllerTest do + use <%= inspect context.web_module %>.ConnCase + + import <%= 
inspect context.module %>Fixtures + alias <%= inspect schema.module %> + + @create_attrs %{ +<%= schema.params.create |> Enum.map(fn {key, val} -> " #{key}: #{inspect(val)}" end) |> Enum.join(",\n") %> + } + @update_attrs %{ +<%= schema.params.update |> Enum.map(fn {key, val} -> " #{key}: #{inspect(val)}" end) |> Enum.join(",\n") %> + } + @invalid_attrs <%= Mix.Phoenix.to_text for {key, _} <- schema.params.create, into: %{}, do: {key, nil} %><%= if scope do %> + + setup :<%= scope.test_setup_helper %><% end %> + + setup %{conn: conn} do + {:ok, conn: put_req_header(conn, "accept", "application/json")} + end + + describe "index" do + test "lists all <%= schema.plural %>", %{conn: conn<%= test_context_scope %>} do + conn = get(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>") + assert json_response(conn, 200)["data"] == [] + end + end + + describe "create <%= schema.singular %>" do + test "renders <%= schema.singular %> when data is valid", %{conn: conn<%= test_context_scope %>} do + conn = post(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>", <%= schema.singular %>: @create_attrs) + assert %{"<%= primary_key %>" => <%= primary_key %>} = json_response(conn, 201)["data"] + + conn = get(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= primary_key %>}") + + assert %{ + "<%= primary_key %>" => ^<%= primary_key %><%= for {key, val} <- schema.params.create |> Phoenix.json_library().encode!() |> Phoenix.json_library().decode!() do %>, + "<%= key %>" => <%= inspect(val) %><% end %> + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn<%= test_context_scope %>} do + conn = post(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>", <%= schema.singular %>: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + 
end + + describe "update <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "renders <%= schema.singular %> when data is valid", %{conn: conn, <%= schema.singular %>: %<%= inspect schema.alias %>{<%= primary_key %>: <%= primary_key %>} = <%= schema.singular %><%= test_context_scope %>} do + conn = put(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}", <%= schema.singular %>: @update_attrs) + assert %{"<%= primary_key %>" => ^<%= primary_key %>} = json_response(conn, 200)["data"] + + conn = get(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= primary_key %>}") + + assert %{ + "<%= primary_key %>" => ^<%= primary_key %><%= for {key, val} <- schema.params.update |> Phoenix.json_library().encode!() |> Phoenix.json_library().decode!() do %>, + "<%= key %>" => <%= inspect(val) %><% end %> + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = put(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}", <%= schema.singular %>: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + end + + describe "delete <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "deletes chosen <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + conn = delete(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + assert response(conn, 204) + + assert_error_sent 404, fn -> + get(conn, ~p"<%= schema.api_route_prefix %><%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + end + end + end + +<%= if scope do %> defp create_<%= 
schema.singular %>(%{scope: scope}) do + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) +<% else %> defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() +<% end %> + %{<%= schema.singular %>: <%= schema.singular %>} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex.eex b/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex.eex new file mode 100644 index 0000000..2de0863 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex.eex @@ -0,0 +1,24 @@ +defmodule <%= inspect context.web_module %>.FallbackController do + @moduledoc """ + Translates controller action results into valid `Plug.Conn` responses. + + See `Phoenix.Controller.action_fallback/1` for more details. + """ + use <%= inspect context.web_module %>, :controller + + <%= if schema.generate? do %># This clause handles errors returned by Ecto's insert/update/delete. + def call(conn, {:error, %Ecto.Changeset{} = changeset}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(json: <%= inspect context.web_module %>.ChangesetJSON) + |> render(:error, changeset: changeset) + end + + <% end %># This clause is an example of how to handle resources that cannot be found. + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(html: <%= inspect context.web_module %>.ErrorHTML, json: <%= inspect context.web_module %>.ErrorJSON) + |> render(:"404") + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/json.ex.eex b/deps/phoenix/priv/templates/phx.gen.json/json.ex.eex new file mode 100644 index 0000000..d71a684 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/json.ex.eex @@ -0,0 +1,23 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>JSON do + alias <%= inspect schema.module %> + + @doc """ + Renders a list of <%= schema.plural %>. 
+ """ + def index(%{<%= schema.plural %>: <%= schema.plural %>}) do + %{data: for(<%= schema.singular %> <- <%= schema.plural %>, do: data(<%= schema.singular %>))} + end + + @doc """ + Renders a single <%= schema.singular %>. + """ + def show(%{<%= schema.singular %>: <%= schema.singular %>}) do + %{data: data(<%= schema.singular %>)} + end + + defp data(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + %{ +<%= [{primary_key, :id} | schema.attrs] |> Enum.map(fn {k, _} -> " #{k}: #{schema.singular}.#{k}" end) |> Enum.join(",\n") %> + } + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/core_components.ex.eex b/deps/phoenix/priv/templates/phx.gen.live/core_components.ex.eex new file mode 100644 index 0000000..bb643fe --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/core_components.ex.eex @@ -0,0 +1,520 @@ +defmodule <%= @web_namespace %>.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as tables, forms, and + inputs. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The foundation for styling is Tailwind CSS, a utility-first CSS framework, + augmented with daisyUI, a Tailwind CSS plugin that provides UI components + and themes. Here are useful references: + + * [daisyUI](https://daisyui.com/docs/intro/) - a good place to get + started and see the available components. + + * [Tailwind CSS](https://tailwindcss.com) - the foundational framework + we build on. You will use it for layout, sizing, flexbox, grid, and + spacing. + + * [Heroicons](https://heroicons.com) - see `icon/1` for usage. + + * [Phoenix.Component](https://hexdocs.pm/phoenix_live_view/Phoenix.Component.html) - + the component system used by Phoenix. 
Some components, such as `<.link>` + and `<.form>`, are defined there. + + """ + use Phoenix.Component<%= if @gettext do %> + use Gettext, backend: <%= @web_namespace %>.Gettext<% end %><%= if @live do %> + + alias Phoenix.LiveView.JS<% end %> + + @doc """ + Renders flash notices. + + ## Examples + + <.flash kind={:info} flash={@flash} /> + <.flash + id="welcome-back" + kind={:info} + phx-mounted={show("#welcome-back") |> JS.remove_attribute("hidden")} + hidden + > + Welcome Back! + + """ + attr :id, :string, doc: "the optional id of flash container" + attr :flash, :map, default: %{}, doc: "the map of flash messages to display" + attr :title, :string, default: nil + attr :kind, :atom, values: [:info, :error], doc: "used for styling and flash lookup" + attr :rest, :global, doc: "the arbitrary HTML attributes to add to the flash container" + + slot :inner_block, doc: "the optional inner block that renders the flash message" + + def flash(assigns) do + assigns = assign_new(assigns, :id, fn -> "flash-#{assigns.kind}" end) + + ~H""" +
+ phx-click={JS.push("lv:clear-flash", value: %{key: @kind}) |> hide("##{@id}")}<% else %> + data-flash<% end %> + role="alert" + class="toast toast-top toast-end z-50" + {@rest} + > +
+ <.icon :if={@kind == :info} name="hero-information-circle" class="size-5 shrink-0" /> + <.icon :if={@kind == :error} name="hero-exclamation-circle" class="size-5 shrink-0" /> +
+

{@title}

+

{msg}

+
+
+ +
+
+ """ + end + + @doc """ + Renders a button with navigation support. + + ## Examples + + <.button>Send! + <.button phx-click="go" variant="primary">Send! + <.button navigate={~p"/"}>Home + """ + attr :rest, :global, include: ~w(href navigate patch method download name value disabled) + attr :class, :any + attr :variant, :string, values: ~w(primary) + slot :inner_block, required: true + + def button(%{rest: rest} = assigns) do + variants = %{"primary" => "btn-primary", nil => "btn-primary btn-soft"} + + assigns = + assign_new(assigns, :class, fn -> + ["btn", Map.fetch!(variants, assigns[:variant])] + end) + + if rest[:href] || rest[:navigate] || rest[:patch] do + ~H""" + <.link class={@class} {@rest}> + {render_slot(@inner_block)} + + """ + else + ~H""" + + """ + end + end + + @doc """ + Renders an input with label and error messages. + + A `Phoenix.HTML.FormField` may be passed as argument, + which is used to retrieve the input name, id, and values. + Otherwise all attributes may be passed explicitly. + + ## Types + + This function accepts all HTML input types, considering that: + + * You may also set `type="select"` to render a ` + """ + end + + def input(%{type: "checkbox"} = assigns) do + assigns = + assign_new(assigns, :checked, fn -> + Phoenix.HTML.Form.normalize_value("checkbox", assigns[:value]) + end) + + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # Helper used by inputs to generate form errors + defp error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle" class="size-5" /> + {render_slot(@inner_block)} +

+ """ + end + + @doc """ + Renders a header with title. + """ + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ {render_slot(@inner_block)} +

+

+ {render_slot(@subtitle)} +

+
+
{render_slot(@actions)}
+
+ """ + end + + @doc """ + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id">{user.id} + <:col :let={user} label="username">{user.username} + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" +
+ + + + + + + + + + + + +
{col[:label]} + <%= maybe_eex_gettext.("Actions", @gettext) %> +
+ {render_slot(col, @row_item.(row))} + +
+ <%%= for action <- @action do %> + {render_slot(action, @row_item.(row))} + <%% end %> +
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title">{@post.title} + <:item title="Views">{@post.views} + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
    +
  • +
    +
    {item.title}
    +
    {render_slot(item)}
    +
    +
  • +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in `assets/vendor/heroicons.js`. + + ## Examples + + <.icon name="hero-x-mark" /> + <.icon name="hero-arrow-path" class="ml-1 size-3 motion-safe:animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :any, default: "size-4" + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end<%= if @live do %> + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all ease-in duration-200", "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end<% end %> + + @doc """ + Translates an error message using gettext. + """<%= if @gettext do %> + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). 
+ if count = opts[:count] do + Gettext.dngettext(<%= @web_namespace %>.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(<%= @web_namespace %>.Gettext, "errors", msg, opts) + end + end<% else %> + def translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(<%= @web_namespace %>.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(<%= @web_namespace %>.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end<% end %> + + @doc """ + Translates the errors for a field from a keyword list of errors. + """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/form.ex.eex b/deps/phoenix/priv/templates/phx.gen.live/form.ex.eex new file mode 100644 index 0000000..6406031 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/form.ex.eex @@ -0,0 +1,98 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Form do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + @impl true + def render(assigns) do + ~H""" + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + {@page_title} + <:subtitle>Use this form to manage <%= schema.singular %> records in your database. + + + <.form for={@form} id="<%= schema.singular %>-form" phx-change="validate" phx-submit="save"> +<%= Mix.Tasks.Phx.Gen.Html.indent_inputs(inputs, 8) %> +
+ <.button phx-disable-with="Saving..." variant="primary">Save <%= schema.human_singular %> + <.button navigate={return_path(<%= assign_scope_prefix %>@return_to, @<%= schema.singular %>)}>Cancel +
+ +
+ """ + end + + @impl true + def mount(params, _session, socket) do + {:ok, + socket + |> assign(:return_to, return_to(params["return_to"])) + |> apply_action(socket.assigns.live_action, params)} + end + + defp return_to("show"), do: "show" + defp return_to(_), do: "index" + + defp apply_action(socket, :edit, %{"<%= primary_key %>" => <%= primary_key %>}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + + socket + |> assign(:page_title, "Edit <%= schema.human_singular %>") + |> assign(:<%= schema.singular %>, <%= schema.singular %>) + |> assign(:form, to_form(<%= inspect context.alias %>.change_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>))) + end + + defp apply_action(socket, :new, _params) do + <%= schema.singular %> = %<%= inspect schema.alias %>{<%= if scope do %><%= scope.schema_key %>: <%= socket_scope %>.<%= Enum.join(scope.access_path, ".") %><% end %>} + + socket + |> assign(:page_title, "New <%= schema.human_singular %>") + |> assign(:<%= schema.singular %>, <%= schema.singular %>) + |> assign(:form, to_form(<%= inspect context.alias %>.change_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>))) + end + + @impl true + def handle_event("validate", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= context_scope_prefix %>socket.assigns.<%= schema.singular %>, <%= schema.singular %>_params) + {:noreply, assign(socket, form: to_form(changeset, action: :validate))} + end + + def handle_event("save", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + save_<%= schema.singular %>(socket, socket.assigns.live_action, <%= schema.singular %>_params) + end + + defp save_<%= schema.singular %>(socket, :edit, <%= schema.singular %>_params) do + case <%= inspect context.alias %>.update_<%= schema.singular %>(<%= 
context_scope_prefix %>socket.assigns.<%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + {:noreply, + socket + |> put_flash(:info, "<%= schema.human_singular %> updated successfully") + <%= if scope do %>|> push_navigate( + to: return_path(<%= context_scope_prefix %>socket.assigns.return_to, <%= schema.singular %>) + )}<% else %>|> push_navigate(to: return_path(socket.assigns.return_to, <%= schema.singular %>))}<% end %> + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign(socket, form: to_form(changeset))} + end + end + + defp save_<%= schema.singular %>(socket, :new, <%= schema.singular %>_params) do + case <%= inspect context.alias %>.create_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + {:noreply, + socket + |> put_flash(:info, "<%= schema.human_singular %> created successfully") + <%= if scope do %>|> push_navigate( + to: return_path(<%= context_scope_prefix %>socket.assigns.return_to, <%= schema.singular %>) + )}<% else %>|> push_navigate(to: return_path(socket.assigns.return_to, <%= schema.singular %>))}<% end %> + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign(socket, form: to_form(changeset))} + end + end + + defp return_path(<%= scope_param_prefix %>"index", _<%= schema.singular %>), do: ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>" + defp return_path(<%= scope_param_prefix %>"show", <%= schema.singular %>), do: ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}" +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/index.ex.eex b/deps/phoenix/priv/templates/phx.gen.live/index.ex.eex new file mode 100644 index 0000000..2548459 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/index.ex.eex @@ -0,0 +1,74 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Index do + use <%= 
inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + + @impl true + def render(assigns) do + ~H""" + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + Listing <%= schema.human_plural %> + <:actions> + <.button variant="primary" navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/new"}> + <.icon name="hero-plus" /> New <%= schema.human_singular %> + + + + + <.table + id="<%= schema.plural %>" + rows={@streams.<%= schema.collection %>} + row_click={fn {_id, <%= schema.singular %>} -> JS.navigate(~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") end} + ><%= for {k, _} <- schema.attrs do %> + <:col :let={{_id, <%= schema.singular %>}} label="<%= Phoenix.Naming.humanize(Atom.to_string(k)) %>">{<%= schema.singular %>.<%= k %>}<% end %> + <:action :let={{_id, <%= schema.singular %>}}> +
+ <.link navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}"}>Show +
+ <.link navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}/edit"}>Edit + + <:action :let={{id, <%= schema.singular %>}}> + <.link + phx-click={JS.push("delete", value: %{<%= primary_key %>: <%= schema.singular %>.<%= primary_key %>}) |> hide("##{id}")} + data-confirm="Are you sure?" + > + Delete + + + +
+ """ + end + + @impl true + def mount(_params, _session, socket) do<%= if scope do %> + if connected?(socket) do + <%= inspect context.alias %>.subscribe_<%= schema.plural %>(<%= socket_scope %>) + end +<% end %> + {:ok, + socket + |> assign(:page_title, "Listing <%= schema.human_plural %>")<%= if primary_key != :id do %> + |> stream_configure(:<%= schema.collection %>, dom_id: &"<%= schema.collection %>-#{&1.<%= primary_key %>}")<% end %> + |> stream(:<%= schema.collection %>, list_<%= schema.plural %>(<%= socket_scope %>))} + end + + @impl true + def handle_event("delete", %{"<%= primary_key %>" => <%= primary_key %>}, socket) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>) + {:ok, _} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= context_scope_prefix %><%= schema.singular %>) + + {:noreply, stream_delete(socket, :<%= schema.collection %>, <%= schema.singular %>)} + end<%= if scope do %> + + @impl true + def handle_info({type, %<%= inspect schema.module %>{}}, socket) + when type in [:created, :updated, :deleted] do + {:noreply, stream(socket, :<%= schema.collection %>, list_<%= schema.plural %>(<%= socket_scope %>), reset: true)} + end<% end %> + + defp list_<%= schema.plural %>(<%= scope && scope.assign_key %>) do + <%= inspect context.alias %>.list_<%= schema.plural %>(<%= scope && scope.assign_key %>) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/live_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.live/live_test.exs.eex new file mode 100644 index 0000000..3aba63b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/live_test.exs.eex @@ -0,0 +1,128 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>LiveTest do + use <%= inspect context.web_module %>.ConnCase + + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + @create_attrs <%= 
Mix.Phoenix.to_text for {key, value} <- schema.params.create, into: %{}, do: {key, Mix.Phoenix.Schema.live_form_value(value)} %> + @update_attrs <%= Mix.Phoenix.to_text for {key, value} <- schema.params.update, into: %{}, do: {key, Mix.Phoenix.Schema.live_form_value(value)} %> + @invalid_attrs <%= Mix.Phoenix.to_text for {key, value} <- schema.params.create, into: %{}, do: {key, value |> Mix.Phoenix.Schema.live_form_value() |> Mix.Phoenix.Schema.invalid_form_value()} %><%= if scope do %> + + setup :<%= scope.test_setup_helper %> + + defp create_<%= schema.singular %>(%{scope: scope}) do + <%= schema.singular %> = <%= schema.singular %>_fixture(scope) +<% else %> + defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() +<% end %> + %{<%= schema.singular %>: <%= schema.singular %>} + end + + describe "Index" do + setup [:create_<%= schema.singular %>] + + test "lists all <%= schema.plural %>", <%= if schema.string_attr do %>%{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>}<% else %>%{conn: conn<%= test_context_scope %>}<% end %> do + {:ok, _index_live, html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + assert html =~ "Listing <%= schema.human_plural %>"<%= if schema.string_attr do %> + assert html =~ <%= schema.singular %>.<%= schema.string_attr %><% end %> + end + + test "saves new <%= schema.singular %>", %{conn: conn<%= test_context_scope %>} do + {:ok, index_live, _html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + assert {:ok, form_live, _} = + index_live + |> element("a", "New <%= schema.human_singular %>") + |> render_click() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/new") + + assert render(form_live) =~ "New <%= schema.human_singular %>" + + assert form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ 
"<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + assert {:ok, index_live, _html} = + form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @create_attrs) + |> render_submit() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + html = render(index_live) + assert html =~ "<%= schema.human_singular %> created successfully"<%= if schema.string_attr do %> + assert html =~ "some <%= schema.string_attr %>"<% end %> + end + + test "updates <%= schema.singular %> in listing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + {:ok, index_live, _html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + assert {:ok, form_live, _html} = + index_live + |> element("#<%= schema.collection %>-#{<%= schema.singular %>.<%= primary_key %>} a", "Edit") + |> render_click() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}/edit") + + assert render(form_live) =~ "Edit <%= schema.human_singular %>" + + assert form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ "<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + assert {:ok, index_live, _html} = + form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @update_attrs) + |> render_submit() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + html = render(index_live) + assert html =~ "<%= schema.human_singular %> updated successfully"<%= if schema.string_attr do %> + assert html =~ "some updated <%= schema.string_attr %>"<% end %> + end + + test "deletes <%= schema.singular %> in listing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + {:ok, index_live, _html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>") + + assert 
index_live |> element("#<%= schema.collection %>-#{<%= schema.singular %>.<%= primary_key %>} a", "Delete") |> render_click() + refute has_element?(index_live, "#<%= schema.plural %>-#{<%= schema.singular %>.<%= primary_key %>}") + end + end + + describe "Show" do + setup [:create_<%= schema.singular %>] + + test "displays <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + {:ok, _show_live, html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + + assert html =~ "Show <%= schema.human_singular %>"<%= if schema.string_attr do %> + assert html =~ <%= schema.singular %>.<%= schema.string_attr %><% end %> + end + + test "updates <%= schema.singular %> and returns to show", %{conn: conn, <%= schema.singular %>: <%= schema.singular %><%= test_context_scope %>} do + {:ok, show_live, _html} = live(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + + assert {:ok, form_live, _} = + show_live + |> element("a", "Edit") + |> render_click() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}/edit?return_to=show") + + assert render(form_live) =~ "Edit <%= schema.human_singular %>" + + assert form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ "<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + assert {:ok, show_live, _html} = + form_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @update_attrs) + |> render_submit() + |> follow_redirect(conn, ~p"<%= scope_param_route_prefix %><%= schema.route_prefix %>/#{<%= schema.singular %>}") + + html = render(show_live) + assert html =~ "<%= schema.human_singular %> updated successfully"<%= if schema.string_attr do %> + assert html =~ "some updated <%= schema.string_attr %>"<% end %> + end + end +end diff --git 
a/deps/phoenix/priv/templates/phx.gen.live/show.ex.eex b/deps/phoenix/priv/templates/phx.gen.live/show.ex.eex new file mode 100644 index 0000000..bf9e9e8 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/show.ex.eex @@ -0,0 +1,64 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Show do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + + @impl true + def render(assigns) do + ~H""" + <%= scope.assign_key %>={@<%= scope.assign_key %>}<% end %>> + <.header> + <%= schema.human_singular %> {@<%= schema.singular %>.<%= primary_key %>} + <:subtitle>This is a <%= schema.singular %> record from your database. + <:actions> + <.button navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>"}> + <.icon name="hero-arrow-left" /> + + <.button variant="primary" navigate={~p"<%= scope_assign_route_prefix %><%= schema.route_prefix %>/#{@<%= schema.singular %>}/edit?return_to=show"}> + <.icon name="hero-pencil-square" /> Edit <%= schema.singular %> + + + + + <.list><%= for {k, _} <- schema.attrs do %> + <:item title="<%= Phoenix.Naming.humanize(Atom.to_string(k)) %>">{@<%= schema.singular %>.<%= k %>}<% end %> + + + """ + end + + @impl true + def mount(%{"<%= primary_key %>" => <%= primary_key %>}, _session, socket) do<%= if scope do %> + if connected?(socket) do + <%= inspect context.alias %>.subscribe_<%= schema.plural %>(<%= socket_scope %>) + end +<% end %> + {:ok, + socket + |> assign(:page_title, "Show <%= schema.human_singular %>") + |> assign(:<%= schema.singular %>, <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= context_scope_prefix %><%= primary_key %>))} + end<%= if scope do %> + + @impl true + def handle_info( + {:updated, %<%= inspect schema.module %>{<%= primary_key %>: <%= primary_key %>} = <%= schema.singular %>}, + %{assigns: %{<%= schema.singular %>: %{<%= primary_key %>: <%= primary_key %>}}} = socket + ) do + 
{:noreply, assign(socket, :<%= schema.singular %>, <%= schema.singular %>)} + end + + def handle_info( + {:deleted, %<%= inspect schema.module %>{<%= primary_key %>: <%= primary_key %>}}, + %{assigns: %{<%= schema.singular %>: %{<%= primary_key %>: <%= primary_key %>}}} = socket + ) do + {:noreply, + socket + |> put_flash(:error, "The current <%= schema.singular %> was deleted.") + |> push_navigate(to: ~p"<%= scope_socket_route_prefix %><%= schema.route_prefix %>")} + end + + def handle_info({type, %<%= inspect schema.module %>{}}, socket) + when type in [:created, :updated, :deleted] do + {:noreply, socket} + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex.eex b/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex.eex new file mode 100644 index 0000000..7a2cb2a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex.eex @@ -0,0 +1,14 @@ +defmodule <%= inspect context.module %> do + import Swoosh.Email + alias <%= inspect context.base_module %>.Mailer<%= for message <- notifier_messages do %> + + def deliver_<%= message %>(%{name: name, email: email}) do + new() + |> to({name, email}) + |> from({"Phoenix Team", "team@example.com"}) + |> subject("Welcome to Phoenix, #{name}!") + |> html_body("

Hello, #{name}

") + |> text_body("Hello, #{name}\n") + |> Mailer.deliver() + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs.eex b/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs.eex new file mode 100644 index 0000000..74f7bf4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs.eex @@ -0,0 +1,18 @@ +defmodule <%= inspect context.module %>Test do + use ExUnit.Case, async: true + import Swoosh.TestAssertions + + alias <%= inspect context.module %><%= for message <- notifier_messages do %> + + test "deliver_<%= message %>/1" do + user = %{name: "Alice", email: "alice@example.com"} + + <%= inflections[:alias] %>.deliver_<%= message %>(user) + + assert_email_sent( + subject: "Welcome to Phoenix, Alice!", + to: {"Alice", "alice@example.com"}, + text_body: ~r/Hello, Alice/ + ) + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.presence/presence.ex.eex b/deps/phoenix/priv/templates/phx.gen.presence/presence.ex.eex new file mode 100644 index 0000000..de5d98d --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.presence/presence.ex.eex @@ -0,0 +1,11 @@ +defmodule <%= module %> do + @moduledoc """ + Provides presence tracking to channels and processes. + + See the [`Phoenix.Presence`](https://hexdocs.pm/phoenix/Phoenix.Presence.html) + docs for more details. + """ + use Phoenix.Presence, + otp_app: <%= inspect otp_app %>, + pubsub_server: <%= inspect pubsub_server %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex b/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex new file mode 100644 index 0000000..40a88ee --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex @@ -0,0 +1,101 @@ +# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian +# instead of Alpine to avoid DNS resolution issues in production. 
+# +# https://hub.docker.com/r/hexpm/elixir/tags?name=ubuntu +# https://hub.docker.com/_/ubuntu/tags +# +# This file is based on these images: +# +# - https://hub.docker.com/r/hexpm/elixir/tags - for the build image +# - https://hub.docker.com/_/debian/tags?name=<%= debian %>-<%= debian_vsn %>-slim - for the release image +# - https://pkgs.org/ - resource for finding needed packages +# - Ex: docker.io/hexpm/elixir:<%= elixir_vsn %>-erlang-<%= otp_vsn %>-debian-<%= debian %>-<%= debian_vsn %>-slim +# +ARG ELIXIR_VERSION=<%= elixir_vsn %> +ARG OTP_VERSION=<%= otp_vsn %> +ARG DEBIAN_VERSION=<%= debian %>-<%= debian_vsn %>-slim + +ARG BUILDER_IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" +ARG RUNNER_IMAGE="docker.io/debian:${DEBIAN_VERSION}" + +FROM ${BUILDER_IMAGE} AS builder + +# install build dependencies +RUN apt-get update \ + && apt-get install -y --no-install-recommends build-essential git \ + && rm -rf /var/lib/apt/lists/* + +# prepare build dir +WORKDIR /app + +# install hex + rebar +RUN mix local.hex --force \ + && mix local.rebar --force + +# set build ENV +ENV MIX_ENV="prod" + +# install mix dependencies +COPY mix.exs mix.lock ./ +RUN mix deps.get --only $MIX_ENV +RUN mkdir config + +# copy compile-time config files before we compile dependencies +# to ensure any relevant config change will trigger the dependencies +# to be re-compiled. +COPY config/config.exs config/${MIX_ENV}.exs config/ +RUN mix deps.compile +<%= if assets_dir_exists? do %> +RUN mix assets.setup +<% end %> +COPY priv priv + +COPY lib lib + +# Compile the release +RUN mix compile +<%= if assets_dir_exists? 
do %> +COPY assets assets + +# compile assets +RUN mix assets.deploy +<% end %> +# Changes to config/runtime.exs don't require recompiling the code +COPY config/runtime.exs config/ + +COPY rel rel +RUN mix release + +# start a new build stage so that the final image will only contain +# the compiled release and other runtime necessities +FROM ${RUNNER_IMAGE} AS final + +RUN apt-get update \ + && apt-get install -y --no-install-recommends libstdc++6 openssl libncurses6 locales ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Set the locale +RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen \ + && locale-gen + +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +WORKDIR "/app" +RUN chown nobody /app + +# set runner ENV +ENV MIX_ENV="prod" + +# Only copy the final release from the build stage +COPY --from=builder --chown=nobody:root /app/_build/${MIX_ENV}/rel/<%= otp_app %> ./ + +USER nobody + +# If using an environment that doesn't automatically reap zombie processes, it is +# advised to add an init process such as tini via `apt-get install` +# above and adding an entrypoint. See https://github.com/krallin/tini for details +# ENTRYPOINT ["/tini", "--"] + +CMD ["/app/bin/server"] diff --git a/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex b/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex new file mode 100644 index 0000000..5d2adff --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex @@ -0,0 +1,46 @@ +# This file excludes paths from the Docker build context. +# +# By default, Docker's build context includes all files (and folders) in the +# current directory. Even if a file isn't copied into the container it is still sent to +# the Docker daemon. +# +# There are multiple reasons to exclude files from the build context: +# +# 1. Prevent nested folders from being copied into the container (ex: exclude +# /assets/node_modules when copying /assets) +# 2. 
Reduce the size of the build context and improve build time (ex. /build, /deps, /doc) +# 3. Avoid sending files containing sensitive information +# +# More information on using .dockerignore is available here: +# https://docs.docker.com/engine/reference/builder/#dockerignore-file + +.dockerignore + +# Ignore git, but keep git HEAD and refs to access current commit hash if needed: +# +# $ cat .git/HEAD | awk '{print ".git/"$2}' | xargs cat +# d0b8727759e1e0e7aa3d41707d12376e373d5ecc +.git +!.git/HEAD +!.git/refs + +# Common development/test artifacts +/cover/ +/doc/ +/test/ +/tmp/ +.elixir_ls + +# Mix artifacts +/_build/ +/deps/ +*.ez + +# Generated on crash by the VM +erl_crash.dump + +# Static artifacts - These should be fetched and built inside the Docker image +# https://hexdocs.pm/phoenix/Mix.Tasks.Phx.Gen.Release.html#module-docker +/assets/node_modules/ +/priv/static/assets/ +/priv/static/cache_manifest.json diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex new file mode 100644 index 0000000..8e2503b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex @@ -0,0 +1 @@ +call "%~dp0\<%= otp_app %>" eval <%= app_namespace %>.Release.migrate diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex new file mode 100644 index 0000000..3103c98 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex @@ -0,0 +1,5 @@ +#!/bin/sh +set -eu + +cd -P -- "$(dirname -- "$0")" +exec ./<%= otp_app %> eval <%= app_namespace %>.Release.migrate diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex new file mode 100644 index 0000000..d6216f6 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex @@ -0,0 +1,2 @@ +set PHX_SERVER=true +call 
"%~dp0\<%= otp_app %>" start diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex new file mode 100644 index 0000000..11638f1 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex @@ -0,0 +1,5 @@ +#!/bin/sh +set -eu + +cd -P -- "$(dirname -- "$0")" +PHX_SERVER=true exec ./<%= otp_app %> start diff --git a/deps/phoenix/priv/templates/phx.gen.release/release.ex.eex b/deps/phoenix/priv/templates/phx.gen.release/release.ex.eex new file mode 100644 index 0000000..86d00a1 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/release.ex.eex @@ -0,0 +1,30 @@ +defmodule <%= app_namespace %>.Release do + @moduledoc """ + Used for executing DB release tasks when run in production without Mix + installed. + """ + @app :<%= otp_app %> + + def migrate do + load_app() + + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + load_app() + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.fetch_env!(@app, :ecto_repos) + end + + defp load_app do + # Many platforms require SSL when connecting to the database + Application.ensure_all_started(:ssl) + Application.ensure_loaded(@app) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.schema/migration.exs.eex b/deps/phoenix/priv/templates/phx.gen.schema/migration.exs.eex new file mode 100644 index 0000000..0f3e332 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.schema/migration.exs.eex @@ -0,0 +1,19 @@ +defmodule <%= inspect schema.repo %>.Migrations.Create<%= Macro.camelize(schema.table) %> do + use <%= inspect schema.migration_module %> + + def change do + create table(:<%= schema.table %><%= if schema.binary_id || schema.opts[:primary_key] do %>, primary_key: false<% end %><%= if schema.prefix do %>, prefix: :<%= 
schema.prefix %><% end %>) do +<%= if schema.binary_id do %> add :<%= primary_key %>, :binary_id, primary_key: true +<% else %><%= if schema.opts[:primary_key] do %> add :<%= schema.opts[:primary_key] %>, :id, primary_key: true +<% end %><% end %><%= for {k, v} <- schema.attrs do %> add <%= inspect k %>, <%= inspect Mix.Phoenix.Schema.type_for_migration(v) %><%= schema.migration_defaults[k] %> +<% end %><%= for {_, i, _, s} <- schema.assocs do %> add <%= inspect(i) %>, references(<%= inspect(s) %>, on_delete: :nothing<%= if schema.binary_id do %>, type: :binary_id<% end %>) +<% end %><%= if scope do %> add :<%= scope.schema_key %>, <%= if scope.schema_table do %>references(:<%= scope.schema_table %>, <%= if scope.schema_migration_type do %>type: <%= inspect scope.schema_migration_type %>, <% end %>on_delete: :delete_all)<% else %><%= inspect(scope.schema_migration_type || scope.schema_type) %><% end %> +<% end %> + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}" %>) + end<%= if scope do %> + + create index(:<%= schema.table %>, [:<%= scope.schema_key %>])<% end %> +<%= if Enum.any?(schema.indexes) do %><%= for index <- schema.indexes do %> + <%= index %><% end %> +<% end %> end +end diff --git a/deps/phoenix/priv/templates/phx.gen.schema/schema.ex.eex b/deps/phoenix/priv/templates/phx.gen.schema/schema.ex.eex new file mode 100644 index 0000000..f1aa869 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.schema/schema.ex.eex @@ -0,0 +1,26 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset +<%= if schema.prefix do %> + @schema_prefix :<%= schema.prefix %><% end %><%= if schema.opts[:primary_key] do %> + @derive {Phoenix.Param, key: :<%= schema.opts[:primary_key] %>}<% end %><%= if schema.binary_id do %> + @primary_key {:<%= primary_key %>, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% else %><%= if schema.opts[:primary_key] do %> + @primary_key {:<%= 
schema.opts[:primary_key] %>, :id, autogenerate: true}<% end %><% end %> + schema <%= inspect schema.table %> do +<%= Mix.Phoenix.Schema.format_fields_for_schema(schema) %> +<%= for {_, k, _, _} <- schema.assocs do %> field <%= inspect k %>, <%= if schema.binary_id do %>:binary_id<% else %>:id<% end %> +<% end %><%= if scope do %> field :<%= scope.schema_key %>, <%= inspect scope.schema_type %> +<% end %> + timestamps(<%= if schema.timestamp_type != :naive_datetime, do: "type: #{inspect schema.timestamp_type}" %>) + end + + @doc false + def changeset(<%= schema.singular %>, attrs<%= if scope do %>, <%= scope.name %>_scope<% end %>) do + <%= schema.singular %> + |> cast(attrs, [<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + |> validate_required([<%= Enum.map_join(Mix.Phoenix.Schema.required_fields(schema), ", ", &inspect(elem(&1, 0))) %>]) +<%= for k <- schema.uniques do %> |> unique_constraint(<%= inspect k %>) +<% end %><%= if scope do %> |> put_change(:<%= scope.schema_key %>, <%= scope.name %>_scope.<%= Enum.join(scope.access_path, ".") %>) +<% end %> end +end diff --git a/deps/phoenix/priv/templates/phx.gen.socket/socket.ex.eex b/deps/phoenix/priv/templates/phx.gen.socket/socket.ex.eex new file mode 100644 index 0000000..772da12 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.socket/socket.ex.eex @@ -0,0 +1,57 @@ +defmodule <%= module %>Socket do + use Phoenix.Socket + + # A Socket handler + # + # It's possible to control the websocket connection and + # assign values that can be accessed by your channel topics. 
+ + ## Channels<%= if existing_channel do %> + + channel "<%= existing_channel[:singular] %>:*", <%= existing_channel[:module] %>Channel +<% else %> + # Uncomment the following line to define a "room:*" topic + # pointing to the `<%= web_module %>.RoomChannel`: + # + # channel "room:*", <%= web_module %>.RoomChannel + # + # To create a channel file, use the mix task: + # + # mix phx.gen.channel Room + # + # See the [`Channels guide`](https://hexdocs.pm/phoenix/channels.html) + # for further details. + +<% end %> + # Socket params are passed from the client and can + # be used to verify and authenticate a user. After + # verification, you can put default assigns into + # the socket that will be set for all channels, ie + # + # {:ok, assign(socket, :user_id, verified_user_id)} + # + # To deny connection, return `:error` or `{:error, term}`. To control the + # response the client receives in that case, [define an error handler in the + # websocket + # configuration](https://hexdocs.pm/phoenix/Phoenix.Endpoint.html#socket/3-websocket-configuration). + # + # See `Phoenix.Token` documentation for examples in + # performing token verification on connect. + @impl true + def connect(_params, socket, _connect_info) do + {:ok, socket} + end + + # Socket IDs are topics that allow you to identify all sockets for a given user: + # + # def id(socket), do: "user_socket:#{socket.assigns.user_id}" + # + # Would allow you to broadcast a "disconnect" event and terminate + # all active sockets and channels for a given user: + # + # <%= endpoint_module %>.broadcast("user_socket:#{user.id}", "disconnect", %{}) + # + # Returning `nil` makes this socket anonymous. 
+ @impl true + def id(_socket), do: nil +end diff --git a/deps/phoenix/priv/templates/phx.gen.socket/socket.js.eex b/deps/phoenix/priv/templates/phx.gen.socket/socket.js.eex new file mode 100644 index 0000000..236d998 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.socket/socket.js.eex @@ -0,0 +1,23 @@ +// NOTE: The contents of this file will only be executed if +// you uncomment its entry in "assets/js/app.js". + +// Bring in Phoenix channels client library: +import {Socket} from "phoenix" + +// And connect to the path in "<%= web_prefix %>/endpoint.ex". We pass the +// token for authentication. +// +// Read the [`Using Token Authentication`](https://hexdocs.pm/phoenix/channels.html#using-token-authentication) +// section to see how the token should be used. +let socket = new Socket("/socket", {authToken: window.userToken}) +socket.connect() + +// Now that you are connected, you can join channels with a topic. +// Let's assume you have a channel with a topic named `room` and the +// subtopic is its id - in this case 42: +let channel = socket.channel("room:42", {}) +channel.join() + .receive("ok", resp => { console.log("Joined successfully", resp) }) + .receive("error", resp => { console.log("Unable to join", resp) }) + +export default socket diff --git a/deps/phoenix/usage-rules/ecto.md b/deps/phoenix/usage-rules/ecto.md new file mode 100644 index 0000000..3d7fd39 --- /dev/null +++ b/deps/phoenix/usage-rules/ecto.md @@ -0,0 +1,9 @@ +## Ecto Guidelines + +- **Always** preload Ecto associations in queries when they'll be accessed in templates, ie a message that needs to reference the `message.user.email` +- Remember `import Ecto.Query` and other supporting modules when you write `seeds.exs` +- `Ecto.Schema` fields always use the `:string` type, even for `:text`, columns, ie: `field :name, :string` +- `Ecto.Changeset.validate_number/2` **DOES NOT SUPPORT the `:allow_nil` option**. 
By default, Ecto validations only run if a change for the given field exists and the change value is not nil, so such as option is never needed +- You **must** use `Ecto.Changeset.get_field(changeset, :field)` to access changeset fields +- Fields which are set programmatically, such as `user_id`, must not be listed in `cast` calls or similar for security purposes. Instead they must be explicitly set when creating the struct +- **Always** invoke `mix ecto.gen.migration migration_name_using_underscores` when generating migration files, so the correct timestamp and conventions are applied diff --git a/deps/phoenix/usage-rules/elixir.md b/deps/phoenix/usage-rules/elixir.md new file mode 100644 index 0000000..63af87f --- /dev/null +++ b/deps/phoenix/usage-rules/elixir.md @@ -0,0 +1,55 @@ +## Elixir guidelines + +- Elixir lists **do not support index based access via the access syntax** + + **Never do this (invalid)**: + + i = 0 + mylist = ["blue", "green"] + mylist[i] + + Instead, **always** use `Enum.at`, pattern matching, or `List` for index based list access, ie: + + i = 0 + mylist = ["blue", "green"] + Enum.at(mylist, i) + +- Elixir variables are immutable, but can be rebound, so for block expressions like `if`, `case`, `cond`, etc + you *must* bind the result of the expression to a variable if you want to use it and you CANNOT rebind the result inside the expression, ie: + + # INVALID: we are rebinding inside the `if` and the result never gets assigned + if connected?(socket) do + socket = assign(socket, :val, val) + end + + # VALID: we rebind the result of the `if` to a new variable + socket = + if connected?(socket) do + assign(socket, :val, val) + end + +- **Never** nest multiple modules in the same file as it can cause cyclic dependencies and compilation errors +- **Never** use map access syntax (`changeset[:field]`) on structs as they do not implement the Access behaviour by default. 
For regular structs, you **must** access the fields directly, such as `my_struct.field` or use higher level APIs that are available on the struct if they exist, `Ecto.Changeset.get_field/2` for changesets +- Elixir's standard library has everything necessary for date and time manipulation. Familiarize yourself with the common `Time`, `Date`, `DateTime`, and `Calendar` interfaces by accessing their documentation as necessary. **Never** install additional dependencies unless asked or for date/time parsing (which you can use the `date_time_parser` package) +- Don't use `String.to_atom/1` on user input (memory leak risk) +- Predicate function names should not start with `is_` and should end in a question mark. Names like `is_thing` should be reserved for guards +- Elixir's builtin OTP primitives like `DynamicSupervisor` and `Registry`, require names in the child spec, such as `{DynamicSupervisor, name: MyApp.MyDynamicSup}`, then you can use `DynamicSupervisor.start_child(MyApp.MyDynamicSup, child_spec)` +- Use `Task.async_stream(collection, callback, options)` for concurrent enumeration with back-pressure. The majority of times you will want to pass `timeout: :infinity` as option + +## Mix guidelines + +- Read the docs and options before using tasks (by using `mix help task_name`) +- To debug test failures, run tests in a specific file with `mix test test/my_test.exs` or run all previously failed tests with `mix test --failed` +- `mix deps.clean --all` is **almost never needed**. 
**Avoid** using it unless you have good reason + +## Test guidelines + +- **Always use `start_supervised!/1`** to start processes in tests as it guarantees cleanup between tests +- **Avoid** `Process.sleep/1` and `Process.alive?/1` in tests + - Instead of sleeping to wait for a process to finish, **always** use `Process.monitor/1` and assert on the DOWN message: + + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, :process, ^pid, :normal} + + - Instead of sleeping to synchronize before the next call, **always** use `_ = :sys.get_state/1` to ensure the process has handled prior messages + diff --git a/deps/phoenix/usage-rules/html.md b/deps/phoenix/usage-rules/html.md new file mode 100644 index 0000000..e47e796 --- /dev/null +++ b/deps/phoenix/usage-rules/html.md @@ -0,0 +1,76 @@ +## Phoenix HTML guidelines + +- Phoenix templates **always** use `~H` or .html.heex files (known as HEEx), **never** use `~E` +- **Always** use the imported `Phoenix.Component.form/1` and `Phoenix.Component.inputs_for/1` function to build forms. **Never** use `Phoenix.HTML.form_for` or `Phoenix.HTML.inputs_for` as they are outdated +- When building forms **always** use the already imported `Phoenix.Component.to_form/2` (`assign(socket, form: to_form(...))` and `<.form for={@form} id="msg-form">`), then access those forms in the template via `@form[:field]` +- **Always** add unique DOM IDs to key elements (like forms, buttons, etc) when writing templates, these IDs can later be used in tests (`<.form for={@form} id="product-form">`) +- For "app wide" template imports, you can import/alias into the `my_app_web.ex`'s `html_helpers` block, so they will be available to all LiveViews, LiveComponent's, and all modules that do `use MyAppWeb, :html` (replace "my_app" by the actual app name) + +- Elixir supports `if/else` but **does NOT support `if/else if` or `if/elsif`**. **Never use `else if` or `elseif` in Elixir**, **always** use `cond` or `case` for multiple conditionals. 
+ + **Never do this (invalid)**: + + <%= if condition do %> + ... + <% else if other_condition %> + ... + <% end %> + + Instead **always** do this: + + <%= cond do %> + <% condition -> %> + ... + <% condition2 -> %> + ... + <% true -> %> + ... + <% end %> + +- HEEx require special tag annotation if you want to insert literal curly's like `{` or `}`. If you want to show a textual code snippet on the page in a `
` or `` block you *must* annotate the parent tag with `phx-no-curly-interpolation`:
+
+      
+        let obj = {key: "val"}
+      
+
+  Within `phx-no-curly-interpolation` annotated tags, you can use `{` and `}` without escaping them, and dynamic Elixir expressions can still be used with `<%= ... %>` syntax
+
+- HEEx class attrs support lists, but you must **always** use list `[...]` syntax. You can use the class list syntax to conditionally add classes, **always do this for multiple class values**:
+
+      Text
+
+  and **always** wrap `if`'s inside `{...}` expressions with parens, like done above (`if(@other_condition, do: "...", else: "...")`)
+
+  and **never** do this, since it's invalid (note the missing `[` and `]`):
+
+       ...
+      => Raises compile syntax error on invalid HEEx attr syntax
+
+- **Never** use `<% Enum.each %>` or non-for comprehensions for generating template content, instead **always** use `<%= for item <- @collection do %>`
+- HEEx HTML comments use `<%!-- comment --%>`. **Always** use the HEEx HTML comment syntax for template comments (`<%!-- comment --%>`)
+- HEEx allows interpolation via `{...}` and `<%= ... %>`, but the `<%= %>` **only** works within tag bodies. **Always** use the `{...}` syntax for interpolation within tag attributes, and for interpolation of values within tag bodies. **Always** interpolate block constructs (if, cond, case, for) within tag bodies using `<%= ... %>`.
+
+  **Always** do this:
+
+      
+ {@my_assign} + <%= if @some_block_condition do %> + {@another_assign} + <% end %> +
+ + and **Never** do this – the program will terminate with a syntax error: + + <%!-- THIS IS INVALID NEVER EVER DO THIS --%> +
+ {if @invalid_block_construct do} + {end} +
diff --git a/deps/phoenix/usage-rules/liveview.md b/deps/phoenix/usage-rules/liveview.md new file mode 100644 index 0000000..96e83b7 --- /dev/null +++ b/deps/phoenix/usage-rules/liveview.md @@ -0,0 +1,231 @@ +## Phoenix LiveView guidelines + +- **Never** use the deprecated `live_redirect` and `live_patch` functions, instead **always** use the `<.link navigate={href}>` and `<.link patch={href}>` in templates, and `push_navigate` and `push_patch` functions LiveViews +- **Avoid LiveComponent's** unless you have a strong, specific need for them +- LiveViews should be named like `AppWeb.WeatherLive`, with a `Live` suffix. When you go to add LiveView routes to the router, the default `:browser` scope is **already aliased** with the `AppWeb` module, so you can just do `live "/weather", WeatherLive` + +### LiveView streams + +- **Always** use LiveView streams for collections for assigning regular lists to avoid memory ballooning and runtime termination with the following operations: + - basic append of N items - `stream(socket, :messages, [new_msg])` + - resetting stream with new items - `stream(socket, :messages, [new_msg], reset: true)` (e.g. for filtering items) + - prepend to stream - `stream(socket, :messages, [new_msg], at: -1)` + - deleting items - `stream_delete(socket, :messages, msg)` + +- When using the `stream/3` interfaces in the LiveView, the LiveView template must 1) always set `phx-update="stream"` on the parent element, with a DOM id on the parent element like `id="messages"` and 2) consume the `@streams.stream_name` collection and use the id as the DOM id for each child. For a call like `stream(socket, :messages, [new_msg])` in the LiveView, the template would be: + +
+
+ {msg.text} +
+
+ +- LiveView streams are *not* enumerable, so you cannot use `Enum.filter/2` or `Enum.reject/2` on them. Instead, if you want to filter, prune, or refresh a list of items on the UI, you **must refetch the data and re-stream the entire stream collection, passing reset: true**: + + def handle_event("filter", %{"filter" => filter}, socket) do + # re-fetch the messages based on the filter + messages = list_messages(filter) + + {:noreply, + socket + |> assign(:messages_empty?, messages == []) + # reset the stream with the new messages + |> stream(:messages, messages, reset: true)} + end + +- LiveView streams *do not support counting or empty states*. If you need to display a count, you must track it using a separate assign. For empty states, you can use Tailwind classes: + +
+ +
+ {task.name} +
+
+ + The above only works if the empty state is the only HTML block alongside the stream for-comprehension. + +- When updating an assign that should change content inside any streamed item(s), you MUST re-stream the items + along with the updated assign: + + def handle_event("edit_message", %{"message_id" => message_id}, socket) do + message = Chat.get_message!(message_id) + edit_form = to_form(Chat.change_message(message, %{content: message.content})) + + # re-insert message so @editing_message_id toggle logic takes effect for that stream item + {:noreply, + socket + |> stream_insert(:messages, message) + |> assign(:editing_message_id, String.to_integer(message_id)) + |> assign(:edit_form, edit_form)} + end + + And in the template: + +
+
+ {message.username} + <%= if @editing_message_id == message.id do %> + <%!-- Edit mode --%> + <.form for={@edit_form} id="edit-form-#{message.id}" phx-submit="save_edit"> + ... + + <% end %> +
+
+ +- **Never** use the deprecated `phx-update="append"` or `phx-update="prepend"` for collections + +### LiveView JavaScript interop + +- Remember anytime you use `phx-hook="MyHook"` and that JS hook manages its own DOM, you **must** also set the `phx-update="ignore"` attribute +- **Always** provide an unique DOM id alongside `phx-hook` otherwise a compiler error will be raised + +LiveView hooks come in two flavors, 1) colocated js hooks for "inline" scripts defined inside HEEx, +and 2) external `phx-hook` annotations where JavaScript object literals are defined and passed to the `LiveSocket` constructor. + +#### Inline colocated js hooks + +**Never** write raw embedded ` + +- colocated hooks are automatically integrated into the app.js bundle +- colocated hooks names **MUST ALWAYS** start with a `.` prefix, i.e. `.PhoneNumber` + +#### External phx-hook + +External JS hooks (`
`) must be placed in `assets/js/` and passed to the +LiveSocket constructor: + + const MyHook = { + mounted() { ... } + } + let liveSocket = new LiveSocket("/live", Socket, { + hooks: { MyHook } + }); + +#### Pushing events between client and server + +Use LiveView's `push_event/3` when you need to push events/data to the client for a phx-hook to handle. +**Always** return or rebind the socket on `push_event/3` when pushing events: + + # re-bind socket so we maintain event state to be pushed + socket = push_event(socket, "my_event", %{...}) + + # or return the modified socket directly: + def handle_event("some_event", _, socket) do + {:noreply, push_event(socket, "my_event", %{...})} + end + +Pushed events can then be picked up in a JS hook with `this.handleEvent`: + + mounted() { + this.handleEvent("my_event", data => console.log("from server:", data)); + } + +Clients can also push an event to the server and receive a reply with `this.pushEvent`: + + mounted() { + this.el.addEventListener("click", e => { + this.pushEvent("my_event", { one: 1 }, reply => console.log("got reply from server:", reply)); + }) + } + +Where the server handled it via: + + def handle_event("my_event", %{"one" => 1}, socket) do + {:reply, %{two: 2}, socket} + end + +### LiveView tests + +- `Phoenix.LiveViewTest` module and `LazyHTML` (included) for making your assertions +- Form tests are driven by `Phoenix.LiveViewTest`'s `render_submit/2` and `render_change/2` functions +- Come up with a step-by-step test plan that splits major test cases into small, isolated files. 
You may start with simpler tests that verify content exists, gradually add interaction tests +- **Always reference the key element IDs you added in the LiveView templates in your tests** for `Phoenix.LiveViewTest` functions like `element/2`, `has_element/2`, selectors, etc +- **Never** test against raw HTML, **always** use `element/2`, `has_element/2`, and similar: `assert has_element?(view, "#my-form")` +- Instead of relying on testing text content, which can change, favor testing for the presence of key elements +- Focus on testing outcomes rather than implementation details +- Be aware that `Phoenix.Component` functions like `<.form>` might produce different HTML than expected. Test against the output HTML structure, not your mental model of what you expect it to be +- When facing test failures with element selectors, add debug statements to print the actual HTML, but use `LazyHTML` selectors to limit the output, ie: + + html = render(view) + document = LazyHTML.from_fragment(html) + matches = LazyHTML.filter(document, "your-complex-selector") + IO.inspect(matches, label: "Matches") + +### Form handling + +#### Creating a form from params + +If you want to create a form based on `handle_event` params: + + def handle_event("submitted", params, socket) do + {:noreply, assign(socket, form: to_form(params))} + end + +When you pass a map to `to_form/1`, it assumes said map contains the form params, which are expected to have string keys. + +You can also specify a name to nest the params: + + def handle_event("submitted", %{"user" => user_params}, socket) do + {:noreply, assign(socket, form: to_form(user_params, as: :user))} + end + +#### Creating a form from changesets + +When using changesets, the underlying data, form params, and errors are retrieved from it. The `:as` option is automatically computed too. E.g. if you have a user schema: + + defmodule MyApp.Users.User do + use Ecto.Schema + ... 
+ end + +And then you create a changeset that you pass to `to_form`: + + %MyApp.Users.User{} + |> Ecto.Changeset.change() + |> to_form() + +Once the form is submitted, the params will be available under `%{"user" => user_params}`. + +In the template, the form assign can be passed to the `<.form>` function component: + + <.form for={@form} id="todo-form" phx-change="validate" phx-submit="save"> + <.input field={@form[:field]} type="text" /> + + +Always give the form an explicit, unique DOM ID, like `id="todo-form"`. + +#### Avoiding form errors + +**Always** use a form assigned via `to_form/2` in the LiveView, and the `<.input>` component in the template. In the template **always access forms like this**: + + <%!-- ALWAYS do this (valid) --%> + <.form for={@form} id="my-form"> + <.input field={@form[:field]} type="text" /> + + +And **never** do this: + + <%!-- NEVER do this (invalid) --%> + <.form for={@changeset} id="my-form"> + <.input field={@changeset[:field]} type="text" /> + + +- You are FORBIDDEN from accessing the changeset in the template as it will cause errors + +- **Never** use `<.form let={f} ...>` in the template, instead **always use `<.form for={@form} ...>`**, then drive all form references from the form assign as in `@form[:field]`. The UI should **always** be driven by a `to_form/2` assigned in the LiveView module that is derived from a changeset diff --git a/deps/phoenix/usage-rules/phoenix.md b/deps/phoenix/usage-rules/phoenix.md new file mode 100644 index 0000000..79c2c49 --- /dev/null +++ b/deps/phoenix/usage-rules/phoenix.md @@ -0,0 +1,15 @@ +## Phoenix guidelines + +- Remember Phoenix router `scope` blocks include an optional alias which is prefixed for all routes within the scope. **Always** be mindful of this when creating routes within a scope to avoid duplicate module prefixes. + +- You **never** need to create your own `alias` for route definitions! 
The `scope` provides the alias, ie: + + scope "/admin", AppWeb.Admin do + pipe_through :browser + + live "/users", UserLive, :index + end + + the UserLive route would point to the `AppWeb.Admin.UserLive` module + +- `Phoenix.View` no longer is needed or included with Phoenix, don't use it diff --git a/deps/phoenix_html/.hex b/deps/phoenix_html/.hex new file mode 100644 index 0000000..008e01d Binary files /dev/null and b/deps/phoenix_html/.hex differ diff --git a/deps/phoenix_html/CHANGELOG.md b/deps/phoenix_html/CHANGELOG.md new file mode 100644 index 0000000..48949a1 --- /dev/null +++ b/deps/phoenix_html/CHANGELOG.md @@ -0,0 +1,457 @@ +# Changelog + +## 4.3.0 (2025-09-28) + +* Enhancements + * Implement `Phoenix.HTML.Safe` for Duration + * Add function head for argument names of `normalize_value/2` to improve documentation + * Allow custom tags in options_for_select + * Allow datetime as form option values + +* Bug fixes + * Avoid false positive warnings on Elixir v1.19 + +## 4.2.1 (2025-02-21) + +* Enhancements + * Add type to `Phoenix.HTML.FormField` + * Allow keyword lists in options to use nil as key/value + +## 4.2.0 (2024-12-28) + +* Enhancements + * Add `Phoenix.HTML.css_escape/1` to escape strings for use inside CSS selectors + * Add the ability to pass `:hr` to `options_for_select/2` to render a horizontal rule + +* Bug fixes + * Pass form action through in FormData implementation + +## v4.1.1 (2024-03-01) + * Fix dependency resolution error + +## v4.1.0 (2024-02-29) + +* Enhancements + * Introduce form `:action` and consider input as changed if action changes to support better change tracking + +## v4.0.0 (2023-12-19) + +This version removes deprecated functionality and moved all HTML helpers to a separate library. HTML Helpers are no longer used in new apps from Phoenix v1.7, instead it relies on function components from `Phoenix.LiveView`. 
Older applications who wish to maintain compatibility, add `{:phoenix_html_helpers, "~> 1.0"}` to your `mix.exs` and then replace `use Phoenix.HTML` in your applications by: + +```elixir +import Phoenix.HTML +import Phoenix.HTML.Form +use PhoenixHTMLHelpers +``` + +## v3.3.3 (2023-10-09) + +* Enhancements + * Allow string fields on `input_changed?` + +## v3.3.2 (2023-08-10) + +* Enhancements + * Address deprecations in Elixir v1.16+ + +* Deprecations + * Deprecate `inputs_for/2` and `inputs_for/3` (without anonymous functions) + +## v3.3.1 (2023-02-27) + +* Bug fix + * Set display to none on generated forms + * Warn for maps with atom keys + +## v3.3.0 (2023-02-10) + +* Enhancements + * Support deeply nested class lists + * Implement Phoenix.HTML.Safe for URI + * Implement Phoenix.HTML.FormData for Map + +* Bug fix + * Generate unique IDs for checkboxes based on the value + * Use artificial button click instead of `form.submit` in JavaScript to trigger all relevant events + * Fix a bug where nil/false/true attributes in `aria`/`data`/`phx` would emit empty or literal values, such as `"true"` and `"false"`. This release aligns them with all other attributes so both `nil` and `false` emit nothing. `true` emits the attribute with no value. + +* Deprecations + * `Phoenix.HTML.Tag.attributes_escape/1` is deprecated in favor of `Phoenix.HTML.attributes_escape/1` + +## v3.2.0 (2021-12-18) + +* Enhancements + * Raise if the `id` attribute is set to a number. This is actually an invalid value according to the HTML spec and it can lead to problematic client behaviour, especially in LiveView and other client frameworks. 
+ * Allow `phx` attributes to be nested, similar to `aria` and `data` attributes + * Allow hidden fields in forms to be a list of values + +## v3.1.0 (2021-10-23) + +* Bug fix + * Do not submit data-method links if default has been prevented +* Deprecations + * Deprecate `~E` and `Phoenix.HTML.Tag.attributes_escape/1` + * Remove deprecated `Phoenix.HTML.Link.link/1` + +## v3.0.4 (2021-09-23) + +* Bug fix + * Ensure `class={@class}` in HEEx templates and `:class` attribute in `content_tag` are properly escaped against XSS + +## v3.0.3 (2021-09-04) + +* Bug fix + * Fix sorting of attributes in `tag`/`content_tag` + +## v3.0.2 (2021-08-19) + +* Enhancements + * Support maps on `Phoenix.HTML.Tag.attributes_escape/1` + +## v3.0.1 (2021-08-14) + +* Enhancements + * Add `Phoenix.HTML.Tag.csrf_input_tag/2` + +## v3.0.0 (2021-08-06) + +* Enhancements + * Allow extra html attributes on the `:prompt` option in `select` + * Make `Plug` an optional dependency + * Prefix form id on inputs when it is given to `form_for/3` + * Allow `%URI{}` to be passed to `link/2` and `button/2` as `:to` + * Expose `Phoenix.HTML.Tag.csrf_token_value/1` + * Add `Phoenix.HTML.Tag.attributes_escape/1` + +* Bug fixes + * Honor the `form` attribute when creating hidden checkbox input + * Use `to_iso8601` as the standard implementation for safe dates and times + +* Deprecations + * `form_for` without an anonymous function has been deprecated. 
v3.0 has deprecated the usage, v3.1 will emit warnings, and v3.2 will fully remove the functionality + +* Backwards incompatible changes + * Strings given as attributes keys in `tag` and `content_tag` are now emitted as is (without being dasherized) and are also HTML escaped + * Prefix form id on inputs when it is given to `form_for/3` + * By default dates and times will format to the `to_iso8601` functions provided by their implementation + * Do not include `csrf-param` and `method-param` in generated `csrf_meta_tag` + * Remove deprecated `escape_javascript` in favor of `javascript_escape` + * Remove deprecated `field_value` in favor of `input_value` + * Remove deprecated `field_name` in favor of `input_name` + * Remove deprecated `field_id` in favor of `input_id` + +## v2.14.3 (2020-12-12) + +* Bug fixes + * Fix warnings on Elixir v1.12 + +## v2.14.2 (2020-04-30) + +* Deprecations + * Deprecate `Phoenix`-specific assigns `:view_module` and `:view_template` + +## v2.14.1 (2020-03-20) + +* Enhancements + * Add `Phoenix.HTML.Form.options_for_select/2` + * Add `Phoenix.HTML.Form.inputs_for/3` + +* Bug fixes + * Disable hidden input for disabled checkboxes + +## v2.14.0 (2020-01-28) + +* Enhancements + * Remove enforce_utf8 workaround on forms as it is no longer required by browser + * Remove support tuple-based date/time with microseconds calendar types + * Allow strings as first element in `content_tag` + * Add `:srcset` support to `img_tag` + * Allow `inputs_for` to skip hidden fields + +## v2.13.4 (2020-01-28) + +* Bug fixes + * Fix invalid :line in Elixir v1.10.0 + +## v2.13.3 (2019-05-31) + +* Enhancements + * Add atom support to FormData + +* Bug fixes + * Keep proper line numbers on .eex templates for proper coverage + +## v2.13.2 (2019-03-29) + +* Bug fixes + * Stop event propagation when confirm dialog is canceled + +## v2.13.1 (2019-01-05) + +* Enhancements + * Allow safe content to be given to label + * Also escale template literals in 
`javascript_escape/1` + +* Bug fixes + * Fix deprecation warnings to point to the correct alternative + +## v2.13.0 (2018-12-09) + +* Enhancements + * Require Elixir v1.5+ for more efficient template compilation/rendering + * Add `Phoenix.HTML.Engine.encode_to_iodata!/1` + * Add `Phoenix.HTML.Form.form_for/3` that works without an anonymous function + +* Deprecations + * Deprecate `Phoenix.HTML.escape_javascript/1` in favor of `Phoenix.HTML.javascript_escape/1` for consistency + +## v2.12.0 (2018-08-06) + +* Enhancements + * Configurable and extendable data-confirm behaviour + * Allow data-confirm with submit buttons + * Support ISO 8601 formatted strings for date and time values + +* Bug fixes + * Provide a default id of the field name for `@conn` based forms + +## v2.11.2 (2018-04-13) + +* Enhancements + * Support custom precision on time input + +* Bug fixes + * Do not raise when `:` is part of a path on link/button attributes + +## v2.11.1 (2018-03-20) + +* Enhancements + * Add `label/1` + * Copy the target attribute of the link in the generated JS form + +* Bug fixes + * Support any value that is html escapable in `radio_button` + +## v2.11.0 (2018-03-09) + +* Enhancements + * Add date, datetime-local and time input types + * Enable string keys to be usable with forms + * Support carriage return in `text_to_html` + * Add support for HTML5 boolean attributes to `content_tag` and `tag` + * Improve performance by relying on `html_safe_to_iodata/1` + * Protect against CSRF tokens leaking across hosts when the POST URL is dynamic + * Require `to` attribute in links and buttons to explicitly pass protocols as a separate option for safety reasons + +* Bug fixes + * Guarantee `input_name/2` always returns strings + * Improve handling of uncommon whitespace and null in `escape_javascript` + * Escape value attribute so it is never treated as a boolean + +* Backwards incompatible changes + * The :csrf_token_generator configuration in the Phoenix.HTML app no longer works 
due to the improved security mechanisms + +## v2.10.5 (2017-11-08) + +* Enhancements + * Do not require the :as option in form_for + +## v2.10.4 (2017-08-15) + +* Bug fixes + * Fix formatting of days in datetime_builder + +## v2.10.3 (2017-07-30) + +* Enhancements + * Allow specifying a custom CSRF token generator + +* Bug fixes + * Do not submit `method: :get` in buttons as "post" + +## v2.10.2 (2017-07-24) + +* Bug fixes + * Traverse DOM elements up when handling data-method + +## v2.10.1 (2017-07-22) + +* Bug fixes + * Only generate CSRF token if necessary + +## v2.10.0 (2017-07-21) + +* Enhancements + * Support custom attributes in options in select + +* Bug fixes + * Accept non-binary values in textarea's content + * Allow nested forms on the javascript side. This means `link` and `button` no longer generate a child form such as the `:form` option has no effect and "data-submit=parent" is no longer supported. Instead "data-to" and "data-method" are set on the entities and the form is generated on the javascript side of things + +## v2.9.3 (2016-12-24) + +* Bug fixes + * Once again support any name for atom forms + +## v2.9.2 (2016-12-24) + +* Bug fixes + * Always read from `form.params` and then from `:selected` in `select` and `multiple_select` before falling back to `input_value/2` + +## v2.9.1 (2016-12-20) + +* Bug fixes + * Implement proper `input_value/3` callback + +## v2.9.0 (2016-12-19) + +* Enhancements + * Add `img_tag/2` helper to `Phoenix.HTML.Tag` + * Submit nearest form even if not direct descendent + * Use more iodata for `tag/2` and `content_tag/3` + * Add `input_value/3`, `input_id/2` and `input_name/2` as a unified API around the input (alongside `input_type/3` and `input_validations/2`) + +## v2.8.0 (2016-11-15) + +* Enhancements + * Add `csrf_meta_tag/0` helper to `Phoenix.HTML.Tag` + * Allow passing a `do:` option to `Phoenix.HTML.Link.button/2` + +## v2.7.0 (2016-09-21) + +* Enhancements + * Render button tags for form submits and in the 
`button/2` function + * Allow `submit/2` and `button/2` to receive `do` blocks + * Support the `:multiple` option in `file_input/3` + * Remove previously deprecated and unused `model` field + +## v2.6.1 (2016-07-08) + +* Enhancements + * Remove warnings on v1.4 + +* Bug fixes + * Ensure some contents are properly escaped as an integer + * Ensure JavaScript data-submit events bubble up until it finds the proper parent + +## v2.6.0 (2016-06-16) + +* Enhancements + * Raise helpful error when using invalid iodata + * Inline date/time API with Elixir v1.3 Calendar types + * Add `:insert_brs` option to `text_to_html/2` + * Run on Erlang 19 without warnings + +* Client-side changes + * Use event delegation in `phoenix_html.js` + * Drop IE8 support on `phoenix_html.js` + +* Backwards incompatible changes + * `:min`, `:sec` option in `Phoenix.HTML.Form` (`datetime_select/3` and `time_select/3`) are no longer supported. Use `:minute` or `:second` instead. + +## v2.5.1 (2016-03-12) + +* Bug fixes + * Ensure multipart files work with inputs_for + +## v2.5.0 (2016-01-28) + +* Enhancements + * Introduce `form.data` field instead of `form.model`. Currently those values are kept in sync then the form is built but `form.model` will be deprecated in the long term + +## v2.4.0 (2016-01-21) + +* Enhancements + * Add `rel=nofollow` auto generation for non-get links + * Introduce `:selected` option for `select` and `multiple_select` + +* Bug fixes + * Fix safe engine incorrectly marking safe code as unsafe when last expression is `<% ... %>` + +## v2.3.0 (2015-12-16) + +* Enhancements + * Add `escape_javascript/1` + * Add helpful error message when using unknown `@inner` assign + * Add `Phoenix.HTML.Format.text_to_html/2` + +## v2.2.0 (2015-09-01) + +* Bug fix + * Allow the `:name` to be given in forms. 
For this, using `:name` to configure the underlying input name prefix has been deprecated in favor of `:as` + +## v2.1.2 (2015-08-22) + +* Bug fix + * Do not include values in `password_input/3` + +## v2.1.1 (2015-08-15) + +* Enhancements + * Allow nil in `raw/1` + * Allow block options in `label/3` + * Introduce `:skip_deleted` in `inputs_for/4` + +## v2.1.0 (2015-08-06) + +* Enhancements + * Add an index field to forms to be used by `inputs_for/4` collections + +## v2.0.1 (2015-07-31) + +* Bug fix + * Include web directory in Hex package + +## v2.0.0 (2015-07-30) + +* Enhancements + * No longer generate onclick attributes. + + The main motivation for this is to provide support + for Content Security Policy, which recommends + disabling all inline scripts in a page. + + We took the opportunity to also add support for + data-confirm in `link/2`. + +## v1.4.0 (2015-07-26) + +* Enhancements + * Support `input_type/2` and `input_validations/2` as reflection mechanisms + +## v1.3.0 (2015-07-23) + +* Enhancements + * Add `Phoenix.HTML.Form.inputs_for/4` support + * Add multiple select support + * Add reset input + * Infer default text context for labels + +## v1.2.1 (2015-06-02) + +* Bug fix + * Ensure nil parameters are not discarded when rendering input + +## v1.2.0 (2015-05-30) + +* Enhancements + * Add `label/3` for generating a label tag within a form + +## v1.1.0 (2015-05-20) + +* Enhancements + * Allow do/end syntax with `link/2` + * Raise on missing assigns + +## v1.0.1 + +* Bug fixes + * Avoid variable clash in Phoenix.HTML engine buffers + +## v1.0.0 + +* Enhancements + * Provides an EEx engine with HTML safe rendering + * Provides a `Phoenix.HTML.Safe` protocol + * Provides a `Phoenix.HTML.FormData` protocol + * Provides functions for generating tags, links and form builders in a safe way diff --git a/deps/phoenix_html/LICENSE b/deps/phoenix_html/LICENSE new file mode 100644 index 0000000..7d74eb5 --- /dev/null +++ b/deps/phoenix_html/LICENSE @@ -0,0 +1,20 @@ 
+Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix_html/README.md b/deps/phoenix_html/README.md new file mode 100644 index 0000000..b89e5c3 --- /dev/null +++ b/deps/phoenix_html/README.md @@ -0,0 +1,36 @@ +# Phoenix.HTML + +[![Build Status](https://github.com/phoenixframework/phoenix_html/workflows/Tests/badge.svg)](https://github.com/phoenixframework/phoenix_html/actions?query=workflow%3ATests) + +Building blocks for working with HTML in Phoenix. + +This library provides three main functionalities: + + * HTML safety + * Form abstractions + * A tiny JavaScript library to enhance applications + +See the [docs](https://hexdocs.pm/phoenix_html/) for more information. 
+ +## License + +Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix_html/hex_metadata.config b/deps/phoenix_html/hex_metadata.config new file mode 100644 index 0000000..b53188c --- /dev/null +++ b/deps/phoenix_html/hex_metadata.config @@ -0,0 +1,19 @@ +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/phoenix_html/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/phoenixframework/phoenix_html">>}]}. +{<<"name">>,<<"phoenix_html">>}. +{<<"version">>,<<"4.3.0">>}. +{<<"description">>, + <<"Phoenix view functions for working with HTML templates">>}. +{<<"elixir">>,<<"~> 1.7">>}. 
+{<<"files">>, + [<<"lib">>,<<"lib/phoenix_html">>,<<"lib/phoenix_html/form_field.ex">>, + <<"lib/phoenix_html/safe.ex">>,<<"lib/phoenix_html/form_data.ex">>, + <<"lib/phoenix_html/engine.ex">>,<<"lib/phoenix_html/form.ex">>, + <<"lib/phoenix_html.ex">>,<<"priv">>,<<"priv/static">>, + <<"priv/static/phoenix_html.js">>,<<"CHANGELOG.md">>,<<"LICENSE">>, + <<"mix.exs">>,<<"package.json">>,<<"README.md">>]}. +{<<"app">>,<<"phoenix_html">>}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"requirements">>,[]}. +{<<"build_tools">>,[<<"mix">>]}. diff --git a/deps/phoenix_html/lib/phoenix_html.ex b/deps/phoenix_html/lib/phoenix_html.ex new file mode 100644 index 0000000..a2cd050 --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html.ex @@ -0,0 +1,417 @@ +defmodule Phoenix.HTML do + @moduledoc """ + Building blocks for working with HTML in Phoenix. + + This library provides three main functionalities: + + * HTML safety + * Form abstractions + * A tiny JavaScript library to enhance applications + + ## HTML safety + + One of the main responsibilities of this package is to + provide convenience functions for escaping and marking + HTML code as safe. + + By default, data output in templates is not considered + safe: + + ```heex + <%= "" %> + ``` + + will be shown as: + + ```html + <hello> + ``` + + User data or data coming from the database is almost never + considered safe. However, in some cases, you may want to tag + it as safe and show its "raw" contents: + + ```heex + <%= raw "" %> + ``` + + ## Form handling + + See `Phoenix.HTML.Form`. + + ## JavaScript library + + This project ships with a tiny bit of JavaScript that listens + to all click events to: + + * Support `data-confirm="message"` attributes, which shows + a confirmation modal with the given message + + * Support `data-method="patch|post|put|delete"` attributes, + which sends the current click as a PATCH/POST/PUT/DELETE + HTTP request. 
You will need to add `data-to` with the URL + and `data-csrf` with the CSRF token value + + * Dispatch a "phoenix.link.click" event. You can listen to this + event to customize the behaviour above. Returning false from + this event will disable `data-method`. Stopping propagation + will disable `data-confirm` + + To use the functionality above, you must load `priv/static/phoenix_html.js` + into your build tool. + + ### Overriding the default confirmation behaviour + + You can override the default implementation by hooking + into `phoenix.link.click`. Here is an example: + + ```javascript + window.addEventListener('phoenix.link.click', function (e) { + // Introduce custom behaviour + var message = e.target.getAttribute("data-prompt"); + var answer = e.target.getAttribute("data-prompt-answer"); + if(message && answer && (answer != window.prompt(message))) { + e.preventDefault(); + } + }, false); + ``` + + """ + + @doc false + defmacro __using__(_) do + raise """ + use Phoenix.HTML is no longer supported in v4.0. + + To keep compatibility with previous versions, \ + add {:phoenix_html_helpers, "~> 1.0"} to your mix.exs deps + and then, instead of "use Phoenix.HTML", you might: + + import Phoenix.HTML + import Phoenix.HTML.Form + use PhoenixHTMLHelpers + + """ + end + + @typedoc "Guaranteed to be safe" + @type safe :: {:safe, iodata} + + @typedoc "May be safe or unsafe (i.e. it needs to be converted)" + @type unsafe :: Phoenix.HTML.Safe.t() + + @doc """ + Marks the given content as raw. + + This means any HTML code inside the given + string won't be escaped. + + iex> raw("") + {:safe, ""} + iex> raw({:safe, ""}) + {:safe, ""} + iex> raw(nil) + {:safe, ""} + + """ + @spec raw(iodata | safe | nil) :: safe + def raw({:safe, value}), do: {:safe, value} + def raw(nil), do: {:safe, ""} + def raw(value) when is_binary(value) or is_list(value), do: {:safe, value} + + @doc """ + Escapes the HTML entities in the given term, returning safe iodata. 
+ + iex> html_escape("") + {:safe, [[[] | "<"], "hello" | ">"]} + + iex> html_escape(~c"") + {:safe, ["<", 104, 101, 108, 108, 111, ">"]} + + iex> html_escape(1) + {:safe, "1"} + + iex> html_escape({:safe, ""}) + {:safe, ""} + + """ + @spec html_escape(unsafe) :: safe + def html_escape({:safe, _} = safe), do: safe + def html_escape(other), do: {:safe, Phoenix.HTML.Engine.encode_to_iodata!(other)} + + @doc """ + Converts a safe result into a string. + + Fails if the result is not safe. In such cases, you can + invoke `html_escape/1` or `raw/1` accordingly before. + + You can combine `html_escape/1` and `safe_to_string/1` + to convert a data structure to a escaped string: + + data |> html_escape() |> safe_to_string() + """ + @spec safe_to_string(safe) :: String.t() + def safe_to_string({:safe, iodata}) do + IO.iodata_to_binary(iodata) + end + + @doc ~S""" + Escapes an enumerable of attributes, returning iodata. + + The attributes are rendered in the given order. Note if + a map is given, the key ordering is not guaranteed. + + The keys and values can be of any shape, as long as they + implement the `Phoenix.HTML.Safe` protocol. In addition, + if the key is an atom, it will be "dasherized". In other + words, `:phx_value_id` will be converted to `phx-value-id`. + + Furthermore, the following attributes provide behaviour: + + * `:aria`, `:data`, and `:phx` - they accept a keyword list as + value. `data: [confirm: "are you sure?"]` is converted to + `data-confirm="are you sure?"`. + + * `:class` - it accepts a list of classes as argument. Each + element in the list is separated by space. `nil` and `false` + elements are discarded. `class: ["foo", nil, "bar"]` then + becomes `class="foo bar"`. + + * `:id` - it is validated raise if a number is given as ID, + which is not allowed by the HTML spec and leads to unpredictable + behaviour. 
+ + ## Examples + + iex> safe_to_string attributes_escape(title: "the title", id: "the id", selected: true) + " title=\"the title\" id=\"the id\" selected" + + iex> safe_to_string attributes_escape(%{data: [confirm: "Are you sure?"]}) + " data-confirm=\"Are you sure?\"" + + iex> safe_to_string attributes_escape(%{phx: [value: [foo: "bar"]]}) + " phx-value-foo=\"bar\"" + + """ + def attributes_escape(attrs) when is_list(attrs) do + {:safe, build_attrs(attrs)} + end + + def attributes_escape(attrs) do + {:safe, attrs |> Enum.to_list() |> build_attrs()} + end + + defp build_attrs([{k, true} | t]), + do: [?\s, key_escape(k) | build_attrs(t)] + + defp build_attrs([{_, false} | t]), + do: build_attrs(t) + + defp build_attrs([{_, nil} | t]), + do: build_attrs(t) + + defp build_attrs([{:id, v} | t]), + do: [" id=\"", id_value(v), ?" | build_attrs(t)] + + defp build_attrs([{:class, v} | t]), + do: [" class=\"", class_value(v), ?" | build_attrs(t)] + + defp build_attrs([{:aria, v} | t]) when is_list(v), + do: nested_attrs(v, " aria", t) + + defp build_attrs([{:data, v} | t]) when is_list(v), + do: nested_attrs(v, " data", t) + + defp build_attrs([{:phx, v} | t]) when is_list(v), + do: nested_attrs(v, " phx", t) + + defp build_attrs([{"id", v} | t]), + do: [" id=\"", id_value(v), ?" | build_attrs(t)] + + defp build_attrs([{"class", v} | t]), + do: [" class=\"", class_value(v), ?" | build_attrs(t)] + + defp build_attrs([{"aria", v} | t]) when is_list(v), + do: nested_attrs(v, " aria", t) + + defp build_attrs([{"data", v} | t]) when is_list(v), + do: nested_attrs(v, " data", t) + + defp build_attrs([{"phx", v} | t]) when is_list(v), + do: nested_attrs(v, " phx", t) + + defp build_attrs([{k, v} | t]), + do: [?\s, key_escape(k), ?=, ?", attr_escape(v), ?" 
| build_attrs(t)] + + defp build_attrs([]), do: [] + + defp nested_attrs([{k, true} | kv], attr, t), + do: [attr, ?-, key_escape(k) | nested_attrs(kv, attr, t)] + + defp nested_attrs([{_, falsy} | kv], attr, t) when falsy in [false, nil], + do: nested_attrs(kv, attr, t) + + defp nested_attrs([{k, v} | kv], attr, t) when is_list(v), + do: [nested_attrs(v, "#{attr}-#{key_escape(k)}", []) | nested_attrs(kv, attr, t)] + + defp nested_attrs([{k, v} | kv], attr, t), + do: [attr, ?-, key_escape(k), ?=, ?", attr_escape(v), ?" | nested_attrs(kv, attr, t)] + + defp nested_attrs([], _attr, t), + do: build_attrs(t) + + defp id_value(value) when is_number(value) do + raise ArgumentError, + "attempting to set id attribute to #{value}, " <> + "but setting the DOM ID to a number can lead to unpredictable behaviour. " <> + "Instead consider prefixing the id with a string, such as \"user-#{value}\" or similar" + end + + defp id_value(value) do + attr_escape(value) + end + + defp class_value(value) when is_list(value) do + value + |> list_class_value() + |> attr_escape() + end + + defp class_value(value) do + attr_escape(value) + end + + defp list_class_value(value) do + value + |> Enum.flat_map(fn + nil -> [] + false -> [] + inner when is_list(inner) -> [list_class_value(inner)] + other -> [other] + end) + |> Enum.join(" ") + end + + defp key_escape(value) when is_atom(value), do: String.replace(Atom.to_string(value), "_", "-") + defp key_escape(value), do: attr_escape(value) + + defp attr_escape({:safe, data}), do: data + defp attr_escape(nil), do: [] + defp attr_escape(other) when is_binary(other), do: Phoenix.HTML.Engine.html_escape(other) + defp attr_escape(other), do: Phoenix.HTML.Safe.to_iodata(other) + + @doc """ + Escapes HTML content to be inserted into a JavaScript string. 
+ + This function is useful in JavaScript responses when there is a need + to escape HTML rendered from other templates, like in the following: + + $("#container").append("<%= javascript_escape(render("post.html", post: @post)) %>"); + + It escapes quotes (double and single), double backslashes and others. + """ + @spec javascript_escape(binary) :: binary + @spec javascript_escape(safe) :: safe + def javascript_escape({:safe, data}), + do: {:safe, data |> IO.iodata_to_binary() |> javascript_escape("")} + + def javascript_escape(data) when is_binary(data), + do: javascript_escape(data, "") + + defp javascript_escape(<<0x2028::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<0x2029::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<0::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<">, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<"\r\n", t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc) when h in [?", ?', ?\\, ?`], + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc) when h in [?\r, ?\n], + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<>>, acc), do: acc + + @doc """ + Escapes a string for use as a CSS identifier. + + ## Examples + + iex> css_escape("hello world") + "hello\\\\ world" + + iex> css_escape("-123") + "-\\\\31 23" + + """ + @spec css_escape(String.t()) :: String.t() + def css_escape(value) when is_binary(value) do + # This is a direct translation of + # https://github.com/mathiasbynens/CSS.escape/blob/master/css.escape.js + # into Elixir. 
+ value + |> String.to_charlist() + |> escape_css_chars() + |> IO.iodata_to_binary() + end + + defp escape_css_chars(chars) do + case chars do + # If the character is the first character and is a `-` (U+002D), and + # there is no second character, […] + [?- | []] -> ["\\-"] + _ -> escape_css_chars(chars, 0, []) + end + end + + defp escape_css_chars([], _, acc), do: Enum.reverse(acc) + + defp escape_css_chars([char | rest], index, acc) do + escaped = + cond do + # If the character is NULL (U+0000), then the REPLACEMENT CHARACTER + # (U+FFFD). + char == 0 -> + <<0xFFFD::utf8>> + + # If the character is in the range [\1-\1F] (U+0001 to U+001F) or is + # U+007F, + # if the character is the first character and is in the range [0-9] + # (U+0030 to U+0039), + # if the character is the second character and is in the range [0-9] + # (U+0030 to U+0039) and the first character is a `-` (U+002D), + char in 0x0001..0x001F or char == 0x007F or + (index == 0 and char in ?0..?9) or + (index == 1 and char in ?0..?9 and hd(acc) == "-") -> + # https://drafts.csswg.org/cssom/#escape-a-character-as-code-point + ["\\", Integer.to_string(char, 16), " "] + + # If the character is not handled by one of the above rules and is + # greater than or equal to U+0080, is `-` (U+002D) or `_` (U+005F), or + # is in one of the ranges [0-9] (U+0030 to U+0039), [A-Z] (U+0041 to + # U+005A), or [a-z] (U+0061 to U+007A), […] + char >= 0x0080 or char in [?-, ?_] or char in ?0..?9 or char in ?A..?Z or char in ?a..?z -> + # the character itself + <> + + true -> + # Otherwise, the escaped character. 
+ # https://drafts.csswg.org/cssom/#escape-a-character + ["\\", <>] + end + + escape_css_chars(rest, index + 1, [escaped | acc]) + end +end diff --git a/deps/phoenix_html/lib/phoenix_html/engine.ex b/deps/phoenix_html/lib/phoenix_html/engine.ex new file mode 100644 index 0000000..7e479f7 --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html/engine.ex @@ -0,0 +1,196 @@ +defmodule Phoenix.HTML.Engine do + @moduledoc """ + An EEx.Engine that guarantees templates are HTML Safe. + """ + + @behaviour EEx.Engine + + @anno (if :erlang.system_info(:otp_release) >= ~c"19" do + [generated: true] + else + [line: -1] + end) + + @doc """ + Encodes the HTML templates to iodata. + """ + def encode_to_iodata!({:safe, body}), do: body + def encode_to_iodata!(nil), do: "" + def encode_to_iodata!(""), do: "" + def encode_to_iodata!(bin) when is_binary(bin), do: html_escape(bin) + def encode_to_iodata!(list) when is_list(list), do: Phoenix.HTML.Safe.List.to_iodata(list) + def encode_to_iodata!(other), do: Phoenix.HTML.Safe.to_iodata(other) + + @doc false + def html_escape(bin) when is_binary(bin) do + html_escape(bin, 0, bin, []) + end + + escapes = [ + {?<, "<"}, + {?>, ">"}, + {?&, "&"}, + {?", """}, + {?', "'"} + ] + + for {match, insert} <- escapes do + defp html_escape(<>, skip, original, acc) do + html_escape(rest, skip + 1, original, [acc | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc) do + html_escape(rest, skip, original, acc, 1) + end + + defp html_escape(<<>>, _skip, _original, acc) do + acc + end + + for {match, insert} <- escapes do + defp html_escape(<>, skip, original, acc, len) do + part = binary_part(original, skip, len) + html_escape(rest, skip + len + 1, original, [acc, part | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc, len) do + html_escape(rest, skip, original, acc, len + 1) + end + + defp html_escape(<<>>, 0, original, _acc, _len) do + original + end + + defp 
html_escape(<<>>, skip, original, acc, len) do + [acc | binary_part(original, skip, len)] + end + + @doc false + def init(_opts) do + %{ + iodata: [], + dynamic: [], + vars_count: 0 + } + end + + @doc false + def handle_begin(state) do + %{state | iodata: [], dynamic: []} + end + + @doc false + def handle_end(quoted) do + handle_body(quoted) + end + + @doc false + def handle_body(state) do + %{iodata: iodata, dynamic: dynamic} = state + safe = {:safe, Enum.reverse(iodata)} + {:__block__, [], Enum.reverse([safe | dynamic])} + end + + @doc false + def handle_text(state, text) do + handle_text(state, [], text) + end + + @doc false + def handle_text(state, _meta, text) do + %{iodata: iodata} = state + %{state | iodata: [text | iodata]} + end + + @doc false + def handle_expr(state, "=", ast) do + ast = traverse(ast) + %{iodata: iodata, dynamic: dynamic, vars_count: vars_count} = state + var = Macro.var(:"arg#{vars_count}", __MODULE__) + ast = quote do: unquote(var) = unquote(to_safe(ast)) + %{state | dynamic: [ast | dynamic], iodata: [var | iodata], vars_count: vars_count + 1} + end + + def handle_expr(state, "", ast) do + ast = traverse(ast) + %{dynamic: dynamic} = state + %{state | dynamic: [ast | dynamic]} + end + + def handle_expr(state, marker, ast) do + EEx.Engine.handle_expr(state, marker, ast) + end + + ## Safe conversion + + defp to_safe(ast), do: to_safe(ast, line_from_expr(ast)) + + defp line_from_expr({_, meta, _}) when is_list(meta), do: Keyword.get(meta, :line, 0) + defp line_from_expr(_), do: 0 + + # We can do the work at compile time + defp to_safe(literal, _line) + when is_binary(literal) or is_atom(literal) or is_number(literal) do + literal + |> Phoenix.HTML.Safe.to_iodata() + |> IO.iodata_to_binary() + end + + # We can do the work at runtime + defp to_safe(literal, line) when is_list(literal) do + quote line: line, do: Phoenix.HTML.Safe.List.to_iodata(unquote(literal)) + end + + # We need to check at runtime and we do so by optimizing common cases. 
+ defp to_safe(expr, line) do + # Keep stacktraces for protocol dispatch and coverage + # bin_return uses generated: true to make Elixir's type system on v1.19 happy + safe_return = quote line: line, do: data + bin_return = quote line: line, generated: true, do: Phoenix.HTML.Engine.html_escape(bin) + other_return = quote line: line, do: Phoenix.HTML.Safe.to_iodata(other) + + # However ignore them for the generated clauses to avoid warnings + quote @anno do + case unquote(expr) do + {:safe, data} -> unquote(safe_return) + bin when is_binary(bin) -> unquote(bin_return) + other -> unquote(other_return) + end + end + end + + ## Traversal + + defp traverse(expr) do + Macro.prewalk(expr, &handle_assign/1) + end + + defp handle_assign({:@, meta, [{name, _, atom}]}) when is_atom(name) and is_atom(atom) do + quote line: meta[:line] || 0 do + Phoenix.HTML.Engine.fetch_assign!(var!(assigns), unquote(name)) + end + end + + defp handle_assign(arg), do: arg + + @doc false + def fetch_assign!(assigns, key) do + case Access.fetch(assigns, key) do + {:ok, val} -> + val + + :error -> + raise ArgumentError, """ + assign @#{key} not available in template. + + Please make sure all proper assigns have been set. If this + is a child template, ensure assigns are given explicitly by + the parent template as they are not automatically forwarded. + + Available assigns: #{inspect(Enum.map(assigns, &elem(&1, 0)))} + """ + end + end +end diff --git a/deps/phoenix_html/lib/phoenix_html/form.ex b/deps/phoenix_html/lib/phoenix_html/form.ex new file mode 100644 index 0000000..de5f807 --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html/form.ex @@ -0,0 +1,422 @@ +defmodule Phoenix.HTML.Form do + @moduledoc ~S""" + Define a `Phoenix.HTML.Form` struct and functions to interact with it. + + For building actual forms in your Phoenix application, see + [the `Phoenix.Component.form/1` component](https://hexdocs.pm/phoenix_live_view/Phoenix.Component.html#form/1). 
+ + ## Access behaviour + + The `Phoenix.HTML.Form` struct implements the `Access` behaviour. + When you do `form[field]`, it returns a `Phoenix.HTML.FormField` + struct with the `id`, `name`, `value`, and `errors` prefilled. + + The field name can be either an atom or a string. If it is an atom, + it assumes the form keeps both data and errors as atoms. If it is a + string, it considers that data and errors are stored as strings for said + field. Forms backed by an `Ecto.Changeset` only support atom field names. + + It is possible to "access" fields which do not exist in the source data + structure. A `Phoenix.HTML.FormField` struct will be dynamically created + with some attributes such as `name` and `id` populated. + + ## Custom implementations + + There is a protocol named `Phoenix.HTML.FormData` which can be implemented + by any data structure that wants to be cast to the `Phoenix.HTML.Form` struct. + """ + + alias Phoenix.HTML.Form + import Phoenix.HTML + + @doc """ + Defines the Phoenix.HTML.Form struct. + + Its fields are: + + * `:source` - the data structure that implements the form data protocol + + * `:action` - The action that was taken against the form. This value can be + used to distinguish between different operations such as the user typing + into a form for validation, or submitting a form for a database insert. + + * `:impl` - the module with the form data protocol implementation. + This is used to avoid multiple protocol dispatches. 
+ + * `:id` - the id to be used when generating input fields + + * `:index` - the index of the struct in the form + + * `:name` - the name to be used when generating input fields + + * `:data` - the field used to store lookup data + + * `:params` - the parameters associated with this form + + * `:hidden` - a keyword list of fields that are required to + submit the form behind the scenes as hidden inputs + + * `:options` - a copy of the options given when creating the + form without any form data specific key + + * `:errors` - a keyword list of errors that are associated with + the form + """ + defstruct source: nil, + impl: nil, + id: nil, + name: nil, + data: nil, + action: nil, + hidden: [], + params: %{}, + errors: [], + options: [], + index: nil + + @type t :: %Form{ + source: Phoenix.HTML.FormData.t(), + name: String.t(), + data: %{field => term}, + action: atom(), + params: %{binary => term}, + hidden: Keyword.t(), + options: Keyword.t(), + errors: [{field, term}], + impl: module, + id: String.t(), + index: nil | non_neg_integer + } + + @type field :: atom | String.t() + + @doc false + def fetch(%Form{} = form, field) when is_atom(field) do + fetch(form, field, Atom.to_string(field)) + end + + def fetch(%Form{} = form, field) when is_binary(field) do + fetch(form, field, field) + end + + def fetch(%Form{}, field) do + raise ArgumentError, + "accessing a form with form[field] requires the field to be an atom or a string, got: #{inspect(field)}" + end + + defp fetch(%{errors: errors} = form, field, field_as_string) do + {:ok, + %Phoenix.HTML.FormField{ + errors: field_errors(errors, field), + field: field, + form: form, + id: input_id(form, field_as_string), + name: input_name(form, field_as_string), + value: input_value(form, field) + }} + end + + @doc """ + Returns a value of a corresponding form field. + + The `form` should either be a `Phoenix.HTML.Form` or an atom. + The field is either a string or an atom. 
If the field is given + as an atom, it will attempt to look data with atom keys. If + a string, it will look data with string keys. + + When a form is given, it will look for changes, then + fallback to parameters, and finally fallback to the default + struct/map value. + + Since the function looks up parameter values too, there is + no guarantee that the value will have a certain type. For + example, a boolean field will be sent as "false" as a + parameter, and this function will return it as is. If you + need to normalize the result of `input_value`, see + `normalize_value/2`. + """ + @spec input_value(t | atom, field) :: term + def input_value(%{source: source, impl: impl} = form, field) + when is_atom(field) or is_binary(field) do + impl.input_value(source, form, field) + end + + def input_value(name, _field) when is_atom(name), do: nil + + @doc """ + Returns an id of a corresponding form field. + + The form should either be a `Phoenix.HTML.Form` or an atom. + """ + @spec input_id(t | atom, field) :: String.t() + def input_id(%{id: nil}, field), do: "#{field}" + + def input_id(%{id: id}, field) when is_atom(field) or is_binary(field) do + "#{id}_#{field}" + end + + def input_id(name, field) when (is_atom(name) and is_atom(field)) or is_binary(field) do + "#{name}_#{field}" + end + + @doc """ + Returns an id of a corresponding form field and value attached to it. + + Useful for radio buttons and inputs like multiselect checkboxes. + """ + @spec input_id(t | atom, field, Phoenix.HTML.Safe.t()) :: String.t() + def input_id(name, field, value) do + {:safe, value} = html_escape(value) + value_id = value |> IO.iodata_to_binary() |> String.replace(~r/\W/u, "_") + input_id(name, field) <> "_" <> value_id + end + + @doc """ + Returns a name of a corresponding form field. + + The first argument should either be a `Phoenix.HTML.Form` or an atom. 
+ + ## Examples + + iex> Phoenix.HTML.Form.input_name(:user, :first_name) + "user[first_name]" + """ + @spec input_name(t | atom, field) :: String.t() + def input_name(form_or_name, field) + + def input_name(%{name: nil}, field), do: to_string(field) + + def input_name(%{name: name}, field) when is_atom(field) or is_binary(field), + do: "#{name}[#{field}]" + + def input_name(name, field) when (is_atom(name) and is_atom(field)) or is_binary(field), + do: "#{name}[#{field}]" + + @doc """ + Receives two forms structs and checks if the given field changed. + + The field will have changed if either its associated value, errors, + action, or implementation changed. This is mostly used for optimization + engines as an extension of the `Access` behaviour. + """ + @spec input_changed?(t, t, field()) :: boolean() + def input_changed?( + %Form{ + impl: impl1, + id: id1, + name: name1, + errors: errors1, + source: source1, + action: action1 + } = form1, + %Form{ + impl: impl2, + id: id2, + name: name2, + errors: errors2, + source: source2, + action: action2 + } = form2, + field + ) + when is_atom(field) or is_binary(field) do + impl1 != impl2 or id1 != id2 or name1 != name2 or action1 != action2 or + field_errors(errors1, field) != field_errors(errors2, field) or + impl1.input_value(source1, form1, field) != impl2.input_value(source2, form2, field) + end + + @doc """ + Returns the HTML validations that would apply to + the given field. + """ + @spec input_validations(t, field) :: Keyword.t() + def input_validations(%{source: source, impl: impl} = form, field) + when is_atom(field) or is_binary(field) do + impl.input_validations(source, form, field) + end + + @doc """ + Normalizes an input `value` according to its input `type`. + + Certain HTML input values must be cast, or they will have idiosyncracies + when they are rendered. The goal of this function is to encapsulate + this logic. 
In particular: + + * For "datetime-local" types, it converts `DateTime` and + `NaiveDateTime` to strings without the second precision + + * For "checkbox" types, it returns a boolean depending on + whether the input is "true" or not + + * For "textarea", it prefixes a newline to ensure newlines + won't be ignored on submission. This requires however + that the textarea is rendered with no spaces after its + content + """ + def normalize_value(type, value) + + def normalize_value("datetime-local", %struct{} = value) + when struct in [NaiveDateTime, DateTime] do + <> = struct.to_string(value) + {:safe, [date, ?T, hour_minute]} + end + + def normalize_value("textarea", value) do + {:safe, value} = html_escape(value || "") + {:safe, [?\n | value]} + end + + def normalize_value("checkbox", value) do + html_escape(value) == {:safe, "true"} + end + + def normalize_value(_type, value) do + value + end + + @doc """ + Returns options to be used inside a select element. + + `options` is expected to be an enumerable which will be used to + generate each `option` element. The function supports different data + for the individual elements: + + * keyword lists - each keyword list is expected to have the keys + `:key` and `:value`. Additional keys such as `:disabled` may + be given to customize the option. + * two-item tuples - where the first element is an atom, string or + integer to be used as the option label and the second element is + an atom, string or integer to be used as the option value + * simple atom, string or integer - which will be used as both label and value + for the generated select + + ## Option groups + + If `options` is map or keyword list where the first element is a string, + atom or integer and the second element is a list or a map, it is assumed + the key will be wrapped in an `` and the value will be used to + generate `` nested under the group. 
+ + ## Examples + + options_for_select(["Admin": "admin", "User": "user"], "admin") + #=> + #=> + + Multiple selected values: + + options_for_select(["Admin": "admin", "User": "user", "Moderator": "moderator"], + ["admin", "moderator"]) + #=> + #=> + #=> + + Groups: + + options_for_select(["Europe": ["UK", "Sweden", "France"], ...], nil) + #=> + #=> + #=> + #=> + #=> + + Custom option tags: + + options_for_select(["Admin": "admin", "User": "user"], nil, tag: "opt") + #=> Admin + #=> User + + Horizontal separators can be added: + + options_for_select(["Admin", "User", :hr, "New"], nil) + #=> + #=> + #=>
+ #=> + + options_for_select(["Admin": "admin", "User": "user", hr: nil, "New": "new"], nil) + #=> + #=> + #=>
+ #=> + + + """ + def options_for_select(options, selected_values, extra \\ []) do + {:safe, + escaped_options_for_select( + options, + selected_values |> List.wrap() |> Enum.map(&html_escape/1), + extra + )} + end + + defp escaped_options_for_select(options, selected_values, extra) do + Enum.reduce(options, [], fn + {:hr, nil}, acc -> + [acc | hr_tag()] + + {option_key, option_value}, acc -> + [acc | option(option_key, option_value, extra, selected_values)] + + options, acc when is_list(options) -> + {option_key, options} = + case List.keytake(options, :key, 0) do + nil -> + raise ArgumentError, + "expected :key key when building