diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 1baa82c2..ca44e4b6 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -1,5 +1,8 @@ name: build -on: [ push, pull_request ] +on: + push: + branches: [ main ] + pull_request: jobs: build: runs-on: ubuntu-latest diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4ebb9d56..3c959bb6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -22,6 +22,7 @@ jobs: --health-retries 5 env: NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable" + NTFY_TEST_S3_URL: ${{ secrets.NTFY_TEST_S3_URL }} steps: - name: Checkout code uses: actions/checkout@v3 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 94f08fd9..4d6bbbdb 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,5 +1,8 @@ name: test -on: [ push, pull_request ] +on: + push: + branches: [ main ] + pull_request: jobs: test: runs-on: ubuntu-latest @@ -19,6 +22,7 @@ jobs: --health-retries 5 env: NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable" + NTFY_TEST_S3_URL: ${{ secrets.NTFY_TEST_S3_URL }} steps: - name: Checkout code uses: actions/checkout@v3 diff --git a/.gitignore b/.gitignore index ed17b2d4..6d5deb67 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ server/site/ tools/fbsend/fbsend tools/pgimport/pgimport tools/loadtest/loadtest +tools/s3cli/s3cli playground/ secrets/ *.iml diff --git a/attachment/backend.go b/attachment/backend.go new file mode 100644 index 00000000..921ceb3e --- /dev/null +++ b/attachment/backend.go @@ -0,0 +1,23 @@ +package attachment + +import ( + "io" + "time" +) + +// object represents an object stored in a backend. +type object struct { + ID string + Size int64 + LastModified time.Time +} + +// backend is a minimal I/O interface for storing and retrieving attachment files. +// It has no knowledge of size tracking, limiting, or ID validation. 
+type backend interface { + Put(id string, reader io.Reader, untrustedLength int64) error + Get(id string) (io.ReadCloser, int64, error) + List() ([]object, error) + Delete(ids ...string) error + DeleteIncomplete(cutoff time.Time) error +} diff --git a/attachment/backend_file.go b/attachment/backend_file.go new file mode 100644 index 00000000..e86ff1ec --- /dev/null +++ b/attachment/backend_file.go @@ -0,0 +1,94 @@ +package attachment + +import ( + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +type fileBackend struct { + dir string +} + +var _ backend = (*fileBackend)(nil) + +func newFileBackend(dir string) (*fileBackend, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + return &fileBackend{dir: dir}, nil +} + +func (b *fileBackend) Put(id string, reader io.Reader, untrustedLength int64) error { + if untrustedLength > 0 { + reader = io.LimitReader(reader, untrustedLength) + } + file := filepath.Join(b.dir, id) + f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + n, err := io.Copy(f, reader) + if err != nil { + os.Remove(file) + return err + } else if untrustedLength > 0 && n != untrustedLength { + os.Remove(file) + return fmt.Errorf("content length mismatch: claimed %d, got %d", untrustedLength, n) + } + if err := f.Close(); err != nil { + os.Remove(file) + return err + } + return nil +} + +func (b *fileBackend) List() ([]object, error) { + entries, err := os.ReadDir(b.dir) + if err != nil { + return nil, err + } + objects := make([]object, 0, len(entries)) + for _, e := range entries { + info, err := e.Info() + if err != nil { + return nil, err + } + objects = append(objects, object{ + ID: e.Name(), + Size: info.Size(), + LastModified: info.ModTime(), + }) + } + return objects, nil +} + +func (b *fileBackend) Get(id string) (io.ReadCloser, int64, error) { + file := filepath.Join(b.dir, id) + stat, err := os.Stat(file) + if err != nil { + return nil, 0, err + } + f, err := os.Open(file) + if err != nil { + return nil, 0, err + } + return f, stat.Size(), nil +} + +func (b *fileBackend) Delete(ids ...string) error { + for _, id := range ids { + file := filepath.Join(b.dir, id) + if err := os.Remove(file); err != nil && !os.IsNotExist(err) { + return err + } + } + return nil +} + +func (b *fileBackend) DeleteIncomplete(_ time.Time) error { + return nil +} diff --git a/attachment/backend_s3.go b/attachment/backend_s3.go new file mode 100644 index 00000000..9a2d4bef --- /dev/null +++ b/attachment/backend_s3.go @@ -0,0 +1,51 @@ +package attachment + +import ( + "context" + "io" + "time" + + "heckel.io/ntfy/v2/s3" +) + +type s3Backend struct { + client *s3.Client +} + +var _ backend = (*s3Backend)(nil) + +func newS3Backend(client *s3.Client) *s3Backend { + return &s3Backend{client: client} +} + +func (b *s3Backend) Put(id string, reader io.Reader, untrustedLength int64) error { + return b.client.PutObject(context.Background(), id, reader, untrustedLength) +} + +func (b *s3Backend) Get(id string) (io.ReadCloser, int64, error) { + return b.client.GetObject(context.Background(), id) +} + +func (b *s3Backend) List() ([]object, error) { + objects, err := b.client.ListObjectsV2(context.Background()) + if err != nil { + return nil, err + } + result := make([]object, 0, len(objects)) + for _, obj := range objects { + result = append(result, object{ + ID: obj.Key, + Size: obj.Size, + LastModified: obj.LastModified, + }) + } + return result, nil +} + +func (b *s3Backend) Delete(ids ...string) 
error { + return b.client.DeleteObjects(context.Background(), ids) +} + +func (b *s3Backend) DeleteIncomplete(cutoff time.Time) error { + return b.client.AbortIncompleteUploads(context.Background(), cutoff) +} diff --git a/attachment/store.go b/attachment/store.go new file mode 100644 index 00000000..d70ea2ab --- /dev/null +++ b/attachment/store.go @@ -0,0 +1,232 @@ +package attachment + +import ( + "errors" + "fmt" + "io" + "regexp" + "sync" + "time" + + "heckel.io/ntfy/v2/log" + "heckel.io/ntfy/v2/model" + "heckel.io/ntfy/v2/s3" + "heckel.io/ntfy/v2/util" +) + +const ( + tagStore = "attachment_store" + syncInterval = 15 * time.Minute // How often to run the background sync loop + orphanGracePeriod = time.Hour // Don't delete orphaned objects younger than this to avoid races with in-flight uploads +) + +var ( + fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, model.MessageIDLength)) + errInvalidFileID = errors.New("invalid file ID") +) + +// Store manages attachment storage with shared logic for size tracking, limiting, +// ID validation, and background sync to reconcile storage with the database. +type Store struct { + backend backend + limit int64 // Defined limit of the store in bytes + size int64 // Current size of the store in bytes + sizes map[string]int64 // File ID -> size, for subtracting on Remove + localIDs func() ([]string, error) // Returns file IDs that should exist locally, used for sync() + closeChan chan struct{} + mu sync.RWMutex // Protects size and sizes +} + +// NewFileStore creates a new file-system backed attachment cache +func NewFileStore(dir string, totalSizeLimit int64, localIDsFn func() ([]string, error)) (*Store, error) { + b, err := newFileBackend(dir) + if err != nil { + return nil, err + } + return newStore(b, totalSizeLimit, localIDsFn) +} + +// NewS3Store creates a new S3-backed attachment cache. The s3URL must be in the format: +// +// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT] +func NewS3Store(s3URL string, totalSizeLimit int64, localIDs func() ([]string, error)) (*Store, error) { + config, err := s3.ParseURL(s3URL) + if err != nil { + return nil, err + } + return newStore(newS3Backend(s3.New(config)), totalSizeLimit, localIDs) +} + +func newStore(backend backend, totalSizeLimit int64, localIDs func() ([]string, error)) (*Store, error) { + c := &Store{ + backend: backend, + limit: totalSizeLimit, + sizes: make(map[string]int64), + localIDs: localIDs, + closeChan: make(chan struct{}), + } + if localIDs != nil { + go c.syncLoop() + } + return c, nil +} + +// Write stores an attachment file. The id is validated, and the write is subject to +// the total size limit and any additional limiters. The untrustedLength is a hint +// from the client's Content-Length header; backends may use it to optimize uploads (e.g. +// streaming directly to S3 without buffering). +func (c *Store) Write(id string, reader io.Reader, untrustedLength int64, limiters ...util.Limiter) (int64, error) { + if !fileIDRegex.MatchString(id) { + return 0, errInvalidFileID + } + log.Tag(tagStore).Field("message_id", id).Debug("Writing attachment") + limiters = append(limiters, util.NewFixedLimiter(c.Remaining())) + countingReader := util.NewCountingReader(reader) + limitReader := util.NewLimitReader(countingReader, limiters...) 
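+ // Reader chain at this point: client body -> countingReader (records the bytes actually consumed, used for size accounting below) -> limitReader (enforces the caller-supplied limiters plus the store's remaining capacity) -> backend.Put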
+ if err := c.backend.Put(id, limitReader, untrustedLength); err != nil { + c.backend.Delete(id) //nolint:errcheck + return 0, err + } + size := countingReader.Total() + c.mu.Lock() + c.size += size + c.sizes[id] = size + c.mu.Unlock() + return size, nil +} + +// Read retrieves an attachment file by ID +func (c *Store) Read(id string) (io.ReadCloser, int64, error) { + if !fileIDRegex.MatchString(id) { + return nil, 0, errInvalidFileID + } + return c.backend.Get(id) +} + +// Remove deletes attachment files by ID and subtracts their known sizes from +// the total. Sizes for objects not tracked (e.g. written before this process +// started and before the first sync) are corrected by the next sync() call. +func (c *Store) Remove(ids ...string) error { + for _, id := range ids { + if !fileIDRegex.MatchString(id) { + return errInvalidFileID + } + } + // Remove from backend + for _, id := range ids { + log.Tag(tagStore).Field("message_id", id).Debug("Removing attachment") + } + if err := c.backend.Delete(ids...); err != nil { + return err + } + // Update total cache size + c.mu.Lock() + for _, id := range ids { + if size, ok := c.sizes[id]; ok { + c.size -= size + delete(c.sizes, id) + } + } + if c.size < 0 { + c.size = 0 + } + c.mu.Unlock() + return nil +} + +// sync reconciles the backend storage with the database. It lists all objects, +// deletes orphans (not in the valid ID set and older than 1 hour), and recomputes +// the total size from the remaining objects. +func (c *Store) sync() error { + if c.localIDs == nil { + return nil + } + localIDs, err := c.localIDs() + if err != nil { + return fmt.Errorf("attachment sync: failed to get valid IDs: %w", err) + } + localIDMap := make(map[string]struct{}, len(localIDs)) + for _, id := range localIDs { + localIDMap[id] = struct{}{} + } + remoteObjects, err := c.backend.List() + if err != nil { + return fmt.Errorf("attachment sync: failed to list objects: %w", err) + } + // Calculate total cache size and collect orphaned attachments, excluding objects younger + // than the grace period to account for races, and skipping objects with invalid IDs. 
+ cutoff := time.Now().Add(-orphanGracePeriod) + var orphanIDs []string + var count, size int64 + sizes := make(map[string]int64, len(remoteObjects)) + for _, obj := range remoteObjects { + if !fileIDRegex.MatchString(obj.ID) { + continue + } + if _, ok := localIDMap[obj.ID]; !ok && obj.LastModified.Before(cutoff) { + orphanIDs = append(orphanIDs, obj.ID) + } else { + count++ + size += obj.Size + sizes[obj.ID] = obj.Size + } + } + log.Tag(tagStore).Debug("Attachment store updated: %d attachment(s), %s", count, util.FormatSizeHuman(size)) + c.mu.Lock() + c.size = size + c.sizes = sizes + c.mu.Unlock() + // Delete orphaned attachments + if len(orphanIDs) > 0 { + log.Tag(tagStore).Debug("Deleting %d orphaned attachment(s)", len(orphanIDs)) + if err := c.backend.Delete(orphanIDs...); err != nil { + return fmt.Errorf("attachment sync: failed to delete orphaned objects: %w", err) + } + } + // Clean up incomplete uploads (S3 only) + if err := c.backend.DeleteIncomplete(cutoff); err != nil { + log.Tag(tagStore).Err(err).Warn("Failed to abort incomplete uploads from attachment cache") + } + return nil +} + +// Size returns the current total size of all attachments +func (c *Store) Size() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.size +} + +// Remaining returns the remaining capacity for attachments +func (c *Store) Remaining() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + remaining := c.limit - c.size + if remaining < 0 { + return 0 + } + return remaining +} + +// Close stops the background sync goroutine +func (c *Store) Close() { + close(c.closeChan) +} + +func (c *Store) syncLoop() { + if err := c.sync(); err != nil { + log.Tag(tagStore).Err(err).Warn("Attachment sync failed") + } + ticker := time.NewTicker(syncInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := c.sync(); err != nil { + log.Tag(tagStore).Err(err).Warn("Attachment sync failed") + } + case <-c.closeChan: + return + } + } +} diff --git a/attachment/store_file_test.go b/attachment/store_file_test.go new file mode 100644 index 00000000..d0b6e135 --- /dev/null +++ b/attachment/store_file_test.go @@ -0,0 +1,16 @@ +package attachment + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func newTestFileStore(t *testing.T, totalSizeLimit int64) (dir string, cache *Store) { + t.Helper() + dir = t.TempDir() + cache, err := NewFileStore(dir, totalSizeLimit, nil) + require.Nil(t, err) + t.Cleanup(func() { cache.Close() }) + return dir, cache +} diff --git a/attachment/store_s3_test.go b/attachment/store_s3_test.go new file mode 100644 index 00000000..6615f4e9 --- /dev/null +++ b/attachment/store_s3_test.go @@ -0,0 +1,120 @@ +package attachment + +import ( + "context" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "heckel.io/ntfy/v2/s3" +) + +func TestS3Store_WriteWithPrefix(t *testing.T) { + s3URL := os.Getenv("NTFY_TEST_S3_URL") + if s3URL == "" { + t.Skip("NTFY_TEST_S3_URL not set") + } + cfg, err := s3.ParseURL(s3URL) + require.Nil(t, err) + cfg.Prefix = "test-prefix" + client := s3.New(cfg) + deleteAllObjects(t, client) + backend := newS3Backend(client) + cache, err := newStore(backend, 10*1024, nil) + require.Nil(t, err) + t.Cleanup(func() { + deleteAllObjects(t, client) + cache.Close() + }) + + size, err := cache.Write("abcdefghijkl", strings.NewReader("test"), 0) + require.Nil(t, err) + require.Equal(t, int64(4), size) + + reader, _, err := cache.Read("abcdefghijkl") + require.Nil(t, err) + data, err := 
io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "test", string(data)) +} + +// --- Helpers --- + +func newTestRealS3Store(t *testing.T, totalSizeLimit int64) (*Store, *modTimeOverrideBackend) { + t.Helper() + s3URL := os.Getenv("NTFY_TEST_S3_URL") + if s3URL == "" { + t.Skip("NTFY_TEST_S3_URL not set") + } + cfg, err := s3.ParseURL(s3URL) + require.Nil(t, err) + if cfg.Prefix != "" { + cfg.Prefix = cfg.Prefix + "/testpkg-attachment" + } else { + cfg.Prefix = "testpkg-attachment" + } + client := s3.New(cfg) + inner := newS3Backend(client) + wrapper := &modTimeOverrideBackend{backend: inner, modTimes: make(map[string]time.Time)} + deleteAllObjects(t, client) + store, err := newStore(wrapper, totalSizeLimit, nil) + require.Nil(t, err) + t.Cleanup(func() { + deleteAllObjects(t, client) + store.Close() + }) + return store, wrapper +} + +func deleteAllObjects(t *testing.T, client *s3.Client) { + t.Helper() + for i := 0; i < 20; i++ { + objects, err := client.ListObjectsV2(context.Background()) + require.Nil(t, err) + if len(objects) == 0 { + return + } + keys := make([]string, len(objects)) + for j, obj := range objects { + keys[j] = obj.Key + } + require.Nil(t, client.DeleteObjects(context.Background(), keys)) + time.Sleep(200 * time.Millisecond) + } + t.Fatal("timed out waiting for bucket to be empty") +} + +// modTimeOverrideBackend wraps a backend and allows overriding LastModified times returned by List(). +// This is used in tests to simulate old objects on backends (like real S3) where +// LastModified cannot be set directly. +type modTimeOverrideBackend struct { + backend + mu sync.Mutex + modTimes map[string]time.Time // object ID -> override time +} + +func (b *modTimeOverrideBackend) List() ([]object, error) { + objects, err := b.backend.List() + if err != nil { + return nil, err + } + b.mu.Lock() + defer b.mu.Unlock() + for i, obj := range objects { + if t, ok := b.modTimes[obj.ID]; ok { + objects[i].LastModified = t + } + } + return objects, nil +} + +func (b *modTimeOverrideBackend) setModTime(id string, t time.Time) { + b.mu.Lock() + b.modTimes[id] = t + b.mu.Unlock() +} diff --git a/attachment/store_test.go b/attachment/store_test.go new file mode 100644 index 00000000..11d0b244 --- /dev/null +++ b/attachment/store_test.go @@ -0,0 +1,352 @@ +package attachment + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "heckel.io/ntfy/v2/util" +) + +const testSizeLimit = 10 * 1024 + +func TestStore_WriteReadRemove(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Write + size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 0) + require.Nil(t, err) + require.Equal(t, int64(11), size) + require.Equal(t, int64(11), s.Size()) + + // Read back + reader, readSize, err := s.Read("abcdefghijkl") + require.Nil(t, err) + require.Equal(t, int64(11), readSize) + data, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello world", string(data)) + + // Remove + require.Nil(t, s.Remove("abcdefghijkl")) + require.Equal(t, int64(0), s.Size()) + + // Read after remove should fail + _, _, err = s.Read("abcdefghijkl") + require.Error(t, err) + }) +} + +func TestStore_WriteRemoveMultiple(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + for i := 0; i < 5; i++ { + _, err := s.Write(fmt.Sprintf("abcdefghijk%d", i), 
bytes.NewReader(make([]byte, 100)), 0) + require.Nil(t, err) + } + require.Equal(t, int64(500), s.Size()) + + require.Nil(t, s.Remove("abcdefghijk1", "abcdefghijk3")) + require.Equal(t, int64(300), s.Size()) + + // Removed files should not be readable + _, _, err := s.Read("abcdefghijk1") + require.Error(t, err) + _, _, err = s.Read("abcdefghijk3") + require.Error(t, err) + + // Remaining files should still be readable + for _, id := range []string{"abcdefghijk0", "abcdefghijk2", "abcdefghijk4"} { + reader, _, err := s.Read(id) + require.Nil(t, err) + reader.Close() + } + }) +} + +func TestStore_WriteTotalSizeLimit(t *testing.T) { + forEachBackend(t, 100, func(t *testing.T, s *Store, _ func(string)) { + // First write fits + _, err := s.Write("abcdefghijk0", bytes.NewReader(make([]byte, 80)), 0) + require.Nil(t, err) + require.Equal(t, int64(80), s.Size()) + require.Equal(t, int64(20), s.Remaining()) + + // Second write exceeds total limit + _, err = s.Write("abcdefghijk1", bytes.NewReader(make([]byte, 50)), 0) + require.ErrorIs(t, err, util.ErrLimitReached) + }) +} + +func TestStore_WriteAdditionalLimiter(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + _, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 200)), 0, util.NewFixedLimiter(100)) + require.ErrorIs(t, err, util.ErrLimitReached) + + // File should not be readable (was cleaned up) + _, _, err = s.Read("abcdefghijkl") + require.Error(t, err) + }) +} + +func TestStore_WriteWithLimiter(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + size, err := s.Write("abcdefghijkl", strings.NewReader("normal file"), 0, util.NewFixedLimiter(999)) + require.Nil(t, err) + require.Equal(t, int64(11), size) + require.Equal(t, int64(11), s.Size()) + }) +} + +func TestStore_WriteOverwriteSameID(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Write 100 bytes + _, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 100)), 0) + require.Nil(t, err) + require.Equal(t, int64(100), s.Size()) + + // Overwrite with 50 bytes + _, err = s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 50)), 0) + require.Nil(t, err) + require.Equal(t, int64(150), s.Size()) // Store tracks both writes + + // Read back should return the latest content + reader, readSize, err := s.Read("abcdefghijkl") + require.Nil(t, err) + require.Equal(t, int64(50), readSize) + reader.Close() + }) +} + +func TestStore_WriteAfterFailure(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Failed write: limiter rejects it + _, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 200)), 0, util.NewFixedLimiter(100)) + require.ErrorIs(t, err, util.ErrLimitReached) + require.Equal(t, int64(0), s.Size()) + + // Subsequent write with a different ID should succeed + size, err := s.Write("abcdefghijk2", strings.NewReader("hello"), 0) + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, int64(5), s.Size()) + + // The failed ID should not be readable + _, _, err = s.Read("abcdefghijkl") + require.Error(t, err) + + // The successful ID should be readable + reader, _, err := s.Read("abcdefghijk2") + require.Nil(t, err) + reader.Close() + }) +} + +func TestStore_SyncRecomputesSize(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, makeOld func(string)) { + // Write two files + _, err := s.Write("abcdefghijk0", 
bytes.NewReader(make([]byte, 100)), 0) + require.Nil(t, err) + _, err = s.Write("abcdefghijk1", bytes.NewReader(make([]byte, 200)), 0) + require.Nil(t, err) + require.Equal(t, int64(300), s.Size()) + + // Corrupt the in-memory size tracking + s.mu.Lock() + s.size = 999 + s.mu.Unlock() + require.Equal(t, int64(999), s.Size()) + + // Set localIDs to include both files so nothing gets deleted + s.localIDs = func() ([]string, error) { + return []string{"abcdefghijk0", "abcdefghijk1"}, nil + } + + // Sync should recompute size from the backend + require.Nil(t, s.sync()) + require.Equal(t, int64(300), s.Size()) + }) +} + +func TestStore_ReadNotFound(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + _, _, err := s.Read("abcdefghijkl") + require.Error(t, err) + }) +} + +func TestStore_InvalidID(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + _, err := s.Write("bad", strings.NewReader("x"), 0) + require.Equal(t, errInvalidFileID, err) + + _, _, err = s.Read("bad") + require.Equal(t, errInvalidFileID, err) + + err = s.Remove("bad") + require.Equal(t, errInvalidFileID, err) + }) +} + +func TestStore_WriteLargeObjects(t *testing.T) { + sizes := map[string]int64{ + "100B": 100, + "6MB": 6 * 1024 * 1024, + "12MB": 12 * 1024 * 1024, + } + for name, sz := range sizes { + t.Run(name, func(t *testing.T) { + forEachBackend(t, sz+1024, func(t *testing.T, s *Store, _ func(string)) { + data := make([]byte, sz) + for i := range data { + data[i] = byte(i % 251) + } + + size, err := s.Write("abcdefghijkl", bytes.NewReader(data), 0) + require.Nil(t, err) + require.Equal(t, sz, size) + require.Equal(t, sz, s.Size()) + + reader, readSize, err := s.Read("abcdefghijkl") + require.Nil(t, err) + require.Equal(t, sz, readSize) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, data, got) + }) + }) + } +} + +func TestStore_WriteUntrustedLengthExact(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 11) + require.Nil(t, err) + require.Equal(t, int64(11), size) + + reader, _, err := s.Read("abcdefghijkl") + require.Nil(t, err) + data, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello world", string(data)) + }) +} + +func TestStore_WriteUntrustedLengthBodyLonger(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Body has 11 bytes, but we claim 5 — only first 5 bytes should be stored + size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 5) + require.Nil(t, err) + require.Equal(t, int64(5), size) + + reader, _, err := s.Read("abcdefghijkl") + require.Nil(t, err) + data, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello", string(data)) + }) +} + +func TestStore_WriteUntrustedLengthBodyShorter(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Body has 5 bytes, but we claim 100 — should fail + _, err := s.Write("abcdefghijkl", strings.NewReader("hello"), 100) + require.Error(t, err) + + // File should not be readable (was cleaned up) + _, _, err = s.Read("abcdefghijkl") + require.Error(t, err) + }) +} + +func TestStore_Sync(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, makeOld func(string)) { + // Write some files + _, err := 
s.Write("abcdefghijk0", strings.NewReader("file0"), 0) + require.Nil(t, err) + _, err = s.Write("abcdefghijk1", strings.NewReader("file1"), 0) + require.Nil(t, err) + _, err = s.Write("abcdefghijk2", strings.NewReader("file2"), 0) + require.Nil(t, err) + + require.Equal(t, int64(15), s.Size()) + + // Set the ID provider to only know about file 0 and 2 + s.localIDs = func() ([]string, error) { + return []string{"abcdefghijk0", "abcdefghijk2"}, nil + } + + // Make file 1 old enough to be cleaned up + makeOld("abcdefghijk1") + + // Run sync + require.Nil(t, s.sync()) + + // File 1 should be deleted (orphan, old enough) + _, _, err = s.Read("abcdefghijk1") + require.Error(t, err) + + // Files 0 and 2 should still be readable + r, _, err := s.Read("abcdefghijk0") + require.Nil(t, err) + r.Close() + r, _, err = s.Read("abcdefghijk2") + require.Nil(t, err) + r.Close() + + // Size should be updated + require.Equal(t, int64(10), s.Size()) + }) +} + +func TestStore_Sync_SkipsRecentFiles(t *testing.T) { + forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) { + // Write a file + _, err := s.Write("abcdefghijk0", strings.NewReader("file0"), 0) + require.Nil(t, err) + + // Set the ID provider to return empty (no valid IDs) + s.localIDs = func() ([]string, error) { + return []string{}, nil + } + + // File was just created, so it should NOT be deleted (< 1 hour old) + require.Nil(t, s.sync()) + + // File should still exist + reader, _, err := s.Read("abcdefghijk0") + require.Nil(t, err) + reader.Close() + }) +} + +// forEachBackend runs f against both the file and S3 backends. It also provides a makeOld +// callback that makes a specific object's timestamp old enough for orphan cleanup (> 1 hour). +// For the file backend, this uses os.Chtimes; for the S3 backend, it overrides the object's +// LastModified time via a modTimeOverrideBackend wrapper. Objects start with recent timestamps +// by default. The S3 subtest is skipped if NTFY_TEST_S3_URL is not set. 
+func forEachBackend(t *testing.T, totalSizeLimit int64, f func(t *testing.T, s *Store, makeOld func(string))) { + t.Run("file", func(t *testing.T) { + dir, s := newTestFileStore(t, totalSizeLimit) + makeOld := func(id string) { + oldTime := time.Unix(1, 0) + os.Chtimes(filepath.Join(dir, id), oldTime, oldTime) + } + f(t, s, makeOld) + }) + t.Run("s3", func(t *testing.T) { + s, wrapper := newTestRealS3Store(t, totalSizeLimit) + makeOld := func(id string) { + wrapper.setModTime(id, time.Unix(1, 0)) + } + f(t, s, makeOld) + }) +} diff --git a/cmd/serve.go b/cmd/serve.go index 3baf81ec..2af1f389 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -52,7 +52,7 @@ var flagsServe = append( altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-users", Aliases: []string{"auth_users"}, EnvVars: []string{"NTFY_AUTH_USERS"}, Usage: "pre-provisioned declarative users"}), altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-access", Aliases: []string{"auth_access"}, EnvVars: []string{"NTFY_AUTH_ACCESS"}, Usage: "pre-provisioned declarative access control entries"}), altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-tokens", Aliases: []string{"auth_tokens"}, EnvVars: []string{"NTFY_AUTH_TOKENS"}, Usage: "pre-provisioned declarative access tokens"}), - altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-cache-dir", Aliases: []string{"attachment_cache_dir"}, EnvVars: []string{"NTFY_ATTACHMENT_CACHE_DIR"}, Usage: "cache directory for attached files"}), + altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-cache-dir", Aliases: []string{"attachment_cache_dir"}, EnvVars: []string{"NTFY_ATTACHMENT_CACHE_DIR"}, Usage: "cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT])"}), altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-total-size-limit", Aliases: []string{"attachment_total_size_limit", "A"}, EnvVars: []string{"NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT"}, Value: util.FormatSize(server.DefaultAttachmentTotalSizeLimit), Usage: "limit of the on-disk attachment cache"}), altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-file-size-limit", Aliases: []string{"attachment_file_size_limit", "Y"}, EnvVars: []string{"NTFY_ATTACHMENT_FILE_SIZE_LIMIT"}, Value: util.FormatSize(server.DefaultAttachmentFileSizeLimit), Usage: "per-file attachment size limit (e.g. 300k, 2M, 100M)"}), altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-expiry-duration", Aliases: []string{"attachment_expiry_duration", "X"}, EnvVars: []string{"NTFY_ATTACHMENT_EXPIRY_DURATION"}, Value: util.FormatDuration(server.DefaultAttachmentExpiryDuration), Usage: "duration after which uploaded attachments will be deleted (e.g. 3h, 20h)"}), diff --git a/docs/config.md b/docs/config.md index b9c8f07f..c9e6687d 100644 --- a/docs/config.md +++ b/docs/config.md @@ -489,30 +489,41 @@ Subscribers can retrieve cached messaging using the [`poll=1` parameter](subscri ## Attachments If desired, you may allow users to upload and [attach files to notifications](publish.md#attachments). To enable -this feature, you have to simply configure an attachment cache directory and a base URL (`attachment-cache-dir`, `base-url`). -Once these options are set and the directory is writable by the server user, you can upload attachments via PUT. +this feature, you have to configure an attachment storage backend and a base URL (`base-url`). 
Attachments can be stored +either on the [local filesystem](#filesystem-storage) or in an [S3-compatible object store](#s3-storage), both configured via the `attachment-cache-dir` option. +Once configured, you can upload attachments via PUT. -By default, attachments are stored in the disk-cache **for only 3 hours**. The main reason for this is to avoid legal issues -and such when hosting user controlled content. Typically, this is more than enough time for the user (or the auto download -feature) to download the file. The following config options are relevant to attachments: +By default, attachments are stored **for only 3 hours**. The main reason for this is to avoid legal issues +when hosting user-controlled content. Typically, this is more than enough time for the user (or the auto download +feature) to download the file. You can increase this time by [purchasing ntfy Pro](https://ntfy.sh/app) via the web app. + +The following config options are relevant to attachments: * `base-url` is the root URL for the ntfy server; this is needed for the generated attachment URLs -* `attachment-cache-dir` is the cache directory for attached files -* `attachment-total-size-limit` is the size limit of the on-disk attachment cache (default: 5G) +* `attachment-cache-dir` is the cache directory for attached files, or an S3 URL for object storage +* `attachment-total-size-limit` is the size limit of the attachment storage (default: 5G) * `attachment-file-size-limit` is the per-file attachment size limit (e.g. 300k, 2M, 100M, default: 15M) * `attachment-expiry-duration` is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h, default: 3h) -Here's an example config using mostly the defaults (except for the cache directory, which is empty by default): +!!! warning + ntfy takes full control over the attachment directory or S3 bucket. Files whose names match the message ID format but + have no corresponding entry in the message table will be deleted. **Do not use a directory or S3 bucket that is also used for something else.** + +Please also refer to the [rate limiting](#rate-limiting) settings below, specifically `visitor-attachment-total-size-limit` +and `visitor-attachment-daily-bandwidth-limit`. Setting these conservatively is necessary to avoid abuse. + +### Filesystem storage +Here's an example config using the local filesystem for attachment storage: === "/etc/ntfy/server.yml (minimal)" ``` yaml - base-url: "https://ntfy.sh" + base-url: "https://ntfy.example.com" attachment-cache-dir: "/var/cache/ntfy/attachments" ``` === "/etc/ntfy/server.yml (all options)" ``` yaml - base-url: "https://ntfy.sh" + base-url: "https://ntfy.example.com" attachment-cache-dir: "/var/cache/ntfy/attachments" attachment-total-size-limit: "5G" attachment-file-size-limit: "15M" @@ -521,8 +532,70 @@ Here's an example config using mostly the defaults (except for the cache directo visitor-attachment-daily-bandwidth-limit: "500M" ``` -Please also refer to the [rate limiting](#rate-limiting) settings below, specifically `visitor-attachment-total-size-limit` -and `visitor-attachment-daily-bandwidth-limit`. Setting these conservatively is necessary to avoid abuse. +### S3 storage +As an alternative to the local filesystem, you can store attachments in an S3-compatible object store (e.g. [AWS S3](https://aws.amazon.com/s3/), +[DigitalOcean Spaces](https://www.digitalocean.com/products/spaces)). This is useful for HA/cloud deployments where you don't want to rely on local disk storage. 
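Note for reviewers: the server-side wiring that selects between the two stores is not part of the hunks shown in this diff. A minimal sketch of how the `attachment-cache-dir` value could be dispatched to the constructors added in `attachment/store.go` is shown below; the `newAttachmentStore` helper, the `server` package placement, and the `s3://` prefix check are illustrative assumptions, only the `NewFileStore`/`NewS3Store` signatures are taken from this diff.

``` go
package server

import (
	"strings"

	"heckel.io/ntfy/v2/attachment"
)

// newAttachmentStore is a hypothetical helper: it treats an s3:// value of
// attachment-cache-dir as an object storage URL and anything else as a local
// directory. localIDs is the callback the store uses during background sync
// to decide which attachment files still have a message in the cache.
func newAttachmentStore(cacheDir string, totalSizeLimit int64, localIDs func() ([]string, error)) (*attachment.Store, error) {
	if strings.HasPrefix(cacheDir, "s3://") {
		return attachment.NewS3Store(cacheDir, totalSizeLimit, localIDs)
	}
	return attachment.NewFileStore(cacheDir, totalSizeLimit, localIDs)
}
```

Either constructor returns the same `*attachment.Store`, so the rest of the server (Write/Read/Remove and the 15-minute background sync) stays backend-agnostic.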
+To use an S3-compatible storage for attachments, set `attachment-cache-dir` to an S3 URL with the following format: + +``` +s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT] +``` + +Here are a few examples: + +=== "/etc/ntfy/server.yml (DigitalOcean Spaces)" + ``` yaml + base-url: "https://ntfy.example.com" + attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com" + ``` + +=== "/etc/ntfy/server.yml (AWS S3)" + ``` yaml + base-url: "https://ntfy.example.com" + attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=us-east-1" + ``` + +=== "/etc/ntfy/server.yml (custom endpoint)" + ``` yaml + base-url: "https://ntfy.example.com" + attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=us-east-1&endpoint=https://s3.example.com" + ``` + +Note that the access key and secret key may have to be URL encoded. For instance, a secret key `YmxhY+mxhYmxhC` (note the `+`) should +be encoded as `YmxhY%2BmxhYmxhC` (note the `%2B`), so the URL would be `s3://ACCESS_KEY:YmxhY%2BmxhYmxhC@my-bucket/attachments...`. + +!!! info + ntfy.sh is hosted and sponsored by DigitalOcean. I can highly recommend their public cloud offering. It's been rock solid + for 4 years. They offer an S3-compatible storage for $5/month and 250 GB of storage, with 1 TiB of bandwidth. + Also, if you **use [this referral link](https://m.do.co/c/442b929528db), you can get $200 credit**. + +For AWS S3, the IAM user needs the following permissions on the bucket: + +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:ListBucketMultipartUploads" + ], + "Resource": "arn:aws:s3:::BUCKET_NAME" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:AbortMultipartUpload" + ], + "Resource": "arn:aws:s3:::BUCKET_NAME/*" + } + ] +} +``` ## Access control By default, the ntfy server is open for everyone, meaning **everyone can read and write to any topic** (this is how @@ -2094,80 +2167,80 @@ variable before running the `ntfy` command (e.g. `export NTFY_LISTEN_HTTP=:80`). `cache_duration` and `cache-duration` are both supported. This is to support stricter YAML parsers that do not support dashes. -| Config option | Env variable | Format | Default | Description | -|--------------------------------------------|-------------------------------------------------|-----------------------------------------------------|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `base-url` | `NTFY_BASE_URL` | *URL* | - | Public facing base URL of the service (e.g. `https://ntfy.sh`) | -| `listen-http` | `NTFY_LISTEN_HTTP` | `[host]:port` | `:80` | Listen address for the HTTP web server | -| `listen-https` | `NTFY_LISTEN_HTTPS` | `[host]:port` | - | Listen address for the HTTPS web server. If set, you also need to set `key-file` and `cert-file`. | -| `listen-unix` | `NTFY_LISTEN_UNIX` | *filename* | - | Path to a Unix socket to listen on | -| `listen-unix-mode` | `NTFY_LISTEN_UNIX_MODE` | *file mode* | *system default* | File mode of the Unix socket, e.g. 0700 or 0777 | -| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. 
| -| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. | -| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). | +| Config option | Env variable | Format | Default | Description | +|--------------------------------------------|-------------------------------------------------|-----------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `base-url` | `NTFY_BASE_URL` | *URL* | - | Public facing base URL of the service (e.g. `https://ntfy.sh`) | +| `listen-http` | `NTFY_LISTEN_HTTP` | `[host]:port` | `:80` | Listen address for the HTTP web server | +| `listen-https` | `NTFY_LISTEN_HTTPS` | `[host]:port` | - | Listen address for the HTTPS web server. If set, you also need to set `key-file` and `cert-file`. | +| `listen-unix` | `NTFY_LISTEN_UNIX` | *filename* | - | Path to a Unix socket to listen on | +| `listen-unix-mode` | `NTFY_LISTEN_UNIX_MODE` | *file mode* | *system default* | File mode of the Unix socket, e.g. 0700 or 0777 | +| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. | +| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. | +| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). | | `database-url` | `NTFY_DATABASE_URL` | *string (connection URL)* | - | PostgreSQL connection string (e.g. `postgres://user:pass@host:5432/ntfy`). If set, uses PostgreSQL for all database-backed stores (message cache, user manager, web push) instead of SQLite. See [database options](#database-options). | -| `database-replica-urls` | `NTFY_DATABASE_REPLICA_URLS` | *list of strings (connection URLs)* | - | PostgreSQL read replica connection strings. Non-critical read-only queries are distributed across replicas (round-robin) with automatic fallback to primary. Requires `database-url`. See [read replicas](#read-replicas). | -| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). | -| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. 
| -| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) | -| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) | -| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) | -| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control (SQLite). If set, enables authentication and access control. Not required if `database-url` is set. See [access control](#access-control). | -| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. | -| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) | -| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) | -| `proxy-trusted-hosts` | `NTFY_PROXY_TRUSTED_HOSTS` | *comma-separated host/IP/CIDR list* | - | Comma-separated list of trusted IP addresses, hosts, or CIDRs to remove from forwarded header | -| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory* | - | Cache directory for attached files. To enable attachments, this has to be set. | -| `attachment-total-size-limit` | `NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 5G | Limit of the on-disk attachment cache directory. If the limits is exceeded, new attachments will be rejected. | -| `attachment-file-size-limit` | `NTFY_ATTACHMENT_FILE_SIZE_LIMIT` | *size* | 15M | Per-file attachment size limit (e.g. 300k, 2M, 100M). Larger attachment will be rejected. | -| `attachment-expiry-duration` | `NTFY_ATTACHMENT_EXPIRY_DURATION` | *duration* | 3h | Duration after which uploaded attachments will be deleted (e.g. 3h, 20h). Strongly affects `visitor-attachment-total-size-limit`. | -| `smtp-sender-addr` | `NTFY_SMTP_SENDER_ADDR` | `host:port` | - | SMTP server address to allow email sending | -| `smtp-sender-user` | `NTFY_SMTP_SENDER_USER` | *string* | - | SMTP user; only used if e-mail sending is enabled | -| `smtp-sender-pass` | `NTFY_SMTP_SENDER_PASS` | *string* | - | SMTP password; only used if e-mail sending is enabled | -| `smtp-sender-from` | `NTFY_SMTP_SENDER_FROM` | *e-mail address* | - | SMTP sender e-mail address; only used if e-mail sending is enabled | -| `smtp-server-listen` | `NTFY_SMTP_SERVER_LISTEN` | `[ip]:port` | - | Defines the IP address and port the SMTP server will listen on, e.g. `:25` or `1.2.3.4:25` | -| `smtp-server-domain` | `NTFY_SMTP_SERVER_DOMAIN` | *domain name* | - | SMTP server e-mail domain, e.g. `ntfy.sh` | -| `smtp-server-addr-prefix` | `NTFY_SMTP_SERVER_ADDR_PREFIX` | *string* | - | Optional prefix for the e-mail addresses to prevent spam, e.g. `ntfy-` | -| `twilio-account` | `NTFY_TWILIO_ACCOUNT` | *string* | - | Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586 | -| `twilio-auth-token` | `NTFY_TWILIO_AUTH_TOKEN` | *string* | - | Twilio auth token, e.g. 
affebeef258625862586258625862586 | -| `twilio-phone-number` | `NTFY_TWILIO_PHONE_NUMBER` | *string* | - | Twilio outgoing phone number, e.g. +18775132586 | -| `twilio-verify-service` | `NTFY_TWILIO_VERIFY_SERVICE` | *string* | - | Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586 | -| `keepalive-interval` | `NTFY_KEEPALIVE_INTERVAL` | *duration* | 45s | Interval in which keepalive messages are sent to the client. This is to prevent intermediaries closing the connection for inactivity. Note that the Android app has a hardcoded timeout at 77s, so it should be less than that. | -| `manager-interval` | `NTFY_MANAGER_INTERVAL` | *duration* | 1m | Interval in which the manager prunes old messages, deletes topics and prints the stats. | -| `message-size-limit` | `NTFY_MESSAGE_SIZE_LIMIT` | *size* | 4K | The size limit for the message body. Please note that this is largely untested, and that FCM/APNS have limits around 4KB. If you increase this size limit, FCM and APNS will NOT work for large messages. | -| `message-delay-limit` | `NTFY_MESSAGE_DELAY_LIMIT` | *duration* | 3d | Amount of time a message can be [scheduled](publish.md#scheduled-delivery) into the future when using the `Delay` header | -| `global-topic-limit` | `NTFY_GLOBAL_TOPIC_LIMIT` | *number* | 15,000 | Rate limiting: Total number of topics before the server rejects new topics. | -| `upstream-base-url` | `NTFY_UPSTREAM_BASE_URL` | *URL* | `https://ntfy.sh` | Forward poll request to an upstream server, this is needed for iOS push notifications for self-hosted servers | -| `upstream-access-token` | `NTFY_UPSTREAM_ACCESS_TOKEN` | *string* | `tk_zyYLYj...` | Access token to use for the upstream server; needed only if upstream rate limits are exceeded or upstream server requires auth | -| `visitor-attachment-total-size-limit` | `NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 100M | Rate limiting: Total storage limit used for attachments per visitor, for all attachments combined. Storage is freed after attachments expire. See `attachment-expiry-duration`. | -| `visitor-attachment-daily-bandwidth-limit` | `NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT` | *size* | 500M | Rate limiting: Total daily attachment download/upload traffic limit per visitor. This is to protect your bandwidth costs from exploding. | -| `visitor-email-limit-burst` | `NTFY_VISITOR_EMAIL_LIMIT_BURST` | *number* | 16 | Rate limiting:Initial limit of e-mails per visitor | -| `visitor-email-limit-replenish` | `NTFY_VISITOR_EMAIL_LIMIT_REPLENISH` | *duration* | 1h | Rate limiting: Strongly related to `visitor-email-limit-burst`: The rate at which the bucket is refilled | -| `visitor-message-daily-limit` | `NTFY_VISITOR_MESSAGE_DAILY_LIMIT` | *number* | - | Rate limiting: Allowed number of messages per day per visitor, reset every day at midnight (UTC). By default, this value is unset. | -| `visitor-request-limit-burst` | `NTFY_VISITOR_REQUEST_LIMIT_BURST` | *number* | 60 | Rate limiting: Allowed GET/PUT/POST requests per second, per visitor. 
This setting is the initial bucket of requests each visitor has | -| `visitor-request-limit-replenish` | `NTFY_VISITOR_REQUEST_LIMIT_REPLENISH` | *duration* | 5s | Rate limiting: Strongly related to `visitor-request-limit-burst`: The rate at which the bucket is refilled | -| `visitor-request-limit-exempt-hosts` | `NTFY_VISITOR_REQUEST_LIMIT_EXEMPT_HOSTS` | *comma-separated host/IP/CIDR list* | - | Rate limiting: List of hostnames and IPs to be exempt from request rate limiting | -| `visitor-subscription-limit` | `NTFY_VISITOR_SUBSCRIPTION_LIMIT` | *number* | 30 | Rate limiting: Number of subscriptions per visitor (IP address) | -| `visitor-subscriber-rate-limiting` | `NTFY_VISITOR_SUBSCRIBER_RATE_LIMITING` | *bool* | `false` | Rate limiting: Enables subscriber-based rate limiting | -| `visitor-prefix-bits-ipv4` | `NTFY_VISITOR_PREFIX_BITS_IPV4` | *number* | 32 | Rate limiting: Number of bits to use for IPv4 visitor prefix, e.g. 24 for /24 | -| `visitor-prefix-bits-ipv6` | `NTFY_VISITOR_PREFIX_BITS_IPV6` | *number* | 64 | Rate limiting: Number of bits to use for IPv6 visitor prefix, e.g. 48 for /48 | -| `web-root` | `NTFY_WEB_ROOT` | *path*, e.g. `/` or `/app`, or `disable` | `/` | Sets root of the web app (e.g. /, or /app), or disables it entirely (disable) | -| `enable-signup` | `NTFY_ENABLE_SIGNUP` | *boolean* (`true` or `false`) | `false` | Allows users to sign up via the web app, or API | -| `enable-login` | `NTFY_ENABLE_LOGIN` | *boolean* (`true` or `false`) | `false` | Allows users to log in via the web app, or API | -| `enable-reservations` | `NTFY_ENABLE_RESERVATIONS` | *boolean* (`true` or `false`) | `false` | Allows users to reserve topics (if their tier allows it) | -| `require-login` | `NTFY_REQUIRE_LOGIN` | *boolean* (`true` or `false`) | `false` | All actions via the web app require a login | -| `stripe-secret-key` | `NTFY_STRIPE_SECRET_KEY` | *string* | - | Payments: Key used for the Stripe API communication, this enables payments | -| `stripe-webhook-key` | `NTFY_STRIPE_WEBHOOK_KEY` | *string* | - | Payments: Key required to validate the authenticity of incoming webhooks from Stripe | -| `billing-contact` | `NTFY_BILLING_CONTACT` | *email address* or *website* | - | Payments: Email or website displayed in Upgrade dialog as a billing contact | -| `web-push-public-key` | `NTFY_WEB_PUSH_PUBLIC_KEY` | *string* | - | Web Push: Public Key. Run `ntfy webpush keys` to generate | -| `web-push-private-key` | `NTFY_WEB_PUSH_PRIVATE_KEY` | *string* | - | Web Push: Private Key. Run `ntfy webpush keys` to generate | -| `web-push-file` | `NTFY_WEB_PUSH_FILE` | *string* | - | Web Push: Database file that stores subscriptions | -| `web-push-email-address` | `NTFY_WEB_PUSH_EMAIL_ADDRESS` | *string* | - | Web Push: Sender email address | -| `web-push-startup-queries` | `NTFY_WEB_PUSH_STARTUP_QUERIES` | *string* | - | Web Push: SQL queries to run against subscription database at startup | -| `web-push-expiry-duration` | `NTFY_WEB_PUSH_EXPIRY_DURATION` | *duration* | 60d | Web Push: Duration after which a subscription is considered stale and will be deleted. This is to prevent stale subscriptions. | -| `web-push-expiry-warning-duration` | `NTFY_WEB_PUSH_EXPIRY_WARNING_DURATION` | *duration* | 55d | Web Push: Duration after which a warning is sent to subscribers that their subscription will expire soon. This is to prevent stale subscriptions. 
| -| `log-format` | `NTFY_LOG_FORMAT` | *string* | `text` | Defines the output format, can be text or json | -| `log-file` | `NTFY_LOG_FILE` | *string* | - | Defines the filename to write logs to. If this is not set, ntfy logs to stderr | -| `log-level` | `NTFY_LOG_LEVEL` | *string* | `info` | Defines the default log level, can be one of trace, debug, info, warn or error | +| `database-replica-urls` | `NTFY_DATABASE_REPLICA_URLS` | *list of strings (connection URLs)* | - | PostgreSQL read replica connection strings. Non-critical read-only queries are distributed across replicas (round-robin) with automatic fallback to primary. Requires `database-url`. | +| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). | +| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. | +| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) | +| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) | +| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) | +| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control (SQLite). If set, enables authentication and access control. Not required if `database-url` is set. See [access control](#access-control). | +| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. | +| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) | +| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) | +| `proxy-trusted-hosts` | `NTFY_PROXY_TRUSTED_HOSTS` | *comma-separated host/IP/CIDR list* | - | Comma-separated list of trusted IP addresses, hosts, or CIDRs to remove from forwarded header | +| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory or S3 URL* | - | Cache directory for attached files, or S3 URL for object storage (format: `s3://KEY:SECRET@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]`). | +| `attachment-total-size-limit` | `NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 5G | Limit of the on-disk attachment cache directory. If the limits is exceeded, new attachments will be rejected. | +| `attachment-file-size-limit` | `NTFY_ATTACHMENT_FILE_SIZE_LIMIT` | *size* | 15M | Per-file attachment size limit (e.g. 300k, 2M, 100M). Larger attachment will be rejected. | +| `attachment-expiry-duration` | `NTFY_ATTACHMENT_EXPIRY_DURATION` | *duration* | 3h | Duration after which uploaded attachments will be deleted (e.g. 3h, 20h). Strongly affects `visitor-attachment-total-size-limit`. 
| +| `smtp-sender-addr` | `NTFY_SMTP_SENDER_ADDR` | `host:port` | - | SMTP server address to allow email sending | +| `smtp-sender-user` | `NTFY_SMTP_SENDER_USER` | *string* | - | SMTP user; only used if e-mail sending is enabled | +| `smtp-sender-pass` | `NTFY_SMTP_SENDER_PASS` | *string* | - | SMTP password; only used if e-mail sending is enabled | +| `smtp-sender-from` | `NTFY_SMTP_SENDER_FROM` | *e-mail address* | - | SMTP sender e-mail address; only used if e-mail sending is enabled | +| `smtp-server-listen` | `NTFY_SMTP_SERVER_LISTEN` | `[ip]:port` | - | Defines the IP address and port the SMTP server will listen on, e.g. `:25` or `1.2.3.4:25` | +| `smtp-server-domain` | `NTFY_SMTP_SERVER_DOMAIN` | *domain name* | - | SMTP server e-mail domain, e.g. `ntfy.sh` | +| `smtp-server-addr-prefix` | `NTFY_SMTP_SERVER_ADDR_PREFIX` | *string* | - | Optional prefix for the e-mail addresses to prevent spam, e.g. `ntfy-` | +| `twilio-account` | `NTFY_TWILIO_ACCOUNT` | *string* | - | Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586 | +| `twilio-auth-token` | `NTFY_TWILIO_AUTH_TOKEN` | *string* | - | Twilio auth token, e.g. affebeef258625862586258625862586 | +| `twilio-phone-number` | `NTFY_TWILIO_PHONE_NUMBER` | *string* | - | Twilio outgoing phone number, e.g. +18775132586 | +| `twilio-verify-service` | `NTFY_TWILIO_VERIFY_SERVICE` | *string* | - | Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586 | +| `keepalive-interval` | `NTFY_KEEPALIVE_INTERVAL` | *duration* | 45s | Interval in which keepalive messages are sent to the client. This is to prevent intermediaries closing the connection for inactivity. Note that the Android app has a hardcoded timeout at 77s, so it should be less than that. | +| `manager-interval` | `NTFY_MANAGER_INTERVAL` | *duration* | 1m | Interval in which the manager prunes old messages, deletes topics and prints the stats. | +| `message-size-limit` | `NTFY_MESSAGE_SIZE_LIMIT` | *size* | 4K | The size limit for the message body. Please note that this is largely untested, and that FCM/APNS have limits around 4KB. If you increase this size limit, FCM and APNS will NOT work for large messages. | +| `message-delay-limit` | `NTFY_MESSAGE_DELAY_LIMIT` | *duration* | 3d | Amount of time a message can be [scheduled](publish.md#scheduled-delivery) into the future when using the `Delay` header | +| `global-topic-limit` | `NTFY_GLOBAL_TOPIC_LIMIT` | *number* | 15,000 | Rate limiting: Total number of topics before the server rejects new topics. | +| `upstream-base-url` | `NTFY_UPSTREAM_BASE_URL` | *URL* | `https://ntfy.sh` | Forward poll request to an upstream server, this is needed for iOS push notifications for self-hosted servers | +| `upstream-access-token` | `NTFY_UPSTREAM_ACCESS_TOKEN` | *string* | `tk_zyYLYj...` | Access token to use for the upstream server; needed only if upstream rate limits are exceeded or upstream server requires auth | +| `visitor-attachment-total-size-limit` | `NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 100M | Rate limiting: Total storage limit used for attachments per visitor, for all attachments combined. Storage is freed after attachments expire. See `attachment-expiry-duration`. | +| `visitor-attachment-daily-bandwidth-limit` | `NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT` | *size* | 500M | Rate limiting: Total daily attachment download/upload traffic limit per visitor. This is to protect your bandwidth costs from exploding. 
| +| `visitor-email-limit-burst` | `NTFY_VISITOR_EMAIL_LIMIT_BURST` | *number* | 16 | Rate limiting: Initial limit of e-mails per visitor | +| `visitor-email-limit-replenish` | `NTFY_VISITOR_EMAIL_LIMIT_REPLENISH` | *duration* | 1h | Rate limiting: Strongly related to `visitor-email-limit-burst`: The rate at which the bucket is refilled | +| `visitor-message-daily-limit` | `NTFY_VISITOR_MESSAGE_DAILY_LIMIT` | *number* | - | Rate limiting: Allowed number of messages per day per visitor, reset every day at midnight (UTC). By default, this value is unset. | +| `visitor-request-limit-burst` | `NTFY_VISITOR_REQUEST_LIMIT_BURST` | *number* | 60 | Rate limiting: Allowed GET/PUT/POST requests per second, per visitor. This setting is the initial bucket of requests each visitor has | +| `visitor-request-limit-replenish` | `NTFY_VISITOR_REQUEST_LIMIT_REPLENISH` | *duration* | 5s | Rate limiting: Strongly related to `visitor-request-limit-burst`: The rate at which the bucket is refilled | +| `visitor-request-limit-exempt-hosts` | `NTFY_VISITOR_REQUEST_LIMIT_EXEMPT_HOSTS` | *comma-separated host/IP/CIDR list* | - | Rate limiting: List of hostnames and IPs to be exempt from request rate limiting | +| `visitor-subscription-limit` | `NTFY_VISITOR_SUBSCRIPTION_LIMIT` | *number* | 30 | Rate limiting: Number of subscriptions per visitor (IP address) | +| `visitor-subscriber-rate-limiting` | `NTFY_VISITOR_SUBSCRIBER_RATE_LIMITING` | *bool* | `false` | Rate limiting: Enables subscriber-based rate limiting | +| `visitor-prefix-bits-ipv4` | `NTFY_VISITOR_PREFIX_BITS_IPV4` | *number* | 32 | Rate limiting: Number of bits to use for IPv4 visitor prefix, e.g. 24 for /24 | +| `visitor-prefix-bits-ipv6` | `NTFY_VISITOR_PREFIX_BITS_IPV6` | *number* | 64 | Rate limiting: Number of bits to use for IPv6 visitor prefix, e.g. 48 for /48 | +| `web-root` | `NTFY_WEB_ROOT` | *path*, e.g. `/` or `/app`, or `disable` | `/` | Sets root of the web app (e.g. /, or /app), or disables it entirely (disable) | +| `enable-signup` | `NTFY_ENABLE_SIGNUP` | *boolean* (`true` or `false`) | `false` | Allows users to sign up via the web app, or API | +| `enable-login` | `NTFY_ENABLE_LOGIN` | *boolean* (`true` or `false`) | `false` | Allows users to log in via the web app, or API | +| `enable-reservations` | `NTFY_ENABLE_RESERVATIONS` | *boolean* (`true` or `false`) | `false` | Allows users to reserve topics (if their tier allows it) | +| `require-login` | `NTFY_REQUIRE_LOGIN` | *boolean* (`true` or `false`) | `false` | All actions via the web app require a login | +| `stripe-secret-key` | `NTFY_STRIPE_SECRET_KEY` | *string* | - | Payments: Key used for the Stripe API communication, this enables payments | +| `stripe-webhook-key` | `NTFY_STRIPE_WEBHOOK_KEY` | *string* | - | Payments: Key required to validate the authenticity of incoming webhooks from Stripe | +| `billing-contact` | `NTFY_BILLING_CONTACT` | *email address* or *website* | - | Payments: Email or website displayed in Upgrade dialog as a billing contact | +| `web-push-public-key` | `NTFY_WEB_PUSH_PUBLIC_KEY` | *string* | - | Web Push: Public Key. Run `ntfy webpush keys` to generate | +| `web-push-private-key` | `NTFY_WEB_PUSH_PRIVATE_KEY` | *string* | - | Web Push: Private Key.
Run `ntfy webpush keys` to generate | +| `web-push-file` | `NTFY_WEB_PUSH_FILE` | *string* | - | Web Push: Database file that stores subscriptions | +| `web-push-email-address` | `NTFY_WEB_PUSH_EMAIL_ADDRESS` | *string* | - | Web Push: Sender email address | +| `web-push-startup-queries` | `NTFY_WEB_PUSH_STARTUP_QUERIES` | *string* | - | Web Push: SQL queries to run against subscription database at startup | +| `web-push-expiry-duration` | `NTFY_WEB_PUSH_EXPIRY_DURATION` | *duration* | 60d | Web Push: Duration after which a subscription is considered stale and will be deleted. This is to prevent stale subscriptions. | +| `web-push-expiry-warning-duration` | `NTFY_WEB_PUSH_EXPIRY_WARNING_DURATION` | *duration* | 55d | Web Push: Duration after which a warning is sent to subscribers that their subscription will expire soon. This is to prevent stale subscriptions. | +| `log-format` | `NTFY_LOG_FORMAT` | *string* | `text` | Defines the output format, can be text or json | +| `log-file` | `NTFY_LOG_FILE` | *string* | - | Defines the filename to write logs to. If this is not set, ntfy logs to stderr | +| `log-level` | `NTFY_LOG_LEVEL` | *string* | `info` | Defines the default log level, can be one of trace, debug, info, warn or error | The format for a *duration* is: `<number>(smhd)`, e.g. 30s, 20m, 1h or 3d. The format for a *size* is: `<number>(GMK)`, e.g. 1G, 200M or 4000k. @@ -2218,7 +2291,7 @@ OPTIONS: --auth-file value, --auth_file value, -H value auth database file used for access control [$NTFY_AUTH_FILE] --auth-startup-queries value, --auth_startup_queries value queries run when the auth database is initialized [$NTFY_AUTH_STARTUP_QUERIES] --auth-default-access value, --auth_default_access value, -p value default permissions if no matching entries in the auth database are found (default: "read-write") [$NTFY_AUTH_DEFAULT_ACCESS] - --attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files [$NTFY_ATTACHMENT_CACHE_DIR] + --attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]) [$NTFY_ATTACHMENT_CACHE_DIR] --attachment-total-size-limit value, --attachment_total_size_limit value, -A value limit of the on-disk attachment cache (default: "5G") [$NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT] --attachment-file-size-limit value, --attachment_file_size_limit value, -Y value per-file attachment size limit (e.g. 300k, 2M, 100M) (default: "15M") [$NTFY_ATTACHMENT_FILE_SIZE_LIMIT] --attachment-expiry-duration value, --attachment_expiry_duration value, -X value duration after which uploaded attachments will be deleted (e.g. 3h, 20h) (default: "3h") [$NTFY_ATTACHMENT_EXPIRY_DURATION] diff --git a/docs/releases.md b/docs/releases.md index 44b42243..975c7536 100644 --- a/docs/releases.md +++ b/docs/releases.md @@ -1800,6 +1800,10 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release ### ntfy server v2.20.x (UNRELEASED) +**Features:** + +* Add S3-compatible object storage as an alternative [attachment store](config.md#attachments) via `attachment-cache-dir` config option + **Bug fixes + maintenance:** * Reject invalid e-mail addresses (e.g.
multiple comma-separated recipients) with HTTP 400 diff --git a/go.mod b/go.mod index 4f0451b6..7edd3710 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require github.com/pkg/errors v0.9.1 // indirect require ( firebase.google.com/go/v4 v4.19.0 github.com/SherClockHolmes/webpush-go v1.4.0 - github.com/jackc/pgx/v5 v5.8.0 + github.com/jackc/pgx/v5 v5.9.0 github.com/microcosm-cc/bluemonday v1.0.27 github.com/prometheus/client_golang v1.23.2 github.com/stripe/stripe-go/v74 v74.30.0 diff --git a/go.sum b/go.sum index 0851929d..97738e0f 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= -github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/pgx/v5 v5.9.0 h1:T/dI+2TvmI2H8s/KH1/lXIbz1CUFk3gn5oTjr0/mBsE= +github.com/jackc/pgx/v5 v5.9.0/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= diff --git a/message/cache.go b/message/cache.go index 76aba4be..dd4ef0a4 100644 --- a/message/cache.go +++ b/message/cache.go @@ -46,6 +46,7 @@ type queries struct { selectStats string updateStats string updateMessageTime string + selectAttachmentIDs string } // Cache stores published messages @@ -252,18 +253,7 @@ func (c *Cache) MessagesExpired() ([]string, error) { return nil, err } defer rows.Close() - ids := make([]string, 0) - for rows.Next() { - var id string - if err := rows.Scan(&id); err != nil { - return nil, err - } - ids = append(ids, id) - } - if err := rows.Err(); err != nil { - return nil, err - } - return ids, nil + return readStrings(rows) } // Message returns the message with the given ID, or ErrMessageNotFound if not found @@ -319,18 +309,7 @@ func (c *Cache) Topics() ([]string, error) { return nil, err } defer rows.Close() - topics := make([]string, 0) - for rows.Next() { - var id string - if err := rows.Scan(&id); err != nil { - return nil, err - } - topics = append(topics, id) - } - if err := rows.Err(); err != nil { - return nil, err - } - return topics, nil + return readStrings(rows) } // DeleteMessages deletes the messages with the given IDs @@ -358,15 +337,8 @@ func (c *Cache) DeleteScheduledBySequenceID(topic, sequenceID string) ([]string, return nil, err } defer rows.Close() - ids := make([]string, 0) - for rows.Next() { - var id string - if err := rows.Scan(&id); err != nil { - return nil, err - } - ids = append(ids, id) - } - if err := rows.Err(); err != nil { + ids, err := readStrings(rows) + if err != nil { return nil, err } rows.Close() // Close rows before executing delete in same transaction @@ -391,6 +363,16 @@ func (c *Cache) ExpireMessages(topics ...string) error { }) } +// AttachmentIDs returns message IDs with active (non-expired, non-deleted) attachments +func (c *Cache) AttachmentIDs() ([]string, error) { + rows, err := c.db.ReadOnly().Query(c.queries.selectAttachmentIDs, 
time.Now().Unix()) + if err != nil { + return nil, err + } + defer rows.Close() + return readStrings(rows) +} + // AttachmentsExpired returns message IDs with expired attachments that have not been deleted func (c *Cache) AttachmentsExpired() ([]string, error) { rows, err := c.db.Query(c.queries.selectAttachmentsExpired, time.Now().Unix()) @@ -398,18 +380,7 @@ func (c *Cache) AttachmentsExpired() ([]string, error) { return nil, err } defer rows.Close() - ids := make([]string, 0) - for rows.Next() { - var id string - if err := rows.Scan(&id); err != nil { - return nil, err - } - ids = append(ids, id) - } - if err := rows.Err(); err != nil { - return nil, err - } - return ids, nil + return readStrings(rows) } // MarkAttachmentsDeleted marks the attachments for the given message IDs as deleted @@ -590,3 +561,18 @@ func readMessage(rows *sql.Rows) (*model.Message, error) { Encoding: encoding, }, nil } + +func readStrings(rows *sql.Rows) ([]string, error) { + strs := make([]string, 0) + for rows.Next() { + var s string + if err := rows.Scan(&s); err != nil { + return nil, err + } + strs = append(strs, s) + } + if err := rows.Err(); err != nil { + return nil, err + } + return strs, nil +} diff --git a/message/cache_postgres.go b/message/cache_postgres.go index ba162da2..d59b2590 100644 --- a/message/cache_postgres.go +++ b/message/cache_postgres.go @@ -74,6 +74,8 @@ const ( postgresSelectStatsQuery = `SELECT value FROM message_stats WHERE key = 'messages'` postgresUpdateStatsQuery = `UPDATE message_stats SET value = $1 WHERE key = 'messages'` postgresUpdateMessageTimeQuery = `UPDATE message SET time = $1 WHERE mid = $2` + + postgresSelectAttachmentIDsQuery = `SELECT mid FROM message WHERE attachment_expires > $1 AND attachment_deleted = FALSE` ) var postgresQueries = queries{ @@ -100,6 +102,7 @@ var postgresQueries = queries{ selectStats: postgresSelectStatsQuery, updateStats: postgresUpdateStatsQuery, updateMessageTime: postgresUpdateMessageTimeQuery, + selectAttachmentIDs: postgresSelectAttachmentIDsQuery, } // NewPostgresStore creates a new PostgreSQL-backed message cache store using an existing database connection pool. diff --git a/message/cache_sqlite.go b/message/cache_sqlite.go index a36aba0e..6126f1e1 100644 --- a/message/cache_sqlite.go +++ b/message/cache_sqlite.go @@ -77,6 +77,8 @@ const ( sqliteSelectStatsQuery = `SELECT value FROM stats WHERE key = 'messages'` sqliteUpdateStatsQuery = `UPDATE stats SET value = ? WHERE key = 'messages'` sqliteUpdateMessageTimeQuery = `UPDATE messages SET time = ? WHERE mid = ?` + + sqliteSelectAttachmentIDsQuery = `SELECT mid FROM messages WHERE attachment_expires > ? AND attachment_deleted = 0` ) var sqliteQueries = queries{ @@ -103,6 +105,7 @@ var sqliteQueries = queries{ selectStats: sqliteSelectStatsQuery, updateStats: sqliteUpdateStatsQuery, updateMessageTime: sqliteUpdateMessageTimeQuery, + selectAttachmentIDs: sqliteSelectAttachmentIDsQuery, } // NewSQLiteStore creates a SQLite file-backed cache diff --git a/s3/client.go b/s3/client.go new file mode 100644 index 00000000..8e84bbc5 --- /dev/null +++ b/s3/client.go @@ -0,0 +1,302 @@ +package s3 + +import ( + "bytes" + "context" + "crypto/md5" //nolint:gosec // MD5 is required by the S3 protocol for Content-MD5 headers + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "heckel.io/ntfy/v2/log" +) + +const ( + tagS3Client = "s3_client" +) + +// Client is a minimal S3-compatible client. 
It supports PutObject, GetObject, DeleteObjects, +// and ListObjectsV2 operations using AWS Signature V4 signing. The bucket and optional key prefix +// are fixed at construction time. All operations target the same bucket and prefix. +// +// The following IAM policy is required for AWS S3: +// +// { +// "Version": "2012-10-17", +// "Statement": [ +// { +// "Effect": "Allow", +// "Action": [ +// "s3:ListBucket", +// "s3:ListBucketMultipartUploads" +// ], +// "Resource": "arn:aws:s3:::BUCKET_NAME" +// }, +// { +// "Effect": "Allow", +// "Action": [ +// "s3:GetObject", +// "s3:PutObject", +// "s3:DeleteObject", +// "s3:AbortMultipartUpload" +// ], +// "Resource": "arn:aws:s3:::BUCKET_NAME/*" +// } +// ] +// } +// +// Fields must not be modified after the Client is passed to any method or goroutine. +type Client struct { + config *Config + http *http.Client +} + +// New creates a new S3 client from the given Config. +func New(config *Config) *Client { + httpClient := config.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + return &Client{ + config: config, + http: httpClient, + } +} + +// PutObject uploads body to the given key. The key is automatically prefixed with the client's +// configured prefix. +// +// If untrustedLength is between 1 and 5 GB, the body is streamed directly to S3 via a +// single PUT request without buffering. The read is limited to untrustedLength bytes; +// any extra data in the body is ignored. If the body is shorter than claimed, the upload fails. +// +// Otherwise (untrustedLength <= 0 or > 5 GB), the first 5 MB are buffered to decide +// between a simple PUT and multipart upload. +// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// and https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +func (c *Client) PutObject(ctx context.Context, key string, body io.Reader, untrustedLength int64) error { + if untrustedLength > 0 && untrustedLength <= maxSinglePutSize { + // Stream directly: Content-Length is known (but untrusted). LimitReader ensures we send at most + // untrustedLength bytes, and any extra data in body is ignored. + return c.putObject(ctx, key, io.LimitReader(body, untrustedLength), untrustedLength) + } + // Buffered path: read first 5 MB to decide simple vs multipart + first := make([]byte, partSize) + n, err := io.ReadFull(body, first) + if errors.Is(err, io.ErrUnexpectedEOF) || err == io.EOF { + return c.putObject(ctx, key, bytes.NewReader(first[:n]), int64(n)) + } else if err != nil { + return fmt.Errorf("error reading object %s from client: %w", key, err) + } + return c.putObjectMultipart(ctx, key, io.MultiReader(bytes.NewReader(first), body)) +} + +// putObject uploads a body with known size using a simple PUT with UNSIGNED-PAYLOAD. +func (c *Client) putObject(ctx context.Context, key string, body io.Reader, size int64) error { + log.Tag(tagS3Client).Debug("Uploading object %s (%d bytes)", key, size) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, c.config.ObjectURL(key), body) + if err != nil { + return fmt.Errorf("creating upload request object %s failed: %w", key, err) + } + req.ContentLength = size + c.signV4(req, unsignedPayload) + resp, err := c.http.Do(req) + if err != nil { + return fmt.Errorf("uploading object %s failed: %w", key, err) + } + defer resp.Body.Close() + if !isHTTPSuccess(resp) { + return parseError(resp) + } + return nil +} + +// GetObject downloads an object. The key is automatically prefixed with the client's configured +// prefix. 
The caller must close the returned ReadCloser. +// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +func (c *Client) GetObject(ctx context.Context, key string) (io.ReadCloser, int64, error) { + log.Tag(tagS3Client).Debug("Fetching object %s", key) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.config.ObjectURL(key), nil) + if err != nil { + return nil, 0, fmt.Errorf("error creating HTTP GET request for %s: %w", key, err) + } + c.signV4(req, emptyPayloadHash) + resp, err := c.http.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("error fetching object %s: %w", key, err) + } else if !isHTTPSuccess(resp) { + err := parseError(resp) + resp.Body.Close() + return nil, 0, err + } + return resp.Body, resp.ContentLength, nil +} + +// ListObjectsV2 returns all objects under the client's configured prefix by paginating through +// ListObjectsV2 results automatically. Keys in the returned objects have the prefix stripped, +// so they match the keys used with PutObject/GetObject/DeleteObjects. It stops after at most maxPages +// pages as a safety valve. +// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html +func (c *Client) ListObjectsV2(ctx context.Context) ([]*Object, error) { + var all []*Object + var token string + for page := 0; page < maxPages; page++ { + result, err := c.listObjectsV2(ctx, token) + if err != nil { + return nil, err + } + for _, obj := range result.Contents { + var lastModified time.Time + if obj.LastModified != "" { + lastModified, _ = time.Parse(time.RFC3339, obj.LastModified) + } + all = append(all, &Object{ + Key: c.config.StripPrefix(obj.Key), + Size: obj.Size, + LastModified: lastModified, + }) + } + if !result.IsTruncated { + return all, nil + } + token = result.NextContinuationToken + } + return nil, fmt.Errorf("listing objects exceeded %d pages", maxPages) +} + +// listObjectsV2 performs a single ListObjectsV2 request using the client's configured prefix. +func (c *Client) listObjectsV2(ctx context.Context, continuationToken string) (*listObjectsV2Result, error) { + if continuationToken == "" { + log.Tag(tagS3Client).Debug("Listing remote objects") + } else { + log.Tag(tagS3Client).Debug("Listing remote objects, continuing with token '%s'", continuationToken) + } + query := url.Values{"list-type": {"2"}} + if prefix := c.config.ListPrefix(); prefix != "" { + query.Set("prefix", prefix) + } + if continuationToken != "" { + query.Set("continuation-token", continuationToken) + } + respBody, err := c.do(ctx, "ListObjects", http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil) + if err != nil { + return nil, err + } + var result listObjectsV2Result + if err := xml.Unmarshal(respBody, &result); err != nil { + return nil, fmt.Errorf("failed to unmarshal list object response: %w", err) + } + return &result, nil +} + +// DeleteObjects removes multiple objects in batched requests. Keys are automatically +// prefixed with the client's configured prefix. S3 supports up to 1000 keys per call; larger +// key lists are split into multiple batches automatically. +// +// Even when S3 returns HTTP 200, individual keys may fail. If any per-key errors are present +// in the response, they are returned as a combined error.
+// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html +func (c *Client) DeleteObjects(ctx context.Context, keys []string) error { + // S3 DeleteObjects supports up to 1000 keys per call + for i := 0; i < len(keys); i += maxDeleteBatchSize { + end := i + maxDeleteBatchSize + if end > len(keys) { + end = len(keys) + } + if err := c.deleteObjects(ctx, keys[i:end]); err != nil { + return err + } + } + return nil +} + +func (c *Client) deleteObjects(ctx context.Context, keys []string) error { + log.Tag(tagS3Client).Debug("Deleting %d object(s)", len(keys)) + req := &deleteObjectsRequest{ + Quiet: true, + } + for _, key := range keys { + req.Objects = append(req.Objects, &deleteObject{Key: c.config.ObjectKey(key)}) + } + body, err := xml.Marshal(req) + if err != nil { + return fmt.Errorf("error marshalling XML for deleting objects: %w", err) + } + + // Content-MD5 is required by the S3 protocol for DeleteObjects requests. + md5Sum := md5.Sum(body) //nolint:gosec + headers := map[string]string{ + "Content-MD5": base64.StdEncoding.EncodeToString(md5Sum[:]), + } + reqURL := c.config.BucketURL() + "?delete" + respBody, err := c.do(ctx, "DeleteObjects", http.MethodPost, reqURL, body, headers) + if err != nil { + return fmt.Errorf("error deleting objects: %w", err) + } + + // S3 may return HTTP 200 with per-key errors in the response body + var result deleteObjectsResult + if err := xml.Unmarshal(respBody, &result); err != nil { + return nil // If we can't parse, assume success (Quiet mode returns empty body on success) + } + if len(result.Errors) > 0 { + var msgs []string + for _, e := range result.Errors { + msgs = append(msgs, fmt.Sprintf("%s: %s", e.Key, e.Message)) + } + return fmt.Errorf("error deleting objects, partial failure: %s", strings.Join(msgs, "; ")) + } + return nil +} + +// do creates a signed request, executes it, reads the response body, and checks for errors. +// If body is nil, the request is sent with an empty payload. If body is non-nil, it is sent +// with a computed SHA-256 payload hash and Content-Type: application/xml. 
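+// For illustration (calls taken from this file): listObjectsV2 invokes do with a nil body,
+// so the request is signed with emptyPayloadHash, while deleteObjects passes the marshalled
+// XML body, which is hashed with SHA-256 and sent with Content-Type: application/xml.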
+func (c *Client) do(ctx context.Context, op, method, reqURL string, body []byte, headers map[string]string) ([]byte, error) { + log.Tag(tagS3Client).Trace("Performing request %s %s %s (body: %d bytes)", op, method, reqURL, len(body)) + var reader io.Reader + var hash string + if body != nil { + reader = bytes.NewReader(body) + hash = sha256Hex(body) + } else { + hash = emptyPayloadHash + } + req, err := http.NewRequestWithContext(ctx, method, reqURL, reader) + if err != nil { + return nil, fmt.Errorf("s3: %s request: %w", op, err) + } + if body != nil { + req.ContentLength = int64(len(body)) + req.Header.Set("Content-Type", "application/xml") + } else { + req.ContentLength = 0 + } + for k, v := range headers { + req.Header.Set(k, v) + } + c.signV4(req, hash) + resp, err := c.http.Do(req) + if err != nil { + return nil, fmt.Errorf("s3: %s: %w", op, err) + } + respBody, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBytes)) + resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("s3: %s read: %w", op, err) + } + if !isHTTPSuccess(resp) { + return nil, parseErrorFromBytes(resp.StatusCode, respBody) + } + return respBody, nil +} diff --git a/s3/client_auth.go b/s3/client_auth.go new file mode 100644 index 00000000..61aba73c --- /dev/null +++ b/s3/client_auth.go @@ -0,0 +1,71 @@ +package s3 + +import ( + "encoding/hex" + "fmt" + "net/http" + "sort" + "strings" + "time" +) + +// signV4 signs req in place using AWS Signature V4. payloadHash is the hex-encoded SHA-256 +// of the request body, or the literal string "UNSIGNED-PAYLOAD" for streaming uploads. +// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (c *Client) signV4(req *http.Request, hash string) { + now := time.Now().UTC() + datestamp := now.Format("20060102") + amzDate := now.Format("20060102T150405Z") + + // Required headers + req.Header.Set("Host", c.config.HostHeader()) + req.Header.Set("X-Amz-Date", amzDate) + req.Header.Set("X-Amz-Content-Sha256", hash) + + // Canonical headers (all headers we set, sorted by lowercase key) + signedKeys := make([]string, 0, len(req.Header)) + canonHeaders := make(map[string]string, len(req.Header)) + for k := range req.Header { + lk := strings.ToLower(k) + signedKeys = append(signedKeys, lk) + canonHeaders[lk] = strings.TrimSpace(req.Header.Get(k)) + } + sort.Strings(signedKeys) + signedHeadersStr := strings.Join(signedKeys, ";") + var chBuf strings.Builder + for _, k := range signedKeys { + chBuf.WriteString(k) + chBuf.WriteByte(':') + chBuf.WriteString(canonHeaders[k]) + chBuf.WriteByte('\n') + } + + // Canonical request + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI(req.URL), + canonicalQueryString(req.URL.Query()), + chBuf.String(), + signedHeadersStr, + hash, + }, "\n") + + // String to sign + credentialScope := datestamp + "/" + c.config.Region + "/s3/aws4_request" + stringToSign := "AWS4-HMAC-SHA256\n" + amzDate + "\n" + credentialScope + "\n" + sha256Hex([]byte(canonicalRequest)) + + // Signing key + signingKey := hmacSHA256(hmacSHA256(hmacSHA256(hmacSHA256( + []byte("AWS4"+c.config.SecretKey), []byte(datestamp)), + []byte(c.config.Region)), + []byte("s3")), + []byte("aws4_request")) + + signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))) + header := fmt.Sprintf( + "AWS4-HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s", + c.config.AccessKey, credentialScope, signedHeadersStr, signature, + ) + req.Header.Set("Authorization", header) +} diff --git 
a/s3/client_multipart.go b/s3/client_multipart.go new file mode 100644 index 00000000..198175d4 --- /dev/null +++ b/s3/client_multipart.go @@ -0,0 +1,187 @@ +package s3 + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" + + "heckel.io/ntfy/v2/log" +) + +// AbortIncompleteUploads lists all in-progress multipart uploads and aborts those initiated +// before the given cutoff time. This cleans up orphaned upload parts from interrupted uploads. +// +// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// and https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +func (c *Client) AbortIncompleteUploads(ctx context.Context, cutoff time.Time) error { + uploads, err := c.listMultipartUploads(ctx) + if err != nil { + return err + } + for _, u := range uploads { + if !u.Initiated.IsZero() && u.Initiated.Before(cutoff) { + c.abortMultipartUpload(ctx, u.Key, u.UploadID) + } + } + return nil +} + +// listMultipartUploads returns in-progress multipart uploads for the client's prefix. +// It paginates automatically, stopping after at most maxPages pages as a safety valve. +func (c *Client) listMultipartUploads(ctx context.Context) ([]*multipartUpload, error) { + var all []*multipartUpload + var keyMarker, uploadIDMarker string + for page := 0; page < maxPages; page++ { + query := url.Values{"uploads": {""}} + if prefix := c.config.ListPrefix(); prefix != "" { + query.Set("prefix", prefix) + } + if keyMarker != "" { + query.Set("key-marker", keyMarker) + query.Set("upload-id-marker", uploadIDMarker) + } + respBody, err := c.do(ctx, "ListMultipartUploads", http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil) + if err != nil { + return nil, err + } + var result listMultipartUploadsResult + if err := xml.Unmarshal(respBody, &result); err != nil { + return nil, fmt.Errorf("error unmarshalling multipart upload result: %w", err) + } + for _, u := range result.Uploads { + var initiated time.Time + if u.Initiated != "" { + initiated, _ = time.Parse(time.RFC3339, u.Initiated) + } + all = append(all, &multipartUpload{ + Key: u.Key, + UploadID: u.UploadID, + Initiated: initiated, + }) + } + if !result.IsTruncated { + return all, nil + } + keyMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + } + return nil, fmt.Errorf("error listing multipart uploads, exceeded %d pages", maxPages) +} + +// abortMultipartUpload cancels an in-progress multipart upload. Called to clean up after upload +// errors and stale incomplete uploads. +func (c *Client) abortMultipartUpload(ctx context.Context, key, uploadID string) { + log.Tag(tagS3Client).Info("Aborting multipart upload for object %s", key) + reqURL := fmt.Sprintf("%s?uploadId=%s", c.config.ObjectURL(key), url.QueryEscape(uploadID)) + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, reqURL, nil) + if err != nil { + return + } + c.signV4(req, emptyPayloadHash) + resp, err := c.http.Do(req) + if err != nil { + return + } + resp.Body.Close() +} + +// putObjectMultipart uploads body using S3 multipart upload. It reads the body in partSize +// chunks, uploading each as a separate part. This allows uploading without knowing the total +// body size in advance.
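+// For example, with the 5 MB partSize defined in this package, a 12 MB body of unknown total
+// size is uploaded as three parts of 5 MB, 5 MB and 2 MB, after which CompleteMultipartUpload
+// is called with the three returned ETags.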
+func (c *Client) putObjectMultipart(ctx context.Context, key string, body io.Reader) error { + log.Tag(tagS3Client).Debug("Uploading multipart object %s", key) + + // Step 1: Initiate multipart upload + uploadID, err := c.initiateMultipartUpload(ctx, key) + if err != nil { + return err + } + + // Step 2: Upload parts + partNumber := 1 + buf := make([]byte, partSize) + var parts []*completedPart + for { + n, err := io.ReadFull(body, buf) + if n > 0 { + etag, uploadErr := c.uploadPart(ctx, key, uploadID, partNumber, buf[:n]) + if uploadErr != nil { + c.abortMultipartUpload(ctx, key, uploadID) + return uploadErr + } + parts = append(parts, &completedPart{ + PartNumber: partNumber, + ETag: etag, + }) + partNumber++ + } + if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) { + break + } else if err != nil { + c.abortMultipartUpload(ctx, key, uploadID) + return fmt.Errorf("error uploading object %s, reading from client failed: %w", key, err) + } + } + + // Step 3: Complete multipart upload + return c.completeMultipartUpload(ctx, key, uploadID, parts) +} + +// initiateMultipartUpload starts a new multipart upload and returns the upload ID. +func (c *Client) initiateMultipartUpload(ctx context.Context, key string) (string, error) { + respBody, err := c.do(ctx, "InitiateMultipartUpload", http.MethodPost, c.config.ObjectURL(key)+"?uploads", nil, nil) + if err != nil { + return "", err + } + var result initiateMultipartUploadResult + if err := xml.Unmarshal(respBody, &result); err != nil { + return "", fmt.Errorf("error unmarshalling initiate multipart upload response: %w", err) + } + return result.UploadID, nil +} + +// uploadPart uploads a single part of a multipart upload and returns the ETag. +func (c *Client) uploadPart(ctx context.Context, key, uploadID string, partNumber int, data []byte) (string, error) { + log.Tag(tagS3Client).Debug("Uploading multipart part for object %s, part %d, size %d", key, partNumber, len(data)) + reqURL := fmt.Sprintf("%s?partNumber=%d&uploadId=%s", c.config.ObjectURL(key), partNumber, url.QueryEscape(uploadID)) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, reqURL, bytes.NewReader(data)) + if err != nil { + return "", fmt.Errorf("error creating multipart upload part request for object %s: %w", key, err) + } + req.ContentLength = int64(len(data)) + c.signV4(req, unsignedPayload) + resp, err := c.http.Do(req) + if err != nil { + return "", fmt.Errorf("error uploading multipart part for object %s: %w", key, err) + } + defer resp.Body.Close() + if !isHTTPSuccess(resp) { + return "", parseError(resp) + } + return resp.Header.Get("ETag"), nil +} + +// completeMultipartUpload finalizes a multipart upload with the given parts. 
+func (c *Client) completeMultipartUpload(ctx context.Context, key, uploadID string, parts []*completedPart) error { + log.Tag(tagS3Client).Debug("Completing multipart upload for object %s, %d parts", key, len(parts)) + bodyBytes, err := xml.Marshal(&completeMultipartUploadRequest{Parts: parts}) + if err != nil { + return fmt.Errorf("error marshalling complete multipart upload request: %w", err) + } + reqURL := fmt.Sprintf("%s?uploadId=%s", c.config.ObjectURL(key), url.QueryEscape(uploadID)) + respBody, err := c.do(ctx, "CompleteMultipartUpload", http.MethodPost, reqURL, bodyBytes, nil) + if err != nil { + return err + } + // Check if the response contains an error (S3 can return 200 with an error body) + var errResp errorResponse + if xml.Unmarshal(respBody, &errResp) == nil && errResp.Code != "" { + return &errResp + } + return nil +} diff --git a/s3/client_test.go b/s3/client_test.go new file mode 100644 index 00000000..23cde72c --- /dev/null +++ b/s3/client_test.go @@ -0,0 +1,414 @@ +package s3 + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestParseURL_Success(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/attachments?region=us-east-1") + require.Nil(t, err) + require.Equal(t, "my-bucket", cfg.Bucket) + require.Equal(t, "attachments", cfg.Prefix) + require.Equal(t, "us-east-1", cfg.Region) + require.Equal(t, "AKID", cfg.AccessKey) + require.Equal(t, "SECRET", cfg.SecretKey) + require.Equal(t, "s3.us-east-1.amazonaws.com", cfg.Endpoint) + require.False(t, cfg.PathStyle) +} + +func TestParseURL_NoPrefix(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1") + require.Nil(t, err) + require.Equal(t, "my-bucket", cfg.Bucket) + require.Equal(t, "", cfg.Prefix) +} + +func TestParseURL_WithEndpoint(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/prefix?region=us-east-1&endpoint=https://s3.example.com") + require.Nil(t, err) + require.Equal(t, "my-bucket", cfg.Bucket) + require.Equal(t, "prefix", cfg.Prefix) + require.Equal(t, "s3.example.com", cfg.Endpoint) + require.True(t, cfg.PathStyle) +} + +func TestParseURL_EndpointHTTP(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1&endpoint=http://localhost:9000") + require.Nil(t, err) + require.Equal(t, "localhost:9000", cfg.Endpoint) + require.True(t, cfg.PathStyle) +} + +func TestParseURL_EndpointTrailingSlash(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1&endpoint=https://s3.example.com/") + require.Nil(t, err) + require.Equal(t, "s3.example.com", cfg.Endpoint) +} + +func TestParseURL_NestedPrefix(t *testing.T) { + cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/a/b/c?region=us-east-1") + require.Nil(t, err) + require.Equal(t, "my-bucket", cfg.Bucket) + require.Equal(t, "a/b/c", cfg.Prefix) +} + +func TestParseURL_MissingRegion(t *testing.T) { + _, err := ParseURL("s3://AKID:SECRET@my-bucket") + require.Error(t, err) + require.Contains(t, err.Error(), "region") +} + +func TestParseURL_MissingCredentials(t *testing.T) { + _, err := ParseURL("s3://my-bucket?region=us-east-1") + require.Error(t, err) + require.Contains(t, err.Error(), "access key") +} + +func TestParseURL_MissingSecretKey(t *testing.T) { + _, err := ParseURL("s3://AKID@my-bucket?region=us-east-1") + require.Error(t, err) + require.Contains(t, err.Error(), "secret key") +} + +func TestParseURL_WrongScheme(t *testing.T) { + _, err := 
ParseURL("http://AKID:SECRET@my-bucket?region=us-east-1") + require.Error(t, err) + require.Contains(t, err.Error(), "scheme") +} + +func TestParseURL_EmptyBucket(t *testing.T) { + _, err := ParseURL("s3://AKID:SECRET@?region=us-east-1") + require.Error(t, err) + require.Contains(t, err.Error(), "bucket") +} + +// --- Unit tests: URL construction --- + +func TestConfig_BucketURL_PathStyle(t *testing.T) { + c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true} + require.Equal(t, "https://s3.example.com/my-bucket", c.BucketURL()) +} + +func TestConfig_BucketURL_VirtualHosted(t *testing.T) { + c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", PathStyle: false} + require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com", c.BucketURL()) +} + +func TestConfig_ObjectURL_PathStyle(t *testing.T) { + c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: true} + require.Equal(t, "https://s3.example.com/my-bucket/prefix/obj", c.ObjectURL("obj")) +} + +func TestConfig_ObjectURL_VirtualHosted(t *testing.T) { + c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: false} + require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com/prefix/obj", c.ObjectURL("obj")) +} + +func TestConfig_HostHeader_PathStyle(t *testing.T) { + c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true} + require.Equal(t, "s3.example.com", c.HostHeader()) +} + +func TestConfig_HostHeader_VirtualHosted(t *testing.T) { + c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", PathStyle: false} + require.Equal(t, "my-bucket.s3.us-east-1.amazonaws.com", c.HostHeader()) +} + +func TestConfig_ObjectKey(t *testing.T) { + c := &Config{Prefix: "attachments"} + require.Equal(t, "attachments/file123", c.ObjectKey("file123")) + + c2 := &Config{Prefix: ""} + require.Equal(t, "file123", c2.ObjectKey("file123")) +} + +func TestConfig_ListPrefix(t *testing.T) { + c := &Config{Prefix: "attachments"} + require.Equal(t, "attachments/", c.ListPrefix()) + + c2 := &Config{Prefix: ""} + require.Equal(t, "", c2.ListPrefix()) +} + +// --- Integration tests using real S3 --- + +func TestClient_PutGetObject(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Put + err := client.PutObject(ctx, "test-key", strings.NewReader("hello world"), 0) + require.Nil(t, err) + + // Get + reader, size, err := client.GetObject(ctx, "test-key") + require.Nil(t, err) + require.Equal(t, int64(11), size) + data, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello world", string(data)) +} + +func TestClient_GetObject_NotFound(t *testing.T) { + client := newTestClient(t) + + _, _, err := client.GetObject(context.Background(), "nonexistent") + require.Error(t, err) +} + +func TestClient_DeleteObjects(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Put several objects + for i := 0; i < 5; i++ { + err := client.PutObject(ctx, fmt.Sprintf("del-%d", i), bytes.NewReader([]byte("data")), 0) + require.Nil(t, err) + } + waitForCount(t, client, 5) + + // Delete some + err := client.DeleteObjects(ctx, []string{"del-1", "del-3"}) + require.Nil(t, err) + waitForCount(t, client, 3) + + // Verify deleted ones are gone + _, _, err = client.GetObject(ctx, "del-1") + require.Error(t, err) + _, _, err = client.GetObject(ctx, "del-3") + require.Error(t, err) + + // Verify remaining ones are still there + for _, 
key := range []string{"del-0", "del-2", "del-4"} { + reader, _, err := client.GetObject(ctx, key) + require.Nil(t, err) + reader.Close() + } +} + +func TestClient_ListObjects(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + for i := 0; i < 3; i++ { + err := client.PutObject(ctx, fmt.Sprintf("list-%d", i), bytes.NewReader([]byte("x")), 0) + require.Nil(t, err) + } + waitForCount(t, client, 3) +} + +func TestClient_ListObjects_Pagination(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Create 1010 objects in parallel (5 goroutines) + const total = 1010 + const workers = 5 + var wg sync.WaitGroup + errs := make(chan error, total) + for w := 0; w < workers; w++ { + wg.Add(1) + go func(start int) { + defer wg.Done() + for i := start; i < total; i += workers { + if err := client.PutObject(ctx, fmt.Sprintf("pg-%04d", i), bytes.NewReader([]byte("x")), 0); err != nil { + errs <- err + return + } + } + }(w) + } + wg.Wait() + close(errs) + for err := range errs { + require.Nil(t, err) + } + waitForCount(t, client, total) +} + +func TestClient_PutObject_LargeBody(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // 1 MB object + data := make([]byte, 1024*1024) + for i := range data { + data[i] = byte(i % 256) + } + err := client.PutObject(ctx, "large", bytes.NewReader(data), 0) + require.Nil(t, err) + + reader, size, err := client.GetObject(ctx, "large") + require.Nil(t, err) + require.Equal(t, int64(1024*1024), size) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, data, got) +} + +func TestClient_PutObject_ChunkedUpload(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // 12 MB object, exceeds 5 MB partSize, triggers multipart upload path + data := make([]byte, 12*1024*1024) + for i := range data { + data[i] = byte(i % 256) + } + err := client.PutObject(ctx, "multipart", bytes.NewReader(data), 0) + require.Nil(t, err) + + reader, size, err := client.GetObject(ctx, "multipart") + require.Nil(t, err) + require.Equal(t, int64(12*1024*1024), size) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, data, got) +} + +func TestClient_PutObject_ExactPartSize(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Exactly 5 MB (partSize): ReadFull fills the buffer without error, so this takes the multipart path with a single part + data := make([]byte, 5*1024*1024) + for i := range data { + data[i] = byte(i % 256) + } + err := client.PutObject(ctx, "exact", bytes.NewReader(data), 0) + require.Nil(t, err) + + reader, size, err := client.GetObject(ctx, "exact") + require.Nil(t, err) + require.Equal(t, int64(5*1024*1024), size) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, data, got) +} + +func TestClient_PutObject_StreamingExactLength(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // untrustedLength matches body exactly — streams directly via putObject + err := client.PutObject(ctx, "stream-exact", strings.NewReader("hello world"), 11) + require.Nil(t, err) + + reader, size, err := client.GetObject(ctx, "stream-exact") + require.Nil(t, err) + require.Equal(t, int64(11), size) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello world", string(got)) +} + +func TestClient_PutObject_StreamingBodyLongerThanClaimed(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Body has 11
bytes, but we claim 5 — only first 5 bytes should be stored + err := client.PutObject(ctx, "stream-long", strings.NewReader("hello world"), 5) + require.Nil(t, err) + + reader, size, err := client.GetObject(ctx, "stream-long") + require.Nil(t, err) + require.Equal(t, int64(5), size) + got, err := io.ReadAll(reader) + reader.Close() + require.Nil(t, err) + require.Equal(t, "hello", string(got)) +} + +func TestClient_PutObject_StreamingBodyShorterThanClaimed(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + // Body has 5 bytes, but we claim 100 — should fail + err := client.PutObject(ctx, "stream-short", strings.NewReader("hello"), 100) + require.Error(t, err) + + // Object should not exist + _, _, err = client.GetObject(ctx, "stream-short") + require.Error(t, err) +} + +func TestClient_PutObject_NestedKey(t *testing.T) { + client := newTestClient(t) + ctx := context.Background() + + err := client.PutObject(ctx, "deep/nested/prefix/file.txt", strings.NewReader("nested"), 0) + require.Nil(t, err) + + reader, _, err := client.GetObject(ctx, "deep/nested/prefix/file.txt") + require.Nil(t, err) + data, _ := io.ReadAll(reader) + reader.Close() + require.Equal(t, "nested", string(data)) +} + +func newTestClient(t *testing.T) *Client { + t.Helper() + s3URL := os.Getenv("NTFY_TEST_S3_URL") + if s3URL == "" { + t.Skip("NTFY_TEST_S3_URL not set") + } + cfg, err := ParseURL(s3URL) + require.Nil(t, err) + // Use per-test prefix to isolate objects between tests + if cfg.Prefix != "" { + cfg.Prefix = cfg.Prefix + "/testpkg-s3/" + t.Name() + } else { + cfg.Prefix = "testpkg-s3/" + t.Name() + } + client := New(cfg) + deleteAllObjects(t, client) + t.Cleanup(func() { deleteAllObjects(t, client) }) + return client +} + +func deleteAllObjects(t *testing.T, client *Client) { + t.Helper() + for i := 0; i < 60; i++ { + objects, err := client.ListObjectsV2(context.Background()) + require.Nil(t, err) + if len(objects) == 0 { + return + } + keys := make([]string, len(objects)) + for j, obj := range objects { + keys[j] = obj.Key + } + require.Nil(t, client.DeleteObjects(context.Background(), keys)) + time.Sleep(500 * time.Millisecond) + } + t.Fatal("timed out waiting for bucket to be empty") +} + +func waitForCount(t *testing.T, client *Client, expected int) { + t.Helper() + for i := 0; i < 60; i++ { + objects, err := client.ListObjectsV2(context.Background()) + require.Nil(t, err) + if len(objects) == expected { + return + } + time.Sleep(500 * time.Millisecond) + } + objects, _ := client.ListObjectsV2(context.Background()) + t.Fatalf("timed out waiting for %d objects, got %d", expected, len(objects)) +} diff --git a/s3/types.go b/s3/types.go new file mode 100644 index 00000000..96b62649 --- /dev/null +++ b/s3/types.go @@ -0,0 +1,165 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +// Config holds the parsed fields from an S3 URL. Use ParseURL to create one from a URL string. +type Config struct { + Endpoint string // host[:port] only, e.g. "s3.us-east-1.amazonaws.com" + PathStyle bool + Bucket string + Prefix string + Region string + AccessKey string + SecretKey string + HTTPClient *http.Client // if nil, http.DefaultClient is used +} + +// BucketURL returns the base URL for bucket-level operations. 
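+// With PathStyle, the bucket is part of the URL path; otherwise it is prepended to the host
+// (virtual-hosted style). For illustration, with placeholder endpoint and bucket names:
+//
+//	(&Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true}).BucketURL()  // "https://s3.example.com/my-bucket"
+//	(&Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: false}).BucketURL() // "https://my-bucket.s3.example.com"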
+func (c *Config) BucketURL() string { + if c.PathStyle { + return fmt.Sprintf("https://%s/%s", c.Endpoint, c.Bucket) + } + return fmt.Sprintf("https://%s.%s", c.Bucket, c.Endpoint) +} + +// HostHeader returns the value for the Host header. +func (c *Config) HostHeader() string { + if c.PathStyle { + return c.Endpoint + } + return c.Bucket + "." + c.Endpoint +} + +// ListPrefix returns the prefix to use in ListObjectsV2 requests, +// with a trailing slash so that only objects under the prefix directory are returned. +func (c *Config) ListPrefix() string { + if c.Prefix != "" { + return c.Prefix + "/" + } + return "" +} + +// StripPrefix removes the configured prefix from a key returned by ListObjectsV2, +// so keys match what was passed to PutObject/GetObject/DeleteObjects. +func (c *Config) StripPrefix(key string) string { + if c.Prefix != "" { + return strings.TrimPrefix(key, c.Prefix+"/") + } + return key +} + +// ObjectKey prepends the configured prefix to the given key. +func (c *Config) ObjectKey(key string) string { + if c.Prefix != "" { + return c.Prefix + "/" + key + } + return key +} + +// ObjectURL returns the full URL for an object, automatically prepending the configured prefix. +func (c *Config) ObjectURL(key string) string { + u, _ := url.JoinPath(c.BucketURL(), c.ObjectKey(key)) + return u +} + +// Object represents an S3 object returned by list operations. +type Object struct { + Key string + Size int64 + LastModified time.Time +} + +// errorResponse is returned when S3 responds with a non-2xx status code. +type errorResponse struct { + StatusCode int + Code string `xml:"Code"` + Message string `xml:"Message"` + Body string `xml:"-"` // raw response body +} + +func (e *errorResponse) Error() string { + if e.Code != "" { + return fmt.Sprintf("s3: %s (HTTP %d): %s", e.Code, e.StatusCode, e.Message) + } + return fmt.Sprintf("s3: HTTP %d: %s", e.StatusCode, e.Body) +} + +// listObjectsV2Result is the XML response from S3 ListObjectsV2 +type listObjectsV2Result struct { + Contents []*listObject `xml:"Contents"` + IsTruncated bool `xml:"IsTruncated"` + NextContinuationToken string `xml:"NextContinuationToken"` +} + +type listObject struct { + Key string `xml:"Key"` + Size int64 `xml:"Size"` + LastModified string `xml:"LastModified"` +} + +// deleteObjectsRequest is the XML request body for S3 DeleteObjects +type deleteObjectsRequest struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool `xml:"Quiet"` + Objects []*deleteObject `xml:"Object"` +} + +type deleteObject struct { + Key string `xml:"Key"` +} + +// deleteObjectsResult is the XML response from S3 DeleteObjects +type deleteObjectsResult struct { + Errors []*deleteError `xml:"Error"` +} + +type deleteError struct { + Key string `xml:"Key"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +// listMultipartUploadsResult is the XML response from S3 listMultipartUploads +type listMultipartUploadsResult struct { + Uploads []*listUpload `xml:"Upload"` + IsTruncated bool `xml:"IsTruncated"` + NextKeyMarker string `xml:"NextKeyMarker"` + NextUploadIDMarker string `xml:"NextUploadIdMarker"` +} + +type listUpload struct { + Key string `xml:"Key"` + UploadID string `xml:"UploadId"` + Initiated string `xml:"Initiated"` +} + +// multipartUpload represents an in-progress multipart upload returned by listMultipartUploads. 
+type multipartUpload struct { + Key string + UploadID string + Initiated time.Time +} + +// initiateMultipartUploadResult is the XML response from S3 InitiateMultipartUpload +type initiateMultipartUploadResult struct { + UploadID string `xml:"UploadId"` +} + +// completeMultipartUploadRequest is the XML request body for S3 CompleteMultipartUpload +type completeMultipartUploadRequest struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts []*completedPart `xml:"Part"` +} + +// completedPart represents a successfully uploaded part for CompleteMultipartUpload +type completedPart struct { + PartNumber int `xml:"PartNumber"` + ETag string `xml:"ETag"` +} diff --git a/s3/util.go b/s3/util.go new file mode 100644 index 00000000..ae692735 --- /dev/null +++ b/s3/util.go @@ -0,0 +1,180 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" +) + +const ( + // SHA-256 hash of the empty string, used as the payload hash for bodiless requests + emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sent as the payload hash for streaming uploads where the body is not buffered in memory + unsignedPayload = "UNSIGNED-PAYLOAD" + + // maxResponseBytes caps the size of S3 response bodies we read into memory + maxResponseBytes = 2 * 1024 * 1024 + + // partSize is the size of each part for multipart uploads (5 MB). This is also the threshold + // above which PutObject switches from a simple PUT to multipart upload. S3 requires a minimum + // part size of 5 MB for all parts except the last. + partSize = 5 * 1024 * 1024 + + // maxSinglePutSize is the maximum size for a single PUT upload (5 GB). + // Objects larger than this must use multipart upload. + maxSinglePutSize = 5 * 1024 * 1024 * 1024 + + // maxPages is the max number of pages to iterate through when listing objects + maxPages = 500 + + // maxDeleteBatchSize is the maximum number of keys per S3 DeleteObjects call + maxDeleteBatchSize = 1000 +) + +// ParseURL parses an S3 URL of the form: +// +// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT] +// +// When endpoint is specified, path-style addressing is enabled automatically. 
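+//
+// For illustration (the credentials, bucket and endpoint below are placeholders):
+//
+//	cfg, _ := ParseURL("s3://AKIDEXAMPLE:SECRETEXAMPLE@my-bucket/attachments?region=us-east-1")
+//	// cfg.Bucket == "my-bucket", cfg.Prefix == "attachments",
+//	// cfg.Endpoint == "s3.us-east-1.amazonaws.com", cfg.PathStyle == false
+//
+//	cfg, _ = ParseURL("s3://AKIDEXAMPLE:SECRETEXAMPLE@my-bucket?region=us-east-1&endpoint=http://localhost:9000")
+//	// cfg.Endpoint == "localhost:9000", cfg.PathStyle == true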
+func ParseURL(s3URL string) (*Config, error) { + u, err := url.Parse(s3URL) + if err != nil { + return nil, fmt.Errorf("s3: invalid URL: %w", err) + } + if u.Scheme != "s3" { + return nil, fmt.Errorf("s3: URL scheme must be 's3', got '%s'", u.Scheme) + } + if u.Host == "" { + return nil, fmt.Errorf("s3: bucket name must be specified as host") + } + bucket := u.Host + prefix := strings.TrimPrefix(u.Path, "/") + accessKey := u.User.Username() + secretKey, _ := u.User.Password() + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("s3: access key and secret key must be specified in URL") + } + region := u.Query().Get("region") + if region == "" { + return nil, fmt.Errorf("s3: region query parameter is required") + } + endpointParam := u.Query().Get("endpoint") + var endpoint string + var pathStyle bool + if endpointParam != "" { + // Custom endpoint: strip scheme prefix to extract host[:port] + ep := strings.TrimRight(endpointParam, "/") + ep = strings.TrimPrefix(ep, "https://") + ep = strings.TrimPrefix(ep, "http://") + endpoint = ep + pathStyle = true + } else { + endpoint = fmt.Sprintf("s3.%s.amazonaws.com", region) + pathStyle = false + } + return &Config{ + Endpoint: endpoint, + PathStyle: pathStyle, + Bucket: bucket, + Prefix: prefix, + Region: region, + AccessKey: accessKey, + SecretKey: secretKey, + }, nil +} + +// parseError reads an S3 error response and returns an *errorResponse. +func parseError(resp *http.Response) error { + body, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBytes)) + if err != nil { + return fmt.Errorf("error reading S3 error response: %w", err) + } + return parseErrorFromBytes(resp.StatusCode, body) +} + +func parseErrorFromBytes(statusCode int, body []byte) error { + errResp := &errorResponse{ + StatusCode: statusCode, + Body: string(body), + } + // Try to parse XML error; if it fails, we still have StatusCode and Body + _ = xml.Unmarshal(body, errResp) + return errResp +} + +// canonicalURI returns the URI-encoded path for the canonical request. Each path segment is +// percent-encoded per RFC 3986; forward slashes are preserved. +func canonicalURI(u *url.URL) string { + p := u.Path + if p == "" { + return "/" + } + segments := strings.Split(p, "/") + for i, seg := range segments { + segments[i] = uriEncode(seg) + } + return strings.Join(segments, "/") +} + +// canonicalQueryString builds the query string for the canonical request. Keys and values +// are URI-encoded per RFC 3986 (using %20, not +) and sorted lexically by key. +func canonicalQueryString(values url.Values) string { + if len(values) == 0 { + return "" + } + keys := make([]string, 0, len(values)) + for k := range values { + keys = append(keys, k) + } + sort.Strings(keys) + var pairs []string + for _, k := range keys { + ek := uriEncode(k) + vs := make([]string, len(values[k])) + copy(vs, values[k]) + sort.Strings(vs) + for _, v := range vs { + pairs = append(pairs, ek+"="+uriEncode(v)) + } + } + return strings.Join(pairs, "&") +} + +// uriEncode percent-encodes a string per RFC 3986, encoding everything except unreserved +// characters (A-Z a-z 0-9 - _ . ~). +func uriEncode(s string) string { + var buf strings.Builder + for i := 0; i < len(s); i++ { + b := s[i] + if (b >= 'A' && b <= 'Z') || (b >= 'a' && b <= 'z') || (b >= '0' && b <= '9') || + b == '-' || b == '_' || b == '.' 
|| b == '~' { + buf.WriteByte(b) + } else { + fmt.Fprintf(&buf, "%%%02X", b) + } + } + return buf.String() +} + +func isHTTPSuccess(resp *http.Response) bool { + return resp.StatusCode/100 == 2 +} + +func sha256Hex(data []byte) string { + h := sha256.Sum256(data) + return hex.EncodeToString(h[:]) +} + +func hmacSHA256(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} diff --git a/s3/util_test.go b/s3/util_test.go new file mode 100644 index 00000000..93ddd707 --- /dev/null +++ b/s3/util_test.go @@ -0,0 +1,181 @@ +package s3 + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestURIEncode(t *testing.T) { + // Unreserved characters are not encoded + require.Equal(t, "abcdefghijklmnopqrstuvwxyz", uriEncode("abcdefghijklmnopqrstuvwxyz")) + require.Equal(t, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", uriEncode("ABCDEFGHIJKLMNOPQRSTUVWXYZ")) + require.Equal(t, "0123456789", uriEncode("0123456789")) + require.Equal(t, "-_.~", uriEncode("-_.~")) + + // Spaces use %20, not + + require.Equal(t, "hello%20world", uriEncode("hello world")) + + // Slashes are encoded (canonicalURI handles slash splitting separately) + require.Equal(t, "a%2Fb", uriEncode("a/b")) + + // Special characters + require.Equal(t, "%2B", uriEncode("+")) + require.Equal(t, "%3D", uriEncode("=")) + require.Equal(t, "%26", uriEncode("&")) + require.Equal(t, "%40", uriEncode("@")) + require.Equal(t, "%23", uriEncode("#")) + + // Mixed + require.Equal(t, "test~file-name_1.txt", uriEncode("test~file-name_1.txt")) + require.Equal(t, "key%20with%20spaces%2Fand%2Fslashes", uriEncode("key with spaces/and/slashes")) + + // Empty string + require.Equal(t, "", uriEncode("")) +} + +func TestCanonicalURI(t *testing.T) { + // Simple path + u, _ := url.Parse("https://example.com/bucket/key") + require.Equal(t, "/bucket/key", canonicalURI(u)) + + // Root path + u, _ = url.Parse("https://example.com/") + require.Equal(t, "/", canonicalURI(u)) + + // Empty path + u, _ = url.Parse("https://example.com") + require.Equal(t, "/", canonicalURI(u)) + + // Path with special characters + u, _ = url.Parse("https://example.com/bucket/key%20with%20spaces") + require.Equal(t, "/bucket/key%20with%20spaces", canonicalURI(u)) + + // Nested path + u, _ = url.Parse("https://example.com/bucket/a/b/c/file.txt") + require.Equal(t, "/bucket/a/b/c/file.txt", canonicalURI(u)) +} + +func TestCanonicalQueryString(t *testing.T) { + // Multiple keys sorted alphabetically + vals := url.Values{ + "prefix": {"test/"}, + "list-type": {"2"}, + } + require.Equal(t, "list-type=2&prefix=test%2F", canonicalQueryString(vals)) + + // Empty values + require.Equal(t, "", canonicalQueryString(url.Values{})) + + // Single key + require.Equal(t, "key=value", canonicalQueryString(url.Values{"key": {"value"}})) + + // Key with multiple values (sorted) + vals = url.Values{"key": {"b", "a"}} + require.Equal(t, "key=a&key=b", canonicalQueryString(vals)) + + // Keys requiring encoding + vals = url.Values{"continuation-token": {"abc+def"}} + require.Equal(t, "continuation-token=abc%2Bdef", canonicalQueryString(vals)) +} + +func TestSHA256Hex(t *testing.T) { + // SHA-256 of empty string + require.Equal(t, emptyPayloadHash, sha256Hex([]byte(""))) + + // SHA-256 of known value + require.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", sha256Hex([]byte("hello"))) +} + +func TestHmacSHA256(t *testing.T) { + // Known test vector: HMAC-SHA256("key", "message") + result := 
hmacSHA256([]byte("key"), []byte("message")) + require.Len(t, result, 32) // SHA-256 produces 32 bytes + require.NotEqual(t, make([]byte, 32), result) + + // Same inputs should produce same output + result2 := hmacSHA256([]byte("key"), []byte("message")) + require.Equal(t, result, result2) + + // Different inputs should produce different output + result3 := hmacSHA256([]byte("different-key"), []byte("message")) + require.NotEqual(t, result, result3) +} + +func TestSignV4_SetsRequiredHeaders(t *testing.T) { + c := &Client{config: &Config{ + AccessKey: "AKID", + SecretKey: "SECRET", + Region: "us-east-1", + Endpoint: "s3.us-east-1.amazonaws.com", + Bucket: "my-bucket", + }} + + req, _ := http.NewRequest(http.MethodGet, "https://my-bucket.s3.us-east-1.amazonaws.com/test-key", nil) + c.signV4(req, emptyPayloadHash) + + // All required SigV4 headers must be set + require.NotEmpty(t, req.Header.Get("Host")) + require.NotEmpty(t, req.Header.Get("X-Amz-Date")) + require.Equal(t, emptyPayloadHash, req.Header.Get("X-Amz-Content-Sha256")) + + // Authorization header must have correct format + auth := req.Header.Get("Authorization") + require.Contains(t, auth, "AWS4-HMAC-SHA256") + require.Contains(t, auth, "Credential=AKID/") + require.Contains(t, auth, "/us-east-1/s3/aws4_request") + require.Contains(t, auth, "SignedHeaders=") + require.Contains(t, auth, "Signature=") +} + +func TestSignV4_UnsignedPayload(t *testing.T) { + c := &Client{config: &Config{ + AccessKey: "AKID", + SecretKey: "SECRET", + Region: "us-east-1", + Endpoint: "s3.us-east-1.amazonaws.com", + Bucket: "my-bucket", + }} + + req, _ := http.NewRequest(http.MethodPut, "https://my-bucket.s3.us-east-1.amazonaws.com/test-key", nil) + c.signV4(req, unsignedPayload) + + require.Equal(t, unsignedPayload, req.Header.Get("X-Amz-Content-Sha256")) +} + +func TestSignV4_DifferentRegions(t *testing.T) { + c1 := &Client{config: &Config{AccessKey: "AKID", SecretKey: "SECRET", Region: "us-east-1", Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "b"}} + c2 := &Client{config: &Config{AccessKey: "AKID", SecretKey: "SECRET", Region: "eu-west-1", Endpoint: "s3.eu-west-1.amazonaws.com", Bucket: "b"}} + + req1, _ := http.NewRequest(http.MethodGet, "https://b.s3.us-east-1.amazonaws.com/key", nil) + c1.signV4(req1, emptyPayloadHash) + + req2, _ := http.NewRequest(http.MethodGet, "https://b.s3.eu-west-1.amazonaws.com/key", nil) + c2.signV4(req2, emptyPayloadHash) + + // Different regions should produce different signatures + require.NotEqual(t, req1.Header.Get("Authorization"), req2.Header.Get("Authorization")) +} + +func TestParseError_XMLResponse(t *testing.T) { + xmlBody := []byte(`<Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message></Error>`) + err := parseErrorFromBytes(404, xmlBody) + + var errResp *errorResponse + require.ErrorAs(t, err, &errResp) + require.Equal(t, 404, errResp.StatusCode) + require.Equal(t, "NoSuchKey", errResp.Code) + require.Equal(t, "The specified key does not exist.", errResp.Message) +} + +func TestParseError_NonXMLResponse(t *testing.T) { + err := parseErrorFromBytes(500, []byte("internal server error")) + + var errResp *errorResponse + require.ErrorAs(t, err, &errResp) + require.Equal(t, 500, errResp.StatusCode) + require.Equal(t, "", errResp.Code) // XML parsing failed, no code + require.Contains(t, errResp.Body, "internal server error") +} diff --git a/server/file_cache.go b/server/file_cache.go deleted file mode 100644 index a1803724..00000000 --- a/server/file_cache.go +++ /dev/null @@ -1,128 +0,0 @@ -package server - -import ( - "errors" - 
"fmt" - "heckel.io/ntfy/v2/log" - "heckel.io/ntfy/v2/model" - "heckel.io/ntfy/v2/util" - "io" - "os" - "path/filepath" - "regexp" - "sync" -) - -var ( - fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, model.MessageIDLength)) - errInvalidFileID = errors.New("invalid file ID") - errFileExists = errors.New("file exists") -) - -type fileCache struct { - dir string - totalSizeCurrent int64 - totalSizeLimit int64 - mu sync.Mutex -} - -func newFileCache(dir string, totalSizeLimit int64) (*fileCache, error) { - if err := os.MkdirAll(dir, 0700); err != nil { - return nil, err - } - size, err := dirSize(dir) - if err != nil { - return nil, err - } - return &fileCache{ - dir: dir, - totalSizeCurrent: size, - totalSizeLimit: totalSizeLimit, - }, nil -} - -func (c *fileCache) Write(id string, in io.Reader, limiters ...util.Limiter) (int64, error) { - if !fileIDRegex.MatchString(id) { - return 0, errInvalidFileID - } - log.Tag(tagFileCache).Field("message_id", id).Debug("Writing attachment") - file := filepath.Join(c.dir, id) - if _, err := os.Stat(file); err == nil { - return 0, errFileExists - } - f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) - if err != nil { - return 0, err - } - defer f.Close() - limiters = append(limiters, util.NewFixedLimiter(c.Remaining())) - limitWriter := util.NewLimitWriter(f, limiters...) - size, err := io.Copy(limitWriter, in) - if err != nil { - os.Remove(file) - return 0, err - } - if err := f.Close(); err != nil { - os.Remove(file) - return 0, err - } - c.mu.Lock() - c.totalSizeCurrent += size - mset(metricAttachmentsTotalSize, c.totalSizeCurrent) - c.mu.Unlock() - return size, nil -} - -func (c *fileCache) Remove(ids ...string) error { - for _, id := range ids { - if !fileIDRegex.MatchString(id) { - return errInvalidFileID - } - log.Tag(tagFileCache).Field("message_id", id).Debug("Deleting attachment") - file := filepath.Join(c.dir, id) - if err := os.Remove(file); err != nil { - log.Tag(tagFileCache).Field("message_id", id).Err(err).Debug("Error deleting attachment") - } - } - size, err := dirSize(c.dir) - if err != nil { - return err - } - c.mu.Lock() - c.totalSizeCurrent = size - c.mu.Unlock() - mset(metricAttachmentsTotalSize, size) - return nil -} - -func (c *fileCache) Size() int64 { - c.mu.Lock() - defer c.mu.Unlock() - return c.totalSizeCurrent -} - -func (c *fileCache) Remaining() int64 { - c.mu.Lock() - defer c.mu.Unlock() - remaining := c.totalSizeLimit - c.totalSizeCurrent - if remaining < 0 { - return 0 - } - return remaining -} - -func dirSize(dir string) (int64, error) { - entries, err := os.ReadDir(dir) - if err != nil { - return 0, err - } - var size int64 - for _, e := range entries { - info, err := e.Info() - if err != nil { - return 0, err - } - size += info.Size() - } - return size, nil -} diff --git a/server/file_cache_test.go b/server/file_cache_test.go deleted file mode 100644 index e7dee3b3..00000000 --- a/server/file_cache_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package server - -import ( - "bytes" - "fmt" - "github.com/stretchr/testify/require" - "heckel.io/ntfy/v2/util" - "os" - "strings" - "testing" -) - -var ( - oneKilobyteArray = make([]byte, 1024) -) - -func TestFileCache_Write_Success(t *testing.T) { - dir, c := newTestFileCache(t) - size, err := c.Write("abcdefghijkl", strings.NewReader("normal file"), util.NewFixedLimiter(999)) - require.Nil(t, err) - require.Equal(t, int64(11), size) - require.Equal(t, "normal file", readFile(t, dir+"/abcdefghijkl")) - require.Equal(t, int64(11), c.Size()) - 
require.Equal(t, int64(10229), c.Remaining()) -} - -func TestFileCache_Write_Remove_Success(t *testing.T) { - dir, c := newTestFileCache(t) // max = 10k (10240), each = 1k (1024) - for i := 0; i < 10; i++ { // 10x999 = 9990 - size, err := c.Write(fmt.Sprintf("abcdefghijk%d", i), bytes.NewReader(make([]byte, 999))) - require.Nil(t, err) - require.Equal(t, int64(999), size) - } - require.Equal(t, int64(9990), c.Size()) - require.Equal(t, int64(250), c.Remaining()) - require.FileExists(t, dir+"/abcdefghijk1") - require.FileExists(t, dir+"/abcdefghijk5") - - require.Nil(t, c.Remove("abcdefghijk1", "abcdefghijk5")) - require.NoFileExists(t, dir+"/abcdefghijk1") - require.NoFileExists(t, dir+"/abcdefghijk5") - require.Equal(t, int64(7992), c.Size()) - require.Equal(t, int64(2248), c.Remaining()) -} - -func TestFileCache_Write_FailedTotalSizeLimit(t *testing.T) { - dir, c := newTestFileCache(t) - for i := 0; i < 10; i++ { - size, err := c.Write(fmt.Sprintf("abcdefghijk%d", i), bytes.NewReader(oneKilobyteArray)) - require.Nil(t, err) - require.Equal(t, int64(1024), size) - } - _, err := c.Write("abcdefghijkX", bytes.NewReader(oneKilobyteArray)) - require.Equal(t, util.ErrLimitReached, err) - require.NoFileExists(t, dir+"/abcdefghijkX") -} - -func TestFileCache_Write_FailedAdditionalLimiter(t *testing.T) { - dir, c := newTestFileCache(t) - _, err := c.Write("abcdefghijkl", bytes.NewReader(make([]byte, 1001)), util.NewFixedLimiter(1000)) - require.Equal(t, util.ErrLimitReached, err) - require.NoFileExists(t, dir+"/abcdefghijkl") -} - -func newTestFileCache(t *testing.T) (dir string, cache *fileCache) { - dir = t.TempDir() - cache, err := newFileCache(dir, 10*1024) - require.Nil(t, err) - return dir, cache -} - -func readFile(t *testing.T, f string) string { - b, err := os.ReadFile(f) - require.Nil(t, err) - return string(b) -} diff --git a/server/log.go b/server/log.go index efa49162..432f6743 100644 --- a/server/log.go +++ b/server/log.go @@ -24,7 +24,6 @@ const ( tagSMTP = "smtp" // Receive email tagEmail = "email" // Send email tagTwilio = "twilio" - tagFileCache = "file_cache" tagMessageCache = "message_cache" tagStripe = "stripe" tagAccount = "account" diff --git a/server/server.go b/server/server.go index 71f39357..77e7b0c0 100644 --- a/server/server.go +++ b/server/server.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" + "heckel.io/ntfy/v2/attachment" "heckel.io/ntfy/v2/db" "heckel.io/ntfy/v2/db/pg" "heckel.io/ntfy/v2/log" @@ -64,7 +65,7 @@ type Server struct { userManager *user.Manager // Might be nil! messageCache *message.Cache // Database that stores the messages webPush *webpush.Store // Database that stores web push subscriptions - fileCache *fileCache // File system based cache that stores attachments + attachment *attachment.Store // Attachment store (file system or S3) stripe stripeAPI // Stripe API, can be replaced with a mock priceCache *util.LookupCache[map[string]int64] // Stripe price ID -> price as cents (USD implied!) 
metricsHandler http.Handler // Handles /metrics if enable-metrics set, and listen-metrics-http not set @@ -228,12 +229,9 @@ func New(conf *Config) (*Server, error) { if err != nil { return nil, err } - var fileCache *fileCache - if conf.AttachmentCacheDir != "" { - fileCache, err = newFileCache(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit) - if err != nil { - return nil, err - } + attachmentStore, err := createAttachmentStore(conf, messageCache) + if err != nil { + return nil, err } var userManager *user.Manager if conf.AuthFile != "" || pool != nil { @@ -277,7 +275,7 @@ func New(conf *Config) (*Server, error) { db: pool, messageCache: messageCache, webPush: wp, - fileCache: fileCache, + attachment: attachmentStore, firebaseClient: firebaseClient, smtpSender: mailer, topics: topics, @@ -302,6 +300,18 @@ func createMessageCache(conf *Config, pool *db.DB) (*message.Cache, error) { return message.NewMemStore() } +func createAttachmentStore(conf *Config, messageCache *message.Cache) (*attachment.Store, error) { + attachmentIDs := func() ([]string, error) { + return messageCache.AttachmentIDs() + } + if strings.HasPrefix(conf.AttachmentCacheDir, "s3://") { + return attachment.NewS3Store(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, attachmentIDs) + } else if conf.AttachmentCacheDir != "" { + return attachment.NewFileStore(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, attachmentIDs) + } + return nil, nil +} + // Run executes the main server. It listens on HTTP (+ HTTPS, if configured), and starts // a manager go routine to print stats and prune messages. func (s *Server) Run() error { @@ -422,6 +432,9 @@ func (s *Server) Stop() { if s.smtpServer != nil { s.smtpServer.Close() } + if s.attachment != nil { + s.attachment.Close() + } s.closeDatabases() close(s.closeChan) } @@ -596,7 +609,7 @@ func (s *Server) handleInternal(w http.ResponseWriter, r *http.Request, v *visit return s.ensureWebEnabled(s.handleStatic)(w, r, v) } else if r.Method == http.MethodGet && docsRegex.MatchString(r.URL.Path) { return s.ensureWebEnabled(s.handleDocs)(w, r, v) - } else if (r.Method == http.MethodGet || r.Method == http.MethodHead) && fileRegex.MatchString(r.URL.Path) && s.config.AttachmentCacheDir != "" { + } else if (r.Method == http.MethodGet || r.Method == http.MethodHead) && fileRegex.MatchString(r.URL.Path) && s.attachment != nil { return s.limitRequests(s.handleFile)(w, r, v) } else if r.Method == http.MethodOptions { return s.limitRequests(s.handleOptions)(w, r, v) // Should work even if the web app is not enabled, see #598 @@ -753,7 +766,7 @@ func (s *Server) handleStats(w http.ResponseWriter, _ *http.Request, _ *visitor) // Before streaming the file to a client, it locates uploader (m.Sender or m.User) in the message cache, so it // can associate the download bandwidth with the uploader. 
func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor) error { - if s.config.AttachmentCacheDir == "" { + if s.attachment == nil { return errHTTPInternalError } matches := fileRegex.FindStringSubmatch(r.URL.Path) @@ -761,16 +774,16 @@ func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor) return errHTTPInternalErrorInvalidPath } messageID := matches[1] - file := filepath.Join(s.config.AttachmentCacheDir, messageID) - stat, err := os.Stat(file) + reader, size, err := s.attachment.Read(messageID) if err != nil { return errHTTPNotFound.Fields(log.Context{ "message_id": messageID, - "error_context": "filesystem", + "error_context": "attachment_store", }) } + defer reader.Close() w.Header().Set("Access-Control-Allow-Origin", s.config.AccessControlAllowOrigin) // CORS, allow cross-origin requests - w.Header().Set("Content-Length", fmt.Sprintf("%d", stat.Size())) + w.Header().Set("Content-Length", fmt.Sprintf("%d", size)) if r.Method == http.MethodHead { return nil } @@ -806,19 +819,14 @@ func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor) } else if m.Sender.IsValid() { bandwidthVisitor = s.visitor(m.Sender, nil) } - if !bandwidthVisitor.BandwidthAllowed(stat.Size()) { + if !bandwidthVisitor.BandwidthAllowed(size) { return errHTTPTooManyRequestsLimitAttachmentBandwidth.With(m) } // Actually send file - f, err := os.Open(file) - if err != nil { - return err - } - defer f.Close() if m.Attachment.Name != "" { w.Header().Set("Content-Disposition", "attachment; filename="+strconv.Quote(m.Attachment.Name)) } - _, err = io.Copy(util.NewContentTypeWriter(w, r.URL.Path), f) + _, err = io.Copy(util.NewContentTypeWriter(w, r.URL.Path), reader) return err } @@ -927,8 +935,8 @@ func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*model.Mess return nil, err } // Delete attachment files for deleted scheduled messages - if s.fileCache != nil && len(deletedIDs) > 0 { - if err := s.fileCache.Remove(deletedIDs...); err != nil { + if s.attachment != nil && len(deletedIDs) > 0 { + if err := s.attachment.Remove(deletedIDs...); err != nil { logvrm(v, r, m).Tag(tagPublish).Err(err).Warn("Error removing attachments for deleted scheduled messages") } } @@ -1034,8 +1042,8 @@ func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v * return err } // Delete attachment files for deleted scheduled messages - if s.fileCache != nil && len(deletedIDs) > 0 { - if err := s.fileCache.Remove(deletedIDs...); err != nil { + if s.attachment != nil && len(deletedIDs) > 0 { + if err := s.attachment.Remove(deletedIDs...); err != nil { logvrm(v, r, m).Tag(tagPublish).Err(err).Warn("Error removing attachments for deleted scheduled messages") } } @@ -1413,7 +1421,7 @@ func (s *Server) renderTemplate(name, tpl, source string) (string, error) { } func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Message, body *util.PeekedReadCloser) error { - if s.fileCache == nil || s.config.BaseURL == "" || s.config.AttachmentCacheDir == "" { + if s.attachment == nil || s.config.BaseURL == "" { return errHTTPBadRequestAttachmentsDisallowed.With(m) } vinfo, err := v.Info() @@ -1424,16 +1432,13 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Me if m.Time > attachmentExpiry { return errHTTPBadRequestAttachmentsExpiryBeforeDelivery.With(m) } - contentLengthStr := r.Header.Get("Content-Length") - if contentLengthStr != "" { // Early "do-not-trust" check, hard limit see below - 
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64) - if err == nil && (contentLength > vinfo.Stats.AttachmentTotalSizeRemaining || contentLength > vinfo.Limits.AttachmentFileSizeLimit) { - return errHTTPEntityTooLargeAttachment.With(m).Fields(log.Context{ - "message_content_length": contentLength, - "attachment_total_size_remaining": vinfo.Stats.AttachmentTotalSizeRemaining, - "attachment_file_size_limit": vinfo.Limits.AttachmentFileSizeLimit, - }) - } + // Early "do-not-trust" check, hard limit see below + if r.ContentLength > 0 && (r.ContentLength > vinfo.Stats.AttachmentTotalSizeRemaining || r.ContentLength > vinfo.Limits.AttachmentFileSizeLimit) { + return errHTTPEntityTooLargeAttachment.With(m).Fields(log.Context{ + "message_content_length": r.ContentLength, + "attachment_total_size_remaining": vinfo.Stats.AttachmentTotalSizeRemaining, + "attachment_file_size_limit": vinfo.Limits.AttachmentFileSizeLimit, + }) } if m.Attachment == nil { m.Attachment = &model.Attachment{} @@ -1453,7 +1458,7 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Me util.NewFixedLimiter(vinfo.Limits.AttachmentFileSizeLimit), util.NewFixedLimiter(vinfo.Stats.AttachmentTotalSizeRemaining), } - m.Attachment.Size, err = s.fileCache.Write(m.ID, body, limiters...) + m.Attachment.Size, err = s.attachment.Write(m.ID, body, r.ContentLength, limiters...) if errors.Is(err, util.ErrLimitReached) { return errHTTPEntityTooLargeAttachment.With(m) } else if err != nil { diff --git a/server/server.yml b/server/server.yml index 43cb5fb4..9dc92968 100644 --- a/server/server.yml +++ b/server/server.yml @@ -153,7 +153,8 @@ # If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments # are "attachment-cache-dir" and "base-url". # -# - attachment-cache-dir is the cache directory for attached files +# - attachment-cache-dir is the cache directory for attached files, or an S3 URL for object storage +# e.g. /var/cache/ntfy/attachments, or s3://ACCESS_KEY:SECRET_KEY@bucket/prefix?region=us-east-1&endpoint=https://... # - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size) # - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M) # - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h) diff --git a/server/server_manager.go b/server/server_manager.go index afed7b33..89ff38c2 100644 --- a/server/server_manager.go +++ b/server/server_manager.go @@ -99,6 +99,9 @@ func (s *Server) execManager() { mset(metricUsers, usersCount) mset(metricSubscribers, subscribers) mset(metricTopics, topicsCount) + if s.attachment != nil { + mset(metricAttachmentsTotalSize, s.attachment.Size()) + } } func (s *Server) pruneVisitors() { @@ -137,7 +140,7 @@ func (s *Server) pruneTokens() { } func (s *Server) pruneAttachments() { - if s.fileCache == nil { + if s.attachment == nil { return } log. 
@@ -150,7 +153,7 @@ func (s *Server) pruneAttachments() { if log.Tag(tagManager).IsDebug() { log.Tag(tagManager).Debug("Deleting attachments %s", strings.Join(ids, ", ")) } - if err := s.fileCache.Remove(ids...); err != nil { + if err := s.attachment.Remove(ids...); err != nil { log.Tag(tagManager).Err(err).Warn("Error deleting attachments") } if err := s.messageCache.MarkAttachmentsDeleted(ids...); err != nil { @@ -171,8 +174,8 @@ func (s *Server) pruneMessages() { if err != nil { log.Tag(tagManager).Err(err).Warn("Error retrieving expired messages") } else if len(expiredMessageIDs) > 0 { - if s.fileCache != nil { - if err := s.fileCache.Remove(expiredMessageIDs...); err != nil { + if s.attachment != nil { + if err := s.attachment.Remove(expiredMessageIDs...); err != nil { log.Tag(tagManager).Err(err).Warn("Error deleting attachments for expired messages") } } diff --git a/server/server_test.go b/server/server_test.go index cb20cbda..44b9ac94 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -2145,7 +2145,7 @@ func TestServer_PublishAttachmentShortWithFilename(t *testing.T) { require.Equal(t, "myfile.txt", msg.Attachment.Name) require.Equal(t, "text/plain; charset=utf-8", msg.Attachment.Type) require.Equal(t, int64(21), msg.Attachment.Size) - require.GreaterOrEqual(t, msg.Attachment.Expires, time.Now().Add(3*time.Hour).Unix()) + require.GreaterOrEqual(t, msg.Attachment.Expires, time.Now().Add(3*time.Hour).Unix()-1) require.Contains(t, msg.Attachment.URL, "http://127.0.0.1:12345/file/") require.Equal(t, netip.Addr{}, msg.Sender) // Should never be returned require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, msg.ID)) @@ -2218,8 +2218,8 @@ func TestServer_PublishAttachmentTooLargeContentLength(t *testing.T) { forEachBackend(t, func(t *testing.T, databaseURL string) { content := util.RandomString(5000) // > 4096 s := newTestServer(t, newTestConfig(t, databaseURL)) - response := request(t, s, "PUT", "/mytopic", content, map[string]string{ - "Content-Length": "20000000", + response := request(t, s, "PUT", "/mytopic", content, nil, func(r *http.Request) { + r.ContentLength = 20000000 }) err := toHTTPError(t, response.Body.String()) require.Equal(t, 413, response.Code) diff --git a/server/server_webpush_test.go b/server/server_webpush_test.go index 047c8708..bba13db4 100644 --- a/server/server_webpush_test.go +++ b/server/server_webpush_test.go @@ -235,13 +235,12 @@ func TestServer_WebPush_Publish_RemoveOnError(t *testing.T) { request(t, s, "POST", "/test-topic", "web push test", nil) - waitFor(t, func() bool { - return received.Load() - }) - // Receiving the 410 should've caused the publisher to expire all subscriptions on the endpoint - - requireSubscriptionCount(t, s, "test-topic", 0) + waitFor(t, func() bool { + subs, err := s.webPush.SubscriptionsForTopic("test-topic") + require.Nil(t, err) + return len(subs) == 0 + }) requireSubscriptionCount(t, s, "test-topic-abc", 0) }) } diff --git a/tools/s3cli/main.go b/tools/s3cli/main.go new file mode 100644 index 00000000..5de8a75c --- /dev/null +++ b/tools/s3cli/main.go @@ -0,0 +1,147 @@ +// Command s3cli is a minimal CLI for testing the s3 package. It supports put, get, rm, and ls. +// +// Usage: +// +// export S3_URL="s3://ACCESS_KEY:SECRET_KEY@BUCKET/PREFIX?region=REGION&endpoint=ENDPOINT" +// +// s3cli put <key> <file>    Upload a file +// s3cli put <key> -         Upload from stdin +// s3cli get <key>           Download to stdout +// s3cli rm <key> [<key>...] 
Delete one or more objects +// s3cli ls                  List all objects +package main + +import ( + "context" + "fmt" + "io" + "os" + "text/tabwriter" + + "heckel.io/ntfy/v2/s3" +) + +func main() { + if len(os.Args) < 2 { + usage() + } + s3URL := os.Getenv("S3_URL") + if s3URL == "" { + fail("S3_URL environment variable is required") + } + cfg, err := s3.ParseURL(s3URL) + if err != nil { + fail("invalid S3_URL: %s", err) + } + client := s3.New(cfg) + ctx := context.Background() + + switch os.Args[1] { + case "put": + cmdPut(ctx, client) + case "get": + cmdGet(ctx, client) + case "rm": + cmdRm(ctx, client) + case "ls": + cmdLs(ctx, client) + default: + usage() + } +} + +func cmdPut(ctx context.Context, client *s3.Client) { + if len(os.Args) != 4 { + fail("usage: s3cli put <key> <file>\n") + } + key := os.Args[2] + path := os.Args[3] + + var r io.Reader + var size int64 + if path == "-" { + r = os.Stdin + } else { + f, err := os.Open(path) + if err != nil { + fail("open %s: %s", path, err) + } + defer f.Close() + stat, err := f.Stat() + if err != nil { + fail("stat %s: %s", path, err) + } + r = f + size = stat.Size() + } + + if err := client.PutObject(ctx, key, r, size); err != nil { + fail("put: %s", err) + } + fmt.Fprintf(os.Stderr, "uploaded %s\n", key) +} + +func cmdGet(ctx context.Context, client *s3.Client) { + if len(os.Args) != 3 { + fail("usage: s3cli get <key>\n") + } + key := os.Args[2] + + reader, size, err := client.GetObject(ctx, key) + if err != nil { + fail("get: %s", err) + } + defer reader.Close() + n, err := io.Copy(os.Stdout, reader) + if err != nil { + fail("read: %s", err) + } + fmt.Fprintf(os.Stderr, "downloaded %s (%d bytes, content-length: %d)\n", key, n, size) +} + +func cmdRm(ctx context.Context, client *s3.Client) { + if len(os.Args) < 3 { + fail("usage: s3cli rm <key> [<key>...]\n") + } + keys := os.Args[2:] + if err := client.DeleteObjects(ctx, keys); err != nil { + fail("rm: %s", err) + } + fmt.Fprintf(os.Stderr, "deleted %d object(s)\n", len(keys)) +} + +func cmdLs(ctx context.Context, client *s3.Client) { + objects, err := client.ListObjectsV2(ctx) + if err != nil { + fail("ls: %s", err) + } + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + var totalSize int64 + for _, obj := range objects { + fmt.Fprintf(w, "%d\t%s\n", obj.Size, obj.Key) + totalSize += obj.Size + } + w.Flush() + fmt.Fprintf(os.Stderr, "%d object(s), %d bytes total\n", len(objects), totalSize) +} + +func usage() { + fmt.Fprintf(os.Stderr, `Usage: s3cli <command> [args...] + +Commands: + put <key> <file>    Upload a file (use - for stdin) + get <key>           Download to stdout + rm <key> [keys...]  Delete objects + ls                  List all objects + +Environment: + S3_URL S3 connection URL (required) + s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT] +`) + os.Exit(1) +} + +func fail(format string, args ...any) { + fmt.Fprintf(os.Stderr, format+"\n", args...) + os.Exit(1) +} diff --git a/util/limit.go b/util/limit.go index ad2118c7..9c39d3dc 100644 --- a/util/limit.go +++ b/util/limit.go @@ -152,6 +152,61 @@ func (l *RateLimiter) Reset() { l.value = 0 } +// CountingReader wraps an io.Reader and counts the number of bytes read through it. 
+type CountingReader struct { + r io.Reader + total int64 +} + +// NewCountingReader creates a new CountingReader +func NewCountingReader(r io.Reader) *CountingReader { + return &CountingReader{r: r} +} + +// Read passes through to the underlying reader and counts the bytes read +func (r *CountingReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + r.total += int64(n) + return +} + +// Total returns the total number of bytes read so far +func (r *CountingReader) Total() int64 { + return r.total +} + +// LimitReader implements an io.Reader that will pass through all Read calls to the underlying +// reader r until any of the limiter's limit is reached, at which point a Read will return ErrLimitReached. +// Each limiter's value is increased after every read based on the number of bytes actually read. +type LimitReader struct { + r io.Reader + limiters []Limiter +} + +// NewLimitReader creates a new LimitReader +func NewLimitReader(r io.Reader, limiters ...Limiter) *LimitReader { + return &LimitReader{ + r: r, + limiters: limiters, + } +} + +// Read passes through all reads to the underlying reader until any of the given limiter's limit is reached +func (r *LimitReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if n > 0 { + for i := 0; i < len(r.limiters); i++ { + if !r.limiters[i].AllowN(int64(n)) { + for j := i - 1; j >= 0; j-- { + r.limiters[j].AllowN(-int64(n)) // Revert limiters if not allowed + } + return 0, ErrLimitReached + } + } + } + return +} + // LimitWriter implements an io.Writer that will pass through all Write calls to the underlying // writer w until any of the limiter's limit is reached, at which point a Write will return ErrLimitReached. // Each limiter's value is increased with every write. diff --git a/util/limit_test.go b/util/limit_test.go index 51595351..9ca9fe39 100644 --- a/util/limit_test.go +++ b/util/limit_test.go @@ -2,9 +2,12 @@ package util import ( "bytes" - "github.com/stretchr/testify/require" + "io" + "strings" "testing" "time" + + "github.com/stretchr/testify/require" ) func TestFixedLimiter_AllowValueReset(t *testing.T) { @@ -147,3 +150,98 @@ func TestLimitWriter_WriteTwoDifferentLimiters_Wait_FixedLimiterFail(t *testing. 
_, err = lw.Write(make([]byte, 8)) // <<< FixedLimiter fails require.Equal(t, ErrLimitReached, err) } + +func TestCountingReader_Total(t *testing.T) { + cr := NewCountingReader(strings.NewReader("hello world")) + buf := make([]byte, 5) + + n, err := cr.Read(buf) + require.Nil(t, err) + require.Equal(t, 5, n) + require.Equal(t, int64(5), cr.Total()) + + n, err = cr.Read(buf) + require.Nil(t, err) + require.Equal(t, 5, n) + require.Equal(t, int64(10), cr.Total()) + + n, err = cr.Read(buf) + require.Nil(t, err) + require.Equal(t, 1, n) + require.Equal(t, int64(11), cr.Total()) + + _, err = cr.Read(buf) + require.Equal(t, io.EOF, err) + require.Equal(t, int64(11), cr.Total()) +} + +func TestCountingReader_Empty(t *testing.T) { + cr := NewCountingReader(strings.NewReader("")) + require.Equal(t, int64(0), cr.Total()) + + _, err := cr.Read(make([]byte, 10)) + require.Equal(t, io.EOF, err) + require.Equal(t, int64(0), cr.Total()) +} + +func TestLimitReader_ReadNoLimiter(t *testing.T) { + lr := NewLimitReader(strings.NewReader("hello")) + data, err := io.ReadAll(lr) + require.Nil(t, err) + require.Equal(t, "hello", string(data)) +} + +func TestLimitReader_ReadOneLimiter(t *testing.T) { + l := NewFixedLimiter(10) + lr := NewLimitReader(strings.NewReader("hello world!"), l) + + buf := make([]byte, 5) + n, err := lr.Read(buf) + require.Nil(t, err) + require.Equal(t, 5, n) + require.Equal(t, int64(5), l.Value()) + + n, err = lr.Read(buf) + require.Nil(t, err) + require.Equal(t, 5, n) + require.Equal(t, int64(10), l.Value()) + + _, err = lr.Read(buf) + require.Equal(t, ErrLimitReached, err) +} + +func TestLimitReader_ReadTwoLimiters(t *testing.T) { + l1 := NewFixedLimiter(11) + l2 := NewFixedLimiter(8) + lr := NewLimitReader(strings.NewReader("hello world!"), l1, l2) + + buf := make([]byte, 5) + n, err := lr.Read(buf) + require.Nil(t, err) + require.Equal(t, 5, n) + + // Second read: l2 (limit 8) should reject 5 more bytes + _, err = lr.Read(buf) + require.Equal(t, ErrLimitReached, err) + // l1 should have been reverted + require.Equal(t, int64(5), l1.Value()) + require.Equal(t, int64(5), l2.Value()) +} + +func TestLimitReader_ReadAll(t *testing.T) { + l := NewFixedLimiter(100) + lr := NewLimitReader(strings.NewReader("hello"), l) + data, err := io.ReadAll(lr) + require.Nil(t, err) + require.Equal(t, "hello", string(data)) + require.Equal(t, int64(5), l.Value()) +} + +func TestLimitReader_ReadExactLimit(t *testing.T) { + l := NewFixedLimiter(5) + lr := NewLimitReader(bytes.NewReader(make([]byte, 5)), l) + data, err := io.ReadAll(lr) + require.Nil(t, err) + require.Equal(t, 5, len(data)) + require.Equal(t, int64(5), l.Value()) +} diff --git a/web/package-lock.json b/web/package-lock.json index 513b0ab8..175ef11b 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -2738,9 +2738,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", - "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz", + "integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==", "cpu": [ "arm" ], @@ -2752,9 +2752,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.59.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", - "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz", + "integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==", "cpu": [ "arm64" ], @@ -2766,9 +2766,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", - "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz", + "integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==", "cpu": [ "arm64" ], @@ -2780,9 +2780,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", - "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz", + "integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==", "cpu": [ "x64" ], @@ -2794,9 +2794,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", - "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz", + "integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==", "cpu": [ "arm64" ], @@ -2808,9 +2808,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", - "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz", + "integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==", "cpu": [ "x64" ], @@ -2822,9 +2822,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", - "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz", + "integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==", "cpu": [ "arm" ], @@ -2836,9 +2836,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.59.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", - "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz", + "integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==", "cpu": [ "arm" ], @@ -2850,9 +2850,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", - "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz", + "integrity": "sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==", "cpu": [ "arm64" ], @@ -2864,9 +2864,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", - "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz", + "integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==", "cpu": [ "arm64" ], @@ -2878,9 +2878,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", - "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz", + "integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==", "cpu": [ "loong64" ], @@ -2892,9 +2892,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", - "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz", + "integrity": "sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==", "cpu": [ "loong64" ], @@ -2906,9 +2906,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", - "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz", + "integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==", "cpu": [ "ppc64" ], @@ -2920,9 +2920,9 @@ ] }, 
"node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", - "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz", + "integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==", "cpu": [ "ppc64" ], @@ -2934,9 +2934,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", - "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz", + "integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==", "cpu": [ "riscv64" ], @@ -2948,9 +2948,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", - "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz", + "integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==", "cpu": [ "riscv64" ], @@ -2962,9 +2962,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", - "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz", + "integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==", "cpu": [ "s390x" ], @@ -2976,9 +2976,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", - "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz", + "integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==", "cpu": [ "x64" ], @@ -2990,9 +2990,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", - "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz", + "integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==", "cpu": [ "x64" ], @@ -3004,9 
+3004,9 @@ ] }, "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", - "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz", + "integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==", "cpu": [ "x64" ], @@ -3018,9 +3018,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", - "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz", + "integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==", "cpu": [ "arm64" ], @@ -3032,9 +3032,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", - "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz", + "integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==", "cpu": [ "arm64" ], @@ -3046,9 +3046,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", - "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz", + "integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==", "cpu": [ "ia32" ], @@ -3060,9 +3060,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", - "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz", + "integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==", "cpu": [ "x64" ], @@ -3074,9 +3074,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", - "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz", + "integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==", "cpu": [ "x64" ], @@ -3642,9 +3642,9 @@ "license": "MIT" 
}, "node_modules/baseline-browser-mapping": { - "version": "2.10.9", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.9.tgz", - "integrity": "sha512-OZd0e2mU11ClX8+IdXe3r0dbqMEznRiT4TfbhYIbcRPZkqJ7Qwer8ij3GZAmLsRKa+II9V1v5czCkvmHH3XZBg==", + "version": "2.10.10", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.10.tgz", + "integrity": "sha512-sUoJ3IMxx4AyRqO4MLeHlnGDkyXRoUG0/AI9fjK+vS72ekpV0yWVY7O0BVjmBcRtkNcsAO2QDZ4tdKKGoI6YaQ==", "dev": true, "license": "Apache-2.0", "bin": { @@ -3940,9 +3940,9 @@ } }, "node_modules/cosmiconfig/node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.3.tgz", + "integrity": "sha512-vIYeF1u3CjlhAFekPPAk2h/Kv4T3mAkMox5OymRiJQB0spDP10LHvt+K7G9Ny6NuuMAb25/6n1qyUjAcGNf/AA==", "license": "ISC", "engines": { "node": ">= 6" @@ -7580,9 +7580,9 @@ } }, "node_modules/rollup": { - "version": "4.59.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", - "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.0.tgz", + "integrity": "sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==", "dev": true, "license": "MIT", "dependencies": { @@ -7596,31 +7596,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.59.0", - "@rollup/rollup-android-arm64": "4.59.0", - "@rollup/rollup-darwin-arm64": "4.59.0", - "@rollup/rollup-darwin-x64": "4.59.0", - "@rollup/rollup-freebsd-arm64": "4.59.0", - "@rollup/rollup-freebsd-x64": "4.59.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", - "@rollup/rollup-linux-arm-musleabihf": "4.59.0", - "@rollup/rollup-linux-arm64-gnu": "4.59.0", - "@rollup/rollup-linux-arm64-musl": "4.59.0", - "@rollup/rollup-linux-loong64-gnu": "4.59.0", - "@rollup/rollup-linux-loong64-musl": "4.59.0", - "@rollup/rollup-linux-ppc64-gnu": "4.59.0", - "@rollup/rollup-linux-ppc64-musl": "4.59.0", - "@rollup/rollup-linux-riscv64-gnu": "4.59.0", - "@rollup/rollup-linux-riscv64-musl": "4.59.0", - "@rollup/rollup-linux-s390x-gnu": "4.59.0", - "@rollup/rollup-linux-x64-gnu": "4.59.0", - "@rollup/rollup-linux-x64-musl": "4.59.0", - "@rollup/rollup-openbsd-x64": "4.59.0", - "@rollup/rollup-openharmony-arm64": "4.59.0", - "@rollup/rollup-win32-arm64-msvc": "4.59.0", - "@rollup/rollup-win32-ia32-msvc": "4.59.0", - "@rollup/rollup-win32-x64-gnu": "4.59.0", - "@rollup/rollup-win32-x64-msvc": "4.59.0", + "@rollup/rollup-android-arm-eabi": "4.60.0", + "@rollup/rollup-android-arm64": "4.60.0", + "@rollup/rollup-darwin-arm64": "4.60.0", + "@rollup/rollup-darwin-x64": "4.60.0", + "@rollup/rollup-freebsd-arm64": "4.60.0", + "@rollup/rollup-freebsd-x64": "4.60.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.0", + "@rollup/rollup-linux-arm-musleabihf": "4.60.0", + "@rollup/rollup-linux-arm64-gnu": "4.60.0", + "@rollup/rollup-linux-arm64-musl": "4.60.0", + "@rollup/rollup-linux-loong64-gnu": "4.60.0", + "@rollup/rollup-linux-loong64-musl": "4.60.0", + "@rollup/rollup-linux-ppc64-gnu": "4.60.0", + "@rollup/rollup-linux-ppc64-musl": "4.60.0", + "@rollup/rollup-linux-riscv64-gnu": "4.60.0", + 
"@rollup/rollup-linux-riscv64-musl": "4.60.0", + "@rollup/rollup-linux-s390x-gnu": "4.60.0", + "@rollup/rollup-linux-x64-gnu": "4.60.0", + "@rollup/rollup-linux-x64-musl": "4.60.0", + "@rollup/rollup-openbsd-x64": "4.60.0", + "@rollup/rollup-openharmony-arm64": "4.60.0", + "@rollup/rollup-win32-arm64-msvc": "4.60.0", + "@rollup/rollup-win32-ia32-msvc": "4.60.0", + "@rollup/rollup-win32-x64-gnu": "4.60.0", + "@rollup/rollup-win32-x64-msvc": "4.60.0", "fsevents": "~2.3.2" } }, @@ -9514,6 +9514,24 @@ "dev": true, "license": "ISC" }, + "node_modules/yaml": { + "version": "2.8.3", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.3.tgz", + "integrity": "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true, + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",