diff --git a/config/config_test.go b/config/config_test.go index 45114203..a59bf8a0 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -480,24 +480,6 @@ func TestCleanupFrequencyHours(t *testing.T) { } } -func TestDeprecatedCleanupFrequencyHoursVar(t *testing.T) { - os.Clearenv() - os.Setenv("CLEANUP_FREQUENCY", "42") - - parser := NewParser() - opts, err := parser.ParseEnvironmentVariables() - if err != nil { - t.Fatalf(`Parsing failure: %v`, err) - } - - expected := 42 - result := opts.CleanupFrequencyHours() - - if result != expected { - t.Fatalf(`Unexpected CLEANUP_FREQUENCY value, got %v instead of %v`, result, expected) - } -} - func TestDefaultCleanupArchiveReadDaysValue(t *testing.T) { os.Clearenv() @@ -534,24 +516,6 @@ func TestCleanupArchiveReadDays(t *testing.T) { } } -func TestDeprecatedCleanupArchiveReadDaysVar(t *testing.T) { - os.Clearenv() - os.Setenv("ARCHIVE_READ_DAYS", "7") - - parser := NewParser() - opts, err := parser.ParseEnvironmentVariables() - if err != nil { - t.Fatalf(`Parsing failure: %v`, err) - } - - expected := 7 - result := opts.CleanupArchiveReadDays() - - if result != expected { - t.Fatalf(`Unexpected ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected) - } -} - func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) { os.Clearenv() diff --git a/config/options.go b/config/options.go index bb7b5e2c..0934f83c 100644 --- a/config/options.go +++ b/config/options.go @@ -36,6 +36,7 @@ const ( defaultCertCache = "/tmp/cert_cache" defaultCleanupFrequencyHours = 24 defaultCleanupArchiveReadDays = 60 + defaultCleanupArchiveUnreadDays = 180 defaultCleanupRemoveSessionsDays = 30 defaultProxyImages = "http-only" defaultCreateAdmin = false @@ -79,6 +80,7 @@ type Options struct { certKeyFile string cleanupFrequencyHours int cleanupArchiveReadDays int + cleanupArchiveUnreadDays int cleanupRemoveSessionsDays int pollingFrequency int batchSize int @@ -129,6 +131,7 @@ func NewOptions() *Options { certKeyFile: 
defaultKeyFile, cleanupFrequencyHours: defaultCleanupFrequencyHours, cleanupArchiveReadDays: defaultCleanupArchiveReadDays, + cleanupArchiveUnreadDays: defaultCleanupArchiveUnreadDays, cleanupRemoveSessionsDays: defaultCleanupRemoveSessionsDays, pollingFrequency: defaultPollingFrequency, batchSize: defaultBatchSize, @@ -245,6 +248,11 @@ func (o *Options) CleanupArchiveReadDays() int { return o.cleanupArchiveReadDays } +// CleanupArchiveUnreadDays returns the number of days after which unread items are marked as removed. +func (o *Options) CleanupArchiveUnreadDays() int { + return o.cleanupArchiveUnreadDays +} + // CleanupRemoveSessionsDays returns the number of days after which to remove sessions. func (o *Options) CleanupRemoveSessionsDays() int { return o.cleanupRemoveSessionsDays } @@ -412,6 +420,7 @@ func (o *Options) String() string { builder.WriteString(fmt.Sprintf("CERT_CACHE: %v\n", o.certCache)) builder.WriteString(fmt.Sprintf("CLEANUP_FREQUENCY_HOURS: %v\n", o.cleanupFrequencyHours)) builder.WriteString(fmt.Sprintf("CLEANUP_ARCHIVE_READ_DAYS: %v\n", o.cleanupArchiveReadDays)) + builder.WriteString(fmt.Sprintf("CLEANUP_ARCHIVE_UNREAD_DAYS: %v\n", o.cleanupArchiveUnreadDays)) builder.WriteString(fmt.Sprintf("CLEANUP_REMOVE_SESSIONS_DAYS: %v\n", o.cleanupRemoveSessionsDays)) builder.WriteString(fmt.Sprintf("WORKER_POOL_SIZE: %v\n", o.workerPoolSize)) builder.WriteString(fmt.Sprintf("POLLING_FREQUENCY: %v\n", o.pollingFrequency)) diff --git a/config/parser.go b/config/parser.go index 2fb10903..fdad597d 100644 --- a/config/parser.go +++ b/config/parser.go @@ -15,8 +15,6 @@ import ( "os" "strconv" "strings" - - "miniflux.app/logger" ) // Parser handles configuration parsing. 
@@ -118,24 +116,10 @@ func (p *Parser) parseLines(lines []string) (err error) { p.opts.cleanupFrequencyHours = parseInt(value, defaultCleanupFrequencyHours) case "CLEANUP_ARCHIVE_READ_DAYS": p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays) + case "CLEANUP_ARCHIVE_UNREAD_DAYS": + p.opts.cleanupArchiveUnreadDays = parseInt(value, defaultCleanupArchiveUnreadDays) case "CLEANUP_REMOVE_SESSIONS_DAYS": p.opts.cleanupRemoveSessionsDays = parseInt(value, defaultCleanupRemoveSessionsDays) - case "CLEANUP_FREQUENCY": - logger.Error("[Config] CLEANUP_FREQUENCY has been deprecated in favor of CLEANUP_FREQUENCY_HOURS.") - - if p.opts.cleanupFrequencyHours != defaultCleanupFrequencyHours { - logger.Error("[Config] Ignoring CLEANUP_FREQUENCY as CLEANUP_FREQUENCY_HOURS is already specified.") - } else { - p.opts.cleanupFrequencyHours = parseInt(value, defaultCleanupFrequencyHours) - } - case "ARCHIVE_READ_DAYS": - logger.Error("[Config] ARCHIVE_READ_DAYS has been deprecated in favor of CLEANUP_ARCHIVE_READ_DAYS.") - - if p.opts.cleanupArchiveReadDays != defaultCleanupArchiveReadDays { - logger.Error("[Config] Ignoring ARCHIVE_READ_DAYS as CLEANUP_ARCHIVE_READ_DAYS is already specified.") - } else { - p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays) - } case "WORKER_POOL_SIZE": p.opts.workerPoolSize = parseInt(value, defaultWorkerPoolSize) case "POLLING_FREQUENCY": diff --git a/miniflux.1 b/miniflux.1 index c6d290ce..8ca59445 100644 --- a/miniflux.1 +++ b/miniflux.1 @@ -158,6 +158,11 @@ Number of days after marking read items as removed\&. .br Default is 60 days\&. .TP +.B CLEANUP_ARCHIVE_UNREAD_DAYS +Number of days after marking unread items as removed\&. +.br +Default is 180 days\&. +.TP .B CLEANUP_REMOVE_SESSIONS_DAYS Number of days after removing old sessions from the database\&. 
.br diff --git a/service/scheduler/scheduler.go b/service/scheduler/scheduler.go index 7a8eb1ca..9e822705 100644 --- a/service/scheduler/scheduler.go +++ b/service/scheduler/scheduler.go @@ -9,6 +9,7 @@ import ( "miniflux.app/config" "miniflux.app/logger" + "miniflux.app/model" "miniflux.app/storage" "miniflux.app/worker" ) @@ -28,6 +29,7 @@ func Serve(store *storage.Storage, pool *worker.Pool) { store, config.Opts.CleanupFrequencyHours(), config.Opts.CleanupArchiveReadDays(), + config.Opts.CleanupArchiveUnreadDays(), config.Opts.CleanupRemoveSessionsDays(), ) } @@ -45,15 +47,23 @@ func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSi } } -func cleanupScheduler(store *storage.Storage, frequency int, archiveDays int, sessionsDays int) { +func cleanupScheduler(store *storage.Storage, frequency, archiveReadDays, archiveUnreadDays, sessionsDays int) { c := time.Tick(time.Duration(frequency) * time.Hour) for range c { nbSessions := store.CleanOldSessions(sessionsDays) nbUserSessions := store.CleanOldUserSessions(sessionsDays) logger.Info("[Scheduler:Cleanup] Cleaned %d sessions and %d user sessions", nbSessions, nbUserSessions) - if err := store.ArchiveEntries(archiveDays); err != nil { - logger.Error("[Scheduler:Cleanup] %v", err) + if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, archiveReadDays); err != nil { + logger.Error("[Scheduler:ArchiveReadEntries] %v", err) + } else { + logger.Info("[Scheduler:ArchiveReadEntries] %d entries changed", rowsAffected) + } + + if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, archiveUnreadDays); err != nil { + logger.Error("[Scheduler:ArchiveUnreadEntries] %v", err) + } else { + logger.Info("[Scheduler:ArchiveUnreadEntries] %d entries changed", rowsAffected) } } } diff --git a/storage/entry.go b/storage/entry.go index a30f4ee9..e713bd87 100644 --- a/storage/entry.go +++ b/storage/entry.go @@ -209,26 +209,32 @@ func (s *Storage) UpdateEntries(userID, feedID int64, 
entries model.Entries, upd return nil } -// ArchiveEntries changes the status of read items to "removed" after specified days. -func (s *Storage) ArchiveEntries(days int) error { +// ArchiveEntries changes the status of entries to "removed" after the given number of days. +func (s *Storage) ArchiveEntries(status string, days int) (int64, error) { if days < 0 { - return nil + return 0, nil } - before := time.Now().AddDate(0, 0, -days) query := ` UPDATE entries SET - status=$1 + status='removed' WHERE - id=ANY(SELECT id FROM entries WHERE status=$2 AND starred is false AND share_code='' AND published_at < $3 LIMIT 5000) + id=ANY(SELECT id FROM entries WHERE status=$1 AND starred is false AND share_code='' AND published_at < now () - '%d days'::interval LIMIT 5000) ` - if _, err := s.db.Exec(query, model.EntryStatusRemoved, model.EntryStatusRead, before); err != nil { - return fmt.Errorf(`store: unable to archive read entries: %v`, err) + + result, err := s.db.Exec(fmt.Sprintf(query, days), status) + if err != nil { + return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err) } - return nil + count, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf(`store: unable to get the number of rows affected: %v`, err) + } + + return count, nil } // SetEntriesStatus update the status of the given list of entries.