Add the possibility to run cleanup tasks from the command line

Frédéric Guillot 2023-06-25 11:23:23 -07:00
parent 3dc8e5ebaf
commit 5550d662a2
9 changed files with 138 additions and 116 deletions
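
With this change, the former -cronjob flag is replaced by -refresh-feeds, and a new -run-cleanup-tasks flag triggers the session and entry cleanup on demand. Example invocations (assuming the miniflux binary is on your PATH):

    miniflux -refresh-feeds
    miniflux -run-cleanup-tasks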

cli/cleanup_tasks.go (new file)

@@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package cli // import "miniflux.app/cli"

import (
	"time"

	"miniflux.app/config"
	"miniflux.app/logger"
	"miniflux.app/metric"
	"miniflux.app/model"
	"miniflux.app/storage"
)

func runCleanupTasks(store *storage.Storage) {
	nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsDays())
	nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsDays())
	logger.Info("[Sessions] Removed %d application sessions and %d user sessions", nbSessions, nbUserSessions)

	startTime := time.Now()
	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
		logger.Error("[ArchiveReadEntries] %v", err)
	} else {
		logger.Info("[ArchiveReadEntries] %d entries changed", rowsAffected)

		if config.Opts.HasMetricsCollector() {
			metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusRead).Observe(time.Since(startTime).Seconds())
		}
	}

	startTime = time.Now()
	if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
		logger.Error("[ArchiveUnreadEntries] %v", err)
	} else {
		logger.Info("[ArchiveUnreadEntries] %d entries changed", rowsAffected)

		if config.Opts.HasMetricsCollector() {
			metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusUnread).Observe(time.Since(startTime).Seconds())
		}
	}
}
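
Illustrative output of one cleanup run (the message formats come from the logger calls above; the counts are made up and logger timestamps/prefixes are omitted):

    [Sessions] Removed 3 application sessions and 7 user sessions
    [ArchiveReadEntries] 1000 entries changed
    [ArchiveUnreadEntries] 0 entries changed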

(next changed file)

@@ -17,7 +17,7 @@ import (
)
const (
flagInfoHelp = "Show application information"
flagInfoHelp = "Show build information"
flagVersionHelp = "Show application version"
flagMigrateHelp = "Run SQL migrations"
flagFlushSessionsHelp = "Flush all sessions (disconnect users)"
@@ -28,7 +28,8 @@ const (
	flagConfigFileHelp = "Load configuration file"
	flagConfigDumpHelp = "Print parsed configuration values"
	flagHealthCheckHelp = `Perform a health check on the given endpoint (the value "auto" try to guess the health check endpoint).`
	flagCronjobHelp = "Run Miniflux as a cronjob to refresh a batch of feeds and exit"
	flagRefreshFeedsHelp = "Refresh a batch of feeds and exit"
	flagRunCleanupTasksHelp = "Run cleanup tasks (delete old sessions and archives old entries)"
)
// Parse parses command line arguments.
@@ -46,7 +47,8 @@ func Parse() {
		flagConfigFile string
		flagConfigDump bool
		flagHealthCheck string
		flagCronjob bool
		flagRefreshFeeds bool
		flagRunCleanupTasks bool
	)
	flag.BoolVar(&flagInfo, "info", false, flagInfoHelp)
@@ -63,7 +65,8 @@ func Parse() {
	flag.StringVar(&flagConfigFile, "c", "", flagConfigFileHelp)
	flag.BoolVar(&flagConfigDump, "config-dump", false, flagConfigDumpHelp)
	flag.StringVar(&flagHealthCheck, "healthcheck", "", flagHealthCheckHelp)
	flag.BoolVar(&flagCronjob, "cronjob", false, flagCronjobHelp)
	flag.BoolVar(&flagRefreshFeeds, "refresh-feeds", false, flagRefreshFeedsHelp)
	flag.BoolVar(&flagRunCleanupTasks, "run-cleanup-tasks", false, flagRunCleanupTasksHelp)
	flag.Parse()
	cfg := config.NewParser()
@@ -190,8 +193,13 @@ func Parse() {
		createAdmin(store)
	}

	if flagCronjob {
		runCronjob(store)
	if flagRefreshFeeds {
		refreshFeeds(store)
		return
	}

	if flagRunCleanupTasks {
		runCleanupTasks(store)
		return
	}
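
Since the command returns as soon as the cleanup finishes, it can be scheduled externally. A hypothetical crontab entry (sketch only; the binary and configuration file paths are assumptions, adjust to your installation):

    30 3 * * * miniflux -c /etc/miniflux.conf -run-cleanup-tasks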

(next changed file)

@@ -12,17 +12,16 @@ import (
"time"
"miniflux.app/config"
httpd "miniflux.app/http/server"
"miniflux.app/logger"
"miniflux.app/metric"
"miniflux.app/service/httpd"
"miniflux.app/service/scheduler"
"miniflux.app/storage"
"miniflux.app/systemd"
"miniflux.app/worker"
)
func startDaemon(store *storage.Storage) {
logger.Info("Starting Miniflux...")
logger.Info("Starting daemon...")
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt)
@@ -31,12 +30,12 @@ func startDaemon(store *storage.Storage) {
	pool := worker.NewPool(store, config.Opts.WorkerPoolSize())

	if config.Opts.HasSchedulerService() && !config.Opts.HasMaintenanceMode() {
		scheduler.Serve(store, pool)
		runScheduler(store, pool)
	}

	var httpServer *http.Server
	if config.Opts.HasHTTPService() {
		httpServer = httpd.Serve(store, pool)
		httpServer = httpd.StartWebServer(store, pool)
	}

	if config.Opts.HasMetricsCollector() {

(next changed file)

@@ -14,7 +14,7 @@ import (
"miniflux.app/storage"
)
func runCronjob(store *storage.Storage) {
func refreshFeeds(store *storage.Storage) {
var wg sync.WaitGroup
startTime := time.Now()

cli/scheduler.go (new file)

@@ -0,0 +1,47 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package cli // import "miniflux.app/cli"

import (
	"time"

	"miniflux.app/config"
	"miniflux.app/logger"
	"miniflux.app/storage"
	"miniflux.app/worker"
)

func runScheduler(store *storage.Storage, pool *worker.Pool) {
	logger.Info(`Starting background scheduler...`)

	go feedScheduler(
		store,
		pool,
		config.Opts.PollingFrequency(),
		config.Opts.BatchSize(),
	)

	go cleanupScheduler(
		store,
		config.Opts.CleanupFrequencyHours(),
	)
}

func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSize int) {
	for range time.Tick(time.Duration(frequency) * time.Minute) {
		jobs, err := store.NewBatch(batchSize)
		logger.Info("[Scheduler:Feed] Pushing %d jobs to the queue", len(jobs))
		if err != nil {
			logger.Error("[Scheduler:Feed] %v", err)
		} else {
			pool.Push(jobs)
		}
	}
}

func cleanupScheduler(store *storage.Storage, frequency int) {
	for range time.Tick(time.Duration(frequency) * time.Hour) {
		runCleanupTasks(store)
	}
}
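
Because time.Tick only fires after the first full interval, the first scheduled cleanup happens one cleanup-frequency period after startup. A hedged sketch (not part of this commit) of a variant that also runs once immediately, using only identifiers that appear in this diff:

    func cleanupScheduler(store *storage.Storage, frequency int) {
        runCleanupTasks(store) // run once at startup before waiting for the first tick
        for range time.Tick(time.Duration(frequency) * time.Hour) {
            runCleanupTasks(store)
        }
    }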

(next changed file)

@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package httpd // import "miniflux.app/service/httpd"
package httpd // import "miniflux.app/http/server"
import (
"crypto/tls"
@ -29,8 +29,7 @@ import (
"golang.org/x/crypto/acme/autocert"
)
// Serve starts a new HTTP server.
func Serve(store *storage.Storage, pool *worker.Pool) *http.Server {
func StartWebServer(store *storage.Storage, pool *worker.Pool) *http.Server {
certFile := config.Opts.CertFile()
keyFile := config.Opts.CertKeyFile()
certDomain := config.Opts.CertDomain()

(next changed file)

@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package httpd // import "miniflux.app/service/httpd"
package httpd // import "miniflux.app/http/server"
import (
"context"

(next changed file)

@@ -1,39 +1,34 @@
.\" Manpage for miniflux.
.TH "MINIFLUX" "1" "August 29, 2022" "\ \&" "\ \&"
.TH "MINIFLUX" "1" "June 25, 2023" "\ \&" "\ \&"
.SH NAME
miniflux \- Minimalist and opinionated feed reader
.SH SYNOPSIS
\fBminiflux\fR [-vic] [-create-admin] [-debug] [-flush-sessions] [-info] [-migrate]
[-reset-feed-errors] [-reset-password] [-version] [-config-file]
[-config-dump] [-cronjob] [-healthcheck]
\fBminiflux\fR [-vic] [-config-dump] [-config-file] [-create-admin] [-debug] [-flush-sessions]
[-healthcheck] [-info] [-migrate] [-refresh-feeds] [-reset-feed-errors] [-reset-password]
[-run-cleanup-tasks] [-version]
.SH DESCRIPTION
\fBminiflux\fR is a minimalist and opinionated feed reader.
.SH OPTIONS
.PP
.B \-cronjob
.RS 4
Run Miniflux as a cronjob to refresh a batch of feeds and exit\&.
.RE
.PP
.B \-c
.RS 4
Load configuration file\&.
.RE
.PP
.B \-config-file
.RS 4
Load configuration file\&.
.RE
.PP
.B \-config-dump
.RS 4
Print parsed configuration values. This will include sensitive information like passwords\&.
.RE
.PP
.B \-c /path/to/miniflux.conf
.RS 4
Load configuration file\&.
.RE
.PP
.B \-config-file /path/to/miniflux.conf
.RS 4
Load configuration file\&.
.RE
.PP
.B \-create-admin
.RS 4
Create admin user\&.
@@ -49,7 +44,7 @@ Show debug logs\&.
Flush all sessions (disconnect users)\&.
.RE
.PP
.B \-healthcheck
.B \-healthcheck <endpoint>
.RS 4
Perform a health check on the given endpoint\&.
.br
@@ -58,12 +53,12 @@ The value "auto" try to guess the health check endpoint\&.
.PP
.B \-i
.RS 4
Show application information\&.
Show build information\&.
.RE
.PP
.B \-info
.RS 4
Show application information\&.
Show build information\&.
.RE
.PP
.B \-migrate
@@ -71,6 +66,11 @@ Show application information\&.
Run SQL migrations\&.
.RE
.PP
.B \-refresh-feeds
.RS 4
Refresh a batch of feeds and exit\&.
.RE
.PP
.B \-reset-feed-errors
.RS 4
Clear all feed errors for all users\&.
@@ -81,6 +81,11 @@ Clear all feed errors for all users\&.
Reset user password\&.
.RE
.PP
.B \-run-cleanup-tasks
.RS 4
Run cleanup tasks (delete old sessions and archives old entries)\&.
.RE
.PP
.B \-v
.RS 4
Show application version\&.

(next changed file)

@@ -1,78 +0,0 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package scheduler // import "miniflux.app/service/scheduler"

import (
	"time"

	"miniflux.app/config"
	"miniflux.app/logger"
	"miniflux.app/metric"
	"miniflux.app/model"
	"miniflux.app/storage"
	"miniflux.app/worker"
)

// Serve starts the internal scheduler.
func Serve(store *storage.Storage, pool *worker.Pool) {
	logger.Info(`Starting scheduler...`)

	go feedScheduler(
		store,
		pool,
		config.Opts.PollingFrequency(),
		config.Opts.BatchSize(),
	)

	go cleanupScheduler(
		store,
		config.Opts.CleanupFrequencyHours(),
		config.Opts.CleanupArchiveReadDays(),
		config.Opts.CleanupArchiveUnreadDays(),
		config.Opts.CleanupArchiveBatchSize(),
		config.Opts.CleanupRemoveSessionsDays(),
	)
}

func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSize int) {
	for range time.Tick(time.Duration(frequency) * time.Minute) {
		jobs, err := store.NewBatch(batchSize)
		logger.Info("[Scheduler:Feed] Pushing %d jobs to the queue", len(jobs))
		if err != nil {
			logger.Error("[Scheduler:Feed] %v", err)
		} else {
			pool.Push(jobs)
		}
	}
}

func cleanupScheduler(store *storage.Storage, frequency, archiveReadDays, archiveUnreadDays, archiveBatchSize, sessionsDays int) {
	for range time.Tick(time.Duration(frequency) * time.Hour) {
		nbSessions := store.CleanOldSessions(sessionsDays)
		nbUserSessions := store.CleanOldUserSessions(sessionsDays)
		logger.Info("[Scheduler:Cleanup] Cleaned %d sessions and %d user sessions", nbSessions, nbUserSessions)

		startTime := time.Now()
		if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, archiveReadDays, archiveBatchSize); err != nil {
			logger.Error("[Scheduler:ArchiveReadEntries] %v", err)
		} else {
			logger.Info("[Scheduler:ArchiveReadEntries] %d entries changed", rowsAffected)

			if config.Opts.HasMetricsCollector() {
				metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusRead).Observe(time.Since(startTime).Seconds())
			}
		}

		startTime = time.Now()
		if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, archiveUnreadDays, archiveBatchSize); err != nil {
			logger.Error("[Scheduler:ArchiveUnreadEntries] %v", err)
		} else {
			logger.Info("[Scheduler:ArchiveUnreadEntries] %d entries changed", rowsAffected)

			if config.Opts.HasMetricsCollector() {
				metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusUnread).Observe(time.Since(startTime).Seconds())
			}
		}
	}
}