From 3b62f904d6d3fdad168d3b212ff7c465c01b50f4 Mon Sep 17 00:00:00 2001
From: Frédéric Guillot
Date: Fri, 19 Jan 2018 18:43:27 -0800
Subject: [PATCH] Do not crawl existing entry URLs

---
 reader/feed/handler.go        |  4 ++--
 reader/processor/processor.go | 19 +++++++++++++------
 storage/entry.go              |  8 ++++++++
 3 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/reader/feed/handler.go b/reader/feed/handler.go
index 8b5658e5..c1f42ae6 100644
--- a/reader/feed/handler.go
+++ b/reader/feed/handler.go
@@ -70,7 +70,7 @@ func (h *Handler) CreateFeed(userID, categoryID int64, url string, crawler bool)
 		return nil, err
 	}
 
-	feedProcessor := processor.NewFeedProcessor(subscription)
+	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
 	feedProcessor.WithCrawler(crawler)
 	feedProcessor.Process()
 
@@ -162,7 +162,7 @@ func (h *Handler) RefreshFeed(userID, feedID int64) error {
 		return err
 	}
 
-	feedProcessor := processor.NewFeedProcessor(subscription)
+	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
 	feedProcessor.WithScraperRules(originalFeed.ScraperRules)
 	feedProcessor.WithRewriteRules(originalFeed.RewriteRules)
 	feedProcessor.WithCrawler(originalFeed.Crawler)
diff --git a/reader/processor/processor.go b/reader/processor/processor.go
index 33aa8ede..ca046039 100644
--- a/reader/processor/processor.go
+++ b/reader/processor/processor.go
@@ -10,10 +10,13 @@ import (
 	"github.com/miniflux/miniflux/reader/rewrite"
 	"github.com/miniflux/miniflux/reader/sanitizer"
 	"github.com/miniflux/miniflux/reader/scraper"
+	"github.com/miniflux/miniflux/storage"
 )
 
 // FeedProcessor handles the processing of feed contents.
 type FeedProcessor struct {
+	userID       int64
+	store        *storage.Storage
 	feed         *model.Feed
 	scraperRules string
 	rewriteRules string
@@ -39,11 +42,15 @@ func (f *FeedProcessor) WithRewriteRules(rules string) {
 func (f *FeedProcessor) Process() {
 	for _, entry := range f.feed.Entries {
 		if f.crawler {
-			content, err := scraper.Fetch(entry.URL, f.scraperRules)
-			if err != nil {
-				logger.Error("[FeedProcessor] %v", err)
+			if f.store.EntryURLExists(f.userID, entry.URL) {
+				logger.Debug(`[FeedProcessor] Do not crawl existing entry URL: "%s"`, entry.URL)
 			} else {
-				entry.Content = content
+				content, err := scraper.Fetch(entry.URL, f.scraperRules)
+				if err != nil {
+					logger.Error("[FeedProcessor] %v", err)
+				} else {
+					entry.Content = content
+				}
 			}
 		}
 
@@ -53,6 +60,6 @@ func (f *FeedProcessor) Process() {
 }
 
 // NewFeedProcessor returns a new FeedProcessor.
-func NewFeedProcessor(feed *model.Feed) *FeedProcessor {
-	return &FeedProcessor{feed: feed, crawler: false}
+func NewFeedProcessor(userID int64, store *storage.Storage, feed *model.Feed) *FeedProcessor {
+	return &FeedProcessor{userID: userID, store: store, feed: feed, crawler: false}
 }
diff --git a/storage/entry.go b/storage/entry.go
index c6058d00..d1e8798e 100644
--- a/storage/entry.go
+++ b/storage/entry.go
@@ -226,3 +226,11 @@ func (s *Storage) MarkAllAsRead(userID int64) error {
 
 	return nil
 }
+
+// EntryURLExists returns true if an entry with this URL already exists.
+func (s *Storage) EntryURLExists(userID int64, entryURL string) bool {
+	var result int
+	query := `SELECT count(*) as c FROM entries WHERE user_id=$1 AND url=$2`
+	s.db.QueryRow(query, userID, entryURL).Scan(&result)
+	return result >= 1
+}
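
Usage note: a minimal sketch of the new constructor wiring, not taken from
the patch itself; it assumes an initialized *storage.Storage named `store`
and a parsed feed `subscription`, as in Handler.CreateFeed above:

	// Hand the processor the user and storage handle so Process()
	// can check for already-stored entry URLs before crawling.
	feedProcessor := processor.NewFeedProcessor(userID, store, subscription)
	feedProcessor.WithCrawler(true)
	// Entries whose URL already exists for this user keep their
	// feed-provided content; only new URLs are fetched by the scraper.
	feedProcessor.Process()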
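Storage note: the deduplication check costs one COUNT query per entry. A
hypothetical call site, assuming `store` and `userID` are in scope:

	// True when at least one row in `entries` matches this user and URL.
	// A Scan error leaves result at 0, which reads as "does not exist",
	// so a failed lookup falls back to crawling rather than skipping.
	exists := store.EntryURLExists(userID, "https://example.org/article")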