Improve content scraper

Frédéric Guillot 2017-12-13 21:30:40 -08:00
parent 827683ab59
commit c6d9eb3614
7 changed files with 39 additions and 3 deletions

View File

@@ -5,6 +5,7 @@
 package rewrite
 
 import (
+    "fmt"
     "regexp"
     "strings"
@@ -38,3 +39,10 @@ func addYoutubeVideo(entryURL, entryContent string) string {
     }
     return entryContent
 }
+
+func addPDFLink(entryURL, entryContent string) string {
+    if strings.HasSuffix(entryURL, ".pdf") {
+        return fmt.Sprintf(`<a href="%s">PDF</a><br>%s`, entryURL, entryContent)
+    }
+    return entryContent
+}
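
The new addPDFLink helper is a pure function: it prepends a download link only when the entry URL ends in ".pdf" and otherwise returns the content untouched. A minimal standalone sketch of its behavior (the main function and sample values are illustrative, not part of the commit):

    package main

    import (
        "fmt"
        "strings"
    )

    // Same logic as the helper added in this commit.
    func addPDFLink(entryURL, entryContent string) string {
        if strings.HasSuffix(entryURL, ".pdf") {
            return fmt.Sprintf(`<a href="%s">PDF</a><br>%s`, entryURL, entryContent)
        }
        return entryContent
    }

    func main() {
        // URL ends in .pdf: a download link is prepended.
        fmt.Println(addPDFLink("https://example.org/paper.pdf", "abstract"))
        // Output: <a href="https://example.org/paper.pdf">PDF</a><br>abstract

        // Any other URL passes through unchanged.
        fmt.Println(addPDFLink("https://example.org/post", "abstract"))
        // Output: abstract
    }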

View File

@@ -18,12 +18,16 @@ func Rewriter(entryURL, entryContent, customRewriteRules string) string {
     }
 
     rules := strings.Split(rulesList, ",")
+    rules = append(rules, "add_pdf_download_link")
+
     for _, rule := range rules {
         switch strings.TrimSpace(rule) {
         case "add_image_title":
             entryContent = addImageTitle(entryURL, entryContent)
         case "add_youtube_video":
             entryContent = addYoutubeVideo(entryURL, entryContent)
+        case "add_pdf_download_link":
+            entryContent = addPDFLink(entryURL, entryContent)
         }
     }
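
Appending "add_pdf_download_link" after splitting the user-supplied list means the PDF rule runs for every entry, after any custom rules. A small runnable sketch of just this rule-list handling (the sample rule list is illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Mirrors the handling in Rewriter: split the custom list,
        // then append the always-on PDF rule so it runs last.
        customRewriteRules := "add_image_title, add_youtube_video"

        rules := strings.Split(customRewriteRules, ",")
        rules = append(rules, "add_pdf_download_link")

        for _, rule := range rules {
            fmt.Printf("%q\n", strings.TrimSpace(rule))
        }
        // Output:
        // "add_image_title"
        // "add_youtube_video"
        // "add_pdf_download_link"
    }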

View File

@@ -58,3 +58,13 @@ func TestRewriteWithXkcdAndNoImage(t *testing.T) {
         t.Errorf(`Not expected output: got "%s" instead of "%s"`, output, expected)
     }
 }
+
+func TestRewriteWithPDFLink(t *testing.T) {
+    description := "test"
+    output := Rewriter("https://example.org/document.pdf", description, ``)
+    expected := `<a href="https://example.org/document.pdf">PDF</a><br>test`
+
+    if expected != output {
+        t.Errorf(`Not expected output: got "%s" instead of "%s"`, output, expected)
+    }
+}
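
The new test covers only the positive path. A hypothetical table-driven variant (not in this commit) would also pin down the pass-through behavior for non-PDF URLs:

    package rewrite

    import "testing"

    // Hypothetical extension of the committed test; assumes the same package.
    func TestRewriteWithPDFLinkVariants(t *testing.T) {
        scenarios := []struct {
            url, input, expected string
        }{
            // .pdf URL: download link is prepended.
            {"https://example.org/document.pdf", "test", `<a href="https://example.org/document.pdf">PDF</a><br>test`},
            // Non-PDF URL: content passes through unchanged.
            {"https://example.org/page.html", "test", "test"},
        }

        for _, s := range scenarios {
            if output := Rewriter(s.url, s.input, ``); output != s.expected {
                t.Errorf(`Not expected output: got %q instead of %q`, output, s.expected)
            }
        }
    }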

View File

@@ -7,10 +7,16 @@ package scraper
 // List of predefined scraper rules (alphabetically sorted)
 // domain => CSS selectors
 var predefinedRules = map[string]string{
+    "github.com":        "article.entry-content",
+    "igen.fr":           "section.corps",
     "lemonde.fr":        "div#articleBody",
     "lesjoiesducode.fr": ".blog-post-content img",
     "linux.com":         "div.content, div[property]",
+    "medium.com":        ".section-content",
     "opensource.com":    "div[property]",
+    "osnews.com":        "div.newscontent1",
     "phoronix.com":      "div.content",
     "techcrunch.com":    "div.article-entry",
+    "theregister.co.uk": "#body",
+    "wired.com":         "main figure, article",
 }
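
The map grows from five to eleven predefined rules. This diff does not show how getPredefinedScraperRules matches a URL against the map; one plausible implementation, sketched here purely as an assumption, checks the parsed hostname against each domain key:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // Assumed lookup strategy; the real getPredefinedScraperRules
    // implementation is not part of this diff.
    func getRules(websiteURL string, predefined map[string]string) string {
        u, err := url.Parse(websiteURL)
        if err != nil {
            return ""
        }
        for domain, selector := range predefined {
            if strings.Contains(u.Hostname(), domain) {
                return selector
            }
        }
        return ""
    }

    func main() {
        rules := map[string]string{"osnews.com": "div.newscontent1"}
        fmt.Println(getRules("https://www.osnews.com/story/123", rules))
        // Output: div.newscontent1
    }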

View File

@@ -33,6 +33,9 @@ func Fetch(websiteURL, rules string) (string, error) {
         return "", err
     }
 
+    // The entry URL could be a redirect somewhere else.
+    websiteURL = response.EffectiveURL
+
     if rules == "" {
         rules = getPredefinedScraperRules(websiteURL)
     }
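
Switching to response.EffectiveURL means the predefined-rule lookup runs against the final destination, so feeds that link through a redirector still match the right selector. In plain net/http terms the mechanism looks like this: after the client follows redirects, resp.Request holds the last request made, so its URL is the effective one. A minimal sketch (the example URL is illustrative):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        resp, err := http.Get("http://example.org/")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()

        // resp.Request is the final request after any redirects,
        // so this is the URL that was actually fetched.
        effectiveURL := resp.Request.URL.String()
        fmt.Println(effectiveURL)
    }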

File diff suppressed because one or more lines are too long

View File

@@ -568,6 +568,11 @@ a.button {
     max-width: 100%;
 }
 
+.entry-content figure {
+    margin-top: 15px;
+    margin-bottom: 15px;
+}
+
 .entry-content figure img {
     border: 1px solid #000;
 }