author    Frédéric Guillot <f@miniflux.net>  2023-09-08 22:45:17 -0700
committer Frédéric Guillot <f@miniflux.net>  2023-09-09 13:11:42 -0700
commit    48f6885f4472efbe0e23f990ae8d4545f9a6a73d (patch)
tree      a05b35013e65f95013f90006b07870ddaeaf4065 /internal/reader
parent    32d33104a4934771ca99b1bcfe55bd0e4e88809b (diff)
download  v2-48f6885f4472efbe0e23f990ae8d4545f9a6a73d.tar.gz
          v2-48f6885f4472efbe0e23f990ae8d4545f9a6a73d.tar.zst
          v2-48f6885f4472efbe0e23f990ae8d4545f9a6a73d.zip
Add generic webhook integration
Diffstat (limited to 'internal/reader')
-rw-r--r--  internal/reader/atom/atom_03.go          2
-rw-r--r--  internal/reader/atom/atom_10.go          4
-rw-r--r--  internal/reader/handler/handler.go      12
-rw-r--r--  internal/reader/json/json.go             7
-rw-r--r--  internal/reader/processor/processor.go  27
-rw-r--r--  internal/reader/rdf/rdf.go               2
-rw-r--r--  internal/reader/rss/rss.go               4
7 files changed, 22 insertions, 36 deletions
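
The integration code itself lives under internal/integration and is outside this diff, so the shape of the webhook call is not shown here. The following is a purely hypothetical sketch of what a generic webhook push for a batch of new entries could look like: the payload fields, the X-Webhook-Signature header name, and the HMAC-SHA256 signing scheme are all assumptions for illustration, not taken from Miniflux.

package main

// Hypothetical sketch of a generic webhook push. Field names, the header
// name, and the signature scheme are assumptions, not the Miniflux API.

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type webhookEntry struct {
	Title string   `json:"title"`
	URL   string   `json:"url"`
	Tags  []string `json:"tags"` // an empty (non-nil) slice serializes as [], not null
}

type webhookPayload struct {
	FeedURL string         `json:"feed_url"`
	Entries []webhookEntry `json:"entries"`
}

func pushWebhook(endpoint, secret string, payload webhookPayload) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	// Sign the body so the receiver can verify its origin (assumed scheme).
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(body)
	signature := hex.EncodeToString(mac.Sum(nil))

	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Webhook-Signature", signature) // hypothetical header name

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return fmt.Errorf("webhook endpoint returned %s", resp.Status)
	}
	return nil
}

func main() {
	payload := webhookPayload{
		FeedURL: "https://example.org/feed.xml",
		Entries: []webhookEntry{{Title: "Hello", URL: "https://example.org/hello", Tags: []string{}}},
	}
	if err := pushWebhook("https://example.org/hook", "secret", payload); err != nil {
		fmt.Println("push failed:", err)
	}
}

Under this scheme, a receiver would recompute the HMAC over the raw request body with the shared secret and compare it to the header value before trusting the payload.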
diff --git a/internal/reader/atom/atom_03.go b/internal/reader/atom/atom_03.go
index a760ce26..d7e99ae6 100644
--- a/internal/reader/atom/atom_03.go
+++ b/internal/reader/atom/atom_03.go
@@ -86,7 +86,7 @@ type atom03Entry struct {
}
func (a *atom03Entry) Transform() *model.Entry {
- entry := new(model.Entry)
+ entry := model.NewEntry()
entry.URL = a.Links.originalLink()
entry.Date = a.entryDate()
entry.Author = a.Author.String()
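
Every format-specific Transform in this commit switches from new(model.Entry) to model.NewEntry(). The constructor is not part of this diff, but read together with the nil-to-empty slice changes elsewhere in the commit, a plausible intent is to hand out entries whose slice fields are already non-nil, so that JSON consumers such as a webhook payload see [] rather than null. A minimal sketch of that idea, with field names assumed rather than copied from the real model package:

// Hypothetical sketch: the real model.Entry has many more fields; the point
// is that a constructor can guarantee non-nil slice fields for JSON output.
package model

type Enclosure struct {
	URL      string `json:"url"`
	MimeType string `json:"mime_type"`
}

type Entry struct {
	Title      string      `json:"title"`
	URL        string      `json:"url"`
	Tags       []string    `json:"tags"`
	Enclosures []Enclosure `json:"enclosures"`
}

// NewEntry returns an Entry whose slice fields are empty but non-nil,
// so they serialize as [] instead of null.
func NewEntry() *Entry {
	return &Entry{
		Tags:       make([]string, 0),
		Enclosures: make([]Enclosure, 0),
	}
}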
diff --git a/internal/reader/atom/atom_10.go b/internal/reader/atom/atom_10.go
index 2c6edf17..8eee69bf 100644
--- a/internal/reader/atom/atom_10.go
+++ b/internal/reader/atom/atom_10.go
@@ -95,7 +95,7 @@ type atom10Entry struct {
}
func (a *atom10Entry) Transform() *model.Entry {
- entry := new(model.Entry)
+ entry := model.NewEntry()
entry.URL = a.Links.originalLink()
entry.Date = a.entryDate()
entry.Author = a.Authors.String()
@@ -219,7 +219,7 @@ func (a *atom10Entry) entryEnclosures() model.EnclosureList {
}
func (r *atom10Entry) entryCategories() []string {
- var categoryList []string
+ categoryList := make([]string, 0)
for _, atomCategory := range r.Categories {
if strings.TrimSpace(atomCategory.Label) != "" {
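
The change from var categoryList []string (a nil slice) to make([]string, 0) (an empty, non-nil slice) matters once the categories end up in a serialized payload: encoding/json renders a nil slice as null and an empty slice as []. A quick self-contained illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string           // declared but never allocated: nil
	emptySlice := make([]string, 0) // allocated with zero elements: non-nil

	a, _ := json.Marshal(struct {
		Tags []string `json:"tags"`
	}{nilSlice})
	b, _ := json.Marshal(struct {
		Tags []string `json:"tags"`
	}{emptySlice})

	fmt.Println(string(a)) // {"tags":null}
	fmt.Println(string(b)) // {"tags":[]}
}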
diff --git a/internal/reader/handler/handler.go b/internal/reader/handler/handler.go
index 06bd78fc..320643b1 100644
--- a/internal/reader/handler/handler.go
+++ b/internal/reader/handler/handler.go
@@ -10,6 +10,7 @@ import (
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/errors"
"miniflux.app/v2/internal/http/client"
+ "miniflux.app/v2/internal/integration"
"miniflux.app/v2/internal/locale"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/model"
@@ -177,15 +178,24 @@ func RefreshFeed(store *storage.Storage, userID, feedID int64, forceRefresh bool
// We don't update existing entries when the crawler is enabled (we only crawl new entries), unless the refresh is forced.
updateExistingEntries := forceRefresh || !originalFeed.Crawler
- if storeErr := store.RefreshFeedEntries(originalFeed.UserID, originalFeed.ID, originalFeed.Entries, updateExistingEntries); storeErr != nil {
+ newEntries, storeErr := store.RefreshFeedEntries(originalFeed.UserID, originalFeed.ID, originalFeed.Entries, updateExistingEntries)
+ if storeErr != nil {
originalFeed.WithError(storeErr.Error())
store.UpdateFeedError(originalFeed)
return storeErr
}
+ userIntegrations, intErr := store.Integration(userID)
+ if intErr != nil {
+ logger.Error("[RefreshFeed] Fetching integrations for user %d failed: %v; the refresh process will go on, but no integrations will run this time.", userID, intErr)
+ } else if userIntegrations != nil && len(newEntries) > 0 {
+ go integration.PushEntries(originalFeed, newEntries, userIntegrations)
+ }
+
// We update caching headers only if the feed has been modified,
// because some websites don't return the same headers when replying with a 304.
originalFeed.WithClientResponse(response)
+
checkFeedIcon(
store,
originalFeed.ID,
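
The handler change is the core of the commit: store.RefreshFeedEntries now reports which entries were actually new, and the handler pushes that batch to the user's integrations in a single goroutine instead of leaving the push to the per-entry processing loop. Failing to load the integrations is deliberately non-fatal; the refresh still completes. A rough sketch of the resulting flow, using simplified placeholder types rather than the real storage and integration signatures:

// Rough sketch of the new refresh flow. The Store interface and the push
// callback are simplified placeholders, not the real Miniflux APIs.
package refresh

import "log"

type Entry struct{ Title string }

type Feed struct {
	UserID  int64
	Crawler bool
	Entries []*Entry
}

type Integration struct{ WebhookURL string }

type Store interface {
	// Returns only the entries that were newly inserted.
	RefreshFeedEntries(feed *Feed, updateExisting bool) ([]*Entry, error)
	Integration(userID int64) (*Integration, error)
}

func RefreshFeed(store Store, feed *Feed, forceRefresh bool, push func(*Feed, []*Entry, *Integration)) error {
	// Existing entries are left untouched when the crawler is enabled,
	// unless the refresh is forced.
	updateExisting := forceRefresh || !feed.Crawler

	newEntries, err := store.RefreshFeedEntries(feed, updateExisting)
	if err != nil {
		return err
	}

	userIntegrations, err := store.Integration(feed.UserID)
	if err != nil {
		// Integrations are best-effort: log and keep refreshing.
		log.Printf("fetching integrations for user %d failed: %v", feed.UserID, err)
	} else if userIntegrations != nil && len(newEntries) > 0 {
		// One asynchronous bulk push per refresh, only for brand-new entries.
		go push(feed, newEntries, userIntegrations)
	}
	return nil
}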
diff --git a/internal/reader/json/json.go b/internal/reader/json/json.go
index 48f64c23..68f4c0f8 100644
--- a/internal/reader/json/json.go
+++ b/internal/reader/json/json.go
@@ -181,7 +181,7 @@ func (j *jsonItem) GetEnclosures() model.EnclosureList {
}
func (j *jsonItem) Transform() *model.Entry {
- entry := new(model.Entry)
+ entry := model.NewEntry()
entry.URL = j.URL
entry.Date = j.GetDate()
entry.Author = j.GetAuthor()
@@ -189,7 +189,10 @@ func (j *jsonItem) Transform() *model.Entry {
entry.Content = j.GetContent()
entry.Title = strings.TrimSpace(j.GetTitle())
entry.Enclosures = j.GetEnclosures()
- entry.Tags = j.Tags
+ if len(j.Tags) > 0 {
+ entry.Tags = j.Tags
+ }
+
return entry
}
diff --git a/internal/reader/processor/processor.go b/internal/reader/processor/processor.go
index 4dec59f0..d56d4289 100644
--- a/internal/reader/processor/processor.go
+++ b/internal/reader/processor/processor.go
@@ -13,8 +13,6 @@ import (
"time"
"unicode/utf8"
- "miniflux.app/v2/internal/integration"
-
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/http/client"
"miniflux.app/v2/internal/logger"
@@ -41,9 +39,6 @@ var (
func ProcessFeedEntries(store *storage.Storage, feed *model.Feed, user *model.User, forceRefresh bool) {
var filteredEntries model.Entries
- // array used for bulk push
- entriesToPush := model.Entries{}
-
// Process older entries first
for i := len(feed.Entries) - 1; i >= 0; i-- {
entry := feed.Entries[i]
@@ -90,32 +85,10 @@ func ProcessFeedEntries(store *storage.Storage, feed *model.Feed, user *model.Us
// The sanitizer should always run at the end of the process to make sure unsafe HTML is filtered.
entry.Content = sanitizer.Sanitize(url, entry.Content)
- if entryIsNew {
- intg, err := store.Integration(feed.UserID)
- if err != nil {
- logger.Error("[Processor] Get integrations for user %d failed: %v; the refresh process will go on, but no integrations will run this time.", feed.UserID, err)
- } else if intg != nil {
- localEntry := entry
- go func() {
- integration.PushEntry(localEntry, feed, intg)
- }()
- entriesToPush = append(entriesToPush, localEntry)
- }
- }
-
updateEntryReadingTime(store, feed, entry, entryIsNew, user)
filteredEntries = append(filteredEntries, entry)
}
- intg, err := store.Integration(feed.UserID)
- if err != nil {
- logger.Error("[Processor] Get integrations for user %d failed: %v; the refresh process will go on, but no integrations will run this time.", feed.UserID, err)
- } else if intg != nil && len(entriesToPush) > 0 {
- go func() {
- integration.PushEntries(entriesToPush, intg)
- }()
- }
-
feed.Entries = filteredEntries
}
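
With the bulk push moved into the handler, ProcessFeedEntries no longer touches the integration layer at all: the per-entry path (one store.Integration lookup and one PushEntry goroutine for every new entry) and the end-of-loop batch push are both gone. What remains is filtering, scraping, rewriting, sanitizing and reading-time calculation. A condensed paraphrase of the post-change loop, with placeholder types:

// Condensed paraphrase of ProcessFeedEntries after the change: no
// integration calls, just per-entry processing with the sanitizer last.
package processor

type Entry struct {
	URL     string
	Content string
}

type Feed struct{ Entries []*Entry }

func processFeedEntries(feed *Feed, sanitize func(url, html string) string) {
	filtered := make([]*Entry, 0, len(feed.Entries))

	// Older entries are processed first (the slice is walked back to front).
	for i := len(feed.Entries) - 1; i >= 0; i-- {
		entry := feed.Entries[i]

		// The sanitizer always runs last so unsafe HTML cannot slip through.
		entry.Content = sanitize(entry.URL, entry.Content)

		filtered = append(filtered, entry)
	}

	feed.Entries = filtered
}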
diff --git a/internal/reader/rdf/rdf.go b/internal/reader/rdf/rdf.go
index 935d0c0c..ca74cb2a 100644
--- a/internal/reader/rdf/rdf.go
+++ b/internal/reader/rdf/rdf.go
@@ -65,7 +65,7 @@ type rdfItem struct {
}
func (r *rdfItem) Transform() *model.Entry {
- entry := new(model.Entry)
+ entry := model.NewEntry()
entry.Title = r.entryTitle()
entry.Author = r.entryAuthor()
entry.URL = r.entryURL()
diff --git a/internal/reader/rss/rss.go b/internal/reader/rss/rss.go
index 323c6041..f2ecdaec 100644
--- a/internal/reader/rss/rss.go
+++ b/internal/reader/rss/rss.go
@@ -190,7 +190,7 @@ type rssItem struct {
}
func (r *rssItem) Transform() *model.Entry {
- entry := new(model.Entry)
+ entry := model.NewEntry()
entry.URL = r.entryURL()
entry.CommentsURL = r.entryCommentsURL()
entry.Date = r.entryDate()
@@ -388,7 +388,7 @@ func (r *rssItem) entryEnclosures() model.EnclosureList {
}
func (r *rssItem) entryCategories() []string {
- var categoryList []string
+ categoryList := make([]string, 0)
for _, rssCategory := range r.Categories {
if strings.Contains(rssCategory.Inner, "<![CDATA[") {