path: root/internal/cli/refresh_feeds.go
blob: 0e90a6cfd45a69e72a566cddf68f3dc192d6cf61
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package cli // import "miniflux.app/v2/internal/cli"

import (
	"sync"
	"time"

	"miniflux.app/v2/internal/config"
	"miniflux.app/v2/internal/logger"
	"miniflux.app/v2/internal/model"
	feedHandler "miniflux.app/v2/internal/reader/handler"
	"miniflux.app/v2/internal/storage"
)

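// refreshFeeds pulls a batch of refresh jobs from storage and processes them
// concurrently with a fixed pool of workers.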
func refreshFeeds(store *storage.Storage) {
	var wg sync.WaitGroup

	startTime := time.Now()
	jobs, err := store.NewBatch(config.Opts.BatchSize())
	if err != nil {
		logger.Error("[Cronjob] %v", err)
	}

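	// Even if NewBatch failed, execution continues: a nil or empty jobs slice
	// simply means no work is queued below.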
	nbJobs := len(jobs)
	logger.Info("[Cronjob]] Created %d jobs from a batch size of %d", nbJobs, config.Opts.BatchSize())
	var jobQueue = make(chan model.Job, nbJobs)

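	// Each worker drains jobQueue until the channel is closed, refreshing one
	// feed per job and logging (but not retrying) failures.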
	logger.Info("[Cronjob] Starting a pool of %d workers", config.Opts.WorkerPoolSize())
	for i := 0; i < config.Opts.WorkerPoolSize(); i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			for job := range jobQueue {
				logger.Info("[Cronjob] Refreshing feed #%d for user #%d in worker #%d", job.FeedID, job.UserID, workerID)
				if err := feedHandler.RefreshFeed(store, job.UserID, job.FeedID, false); err != nil {
					logger.Error("[Cronjob] Refreshing the feed #%d returned this error: %v", job.FeedID, err)
				}
			}
		}(i)
	}

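	// Dispatch every job, then close the channel so the workers exit once the
	// queue is drained.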
	for _, job := range jobs {
		jobQueue <- job
	}
	close(jobQueue)

	wg.Wait()
	logger.Info("[Cronjob] Refreshed %d feed(s) in %s", nbJobs, time.Since(startTime))
}