
🚧 Feeds page rendering fixed, still untested

This commit is contained in:
makeworld 2020-08-28 19:33:37 -04:00
parent 2357e25b07
commit 5c84991940
7 changed files with 81 additions and 50 deletions

View File

@@ -1,6 +1,5 @@
// Package cache provides an interface for a cache of strings, aka text/gemini pages, and redirects.
// It is fully thread safe.
// The redirect cache is not limited.
package cache
import (
@@ -22,7 +21,7 @@ func SetMaxPages(max int) {
maxPages = max
}
// SetMaxSize sets the max size the cache can be, in bytes.
// SetMaxSize sets the max size the page cache can be, in bytes.
// A value <= 0 means infinite size.
func SetMaxSize(max int) {
maxSize = max

View File

@@ -5,6 +5,7 @@ import (
"strings"
"time"
"github.com/makeworld-the-better-one/amfora/cache"
"github.com/makeworld-the-better-one/amfora/feeds"
"github.com/makeworld-the-better-one/amfora/renderer"
"github.com/makeworld-the-better-one/amfora/structs"
@@ -15,19 +16,33 @@ var feedPageRaw = "# Feeds & Pages\n\nUpdates" + strings.Repeat(" ", 80-25) + "[
var timeDay = 24 * time.Hour
var feedPageUpdated time.Time
// Feeds displays the feeds page on the current tab.
func Feeds(t *tab) {
// TODO: Decide about date in local time vs UTC
// TODO: Cache
// Retrieve the cached version if there haven't been any updates
p, ok := cache.GetPage("about:feeds")
if feedPageUpdated == feeds.LastUpdated && ok {
setPage(t, p)
t.applyBottomBar()
return
}
pe := feeds.GetPageEntries()
curDay := time.Time{}.Round(timeDay)
// curDay represents what day of posts the loop is on.
// It only goes backwards in time.
// Its initial value means: only posts dated up to
// one day in the future are displayed.
curDay := time.Now().Round(timeDay).Add(timeDay)
for _, entry := range pe.Entries {
if entry.Published.Round(timeDay).After(curDay) {
for _, entry := range pe.Entries { // From new to old
// Convert to local time, remove sub-day info
pub := entry.Published.In(time.Local).Round(timeDay)
if pub.Before(curDay) {
// This post is on a new day, add a day header
curDay := entry.Published.Round(timeDay)
curDay = pub
feedPageRaw += fmt.Sprintf("\n## %s\n\n", curDay.Format("Jan 02, 2006"))
}
feedPageRaw += fmt.Sprintf("=>%s %s - %s\n", entry.URL, entry.Author, entry.Title)
@@ -42,6 +57,13 @@ func Feeds(t *tab) {
Width: termW,
Mediatype: structs.TextGemini,
}
cache.AddPage(&page)
setPage(t, &page)
t.applyBottomBar()
feedPageUpdated = time.Now()
}
func feedInit() {
// TODO
}
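
For context on the rendering change above: Feeds rounds each post's publication time to a whole day and adds a "## Jan 02, 2006" header whenever the loop crosses into an earlier day. The standalone sketch below illustrates that grouping technique; the sample posts, field names, and page text are invented for the example and are not part of this commit.

package main

import (
	"fmt"
	"sort"
	"time"
)

const day = 24 * time.Hour

type post struct {
	Title     string
	Published time.Time
}

func main() {
	// Invented sample posts.
	now := time.Now()
	posts := []post{
		{"An older post", now.Add(-50 * time.Hour)},
		{"A post from today", now},
		{"Yesterday's post", now.Add(-26 * time.Hour)},
	}

	// Sort newest to oldest, as GetPageEntries guarantees.
	sort.Slice(posts, func(i, j int) bool {
		return posts[i].Published.After(posts[j].Published)
	})

	// Start one day in the future so the first post always
	// produces a day header; curDay only moves backwards.
	curDay := now.Round(day).Add(day)
	page := "# Feeds & Pages\n"
	for _, p := range posts {
		// Convert to local time and drop sub-day precision.
		pub := p.Published.In(time.Local).Round(day)
		if pub.Before(curDay) {
			// Crossed into an earlier day: add a day header.
			curDay = pub
			page += fmt.Sprintf("\n## %s\n\n", curDay.Format("Jan 02, 2006"))
		}
		page += fmt.Sprintf("=> %s\n", p.Title)
	}
	fmt.Print(page)
}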

View File

@@ -150,6 +150,7 @@ func modalInit() {
bkmkInit()
dlInit()
feedInit()
}
// Error displays an error on the screen in a modal.

View File

@@ -30,7 +30,11 @@ var (
ErrNotFeed = errors.New("not a valid feed")
)
var writeMu = sync.Mutex{}
var writeMu = sync.Mutex{} // Prevent concurrent writes to feeds.json file
// LastUpdated is the time when the in-memory data was last updated.
// It can be used to know if the feed page should be regenerated.
var LastUpdated time.Time
// Init should be called after config.Init.
func Init() error {
@@ -42,6 +46,8 @@ func Init() error {
return fmt.Errorf("feeds json is corrupted: %v", err) //nolint:goerr113
}
LastUpdated = time.Now()
go updateAll()
return nil
}
@@ -70,6 +76,10 @@ func IsTracked(url string) bool {
// GetFeed returns a Feed object and a bool indicating whether the passed
// content was actually recognized as a feed.
func GetFeed(mediatype, filename string, r io.Reader) (*gofeed.Feed, bool) {
if r == nil {
return nil, false
}
// Check mediatype and filename
if mediatype != "application/atom+xml" && mediatype != "application/rss+xml" &&
filename != "atom.xml" && filename != "feed.xml" &&
@@ -119,34 +129,53 @@ func AddFeed(url string, feed *gofeed.Feed) error {
data.feedMu.Lock()
data.Feeds[url] = feed
data.feedMu.Unlock()
err := writeJSON()
if err != nil {
// Don't use in-memory if it couldn't be saved
data.feedMu.Lock()
delete(data.Feeds, url)
data.feedMu.Unlock()
return ErrSaving
}
data.feedMu.Unlock()
LastUpdated = time.Now()
return nil
}
// AddPage stores a page URL to track for changes.
// Do not use it to update a page, as it only resets the hash.
func AddPage(url string) error {
// AddPage stores a page to track for changes.
// It can be used to update the page as well, although the package
// will handle that on its own.
func AddPage(url string, r io.Reader) error {
if r == nil {
return nil
}
h := sha256.New()
if _, err := io.Copy(h, r); err != nil {
return err
}
newHash := fmt.Sprintf("%x", h.Sum(nil))
data.pageMu.Lock()
data.Pages[url] = &pageJSON{} // No hash yet
data.pageMu.Unlock()
_, ok := data.Pages[url]
if !ok || data.Pages[url].Hash != newHash {
// Page content is different, or it didn't exist
data.Pages[url] = &pageJSON{
Hash: newHash,
Changed: time.Now().UTC(),
}
}
err := writeJSON()
if err != nil {
// Don't use in-memory if it couldn't be saved
data.pageMu.Lock()
delete(data.Pages, url)
data.pageMu.Unlock()
return ErrSaving
return err
}
data.pageMu.Unlock()
LastUpdated = time.Now()
return nil
}
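
The new AddPage above folds the old updatePage hashing into one place: hash the page body with SHA-256 and only record a new Changed time when the hash differs from what is stored. The standalone sketch below shows that change-detection pattern with an in-memory map standing in for the feeds.json data; the type, function, and URL names are invented for the example.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
	"time"
)

// tracked is a stand-in for the persisted page data.
type tracked struct {
	Hash    string
	Changed time.Time
}

var pages = map[string]*tracked{}

// recordPage hashes r and updates the stored entry only when
// the content actually changed, mirroring AddPage above.
func recordPage(url string, r io.Reader) error {
	if r == nil {
		return nil
	}
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return err
	}
	newHash := fmt.Sprintf("%x", h.Sum(nil))

	old, ok := pages[url]
	if !ok || old.Hash != newHash {
		pages[url] = &tracked{Hash: newHash, Changed: time.Now().UTC()}
	}
	return nil
}

func main() {
	recordPage("gemini://example.com/", strings.NewReader("hello"))
	first := pages["gemini://example.com/"].Changed

	// Same content again: hash matches, Changed is untouched.
	recordPage("gemini://example.com/", strings.NewReader("hello"))
	fmt.Println(first.Equal(pages["gemini://example.com/"].Changed)) // true
}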
@@ -188,32 +217,8 @@ func updatePage(url string) error {
if res.Status != gemini.StatusSuccess {
return ErrNotSuccess
}
h := sha256.New()
if _, err := io.Copy(h, res.Body); err != nil {
return err
}
newHash := fmt.Sprintf("%x", h.Sum(nil))
data.pageMu.Lock()
if data.Pages[url].Hash != newHash {
// Page content is different
data.Pages[url] = &pageJSON{
Hash: newHash,
Changed: time.Now().UTC(),
}
}
data.pageMu.Unlock()
err = writeJSON()
if err != nil {
// Don't use in-memory if it couldn't be saved
data.pageMu.Lock()
delete(data.Pages, url)
data.pageMu.Unlock()
return err
}
return nil
return AddPage(url, res.Body)
}
// updateAll updates all feeds and pages.
@@ -221,15 +226,14 @@ func updateAll() {
// TODO: Is two goroutines the right amount?
worker := func(jobs <-chan [2]string, wg *sync.WaitGroup) {
// Each job is: []string{<type>, "url"}
// Each job is: [2]string{<type>, "url"}
// where <type> is "feed" or "page"
defer wg.Done()
for j := range jobs {
if j[0] == "feed" {
updateFeed(j[1]) //nolint:errcheck
}
if j[0] == "page" {
} else if j[0] == "page" {
updatePage(j[1]) //nolint:errcheck
}
}
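
The updateAll worker above is a standard Go worker pool: a fixed number of goroutines range over a jobs channel while a WaitGroup tracks completion. A self-contained sketch of the same pattern follows; the printed messages stand in for updateFeed and updatePage, and the URLs are made up.

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan [2]string)
	var wg sync.WaitGroup

	// Each job is [2]string{<type>, url}, where <type> is "feed" or "page".
	worker := func() {
		defer wg.Done()
		for j := range jobs {
			switch j[0] {
			case "feed":
				fmt.Println("updating feed:", j[1]) // stand-in for updateFeed
			case "page":
				fmt.Println("updating page:", j[1]) // stand-in for updatePage
			}
		}
	}

	// Two workers, as in updateAll.
	wg.Add(2)
	go worker()
	go worker()

	// Queue work, then close the channel so the workers' range loops end.
	jobs <- [2]string{"feed", "gemini://example.com/atom.xml"}
	jobs <- [2]string{"page", "gemini://example.com/page.gmi"}
	close(jobs)

	wg.Wait()
}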
@@ -278,8 +282,9 @@ func updateAll() {
// GetPageEntries returns the current list of PageEntries
// for use in rendering a page.
// The contents of the entries will never change, and this
// function should be called again to get updates.
// The contents of the returned entries will never change,
// so this function needs to be called again to get updates.
// It always returns sorted entries - by post time, from newest to oldest.
func GetPageEntries() *PageEntries {
var pe PageEntries
@@ -290,6 +295,8 @@ func GetPageEntries() *PageEntries {
var pub time.Time
// Try to use updated time first, then published
if !item.UpdatedParsed.IsZero() {
pub = *item.UpdatedParsed
} else if !item.PublishedParsed.IsZero() {
@@ -321,5 +328,6 @@ func GetPageEntries() *PageEntries {
data.RUnlock()
sort.Sort(&pe)
return &pe
}
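
GetPageEntries promises entries sorted newest to oldest, which relies on PageEntries implementing sort.Interface with an After-based Less (the fix in the last file below). A small standalone illustration of that sorting behavior, using an invented entry type rather than Amfora's:

package main

import (
	"fmt"
	"sort"
	"time"
)

// entry is a stand-in for PageEntry.
type entry struct {
	Title     string
	Published time.Time
}

// byNewest sorts entries from newest to oldest, like PageEntries.Less
// after the change below (After instead of Before).
type byNewest []entry

func (e byNewest) Len() int           { return len(e) }
func (e byNewest) Less(i, j int) bool { return e[i].Published.After(e[j].Published) }
func (e byNewest) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }

func main() {
	now := time.Now()
	entries := byNewest{
		{"old", now.Add(-48 * time.Hour)},
		{"new", now},
		{"middle", now.Add(-24 * time.Hour)},
	}
	sort.Sort(entries)
	for _, e := range entries {
		fmt.Println(e.Title) // new, middle, old
	}
}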

View File

@@ -81,6 +81,7 @@ type PageEntry struct {
// PageEntries is a new-to-old list of PageEntry structs, used to create a feed page.
// It should always be assumed to be sorted when used in other packages.
// Sorted by post time, from newest to oldest.
type PageEntries struct {
Entries []*PageEntry
}
@@ -92,7 +93,7 @@ func (e *PageEntries) Len() int {
}
func (e *PageEntries) Less(i, j int) bool {
return e.Entries[i].Published.Before(e.Entries[j].Published)
return e.Entries[i].Published.After(e.Entries[j].Published)
}
func (e *PageEntries) Swap(i, j int) {