// Package cache provides an interface for a cache of strings, aka text/gemini pages, and redirects.
// It is fully thread safe.
// The redirect cache is not limited.
package cache

import (
	"strings"
	"sync"

	"github.com/makeworld-the-better-one/amfora/structs"
)

var pages = make(map[string]*structs.Page) // The actual cache
var urls = make([]string, 0)               // Duplicate of the keys in the `pages` map, but in order of being added
var maxPages = 0                           // Max allowed number of pages in cache
var maxSize = 0                            // Max allowed cache size in bytes
var lock = sync.RWMutex{}

// SetMaxPages sets the max number of pages the cache can hold.
// A value <= 0 means infinite pages.
func SetMaxPages(max int) {
	maxPages = max
}

// SetMaxSize sets the max size the cache can be, in bytes.
// A value <= 0 means infinite size.
func SetMaxSize(max int) {
	maxSize = max
}
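
// The sketch below shows how a caller might wire these limits together. It is
// illustrative only, not part of the original file; the function name is made
// up, and it assumes only what this file already uses: the exported URL field
// on structs.Page and the exported functions defined here.
func exampleCacheSetup() {
	SetMaxPages(30)     // keep at most 30 pages
	SetMaxSize(1 << 20) // and at most ~1 MiB of page content

	AddPage(&structs.Page{URL: "gemini://example.com/"})
	if p, ok := GetPage("gemini://example.com/"); ok {
		_ = p // cache hit: p is the pointer that was stored
	}
}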

// removeIndex removes s[i] while preserving the order of the remaining
// elements. Order matters here: urls[0] must stay the oldest entry so that
// eviction in AddPage removes the right page. (A swap-with-last removal
// would break that invariant.)
func removeIndex(s []string, i int) []string {
	return append(s[:i], s[i+1:]...)
}

// removeURL removes the given URL from the urls list, if it's present.
func removeURL(url string) {
	for i := range urls {
		if urls[i] == url {
			urls = removeIndex(urls, i)
			return
		}
	}
}

// AddPage adds a page to the cache, removing earlier pages as needed
// to keep the cache inside its limits.
//
// If your page is larger than the max cache size, the provided page
// will silently not be added to the cache.
func AddPage(p *structs.Page) {
	if p.URL == "" || strings.HasPrefix(p.URL, "about:") {
		// Just in case, these pages shouldn't be cached
		return
	}
	if p.Size() > maxSize && maxSize > 0 {
		// This page can never be added
		return
	}

	// Remove earlier pages to make room for this one
	// There should only ever be 1 page to remove at most,
	// but this handles more just in case.
	for NumPages() >= maxPages && maxPages > 0 {
		RemovePage(urls[0])
	}
	// Do the same but for cache size
	for SizePages()+p.Size() > maxSize && maxSize > 0 {
		RemovePage(urls[0])
	}

	lock.Lock()
	defer lock.Unlock()
	pages[p.URL] = p
	// Remove the URL if it was already there, then add it to the end
	removeURL(p.URL)
	urls = append(urls, p.URL)
}
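
// Eviction sketch (illustrative only, not part of the original file): with a
// limit of two pages, adding a third removes urls[0], the oldest entry. The
// function name and URLs are made up for the example.
func exampleEviction() {
	SetMaxPages(2)
	AddPage(&structs.Page{URL: "gemini://one/"})
	AddPage(&structs.Page{URL: "gemini://two/"})
	AddPage(&structs.Page{URL: "gemini://three/"}) // evicts "gemini://one/"

	_, ok := GetPage("gemini://one/")
	_ = ok // false: the oldest page was evicted; "two" and "three" remain
}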

// RemovePage will remove a page from the cache.
// Even if the page doesn't exist there will be no error.
func RemovePage(url string) {
	lock.Lock()
	defer lock.Unlock()
	delete(pages, url)
	removeURL(url)
}

// ClearPages removes all pages from the cache.
func ClearPages() {
	lock.Lock()
	defer lock.Unlock()
	pages = make(map[string]*structs.Page)
	urls = make([]string, 0)
}

// SizePages returns the approx. current size of the cache in bytes.
func SizePages() int {
	lock.RLock()
	defer lock.RUnlock()
	n := 0
	for _, page := range pages {
		n += page.Size()
	}
	return n
}

// NumPages returns the number of pages currently in the cache.
func NumPages() int {
	lock.RLock()
	defer lock.RUnlock()
	return len(pages)
}

// GetPage returns the page struct, and a bool indicating if the page was in the cache or not.
// The returned page is nil if it isn't in the cache.
func GetPage(url string) (*structs.Page, bool) {
	lock.RLock()
	defer lock.RUnlock()
	p, ok := pages[url]
	return p, ok
}
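
// Lookup sketch (illustrative only, not part of the original file): GetPage
// follows Go's comma-ok convention, so callers can tell a cache miss apart
// from a cached page. The function name is made up for the example.
func exampleLookup(url string) *structs.Page {
	if p, ok := GetPage(url); ok {
		return p // cached copy
	}
	return nil // cache miss: the caller would fetch the page itself
}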