// Package cache provides an interface for a cache of strings, aka text/gemini pages, and redirects.
// It is fully thread safe.
package cache
|
|
|
|
|
|
|
|
import (
	"sync"
	"time"

	"github.com/makeworld-the-better-one/amfora/structs"
)
|
|
|
|
|
|
|
|
var pages = make(map[string]*structs.Page) // The actual cache
|
|
|
|
var urls = make([]string, 0) // Duplicate of the keys in the `pages` map, but in order of being added
|
|
|
|
var maxPages = 0 // Max allowed number of pages in cache
|
|
|
|
var maxSize = 0 // Max allowed cache size in bytes
|
|
|
|
var lock = sync.RWMutex{}
|
2020-12-20 15:54:47 -05:00
|
|
|
var timeout = time.Duration(0)
|
2020-06-18 16:54:48 -04:00
|
|
|
|
|
|
|
// SetMaxPages sets the max number of pages the cache can hold.
|
|
|
|
// A value <= 0 means infinite pages.
|
|
|
|
func SetMaxPages(max int) {
|
|
|
|
maxPages = max
|
|
|
|
}
|
|
|
|
|
2020-08-28 19:33:37 -04:00
|
|
|
// SetMaxSize sets the max size the page cache can be, in bytes.
|
2020-06-18 16:54:48 -04:00
|
|
|
// A value <= 0 means infinite size.
|
|
|
|
func SetMaxSize(max int) {
|
|
|
|
maxSize = max
|
|
|
|
}
|
|
|
|
|
2020-12-20 15:54:47 -05:00
|
|
|
// SetTimeout sets the max number of a seconds a page can still
|
|
|
|
// be valid for. A value <= 0 means forever.
|
|
|
|
func SetTimeout(t int) {
|
|
|
|
if t <= 0 {
|
|
|
|
timeout = time.Duration(0)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
timeout = time.Duration(t) * time.Second
|
|
|
|
}
|
|
|
|
|
2020-06-18 16:54:48 -04:00
|
|
|
// removeIndex removes the element at index i from s, preserving the
// order of the remaining elements, and returns the shortened slice.
//
// Order preservation matters: `urls` tracks pages in insertion order,
// and the cache evicts urls[0] as the oldest entry. The previous
// swap-with-last implementation scrambled that order, so the wrong
// page could be evicted.
func removeIndex(s []string, i int) []string {
	return append(s[:i], s[i+1:]...)
}
|
|
|
|
|
2020-08-27 11:47:57 -04:00
|
|
|
func removeURL(url string) {
|
2020-06-18 16:54:48 -04:00
|
|
|
for i := range urls {
|
|
|
|
if urls[i] == url {
|
|
|
|
urls = removeIndex(urls, i)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-04 21:05:12 -04:00
|
|
|
// AddPage adds a page to the cache, removing earlier pages as needed
|
2020-06-18 16:54:48 -04:00
|
|
|
// to keep the cache inside its limits.
|
|
|
|
//
|
|
|
|
// If your page is larger than the max cache size, the provided page
|
|
|
|
// will silently not be added to the cache.
|
2020-08-04 21:05:12 -04:00
|
|
|
func AddPage(p *structs.Page) {
|
2020-11-17 11:59:06 -05:00
|
|
|
if p.URL == "" {
|
2020-06-23 20:07:25 -04:00
|
|
|
// Just in case, these pages shouldn't be cached
|
2020-06-18 16:54:48 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if p.Size() > maxSize && maxSize > 0 {
|
|
|
|
// This page can never be added
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove earlier pages to make room for this one
|
|
|
|
// There should only ever be 1 page to remove at most,
|
|
|
|
// but this handles more just in case.
|
|
|
|
for NumPages() >= maxPages && maxPages > 0 {
|
2020-08-04 21:05:12 -04:00
|
|
|
RemovePage(urls[0])
|
2020-06-18 16:54:48 -04:00
|
|
|
}
|
|
|
|
// Do the same but for cache size
|
2020-08-04 21:05:12 -04:00
|
|
|
for SizePages()+p.Size() > maxSize && maxSize > 0 {
|
|
|
|
RemovePage(urls[0])
|
2020-06-18 16:54:48 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
lock.Lock()
|
|
|
|
defer lock.Unlock()
|
2020-08-25 21:03:21 -04:00
|
|
|
pages[p.URL] = p
|
2020-06-18 16:54:48 -04:00
|
|
|
// Remove the URL if it was already there, then add it to the end
|
2020-08-27 11:47:57 -04:00
|
|
|
removeURL(p.URL)
|
2020-08-25 21:03:21 -04:00
|
|
|
urls = append(urls, p.URL)
|
2020-06-18 16:54:48 -04:00
|
|
|
}
|
|
|
|
|
2020-08-04 21:05:12 -04:00
|
|
|
// RemovePage will remove a page from the cache.
|
2020-06-18 16:54:48 -04:00
|
|
|
// Even if the page doesn't exist there will be no error.
|
2020-08-04 21:05:12 -04:00
|
|
|
func RemovePage(url string) {
|
2020-06-18 16:54:48 -04:00
|
|
|
lock.Lock()
|
|
|
|
defer lock.Unlock()
|
|
|
|
delete(pages, url)
|
2020-08-27 11:47:57 -04:00
|
|
|
removeURL(url)
|
2020-06-18 16:54:48 -04:00
|
|
|
}
|
|
|
|
|
2020-08-04 21:05:12 -04:00
|
|
|
// ClearPages removes all pages from the cache.
|
|
|
|
func ClearPages() {
|
2020-06-18 16:54:48 -04:00
|
|
|
lock.Lock()
|
|
|
|
defer lock.Unlock()
|
|
|
|
pages = make(map[string]*structs.Page)
|
|
|
|
urls = make([]string, 0)
|
|
|
|
}
|
|
|
|
|
2020-08-04 21:05:12 -04:00
|
|
|
// SizePages returns the approx. current size of the cache in bytes.
|
|
|
|
func SizePages() int {
|
2020-06-18 16:54:48 -04:00
|
|
|
lock.RLock()
|
|
|
|
defer lock.RUnlock()
|
|
|
|
n := 0
|
|
|
|
for _, page := range pages {
|
|
|
|
n += page.Size()
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
func NumPages() int {
|
|
|
|
lock.RLock()
|
|
|
|
defer lock.RUnlock()
|
|
|
|
return len(pages)
|
|
|
|
}
|
|
|
|
|
2020-08-04 21:05:12 -04:00
|
|
|
// GetPage returns the page struct, and a bool indicating if the page was in the cache or not.
|
2020-12-20 15:54:47 -05:00
|
|
|
// (nil, false) is returned if the page isn't in the cache.
|
2020-08-04 21:05:12 -04:00
|
|
|
func GetPage(url string) (*structs.Page, bool) {
|
2020-06-18 16:54:48 -04:00
|
|
|
lock.RLock()
|
|
|
|
defer lock.RUnlock()
|
2020-12-20 15:54:47 -05:00
|
|
|
|
2020-06-18 16:54:48 -04:00
|
|
|
p, ok := pages[url]
|
2020-12-20 15:54:47 -05:00
|
|
|
if ok && (timeout == 0 || time.Since(p.MadeAt) < timeout) {
|
|
|
|
return p, ok
|
|
|
|
}
|
|
|
|
return nil, false
|
2020-06-18 16:54:48 -04:00
|
|
|
}
|