htmgo/framework/h/cache/ttl_store.go
franchb cfcfe7cb21
Use GetOrCompute for atomic cache access
The commit introduces an atomic GetOrCompute method to the cache interface and refactors all cache implementations to use it. This prevents race conditions and duplicate computations when multiple goroutines request the same uncached key simultaneously.

The changes eliminate a time-of-check to time-of-use race condition in the original caching implementation, where separate Get/Set operations could lead to duplicate renders under high concurrency.

With GetOrCompute, the entire check-compute-store operation happens atomically while holding the lock, ensuring only one goroutine computes a value for any given key.

The API change is backwards compatible as the framework handles the GetOrCompute logic internally. Existing applications will automatically benefit from the improved concurrency behavior without any code changes.
2025-07-03 17:46:09 +03:00

133 lines
2.7 KiB
Go

package cache
import (
"flag"
"log/slog"
"sync"
"time"
)
// TTLStore is a time-to-live based cache implementation that mimics
// the original htmgo caching behavior. It stores values with expiration
// times and periodically cleans up expired entries.
type TTLStore[K comparable, V any] struct {
	cache     map[K]*entry[V] // key -> value with absolute expiration; guarded by mutex
	mutex     sync.RWMutex    // protects cache against concurrent access
	closeOnce sync.Once       // guarantees closeChan is closed at most once (see Close)
	closeChan chan struct{}   // closed to signal the background cleaner goroutine to exit
}

// entry pairs a cached value with the absolute time at which it expires.
type entry[V any] struct {
	value      V
	expiration time.Time // entries are considered stale once time.Now() passes this instant
}
// NewTTLStore creates a new TTL-based cache store and launches its
// background cleaner goroutine. Call Close on the returned store to
// stop the cleaner.
func NewTTLStore[K comparable, V any]() Store[K, V] {
	store := &TTLStore[K, V]{
		cache:     make(map[K]*entry[V]),
		closeChan: make(chan struct{}),
	}
	store.startCleaner()
	return store
}
// Set adds or updates an entry in the cache with the given TTL.
func (s *TTLStore[K, V]) Set(key K, value V, ttl time.Duration) {
	// Compute the expiration outside the critical section to keep
	// the time spent holding the lock minimal.
	expiresAt := time.Now().Add(ttl)

	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.cache[key] = &entry[V]{value: value, expiration: expiresAt}
}
// GetOrCompute atomically gets an existing value or computes and stores a new value.
func (s *TTLStore[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	// Fast path: a live (unexpired) entry already exists for this key.
	if cached, hit := s.cache[key]; hit && time.Now().Before(cached.expiration) {
		return cached.value
	}

	// Slow path: compute while still holding the lock so concurrent
	// callers for the same key never duplicate the work, then cache
	// the freshly computed value.
	fresh := compute()
	s.cache[key] = &entry[V]{
		value:      fresh,
		expiration: time.Now().Add(ttl),
	}
	return fresh
}
// Delete removes an entry from the cache. Deleting a key that is not
// present is a no-op.
func (s *TTLStore[K, V]) Delete(key K) {
	s.mutex.Lock()
	delete(s.cache, key)
	s.mutex.Unlock()
}
// Purge removes all items from the cache by swapping in a fresh map.
func (s *TTLStore[K, V]) Purge() {
	s.mutex.Lock()
	s.cache = map[K]*entry[V]{}
	s.mutex.Unlock()
}
// Close stops the background cleaner goroutine. It is safe to call
// multiple times; only the first call closes the signal channel.
func (s *TTLStore[K, V]) Close() {
	s.closeOnce.Do(func() { close(s.closeChan) })
}
// startCleaner starts a background goroutine that periodically removes
// expired entries. The goroutine exits when closeChan is closed (see
// Close). Under `go test` the sweep interval is shortened to one second
// so tests observe expiration quickly.
func (s *TTLStore[K, V]) startCleaner() {
	// Decide the interval up front so exactly one ticker is created.
	// The original code created a one-minute ticker and then replaced
	// it with a one-second ticker in test mode, leaking the first
	// ticker because its Stop was never called.
	interval := time.Minute
	if flag.Lookup("test.v") != nil {
		interval = time.Second
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.clearExpired()
			case <-s.closeChan:
				return
			}
		}
	}()
}
// clearExpired removes all expired entries from the cache and logs how
// many were dropped (debug level) when at least one was removed.
func (s *TTLStore[K, V]) clearExpired() {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	now := time.Now()
	removed := 0
	for key, ent := range s.cache {
		if ent.expiration.Before(now) {
			delete(s.cache, key)
			removed++
		}
	}
	if removed > 0 {
		slog.Debug("Deleted expired cache entries", slog.Int("count", removed))
	}
}