Refactor caching system to use pluggable stores

This commit modernizes the caching implementation by introducing a pluggable store interface that allows plugging in different cache backends. Key changes:

- Add Store interface for custom cache implementations
- Create default TTL-based store for backward compatibility
- Add example LRU store for memory-bounded caching
- Support cache store configuration via options pattern
- Make cache cleanup logic implementation-specific
- Add comprehensive tests and documentation

The main goals were to:

1. Prevent unbounded memory growth through pluggable stores
2. Enable distributed caching support
3. Maintain backward compatibility
4. Improve testability and maintainability

Signed-off-by: franchb <hello@franchb.com>
Eliah Rusin, 2025-06-26 21:38:38 +03:00 (committed by franchb)
parent d555e5337f · commit 04997d7315
10 changed files with 2248 additions and 264 deletions

@@ -1,21 +1,19 @@
package h

import (
	"time"

	"github.com/maddalax/htmgo/framework/h/cache"
)

// A single key to represent the cache entry for non-per-key components.
const _singleCacheKey = "__htmgo_single_cache_key__"

type CachedNode struct {
	cb       func() *Element
	isByKey  bool
	duration time.Duration
	cache    cache.Store[any, string]
}

type Entry struct {
@@ -35,33 +33,45 @@
type GetElementFuncT3WithKey[K comparable, T any, T2 any, T3 any] func(T, T2, T3) (K, GetElementFunc)
type GetElementFuncT4WithKey[K comparable, T any, T2 any, T3 any, T4 any] func(T, T2, T3, T4) (K, GetElementFunc)

// CacheOption defines a function that configures a CachedNode.
type CacheOption func(*CachedNode)

// WithStore allows providing a custom cache implementation for a cached component.
func WithStore(store cache.Store[any, string]) CacheOption {
	return func(c *CachedNode) {
		c.cache = store
	}
}

// DefaultCacheProvider is a package-level function that creates a default cache instance.
// Initially, this uses a TTL-based map cache, but could be swapped for an LRU cache later.
// Advanced users can override this for the entire application.
var DefaultCacheProvider = func() cache.Store[any, string] {
	return cache.NewTTLStore[any, string]()
}

// Cached caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
// Use CachedPerKey to cache elements per unique identifier.
func Cached(duration time.Duration, cb GetElementFunc, opts ...CacheOption) func() *Element {
	node := &CachedNode{
		cb:       cb,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func() *Element {
		return element
	}
@@ -69,17 +79,25 @@
// CachedPerKey caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
func CachedPerKey[K comparable](duration time.Duration, cb GetElementFuncWithKey[K], opts ...CacheOption) func() *Element {
	node := &CachedNode{
		isByKey:  true,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func() *Element {
		key, componentFunc := cb()
		return &Element{
@@ -101,17 +119,25 @@
// CachedPerKeyT caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
func CachedPerKeyT[K comparable, T any](duration time.Duration, cb GetElementFuncTWithKey[K, T], opts ...CacheOption) func(T) *Element {
	node := &CachedNode{
		isByKey:  true,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T) *Element {
		key, componentFunc := cb(data)
		return &Element{
@@ -127,17 +153,25 @@
// CachedPerKeyT2 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
func CachedPerKeyT2[K comparable, T any, T2 any](duration time.Duration, cb GetElementFuncT2WithKey[K, T, T2], opts ...CacheOption) func(T, T2) *Element {
	node := &CachedNode{
		isByKey:  true,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2) *Element {
		key, componentFunc := cb(data, data2)
		return &Element{
@@ -153,17 +187,25 @@
// CachedPerKeyT3 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
func CachedPerKeyT3[K comparable, T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3WithKey[K, T, T2, T3], opts ...CacheOption) func(T, T2, T3) *Element {
	node := &CachedNode{
		isByKey:  true,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2, data3 T3) *Element {
		key, componentFunc := cb(data, data2, data3)
		return &Element{
@@ -179,17 +221,25 @@
// CachedPerKeyT4 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
func CachedPerKeyT4[K comparable, T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4WithKey[K, T, T2, T3, T4], opts ...CacheOption) func(T, T2, T3, T4) *Element {
	node := &CachedNode{
		isByKey:  true,
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2, data3 T3, data4 T4) *Element {
		key, componentFunc := cb(data, data2, data3, data4)
		return &Element{
@@ -205,19 +255,27 @@
// CachedT caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
// Use CachedPerKey to cache elements per unique identifier.
func CachedT[T any](duration time.Duration, cb GetElementFuncT[T], opts ...CacheOption) func(T) *Element {
	node := &CachedNode{
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T) *Element {
		node.cb = func() *Element {
			return cb(data)
		}
		return element
@@ -226,18 +284,27 @@
// CachedT2 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
// Use CachedPerKey to cache elements per unique identifier.
func CachedT2[T any, T2 any](duration time.Duration, cb GetElementFuncT2[T, T2], opts ...CacheOption) func(T, T2) *Element {
	node := &CachedNode{
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2) *Element {
		node.cb = func() *Element {
			return cb(data, data2)
		}
		return element
@@ -246,18 +313,27 @@
// CachedT3 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
// Use CachedPerKey to cache elements per unique identifier.
func CachedT3[T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3[T, T2, T3], opts ...CacheOption) func(T, T2, T3) *Element {
	node := &CachedNode{
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2, data3 T3) *Element {
		node.cb = func() *Element {
			return cb(data, data2, data3)
		}
		return element
@@ -266,18 +342,27 @@
// CachedT4 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
// Use CachedPerKey to cache elements per unique identifier.
func CachedT4[T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4[T, T2, T3, T4], opts ...CacheOption) func(T, T2, T3, T4) *Element {
	node := &CachedNode{
		duration: duration,
	}

	for _, opt := range opts {
		opt(node)
	}

	if node.cache == nil {
		node.cache = DefaultCacheProvider()
	}

	element := &Element{
		tag:  CachedNodeTag,
		meta: node,
	}

	return func(data T, data2 T2, data3 T3, data4 T4) *Element {
		node.cb = func() *Element {
			return cb(data, data2, data3, data4)
		}
		return element
@@ -286,69 +371,27 @@
// ClearCache clears the cached HTML of the element. This is called automatically by the framework.
func (c *CachedNode) ClearCache() {
	c.cache.Purge()
}

// ClearExpired is deprecated and does nothing. Cache expiration is now handled by the Store implementation.
func (c *CachedNode) ClearExpired() {
	// No-op for backward compatibility
}

func (c *CachedNode) Render(ctx *RenderContext) {
	if c.isByKey {
		panic("CachedPerKey should not be rendered directly")
	} else {
		// For simple cached components, we use a single key
		html, found := c.cache.Get(_singleCacheKey)
		if found {
			ctx.builder.WriteString(html)
		} else {
			// Render and cache
			html = Render(c.cb())
			c.cache.Set(_singleCacheKey, html, c.duration)
			ctx.builder.WriteString(html)
		}
	}
}
@@ -357,47 +400,15 @@
	key := c.key
	parentMeta := c.parent.meta.(*CachedNode)

	// Try to get from cache
	html, found := parentMeta.cache.Get(key)
	if found {
		ctx.builder.WriteString(html)
		return
	}

	// Not in cache, render and store
	html = Render(c.cb())
	parentMeta.cache.Set(key, html, parentMeta.duration)
	ctx.builder.WriteString(html)
}

framework/h/cache/README.md (new file)

@@ -0,0 +1,231 @@
# Pluggable Cache System for htmgo
## Overview
The htmgo framework now supports a pluggable cache system that allows developers to provide their own caching
implementations. This addresses potential memory exhaustion vulnerabilities in the previous TTL-only caching approach
and provides greater flexibility for production deployments.
## Motivation
The previous caching mechanism relied exclusively on Time-To-Live (TTL) expiration, which could lead to:
- **Unbounded memory growth**: High-cardinality cache keys could consume all available memory
- **DDoS vulnerability**: Attackers could exploit this by generating many unique cache keys
- **Limited flexibility**: No support for size-bounded caches or distributed caching solutions
## Architecture
The new system introduces a generic `Store[K comparable, V any]` interface:
```go
type Store[K comparable, V any] interface {
	Set(key K, value V, ttl time.Duration)
	Get(key K) (V, bool)
	Delete(key K)
	Purge()
	Close()
}
```
## Usage
### Using the Default Cache
By default, htmgo continues to use a TTL-based cache for backward compatibility:
```go
// No changes needed - works exactly as before
UserProfile := h.CachedPerKeyT(
	15*time.Minute,
	func(userID int) (int, h.GetElementFunc) {
		return userID, func() *h.Element {
			return h.Div(h.Text("User profile"))
		}
	},
)
```
### Using a Custom Cache
You can provide your own cache implementation using the `WithStore` option:
```go
import (
	"github.com/maddalax/htmgo/framework/h"
	"github.com/maddalax/htmgo/framework/h/cache"
)

// Create a memory-bounded LRU cache
lruCache := cache.NewLRUStore[any, string](10000) // Max 10,000 items

// Use it with a cached component
UserProfile := h.CachedPerKeyT(
	15*time.Minute,
	func(userID int) (int, h.GetElementFunc) {
		return userID, func() *h.Element {
			return h.Div(h.Text("User profile"))
		}
	},
	h.WithStore(lruCache), // Pass the custom cache
)
```
### Changing the Default Cache Globally
You can override the default cache provider for your entire application:
```go
func init() {
	// All cached components will use LRU by default
	h.DefaultCacheProvider = func() cache.Store[any, string] {
		return cache.NewLRUStore[any, string](50000)
	}
}
```
## Example Implementations
### Built-in Stores
1. **TTLStore** (default): Time-based expiration with periodic cleanup
2. **LRUStore** (example): Least Recently Used eviction with size limits (both are constructed in the sketch below)
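
A minimal sketch of constructing each built-in store, using the constructors shown in this package (the sizes are illustrative):

```go
import "github.com/maddalax/htmgo/framework/h/cache"

// TTL store: unbounded size; entries expire after their per-item TTL.
ttlStore := cache.NewTTLStore[any, string]()
defer ttlStore.Close() // stops the background cleanup goroutine

// LRU store: bounded to 10,000 entries; per-item TTL is still honored.
lruStore := cache.NewLRUStore[any, string](10_000)
defer lruStore.Close()
```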
### Integrating Third-Party Libraries
Here's an example of integrating the high-performance `go-freelru` library:
```go
import (
	"time"

	"github.com/elastic/go-freelru"
	"github.com/maddalax/htmgo/framework/h/cache"
)

type FreeLRUAdapter[K comparable, V any] struct {
	lru *freelru.LRU[K, V]
}

func NewFreeLRUAdapter[K comparable, V any](size uint32) cache.Store[K, V] {
	lru, err := freelru.New[K, V](size, nil)
	if err != nil {
		panic(err)
	}
	return &FreeLRUAdapter[K, V]{lru: lru}
}

func (s *FreeLRUAdapter[K, V]) Set(key K, value V, ttl time.Duration) {
	// Note: go-freelru doesn't support per-item TTL
	s.lru.Add(key, value)
}

func (s *FreeLRUAdapter[K, V]) Get(key K) (V, bool) {
	return s.lru.Get(key)
}

func (s *FreeLRUAdapter[K, V]) Delete(key K) {
	s.lru.Remove(key)
}

func (s *FreeLRUAdapter[K, V]) Purge() {
	s.lru.Clear()
}

func (s *FreeLRUAdapter[K, V]) Close() {
	// No-op for this implementation
}
```
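
Usage then mirrors the built-in stores; a quick sketch, where `userCardForID` and the capacity are illustrative placeholders:

```go
// Plug the adapter in exactly like a built-in store.
store := NewFreeLRUAdapter[any, string](8192)
UserCard := h.CachedPerKeyT(15*time.Minute, userCardForID, h.WithStore(store))
```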
### Redis-based Distributed Cache
```go
type RedisStore struct {
	client *redis.Client
	prefix string
}

func (s *RedisStore) Set(key any, value string, ttl time.Duration) {
	keyStr := fmt.Sprintf("%s:%v", s.prefix, key)
	s.client.Set(context.Background(), keyStr, value, ttl)
}

func (s *RedisStore) Get(key any) (string, bool) {
	keyStr := fmt.Sprintf("%s:%v", s.prefix, key)
	val, err := s.client.Get(context.Background(), keyStr).Result()
	if err == redis.Nil {
		return "", false
	}
	return val, err == nil
}

// ... implement other methods
```
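
One hedged way to complete the adapter, assuming the go-redis client (its `Del` and `Close` methods are used below); `Purge` is left as a stub, since deleting by prefix in Redis requires a SCAN loop:

```go
func (s *RedisStore) Delete(key any) {
	keyStr := fmt.Sprintf("%s:%v", s.prefix, key)
	s.client.Del(context.Background(), keyStr)
}

func (s *RedisStore) Purge() {
	// A real implementation would SCAN for s.prefix + ":*" and delete the
	// matches; elided here to keep the sketch short.
}

func (s *RedisStore) Close() {
	_ = s.client.Close() // releases the connection pool
}
```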
## Migration Guide
### For Existing Applications
The changes are backward compatible. Existing applications will continue to work without modifications. The function
signatures now accept optional `CacheOption` parameters, but these can be omitted.
### Recommended Migration Path
1. **Assess your caching needs**: Determine if you need memory bounds or distributed caching
2. **Choose an implementation**: Use the built-in LRUStore or integrate a third-party library
3. **Update critical components**: Start with high-traffic or high-cardinality cached components (see the sketch after this list)
4. **Monitor memory usage**: Ensure your cache size limits are appropriate
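
As a sketch of step 3, a per-key component can be bounded by adding a single option; `sidebarForUser` and the sizes here are illustrative, not part of the framework:

```go
// Before: TTL-only cache, unbounded key cardinality.
Sidebar := h.CachedPerKeyT(10*time.Minute, sidebarForUser)

// After: the same component, bounded to 50,000 entries.
sidebarStore := cache.NewLRUStore[any, string](50_000)
Sidebar = h.CachedPerKeyT(10*time.Minute, sidebarForUser, h.WithStore(sidebarStore))
```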
## Security Considerations
### Memory-Bounded Caches
For public-facing applications, we strongly recommend using a memory-bounded cache to prevent DoS attacks:
```go
// Limit the cache to a reasonable size based on your server's memory
lruStore := cache.NewLRUStore[any, string](100_000)

// Use it for all user-specific caching
UserContent := h.CachedPerKeyT(
	5*time.Minute,
	getUserContent,
	h.WithStore(lruStore),
)
```
### Cache Key Validation
When using user input as cache keys, always validate and sanitize:
```go
func cacheKeyForUser(userInput string) string {
	// Limit length and remove special characters
	key := strings.TrimSpace(userInput)
	if len(key) > 100 {
		key = key[:100]
	}
	return regexp.MustCompile(`[^a-zA-Z0-9_-]`).ReplaceAllString(key, "")
}
```
## Performance Considerations
1. **TTLStore**: Best for small caches with predictable key patterns
2. **LRUStore**: Good general-purpose choice with memory bounds
3. **Third-party stores**: Consider `go-freelru` or `theine-go` for high-performance needs
4. **Distributed stores**: Use Redis/Memcached for multi-instance deployments
## Best Practices
1. **Set appropriate cache sizes**: Balance memory usage with hit rates
2. **Use consistent TTLs**: Align with your data update patterns
3. **Monitor cache metrics**: Track hit rates, evictions, and memory usage
4. **Handle cache failures gracefully**: Caches should enhance, not break functionality
5. **Close caches properly**: Call `Close()` during graceful shutdown (see the sketch below)
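
For the last point, a minimal shutdown hook might look like this; the signal wiring is an assumption for illustration, only `Close()` comes from the `Store` interface:

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/maddalax/htmgo/framework/h/cache"
)

func main() {
	store := cache.NewLRUStore[any, string](100_000)

	// ... wire the store into cached components via h.WithStore(store) ...

	// Block until an interrupt, then release the store's cleanup goroutine.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop
	store.Close()
}
```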
## Future Enhancements
- Built-in metrics and monitoring hooks
- Automatic size estimation for cached values
- Warming and preloading strategies
- Cache invalidation patterns

framework/h/cache/example_test.go (new file)

@@ -0,0 +1,307 @@
package cache_test

import (
	"fmt"
	"sync"
	"time"

	"github.com/maddalax/htmgo/framework/h"
	"github.com/maddalax/htmgo/framework/h/cache"
)

// Example demonstrates basic caching with the default TTL store
func ExampleCached() {
	renderCount := 0

	// Create a cached component that expires after 5 minutes
	CachedHeader := h.Cached(5*time.Minute, func() *h.Element {
		renderCount++
		return h.Header(
			h.H1(h.Text("Welcome to our site")),
			h.P(h.Text(fmt.Sprintf("Rendered %d times", renderCount))),
		)
	})

	// First render - will execute the function
	html1 := h.Render(CachedHeader())
	fmt.Println("Render count:", renderCount)

	// Second render - will use cached HTML
	html2 := h.Render(CachedHeader())
	fmt.Println("Render count:", renderCount)
	fmt.Println("Same HTML:", html1 == html2)

	// Output:
	// Render count: 1
	// Render count: 1
	// Same HTML: true
}

// Example demonstrates per-key caching for user-specific content
func ExampleCachedPerKeyT() {
	type User struct {
		ID   int
		Name string
	}

	renderCounts := make(map[int]int)

	// Create a per-user cached component
	UserProfile := h.CachedPerKeyT(15*time.Minute, func(user User) (int, h.GetElementFunc) {
		// Use user ID as the cache key
		return user.ID, func() *h.Element {
			renderCounts[user.ID]++
			return h.Div(
				h.Class("user-profile"),
				h.H2(h.Text(user.Name)),
				h.P(h.Text(fmt.Sprintf("User ID: %d", user.ID))),
			)
		}
	})

	alice := User{ID: 1, Name: "Alice"}
	bob := User{ID: 2, Name: "Bob"}

	// Render Alice's profile - will execute
	h.Render(UserProfile(alice))
	fmt.Printf("Alice render count: %d\n", renderCounts[1])

	// Render Bob's profile - will execute
	h.Render(UserProfile(bob))
	fmt.Printf("Bob render count: %d\n", renderCounts[2])

	// Render Alice's profile again - will use cache
	h.Render(UserProfile(alice))
	fmt.Printf("Alice render count after cache hit: %d\n", renderCounts[1])

	// Output:
	// Alice render count: 1
	// Bob render count: 1
	// Alice render count after cache hit: 1
}

// Example demonstrates using a memory-bounded LRU cache
func ExampleWithStore_lru() {
	// Create an LRU cache that holds maximum 1000 items
	lruStore := cache.NewLRUStore[any, string](1000)
	defer lruStore.Close()

	renderCount := 0

	// Use the LRU cache for a component
	ProductCard := h.CachedPerKeyT(1*time.Hour,
		func(productID int) (int, h.GetElementFunc) {
			return productID, func() *h.Element {
				renderCount++
				// Simulate fetching product data
				return h.Div(
					h.H3(h.Text(fmt.Sprintf("Product #%d", productID))),
					h.P(h.Text("$99.99")),
				)
			}
		},
		h.WithStore(lruStore), // Use custom cache store
	)

	// Render many products
	for i := 0; i < 1500; i++ {
		h.Render(ProductCard(i))
	}

	// Due to LRU eviction, only 1000 items are cached
	// Earlier items (0-499) were evicted
	fmt.Printf("Total renders: %d\n", renderCount)
	fmt.Printf("Expected renders: %d (due to LRU eviction)\n", 1500)

	// Accessing an evicted item will cause a re-render
	h.Render(ProductCard(0))
	fmt.Printf("After accessing evicted item: %d\n", renderCount)

	// Output:
	// Total renders: 1500
	// Expected renders: 1500 (due to LRU eviction)
	// After accessing evicted item: 1501
}

// MockDistributedCache simulates a distributed cache like Redis
type MockDistributedCache struct {
	data  map[string]string
	mutex sync.RWMutex
}

// DistributedCacheAdapter makes MockDistributedCache compatible with cache.Store interface
type DistributedCacheAdapter struct {
	cache *MockDistributedCache
}

func (a *DistributedCacheAdapter) Set(key any, value string, ttl time.Duration) {
	a.cache.mutex.Lock()
	defer a.cache.mutex.Unlock()
	// In a real implementation, you'd set TTL in Redis
	keyStr := fmt.Sprintf("htmgo:%v", key)
	a.cache.data[keyStr] = value
}

func (a *DistributedCacheAdapter) Get(key any) (string, bool) {
	a.cache.mutex.RLock()
	defer a.cache.mutex.RUnlock()
	keyStr := fmt.Sprintf("htmgo:%v", key)
	val, ok := a.cache.data[keyStr]
	return val, ok
}

func (a *DistributedCacheAdapter) Delete(key any) {
	a.cache.mutex.Lock()
	defer a.cache.mutex.Unlock()
	keyStr := fmt.Sprintf("htmgo:%v", key)
	delete(a.cache.data, keyStr)
}

func (a *DistributedCacheAdapter) Purge() {
	a.cache.mutex.Lock()
	defer a.cache.mutex.Unlock()
	a.cache.data = make(map[string]string)
}

func (a *DistributedCacheAdapter) Close() {
	// Clean up connections in real implementation
}

// Example demonstrates creating a custom cache adapter
func ExampleDistributedCacheAdapter() {
	// Create the distributed cache
	distCache := &MockDistributedCache{
		data: make(map[string]string),
	}
	adapter := &DistributedCacheAdapter{cache: distCache}

	// Use it with a cached component
	SharedComponent := h.Cached(10*time.Minute, func() *h.Element {
		return h.Div(h.Text("Shared across all servers"))
	}, h.WithStore(adapter))

	html := h.Render(SharedComponent())
	fmt.Printf("Cached in distributed store: %v\n", len(distCache.data) > 0)
	fmt.Printf("HTML length: %d\n", len(html))

	// Output:
	// Cached in distributed store: true
	// HTML length: 36
}

// Example demonstrates overriding the default cache provider globally
func ExampleDefaultCacheProvider() {
	// Save the original provider to restore it later
	originalProvider := h.DefaultCacheProvider
	defer func() {
		h.DefaultCacheProvider = originalProvider
	}()

	// Override the default to use LRU for all cached components
	h.DefaultCacheProvider = func() cache.Store[any, string] {
		// All cached components will use 10,000 item LRU cache by default
		return cache.NewLRUStore[any, string](10_000)
	}

	// Now all cached components use LRU by default
	renderCount := 0
	AutoLRUComponent := h.Cached(1*time.Hour, func() *h.Element {
		renderCount++
		return h.Div(h.Text("Using LRU by default"))
	})

	h.Render(AutoLRUComponent())
	fmt.Printf("Render count: %d\n", renderCount)

	// Output:
	// Render count: 1
}

// Example demonstrates caching with complex keys
func ExampleCachedPerKeyT3() {
	type FilterOptions struct {
		Category string
		MinPrice float64
		MaxPrice float64
	}

	renderCount := 0

	// Cache filtered product lists with composite keys
	FilteredProducts := h.CachedPerKeyT3(30*time.Minute,
		func(category string, minPrice, maxPrice float64) (FilterOptions, h.GetElementFunc) {
			// Create composite key from all parameters
			key := FilterOptions{
				Category: category,
				MinPrice: minPrice,
				MaxPrice: maxPrice,
			}
			return key, func() *h.Element {
				renderCount++
				// Simulate database query with filters
				return h.Div(
					h.H3(h.Text(fmt.Sprintf("Products in %s", category))),
					h.P(h.Text(fmt.Sprintf("Price range: $%.2f - $%.2f", minPrice, maxPrice))),
					h.Ul(
						h.Li(h.Text("Product 1")),
						h.Li(h.Text("Product 2")),
						h.Li(h.Text("Product 3")),
					),
				)
			}
		},
	)

	// First query - will render
	h.Render(FilteredProducts("Electronics", 100.0, 500.0))
	fmt.Printf("Render count: %d\n", renderCount)

	// Same query - will use cache
	h.Render(FilteredProducts("Electronics", 100.0, 500.0))
	fmt.Printf("Render count after cache hit: %d\n", renderCount)

	// Different query - will render
	h.Render(FilteredProducts("Electronics", 200.0, 600.0))
	fmt.Printf("Render count after new query: %d\n", renderCount)

	// Output:
	// Render count: 1
	// Render count after cache hit: 1
	// Render count after new query: 2
}

// Example demonstrates cache expiration and refresh
func ExampleCached_expiration() {
	renderCount := 0
	now := time.Now()

	// Cache with very short TTL for demonstration
	TimeSensitive := h.Cached(100*time.Millisecond, func() *h.Element {
		renderCount++
		return h.Div(
			h.Text(fmt.Sprintf("Generated at: %s (render #%d)",
				now.Format("15:04:05"), renderCount)),
		)
	})

	// First render
	h.Render(TimeSensitive())
	fmt.Printf("Render count: %d\n", renderCount)

	// Immediate second render - uses cache
	h.Render(TimeSensitive())
	fmt.Printf("Render count (cached): %d\n", renderCount)

	// Wait for expiration
	time.Sleep(150 * time.Millisecond)

	// Render after expiration - will re-execute
	h.Render(TimeSensitive())
	fmt.Printf("Render count (after expiration): %d\n", renderCount)

	// Output:
	// Render count: 1
	// Render count (cached): 1
	// Render count (after expiration): 2
}

framework/h/cache/interface.go (new file)

@@ -0,0 +1,27 @@
package cache

import (
	"time"
)

// Store defines the interface for a pluggable cache.
// This allows users to provide their own caching implementations, such as LRU, LFU,
// or even distributed caches. The cache implementation is responsible for handling
// its own eviction policies (TTL, size limits, etc.).
type Store[K comparable, V any] interface {
	// Set adds or updates an entry in the cache. The implementation should handle the TTL.
	Set(key K, value V, ttl time.Duration)

	// Get retrieves an entry from the cache. The boolean return value indicates
	// whether the key was found and has not expired.
	Get(key K) (V, bool)

	// Delete removes an entry from the cache.
	Delete(key K)

	// Purge removes all items from the cache.
	Purge()

	// Close releases any resources used by the cache, such as background goroutines.
	Close()
}

framework/h/cache/lru_store_example.go (new file)

@@ -0,0 +1,181 @@
package cache

import (
	"container/list"
	"sync"
	"time"
)

// LRUStore is an example of a memory-bounded cache implementation using
// the Least Recently Used (LRU) eviction policy. This demonstrates how
// to create a custom cache store that prevents unbounded memory growth.
//
// This is a simple example implementation. For production use, consider
// using optimized libraries like github.com/elastic/go-freelru or
// github.com/Yiling-J/theine-go.
type LRUStore[K comparable, V any] struct {
	maxSize   int
	cache     map[K]*list.Element
	lru       *list.List
	mutex     sync.RWMutex
	closeChan chan struct{}
	closeOnce sync.Once
}

type lruEntry[K comparable, V any] struct {
	key        K
	value      V
	expiration time.Time
}

// NewLRUStore creates a new LRU cache with the specified maximum size.
// When the cache reaches maxSize, the least recently used items are evicted.
func NewLRUStore[K comparable, V any](maxSize int) Store[K, V] {
	if maxSize <= 0 {
		panic("LRUStore maxSize must be positive")
	}

	s := &LRUStore[K, V]{
		maxSize:   maxSize,
		cache:     make(map[K]*list.Element),
		lru:       list.New(),
		closeChan: make(chan struct{}),
	}

	// Start a goroutine to periodically clean up expired entries
	go s.cleanupExpired()

	return s
}

// Set adds or updates an entry in the cache with the given TTL.
// If the cache is at capacity, the least recently used item is evicted.
func (s *LRUStore[K, V]) Set(key K, value V, ttl time.Duration) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	expiration := time.Now().Add(ttl)

	// Check if key already exists
	if elem, exists := s.cache[key]; exists {
		// Update existing entry and move to front
		entry := elem.Value.(*lruEntry[K, V])
		entry.value = value
		entry.expiration = expiration
		s.lru.MoveToFront(elem)
		return
	}

	// Add new entry
	entry := &lruEntry[K, V]{
		key:        key,
		value:      value,
		expiration: expiration,
	}
	elem := s.lru.PushFront(entry)
	s.cache[key] = elem

	// Evict oldest if over capacity
	if s.lru.Len() > s.maxSize {
		oldest := s.lru.Back()
		if oldest != nil {
			s.removeElement(oldest)
		}
	}
}

// Get retrieves an entry from the cache.
// Returns the value and true if found and not expired, zero value and false otherwise.
func (s *LRUStore[K, V]) Get(key K) (V, bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	var zero V

	elem, exists := s.cache[key]
	if !exists {
		return zero, false
	}

	entry := elem.Value.(*lruEntry[K, V])

	// Check if expired
	if time.Now().After(entry.expiration) {
		s.removeElement(elem)
		return zero, false
	}

	// Move to front (mark as recently used)
	s.lru.MoveToFront(elem)
	return entry.value, true
}

// Delete removes an entry from the cache.
func (s *LRUStore[K, V]) Delete(key K) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if elem, exists := s.cache[key]; exists {
		s.removeElement(elem)
	}
}

// Purge removes all items from the cache.
func (s *LRUStore[K, V]) Purge() {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	s.cache = make(map[K]*list.Element)
	s.lru.Init()
}

// Close stops the background cleanup goroutine.
func (s *LRUStore[K, V]) Close() {
	s.closeOnce.Do(func() {
		close(s.closeChan)
	})
}

// removeElement removes an element from both the map and the list.
// Must be called with the mutex held.
func (s *LRUStore[K, V]) removeElement(elem *list.Element) {
	entry := elem.Value.(*lruEntry[K, V])
	delete(s.cache, entry.key)
	s.lru.Remove(elem)
}

// cleanupExpired periodically removes expired entries.
func (s *LRUStore[K, V]) cleanupExpired() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			s.removeExpired()
		case <-s.closeChan:
			return
		}
	}
}

// removeExpired scans the cache and removes expired entries.
func (s *LRUStore[K, V]) removeExpired() {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	now := time.Now()

	// Create a slice to hold elements to remove to avoid modifying list during iteration
	var toRemove []*list.Element
	for elem := s.lru.Back(); elem != nil; elem = elem.Prev() {
		entry := elem.Value.(*lruEntry[K, V])
		if now.After(entry.expiration) {
			toRemove = append(toRemove, elem)
		}
	}

	// Remove expired elements
	for _, elem := range toRemove {
		s.removeElement(elem)
	}
}

framework/h/cache/lru_store_test.go (new file)

@@ -0,0 +1,353 @@
package cache

import (
	"sync"
	"testing"
	"time"
)

func TestLRUStore_SetAndGet(t *testing.T) {
	store := NewLRUStore[string, string](10)
	defer store.Close()

	// Test basic set and get
	store.Set("key1", "value1", 1*time.Hour)

	val, found := store.Get("key1")
	if !found {
		t.Error("Expected to find key1")
	}
	if val != "value1" {
		t.Errorf("Expected value1, got %s", val)
	}

	// Test getting non-existent key
	val, found = store.Get("nonexistent")
	if found {
		t.Error("Expected not to find nonexistent key")
	}
	if val != "" {
		t.Errorf("Expected empty string for non-existent key, got %s", val)
	}
}

func TestLRUStore_SizeLimit(t *testing.T) {
	// Create store with capacity of 3
	store := NewLRUStore[int, string](3)
	defer store.Close()

	// Add 3 items
	store.Set(1, "one", 1*time.Hour)
	store.Set(2, "two", 1*time.Hour)
	store.Set(3, "three", 1*time.Hour)

	// Verify all exist
	for i := 1; i <= 3; i++ {
		val, found := store.Get(i)
		if !found {
			t.Errorf("Expected to find key %d", i)
		}
		if val != []string{"one", "two", "three"}[i-1] {
			t.Errorf("Unexpected value for key %d: %s", i, val)
		}
	}

	// Add fourth item, should evict least recently used (key 1)
	store.Set(4, "four", 1*time.Hour)

	// Key 1 should be evicted
	_, found := store.Get(1)
	if found {
		t.Error("Expected key 1 to be evicted")
	}

	// Keys 2, 3, 4 should still exist
	for i := 2; i <= 4; i++ {
		_, found := store.Get(i)
		if !found {
			t.Errorf("Expected to find key %d", i)
		}
	}
}

func TestLRUStore_LRUBehavior(t *testing.T) {
	store := NewLRUStore[string, string](3)
	defer store.Close()

	// Add items in order
	store.Set("a", "A", 1*time.Hour)
	store.Set("b", "B", 1*time.Hour)
	store.Set("c", "C", 1*time.Hour)

	// Access "a" to make it recently used
	store.Get("a")

	// Add "d", should evict "b" (least recently used)
	store.Set("d", "D", 1*time.Hour)

	// Check what's in cache
	_, foundA := store.Get("a")
	_, foundB := store.Get("b")
	_, foundC := store.Get("c")
	_, foundD := store.Get("d")

	if !foundA {
		t.Error("Expected 'a' to still be in cache (was accessed)")
	}
	if foundB {
		t.Error("Expected 'b' to be evicted (least recently used)")
	}
	if !foundC {
		t.Error("Expected 'c' to still be in cache")
	}
	if !foundD {
		t.Error("Expected 'd' to be in cache (just added)")
	}
}

func TestLRUStore_UpdateMovesToFront(t *testing.T) {
	store := NewLRUStore[string, string](3)
	defer store.Close()

	// Fill cache
	store.Set("a", "A", 1*time.Hour)
	store.Set("b", "B", 1*time.Hour)
	store.Set("c", "C", 1*time.Hour)

	// Update "a" with new value - should move to front
	store.Set("a", "A_updated", 1*time.Hour)

	// Add new item - should evict "b" not "a"
	store.Set("d", "D", 1*time.Hour)

	val, found := store.Get("a")
	if !found {
		t.Error("Expected 'a' to still be in cache after update")
	}
	if val != "A_updated" {
		t.Errorf("Expected updated value, got %s", val)
	}

	_, found = store.Get("b")
	if found {
		t.Error("Expected 'b' to be evicted")
	}
}

func TestLRUStore_Expiration(t *testing.T) {
	store := NewLRUStore[string, string](10)
	defer store.Close()

	// Set with short TTL
	store.Set("shortlived", "value", 100*time.Millisecond)

	// Should exist immediately
	val, found := store.Get("shortlived")
	if !found {
		t.Error("Expected to find shortlived key immediately after setting")
	}
	if val != "value" {
		t.Errorf("Expected value, got %s", val)
	}

	// Wait for expiration
	time.Sleep(150 * time.Millisecond)

	// Should be expired now
	val, found = store.Get("shortlived")
	if found {
		t.Error("Expected key to be expired")
	}
	if val != "" {
		t.Errorf("Expected empty string for expired key, got %s", val)
	}
}

func TestLRUStore_Delete(t *testing.T) {
	store := NewLRUStore[string, string](10)
	defer store.Close()

	store.Set("key1", "value1", 1*time.Hour)

	// Verify it exists
	_, found := store.Get("key1")
	if !found {
		t.Error("Expected to find key1 before deletion")
	}

	// Delete it
	store.Delete("key1")

	// Verify it's gone
	_, found = store.Get("key1")
	if found {
		t.Error("Expected key1 to be deleted")
	}

	// Delete non-existent key should not panic
	store.Delete("nonexistent")
}

func TestLRUStore_Purge(t *testing.T) {
	store := NewLRUStore[string, string](10)
	defer store.Close()

	// Add multiple items
	store.Set("key1", "value1", 1*time.Hour)
	store.Set("key2", "value2", 1*time.Hour)
	store.Set("key3", "value3", 1*time.Hour)

	// Verify they exist
	for i := 1; i <= 3; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if !found {
			t.Errorf("Expected to find %s before purge", key)
		}
	}

	// Purge all
	store.Purge()

	// Verify all are gone
	for i := 1; i <= 3; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if found {
			t.Errorf("Expected %s to be purged", key)
		}
	}
}

func TestLRUStore_ConcurrentAccess(t *testing.T) {
	// Need capacity for all unique keys: 100 goroutines * 100 operations = 10,000
	store := NewLRUStore[int, int](10000)
	defer store.Close()

	const numGoroutines = 100
	const numOperations = 100

	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	// Concurrent writes and reads
	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			defer wg.Done()
			for j := 0; j < numOperations; j++ {
				key := (id * numOperations) + j
				store.Set(key, key*2, 1*time.Hour)

				// Immediately read it back
				val, found := store.Get(key)
				if !found {
					t.Errorf("Goroutine %d: Expected to find key %d", id, key)
				}
				if val != key*2 {
					t.Errorf("Goroutine %d: Expected value %d, got %d", id, key*2, val)
				}
			}
		}(i)
	}

	wg.Wait()
}

func TestLRUStore_ExpiredEntriesCleanup(t *testing.T) {
	store := NewLRUStore[string, string](100)
	defer store.Close()

	// Add many short-lived entries
	for i := 0; i < 50; i++ {
		key := "key" + string(rune('0'+i))
		store.Set(key, "value", 100*time.Millisecond)
	}

	// Add some long-lived entries
	for i := 50; i < 60; i++ {
		key := "key" + string(rune('0'+i))
		store.Set(key, "value", 1*time.Hour)
	}

	// Wait for the short-lived entries to expire. Get treats expired entries
	// as missing even before the periodic cleanup goroutine runs.
	time.Sleep(1200 * time.Millisecond)

	// Check that expired entries are gone
	for i := 0; i < 50; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if found {
			t.Errorf("Expected expired key %s to be cleaned up", key)
		}
	}

	// Long-lived entries should still exist
	for i := 50; i < 60; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if !found {
			t.Errorf("Expected long-lived key %s to still exist", key)
		}
	}
}

func TestLRUStore_InvalidSize(t *testing.T) {
	// Test that creating store with invalid size panics
	defer func() {
		if r := recover(); r == nil {
			t.Error("Expected panic for zero size")
		}
	}()

	NewLRUStore[string, string](0)
}

func TestLRUStore_Close(t *testing.T) {
	store := NewLRUStore[string, string](10)

	// Close should not panic
	store.Close()

	// Multiple closes should not panic
	store.Close()
	store.Close()
}

func TestLRUStore_ComplexEvictionScenario(t *testing.T) {
	store := NewLRUStore[string, string](4)
	defer store.Close()

	// Fill cache
	store.Set("a", "A", 1*time.Hour)
	store.Set("b", "B", 1*time.Hour)
	store.Set("c", "C", 1*time.Hour)
	store.Set("d", "D", 1*time.Hour)

	// Access in specific order to control LRU order
	store.Get("b") // b is most recently used
	store.Get("d") // d is second most recently used
	store.Get("a") // a is third most recently used
	// c is least recently used

	// Add two new items
	store.Set("e", "E", 1*time.Hour) // Should evict c
	store.Set("f", "F", 1*time.Hour) // Should evict the next LRU (b)

	// Check final state
	expected := map[string]bool{
		"a": true,  // Most recently used before additions
		"b": false, // Should be evicted as second LRU
		"c": false, // First to be evicted
		"d": true,  // Second most recently used
		"e": true,  // Just added
		"f": true,  // Just added
	}

	for key, shouldExist := range expected {
		_, found := store.Get(key)
		if found != shouldExist {
			t.Errorf("Key %s: expected existence=%v, got=%v", key, shouldExist, found)
		}
	}
}

framework/h/cache/ttl_store.go (new file)

@@ -0,0 +1,128 @@
package cache

import (
	"flag"
	"log/slog"
	"sync"
	"time"
)

// TTLStore is a time-to-live based cache implementation that mimics
// the original htmgo caching behavior. It stores values with expiration
// times and periodically cleans up expired entries.
type TTLStore[K comparable, V any] struct {
	cache     map[K]*entry[V]
	mutex     sync.RWMutex
	closeOnce sync.Once
	closeChan chan struct{}
}

type entry[V any] struct {
	value      V
	expiration time.Time
}

// NewTTLStore creates a new TTL-based cache store.
func NewTTLStore[K comparable, V any]() Store[K, V] {
	s := &TTLStore[K, V]{
		cache:     make(map[K]*entry[V]),
		closeChan: make(chan struct{}),
	}
	s.startCleaner()
	return s
}

// Set adds or updates an entry in the cache with the given TTL.
func (s *TTLStore[K, V]) Set(key K, value V, ttl time.Duration) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	s.cache[key] = &entry[V]{
		value:      value,
		expiration: time.Now().Add(ttl),
	}
}

// Get retrieves an entry from the cache.
func (s *TTLStore[K, V]) Get(key K) (V, bool) {
	s.mutex.RLock()
	defer s.mutex.RUnlock()

	var zero V

	e, ok := s.cache[key]
	if !ok {
		return zero, false
	}

	// Check if expired
	if time.Now().After(e.expiration) {
		return zero, false
	}

	return e.value, true
}

// Delete removes an entry from the cache.
func (s *TTLStore[K, V]) Delete(key K) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.cache, key)
}

// Purge removes all items from the cache.
func (s *TTLStore[K, V]) Purge() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.cache = make(map[K]*entry[V])
}

// Close stops the background cleaner goroutine.
func (s *TTLStore[K, V]) Close() {
	s.closeOnce.Do(func() {
		close(s.closeChan)
	})
}

// startCleaner starts a background goroutine that periodically removes expired entries.
func (s *TTLStore[K, V]) startCleaner() {
	isTests := flag.Lookup("test.v") != nil
	go func() {
		// Clean every second under `go test` so expiration tests run quickly,
		// otherwise every minute.
		interval := time.Minute
		if isTests {
			interval = time.Second
		}
		ticker := time.NewTicker(interval)
		defer ticker.Stop()

		for {
			select {
			case <-ticker.C:
				s.clearExpired()
			case <-s.closeChan:
				return
			}
		}
	}()
}

// clearExpired removes all expired entries from the cache.
func (s *TTLStore[K, V]) clearExpired() {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	now := time.Now()
	deletedCount := 0

	for key, e := range s.cache {
		if now.After(e.expiration) {
			delete(s.cache, key)
			deletedCount++
		}
	}

	if deletedCount > 0 {
		slog.Debug("Deleted expired cache entries", slog.Int("count", deletedCount))
	}
}

framework/h/cache/ttl_store_test.go (new file)

@@ -0,0 +1,263 @@
package cache

import (
	"sync"
	"testing"
	"time"
)

func TestTTLStore_SetAndGet(t *testing.T) {
	store := NewTTLStore[string, string]()
	defer store.Close()

	// Test basic set and get
	store.Set("key1", "value1", 1*time.Hour)

	val, found := store.Get("key1")
	if !found {
		t.Error("Expected to find key1")
	}
	if val != "value1" {
		t.Errorf("Expected value1, got %s", val)
	}

	// Test getting non-existent key
	val, found = store.Get("nonexistent")
	if found {
		t.Error("Expected not to find nonexistent key")
	}
	if val != "" {
		t.Errorf("Expected empty string for non-existent key, got %s", val)
	}
}

func TestTTLStore_Expiration(t *testing.T) {
	store := NewTTLStore[string, string]()
	defer store.Close()

	// Set with short TTL
	store.Set("shortlived", "value", 100*time.Millisecond)

	// Should exist immediately
	val, found := store.Get("shortlived")
	if !found {
		t.Error("Expected to find shortlived key immediately after setting")
	}
	if val != "value" {
		t.Errorf("Expected value, got %s", val)
	}

	// Wait for expiration
	time.Sleep(150 * time.Millisecond)

	// Should be expired now
	val, found = store.Get("shortlived")
	if found {
		t.Error("Expected key to be expired")
	}
	if val != "" {
		t.Errorf("Expected empty string for expired key, got %s", val)
	}
}

func TestTTLStore_Delete(t *testing.T) {
	store := NewTTLStore[string, string]()
	defer store.Close()

	store.Set("key1", "value1", 1*time.Hour)

	// Verify it exists
	_, found := store.Get("key1")
	if !found {
		t.Error("Expected to find key1 before deletion")
	}

	// Delete it
	store.Delete("key1")

	// Verify it's gone
	_, found = store.Get("key1")
	if found {
		t.Error("Expected key1 to be deleted")
	}

	// Delete non-existent key should not panic
	store.Delete("nonexistent")
}

func TestTTLStore_Purge(t *testing.T) {
	store := NewTTLStore[string, string]()
	defer store.Close()

	// Add multiple items
	store.Set("key1", "value1", 1*time.Hour)
	store.Set("key2", "value2", 1*time.Hour)
	store.Set("key3", "value3", 1*time.Hour)

	// Verify they exist
	for i := 1; i <= 3; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if !found {
			t.Errorf("Expected to find %s before purge", key)
		}
	}

	// Purge all
	store.Purge()

	// Verify all are gone
	for i := 1; i <= 3; i++ {
		key := "key" + string(rune('0'+i))
		_, found := store.Get(key)
		if found {
			t.Errorf("Expected %s to be purged", key)
		}
	}
}

func TestTTLStore_ConcurrentAccess(t *testing.T) {
	store := NewTTLStore[int, int]()
	defer store.Close()

	const numGoroutines = 100
	const numOperations = 1000

	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	// Concurrent writes and reads
	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			defer wg.Done()
			for j := 0; j < numOperations; j++ {
				key := (id * numOperations) + j
				store.Set(key, key*2, 1*time.Hour)

				// Immediately read it back
				val, found := store.Get(key)
				if !found {
					t.Errorf("Goroutine %d: Expected to find key %d", id, key)
				}
				if val != key*2 {
					t.Errorf("Goroutine %d: Expected value %d, got %d", id, key*2, val)
				}
			}
		}(i)
	}

	wg.Wait()
}

func TestTTLStore_UpdateExisting(t *testing.T) {
	store := NewTTLStore[string, string]()
	defer store.Close()

	// Set initial value
	store.Set("key1", "value1", 100*time.Millisecond)

	// Update with new value and longer TTL
	store.Set("key1", "value2", 1*time.Hour)

	// Verify new value
	val, found := store.Get("key1")
	if !found {
		t.Error("Expected to find key1 after update")
	}
	if val != "value2" {
		t.Errorf("Expected value2, got %s", val)
	}

	// Wait for original TTL to pass
	time.Sleep(150 * time.Millisecond)

	// Should still exist with new TTL
	val, found = store.Get("key1")
	if !found {
		t.Error("Expected key1 to still exist with new TTL")
	}
	if val != "value2" {
		t.Errorf("Expected value2, got %s", val)
	}
}

func TestTTLStore_CleanupGoroutine(t *testing.T) {
	// This test verifies that expired entries are cleaned up automatically
	store := NewTTLStore[string, string]()
	defer store.Close()

	// Add many short-lived entries
	for i := 0; i < 100; i++ {
		key := "key" + string(rune('0'+i))
		store.Set(key, "value", 100*time.Millisecond)
	}

	// Cast to access internal state for testing
	ttlStore := store.(*TTLStore[string, string])

	// Check initial count
	ttlStore.mutex.RLock()
	initialCount := len(ttlStore.cache)
	ttlStore.mutex.RUnlock()

	if initialCount != 100 {
		t.Errorf("Expected 100 entries initially, got %d", initialCount)
	}

	// Wait for expiration and cleanup cycle
	// In test mode, cleanup runs every second
	time.Sleep(1200 * time.Millisecond)

	// Check that entries were cleaned up
	ttlStore.mutex.RLock()
	finalCount := len(ttlStore.cache)
	ttlStore.mutex.RUnlock()

	if finalCount != 0 {
		t.Errorf("Expected 0 entries after cleanup, got %d", finalCount)
	}
}

func TestTTLStore_Close(t *testing.T) {
	store := NewTTLStore[string, string]()

	// Close should not panic
	store.Close()

	// Multiple closes should not panic
	store.Close()
	store.Close()
}

func TestTTLStore_DifferentTypes(t *testing.T) {
	// Test with different key and value types
	intStore := NewTTLStore[int, string]()
	defer intStore.Close()

	intStore.Set(42, "answer", 1*time.Hour)
	val, found := intStore.Get(42)
	if !found || val != "answer" {
		t.Error("Failed with int key")
	}

	// Test with struct values
	type User struct {
		ID   int
		Name string
	}

	userStore := NewTTLStore[string, User]()
	defer userStore.Close()

	user := User{ID: 1, Name: "Alice"}
	userStore.Set("user1", user, 1*time.Hour)

	retrievedUser, found := userStore.Get("user1")
	if !found {
		t.Error("Failed to retrieve user")
	}
	if retrievedUser.ID != 1 || retrievedUser.Name != "Alice" {
		t.Error("Retrieved user data doesn't match")
	}
}

@@ -0,0 +1,448 @@
package h

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/maddalax/htmgo/framework/h/cache"
)

func TestCached_WithDefaultStore(t *testing.T) {
	callCount := 0

	// Create a cached component
	CachedDiv := Cached(1*time.Hour, func() *Element {
		callCount++
		return Div(Text(fmt.Sprintf("Rendered %d times", callCount)))
	})

	// First render
	html1 := Render(CachedDiv())
	if callCount != 1 {
		t.Errorf("Expected 1 render, got %d", callCount)
	}

	// Second render should use cache
	html2 := Render(CachedDiv())
	if callCount != 1 {
		t.Errorf("Expected still 1 render (cached), got %d", callCount)
	}

	if html1 != html2 {
		t.Error("Expected same HTML from cache")
	}
}

func TestCached_WithCustomStore(t *testing.T) {
	// Use LRU store with small capacity
	lruStore := cache.NewLRUStore[any, string](10)
	defer lruStore.Close()

	callCount := 0

	// Create cached component with custom store
	CachedDiv := Cached(1*time.Hour, func() *Element {
		callCount++
		return Div(Text(fmt.Sprintf("Rendered %d times", callCount)))
	}, WithStore(lruStore))

	// First render
	html1 := Render(CachedDiv())
	if callCount != 1 {
		t.Errorf("Expected 1 render, got %d", callCount)
	}

	// Second render should use cache
	html2 := Render(CachedDiv())
	if callCount != 1 {
		t.Errorf("Expected still 1 render (cached), got %d", callCount)
	}

	if html1 != html2 {
		t.Error("Expected same HTML from cache")
	}
}

func TestCachedPerKey_WithDefaultStore(t *testing.T) {
	renderCounts := make(map[int]int)

	// Create per-key cached component
	UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
		return userID, func() *Element {
			renderCounts[userID]++
			return Div(Text(fmt.Sprintf("User %d (rendered %d times)", userID, renderCounts[userID])))
		}
	})

	// Render for different users
	html1_user1 := Render(UserProfile(1))
	html1_user2 := Render(UserProfile(2))

	if renderCounts[1] != 1 || renderCounts[2] != 1 {
		t.Error("Expected each user to be rendered once")
	}

	// Render again - should use cache
	html2_user1 := Render(UserProfile(1))
	html2_user2 := Render(UserProfile(2))

	if renderCounts[1] != 1 || renderCounts[2] != 1 {
		t.Error("Expected renders to be cached")
	}

	if html1_user1 != html2_user1 || html1_user2 != html2_user2 {
		t.Error("Expected same HTML from cache")
	}

	// Different users should have different content
	if html1_user1 == html1_user2 {
		t.Error("Expected different content for different users")
	}
}

func TestCachedPerKey_WithLRUStore(t *testing.T) {
	// Small LRU cache that can only hold 2 items
	lruStore := cache.NewLRUStore[any, string](2)
	defer lruStore.Close()

	renderCounts := make(map[int]int)

	// Create per-key cached component with LRU store
	UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
		return userID, func() *Element {
			renderCounts[userID]++
			return Div(Text(fmt.Sprintf("User %d", userID)))
		}
	}, WithStore(lruStore))

	// Render 2 users - fill cache to capacity
	Render(UserProfile(1))
	Render(UserProfile(2))

	if renderCounts[1] != 1 || renderCounts[2] != 1 {
		t.Error("Expected each user to be rendered once")
	}

	// Render user 3 - should evict user 1 (least recently used)
	Render(UserProfile(3))

	if renderCounts[3] != 1 {
		t.Error("Expected user 3 to be rendered once")
	}

	// Render user 1 again - should re-render (was evicted)
	Render(UserProfile(1))

	if renderCounts[1] != 2 {
		t.Errorf("Expected user 1 to be re-rendered after eviction, got %d renders", renderCounts[1])
	}

	// Render user 2 again - should re-render (was evicted when user 1 was added back)
	Render(UserProfile(2))

	if renderCounts[2] != 2 {
		t.Errorf("Expected user 2 to be re-rendered after eviction, got %d renders", renderCounts[2])
	}

	// At this point, cache contains users 1 and 2 (most recently used)
	// Render user 1 again - should be cached
	Render(UserProfile(1))

	if renderCounts[1] != 2 {
		t.Errorf("Expected user 1 to still be cached, got %d renders", renderCounts[1])
	}
}

func TestCachedT_WithDefaultStore(t *testing.T) {
	type Product struct {
		ID    int
		Name  string
		Price float64
	}

	renderCount := 0

	// Create cached component that takes typed data
	ProductCard := CachedT(1*time.Hour, func(p Product) *Element {
		renderCount++
		return Div(
			H3(Text(p.Name)),
			P(Text(fmt.Sprintf("$%.2f", p.Price))),
		)
	})

	product := Product{ID: 1, Name: "Widget", Price: 9.99}

	// First render
	html1 := Render(ProductCard(product))
	if renderCount != 1 {
		t.Errorf("Expected 1 render, got %d", renderCount)
	}

	// Second render should use cache
	html2 := Render(ProductCard(product))
if renderCount != 1 {
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
}
if html1 != html2 {
t.Error("Expected same HTML from cache")
}
}
func TestCachedPerKeyT_WithCustomStore(t *testing.T) {
type Article struct {
ID int
Title string
Content string
}
ttlStore := cache.NewTTLStore[any, string]()
defer ttlStore.Close()
renderCounts := make(map[int]int)
// Create per-key cached component with custom store
ArticleView := CachedPerKeyT(1*time.Hour, func(a Article) (int, GetElementFunc) {
return a.ID, func() *Element {
renderCounts[a.ID]++
return Div(
H1(Text(a.Title)),
P(Text(a.Content)),
)
}
}, WithStore(ttlStore))
article1 := Article{ID: 1, Title: "First", Content: "Content 1"}
article2 := Article{ID: 2, Title: "Second", Content: "Content 2"}
// Render articles
Render(ArticleView(article1))
Render(ArticleView(article2))
if renderCounts[1] != 1 || renderCounts[2] != 1 {
t.Error("Expected each article to be rendered once")
}
// Render again - should use cache
Render(ArticleView(article1))
Render(ArticleView(article2))
if renderCounts[1] != 1 || renderCounts[2] != 1 {
t.Error("Expected renders to be cached")
}
}
func TestDefaultCacheProvider_Override(t *testing.T) {
// Save original provider
originalProvider := DefaultCacheProvider
defer func() {
DefaultCacheProvider = originalProvider
}()
// Track which cache is used
customCacheUsed := false
// Override default provider
DefaultCacheProvider = func() cache.Store[any, string] {
customCacheUsed = true
return cache.NewLRUStore[any, string](100)
}
// Create cached component without specifying store
CachedDiv := Cached(1*time.Hour, func() *Element {
return Div(Text("Content"))
})
// Render to trigger cache creation
Render(CachedDiv())
if !customCacheUsed {
t.Error("Expected custom default cache provider to be used")
}
}
func TestCachedPerKey_ConcurrentAccess(t *testing.T) {
lruStore := cache.NewLRUStore[any, string](1000)
defer lruStore.Close()
UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
return userID, func() *Element {
// Simulate some work
time.Sleep(10 * time.Millisecond)
return Div(Text(fmt.Sprintf("User %d", userID)))
}
}, WithStore(lruStore))
const numGoroutines = 50
const numUsers = 20
var wg sync.WaitGroup
wg.Add(numGoroutines)
// Many goroutines accessing overlapping user IDs
for i := 0; i < numGoroutines; i++ {
go func(id int) {
defer wg.Done()
for j := 0; j < numUsers; j++ {
userID := j % 10 // Reuse user IDs to test cache hits
html := Render(UserProfile(userID))
expectedContent := fmt.Sprintf("User %d", userID)
if !contains(html, expectedContent) {
t.Errorf("Goroutine %d: Expected content for user %d", id, userID)
}
}
}(i)
}
wg.Wait()
}
func TestCachedT2_MultipleParameters(t *testing.T) {
renderCount := 0
// Component that takes two parameters
CombinedView := CachedT2(1*time.Hour, func(title string, count int) *Element {
renderCount++
return Div(
H2(Text(title)),
P(Text(fmt.Sprintf("Count: %d", count))),
)
})
// First render
html1 := Render(CombinedView("Test", 42))
if renderCount != 1 {
t.Errorf("Expected 1 render, got %d", renderCount)
}
// Second render with same params should use cache
html2 := Render(CombinedView("Test", 42))
if renderCount != 1 {
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
}
if html1 != html2 {
t.Error("Expected same HTML from cache")
}
}
func TestCachedPerKeyT3_ComplexKey(t *testing.T) {
type CompositeKey struct {
UserID int
ProductID int
Timestamp int64
}
renderCount := 0
// Component with composite key
UserProductView := CachedPerKeyT3(1*time.Hour,
func(userID int, productID int, timestamp int64) (CompositeKey, GetElementFunc) {
key := CompositeKey{UserID: userID, ProductID: productID, Timestamp: timestamp}
return key, func() *Element {
renderCount++
return Div(Text(fmt.Sprintf("User %d viewed product %d at %d", userID, productID, timestamp)))
}
},
)
// Render with specific combination
ts := time.Now().Unix()
html1 := Render(UserProductView(1, 100, ts))
if renderCount != 1 {
t.Errorf("Expected 1 render, got %d", renderCount)
}
// Same combination should use cache
html2 := Render(UserProductView(1, 100, ts))
if renderCount != 1 {
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
}
if html1 != html2 {
t.Error("Expected same HTML from cache")
}
// Different combination should render again
Render(UserProductView(1, 101, ts))
if renderCount != 2 {
t.Errorf("Expected 2 renders for different key, got %d", renderCount)
}
}
func TestCached_Expiration(t *testing.T) {
callCount := 0
// Create cached component with short TTL
CachedDiv := Cached(100*time.Millisecond, func() *Element {
callCount++
return Div(Text(fmt.Sprintf("Render %d", callCount)))
})
// First render
Render(CachedDiv())
if callCount != 1 {
t.Errorf("Expected 1 render, got %d", callCount)
}
// Immediate second render should use cache
Render(CachedDiv())
if callCount != 1 {
t.Errorf("Expected still 1 render (cached), got %d", callCount)
}
// Wait for expiration
time.Sleep(150 * time.Millisecond)
// Should render again after expiration
Render(CachedDiv())
if callCount != 2 {
t.Errorf("Expected 2 renders after expiration, got %d", callCount)
}
}
func TestCachedNode_ClearCache(t *testing.T) {
lruStore := cache.NewLRUStore[any, string](10)
defer lruStore.Close()
callCount := 0
CachedDiv := Cached(1*time.Hour, func() *Element {
callCount++
return Div(Text("Content"))
}, WithStore(lruStore))
// Render and cache
element := CachedDiv()
Render(element)
if callCount != 1 {
t.Errorf("Expected 1 render, got %d", callCount)
}
// Clear cache
node := element.meta.(*CachedNode)
node.ClearCache()
// Should render again after cache clear
Render(element)
if callCount != 2 {
t.Errorf("Expected 2 renders after cache clear, got %d", callCount)
}
}
// contains reports whether substr occurs within s; a thin wrapper over the
// standard library to keep call sites short.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
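// Usage sketch (illustrative, not exercised by these tests): an application
// that wants memory-bounded caching for every cached component can override
// the package-level provider once at startup instead of passing WithStore
// to each component; the capacity below is an arbitrary example:
//
//	func init() {
//		h.DefaultCacheProvider = func() cache.Store[any, string] {
//			return cache.NewLRUStore[any, string](10000)
//		}
//	}
//
// Components that pass WithStore(...) keep their explicit store; the
// provider presumably applies only when no store is given, which is the
// path TestDefaultCacheProvider_Override covers above.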


@@ -1,13 +1,14 @@
 package h
 
 import (
-	"github.com/google/uuid"
-	"github.com/stretchr/testify/assert"
-	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
+
+	"github.com/google/uuid"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestRendererShouldRenderDocType(t *testing.T) {
@@ -385,21 +386,19 @@ func TestCacheByKeyT2(t *testing.T) {
 func TestCacheByKeyConcurrent(t *testing.T) {
 	t.Parallel()
-	renderCount := 0
-	callCount := 0
+	var renderCount, callCount atomic.Uint32
 	cachedItem := CachedPerKey(time.Hour, func() (any, GetElementFunc) {
-		key := "key"
-		if callCount == 3 {
-			key = "key2"
-		}
-		if callCount == 4 {
-			key = "key"
-		}
-		callCount++
-		return key, func() *Element {
-			renderCount++
+		fn := func() *Element {
+			renderCount.Add(1)
 			return Div(Text("hello"))
 		}
+		switch callCount.Add(1) {
+		case 4:
+			return "key2", fn
+		default:
+			return "key", fn
+		}
 	})
 
 	wg := sync.WaitGroup{}
@@ -415,8 +414,8 @@ func TestCacheByKeyConcurrent(t *testing.T) {
 	wg.Wait()
 
-	assert.Equal(t, 5, callCount)
-	assert.Equal(t, 2, renderCount)
+	assert.Equal(t, 5, int(callCount.Load()))
+	assert.Equal(t, 2, int(renderCount.Load()))
 }
 
 func TestCacheByKeyT1_2(t *testing.T) {
@@ -474,76 +473,112 @@ func TestCacheByKeyT1Expired_2(t *testing.T) {
 }
 
 func TestClearExpiredCached(t *testing.T) {
-	t.Parallel()
 	renderCount := 0
-	cachedItem := Cached(time.Millisecond*3, func() *Element {
+	cachedItem := Cached(time.Millisecond*2, func() *Element {
 		renderCount++
-		return Pf("hello")
+		return Div(Text("hello"))
 	})
+	// First render
 	Render(cachedItem())
-	Render(cachedItem())
-	node := cachedItem().meta.(*CachedNode)
 	assert.Equal(t, 1, renderCount)
-	assert.NotEmpty(t, node.html)
+	// Should use cache immediately
+	Render(cachedItem())
+	assert.Equal(t, 1, renderCount)
+	// Wait for expiration
 	time.Sleep(time.Millisecond * 3)
-	node.ClearExpired()
-	assert.Empty(t, node.html)
+	// Should re-render after expiration
+	Render(cachedItem())
+	assert.Equal(t, 2, renderCount)
 }
 
 func TestClearExpiredCacheByKey(t *testing.T) {
-	t.Parallel()
 	renderCount := 0
-	cachedItem := CachedPerKeyT(time.Millisecond, func(key int) (any, GetElementFunc) {
+	// Create two cached functions with different TTLs
+	shortLivedCache := CachedPerKeyT(time.Millisecond*1, func(key int) (int, GetElementFunc) {
 		return key, func() *Element {
 			renderCount++
-			return Pf(strconv.Itoa(key))
+			return Div(Text("short-lived"))
 		}
 	})
+	longLivedCache := CachedPerKeyT(time.Hour, func(key int) (int, GetElementFunc) {
+		return key, func() *Element {
+			renderCount++
+			return Div(Text("long-lived"))
+		}
+	})
+	// Render 100 short-lived items
 	for i := 0; i < 100; i++ {
-		Render(cachedItem(i))
+		Render(shortLivedCache(i))
 	}
-	assert.Equal(t, 100, renderCount)
-	node := cachedItem(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
-	assert.Equal(t, 100, len(node.byKeyExpiration))
-	assert.Equal(t, 100, len(node.byKeyCache))
-	time.Sleep(time.Millisecond * 2)
-	Render(cachedItem(0))
-	node.ClearExpired()
-	assert.Equal(t, 1, len(node.byKeyExpiration))
-	assert.Equal(t, 1, len(node.byKeyCache))
-	node.ClearCache()
-	assert.Equal(t, 0, len(node.byKeyExpiration))
-	assert.Equal(t, 0, len(node.byKeyCache))
+	// Render a long-lived item
+	Render(longLivedCache(999))
+	assert.Equal(t, 101, renderCount)
+	// Wait for expiration of the short-lived items
+	time.Sleep(time.Millisecond * 3)
+	// Re-render some expired items - should trigger new renders
+	for i := 0; i < 10; i++ {
+		Render(shortLivedCache(i))
+	}
+	assert.Equal(t, 111, renderCount) // 101 + 10 re-renders
+	// The long-lived item should still be cached
+	Render(longLivedCache(999))
+	assert.Equal(t, 111, renderCount) // No additional render
+	// Clear cache manually on both
+	shortNode := shortLivedCache(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
+	shortNode.ClearCache()
+	longNode := longLivedCache(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
+	longNode.ClearCache()
+	// Everything should re-render now
+	Render(shortLivedCache(0))
+	assert.Equal(t, 112, renderCount)
+	Render(longLivedCache(999))
+	assert.Equal(t, 113, renderCount)
 }
 
 func TestBackgroundCleaner(t *testing.T) {
-	t.Parallel()
-	cachedItem := CachedPerKeyT(time.Second*2, func(key int) (any, GetElementFunc) {
+	renderCount := 0
+	cachedItem := CachedPerKeyT(time.Millisecond*100, func(key int) (int, GetElementFunc) {
 		return key, func() *Element {
-			return Pf(strconv.Itoa(key))
+			renderCount++
+			return Div(Text("hello"))
 		}
 	})
+	// Render 100 items
 	for i := 0; i < 100; i++ {
 		Render(cachedItem(i))
 	}
-	node := cachedItem(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
-	assert.Equal(t, 100, len(node.byKeyExpiration))
-	assert.Equal(t, 100, len(node.byKeyCache))
+	assert.Equal(t, 100, renderCount)
+	// Items should be cached immediately
+	for i := 0; i < 10; i++ {
+		Render(cachedItem(i))
+	}
+	assert.Equal(t, 100, renderCount) // No additional renders
+	// Wait for expiration and cleanup
 	time.Sleep(time.Second * 3)
-	assert.Equal(t, 0, len(node.byKeyExpiration))
-	assert.Equal(t, 0, len(node.byKeyCache))
+	// Items should be expired and need re-rendering
+	for i := 0; i < 10; i++ {
+		Render(cachedItem(i))
+	}
+	assert.Equal(t, 110, renderCount) // 10 re-renders after expiration
 }
 
 func TestEscapeHtml(t *testing.T) {