Compare commits

No commits in common. "master" and "framework/v1.0.6" have entirely different histories.

master ... framework/v1.0.6

45 changed files with 297 additions and 3566 deletions

@@ -5,8 +5,8 @@ go 1.23.0
require (
github.com/fsnotify/fsnotify v1.7.0
github.com/google/uuid v1.6.0
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
-github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
+github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a
github.com/stretchr/testify v1.9.0
golang.org/x/mod v0.21.0
golang.org/x/sys v0.26.0

@@ -8,10 +8,10 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
-github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b h1:jvfp35fig2TzBjAgw82fe8+7cvaLX9EbipZUlj8FDDY=
-github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b/go.mod h1:FraJsj3NRuLBQDk83ZVa+psbNRNLe+rajVtVhYMEme4=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a h1:pF7vwxObfFZAb0iIeXvgS701ZGRr7s8t8UycAMXTZ54=
+github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a/go.mod h1:FraJsj3NRuLBQDk83ZVa+psbNRNLe+rajVtVhYMEme4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=

@@ -18,7 +18,7 @@ import (
"strings"
)

-const version = "1.0.6"
+const version = "1.0.5"

func main() {
needsSignals := true

@@ -2,7 +2,7 @@ module astgen-project-sample

go 1.23.0

-require github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+require github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a

require (
github.com/go-chi/chi/v5 v5.1.0 // indirect

@@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=

@@ -78,7 +78,7 @@ func downloadTailwindCli() {
log.Fatal(fmt.Sprintf("Unsupported OS/ARCH: %s/%s", os, arch))
}
fileName := fmt.Sprintf(`tailwindcss-%s`, distro)
-url := fmt.Sprintf(`https://github.com/tailwindlabs/tailwindcss/releases/download/v3.4.16/%s`, fileName)
+url := fmt.Sprintf(`https://github.com/tailwindlabs/tailwindcss/releases/latest/download/%s`, fileName)

cmd := fmt.Sprintf(`curl -LO %s`, url)
process.Run(process.NewRawCommand("tailwind-cli-download", cmd, process.ExitOnError))

@@ -18,14 +18,7 @@ func MakeBuildable() {
func Build() {
MakeBuildable()

-_ = os.RemoveAll("./dist")
-
-err := os.Mkdir("./dist", 0755)
-
-if err != nil {
-fmt.Println("Error creating dist directory", err)
-os.Exit(1)
-}
+process.RunOrExit(process.NewRawCommand("", "mkdir -p ./dist"))

if os.Getenv("SKIP_GO_BUILD") != "1" {
process.RunOrExit(process.NewRawCommand("", fmt.Sprintf("go build -tags prod -o ./dist")))

@@ -1,42 +1,7 @@
package run

-import (
-"fmt"
-"github.com/maddalax/htmgo/cli/htmgo/tasks/process"
-"io/fs"
-"os"
-"path/filepath"
-)
+import "github.com/maddalax/htmgo/cli/htmgo/tasks/process"

func Server(flags ...process.RunFlag) error {
-buildDir := "./__htmgo/temp-build"
-_ = os.RemoveAll(buildDir)
-err := os.Mkdir(buildDir, 0755)
-
-if err != nil {
-return err
-}
-
-process.RunOrExit(process.NewRawCommand("", fmt.Sprintf("go build -o %s", buildDir)))
-
-binaryPath := ""
-
-// find the binary that was built
-err = filepath.WalkDir(buildDir, func(path string, d fs.DirEntry, err error) error {
-if d.IsDir() {
-return nil
-}
-binaryPath = path
-return nil
-})
-
-if err != nil {
-return err
-}
-
-if binaryPath == "" {
-return fmt.Errorf("could not find the binary")
-}
-
-return process.Run(process.NewRawCommand("run-server", fmt.Sprintf("./%s", binaryPath), flags...))
+return process.Run(process.NewRawCommand("run-server", "go run .", flags...))
}

@@ -5,7 +5,7 @@ go 1.23.0
require (
github.com/go-chi/chi/v5 v5.1.0
github.com/google/uuid v1.6.0
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
github.com/mattn/go-sqlite3 v1.14.23
github.com/puzpuzpuz/xsync/v3 v3.4.0
)

@@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0=
github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=

@@ -3,7 +3,7 @@ module hackernews
go 1.23.0

require (
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
github.com/microcosm-cc/bluemonday v1.0.27
)

@@ -8,8 +8,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=

@@ -4,7 +4,7 @@ go 1.23.0

require (
github.com/go-chi/chi/v5 v5.1.0
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
)

require github.com/google/uuid v1.6.0 // indirect

@@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=

@@ -3,7 +3,7 @@ module simpleauth
go 1.23.0

require (
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
github.com/mattn/go-sqlite3 v1.14.24
golang.org/x/crypto v0.28.0
)

@@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=

@@ -5,7 +5,7 @@ go 1.23.0
require (
entgo.io/ent v0.14.1
github.com/google/uuid v1.6.0
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
github.com/mattn/go-sqlite3 v1.14.23
)

@@ -33,8 +33,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0=
github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=

@@ -4,7 +4,7 @@ go 1.23.0

require (
github.com/maddalax/htmgo/extensions/websocket v0.0.0-20241109180553-34e816ff7c8a
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
)

require (

@@ -12,8 +12,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/maddalax/htmgo/extensions/websocket v0.0.0-20241109180553-34e816ff7c8a h1:BYVo9NCLHgXvf5pCGUnVg8UE7d9mWOyLgWXYTgVTkyA=
github.com/maddalax/htmgo/extensions/websocket v0.0.0-20241109180553-34e816ff7c8a/go.mod h1:r6/VqntLp7VlAUpIXy3MWZMHs2EkPKJP5rJdDL8lFP4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=

@@ -4,7 +4,7 @@ go 1.23.0

require (
github.com/gobwas/ws v1.4.0
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
github.com/puzpuzpuz/xsync/v3 v3.4.0
github.com/stretchr/testify v1.9.0
)

@@ -10,8 +10,8 @@ github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=

@@ -2,7 +2,7 @@ module github.com/maddalax/htmgo/framework-ui

go 1.23.0

-require github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+require github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a

require (
github.com/go-chi/chi/v5 v5.1.0 // indirect

@@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=

@@ -1,19 +1,21 @@
package h

import (
+"flag"
+"log/slog"
+"sync"
"time"
-
-"github.com/maddalax/htmgo/framework/h/cache"
)

-// A single key to represent the cache entry for non-per-key components.
-const _singleCacheKey = "__htmgo_single_cache_key__"
-
type CachedNode struct {
cb func() *Element
isByKey bool
+byKeyCache map[any]*Entry
+byKeyExpiration map[any]time.Time
+mutex sync.Mutex
duration time.Duration
-cache cache.Store[any, string]
+expiration time.Time
+html string
}

type Entry struct {

@@ -33,45 +35,33 @@ type GetElementFuncT2WithKey[K comparable, T any, T2 any] func(T, T2) (K, GetEle
type GetElementFuncT3WithKey[K comparable, T any, T2 any, T3 any] func(T, T2, T3) (K, GetElementFunc)
type GetElementFuncT4WithKey[K comparable, T any, T2 any, T3 any, T4 any] func(T, T2, T3, T4) (K, GetElementFunc)

-// CacheOption defines a function that configures a CachedNode.
-type CacheOption func(*CachedNode)
-
-// WithCacheStore allows providing a custom cache implementation for a cached component.
-func WithCacheStore(store cache.Store[any, string]) CacheOption {
-return func(c *CachedNode) {
-c.cache = store
-}
-}
-
-// DefaultCacheProvider is a package-level function that creates a default cache instance.
-// Initially, this uses a TTL-based map cache, but could be swapped for an LRU cache later.
-// Advanced users can override this for the entire application.
-var DefaultCacheProvider = func() cache.Store[any, string] {
-return cache.NewTTLStore[any, string]()
-}
+func startExpiredCacheCleaner(node *CachedNode) {
+isTests := flag.Lookup("test.v") != nil
+go func() {
+for {
+if isTests {
+time.Sleep(time.Second)
+} else {
+time.Sleep(time.Minute)
+}
+node.ClearExpired()
+}
+}()
+}

// Cached caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
-// Use CachedPerKey to cache elements per unique identifier.
+// Use CachedPerKey to cache elements per unqiue identifier.
-func Cached(duration time.Duration, cb GetElementFunc, opts ...CacheOption) func() *Element {
+func Cached(duration time.Duration, cb GetElementFunc) func() *Element {
-node := &CachedNode{
-cb: cb,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+cb: cb,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func() *Element {
return element
}

@@ -79,25 +69,17 @@ func Cached(duration time.Duration, cb GetElementFunc, opts ...CacheOption) func

// CachedPerKey caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
-func CachedPerKey[K comparable](duration time.Duration, cb GetElementFuncWithKey[K], opts ...CacheOption) func() *Element {
+func CachedPerKey[K comparable](duration time.Duration, cb GetElementFuncWithKey[K]) func() *Element {
-node := &CachedNode{
-isByKey: true,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+isByKey: true,
+cb: nil,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func() *Element {
key, componentFunc := cb()
return &Element{

@@ -119,25 +101,17 @@ type ByKeyEntry struct {

// CachedPerKeyT caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
-func CachedPerKeyT[K comparable, T any](duration time.Duration, cb GetElementFuncTWithKey[K, T], opts ...CacheOption) func(T) *Element {
+func CachedPerKeyT[K comparable, T any](duration time.Duration, cb GetElementFuncTWithKey[K, T]) func(T) *Element {
-node := &CachedNode{
-isByKey: true,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+isByKey: true,
+cb: nil,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T) *Element {
key, componentFunc := cb(data)
return &Element{

@@ -153,25 +127,17 @@ func CachedPerKeyT[K comparable, T any](duration time.Duration, cb GetElementFun

// CachedPerKeyT2 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
-func CachedPerKeyT2[K comparable, T any, T2 any](duration time.Duration, cb GetElementFuncT2WithKey[K, T, T2], opts ...CacheOption) func(T, T2) *Element {
+func CachedPerKeyT2[K comparable, T any, T2 any](duration time.Duration, cb GetElementFuncT2WithKey[K, T, T2]) func(T, T2) *Element {
-node := &CachedNode{
-isByKey: true,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+isByKey: true,
+cb: nil,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2) *Element {
key, componentFunc := cb(data, data2)
return &Element{

@@ -187,25 +153,17 @@ func CachedPerKeyT2[K comparable, T any, T2 any](duration time.Duration, cb GetE

// CachedPerKeyT3 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
-func CachedPerKeyT3[K comparable, T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3WithKey[K, T, T2, T3], opts ...CacheOption) func(T, T2, T3) *Element {
+func CachedPerKeyT3[K comparable, T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3WithKey[K, T, T2, T3]) func(T, T2, T3) *Element {
-node := &CachedNode{
-isByKey: true,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+isByKey: true,
+cb: nil,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2, data3 T3) *Element {
key, componentFunc := cb(data, data2, data3)
return &Element{

@@ -221,25 +179,17 @@ func CachedPerKeyT3[K comparable, T any, T2 any, T3 any](duration time.Duration,

// CachedPerKeyT4 caches the given element for the given duration. The element is only rendered once per key, and then cached for the given duration.
// The element is cached by the unique identifier that is returned by the callback function.
-func CachedPerKeyT4[K comparable, T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4WithKey[K, T, T2, T3, T4], opts ...CacheOption) func(T, T2, T3, T4) *Element {
+func CachedPerKeyT4[K comparable, T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4WithKey[K, T, T2, T3, T4]) func(T, T2, T3, T4) *Element {
-node := &CachedNode{
-isByKey: true,
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+isByKey: true,
+cb: nil,
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2, data3 T3, data4 T4) *Element {
key, componentFunc := cb(data, data2, data3, data4)
return &Element{

@@ -255,27 +205,19 @@ func CachedPerKeyT4[K comparable, T any, T2 any, T3 any, T4 any](duration time.D

// CachedT caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
-// Use CachedPerKey to cache elements per unique identifier.
+// Use CachedPerKey to cache elements per unqiue identifier.
-func CachedT[T any](duration time.Duration, cb GetElementFuncT[T], opts ...CacheOption) func(T) *Element {
+func CachedT[T any](duration time.Duration, cb GetElementFuncT[T]) func(T) *Element {
-node := &CachedNode{
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+html: "",
+duration: duration,
+mutex: sync.Mutex{},
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T) *Element {
-node.cb = func() *Element {
+element.meta.(*CachedNode).cb = func() *Element {
return cb(data)
}
return element

@@ -284,27 +226,18 @@ func CachedT[T any](duration time.Duration, cb GetElementFuncT[T], opts ...Cache

// CachedT2 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
-// Use CachedPerKey to cache elements per unique identifier.
+// Use CachedPerKey to cache elements per unqiue identifier.
-func CachedT2[T any, T2 any](duration time.Duration, cb GetElementFuncT2[T, T2], opts ...CacheOption) func(T, T2) *Element {
+func CachedT2[T any, T2 any](duration time.Duration, cb GetElementFuncT2[T, T2]) func(T, T2) *Element {
-node := &CachedNode{
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2) *Element {
-node.cb = func() *Element {
+element.meta.(*CachedNode).cb = func() *Element {
return cb(data, data2)
}
return element

@@ -313,27 +246,18 @@ func CachedT2[T any, T2 any](duration time.Duration, cb GetElementFuncT2[T, T2],

// CachedT3 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
-// Use CachedPerKey to cache elements per unique identifier.
+// Use CachedPerKey to cache elements per unqiue identifier.
-func CachedT3[T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3[T, T2, T3], opts ...CacheOption) func(T, T2, T3) *Element {
+func CachedT3[T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3[T, T2, T3]) func(T, T2, T3) *Element {
-node := &CachedNode{
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2, data3 T3) *Element {
-node.cb = func() *Element {
+element.meta.(*CachedNode).cb = func() *Element {
return cb(data, data2, data3)
}
return element

@@ -342,27 +266,18 @@ func CachedT3[T any, T2 any, T3 any](duration time.Duration, cb GetElementFuncT3

// CachedT4 caches the given element for the given duration. The element is only rendered once, and then cached for the given duration.
// Please note this element is globally cached, and not per unique identifier / user.
-// Use CachedPerKey to cache elements per unique identifier.
+// Use CachedPerKey to cache elements per unqiue identifier.
-func CachedT4[T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4[T, T2, T3, T4], opts ...CacheOption) func(T, T2, T3, T4) *Element {
+func CachedT4[T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetElementFuncT4[T, T2, T3, T4]) func(T, T2, T3, T4) *Element {
-node := &CachedNode{
-duration: duration,
-}
-
-for _, opt := range opts {
-opt(node)
-}
-
-if node.cache == nil {
-node.cache = DefaultCacheProvider()
-}
-
element := &Element{
tag: CachedNodeTag,
-meta: node,
+meta: &CachedNode{
+html: "",
+duration: duration,
+},
}
+startExpiredCacheCleaner(element.meta.(*CachedNode))
return func(data T, data2 T2, data3 T3, data4 T4) *Element {
-node.cb = func() *Element {
+element.meta.(*CachedNode).cb = func() *Element {
return cb(data, data2, data3, data4)
}
return element

@@ -371,24 +286,70 @@ func CachedT4[T any, T2 any, T3 any, T4 any](duration time.Duration, cb GetEleme

// ClearCache clears the cached HTML of the element. This is called automatically by the framework.
func (c *CachedNode) ClearCache() {
-c.cache.Purge()
+c.html = ""
+if c.byKeyCache != nil {
+for key := range c.byKeyCache {
+delete(c.byKeyCache, key)
+}
+}
+if c.byKeyExpiration != nil {
+for key := range c.byKeyExpiration {
+delete(c.byKeyExpiration, key)
+}
+}
}

-// ClearExpired is deprecated and does nothing. Cache expiration is now handled by the Store implementation.
+// ClearExpired clears all expired cached HTML of the element. This is called automatically by the framework.
func (c *CachedNode) ClearExpired() {
-// No-op for backward compatibility
+c.mutex.Lock()
+defer c.mutex.Unlock()
+deletedCount := 0
+if c.isByKey {
+if c.byKeyCache != nil && c.byKeyExpiration != nil {
+for key := range c.byKeyCache {
+expir, ok := c.byKeyExpiration[key]
+if ok && expir.Before(time.Now()) {
+delete(c.byKeyCache, key)
+delete(c.byKeyExpiration, key)
+deletedCount++
+}
+}
+}
+} else {
+now := time.Now()
+expiration := c.expiration
+if c.html != "" && expiration.Before(now) {
+c.html = ""
+deletedCount++
+}
+}
+
+if deletedCount > 0 {
+slog.Debug("Deleted expired cache entries", slog.Int("count", deletedCount))
+}
}

func (c *CachedNode) Render(ctx *RenderContext) {
if c.isByKey {
panic("CachedPerKey should not be rendered directly")
} else {
-// For simple cached components, we use a single key
-// Use GetOrCompute for atomic check-and-set
-html := c.cache.GetOrCompute(_singleCacheKey, func() string {
-return Render(c.cb())
-}, c.duration)
-ctx.builder.WriteString(html)
+c.mutex.Lock()
+defer c.mutex.Unlock()
+
+now := time.Now()
+expiration := c.expiration
+
+if expiration.IsZero() || expiration.Before(now) {
+c.html = ""
+c.expiration = now.Add(c.duration)
+}
+
+if c.html != "" {
+ctx.builder.WriteString(c.html)
+} else {
+c.html = Render(c.cb())
+ctx.builder.WriteString(c.html)
+}
}
}

@@ -396,9 +357,47 @@ func (c *ByKeyEntry) Render(ctx *RenderContext) {
key := c.key
parentMeta := c.parent.meta.(*CachedNode)

-// Use GetOrCompute for atomic check-and-set
-html := parentMeta.cache.GetOrCompute(key, func() string {
-return Render(c.cb())
-}, parentMeta.duration)
+parentMeta.mutex.Lock()
+defer parentMeta.mutex.Unlock()
+
+if parentMeta.byKeyCache == nil {
+parentMeta.byKeyCache = make(map[any]*Entry)
+}
+
+if parentMeta.byKeyExpiration == nil {
+parentMeta.byKeyExpiration = make(map[any]time.Time)
+}
+
+var setAndWrite = func() {
+html := Render(c.cb())
+parentMeta.byKeyCache[key] = &Entry{
+expiration: parentMeta.expiration,
+html: html,
+}
ctx.builder.WriteString(html)
}
+
+expEntry, ok := parentMeta.byKeyExpiration[key]
+
+if !ok {
+parentMeta.byKeyExpiration[key] = time.Now().Add(parentMeta.duration)
+} else {
+// key is expired
+if expEntry.Before(time.Now()) {
+parentMeta.byKeyExpiration[key] = time.Now().Add(parentMeta.duration)
+setAndWrite()
+return
+}
+}
+
+entry := parentMeta.byKeyCache[key]
+
+// not in cache
+if entry == nil {
+setAndWrite()
+return
+}
+
+// exists in cache and not expired
+ctx.builder.WriteString(entry.html)
+}

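Apart from the extra variadic CacheOption argument on the master side, the public caching helpers keep the same call shape on both branches. As a point of reference, a minimal usage sketch; the package and component names are illustrative, adapted from the example in the README shown below:

```go
package pages

import (
	"time"

	"github.com/maddalax/htmgo/framework/h"
)

// Globally cached: rendered once, then reused for 10 minutes for every request.
var Footer = h.Cached(10*time.Minute, func() *h.Element {
	return h.Div(h.Text("cached footer"))
})

// Cached per key: one cache entry per user id, each kept for 15 minutes.
var UserProfile = h.CachedPerKeyT(15*time.Minute, func(userID int) (int, h.GetElementFunc) {
	return userID, func() *h.Element {
		return h.Div(h.Text("User profile"))
	}
})

// A page would then render Footer() and UserProfile(42) like any other element.
```
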
framework/h/cache/README.md (vendored), 292 lines deleted:

@@ -1,292 +0,0 @@

# Pluggable Cache System for htmgo

## Overview

The htmgo framework now supports a pluggable cache system that allows developers to provide their own caching
implementations. This addresses potential memory exhaustion vulnerabilities in the previous TTL-only caching approach
and provides greater flexibility for production deployments.

## Motivation

The previous caching mechanism relied exclusively on Time-To-Live (TTL) expiration, which could lead to:

- **Unbounded memory growth**: High-cardinality cache keys could consume all available memory
- **DDoS vulnerability**: Attackers could exploit this by generating many unique cache keys
- **Limited flexibility**: No support for size-bounded caches or distributed caching solutions

## Architecture

The new system introduces a generic `Store[K comparable, V any]` interface:

```go
package main

import "time"

type Store[K comparable, V any] interface {
    // Set adds or updates an entry in the cache with the given TTL
    Set(key K, value V, ttl time.Duration)

    // GetOrCompute atomically gets an existing value or computes and stores a new value
    // This prevents duplicate computation when multiple goroutines request the same key
    GetOrCompute(key K, compute func() V, ttl time.Duration) V

    // Delete removes an entry from the cache
    Delete(key K)

    // Purge removes all items from the cache
    Purge()

    // Close releases any resources used by the cache
    Close()
}
```

### Atomic Guarantees

The `GetOrCompute` method provides **atomic guarantees** to prevent cache stampedes and duplicate computations:
- When multiple goroutines request the same uncached key simultaneously, only one will execute the compute function
- Other goroutines will wait and receive the computed result
- This eliminates race conditions that could cause duplicate expensive operations like database queries or renders
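
A rough sketch of what that guarantee means in practice (not part of the original README; the TTL store, key, and HTML string below are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/maddalax/htmgo/framework/h/cache"
)

func main() {
	store := cache.NewTTLStore[any, string]()
	defer store.Close()

	var computed int32
	var wg sync.WaitGroup

	// Ten goroutines ask for the same uncached key at the same time.
	// With an atomic GetOrCompute, the compute function runs once and
	// every caller receives the same rendered string.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = store.GetOrCompute("page:home", func() string {
				atomic.AddInt32(&computed, 1)
				return "<div>expensive render</div>"
			}, time.Minute)
		}()
	}
	wg.Wait()

	fmt.Println("compute ran", atomic.LoadInt32(&computed), "time(s)")
}
```
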
## Usage

### Using the Default Cache

By default, htmgo continues to use a TTL-based cache for backward compatibility:

```go
// No changes needed - works exactly as before
UserProfile := h.CachedPerKeyT(
    15*time.Minute,
    func(userID int) (int, h.GetElementFunc) {
        return userID, func() *h.Element {
            return h.Div(h.Text("User profile"))
        }
    },
)
```

### Using a Custom Cache

You can provide your own cache implementation using the `WithCacheStore` option:

```go
package main

import (
    "github.com/maddalax/htmgo/framework/h"
    "github.com/maddalax/htmgo/framework/h/cache"
    "time"
)

var (
    // Create a memory-bounded LRU cache
    lruCache = cache.NewLRUStore[any, string](10_000) // Max 10,000 items

    // Use it with a cached component
    UserProfile = h.CachedPerKeyT(
        15*time.Minute,
        func (userID int) (int, h.GetElementFunc) {
            return userID, func () *h.Element {
                return h.Div(h.Text("User profile"))
            }
        },
        h.WithCacheStore(lruCache), // Pass the custom cache
    )
)
```

### Changing the Default Cache Globally

You can override the default cache provider for your entire application:

```go
package main

import (
    "github.com/maddalax/htmgo/framework/h"
    "github.com/maddalax/htmgo/framework/h/cache"
)

func init() {
    // All cached components will use LRU by default
    h.DefaultCacheProvider = func () cache.Store[any, string] {
        return cache.NewLRUStore[any, string](50_000)
    }
}
```

## Example Implementations

### Built-in Stores

1. **TTLStore** (default): Time-based expiration with periodic cleanup
2. **LRUStore** (example): Least Recently Used eviction with size limits
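
For orientation, a minimal sketch of constructing each built-in store (the capacity is illustrative):

```go
import "github.com/maddalax/htmgo/framework/h/cache"

var (
	// TTL store: entries expire after their per-item TTL (this is what DefaultCacheProvider returns).
	ttlStore = cache.NewTTLStore[any, string]()

	// LRU store: bounded in size, evicting the least recently used entries once full.
	lruStore = cache.NewLRUStore[any, string](10_000)
)
```
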
### Integrating Third-Party Libraries

Here's an example of integrating the high-performance `go-freelru` library:

```go
import (
	"time"

	"github.com/elastic/go-freelru"
	"github.com/maddalax/htmgo/framework/h/cache"
)

type FreeLRUAdapter[K comparable, V any] struct {
	lru *freelru.LRU[K, V]
}

func NewFreeLRUAdapter[K comparable, V any](size uint32) cache.Store[K, V] {
	lru, err := freelru.New[K, V](size, nil)
	if err != nil {
		panic(err)
	}
	return &FreeLRUAdapter[K, V]{lru: lru}
}

func (s *FreeLRUAdapter[K, V]) Set(key K, value V, ttl time.Duration) {
	// Note: go-freelru doesn't support per-item TTL
	s.lru.Add(key, value)
}

func (s *FreeLRUAdapter[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
	// Check if exists in cache
	if val, ok := s.lru.Get(key); ok {
		return val
	}

	// Not in cache, compute and store
	// Note: This simple implementation doesn't provide true atomic guarantees
	// For production use, you'd need additional synchronization
	value := compute()
	s.lru.Add(key, value)
	return value
}

func (s *FreeLRUAdapter[K, V]) Delete(key K) {
	s.lru.Remove(key)
}

func (s *FreeLRUAdapter[K, V]) Purge() {
	s.lru.Clear()
}

func (s *FreeLRUAdapter[K, V]) Close() {
	// No-op for this implementation
}
```

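Usage would then mirror the built-in stores; a hedged sketch, where the component body and capacity are placeholders:

```go
UserProfile := h.CachedPerKeyT(
	15*time.Minute,
	func(userID int) (int, h.GetElementFunc) {
		return userID, func() *h.Element {
			return h.Div(h.Text("User profile"))
		}
	},
	h.WithCacheStore(NewFreeLRUAdapter[any, string](100_000)), // adapter defined above
)
```
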
### Redis-based Distributed Cache

```go
type RedisStore struct {
	client *redis.Client
	prefix string
}

func (s *RedisStore) Set(key any, value string, ttl time.Duration) {
	keyStr := fmt.Sprintf("%s:%v", s.prefix, key)
	s.client.Set(context.Background(), keyStr, value, ttl)
}

func (s *RedisStore) GetOrCompute(key any, compute func() string, ttl time.Duration) string {
	keyStr := fmt.Sprintf("%s:%v", s.prefix, key)
	ctx := context.Background()

	// Try to get from Redis
	val, err := s.client.Get(ctx, keyStr).Result()
	if err == nil {
		return val
	}

	// Not in cache, compute new value
	// For true atomic guarantees, use Redis SET with NX option
	value := compute()
	s.client.Set(ctx, keyStr, value, ttl)
	return value
}

// ... implement other methods
```

## Migration Guide

### For Existing Applications

The changes are backward compatible. Existing applications will continue to work without modifications. The function signatures now accept optional `CacheOption` parameters, but these can be omitted.

### Recommended Migration Path

1. **Assess your caching needs**: Determine if you need memory bounds or distributed caching
2. **Choose an implementation**: Use the built-in LRUStore or integrate a third-party library
3. **Update critical components**: Start with high-traffic or high-cardinality cached components
4. **Monitor memory usage**: Ensure your cache size limits are appropriate

## Security Considerations

### Memory-Bounded Caches

For public-facing applications, we strongly recommend using a memory-bounded cache to prevent DoS attacks:

```go
// Limit the cache to a reasonable size based on your server's memory
store := cache.NewLRUStore[any, string](100_000)

// Use it for all user-specific caching
UserContent := h.CachedPerKey(
	5*time.Minute,
	getUserContent,
	h.WithCacheStore(store),
)
```

### Cache Key Validation

When using user input as cache keys, always validate and sanitize:

```go
func cacheKeyForUser(userInput string) string {
	// Limit length and remove special characters
	key := strings.TrimSpace(userInput)
	if len(key) > 100 {
		key = key[:100]
	}
	return regexp.MustCompile(`[^a-zA-Z0-9_-]`).ReplaceAllString(key, "")
}
```

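For context, a minimal sketch of how such a sanitizer might feed a per-key cached component; the `SearchResults` name and the rendered body are illustrative only:

```go
SearchResults := h.CachedPerKeyT(
	5*time.Minute,
	func(rawQuery string) (string, h.GetElementFunc) {
		key := cacheKeyForUser(rawQuery) // the sanitized value becomes the cache key
		return key, func() *h.Element {
			return h.Div(h.Text("Results for " + key))
		}
	},
)
```
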
## Performance Considerations

1. **TTLStore**: Best for small caches with predictable key patterns
2. **LRUStore**: Good general-purpose choice with memory bounds
3. **Third-party stores**: Consider `go-freelru` or `theine-go` for high-performance needs
4. **Distributed stores**: Use Redis/Memcached for multi-instance deployments
5. **Atomic operations**: The `GetOrCompute` method prevents duplicate computation, significantly improving performance under high concurrency

### Concurrency Benefits

The atomic `GetOrCompute` method provides significant performance benefits:

- **Prevents cache stampedes**: When a popular cache entry expires, only one goroutine recomputes it
- **Reduces load**: Expensive operations (database queries, API calls, complex renders) are never duplicated
- **Improves response times**: Waiting goroutines receive the result faster than computing it themselves

## Best Practices

1. **Set appropriate cache sizes**: Balance memory usage with hit rates
2. **Use consistent TTLs**: Align them with your data update patterns
3. **Monitor cache metrics**: Track hit rates, evictions, and memory usage
4. **Handle cache failures gracefully**: Caches should enhance functionality, not break it
5. **Close caches properly**: Call `Close()` during graceful shutdown (see the sketch after this list)
6. **Implement atomic guarantees**: Ensure your `GetOrCompute` implementation prevents concurrent computation
7. **Test concurrent access**: Verify your cache handles simultaneous requests correctly

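As a rough illustration of item 5, here is a minimal shutdown sketch; the server wiring is omitted, and `store` is assumed to be the same value passed to `h.WithCacheStore`:

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/maddalax/htmgo/framework/h/cache"
)

func main() {
	// The store your cached components were configured with.
	store := cache.NewLRUStore[any, string](10_000)

	// Wait for SIGINT/SIGTERM, then release the cache's background resources.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop

	store.Close() // stops the cleanup goroutine started by the store
}
```
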
## Future Enhancements

- Built-in metrics and monitoring hooks
- Automatic size estimation for cached values
- Warming and preloading strategies
- Cache invalidation patterns

framework/h/cache/example_test.go (vendored)
@@ -1,318 +0,0 @@
package cache_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/maddalax/htmgo/framework/h"
|
|
||||||
"github.com/maddalax/htmgo/framework/h/cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Example demonstrates basic caching with the default TTL store
|
|
||||||
func ExampleCached() {
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Create a cached component that expires after 5 minutes
|
|
||||||
CachedHeader := h.Cached(5*time.Minute, func() *h.Element {
|
|
||||||
renderCount++
|
|
||||||
return h.Header(
|
|
||||||
h.H1(h.Text("Welcome to our site")),
|
|
||||||
h.P(h.Text(fmt.Sprintf("Rendered %d times", renderCount))),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
// First render - will execute the function
|
|
||||||
html1 := h.Render(CachedHeader())
|
|
||||||
fmt.Println("Render count:", renderCount)
|
|
||||||
|
|
||||||
// Second render - will use cached HTML
|
|
||||||
html2 := h.Render(CachedHeader())
|
|
||||||
fmt.Println("Render count:", renderCount)
|
|
||||||
fmt.Println("Same HTML:", html1 == html2)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Render count: 1
|
|
||||||
// Render count: 1
|
|
||||||
// Same HTML: true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates per-key caching for user-specific content
|
|
||||||
func ExampleCachedPerKeyT() {
|
|
||||||
type User struct {
|
|
||||||
ID int
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
renderCounts := make(map[int]int)
|
|
||||||
|
|
||||||
// Create a per-user cached component
|
|
||||||
UserProfile := h.CachedPerKeyT(15*time.Minute, func(user User) (int, h.GetElementFunc) {
|
|
||||||
// Use user ID as the cache key
|
|
||||||
return user.ID, func() *h.Element {
|
|
||||||
renderCounts[user.ID]++
|
|
||||||
return h.Div(
|
|
||||||
h.Class("user-profile"),
|
|
||||||
h.H2(h.Text(user.Name)),
|
|
||||||
h.P(h.Text(fmt.Sprintf("User ID: %d", user.ID))),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
alice := User{ID: 1, Name: "Alice"}
|
|
||||||
bob := User{ID: 2, Name: "Bob"}
|
|
||||||
|
|
||||||
// Render Alice's profile - will execute
|
|
||||||
h.Render(UserProfile(alice))
|
|
||||||
fmt.Printf("Alice render count: %d\n", renderCounts[1])
|
|
||||||
|
|
||||||
// Render Bob's profile - will execute
|
|
||||||
h.Render(UserProfile(bob))
|
|
||||||
fmt.Printf("Bob render count: %d\n", renderCounts[2])
|
|
||||||
|
|
||||||
// Render Alice's profile again - will use cache
|
|
||||||
h.Render(UserProfile(alice))
|
|
||||||
fmt.Printf("Alice render count after cache hit: %d\n", renderCounts[1])
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Alice render count: 1
|
|
||||||
// Bob render count: 1
|
|
||||||
// Alice render count after cache hit: 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates using a memory-bounded LRU cache
|
|
||||||
func ExampleWithCacheStore_lru() {
|
|
||||||
// Create an LRU cache that holds maximum 1000 items
|
|
||||||
lruStore := cache.NewLRUStore[any, string](1000)
|
|
||||||
defer lruStore.Close()
|
|
||||||
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Use the LRU cache for a component
|
|
||||||
ProductCard := h.CachedPerKeyT(1*time.Hour,
|
|
||||||
func(productID int) (int, h.GetElementFunc) {
|
|
||||||
return productID, func() *h.Element {
|
|
||||||
renderCount++
|
|
||||||
// Simulate fetching product data
|
|
||||||
return h.Div(
|
|
||||||
h.H3(h.Text(fmt.Sprintf("Product #%d", productID))),
|
|
||||||
h.P(h.Text("$99.99")),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
h.WithCacheStore(lruStore), // Use custom cache store
|
|
||||||
)
|
|
||||||
|
|
||||||
// Render many products
|
|
||||||
for i := 0; i < 1500; i++ {
|
|
||||||
h.Render(ProductCard(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Due to LRU eviction, only 1000 items are cached
|
|
||||||
// Earlier items (0-499) were evicted
|
|
||||||
fmt.Printf("Total renders: %d\n", renderCount)
|
|
||||||
fmt.Printf("Expected renders: %d (due to LRU eviction)\n", 1500)
|
|
||||||
|
|
||||||
// Accessing an evicted item will cause a re-render
|
|
||||||
h.Render(ProductCard(0))
|
|
||||||
fmt.Printf("After accessing evicted item: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Total renders: 1500
|
|
||||||
// Expected renders: 1500 (due to LRU eviction)
|
|
||||||
// After accessing evicted item: 1501
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockDistributedCache simulates a distributed cache like Redis
|
|
||||||
type MockDistributedCache struct {
|
|
||||||
data map[string]string
|
|
||||||
mutex sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// DistributedCacheAdapter makes MockDistributedCache compatible with cache.Store interface
|
|
||||||
type DistributedCacheAdapter struct {
|
|
||||||
cache *MockDistributedCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *DistributedCacheAdapter) Set(key any, value string, ttl time.Duration) {
|
|
||||||
a.cache.mutex.Lock()
|
|
||||||
defer a.cache.mutex.Unlock()
|
|
||||||
// In a real implementation, you'd set TTL in Redis
|
|
||||||
keyStr := fmt.Sprintf("htmgo:%v", key)
|
|
||||||
a.cache.data[keyStr] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *DistributedCacheAdapter) Delete(key any) {
|
|
||||||
a.cache.mutex.Lock()
|
|
||||||
defer a.cache.mutex.Unlock()
|
|
||||||
keyStr := fmt.Sprintf("htmgo:%v", key)
|
|
||||||
delete(a.cache.data, keyStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *DistributedCacheAdapter) Purge() {
|
|
||||||
a.cache.mutex.Lock()
|
|
||||||
defer a.cache.mutex.Unlock()
|
|
||||||
a.cache.data = make(map[string]string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *DistributedCacheAdapter) Close() {
|
|
||||||
// Clean up connections in real implementation
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *DistributedCacheAdapter) GetOrCompute(key any, compute func() string, ttl time.Duration) string {
|
|
||||||
a.cache.mutex.Lock()
|
|
||||||
defer a.cache.mutex.Unlock()
|
|
||||||
|
|
||||||
keyStr := fmt.Sprintf("htmgo:%v", key)
|
|
||||||
|
|
||||||
// Check if exists
|
|
||||||
if val, ok := a.cache.data[keyStr]; ok {
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute and store
|
|
||||||
value := compute()
|
|
||||||
a.cache.data[keyStr] = value
|
|
||||||
// In a real implementation, you'd also set TTL in Redis
|
|
||||||
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates creating a custom cache adapter
|
|
||||||
func ExampleDistributedCacheAdapter() {
|
|
||||||
|
|
||||||
// Create the distributed cache
|
|
||||||
distCache := &MockDistributedCache{
|
|
||||||
data: make(map[string]string),
|
|
||||||
}
|
|
||||||
adapter := &DistributedCacheAdapter{cache: distCache}
|
|
||||||
|
|
||||||
// Use it with a cached component
|
|
||||||
SharedComponent := h.Cached(10*time.Minute, func() *h.Element {
|
|
||||||
return h.Div(h.Text("Shared across all servers"))
|
|
||||||
}, h.WithCacheStore(adapter))
|
|
||||||
|
|
||||||
html := h.Render(SharedComponent())
|
|
||||||
fmt.Printf("Cached in distributed store: %v\n", len(distCache.data) > 0)
|
|
||||||
fmt.Printf("HTML length: %d\n", len(html))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Cached in distributed store: true
|
|
||||||
// HTML length: 36
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates overriding the default cache provider globally
|
|
||||||
func ExampleDefaultCacheProvider() {
|
|
||||||
// Save the original provider to restore it later
|
|
||||||
originalProvider := h.DefaultCacheProvider
|
|
||||||
defer func() {
|
|
||||||
h.DefaultCacheProvider = originalProvider
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Override the default to use LRU for all cached components
|
|
||||||
h.DefaultCacheProvider = func() cache.Store[any, string] {
|
|
||||||
// All cached components will use 10,000 item LRU cache by default
|
|
||||||
return cache.NewLRUStore[any, string](10_000)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now all cached components use LRU by default
|
|
||||||
renderCount := 0
|
|
||||||
AutoLRUComponent := h.Cached(1*time.Hour, func() *h.Element {
|
|
||||||
renderCount++
|
|
||||||
return h.Div(h.Text("Using LRU by default"))
|
|
||||||
})
|
|
||||||
|
|
||||||
h.Render(AutoLRUComponent())
|
|
||||||
fmt.Printf("Render count: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Render count: 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates caching with complex keys
|
|
||||||
func ExampleCachedPerKeyT3() {
|
|
||||||
type FilterOptions struct {
|
|
||||||
Category string
|
|
||||||
MinPrice float64
|
|
||||||
MaxPrice float64
|
|
||||||
}
|
|
||||||
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Cache filtered product lists with composite keys
|
|
||||||
FilteredProducts := h.CachedPerKeyT3(30*time.Minute,
|
|
||||||
func(category string, minPrice, maxPrice float64) (FilterOptions, h.GetElementFunc) {
|
|
||||||
// Create composite key from all parameters
|
|
||||||
key := FilterOptions{
|
|
||||||
Category: category,
|
|
||||||
MinPrice: minPrice,
|
|
||||||
MaxPrice: maxPrice,
|
|
||||||
}
|
|
||||||
return key, func() *h.Element {
|
|
||||||
renderCount++
|
|
||||||
// Simulate database query with filters
|
|
||||||
return h.Div(
|
|
||||||
h.H3(h.Text(fmt.Sprintf("Products in %s", category))),
|
|
||||||
h.P(h.Text(fmt.Sprintf("Price range: $%.2f - $%.2f", minPrice, maxPrice))),
|
|
||||||
h.Ul(
|
|
||||||
h.Li(h.Text("Product 1")),
|
|
||||||
h.Li(h.Text("Product 2")),
|
|
||||||
h.Li(h.Text("Product 3")),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
// First query - will render
|
|
||||||
h.Render(FilteredProducts("Electronics", 100.0, 500.0))
|
|
||||||
fmt.Printf("Render count: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Same query - will use cache
|
|
||||||
h.Render(FilteredProducts("Electronics", 100.0, 500.0))
|
|
||||||
fmt.Printf("Render count after cache hit: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Different query - will render
|
|
||||||
h.Render(FilteredProducts("Electronics", 200.0, 600.0))
|
|
||||||
fmt.Printf("Render count after new query: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Render count: 1
|
|
||||||
// Render count after cache hit: 1
|
|
||||||
// Render count after new query: 2
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example demonstrates cache expiration and refresh
|
|
||||||
func ExampleCached_expiration() {
|
|
||||||
renderCount := 0
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
// Cache with very short TTL for demonstration
|
|
||||||
TimeSensitive := h.Cached(100*time.Millisecond, func() *h.Element {
|
|
||||||
renderCount++
|
|
||||||
return h.Div(
|
|
||||||
h.Text(fmt.Sprintf("Generated at: %s (render #%d)",
|
|
||||||
now.Format("15:04:05"), renderCount)),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
// First render
|
|
||||||
h.Render(TimeSensitive())
|
|
||||||
fmt.Printf("Render count: %d\n", renderCount)
|
|
||||||
|
|
||||||
// Immediate second render - uses cache
|
|
||||||
h.Render(TimeSensitive())
|
|
||||||
fmt.Printf("Render count (cached): %d\n", renderCount)
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Render after expiration - will re-execute
|
|
||||||
h.Render(TimeSensitive())
|
|
||||||
fmt.Printf("Render count (after expiration): %d\n", renderCount)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// Render count: 1
|
|
||||||
// Render count (cached): 1
|
|
||||||
// Render count (after expiration): 2
|
|
||||||
}
|
|
||||||
framework/h/cache/examples/atomic_example.go (vendored)
@@ -1,186 +0,0 @@
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/maddalax/htmgo/framework/h"
|
|
||||||
"github.com/maddalax/htmgo/framework/h/cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This example demonstrates the atomic guarantees of GetOrCompute,
|
|
||||||
// showing how it prevents duplicate expensive computations when
|
|
||||||
// multiple goroutines request the same uncached key simultaneously.
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
fmt.Println("=== Atomic Cache Example ===")
|
|
||||||
|
|
||||||
// Demonstrate the problem without atomic guarantees
|
|
||||||
demonstrateProblem()
|
|
||||||
|
|
||||||
fmt.Println("\n=== Now with GetOrCompute atomic guarantees ===")
|
|
||||||
|
|
||||||
// Show the solution with GetOrCompute
|
|
||||||
demonstrateSolution()
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// demonstrateProblem shows what happens without atomic guarantees
|
|
||||||
func demonstrateProblem() {
|
|
||||||
fmt.Println("Without atomic guarantees (simulated):")
|
|
||||||
fmt.Println("Multiple goroutines checking cache and computing...")
|
|
||||||
|
|
||||||
var computeCount int32
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Simulate 10 goroutines trying to get the same uncached value
|
|
||||||
for i := range 10 {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
// Simulate checking cache (not found)
|
|
||||||
time.Sleep(time.Millisecond) // Small delay to increase collision chance
|
|
||||||
|
|
||||||
// All goroutines think the value is not cached
|
|
||||||
// so they all compute it
|
|
||||||
atomic.AddInt32(&computeCount, 1)
|
|
||||||
fmt.Printf("Goroutine %d: Computing expensive value...\n", id)
|
|
||||||
|
|
||||||
// Simulate expensive computation
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
fmt.Printf("\nResult: Computed %d times (wasteful!)\n", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// demonstrateSolution shows how GetOrCompute solves the problem
|
|
||||||
func demonstrateSolution() {
|
|
||||||
// Create a cache store
|
|
||||||
store := cache.NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
var computeCount int32
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
fmt.Println("With GetOrCompute atomic guarantees:")
|
|
||||||
fmt.Println("Multiple goroutines requesting the same key...")
|
|
||||||
|
|
||||||
startTime := time.Now()
|
|
||||||
|
|
||||||
// Launch 10 goroutines trying to get the same value
|
|
||||||
for i := range 10 {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
// All goroutines call GetOrCompute at the same time
|
|
||||||
result := store.GetOrCompute("expensive-key", func() string {
|
|
||||||
// Only ONE goroutine will execute this function
|
|
||||||
count := atomic.AddInt32(&computeCount, 1)
|
|
||||||
fmt.Printf("Goroutine %d: Computing expensive value (computation #%d)\n", id, count)
|
|
||||||
|
|
||||||
// Simulate expensive computation
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
|
|
||||||
return fmt.Sprintf("Expensive result computed by goroutine %d", id)
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
fmt.Printf("Goroutine %d: Got result: %s\n", id, result)
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
elapsed := time.Since(startTime)
|
|
||||||
|
|
||||||
fmt.Printf("\nResult: Computed only %d time (efficient!)\n", computeCount)
|
|
||||||
fmt.Printf("Total time: %v (vs ~500ms if all computed)\n", elapsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example with htmgo cached components
|
|
||||||
func ExampleCachedComponent() {
|
|
||||||
fmt.Println("\n=== Real-world htmgo Example ===")
|
|
||||||
|
|
||||||
var renderCount int32
|
|
||||||
|
|
||||||
// Create a cached component that simulates fetching user data
|
|
||||||
UserProfile := h.CachedPerKeyT(5*time.Minute, func(userID int) (int, h.GetElementFunc) {
|
|
||||||
return userID, func() *h.Element {
|
|
||||||
count := atomic.AddInt32(&renderCount, 1)
|
|
||||||
fmt.Printf("Fetching and rendering user %d (render #%d)\n", userID, count)
|
|
||||||
|
|
||||||
// Simulate database query
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
|
|
||||||
return h.Div(
|
|
||||||
h.H2(h.Text(fmt.Sprintf("User Profile #%d", userID))),
|
|
||||||
h.P(h.Text("This was expensive to compute!")),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Simulate multiple concurrent requests for the same user
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for i := range 5 {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(requestID int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
// All requests are for user 123
|
|
||||||
html := h.Render(UserProfile(123))
|
|
||||||
fmt.Printf("Request %d: Received %d bytes of HTML\n", requestID, len(html))
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
fmt.Printf("\nTotal renders: %d (only one, despite 5 concurrent requests!)\n", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example showing cache stampede prevention
|
|
||||||
func ExampleCacheStampedePrevention() {
|
|
||||||
fmt.Println("\n=== Cache Stampede Prevention ===")
|
|
||||||
|
|
||||||
store := cache.NewLRUStore[string, string](100)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
var dbQueries int32
|
|
||||||
|
|
||||||
// Simulate a popular cache key expiring
|
|
||||||
fetchPopularData := func(key string) string {
|
|
||||||
return store.GetOrCompute(key, func() string {
|
|
||||||
queries := atomic.AddInt32(&dbQueries, 1)
|
|
||||||
fmt.Printf("Database query #%d for key: %s\n", queries, key)
|
|
||||||
|
|
||||||
// Simulate slow database query
|
|
||||||
time.Sleep(200 * time.Millisecond)
|
|
||||||
|
|
||||||
return fmt.Sprintf("Popular data for %s", key)
|
|
||||||
}, 100*time.Millisecond) // Short TTL to simulate expiration
|
|
||||||
}
|
|
||||||
|
|
||||||
// First, populate the cache
|
|
||||||
_ = fetchPopularData("trending-posts")
|
|
||||||
fmt.Println("Cache populated")
|
|
||||||
|
|
||||||
// Wait for it to expire
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
fmt.Println("\nCache expired, simulating traffic spike...")
|
|
||||||
|
|
||||||
// Simulate 20 concurrent requests right after expiration
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for i := 0; i < 20; i++ {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
data := fetchPopularData("trending-posts")
|
|
||||||
fmt.Printf("Request %d: Got data: %s\n", id, data)
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
fmt.Printf("\nTotal database queries: %d (prevented 19 redundant queries!)\n", dbQueries)
|
|
||||||
}
|
|
||||||
framework/h/cache/interface.go (vendored)
@@ -1,28 +0,0 @@
package cache

import (
	"time"
)

// Store defines the interface for a pluggable cache.
// This allows users to provide their own caching implementations, such as LRU, LFU,
// or even distributed caches. The cache implementation is responsible for handling
// its own eviction policies (TTL, size limits, etc.).
type Store[K comparable, V any] interface {
	// Set adds or updates an entry in the cache. The implementation should handle the TTL.
	Set(key K, value V, ttl time.Duration)

	// GetOrCompute atomically gets an existing value or computes and stores a new value.
	// This method prevents duplicate computation when multiple goroutines request the same key.
	// The compute function is called only if the key is not found or has expired.
	GetOrCompute(key K, compute func() V, ttl time.Duration) V

	// Delete removes an entry from the cache.
	Delete(key K)

	// Purge removes all items from the cache.
	Purge()

	// Close releases any resources used by the cache, such as background goroutines.
	Close()
}
framework/h/cache/lru_store_example.go (vendored)
@@ -1,200 +0,0 @@
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/list"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LRUStore is an example of a memory-bounded cache implementation using
|
|
||||||
// the Least Recently Used (LRU) eviction policy. This demonstrates how
|
|
||||||
// to create a custom cache store that prevents unbounded memory growth.
|
|
||||||
//
|
|
||||||
// This is a simple example implementation. For production use, consider
|
|
||||||
// using optimized libraries like github.com/elastic/go-freelru or
|
|
||||||
// github.com/Yiling-J/theine-go.
|
|
||||||
type LRUStore[K comparable, V any] struct {
|
|
||||||
maxSize int
|
|
||||||
cache map[K]*list.Element
|
|
||||||
lru *list.List
|
|
||||||
mutex sync.RWMutex
|
|
||||||
closeChan chan struct{}
|
|
||||||
closeOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
type lruEntry[K comparable, V any] struct {
|
|
||||||
key K
|
|
||||||
value V
|
|
||||||
expiration time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLRUStore creates a new LRU cache with the specified maximum size.
|
|
||||||
// When the cache reaches maxSize, the least recently used items are evicted.
|
|
||||||
func NewLRUStore[K comparable, V any](maxSize int) Store[K, V] {
|
|
||||||
if maxSize <= 0 {
|
|
||||||
panic("LRUStore maxSize must be positive")
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &LRUStore[K, V]{
|
|
||||||
maxSize: maxSize,
|
|
||||||
cache: make(map[K]*list.Element),
|
|
||||||
lru: list.New(),
|
|
||||||
closeChan: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start a goroutine to periodically clean up expired entries
|
|
||||||
go s.cleanupExpired()
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set adds or updates an entry in the cache with the given TTL.
|
|
||||||
// If the cache is at capacity, the least recently used item is evicted.
|
|
||||||
func (s *LRUStore[K, V]) Set(key K, value V, ttl time.Duration) {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
|
|
||||||
expiration := time.Now().Add(ttl)
|
|
||||||
|
|
||||||
// Check if key already exists
|
|
||||||
if elem, exists := s.cache[key]; exists {
|
|
||||||
// Update existing entry and move to front
|
|
||||||
entry := elem.Value.(*lruEntry[K, V])
|
|
||||||
entry.value = value
|
|
||||||
entry.expiration = expiration
|
|
||||||
s.lru.MoveToFront(elem)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add new entry
|
|
||||||
entry := &lruEntry[K, V]{
|
|
||||||
key: key,
|
|
||||||
value: value,
|
|
||||||
expiration: expiration,
|
|
||||||
}
|
|
||||||
elem := s.lru.PushFront(entry)
|
|
||||||
s.cache[key] = elem
|
|
||||||
|
|
||||||
// Evict oldest if over capacity
|
|
||||||
if s.lru.Len() > s.maxSize {
|
|
||||||
oldest := s.lru.Back()
|
|
||||||
if oldest != nil {
|
|
||||||
s.removeElement(oldest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOrCompute atomically gets an existing value or computes and stores a new value.
|
|
||||||
func (s *LRUStore[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
|
|
||||||
// Check if key already exists
|
|
||||||
if elem, exists := s.cache[key]; exists {
|
|
||||||
entry := elem.Value.(*lruEntry[K, V])
|
|
||||||
|
|
||||||
// Check if expired
|
|
||||||
if time.Now().Before(entry.expiration) {
|
|
||||||
// Move to front (mark as recently used)
|
|
||||||
s.lru.MoveToFront(elem)
|
|
||||||
return entry.value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expired, remove it
|
|
||||||
s.removeElement(elem)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute the value while holding the lock
|
|
||||||
value := compute()
|
|
||||||
expiration := time.Now().Add(ttl)
|
|
||||||
|
|
||||||
// Add new entry
|
|
||||||
entry := &lruEntry[K, V]{
|
|
||||||
key: key,
|
|
||||||
value: value,
|
|
||||||
expiration: expiration,
|
|
||||||
}
|
|
||||||
elem := s.lru.PushFront(entry)
|
|
||||||
s.cache[key] = elem
|
|
||||||
|
|
||||||
// Evict oldest if over capacity
|
|
||||||
if s.lru.Len() > s.maxSize {
|
|
||||||
oldest := s.lru.Back()
|
|
||||||
if oldest != nil {
|
|
||||||
s.removeElement(oldest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes an entry from the cache.
|
|
||||||
func (s *LRUStore[K, V]) Delete(key K) {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
|
|
||||||
if elem, exists := s.cache[key]; exists {
|
|
||||||
s.removeElement(elem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge removes all items from the cache.
|
|
||||||
func (s *LRUStore[K, V]) Purge() {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
|
|
||||||
s.cache = make(map[K]*list.Element)
|
|
||||||
s.lru.Init()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close stops the background cleanup goroutine.
|
|
||||||
func (s *LRUStore[K, V]) Close() {
|
|
||||||
s.closeOnce.Do(func() {
|
|
||||||
close(s.closeChan)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeElement removes an element from both the map and the list.
|
|
||||||
// Must be called with the mutex held.
|
|
||||||
func (s *LRUStore[K, V]) removeElement(elem *list.Element) {
|
|
||||||
entry := elem.Value.(*lruEntry[K, V])
|
|
||||||
delete(s.cache, entry.key)
|
|
||||||
s.lru.Remove(elem)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupExpired periodically removes expired entries.
|
|
||||||
func (s *LRUStore[K, V]) cleanupExpired() {
|
|
||||||
ticker := time.NewTicker(time.Minute)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
s.removeExpired()
|
|
||||||
case <-s.closeChan:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeExpired scans the cache and removes expired entries.
|
|
||||||
func (s *LRUStore[K, V]) removeExpired() {
|
|
||||||
s.mutex.Lock()
|
|
||||||
defer s.mutex.Unlock()
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
// Create a slice to hold elements to remove to avoid modifying list during iteration
|
|
||||||
var toRemove []*list.Element
|
|
||||||
|
|
||||||
for elem := s.lru.Back(); elem != nil; elem = elem.Prev() {
|
|
||||||
entry := elem.Value.(*lruEntry[K, V])
|
|
||||||
if now.After(entry.expiration) {
|
|
||||||
toRemove = append(toRemove, elem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove expired elements
|
|
||||||
for _, elem := range toRemove {
|
|
||||||
s.removeElement(elem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
framework/h/cache/lru_store_test.go (vendored)
@@ -1,676 +0,0 @@
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLRUStore_SetAndGet(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Test basic set and get
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
|
|
||||||
val := store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test getting non-existent key
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("nonexistent", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "computed-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called for non-existent key")
|
|
||||||
}
|
|
||||||
if val != "computed-value" {
|
|
||||||
t.Errorf("Expected computed-value for non-existent key, got %s", val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLRUStore_SizeLimit tests are commented out because they rely on
|
|
||||||
// being able to check cache contents without modifying LRU order,
|
|
||||||
// which is not possible with GetOrCompute-only interface
|
|
||||||
/*
|
|
||||||
func TestLRUStore_SizeLimit(t *testing.T) {
|
|
||||||
// Create store with capacity of 3
|
|
||||||
store := NewLRUStore[int, string](3)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add 3 items
|
|
||||||
store.Set(1, "one", 1*time.Hour)
|
|
||||||
store.Set(2, "two", 1*time.Hour)
|
|
||||||
store.Set(3, "three", 1*time.Hour)
|
|
||||||
|
|
||||||
// Add fourth item, should evict least recently used (key 1)
|
|
||||||
store.Set(4, "four", 1*time.Hour)
|
|
||||||
|
|
||||||
// Key 1 should be evicted
|
|
||||||
computeCalled := false
|
|
||||||
val := store.GetOrCompute(1, func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-one"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected key 1 to be evicted and recomputed")
|
|
||||||
}
|
|
||||||
if val != "recomputed-one" {
|
|
||||||
t.Errorf("Expected recomputed value for key 1, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point, cache has keys: 1 (just added), 2, 3, 4
|
|
||||||
// But capacity is 3, so one of the original keys was evicted
|
|
||||||
// Let's just verify we have exactly 3 items and key 1 is now present
|
|
||||||
count := 0
|
|
||||||
for i := 1; i <= 4; i++ {
|
|
||||||
localI := i
|
|
||||||
computed := false
|
|
||||||
store.GetOrCompute(localI, func() string {
|
|
||||||
computed = true
|
|
||||||
return fmt.Sprintf("recomputed-%d", localI)
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computed {
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We should have found 3 items in cache (since capacity is 3)
|
|
||||||
// The 4th check would have caused another eviction and recomputation
|
|
||||||
if count != 3 {
|
|
||||||
t.Errorf("Expected exactly 3 items in cache, found %d", count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
func TestLRUStore_LRUBehavior(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](3)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add items in order: c (MRU), b, a (LRU)
|
|
||||||
store.Set("a", "A", 1*time.Hour)
|
|
||||||
store.Set("b", "B", 1*time.Hour)
|
|
||||||
store.Set("c", "C", 1*time.Hour)
|
|
||||||
|
|
||||||
// Access "a" to make it recently used
|
|
||||||
// Now order is: a (MRU), c, b (LRU)
|
|
||||||
val := store.GetOrCompute("a", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "A" {
|
|
||||||
t.Errorf("Expected 'A', got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add "d", should evict "b" (least recently used)
|
|
||||||
// Now we have: d (MRU), a, c
|
|
||||||
store.Set("d", "D", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify "b" was evicted
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("b", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-b"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected 'b' to be evicted")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now cache has: b (MRU), d, a
|
|
||||||
// and "c" should have been evicted when we added "b" back
|
|
||||||
|
|
||||||
// Verify the current state matches expectations
|
|
||||||
// We'll collect all values without modifying order too much
|
|
||||||
presentKeys := make(map[string]bool)
|
|
||||||
for _, key := range []string{"a", "b", "c", "d"} {
|
|
||||||
localKey := key
|
|
||||||
computed := false
|
|
||||||
store.GetOrCompute(localKey, func() string {
|
|
||||||
computed = true
|
|
||||||
return "recomputed"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computed {
|
|
||||||
presentKeys[localKey] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We should have exactly 3 keys in cache
|
|
||||||
if len(presentKeys) > 3 {
|
|
||||||
t.Errorf("Cache has more than 3 items: %v", presentKeys)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_UpdateMovesToFront(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](3)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Fill cache
|
|
||||||
store.Set("a", "A", 1*time.Hour)
|
|
||||||
store.Set("b", "B", 1*time.Hour)
|
|
||||||
store.Set("c", "C", 1*time.Hour)
|
|
||||||
|
|
||||||
// Update "a" with new value - should move to front
|
|
||||||
store.Set("a", "A_updated", 1*time.Hour)
|
|
||||||
|
|
||||||
// Add new item - should evict "b" not "a"
|
|
||||||
store.Set("d", "D", 1*time.Hour)
|
|
||||||
|
|
||||||
val := store.GetOrCompute("a", func() string {
|
|
||||||
t.Error("Should not compute for existing key 'a'")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "A_updated" {
|
|
||||||
t.Errorf("Expected updated value, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
computeCalled := false
|
|
||||||
store.GetOrCompute("b", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-b"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected 'b' to be evicted and recomputed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_Expiration(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Set with short TTL
|
|
||||||
store.Set("shortlived", "value", 100*time.Millisecond)
|
|
||||||
|
|
||||||
// Should exist immediately
|
|
||||||
val := store.GetOrCompute("shortlived", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
if val != "value" {
|
|
||||||
t.Errorf("Expected value, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should be expired now
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-expiry"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called for expired key")
|
|
||||||
}
|
|
||||||
if val != "recomputed-after-expiry" {
|
|
||||||
t.Errorf("Expected recomputed value for expired key, got %s", val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_Delete(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify it exists
|
|
||||||
val := store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete it
|
|
||||||
store.Delete("key1")
|
|
||||||
|
|
||||||
// Verify it's gone
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("key1", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-delete"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called after deletion")
|
|
||||||
}
|
|
||||||
if val != "recomputed-after-delete" {
|
|
||||||
t.Errorf("Expected recomputed value after deletion, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete non-existent key should not panic
|
|
||||||
store.Delete("nonexistent")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_Purge(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add multiple items
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
store.Set("key2", "value2", 1*time.Hour)
|
|
||||||
store.Set("key3", "value3", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify they exist
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
val := store.GetOrCompute(key, func() string {
|
|
||||||
t.Errorf("Should not compute for existing key %s", key)
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
expectedVal := "value" + string(rune('0'+i))
|
|
||||||
if val != expectedVal {
|
|
||||||
t.Errorf("Expected to find %s with value %s, got %s", key, expectedVal, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge all
|
|
||||||
store.Purge()
|
|
||||||
|
|
||||||
// Verify all are gone
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
computeCalled := false
|
|
||||||
store.GetOrCompute(key, func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-purge"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Errorf("Expected %s to be purged and recomputed", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_ConcurrentAccess(t *testing.T) {
|
|
||||||
// Need capacity for all unique keys: 100 goroutines * 100 operations = 10,000
|
|
||||||
store := NewLRUStore[int, int](10000)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
const numGoroutines = 100
|
|
||||||
const numOperations = 100
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(numGoroutines)
|
|
||||||
|
|
||||||
// Concurrent writes and reads
|
|
||||||
for i := 0; i < numGoroutines; i++ {
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
for j := 0; j < numOperations; j++ {
|
|
||||||
key := (id * numOperations) + j
|
|
||||||
store.Set(key, key*2, 1*time.Hour)
|
|
||||||
|
|
||||||
// Immediately read it back
|
|
||||||
val := store.GetOrCompute(key, func() int {
|
|
||||||
t.Errorf("Goroutine %d: Should not compute for just-set key %d", id, key)
|
|
||||||
return -1
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != key*2 {
|
|
||||||
t.Errorf("Goroutine %d: Expected value %d, got %d", id, key*2, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_ExpiredEntriesCleanup(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](100)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add many short-lived entries
|
|
||||||
for i := 0; i < 50; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
store.Set(key, "value", 100*time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add some long-lived entries
|
|
||||||
for i := 50; i < 60; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
store.Set(key, "value", 1*time.Hour)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for short-lived entries to expire and cleanup to run
|
|
||||||
time.Sleep(1200 * time.Millisecond)
|
|
||||||
|
|
||||||
// Check that expired entries are gone
|
|
||||||
for i := 0; i < 50; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
computeCalled := false
|
|
||||||
store.GetOrCompute(key, func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-expiry"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Errorf("Expected expired key %s to be cleaned up and recomputed", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Long-lived entries should still exist
|
|
||||||
for i := 50; i < 60; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
val := store.GetOrCompute(key, func() string {
|
|
||||||
t.Errorf("Should not compute for long-lived key %s", key)
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value" {
|
|
||||||
t.Errorf("Expected long-lived key %s to still exist with value 'value', got %s", key, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_InvalidSize(t *testing.T) {
|
|
||||||
// Test that creating store with invalid size panics
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r == nil {
|
|
||||||
t.Error("Expected panic for zero size")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
NewLRUStore[string, string](0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_Close(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
|
|
||||||
// Close should not panic
|
|
||||||
store.Close()
|
|
||||||
|
|
||||||
// Multiple closes should not panic
|
|
||||||
store.Close()
|
|
||||||
store.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLRUStore_ComplexEvictionScenario is commented out because
|
|
||||||
// checking cache state with GetOrCompute modifies the LRU order
|
|
||||||
/*
|
|
||||||
func TestLRUStore_ComplexEvictionScenario(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](4)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Fill cache: d (MRU), c, b, a (LRU)
|
|
||||||
store.Set("a", "A", 1*time.Hour)
|
|
||||||
store.Set("b", "B", 1*time.Hour)
|
|
||||||
store.Set("c", "C", 1*time.Hour)
|
|
||||||
store.Set("d", "D", 1*time.Hour)
|
|
||||||
|
|
||||||
// Access in specific order to control LRU order
|
|
||||||
store.GetOrCompute("b", func() string { return "B" }, 1*time.Hour) // b (MRU), d, c, a (LRU)
|
|
||||||
store.GetOrCompute("d", func() string { return "D" }, 1*time.Hour) // d (MRU), b, c, a (LRU)
|
|
||||||
store.GetOrCompute("a", func() string { return "A" }, 1*time.Hour) // a (MRU), d, b, c (LRU)
|
|
||||||
|
|
||||||
// Record initial state
|
|
||||||
initialOrder := "a (MRU), d, b, c (LRU)"
|
|
||||||
_ = initialOrder // for documentation
|
|
||||||
|
|
||||||
// Add two new items
|
|
||||||
store.Set("e", "E", 1*time.Hour) // Should evict c (LRU) -> a, d, b, e
|
|
||||||
store.Set("f", "F", 1*time.Hour) // Should evict b (LRU) -> a, d, e, f
|
|
||||||
|
|
||||||
// Check if our expectations match by counting present keys
|
|
||||||
// We'll check each key once to minimize LRU order changes
|
|
||||||
evicted := []string{}
|
|
||||||
present := []string{}
|
|
||||||
|
|
||||||
for _, key := range []string{"a", "b", "c", "d", "e", "f"} {
|
|
||||||
localKey := key
|
|
||||||
computeCalled := false
|
|
||||||
store.GetOrCompute(localKey, func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-" + localKey
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if computeCalled {
|
|
||||||
evicted = append(evicted, localKey)
|
|
||||||
} else {
|
|
||||||
present = append(present, localKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
// After checking all 6 keys, we'll have at most 4 in cache
|
|
||||||
if len(present) > 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We expect c and b to have been evicted
|
|
||||||
expectedEvicted := map[string]bool{"b": true, "c": true}
|
|
||||||
for _, key := range evicted {
|
|
||||||
if !expectedEvicted[key] {
|
|
||||||
t.Errorf("Unexpected key %s was evicted", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify we have exactly 4 items in cache
|
|
||||||
if len(present) > 4 {
|
|
||||||
t.Errorf("Cache has more than 4 items: %v", present)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
func TestLRUStore_GetOrCompute(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCount := 0
|
|
||||||
|
|
||||||
// Test computing when not in cache
|
|
||||||
result := store.GetOrCompute("key1", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "computed-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "computed-value" {
|
|
||||||
t.Errorf("Expected computed-value, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected compute to be called once, called %d times", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test returning cached value
|
|
||||||
result = store.GetOrCompute("key1", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "computed-value" {
|
|
||||||
t.Errorf("Expected cached value, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected compute to not be called again, total calls: %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_GetOrCompute_Expiration(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](10)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCount := 0
|
|
||||||
|
|
||||||
// Set with short TTL
|
|
||||||
result := store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value1"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected 1 compute, got %d", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should return cached value immediately
|
|
||||||
result = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value2"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value1" {
|
|
||||||
t.Errorf("Expected cached value1, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 compute, got %d", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should compute new value after expiration
|
|
||||||
result = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value2"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value2" {
|
|
||||||
t.Errorf("Expected new value2, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 2 {
|
|
||||||
t.Errorf("Expected 2 computes after expiration, got %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_GetOrCompute_Concurrent(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](100)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
var computeCount int32
|
|
||||||
const numGoroutines = 100
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(numGoroutines)
|
|
||||||
|
|
||||||
// Launch many goroutines trying to compute the same key
|
|
||||||
for i := 0; i < numGoroutines; i++ {
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
result := store.GetOrCompute("shared-key", func() string {
|
|
||||||
// Increment atomically to count calls
|
|
||||||
atomic.AddInt32(&computeCount, 1)
|
|
||||||
// Simulate some work
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
return "shared-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "shared-value" {
|
|
||||||
t.Errorf("Goroutine %d: Expected shared-value, got %s", id, result)
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Only one goroutine should have computed the value
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected exactly 1 compute for concurrent access, got %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLRUStore_GetOrCompute_WithEviction(t *testing.T) {
|
|
||||||
// Small cache to test eviction behavior
|
|
||||||
store := NewLRUStore[int, string](3)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCounts := make(map[int]int)
|
|
||||||
|
|
||||||
// Fill cache to capacity
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
store.GetOrCompute(i, func() string {
|
|
||||||
computeCounts[i]++
|
|
||||||
return fmt.Sprintf("value-%d", i)
|
|
||||||
}, 1*time.Hour)
|
|
||||||
}
|
|
||||||
|
|
||||||
// All should be computed once
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
if computeCounts[i] != 1 {
|
|
||||||
t.Errorf("Key %d: Expected 1 compute, got %d", i, computeCounts[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add fourth item - should evict key 1
|
|
||||||
store.GetOrCompute(4, func() string {
|
|
||||||
computeCounts[4]++
|
|
||||||
return "value-4"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
// Try to get key 1 again - should need to recompute
|
|
||||||
result := store.GetOrCompute(1, func() string {
|
|
||||||
computeCounts[1]++
|
|
||||||
return "value-1-recomputed"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "value-1-recomputed" {
|
|
||||||
t.Errorf("Expected recomputed value, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCounts[1] != 2 {
|
|
||||||
t.Errorf("Key 1: Expected 2 computes after eviction, got %d", computeCounts[1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLRUStore_GetOrCompute_UpdatesLRU is commented out because
|
|
||||||
// verifying cache state with GetOrCompute modifies the LRU order
|
|
||||||
/*
|
|
||||||
func TestLRUStore_GetOrCompute_UpdatesLRU(t *testing.T) {
|
|
||||||
store := NewLRUStore[string, string](3)
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Fill cache: c (MRU), b, a (LRU)
|
|
||||||
store.GetOrCompute("a", func() string { return "A" }, 1*time.Hour)
|
|
||||||
store.GetOrCompute("b", func() string { return "B" }, 1*time.Hour)
|
|
||||||
store.GetOrCompute("c", func() string { return "C" }, 1*time.Hour)
|
|
||||||
|
|
||||||
// Access "a" again - should move to front
|
|
||||||
// Order becomes: a (MRU), c, b (LRU)
|
|
||||||
val := store.GetOrCompute("a", func() string { return "A-new" }, 1*time.Hour)
|
|
||||||
if val != "A" {
|
|
||||||
t.Errorf("Expected existing value 'A', got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add new item - should evict "b" (least recently used)
|
|
||||||
// Order becomes: d (MRU), a, c
|
|
||||||
store.GetOrCompute("d", func() string { return "D" }, 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify "b" was evicted by trying to get it
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("b", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "B-recomputed"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected 'b' to be evicted and recomputed")
|
|
||||||
}
|
|
||||||
if val != "B-recomputed" {
|
|
||||||
t.Errorf("Expected 'B-recomputed', got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point, the cache contains b (just added), d, a
|
|
||||||
// and c was evicted when b was re-added
|
|
||||||
// Let's verify by checking the cache has exactly 3 items
|
|
||||||
presentCount := 0
|
|
||||||
for _, key := range []string{"a", "b", "c", "d"} {
|
|
||||||
localKey := key
|
|
||||||
computed := false
|
|
||||||
store.GetOrCompute(localKey, func() string {
|
|
||||||
computed = true
|
|
||||||
return "check-" + localKey
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computed {
|
|
||||||
presentCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if presentCount != 3 {
|
|
||||||
t.Errorf("Expected exactly 3 items in cache, found %d", presentCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
framework/h/cache/ttl_store.go (vendored), 133 lines removed
@@ -1,133 +0,0 @@
package cache

import (
    "flag"
    "log/slog"
    "sync"
    "time"
)

// TTLStore is a time-to-live based cache implementation that mimics
// the original htmgo caching behavior. It stores values with expiration
// times and periodically cleans up expired entries.
type TTLStore[K comparable, V any] struct {
    cache     map[K]*entry[V]
    mutex     sync.RWMutex
    closeOnce sync.Once
    closeChan chan struct{}
}

type entry[V any] struct {
    value      V
    expiration time.Time
}

// NewTTLStore creates a new TTL-based cache store.
func NewTTLStore[K comparable, V any]() Store[K, V] {
    s := &TTLStore[K, V]{
        cache:     make(map[K]*entry[V]),
        closeChan: make(chan struct{}),
    }
    s.startCleaner()
    return s
}

// Set adds or updates an entry in the cache with the given TTL.
func (s *TTLStore[K, V]) Set(key K, value V, ttl time.Duration) {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    s.cache[key] = &entry[V]{
        value:      value,
        expiration: time.Now().Add(ttl),
    }
}

// GetOrCompute atomically gets an existing value or computes and stores a new value.
func (s *TTLStore[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    // Check if exists and not expired
    if e, ok := s.cache[key]; ok && time.Now().Before(e.expiration) {
        return e.value
    }

    // Compute while holding lock
    value := compute()

    // Store the result
    s.cache[key] = &entry[V]{
        value:      value,
        expiration: time.Now().Add(ttl),
    }

    return value
}

// Delete removes an entry from the cache.
func (s *TTLStore[K, V]) Delete(key K) {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    delete(s.cache, key)
}

// Purge removes all items from the cache.
func (s *TTLStore[K, V]) Purge() {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    s.cache = make(map[K]*entry[V])
}

// Close stops the background cleaner goroutine.
func (s *TTLStore[K, V]) Close() {
    s.closeOnce.Do(func() {
        close(s.closeChan)
    })
}

// startCleaner starts a background goroutine that periodically removes expired entries.
func (s *TTLStore[K, V]) startCleaner() {
    isTests := flag.Lookup("test.v") != nil

    go func() {
        ticker := time.NewTicker(time.Minute)
        if isTests {
            ticker = time.NewTicker(time.Second)
        }
        defer ticker.Stop()

        for {
            select {
            case <-ticker.C:
                s.clearExpired()
            case <-s.closeChan:
                return
            }
        }
    }()
}

// clearExpired removes all expired entries from the cache.
func (s *TTLStore[K, V]) clearExpired() {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    now := time.Now()
    deletedCount := 0

    for key, e := range s.cache {
        if now.After(e.expiration) {
            delete(s.cache, key)
            deletedCount++
        }
    }

    if deletedCount > 0 {
        slog.Debug("Deleted expired cache entries", slog.Int("count", deletedCount))
    }
}
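A short usage sketch of the store above, assuming the framework's cache import path used elsewhere in this diff (github.com/maddalax/htmgo/framework/h/cache). Note that the compute callback runs while the store's mutex is held, so it should stay cheap and must not call back into the same store.

package main

import (
    "fmt"
    "time"

    "github.com/maddalax/htmgo/framework/h/cache"
)

func main() {
    store := cache.NewTTLStore[string, string]()
    defer store.Close()

    // First call computes and stores the value for one hour.
    v := store.GetOrCompute("greeting", func() string { return "hello" }, time.Hour)
    fmt.Println(v) // hello

    // Second call returns the cached value; the callback is not invoked.
    v = store.GetOrCompute("greeting", func() string { return "never runs" }, time.Hour)
    fmt.Println(v) // hello

    store.Delete("greeting") // explicit invalidation
}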
framework/h/cache/ttl_store_test.go (vendored), 443 lines removed
@@ -1,443 +0,0 @@
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestTTLStore_SetAndGet(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Test basic set and get
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
|
|
||||||
val := store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test getting non-existent key
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("nonexistent", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "computed-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called for non-existent key")
|
|
||||||
}
|
|
||||||
if val != "computed-value" {
|
|
||||||
t.Errorf("Expected computed-value for non-existent key, got %s", val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_Expiration(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Set with short TTL
|
|
||||||
store.Set("shortlived", "value", 100*time.Millisecond)
|
|
||||||
|
|
||||||
// Should exist immediately
|
|
||||||
val := store.GetOrCompute("shortlived", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
if val != "value" {
|
|
||||||
t.Errorf("Expected value, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should be expired now
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-expiry"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called for expired key")
|
|
||||||
}
|
|
||||||
if val != "recomputed-after-expiry" {
|
|
||||||
t.Errorf("Expected recomputed value for expired key, got %s", val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_Delete(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify it exists
|
|
||||||
val := store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete it
|
|
||||||
store.Delete("key1")
|
|
||||||
|
|
||||||
// Verify it's gone
|
|
||||||
computeCalled := false
|
|
||||||
val = store.GetOrCompute("key1", func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-delete"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Error("Expected compute function to be called after deletion")
|
|
||||||
}
|
|
||||||
if val != "recomputed-after-delete" {
|
|
||||||
t.Errorf("Expected recomputed value after deletion, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete non-existent key should not panic
|
|
||||||
store.Delete("nonexistent")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_Purge(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add multiple items
|
|
||||||
store.Set("key1", "value1", 1*time.Hour)
|
|
||||||
store.Set("key2", "value2", 1*time.Hour)
|
|
||||||
store.Set("key3", "value3", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify they exist
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
val := store.GetOrCompute(key, func() string {
|
|
||||||
t.Errorf("Should not compute for existing key %s", key)
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
expectedVal := "value" + string(rune('0'+i))
|
|
||||||
if val != expectedVal {
|
|
||||||
t.Errorf("Expected to find %s with value %s, got %s", key, expectedVal, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge all
|
|
||||||
store.Purge()
|
|
||||||
|
|
||||||
// Verify all are gone
|
|
||||||
for i := 1; i <= 3; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
computeCalled := false
|
|
||||||
store.GetOrCompute(key, func() string {
|
|
||||||
computeCalled = true
|
|
||||||
return "recomputed-after-purge"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if !computeCalled {
|
|
||||||
t.Errorf("Expected %s to be purged and recomputed", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_ConcurrentAccess(t *testing.T) {
|
|
||||||
store := NewTTLStore[int, int]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
const numGoroutines = 100
|
|
||||||
const numOperations = 1000
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(numGoroutines)
|
|
||||||
|
|
||||||
// Concurrent writes and reads
|
|
||||||
for i := 0; i < numGoroutines; i++ {
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
for j := 0; j < numOperations; j++ {
|
|
||||||
key := (id * numOperations) + j
|
|
||||||
store.Set(key, key*2, 1*time.Hour)
|
|
||||||
|
|
||||||
// Immediately read it back
|
|
||||||
val := store.GetOrCompute(key, func() int {
|
|
||||||
t.Errorf("Goroutine %d: Should not compute for just-set key %d", id, key)
|
|
||||||
return -1
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != key*2 {
|
|
||||||
t.Errorf("Goroutine %d: Expected value %d, got %d", id, key*2, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_UpdateExisting(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Set initial value
|
|
||||||
store.Set("key1", "value1", 100*time.Millisecond)
|
|
||||||
|
|
||||||
// Update with new value and longer TTL
|
|
||||||
store.Set("key1", "value2", 1*time.Hour)
|
|
||||||
|
|
||||||
// Verify new value
|
|
||||||
val := store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value2" {
|
|
||||||
t.Errorf("Expected value2, got %s", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for original TTL to pass
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should still exist with new TTL
|
|
||||||
val = store.GetOrCompute("key1", func() string {
|
|
||||||
t.Error("Should not compute for key with new TTL")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "value2" {
|
|
||||||
t.Errorf("Expected value2, got %s", val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_CleanupGoroutine(t *testing.T) {
|
|
||||||
// This test verifies that expired entries are cleaned up automatically
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
// Add many short-lived entries
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
key := "key" + string(rune('0'+i))
|
|
||||||
store.Set(key, "value", 100*time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cast to access internal state for testing
|
|
||||||
ttlStore := store.(*TTLStore[string, string])
|
|
||||||
|
|
||||||
// Check initial count
|
|
||||||
ttlStore.mutex.RLock()
|
|
||||||
initialCount := len(ttlStore.cache)
|
|
||||||
ttlStore.mutex.RUnlock()
|
|
||||||
|
|
||||||
if initialCount != 100 {
|
|
||||||
t.Errorf("Expected 100 entries initially, got %d", initialCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration and cleanup cycle
|
|
||||||
// In test mode, cleanup runs every second
|
|
||||||
time.Sleep(1200 * time.Millisecond)
|
|
||||||
|
|
||||||
// Check that entries were cleaned up
|
|
||||||
ttlStore.mutex.RLock()
|
|
||||||
finalCount := len(ttlStore.cache)
|
|
||||||
ttlStore.mutex.RUnlock()
|
|
||||||
|
|
||||||
if finalCount != 0 {
|
|
||||||
t.Errorf("Expected 0 entries after cleanup, got %d", finalCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_Close(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
|
|
||||||
// Close should not panic
|
|
||||||
store.Close()
|
|
||||||
|
|
||||||
// Multiple closes should not panic
|
|
||||||
store.Close()
|
|
||||||
store.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_DifferentTypes(t *testing.T) {
|
|
||||||
// Test with different key and value types
|
|
||||||
intStore := NewTTLStore[int, string]()
|
|
||||||
defer intStore.Close()
|
|
||||||
|
|
||||||
intStore.Set(42, "answer", 1*time.Hour)
|
|
||||||
val := intStore.GetOrCompute(42, func() string {
|
|
||||||
t.Error("Should not compute for existing key")
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if val != "answer" {
|
|
||||||
t.Error("Failed with int key")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test with struct values
|
|
||||||
type User struct {
|
|
||||||
ID int
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
userStore := NewTTLStore[string, User]()
|
|
||||||
defer userStore.Close()
|
|
||||||
|
|
||||||
user := User{ID: 1, Name: "Alice"}
|
|
||||||
userStore.Set("user1", user, 1*time.Hour)
|
|
||||||
|
|
||||||
retrievedUser := userStore.GetOrCompute("user1", func() User {
|
|
||||||
t.Error("Should not compute for existing user")
|
|
||||||
return User{}
|
|
||||||
}, 1*time.Hour)
|
|
||||||
if retrievedUser.ID != 1 || retrievedUser.Name != "Alice" {
|
|
||||||
t.Error("Retrieved user data doesn't match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_GetOrCompute(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCount := 0
|
|
||||||
|
|
||||||
// Test computing when not in cache
|
|
||||||
result := store.GetOrCompute("key1", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "computed-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "computed-value" {
|
|
||||||
t.Errorf("Expected computed-value, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected compute to be called once, called %d times", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test returning cached value
|
|
||||||
result = store.GetOrCompute("key1", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "should-not-compute"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "computed-value" {
|
|
||||||
t.Errorf("Expected cached value, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected compute to not be called again, total calls: %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_GetOrCompute_Expiration(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCount := 0
|
|
||||||
|
|
||||||
// Set with short TTL
|
|
||||||
result := store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value1"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value1" {
|
|
||||||
t.Errorf("Expected value1, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected 1 compute, got %d", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should return cached value immediately
|
|
||||||
result = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value2"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value1" {
|
|
||||||
t.Errorf("Expected cached value1, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 compute, got %d", computeCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should compute new value after expiration
|
|
||||||
result = store.GetOrCompute("shortlived", func() string {
|
|
||||||
computeCount++
|
|
||||||
return "value2"
|
|
||||||
}, 100*time.Millisecond)
|
|
||||||
|
|
||||||
if result != "value2" {
|
|
||||||
t.Errorf("Expected new value2, got %s", result)
|
|
||||||
}
|
|
||||||
if computeCount != 2 {
|
|
||||||
t.Errorf("Expected 2 computes after expiration, got %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_GetOrCompute_Concurrent(t *testing.T) {
|
|
||||||
store := NewTTLStore[string, string]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
var computeCount int32
|
|
||||||
const numGoroutines = 100
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(numGoroutines)
|
|
||||||
|
|
||||||
// Launch many goroutines trying to compute the same key
|
|
||||||
for i := 0; i < numGoroutines; i++ {
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
result := store.GetOrCompute("shared-key", func() string {
|
|
||||||
// Increment atomically to count calls
|
|
||||||
atomic.AddInt32(&computeCount, 1)
|
|
||||||
// Simulate some work
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
return "shared-value"
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != "shared-value" {
|
|
||||||
t.Errorf("Goroutine %d: Expected shared-value, got %s", id, result)
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Only one goroutine should have computed the value
|
|
||||||
if computeCount != 1 {
|
|
||||||
t.Errorf("Expected exactly 1 compute for concurrent access, got %d", computeCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTTLStore_GetOrCompute_MultipleKeys(t *testing.T) {
|
|
||||||
store := NewTTLStore[int, int]()
|
|
||||||
defer store.Close()
|
|
||||||
|
|
||||||
computeCounts := make(map[int]int)
|
|
||||||
var mu sync.Mutex
|
|
||||||
|
|
||||||
// Test multiple different keys
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
for j := 0; j < 3; j++ { // Access each key 3 times
|
|
||||||
result := store.GetOrCompute(i, func() int {
|
|
||||||
mu.Lock()
|
|
||||||
computeCounts[i]++
|
|
||||||
mu.Unlock()
|
|
||||||
return i * 10
|
|
||||||
}, 1*time.Hour)
|
|
||||||
|
|
||||||
if result != i*10 {
|
|
||||||
t.Errorf("Expected %d, got %d", i*10, result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Each key should be computed exactly once
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
if computeCounts[i] != 1 {
|
|
||||||
t.Errorf("Key %d: Expected 1 compute, got %d", i, computeCounts[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@@ -1,448 +0,0 @@
package h
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/maddalax/htmgo/framework/h/cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCached_WithDefaultStore(t *testing.T) {
|
|
||||||
callCount := 0
|
|
||||||
|
|
||||||
// Create a cached component
|
|
||||||
CachedDiv := Cached(1*time.Hour, func() *Element {
|
|
||||||
callCount++
|
|
||||||
return Div(Text(fmt.Sprintf("Rendered %d times", callCount)))
|
|
||||||
})
|
|
||||||
|
|
||||||
// First render
|
|
||||||
html1 := Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second render should use cache
|
|
||||||
html2 := Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1 != html2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCached_WithCustomStore(t *testing.T) {
|
|
||||||
// Use LRU store with small capacity
|
|
||||||
lruStore := cache.NewLRUStore[any, string](10)
|
|
||||||
defer lruStore.Close()
|
|
||||||
|
|
||||||
callCount := 0
|
|
||||||
|
|
||||||
// Create cached component with custom store
|
|
||||||
CachedDiv := Cached(1*time.Hour, func() *Element {
|
|
||||||
callCount++
|
|
||||||
return Div(Text(fmt.Sprintf("Rendered %d times", callCount)))
|
|
||||||
}, WithCacheStore(lruStore))
|
|
||||||
|
|
||||||
// First render
|
|
||||||
html1 := Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second render should use cache
|
|
||||||
html2 := Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1 != html2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedPerKey_WithDefaultStore(t *testing.T) {
|
|
||||||
renderCounts := make(map[int]int)
|
|
||||||
|
|
||||||
// Create per-key cached component
|
|
||||||
UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
|
|
||||||
return userID, func() *Element {
|
|
||||||
renderCounts[userID]++
|
|
||||||
return Div(Text(fmt.Sprintf("User %d (rendered %d times)", userID, renderCounts[userID])))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Render for different users
|
|
||||||
html1_user1 := Render(UserProfile(1))
|
|
||||||
html1_user2 := Render(UserProfile(2))
|
|
||||||
|
|
||||||
if renderCounts[1] != 1 || renderCounts[2] != 1 {
|
|
||||||
t.Error("Expected each user to be rendered once")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Render again - should use cache
|
|
||||||
html2_user1 := Render(UserProfile(1))
|
|
||||||
html2_user2 := Render(UserProfile(2))
|
|
||||||
|
|
||||||
if renderCounts[1] != 1 || renderCounts[2] != 1 {
|
|
||||||
t.Error("Expected renders to be cached")
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1_user1 != html2_user1 || html1_user2 != html2_user2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Different users should have different content
|
|
||||||
if html1_user1 == html1_user2 {
|
|
||||||
t.Error("Expected different content for different users")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedPerKey_WithLRUStore(t *testing.T) {
|
|
||||||
// Small LRU cache that can only hold 2 items
|
|
||||||
lruStore := cache.NewLRUStore[any, string](2)
|
|
||||||
defer lruStore.Close()
|
|
||||||
|
|
||||||
renderCounts := make(map[int]int)
|
|
||||||
|
|
||||||
// Create per-key cached component with LRU store
|
|
||||||
UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
|
|
||||||
return userID, func() *Element {
|
|
||||||
renderCounts[userID]++
|
|
||||||
return Div(Text(fmt.Sprintf("User %d", userID)))
|
|
||||||
}
|
|
||||||
}, WithCacheStore(lruStore))
|
|
||||||
|
|
||||||
// Render 2 users - fill cache to capacity
|
|
||||||
Render(UserProfile(1))
|
|
||||||
Render(UserProfile(2))
|
|
||||||
|
|
||||||
if renderCounts[1] != 1 || renderCounts[2] != 1 {
|
|
||||||
t.Error("Expected each user to be rendered once")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Render user 3 - should evict user 1 (least recently used)
|
|
||||||
Render(UserProfile(3))
|
|
||||||
|
|
||||||
if renderCounts[3] != 1 {
|
|
||||||
t.Error("Expected user 3 to be rendered once")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Render user 1 again - should re-render (was evicted)
|
|
||||||
Render(UserProfile(1))
|
|
||||||
|
|
||||||
if renderCounts[1] != 2 {
|
|
||||||
t.Errorf("Expected user 1 to be re-rendered after eviction, got %d renders", renderCounts[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Render user 2 again - should re-render (was evicted when user 1 was added back)
|
|
||||||
Render(UserProfile(2))
|
|
||||||
|
|
||||||
if renderCounts[2] != 2 {
|
|
||||||
t.Errorf("Expected user 2 to be re-rendered after eviction, got %d renders", renderCounts[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point, cache contains users 1 and 2 (most recently used)
|
|
||||||
|
|
||||||
// Render user 1 again - should be cached
|
|
||||||
Render(UserProfile(1))
|
|
||||||
|
|
||||||
if renderCounts[1] != 2 {
|
|
||||||
t.Errorf("Expected user 1 to still be cached, got %d renders", renderCounts[1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedT_WithDefaultStore(t *testing.T) {
|
|
||||||
type Product struct {
|
|
||||||
ID int
|
|
||||||
Name string
|
|
||||||
Price float64
|
|
||||||
}
|
|
||||||
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Create cached component that takes typed data
|
|
||||||
ProductCard := CachedT(1*time.Hour, func(p Product) *Element {
|
|
||||||
renderCount++
|
|
||||||
return Div(
|
|
||||||
H3(Text(p.Name)),
|
|
||||||
P(Text(fmt.Sprintf("$%.2f", p.Price))),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
product := Product{ID: 1, Name: "Widget", Price: 9.99}
|
|
||||||
|
|
||||||
// First render
|
|
||||||
html1 := Render(ProductCard(product))
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second render should use cache
|
|
||||||
html2 := Render(ProductCard(product))
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1 != html2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedPerKeyT_WithCustomStore(t *testing.T) {
|
|
||||||
type Article struct {
|
|
||||||
ID int
|
|
||||||
Title string
|
|
||||||
Content string
|
|
||||||
}
|
|
||||||
|
|
||||||
ttlStore := cache.NewTTLStore[any, string]()
|
|
||||||
defer ttlStore.Close()
|
|
||||||
|
|
||||||
renderCounts := make(map[int]int)
|
|
||||||
|
|
||||||
// Create per-key cached component with custom store
|
|
||||||
ArticleView := CachedPerKeyT(1*time.Hour, func(a Article) (int, GetElementFunc) {
|
|
||||||
return a.ID, func() *Element {
|
|
||||||
renderCounts[a.ID]++
|
|
||||||
return Div(
|
|
||||||
H1(Text(a.Title)),
|
|
||||||
P(Text(a.Content)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}, WithCacheStore(ttlStore))
|
|
||||||
|
|
||||||
article1 := Article{ID: 1, Title: "First", Content: "Content 1"}
|
|
||||||
article2 := Article{ID: 2, Title: "Second", Content: "Content 2"}
|
|
||||||
|
|
||||||
// Render articles
|
|
||||||
Render(ArticleView(article1))
|
|
||||||
Render(ArticleView(article2))
|
|
||||||
|
|
||||||
if renderCounts[1] != 1 || renderCounts[2] != 1 {
|
|
||||||
t.Error("Expected each article to be rendered once")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Render again - should use cache
|
|
||||||
Render(ArticleView(article1))
|
|
||||||
Render(ArticleView(article2))
|
|
||||||
|
|
||||||
if renderCounts[1] != 1 || renderCounts[2] != 1 {
|
|
||||||
t.Error("Expected renders to be cached")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDefaultCacheProvider_Override(t *testing.T) {
|
|
||||||
// Save original provider
|
|
||||||
originalProvider := DefaultCacheProvider
|
|
||||||
defer func() {
|
|
||||||
DefaultCacheProvider = originalProvider
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Track which cache is used
|
|
||||||
customCacheUsed := false
|
|
||||||
|
|
||||||
// Override default provider
|
|
||||||
DefaultCacheProvider = func() cache.Store[any, string] {
|
|
||||||
customCacheUsed = true
|
|
||||||
return cache.NewLRUStore[any, string](100)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create cached component without specifying store
|
|
||||||
CachedDiv := Cached(1*time.Hour, func() *Element {
|
|
||||||
return Div(Text("Content"))
|
|
||||||
})
|
|
||||||
|
|
||||||
// Render to trigger cache creation
|
|
||||||
Render(CachedDiv())
|
|
||||||
|
|
||||||
if !customCacheUsed {
|
|
||||||
t.Error("Expected custom default cache provider to be used")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
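TestDefaultCacheProvider_Override above swaps the package-level provider so every cached component created afterwards uses an LRU store. A minimal sketch of doing that in application code, based only on the names exercised by the test (h.DefaultCacheProvider, cache.NewLRUStore); the capacity of 10,000 is an arbitrary example value.

package main

import (
    "github.com/maddalax/htmgo/framework/h"
    "github.com/maddalax/htmgo/framework/h/cache"
)

func init() {
    // Every h.Cached / h.CachedPerKeyT component declared after this point
    // gets a bounded LRU store instead of the default TTL store.
    h.DefaultCacheProvider = func() cache.Store[any, string] {
        return cache.NewLRUStore[any, string](10_000)
    }
}

func main() {}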
|
|
||||||
func TestCachedPerKey_ConcurrentAccess(t *testing.T) {
|
|
||||||
lruStore := cache.NewLRUStore[any, string](1000)
|
|
||||||
defer lruStore.Close()
|
|
||||||
|
|
||||||
UserProfile := CachedPerKeyT(1*time.Hour, func(userID int) (int, GetElementFunc) {
|
|
||||||
return userID, func() *Element {
|
|
||||||
// Simulate some work
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
return Div(Text(fmt.Sprintf("User %d", userID)))
|
|
||||||
}
|
|
||||||
}, WithCacheStore(lruStore))
|
|
||||||
|
|
||||||
const numGoroutines = 50
|
|
||||||
const numUsers = 20
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(numGoroutines)
|
|
||||||
|
|
||||||
// Many goroutines accessing overlapping user IDs
|
|
||||||
for i := 0; i < numGoroutines; i++ {
|
|
||||||
go func(id int) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
for j := 0; j < numUsers; j++ {
|
|
||||||
userID := j % 10 // Reuse user IDs to test cache hits
|
|
||||||
html := Render(UserProfile(userID))
|
|
||||||
|
|
||||||
expectedContent := fmt.Sprintf("User %d", userID)
|
|
||||||
if !contains(html, expectedContent) {
|
|
||||||
t.Errorf("Goroutine %d: Expected content for user %d", id, userID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedT2_MultipleParameters(t *testing.T) {
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Component that takes two parameters
|
|
||||||
CombinedView := CachedT2(1*time.Hour, func(title string, count int) *Element {
|
|
||||||
renderCount++
|
|
||||||
return Div(
|
|
||||||
H2(Text(title)),
|
|
||||||
P(Text(fmt.Sprintf("Count: %d", count))),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
// First render
|
|
||||||
html1 := Render(CombinedView("Test", 42))
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second render with same params should use cache
|
|
||||||
html2 := Render(CombinedView("Test", 42))
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1 != html2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedPerKeyT3_ComplexKey(t *testing.T) {
|
|
||||||
type CompositeKey struct {
|
|
||||||
UserID int
|
|
||||||
ProductID int
|
|
||||||
Timestamp int64
|
|
||||||
}
|
|
||||||
|
|
||||||
renderCount := 0
|
|
||||||
|
|
||||||
// Component with composite key
|
|
||||||
UserProductView := CachedPerKeyT3(1*time.Hour,
|
|
||||||
func(userID int, productID int, timestamp int64) (CompositeKey, GetElementFunc) {
|
|
||||||
key := CompositeKey{UserID: userID, ProductID: productID, Timestamp: timestamp}
|
|
||||||
return key, func() *Element {
|
|
||||||
renderCount++
|
|
||||||
return Div(Text(fmt.Sprintf("User %d viewed product %d at %d", userID, productID, timestamp)))
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Render with specific combination
|
|
||||||
ts := time.Now().Unix()
|
|
||||||
html1 := Render(UserProductView(1, 100, ts))
|
|
||||||
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Same combination should use cache
|
|
||||||
html2 := Render(UserProductView(1, 100, ts))
|
|
||||||
|
|
||||||
if renderCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", renderCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
if html1 != html2 {
|
|
||||||
t.Error("Expected same HTML from cache")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Different combination should render again
|
|
||||||
Render(UserProductView(1, 101, ts))
|
|
||||||
|
|
||||||
if renderCount != 2 {
|
|
||||||
t.Errorf("Expected 2 renders for different key, got %d", renderCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCached_Expiration(t *testing.T) {
|
|
||||||
callCount := 0
|
|
||||||
|
|
||||||
// Create cached component with short TTL
|
|
||||||
CachedDiv := Cached(100*time.Millisecond, func() *Element {
|
|
||||||
callCount++
|
|
||||||
return Div(Text(fmt.Sprintf("Render %d", callCount)))
|
|
||||||
})
|
|
||||||
|
|
||||||
// First render
|
|
||||||
Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Immediate second render should use cache
|
|
||||||
Render(CachedDiv())
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected still 1 render (cached), got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for expiration
|
|
||||||
time.Sleep(150 * time.Millisecond)
|
|
||||||
|
|
||||||
// Should render again after expiration
|
|
||||||
Render(CachedDiv())
|
|
||||||
if callCount != 2 {
|
|
||||||
t.Errorf("Expected 2 renders after expiration, got %d", callCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachedNode_ClearCache(t *testing.T) {
|
|
||||||
lruStore := cache.NewLRUStore[any, string](10)
|
|
||||||
defer lruStore.Close()
|
|
||||||
|
|
||||||
callCount := 0
|
|
||||||
|
|
||||||
CachedDiv := Cached(1*time.Hour, func() *Element {
|
|
||||||
callCount++
|
|
||||||
return Div(Text("Content"))
|
|
||||||
}, WithCacheStore(lruStore))
|
|
||||||
|
|
||||||
// Render and cache
|
|
||||||
element := CachedDiv()
|
|
||||||
Render(element)
|
|
||||||
|
|
||||||
if callCount != 1 {
|
|
||||||
t.Errorf("Expected 1 render, got %d", callCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear cache
|
|
||||||
node := element.meta.(*CachedNode)
|
|
||||||
node.ClearCache()
|
|
||||||
|
|
||||||
// Should render again after cache clear
|
|
||||||
Render(element)
|
|
||||||
|
|
||||||
if callCount != 2 {
|
|
||||||
t.Errorf("Expected 2 renders after cache clear, got %d", callCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function
|
|
||||||
func contains(s, substr string) bool {
|
|
||||||
return len(s) >= len(substr) && s[0:len(substr)] == substr ||
|
|
||||||
len(s) > len(substr) && contains(s[1:], substr)
|
|
||||||
}
|
|
||||||
|
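The recursive contains helper above re-implements substring search; in ordinary test code the standard library call is equivalent and clearer. A trivial sketch:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Same check the test helper performs, without the hand-rolled recursion.
    fmt.Println(strings.Contains("<div>User 3</div>", "User 3")) // true
}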
@@ -1,13 +1,13 @@
 package h
 
 import (
+	"github.com/google/uuid"
+	"github.com/stretchr/testify/assert"
+	"strconv"
 	"strings"
 	"sync"
 	"testing"
 	"time"
-
-	"github.com/google/uuid"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestRendererShouldRenderDocType(t *testing.T) {
@@ -474,112 +474,76 @@ func TestCacheByKeyT1Expired_2(t *testing.T) {
 }
 
 func TestClearExpiredCached(t *testing.T) {
+	t.Parallel()
 	renderCount := 0
-	cachedItem := Cached(time.Millisecond*2, func() *Element {
+	cachedItem := Cached(time.Millisecond*3, func() *Element {
 		renderCount++
-		return Div(Text("hello"))
+		return Pf("hello")
 	})
 
-	// First render
 	Render(cachedItem())
-	assert.Equal(t, 1, renderCount)
-
-	// Should use cache immediately
 	Render(cachedItem())
+	node := cachedItem().meta.(*CachedNode)
 	assert.Equal(t, 1, renderCount)
+	assert.NotEmpty(t, node.html)
 
-	// Wait for expiration
 	time.Sleep(time.Millisecond * 3)
+	node.ClearExpired()
 
-	// Should re-render after expiration
-	Render(cachedItem())
-	assert.Equal(t, 2, renderCount)
+	assert.Empty(t, node.html)
 }
|
|
||||||
func TestClearExpiredCacheByKey(t *testing.T) {
|
func TestClearExpiredCacheByKey(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
renderCount := 0
|
renderCount := 0
|
||||||
// Create two cached functions with different TTLs
|
cachedItem := CachedPerKeyT(time.Millisecond, func(key int) (any, GetElementFunc) {
|
||||||
shortLivedCache := CachedPerKeyT(time.Millisecond*1, func(key int) (int, GetElementFunc) {
|
|
||||||
return key, func() *Element {
|
return key, func() *Element {
|
||||||
renderCount++
|
renderCount++
|
||||||
return Div(Text("short-lived"))
|
return Pf(strconv.Itoa(key))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
longLivedCache := CachedPerKeyT(time.Hour, func(key int) (int, GetElementFunc) {
|
|
||||||
return key, func() *Element {
|
|
||||||
renderCount++
|
|
||||||
return Div(Text("long-lived"))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Render 100 short-lived items
|
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
Render(shortLivedCache(i))
|
Render(cachedItem(i))
|
||||||
}
|
}
|
||||||
assert.Equal(t, 100, renderCount)
|
|
||||||
|
|
||||||
// Render a long-lived item
|
node := cachedItem(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
|
||||||
Render(longLivedCache(999))
|
assert.Equal(t, 100, len(node.byKeyExpiration))
|
||||||
assert.Equal(t, 101, renderCount)
|
assert.Equal(t, 100, len(node.byKeyCache))
|
||||||
|
|
||||||
// Wait for expiration of the short-lived items
|
time.Sleep(time.Millisecond * 2)
|
||||||
time.Sleep(time.Millisecond * 3)
|
|
||||||
|
|
||||||
// Re-render some expired items - should trigger new renders
|
Render(cachedItem(0))
|
||||||
for i := 0; i < 10; i++ {
|
node.ClearExpired()
|
||||||
Render(shortLivedCache(i))
|
|
||||||
}
|
|
||||||
assert.Equal(t, 111, renderCount) // 101 + 10 re-renders
|
|
||||||
|
|
||||||
// The long-lived item should still be cached
|
assert.Equal(t, 1, len(node.byKeyExpiration))
|
||||||
Render(longLivedCache(999))
|
assert.Equal(t, 1, len(node.byKeyCache))
|
||||||
assert.Equal(t, 111, renderCount) // No additional render
|
|
||||||
|
|
||||||
// Clear cache manually on both
|
node.ClearCache()
|
||||||
shortNode := shortLivedCache(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
|
|
||||||
shortNode.ClearCache()
|
|
||||||
|
|
||||||
longNode := longLivedCache(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
|
assert.Equal(t, 0, len(node.byKeyExpiration))
|
||||||
longNode.ClearCache()
|
assert.Equal(t, 0, len(node.byKeyCache))
|
||||||
|
|
||||||
// Everything should re-render now
|
|
||||||
Render(shortLivedCache(0))
|
|
||||||
assert.Equal(t, 112, renderCount)
|
|
||||||
|
|
||||||
Render(longLivedCache(999))
|
|
||||||
assert.Equal(t, 113, renderCount)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackgroundCleaner(t *testing.T) {
|
func TestBackgroundCleaner(t *testing.T) {
|
||||||
renderCount := 0
|
t.Parallel()
|
||||||
cachedItem := CachedPerKeyT(time.Millisecond*100, func(key int) (int, GetElementFunc) {
|
cachedItem := CachedPerKeyT(time.Second*2, func(key int) (any, GetElementFunc) {
|
||||||
return key, func() *Element {
|
return key, func() *Element {
|
||||||
renderCount++
|
return Pf(strconv.Itoa(key))
|
||||||
return Div(Text("hello"))
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// Render 100 items
|
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
Render(cachedItem(i))
|
Render(cachedItem(i))
|
||||||
}
|
}
|
||||||
assert.Equal(t, 100, renderCount)
|
|
||||||
|
|
||||||
// Items should be cached immediately
|
node := cachedItem(0).meta.(*ByKeyEntry).parent.meta.(*CachedNode)
|
||||||
for i := 0; i < 10; i++ {
|
assert.Equal(t, 100, len(node.byKeyExpiration))
|
||||||
Render(cachedItem(i))
|
assert.Equal(t, 100, len(node.byKeyCache))
|
||||||
}
|
|
||||||
assert.Equal(t, 100, renderCount) // No additional renders
|
|
||||||
|
|
||||||
// Wait for expiration and cleanup
|
|
||||||
time.Sleep(time.Second * 3)
|
time.Sleep(time.Second * 3)
|
||||||
|
|
||||||
// Items should be expired and need re-rendering
|
assert.Equal(t, 0, len(node.byKeyExpiration))
|
||||||
for i := 0; i < 10; i++ {
|
assert.Equal(t, 0, len(node.byKeyCache))
|
||||||
Render(cachedItem(i))
|
|
||||||
}
|
|
||||||
assert.Equal(t, 110, renderCount) // 10 re-renders after expiration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEscapeHtml(t *testing.T) {
|
func TestEscapeHtml(t *testing.T) {
|
||||||
@@ -6,8 +6,8 @@ require (
 	github.com/alecthomas/chroma/v2 v2.14.0
 	github.com/go-chi/chi/v5 v5.1.0
 	github.com/google/uuid v1.6.0
-	github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
+	github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a
-	github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b
+	github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a
 	github.com/yuin/goldmark v1.7.4
 	github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
 )
@@ -19,10 +19,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
-github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
+github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
-github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b h1:jvfp35fig2TzBjAgw82fe8+7cvaLX9EbipZUlj8FDDY=
+github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a h1:pF7vwxObfFZAb0iIeXvgS701ZGRr7s8t8UycAMXTZ54=
-github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250703190716-06f01b3d7c1b/go.mod h1:FraJsj3NRuLBQDk83ZVa+psbNRNLe+rajVtVhYMEme4=
+github.com/maddalax/htmgo/tools/html-to-htmgo v0.0.0-20250106162449-4f537567ad5a/go.mod h1:FraJsj3NRuLBQDk83ZVa+psbNRNLe+rajVtVhYMEme4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -11,7 +11,7 @@ var ExcludeRootSnippet = `automatic_page_routing_ignore: ["pages/root.go"]`
 var AbstractedRootPageUsageSnippet = `func UserPage(ctx *h.RequestContext) *h.Page {
 	return base.RootPage(
 		h.Div(
-			h.Pf("User ID: %s", ctx.UrlParam("id")),
+			h.Pf("User ID: %s", ctx.Param("id")),
 		),
 }`
@@ -88,7 +88,7 @@ func autoRegistration() *h.Element {
 		users.go -> /users
 		users.$id.go -> /users/:id
 		`),
-		HelpText(`Note: id parameter can be accessed in your page with ctx.UrlParam("id")`),
+		HelpText(`Note: id parameter can be accessed in your page with ctx.Param("id")`),
 		Text(`
 		You may put any functions you like in your pages file, auto registration will ONLY register functions that return *h.Page
 		`),
@@ -18,10 +18,8 @@ var PartialsSnippet = `func CurrentTimePartial(ctx *h.RequestContext) *h.Partial
 
 var examplePageSnippet = `func CurrentTimePage(ctx *h.RequestContext) *h.Page {
 	return RootPage(
-		h.Div(
 			h.GetPartial(partials.CurrentTimePartial, "load, every 1s")
 		)
-	)
 }`
 
 var examplePartialSnippet = `func CurrentTimePartial(ctx *h.RequestContext) *h.Partial {
@@ -32,22 +32,10 @@ func CachingPerKey(ctx *h.RequestContext) *h.Page {
 				The arguments passed into cached component <b>DO NOT</b> affect the cache key. The only thing that affects the cache key is the key returned by the GetElementFuncWithKey function.
 				Ensure the declaration of the cached component is outside the function that uses it. This is to prevent the component from being redeclared on each request.
 			`),
-			Text(`
-				<b>New: Custom Cache Stores with Atomic Guarantees</b><br/>
-				htmgo now supports pluggable cache stores with built-in concurrency protection. The framework uses an atomic
-				GetOrCompute method that ensures only one goroutine computes a value for any given key, preventing duplicate
-				expensive operations like database queries or complex renders. This eliminates race conditions that could
-				previously cause the same content to be rendered multiple times.
-			`),
-			Text(`
-				You can implement custom caching backends like Redis, Memcached, or memory-bounded stores.
-				This helps prevent memory exhaustion attacks and enables distributed caching.
-				See <a href="/docs/performance/pluggable-caches" class="text-blue-500 hover:text-blue-400">Creating Custom Cache Stores</a> for more details.
-			`),
 			NextStep(
 				"mt-4",
 				PrevBlock("Caching Globally", DocPath("/performance/caching-globally")),
-				NextBlock("Custom Cache Stores", DocPath("/performance/pluggable-caches")),
+				NextBlock("Pushing Data", DocPath("/pushing-data/sse")),
 			),
 		),
 	)
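The removed paragraphs above point readers at pluggable cache stores; the pattern they describe is exercised earlier in this diff by TestCachedPerKey_WithLRUStore. A hedged sketch of wiring a per-key cached component to a bounded LRU store follows; the components package name, the profileStore variable, and the inline render body are placeholders, and the 10,000-item capacity and 15-minute TTL are example values.

package components

import (
    "time"

    "github.com/maddalax/htmgo/framework/h"
    "github.com/maddalax/htmgo/framework/h/cache"
)

// Bounded store: at most 10k cached profiles, least recently used evicted first.
var profileStore = cache.NewLRUStore[any, string](10_000)

// UserProfile caches its rendered HTML per user id in the LRU store above.
var UserProfile = h.CachedPerKeyT(15*time.Minute, func(userID int) (int, h.GetElementFunc) {
    return userID, func() *h.Element {
        // Placeholder render; replace with a real profile lookup and markup.
        return h.Div(h.Pf("User %d", userID))
    }
}, h.WithCacheStore(profileStore))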
@@ -1,451 +0,0 @@
package performance
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/maddalax/htmgo/framework/h"
|
|
||||||
. "htmgo-site/pages/docs"
|
|
||||||
"htmgo-site/ui"
|
|
||||||
)
|
|
||||||
|
|
||||||
func PluggableCaches(ctx *h.RequestContext) *h.Page {
|
|
||||||
return DocPage(
|
|
||||||
ctx,
|
|
||||||
h.Div(
|
|
||||||
h.Class("flex flex-col gap-3"),
|
|
||||||
Title("Creating Custom Cache Stores"),
|
|
||||||
Text(`
|
|
||||||
htmgo supports pluggable cache stores, allowing you to use any caching backend or implement custom caching strategies.
|
|
||||||
This feature enables better control over memory usage, distributed caching support, and protection against memory exhaustion attacks.
|
|
||||||
`),
|
|
||||||
|
|
||||||
SubTitle("The Cache Store Interface"),
|
|
||||||
Text(`
|
|
||||||
All cache stores implement the following interface:
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(CacheStoreInterface),
|
|
||||||
Text(`
|
|
||||||
The interface is generic, supporting any comparable key type and any value type.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>Important:</b> The <code>GetOrCompute</code> method provides <b>atomic guarantees</b>.
|
|
||||||
When multiple goroutines request the same key simultaneously, only one will execute the compute function,
|
|
||||||
preventing duplicate expensive operations like database queries or complex computations.
|
|
||||||
`),
|
|
||||||
|
|
||||||
SubTitle("Technical: The Race Condition Fix"),
|
|
||||||
Text(`
|
|
||||||
The previous implementation had a time-of-check to time-of-use (TOCTOU) race condition:
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
With GetOrCompute, the entire check-compute-store operation happens atomically while holding
|
|
||||||
the lock, eliminating the race window completely.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
The <b>Close()</b> method allows for cleanup of resources when the cache is no longer needed.
|
|
||||||
`),
|
|
||||||
|
|
||||||
SubTitle("Using Custom Cache Stores"),
|
|
||||||
Text(`
|
|
||||||
You can use custom cache stores in two ways:
|
|
||||||
`),
|
|
||||||
StepTitle("1. Per-Component Configuration"),
|
|
||||||
ui.GoCodeSnippet(PerComponentExample),
|
|
||||||
|
|
||||||
StepTitle("2. Global Default Configuration"),
|
|
||||||
ui.GoCodeSnippet(GlobalConfigExample),
|
|
||||||
|
|
||||||
SubTitle("Implementing a Custom Cache Store"),
|
|
||||||
Text(`
|
|
||||||
Here's a complete example of implementing a Redis-based cache store:
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(RedisCacheExample),
|
|
||||||
|
|
||||||
SubTitle("Built-in Cache Stores"),
|
|
||||||
Text(`
|
|
||||||
htmgo provides two built-in cache implementations:
|
|
||||||
`),
|
|
||||||
|
|
||||||
StepTitle("TTL Store (Default)"),
|
|
||||||
Text(`
|
|
||||||
The default cache store that maintains backward compatibility with existing htmgo applications.
|
|
||||||
It automatically removes expired entries based on TTL.
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(TTLStoreExample),
|
|
||||||
|
|
||||||
StepTitle("LRU Store"),
|
|
||||||
Text(`
|
|
||||||
A memory-bounded cache that evicts least recently used items when the size limit is reached.
|
|
||||||
This is useful for preventing memory exhaustion attacks.
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(LRUStoreExample),
|
|
||||||
|
|
||||||
SubTitle("Migration Guide"),
|
|
||||||
Text(`
|
|
||||||
<b>Good news!</b> Existing htmgo applications require <b>no changes</b> to work with the new cache system.
|
|
||||||
The default behavior remains exactly the same, with improved concurrency guarantees.
|
|
||||||
The framework uses the atomic GetOrCompute method internally, preventing race conditions
|
|
||||||
that could cause duplicate renders.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
If you want to take advantage of custom cache stores:
|
|
||||||
`),
|
|
||||||
|
|
||||||
StepTitle("Before (existing code):"),
|
|
||||||
ui.GoCodeSnippet(MigrationBefore),
|
|
||||||
|
|
||||||
StepTitle("After (with custom cache):"),
|
|
||||||
ui.GoCodeSnippet(MigrationAfter),
|
|
||||||
|
|
||||||
SubTitle("Best Practices"),
|
|
||||||
Text(`
|
|
||||||
<b>1. Resource Management:</b> Always implement the Close() method if your cache uses external resources.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>2. Thread Safety:</b> The GetOrCompute method must be thread-safe and provide atomic guarantees.
|
|
||||||
This means when multiple goroutines call GetOrCompute with the same key simultaneously,
|
|
||||||
only one should execute the compute function.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>3. Memory Bounds:</b> Consider implementing size limits to prevent unbounded memory growth.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>4. Error Handling:</b> Cache operations should be resilient to failures and not crash the application.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>5. Monitoring:</b> Consider adding metrics to track cache hit rates and performance.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>6. Atomic Operations:</b> Always use GetOrCompute for cache retrieval to ensure proper
|
|
||||||
concurrency handling and prevent cache stampedes.
|
|
||||||
`),
|
|
||||||
|
|
||||||
SubTitle("Common Use Cases"),
|
|
||||||
|
|
||||||
StepTitle("Distributed Caching"),
|
|
||||||
Text(`
|
|
||||||
Use Redis or Memcached for sharing cache across multiple application instances:
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(DistributedCacheExample),
|
|
||||||
|
|
||||||
StepTitle("Memory-Bounded Caching"),
|
|
||||||
Text(`
|
|
||||||
Prevent memory exhaustion by limiting cache size:
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(MemoryBoundedExample),
|
|
||||||
|
|
||||||
StepTitle("Tiered Caching"),
|
|
||||||
Text(`
|
|
||||||
Implement a multi-level cache with fast local storage and slower distributed storage:
|
|
||||||
`),
|
|
||||||
ui.GoCodeSnippet(TieredCacheExample),
|
|
||||||
|
|
||||||
Text(`
|
|
||||||
<b>Security Note:</b> The pluggable cache system helps mitigate memory exhaustion attacks by allowing
|
|
||||||
you to implement bounded caches. Always consider using size-limited caches in production environments
|
|
||||||
where untrusted input could influence cache keys.
|
|
||||||
`),
|
|
||||||
Text(`
|
|
||||||
<b>Concurrency Note:</b> The GetOrCompute method eliminates race conditions that could occur
|
|
||||||
in the previous implementation. When multiple goroutines request the same uncached key via
|
|
||||||
GetOrCompute method simultaneously, only one will execute the expensive render operation,
|
|
||||||
while others wait for the result. This prevents "cache stampedes" where many goroutines
|
|
||||||
simultaneously compute the same expensive value.
|
|
||||||
`),
|
|
||||||
|
|
||||||
NextStep(
"mt-4",
PrevBlock("Caching Per Key", DocPath("/performance/caching-per-key")),
NextBlock("Server Sent Events", DocPath("/pushing-data/sse")),
),
),
)
}

const CacheStoreInterface = `
type Store[K comparable, V any] interface {
	// Set adds or updates an entry in the cache with the given TTL
	Set(key K, value V, ttl time.Duration)

	// GetOrCompute atomically gets an existing value or computes and stores a new value
	// This is the primary method for cache retrieval and prevents duplicate computation
	GetOrCompute(key K, compute func() V, ttl time.Duration) V

	// Delete removes an entry from the cache
	Delete(key K)

	// Purge removes all items from the cache
	Purge()

	// Close releases any resources used by the cache
	Close()
}
`
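// For reference, a compact in-memory implementation of the Store interface could look like the
// sketch below (editorial illustration, not shipped with htmgo); expired entries are simply
// overwritten on the next GetOrCompute call. mapStore and item are assumed names.
//
//	type item[V any] struct {
//		value   V
//		expires time.Time
//	}
//
//	type mapStore[K comparable, V any] struct {
//		mu    sync.Mutex
//		items map[K]item[V]
//	}
//
//	func (s *mapStore[K, V]) Set(key K, value V, ttl time.Duration) {
//		s.mu.Lock()
//		defer s.mu.Unlock()
//		s.items[key] = item[V]{value: value, expires: time.Now().Add(ttl)}
//	}
//
//	func (s *mapStore[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
//		s.mu.Lock()
//		defer s.mu.Unlock()
//		if it, ok := s.items[key]; ok && time.Now().Before(it.expires) {
//			return it.value
//		}
//		v := compute()
//		s.items[key] = item[V]{value: v, expires: time.Now().Add(ttl)}
//		return v
//	}
//
//	func (s *mapStore[K, V]) Delete(key K) { s.mu.Lock(); delete(s.items, key); s.mu.Unlock() }
//	func (s *mapStore[K, V]) Purge()       { s.mu.Lock(); clear(s.items); s.mu.Unlock() }
//	func (s *mapStore[K, V]) Close()       {}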
const PerComponentExample = `
// Create a custom cache store
lruCache := cache.NewLRUStore[string, string](10000) // Max 10k items

// Use it with a cached component
var CachedUserProfile = h.CachedPerKeyT(
	15*time.Minute,
	getUserProfile,
	h.WithCacheStore(lruCache), // Pass the custom store
)
`

const GlobalConfigExample = `
// Set a global default cache provider
func init() {
	h.DefaultCacheProvider = func() cache.Store[any, string] {
		return cache.NewLRUStore[any, string](50000)
	}
}

// All cached components will now use LRU caching by default
var CachedData = h.Cached(5*time.Minute, getData) // Uses LRU store
`
const RedisCacheExample = `
package cache

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

type RedisStore[K comparable, V any] struct {
	client *redis.Client
	prefix string
	ttl    time.Duration
}

func NewRedisStore[K comparable, V any](client *redis.Client, prefix string, ttl time.Duration) *RedisStore[K, V] {
	return &RedisStore[K, V]{
		client: client,
		prefix: prefix,
		ttl:    ttl,
	}
}

func (r *RedisStore[K, V]) Set(key K, value V, ttl time.Duration) {
	ctx := context.Background()
	redisKey := fmt.Sprintf("%s:%v", r.prefix, key)

	// Serialize value
	data, err := json.Marshal(value)
	if err != nil {
		return
	}

	// Set in Redis with TTL
	r.client.Set(ctx, redisKey, data, ttl)
}

func (r *RedisStore[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
	ctx := context.Background()
	redisKey := fmt.Sprintf("%s:%v", r.prefix, key)

	// Try to get from Redis first
	data, err := r.client.Get(ctx, redisKey).Bytes()
	if err == nil {
		// Found in cache, deserialize
		var value V
		if err := json.Unmarshal(data, &value); err == nil {
			return value
		}
	}

	// Not in cache or error, compute new value
	value := compute()

	// Serialize and store
	if data, err := json.Marshal(value); err == nil {
		r.client.Set(ctx, redisKey, data, ttl)
	}

	return value
}

func (r *RedisStore[K, V]) Purge() {
	ctx := context.Background()
	// Delete all keys with our prefix
	iter := r.client.Scan(ctx, 0, r.prefix+"*", 0).Iterator()
	for iter.Next(ctx) {
		r.client.Del(ctx, iter.Val())
	}
}

func (r *RedisStore[K, V]) Delete(key K) {
	ctx := context.Background()
	redisKey := fmt.Sprintf("%s:%v", r.prefix, key)
	r.client.Del(ctx, redisKey)
}

func (r *RedisStore[K, V]) Close() {
	r.client.Close()
}

// Usage
redisClient := redis.NewClient(&redis.Options{
	Addr: "localhost:6379",
})

redisCache := NewRedisStore[string, string](
	redisClient,
	"myapp:cache",
	15*time.Minute,
)

var CachedUserData = h.CachedPerKeyT(
	15*time.Minute,
	getUserData,
	h.WithCacheStore(redisCache),
)
`
const TTLStoreExample = `
// Create a TTL-based cache (this is the default)
ttlCache := cache.NewTTLStore[string, string]()

// Use explicitly if needed
var CachedData = h.Cached(
	5*time.Minute,
	getData,
	h.WithCacheStore(ttlCache),
)
`

const LRUStoreExample = `
// Create an LRU cache with max 1000 items
lruCache := cache.NewLRUStore[int, UserProfile](1000)

// Use with per-key caching
var CachedUserProfile = h.CachedPerKeyT(
	30*time.Minute,
	func(userID int) (int, h.GetElementFunc) {
		return userID, func() *h.Element {
			return renderUserProfile(userID)
		}
	},
	h.WithCacheStore(lruCache),
)
`
const MigrationBefore = `
// Existing code - continues to work without changes
var CachedDashboard = h.Cached(10*time.Minute, func() *h.Element {
	return renderDashboard()
})

var CachedUserData = h.CachedPerKeyT(15*time.Minute, func(userID string) (string, h.GetElementFunc) {
	return userID, func() *h.Element {
		return renderUserData(userID)
	}
})
`

const MigrationAfter = `
// Enhanced with custom cache store
memoryCache := cache.NewLRUStore[any, string](10000)

var CachedDashboard = h.Cached(10*time.Minute, func() *h.Element {
	return renderDashboard()
}, h.WithCacheStore(memoryCache))

var CachedUserData = h.CachedPerKeyT(15*time.Minute, func(userID string) (string, h.GetElementFunc) {
	return userID, func() *h.Element {
		return renderUserData(userID)
	}
}, h.WithCacheStore(memoryCache))
`
const DistributedCacheExample = `
// Initialize Redis client
redisClient := redis.NewClient(&redis.Options{
	Addr:     "redis-cluster:6379",
	Password: os.Getenv("REDIS_PASSWORD"),
	DB:       0,
})

// Create distributed cache (typed [any, string] so it can serve as the global default)
distributedCache := NewRedisStore[any, string](
	redisClient,
	"webapp:cache",
	30*time.Minute,
)

// Set as global default
h.DefaultCacheProvider = func() cache.Store[any, string] {
	return distributedCache
}
`

const MemoryBoundedExample = `
// Limit cache to 5000 items to prevent memory exhaustion
boundedCache := cache.NewLRUStore[string, string](5000)

// Use for user-generated content where keys might be unpredictable
var CachedSearchResults = h.CachedPerKeyT(
	5*time.Minute,
	func(query string) (string, h.GetElementFunc) {
		// Normalize and validate query to prevent cache poisoning
		normalized := normalizeSearchQuery(query)
		return normalized, func() *h.Element {
			return performSearch(normalized)
		}
	},
	h.WithCacheStore(boundedCache),
)
`
const TieredCacheExample = `
type TieredCache[K comparable, V any] struct {
	l1 cache.Store[K, V] // Fast local cache
	l2 cache.Store[K, V] // Slower distributed cache
}

func NewTieredCache[K comparable, V any](local, distributed cache.Store[K, V]) *TieredCache[K, V] {
	return &TieredCache[K, V]{l1: local, l2: distributed}
}

func (t *TieredCache[K, V]) Set(key K, value V, ttl time.Duration) {
	t.l1.Set(key, value, ttl)
	t.l2.Set(key, value, ttl)
}

// GetOrCompute checks L1 first, then L2, and only runs compute when neither tier has the key.
// An L2 hit is written back into L1 for next time.
func (t *TieredCache[K, V]) GetOrCompute(key K, compute func() V, ttl time.Duration) V {
	return t.l1.GetOrCompute(key, func() V {
		// L1 miss: fall through to L2, computing (and storing in L2) only if it is also missing
		return t.l2.GetOrCompute(key, compute, ttl)
	}, ttl)
}

func (t *TieredCache[K, V]) Delete(key K) {
	t.l1.Delete(key)
	t.l2.Delete(key)
}

func (t *TieredCache[K, V]) Purge() {
	t.l1.Purge()
	t.l2.Purge()
}

func (t *TieredCache[K, V]) Close() {
	t.l1.Close()
	t.l2.Close()
}

// Usage (redisClient as created in the Redis example above)
tieredCache := NewTieredCache(
	cache.NewLRUStore[string, string](1000),               // L1: 1k items in memory
	NewRedisStore[string, string](redisClient, "", 1*time.Hour), // L2: Redis
)
`
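// Since the TieredCache above satisfies the cache.Store interface, it can be plugged into a
// cached component like any other store (sketch; CachedReport and renderReport are
// illustrative names, not part of the original example):
//
//	var CachedReport = h.CachedPerKeyT(
//		30*time.Minute,
//		func(id string) (string, h.GetElementFunc) {
//			return id, func() *h.Element { return renderReport(id) }
//		},
//		h.WithCacheStore(tieredCache),
//	)
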
@ -38,7 +38,7 @@ func ServerSentEvents(ctx *h.RequestContext) *h.Page {
ui.GoCodeSnippet(SseClearInputExample),
NextStep(
"mt-4",
PrevBlock("Custom Cache Stores", DocPath("/performance/pluggable-caches")),
PrevBlock("Caching Per Key", DocPath("/performance/caching-per-key")),
NextBlock("HTMX extensions", DocPath("/htmx-extensions/overview")),
),
),

@ -59,7 +59,6 @@ var sections = []Section{
Pages: []*Page{
{Title: "Caching Globally", Path: DocPath("/performance/caching-globally")},
{Title: "Caching Per Key", Path: DocPath("/performance/caching-per-key")},
{Title: "Custom Cache Stores", Path: DocPath("/performance/pluggable-caches")},
},
},
{

@ -2,7 +2,7 @@ module starter-template

go 1.23.0

require github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b
require github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a

require (
github.com/go-chi/chi/v5 v5.1.0 // indirect

@ -4,8 +4,8 @@ github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b h1:m+xI+HBEQdie/Rs+mYI0HTFTMlYQSCv0l/siPDoywA4=
github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a h1:OXcVeJO6jg5AA/vZqmjvIKf/68Mtc9iUSduA0S5gmUo=
github.com/maddalax/htmgo/framework v1.0.7-0.20250703190716-06f01b3d7c1b/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/maddalax/htmgo/framework v1.0.6-0.20250106162449-4f537567ad5a/go.mod h1:NGGzWVXWksrQJ9kV9SGa/A1F1Bjsgc08cN7ZVb98RqY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=