Testing a sharded cache. Could be useful for massively parallel applications
parent 0f0584a805
commit 52c269d8ae

cache.go | 161
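As a rough illustration of the API this commit adds, a sharded cache could be used much like the unsharded one, with an extra shard-count argument (a minimal sketch; the import path, shard count, and key names below are illustrative and not part of the commit):

package main

import (
	"fmt"
	"time"

	cache "github.com/pmylund/go-cache" // assumed import path for this package
)

func main() {
	// 8 shards, 5-minute default expiration, 30-second cleanup interval.
	sc := cache.NewSharded(8, 5*time.Minute, 30*time.Second)

	// Each operation hashes the key to pick one of the underlying caches,
	// so goroutines touching different keys mostly lock different shards.
	sc.Set("foo", "bar", 0) // a duration of 0 uses the default expiration
	if v, found := sc.Get("foo"); found {
		fmt.Println(v)
	}
}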
@@ -1,8 +1,10 @@
 package cache
 
 import (
+	"encoding/binary"
 	"encoding/gob"
 	"fmt"
+	"hash/fnv"
 	"io"
 	"os"
 	"reflect"
@@ -277,20 +279,19 @@ func (j *janitor) Run(c *cache) {
 	}
 }
 
-func (j *janitor) Stop() {
-	j.stop <- true
-}
-
 func stopJanitor(c *Cache) {
-	c.janitor.Stop()
+	c.janitor.stop <- true
 }
 
-// Return a new cache with a given default expiration duration and cleanup
-// interval. If the expiration duration is less than 1, the items in the cache
-// never expire (by default), and must be deleted manually. If the cleanup
-// interval is less than one, expired items are not deleted from the cache
-// before their next lookup or before calling DeleteExpired.
-func New(de, ci time.Duration) *Cache {
+func runJanitor(c *cache, ci time.Duration) {
+	j := &janitor{
+		Interval: ci,
+	}
+	c.janitor = j
+	go j.Run(c)
+}
+
+func newCache(de time.Duration) *cache {
 	if de == 0 {
 		de = -1
 	}
@@ -299,21 +300,145 @@ func New(de, ci time.Duration) *Cache {
 		Items:             map[string]*Item{},
 		mu:                sync.Mutex{},
 	}
-	if ci > 0 {
-		j := &janitor{
-			Interval: ci,
-		}
-		c.janitor = j
-		go j.Run(c)
-	}
+	return c
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than 1, the items in the cache
+// never expire (by default), and must be deleted manually. If the cleanup
+// interval is less than one, expired items are not deleted from the cache
+// before their next lookup or before calling DeleteExpired.
+func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
+	c := newCache(defaultExpiration)
 	// This trick ensures that the janitor goroutine (which--granted it
 	// was enabled--is running DeleteExpired on c forever) does not keep
 	// the returned C object from being garbage collected. When it is
 	// garbage collected, the finalizer stops the janitor goroutine, after
 	// which c can be collected.
 	C := &Cache{c}
-	if ci > 0 {
+	if cleanupInterval > 0 {
+		runJanitor(c, cleanupInterval)
 		runtime.SetFinalizer(C, stopJanitor)
 	}
 	return C
 }
+
+type ShardedCache struct {
+	*shardedCache
+}
+
+type shardedCache struct {
+	m       uint32
+	cs      []*cache
+	janitor *shardedJanitor
+}
+
+func (sc *shardedCache) index(k string) uint32 {
+	h := fnv.New32()
+	h.Write([]byte(k))
+	n := binary.BigEndian.Uint32(h.Sum(nil))
+	return n % sc.m
+}
+
+func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
+	sc.cs[sc.index(k)].Set(k, x, d)
+}
+
+func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
+	return sc.cs[sc.index(k)].Add(k, x, d)
+}
+
+func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
+	return sc.cs[sc.index(k)].Replace(k, x, d)
+}
+
+func (sc *shardedCache) Get(k string) (interface{}, bool) {
+	return sc.cs[sc.index(k)].Get(k)
+}
+
+func (sc *shardedCache) Increment(k string, n int64) error {
+	return sc.cs[sc.index(k)].Increment(k, n)
+}
+
+func (sc *shardedCache) IncrementFloat(k string, n float64) error {
+	return sc.cs[sc.index(k)].IncrementFloat(k, n)
+}
+
+func (sc *shardedCache) Decrement(k string, n int64) error {
+	return sc.cs[sc.index(k)].Decrement(k, n)
+}
+
+func (sc *shardedCache) Delete(k string) {
+	sc.cs[sc.index(k)].Delete(k)
+}
+
+func (sc *shardedCache) DeleteExpired() {
+	for _, v := range sc.cs {
+		v.DeleteExpired()
+	}
+}
+
+func (sc *shardedCache) Flush() {
+	for _, v := range sc.cs {
+		v.Flush()
+	}
+}
+
+type shardedJanitor struct {
+	Interval time.Duration
+	stop     chan bool
+}
+
+func (j *shardedJanitor) Run(sc *shardedCache) {
+	j.stop = make(chan bool)
+	tick := time.Tick(j.Interval)
+	for {
+		select {
+		case <-tick:
+			sc.DeleteExpired()
+		case <-j.stop:
+			return
+		}
+	}
+}
+
+func stopShardedJanitor(sc *ShardedCache) {
+	sc.janitor.stop <- true
+}
+
+func runShardedJanitor(sc *shardedCache, ci time.Duration) {
+	j := &shardedJanitor{
+		Interval: ci,
+	}
+	sc.janitor = j
+	go j.Run(sc)
+}
+
+func newShardedCache(n int, de time.Duration) *shardedCache {
+	sc := &shardedCache{
+		m:  uint32(n - 1),
+		cs: make([]*cache, n),
+	}
+	for i := 0; i < n; i++ {
+		c := &cache{
+			DefaultExpiration: de,
+			Items:             map[string]*Item{},
+			mu:                sync.Mutex{},
+		}
+		sc.cs[i] = c
+	}
+	return sc
+}
+
+func NewSharded(shards int, defaultExpiration, cleanupInterval time.Duration) *ShardedCache {
+	if defaultExpiration == 0 {
+		defaultExpiration = -1
+	}
+	sc := newShardedCache(shards, defaultExpiration)
+	SC := &ShardedCache{sc}
+	if cleanupInterval > 0 {
+		runShardedJanitor(sc, cleanupInterval)
+		runtime.SetFinalizer(SC, stopShardedJanitor)
+	}
+	return SC
+}
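The shard-selection logic in the diff above can be read in isolation. The sketch below reproduces the same FNV-32 hash and modulo reduction outside the package; the key strings and the shard count of 20 mirror the new benchmark and are illustrative only:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// shardIndex mirrors (*shardedCache).index from the diff above: the FNV-32
// hash of the key, reduced modulo m (which newShardedCache sets to shards-1).
func shardIndex(k string, m uint32) uint32 {
	h := fnv.New32()
	h.Write([]byte(k))
	n := binary.BigEndian.Uint32(h.Sum(nil))
	return n % m
}

func main() {
	// With 20 shards, as in the new benchmark, m is 19.
	for _, k := range []string{"foo", "bar", "baz"} {
		fmt.Printf("%s -> shard %d\n", k, shardIndex(k, 19))
	}
}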
cache_test.go

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"io/ioutil"
 	"runtime"
+	"strconv"
 	"sync"
 	"testing"
 	"time"
@@ -636,18 +637,22 @@ func TestSerializeUnserializable(t *testing.T) {
 }
 
 func BenchmarkCacheGet(b *testing.B) {
+	b.StopTimer()
 	tc := New(0, 0)
 	tc.Set("foo", "bar", 0)
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		tc.Get("foo")
 	}
 }
 
 func BenchmarkMutexMapGet(b *testing.B) {
+	b.StopTimer()
 	m := map[string]string{
 		"foo": "bar",
 	}
 	mu := sync.Mutex{}
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		mu.Lock()
 		_, _ = m["foo"]
@@ -656,12 +661,14 @@ func BenchmarkMutexMapGet(b *testing.B) {
 }
 
 func BenchmarkCacheGetConcurrent(b *testing.B) {
+	b.StopTimer()
 	tc := New(0, 0)
 	tc.Set("foo", "bar", 0)
 	wg := new(sync.WaitGroup)
 	workers := runtime.NumCPU()
 	each := b.N / workers
 	wg.Add(workers)
+	b.StartTimer()
 	for i := 0; i < workers; i++ {
 		go func() {
 			for j := 0; j < each; j++ {
@@ -674,6 +681,7 @@ func BenchmarkCacheGetConcurrent(b *testing.B) {
 }
 
 func BenchmarkMutexMapGetConcurrent(b *testing.B) {
+	b.StopTimer()
 	m := map[string]string{
 		"foo": "bar",
 	}
@@ -682,6 +690,7 @@ func BenchmarkMutexMapGetConcurrent(b *testing.B) {
 	workers := runtime.NumCPU()
 	each := b.N / workers
 	wg.Add(workers)
+	b.StartTimer()
 	for i := 0; i < workers; i++ {
 		go func() {
 			for j := 0; j < each; j++ {
@@ -695,16 +704,72 @@ func BenchmarkMutexMapGetConcurrent(b *testing.B) {
 	wg.Wait()
 }
 
-func BenchmarkCacheSet(b *testing.B) {
+func BenchmarkCacheGetManyConcurrent(b *testing.B) {
+	// This is the same as BenchmarkCacheGetConcurrent, but its result
+	// can be compared against BenchmarkShardedCacheGetManyConcurrent.
+	b.StopTimer()
+	n := 10000
 	tc := New(0, 0)
+	keys := make([]string, n)
+	for i := 0; i < n; i++ {
+		k := "foo" + strconv.Itoa(n)
+		keys[i] = k
+		tc.Set(k, "bar", 0)
+	}
+	each := b.N / n
+	wg := new(sync.WaitGroup)
+	wg.Add(n)
+	for _, v := range keys {
+		go func() {
+			for j := 0; j < each; j++ {
+				tc.Get(v)
+			}
+			wg.Done()
+		}()
+	}
+	b.StartTimer()
+	wg.Wait()
+}
+
+func BenchmarkShardedCacheGetManyConcurrent(b *testing.B) {
+	b.StopTimer()
+	n := 10000
+	tsc := NewSharded(20, 0, 0)
+	keys := make([]string, n)
+	for i := 0; i < n; i++ {
+		k := "foo" + strconv.Itoa(n)
+		keys[i] = k
+		tsc.Set(k, "bar", 0)
+	}
+	each := b.N / n
+	wg := new(sync.WaitGroup)
+	wg.Add(n)
+	for _, v := range keys {
+		go func() {
+			for j := 0; j < each; j++ {
+				tsc.Get(v)
+			}
+			wg.Done()
+		}()
+	}
+	b.StartTimer()
+	wg.Wait()
+}
+
+func BenchmarkCacheSet(b *testing.B) {
+	b.StopTimer()
+	tc := New(0, 0)
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		tc.Set("foo", "bar", 0)
 	}
 }
 
 func BenchmarkMutexMapSet(b *testing.B) {
+	b.StopTimer()
 	m := map[string]string{}
 	mu := sync.Mutex{}
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		mu.Lock()
 		m["foo"] = "bar"
@@ -713,7 +778,9 @@ func BenchmarkMutexMapSet(b *testing.B) {
 }
 
 func BenchmarkCacheSetDelete(b *testing.B) {
+	b.StopTimer()
 	tc := New(0, 0)
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		tc.Set("foo", "bar", 0)
 		tc.Delete("foo")
@@ -721,8 +788,10 @@ func BenchmarkCacheSetDelete(b *testing.B) {
 }
 
 func BenchmarkMutexMapSetDelete(b *testing.B) {
+	b.StopTimer()
 	m := map[string]string{}
 	mu := sync.Mutex{}
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		mu.Lock()
 		m["foo"] = "bar"
@@ -734,7 +803,9 @@ func BenchmarkMutexMapSetDelete(b *testing.B) {
 }
 
 func BenchmarkCacheSetDeleteSingleLock(b *testing.B) {
+	b.StopTimer()
 	tc := New(0, 0)
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		tc.mu.Lock()
 		tc.set("foo", "bar", 0)
@@ -744,8 +815,10 @@ func BenchmarkCacheSetDeleteSingleLock(b *testing.B) {
 }
 
 func BenchmarkMutexMapSetDeleteSingleLock(b *testing.B) {
+	b.StopTimer()
 	m := map[string]string{}
 	mu := sync.Mutex{}
+	b.StartTimer()
 	for i := 0; i < b.N; i++ {
 		mu.Lock()
 		m["foo"] = "bar"
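With the commit checked out, the sharded and unsharded lookup benchmarks added above could be compared using the standard Go tooling, for example (an illustrative invocation, not part of the commit):

go test -bench='GetManyConcurrent' -cpu=4

The -bench regular expression matches both BenchmarkCacheGetManyConcurrent and BenchmarkShardedCacheGetManyConcurrent, and -cpu sets GOMAXPROCS for the run.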