fetched recent changes

Giuseppe 2015-12-01 19:57:19 +01:00
commit c562196a80
4 changed files with 125 additions and 43 deletions

View File

@ -103,4 +103,4 @@ one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats
### Reference
`godoc` or [http://godoc.org/github.com/pmylund/go-cache](http://godoc.org/github.com/pmylund/go-cache)
`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)

View File

@ -11,24 +11,25 @@ import (
"time"
)
var emptyTime = time.Time{}
type Item struct {
Object interface{}
Expiration time.Time
Expiration int64
Key string
}
func (item Item) Less(than llrb.Item) bool {
return item.Expiration.Before(than.(Item).Expiration)
//return item.Expiration.Before(than.(Item).Expiration)
return item.Expiration < than.(Item).Expiration
}
// Returns true if the item has expired.
func (item Item) Expired() bool {
if item.Expiration == emptyTime {
if item.Expiration == 0 {
return false
}
return item.Expiration.Before(time.Now())
return time.Now().UnixNano() > item.Expiration
}
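
Storing the deadline as a UnixNano int64 instead of a time.Time turns both the tree ordering in Less and the check in Expired into plain integer comparisons, with the zero value meaning "never expires". A minimal standalone sketch of the same idea (the entry type and names below are illustrative, not this package's API):

```go
package main

import (
	"fmt"
	"time"
)

// entry stores its deadline as a UnixNano int64; the zero value 0 means
// "never expires", so no sentinel time.Time value is needed.
type entry struct {
	value      interface{}
	expiration int64
}

func (e entry) expired(now int64) bool {
	if e.expiration == 0 {
		return false
	}
	return now > e.expiration
}

func main() {
	e := entry{value: "bar", expiration: time.Now().Add(50 * time.Millisecond).UnixNano()}
	fmt.Println(e.expired(time.Now().UnixNano())) // false
	time.Sleep(60 * time.Millisecond)
	fmt.Println(e.expired(time.Now().UnixNano())) // true
}
```
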
const (
@ -74,19 +75,18 @@ func (c *cache) set(k string, x interface{}, d time.Duration) {
d = c.defaultExpiration
}
if d > 0 {
item.Expiration = time.Now().Add(d)
item.Expiration = time.Now().Add(d).UnixNano()
//if an item with the same key exists in the cache, remove it from the bst
old, found := c.items[k]
if found {
c.sortedItems.Delete(old)
}
c.sortedItems.InsertNoReplace(item)
} else {
item.Expiration = emptyTime
}
c.items[k] = item
}
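
set now also maintains an expiration-ordered LLRB tree next to the map, removing any stale entry and inserting the new one whenever a key is re-set. A standalone sketch of that bookkeeping, assuming the commonly used github.com/petar/GoLLRB/llrb package (the import is not visible in this hunk) and a toy node type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/petar/GoLLRB/llrb"
)

// node is ordered by its UnixNano deadline, mirroring Item.Less above.
type node struct {
	key        string
	expiration int64
}

func (n node) Less(than llrb.Item) bool {
	return n.expiration < than.(node).expiration
}

func main() {
	tree := llrb.New()
	now := time.Now().UnixNano()

	// First Set of "foo".
	old := node{key: "foo", expiration: now + int64(time.Minute)}
	tree.InsertNoReplace(old)

	// Re-setting the same key: remove the stale tree entry before inserting
	// the new one, so the tree never holds two entries for one key.
	tree.Delete(old)
	tree.InsertNoReplace(node{key: "foo", expiration: now + int64(2*time.Minute)})

	fmt.Println(tree.Len()) // 1
}
```
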
// Add an item to the cache only if an item doesn't already exist for the given
// key, or if the existing item has expired. Returns an error otherwise.
func (c *cache) Add(k string, x interface{}, d time.Duration) error {
@ -119,16 +119,34 @@ func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
// whether the key was found.
func (c *cache) Get(k string) (interface{}, bool) {
c.mu.RLock()
x, found := c.get(k)
// "Inlining" of get and Expired
item, found := c.items[k]
if !found {
c.mu.RUnlock()
return nil, false
}
if item.Expiration > 0 {
if time.Now().UnixNano() > item.Expiration {
c.mu.RUnlock()
return nil, false
}
}
c.mu.RUnlock()
return x, found
return item.Object, true
}
func (c *cache) get(k string) (interface{}, bool) {
item, found := c.items[k]
if !found || item.Expired() {
if !found {
return nil, false
}
// "Inlining" of Expired
if item.Expiration > 0 {
if time.Now().UnixNano() > item.Expiration {
return nil, false
}
}
return item.Object, true
}
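
The rewritten Get does the map lookup and the deadline check directly under the read lock instead of calling get and Expired, releasing the lock explicitly on every return path. The same shape over a plain map, with a hypothetical store type rather than the cache's API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	value      interface{}
	expiration int64 // UnixNano deadline, 0 = never expires
}

type store struct {
	mu    sync.RWMutex
	items map[string]entry
}

// lookup takes the read lock once, checks the deadline inline instead of
// calling out to helpers, and unlocks explicitly on every return path.
func (s *store) lookup(k string) (interface{}, bool) {
	s.mu.RLock()
	e, ok := s.items[k]
	if !ok {
		s.mu.RUnlock()
		return nil, false
	}
	if e.expiration > 0 && time.Now().UnixNano() > e.expiration {
		s.mu.RUnlock()
		return nil, false
	}
	s.mu.RUnlock()
	return e.value, true
}

func main() {
	s := &store{items: map[string]entry{"foo": {value: "bar"}}}
	v, ok := s.lookup("foo")
	fmt.Println(v, ok) // bar true
}
```
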
@ -144,7 +162,7 @@ func (c *cache) Increment(k string, n int64) error {
c.mu.Unlock()
return fmt.Errorf("Item %s not found", k)
}
if v.Expiration != emptyTime {
if v.Expiration != 0 {
c.sortedItems.Delete(v)
}
switch v.Object.(type) {
@ -179,7 +197,7 @@ func (c *cache) Increment(k string, n int64) error {
return fmt.Errorf("The value for %s is not an integer", k)
}
c.items[k] = v
if v.Expiration != emptyTime {
if v.Expiration != 0 {
c.sortedItems.InsertNoReplace(v)
}
c.mu.Unlock()
@ -885,7 +903,7 @@ func (c *cache) delete(k string) (interface{}, bool) {
func (c *cache) DeleteExpired() {
var evictedItems []Item
c.mu.Lock()
c.sortedItems.DescendLessOrEqual(Item{Expiration: time.Now()}, func(i llrb.Item) bool {
c.sortedItems.DescendLessOrEqual(Item{Expiration: time.Now().UnixNano()}, func(i llrb.Item) bool {
v := i.(Item)
c.delete(v.Key)
evictedItems = append(evictedItems, v)
@ -902,13 +920,15 @@ func (c *cache) DeleteExpired() {
}
}
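
Because the tree is ordered by deadline, DeleteExpired only needs to visit entries at or below a pivot stamped with the current time instead of scanning every key. A standalone sketch of that sweep, again assuming github.com/petar/GoLLRB/llrb and a toy node type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/petar/GoLLRB/llrb"
)

type node struct {
	key        string
	expiration int64
}

func (n node) Less(than llrb.Item) bool {
	return n.expiration < than.(node).expiration
}

func main() {
	tree := llrb.New()
	now := time.Now().UnixNano()
	tree.InsertNoReplace(node{key: "stale", expiration: now - int64(time.Second)})
	tree.InsertNoReplace(node{key: "fresh", expiration: now + int64(time.Hour)})

	// Collect everything at or below the "now" pivot, i.e. the expired items.
	var expired []node
	tree.DescendLessOrEqual(node{expiration: now}, func(i llrb.Item) bool {
		expired = append(expired, i.(node))
		return true
	})
	// Delete after the walk so the tree is not mutated while iterating.
	for _, n := range expired {
		tree.Delete(n)
	}
	fmt.Println(len(expired), tree.Len()) // 1 1
}
```
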
// Sets an (optional) function that is called with the key and value when an
// item is evicted from the cache. (Including when it is deleted manually, but
// not when it is overwritten.) Set to nil to disable.
func (c *cache) OnEvicted(f func(string, interface{})) {
c.mu.Lock()
defer c.mu.Unlock()
c.onEvicted = f
c.mu.Unlock()
}
// Write the cache's items (using Gob) to an io.Writer.
@ -965,7 +985,12 @@ func (c *cache) Load(r io.Reader) error {
ov, found := c.items[k]
if !found || ov.Expired() {
c.items[k] = v
if found {
c.sortedItems.Delete(ov)
}
c.sortedItems.InsertNoReplace(v)
}
}
}
return err
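
Load decodes a Gob snapshot and adopts only entries whose keys are missing (or expired) locally, now also keeping the tree in step. A minimal sketch of the decode-and-merge step on its own, using a concrete value type so no gob type registration is needed (names are hypothetical):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// item mirrors the shape of a cache entry for this sketch; Object is a
// concrete string here to keep gob registration out of the picture.
type item struct {
	Object     string
	Expiration int64
}

func main() {
	// Save: encode the items map to any io.Writer.
	src := map[string]item{"a": {Object: "1"}, "b": {Object: "2"}}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		panic(err)
	}

	// Load: decode into a scratch map, then merge only keys that are
	// missing in the live map, as the hunk above does.
	live := map[string]item{"a": {Object: "already here"}}
	loaded := map[string]item{}
	if err := gob.NewDecoder(&buf).Decode(&loaded); err != nil {
		panic(err)
	}
	for k, v := range loaded {
		if _, found := live[k]; !found {
			live[k] = v
		}
	}
	fmt.Println(live["a"].Object, live["b"].Object) // already here 2
}
```
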
@ -1013,6 +1038,7 @@ func (c *cache) ItemCount() int {
func (c *cache) Flush() {
c.mu.Lock()
c.items = map[string]Item{}
c.sortedItems = llrb.New()
c.mu.Unlock()
}
@ -1055,16 +1081,16 @@ func newCache(de time.Duration, m map[string]Item) *cache {
defaultExpiration: de,
items: m,
}
return c
}
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
c := newCache(de, m)
c.sortedItems = llrb.New()
//we can probably do bulk insertion here to speed it up
for _, item := range m {
c.sortedItems.InsertNoReplace(item)
}
return c
}
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
c := newCache(de, m)
// This trick ensures that the janitor goroutine (which--granted it
// was enabled--is running DeleteExpired on c forever) does not keep
// the returned C object from being garbage collected. When it is
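
The comment above (cut off by the hunk) describes the usual finalizer arrangement: the janitor goroutine holds a reference only to the inner cache value, and a finalizer on the thin exported wrapper stops the janitor once callers drop the wrapper, so the goroutine never pins the cache. A self-contained sketch of that pattern, with hypothetical names:

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

// inner does the actual caching work; the janitor goroutine only ever
// references this value.
type inner struct {
	stop chan struct{}
}

func (c *inner) runJanitor(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// DeleteExpired would run here.
		case <-c.stop:
			return
		}
	}
}

// Outer is the thin value handed to callers. Its finalizer stops the
// janitor, so once callers drop Outer the goroutine exits and inner can
// be garbage collected as well.
type Outer struct {
	*inner
}

func New(interval time.Duration) *Outer {
	c := &inner{stop: make(chan struct{})}
	go c.runJanitor(interval)
	o := &Outer{c}
	runtime.SetFinalizer(o, func(o *Outer) { close(o.stop) })
	return o
}

func main() {
	c := New(10 * time.Millisecond)
	_ = c
	fmt.Println("cache with janitor running")
}
```
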

View File

@ -110,11 +110,11 @@ func TestNewFrom(t *testing.T) {
m := map[string]Item{
"a": Item{
Object: 1,
Expiration: emptyTime,
Expiration: 0,
},
"b": Item{
Object: 2,
Expiration: emptyTime,
Expiration: 0,
},
}
tc := NewFrom(DefaultExpiration, 0, m)
@ -1425,9 +1425,17 @@ func TestSerializeUnserializable(t *testing.T) {
}
}
func BenchmarkCacheGet(b *testing.B) {
func BenchmarkCacheGetExpiring(b *testing.B) {
benchmarkCacheGet(b, 5*time.Minute)
}
func BenchmarkCacheGetNotExpiring(b *testing.B) {
benchmarkCacheGet(b, NoExpiration)
}
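
Each benchmark in this file is split the same way: two thin named benchmarks pick the expiration and a shared helper below them holds the measured loop, so the expiring and non-expiring paths run identical code. The same layout in a self-contained form, using a toy cache type rather than this package:

```go
package toycache

import (
	"testing"
	"time"
)

// toy is a stand-in cache; only the benchmark layout matters here.
type toy struct {
	items map[string]string
	ttl   time.Duration
}

func newToy(ttl time.Duration) *toy {
	return &toy{items: map[string]string{"foo": "bar"}, ttl: ttl}
}

func (t *toy) Get(k string) (string, bool) {
	v, ok := t.items[k]
	return v, ok
}

// Two thin named benchmarks choose the configuration...
func BenchmarkToyGetExpiring(b *testing.B) {
	benchmarkToyGet(b, 5*time.Minute)
}

func BenchmarkToyGetNotExpiring(b *testing.B) {
	benchmarkToyGet(b, 0) // 0 stands in for "no expiration" in this sketch
}

// ...and one parameterized helper contains the measured loop.
func benchmarkToyGet(b *testing.B, ttl time.Duration) {
	b.StopTimer()
	tc := newToy(ttl)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tc.Get("foo")
	}
}
```
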
func benchmarkCacheGet(b *testing.B, exp time.Duration) {
b.StopTimer()
tc := New(DefaultExpiration, 0)
tc := New(exp, 0)
tc.Set("foo", "bar", DefaultExpiration)
b.StartTimer()
for i := 0; i < b.N; i++ {
@ -1449,9 +1457,17 @@ func BenchmarkRWMutexMapGet(b *testing.B) {
}
}
func BenchmarkCacheGetConcurrent(b *testing.B) {
func BenchmarkCacheGetConcurrentExpiring(b *testing.B) {
benchmarkCacheGetConcurrent(b, 5*time.Minute)
}
func BenchmarkCacheGetConcurrentNotExpiring(b *testing.B) {
benchmarkCacheGetConcurrent(b, NoExpiration)
}
func benchmarkCacheGetConcurrent(b *testing.B, exp time.Duration) {
b.StopTimer()
tc := New(DefaultExpiration, 0)
tc := New(exp, 0)
tc.Set("foo", "bar", DefaultExpiration)
wg := new(sync.WaitGroup)
workers := runtime.NumCPU()
@ -1493,13 +1509,21 @@ func BenchmarkRWMutexMapGetConcurrent(b *testing.B) {
wg.Wait()
}
func BenchmarkCacheGetManyConcurrent(b *testing.B) {
func BenchmarkCacheGetManyConcurrentExpiring(b *testing.B) {
benchmarkCacheGetManyConcurrent(b, 5*time.Minute)
}
func BenchmarkCacheGetManyConcurrentNotExpiring(b *testing.B) {
benchmarkCacheGetManyConcurrent(b, NoExpiration)
}
func benchmarkCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
// This is the same as BenchmarkCacheGetConcurrent, but its result
// can be compared against BenchmarkShardedCacheGetManyConcurrent
// in sharded_test.go.
b.StopTimer()
n := 10000
tc := New(DefaultExpiration, 0)
tc := New(exp, 0)
keys := make([]string, n)
for i := 0; i < n; i++ {
k := "foo" + strconv.Itoa(n)
@ -1521,9 +1545,17 @@ func BenchmarkCacheGetManyConcurrent(b *testing.B) {
wg.Wait()
}
func BenchmarkCacheSet(b *testing.B) {
func BenchmarkCacheSetExpiring(b *testing.B) {
benchmarkCacheSet(b, 5*time.Minute)
}
func BenchmarkCacheSetNotExpiring(b *testing.B) {
benchmarkCacheSet(b, NoExpiration)
}
func benchmarkCacheSet(b *testing.B, exp time.Duration) {
b.StopTimer()
tc := New(DefaultExpiration, 0)
tc := New(exp, 0)
b.StartTimer()
for i := 0; i < b.N; i++ {
tc.Set("foo", "bar", DefaultExpiration)
@ -1602,9 +1634,9 @@ func BenchmarkIncrementInt(b *testing.B) {
}
}
func BenchmarkDeleteExpired(b *testing.B) {
func BenchmarkDeleteExpiredLoop(b *testing.B) {
b.StopTimer()
tc := New(5 * time.Minute, 0)
tc := New(5*time.Minute, 0)
tc.mu.Lock()
for i := 0; i < 100000; i++ {
tc.set(strconv.Itoa(i), "bar", DefaultExpiration)
@ -1618,13 +1650,20 @@ func BenchmarkDeleteExpired(b *testing.B) {
func BenchmarkLargeCache(b *testing.B) {
b.StopTimer()
tc := New(100 * time.Millisecond, 1 * time.Millisecond)
//tc.mu.Lock()
tc := New(time.Second, 10*time.Millisecond)
end := time.Now().Add(tc.defaultExpiration)
var i int
for time.Now().Before(end) {
tc.Set(strconv.Itoa(i), "bar", DefaultExpiration)
i++
}
/*
for i := 0; i < 1000000; i++ {
tc.Set(strconv.Itoa(i), "bar", DefaultExpiration)
}
//tc.mu.Unlock()
*/
tc.DeleteExpired()
b.Logf("Cache size: %d", tc.ItemCount())
b.StartTimer()
for i := 1000000; i < 1000000+b.N; i++ {
tc.Set(strconv.Itoa(i), "bar", DefaultExpiration)

View File

@ -4,6 +4,7 @@ import (
"strconv"
"sync"
"testing"
"time"
)
// func TestDjb33(t *testing.T) {
@ -32,9 +33,17 @@ func TestShardedCache(t *testing.T) {
}
}
func BenchmarkShardedCacheGet(b *testing.B) {
func BenchmarkShardedCacheGetExpiring(b *testing.B) {
benchmarkShardedCacheGet(b, 5*time.Minute)
}
func BenchmarkShardedCacheGetNotExpiring(b *testing.B) {
benchmarkShardedCacheGet(b, NoExpiration)
}
func benchmarkShardedCacheGet(b *testing.B, exp time.Duration) {
b.StopTimer()
tc := unexportedNewSharded(DefaultExpiration, 0, 10)
tc := unexportedNewSharded(exp, 0, 10)
tc.Set("foobarba", "zquux", DefaultExpiration)
b.StartTimer()
for i := 0; i < b.N; i++ {
@ -42,10 +51,18 @@ func BenchmarkShardedCacheGet(b *testing.B) {
}
}
func BenchmarkShardedCacheGetManyConcurrent(b *testing.B) {
func BenchmarkShardedCacheGetManyConcurrentExpiring(b *testing.B) {
benchmarkShardedCacheGetManyConcurrent(b, 5*time.Minute)
}
func BenchmarkShardedCacheGetManyConcurrentNotExpiring(b *testing.B) {
benchmarkShardedCacheGetManyConcurrent(b, NoExpiration)
}
func benchmarkShardedCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
b.StopTimer()
n := 10000
tsc := unexportedNewSharded(DefaultExpiration, 0, 20)
tsc := unexportedNewSharded(exp, 0, 20)
keys := make([]string, n)
for i := 0; i < n; i++ {
k := "foo" + strconv.Itoa(n)