Stephen McQuay 2017-07-22 04:01:46 +00:00 committed by GitHub
commit 9d1b838e0e
5 changed files with 517 additions and 78 deletions

CONTRIBUTORS

@ -5,5 +5,7 @@ code was contributed.)
Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Matthew Keller <m@cognusion.com>
Sergey Shepelev <temotor@gmail.com>
Alex Edwards <ajmedwards@gmail.com>
Stephen McQuay <stephen@mcquay.me>

README.md

@ -13,6 +13,15 @@ cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
When creating a cache with `NewWithLRU()`, setting the maxItems value above 0
enables the LRU functionality. The cache then updates a timestamp every time a
given item is retrieved. In the background, the janitor deletes items that have
expired because of staleness, as well as the least-recently-used items whenever
the item count exceeds maxItems. The cleanup (purge) interval controls when
items are actually removed from the cache. If you don't want to use the janitor
and wish to purge LRU items manually, call `c.DeleteLRU()` to trim the cache
back down to maxItems, or `c.DeleteLRUAmount(n)` where `n` is the number of
items you'd like to purge.
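As a rough usage sketch (not taken from the project's documentation; the
durations, sizes, and keys are arbitrary), the LRU mode might be wired up like
this:

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Keep roughly 1000 items, expire entries after 5 minutes, and let the
	// janitor purge expired and least-recently-used items every minute.
	c := cache.NewWithLRU(5*time.Minute, time.Minute, 1000)

	c.Set("foo", "bar", cache.DefaultExpiration)

	// With LRU enabled, Get also refreshes the item's Accessed timestamp
	// (and therefore takes a write lock).
	if v, found := c.Get("foo"); found {
		fmt.Println(v)
	}

	// Without a janitor (cleanup interval <= 0) you can purge manually:
	// DeleteLRU() trims back down to maxItems, DeleteLRUAmount(n) removes n items.
	c.DeleteLRUAmount(10)
}
```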
### Installation
`go get github.com/patrickmn/go-cache`

cache.go

@ -1,3 +1,4 @@
// Package cache implements an in-memory key:value store/cache (similar to Memcached) library for Go, suitable for single-machine applications.
package cache
import (
@ -10,12 +11,16 @@ import (
"time"
)
// Item represents an item stored in the cache.
//
// Item stores information relevant to expiring it from the cache.
type Item struct {
Object interface{}
Expiration int64
Accessed int64
}
// Returns true if the item has expired.
// Expired returns true if the item has expired.
func (item Item) Expired() bool {
if item.Expiration == 0 {
return false
@ -23,15 +28,21 @@ func (item Item) Expired() bool {
return time.Now().UnixNano() > item.Expiration
}
// LastAccessed returns the time at which this item was last accessed.
func (item Item) LastAccessed() time.Time {
return time.Unix(0, item.Accessed)
}
const (
// For use with functions that take an expiration time.
// NoExpiration is for use with functions that take an expiration time.
NoExpiration time.Duration = -1
// For use with functions that take an expiration time. Equivalent to
// passing in the same expiration duration as was given to New() or
// NewFrom() when the cache was created (e.g. 5 minutes.)
// DefaultExpiration is for use with functions that take an expiration
// time. Equivalent to passing in the same expiration duration as was given
// to New() or NewFrom() when the cache was created (e.g. 5 minutes.)
DefaultExpiration time.Duration = 0
)
// Cache implements the in-memory key:value cache.
type Cache struct {
*cache
// If this is confusing, see the comment at the bottom of New()
@ -43,6 +54,7 @@ type cache struct {
mu sync.RWMutex
onEvicted func(string, interface{})
janitor *janitor
maxItems int
}
// Add an item to the cache, replacing any existing item. If the duration is 0
@ -50,34 +62,68 @@ type cache struct {
// (NoExpiration), the item never expires.
func (c *cache) Set(k string, x interface{}, d time.Duration) {
// "Inlining" of set
var e int64
var (
now time.Time
e int64
)
if d == DefaultExpiration {
d = c.defaultExpiration
}
if d > 0 {
e = time.Now().Add(d).UnixNano()
now = time.Now()
e = now.Add(d).UnixNano()
}
c.mu.Lock()
c.items[k] = Item{
Object: x,
Expiration: e,
if c.maxItems > 0 {
if d <= 0 {
// d <= 0 means we didn't set now above
now = time.Now()
}
c.mu.Lock()
c.items[k] = Item{
Object: x,
Expiration: e,
Accessed: now.UnixNano(),
}
// TODO: Calls to mu.Unlock are currently not deferred because
// defer adds ~200 ns (as of go1.)
c.mu.Unlock()
} else {
c.mu.Lock()
c.items[k] = Item{
Object: x,
Expiration: e,
}
c.mu.Unlock()
}
// TODO: Calls to mu.Unlock are currently not deferred because defer
// adds ~200 ns (as of go1.)
c.mu.Unlock()
}
func (c *cache) set(k string, x interface{}, d time.Duration) {
var e int64
var (
now time.Time
e int64
)
if d == DefaultExpiration {
d = c.defaultExpiration
}
if d > 0 {
e = time.Now().Add(d).UnixNano()
now = time.Now()
e = now.Add(d).UnixNano()
}
c.items[k] = Item{
Object: x,
Expiration: e,
if c.maxItems > 0 {
if d <= 0 {
// d <= 0 means we didn't set now above
now = time.Now()
}
c.items[k] = Item{
Object: x,
Expiration: e,
Accessed: now.UnixNano(),
}
} else {
c.items[k] = Item{
Object: x,
Expiration: e,
}
}
}
@ -118,20 +164,70 @@ func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
// Get an item from the cache. Returns the item or nil, and a bool indicating
// whether the key was found.
func (c *cache) Get(k string) (interface{}, bool) {
c.mu.RLock()
if c.maxItems > 0 {
// LRU enabled; Get implies write
c.mu.Lock()
} else {
// LRU not enabled; Get is read-only
c.mu.RLock()
}
// "Inlining" of get and Expired
item, found := c.items[k]
if !found {
c.mu.RUnlock()
if c.maxItems > 0 {
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return nil, false
}
var now int64
if item.Expiration > 0 {
if time.Now().UnixNano() > item.Expiration {
c.mu.RUnlock()
now = time.Now().UnixNano()
if now > item.Expiration {
if c.maxItems > 0 {
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return nil, false
}
}
c.mu.RUnlock()
if c.maxItems > 0 {
if now == 0 {
now = time.Now().UnixNano()
}
item.Accessed = now
c.items[k] = item
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return item.Object, true
}
// If LRU functionality is being used (and get therefore updates item.Accessed),
// this function must be called with the write lock held.
func (c *cache) get(k string) (interface{}, bool) {
item, found := c.items[k]
if !found {
return nil, false
}
// "Inlining" of Expired
var now int64
if item.Expiration > 0 {
now = time.Now().UnixNano()
if now > item.Expiration {
return nil, false
}
}
if c.maxItems > 0 {
if now == 0 {
now = time.Now().UnixNano()
}
item.Accessed = now
c.items[k] = item
}
return item.Object, true
}
@ -140,45 +236,61 @@ func (c *cache) Get(k string) (interface{}, bool) {
// never expires, a zero value for time.Time is returned), and a bool indicating
// whether the key was found.
func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) {
c.mu.RLock()
if c.maxItems > 0 {
// LRU enabled; GetWithExpiration implies write
c.mu.Lock()
} else {
// LRU not enabled; GetWithExpiration is read-only
c.mu.RLock()
}
// "Inlining" of get and Expired
item, found := c.items[k]
if !found {
c.mu.RUnlock()
if c.maxItems > 0 {
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return nil, time.Time{}, false
}
var now int64
if item.Expiration > 0 {
if time.Now().UnixNano() > item.Expiration {
c.mu.RUnlock()
now = time.Now().UnixNano()
if now > item.Expiration {
if c.maxItems > 0 {
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return nil, time.Time{}, false
}
// Return the item and the expiration time
c.mu.RUnlock()
if c.maxItems > 0 {
if now == 0 {
now = time.Now().UnixNano()
}
item.Accessed = now
c.items[k] = item
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
return item.Object, time.Unix(0, item.Expiration), true
}
if c.maxItems > 0 {
if now == 0 {
now = time.Now().UnixNano()
}
item.Accessed = now
c.items[k] = item
c.mu.Unlock()
} else {
c.mu.RUnlock()
}
// If expiration <= 0 (i.e. no expiration time set) then return the item
// and a zeroed time.Time
c.mu.RUnlock()
return item.Object, time.Time{}, true
}
func (c *cache) get(k string) (interface{}, bool) {
item, found := c.items[k]
if !found {
return nil, false
}
// "Inlining" of Expired
if item.Expiration > 0 {
if time.Now().UnixNano() > item.Expiration {
return nil, false
}
}
return item.Object, true
}
// Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
// uint8, uint16, uint32, or uint64, float32 or float64 by n. Returns an error if the
// item's value is not an integer, if it was not found, or if it is not
@ -191,6 +303,9 @@ func (c *cache) Increment(k string, n int64) error {
c.mu.Unlock()
return fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
switch v.Object.(type) {
case int:
v.Object = v.Object.(int) + int(n)
@ -239,6 +354,9 @@ func (c *cache) IncrementFloat(k string, n float64) error {
c.mu.Unlock()
return fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
switch v.Object.(type) {
case float32:
v.Object = v.Object.(float32) + float32(n)
@ -263,6 +381,9 @@ func (c *cache) IncrementInt(k string, n int) (int, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int)
if !ok {
c.mu.Unlock()
@ -285,6 +406,9 @@ func (c *cache) IncrementInt8(k string, n int8) (int8, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int8)
if !ok {
c.mu.Unlock()
@ -307,6 +431,9 @@ func (c *cache) IncrementInt16(k string, n int16) (int16, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int16)
if !ok {
c.mu.Unlock()
@ -329,6 +456,9 @@ func (c *cache) IncrementInt32(k string, n int32) (int32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int32)
if !ok {
c.mu.Unlock()
@ -351,6 +481,9 @@ func (c *cache) IncrementInt64(k string, n int64) (int64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int64)
if !ok {
c.mu.Unlock()
@ -373,6 +506,9 @@ func (c *cache) IncrementUint(k string, n uint) (uint, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint)
if !ok {
c.mu.Unlock()
@ -395,6 +531,9 @@ func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uintptr)
if !ok {
c.mu.Unlock()
@ -417,6 +556,9 @@ func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint8)
if !ok {
c.mu.Unlock()
@ -439,6 +581,9 @@ func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint16)
if !ok {
c.mu.Unlock()
@ -461,6 +606,9 @@ func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint32)
if !ok {
c.mu.Unlock()
@ -483,6 +631,9 @@ func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint64)
if !ok {
c.mu.Unlock()
@ -505,6 +656,9 @@ func (c *cache) IncrementFloat32(k string, n float32) (float32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(float32)
if !ok {
c.mu.Unlock()
@ -527,6 +681,9 @@ func (c *cache) IncrementFloat64(k string, n float64) (float64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(float64)
if !ok {
c.mu.Unlock()
@ -553,6 +710,9 @@ func (c *cache) Decrement(k string, n int64) error {
c.mu.Unlock()
return fmt.Errorf("Item not found")
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
switch v.Object.(type) {
case int:
v.Object = v.Object.(int) - int(n)
@ -601,6 +761,9 @@ func (c *cache) DecrementFloat(k string, n float64) error {
c.mu.Unlock()
return fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
switch v.Object.(type) {
case float32:
v.Object = v.Object.(float32) - float32(n)
@ -625,6 +788,9 @@ func (c *cache) DecrementInt(k string, n int) (int, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int)
if !ok {
c.mu.Unlock()
@ -647,6 +813,9 @@ func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int8)
if !ok {
c.mu.Unlock()
@ -669,6 +838,9 @@ func (c *cache) DecrementInt16(k string, n int16) (int16, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int16)
if !ok {
c.mu.Unlock()
@ -691,6 +863,9 @@ func (c *cache) DecrementInt32(k string, n int32) (int32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int32)
if !ok {
c.mu.Unlock()
@ -713,6 +888,9 @@ func (c *cache) DecrementInt64(k string, n int64) (int64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(int64)
if !ok {
c.mu.Unlock()
@ -735,6 +913,9 @@ func (c *cache) DecrementUint(k string, n uint) (uint, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint)
if !ok {
c.mu.Unlock()
@ -757,6 +938,9 @@ func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uintptr)
if !ok {
c.mu.Unlock()
@ -779,6 +963,9 @@ func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint8)
if !ok {
c.mu.Unlock()
@ -801,6 +988,9 @@ func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint16)
if !ok {
c.mu.Unlock()
@ -823,6 +1013,9 @@ func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint32)
if !ok {
c.mu.Unlock()
@ -845,6 +1038,9 @@ func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(uint64)
if !ok {
c.mu.Unlock()
@ -867,6 +1063,9 @@ func (c *cache) DecrementFloat32(k string, n float32) (float32, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(float32)
if !ok {
c.mu.Unlock()
@ -889,6 +1088,9 @@ func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
c.mu.Unlock()
return 0, fmt.Errorf("Item %s not found", k)
}
if c.maxItems > 0 {
v.Accessed = time.Now().UnixNano()
}
rv, ok := v.Object.(float64)
if !ok {
c.mu.Unlock()
@ -905,9 +1107,10 @@ func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
func (c *cache) Delete(k string) {
c.mu.Lock()
v, evicted := c.delete(k)
evictFunc := c.onEvicted
c.mu.Unlock()
if evicted {
c.onEvicted(k, v)
evictFunc(k, v)
}
}
@ -932,8 +1135,9 @@ func (c *cache) DeleteExpired() {
var evictedItems []keyAndValue
now := time.Now().UnixNano()
c.mu.Lock()
evictFunc := c.onEvicted
for k, v := range c.items {
// "Inlining" of expired
// "Inlining" of Expired
if v.Expiration > 0 && now > v.Expiration {
ov, evicted := c.delete(k)
if evicted {
@ -943,7 +1147,7 @@ func (c *cache) DeleteExpired() {
}
c.mu.Unlock()
for _, v := range evictedItems {
c.onEvicted(v.key, v.value)
evictFunc(v.key, v.value)
}
}
@ -956,6 +1160,80 @@ func (c *cache) OnEvicted(f func(string, interface{})) {
c.mu.Unlock()
}
// DeleteLRU deletes some of the oldest items in the cache if the soft size
// limit has been exceeded.
func (c *cache) DeleteLRU() {
c.mu.Lock()
var (
overCount = c.itemCount() - c.maxItems
evictFunc = c.onEvicted
)
evicted := c.deleteLRUAmount(overCount)
c.mu.Unlock()
for _, v := range evicted {
evictFunc(v.key, v.value)
}
}
// DeleteLRUAmount deletes a number of the oldest items from the cache.
func (c *cache) DeleteLRUAmount(numItems int) {
c.mu.Lock()
evictFunc := c.onEvicted
evicted := c.deleteLRUAmount(numItems)
c.mu.Unlock()
for _, v := range evicted {
evictFunc(v.key, v.value)
}
}
func (c *cache) deleteLRUAmount(numItems int) []keyAndValue {
if numItems <= 0 {
return nil
}
var (
lastTime int64
lastItems = make([]string, numItems) // Ring buffer
liCount = 0
full = false
evictedItems []keyAndValue
now = time.Now().UnixNano()
)
if c.onEvicted != nil {
evictedItems = make([]keyAndValue, 0, numItems)
}
for k, v := range c.items {
// "Inlining" of !Expired
if v.Expiration == 0 || now <= v.Expiration {
if full == false || v.Accessed < lastTime {
// We found a least-recently-used item, or our
// purge buffer isn't full yet
lastTime = v.Accessed
// Append it to the buffer, or start overwriting
// it
if liCount < numItems {
lastItems[liCount] = k
liCount++
} else {
lastItems[0] = k
liCount = 1
full = true
}
}
}
}
if lastTime > 0 {
for _, v := range lastItems {
if v != "" {
ov, evicted := c.delete(v)
if evicted {
evictedItems = append(evictedItems, keyAndValue{v, ov})
}
}
}
}
return evictedItems
}
// Write the cache's items (using Gob) to an io.Writer.
//
// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
@ -1061,6 +1339,14 @@ func (c *cache) ItemCount() int {
return n
}
// itemCount returns the number of items in the cache without locking. This
// may include items that have expired, but have not yet been cleaned up.
// Equivalent to len(c.Items()).
func (c *cache) itemCount() int {
n := len(c.items)
return n
}
// Delete all items from the cache.
func (c *cache) Flush() {
c.mu.Lock()
@ -1079,6 +1365,9 @@ func (j *janitor) Run(c *cache) {
select {
case <-ticker.C:
c.DeleteExpired()
if c.maxItems > 0 {
c.DeleteLRU()
}
case <-j.stop:
ticker.Stop()
return
@ -1099,19 +1388,20 @@ func runJanitor(c *cache, ci time.Duration) {
go j.Run(c)
}
func newCache(de time.Duration, m map[string]Item) *cache {
func newCache(de time.Duration, m map[string]Item, mi int) *cache {
if de == 0 {
de = -1
}
c := &cache{
defaultExpiration: de,
maxItems: mi,
items: m,
}
return c
}
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
c := newCache(de, m)
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item, mi int) *Cache {
c := newCache(de, m, mi)
// This trick ensures that the janitor goroutine (which--granted it
// was enabled--is running DeleteExpired on c forever) does not keep
// the returned C object from being garbage collected. When it is
@ -1125,21 +1415,36 @@ func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item)
return C
}
// Return a new cache with a given default expiration duration and cleanup
// interval. If the expiration duration is less than one (or NoExpiration),
// the items in the cache never expire (by default), and must be deleted
// manually. If the cleanup interval is less than one, expired items are not
// deleted from the cache before calling c.DeleteExpired().
// New returns a new cache with a given default expiration duration and cleanup
// interval. If the expiration duration is less than one (or NoExpiration), the
// items in the cache never expire (by default), and must be deleted manually.
// If the cleanup interval is less than one, expired items are not deleted from
// the cache before calling c.DeleteExpired().
func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
items := make(map[string]Item)
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
}
// Return a new cache with a given default expiration duration and cleanup
// interval. If the expiration duration is less than one (or NoExpiration),
// the items in the cache never expire (by default), and must be deleted
// manually. If the cleanup interval is less than one, expired items are not
// deleted from the cache before calling c.DeleteExpired().
// NewWithLRU returns a new cache with a given default expiration duration,
// cleanup interval, and maximum-ish number of items. If the expiration
// duration is less than one (or NoExpiration), the items in the cache never
// expire (by default), and must be deleted manually. If the cleanup interval
// is less than one, expired items are not deleted from the cache before
// calling c.DeleteExpired(), c.DeleteLRU(), or c.DeleteLRUAmount(). If
// maxItems is not greater than zero, then there will be no soft cap on the
// number of items in the cache.
//
// Using the LRU functionality makes Get() a slower, write-locked operation.
func NewWithLRU(defaultExpiration, cleanupInterval time.Duration, maxItems int) *Cache {
items := make(map[string]Item)
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
}
// NewFrom returns a new cache with a given default expiration duration and
// cleanup interval. If the expiration duration is less than one (or
// NoExpiration), the items in the cache never expire (by default), and must be
// deleted manually. If the cleanup interval is less than one, expired items
// are not deleted from the cache before calling c.DeleteExpired().
//
// NewFrom() also accepts an items map which will serve as the underlying map
// for the cache. This is useful for starting from a deserialized cache
@ -1157,5 +1462,11 @@ func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
// map retrieved with c.Items(), and to register those same types before
// decoding a blob containing an items map.
func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache {
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
}
// NewFromWithLRU is similar to NewFrom, but creates a cache with LRU
// functionality enabled.
func NewFromWithLRU(defaultExpiration, cleanupInterval time.Duration, items map[string]Item, maxItems int) *Cache {
return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
}
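To make the `NewFrom()` persistence path concrete alongside the new
`NewFromWithLRU()`, here is a rough snapshot-and-restore sketch built on
`c.Items()`; the `snapshot`/`restore` helpers, the file name, and the chosen
durations are illustrative assumptions, not part of this commit:

```go
package main

import (
	"encoding/gob"
	"log"
	"os"
	"time"

	"github.com/patrickmn/go-cache"
)

// snapshot writes the cache's current items to path using gob. Custom value
// types stored in the cache must be gob.Register'ed before encoding/decoding.
func snapshot(c *cache.Cache, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return gob.NewEncoder(f).Encode(c.Items())
}

// restore loads a previously written snapshot into a new LRU-enabled cache.
func restore(path string, maxItems int) (*cache.Cache, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	items := map[string]cache.Item{}
	if err := gob.NewDecoder(f).Decode(&items); err != nil {
		return nil, err
	}
	return cache.NewFromWithLRU(5*time.Minute, time.Minute, items, maxItems), nil
}

func main() {
	c := cache.NewWithLRU(5*time.Minute, time.Minute, 1000)
	c.Set("foo", "bar", cache.DefaultExpiration)
	if err := snapshot(c, "cache.gob"); err != nil {
		log.Fatal(err)
	}
	restored, err := restore("cache.gob", 1000)
	if err != nil {
		log.Fatal(err)
	}
	if v, found := restored.Get("foo"); found {
		log.Println(v)
	}
}
```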

cache_test.go

@ -108,11 +108,11 @@ func TestCacheTimes(t *testing.T) {
func TestNewFrom(t *testing.T) {
m := map[string]Item{
"a": Item{
"a": {
Object: 1,
Expiration: 0,
},
"b": Item{
"b": {
Object: 2,
Expiration: 0,
},
@ -1224,6 +1224,41 @@ func TestDecrementUnderflowUint(t *testing.T) {
}
}
// TODO: Ring buffer is more efficient but doesn't guarantee that the actually
// oldest items are removed, just some old items. This shouldn't be significant
// for large caches, but we can't test it easily.
//
// func TestDeleteLRU(t *testing.T) {
// tc := NewWithLRU(1*time.Second, 0, 1)
// tc.Set("foo", 0, DefaultExpiration)
// tc.Set("bar", 1, DefaultExpiration)
// tc.Set("baz", 2, DefaultExpiration)
// tc.Get("foo")
// tc.Get("baz")
// time.Sleep(5 * time.Millisecond)
// tc.Get("bar")
// // Bar was accessed most recently, and should be the only value that
// // stays.
// tc.DeleteLRU()
// if tc.ItemCount() != 1 {
// t.Error("tc.ItemCount() is not 1")
// }
// if _, found := tc.Get("bar"); !found {
// t.Error("bar was not found")
// }
// }
func TestDeleteLRU(t *testing.T) {
tc := NewWithLRU(1*time.Second, 0, 1)
tc.Set("foo", 0, DefaultExpiration)
tc.Set("bar", 1, DefaultExpiration)
tc.Set("baz", 2, DefaultExpiration)
tc.DeleteLRU()
if tc.ItemCount() != 1 {
t.Error("tc.ItemCount() is not 1")
}
}
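For comparison, here is a sketch (not part of this change) of how
`DeleteLRUAmount()` could be exercised in the same style; because the ring
buffer only guarantees that some old items are removed, it asserts only the
resulting count:

```go
func TestDeleteLRUAmountSketch(t *testing.T) {
	tc := NewWithLRU(1*time.Second, 0, 2)
	tc.Set("foo", 0, DefaultExpiration)
	tc.Set("bar", 1, DefaultExpiration)
	tc.Set("baz", 2, DefaultExpiration)
	// Remove one (approximately) least-recently-used item.
	tc.DeleteLRUAmount(1)
	if tc.ItemCount() != 2 {
		t.Error("tc.ItemCount() is not 2")
	}
}
```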
func TestOnEvicted(t *testing.T) {
tc := New(DefaultExpiration, 0)
tc.Set("foo", 3, DefaultExpiration)
@ -1267,14 +1302,14 @@ func testFillAndSerialize(t *testing.T, tc *Cache) {
{Num: 3},
}, DefaultExpiration)
tc.Set("[]*struct", []*TestStruct{
&TestStruct{Num: 4},
&TestStruct{Num: 5},
{Num: 4},
{Num: 5},
}, DefaultExpiration)
tc.Set("structception", &TestStruct{
Num: 42,
Children: []*TestStruct{
&TestStruct{Num: 6174},
&TestStruct{Num: 4716},
{Num: 6174},
{Num: 4716},
},
}, DefaultExpiration)
@ -1443,6 +1478,24 @@ func benchmarkCacheGet(b *testing.B, exp time.Duration) {
}
}
func BenchmarkCacheWithLRUGetExpiring(b *testing.B) {
benchmarkCacheWithLRUGet(b, 5*time.Minute, 10)
}
func BenchmarkCacheWithLRUGetNotExpiring(b *testing.B) {
benchmarkCacheWithLRUGet(b, NoExpiration, 10)
}
func benchmarkCacheWithLRUGet(b *testing.B, exp time.Duration, max int) {
b.StopTimer()
tc := NewWithLRU(exp, 0, max)
tc.Set("foo", "bar", DefaultExpiration)
b.StartTimer()
for i := 0; i < b.N; i++ {
tc.Get("foo")
}
}
func BenchmarkRWMutexMapGet(b *testing.B) {
b.StopTimer()
m := map[string]string{
@ -1514,6 +1567,34 @@ func benchmarkCacheGetConcurrent(b *testing.B, exp time.Duration) {
wg.Wait()
}
func BenchmarkCacheWithLRUGetConcurrentExpiring(b *testing.B) {
benchmarkCacheWithLRUGetConcurrent(b, 5*time.Minute, 10)
}
func BenchmarkCacheWithLRUGetConcurrentNotExpiring(b *testing.B) {
benchmarkCacheWithLRUGetConcurrent(b, NoExpiration, 10)
}
func benchmarkCacheWithLRUGetConcurrent(b *testing.B, exp time.Duration, max int) {
b.StopTimer()
tc := NewWithLRU(exp, 0, max)
tc.Set("foo", "bar", DefaultExpiration)
wg := new(sync.WaitGroup)
workers := runtime.NumCPU()
each := b.N / workers
wg.Add(workers)
b.StartTimer()
for i := 0; i < workers; i++ {
go func() {
for j := 0; j < each; j++ {
tc.Get("foo")
}
wg.Done()
}()
}
wg.Wait()
}
func BenchmarkRWMutexMapGetConcurrent(b *testing.B) {
b.StopTimer()
m := map[string]string{
@ -1563,12 +1644,48 @@ func benchmarkCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
wg := new(sync.WaitGroup)
wg.Add(n)
for _, v := range keys {
go func() {
go func(key string) {
for j := 0; j < each; j++ {
tc.Get(v)
tc.Get(key)
}
wg.Done()
}()
}(v)
}
b.StartTimer()
wg.Wait()
}
func BenchmarkCacheWithLRUGetManyConcurrentExpiring(b *testing.B) {
benchmarkCacheWithLRUGetManyConcurrent(b, 5*time.Minute, 10000)
}
func BenchmarkCacheWithLRUGetManyConcurrentNotExpiring(b *testing.B) {
benchmarkCacheWithLRUGetManyConcurrent(b, NoExpiration, 10000)
}
func benchmarkCacheWithLRUGetManyConcurrent(b *testing.B, exp time.Duration, max int) {
// This is the same as BenchmarkCacheWithLRUGetConcurrent, but its result
// can be compared against BenchmarkShardedCacheWithLRUGetManyConcurrent
// in sharded_test.go.
b.StopTimer()
n := 10000
tc := NewWithLRU(exp, 0, max)
keys := make([]string, n)
for i := 0; i < n; i++ {
k := "foo" + strconv.Itoa(n)
keys[i] = k
tc.Set(k, "bar", DefaultExpiration)
}
each := b.N / n
wg := new(sync.WaitGroup)
wg.Add(n)
for _, v := range keys {
go func(key string) {
for j := 0; j < each; j++ {
tc.Get(key)
}
wg.Done()
}(v)
}
b.StartTimer()
wg.Wait()

sharded_test.go

@ -73,12 +73,12 @@ func benchmarkShardedCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
wg := new(sync.WaitGroup)
wg.Add(n)
for _, v := range keys {
go func() {
go func(key string) {
for j := 0; j < each; j++ {
tsc.Get(v)
tsc.Get(key)
}
wg.Done()
}()
}(v)
}
b.StartTimer()
wg.Wait()