Use runtime.NumCPU() workers in concurrent benchmarks, remove pointless benchmarks for racy maps, and add replacement benchmarks using mutexed maps

This commit is contained in:
Patrick Mylund Nielsen 2012-06-22 03:06:06 +01:00
parent 3bd539b94d
commit 803ceeaf1a
1 changed file with 55 additions and 31 deletions

View File

@ -3,6 +3,7 @@ package cache
import ( import (
"bytes" "bytes"
"io/ioutil" "io/ioutil"
"runtime"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -642,22 +643,28 @@ func BenchmarkCacheGet(b *testing.B) {
} }
} }
// BenchmarkMutexMapGet measures the cost of reading a single key from a
// plain map guarded by a sync.Mutex; it is the un-sharded baseline that
// the Cache Get benchmarks are compared against.
func BenchmarkMutexMapGet(b *testing.B) {
	var mu sync.Mutex
	lookup := map[string]string{
		"foo": "bar",
	}
	for n := 0; n < b.N; n++ {
		mu.Lock()
		_, _ = lookup["foo"]
		mu.Unlock()
	}
}
func BenchmarkCacheGetConcurrent(b *testing.B) { func BenchmarkCacheGetConcurrent(b *testing.B) {
tc := New(0, 0) tc := New(0, 0)
tc.Set("foo", "bar", 0) tc.Set("foo", "bar", 0)
wg := new(sync.WaitGroup) wg := new(sync.WaitGroup)
children := b.N workers := runtime.NumCPU()
iterations := 1 each := b.N / workers
wg.Add(workers)
if children > 10000 { for i := 0; i < workers; i++ {
children = 10000
iterations = b.N / children
}
wg.Add(children)
for i := 0; i < children; i++ {
go func() { go func() {
for j := 0; j < iterations; j++ { for j := 0; j < each; j++ {
tc.Get("foo") tc.Get("foo")
} }
wg.Done() wg.Done()
@ -666,30 +673,28 @@ func BenchmarkCacheGetConcurrent(b *testing.B) {
wg.Wait() wg.Wait()
} }
// BenchmarkMutexMapGetConcurrent is the mutex-guarded plain-map baseline
// for BenchmarkCacheGetConcurrent: one goroutine per CPU, each doing
// b.N/NumCPU locked reads of the same key.
//
// NOTE(review): integer division drops the b.N%workers remainder, and when
// b.N < workers no reads run at all.
func BenchmarkMutexMapGetConcurrent(b *testing.B) {
	m := map[string]string{
		"foo": "bar",
	}
	mu := sync.Mutex{}
	wg := new(sync.WaitGroup)
	workers := runtime.NumCPU()
	each := b.N / workers
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			for j := 0; j < each; j++ {
				mu.Lock()
				_, _ = m["foo"]
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
// func BenchmarkMapGetConcurrent(b *testing.B) {
// m := map[string]string{
// "foo": "bar",
// }
// wg := new(sync.WaitGroup)
// wg.Add(b.N)
// for i := 0; i < b.N; i++ {
// go func() {
// _, _ = m["foo"]
// wg.Done()
// }()
// }
// wg.Wait()
// }
func BenchmarkCacheSet(b *testing.B) { func BenchmarkCacheSet(b *testing.B) {
tc := New(0, 0) tc := New(0, 0)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -697,10 +702,13 @@ func BenchmarkCacheSet(b *testing.B) {
} }
} }
// BenchmarkMutexMapSet measures one locked write per iteration to a plain
// map; it is the baseline for BenchmarkCacheSet.
func BenchmarkMutexMapSet(b *testing.B) {
	m := map[string]string{}
	mu := sync.Mutex{}
	for i := 0; i < b.N; i++ {
		mu.Lock()
		m["foo"] = "bar"
		mu.Unlock()
	}
}
@ -712,6 +720,19 @@ func BenchmarkCacheSetDelete(b *testing.B) {
} }
} }
// BenchmarkMutexMapSetDelete measures a set followed by a delete on a
// mutex-guarded plain map, acquiring the lock separately for each of the
// two operations (mirroring a Cache.Set then Cache.Delete call pair).
func BenchmarkMutexMapSetDelete(b *testing.B) {
	var mu sync.Mutex
	store := map[string]string{}
	for n := 0; n < b.N; n++ {
		mu.Lock()
		store["foo"] = "bar"
		mu.Unlock()
		mu.Lock()
		delete(store, "foo")
		mu.Unlock()
	}
}
func BenchmarkCacheSetDeleteSingleLock(b *testing.B) { func BenchmarkCacheSetDeleteSingleLock(b *testing.B) {
tc := New(0, 0) tc := New(0, 0)
tc.mu.Lock() tc.mu.Lock()
@ -722,10 +743,13 @@ func BenchmarkCacheSetDeleteSingleLock(b *testing.B) {
tc.mu.Unlock() tc.mu.Unlock()
} }
// BenchmarkMutexMapSetDeleteSingleLock sets and then deletes a key under a
// SINGLE lock acquisition per iteration (one Lock/Unlock covering both
// operations), as opposed to BenchmarkMutexMapSetDelete which locks twice;
// it is the baseline for BenchmarkCacheSetDeleteSingleLock.
func BenchmarkMutexMapSetDeleteSingleLock(b *testing.B) {
	m := map[string]string{}
	mu := sync.Mutex{}
	for i := 0; i < b.N; i++ {
		mu.Lock()
		m["foo"] = "bar"
		delete(m, "foo")
		mu.Unlock()
	}
}