Get multiple cached values #379

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 18 commits into base: main
53 changes: 53 additions & 0 deletions bigcache.go
@@ -137,6 +137,59 @@ func (c *BigCache) Get(key string) ([]byte, error) {
return shard.get(key, hashedKey)
}

// keyInfo stores per-key bookkeeping for GetMulti:
// order is the index in the result slice where the entry belongs,
// hashedKey is the Sum64 hash of the key,
// and key is the original key passed by the caller.
type keyInfo struct {
order int
hashedKey uint64
key string
}

// GetMulti reads the entry for each of the provided keys.
// It returns entries in the same order as the keys.
// If no entry is found for a given key, the corresponding index contains nil.
func (c *BigCache) GetMulti(keys []string) [][]byte {
shards := make(map[uint64][]keyInfo, len(c.shards))
entries := make([][]byte, len(keys))

for i, key := range keys {
hashedKey := c.hash.Sum64(key)
shardIndex := hashedKey & c.shardMask
shards[shardIndex] = append(shards[shardIndex], keyInfo{order: i, hashedKey: hashedKey, key: key})
}

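// Take each shard's read lock once and resolve all of that shard's keys under it.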
for shardKey, keyInfos := range shards {
hits := make([]uint64, 0, len(keyInfos))
shard := c.shards[shardKey]
shard.lock.RLock()
for i := range keyInfos {
entry, _ := shard.getWithoutLock(keyInfos[i].key, keyInfos[i].hashedKey)

if entry != nil {
hits = append(hits, keyInfos[i].hashedKey)
}

entries[keyInfos[i].order] = entry
}
shard.lock.RUnlock()

if shard.statsEnabled {
shard.lock.Lock()
}

for i := range hits {
shard.hitWithoutLock(hits[i])
}

if shard.statsEnabled {
shard.lock.Unlock()
}
}
return entries
}

// GetWithInfo reads entry for the key with Response info.
// It returns an ErrEntryNotFound when
// no entry exists for the given key.
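For context, a minimal usage sketch of the proposed `GetMulti` API (assuming the v3 module path `github.com/allegro/bigcache/v3` and the default configuration; the key names and omitted error handling are illustrative only):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/allegro/bigcache/v3"
)

func main() {
	cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10*time.Minute))

	_ = cache.Set("user:1", []byte("alice"))
	_ = cache.Set("user:3", []byte("carol"))

	// Entries come back in the same order as the requested keys;
	// "user:2" was never set, so its slot is nil instead of an error.
	values := cache.GetMulti([]string{"user:1", "user:2", "user:3"})
	for i, v := range values {
		fmt.Printf("%d: %q\n", i, v)
	}
}
```

Grouping keys by shard means each shard's read lock is acquired once per call rather than once per key, which is what the `ManySingle`/`ManyMulti` benchmarks below compare.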
82 changes: 82 additions & 0 deletions bigcache_bench_test.go
@@ -69,6 +69,88 @@ func BenchmarkReadFromCache(b *testing.B) {
}
}

func BenchmarkReadFromCacheManySingle(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
cache, _ := New(context.Background(), Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})

keys := make([]string, b.N)
for i := 0; i < b.N; i++ {
keys[i] = fmt.Sprintf("key-%d", i)
cache.Set(keys[i], message)
}

b.ReportAllocs()
b.ResetTimer()
for _, key := range keys {
cache.Get(key)
}

})
}
}

func BenchmarkReadFromCacheManyMulti(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
cache, _ := New(context.Background(), Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
keys := make([]string, b.N)
for i := 0; i < b.N; i++ {
keys[i] = fmt.Sprintf("key-%d", i)
cache.Set(keys[i], message)
}

b.ReportAllocs()
b.ResetTimer()
cache.GetMulti(keys)
})
}
}

func BenchmarkReadFromCacheManyMultiBatches(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
for _, batchSize := range []int{1, 5, 10, 100} {
b.Run(fmt.Sprintf("%d-shards %d-batchSize", shards, batchSize), func(b *testing.B) {
cache, _ := New(context.Background(), Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
keys := make([]string, b.N)
for i := 0; i < b.N; i++ {
keys[i] = fmt.Sprintf("key-%d", i)
cache.Set(keys[i], message)
}

batches := make([][]string, 0, (len(keys)+batchSize-1)/batchSize)

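// Carve keys into batches of batchSize: the loop slices off full batches,
// and the final, possibly shorter, remainder is appended afterwards.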
for batchSize < len(keys) {
keys, batches = keys[batchSize:], append(batches, keys[0:batchSize:batchSize])
}
batches = append(batches, keys)

b.ReportAllocs()
b.ResetTimer()
for _, batch := range batches {
cache.GetMulti(batch)
}
})
}
}
}

func BenchmarkReadFromCacheWithInfo(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
Expand Down
87 changes: 87 additions & 0 deletions bigcache_test.go
@@ -29,6 +29,53 @@ func TestWriteAndGetOnCache(t *testing.T) {
assertEqual(t, value, cachedValue)
}

func TestWriteAndGetOnCacheMulti(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
keys []string
data [][]byte
want string
}{
{
keys: []string{"k1", "k2", "k3", "k4", "k5"},
data: [][]byte{
blob('a', 10),
blob('b', 10),
blob('c', 10),
blob('d', 10),
blob('e', 10),
},
want: "Get all values ordered",
},
{
keys: []string{"k1", "k2", "k3", "k4", "k5"},
data: [][]byte{
blob('a', 10),
blob('b', 10),
nil,
blob('d', 10),
blob('e', 10),
},
want: "Get all values ordered with nil",
},
} {
t.Run(tc.want, func(t *testing.T) {
cache, _ := New(context.Background(), DefaultConfig(5*time.Second))

for i := range tc.keys {
if tc.data[i] != nil {
cache.Set(tc.keys[i], tc.data[i])
}
}

cachedValues := cache.GetMulti(tc.keys)

assertEqual(t, tc.data, cachedValues)
})

}
}

func TestAppendAndGetOnCache(t *testing.T) {
t.Parallel()

@@ -836,6 +883,46 @@ func TestWriteAndReadParallelSameKeyWithStats(t *testing.T) {
assertEqual(t, ntest*n, int(cache.KeyMetadata(key).RequestCount))
}

func TestWriteAndReadManyParallelSameKeyWithStats(t *testing.T) {
t.Parallel()

c := DefaultConfig(0)
c.StatsEnabled = true

cache, _ := New(context.Background(), c)
var wg sync.WaitGroup
ntest := 100
n := 10
wg.Add(n)

keys := []string{"key1", "key2", "key3"}
values := [][]byte{blob('a', 64), blob('b', 64), blob('c', 64)}

for i := 0; i < ntest; i++ {
for j := range keys {
assertEqual(t, nil, cache.Set(keys[j], values[j]))
}
}

for j := 0; j < n; j++ {
go func() {
for i := 0; i < ntest; i++ {
v := cache.GetMulti(keys)
assertEqual(t, values, v)
}
wg.Done()
}()
}

wg.Wait()

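// n goroutines each perform ntest GetMulti calls over len(keys) keys, so the cache
// should record n*ntest*len(keys) hits overall and n*ntest requests per key.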
assertEqual(t, Stats{Hits: int64(n * ntest * len(keys))}, cache.Stats())

for i := range keys {
assertEqual(t, ntest*n, int(cache.KeyMetadata(keys[i]).RequestCount))
}
}

func TestCacheReset(t *testing.T) {
t.Parallel()

18 changes: 14 additions & 4 deletions shard.go
@@ -61,22 +61,32 @@ func (s *cacheShard) getWithInfo(key string, hashedKey uint64) (entry []byte, resp Response, err error) {

func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
s.lock.RLock()
entry, err := s.getWithoutLock(key, hashedKey)
s.lock.RUnlock()

if err != nil {
return nil, err
}

s.hit(hashedKey)
return entry, nil
}

// getWithoutLock assumes the caller already holds the shard lock; it neither
// locks nor records a hit, so it can be called repeatedly under a single RLock.
func (s *cacheShard) getWithoutLock(key string, hashedKey uint64) ([]byte, error) {
wrappedEntry, err := s.getWrappedEntry(hashedKey)
if err != nil {
return nil, err
}

if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
s.collision()
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}
return nil, ErrEntryNotFound
}

entry := readEntry(wrappedEntry)

return entry, nil
}