
Feature/hook new #1040

Merged 5 commits on Jun 1, 2019
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -1,6 +1,11 @@
# Changelog

## Unreleased
## v7 WIP

- WrapProcess is replaced with the more convenient AddHook, which has access to context.Context (a usage sketch follows this diff).
- WithContext no longer creates a shallow copy.

## v6.15

- Cluster and Ring pipelines process commands for each node in its own goroutine.

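The entries above are the only prose describing the new API, so here is a minimal sketch of how a hook registered via AddHook might be written. The Hook method set (BeforeProcess/AfterProcess plus pipeline variants), the exact signatures, and the github.com/go-redis/redis/v7 import path are assumptions for illustration, not taken from this diff.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v7"
)

// loggingHook is a hypothetical example hook; the method set below mirrors a
// v7-style Hook interface, but the exact signatures merged in this PR may differ.
type loggingHook struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	fmt.Println("starting", cmd.Name())
	return ctx, nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	fmt.Println("finished", cmd.Name(), "err:", cmd.Err())
	return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	fmt.Println("starting pipeline of", len(cmds), "commands")
	return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	fmt.Println("finished pipeline of", len(cmds), "commands")
	return nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rdb.AddHook(loggingHook{})

	// Per the second CHANGELOG entry, WithContext binds ctx without making a shallow copy.
	ctx := context.Background()
	fmt.Println("ping err:", rdb.WithContext(ctx).Ping().Err())
}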
138 changes: 138 additions & 0 deletions bench_test.go
@@ -2,6 +2,7 @@ package redis_test

import (
"bytes"
"context"
"fmt"
"strings"
"testing"
@@ -198,3 +199,140 @@ func BenchmarkZAdd(b *testing.B) {
		}
	})
}

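// clientSink (like ringSink and clusterSink below) keeps the result of WithContext alive so the compiler cannot optimize the benchmarked call away.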
var clientSink *redis.Client

func BenchmarkWithContext(b *testing.B) {
	rdb := benchmarkRedisClient(10)
	defer rdb.Close()

	ctx := context.Background()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		clientSink = rdb.WithContext(ctx)
	}
}

var ringSink *redis.Ring

func BenchmarkRingWithContext(b *testing.B) {
	rdb := redis.NewRing(&redis.RingOptions{})
	defer rdb.Close()

	ctx := context.Background()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		ringSink = rdb.WithContext(ctx)
	}
}

//------------------------------------------------------------------------------

func newClusterScenario() *clusterScenario {
	return &clusterScenario{
		ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
		nodeIds:   make([]string, 6),
		processes: make(map[string]*redisProcess, 6),
		clients:   make(map[string]*redis.Client, 6),
	}
}

func BenchmarkClusterPing(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	cluster := newClusterScenario()
	if err := startCluster(cluster); err != nil {
		b.Fatal(err)
	}
	defer stopCluster(cluster)

	client := cluster.clusterClient(redisClusterOptions())
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.Ping().Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

func BenchmarkClusterSetString(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	cluster := newClusterScenario()
	if err := startCluster(cluster); err != nil {
		b.Fatal(err)
	}
	defer stopCluster(cluster)

	client := cluster.clusterClient(redisClusterOptions())
	defer client.Close()

	value := string(bytes.Repeat([]byte{'1'}, 10000))

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.Set("key", value, 0).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

func BenchmarkClusterReloadState(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	cluster := newClusterScenario()
	if err := startCluster(cluster); err != nil {
		b.Fatal(err)
	}
	defer stopCluster(cluster)

	client := cluster.clusterClient(redisClusterOptions())
	defer client.Close()

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		err := client.ReloadState()
		if err != nil {
			b.Fatal(err)
		}
	}
}

var clusterSink *redis.ClusterClient

func BenchmarkClusterWithContext(b *testing.B) {
	rdb := redis.NewClusterClient(&redis.ClusterOptions{})
	defer rdb.Close()

	ctx := context.Background()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		clusterSink = rdb.WithContext(ctx)
	}
}