diff --git a/cmd/scw-benchstat/main.go b/cmd/scw-benchstat/main.go new file mode 100644 index 0000000000..3f9e8272b5 --- /dev/null +++ b/cmd/scw-benchstat/main.go @@ -0,0 +1,417 @@ +package main + +import ( + "context" + "encoding/csv" + "errors" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" +) + +const ( + benchstatVersion = "v0.0.0-20251023143056-3684bd442cc8" +) + +type config struct { + bench string + benchtime string + count int + benchmem bool + failMetrics []string + threshold float64 + installTool bool + targetDirs []string + verbose bool + update bool +} + +func main() { + cfg := parseFlags() + + if cfg.installTool { + if err := installBenchstat(); err != nil { + log.Fatalf("failed to install benchstat: %v", err) + } + } + + if !isBenchstatAvailable() { + log.Fatalf( + "benchstat not found in PATH; install golang.org/x/perf/cmd/benchstat@%s in your environment or run with --install-benchstat", + benchstatVersion, + ) + } + + if len(cfg.targetDirs) == 0 { + cfg.targetDirs = findBenchmarkDirs() + } + + if len(cfg.targetDirs) == 0 { + log.Fatal("no benchmark directories found") + } + + var hadError bool + for _, dir := range cfg.targetDirs { + if err := runBenchmarksForDir(cfg, dir); err != nil { + log.Printf("❌ failed to run benchmarks for %s: %v", dir, err) + hadError = true + } + } + + if hadError { + os.Exit(1) + } +} + +func parseFlags() config { + cfg := config{} + + flag.StringVar(&cfg.bench, "bench", ".", "benchmark pattern to run") + flag.StringVar(&cfg.benchtime, "benchtime", "1s", "benchmark time") + flag.IntVar(&cfg.count, "count", 5, "number of benchmark runs") + flag.BoolVar(&cfg.benchmem, "benchmem", false, "include memory allocation stats") + flag.Float64Var( + &cfg.threshold, + "threshold", + 1.5, + "performance regression threshold (e.g., 1.5 = 50% slower)", + ) + flag.BoolVar( + &cfg.installTool, + "install-benchstat", + false, + "install benchstat tool if not found", + ) + flag.BoolVar(&cfg.verbose, "verbose", false, "verbose output") + flag.BoolVar(&cfg.update, "update", false, "update baseline files instead of comparing") + + var failMetricsStr string + flag.StringVar( + &failMetricsStr, + "fail-metrics", + "", + "comma-separated list of metrics to check for regressions (default: time/op)", + ) + + var targetDirsStr string + flag.StringVar( + &targetDirsStr, + "target-dirs", + "", + "comma-separated list of directories to benchmark", + ) + + flag.Parse() + + if failMetricsStr != "" { + cfg.failMetrics = strings.Split(failMetricsStr, ",") + } else { + cfg.failMetrics = []string{"time/op"} + } + + if targetDirsStr != "" { + cfg.targetDirs = strings.Split(targetDirsStr, ",") + } + + return cfg +} + +func installBenchstat() error { + fmt.Printf("Installing benchstat@%s...\n", benchstatVersion) + cmd := exec.Command("go", "install", "golang.org/x/perf/cmd/benchstat@"+benchstatVersion) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + return cmd.Run() +} + +func isBenchstatAvailable() bool { + _, err := exec.LookPath("benchstat") + + return err == nil +} + +func findBenchmarkDirs() []string { + var dirs []string + + err := filepath.WalkDir( + "internal/namespaces", + func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } + + if d.IsDir() { + return nil + } + + if strings.HasSuffix(d.Name(), "_benchmark_test.go") { + dir := filepath.Dir(path) + dirs = append(dirs, dir) + } + + return nil + }, + ) + if err != nil { + log.Printf("error scanning for benchmark directories: %v", err) + } + 
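+	// Note: directories are appended once per matching file, so a package that
+	// contains several *_benchmark_test.go files will appear (and be benchmarked) once per file.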
+ return dirs +} + +func runBenchmarksForDir(cfg config, dir string) error { + fmt.Printf(">>> Running benchmarks for %s\n", dir) + + baselineFile := filepath.Join(dir, "testdata", "benchmark.baseline") + + // Run benchmarks + newResults, err := runBenchmarks(cfg, dir) + if err != nil { + return fmt.Errorf("failed to run benchmarks: %w", err) + } + + // Update mode: always overwrite baseline + if cfg.update { + if err := saveBaseline(baselineFile, newResults); err != nil { + return fmt.Errorf("failed to update baseline: %w", err) + } + fmt.Printf("✅ Baseline updated: %s\n", baselineFile) + + return nil + } + + // Check if baseline exists + if _, err := os.Stat(baselineFile); os.IsNotExist(err) { + fmt.Printf("No baseline found at %s. Creating new baseline.\n", baselineFile) + if err := saveBaseline(baselineFile, newResults); err != nil { + return fmt.Errorf("failed to save baseline: %w", err) + } + fmt.Printf("Baseline saved to %s\n", baselineFile) + + return nil + } + + // Compare with baseline + return compareWithBaseline(cfg, baselineFile, newResults) +} + +func runBenchmarks(cfg config, dir string) (string, error) { + args := []string{ + "test", + "-bench=" + cfg.bench, + "-benchtime=" + cfg.benchtime, + "-count=" + strconv.Itoa(cfg.count), + } + + if cfg.benchmem { + args = append(args, "-benchmem") + } + + args = append(args, "./"+dir) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + cmd := exec.CommandContext(ctx, "go", args...) + cmd.Env = append(os.Environ(), "CLI_RUN_BENCHMARKS=true") + + if cfg.verbose { + fmt.Printf("Running: go %s\n", strings.Join(args, " ")) + } + + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("benchmark execution failed: %w\nOutput: %s", err, output) + } + + return string(output), nil +} + +func saveBaseline(filename, content string) error { + dir := filepath.Dir(filename) + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + + return os.WriteFile(filename, []byte(content), 0o644) +} + +func compareWithBaseline(cfg config, baselineFile, newResults string) error { + // Create temporary file for new results + tmpFile, err := os.CreateTemp("", "benchmark-new-*.txt") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + if _, err := tmpFile.WriteString(newResults); err != nil { + return fmt.Errorf("failed to write new results: %w", err) + } + tmpFile.Close() + + // Run benchstat comparison + cmd := exec.Command("benchstat", "-format=csv", baselineFile, tmpFile.Name()) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf( + "failed to compare with benchstat for %s: %w\nOutput: %s", + filepath.Dir(baselineFile), + err, + output, + ) + } + + // Parse CSV output and check for regressions + return checkForRegressions(cfg, string(output)) +} + +func checkForRegressions(cfg config, csvOutput string) error { + reader := csv.NewReader(strings.NewReader(csvOutput)) + records, err := reader.ReadAll() + if err != nil { + return fmt.Errorf("failed to parse benchstat CSV output: %w", err) + } + + if len(records) < 2 { + fmt.Println("No benchmark comparisons found") + + return nil + } + + // Find column indices + header := records[0] + nameIdx := findColumnIndex(header, "name") + oldTimeIdx := findColumnIndex(header, "old time/op") + newTimeIdx := findColumnIndex(header, "new time/op") + oldBytesIdx := findColumnIndex(header, "old B/op") + newBytesIdx := 
findColumnIndex(header, "new B/op") + oldAllocsIdx := findColumnIndex(header, "old allocs/op") + newAllocsIdx := findColumnIndex(header, "new allocs/op") + + if nameIdx == -1 { + return errors.New("could not find 'name' column in benchstat output") + } + + var regressions []string + + for i, record := range records[1:] { + if len(record) <= nameIdx { + continue + } + + benchName := record[nameIdx] + + // Check time/op regression + if contains(cfg.failMetrics, "time/op") && oldTimeIdx != -1 && newTimeIdx != -1 { + if regression := checkMetricRegression(record, oldTimeIdx, newTimeIdx, cfg.threshold); regression != "" { + regressions = append( + regressions, + fmt.Sprintf("%s: time/op %s", benchName, regression), + ) + } + } + + // Check B/op regression + if contains(cfg.failMetrics, "B/op") && oldBytesIdx != -1 && newBytesIdx != -1 { + if regression := checkMetricRegression(record, oldBytesIdx, newBytesIdx, cfg.threshold); regression != "" { + regressions = append(regressions, fmt.Sprintf("%s: B/op %s", benchName, regression)) + } + } + + // Check allocs/op regression + if contains(cfg.failMetrics, "allocs/op") && oldAllocsIdx != -1 && newAllocsIdx != -1 { + if regression := checkMetricRegression(record, oldAllocsIdx, newAllocsIdx, cfg.threshold); regression != "" { + regressions = append( + regressions, + fmt.Sprintf("%s: allocs/op %s", benchName, regression), + ) + } + } + + if cfg.verbose && i < 5 { // Show first few comparisons + fmt.Printf(" %s: compared\n", benchName) + } + } + + // Print full benchstat output + fmt.Println("Benchmark comparison results:") + fmt.Println(csvOutput) + + if len(regressions) > 0 { + fmt.Printf("\n❌ Performance regressions detected (threshold: %.1fx):\n", cfg.threshold) + for _, regression := range regressions { + fmt.Printf(" - %s\n", regression) + } + + return errors.New("performance regressions detected") + } + + fmt.Printf( + "✅ No significant performance regressions detected (threshold: %.1fx)\n", + cfg.threshold, + ) + + return nil +} + +func findColumnIndex(header []string, columnName string) int { + for i, col := range header { + if strings.Contains(strings.ToLower(col), strings.ToLower(columnName)) { + return i + } + } + + return -1 +} + +func checkMetricRegression(record []string, oldIdx, newIdx int, threshold float64) string { + if oldIdx >= len(record) || newIdx >= len(record) { + return "" + } + + oldVal, err1 := parseMetricValue(record[oldIdx]) + newVal, err2 := parseMetricValue(record[newIdx]) + + if err1 != nil || err2 != nil || oldVal == 0 { + return "" + } + + ratio := newVal / oldVal + if ratio > threshold { + return fmt.Sprintf("%.2fx slower (%.2f → %.2f)", ratio, oldVal, newVal) + } + + return "" +} + +func parseMetricValue(s string) (float64, error) { + // Remove common suffixes and parse + s = strings.TrimSpace(s) + s = strings.ReplaceAll(s, "ns", "") + s = strings.ReplaceAll(s, "B", "") + s = strings.TrimSpace(s) + + if s == "" || s == "-" { + return 0, errors.New("empty value") + } + + return strconv.ParseFloat(s, 64) +} + +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + + return false +} diff --git a/docs/BENCHMARKS.md b/docs/BENCHMARKS.md new file mode 100644 index 0000000000..5e9a7a4c07 --- /dev/null +++ b/docs/BENCHMARKS.md @@ -0,0 +1,426 @@ +# Performance Benchmarks + +This document describes the performance benchmarking system for the Scaleway CLI. 
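+
+At its core, the `scw-benchstat` wrapper runs `go test -bench`, compares the results against a committed baseline with `benchstat`, and fails when the new/old ratio of a tracked metric exceeds a configurable threshold. A minimal sketch of that check, with illustrative numbers rather than the exact implementation:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	const threshold = 1.5          // fail if a metric is 50% worse than the baseline
+	oldTimePerOp := 235_000_000.0  // baseline: 235ms per op, in ns
+	newTimePerOp := 360_000_000.0  // new run: 360ms per op, in ns
+
+	ratio := newTimePerOp / oldTimePerOp // 1.53
+	if ratio > threshold {
+		fmt.Printf("regression: %.2fx slower\n", ratio)
+	}
+}
+```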
+ +## Overview + +The benchmarking system allows you to: +- **Measure performance** of CLI commands over time +- **Detect regressions** automatically before merging code +- **Track performance trends** across releases +- **Compare implementations** between branches + +## Architecture + +The system consists of two main components: + +1. **Benchmark tests** (`*_benchmark_test.go`) - Go benchmark functions that measure command performance +2. **Benchstat tool** (`cmd/scw-benchstat`) - Wrapper that runs benchmarks and detects regressions using [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) + +### Workflow + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 1. Scan project for *_benchmark_test.go files │ +└──────────────────┬──────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 2. Run benchmarks: go test -bench=. -benchtime=1s -count=10│ +└──────────────────┬──────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 3. Save/Load baseline from testdata/benchmark.baseline │ +└──────────────────┬──────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 4. Compare with benchstat (statistical analysis) │ +└──────────────────┬──────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 5. Fail CI if regression > threshold (e.g., 1.5x slower) │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Quick Start + +### Running Benchmarks + +```bash +# Run all benchmarks for all namespaces +SCW_PROFILE=cli \ +CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat + +# Run benchmarks for a specific namespace +SCW_PROFILE=cli \ +CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat --target-dirs=internal/namespaces/rdb/v1 +``` + +### Creating a Baseline + +The first time you run benchmarks, a baseline file is automatically created at: +``` +internal/namespaces///testdata/benchmark.baseline +``` + +Example for RDB: +``` +internal/namespaces/rdb/v1/testdata/benchmark.baseline +``` + +**Commit this baseline file** to track performance over time: + +```bash +git add internal/namespaces/rdb/v1/testdata/benchmark.baseline +git commit -S -m "chore(rdb): add performance benchmark baseline" +``` + +### Updating the Baseline + +When you intentionally make performance changes, update the baseline: + +```bash +# Run benchmarks and save new baseline +CLI_RUN_BENCHMARKS=true go test \ + -bench=. 
\ + -benchtime=1s \ + -count=10 \ + ./internal/namespaces/rdb/v1 \ + > internal/namespaces/rdb/v1/testdata/benchmark.baseline + +# Commit the updated baseline +git add internal/namespaces/rdb/v1/testdata/benchmark.baseline +git commit -S -m "chore(rdb): update benchmark baseline after optimization" +``` + +## Command Options + +### Basic Usage + +```bash +go run ./cmd/scw-benchstat [OPTIONS] +``` + +### Available Options + +| Option | Default | Description | +|--------|---------|-------------| +| `--bench` | `.` | Benchmark pattern (Go regex) | +| `--benchtime` | `1s` | Duration per benchmark run | +| `--count` | `5` | Number of benchmark runs (for statistical accuracy) | +| `--benchmem` | `false` | Measure memory allocations | +| `--fail-metrics` | - | Comma-separated metrics to check: `time/op`, `B/op`, `allocs/op` | +| `--threshold` | `1.5` | Regression threshold (1.5 = fail if 50% slower) | +| `--install-benchstat` | `false` | Auto-install benchstat if not found | +| `--target-dirs` | (all) | Comma-separated directories to benchmark | +| `--verbose` | `false` | Enable verbose output | + +### Examples + +#### Run specific benchmarks only +```bash +SCW_PROFILE=cli CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat --bench="BenchmarkInstance.*" +``` + +#### Strict regression detection (20% threshold) +```bash +SCW_PROFILE=cli CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat \ + --threshold=1.2 \ + --fail-metrics=time/op,B/op +``` + +#### Quick run with fewer iterations +```bash +SCW_PROFILE=cli CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat --count=3 --benchtime=500ms +``` + +#### Precise measurement with more runs +```bash +SCW_PROFILE=cli CLI_RUN_BENCHMARKS=true \ +go run ./cmd/scw-benchstat --count=20 --benchtime=3s +``` + +## Benchmark Metrics + +Each benchmark reports three key metrics: + +| Metric | Unit | Description | +|--------|------|-------------| +| **time/op** | ns/op | Nanoseconds per operation (execution time) | +| **B/op** | bytes | Bytes allocated per operation (memory usage) | +| **allocs/op** | count | Number of memory allocations per operation | + +### Example Output + +``` +BenchmarkInstanceGet-11 5 235361983 ns/op 379590 B/op 4369 allocs/op +BenchmarkBackupGet-11 15 70244775 ns/op 272052 B/op 2845 allocs/op +BenchmarkBackupList-11 12 92052913 ns/op 284125 B/op 2994 allocs/op +BenchmarkDatabaseList-11 9 164681597 ns/op 299008 B/op 3152 allocs/op +``` + +**Reading the output:** +- **Column 1**: Benchmark name + number of parallel workers +- **Column 2**: Number of iterations executed +- **Column 3**: Average time per operation (in nanoseconds) +- **Column 4**: Average memory allocated per operation +- **Column 5**: Average number of allocations per operation + +## Interpreting Results + +### Comparison Output (benchstat) + +When comparing with a baseline, `benchstat` shows statistical differences: + +``` +name old time/op new time/op delta +InstanceGet-11 235ms ± 2% 220ms ± 3% -6.38% (p=0.000 n=10+10) +BackupGet-11 70.2ms ± 5% 72.1ms ± 4% +2.70% (p=0.043 n=10+10) + +name old alloc/op new alloc/op delta +InstanceGet-11 380kB ± 1% 383kB ± 1% +0.79% (p=0.000 n=10+10) +``` + +**Understanding the columns:** +- `±` : Standard deviation (variance in measurements) +- `delta` : Percentage change (negative = improvement, positive = regression) +- `p=0.000` : Statistical significance (p < 0.05 = significant change) +- `n=10+10` : Number of samples in each measurement + +### Regression Detection + +The tool fails if any metric exceeds the threshold: + +```bash +❌ 
Performance regressions detected (threshold: 1.5x): + - BenchmarkInstanceGet: time/op 2.1x slower (235ms → 493ms) + - BenchmarkBackupList: B/op 1.8x more memory (284KB → 512KB) +``` + + +## Writing Benchmarks + +### Basic Structure + +Create a file named `custom_benchmark_test.go` in your namespace: + +```go +package mynamespace_test + +import ( + "os" + "testing" + "time" +) + +func BenchmarkMyCommand(b *testing.B) { + // Skip unless explicitly enabled + if os.Getenv("CLI_RUN_BENCHMARKS") != "true" { + b.Skip("Skipping benchmark. Set CLI_RUN_BENCHMARKS=true to run.") + } + + // Setup: create resources, clients, etc. + client, meta, executeCmd := setupBenchmark(b) + + // Reset timer to exclude setup time + b.ResetTimer() + b.ReportAllocs() // Track memory allocations + + // Benchmark loop (Go adjusts b.N automatically) + for range b.N { + executeCmd([]string{"scw", "my-namespace", "my-command", "arg1"}) + } + + b.StopTimer() +} +``` + +### Best Practices + +#### 1. **Resource Management** + +Reuse expensive resources across benchmarks: + +```go +var ( + sharedInstance *MyResource + sharedInstanceMu sync.Mutex +) + +func getOrCreateSharedInstance(b *testing.B) *MyResource { + b.Helper() + sharedInstanceMu.Lock() + defer sharedInstanceMu.Unlock() + + if sharedInstance != nil { + b.Log("Reusing existing shared instance") + return sharedInstance + } + + b.Log("Creating shared instance...") + sharedInstance = createExpensiveResource() + return sharedInstance +} + +func TestMain(m *testing.M) { + code := m.Run() + cleanupSharedInstance() + os.Exit(code) +} +``` + +#### 2. **Proper Timing** + +Exclude setup and cleanup from measurements: + +```go +func BenchmarkMyCommand(b *testing.B) { + // Setup (not timed) + resource := createResource() + b.Cleanup(func() { deleteResource(resource) }) + + b.ResetTimer() // ⏱️ Start timing here + + for range b.N { + executeCmd(...) + } + + b.StopTimer() // ⏹️ Stop timing before cleanup +} +``` + +#### 3. **Avoid Interference** + +Serialize operations that cannot run in parallel: + +```go +var operationMu sync.Mutex + +func BenchmarkOperation(b *testing.B) { + operationMu.Lock() + defer operationMu.Unlock() + + // Only one benchmark can run this at a time + performExclusiveOperation() +} +``` + +## Advanced Usage + +### Custom Statistics + +Enable detailed statistics with `CLI_BENCH_TRACE`: + +```bash +CLI_BENCH_TRACE=true CLI_RUN_BENCHMARKS=true go test -bench=. -v +``` + +Output: +``` +Distribution (n=100): min=200ms median=235ms mean=238ms p95=280ms max=320ms +``` + +### CPU Profiling + +Profile a slow benchmark: + +```bash +CLI_RUN_BENCHMARKS=true go test -bench=BenchmarkSlowCommand \ + -cpuprofile=cpu.prof + +go tool pprof -http=:8080 cpu.prof +``` + +### Memory Profiling + +Analyze memory allocations: + +```bash +CLI_RUN_BENCHMARKS=true go test -bench=BenchmarkMemoryHeavy \ + -memprofile=mem.prof + +go tool pprof -http=:8080 mem.prof +``` + +### Comparing Branches + +```bash +# Run on main branch +git checkout main +CLI_RUN_BENCHMARKS=true go test -bench=. -count=10 > /tmp/main.txt + +# Run on feature branch +git checkout feature/my-optimization +CLI_RUN_BENCHMARKS=true go test -bench=. 
-count=10 > /tmp/feature.txt + +# Compare +benchstat /tmp/main.txt /tmp/feature.txt +``` + +## Troubleshooting + +### "benchstat not found" + +Install benchstat: +```bash +go install golang.org/x/perf/cmd/benchstat@latest +``` + +Or use auto-install: +```bash +go run ./cmd/scw-benchstat --install-benchstat +``` + +### "signal: killed" + +The benchmark process was killed (timeout or OOM). Try: +- Reduce `--count` or `--benchtime` +- Run specific benchmarks only with `--bench` +- Check system resources (RAM, CPU) + +### "409 Conflict" errors + +Resources are in a transient state. The cleanup retry mechanism should handle this automatically. If it persists: +- Increase retry attempts in `cleanupWithRetry` +- Add more wait time between operations + +### Inconsistent Results + +Run with more iterations for better statistical accuracy: +```bash +go run ./cmd/scw-benchstat --count=20 --benchtime=3s +``` + +## FAQ + +**Q: How often should I update the baseline?** +A: Update it after intentional performance changes or major refactors. Don't update for every PR unless you've specifically optimized performance. + +**Q: What's a good threshold value?** +A: Start with 1.5 (50% regression). Adjust based on your tolerance: +- 1.2 (20%) = strict +- 1.5 (50%) = balanced +- 2.0 (100%) = lenient + +**Q: Should I commit baseline files?** +A: Yes! Baselines should be tracked in git to enable comparison across branches and time. + +**Q: How do I run benchmarks locally?** +A: Use the same command as CI with your local credentials: +```bash +SCW_PROFILE=cli CLI_RUN_BENCHMARKS=true go run ./cmd/scw-benchstat +``` + +**Q: Can I benchmark other namespaces (instance, vpc, etc.)?** +A: Yes! Create a `custom_benchmark_test.go` file in any namespace directory following the same pattern as RDB. + +## References + +- [Go Benchmarking Documentation](https://pkg.go.dev/testing#hdr-Benchmarks) +- [benchstat Tool](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +- [Performance Profiling in Go](https://go.dev/blog/pprof) + diff --git a/internal/env/env.go b/internal/env/env.go new file mode 100644 index 0000000000..ccb8032aa1 --- /dev/null +++ b/internal/env/env.go @@ -0,0 +1,19 @@ +// Package env contains a list of environment variables used to modify the behaviour of the CLI. +package env + +const ( + // RunBenchmarks if set to "true" will enable benchmark execution + RunBenchmarks = "CLI_RUN_BENCHMARKS" + + // BenchTrace if set to "true" will enable detailed benchmark statistics (min, median, mean, p95, max) + BenchTrace = "CLI_BENCH_TRACE" + + // BenchCmdTimeout sets the command timeout for benchmarks (e.g., "30s", "1m"). 
Default: 30s + BenchCmdTimeout = "CLI_BENCH_CMD_TIMEOUT" + + // UpdateCassettes if set to "true" will trigger the cassettes to be recorded + UpdateCassettes = "CLI_UPDATE_CASSETTES" + + // UpdateGoldens if set to "true" will update golden files during tests + UpdateGoldens = "CLI_UPDATE_GOLDENS" +) diff --git a/internal/namespaces/rdb/v1/custom_benchmark_test.go b/internal/namespaces/rdb/v1/custom_benchmark_test.go new file mode 100644 index 0000000000..ea6da184b7 --- /dev/null +++ b/internal/namespaces/rdb/v1/custom_benchmark_test.go @@ -0,0 +1,419 @@ +package rdb_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "os" + "sort" + "strings" + "testing" + "time" + + "github.com/scaleway/scaleway-cli/v2/core" + "github.com/scaleway/scaleway-cli/v2/internal/env" + "github.com/scaleway/scaleway-cli/v2/internal/namespaces/rdb/v1" + "github.com/scaleway/scaleway-cli/v2/internal/testhelpers" + rdbSDK "github.com/scaleway/scaleway-sdk-go/api/rdb/v1" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// Benchmarks for RDB commands. +// +// Baseline stored in testdata/benchmark.baseline (like golden files). +// +// Install benchstat (required for comparison): +// +// go install golang.org/x/perf/cmd/benchstat@latest +// +// To compare performance: +// +// benchstat testdata/benchmark.baseline <(CLI_RUN_BENCHMARKS=true go test -bench=. -benchtime=100x .) +// +// To update baseline: +// +// CLI_RUN_BENCHMARKS=true go test -bench=. -benchtime=100x . > testdata/benchmark.baseline +// +// Or use the automated tool (installs benchstat automatically): +// +// go run ./cmd/scw-benchstat --install-benchstat --bench=. --count=10 + +var sharedInstance *rdbSDK.Instance + +// TestMain ensures shared instance cleanup +func TestMain(m *testing.M) { + code := m.Run() + cleanupSharedInstance() + os.Exit(code) +} + +func setupBenchmark(b *testing.B) (*scw.Client, core.TestMetadata, func(args []string) any) { + b.Helper() + + return testhelpers.SetupBenchmark(b, rdb.GetCommands()) +} + +func cleanupWithRetry(b *testing.B, name string, resourceID string, cleanupFn func() error) { + b.Helper() + + maxRetries := 5 + for i := range maxRetries { + err := cleanupFn() + if err == nil { + return + } + + // Check if it's a 409 Conflict using typed error + var respErr *scw.ResponseError + isConflict := errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict + + // Fallback: check error message for transient state keywords + if !isConflict { + errMsg := err.Error() + isConflict = strings.Contains(errMsg, "transient state") || + strings.Contains(errMsg, "backuping") + } + + if isConflict && i < maxRetries-1 { + waitTime := time.Duration(2*(i+1)) * time.Second + b.Logf( + "cleanup conflict for %s=%s (attempt %d/%d), waiting %v: %v", + name, + resourceID, + i+1, + maxRetries, + waitTime, + err, + ) + time.Sleep(waitTime) + + continue + } + + b.Errorf("cleanup failure (%s=%s) after %d attempts: %v", name, resourceID, i+1, err) + + return + } +} + +type benchmarkStats struct { + timings []time.Duration + enabled bool +} + +func newBenchmarkStats() *benchmarkStats { + return &benchmarkStats{ + enabled: os.Getenv(env.BenchTrace) == "true", + timings: make([]time.Duration, 0, 1000), + } +} + +func (s *benchmarkStats) record(d time.Duration) { + s.timings = append(s.timings, d) +} + +func (s *benchmarkStats) getMean() time.Duration { + if len(s.timings) == 0 { + return 0 + } + + var sum 
time.Duration + for _, t := range s.timings { + sum += t + } + + return sum / time.Duration(len(s.timings)) +} + +func (s *benchmarkStats) report(b *testing.B) { + b.Helper() + + if !s.enabled || len(s.timings) == 0 { + return + } + + sort.Slice(s.timings, func(i, j int) bool { + return s.timings[i] < s.timings[j] + }) + + minVal := s.timings[0] + maxVal := s.timings[len(s.timings)-1] + median := s.timings[len(s.timings)/2] + p95 := s.timings[int(float64(len(s.timings))*0.95)] + mean := s.getMean() + + b.Logf("Distribution (n=%d): min=%v median=%v mean=%v p95=%v max=%v", + len(s.timings), minVal, median, mean, p95, maxVal) +} + +func getOrCreateSharedInstance( + b *testing.B, + client *scw.Client, + executeCmd func([]string) any, + meta core.TestMetadata, +) *rdbSDK.Instance { + b.Helper() + + if sharedInstance != nil { + b.Log("Reusing existing shared RDB instance") + + return sharedInstance + } + + b.Log("Creating shared RDB instance for all benchmarks...") + ctx := &core.BeforeFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + + if err := createInstanceDirect(engine)(ctx); err != nil { + b.Fatalf("Failed to create shared instance: %v", err) + } + + instance := meta["Instance"].(rdb.CreateInstanceResult).Instance + sharedInstance = instance + + b.Logf("Shared RDB instance created: %s", instance.ID) + + if err := waitForInstanceReady(executeCmd, instance.ID); err != nil { + b.Fatalf("Shared instance not ready: %v", err) + } + + b.Log("Shared instance is ready") + + return sharedInstance +} + +func cleanupSharedInstance() { + if sharedInstance == nil { + return + } + + fmt.Printf("Cleaning up shared RDB instance: %s\n", sharedInstance.ID) + + client, err := scw.NewClient( + scw.WithDefaultRegion(scw.RegionFrPar), + scw.WithDefaultZone(scw.ZoneFrPar1), + scw.WithEnv(), + ) + if err != nil { + fmt.Printf("Error creating client for cleanup: %v\n", err) + + return + } + + config, err := scw.LoadConfig() + if err == nil { + activeProfile, err := config.GetActiveProfile() + if err == nil { + envProfile := scw.LoadEnvProfile() + profile := scw.MergeProfiles(activeProfile, envProfile) + client, _ = scw.NewClient( + scw.WithDefaultRegion(scw.RegionFrPar), + scw.WithDefaultZone(scw.ZoneFrPar1), + scw.WithProfile(profile), + scw.WithEnv(), + ) + } + } + + executeCmd := func(args []string) any { + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + _, result, _ := core.Bootstrap(&core.BootstrapConfig{ + Args: args, + Commands: rdb.GetCommands().Copy(), + BuildInfo: &core.BuildInfo{}, + Stdout: stdoutBuffer, + Stderr: stderrBuffer, + Client: client, + DisableTelemetry: true, + DisableAliases: true, + OverrideEnv: map[string]string{}, + Ctx: context.Background(), + }) + + return result + } + + meta := core.TestMetadata{ + "Instance": rdb.CreateInstanceResult{Instance: sharedInstance}, + } + + afterCtx := &core.AfterFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + + if err := deleteInstanceDirect()(afterCtx); err != nil { + fmt.Printf("Error deleting shared instance: %v\n", err) + time.Sleep(2 * time.Second) + if err2 := deleteInstanceDirect()(afterCtx); err2 != nil { + fmt.Printf("Final cleanup failure: %v\n", err2) + } + } + + sharedInstance = nil +} + +func BenchmarkInstanceGet(b *testing.B) { + if os.Getenv(env.RunBenchmarks) != "true" { + b.Skip("Skipping benchmark. 
Set CLI_RUN_BENCHMARKS=true to run.") + } + + client, meta, executeCmd := setupBenchmark(b) + instance := getOrCreateSharedInstance(b, client, executeCmd, meta) + + stats := newBenchmarkStats() + b.ResetTimer() + b.ReportAllocs() + + for range b.N { + start := time.Now() + executeCmd([]string{"scw", "rdb", "instance", "get", instance.ID}) + stats.record(time.Since(start)) + } + + b.StopTimer() + stats.report(b) +} + +func BenchmarkBackupGet(b *testing.B) { + if os.Getenv(env.RunBenchmarks) != "true" { + b.Skip("Skipping benchmark. Set CLI_RUN_BENCHMARKS=true to run.") + } + + client, meta, executeCmd := setupBenchmark(b) + instance := getOrCreateSharedInstance(b, client, executeCmd, meta) + + ctx := &core.BeforeFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + + meta["Instance"] = rdb.CreateInstanceResult{Instance: instance} + + if err := waitForInstanceReady(executeCmd, instance.ID); err != nil { + b.Fatalf("Instance not ready before backup: %v", err) + } + + if err := createBackupDirect("Backup")(ctx); err != nil { + b.Fatalf("Failed to create backup: %v", err) + } + + backup := meta["Backup"].(*rdbSDK.DatabaseBackup) + + b.Cleanup(func() { + afterCtx := &core.AfterFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + cleanupWithRetry(b, "backup", backup.ID, func() error { + return deleteBackupDirect("Backup")(afterCtx) + }) + }) + + stats := newBenchmarkStats() + b.ResetTimer() + b.ReportAllocs() + + for range b.N { + start := time.Now() + executeCmd([]string{"scw", "rdb", "backup", "get", backup.ID}) + stats.record(time.Since(start)) + } + + b.StopTimer() + stats.report(b) +} + +func BenchmarkBackupList(b *testing.B) { + if os.Getenv(env.RunBenchmarks) != "true" { + b.Skip("Skipping benchmark. Set CLI_RUN_BENCHMARKS=true to run.") + } + + client, meta, executeCmd := setupBenchmark(b) + instance := getOrCreateSharedInstance(b, client, executeCmd, meta) + + ctx := &core.BeforeFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + + meta["Instance"] = rdb.CreateInstanceResult{Instance: instance} + + if err := waitForInstanceReady(executeCmd, instance.ID); err != nil { + b.Fatalf("Instance not ready before backup 1: %v", err) + } + + if err := createBackupDirect("Backup1")(ctx); err != nil { + b.Fatalf("Failed to create backup 1: %v", err) + } + + if err := waitForInstanceReady(executeCmd, instance.ID); err != nil { + b.Fatalf("Instance not ready before backup 2: %v", err) + } + + if err := createBackupDirect("Backup2")(ctx); err != nil { + b.Fatalf("Failed to create backup 2: %v", err) + } + + backup1 := meta["Backup1"].(*rdbSDK.DatabaseBackup) + backup2 := meta["Backup2"].(*rdbSDK.DatabaseBackup) + + b.Cleanup(func() { + afterCtx := &core.AfterFuncCtx{ + Client: client, + ExecuteCmd: executeCmd, + Meta: meta, + } + cleanupWithRetry(b, "backup1", backup1.ID, func() error { + return deleteBackupDirect("Backup1")(afterCtx) + }) + cleanupWithRetry(b, "backup2", backup2.ID, func() error { + return deleteBackupDirect("Backup2")(afterCtx) + }) + }) + + stats := newBenchmarkStats() + b.ResetTimer() + b.ReportAllocs() + + for range b.N { + start := time.Now() + executeCmd([]string{"scw", "rdb", "backup", "list", "instance-id=" + instance.ID}) + stats.record(time.Since(start)) + } + + b.StopTimer() + stats.report(b) +} + +func BenchmarkDatabaseList(b *testing.B) { + if os.Getenv(env.RunBenchmarks) != "true" { + b.Skip("Skipping benchmark. 
Set CLI_RUN_BENCHMARKS=true to run.") + } + + client, meta, executeCmd := setupBenchmark(b) + instance := getOrCreateSharedInstance(b, client, executeCmd, meta) + + stats := newBenchmarkStats() + b.ResetTimer() + b.ReportAllocs() + + for range b.N { + start := time.Now() + executeCmd([]string{"scw", "rdb", "database", "list", "instance-id=" + instance.ID}) + stats.record(time.Since(start)) + } + + b.StopTimer() + stats.report(b) +} diff --git a/internal/namespaces/rdb/v1/helper_test.go b/internal/namespaces/rdb/v1/helper_test.go index b1067f4886..b7801fc64b 100644 --- a/internal/namespaces/rdb/v1/helper_test.go +++ b/internal/namespaces/rdb/v1/helper_test.go @@ -1,8 +1,10 @@ package rdb_test import ( + "context" "errors" "fmt" + "time" "github.com/scaleway/scaleway-cli/v2/core" "github.com/scaleway/scaleway-cli/v2/internal/namespaces/rdb/v1" @@ -113,3 +115,109 @@ func deleteInstance() core.AfterFunc { func deleteInstanceAndWait() core.AfterFunc { return core.ExecAfterCmd("scw rdb instance delete {{ .Instance.ID }} --wait") } + +func createInstanceDirect(_ string) core.BeforeFunc { + return func(ctx *core.BeforeFuncCtx) error { + result := ctx.ExecuteCmd([]string{ + "scw", "rdb", "instance", "create", + "node-type=DB-DEV-S", + "is-ha-cluster=false", + "name=" + name, + "engine=" + engine, + "user-name=" + user, + "password=" + password, + "--wait", + }) + ctx.Meta["Instance"] = result + + return nil + } +} + +func createBackupDirect(metaKey string) core.BeforeFunc { + return func(ctx *core.BeforeFuncCtx) error { + instanceResult := ctx.Meta["Instance"].(rdb.CreateInstanceResult) + instance := instanceResult.Instance + + result := ctx.ExecuteCmd([]string{ + "scw", "rdb", "backup", "create", + "name=cli-test-backup", + "expires-at=2032-01-02T15:04:05-07:00", + "instance-id=" + instance.ID, + "database-name=rdb", + "--wait", + }) + ctx.Meta[metaKey] = result + + return nil + } +} + +func deleteBackupDirect(metaKey string) core.AfterFunc { + return func(ctx *core.AfterFuncCtx) error { + backup := ctx.Meta[metaKey].(*rdbSDK.DatabaseBackup) + ctx.ExecuteCmd([]string{ + "scw", "rdb", "backup", "delete", + backup.ID, + }) + + return nil + } +} + +func deleteInstanceDirect() core.AfterFunc { + return func(ctx *core.AfterFuncCtx) error { + instance := ctx.Meta["Instance"].(rdb.CreateInstanceResult).Instance + ctx.ExecuteCmd([]string{ + "scw", "rdb", "instance", "delete", + instance.ID, + }) + + return nil + } +} + +func waitForInstanceReady( + executeCmd func([]string) any, + instanceID string, +) error { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + backoff := time.Second + for { + select { + case <-ctx.Done(): + return fmt.Errorf( + "timeout waiting for instance %s to be ready for operations", + instanceID, + ) + default: + result := executeCmd([]string{"scw", "rdb", "instance", "get", instanceID}) + + // Try direct type assertion first + if instance, ok := result.(*rdbSDK.Instance); ok { + if instance.Status == rdbSDK.InstanceStatusReady { + time.Sleep(5 * time.Second) + + return nil + } + } else { + v := result.(struct { + *rdbSDK.Instance + ACLs []*rdbSDK.ACLRule `json:"acls"` + }) + if v.Instance != nil && v.Instance.Status == rdbSDK.InstanceStatusReady { + time.Sleep(5 * time.Second) + + return nil + } + } + + time.Sleep(backoff) + if backoff < 10*time.Second { + backoff *= 2 + } + } + } +} diff --git a/internal/namespaces/rdb/v1/testdata/benchmark.baseline 
b/internal/namespaces/rdb/v1/testdata/benchmark.baseline new file mode 100644 index 0000000000..4037c89950 --- /dev/null +++ b/internal/namespaces/rdb/v1/testdata/benchmark.baseline @@ -0,0 +1,10 @@ +goos: darwin +goarch: amd64 +pkg: github.com/scaleway/scaleway-cli/v2/internal/namespaces/rdb/v1 +cpu: VirtualApple @ 2.50GHz +BenchmarkInstanceGet-11 100 239648044 ns/op 350232 B/op 4121 allocs/op +BenchmarkBackupGet-11 100 85116384 ns/op 271182 B/op 2839 allocs/op +BenchmarkBackupList-11 100 118312364 ns/op 284095 B/op 2996 allocs/op +BenchmarkDatabaseList-11 100 136427978 ns/op 283705 B/op 3046 allocs/op +PASS + diff --git a/internal/testhelpers/helpers_benchmark.go b/internal/testhelpers/helpers_benchmark.go new file mode 100644 index 0000000000..084f29c275 --- /dev/null +++ b/internal/testhelpers/helpers_benchmark.go @@ -0,0 +1,73 @@ +package testhelpers + +import ( + "bytes" + "context" + "testing" + + "github.com/scaleway/scaleway-cli/v2/core" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// SetupBenchmark initializes a Scaleway client and test metadata for benchmarks. +// It loads credentials from the active profile and environment variables. +func SetupBenchmark( + b *testing.B, + commands *core.Commands, +) (*scw.Client, core.TestMetadata, func(args []string) any) { + b.Helper() + + clientOpts := []scw.ClientOption{ + scw.WithDefaultRegion(scw.RegionFrPar), + scw.WithDefaultZone(scw.ZoneFrPar1), + scw.WithUserAgent("cli-benchmark-test"), + scw.WithEnv(), + } + + config, err := scw.LoadConfig() + if err == nil { + activeProfile, err := config.GetActiveProfile() + if err == nil { + envProfile := scw.LoadEnvProfile() + profile := scw.MergeProfiles(activeProfile, envProfile) + clientOpts = append(clientOpts, scw.WithProfile(profile)) + } + } + + client, err := scw.NewClient(clientOpts...) + if err != nil { + b.Fatalf( + "Failed to create Scaleway client: %v\nMake sure you have configured your credentials with 'scw config'", + err, + ) + } + + meta := core.TestMetadata{ + "t": b, + } + + executeCmd := func(args []string) any { + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + _, result, err := core.Bootstrap(&core.BootstrapConfig{ + Args: args, + Commands: commands.Copy(), + BuildInfo: nil, + Stdout: stdoutBuffer, + Stderr: stderrBuffer, + Client: client, + DisableTelemetry: true, + DisableAliases: true, + OverrideEnv: map[string]string{}, + Ctx: context.Background(), + }) + if err != nil { + b.Errorf("error executing cmd (%s): %v\nstdout: %s\nstderr: %s", + args, err, stdoutBuffer.String(), stderrBuffer.String()) + } + + return result + } + + return client, meta, executeCmd +}
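For reference, here is a minimal sketch of how another namespace could reuse `testhelpers.SetupBenchmark` to add its own benchmarks, following the same pattern as the RDB file above (as the FAQ suggests). The `vpc` namespace, its import path, `GetCommands()` constructor, and the command arguments are illustrative assumptions; adapt them to the target namespace:

```go
package vpc_test

import (
	"os"
	"testing"

	"github.com/scaleway/scaleway-cli/v2/internal/env"
	vpc "github.com/scaleway/scaleway-cli/v2/internal/namespaces/vpc/v2" // hypothetical namespace
	"github.com/scaleway/scaleway-cli/v2/internal/testhelpers"
)

func BenchmarkPrivateNetworkList(b *testing.B) {
	// Benchmarks only run when explicitly enabled, like the RDB ones.
	if os.Getenv(env.RunBenchmarks) != "true" {
		b.Skip("Skipping benchmark. Set CLI_RUN_BENCHMARKS=true to run.")
	}

	// SetupBenchmark returns a configured client, shared test metadata,
	// and an executeCmd helper that runs a CLI invocation in-process.
	_, _, executeCmd := testhelpers.SetupBenchmark(b, vpc.GetCommands())

	b.ResetTimer()
	b.ReportAllocs()

	for range b.N {
		// Illustrative command; replace with the command you want to measure.
		executeCmd([]string{"scw", "vpc", "private-network", "list"})
	}

	b.StopTimer()
}
```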