Commit fd56389

Refactor: use newer AWS API for paginated queries
This is less code, and more robust when retrying requests. We no longer need an indirection on the request object for testing. Note that `readThrottle` is no longer used on queries: it should only ever have been applied to chunk operations.

Signed-off-by: Bryan Boreham <[email protected]>
1 parent: 3944ee2 · commit: fd56389
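For orientation, here is a minimal sketch of the call pattern the query path moves to, assuming a plain aws-sdk-go DynamoDB client (standard upstream import paths) rather than the Cortex storage client; the table name, key condition, and the simple `withCustomRetryer` option are placeholders, not code from this commit:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession())
	db := dynamodb.New(sess)

	input := &dynamodb.QueryInput{
		TableName:              aws.String("example-index"), // placeholder table
		KeyConditionExpression: aws.String("h = :hv"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":hv": {S: aws.String("some-hash-value")},
		},
	}

	// A request.Option is just func(*request.Request); the SDK applies each
	// option to the requests it builds while paginating, so a custom Retryer
	// (or extra handlers) can be attached per call rather than per client.
	withCustomRetryer := func(req *request.Request) {
		req.Retryer = client.DefaultRetryer{NumMaxRetries: 5}
	}

	// QueryPagesWithContext drives pagination itself and hands each page to
	// the callback; returning false stops early.
	err := db.QueryPagesWithContext(context.Background(), input,
		func(page *dynamodb.QueryOutput, lastPage bool) bool {
			fmt.Println("items in page:", len(page.Items))
			return true
		},
		withCustomRetryer,
	)
	if err != nil {
		fmt.Println("query failed:", err)
	}
}
```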

4 files changed: 84 additions, 68 deletions

pkg/chunk/aws/dynamodb_storage_client.go

Lines changed: 28 additions & 61 deletions
@@ -12,6 +12,7 @@ import (
 
 	"github.com/go-kit/kit/log/level"
 	ot "github.com/opentracing/opentracing-go"
+	otlog "github.com/opentracing/opentracing-go/log"
 	"golang.org/x/time/rate"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -144,7 +145,6 @@ type dynamoDBStorageClient struct {
 
 	// These functions exists for mocking, so we don't have to write a whole load
 	// of boilerplate.
-	queryRequestFn          func(ctx context.Context, input *dynamodb.QueryInput) dynamoDBRequest
 	batchGetItemRequestFn   func(ctx context.Context, input *dynamodb.BatchGetItemInput) dynamoDBRequest
 	batchWriteItemRequestFn func(ctx context.Context, input *dynamodb.BatchWriteItemInput) dynamoDBRequest
 }
@@ -172,7 +172,6 @@ func newDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig)
 		DynamoDB:      dynamoDB,
 		writeThrottle: rate.NewLimiter(rate.Limit(cfg.ThrottleLimit), dynamoDBMaxWriteBatchSize),
 	}
-	client.queryRequestFn = client.queryRequest
 	client.batchGetItemRequestFn = client.batchGetItemRequest
 	client.batchWriteItemRequestFn = client.batchWriteItemRequest
 	return client, nil
@@ -327,71 +326,35 @@ func (a dynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery
 		}
 	}
 
-	request := a.queryRequestFn(ctx, input)
 	pageCount := 0
 	defer func() {
 		dynamoQueryPagesCount.Observe(float64(pageCount))
 	}()
 
-	for page := request; page != nil; page = page.NextPage() {
-		pageCount++
-
-		response, err := a.queryPage(ctx, input, page, query.HashValue, pageCount)
-		if err != nil {
-			return err
+	retryer := newRetryer(ctx, a.cfg.backoffConfig)
+	err := instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", dynamoRequestDuration, instrument.ErrorCode, func(innerCtx context.Context) error {
+		if sp := ot.SpanFromContext(innerCtx); sp != nil {
+			sp.SetTag("tableName", query.TableName)
+			sp.SetTag("hashValue", query.HashValue)
 		}
-
-		if !callback(response) {
-			if err != nil {
-				return fmt.Errorf("QueryPages error: table=%v, err=%v", *input.TableName, page.Error())
-			}
-			return nil
-		}
-		if !page.HasNextPage() {
-			return nil
-		}
-	}
-	return nil
-}
-
-func (a dynamoDBStorageClient) queryPage(ctx context.Context, input *dynamodb.QueryInput, page dynamoDBRequest, hashValue string, pageCount int) (*dynamoDBReadResponse, error) {
-	backoff := util.NewBackoff(ctx, a.cfg.backoffConfig)
-
-	var err error
-	for backoff.Ongoing() {
-		err = instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", dynamoRequestDuration, instrument.ErrorCode, func(innerCtx context.Context) error {
+		return a.DynamoDB.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
+			pageCount++
 			if sp := ot.SpanFromContext(innerCtx); sp != nil {
-				sp.SetTag("tableName", aws.StringValue(input.TableName))
-				sp.SetTag("hashValue", hashValue)
-				sp.SetTag("page", pageCount)
-				sp.SetTag("retry", backoff.NumRetries())
+				sp.LogFields(otlog.Int("page", pageCount))
 			}
-			return page.Send()
-		})
-
-		if cc := page.Data().(*dynamodb.QueryOutput).ConsumedCapacity; cc != nil {
-			dynamoConsumedCapacity.WithLabelValues("DynamoDB.QueryPages", *cc.TableName).
-				Add(float64(*cc.CapacityUnits))
-		}
 
-		if err != nil {
-			recordDynamoError(*input.TableName, err, "DynamoDB.QueryPages")
-			if awsErr, ok := err.(awserr.Error); ok && ((awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException) || page.Retryable()) {
-				if awsErr.Code() != dynamodb.ErrCodeProvisionedThroughputExceededException {
-					level.Warn(util.Logger).Log("msg", "DynamoDB error", "retry", backoff.NumRetries(), "table", *input.TableName, "err", err)
-				}
-				backoff.Wait()
-				continue
+			if cc := output.ConsumedCapacity; cc != nil {
+				dynamoConsumedCapacity.WithLabelValues("DynamoDB.QueryPages", *cc.TableName).
+					Add(float64(*cc.CapacityUnits))
 			}
-			return nil, fmt.Errorf("QueryPage error: table=%v, err=%v", *input.TableName, err)
-		}
 
-		queryOutput := page.Data().(*dynamodb.QueryOutput)
-		return &dynamoDBReadResponse{
-			items: queryOutput.Items,
-		}, nil
+			return callback(&dynamoDBReadResponse{items: output.Items})
+		}, retryer.withRetrys, withErrorHandler(query.TableName, "DynamoDB.QueryPages"))
+	})
+	if err != nil {
+		return fmt.Errorf("QueryPage error: table=%v, err=%v", query.TableName, err)
 	}
-	return nil, fmt.Errorf("QueryPage error: %s for table %v, last error %v", backoff.Err(), *input.TableName, err)
+	return err
 }
 
 type dynamoDBRequest interface {
@@ -403,12 +366,6 @@ type dynamoDBRequest interface {
 	Retryable() bool
 }
 
-func (a dynamoDBStorageClient) queryRequest(ctx context.Context, input *dynamodb.QueryInput) dynamoDBRequest {
-	req, _ := a.DynamoDB.QueryRequest(input)
-	req.SetContext(ctx)
-	return dynamoDBRequestAdapter{req}
-}
-
 func (a dynamoDBStorageClient) batchGetItemRequest(ctx context.Context, input *dynamodb.BatchGetItemInput) dynamoDBRequest {
 	req, _ := a.DynamoDB.BatchGetItemRequest(input)
 	req.SetContext(ctx)
@@ -840,6 +797,16 @@ func (b dynamoDBReadRequest) TakeReqs(from dynamoDBReadRequest, max int) {
 	}
 }
 
+func withErrorHandler(tableName, operation string) func(req *request.Request) {
+	return func(req *request.Request) {
+		req.Handlers.CompleteAttempt.PushBack(func(req *request.Request) {
+			if req.Error != nil {
+				recordDynamoError(tableName, req.Error, operation)
+			}
+		})
+	}
+}
+
 func recordDynamoError(tableName string, err error, operation string) {
 	if awsErr, ok := err.(awserr.Error); ok {
 		dynamoFailures.WithLabelValues(tableName, awsErr.Code(), operation).Add(float64(1))
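The `withErrorHandler` option added above hooks the SDK's `CompleteAttempt` handler list, so every attempt that ends in an error is recorded, including attempts that are later retried. Below is a self-contained sketch of the same mechanism, with a hypothetical `logAttemptErrors` option and a plain logger standing in for Cortex's metrics:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
)

// logAttemptErrors returns a request.Option shaped like withErrorHandler above,
// but it just logs failed attempts; the operation label is a placeholder.
func logAttemptErrors(operation string) request.Option {
	return func(req *request.Request) {
		// CompleteAttempt handlers run after every attempt, so errors on
		// attempts that are subsequently retried are still observed.
		req.Handlers.CompleteAttempt.PushBack(func(req *request.Request) {
			if req.Error != nil {
				log.Printf("%s attempt failed: %v", operation, req.Error)
			}
		})
	}
}

func main() {
	// Passed as a trailing option to any *WithContext SDK call, e.g.
	// db.QueryPagesWithContext(ctx, input, fn, logAttemptErrors("DynamoDB.QueryPages")).
	_ = logAttemptErrors("DynamoDB.QueryPages")
}
```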

pkg/chunk/aws/fixtures.go

Lines changed: 0 additions & 2 deletions
@@ -41,7 +41,6 @@ var Fixtures = []testutils.Fixture{
 	}
 	index := &dynamoDBStorageClient{
 		DynamoDB:                dynamoDB,
-		queryRequestFn:          dynamoDB.queryRequest,
 		batchGetItemRequestFn:   dynamoDB.batchGetItemRequest,
 		batchWriteItemRequestFn: dynamoDB.batchWriteItemRequest,
 		schemaCfg:               schemaConfig,
@@ -80,7 +79,6 @@ func dynamoDBFixture(provisionedErr, gangsize, maxParallelism int) testutils.Fix
 		},
 		DynamoDB:                dynamoDB,
 		writeThrottle:           rate.NewLimiter(10, dynamoDBMaxWriteBatchSize),
-		queryRequestFn:          dynamoDB.queryRequest,
 		batchGetItemRequestFn:   dynamoDB.batchGetItemRequest,
 		batchWriteItemRequestFn: dynamoDB.batchWriteItemRequest,
 		schemaCfg:               schemaCfg,

pkg/chunk/aws/mock.go

Lines changed: 3 additions & 5 deletions
@@ -194,7 +194,7 @@ func (m *mockDynamoDBClient) batchGetItemRequest(_ context.Context, input *dynam
 	}
 }
 
-func (m *mockDynamoDBClient) queryRequest(_ context.Context, input *dynamodb.QueryInput) dynamoDBRequest {
+func (m *mockDynamoDBClient) QueryPagesWithContext(ctx aws.Context, input *dynamodb.QueryInput, fn func(*dynamodb.QueryOutput, bool) bool, opts ...request.Option) error {
 	result := &dynamodb.QueryOutput{
 		Items: []map[string]*dynamodb.AttributeValue{},
 	}
@@ -241,10 +241,8 @@ func (m *mockDynamoDBClient) queryRequest(_ context.Context, input *dynamodb.Que
 
 		result.Items = append(result.Items, item)
 	}
-
-	return &dynamoDBMockRequest{
-		result: result,
-	}
+	fn(result, true)
+	return nil
 }
 
 type dynamoDBMockRequest struct {

pkg/chunk/aws/retryer.go

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+package aws
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	ot "github.com/opentracing/opentracing-go"
+	otlog "github.com/opentracing/opentracing-go/log"
+
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// Map Cortex Backoff into AWS Retryer interface
+type retryer struct {
+	*util.Backoff
+	maxRetries int
+}
+
+var _ request.Retryer = &retryer{}
+
+func newRetryer(ctx context.Context, cfg util.BackoffConfig) *retryer {
+	return &retryer{
+		Backoff:    util.NewBackoff(ctx, cfg),
+		maxRetries: cfg.MaxRetries,
+	}
+}
+
+func (r *retryer) withRetrys(req *request.Request) {
+	req.Retryer = r
+}
+
+// RetryRules return the retry delay that should be used by the SDK before
+// making another request attempt for the failed request.
+func (r *retryer) RetryRules(req *request.Request) time.Duration {
+	if sp := ot.SpanFromContext(req.Context()); sp != nil {
+		sp.LogFields(otlog.Int("retry", r.NumRetries()))
+	}
+	return r.Backoff.NextDelay()
+}
+
+// ShouldRetry returns if the failed request is retryable.
+func (r *retryer) ShouldRetry(req *request.Request) bool {
+	var d client.DefaultRetryer
+	return d.ShouldRetry(req)
+}
+
+// MaxRetries is the number of times a request may be retried before
+// failing.
+func (r *retryer) MaxRetries() int {
+	return r.maxRetries
+}
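For reference, `request.Retryer` only asks for three methods; the hypothetical `fixedRetryer` below (not Cortex code) shows the contract in isolation. Delegating `ShouldRetry` to `client.DefaultRetryer`, as `retryer.go` does, reuses the SDK's own notion of which failures are retryable, while the delay schedule comes from the Cortex `Backoff`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

// fixedRetryer retries a failed request up to maxRetries times, waiting a
// constant delay between attempts. Purely illustrative.
type fixedRetryer struct {
	maxRetries int
	delay      time.Duration
}

var _ request.Retryer = fixedRetryer{}

// MaxRetries tells the SDK how many retry attempts it may make.
func (f fixedRetryer) MaxRetries() int { return f.maxRetries }

// ShouldRetry reports whether a failed attempt should be retried; deferring to
// client.DefaultRetryer reuses the SDK's classification of retryable errors.
func (f fixedRetryer) ShouldRetry(req *request.Request) bool {
	var d client.DefaultRetryer
	return d.ShouldRetry(req)
}

// RetryRules returns the delay to wait before the next attempt.
func (f fixedRetryer) RetryRules(req *request.Request) time.Duration {
	return f.delay
}

func main() {
	r := fixedRetryer{maxRetries: 3, delay: 200 * time.Millisecond}
	// Installing it per request is a one-line option:
	// opt := func(req *request.Request) { req.Retryer = r }
	fmt.Println("max retries:", r.MaxRetries(), "delay:", r.RetryRules(nil))
}
```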
