Skip to content

Commit 281c33f

Browse files
committed
Review feedback.
Signed-off-by: Tom Wilkie <[email protected]>
1 parent 17da42e commit 281c33f

File tree

2 files changed

+8
-8
lines changed

2 files changed

+8
-8
lines changed

pkg/chunk/schema_config.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ func (cfg *SchemaConfig) RegisterFlags(f *flag.FlagSet) {
6060
f.Var(&cfg.V6SchemaFrom, "dynamodb.v6-schema-from", "The date (in the format YYYY-MM-DD) after which we enable v6 schema.")
6161
f.Var(&cfg.V7SchemaFrom, "dynamodb.v7-schema-from", "The date (in the format YYYY-MM-DD) after which we enable v7 schema (Deprecated).")
6262
f.Var(&cfg.V8SchemaFrom, "dynamodb.v8-schema-from", "The date (in the format YYYY-MM-DD) after which we enable v8 schema (Deprecated).")
63-
f.Var(&cfg.V9SchemaFrom, "dynamodb.v9-schema-from", "The data (in the format YYYY-MM-DD) after which we enable v9 schema (Series indexing).")
63+
f.Var(&cfg.V9SchemaFrom, "dynamodb.v9-schema-from", "The date (in the format YYYY-MM-DD) after which we enable v9 schema (Series indexing).")
6464

6565
f.BoolVar(&cfg.ThroughputUpdatesDisabled, "table-manager.throughput-updates-disabled", false, "If true, disable all changes to DB capacity")
6666
f.DurationVar(&cfg.DynamoDBPollInterval, "dynamodb.poll-interval", 2*time.Minute, "How frequently to poll DynamoDB to learn our capacity.")

pkg/chunk/series_store.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -79,13 +79,6 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc
7979
}
8080
level.Debug(log).Log("Chunk IDs", len(chunkIDs))
8181

82-
// Protect ourselves against OOMing.
83-
if len(chunkIDs) > c.cfg.QueryChunkLimit {
84-
err := fmt.Errorf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(chunkIDs), c.cfg.QueryChunkLimit)
85-
level.Error(log).Log("err", err)
86-
return nil, err
87-
}
88-
8982
// Filter out chunks that are not in the selected time range.
9083
chunks, err := c.convertChunkIDsToChunks(ctx, chunkIDs)
9184
if err != nil {
@@ -94,6 +87,13 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc
9487
filtered, keys := filterChunksByTime(from, through, chunks)
9588
level.Debug(log).Log("Chunks post filtering", len(chunks))
9689

90+
// Protect ourselves against OOMing.
91+
if len(chunkIDs) > c.cfg.QueryChunkLimit {
92+
err := fmt.Errorf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(chunkIDs), c.cfg.QueryChunkLimit)
93+
level.Error(log).Log("err", err)
94+
return nil, err
95+
}
96+
9797
// Now fetch the actual chunk data from Memcache / S3
9898
allChunks, err := c.fetchChunks(ctx, filtered, keys)
9999
if err != nil {

0 commit comments

Comments (0)