From 23c1d76df94bd4ee55335b8d9a4de084d941ad26 Mon Sep 17 00:00:00 2001
From: Goutham Veeramachaneni
Date: Wed, 4 Nov 2020 15:04:05 +0100
Subject: [PATCH 1/2] Query store for series lookups

Signed-off-by: Goutham Veeramachaneni
---
 CHANGELOG.md                                |  1 +
 docs/blocks-storage/querier.md              |  5 ++++
 docs/configuration/config-file-reference.md |  5 ++++
 integration/querier_test.go                 | 27 ++++++++++++++++++---
 pkg/querier/blocks_store_queryable.go       | 12 ++++++---
 pkg/querier/querier.go                      | 26 +++++++++++---------
 6 files changed, 59 insertions(+), 17 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2be5d52031..164d96d3c6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@
 * [ENHANCEMENT] Added `cortex_alertmanager_config_hash` metric to expose hash of Alertmanager Config loaded per user. #3388
 * [ENHANCEMENT] Query-Frontend / Query-Scheduler: New component called "Query-Scheduler" has been introduced. Query-Scheduler is simply a queue of requests, moved outside of Query-Frontend. This allows Query-Frontend to be scaled separately from number of queues. To make Query-Frontend and Querier use Query-Scheduler, they need to be started with `-frontend.scheduler-address` and `-querier.scheduler-address` options respectively. #3374
 * [ENHANCEMENT] Query-frontend / Querier / Ruler: added `-querier.max-query-lookback` to limit how long back data (series and metadata) can be queried. This setting can be overridden on a per-tenant basis and is enforced in the query-frontend, querier and ruler. #3452 #3458
+* [ENHANCEMENT] Querier: added `-querier.query-store-for-labels-enabled` to query store for series API. Only works with blocks storage engine. #3461
 * [BUGFIX] Blocks storage ingester: fixed some cases leading to a TSDB WAL corruption after a partial write to disk. #3423
 * [BUGFIX] Blocks storage: Fix the race between ingestion and `/flush` call resulting in overlapping blocks. #3422
 * [BUGFIX] Querier: fixed `-querier.max-query-into-future` which wasn't correctly enforced on range queries. #3452
diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md
index 703465d2f7..96048c2b22 100644
--- a/docs/blocks-storage/querier.md
+++ b/docs/blocks-storage/querier.md
@@ -110,6 +110,11 @@ querier:
   # CLI flag: -querier.query-ingesters-within
   [query_ingesters_within: <duration> | default = 0s]
 
+  # Query long-term store for series, label values and label names APIs. Works
+  # only with blocks engine.
+  # CLI flag: -querier.query-store-for-labels
+  [query_store_for_labels: <boolean> | default = false]
+
   # The time after which a metric should only be queried from storage and not
   # just ingesters. 0 means all queries are sent to store. When running the
   # blocks storage, if this option is enabled, the time range of the query sent
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 68d15e797f..d4de2068bc 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -751,6 +751,11 @@ The `querier_config` configures the Cortex querier.
 # CLI flag: -querier.query-ingesters-within
 [query_ingesters_within: <duration> | default = 0s]
 
+# Query long-term store for series, label values and label names APIs. Works
+# only with blocks engine.
+# CLI flag: -querier.query-store-for-labels
+[query_store_for_labels: <boolean> | default = false]
+
 # The time after which a metric should only be queried from storage and not just
 # ingesters. 0 means all queries are sent to store.
When running the blocks # storage, if this option is enabled, the time range of the query sent to the diff --git a/integration/querier_test.go b/integration/querier_test.go index f158d2b5ee..a08c87856a 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -82,6 +82,7 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { "-store-gateway.sharding-strategy": testCfg.blocksShardingStrategy, "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), + "-querier.query-store-for-labels": "true", }) // Start dependencies. @@ -293,6 +294,7 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, "-blocks-storage.bucket-store.index-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), + "-querier.query-store-for-labels": "true", // Ingester. "-ring.store": "consul", "-consul.hostname": consul.NetworkHTTPEndpoint(), @@ -432,6 +434,7 @@ func testMetadataQueriesWithBlocksStorage( var ( lastSeriesInIngesterBlocksName = getMetricName(lastSeriesInIngesterBlocks.Labels) firstSeriesInIngesterHeadName = getMetricName(firstSeriesInIngesterHead.Labels) + lastSeriesInStorageName = getMetricName(lastSeriesInStorage.Labels) lastSeriesInStorageTs = util.TimeFromMillis(lastSeriesInStorage.Samples[0].Timestamp) lastSeriesInIngesterBlocksTs = util.TimeFromMillis(lastSeriesInIngesterBlocks.Samples[0].Timestamp) @@ -471,6 +474,10 @@ func testMetadataQueriesWithBlocksStorage( lookup: lastSeriesInIngesterBlocksName, ok: false, }, + { + lookup: lastSeriesInStorageName, + ok: false, + }, }, labelValuesTests: []labelValuesTest{ { @@ -493,6 +500,10 @@ func testMetadataQueriesWithBlocksStorage( ok: true, resp: lastSeriesInIngesterBlocks.Labels, }, + { + lookup: lastSeriesInStorageName, + ok: false, + }, }, labelValuesTests: []labelValuesTest{ { @@ -502,7 +513,7 @@ func testMetadataQueriesWithBlocksStorage( }, labelNames: []string{labels.MetricName, lastSeriesInIngesterBlocksName}, }, - "query metadata partially inside the ingester range should return the head + local disk data": { + "query metadata partially inside the ingester range": { from: lastSeriesInStorageTs.Add(-blockRangePeriod), to: firstSeriesInIngesterHeadTs.Add(blockRangePeriod), seriesTests: []seriesTest{ @@ -516,6 +527,11 @@ func testMetadataQueriesWithBlocksStorage( ok: true, resp: lastSeriesInIngesterBlocks.Labels, }, + { + lookup: lastSeriesInStorageName, + ok: true, + resp: lastSeriesInStorage.Labels, + }, }, labelValuesTests: []labelValuesTest{ { @@ -525,9 +541,9 @@ func testMetadataQueriesWithBlocksStorage( }, labelNames: []string{labels.MetricName, lastSeriesInIngesterBlocksName, firstSeriesInIngesterHeadName}, }, - "query metadata entirely outside the ingester range should return the head data only": { + "query metadata entirely outside the ingester range should return the head data as well": { from: lastSeriesInStorageTs.Add(-2 * blockRangePeriod), - to: lastSeriesInStorageTs.Add(-blockRangePeriod), + to: lastSeriesInStorageTs, seriesTests: []seriesTest{ { lookup: firstSeriesInIngesterHeadName, @@ -538,6 +554,11 @@ func testMetadataQueriesWithBlocksStorage( lookup: lastSeriesInIngesterBlocksName, ok: false, }, + { + lookup: lastSeriesInStorageName, + ok: true, + resp: 
lastSeriesInStorage.Labels, + }, }, labelValuesTests: []labelValuesTest{ { diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 22ebaa2857..a923a01999 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -382,7 +382,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* // Fetch series from stores. If an error occur we do not retry because retries // are only meant to cover missing blocks. - seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit) + seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit) if err != nil { return storage.ErrSeriesSet(err) } @@ -433,6 +433,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* func (q *blocksStoreQuerier) fetchSeriesFromStores( ctx context.Context, + sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, @@ -459,7 +460,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( blockIDs := blockIDs g.Go(func() error { - req, err := createSeriesRequest(minT, maxT, convertedMatchers, blockIDs) + req, err := createSeriesRequest(minT, maxT, convertedMatchers, sp, blockIDs) if err != nil { return errors.Wrapf(err, "failed to create series request") } @@ -546,7 +547,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil } -func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { +func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, sp *storage.SelectHints, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { // Selectively query only specific blocks. hints := &hintspb.SeriesRequestHints{ BlockMatchers: []storepb.LabelMatcher{ @@ -569,6 +570,11 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, bloc Matchers: matchers, PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, Hints: anyHints, + // See: https://github.com/prometheus/prometheus/pull/8050 + // TODO(goutham): we should ideally be passing the hints down to the storage layer + // and let the TSDB return us data with no chunks as in prometheus#8050. + // But this is an acceptable workaround for now. + SkipChunks: sp != nil && sp.Func == "series", }, nil } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 183e28b1dc..0b978ddea1 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -39,6 +39,7 @@ type Config struct { IngesterStreaming bool `yaml:"ingester_streaming"` MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` + QueryStoreForLabels bool `yaml:"query_store_for_labels"` // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. 
QueryStoreAfter time.Duration `yaml:"query_store_after"` @@ -84,6 +85,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IngesterStreaming, "querier.ingester-streaming", true, "Use streaming RPCs to query ingester.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") + f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -218,13 +220,14 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, } q := querier{ - ctx: ctx, - mint: mint, - maxt: maxt, - chunkIterFn: chunkIterFn, - tombstonesLoader: tombstonesLoader, - limits: limits, - maxQueryIntoFuture: cfg.MaxQueryIntoFuture, + ctx: ctx, + mint: mint, + maxt: maxt, + chunkIterFn: chunkIterFn, + tombstonesLoader: tombstonesLoader, + limits: limits, + maxQueryIntoFuture: cfg.MaxQueryIntoFuture, + queryStoreForLabels: cfg.QueryStoreForLabels, } dqr, err := distributor.Querier(ctx, mint, maxt) @@ -266,9 +269,10 @@ type querier struct { ctx context.Context mint, maxt int64 - tombstonesLoader *purger.TombstonesLoader - limits *validation.Overrides - maxQueryIntoFuture time.Duration + tombstonesLoader *purger.TombstonesLoader + limits *validation.Overrides + maxQueryIntoFuture time.Duration + queryStoreForLabels bool } // Select implements storage.Querier interface. @@ -287,7 +291,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat // querying the long-term storage. // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". // See: https://github.com/prometheus/prometheus/pull/8050 - if sp == nil || sp.Func == "series" { + if sp == nil || sp.Func == "series" && !q.queryStoreForLabels { // In this case, the query time range has already been validated when the querier has been // created. return q.metadataQuerier.Select(true, sp, matchers...) 
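The behavioural fix in PATCH 2/2 below is easy to miss in the `pkg/querier/querier.go` hunk: in Go, `&&` binds tighter than `||`, so the PATCH 1/2 condition `sp == nil || sp.Func == "series" && !q.queryStoreForLabels` only gates the `sp.Func == "series"` branch on the new setting, and a `Select` call with nil hints still goes to the ingesters only even when the flag is enabled. The added parentheses group the hint checks first. A minimal sketch of the difference, with hypothetical helper names that are not part of the patch:

```go
package main

import "fmt"

// PATCH 1/2 form: evaluates as spIsNil || (funcIsSeries && !queryStoreForLabels)
// because && has higher precedence than || in Go.
func ingestersOnlyPatch1(spIsNil, funcIsSeries, queryStoreForLabels bool) bool {
	return spIsNil || funcIsSeries && !queryStoreForLabels
}

// PATCH 2/2 form: the hint checks are grouped, so the flag gates both cases.
func ingestersOnlyPatch2(spIsNil, funcIsSeries, queryStoreForLabels bool) bool {
	return (spIsNil || funcIsSeries) && !queryStoreForLabels
}

func main() {
	// Flag enabled, no select hints passed: the two forms disagree.
	fmt.Println(ingestersOnlyPatch1(true, false, true)) // true  -> ingesters only
	fmt.Println(ingestersOnlyPatch2(true, false, true)) // false -> long-term store is queried too
}
```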
From f301635fdbb8d9d7f995f9abbc38cdd25399221e Mon Sep 17 00:00:00 2001
From: Goutham Veeramachaneni
Date: Mon, 9 Nov 2020 09:59:05 +0100
Subject: [PATCH 2/2] Review feedback

Signed-off-by: Goutham Veeramachaneni
---
 docs/api/_index.md                          |  2 +-
 docs/blocks-storage/querier.md              |  4 ++--
 docs/configuration/config-file-reference.md |  4 ++--
 docs/configuration/v1-guarantees.md         |  3 ++-
 docs/guides/limitations.md                  |  2 +-
 integration/querier_test.go                 |  4 ++--
 pkg/querier/blocks_store_queryable.go       | 16 +++++++++-------
 pkg/querier/querier.go                      |  6 +++---
 8 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/docs/api/_index.md b/docs/api/_index.md
index a74d34e67c..9ce6ba83ee 100644
--- a/docs/api/_index.md
+++ b/docs/api/_index.md
@@ -279,7 +279,7 @@ GET,POST /api/v1/series
 GET,POST /api/v1/series
 ```
 
-Find series by label matchers. Differently than Prometheus and due to scalability and performances reasons, Cortex currently ignores the `start` and `end` request parameters and always fetches the series from in-memory data stored in the ingesters.
+Find series by label matchers. Differently than Prometheus and due to scalability and performances reasons, Cortex currently ignores the `start` and `end` request parameters and always fetches the series from in-memory data stored in the ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set.
 
 _For more information, please check out the Prometheus [series endpoint](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers) documentation._
 
diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md
index 96048c2b22..6a0d9d0254 100644
--- a/docs/blocks-storage/querier.md
+++ b/docs/blocks-storage/querier.md
@@ -112,8 +112,8 @@ querier:
 
   # Query long-term store for series, label values and label names APIs. Works
   # only with blocks engine.
-  # CLI flag: -querier.query-store-for-labels
-  [query_store_for_labels: <boolean> | default = false]
+  # CLI flag: -querier.query-store-for-labels-enabled
+  [query_store_for_labels_enabled: <boolean> | default = false]
 
   # The time after which a metric should only be queried from storage and not
   # just ingesters. 0 means all queries are sent to store. When running the
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index d4de2068bc..90c3e142df 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -753,8 +753,8 @@ The `querier_config` configures the Cortex querier.
 
 # Query long-term store for series, label values and label names APIs. Works
 # only with blocks engine.
-# CLI flag: -querier.query-store-for-labels
-[query_store_for_labels: <boolean> | default = false]
+# CLI flag: -querier.query-store-for-labels-enabled
+[query_store_for_labels_enabled: <boolean> | default = false]
 
 # The time after which a metric should only be queried from storage and not just
 # ingesters. 0 means all queries are sent to store.
When running the blocks diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 106cdcdd12..a0c77402b1 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -24,7 +24,7 @@ The Cortex maintainers commit to ensuring future version of Cortex can read data Cortex strives to be 100% API compatible with Prometheus (under `/api/prom/*`); any deviation from this is considered a bug, except: - Requiring the `__name__` label on queries when querying the [chunks storage](../chunks-storage/_index.md) (queries to ingesters or clusters running the blocks storage are not affected). -- For queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. +- For queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set. - Additional API endpoints for creating, removing and modifying alerts and recording rules. - Additional API around pushing metrics (under `/api/push`). - Additional API endpoints for management of Cortex itself, such as the ring. These APIs are not part of the any compatibility guarantees. @@ -56,3 +56,4 @@ Currently experimental features are: - OpenStack Swift storage support. - Metric relabeling in the distributor. - Scalable query-frontend (when using query-scheduler) +- Querying store for series, labels APIs (`-querier.query-store-for-labels-enabled`) diff --git a/docs/guides/limitations.md b/docs/guides/limitations.md index 131eab51e8..cd2caefabc 100644 --- a/docs/guides/limitations.md +++ b/docs/guides/limitations.md @@ -42,4 +42,4 @@ The Cortex chunks storage doesn't support queries without a metric name, like `c ## Query series and labels -When running queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. +When running queries to the `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/{name}/values` endpoints, query's time range is ignored and the data is always fetched from ingesters. There is experimental support to query the long-term store with the *blocks* storage engine when `-querier.query-store-for-labels-enabled` is set. diff --git a/integration/querier_test.go b/integration/querier_test.go index a08c87856a..1ea7fb7afe 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -82,7 +82,7 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { "-store-gateway.sharding-strategy": testCfg.blocksShardingStrategy, "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), - "-querier.query-store-for-labels": "true", + "-querier.query-store-for-labels-enabled": "true", }) // Start dependencies. 
@@ -294,7 +294,7 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) { "-blocks-storage.bucket-store.index-cache.backend": testCfg.indexCacheBackend, "-blocks-storage.bucket-store.index-cache.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), "-querier.ingester-streaming": strconv.FormatBool(testCfg.ingesterStreamingEnabled), - "-querier.query-store-for-labels": "true", + "-querier.query-store-for-labels-enabled": "true", // Ingester. "-ring.store": "consul", "-consul.hostname": consul.NetworkHTTPEndpoint(), diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index a923a01999..29db5ee3e8 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -460,7 +460,13 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( blockIDs := blockIDs g.Go(func() error { - req, err := createSeriesRequest(minT, maxT, convertedMatchers, sp, blockIDs) + // See: https://github.com/prometheus/prometheus/pull/8050 + // TODO(goutham): we should ideally be passing the hints down to the storage layer + // and let the TSDB return us data with no chunks as in prometheus#8050. + // But this is an acceptable workaround for now. + skipChunks := sp != nil && sp.Func == "series" + + req, err := createSeriesRequest(minT, maxT, convertedMatchers, skipChunks, blockIDs) if err != nil { return errors.Wrapf(err, "failed to create series request") } @@ -547,7 +553,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil } -func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, sp *storage.SelectHints, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { +func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skipChunks bool, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { // Selectively query only specific blocks. hints := &hintspb.SeriesRequestHints{ BlockMatchers: []storepb.LabelMatcher{ @@ -570,11 +576,7 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, sp * Matchers: matchers, PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, Hints: anyHints, - // See: https://github.com/prometheus/prometheus/pull/8050 - // TODO(goutham): we should ideally be passing the hints down to the storage layer - // and let the TSDB return us data with no chunks as in prometheus#8050. - // But this is an acceptable workaround for now. - SkipChunks: sp != nil && sp.Func == "series", + SkipChunks: skipChunks, }, nil } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 0b978ddea1..7fa7042787 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -39,7 +39,7 @@ type Config struct { IngesterStreaming bool `yaml:"ingester_streaming"` MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` - QueryStoreForLabels bool `yaml:"query_store_for_labels"` + QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"` // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. 
QueryStoreAfter time.Duration `yaml:"query_store_after"` @@ -85,7 +85,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IngesterStreaming, "querier.ingester-streaming", true, "Use streaming RPCs to query ingester.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") - f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") + f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -291,7 +291,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat // querying the long-term storage. // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". // See: https://github.com/prometheus/prometheus/pull/8050 - if sp == nil || sp.Func == "series" && !q.queryStoreForLabels { + if (sp == nil || sp.Func == "series") && !q.queryStoreForLabels { // In this case, the query time range has already been validated when the querier has been // created. return q.metadataQuerier.Select(true, sp, matchers...)
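With both patches applied the option stays off by default and only takes effect on the blocks storage engine. A minimal sketch of how a deployment might opt in, using the flag and YAML key documented in the patches (surrounding configuration omitted):

```yaml
# Equivalent CLI flag: -querier.query-store-for-labels-enabled=true
# Experimental: routes series, label names and label values API calls to the
# long-term store in addition to the ingesters.
querier:
  query_store_for_labels_enabled: true
```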