diff --git a/Gopkg.lock b/Gopkg.lock index 2f07af9fe40..87817b4983d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -351,7 +351,7 @@ branch = "master" name = "github.com/grpc-ecosystem/grpc-opentracing" packages = ["go/otgrpc"] - revision = "0e7658f8ee99ee5aa683e2a032b8880091b7a055" + revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746" [[projects]] branch = "master" @@ -554,8 +554,8 @@ "ext", "log" ] - revision = "6edb48674bd9467b8e91fda004f2bd7202d60ce4" - version = "v1.0.1" + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" [[projects]] branch = "master" @@ -680,7 +680,7 @@ "util/treecache", "web/api/v1" ] - revision = "25e2d9f152b634d65123ef11342aceb23e66a1a2" + revision = "504acf4a0aec394fa7993dc2fe5744ef59f97b2c" [[projects]] branch = "master" @@ -708,10 +708,10 @@ version = "v1.1.0" [[projects]] - branch = "master" name = "github.com/sercand/kuberesolver" packages = ["."] - revision = "2f561e34ecb6206fcad82f0c5842379188d8db40" + revision = "aa801ca262949d887bbe0bae3f6f731ac82c26f6" + version = "v1.0.0" [[projects]] name = "github.com/sirupsen/logrus" @@ -776,7 +776,7 @@ revision = "be0d55e547b147ea1817f037cab9458bf7fc7850" [[projects]] - branch = "master" + branch = "logging" name = "github.com/weaveworks/common" packages = [ "aws", @@ -795,7 +795,8 @@ "tracing", "user" ] - revision = "b6e3b7fd56106061b1d60ec4da46c0d6770eb111" + revision = "061519e3f19e6384aedbf83488b6f98f18b9610b" + source = "github.com/tomwilkie/weaveworks-common" [[projects]] name = "github.com/weaveworks/mesh" @@ -1122,6 +1123,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "dfbcc50df522b353f27b1af1328cc6389e305dd0bdf23e8ea4200d7e9bca9d2b" + inputs-digest = "374aacd909431b9d5c2ac19c4a78c3ce136221d5ae4794078a16bb6fa78eb630" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 3cc9d22e9ad..83c22b7625d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,6 +2,11 @@ unused-packages = true go-tests = true +[[constraint]] + name = "github.com/weaveworks/common" + branch = "logging" + source = "github.com/tomwilkie/weaveworks-common" + [[constraint]] name = "github.com/aws/aws-sdk-go" version = "v1.10.8" diff --git a/cmd/alertmanager/main.go b/cmd/alertmanager/main.go index fa48bc1c41f..934cb8fba94 100644 --- a/cmd/alertmanager/main.go +++ b/cmd/alertmanager/main.go @@ -35,12 +35,11 @@ func main() { }, } alertmanagerConfig alertmanager.MultitenantAlertmanagerConfig - logLevel util.LogLevel ) - util.RegisterFlags(&serverConfig, &alertmanagerConfig, &logLevel) + util.RegisterFlags(&serverConfig, &alertmanagerConfig) flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) multiAM, err := alertmanager.NewMultitenantAlertmanager(&alertmanagerConfig) if err != nil { diff --git a/cmd/configs/main.go b/cmd/configs/main.go index b84d7ea4ff5..f4978012173 100644 --- a/cmd/configs/main.go +++ b/cmd/configs/main.go @@ -25,12 +25,11 @@ func main() { }, } dbConfig db.Config - logLevel util.LogLevel ) - util.RegisterFlags(&serverConfig, &dbConfig, &logLevel) + util.RegisterFlags(&serverConfig, &dbConfig) flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) db, err := db.New(dbConfig) if err != nil { diff --git a/cmd/distributor/main.go b/cmd/distributor/main.go index 1d885951f83..11a92fb2bb3 100644 --- 
a/cmd/distributor/main.go +++ b/cmd/distributor/main.go @@ -47,12 +47,11 @@ func main() { } ringConfig ring.Config distributorConfig distributor.Config - logLevel util.LogLevel ) - util.RegisterFlags(&serverConfig, &ringConfig, &distributorConfig, &logLevel) + util.RegisterFlags(&serverConfig, &ringConfig, &distributorConfig) flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) // Setting the environment variable JAEGER_AGENT_HOST enables tracing trace := tracing.NewFromEnv("distributor") diff --git a/cmd/ingester/main.go b/cmd/ingester/main.go index 23b7a51960e..3cfadc3a428 100644 --- a/cmd/ingester/main.go +++ b/cmd/ingester/main.go @@ -33,7 +33,6 @@ func main() { schemaConfig chunk.SchemaConfig storageConfig storage.Config ingesterConfig ingester.Config - logLevel util.LogLevel eventSampleRate int maxStreams uint ) @@ -45,12 +44,12 @@ func main() { // Ingester needs to know our gRPC listen port. ingesterConfig.LifecyclerConfig.ListenPort = &serverConfig.GRPCListenPort util.RegisterFlags(&serverConfig, &chunkStoreConfig, &storageConfig, - &schemaConfig, &ingesterConfig, &logLevel) + &schemaConfig, &ingesterConfig) flag.UintVar(&maxStreams, "ingester.max-concurrent-streams", 1000, "Limit on the number of concurrent streams for gRPC calls (0 = unlimited)") flag.IntVar(&eventSampleRate, "event.sample-rate", 0, "How often to sample observability events (0 = never).") flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) util.InitEvents(eventSampleRate) if maxStreams > 0 { @@ -70,7 +69,7 @@ func main() { os.Exit(1) } - chunkStore, err := chunk.NewStore(chunkStoreConfig, schemaConfig, storageClient) + chunkStore, err := chunk.NewCompositeStore(chunkStoreConfig, schemaConfig, storageClient) if err != nil { level.Error(util.Logger).Log("err", err) os.Exit(1) diff --git a/cmd/lite/main.go b/cmd/lite/main.go index 7d9fac51999..fe3c05d68ee 100644 --- a/cmd/lite/main.go +++ b/cmd/lite/main.go @@ -11,7 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/tsdb" "google.golang.org/grpc" @@ -48,14 +47,13 @@ func main() { rulerConfig ruler.Config schemaConfig chunk.SchemaConfig storageConfig storage.Config - logLevel util.LogLevel unauthenticated bool ) // Ingester needs to know our gRPC listen port. 
ingesterConfig.LifecyclerConfig.ListenPort = &serverConfig.GRPCListenPort util.RegisterFlags(&serverConfig, &chunkStoreConfig, &distributorConfig, &querierConfig, - &ingesterConfig, &configStoreConfig, &rulerConfig, &storageConfig, &schemaConfig, &logLevel) + &ingesterConfig, &configStoreConfig, &rulerConfig, &storageConfig, &schemaConfig) flag.BoolVar(&unauthenticated, "unauthenticated", false, "Set to true to disable multitenancy.") flag.Parse() ingesterConfig.SetClientConfig(distributorConfig.IngesterClientConfig) @@ -64,7 +62,7 @@ func main() { trace := tracing.NewFromEnv("ingester") defer trace.Close() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) server, err := server.New(serverConfig) if err != nil { @@ -79,7 +77,7 @@ func main() { os.Exit(1) } - chunkStore, err := chunk.NewStore(chunkStoreConfig, schemaConfig, storageClient) + chunkStore, err := chunk.NewCompositeStore(chunkStoreConfig, schemaConfig, storageClient) if err != nil { level.Error(util.Logger).Log("err", err) os.Exit(1) @@ -124,8 +122,7 @@ func main() { tableManager.Start() defer tableManager.Stop() - engine := promql.NewEngine(util.Logger, nil, querierConfig.MaxConcurrent, querierConfig.Timeout) - queryable := querier.NewQueryable(dist, chunkStore) + queryable, engine := querier.Make(querierConfig, dist, chunkStore) if configStoreConfig.ConfigsAPIURL.String() != "" || configStoreConfig.DBConfig.URI != "" { rulesAPI, err := ruler.NewRulesAPI(configStoreConfig) @@ -186,7 +183,7 @@ func main() { subrouter := server.HTTP.PathPrefix("/api/prom").Subrouter() subrouter.PathPrefix("/api/v1").Handler(activeMiddleware.Wrap(promRouter)) - subrouter.Path("/read").Handler(activeMiddleware.Wrap(http.HandlerFunc(queryable.RemoteReadHandler))) + subrouter.Path("/read").Handler(activeMiddleware.Wrap(querier.RemoteReadHandler(queryable))) subrouter.Path("/validate_expr").Handler(activeMiddleware.Wrap(http.HandlerFunc(dist.ValidateExprHandler))) subrouter.Path("/user_stats").Handler(activeMiddleware.Wrap(http.HandlerFunc(dist.UserStatsHandler))) diff --git a/cmd/querier/main.go b/cmd/querier/main.go index 943399c26ac..3bef036a57f 100644 --- a/cmd/querier/main.go +++ b/cmd/querier/main.go @@ -11,7 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/web/api/v1" "github.com/prometheus/tsdb" @@ -40,17 +39,16 @@ func main() { chunkStoreConfig chunk.StoreConfig schemaConfig chunk.SchemaConfig storageConfig storage.Config - logLevel util.LogLevel ) util.RegisterFlags(&serverConfig, &ringConfig, &distributorConfig, &querierConfig, - &chunkStoreConfig, &schemaConfig, &storageConfig, &logLevel) + &chunkStoreConfig, &schemaConfig, &storageConfig) flag.Parse() // Setting the environment variable JAEGER_AGENT_HOST enables tracing trace := tracing.NewFromEnv("querier") defer trace.Close() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) r, err := ring.New(ringConfig) if err != nil { @@ -81,16 +79,14 @@ func main() { os.Exit(1) } - chunkStore, err := chunk.NewStore(chunkStoreConfig, schemaConfig, storageClient) + chunkStore, err := chunk.NewCompositeStore(chunkStoreConfig, schemaConfig, storageClient) if err != nil { level.Error(util.Logger).Log("err", err) os.Exit(1) } defer chunkStore.Stop() - queryable := 
querier.NewQueryable(dist, chunkStore) - - engine := promql.NewEngine(util.Logger, nil, querierConfig.MaxConcurrent, querierConfig.Timeout) + queryable, engine := querier.Make(querierConfig, dist, chunkStore) api := v1.NewAPI( engine, queryable, @@ -107,7 +103,7 @@ func main() { subrouter := server.HTTP.PathPrefix("/api/prom").Subrouter() subrouter.PathPrefix("/api/v1").Handler(middleware.AuthenticateUser.Wrap(promRouter)) - subrouter.Path("/read").Handler(middleware.AuthenticateUser.Wrap(http.HandlerFunc(queryable.RemoteReadHandler))) + subrouter.Path("/read").Handler(middleware.AuthenticateUser.Wrap(querier.RemoteReadHandler(queryable))) subrouter.Path("/validate_expr").Handler(middleware.AuthenticateUser.Wrap(http.HandlerFunc(dist.ValidateExprHandler))) subrouter.Path("/user_stats").Handler(middleware.AuthenticateUser.Wrap(http.HandlerFunc(dist.UserStatsHandler))) diff --git a/cmd/ruler/main.go b/cmd/ruler/main.go index 4601d23725f..b78f3aec2bb 100644 --- a/cmd/ruler/main.go +++ b/cmd/ruler/main.go @@ -36,7 +36,7 @@ func main() { schemaConfig chunk.SchemaConfig storageConfig storage.Config configStoreConfig ruler.ConfigStoreConfig - logLevel util.LogLevel + querierConfig querier.Config ) // Setting the environment variable JAEGER_AGENT_HOST enables tracing @@ -44,10 +44,11 @@ func main() { defer trace.Close() util.RegisterFlags(&serverConfig, &ringConfig, &distributorConfig, - &rulerConfig, &chunkStoreConfig, &storageConfig, &schemaConfig, &configStoreConfig, &logLevel) + &rulerConfig, &chunkStoreConfig, &storageConfig, &schemaConfig, &configStoreConfig, + &querierConfig) flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) storageClient, err := storage.NewStorageClient(storageConfig, schemaConfig) if err != nil { @@ -55,7 +56,7 @@ func main() { os.Exit(1) } - chunkStore, err := chunk.NewStore(chunkStoreConfig, schemaConfig, storageClient) + chunkStore, err := chunk.NewCompositeStore(chunkStoreConfig, schemaConfig, storageClient) if err != nil { level.Error(util.Logger).Log("err", err) os.Exit(1) @@ -78,7 +79,7 @@ func main() { prometheus.MustRegister(dist) engine := promql.NewEngine(util.Logger, prometheus.DefaultRegisterer, rulerConfig.NumWorkers, rulerConfig.GroupTimeout) - queryable := querier.NewQueryable(dist, chunkStore) + queryable := querier.NewQueryable(dist, chunkStore, querierConfig.Iterators) rlr, err := ruler.NewRuler(rulerConfig, engine, queryable, dist) if err != nil { diff --git a/cmd/table-manager/main.go b/cmd/table-manager/main.go index b6448ed2983..66a9e883c25 100644 --- a/cmd/table-manager/main.go +++ b/cmd/table-manager/main.go @@ -27,12 +27,11 @@ func main() { ingesterConfig ingester.Config storageConfig storage.Config schemaConfig chunk.SchemaConfig - logLevel util.LogLevel ) - util.RegisterFlags(&ingesterConfig, &serverConfig, &storageConfig, &schemaConfig, &logLevel) + util.RegisterFlags(&ingesterConfig, &serverConfig, &storageConfig, &schemaConfig) flag.Parse() - util.InitLogger(logLevel.AllowedLevel) + util.InitLogger(&serverConfig) if (schemaConfig.ChunkTables.WriteScale.Enabled || schemaConfig.IndexTables.WriteScale.Enabled || diff --git a/pkg/chunk/chunk.go b/pkg/chunk/chunk.go index 87343017a8a..c2541e4002c 100644 --- a/pkg/chunk/chunk.go +++ b/pkg/chunk/chunk.go @@ -319,36 +319,31 @@ func equalByKey(a, b Chunk) bool { a.From == b.From && a.Through == b.Through && a.Checksum == b.Checksum } -func chunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error) { +// ChunksToMatrix 
converts a set of chunks to a model.Matrix. +func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error) { sp, ctx := ot.StartSpanFromContext(ctx, "chunksToMatrix") defer sp.Finish() sp.LogFields(otlog.Int("chunks", len(chunks))) // Group chunks by series, sort and dedupe samples. - sampleStreams := map[model.Fingerprint]*model.SampleStream{} + metrics := map[model.Fingerprint]model.Metric{} + samples := map[model.Fingerprint][][]model.SamplePair{} for _, c := range chunks { - ss, ok := sampleStreams[c.Fingerprint] - if !ok { - ss = &model.SampleStream{ - Metric: c.Metric, - } - sampleStreams[c.Fingerprint] = ss - } - - samples, err := c.Samples(from, through) + ss, err := c.Samples(from, through) if err != nil { return nil, err } - ss.Values = util.MergeSampleSets(ss.Values, samples) + metrics[c.Fingerprint] = c.Metric + samples[c.Fingerprint] = append(samples[c.Fingerprint], ss) } - sp.LogFields(otlog.Int("sample streams", len(sampleStreams))) + sp.LogFields(otlog.Int("sample streams", len(samples))) - matrix := make(model.Matrix, 0, len(sampleStreams)) - for _, ss := range sampleStreams { + matrix := make(model.Matrix, 0, len(samples)) + for fp, ss := range samples { matrix = append(matrix, &model.SampleStream{ - Metric: ss.Metric, - Values: ss.Values, + Metric: metrics[fp], + Values: util.MergeNSampleSets(ss...), }) } diff --git a/pkg/chunk/chunk_store.go b/pkg/chunk/chunk_store.go index 84bf658b3ea..3a68c629ede 100644 --- a/pkg/chunk/chunk_store.go +++ b/pkg/chunk/chunk_store.go @@ -57,9 +57,6 @@ type StoreConfig struct { MinChunkAge time.Duration QueryChunkLimit int - - // For injecting different schemas in tests. - schemaFactory func(cfg SchemaConfig) Schema } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -69,8 +66,8 @@ func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.QueryChunkLimit, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.") } -// Store implements Store -type Store struct { +// store implements Store +type store struct { cfg StoreConfig storage StorageClient @@ -78,25 +75,13 @@ type Store struct { schema Schema } -// NewStore makes a new ChunkStore -func NewStore(cfg StoreConfig, schemaCfg SchemaConfig, storage StorageClient) (*Store, error) { - var schema Schema - var err error - if cfg.schemaFactory == nil { - schema, err = newCompositeSchema(schemaCfg) - } else { - schema = cfg.schemaFactory(schemaCfg) - } - if err != nil { - return nil, err - } - +func newStore(cfg StoreConfig, schema Schema, storage StorageClient) (*store, error) { cache, err := cache.New(cfg.CacheConfig) if err != nil { return nil, err } - return &Store{ + return &store{ cfg: cfg, storage: storage, schema: schema, @@ -105,12 +90,12 @@ func NewStore(cfg StoreConfig, schemaCfg SchemaConfig, storage StorageClient) (* } // Stop any background goroutines (ie in the cache.) 
-func (c *Store) Stop() { +func (c *store) Stop() { c.cache.Stop() } // Put implements ChunkStore -func (c *Store) Put(ctx context.Context, chunks []Chunk) error { +func (c *store) Put(ctx context.Context, chunks []Chunk) error { userID, err := user.ExtractOrgID(ctx) if err != nil { return err @@ -125,7 +110,7 @@ func (c *Store) Put(ctx context.Context, chunks []Chunk) error { return c.updateIndex(ctx, userID, chunks) } -func (c *Store) updateIndex(ctx context.Context, userID string, chunks []Chunk) error { +func (c *store) updateIndex(ctx context.Context, userID string, chunks []Chunk) error { writeReqs, err := c.calculateDynamoWrites(userID, chunks) if err != nil { return err @@ -136,7 +121,7 @@ func (c *Store) updateIndex(ctx context.Context, userID string, chunks []Chunk) // calculateDynamoWrites creates a set of batched WriteRequests to dynamo for all // the chunks it is given. -func (c *Store) calculateDynamoWrites(userID string, chunks []Chunk) (WriteBatch, error) { +func (c *store) calculateDynamoWrites(userID string, chunks []Chunk) (WriteBatch, error) { seenIndexEntries := map[string]struct{}{} writeReqs := c.storage.NewWriteBatch() @@ -195,7 +180,7 @@ func (s *spanLogger) Log(kvps ...interface{}) error { } // Get implements ChunkStore -func (c *Store) Get(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) (model.Matrix, error) { +func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { log, ctx := newSpanLogger(ctx, "ChunkStore.Get") defer log.Span.Finish() @@ -227,22 +212,14 @@ func (c *Store) Get(ctx context.Context, from, through model.Time, allMatchers . metricNameMatcher, matchers, ok := extract.MetricNameMatcherFromMatchers(allMatchers) if ok && metricNameMatcher.Type == labels.MatchEqual { log.Span.SetTag("metric", metricNameMatcher.Value) - return c.getMetricNameMatrix(ctx, from, through, matchers, metricNameMatcher.Value) + return c.getMetricNameChunks(ctx, from, through, matchers, metricNameMatcher.Value) } // Otherwise we consult the metric name index first and then create queries for each matching metric name. 
- return c.getSeriesMatrix(ctx, from, through, matchers, metricNameMatcher) -} - -func (c *Store) getMetricNameMatrix(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricName string) (model.Matrix, error) { - chunks, err := c.getMetricNameChunks(ctx, from, through, allMatchers, metricName) - if err != nil { - return nil, err - } - return chunksToMatrix(ctx, chunks, from, through) + return c.getSeriesChunks(ctx, from, through, matchers, metricNameMatcher) } -func (c *Store) getMetricNameChunks(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) { +func (c *store) getMetricNameChunks(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) { log, ctx := newSpanLogger(ctx, "ChunkStore.getMetricNameChunks") level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "matchers", len(allMatchers)) @@ -345,7 +322,7 @@ func ProcessCacheResponse(chunks []Chunk, keys []string, bufs [][]byte) (found [ return } -func (c *Store) getSeriesMatrix(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricNameMatcher *labels.Matcher) (model.Matrix, error) { +func (c *store) getSeriesChunks(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricNameMatcher *labels.Matcher) ([]Chunk, error) { // Get all series from the index userID, err := user.ExtractOrgID(ctx) if err != nil { @@ -406,10 +383,10 @@ outer: } } } - return chunksToMatrix(ctx, chunks, from, through) + return chunks, nil } -func (c *Store) lookupChunksByMetricName(ctx context.Context, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) { +func (c *store) lookupChunksByMetricName(ctx context.Context, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) { log, ctx := newSpanLogger(ctx, "ChunkStore.lookupChunksByMetricName") userID, err := user.ExtractOrgID(ctx) @@ -503,7 +480,7 @@ func (c *Store) lookupChunksByMetricName(ctx context.Context, from, through mode return c.convertChunkIDsToChunks(ctx, chunkIDs) } -func (c *Store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { +func (c *store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { incomingEntries := make(chan []IndexEntry) incomingErrors := make(chan error) for _, query := range queries { @@ -532,7 +509,7 @@ func (c *Store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery return entries, lastErr } -func (c *Store) lookupEntriesByQuery(ctx context.Context, query IndexQuery) ([]IndexEntry, error) { +func (c *store) lookupEntriesByQuery(ctx context.Context, query IndexQuery) ([]IndexEntry, error) { var entries []IndexEntry if err := c.storage.QueryPages(ctx, query, func(resp ReadBatch) (shouldContinue bool) { @@ -553,7 +530,7 @@ func (c *Store) lookupEntriesByQuery(ctx context.Context, query IndexQuery) ([]I return entries, nil } -func (c *Store) parseIndexEntries(ctx context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) { +func (c *store) parseIndexEntries(ctx context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) { result := make([]string, 0, len(entries)) for _, entry := range entries { @@ -575,7 +552,7 @@ func (c *Store) parseIndexEntries(ctx context.Context, entries []IndexEntry, mat return result, nil } -func (c *Store) 
convertChunkIDsToChunks(ctx context.Context, chunkIDs []string) ([]Chunk, error) { +func (c *store) convertChunkIDsToChunks(ctx context.Context, chunkIDs []string) ([]Chunk, error) { userID, err := user.ExtractOrgID(ctx) if err != nil { return nil, err @@ -593,7 +570,7 @@ func (c *Store) convertChunkIDsToChunks(ctx context.Context, chunkIDs []string) return chunkSet, nil } -func (c *Store) writeBackCache(ctx context.Context, chunks []Chunk) error { +func (c *store) writeBackCache(ctx context.Context, chunks []Chunk) error { for i := range chunks { encoded, err := chunks[i].Encode() if err != nil { diff --git a/pkg/chunk/chunk_store_test.go b/pkg/chunk/chunk_store_test.go index b47a92d1347..716c5cfa538 100644 --- a/pkg/chunk/chunk_store_test.go +++ b/pkg/chunk/chunk_store_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/cortex/pkg/prom1/storage/local/chunk" + "github.com/weaveworks/cortex/pkg/util" "github.com/weaveworks/cortex/pkg/util/extract" "golang.org/x/net/context" @@ -21,15 +22,37 @@ import ( "github.com/weaveworks/common/user" ) +var schemas = []struct { + name string + fn func(cfg SchemaConfig) Schema + requireMetricName bool +}{ + {"v1 schema", v1Schema, true}, + {"v2 schema", v2Schema, true}, + {"v3 schema", v3Schema, true}, + {"v4 schema", v4Schema, true}, + {"v5 schema", v5Schema, true}, + {"v6 schema", v6Schema, true}, + {"v7 schema", v7Schema, true}, + {"v8 schema", v8Schema, false}, +} + // newTestStore creates a new Store for testing. -func newTestChunkStore(t *testing.T, cfg StoreConfig) *Store { +func newTestChunkStore(t *testing.T, schemaFn func(SchemaConfig) Schema) *store { + var ( + storeCfg StoreConfig + schemaCfg SchemaConfig + ) + util.DefaultValues(&storeCfg, &schemaCfg) + storage := NewMockStorage() - schemaCfg := SchemaConfig{} tableManager, err := NewTableManager(schemaCfg, maxChunkAge, storage) require.NoError(t, err) + err = tableManager.SyncTables(context.Background()) require.NoError(t, err) - store, err := NewStore(cfg, schemaCfg, storage) + + store, err := newStore(storeCfg, schemaFn(schemaCfg), storage) require.NoError(t, err) return store } @@ -102,21 +125,6 @@ func TestChunkStore_Get(t *testing.T) { barSampleStream2, err := createSampleStreamFrom(barChunk2) require.NoError(t, err) - schemas := []struct { - name string - fn func(cfg SchemaConfig) Schema - requireMetricName bool - }{ - {"v1 schema", v1Schema, true}, - {"v2 schema", v2Schema, true}, - {"v3 schema", v3Schema, true}, - {"v4 schema", v4Schema, true}, - {"v5 schema", v5Schema, true}, - {"v6 schema", v6Schema, true}, - {"v7 schema", v7Schema, true}, - {"v8 schema", v8Schema, false}, - } - for _, tc := range []struct { query string expect model.Matrix @@ -193,10 +201,7 @@ func TestChunkStore_Get(t *testing.T) { for _, schema := range schemas { t.Run(fmt.Sprintf("%s / %s", tc.query, schema.name), func(t *testing.T) { t.Log("========= Running query", tc.query, "with schema", schema.name) - store := newTestChunkStore(t, StoreConfig{ - schemaFactory: schema.fn, - QueryChunkLimit: 2e6, - }) + store := newTestChunkStore(t, schema.fn) if err := store.Put(ctx, []Chunk{ fooChunk1, @@ -218,7 +223,10 @@ func TestChunkStore_Get(t *testing.T) { } // Query with ordinary time-range - matrix1, err := store.Get(ctx, now.Add(-time.Hour), now, matchers...) 
+ chunks1, err := store.Get(ctx, now.Add(-time.Hour), now, matchers...) + require.NoError(t, err) + + matrix1, err := ChunksToMatrix(ctx, chunks1, now.Add(-time.Hour), now) require.NoError(t, err) sort.Sort(ByFingerprint(matrix1)) @@ -229,7 +237,10 @@ func TestChunkStore_Get(t *testing.T) { } // Pushing end of time-range into future should yield exact same resultset - matrix2, err := store.Get(ctx, now.Add(-time.Hour), now.Add(time.Hour*24*30), matchers...) + chunks2, err := store.Get(ctx, now.Add(-time.Hour), now.Add(time.Hour*24*30), matchers...) + require.NoError(t, err) + + matrix2, err := ChunksToMatrix(ctx, chunks2, now.Add(-time.Hour), now) require.NoError(t, err) sort.Sort(ByFingerprint(matrix2)) @@ -266,20 +277,6 @@ func TestChunkStore_getMetricNameChunks(t *testing.T) { "toms": "code", }) - schemas := []struct { - name string - fn func(cfg SchemaConfig) Schema - }{ - {"v1 schema", v1Schema}, - {"v2 schema", v2Schema}, - {"v3 schema", v3Schema}, - {"v4 schema", v4Schema}, - {"v5 schema", v5Schema}, - {"v6 schema", v6Schema}, - {"v7 schema", v7Schema}, - {"v8 schema", v8Schema}, - } - for _, tc := range []struct { query string expect []Chunk @@ -333,10 +330,7 @@ func TestChunkStore_getMetricNameChunks(t *testing.T) { for _, schema := range schemas { t.Run(fmt.Sprintf("%s / %s", tc.query, schema.name), func(t *testing.T) { t.Log("========= Running query", tc.query, "with schema", schema.name) - store := newTestChunkStore(t, StoreConfig{ - schemaFactory: schema.fn, - QueryChunkLimit: 2e6, - }) + store := newTestChunkStore(t, schema.fn) if err := store.Put(ctx, []Chunk{chunk1, chunk2}); err != nil { t.Fatal(err) @@ -363,96 +357,72 @@ func mustNewLabelMatcher(matchType labels.MatchType, name string, value string) func TestChunkStoreRandom(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) - schemas := []struct { - name string - fn func(cfg SchemaConfig) Schema - store *Store - }{ - {name: "v1 schema", fn: v1Schema}, - {name: "v2 schema", fn: v2Schema}, - {name: "v3 schema", fn: v3Schema}, - {name: "v4 schema", fn: v4Schema}, - {name: "v5 schema", fn: v5Schema}, - {name: "v6 schema", fn: v6Schema}, - {name: "v7 schema", fn: v7Schema}, - {name: "v8 schema", fn: v8Schema}, - } - for i := range schemas { - schemas[i].store = newTestChunkStore(t, StoreConfig{ - schemaFactory: schemas[i].fn, - QueryChunkLimit: 2e6, - }) - } + for _, schema := range schemas { + t.Run(schema.name, func(t *testing.T) { + store := newTestChunkStore(t, schema.fn) + + // put 100 chunks from 0 to 99 + const chunkLen = 13 * 3600 // in seconds + for i := 0; i < 100; i++ { + ts := model.TimeFromUnix(int64(i * chunkLen)) + chunks, _ := chunk.New().Add(model.SamplePair{ + Timestamp: ts, + Value: model.SampleValue(float64(i)), + }) + chunk := NewChunk( + userID, + model.Fingerprint(1), + model.Metric{ + model.MetricNameLabel: "foo", + "bar": "baz", + }, + chunks[0], + ts, + ts.Add(chunkLen*time.Second), + ) + + err := store.Put(ctx, []Chunk{chunk}) + require.NoError(t, err) + } - // put 100 chunks from 0 to 99 - const chunkLen = 13 * 3600 // in seconds - for i := 0; i < 100; i++ { - ts := model.TimeFromUnix(int64(i * chunkLen)) - chunks, _ := chunk.New().Add(model.SamplePair{ - Timestamp: ts, - Value: model.SampleValue(float64(i)), - }) - chunk := NewChunk( - userID, - model.Fingerprint(1), - model.Metric{ - model.MetricNameLabel: "foo", - "bar": "baz", - }, - chunks[0], - ts, - ts.Add(chunkLen*time.Second), - ) - for _, s := range schemas { - err := s.store.Put(ctx, []Chunk{chunk}) - 
require.NoError(t, err) - } - } + // pick two random numbers and do a query + for i := 0; i < 100; i++ { + start := rand.Int63n(100 * chunkLen) + end := start + rand.Int63n((100*chunkLen)-start) + assert.True(t, start < end) - // pick two random numbers and do a query - for i := 0; i < 100; i++ { - start := rand.Int63n(100 * chunkLen) - end := start + rand.Int63n((100*chunkLen)-start) - assert.True(t, start < end) + startTime := model.TimeFromUnix(start) + endTime := model.TimeFromUnix(end) - startTime := model.TimeFromUnix(start) - endTime := model.TimeFromUnix(end) + metricNameLabel := mustNewLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo") + matchers := []*labels.Matcher{mustNewLabelMatcher(labels.MatchEqual, "bar", "baz")} + chunks, err := store.getMetricNameChunks(ctx, startTime, endTime, + matchers, metricNameLabel.Value) + require.NoError(t, err) - metricNameLabel := mustNewLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo") - matchers := []*labels.Matcher{mustNewLabelMatcher(labels.MatchEqual, "bar", "baz")} + // We need to check that each chunk is in the time range + for _, chunk := range chunks { + assert.False(t, chunk.From.After(endTime)) + assert.False(t, chunk.Through.Before(startTime)) + samples, err := chunk.Samples(chunk.From, chunk.Through) + assert.NoError(t, err) + assert.Equal(t, 1, len(samples)) + // TODO verify chunk contents + } - for _, s := range schemas { - chunks, err := s.store.getMetricNameChunks(ctx, startTime, endTime, - matchers, - metricNameLabel.Value, - ) - require.NoError(t, err) - - // We need to check that each chunk is in the time range - for _, chunk := range chunks { - assert.False(t, chunk.From.After(endTime)) - assert.False(t, chunk.Through.Before(startTime)) - samples, err := chunk.Samples(chunk.From, chunk.Through) - assert.NoError(t, err) - assert.Equal(t, 1, len(samples)) - // TODO verify chunk contents + // And check we got all the chunks we want + numChunks := (end / chunkLen) - (start / chunkLen) + 1 + assert.Equal(t, int(numChunks), len(chunks)) } - - // And check we got all the chunks we want - numChunks := (end / chunkLen) - (start / chunkLen) + 1 - assert.Equal(t, int(numChunks), len(chunks), s.name) - } + }) } } func TestChunkStoreLeastRead(t *testing.T) { // Test we don't read too much from the index ctx := user.InjectOrgID(context.Background(), userID) - store := newTestChunkStore(t, StoreConfig{ - schemaFactory: v6Schema, - QueryChunkLimit: 2e6, - }) + store := newTestChunkStore(t, v6Schema) // Put 24 chunks 1hr chunks in the store const chunkLen = 60 // in seconds diff --git a/pkg/chunk/chunk_test.go b/pkg/chunk/chunk_test.go index 040d01c5110..b486565dfc8 100644 --- a/pkg/chunk/chunk_test.go +++ b/pkg/chunk/chunk_test.go @@ -188,7 +188,7 @@ func TestChunksToMatrix(t *testing.T) { }, }, } { - matrix, err := chunksToMatrix(context.Background(), c.chunks, chunk1.From, chunk3.Through) + matrix, err := ChunksToMatrix(context.Background(), c.chunks, chunk1.From, chunk3.Through) require.NoError(t, err) sort.Sort(matrix) diff --git a/pkg/chunk/composite_store.go b/pkg/chunk/composite_store.go new file mode 100644 index 00000000000..17428da6c3a --- /dev/null +++ b/pkg/chunk/composite_store.go @@ -0,0 +1,194 @@ +package chunk + +import ( + "context" + "fmt" + "sort" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" +) + +// Store for chunks. 
+type Store interface { + Put(ctx context.Context, chunks []Chunk) error + Get(tx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) + Stop() +} + +// compositeStore is a Store which delegates to various stores depending +// on when they were activated. +type compositeStore struct { + stores []compositeStoreEntry +} + +type compositeStoreEntry struct { + start model.Time + Store +} + +type byStart []compositeStoreEntry + +func (a byStart) Len() int { return len(a) } +func (a byStart) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byStart) Less(i, j int) bool { return a[i].start < a[j].start } + +// NewCompositeStore creates a new Store which delegates to different stores depending +// on time. +func NewCompositeStore(cfg StoreConfig, schemaCfg SchemaConfig, storage StorageClient) (Store, error) { + store, err := newStore(cfg, v1Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + + stores := []compositeStoreEntry{ + {0, store}, + } + + if schemaCfg.DailyBucketsFrom.IsSet() { + store, err := newStore(cfg, v2Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.DailyBucketsFrom.Time, store}) + } + + if schemaCfg.Base64ValuesFrom.IsSet() { + store, err := newStore(cfg, v3Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.Base64ValuesFrom.Time, store}) + } + + if schemaCfg.V4SchemaFrom.IsSet() { + store, err := newStore(cfg, v4Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.V4SchemaFrom.Time, store}) + } + + if schemaCfg.V5SchemaFrom.IsSet() { + store, err := newStore(cfg, v5Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.V5SchemaFrom.Time, store}) + } + + if schemaCfg.V6SchemaFrom.IsSet() { + store, err := newStore(cfg, v6Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.V6SchemaFrom.Time, store}) + } + + if schemaCfg.V7SchemaFrom.IsSet() { + store, err := newStore(cfg, v7Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.V7SchemaFrom.Time, store}) + } + + if schemaCfg.V8SchemaFrom.IsSet() { + store, err := newStore(cfg, v8Schema(schemaCfg), storage) + if err != nil { + return nil, err + } + stores = append(stores, compositeStoreEntry{schemaCfg.V8SchemaFrom.Time, store}) + } + + if !sort.IsSorted(byStart(stores)) { + return nil, fmt.Errorf("schemas not in time-sorted order") + } + + return compositeStore{stores}, nil +} + +func (c compositeStore) Put(ctx context.Context, chunks []Chunk) error { + for _, chunk := range chunks { + err := c.forStores(chunk.From, chunk.From, func(_, _ model.Time, store Store) error { + return store.Put(ctx, []Chunk{chunk}) + }) + if err != nil { + return err + } + } + return nil +} + +func (c compositeStore) Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { + var results []Chunk + err := c.forStores(from, through, func(from, through model.Time, store Store) error { + chunks, err := store.Get(ctx, from, through, matchers...) + if err != nil { + return err + } + results = append(results, chunks...) 
+ return nil + }) + return results, err +} + +func (c compositeStore) Stop() { + for _, store := range c.stores { + store.Stop() + } +} + +func (c compositeStore) forStores(from, through model.Time, callback func(from, through model.Time, store Store) error) error { + if len(c.stores) == 0 { + return nil + } + + // first, find the schema with the highest start _before or at_ from + i := sort.Search(len(c.stores), func(i int) bool { + return c.stores[i].start > from + }) + if i > 0 { + i-- + } else { + // This could happen if we get passed a sample from before 1970. + i = 0 + from = c.stores[0].start + } + + // next, find the schema with the lowest start _after_ through + j := sort.Search(len(c.stores), func(j int) bool { + return c.stores[j].start > through + }) + + min := func(a, b model.Time) model.Time { + if a < b { + return a + } + return b + } + + start := from + for ; i < j; i++ { + nextSchemaStarts := model.Latest + if i+1 < len(c.stores) { + nextSchemaStarts = c.stores[i+1].start + } + + // If the next schema starts at the same time as this one, + // skip this one. + if nextSchemaStarts == c.stores[i].start { + continue + } + + end := min(through, nextSchemaStarts-1) + err := callback(start, end, c.stores[i].Store) + if err != nil { + return err + } + + start = nextSchemaStarts + } + + return nil +} diff --git a/pkg/chunk/composite_store_test.go b/pkg/chunk/composite_store_test.go new file mode 100644 index 00000000000..c315aef0d27 --- /dev/null +++ b/pkg/chunk/composite_store_test.go @@ -0,0 +1,167 @@ +package chunk + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/weaveworks/common/test" +) + +type mockStore int + +func (m mockStore) Put(ctx context.Context, chunks []Chunk) error { + return nil +} + +func (m mockStore) Get(tx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { + return nil, nil +} + +func (m mockStore) Stop() {} + +func TestCompositeStore(t *testing.T) { + type result struct { + from, through model.Time + store Store + } + collect := func(results *[]result) func(from, through model.Time, store Store) error { + return func(from, through model.Time, store Store) error { + *results = append(*results, result{from, through, store}) + return nil + } + } + cs := compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + {model.TimeFromUnix(100), mockStore(2)}, + {model.TimeFromUnix(200), mockStore(3)}, + }, + } + + for i, tc := range []struct { + cs compositeStore + from, through int64 + want []result + }{ + // Test we have sensible results when there are no schema's defined + {compositeStore{}, 0, 1, []result{}}, + + // Test we have sensible results when there is a single schema + { + compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + }, + }, + 0, 10, + []result{ + {model.TimeFromUnix(0), model.TimeFromUnix(10), mockStore(1)}, + }, + }, + + // Test we have sensible results for negative (ie pre 1970) times + { + compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + }, + }, + -10, -9, + []result{}, + }, + { + compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + }, + }, + -10, 10, + []result{ + {model.TimeFromUnix(0), model.TimeFromUnix(10), mockStore(1)}, + }, + }, + + // Test we have sensible results when there is two 
schemas + { + compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + {model.TimeFromUnix(100), mockStore(2)}, + }, + }, + 34, 165, + []result{ + {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockStore(1)}, + {model.TimeFromUnix(100), model.TimeFromUnix(165), mockStore(2)}, + }, + }, + + // Test we get only one result when two schema start at same time + { + compositeStore{ + stores: []compositeStoreEntry{ + {model.TimeFromUnix(0), mockStore(1)}, + {model.TimeFromUnix(10), mockStore(2)}, + {model.TimeFromUnix(10), mockStore(3)}, + }, + }, + 0, 165, + []result{ + {model.TimeFromUnix(0), model.TimeFromUnix(10) - 1, mockStore(1)}, + {model.TimeFromUnix(10), model.TimeFromUnix(165), mockStore(3)}, + }, + }, + + // Test all the various combination we can get when there are three schemas + { + cs, 34, 65, + []result{ + {model.TimeFromUnix(34), model.TimeFromUnix(65), mockStore(1)}, + }, + }, + + { + cs, 244, 6785, + []result{ + {model.TimeFromUnix(244), model.TimeFromUnix(6785), mockStore(3)}, + }, + }, + + { + cs, 34, 165, + []result{ + {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockStore(1)}, + {model.TimeFromUnix(100), model.TimeFromUnix(165), mockStore(2)}, + }, + }, + + { + cs, 151, 264, + []result{ + {model.TimeFromUnix(151), model.TimeFromUnix(200) - 1, mockStore(2)}, + {model.TimeFromUnix(200), model.TimeFromUnix(264), mockStore(3)}, + }, + }, + + { + cs, 32, 264, + []result{ + {model.TimeFromUnix(32), model.TimeFromUnix(100) - 1, mockStore(1)}, + {model.TimeFromUnix(100), model.TimeFromUnix(200) - 1, mockStore(2)}, + {model.TimeFromUnix(200), model.TimeFromUnix(264), mockStore(3)}, + }, + }, + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + have := []result{} + tc.cs.forStores(model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), collect(&have)) + if !reflect.DeepEqual(tc.want, have) { + t.Fatalf("wrong stores - %s", test.Diff(tc.want, have)) + } + }) + } +} diff --git a/pkg/chunk/schema_config.go b/pkg/chunk/schema_config.go index d9e65862bef..44d2b85306e 100644 --- a/pkg/chunk/schema_config.go +++ b/pkg/chunk/schema_config.go @@ -3,7 +3,6 @@ package chunk import ( "flag" "fmt" - "sort" "strconv" "time" @@ -282,202 +281,3 @@ func (cfg *PeriodicTableConfig) TableFor(t model.Time) string { ) return cfg.Prefix + strconv.Itoa(int(table)) } - -// compositeSchema is a Schema which delegates to various schemas depending -// on when they were activated. 
-type compositeSchema struct { - schemas []compositeSchemaEntry -} - -type compositeSchemaEntry struct { - start model.Time - Schema -} - -type byStart []compositeSchemaEntry - -func (a byStart) Len() int { return len(a) } -func (a byStart) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byStart) Less(i, j int) bool { return a[i].start < a[j].start } - -func newCompositeSchema(cfg SchemaConfig) (Schema, error) { - schemas := []compositeSchemaEntry{ - {0, v1Schema(cfg)}, - } - - if cfg.DailyBucketsFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.DailyBucketsFrom.Time, v2Schema(cfg)}) - } - - if cfg.Base64ValuesFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.Base64ValuesFrom.Time, v3Schema(cfg)}) - } - - if cfg.V4SchemaFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.V4SchemaFrom.Time, v4Schema(cfg)}) - } - - if cfg.V5SchemaFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.V5SchemaFrom.Time, v5Schema(cfg)}) - } - - if cfg.V6SchemaFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.V6SchemaFrom.Time, v6Schema(cfg)}) - } - - if cfg.V7SchemaFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.V7SchemaFrom.Time, v7Schema(cfg)}) - } - - if cfg.V8SchemaFrom.IsSet() { - schemas = append(schemas, compositeSchemaEntry{cfg.V8SchemaFrom.Time, v8Schema(cfg)}) - } - - if !sort.IsSorted(byStart(schemas)) { - return nil, fmt.Errorf("schemas not in time-sorted order") - } - - return compositeSchema{schemas}, nil -} - -func (c compositeSchema) forSchemasIndexQuery(from, through model.Time, callback func(from, through model.Time, schema Schema) ([]IndexQuery, error)) ([]IndexQuery, error) { - if len(c.schemas) == 0 { - return nil, nil - } - - // first, find the schema with the highest start _before or at_ from - i := sort.Search(len(c.schemas), func(i int) bool { - return c.schemas[i].start > from - }) - if i > 0 { - i-- - } else { - // This could happen if we get passed a sample from before 1970. - i = 0 - from = c.schemas[0].start - } - - // next, find the schema with the lowest start _after_ through - j := sort.Search(len(c.schemas), func(j int) bool { - return c.schemas[j].start > through - }) - - min := func(a, b model.Time) model.Time { - if a < b { - return a - } - return b - } - - start := from - result := []IndexQuery{} - for ; i < j; i++ { - nextSchemaStarts := model.Latest - if i+1 < len(c.schemas) { - nextSchemaStarts = c.schemas[i+1].start - } - - // If the next schema starts at the same time as this one, - // skip this one. - if nextSchemaStarts == c.schemas[i].start { - continue - } - - end := min(through, nextSchemaStarts-1) - entries, err := callback(start, end, c.schemas[i].Schema) - if err != nil { - return nil, err - } - - result = append(result, entries...) - start = nextSchemaStarts - } - - return result, nil -} - -func (c compositeSchema) forSchemasIndexEntry(from, through model.Time, callback func(from, through model.Time, schema Schema) ([]IndexEntry, error)) ([]IndexEntry, error) { - if len(c.schemas) == 0 { - return nil, nil - } - - // first, find the schema with the highest start _before or at_ from - i := sort.Search(len(c.schemas), func(i int) bool { - return c.schemas[i].start > from - }) - if i > 0 { - i-- - } else { - // This could happen if we get passed a sample from before 1970. 
- i = 0 - from = c.schemas[0].start - } - - // next, find the schema with the lowest start _after_ through - j := sort.Search(len(c.schemas), func(j int) bool { - return c.schemas[j].start > through - }) - - min := func(a, b model.Time) model.Time { - if a < b { - return a - } - return b - } - - start := from - result := []IndexEntry{} - for ; i < j; i++ { - nextSchemaStarts := model.Latest - if i+1 < len(c.schemas) { - nextSchemaStarts = c.schemas[i+1].start - } - - // If the next schema starts at the same time as this one, - // skip this one. - if nextSchemaStarts == c.schemas[i].start { - continue - } - - end := min(through, nextSchemaStarts-1) - entries, err := callback(start, end, c.schemas[i].Schema) - if err != nil { - return nil, err - } - - result = append(result, entries...) - start = nextSchemaStarts - } - - return result, nil -} - -func (c compositeSchema) GetWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { - return c.forSchemasIndexEntry(from, through, func(from, through model.Time, schema Schema) ([]IndexEntry, error) { - return schema.GetWriteEntries(from, through, userID, metricName, labels, chunkID) - }) -} - -func (c compositeSchema) GetReadQueries(from, through model.Time, userID string) ([]IndexQuery, error) { - return c.forSchemasIndexQuery(from, through, func(from, through model.Time, schema Schema) ([]IndexQuery, error) { - return schema.GetReadQueries(from, through, userID) - }) -} - -func (c compositeSchema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error) { - return c.forSchemasIndexQuery(from, through, func(from, through model.Time, schema Schema) ([]IndexQuery, error) { - return schema.GetReadQueriesForMetric(from, through, userID, metricName) - }) -} - -func (c compositeSchema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { - return c.forSchemasIndexQuery(from, through, func(from, through model.Time, schema Schema) ([]IndexQuery, error) { - return schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) - }) -} - -func (c compositeSchema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { - return c.forSchemasIndexQuery(from, through, func(from, through model.Time, schema Schema) ([]IndexQuery, error) { - return schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) - }) -} diff --git a/pkg/chunk/schema_config_test.go b/pkg/chunk/schema_config_test.go index 2bf6e1d1d41..3ff0455e2d3 100644 --- a/pkg/chunk/schema_config_test.go +++ b/pkg/chunk/schema_config_test.go @@ -1,12 +1,10 @@ package chunk import ( - "fmt" "reflect" "testing" "github.com/prometheus/common/model" - "github.com/weaveworks/common/test" ) func TestHourlyBuckets(t *testing.T) { @@ -180,146 +178,3 @@ func TestDailyBuckets(t *testing.T) { }) } } - -func TestCompositeSchema(t *testing.T) { - type result struct { - from, through model.Time - schema Schema - } - collect := func(results *[]result) func(from, through model.Time, schema Schema) ([]IndexEntry, error) { - return func(from, through model.Time, schema Schema) ([]IndexEntry, error) { - *results = append(*results, result{from, through, schema}) - 
return nil, nil - } - } - cs := compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - {model.TimeFromUnix(100), mockSchema(2)}, - {model.TimeFromUnix(200), mockSchema(3)}, - }, - } - - for i, tc := range []struct { - cs compositeSchema - from, through int64 - want []result - }{ - // Test we have sensible results when there are no schema's defined - {compositeSchema{}, 0, 1, []result{}}, - - // Test we have sensible results when there is a single schema - { - compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - }, - }, - 0, 10, - []result{ - {model.TimeFromUnix(0), model.TimeFromUnix(10), mockSchema(1)}, - }, - }, - - // Test we have sensible results for negative (ie pre 1970) times - { - compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - }, - }, - -10, -9, - []result{}, - }, - { - compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - }, - }, - -10, 10, - []result{ - {model.TimeFromUnix(0), model.TimeFromUnix(10), mockSchema(1)}, - }, - }, - - // Test we have sensible results when there is two schemas - { - compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - {model.TimeFromUnix(100), mockSchema(2)}, - }, - }, - 34, 165, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockSchema(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(165), mockSchema(2)}, - }, - }, - - // Test we get only one result when two schema start at same time - { - compositeSchema{ - schemas: []compositeSchemaEntry{ - {model.TimeFromUnix(0), mockSchema(1)}, - {model.TimeFromUnix(10), mockSchema(2)}, - {model.TimeFromUnix(10), mockSchema(3)}, - }, - }, - 0, 165, - []result{ - {model.TimeFromUnix(0), model.TimeFromUnix(10) - 1, mockSchema(1)}, - {model.TimeFromUnix(10), model.TimeFromUnix(165), mockSchema(3)}, - }, - }, - - // Test all the various combination we can get when there are three schemas - { - cs, 34, 65, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(65), mockSchema(1)}, - }, - }, - - { - cs, 244, 6785, - []result{ - {model.TimeFromUnix(244), model.TimeFromUnix(6785), mockSchema(3)}, - }, - }, - - { - cs, 34, 165, - []result{ - {model.TimeFromUnix(34), model.TimeFromUnix(100) - 1, mockSchema(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(165), mockSchema(2)}, - }, - }, - - { - cs, 151, 264, - []result{ - {model.TimeFromUnix(151), model.TimeFromUnix(200) - 1, mockSchema(2)}, - {model.TimeFromUnix(200), model.TimeFromUnix(264), mockSchema(3)}, - }, - }, - - { - cs, 32, 264, - []result{ - {model.TimeFromUnix(32), model.TimeFromUnix(100) - 1, mockSchema(1)}, - {model.TimeFromUnix(100), model.TimeFromUnix(200) - 1, mockSchema(2)}, - {model.TimeFromUnix(200), model.TimeFromUnix(264), mockSchema(3)}, - }, - }, - } { - t.Run(fmt.Sprintf("TestSchemaComposite[%d]", i), func(t *testing.T) { - have := []result{} - tc.cs.forSchemasIndexEntry(model.TimeFromUnix(tc.from), model.TimeFromUnix(tc.through), collect(&have)) - if !reflect.DeepEqual(tc.want, have) { - t.Fatalf("wrong schemas - %s", test.Diff(tc.want, have)) - } - }) - } -} diff --git a/pkg/chunk/schema_test.go b/pkg/chunk/schema_test.go index 70403f6d68c..73a7d7e2cb7 100644 --- a/pkg/chunk/schema_test.go +++ b/pkg/chunk/schema_test.go @@ -17,24 +17,6 @@ import ( "github.com/weaveworks/cortex/pkg/util" ) -type mockSchema int - -func (mockSchema) GetWriteEntries(from, through model.Time, userID string, 
metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { - return nil, nil -} -func (mockSchema) GetReadQueries(from, through model.Time, userID string) ([]IndexQuery, error) { - return nil, nil -} -func (mockSchema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error) { - return nil, nil -} -func (mockSchema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { - return nil, nil -} -func (mockSchema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { - return nil, nil -} - type ByHashRangeKey []IndexEntry func (a ByHashRangeKey) Len() int { return len(a) } @@ -81,15 +63,6 @@ func TestSchemaHashKeys(t *testing.T) { From: util.NewDayValue(model.TimeFromUnix(5 * 24 * 60 * 60)), }, } - compositeSchema := func(dailyBucketsFrom model.Time) Schema { - cfgCp := cfg - cfgCp.DailyBucketsFrom = util.NewDayValue(dailyBucketsFrom) - schema, err := newCompositeSchema(cfgCp) - if err != nil { - t.Fatal(err) - } - return schema - } hourlyBuckets := v1Schema(cfg) dailyBuckets := v3Schema(cfg) labelBuckets := v4Schema(cfg) @@ -134,83 +107,6 @@ func TestSchemaHashKeys(t *testing.T) { mkResult(table, "userid:d%d:foo:bar", 0, 3), ), }, - - // Buckets are by hour until we reach the `dailyBucketsFrom`, after which they are by day. - { - compositeSchema(model.TimeFromUnix(0).Add(1 * 24 * time.Hour)), - 0, (3 * 24 * 60 * 60) - 1, "foo", - mergeResults( - mkResult(table, "userid:%d:foo", 0, 1*24), - mkResult(table, "userid:d%d:foo", 1, 3), - ), - }, - - // Only the day part of `dailyBucketsFrom` matters, not the time part. - { - compositeSchema(model.TimeFromUnix(0).Add(2*24*time.Hour) - 1), - 0, (3 * 24 * 60 * 60) - 1, "foo", - mergeResults( - mkResult(table, "userid:%d:foo", 0, 1*24), - mkResult(table, "userid:d%d:foo", 1, 3), - ), - }, - - // Moving dailyBucketsFrom to the previous day compared to the above makes 24 1-hour buckets disappear. - { - compositeSchema(model.TimeFromUnix(0).Add(1*24*time.Hour) - 1), - 0, (3 * 24 * 60 * 60) - 1, "foo", - mkResult(table, "userid:d%d:foo", 0, 3), - }, - - // If `dailyBucketsFrom` is after the interval, everything will be bucketed by hour. - { - compositeSchema(model.TimeFromUnix(0).Add(99 * 24 * time.Hour)), - 0, (2 * 24 * 60 * 60) - 1, "foo", - mkResult(table, "userid:%d:foo", 0, 2*24), - }, - - // Should only return daily buckets when dailyBucketsFrom is before the interval. - { - compositeSchema(model.TimeFromUnix(0)), - 1 * 24 * 60 * 60, (3 * 24 * 60 * 60) - 1, "foo", - mkResult(table, "userid:d%d:foo", 1, 3), - }, - - // Basic weekly- ables. - { - compositeSchema(model.TimeFromUnix(0)), - 5 * 24 * 60 * 60, (10 * 24 * 60 * 60) - 1, "foo", - mergeResults( - mkResult(periodicPrefix+"2", "userid:d%d:foo", 5, 6), - mkResult(periodicPrefix+"3", "userid:d%d:foo", 6, 8), - mkResult(periodicPrefix+"4", "userid:d%d:foo", 8, 10), - ), - }, - - // Daily buckets + weekly tables. - { - compositeSchema(model.TimeFromUnix(0)), - 0, (10 * 24 * 60 * 60) - 1, "foo", - mergeResults( - mkResult(table, "userid:d%d:foo", 0, 5), - mkResult(periodicPrefix+"2", "userid:d%d:foo", 5, 6), - mkResult(periodicPrefix+"3", "userid:d%d:foo", 6, 8), - mkResult(periodicPrefix+"4", "userid:d%d:foo", 8, 10), - ), - }, - - // Houly Buckets, then daily buckets, then weekly tables. 
- { - compositeSchema(model.TimeFromUnix(2 * 24 * 60 * 60)), - 0, (10 * 24 * 60 * 60) - 1, "foo", - mergeResults( - mkResult(table, "userid:%d:foo", 0, 2*24), - mkResult(table, "userid:d%d:foo", 2, 5), - mkResult(periodicPrefix+"2", "userid:d%d:foo", 5, 6), - mkResult(periodicPrefix+"3", "userid:d%d:foo", 6, 8), - mkResult(periodicPrefix+"4", "userid:d%d:foo", 8, 10), - ), - }, } { t.Run(fmt.Sprintf("TestSchemaHashKeys[%d]", i), func(t *testing.T) { have, err := tc.Schema.GetWriteEntries( diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 835ab052cd0..8fb57e80183 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -3,6 +3,7 @@ package ingester import ( "context" "fmt" + "math" "net/http" "sort" "sync" @@ -18,7 +19,6 @@ import ( "github.com/weaveworks/common/user" "github.com/weaveworks/cortex/pkg/chunk" "github.com/weaveworks/cortex/pkg/ingester/client" - "github.com/weaveworks/cortex/pkg/util" ) type testStore struct { @@ -86,36 +86,6 @@ func matrixToSamples(m model.Matrix) []model.Sample { return samples } -// chunksToMatrix converts a slice of chunks into a model.Matrix. -func chunksToMatrix(chunks []chunk.Chunk) (model.Matrix, error) { - // Group chunks by series, sort and dedupe samples. - sampleStreams := map[model.Fingerprint]*model.SampleStream{} - for _, c := range chunks { - fp := c.Metric.Fingerprint() - ss, ok := sampleStreams[fp] - if !ok { - ss = &model.SampleStream{ - Metric: c.Metric, - } - sampleStreams[fp] = ss - } - - samples, err := c.Samples(c.From, c.Through) - if err != nil { - return nil, err - } - - ss.Values = util.MergeSampleSets(ss.Values, samples) - } - - matrix := make(model.Matrix, 0, len(sampleStreams)) - for _, ss := range sampleStreams { - matrix = append(matrix, ss) - } - - return matrix, nil -} - func TestIngesterAppend(t *testing.T) { store, ing := newTestStore(t, defaultIngesterTestConfig()) @@ -154,7 +124,7 @@ func TestIngesterAppend(t *testing.T) { // Read samples back via chunk store. 
ing.Shutdown() for _, userID := range userIDs { - res, err := chunksToMatrix(store.chunks[userID]) + res, err := chunk.ChunksToMatrix(context.Background(), store.chunks[userID], model.Time(0), model.Time(math.MaxInt64)) require.NoError(t, err) sort.Sort(res) assert.Equal(t, testData[userID], res) diff --git a/pkg/ingester/lifecycle_test.go b/pkg/ingester/lifecycle_test.go index bce5b745501..1de9249a80f 100644 --- a/pkg/ingester/lifecycle_test.go +++ b/pkg/ingester/lifecycle_test.go @@ -2,6 +2,7 @@ package ingester import ( "io" + "math" "reflect" "runtime" "testing" @@ -17,6 +18,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/weaveworks/common/user" + "github.com/weaveworks/cortex/pkg/chunk" "github.com/weaveworks/cortex/pkg/ingester/client" "github.com/weaveworks/cortex/pkg/ring" "github.com/weaveworks/cortex/pkg/util" @@ -314,7 +316,7 @@ func TestIngesterFlush(t *testing.T) { }) // And check the store has the chunk - res, err := chunksToMatrix(store.chunks[userID]) + res, err := chunk.ChunksToMatrix(context.Background(), store.chunks[userID], model.Time(0), model.Time(math.MaxInt64)) require.NoError(t, err) assert.Equal(t, model.Matrix{ &model.SampleStream{ diff --git a/pkg/querier/benchmark_test.go b/pkg/querier/benchmark_test.go new file mode 100644 index 00000000000..a674db7eb91 --- /dev/null +++ b/pkg/querier/benchmark_test.go @@ -0,0 +1,27 @@ +package querier + +import ( + "fmt" + "testing" + + "github.com/prometheus/prometheus/promql" +) + +var result *promql.Result + +func BenchmarkChunkQueryable(b *testing.B) { + for _, encoding := range encodings { + store, from := makeMockChunkStore(b, 24*30, encoding.e) + + for _, q := range queryables { + b.Run(fmt.Sprintf("%s/%s", q.name, encoding.name), func(b *testing.B) { + queryable := q.f(store) + var r *promql.Result + for n := 0; n < b.N; n++ { + r = testQuery(b, queryable, from) + } + result = r + }) + } + } +} diff --git a/pkg/querier/chunk_iterator.go b/pkg/querier/chunk_iterator.go new file mode 100644 index 00000000000..5a57a080c01 --- /dev/null +++ b/pkg/querier/chunk_iterator.go @@ -0,0 +1,59 @@ +package querier + +import ( + "github.com/prometheus/common/model" + "github.com/weaveworks/cortex/pkg/chunk" + promchunk "github.com/weaveworks/cortex/pkg/prom1/storage/local/chunk" +) + +type chunkIterator struct { + chunk.Chunk + it promchunk.Iterator + + // At() is called often in the heap code, so caching its result seems like + // a good idea. + cacheValid bool + cachedTime int64 + cachedValue float64 +} + +// Seek advances the iterator forward to the value at or after +// the given timestamp. +func (i *chunkIterator) Seek(t int64) bool { + i.cacheValid = false + + // We assume seeks only care about a specific window; if this chunk doesn't + // contain samples in that window, we can shortcut. 
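+ // i.Through is the end of this chunk's time range, so a chunk that ends
+ // before t can never contain a sample at or after t.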
+ if int64(i.Through) < t { + return false + } + + return i.it.FindAtOrAfter(model.Time(t)) +} + +func (i *chunkIterator) AtTime() int64 { + if !i.cacheValid { + v := i.it.Value() + i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) + i.cacheValid = true + } + return i.cachedTime +} + +func (i *chunkIterator) At() (int64, float64) { + if !i.cacheValid { + v := i.it.Value() + i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) + i.cacheValid = true + } + return i.cachedTime, i.cachedValue +} + +func (i *chunkIterator) Next() bool { + i.cacheValid = false + return i.it.Scan() +} + +func (i *chunkIterator) Err() error { + return i.it.Err() +} diff --git a/pkg/querier/chunk_merge_iterator.go b/pkg/querier/chunk_merge_iterator.go new file mode 100644 index 00000000000..7bd679fd7de --- /dev/null +++ b/pkg/querier/chunk_merge_iterator.go @@ -0,0 +1,155 @@ +package querier + +import ( + "container/heap" + "sort" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/cortex/pkg/chunk" +) + +// Limit on the window size of seeks. +const window = 24 * time.Hour +const chunkSize = 12 * time.Hour + +type chunkMergeIterator struct { + chunks []*chunkIterator + h seriesIteratorHeap + + curr *chunkIterator + lastErr error +} + +func newChunkMergeIterator(cs []chunk.Chunk) storage.SeriesIterator { + chunks := make([]*chunkIterator, len(cs), len(cs)) + for i := range cs { + chunks[i] = &chunkIterator{ + Chunk: cs[i], + it: cs[i].Data.NewIterator(), + } + } + sort.Sort(byFrom(chunks)) + + c := &chunkMergeIterator{ + chunks: chunks, + h: make(seriesIteratorHeap, 0, len(chunks)), + } + + for _, iter := range c.chunks { + if iter.Next() { + heap.Push(&c.h, iter) + } else if err := iter.Err(); err != nil { + c.lastErr = err + } + } + return c +} + +func (c *chunkMergeIterator) findChunks(t int64) []*chunkIterator { + // Find beginning and end index into list of chunks. 
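+ // The chunks are sorted by From. i is the first chunk that could still cover t
+ // (assuming a chunk spans at most chunkSize), and j is the first chunk starting
+ // at or after t+window, so chunks[i:j] are the only chunks worth seeking into.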
+ i := sort.Search(len(c.chunks), func(i int) bool { + return c.chunks[i].From.Add(chunkSize) >= model.Time(t) + }) + j := sort.Search(len(c.chunks), func(i int) bool { + return model.Time(t).Add(window) <= c.chunks[i].From + }) + return c.chunks[i:j] +} + +func (c *chunkMergeIterator) Seek(t int64) bool { + chunks := c.findChunks(t) + c.curr = nil + c.h = c.h[:0] + + for _, iter := range chunks { + if iter.Seek(t) { + heap.Push(&c.h, iter) + } else if err := iter.Err(); err != nil { + c.lastErr = err + return false + } + } + + return c.popAndDedupe() +} + +func (c *chunkMergeIterator) Next() bool { + if c.curr != nil { + if c.curr.Next() { + heap.Push(&c.h, c.curr) + } else if err := c.curr.Err(); err != nil { + c.lastErr = err + return false + } + c.curr = nil + } + + return c.popAndDedupe() +} + +func (c *chunkMergeIterator) popAndDedupe() bool { + if len(c.h) == 0 { + return false + } + + c.curr = heap.Pop(&c.h).(*chunkIterator) + for len(c.h) > 0 { + next := c.h[0] + if next.AtTime() != c.curr.AtTime() { + break + } + + if next.Next() { + heap.Fix(&c.h, 0) + continue + } + + heap.Pop(&c.h) + if err := next.Err(); err != nil { + c.lastErr = err + return false + } + } + return true +} + +func (c *chunkMergeIterator) At() (t int64, v float64) { + if c.curr == nil { + panic("mergeIterator.At() called after .Next() returned false.") + } + + return c.curr.At() +} + +func (c *chunkMergeIterator) Err() error { + return c.lastErr +} + +type seriesIteratorHeap []*chunkIterator + +func (h seriesIteratorHeap) Len() int { return len(h) } +func (h seriesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h seriesIteratorHeap) Less(i, j int) bool { + return h[i].AtTime() < h[j].AtTime() +} + +func (h *seriesIteratorHeap) Push(x interface{}) { + *h = append(*h, x.(*chunkIterator)) +} + +func (h *seriesIteratorHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +type byFrom []*chunkIterator + +func (b byFrom) Len() int { return len(b) } +func (b byFrom) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byFrom) Less(i, j int) bool { return b[i].From < b[j].From } diff --git a/pkg/querier/chunk_merge_iterator_test.go b/pkg/querier/chunk_merge_iterator_test.go new file mode 100644 index 00000000000..ff22fcf7d37 --- /dev/null +++ b/pkg/querier/chunk_merge_iterator_test.go @@ -0,0 +1,89 @@ +package querier + +import ( + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaveworks/cortex/pkg/chunk" + promchunk "github.com/weaveworks/cortex/pkg/prom1/storage/local/chunk" +) + +func TestChunkMergeIterator(t *testing.T) { + for i, tc := range []struct { + chunks []chunk.Chunk + mint, maxt int64 + }{ + { + chunks: []chunk.Chunk{ + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + }, + maxt: 100, + }, + + { + chunks: []chunk.Chunk{ + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + }, + maxt: 100, + }, + + { + chunks: []chunk.Chunk{ + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 50, 150, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 100, 200, 1*time.Millisecond, promchunk.Varbit), + }, + maxt: 200, + }, + + { + chunks: []chunk.Chunk{ + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 100, 200, 1*time.Millisecond, promchunk.Varbit), + }, + maxt: 200, + }, + } { 
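+ // Each case walks the merged iterator across [mint, maxt) and checks that
+ // every timestamp appears exactly once, with the value equal to float64(ts).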
+ t.Run(strconv.Itoa(i), func(t *testing.T) { + iter := newChunkMergeIterator(tc.chunks) + for i := tc.mint; i < tc.maxt; i++ { + require.True(t, iter.Next()) + ts, s := iter.At() + assert.Equal(t, i, ts) + assert.Equal(t, float64(i), s) + assert.NoError(t, iter.Err()) + } + assert.False(t, iter.Next()) + }) + } +} + +func TestChunkMergeIteratorSeek(t *testing.T) { + iter := newChunkMergeIterator([]chunk.Chunk{ + mkChunk(t, 0, 100, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 50, 150, 1*time.Millisecond, promchunk.Varbit), + mkChunk(t, 100, 200, 1*time.Millisecond, promchunk.Varbit), + }) + + for i := int64(0); i < 10; i += 20 { + require.True(t, iter.Seek(i)) + ts, s := iter.At() + assert.Equal(t, i, ts) + assert.Equal(t, float64(i), s) + assert.NoError(t, iter.Err()) + + for j := i + 1; j < 200; j++ { + require.True(t, iter.Next()) + ts, s := iter.At() + assert.Equal(t, j, ts) + assert.Equal(t, float64(j), s) + assert.NoError(t, iter.Err()) + } + assert.False(t, iter.Next()) + } +} diff --git a/pkg/querier/chunk_queryable.go b/pkg/querier/chunk_queryable.go new file mode 100644 index 00000000000..e1138e58ca8 --- /dev/null +++ b/pkg/querier/chunk_queryable.go @@ -0,0 +1,106 @@ +package querier + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/cortex/pkg/chunk" +) + +// ChunkStore is the read-interface to the Chunk Store. Made an interface here +// to reduce package coupling. +type ChunkStore interface { + Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) +} + +// NewQueryable creates a new Queryable for cortex. +func NewQueryable(distributor Distributor, chunkStore ChunkStore, iterators bool) storage.Queryable { + dq := newDistributorQueryable(distributor) + cq := newChunkQueryable(chunkStore) + if iterators { + cq = newIterChunkQueryable(chunkStore) + } + + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + dqr, err := dq.Querier(ctx, mint, maxt) + if err != nil { + return nil, err + } + + cqr, err := cq.Querier(ctx, mint, maxt) + if err != nil { + return nil, err + } + + return querier{ + Querier: storage.NewMergeQuerier([]storage.Querier{dqr, cqr}), + distributor: distributor, + ctx: ctx, + mint: mint, + maxt: maxt, + }, nil + }) +} + +type querier struct { + storage.Querier + + distributor Distributor + ctx context.Context + mint, maxt int64 +} + +func (q querier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { + // Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation, + // which needs only metadata. + if sp != nil { + return q.Querier.Select(sp, matchers...) + } + + ms, err := q.distributor.MetricsForLabelMatchers(q.ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) 
+ if err != nil { + return nil, err + } + return metricsToSeriesSet(ms), nil +} + +func newChunkQueryable(store ChunkStore) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &chunkQuerier{ + store: store, + ctx: ctx, + mint: mint, + maxt: maxt, + }, nil + }) +} + +type chunkQuerier struct { + store ChunkStore + ctx context.Context + mint, maxt int64 +} + +func (c chunkQuerier) Select(_ *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { + chunks, err := c.store.Get(c.ctx, model.Time(c.mint), model.Time(c.maxt), matchers...) + if err != nil { + return nil, err + } + + matrix, err := chunk.ChunksToMatrix(c.ctx, chunks, model.Time(c.mint), model.Time(c.maxt)) + if err != nil { + return nil, err + } + + return matrixToSeriesSet(matrix), nil +} + +func (c chunkQuerier) LabelValues(name string) ([]string, error) { + return nil, nil +} + +func (c chunkQuerier) Close() error { + return nil +} diff --git a/pkg/querier/chunk_queryable_iter.go b/pkg/querier/chunk_queryable_iter.go new file mode 100644 index 00000000000..b23f36f2f1e --- /dev/null +++ b/pkg/querier/chunk_queryable_iter.go @@ -0,0 +1,74 @@ +package querier + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + + "github.com/weaveworks/cortex/pkg/chunk" +) + +func newIterChunkQueryable(store ChunkStore) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &iterChunkQuerier{ + store: store, + ctx: ctx, + mint: mint, + maxt: maxt, + }, nil + }) +} + +type iterChunkQuerier struct { + store ChunkStore + ctx context.Context + mint, maxt int64 +} + +func (q *iterChunkQuerier) Select(_ *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { + chunks, err := q.store.Get(q.ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + if err != nil { + return nil, promql.ErrStorage(err) + } + + chunksBySeries := map[model.Fingerprint][]chunk.Chunk{} + for _, c := range chunks { + fp := c.Metric.Fingerprint() + chunksBySeries[fp] = append(chunksBySeries[fp], c) + } + + series := make([]storage.Series, 0, len(chunksBySeries)) + for i := range chunksBySeries { + series = append(series, &chunkSeries{ + labels: metricToLabels(chunksBySeries[i][0].Metric), + chunks: chunksBySeries[i], + }) + } + + return newConcreteSeriesSet(series), nil +} + +func (q *iterChunkQuerier) LabelValues(name string) ([]string, error) { + return nil, nil +} + +func (q *iterChunkQuerier) Close() error { + return nil +} + +type chunkSeries struct { + labels labels.Labels + chunks []chunk.Chunk +} + +func (s *chunkSeries) Labels() labels.Labels { + return s.labels +} + +// Iterator returns a new iterator of the data of the series. 
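+// Samples from overlapping chunks are merged and de-duplicated by timestamp.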
+func (s *chunkSeries) Iterator() storage.SeriesIterator { + return newChunkMergeIterator(s.chunks) +} diff --git a/pkg/querier/chunk_queryable_test.go b/pkg/querier/chunk_queryable_test.go new file mode 100644 index 00000000000..b02e3981ac0 --- /dev/null +++ b/pkg/querier/chunk_queryable_test.go @@ -0,0 +1,120 @@ +package querier + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaveworks/cortex/pkg/chunk" + promchunk "github.com/weaveworks/cortex/pkg/prom1/storage/local/chunk" + "github.com/weaveworks/cortex/pkg/util" +) + +const ( + userID = "userID" + fp = 1 + chunkOffset = 6 * time.Minute + chunkLength = 3 * time.Hour + sampleRate = 15 * time.Second + samplesPerChunk = chunkLength / sampleRate +) + +var ( + queryables = []struct { + name string + f func(ChunkStore) storage.Queryable + }{ + {"matrixes", newChunkQueryable}, + {"iterators", newIterChunkQueryable}, + } + + encodings = []struct { + name string + e promchunk.Encoding + }{ + {"DoubleDelta", promchunk.DoubleDelta}, + {"Varbit", promchunk.Varbit}, + } +) + +func TestChunkQueryable(t *testing.T) { + for _, q := range queryables { + for _, encoding := range encodings { + t.Run(fmt.Sprintf("%s/%s", q.name, encoding.name), func(t *testing.T) { + store, from := makeMockChunkStore(t, 24*30, encoding.e) + queryable := q.f(store) + testQuery(t, queryable, from) + }) + } + } +} + +type mockChunkStore struct { + chunks []chunk.Chunk +} + +func (m mockChunkStore) Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { + return m.chunks, nil +} + +func makeMockChunkStore(t require.TestingT, numChunks int, encoding promchunk.Encoding) (ChunkStore, model.Time) { + var ( + chunks = make([]chunk.Chunk, 0, numChunks) + from = model.Time(0) + ) + for i := 0; i < numChunks; i++ { + c := mkChunk(t, from, from.Add(samplesPerChunk*sampleRate), sampleRate, encoding) + chunks = append(chunks, c) + from = from.Add(chunkOffset) + } + return mockChunkStore{chunks}, from +} + +func mkChunk(t require.TestingT, mint, maxt model.Time, step time.Duration, encoding promchunk.Encoding) chunk.Chunk { + metric := model.Metric{ + model.MetricNameLabel: "foo", + } + pc, err := promchunk.NewForEncoding(encoding) + require.NoError(t, err) + for i := mint; i.Before(maxt); i = i.Add(step) { + pcs, err := pc.Add(model.SamplePair{ + Timestamp: i, + Value: model.SampleValue(float64(i)), + }) + require.NoError(t, err) + require.Len(t, pcs, 1) + pc = pcs[0] + } + return chunk.NewChunk(userID, fp, metric, pc, mint, maxt) +} + +func testQuery(t require.TestingT, queryable storage.Queryable, end model.Time) *promql.Result { + from, through, step := time.Unix(0, 0), end.Time(), sampleRate*4 + engine := promql.NewEngine(util.Logger, nil, 10, 1*time.Minute) + query, err := engine.NewRangeQuery(queryable, "rate(foo[1m])", from, through, step) + require.NoError(t, err) + + r := query.Exec(context.Background()) + m, err := r.Matrix() + require.NoError(t, err) + require.Len(t, m, 1) + + series := m[0] + assert.Equal(t, labels.Labels{}, series.Metric) + assert.Equal(t, 
int(through.Sub(from)/step), len(series.Points)) + ts := int64(step / time.Millisecond) + for _, point := range series.Points { + assert.Equal(t, ts, point.T) + assert.Equal(t, 1000.0, point.V) + ts += int64(step / time.Millisecond) + } + return r +} diff --git a/pkg/querier/config.go b/pkg/querier/config.go new file mode 100644 index 00000000000..fdb590d0dda --- /dev/null +++ b/pkg/querier/config.go @@ -0,0 +1,34 @@ +package querier + +import ( + "flag" + "time" + + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/cortex/pkg/util" +) + +// Config contains the configuration require to create a querier +type Config struct { + MaxConcurrent int + Timeout time.Duration + Iterators bool +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.") + f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.") + if f.Lookup("promql.lookback-delta") == nil { + f.DurationVar(&promql.LookbackDelta, "promql.lookback-delta", promql.LookbackDelta, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") + } + f.BoolVar(&cfg.Iterators, "querier.iterators", false, "Use iterators to execute query, as opposed to fully materialising the series in memory.") +} + +// Make builds a queryable and promql engine. +func Make(cfg Config, distributor Distributor, chunkStore ChunkStore) (storage.Queryable, *promql.Engine) { + queryable := NewQueryable(distributor, chunkStore, cfg.Iterators) + engine := promql.NewEngine(util.Logger, nil, cfg.MaxConcurrent, cfg.Timeout) + return queryable, engine +} diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go new file mode 100644 index 00000000000..5c84c0ba166 --- /dev/null +++ b/pkg/querier/distributor_queryable.go @@ -0,0 +1,63 @@ +package querier + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + + "github.com/weaveworks/cortex/pkg/prom1/storage/metric" +) + +// Distributor is the read interface to the distributor, made an interface here +// to reduce package coupling. 
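+// The production implementation is the distributor itself; tests use the mock
+// in distributor_queryable_test.go.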
+type Distributor interface { + Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) + LabelValuesForLabelName(context.Context, model.LabelName) (model.LabelValues, error) + MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) +} + +func newDistributorQueryable(distributor Distributor) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &distributorQuerier{ + distributor: distributor, + ctx: ctx, + mint: mint, + maxt: maxt, + }, nil + }) +} + +type distributorQuerier struct { + distributor Distributor + ctx context.Context + mint, maxt int64 +} + +func (q *distributorQuerier) Select(_ *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { + matrix, err := q.distributor.Query(q.ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + if err != nil { + return nil, promql.ErrStorage(err) + } + + return matrixToSeriesSet(matrix), nil +} + +func (q *distributorQuerier) LabelValues(name string) ([]string, error) { + values, err := q.distributor.LabelValuesForLabelName(q.ctx, model.LabelName(name)) + if err != nil { + return nil, err + } + + result := make([]string, len(values), len(values)) + for i := 0; i < len(values); i++ { + result[i] = string(values[i]) + } + return result, nil +} + +func (q *distributorQuerier) Close() error { + return nil +} diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go new file mode 100644 index 00000000000..1bc3ff7bac9 --- /dev/null +++ b/pkg/querier/distributor_queryable_test.go @@ -0,0 +1,64 @@ +package querier + +import ( + "context" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/stretchr/testify/require" + "github.com/weaveworks/cortex/pkg/prom1/storage/metric" +) + +const ( + maxt, mint = 0, 10 +) + +func TestDistributorQuerier(t *testing.T) { + d := &mockDistributor{ + m: model.Matrix{ + // Matrixes are unsorted, so this tests that the labels get sorted. 
+ &model.SampleStream{ + Metric: model.Metric{ + "foo": "bar", + }, + }, + &model.SampleStream{ + Metric: model.Metric{ + "bar": "baz", + }, + }, + }, + } + queryable := newDistributorQueryable(d) + querier, err := queryable.Querier(context.Background(), mint, maxt) + require.NoError(t, err) + + seriesSet, err := querier.Select(nil) + require.NoError(t, err) + + require.True(t, seriesSet.Next()) + series := seriesSet.At() + require.Equal(t, labels.Labels{{Name: "bar", Value: "baz"}}, series.Labels()) + + require.True(t, seriesSet.Next()) + series = seriesSet.At() + require.Equal(t, labels.Labels{{Name: "foo", Value: "bar"}}, series.Labels()) + + require.False(t, seriesSet.Next()) + require.NoError(t, seriesSet.Err()) +} + +type mockDistributor struct { + m model.Matrix +} + +func (m *mockDistributor) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) { + return m.m, nil +} +func (m *mockDistributor) LabelValuesForLabelName(context.Context, model.LabelName) (model.LabelValues, error) { + return nil, nil +} +func (m *mockDistributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) { + return nil, nil +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go deleted file mode 100644 index 797726a132d..00000000000 --- a/pkg/querier/querier.go +++ /dev/null @@ -1,281 +0,0 @@ -package querier - -import ( - "context" - "flag" - "net/http" - "time" - - "github.com/go-kit/kit/log/level" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - - "github.com/weaveworks/cortex/pkg/ingester/client" - "github.com/weaveworks/cortex/pkg/prom1/storage/metric" - - "github.com/weaveworks/cortex/pkg/util" -) - -// Config contains the configuration require to create a querier -type Config struct { - MaxConcurrent int - Timeout time.Duration -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - flag.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.") - flag.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.") - if flag.Lookup("promql.lookback-delta") == nil { - flag.DurationVar(&promql.LookbackDelta, "promql.lookback-delta", promql.LookbackDelta, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") - } -} - -// ChunkStore is the interface we need to get chunks -type ChunkStore interface { - Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) (model.Matrix, error) -} - -// NewQueryable creates a new Queryable for cortex. -func NewQueryable(distributor Querier, chunkStore ChunkStore) MergeQueryable { - return MergeQueryable{ - queriers: []Querier{ - distributor, - &chunkQuerier{ - store: chunkStore, - }, - }, - } -} - -// A Querier allows querying an underlying storage for time series samples or metadata. 
-type Querier interface { - Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) - MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) - LabelValuesForLabelName(context.Context, model.LabelName) (model.LabelValues, error) -} - -// A chunkQuerier is a Querier that fetches samples from a ChunkStore. -type chunkQuerier struct { - store ChunkStore -} - -// Query implements Querier and transforms a list of chunks into sample -// matrices. -func (q *chunkQuerier) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) { - // Get iterators for all matching series from ChunkStore. - matrix, err := q.store.Get(ctx, from, to, matchers...) - if err != nil { - return nil, promql.ErrStorage(err) - } - - return matrix, nil -} - -// LabelValuesForLabelName returns all of the label values that are associated with a given label name. -func (q *chunkQuerier) LabelValuesForLabelName(ctx context.Context, ln model.LabelName) (model.LabelValues, error) { - // TODO: Support querying historical label values at some point? - return nil, nil -} - -// MetricsForLabelMatchers is a noop for chunk querier. -func (q *chunkQuerier) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matcherSets ...*labels.Matcher) ([]metric.Metric, error) { - return nil, nil -} - -func mergeMatrices(matrices chan model.Matrix, errors chan error, n int) (model.Matrix, error) { - // Group samples from all matrices by fingerprint. - fpToSS := map[model.Fingerprint]*model.SampleStream{} - var lastErr error - for i := 0; i < n; i++ { - select { - case err := <-errors: - lastErr = err - - case matrix := <-matrices: - for _, ss := range matrix { - fp := ss.Metric.Fingerprint() - if fpSS, ok := fpToSS[fp]; !ok { - fpToSS[fp] = ss - } else { - fpSS.Values = util.MergeSampleSets(fpSS.Values, ss.Values) - } - } - } - } - if lastErr != nil { - return nil, lastErr - } - - matrix := make(model.Matrix, 0, len(fpToSS)) - for _, ss := range fpToSS { - matrix = append(matrix, ss) - } - return matrix, nil -} - -// A MergeQueryable is a storage.Queryable that produces a storage.Querier which merges -// results from multiple underlying Queriers. -type MergeQueryable struct { - queriers []Querier -} - -// Querier implements storage.Queryable. -func (q MergeQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return mergeQuerier{ - ctx: ctx, - queriers: q.queriers, - mint: mint, - maxt: maxt, - }, nil -} - -// RemoteReadHandler handles Prometheus remote read requests. -func (q MergeQueryable) RemoteReadHandler(w http.ResponseWriter, r *http.Request) { - compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Read-Version")) - - ctx := r.Context() - var req client.ReadRequest - logger := util.WithContext(r.Context(), util.Logger) - if _, err := util.ParseProtoReader(ctx, r.Body, &req, compressionType); err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // Fetch samples for all queries in parallel. 
- resp := client.ReadResponse{ - Results: make([]*client.QueryResponse, len(req.Queries)), - } - errors := make(chan error) - for i, qr := range req.Queries { - go func(i int, qr *client.QueryRequest) { - from, to, matchers, err := client.FromQueryRequest(qr) - if err != nil { - errors <- err - return - } - - querier, err := q.Querier(ctx, int64(from), int64(to)) - if err != nil { - errors <- err - return - } - - matrix, err := querier.(mergeQuerier).selectSamplesMatrix(matchers...) - if err != nil { - errors <- err - return - } - - resp.Results[i] = client.ToQueryResponse(matrix) - errors <- nil - }(i, qr) - } - - var lastErr error - for range req.Queries { - err := <-errors - if err != nil { - lastErr = err - } - } - if lastErr != nil { - http.Error(w, lastErr.Error(), http.StatusBadRequest) - return - } - - if err := util.SerializeProtoResponse(w, &resp, compressionType); err != nil { - level.Error(logger).Log("msg", "error sending remote read response", "err", err) - } -} - -type mergeQuerier struct { - ctx context.Context - queriers []Querier - mint int64 - maxt int64 -} - -func (mq mergeQuerier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { - // TODO: Update underlying selectors to return errors directly. - // Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation, which needs only metadata - if sp == nil { - return mq.selectMetadata(matchers...), nil - } - return mq.selectSamples(matchers...), nil -} - -func (mq mergeQuerier) selectMetadata(matchers ...*labels.Matcher) storage.SeriesSet { - // NB that we don't do this in parallel, as in practice we only have two queriers, - // one of which is the chunk store, which doesn't implement this yet. - seriesSets := make([]storage.SeriesSet, 0, len(mq.queriers)) - for _, q := range mq.queriers { - ms, err := q.MetricsForLabelMatchers(mq.ctx, model.Time(mq.mint), model.Time(mq.maxt), matchers...) - if err != nil { - return errSeriesSet{err: err} - } - ss := metricsToSeriesSet(ms) - seriesSets = append(seriesSets, ss) - } - - return storage.NewMergeSeriesSet(seriesSets) -} - -func (mq mergeQuerier) selectSamples(matchers ...*labels.Matcher) storage.SeriesSet { - matrix, err := mq.selectSamplesMatrix(matchers...) - if err != nil { - return errSeriesSet{ - err: err, - } - } - return matrixToSeriesSet(matrix) -} - -func (mq mergeQuerier) selectSamplesMatrix(matchers ...*labels.Matcher) (model.Matrix, error) { - incomingMatrices := make(chan model.Matrix) - incomingErrors := make(chan error) - - for _, q := range mq.queriers { - go func(q Querier) { - matrix, err := q.Query(mq.ctx, model.Time(mq.mint), model.Time(mq.maxt), matchers...) 
- if err != nil { - incomingErrors <- err - } else { - incomingMatrices <- matrix - } - }(q) - } - - mergedMatrix, err := mergeMatrices(incomingMatrices, incomingErrors, len(mq.queriers)) - if err != nil { - level.Error(util.WithContext(mq.ctx, util.Logger)).Log("msg", "error in mergeQuerier.selectSamples", "err", err) - return nil, err - } - return mergedMatrix, nil -} - -func (mq mergeQuerier) LabelValues(name string) ([]string, error) { - valueSet := map[string]struct{}{} - for _, q := range mq.queriers { - vals, err := q.LabelValuesForLabelName(mq.ctx, model.LabelName(name)) - if err != nil { - return nil, err - } - for _, v := range vals { - valueSet[string(v)] = struct{}{} - } - } - - values := make([]string, 0, len(valueSet)) - for v := range valueSet { - values = append(values, v) - } - return values, nil -} - -func (mq mergeQuerier) Close() error { - return nil -} diff --git a/pkg/querier/remote_read.go b/pkg/querier/remote_read.go new file mode 100644 index 00000000000..c1dd260fb74 --- /dev/null +++ b/pkg/querier/remote_read.go @@ -0,0 +1,105 @@ +package querier + +import ( + "net/http" + + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/cortex/pkg/ingester/client" + "github.com/weaveworks/cortex/pkg/util" +) + +// RemoteReadHandler handles Prometheus remote read requests. +func RemoteReadHandler(q storage.Queryable) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Read-Version")) + + ctx := r.Context() + var req client.ReadRequest + logger := util.WithContext(r.Context(), util.Logger) + if _, err := util.ParseProtoReader(ctx, r.Body, &req, compressionType); err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Fetch samples for all queries in parallel. + resp := client.ReadResponse{ + Results: make([]*client.QueryResponse, len(req.Queries)), + } + errors := make(chan error) + for i, qr := range req.Queries { + go func(i int, qr *client.QueryRequest) { + from, to, matchers, err := client.FromQueryRequest(qr) + if err != nil { + errors <- err + return + } + + querier, err := q.Querier(ctx, int64(from), int64(to)) + if err != nil { + errors <- err + return + } + + seriesSet, err := querier.Select(nil, matchers...) 
+ if err != nil { + errors <- err + return + } + + matrix, err := seriesSetToMatrix(seriesSet) + if err != nil { + errors <- err + return + } + + resp.Results[i] = client.ToQueryResponse(matrix) + errors <- nil + }(i, qr) + } + + var lastErr error + for range req.Queries { + err := <-errors + if err != nil { + lastErr = err + } + } + if lastErr != nil { + http.Error(w, lastErr.Error(), http.StatusBadRequest) + return + } + + if err := util.SerializeProtoResponse(w, &resp, compressionType); err != nil { + level.Error(logger).Log("msg", "error sending remote read response", "err", err) + } + }) +} + +func seriesSetToMatrix(s storage.SeriesSet) (model.Matrix, error) { + result := model.Matrix{} + + for s.Next() { + series := s.At() + values := []model.SamplePair{} + it := series.Iterator() + for it.Next() { + t, v := it.At() + values = append(values, model.SamplePair{ + Timestamp: model.Time(t), + Value: model.SampleValue(v), + }) + } + if err := it.Err(); err != nil { + return nil, err + } + result = append(result, &model.SampleStream{ + Metric: labelsToMetric(series.Labels()), + Values: values, + }) + } + + return result, s.Err() +} diff --git a/pkg/querier/querier_test.go b/pkg/querier/remote_read_test.go similarity index 50% rename from pkg/querier/querier_test.go rename to pkg/querier/remote_read_test.go index 620fbb46b3e..1d03e71ce02 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/remote_read_test.go @@ -15,28 +15,26 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" "github.com/weaveworks/cortex/pkg/ingester/client" - "github.com/weaveworks/cortex/pkg/prom1/storage/metric" "github.com/weaveworks/cortex/pkg/util/wire" ) func TestRemoteReadHandler(t *testing.T) { - q := MergeQueryable{ - queriers: []Querier{ - mockQuerier{ - matrix: model.Matrix{ - { - Metric: model.Metric{"foo": "bar"}, - Values: []model.SamplePair{ - {Timestamp: 0, Value: 0}, - {Timestamp: 1, Value: 1}, - {Timestamp: 2, Value: 2}, - {Timestamp: 3, Value: 3}, - }, + q := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return mockQuerier{ + matrix: model.Matrix{ + { + Metric: model.Metric{"foo": "bar"}, + Values: []model.SamplePair{ + {Timestamp: 0, Value: 0}, + {Timestamp: 1, Value: 1}, + {Timestamp: 2, Value: 2}, + {Timestamp: 3, Value: 3}, }, }, }, - }, - } + }, nil + }) + handler := RemoteReadHandler(q) requestBody, err := proto.Marshal(&client.ReadRequest{ Queries: []*client.QueryRequest{ @@ -50,7 +48,7 @@ func TestRemoteReadHandler(t *testing.T) { request.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") recorder := httptest.NewRecorder() - q.RemoteReadHandler(recorder, request) + handler.ServeHTTP(recorder, request) require.Equal(t, 200, recorder.Result().StatusCode) responseBody, err := ioutil.ReadAll(recorder.Result().Body) @@ -86,58 +84,18 @@ func TestRemoteReadHandler(t *testing.T) { require.Equal(t, expected, response) } -func TestMergeQuerierSortsMetricLabels(t *testing.T) { - mq := mergeQuerier{ - ctx: context.Background(), - queriers: []Querier{ - mockQuerier{ - matrix: model.Matrix{ - { - Metric: model.Metric{ - model.MetricNameLabel: "testmetric", - "e": "f", - "a": "b", - "g": "h", - "c": "d", - }, - Values: []model.SamplePair{{Timestamp: 0, Value: 0}}, - }, - }, - }, - }, - mint: 0, - maxt: 0, - } - m, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "testmetric") - require.NoError(t, 
err) - dummyParams := storage.SelectParams{} - ss, err := mq.Select(&dummyParams, m) - require.NoError(t, err) - require.NoError(t, ss.Err()) - ss.Next() - require.NoError(t, ss.Err()) - l := ss.At().Labels() - require.Equal(t, labels.Labels{ - {Name: string(model.MetricNameLabel), Value: "testmetric"}, - {Name: "a", Value: "b"}, - {Name: "c", Value: "d"}, - {Name: "e", Value: "f"}, - {Name: "g", Value: "h"}, - }, l) -} - type mockQuerier struct { matrix model.Matrix } -func (m mockQuerier) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) { - return m.matrix, nil +func (m mockQuerier) Select(_ *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, error) { + return matrixToSeriesSet(m.matrix), nil } -func (mockQuerier) LabelValuesForLabelName(context.Context, model.LabelName) (model.LabelValues, error) { +func (m mockQuerier) LabelValues(name string) ([]string, error) { return nil, nil } -func (mockQuerier) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matcherSets ...*labels.Matcher) ([]metric.Metric, error) { - return nil, nil +func (mockQuerier) Close() error { + return nil } diff --git a/pkg/querier/series_set.go b/pkg/querier/series_set.go index f59b7dea837..9f386572cfc 100644 --- a/pkg/querier/series_set.go +++ b/pkg/querier/series_set.go @@ -48,13 +48,21 @@ type concreteSeriesSet struct { series []storage.Series } +func newConcreteSeriesSet(series []storage.Series) storage.SeriesSet { + sort.Sort(byLabels(series)) + return &concreteSeriesSet{ + cur: -1, + series: series, + } +} + func (c *concreteSeriesSet) Next() bool { c.cur++ - return c.cur-1 < len(c.series) + return c.cur < len(c.series) } func (c *concreteSeriesSet) At() storage.Series { - return c.series[c.cur-1] + return c.series[c.cur] } func (c *concreteSeriesSet) Err() error { @@ -109,19 +117,6 @@ func (c *concreteSeriesIterator) Err() error { return nil } -func metricsToSeriesSet(ms []metric.Metric) storage.SeriesSet { - series := make([]storage.Series, 0, len(ms)) - for _, m := range ms { - series = append(series, &concreteSeries{ - labels: metricToLabels(m.Metric), - samples: nil, - }) - } - return &concreteSeriesSet{ - series: series, - } -} - func matrixToSeriesSet(m model.Matrix) storage.SeriesSet { series := make([]storage.Series, 0, len(m)) for _, ss := range m { @@ -130,9 +125,18 @@ func matrixToSeriesSet(m model.Matrix) storage.SeriesSet { samples: ss.Values, }) } - return &concreteSeriesSet{ - series: series, + return newConcreteSeriesSet(series) +} + +func metricsToSeriesSet(ms []metric.Metric) storage.SeriesSet { + series := make([]storage.Series, 0, len(ms)) + for _, m := range ms { + series = append(series, &concreteSeries{ + labels: metricToLabels(m.Metric), + samples: nil, + }) } + return newConcreteSeriesSet(series) } func metricToLabels(m model.Metric) labels.Labels { @@ -148,3 +152,17 @@ func metricToLabels(m model.Metric) labels.Labels { sort.Sort(ls) return ls } + +func labelsToMetric(ls labels.Labels) model.Metric { + m := make(model.Metric, len(ls)) + for _, l := range ls { + m[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + return m +} + +type byLabels []storage.Series + +func (b byLabels) Len() int { return len(b) } +func (b byLabels) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i].Labels(), b[j].Labels()) < 0 } diff --git a/pkg/querier/series_set_test.go b/pkg/querier/series_set_test.go index e006f3d257e..9f5e3528014 100644 --- 
a/pkg/querier/series_set_test.go +++ b/pkg/querier/series_set_test.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" + "github.com/stretchr/testify/require" ) func TestConcreteSeriesSet(t *testing.T) { @@ -17,22 +18,37 @@ func TestConcreteSeriesSet(t *testing.T) { labels: labels.FromStrings("foo", "baz"), samples: []model.SamplePair{{Value: 3, Timestamp: 4}}, } - c := &concreteSeriesSet{ - series: []storage.Series{series1, series2}, - } - if !c.Next() { - t.Fatalf("Expected Next() to be true.") - } - if c.At() != series1 { - t.Fatalf("Unexpected series returned.") - } - if !c.Next() { - t.Fatalf("Expected Next() to be true.") - } - if c.At() != series2 { - t.Fatalf("Unexpected series returned.") - } - if c.Next() { - t.Fatalf("Expected Next() to be false.") - } + c := newConcreteSeriesSet([]storage.Series{series2, series1}) + require.True(t, c.Next()) + require.Equal(t, series1, c.At()) + require.True(t, c.Next()) + require.Equal(t, series2, c.At()) + require.False(t, c.Next()) +} + +func TestMatrixToSeriesSetSortsMetricLabels(t *testing.T) { + matrix := model.Matrix{ + { + Metric: model.Metric{ + model.MetricNameLabel: "testmetric", + "e": "f", + "a": "b", + "g": "h", + "c": "d", + }, + Values: []model.SamplePair{{Timestamp: 0, Value: 0}}, + }, + } + ss := matrixToSeriesSet(matrix) + require.True(t, ss.Next()) + require.NoError(t, ss.Err()) + + l := ss.At().Labels() + require.Equal(t, labels.Labels{ + {Name: string(model.MetricNameLabel), Value: "testmetric"}, + {Name: "a", Value: "b"}, + {Name: "c", Value: "d"}, + {Name: "e", Value: "f"}, + {Name: "g", Value: "h"}, + }, l) } diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index ec98c6fd1f6..0d539020d72 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -29,7 +29,7 @@ func newTestRuler(t *testing.T, alertmanagerURL string) *Ruler { // other kinds of tests. engine := promql.NewEngine(nil, nil, 20, 2*time.Minute) - queryable := querier.NewQueryable(nil, nil) + queryable := querier.NewQueryable(nil, nil, false) ruler, err := NewRuler(cfg, engine, queryable, nil) if err != nil { t.Fatal(err) diff --git a/pkg/util/log.go b/pkg/util/log.go index 70206557219..4edb6d7c388 100644 --- a/pkg/util/log.go +++ b/pkg/util/log.go @@ -1,117 +1,75 @@ package util import ( - "flag" "os" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/logging" + "github.com/weaveworks/common/server" "github.com/weaveworks/common/user" "golang.org/x/net/context" ) -// Logger is a shared go-kit logger. -// TODO: Change all components to take a non-global logger via their constructors. -var Logger = log.NewNopLogger() +var ( + // Logger is a shared go-kit logger. + // TODO: Change all components to take a non-global logger via their constructors. + Logger = log.NewNopLogger() -// InitLogger initializes the global logger according to the allowed log level. -func InitLogger(level AllowedLevel) { - Logger = MustNewPrometheusLogger(level) -} + logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "log_messages_total", + Help: "Total number of log messages.", + }, []string{"level"}) -// LogLevel supports registering a flag for the desired log level. 
-type LogLevel struct { - AllowedLevel -} + supportedLevels = []level.Value{ + level.DebugValue(), + level.InfoValue(), + level.WarnValue(), + level.ErrorValue(), + } +) -// RegisterFlags adds the log level flag to the provided flagset. -func (l *LogLevel) RegisterFlags(f *flag.FlagSet) { - l.Set("info") - f.Var( - &l.AllowedLevel, - "log.level", - "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]", - ) +func init() { + prometheus.MustRegister(logMessages) } -// WithContext returns a Logger that has information about the current user in -// its details. -// -// e.g. -// log := util.WithContext(ctx) -// log.Errorf("Could not chunk chunks: %v", err) -func WithContext(ctx context.Context, l log.Logger) log.Logger { - // Weaveworks uses "orgs" and "orgID" to represent Cortex users, - // even though the code-base generally uses `userID` to refer to the same thing. - userID, err := user.ExtractOrgID(ctx) +// InitLogger initialises the global gokit logger (util.Logger) and overrides the +// default logger for the server. +func InitLogger(cfg *server.Config) { + l, err := NewPrometheusLogger(cfg.LogLevel) if err != nil { - return l + panic(err) } - return WithUserID(userID, l) -} -// WithUserID returns a Logger that has information about the current user in -// its details. -func WithUserID(userID string, l log.Logger) log.Logger { - // See note in WithContext. - return log.With(l, "org_id", userID) + Logger = l + cfg.Log = logging.GoKit(l) } // PrometheusLogger exposes Prometheus counters for each of go-kit's log levels. type PrometheusLogger struct { - counterVec *prometheus.CounterVec - logger log.Logger + logger log.Logger } -var supportedLevels = []level.Value{level.DebugValue(), level.InfoValue(), level.WarnValue(), level.ErrorValue()} - -// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes Prometheus counters for various log levels. -// Contrarily to MustNewPrometheusLogger, it returns an error to the caller in case of issue. -// Use NewPrometheusLogger if you want more control. Use MustNewPrometheusLogger if you want a less verbose logger creation. -func NewPrometheusLogger(al AllowedLevel) (log.Logger, error) { +// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes +// Prometheus counters for various log levels. 
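+// Every message logged through the returned logger also increments the
+// log_messages_total counter for its level.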
+func NewPrometheusLogger(l logging.Level) (log.Logger, error) { // This code copy-pasted from prometheus/common/promlog.New() - l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - l = al.Filter(l) + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = level.NewFilter(logger, l.Gokit) - counterVec := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_messages", - Help: "Total number of log messages.", - }, []string{"level"}) // Initialise counters for all supported levels: for _, level := range supportedLevels { - counterVec.WithLabelValues(level.String()) - } - err := prometheus.Register(counterVec) - // If another library already registered the same metric, use it - if err != nil { - ar, ok := err.(prometheus.AlreadyRegisteredError) - if !ok { - return nil, err - } - counterVec, ok = ar.ExistingCollector.(*prometheus.CounterVec) - if !ok { - return nil, err - } - } - l = &PrometheusLogger{ - counterVec: counterVec, - logger: l, + logMessages.WithLabelValues(level.String()) } - // DefaultCaller must be the last wrapper - l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - return l, nil -} -// MustNewPrometheusLogger creates a new instance of PrometheusLogger which exposes Prometheus counters for various log levels. -// Contrarily to NewPrometheusLogger, it does not return any error to the caller, but panics instead. -// Use MustNewPrometheusLogger if you want a less verbose logger creation. Use NewPrometheusLogger if you want more control. -func MustNewPrometheusLogger(al AllowedLevel) log.Logger { - logger, err := NewPrometheusLogger(al) - if err != nil { - panic(err) + logger = &PrometheusLogger{ + logger: logger, } - return logger + + // DefaultCaller must be the last wrapper + logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + return logger, nil } // Log increments the appropriate Prometheus counter depending on the log level. @@ -124,6 +82,29 @@ func (pl *PrometheusLogger) Log(kv ...interface{}) error { break } } - pl.counterVec.WithLabelValues(l).Inc() + logMessages.WithLabelValues(l).Inc() return nil } + +// WithContext returns a Logger that has information about the current user in +// its details. +// +// e.g. +// log := util.WithContext(ctx) +// log.Errorf("Could not chunk chunks: %v", err) +func WithContext(ctx context.Context, l log.Logger) log.Logger { + // Weaveworks uses "orgs" and "orgID" to represent Cortex users, + // even though the code-base generally uses `userID` to refer to the same thing. + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return l + } + return WithUserID(userID, l) +} + +// WithUserID returns a Logger that has information about the current user in +// its details. +func WithUserID(userID string, l log.Logger) log.Logger { + // See note in WithContext. + return log.With(l, "org_id", userID) +} diff --git a/pkg/util/promlog.go b/pkg/util/promlog.go deleted file mode 100644 index 9ef673c02f0..00000000000 --- a/pkg/util/promlog.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copy-pasted from prometheus/common/promlog until -// https://github.com/prometheus/common/pull/116/files is merged -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "os" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" -) - -// AllowedLevel is a settable identifier for the minimum level a log entry -// must be have. -type AllowedLevel struct { - s string - o level.Option -} - -func (l *AllowedLevel) String() string { - return l.s -} - -// Set updates the value of the allowed level. -func (l *AllowedLevel) Set(s string) error { - switch s { - case "debug": - l.o = level.AllowDebug() - case "info": - l.o = level.AllowInfo() - case "warn": - l.o = level.AllowWarn() - case "error": - l.o = level.AllowError() - default: - return errors.Errorf("unrecognized log level %q", s) - } - l.s = s - return nil -} - -// Filter wraps logger with a filter corresponding to the allowed level -func (l *AllowedLevel) Filter(logger log.Logger) log.Logger { - return level.NewFilter(logger, l.o) -} - -// New returns a new leveled oklog logger in the logfmt format. Each logged line will be annotated -// with a timestamp. The output always goes to stderr. -func New(al AllowedLevel) log.Logger { - l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - l = al.Filter(l) - l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - return l -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md index c8ef21aadc4..78c49dbbeaa 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md @@ -28,7 +28,9 @@ conn, err := grpc.Dial( address, ... // other options grpc.WithUnaryInterceptor( - otgrpc.OpenTracingClientInterceptor(tracer))) + otgrpc.OpenTracingClientInterceptor(tracer)), + grpc.WithStreamInterceptor( + otgrpc.OpenTracingStreamClientInterceptor(tracer))) // All future RPC activity involving `conn` will be automatically traced. ``` @@ -46,7 +48,9 @@ var tracer opentracing.Tracer = ... s := grpc.NewServer( ... // other options grpc.UnaryInterceptor( - otgrpc.OpenTracingServerInterceptor(tracer))) + otgrpc.OpenTracingServerInterceptor(tracer)), + grpc.StreamInterceptor( + otgrpc.OpenTracingStreamServerInterceptor(tracer))) // All future RPC activity involving `s` will be automatically traced. 
``` diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go index 3975c3cf510..3414e55cb1f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go @@ -7,6 +7,9 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + "io" + "runtime" + "sync/atomic" ) // OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable @@ -50,19 +53,7 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) gRPCComponentTag, ) defer clientSpan.Finish() - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } else { - md = md.Copy() - } - mdWriter := metadataReaderWriter{md} - err = tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) - // We have no better place to record an error than the Span itself :-/ - if err != nil { - clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) - } - ctx = metadata.NewOutgoingContext(ctx, md) + ctx = injectSpanContext(ctx, tracer, clientSpan) if otgrpcOpts.logPayloads { clientSpan.LogFields(log.Object("gRPC request", req)) } @@ -81,3 +72,168 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) return err } } + +// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating +// a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + opts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { + return streamer(ctx, desc, cc, method, opts...) + } + + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + ctx = injectSpanContext(ctx, tracer, clientSpan) + cs, err := streamer(ctx, desc, cc, method, opts...) 
+ if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + clientSpan.Finish() + return cs, err + } + return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil + } +} + +func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { + finishChan := make(chan struct{}) + + isFinished := new(int32) + *isFinished = 0 + finishFunc := func(err error) { + // The current OpenTracing specification forbids finishing a span more than + // once. Since we have multiple code paths that could concurrently call + // `finishFunc`, we need to add some sort of synchronization to guard against + // multiple finishing. + if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { + return + } + close(finishChan) + defer clientSpan.Finish() + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, nil, nil, err) + } + } + go func() { + select { + case <-finishChan: + // The client span is being finished by another code path; hence, no + // action is necessary. + case <-cs.Context().Done(): + finishFunc(cs.Context().Err()) + } + }() + otcs := &openTracingClientStream{ + ClientStream: cs, + desc: desc, + finishFunc: finishFunc, + } + + // The `ClientStream` interface allows one to omit calling `Recv` if it's + // known that the result will be `io.EOF`. See + // http://stackoverflow.com/q/42915337 + // In such cases, there's nothing that triggers the span to finish. We, + // therefore, set a finalizer so that the span and the context goroutine will + // at least be cleaned up when the garbage collector is run. 
+ runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { + otcs.finishFunc(nil) + }) + return otcs +} + +type openTracingClientStream struct { + grpc.ClientStream + desc *grpc.StreamDesc + finishFunc func(error) +} + +func (cs *openTracingClientStream) Header() (metadata.MD, error) { + md, err := cs.ClientStream.Header() + if err != nil { + cs.finishFunc(err) + } + return md, err +} + +func (cs *openTracingClientStream) SendMsg(m interface{}) error { + err := cs.ClientStream.SendMsg(m) + if err != nil { + cs.finishFunc(err) + } + return err +} + +func (cs *openTracingClientStream) RecvMsg(m interface{}) error { + err := cs.ClientStream.RecvMsg(m) + if err == io.EOF { + cs.finishFunc(nil) + return err + } else if err != nil { + cs.finishFunc(err) + return err + } + if !cs.desc.ServerStreams { + cs.finishFunc(nil) + } + return err +} + +func (cs *openTracingClientStream) CloseSend() error { + err := cs.ClientStream.CloseSend() + if err != nil { + cs.finishFunc(err) + } + return err +} + +func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.New(nil) + } else { + md = md.Copy() + } + mdWriter := metadataReaderWriter{md} + err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) + // We have no better place to record an error than the Span itself :-/ + if err != nil { + clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) + } + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go index 2e0d5ab9d9f..62cf54d2217 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go @@ -33,11 +33,7 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (resp interface{}, err error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - md = metadata.New(nil) - } - spanContext, err := tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) + spanContext, err := extractSpanContext(ctx, tracer) if err != nil && err != opentracing.ErrSpanContextNotFound { // TODO: establish some sort of error reporting mechanism here. We // don't know where to put such an error and must rely on Tracer @@ -73,3 +69,73 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) return resp, err } } + +// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by +// creating a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) +// +// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC +// metadata; if found, the server span will act as the ChildOf that RPC +// SpanContext. +// +// Root or not, the server Span will be embedded in the context.Context for the +// application-specific gRPC handler(s) to access. 
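As a rough illustration of what the stream server interceptor described above enables (not part of this change; the helper name is hypothetical and canonical import paths are used): because the interceptor swaps in a stream whose `Context()` carries the server span, a streaming handler can reach that span directly.

```go
package example

import (
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

// tagStream annotates the server span that the stream server interceptor
// attached to the stream's context; it is a no-op when no span is present.
func tagStream(ss grpc.ServerStream, key string, value interface{}) {
	if span := opentracing.SpanFromContext(ss.Context()); span != nil {
		span.SetTag(key, value)
	}
}
```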
+func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + spanContext, err := extractSpanContext(ss.Context(), tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { + return handler(srv, ss) + } + + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + ss = &openTracingServerStream{ + ServerStream: ss, + ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), + } + err = handler(srv, ss) + if err != nil { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) + } + return err + } +} + +type openTracingServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (ss *openTracingServerStream) Context() context.Context { + return ss.ctx +} + +func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.New(nil) + } + return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) +} diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml index 9a2fd0e3d4a..0538f1bfc00 100644 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -3,6 +3,7 @@ language: go go: - 1.6 - 1.7 + - 1.8 - tip install: diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md index 5641a6d6487..1fb77d227fe 100644 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -95,7 +95,7 @@ reference. // Transmit the span's TraceContext as HTTP headers on our // outbound request. 
- tracer.Inject( + opentracing.GlobalTracer().Inject( span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(httpReq.Header)) diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go index 09f647b5afd..c67ab5eef55 100644 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -15,7 +15,7 @@ import opentracing "github.com/opentracing/opentracing-go" // var ( ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server) + // SpanKind (client/server or producer/consumer) ////////////////////////////////////////////////////////////////////// // SpanKind hints at relationship between spans, e.g. client/server @@ -31,6 +31,16 @@ var ( SpanKindRPCServerEnum = SpanKindEnum("server") SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} + // SpanKindProducer marks a span representing the producer-side of a + // message bus + SpanKindProducerEnum = SpanKindEnum("producer") + SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} + + // SpanKindConsumer marks a span representing the consumer-side of a + // message bus + SpanKindConsumerEnum = SpanKindEnum("consumer") + SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} + ////////////////////////////////////////////////////////////////////// // Component name ////////////////////////////////////////////////////////////////////// @@ -52,9 +62,14 @@ var ( // communications, like an RPC call. ////////////////////////////////////////////////////////////////////// - // PeerService records the service name of the peer + // PeerService records the service name of the peer. PeerService = stringTagName("peer.service") + // PeerAddress records the address name of the peer. This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = stringTagName("peer.address") + // PeerHostname records the host name of the peer PeerHostname = stringTagName("peer.hostname") @@ -82,6 +97,31 @@ var ( // HTTP response. HTTPStatusCode = uint16TagName("http.status_code") + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = stringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = stringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = stringTagName("db.type") + + // DBUser is a username for accessing database. 
+ DBUser = stringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = stringTagName("message_bus.destination") + ////////////////////////////////////////////////////////////////////// // Error Tag ////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 746aae3bf7b..b7a566764d7 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -370,6 +370,13 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } } } + + // Add index to the static config target groups for unique identification + // within scrape pool. + for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + tg.Source = fmt.Sprintf("%d", i) + } + return nil } @@ -432,6 +439,13 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er } } } + + // Add index to the static config target groups for unique identification + // within scrape pool. + for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { + tg.Source = fmt.Sprintf("%d", i) + } + return nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go index 4a7c6cf906e..a0bcf134611 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go +++ b/vendor/github.com/prometheus/prometheus/discovery/consul/consul.go @@ -108,7 +108,7 @@ type SDConfig struct { // See https://www.consul.io/api/catalog.html#list-services // The list of services for which targets are discovered. // Defaults to all services if empty. - Services []string `yaml:"services"` + Services []string `yaml:"services,omitempty"` // An optional tag used to filter instances inside a service. A single tag is supported // here to match the Consul API. ServiceTag string `yaml:"tag,omitempty"` diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 780e2581985..be6337822d0 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -279,7 +279,7 @@ func (d *Discovery) deleteTimestamp(filename string) { // stop shuts down the file watcher. 
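Referring back to the span-kind, peer, database and message-bus tags added to `ext/tags.go` above, a small hedged sketch of how they might be applied (the span name, broker address, database name and SQL statement are made-up values, not part of this change):

```go
package example

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func handleOrderMessage() {
	span := opentracing.GlobalTracer().StartSpan("process-order")
	defer span.Finish()

	// Consumer side of a message bus hop.
	ext.SpanKindConsumer.Set(span)
	ext.MessageBusDestination.Set(span, "orders")
	ext.PeerAddress.Set(span, "kafka-1:9092")

	// Database call made while handling the message.
	ext.DBType.Set(span, "sql")
	ext.DBInstance.Set(span, "orders_db")
	ext.DBStatement.Set(span, "SELECT id FROM orders WHERE id = ?")
}
```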
func (d *Discovery) stop() { - level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", d.paths) + level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) done := make(chan struct{}) defer close(done) @@ -299,10 +299,10 @@ func (d *Discovery) stop() { } }() if err := d.watcher.Close(); err != nil { - level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", d.paths, "err", err) + level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) } - level.Debug(d.logger).Log("File discovery stopped", "paths", d.paths) + level.Debug(d.logger).Log("msg", "File discovery stopped") } // refresh reads all files matching the discovery's patterns and sends the respective diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go index 592550212f1..0ff3b0e0a8e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go @@ -176,13 +176,22 @@ func (s *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group { } tg.Labels = ingressLabels(ingress) - schema := "http" - if ingress.Spec.TLS != nil { - schema = "https" + tlsHosts := make(map[string]struct{}) + for _, tls := range ingress.Spec.TLS { + for _, host := range tls.Hosts { + tlsHosts[host] = struct{}{} + } } + for _, rule := range ingress.Spec.Rules { paths := pathsFromIngressRule(&rule.IngressRuleValue) + schema := "http" + _, isTLS := tlsHosts[rule.Host] + if isTLS { + schema = "https" + } + for _, path := range paths { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(rule.Host), diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index a55afde0785..faf4087ee42 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -84,13 +84,13 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { // SDConfig is the configuration for Kubernetes service discovery. type SDConfig struct { - APIServer config_util.URL `yaml:"api_server"` + APIServer config_util.URL `yaml:"api_server,omitempty"` Role Role `yaml:"role"` BasicAuth *config_util.BasicAuth `yaml:"basic_auth,omitempty"` BearerToken config_util.Secret `yaml:"bearer_token,omitempty"` BearerTokenFile string `yaml:"bearer_token_file,omitempty"` TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"` - NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces"` + NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
@@ -250,28 +250,31 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { switch d.role { case RoleEndpoint: for _, namespace := range namespaces { + e := d.client.CoreV1().Endpoints(namespace) elw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.CoreV1().Endpoints(namespace).List(options) + return e.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.CoreV1().Endpoints(namespace).Watch(options) + return e.Watch(options) }, } + s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.CoreV1().Services(namespace).List(options) + return s.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.CoreV1().Services(namespace).Watch(options) + return s.Watch(options) }, } + p := d.client.CoreV1().Pods(namespace) plw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.CoreV1().Pods(namespace).List(options) + return p.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.CoreV1().Pods(namespace).Watch(options) + return p.Watch(options) }, } eps := NewEndpoints( @@ -287,12 +290,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RolePod: for _, namespace := range namespaces { + p := d.client.CoreV1().Pods(namespace) plw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.CoreV1().Pods(namespace).List(options) + return p.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.CoreV1().Pods(namespace).Watch(options) + return p.Watch(options) }, } pod := NewPod( @@ -304,12 +308,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleService: for _, namespace := range namespaces { + s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.CoreV1().Services(namespace).List(options) + return s.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.CoreV1().Services(namespace).Watch(options) + return s.Watch(options) }, } svc := NewService( @@ -321,12 +326,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleIngress: for _, namespace := range namespaces { + i := d.client.ExtensionsV1beta1().Ingresses(namespace) ilw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return d.client.ExtensionsV1beta1().Ingresses(namespace).List(options) + return i.List(options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return d.client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) + return i.Watch(options) }, } ingress := NewIngress( diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 669a91dc559..97468a54907 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -285,7 +285,7 @@ func (m *Manager) providersFromConfig(cfg 
sd_config.ServiceDiscoveryConfig) map[ app("triton", i, t) } if len(cfg.StaticConfigs) > 0 { - app("static", 0, NewStaticProvider(cfg.StaticConfigs)) + app("static", 0, &StaticProvider{cfg.StaticConfigs}) } return providers @@ -296,15 +296,6 @@ type StaticProvider struct { TargetGroups []*targetgroup.Group } -// NewStaticProvider returns a StaticProvider configured with the given -// target groups. -func NewStaticProvider(groups []*targetgroup.Group) *StaticProvider { - for i, tg := range groups { - tg.Source = fmt.Sprintf("%d", i) - } - return &StaticProvider{groups} -} - // Run implements the Worker interface. func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { // We still have to consider that the consumer exits right away in which case diff --git a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go index 60a26e8605c..4871214576a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go +++ b/vendor/github.com/prometheus/prometheus/discovery/zookeeper/zookeeper.go @@ -137,8 +137,11 @@ func NewDiscovery( logger = log.NewNopLogger() } - conn, _, err := zk.Connect(srvs, timeout) - conn.SetLogger(treecache.NewZookeeperLogger(logger)) + conn, _, err := zk.Connect( + srvs, timeout, + func(c *zk.Conn) { + c.SetLogger(treecache.NewZookeeperLogger(logger)) + }) if err != nil { return nil } diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md new file mode 100644 index 00000000000..d2aa933ef9d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/README.md @@ -0,0 +1,14 @@ +The compiled protobufs are version controlled and you won't normally need to +re-compile them when building Prometheus. + +If however you have modified the defs and do need to re-compile, run +`./scripts/genproto.sh` from the parent dir. + +In order for the script to run, you'll need `protoc` (version 3.5) in your +PATH, and the following Go packages installed: + +- github.com/gogo/protobuf +- github.com/gogo/protobuf/protoc-gen-gogofast +- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/ +- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger +- golang.org/x/tools/cmd/goimports diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index adf3bf00cd8..20f2faf2292 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -493,7 +493,7 @@ func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *Ev level.Error(ng.logger).Log("msg", "error selecting series set", "err", err) return err } - n.series, err = expandSeriesSet(set) + n.series, err = expandSeriesSet(ctx, set) if err != nil { // TODO(fabxc): use multi-error. 
level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) @@ -508,7 +508,7 @@ func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *Ev level.Error(ng.logger).Log("msg", "error selecting series set", "err", err) return err } - n.series, err = expandSeriesSet(set) + n.series, err = expandSeriesSet(ctx, set) if err != nil { level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) return err @@ -538,8 +538,13 @@ func extractFuncFromPath(p []Node) string { return extractFuncFromPath(p[:len(p)-1]) } -func expandSeriesSet(it storage.SeriesSet) (res []storage.Series, err error) { +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, err error) { for it.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } res = append(res, it.At()) } return res, it.Err() @@ -1039,6 +1044,9 @@ func (ev *evaluator) matrixSelector(node *MatrixSelector) Matrix { var it *storage.BufferedSeriesIterator for i, s := range node.series { + if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } if it == nil { it = storage.NewBuffer(s.Iterator(), durationMilliseconds(node.Range)) } else { diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 0b512881c6a..14a5f399f49 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -160,7 +160,7 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) { } ts := testStartTime.Add(time.Duration(offset)) - cmd := newEvalCmd(expr, ts) + cmd := newEvalCmd(expr, ts, i+1) switch mod { case "ordered": cmd.ordered = true @@ -303,6 +303,7 @@ func (cmd *loadCmd) append(a storage.Appender) error { type evalCmd struct { expr string start time.Time + line int fail, ordered bool @@ -319,10 +320,11 @@ func (e entry) String() string { return fmt.Sprintf("%d: %s", e.pos, e.vals) } -func newEvalCmd(expr string, start time.Time) *evalCmd { +func newEvalCmd(expr string, start time.Time, line int) *evalCmd { return &evalCmd{ expr: expr, start: start, + line: line, metrics: map[uint64]labels.Labels{}, expected: map[uint64]entry{}, @@ -437,11 +439,11 @@ func (t *Test) exec(tc testCommand) error { if cmd.fail { return nil } - return fmt.Errorf("error evaluating query %q: %s", cmd.expr, res.Err) + return fmt.Errorf("error evaluating query %q (line %d): %s", cmd.expr, cmd.line, res.Err) } defer q.Close() if res.Err == nil && cmd.fail { - return fmt.Errorf("expected error evaluating query but got none") + return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } err := cmd.compareResult(res.Value) @@ -454,7 +456,7 @@ func (t *Test) exec(tc testCommand) error { q, _ = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute) rangeRes := q.Exec(t.context) if rangeRes.Err != nil { - return fmt.Errorf("error evaluating query %q in range mode: %s", cmd.expr, rangeRes.Err) + return fmt.Errorf("error evaluating query %q (line %d) in range mode: %s", cmd.expr, cmd.line, rangeRes.Err) } defer q.Close() if cmd.ordered { @@ -477,7 +479,7 @@ func (t *Test) exec(tc testCommand) error { err = cmd.compareResult(vec) } if err != nil { - return fmt.Errorf("error in %s %s rande mode: %s", cmd, cmd.expr, err) + return fmt.Errorf("error 
in %s %s (line %d) rande mode: %s", cmd, cmd.expr, cmd.line, err) } default: diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 32828715972..3ab994391c5 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -450,10 +450,10 @@ func (c *mergeIterator) Next() bool { return false } - currt, currv := c.At() + currt, _ := c.At() for len(c.h) > 0 { - nextt, nextv := c.h[0].At() - if nextt != currt || nextv != currv { + nextt, _ := c.h[0].At() + if nextt != currt { break } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index d3858de74f3..66037a9b5ff 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -96,9 +96,12 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, p *storage.SelectParams return nil, err } - rp := &prompb.ReadHints{ - StepMs: p.Step, - Func: p.Func, + var rp *prompb.ReadHints + if p != nil { + rp = &prompb.ReadHints{ + StepMs: p.Step, + Func: p.Func, + } } return &prompb.Query{ diff --git a/vendor/github.com/sercand/kuberesolver/resolver.go b/vendor/github.com/sercand/kuberesolver/resolver.go index f591c62714c..1c6a92d4cb6 100644 --- a/vendor/github.com/sercand/kuberesolver/resolver.go +++ b/vendor/github.com/sercand/kuberesolver/resolver.go @@ -2,6 +2,7 @@ package kuberesolver import ( "fmt" + "io/ioutil" "net/http" "net/url" "time" @@ -66,7 +67,8 @@ func (r *kubeResolver) watch(target string, stopCh <-chan struct{}, resultCh cha } if resp.StatusCode != http.StatusOK { defer resp.Body.Close() - return fmt.Errorf("invalid response code %d", resp.StatusCode) + rbody, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf("invalid response code %d: %s", resp.StatusCode, rbody) } sw := newStreamWatcher(resp.Body) for { diff --git a/vendor/github.com/sercand/kuberesolver/util.go b/vendor/github.com/sercand/kuberesolver/util.go index 6ad5e3cbbf4..e0726092002 100644 --- a/vendor/github.com/sercand/kuberesolver/util.go +++ b/vendor/github.com/sercand/kuberesolver/util.go @@ -1,8 +1,7 @@ package kuberesolver import ( - "fmt" - "runtime" + "runtime/debug" "time" "google.golang.org/grpc/grpclog" @@ -30,14 +29,7 @@ func until(f func(), period time.Duration, stopCh <-chan struct{}) { // HandleCrash simply catches a crash and logs an error. Meant to be called via defer. 
func handleCrash() { if r := recover(); r != nil { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) - } + callers := string(debug.Stack()) grpclog.Printf("kuberesolver: recovered from panic: %#v (%v)\n%v", r, r, callers) } } diff --git a/vendor/github.com/weaveworks/common/logging/dedupe.go b/vendor/github.com/weaveworks/common/logging/dedupe.go new file mode 100644 index 00000000000..caa523ef93f --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/dedupe.go @@ -0,0 +1,137 @@ +package logging + +import ( + "fmt" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + defaultDedupeInterval = time.Minute +) + +// SetupDeduplication should be performed after any other logging setup. +// For all logs less severe or equal to the given log level (but still higher than the logger's configured log level), +// these logs will be 'deduplicated'. What this means is that, excluding certain special fields like time, multiple +// identical log entries will be grouped up and a summary message emitted. +// For example, instead of: +// 00:00:00 INFO User 123 did xyz +// 00:00:10 INFO User 123 did xyz +// 00:00:25 INFO User 123 did xyz +// 00:00:55 INFO User 123 did xyz +// you would get: +// 00:00:00 INFO User 123 did xyz +// 00:01:00 INFO Repeated 3 times: User 123 did xyz +// The interval argument controls how long to wait for additional messages to arrive before reporting. +// Increase it to deduplicate more aggressively, decrease it to lower latency from a log occurring to it appearing. +// Set it to 0 to pick a sensible default value (recommended). +// NOTE: For simplicity and efficiency, fields are considered 'equal' if and only if their string representations (%v) are equal. +func SetupDeduplication(logLevel string, interval time.Duration) error { + dedupeLevel, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("Error parsing log level: %v", err) + } + if interval <= 0 { + interval = defaultDedupeInterval + } + + // We use a special Formatter to either format the log using the original formatter, or to return "" + // so nothing will be written for that event. The repeated entries are later logged along with a field flag + // that tells the formatter to ignore the message. 
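A minimal usage sketch of the deduplication setup as relocated into `dedupe.go` (the level string is only an example): `Setup` configures the base logrus logger first, and `SetupDeduplication` then wraps its formatter.

```go
package example

import "github.com/weaveworks/common/logging"

func initLogging() error {
	if err := logging.Setup("info"); err != nil {
		return err
	}
	// Deduplicate entries at or below info severity; 0 picks the default one-minute interval.
	return logging.SetupDeduplication("info", 0)
}
```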
+ stdLogger := log.StandardLogger() + stdLogger.Formatter = newDedupeFormatter(stdLogger.Formatter, dedupeLevel, interval) + return nil +} + +type entryCount struct { + entry log.Entry + count int +} + +type dedupeFormatter struct { + innerFormatter log.Formatter + level log.Level + interval time.Duration + seen map[string]entryCount + lock sync.Mutex +} + +func newDedupeFormatter(innerFormatter log.Formatter, level log.Level, interval time.Duration) *dedupeFormatter { + return &dedupeFormatter{ + innerFormatter: innerFormatter, + level: level, + interval: interval, + seen: map[string]entryCount{}, + } +} + +func (f *dedupeFormatter) Format(entry *log.Entry) ([]byte, error) { + if f.shouldLog(entry) { + b, err := f.innerFormatter.Format(entry) + return b, err + } + return []byte{}, nil +} + +func (f *dedupeFormatter) shouldLog(entry *log.Entry) bool { + if _, ok := entry.Data["deduplicated"]; ok { + // ignore our own logs about deduped messages + return true + } + if entry.Level < f.level { + // ignore logs more severe than our level + return true + } + key := fmt.Sprintf("%s %s", entry.Message, fieldsToString(entry.Data)) + f.lock.Lock() + defer f.lock.Unlock() + if ec, ok := f.seen[key]; ok { + // already seen, increment count and do not log + ec.count++ + f.seen[key] = ec + return false + } + // New message, log it but add it to seen. + // We need to copy because the pointer ceases to be valid after we return from Format + f.seen[key] = entryCount{entry: *entry} + go f.evictEntry(key) // queue to evict later + return true +} + +// Wait for interval seconds then evict the entry and send the log +func (f *dedupeFormatter) evictEntry(key string) { + time.Sleep(f.interval) + var ec entryCount + func() { + f.lock.Lock() + defer f.lock.Unlock() + ec = f.seen[key] + delete(f.seen, key) + }() + if ec.count == 0 { + return + } + entry := log.WithFields(ec.entry.Data).WithField("deduplicated", ec.count) + message := fmt.Sprintf("Repeated %d times: %s", ec.count, ec.entry.Message) + // There's no way to choose the log level dynamically, so we have to do this hack + map[log.Level]func(args ...interface{}){ + log.PanicLevel: entry.Panic, + log.FatalLevel: entry.Fatal, + log.ErrorLevel: entry.Error, + log.WarnLevel: entry.Warn, + log.InfoLevel: entry.Info, + log.DebugLevel: entry.Debug, + }[ec.entry.Level](message) +} + +func fieldsToString(data log.Fields) string { + parts := make([]string, 0, len(data)) + // traversal order here is arbitrary but stable, which is fine for our purposes + for k, v := range data { + parts = append(parts, fmt.Sprintf("%s=%v", k, v)) + } + return strings.Join(parts, " ") +} diff --git a/vendor/github.com/weaveworks/common/logging/global.go b/vendor/github.com/weaveworks/common/logging/global.go new file mode 100644 index 00000000000..5dd69baac4e --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/global.go @@ -0,0 +1,58 @@ +package logging + +var global Interface = Noop() + +// Global returns the global logger. +func Global() Interface { + return global +} + +// SetGlobal sets the global logger. +func SetGlobal(i Interface) { + global = i +} + +// Debugf convenience function calls the global loggerr. +func Debugf(format string, args ...interface{}) { + global.Debugf(format, args...) +} + +// Debugln convenience function calls the global logger. +func Debugln(args ...interface{}) { + global.Debugln(args...) +} + +// Infof convenience function calls the global logger. 
+func Infof(format string, args ...interface{}) { + global.Infof(format, args...) +} + +// Infoln convenience function calls the global logger. +func Infoln(args ...interface{}) { + global.Infoln(args...) +} + +// Warnf convenience function calls the global logger. +func Warnf(format string, args ...interface{}) { + global.Warnf(format, args...) +} + +// Warnln convenience function calls the global logger. +func Warnln(args ...interface{}) { + global.Warnln(args...) +} + +// Errorf convenience function calls the global logger. +func Errorf(format string, args ...interface{}) { + global.Errorf(format, args...) +} + +// Errorln convenience function calls the global logger. +func Errorln(args ...interface{}) { + global.Errorln(args...) +} + +// WithField convenience function calls the global logger. +func WithField(key string, value interface{}) Interface { + return global.WithField(key, value) +} diff --git a/vendor/github.com/weaveworks/common/logging/gokit.go b/vendor/github.com/weaveworks/common/logging/gokit.go new file mode 100644 index 00000000000..b5137fa4d62 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/gokit.go @@ -0,0 +1,66 @@ +package logging + +import ( + "fmt" + "os" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +// NewGoKit creates a new Interface backed by a GoKit logger +func NewGoKit(l Level) Interface { + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = level.NewFilter(logger, l.Gokit) + logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + return gokit{logger} +} + +// GoKit wraps an existing gokit Logger. +func GoKit(logger log.Logger) Interface { + return gokit{logger} +} + +type gokit struct { + log.Logger +} + +func (g gokit) Debugf(format string, args ...interface{}) { + level.Debug(g.Logger).Log("msg", fmt.Sprintf(format, args...)) +} +func (g gokit) Debugln(args ...interface{}) { + level.Debug(g.Logger).Log("msg", fmt.Sprintln(args...)) +} + +func (g gokit) Infof(format string, args ...interface{}) { + level.Info(g.Logger).Log("msg", fmt.Sprintf(format, args...)) +} +func (g gokit) Infoln(args ...interface{}) { + level.Info(g.Logger).Log("msg", fmt.Sprintln(args...)) +} + +func (g gokit) Warnf(format string, args ...interface{}) { + level.Warn(g.Logger).Log("msg", fmt.Sprintf(format, args...)) +} +func (g gokit) Warnln(args ...interface{}) { + level.Warn(g.Logger).Log("msg", fmt.Sprintln(args...)) +} + +func (g gokit) Errorf(format string, args ...interface{}) { + level.Error(g.Logger).Log("msg", fmt.Sprintf(format, args...)) +} +func (g gokit) Errorln(args ...interface{}) { + level.Error(g.Logger).Log("msg", fmt.Sprintln(args...)) +} + +func (g gokit) WithField(key string, value interface{}) Interface { + return gokit{log.With(g.Logger, key, value)} +} + +func (g gokit) WithFields(fields Fields) Interface { + logger := g.Logger + for k, v := range fields { + logger = log.With(logger, k, v) + } + return gokit{logger} +} diff --git a/vendor/github.com/weaveworks/common/logging/interface.go b/vendor/github.com/weaveworks/common/logging/interface.go new file mode 100644 index 00000000000..f4625e474f8 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/interface.go @@ -0,0 +1,24 @@ +package logging + +// Interface 'unifies' gokit logging and logrus logging, such that +// the middleware in this repo can be used 
in projects which use either +// loggers. +type Interface interface { + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) + + WithField(key string, value interface{}) Interface + WithFields(Fields) Interface +} + +// Fields convenience type for adding multiple fields to a log statement. +type Fields map[string]interface{} diff --git a/vendor/github.com/weaveworks/common/logging/level.go b/vendor/github.com/weaveworks/common/logging/level.go new file mode 100644 index 00000000000..e9d60e63537 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/level.go @@ -0,0 +1,64 @@ +package logging + +// Copy-pasted from prometheus/common/promlog. +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "flag" + + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Level is a settable identifier for the minimum level a log entry +// must be have. +type Level struct { + s string + Logrus logrus.Level + Gokit level.Option +} + +// RegisterFlags adds the log level flag to the provided flagset. +func (l *Level) RegisterFlags(f *flag.FlagSet) { + l.Set("info") + f.Var(l, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error]") +} + +func (l *Level) String() string { + return l.s +} + +// Set updates the value of the allowed level. +func (l *Level) Set(s string) error { + switch s { + case "debug": + l.Logrus = logrus.DebugLevel + l.Gokit = level.AllowDebug() + case "info": + l.Logrus = logrus.InfoLevel + l.Gokit = level.AllowInfo() + case "warn": + l.Logrus = logrus.WarnLevel + l.Gokit = level.AllowWarn() + case "error": + l.Logrus = logrus.ErrorLevel + l.Gokit = level.AllowError() + default: + return errors.Errorf("unrecognized log level %q", s) + } + + l.s = s + return nil +} diff --git a/vendor/github.com/weaveworks/common/logging/logging.go b/vendor/github.com/weaveworks/common/logging/logging.go index 1047e7121d3..744b76c7482 100644 --- a/vendor/github.com/weaveworks/common/logging/logging.go +++ b/vendor/github.com/weaveworks/common/logging/logging.go @@ -1,24 +1,13 @@ package logging import ( - "bytes" "fmt" "os" - "strings" - "sync" - "time" - - "golang.org/x/net/context" log "github.com/sirupsen/logrus" - "github.com/weaveworks/common/user" "github.com/weaveworks/promrus" ) -const ( - defaultDedupeInterval = time.Minute -) - // Setup configures logging output to stderr, sets the log level and sets the formatter. 
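To show how the new `Level` and `Interface` types fit together, a small sketch (a hypothetical `main`, using the flag registered by `RegisterFlags` above):

```go
package main

import (
	"flag"

	"github.com/weaveworks/common/logging"
)

func main() {
	var level logging.Level
	level.RegisterFlags(flag.CommandLine) // adds -log.level (debug|info|warn|error)
	flag.Parse()

	logger := logging.NewGoKit(level) // or logging.NewLogrus(level)
	logger.Infof("log level set to %s", level.String())
}
```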
func Setup(logLevel string) error { log.SetOutput(os.Stderr) @@ -35,153 +24,3 @@ func Setup(logLevel string) error { log.AddHook(hook) return nil } - -// SetupDeduplication should be performed after any other logging setup. -// For all logs less severe or equal to the given log level (but still higher than the logger's configured log level), -// these logs will be 'deduplicated'. What this means is that, excluding certain special fields like time, multiple -// identical log entries will be grouped up and a summary message emitted. -// For example, instead of: -// 00:00:00 INFO User 123 did xyz -// 00:00:10 INFO User 123 did xyz -// 00:00:25 INFO User 123 did xyz -// 00:00:55 INFO User 123 did xyz -// you would get: -// 00:00:00 INFO User 123 did xyz -// 00:01:00 INFO Repeated 3 times: User 123 did xyz -// The interval argument controls how long to wait for additional messages to arrive before reporting. -// Increase it to deduplicate more aggressively, decrease it to lower latency from a log occurring to it appearing. -// Set it to 0 to pick a sensible default value (recommended). -// NOTE: For simplicity and efficiency, fields are considered 'equal' if and only if their string representations (%v) are equal. -func SetupDeduplication(logLevel string, interval time.Duration) error { - dedupeLevel, err := log.ParseLevel(logLevel) - if err != nil { - return fmt.Errorf("Error parsing log level: %v", err) - } - if interval <= 0 { - interval = defaultDedupeInterval - } - - // We use a special Formatter to either format the log using the original formatter, or to return "" - // so nothing will be written for that event. The repeated entries are later logged along with a field flag - // that tells the formatter to ignore the message. - stdLogger := log.StandardLogger() - stdLogger.Formatter = newDedupeFormatter(stdLogger.Formatter, dedupeLevel, interval) - return nil -} - -type textFormatter struct{} - -// Based off logrus.TextFormatter, which behaves completely -// differently when you don't want colored output -func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { - b := &bytes.Buffer{} - - levelText := strings.ToUpper(entry.Level.String())[0:4] - timeStamp := entry.Time.Format("2006/01/02 15:04:05.000000") - fmt.Fprintf(b, "%s: %s %s", levelText, timeStamp, entry.Message) - if len(entry.Data) > 0 { - b.WriteString(" " + fieldsToString(entry.Data)) - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -// With returns a log entry with common Weaveworks logging information. -// -// e.g. 
-// logger := logging.With(ctx) -// logger.Errorf("Some error") -func With(ctx context.Context) *log.Entry { - return log.WithFields(user.LogFields(ctx)) -} - -type entryCount struct { - entry log.Entry - count int -} - -type dedupeFormatter struct { - innerFormatter log.Formatter - level log.Level - interval time.Duration - seen map[string]entryCount - lock sync.Mutex -} - -func newDedupeFormatter(innerFormatter log.Formatter, level log.Level, interval time.Duration) *dedupeFormatter { - return &dedupeFormatter{ - innerFormatter: innerFormatter, - level: level, - interval: interval, - seen: map[string]entryCount{}, - } -} - -func (f *dedupeFormatter) Format(entry *log.Entry) ([]byte, error) { - if f.shouldLog(entry) { - b, err := f.innerFormatter.Format(entry) - return b, err - } - return []byte{}, nil -} - -func (f *dedupeFormatter) shouldLog(entry *log.Entry) bool { - if _, ok := entry.Data["deduplicated"]; ok { - // ignore our own logs about deduped messages - return true - } - if entry.Level < f.level { - // ignore logs more severe than our level - return true - } - key := fmt.Sprintf("%s %s", entry.Message, fieldsToString(entry.Data)) - f.lock.Lock() - defer f.lock.Unlock() - if ec, ok := f.seen[key]; ok { - // already seen, increment count and do not log - ec.count++ - f.seen[key] = ec - return false - } - // New message, log it but add it to seen. - // We need to copy because the pointer ceases to be valid after we return from Format - f.seen[key] = entryCount{entry: *entry} - go f.evictEntry(key) // queue to evict later - return true -} - -// Wait for interval seconds then evict the entry and send the log -func (f *dedupeFormatter) evictEntry(key string) { - time.Sleep(f.interval) - var ec entryCount - func() { - f.lock.Lock() - defer f.lock.Unlock() - ec = f.seen[key] - delete(f.seen, key) - }() - if ec.count == 0 { - return - } - entry := log.WithFields(ec.entry.Data).WithField("deduplicated", ec.count) - message := fmt.Sprintf("Repeated %d times: %s", ec.count, ec.entry.Message) - // There's no way to choose the log level dynamically, so we have to do this hack - map[log.Level]func(args ...interface{}){ - log.PanicLevel: entry.Panic, - log.FatalLevel: entry.Fatal, - log.ErrorLevel: entry.Error, - log.WarnLevel: entry.Warn, - log.InfoLevel: entry.Info, - log.DebugLevel: entry.Debug, - }[ec.entry.Level](message) -} - -func fieldsToString(data log.Fields) string { - parts := make([]string, 0, len(data)) - // traversal order here is arbitrary but stable, which is fine for our purposes - for k, v := range data { - parts = append(parts, fmt.Sprintf("%s=%v", k, v)) - } - return strings.Join(parts, " ") -} diff --git a/vendor/github.com/weaveworks/common/logging/logrus.go b/vendor/github.com/weaveworks/common/logging/logrus.go new file mode 100644 index 00000000000..7896b358bb5 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/logrus.go @@ -0,0 +1,74 @@ +package logging + +import ( + "bytes" + "fmt" + "os" + "strings" + + "github.com/sirupsen/logrus" +) + +// NewLogrus makes a new Interface backed by a logrus logger +func NewLogrus(level Level) Interface { + log := logrus.New() + log.Out = os.Stderr + log.Level = level.Logrus + log.Formatter = &textFormatter{} + return logrusLogger{log} +} + +// Logrus wraps an existing Logrus logger. 
+func Logrus(l *logrus.Logger) Interface { + return logrusLogger{l} +} + +type logrusLogger struct { + *logrus.Logger +} + +func (l logrusLogger) WithField(key string, value interface{}) Interface { + return logusEntry{ + Entry: l.Logger.WithField(key, value), + } +} + +func (l logrusLogger) WithFields(fields Fields) Interface { + return logusEntry{ + Entry: l.Logger.WithFields(map[string]interface{}(fields)), + } +} + +type logusEntry struct { + *logrus.Entry +} + +func (l logusEntry) WithField(key string, value interface{}) Interface { + return logusEntry{ + Entry: l.Entry.WithField(key, value), + } +} + +func (l logusEntry) WithFields(fields Fields) Interface { + return logusEntry{ + Entry: l.Entry.WithFields(map[string]interface{}(fields)), + } +} + +type textFormatter struct{} + +// Based off logrus.TextFormatter, which behaves completely +// differently when you don't want colored output +func (f *textFormatter) Format(entry *logrus.Entry) ([]byte, error) { + b := &bytes.Buffer{} + + levelText := strings.ToUpper(entry.Level.String())[0:4] + timeStamp := entry.Time.Format("2006/01/02 15:04:05.000000") + fmt.Fprintf(b, "%s: %s %s", levelText, timeStamp, entry.Message) + if len(entry.Data) > 0 { + b.WriteString(" " + fieldsToString(entry.Data)) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} diff --git a/vendor/github.com/weaveworks/common/logging/noop.go b/vendor/github.com/weaveworks/common/logging/noop.go new file mode 100644 index 00000000000..8b7201ca948 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/noop.go @@ -0,0 +1,23 @@ +package logging + +// Noop logger. +func Noop() Interface { + return noop{} +} + +type noop struct{} + +func (noop) Debugf(format string, args ...interface{}) {} +func (noop) Debugln(args ...interface{}) {} +func (noop) Infof(format string, args ...interface{}) {} +func (noop) Infoln(args ...interface{}) {} +func (noop) Warnf(format string, args ...interface{}) {} +func (noop) Warnln(args ...interface{}) {} +func (noop) Errorf(format string, args ...interface{}) {} +func (noop) Errorln(args ...interface{}) {} +func (noop) WithField(key string, value interface{}) Interface { + return noop{} +} +func (noop) WithFields(Fields) Interface { + return noop{} +} diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go b/vendor/github.com/weaveworks/common/middleware/grpc_auth.go index 35f548792cc..10be1f8d706 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_auth.go +++ b/vendor/github.com/weaveworks/common/middleware/grpc_auth.go @@ -17,6 +17,17 @@ func ClientUserHeaderInterceptor(ctx context.Context, method string, req, reply return invoker(ctx, method, req, reply, cc, opts...) } +// StreamClientUserHeaderInterceptor propagates the user ID from the context to gRPC metadata, which eventually ends up as a HTTP2 header. +// For streaming gRPC requests. +func StreamClientUserHeaderInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx, err := user.InjectIntoGRPCRequest(ctx) + if err != nil { + return nil, err + } + + return streamer(ctx, desc, cc, method, opts...) +} + // ServerUserHeaderInterceptor propagates the user ID from the gRPC metadata back to our context. 
func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { _, ctx, err := user.ExtractFromGRPCRequest(ctx) @@ -26,3 +37,25 @@ func ServerUserHeaderInterceptor(ctx context.Context, req interface{}, info *grp return handler(ctx, req) } + +// StreamServerUserHeaderInterceptor propagates the user ID from the gRPC metadata back to our context. +func StreamServerUserHeaderInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + _, ctx, err := user.ExtractFromGRPCRequest(ss.Context()) + if err != nil { + return err + } + + return handler(srv, serverStream{ + ctx: ctx, + ServerStream: ss, + }) +} + +type serverStream struct { + ctx context.Context + grpc.ServerStream +} + +func (ss serverStream) Context() context.Context { + return ss.ctx +} diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go index 0614433448a..c5c7d8468aa 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go +++ b/vendor/github.com/weaveworks/common/middleware/grpc_instrumentation.go @@ -10,8 +10,8 @@ import ( "google.golang.org/grpc" ) -// ServerInstrumentInterceptor instruments gRPC requests for errors and latency. -func ServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { +// UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. +func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { begin := time.Now() resp, err := handler(ctx, req) @@ -28,3 +28,22 @@ func ServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServer return resp, err } } + +// StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency. +func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + begin := time.Now() + err := handler(srv, ss) + duration := time.Since(begin).Seconds() + respStatus := "success" + if err != nil { + if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { + respStatus = strconv.Itoa(int(errResp.Code)) + } else { + respStatus = "error" + } + } + hist.WithLabelValues(gRPC, info.FullMethod, respStatus, "false").Observe(duration) + return err + } +} diff --git a/vendor/github.com/weaveworks/common/middleware/grpc_logging.go b/vendor/github.com/weaveworks/common/middleware/grpc_logging.go index a95189e045e..bc9c9f37d51 100644 --- a/vendor/github.com/weaveworks/common/middleware/grpc_logging.go +++ b/vendor/github.com/weaveworks/common/middleware/grpc_logging.go @@ -3,17 +3,21 @@ package middleware import ( "time" - log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc" "github.com/weaveworks/common/logging" + "github.com/weaveworks/common/user" ) -const gRPC = "gRPC" +const ( + gRPC = "gRPC" + errorKey = "err" +) // GRPCServerLog logs grpc requests, errors, and latency. 
type GRPCServerLog struct { + Log logging.Interface // WithRequest will log the entire request rather than just the error WithRequest bool } @@ -22,14 +26,27 @@ type GRPCServerLog struct { func (s GRPCServerLog) UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { begin := time.Now() resp, err := handler(ctx, req) - entry := logging.With(ctx).WithFields(log.Fields{"method": info.FullMethod, "duration": time.Since(begin)}) + entry := user.LogWith(ctx, s.Log).WithFields(logging.Fields{"method": info.FullMethod, "duration": time.Since(begin)}) if err != nil { if s.WithRequest { entry = entry.WithField("request", req) } - entry.WithError(err).Warn(gRPC) + entry.WithField(errorKey, err).Warnln(gRPC) } else { entry.Debugf("%s (success)", gRPC) } return resp, err } + +// StreamServerInterceptor returns an interceptor that logs gRPC requests +func (s GRPCServerLog) StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + begin := time.Now() + err := handler(srv, ss) + entry := user.LogWith(ss.Context(), s.Log).WithFields(logging.Fields{"method": info.FullMethod, "duration": time.Since(begin)}) + if err != nil { + entry.WithField(errorKey, err).Warnln(gRPC) + } else { + entry.Debugf("%s (success)", gRPC) + } + return err +} diff --git a/vendor/github.com/weaveworks/common/middleware/logging.go b/vendor/github.com/weaveworks/common/middleware/logging.go index 4dbc049bc9d..fc669d0edc8 100644 --- a/vendor/github.com/weaveworks/common/middleware/logging.go +++ b/vendor/github.com/weaveworks/common/middleware/logging.go @@ -6,19 +6,19 @@ import ( "net/http/httputil" "time" - log "github.com/sirupsen/logrus" - + "github.com/weaveworks/common/logging" "github.com/weaveworks/common/user" ) // Log middleware logs http requests type Log struct { + Log logging.Interface LogRequestHeaders bool // LogRequestHeaders true -> dump http headers at debug log level } // logWithRequest information from the request and context as fields. 
-func logWithRequest(r *http.Request) *log.Entry {
-	return log.WithFields(user.LogFields(r.Context()))
+func (l Log) logWithRequest(r *http.Request) logging.Interface {
+	return user.LogWith(r.Context(), l.Log)
 }
 
 // Wrap implements Middleware
@@ -30,27 +30,29 @@ func (l Log) Wrap(next http.Handler) http.Handler {
 		headers, err := httputil.DumpRequest(r, false)
 		if err != nil {
 			headers = nil
-			logWithRequest(r).Errorf("Could not dump request headers: %v", err)
+			l.logWithRequest(r).Errorf("Could not dump request headers: %v", err)
 		}
 		var buf bytes.Buffer
 		wrapped := newBadResponseLoggingWriter(w, &buf)
 		next.ServeHTTP(wrapped, r)
 		statusCode := wrapped.statusCode
 		if 100 <= statusCode && statusCode < 500 || statusCode == http.StatusBadGateway || statusCode == http.StatusServiceUnavailable {
-			logWithRequest(r).Debugf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))
+			l.logWithRequest(r).Debugf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))
 			if l.LogRequestHeaders && headers != nil {
-				logWithRequest(r).Debugf("Is websocket request: %v\n%s", IsWSHandshakeRequest(r), string(headers))
+				l.logWithRequest(r).Debugf("Is websocket request: %v\n%s", IsWSHandshakeRequest(r), string(headers))
 			}
 		} else {
-			logWithRequest(r).Warnf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))
+			l.logWithRequest(r).Warnf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))
 			if headers != nil {
-				logWithRequest(r).Warnf("Is websocket request: %v\n%s", IsWSHandshakeRequest(r), string(headers))
+				l.logWithRequest(r).Warnf("Is websocket request: %v\n%s", IsWSHandshakeRequest(r), string(headers))
 			}
-			logWithRequest(r).Warnf("Response: %s", buf.Bytes())
+			l.logWithRequest(r).Warnf("Response: %s", buf.Bytes())
 		}
 	})
 }
 
 // Logging middleware logs each HTTP request method, path, response code and
 // duration for all HTTP requests.
-var Logging = Log{}
+var Logging = Log{
+	Log: logging.Global(),
+}
diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go
index 3d1496f8ff9..3064136a192 100644
--- a/vendor/github.com/weaveworks/common/server/server.go
+++ b/vendor/github.com/weaveworks/common/server/server.go
@@ -14,13 +14,13 @@ import (
 	"github.com/opentracing-contrib/go-stdlib/nethttp"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
-	log "github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 
 	"github.com/weaveworks/common/httpgrpc"
 	httpgrpc_server "github.com/weaveworks/common/httpgrpc/server"
 	"github.com/weaveworks/common/instrument"
+	"github.com/weaveworks/common/logging"
 	"github.com/weaveworks/common/middleware"
 	"github.com/weaveworks/common/signals"
 )
@@ -39,9 +39,13 @@ type Config struct {
 	HTTPServerWriteTimeout time.Duration
 	HTTPServerIdleTimeout  time.Duration
 
-	GRPCOptions    []grpc.ServerOption
-	GRPCMiddleware []grpc.UnaryServerInterceptor
-	HTTPMiddleware []middleware.Interface
+	GRPCOptions          []grpc.ServerOption
+	GRPCMiddleware       []grpc.UnaryServerInterceptor
+	GRPCStreamMiddleware []grpc.StreamServerInterceptor
+	HTTPMiddleware       []middleware.Interface
+
+	LogLevel logging.Level
+	Log      logging.Interface
 }
 
 // RegisterFlags adds the flags required to config this to the given FlagSet
@@ -53,6 +57,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 5*time.Second, "Read timeout for HTTP server")
 	f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 5*time.Second, "Write timeout for HTTP server")
 	f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server")
+	cfg.LogLevel.RegisterFlags(f)
 }
 
 // Server wraps a HTTP and gRPC server, and some common initialization.
@@ -67,6 +72,7 @@ type Server struct {
 
 	HTTP *mux.Router
 	GRPC *grpc.Server
+	Log  logging.Interface
 }
 
 // New makes a new Server
@@ -91,18 +97,39 @@ func New(cfg Config) (*Server, error) {
 	}, []string{"method", "route", "status_code", "ws"})
 	prometheus.MustRegister(requestDuration)
 
+	// If user doesn't supply a logging implementation, by default instantiate
+	// logrus.
+	log := cfg.Log
+	if log == nil {
+		log = logging.NewLogrus(cfg.LogLevel)
+	}
+
 	// Setup gRPC server
-	serverLog := middleware.GRPCServerLog{WithRequest: !cfg.ExcludeRequestInLog}
+	serverLog := middleware.GRPCServerLog{
+		WithRequest: !cfg.ExcludeRequestInLog,
+		Log:         log,
+	}
 	grpcMiddleware := []grpc.UnaryServerInterceptor{
 		serverLog.UnaryServerInterceptor,
-		middleware.ServerInstrumentInterceptor(requestDuration),
+		middleware.UnaryServerInstrumentInterceptor(requestDuration),
 		otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()),
 	}
 	grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...)
+
+	grpcStreamMiddleware := []grpc.StreamServerInterceptor{
+		serverLog.StreamServerInterceptor,
+		middleware.StreamServerInstrumentInterceptor(requestDuration),
+		otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()),
+	}
+	grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...)
+
 	grpcOptions := []grpc.ServerOption{
 		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
 			grpcMiddleware...,
 		)),
+		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+			grpcStreamMiddleware...,
+		)),
 	}
 	grpcOptions = append(grpcOptions, cfg.GRPCOptions...)
 	grpcServer := grpc.NewServer(grpcOptions...)
@@ -113,7 +140,9 @@ func New(cfg Config) (*Server, error) {
 		RegisterInstrumentation(router)
 	}
 	httpMiddleware := []middleware.Interface{
-		middleware.Log{},
+		middleware.Log{
+			Log: log,
+		},
 		middleware.Instrument{
 			Duration:     requestDuration,
 			RouteMatcher: router,
@@ -135,10 +164,11 @@ func New(cfg Config) (*Server, error) {
 		httpListener: httpListener,
 		grpcListener: grpcListener,
 		httpServer:   httpServer,
-		handler:      signals.NewHandler(log.StandardLogger()),
+		handler:      signals.NewHandler(log),
 
 		HTTP: router,
 		GRPC: grpcServer,
+		Log:  log,
 	}, nil
 }
diff --git a/vendor/github.com/weaveworks/common/signals/signals.go b/vendor/github.com/weaveworks/common/signals/signals.go
index cca60b9b5da..1c5569a4e9c 100644
--- a/vendor/github.com/weaveworks/common/signals/signals.go
+++ b/vendor/github.com/weaveworks/common/signals/signals.go
@@ -5,6 +5,8 @@ import (
 	"os/signal"
 	"runtime"
 	"syscall"
+
+	"github.com/weaveworks/common/logging"
 )
 
 // SignalReceiver represents a subsystem/server/... that can be stopped or
@@ -13,22 +15,17 @@ type SignalReceiver interface {
 	Stop() error
 }
 
-// Logger is something to log too.
-type Logger interface {
-	Infof(format string, args ...interface{})
-}
-
 // Handler handles signals, can be interrupted.
 // On SIGINT or SIGTERM it will exit, on SIGQUIT it
 // will dump goroutine stacks to the Logger.
 type Handler struct {
-	log       Logger
+	log       logging.Interface
 	receivers []SignalReceiver
 	quit      chan struct{}
 }
 
 // NewHandler makes a new Handler.
-func NewHandler(log Logger, receivers ...SignalReceiver) *Handler {
+func NewHandler(log logging.Interface, receivers ...SignalReceiver) *Handler {
 	return &Handler{
 		log:       log,
 		receivers: receivers,
@@ -70,6 +67,6 @@ func (h *Handler) Loop() {
 // SignalHandlerLoop blocks until it receives a SIGINT, SIGTERM or SIGQUIT.
 // For SIGINT and SIGTERM, it exits; for SIGQUIT is print a goroutine stack
 // dump.
-func SignalHandlerLoop(log Logger, ss ...SignalReceiver) {
+func SignalHandlerLoop(log logging.Interface, ss ...SignalReceiver) {
 	NewHandler(log, ss...).Loop()
 }
diff --git a/vendor/github.com/weaveworks/common/user/logging.go b/vendor/github.com/weaveworks/common/user/logging.go
index 05123df53cf..b873945be36 100644
--- a/vendor/github.com/weaveworks/common/user/logging.go
+++ b/vendor/github.com/weaveworks/common/user/logging.go
@@ -3,19 +3,20 @@ package user
 import (
 	"golang.org/x/net/context"
 
-	log "github.com/sirupsen/logrus"
+	"github.com/weaveworks/common/logging"
 )
 
-// LogFields returns user and org information from the context as log fields.
-func LogFields(ctx context.Context) log.Fields {
-	fields := log.Fields{}
+// LogWith returns user and org information from the context as log fields.
+func LogWith(ctx context.Context, log logging.Interface) logging.Interface {
 	userID, err := ExtractUserID(ctx)
 	if err == nil {
-		fields["userID"] = userID
+		log = log.WithField("userID", userID)
 	}
+
 	orgID, err := ExtractOrgID(ctx)
 	if err == nil {
-		fields["orgID"] = orgID
+		log = log.WithField("orgID", orgID)
 	}
-	return fields
+
+	return log
 }
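Usage note (not part of the diff): a minimal sketch of how a service built on weaveworks/common might wire up the stream interceptors and logging configuration introduced above. Only server.Config's GRPCStreamMiddleware, LogLevel and Log fields, Server.Log, and the new StreamServerUserHeaderInterceptor come from this change; server.New, Server.Run/Shutdown and the service registration line are assumed from the pre-existing API and are illustrative only, and the interceptor is assumed to live in the middleware package alongside ServerUserHeaderInterceptor.

    package main

    import (
    	"flag"

    	"google.golang.org/grpc"

    	"github.com/weaveworks/common/middleware"
    	"github.com/weaveworks/common/server"
    )

    func main() {
    	// With this change the log level flag is registered by server.Config
    	// itself, so callers no longer pass a separate log-level value.
    	var cfg server.Config
    	cfg.RegisterFlags(flag.CommandLine)
    	flag.Parse()

    	// Propagate the per-tenant user ID on streaming RPCs as well; the
    	// logging and instrumentation stream interceptors are installed by
    	// server.New by default and cfg.GRPCStreamMiddleware is appended to them.
    	cfg.GRPCStreamMiddleware = []grpc.StreamServerInterceptor{
    		middleware.StreamServerUserHeaderInterceptor,
    	}

    	srv, err := server.New(cfg)
    	if err != nil {
    		panic(err)
    	}
    	defer srv.Shutdown()

    	// srv.Log is cfg.Log when supplied, otherwise a logrus-backed logger
    	// built from cfg.LogLevel.
    	srv.Log.Infof("server starting")

    	// pb.RegisterExampleServer(srv.GRPC, &exampleServer{}) // hypothetical service registration
    	srv.Run()
    }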