Use gRPC for distributor <-> ingester rpcs. #144

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged: 4 commits, Nov 22, 2016

Changes from all commits
98 changes: 49 additions & 49 deletions cmd/cortex/main.go
@@ -3,6 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
+	"net"
 	"net/http"
 	_ "net/http/pprof"
 	"os"
@@ -11,13 +12,16 @@ import (
"time"

"github.com/gorilla/mux"
"github.com/mwitkow/go-grpc-middleware"
"github.com/weaveworks/scope/common/middleware"
"golang.org/x/net/context"
"google.golang.org/grpc"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/route"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/web/api/v1"
"github.com/weaveworks/scope/common/middleware"
"golang.org/x/net/context"

"github.com/weaveworks/cortex"
"github.com/weaveworks/cortex/chunk"
@@ -28,15 +32,15 @@ import (
"github.com/weaveworks/cortex/ruler"
"github.com/weaveworks/cortex/ui"
"github.com/weaveworks/cortex/user"
cortex_grpc_middleware "github.com/weaveworks/cortex/util/middleware"
)

const (
modeDistributor = "distributor"
modeIngester = "ingester"
modeRuler = "ruler"

infName = "eth0"
userIDHeaderName = "X-Scope-OrgID"
infName = "eth0"
)

 var (
@@ -84,30 +88,38 @@ func main() {
 	var cfg cfg
 	flag.StringVar(&cfg.mode, "mode", modeDistributor, "Mode (distributor, ingester, ruler).")
 	flag.IntVar(&cfg.listenPort, "web.listen-port", 9094, "HTTP server listen port.")
+	flag.BoolVar(&cfg.logSuccess, "log.success", false, "Log successful requests")

 	flag.StringVar(&cfg.consulHost, "consul.hostname", "localhost:8500", "Hostname and port of Consul.")
 	flag.StringVar(&cfg.consulPrefix, "consul.prefix", "collectors/", "Prefix for keys in Consul.")

 	flag.StringVar(&cfg.s3URL, "s3.url", "localhost:4569", "S3 endpoint URL.")
 	flag.StringVar(&cfg.dynamodbURL, "dynamodb.url", "localhost:8000", "DynamoDB endpoint URL.")
-	flag.BoolVar(&cfg.dynamodbCreateTables, "dynamodb.create-tables", false, "Create required DynamoDB tables on startup.")
 	flag.DurationVar(&cfg.dynamodbPollInterval, "dynamodb.poll-interval", 2*time.Minute, "How frequently to poll DynamoDB to learn our capacity.")
+	flag.BoolVar(&cfg.dynamodbCreateTables, "dynamodb.create-tables", false, "Create required DynamoDB tables on startup.")
+	flag.BoolVar(&cfg.watchDynamo, "watch-dynamo", false, "Periodically collect DynamoDB provisioned throughput.")

 	flag.StringVar(&cfg.memcachedHostname, "memcached.hostname", "", "Hostname for memcached service to use when caching chunks. If empty, no memcached will be used.")
+	flag.StringVar(&cfg.memcachedService, "memcached.service", "memcached", "SRV service used to discover memcache servers.")
 	flag.DurationVar(&cfg.memcachedTimeout, "memcached.timeout", 100*time.Millisecond, "Maximum time to wait before giving up on memcached requests.")
 	flag.DurationVar(&cfg.memcachedExpiration, "memcached.expiration", 0, "How long chunks stay in the memcache.")
-	flag.StringVar(&cfg.memcachedService, "memcached.service", "memcached", "SRV service used to discover memcache servers.")
-	flag.DurationVar(&cfg.remoteTimeout, "remote.timeout", 5*time.Second, "Timeout for downstream ingesters.")

 	flag.DurationVar(&cfg.ingesterConfig.FlushCheckPeriod, "ingester.flush-period", 1*time.Minute, "Period with which to attempt to flush chunks.")
 	flag.DurationVar(&cfg.ingesterConfig.RateUpdatePeriod, "ingester.rate-update-period", 15*time.Second, "Period with which to update the per-user ingestion rates.")
 	flag.DurationVar(&cfg.ingesterConfig.MaxChunkIdle, "ingester.max-chunk-idle", 1*time.Hour, "Maximum chunk idle time before flushing.")
 	flag.IntVar(&cfg.ingesterConfig.ConcurrentFlushes, "ingester.concurrent-flushes", 25, "Number of concurrent goroutines flushing to dynamodb.")
 	flag.IntVar(&cfg.numTokens, "ingester.num-tokens", 128, "Number of tokens for each ingester.")
+	flag.IntVar(&cfg.ingesterConfig.GRPCListenPort, "ingester.grpc.listen-port", 9095, "gRPC server listen port.")

 	flag.IntVar(&cfg.distributorConfig.ReplicationFactor, "distributor.replication-factor", 3, "The number of ingesters to write to and read from.")
 	flag.IntVar(&cfg.distributorConfig.MinReadSuccesses, "distributor.min-read-successes", 2, "The minimum number of ingesters from which a read must succeed.")
 	flag.DurationVar(&cfg.distributorConfig.HeartbeatTimeout, "distributor.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.")
+	flag.DurationVar(&cfg.distributorConfig.RemoteTimeout, "distributor.remote-timeout", 5*time.Second, "Timeout for downstream ingesters.")

 	flag.StringVar(&cfg.rulerConfig.ConfigsAPIURL, "ruler.configs.url", "", "URL of configs API server.")
 	flag.StringVar(&cfg.rulerConfig.UserID, "ruler.userID", "", "Weave Cloud org to run rules for")
 	flag.DurationVar(&cfg.rulerConfig.EvaluationInterval, "ruler.evaluation-interval", 15*time.Second, "How frequently to evaluate rules")
-	flag.BoolVar(&cfg.logSuccess, "log.success", false, "Log successful requests")
-	flag.BoolVar(&cfg.watchDynamo, "watch-dynamo", false, "Periodically collect DynamoDB provisioned throughput.")

Contributor:
What's up with all this shuffling? It doesn't matter much, just curious.

Contributor Author:
For my own sanity, I grouped the flags by common, ingester, distributor, etc.
 	flag.Parse()

 	chunkStore, err := setupChunkStore(cfg)
@@ -139,21 +151,34 @@ func main() {
 	switch cfg.mode {
 	case modeDistributor:
 		cfg.distributorConfig.Ring = r
-		cfg.distributorConfig.ClientFactory = func(address string) (*distributor.IngesterClient, error) {
-			return distributor.NewIngesterClient(address, cfg.remoteTimeout)
-		}
 		setupDistributor(cfg.distributorConfig, chunkStore, router.PathPrefix("/api/prom").Subrouter())

 	case modeIngester:
 		cfg.ingesterConfig.Ring = r
-		registration, err := ring.RegisterIngester(consul, cfg.listenPort, cfg.numTokens)
+		registration, err := ring.RegisterIngester(consul, cfg.listenPort, cfg.ingesterConfig.GRPCListenPort, cfg.numTokens)
 		if err != nil {
 			// This only happens for errors in configuration & set-up, not for
 			// network errors.
 			log.Fatalf("Could not register ingester: %v", err)
 		}
 		ing := setupIngester(chunkStore, cfg.ingesterConfig, router)

+		// Setup gRPC server
+		lis, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.ingesterConfig.GRPCListenPort))
+		if err != nil {
+			log.Fatalf("failed to listen: %v", err)
+		}
+		grpcServer := grpc.NewServer(
+			grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+				cortex_grpc_middleware.ServerInstrumentInterceptor(requestDuration),
+				cortex_grpc_middleware.ServerLoggingInterceptor(cfg.logSuccess),
+				cortex_grpc_middleware.ServerUserHeaderInterceptor,
+			)),
+		)
+		cortex.RegisterIngesterServer(grpcServer, ing)
+		go grpcServer.Serve(lis)
+		defer grpcServer.Stop()
Contributor:
Maybe this function (main) is getting too big?

Contributor Author:
Yeah. I think we'll end up breaking this into a bunch of binaries / images next.
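As a note on the interceptor chain above: here is a minimal sketch of what a user-header interceptor in the spirit of ServerUserHeaderInterceptor could look like. It is an illustration, not the PR's actual cortex/util/middleware code; the metadata key and context key are assumptions, and metadata.FromContext is the 2016-era grpc-go API (newer releases call it FromIncomingContext).

```go
package middleware

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type contextKey int

const userIDContextKey contextKey = 0

// serverUserHeaderInterceptor recovers the org ID from the incoming gRPC
// metadata and attaches it to the request context before calling the
// handler: the gRPC analogue of reading the X-Scope-OrgID HTTP header.
func serverUserHeaderInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	md, ok := metadata.FromContext(ctx) // FromIncomingContext in newer grpc-go
	if !ok {
		return nil, fmt.Errorf("no metadata in gRPC request")
	}
	userIDs := md["x-scope-orgid"] // assumed key; metadata keys are lowercased
	if len(userIDs) != 1 {
		return nil, fmt.Errorf("expected exactly one user ID in gRPC metadata")
	}
	return handler(context.WithValue(ctx, userIDContextKey, userIDs[0]), req)
}
```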


 		// Deferring a func to make ordering obvious
 		defer func() {
 			registration.ChangeState(ring.Leaving)
@@ -166,9 +191,6 @@ func main() {
 	case modeRuler:
 		// XXX: Too much duplication w/ distributor set up.
 		cfg.distributorConfig.Ring = r
-		cfg.distributorConfig.ClientFactory = func(address string) (*distributor.IngesterClient, error) {
-			return distributor.NewIngesterClient(address, cfg.remoteTimeout)
-		}
 		cfg.rulerConfig.DistributorConfig = cfg.distributorConfig
 		ruler, err := setupRuler(chunkStore, cfg.rulerConfig)
 		if err != nil {
@@ -242,26 +264,12 @@ func setupDistributor(
 	}
 	prometheus.MustRegister(dist)

-	router.Path("/push").Handler(cortex.AppenderHandler(dist, handleDistributorError))
+	router.Path("/push").Handler(http.HandlerFunc(dist.PushHandler))

 	// TODO: Move querier to separate binary.
 	setupQuerier(dist, chunkStore, router)
 }

-func handleDistributorError(w http.ResponseWriter, err error) {
-	switch e := err.(type) {
-	case distributor.IngesterError:
-		switch {
-		case 400 <= e.StatusCode && e.StatusCode < 500:
-			log.Warnf("append err: %v", err)
-			http.Error(w, err.Error(), http.StatusBadRequest)
-			return
-		}
-	}
-	log.Errorf("append err: %v", err)
-	http.Error(w, err.Error(), http.StatusInternalServerError)
-}

 // setupQuerier sets up a complete querying pipeline:
 //
 // PromQL -> MergeQuerier -> Distributor -> IngesterQuerier -> Ingester
@@ -276,12 +284,15 @@ func setupQuerier(
 	engine := promql.NewEngine(queryable, nil)
 	api := v1.NewAPI(engine, querier.DummyStorage{Queryable: queryable})
 	promRouter := route.New(func(r *http.Request) (context.Context, error) {
-		userID := r.Header.Get(userIDHeaderName)
+		userID := r.Header.Get(user.UserIDHeaderName)
+		if userID == "" {
+			return nil, fmt.Errorf("no %s header", user.UserIDHeaderName)
+		}
 		return user.WithID(context.Background(), userID), nil
 	}).WithPrefix("/api/prom/api/v1")
 	api.Register(promRouter)
 	router.PathPrefix("/api/v1").Handler(promRouter)
-	router.Path("/user_stats").Handler(cortex.DistributorUserStatsHandler(distributor.UserStats))
+	router.Path("/user_stats").Handler(http.HandlerFunc(distributor.UserStatsHandler))
 	router.Path("/graph").Handler(ui.GraphHandler())
 	router.PathPrefix("/static/").Handler(ui.StaticAssetsHandler("/api/prom/static/"))
 }
@@ -297,25 +308,14 @@ func setupIngester(
 	}
 	prometheus.MustRegister(ingester)

-	router.Path("/push").Handler(cortex.AppenderHandler(ingester, handleIngesterError))
-	router.Path("/query").Handler(cortex.QueryHandler(ingester))
-	router.Path("/label_values").Handler(cortex.LabelValuesHandler(ingester))
-	router.Path("/user_stats").Handler(cortex.IngesterUserStatsHandler(ingester.UserStats))
-	router.Path("/ready").Handler(cortex.IngesterReadinessHandler(ingester))
+	router.Path("/push").Handler(http.HandlerFunc(ingester.PushHandler))
+	router.Path("/query").Handler(http.HandlerFunc(ingester.QueryHandler))
+	router.Path("/label_values").Handler(http.HandlerFunc(ingester.LabelValuesHandler))
+	router.Path("/user_stats").Handler(http.HandlerFunc(ingester.UserStatsHandler))
+	router.Path("/ready").Handler(http.HandlerFunc(ingester.ReadinessHandler))
 	return ingester
 }

-func handleIngesterError(w http.ResponseWriter, err error) {
-	switch err {
-	case ingester.ErrOutOfOrderSample, ingester.ErrDuplicateSampleForTimestamp:
-		log.Warnf("append err: %v", err)
-		http.Error(w, err.Error(), http.StatusBadRequest)
-	default:
-		log.Errorf("append err: %v", err)
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
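With handleIngesterError deleted, the append-error-to-status mapping has to live somewhere; presumably it moves into the handler methods themselves (the ingester.PushHandler wired up above). A self-contained sketch of that shape, using hypothetical stand-ins (Ingester, append, the error values) for the real types and append path:

```go
package sketch

import (
	"errors"
	"net/http"
)

// Sentinel errors mirroring the ingester package's exported errors that the
// old helper special-cased.
var (
	errOutOfOrderSample            = errors.New("sample out of order")
	errDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
)

// Ingester is a hypothetical stand-in for the real ingester type.
type Ingester struct{}

// append is a hypothetical stand-in for the real decode-and-append path.
func (i *Ingester) append(r *http.Request) error { return nil }

// PushHandler maps append errors to HTTP status codes itself, so no shared
// handleIngesterError helper is needed.
func (i *Ingester) PushHandler(w http.ResponseWriter, r *http.Request) {
	switch err := i.append(r); err {
	case nil:
		w.WriteHeader(http.StatusNoContent)
	case errOutOfOrderSample, errDuplicateSampleForTimestamp:
		// Bad client data: report a 4xx, matching the old helper's behaviour.
		http.Error(w, err.Error(), http.StatusBadRequest)
	default:
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```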

 // setupRuler sets up a ruler.
 func setupRuler(chunkStore chunk.Store, cfg ruler.Config) (*ruler.Ruler, error) {
 	return ruler.New(chunkStore, cfg)
55 changes: 27 additions & 28 deletions cortex.proto
@@ -1,44 +1,27 @@
 syntax = "proto3";

-package cortex;

-message Sample {
-	double value = 1;
-	int64 timestamp_ms = 2;
-}

-message LabelPair {
-	string name = 1;
-	string value = 2;
-}
+import "github.com/prometheus/prometheus/storage/remote/remote.proto";

-message TimeSeries {
-	repeated LabelPair labels = 1;
-	// Sorted by time, oldest sample first.
-	repeated Sample samples = 2;
-}
+package cortex;

-enum MatchType {
-	EQUAL = 0;
-	NOT_EQUAL = 1;
-	REGEX_MATCH = 2;
-	REGEX_NO_MATCH = 3;
+service Ingester {
+	rpc Push(remote.WriteRequest) returns (WriteResponse) {};
+	rpc Query(QueryRequest) returns (QueryResponse) {};
+	rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse) {};
+	rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {};
 }

-message LabelMatcher {
-	MatchType type = 1;
-	string name = 2;
-	string value = 3;
+message WriteResponse {
 }

-message ReadRequest {
+message QueryRequest {
 	int64 start_timestamp_ms = 1;
 	int64 end_timestamp_ms = 2;
 	repeated LabelMatcher matchers = 3;
 }

-message ReadResponse {
-	repeated TimeSeries timeseries = 1;
+message QueryResponse {
+	repeated remote.TimeSeries timeseries = 1;
 }

 message LabelValuesRequest {
@@ -49,7 +32,23 @@ message LabelValuesResponse {
 	repeated string label_values = 1;
 }

+message UserStatsRequest {
+}

+message UserStatsResponse {
+	double ingestion_rate = 1;
+	uint64 num_series = 2;
+}

+enum MatchType {
+	EQUAL = 0;
+	NOT_EQUAL = 1;
+	REGEX_MATCH = 2;
+	REGEX_NO_MATCH = 3;
+}

+message LabelMatcher {
+	MatchType type = 1;
+	string name = 2;
+	string value = 3;
+}
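To make the new surface concrete, here is a hedged sketch of a caller using the protoc-generated Go bindings for the service above, assuming they land in package cortex (as the .proto's package statement suggests) together with Prometheus' remote.proto types. The address and dial options are illustrative; 9095 matches the new -ingester.grpc.listen-port default, and grpc.WithTimeout was the 2016-era option.

```go
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/prometheus/prometheus/storage/remote"
	"github.com/weaveworks/cortex"
)

func main() {
	// Dial an ingester's gRPC port.
	conn, err := grpc.Dial("localhost:9095",
		grpc.WithInsecure(),
		grpc.WithTimeout(5*time.Second))
	if err != nil {
		log.Fatalf("dial ingester: %v", err)
	}
	defer conn.Close()

	client := cortex.NewIngesterClient(conn) // generated by protoc-gen-go

	// Push reuses Prometheus' remote-write types via the remote.proto import.
	// A real distributor would also attach the org ID to the outgoing
	// metadata (e.g. via metadata.NewContext) for the server-side user
	// interceptor to pick up.
	_, err = client.Push(context.Background(), &remote.WriteRequest{
		Timeseries: []*remote.TimeSeries{{
			Labels:  []*remote.LabelPair{{Name: "__name__", Value: "up"}},
			Samples: []*remote.Sample{{Value: 1, TimestampMs: time.Now().Unix() * 1000}},
		}},
	})
	if err != nil {
		log.Fatalf("push: %v", err)
	}
}
```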