diff --git a/Makefile b/Makefile
index fe0aa5c8576..59257e45478 100644
--- a/Makefile
+++ b/Makefile
@@ -52,6 +52,8 @@ pkg/ring/ring.pb.go: pkg/ring/ring.proto
 pkg/querier/frontend/frontend.pb.go: pkg/querier/frontend/frontend.proto
 pkg/chunk/storage/caching_index_client.pb.go: pkg/chunk/storage/caching_index_client.proto
 pkg/distributor/ha_tracker.pb.go: pkg/distributor/ha_tracker.proto
+pkg/storage/rules/rules.pb.go: pkg/storage/rules/rules.proto
+pkg/util/usertracker/usertracker.pb.go: pkg/util/usertracker/usertracker.proto
 all: $(UPTODATE_FILES)
 test: protos
 mod-check: protos
diff --git a/pkg/alertmanager/api.go b/pkg/alertmanager/api.go
new file mode 100644
index 00000000000..998ce80437e
--- /dev/null
+++ b/pkg/alertmanager/api.go
@@ -0,0 +1,116 @@
+package alertmanager
+
+import (
+	"io/ioutil"
+	"net/http"
+
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log/level"
+	"github.com/gorilla/mux"
+	"github.com/weaveworks/common/user"
+	"gopkg.in/yaml.v2"
+)
+
+// RegisterRoutes registers the alertmanager config API HTTP routes with the provided Router.
+func (am *MultitenantAlertmanager) RegisterRoutes(r *mux.Router) {
+	// If no store is set, return without registering routes.
+	if am.store == nil {
+		return
+	}
+	for _, route := range []struct {
+		name, method, path string
+		handler            http.HandlerFunc
+	}{
+		{"get_config", "GET", "/api/prom/alertmanager", am.getUserConfig},
+		{"set_config", "POST", "/api/prom/alertmanager", am.setUserConfig},
+		{"delete_config", "DELETE", "/api/prom/alertmanager", am.deleteUserConfig},
+	} {
+		r.Handle(route.path, route.handler).Methods(route.method).Name(route.name)
+	}
+}
+
+func (am *MultitenantAlertmanager) getUserConfig(w http.ResponseWriter, r *http.Request) {
+	userID, _, err := user.ExtractOrgIDFromHTTPRequest(r)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusUnauthorized)
+		return
+	}
+
+	logger := util.WithContext(r.Context(), util.Logger)
+
+	if userID == "" {
+		level.Error(logger).Log("err", "no user id found in request")
+		http.Error(w, "no user id found in request", http.StatusUnauthorized)
+		return
+	}
+
+	cfg, err := am.store.GetAlertConfig(r.Context(), userID)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	d, err := yaml.Marshal(&cfg)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/yaml")
+	if _, err := w.Write(d); err != nil {
+		level.Error(logger).Log("msg", "error writing yaml alertmanager config", "err", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (am *MultitenantAlertmanager) setUserConfig(w http.ResponseWriter, r *http.Request) {
+	userID, _, err := user.ExtractOrgIDFromHTTPRequest(r)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusUnauthorized)
+		return
+	}
+
+	logger := util.WithContext(r.Context(), util.Logger)
+
+	if userID == "" {
+		level.Error(logger).Log("err", "no user id found in request")
+		http.Error(w, "no user id found in request", http.StatusUnauthorized)
+		return
+	}
+
+	payload, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		level.Error(logger).Log("err", err.Error())
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	cfg := alerts.AlertConfig{}
+	err = yaml.Unmarshal(payload, &cfg)
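+	// The yaml.Unmarshal above only checks that the payload decodes into
+	// alerts.AlertConfig. A stricter sketch (an assumption, not part of this
+	// change) could also run the embedded config through the upstream
+	// Alertmanager loader before storing it, using the amconfig package this
+	// repo already imports in multitenant.go:
+	//
+	//	if _, err := amconfig.Load(cfg.AlertmanagerConfig); err != nil {
+	//		http.Error(w, err.Error(), http.StatusBadRequest)
+	//		return
+	//	}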
+	if err != nil {
+		level.Error(logger).Log("err", err.Error())
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	err = am.store.SetAlertConfig(r.Context(), userID, cfg)
+	if err != nil {
+		level.Error(logger).Log("err", err.Error())
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+}
+
+func (am *MultitenantAlertmanager) deleteUserConfig(w http.ResponseWriter, r *http.Request) {
+	// TODO: deleting a per-user alertmanager config is not implemented yet;
+	// the route is registered so the API surface is complete.
+}
diff --git a/pkg/alertmanager/kv_poller.go b/pkg/alertmanager/kv_poller.go
new file mode 100644
index 00000000000..7cb6119b832
--- /dev/null
+++ b/pkg/alertmanager/kv_poller.go
@@ -0,0 +1,101 @@
+package alertmanager
+
+import (
+	"context"
+
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
+	"github.com/cortexproject/cortex/pkg/util/usertracker"
+)
+
+// trackedAlertPoller checks for updated user configs and retrieves the
+// updated configurations from the backend.
+type trackedAlertPoller struct {
+	tracker *usertracker.Tracker
+	store   alerts.AlertStore
+
+	initialized bool
+}
+
+func newTrackedAlertPoller(tracker *usertracker.Tracker, store alerts.AlertStore) (*trackedAlertPoller, error) {
+	return &trackedAlertPoller{
+		tracker: tracker,
+		store:   store,
+
+		initialized: false,
+	}, nil
+}
+
+func (p *trackedAlertPoller) trackedAlertStore() *trackedAlertStore {
+	return &trackedAlertStore{
+		tracker: p.tracker,
+		store:   p.store,
+	}
+}
+
+// PollAlerts returns the alert configs changed since the last poll.
+// All alert configurations are returned on the first poll.
+func (p *trackedAlertPoller) PollAlerts(ctx context.Context) (map[string]alerts.AlertConfig, error) {
+	updatedConfigs := map[string]alerts.AlertConfig{}
+
+	// First poll will return all alert configurations
+	if !p.initialized {
+		p.initialized = true
+		return p.store.ListAlertConfigs(ctx)
+	}
+
+	// Get the changed users from the user update tracker
+	users := p.tracker.GetUpdatedUsers(ctx)
+
+	// Retrieve user configuration from the alert store
+	// TODO: Add retry logic for failed requests
+	// TODO: store users that failed to be updated and reattempt to retrieve them on the next poll
+	for _, u := range users {
+		cfg, err := p.store.GetAlertConfig(ctx, u)
+		if err != nil {
+			return nil, err
+		}
+
+		updatedConfigs[u] = cfg
+	}
+
+	return updatedConfigs, nil
+}
+
+func (p *trackedAlertPoller) Stop() {
+	p.tracker.Stop()
+}
+
+type trackedAlertStore struct {
+	tracker *usertracker.Tracker
+	store   alerts.AlertStore
+}
+
+// ListAlertConfigs passes through to the embedded alert store
+func (w *trackedAlertStore) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfig, error) {
+	return w.store.ListAlertConfigs(ctx)
+}
+
+// GetAlertConfig passes through to the embedded alert store
+func (w *trackedAlertStore) GetAlertConfig(ctx context.Context, id string) (alerts.AlertConfig, error) {
+	return w.store.GetAlertConfig(ctx, id)
+}
+
+// SetAlertConfig passes through to the embedded alert store, and tracks a user change
+func (w *trackedAlertStore) SetAlertConfig(ctx context.Context, id string, cfg alerts.AlertConfig) error {
+	err := w.store.SetAlertConfig(ctx, id, cfg)
+	if err != nil {
+		return err
+	}
+
+	return w.tracker.UpdateUser(ctx, id)
+}
+
+// DeleteAlertConfig passes through to the embedded alert store, and tracks a user change
+func (w *trackedAlertStore) DeleteAlertConfig(ctx context.Context, id string) error {
+	err := w.store.DeleteAlertConfig(ctx, id)
+	if err != nil {
+		return err
+	}
+
+	return w.tracker.UpdateUser(ctx, id)
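+	// As with SetAlertConfig above, the store write and the tracker update are
+	// not atomic: if UpdateUser fails after a successful write, other pollers
+	// will not pick up this change, since PollAlerts only lists every config
+	// on its first call.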
+}
diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go
index 608377893d7..5c4e251a02c 100644
--- a/pkg/alertmanager/multitenant.go
+++ b/pkg/alertmanager/multitenant.go
@@ -22,8 +22,7 @@ import (
 	"github.com/weaveworks/common/user"
 	"github.com/weaveworks/mesh"
 
-	"github.com/cortexproject/cortex/pkg/configs"
-	configs_client "github.com/cortexproject/cortex/pkg/configs/client"
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
 )
@@ -185,10 +184,13 @@ type MultitenantAlertmanagerConfig struct {
 	FallbackConfigFile string
 	AutoWebhookRoot    string
 	AutoSlackRoot      string
+
+	AlertStore AlertStoreConfig
 }
 
 // RegisterFlags adds the flags required to config this to the given FlagSet.
 func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) {
+	cfg.AlertStore.RegisterFlags(f)
 	flag.StringVar(&cfg.DataDir, "alertmanager.storage.path", "data/", "Base path for data storage.")
 	flag.DurationVar(&cfg.Retention, "alertmanager.storage.retention", 5*24*time.Hour, "How long to keep data for.")
@@ -214,7 +216,8 @@ type MultitenantAlertmanager struct {
 	cfg *MultitenantAlertmanagerConfig
 
-	configsAPI configs_client.Client
+	store  alerts.AlertStore
+	poller alerts.AlertPoller
 
 	// The fallback config is stored as a string and parsed every time it's needed
 	// because we mutate the parsed results and don't want those changes to take
 	fallbackConfig string
 
 	// All the organization configurations that we have. Only used for instrumentation.
-	cfgs map[string]configs.Config
+	cfgs map[string]alerts.AlertConfig
 
 	alertmanagersMtx sync.Mutex
 	alertmanagers    map[string]*Alertmanager
 
-	latestConfig configs.ID
-	latestMutex  sync.RWMutex
-
 	meshRouter   *gossipFactory
 	srvDiscovery *srvDiscovery
@@ -238,17 +238,12 @@
 }
 
 // NewMultitenantAlertmanager creates a new MultitenantAlertmanager.
-func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, cfgCfg configs_client.Config) (*MultitenantAlertmanager, error) {
+func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig) (*MultitenantAlertmanager, error) {
 	err := os.MkdirAll(cfg.DataDir, 0777)
 	if err != nil {
 		return nil, fmt.Errorf("unable to create Alertmanager data directory %q: %s", cfg.DataDir, err)
 	}
 
-	configsAPI, err := configs_client.New(cfgCfg)
-	if err != nil {
-		return nil, err
-	}
-
 	mrouter := initMesh(cfg.MeshListenAddr, cfg.MeshHWAddr, cfg.MeshNickname, cfg.MeshPassword)
 	mrouter.Start()
@@ -265,11 +260,17 @@
 	}
 
 	gf := newGossipFactory(mrouter)
+
+	poller, store, err := NewAlertStore(cfg.AlertStore)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create Alertmanager storage: %s", err)
+	}
+
 	am := &MultitenantAlertmanager{
 		cfg:            cfg,
-		configsAPI:     configsAPI,
+		poller:         poller,
+		store:          store,
 		fallbackConfig: string(fallbackConfig),
-		cfgs:           map[string]configs.Config{},
+		cfgs:           map[string]alerts.AlertConfig{},
 		alertmanagers:  map[string]*Alertmanager{},
 		meshRouter:     &gf,
 		srvDiscovery:   newSRVDiscovery(cfg.MeshPeerService, cfg.MeshPeerHost, cfg.MeshPeerRefreshInterval),
@@ -314,6 +315,7 @@ func (am *MultitenantAlertmanager) Run() {
 
 // Stop stops the MultitenantAlertmanager.
 func (am *MultitenantAlertmanager) Stop() {
+	am.poller.Stop()
 	am.srvDiscovery.Stop()
 	close(am.stop)
 	<-am.done
@@ -326,7 +328,7 @@
 
 // Load the full set of configurations from the server, retrying with backoff
 // until we can get them.
-func (am *MultitenantAlertmanager) loadAllConfigs() map[string]configs.View {
+func (am *MultitenantAlertmanager) loadAllConfigs() map[string]alerts.AlertConfig {
 	backoff := util.NewBackoff(context.Background(), backoffConfig)
 	for {
 		cfgs, err := am.poll()
@@ -349,25 +351,21 @@ func (am *MultitenantAlertmanager) updateConfigs(now time.Time) error {
 }
 
-// poll the configuration server. Not re-entrant.
-func (am *MultitenantAlertmanager) poll() (map[string]configs.View, error) {
-	configID := am.latestConfig
-	cfgs, err := am.configsAPI.GetAlerts(context.Background(), configID)
+// poll the alert store for changed configurations. Not re-entrant.
+func (am *MultitenantAlertmanager) poll() (map[string]alerts.AlertConfig, error) {
+	cfgs, err := am.poller.PollAlerts(context.Background())
 	if err != nil {
 		level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: configs server poll failed", "err", err)
 		return nil, err
 	}
-	am.latestMutex.Lock()
-	am.latestConfig = cfgs.GetLatestConfigID()
-	am.latestMutex.Unlock()
-	return cfgs.Configs, nil
+	return cfgs, nil
 }
 
-func (am *MultitenantAlertmanager) addNewConfigs(cfgs map[string]configs.View) {
+func (am *MultitenantAlertmanager) addNewConfigs(cfgs map[string]alerts.AlertConfig) {
 	// TODO: instrument how many configs we have, both valid & invalid.
 	level.Debug(util.Logger).Log("msg", "adding configurations", "num_configs", len(cfgs))
 	for userID, config := range cfgs {
-		err := am.setConfig(userID, config.Config)
+		err := am.setConfig(userID, config)
 		if err != nil {
 			level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: error applying config", "err", err)
 			continue
@@ -426,7 +424,7 @@ func (am *MultitenantAlertmanager) createTemplatesFile(userID, fn, content strin
 
 // setConfig applies the given configuration to the alertmanager for `userID`,
 // creating an alertmanager if it doesn't already exist.
-func (am *MultitenantAlertmanager) setConfig(userID string, config configs.Config) error {
+func (am *MultitenantAlertmanager) setConfig(userID string, config alerts.AlertConfig) error {
 	_, hasExisting := am.alertmanagers[userID]
 	var amConfig *amconfig.Config
 	var err error
@@ -489,7 +487,7 @@
 }
 
 // alertmanagerConfigFromConfig returns the Alertmanager config from the Cortex configuration.
-func alertmanagerConfigFromConfig(c configs.Config) (*amconfig.Config, error) {
+func alertmanagerConfigFromConfig(c alerts.AlertConfig) (*amconfig.Config, error) {
 	cfg, err := amconfig.Load(c.AlertmanagerConfig)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing Alertmanager config: %s", err)
diff --git a/pkg/alertmanager/storage.go b/pkg/alertmanager/storage.go
new file mode 100644
index 00000000000..e75f59f53db
--- /dev/null
+++ b/pkg/alertmanager/storage.go
@@ -0,0 +1,56 @@
+package alertmanager
+
+import (
+	"context"
+	"flag"
+	"fmt"
+
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
+	"github.com/cortexproject/cortex/pkg/storage/clients/configdb"
+	"github.com/cortexproject/cortex/pkg/storage/clients/gcp"
+	"github.com/cortexproject/cortex/pkg/util/usertracker"
+)
+
+// AlertStoreConfig configures the alertmanager backend
+type AlertStoreConfig struct {
+	Type string `yaml:"type"`
+
+	ConfigDB configdb.Config
+
+	GCS     gcp.GCSConfig
+	Tracker usertracker.Config
+}
+
+// RegisterFlags registers flags.
+func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) {
+	cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager", f)
+	cfg.GCS.RegisterFlagsWithPrefix("alertmanager.store.", f)
+	cfg.Tracker.RegisterFlagsWithPrefix("alertmanager.", f)
+	f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Method to use for backend alert storage (configdb, gcs)")
+}
+
+// NewAlertStore returns a new alert storage backend poller and store
+func NewAlertStore(cfg AlertStoreConfig) (alerts.AlertPoller, alerts.AlertStore, error) {
+	var (
+		alertStore alerts.AlertStore
+		err        error
+	)
+	switch cfg.Type {
+	case "configdb":
+		poller, err := configdb.New(cfg.ConfigDB)
+		return poller, nil, err
+	case "gcs":
+		alertStore, err = gcp.NewGCSClient(context.Background(), cfg.GCS)
+		if err != nil {
+			return nil, nil, err
+		}
+	default:
+		return nil, nil, fmt.Errorf("Unrecognized alert storage mode %v, choose one of: configdb, gcs", cfg.Type)
+	}
+
+	tracker, err := usertracker.NewTracker(cfg.Tracker)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p, err := newTrackedAlertPoller(tracker, alertStore)
+
+	return p, p.trackedAlertStore(), err
+}
diff --git a/pkg/chunk/gcp/gcs_object_client.go b/pkg/chunk/gcp/gcs_object_client.go
index c07f25ac94c..62ebe70db83 100644
--- a/pkg/chunk/gcp/gcs_object_client.go
+++ b/pkg/chunk/gcp/gcs_object_client.go
@@ -36,7 +36,7 @@ func (cfg *GCSConfig) RegisterFlags(f *flag.FlagSet) {
 
 // NewGCSObjectClient makes a new chunk.ObjectClient that writes chunks to GCS.
 func NewGCSObjectClient(ctx context.Context, cfg GCSConfig, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) {
-	option, err := gcsInstrumentation(ctx, storage.ScopeReadWrite)
+	option, err := gcsInstrumentation(ctx, "chunk", storage.ScopeReadWrite)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/chunk/gcp/instrumentation.go b/pkg/chunk/gcp/instrumentation.go
index 1347ee69cef..b49c9ff14cf 100644
--- a/pkg/chunk/gcp/instrumentation.go
+++ b/pkg/chunk/gcp/instrumentation.go
@@ -36,7 +36,7 @@ var (
 		// GCS latency seems to range from a few ms to a few secs and is
 		// important. So use 6 buckets from 5ms to 5s.
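+		// (ExponentialBuckets(0.005, 4, 6) yields upper bounds of 5ms, 20ms,
+		// 80ms, 320ms, 1.28s and 5.12s.)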
Buckets: prometheus.ExponentialBuckets(0.005, 4, 6), - }, []string{"operation", "status_code"}) + }, []string{"operation", "status_code", "type"}) ) func bigtableInstrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { @@ -50,15 +50,16 @@ func bigtableInstrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClie } } -func gcsInstrumentation(ctx context.Context, scope string) (option.ClientOption, error) { +func gcsInstrumentation(ctx context.Context, clientType, scope string) (option.ClientOption, error) { transport, err := google_http.NewTransport(ctx, http.DefaultTransport, option.WithScopes(scope)) if err != nil { return nil, err } client := &http.Client{ Transport: instrumentedTransport{ - observer: gcsRequestDuration, - next: transport, + observer: gcsRequestDuration, + next: transport, + clientType: clientType, }, } return option.WithHTTPClient(client), nil @@ -73,15 +74,16 @@ func toOptions(opts []grpc.DialOption) []option.ClientOption { } type instrumentedTransport struct { - observer prometheus.ObserverVec - next http.RoundTripper + observer prometheus.ObserverVec + next http.RoundTripper + clientType string } func (i instrumentedTransport) RoundTrip(req *http.Request) (*http.Response, error) { start := time.Now() resp, err := i.next.RoundTrip(req) if err == nil { - i.observer.WithLabelValues(req.Method, strconv.Itoa(resp.StatusCode)).Observe(time.Since(start).Seconds()) + i.observer.WithLabelValues(req.Method, strconv.Itoa(resp.StatusCode), i.clientType).Observe(time.Since(start).Seconds()) } return resp, err } diff --git a/pkg/configs/client/client.go b/pkg/configs/client/client.go deleted file mode 100644 index a5528b29ecc..00000000000 --- a/pkg/configs/client/client.go +++ /dev/null @@ -1,165 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/cortexproject/cortex/pkg/configs" - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/util" - "github.com/go-kit/kit/log/level" -) - -// Client is what the ruler and altermanger needs from a config store to process rules. -type Client interface { - // GetRules returns all Cortex configurations from a configs API server - // that have been updated after the given configs.ID was last updated. - GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) - - // GetAlerts fetches all the alerts that have changes since since. - GetAlerts(ctx context.Context, since configs.ID) (*ConfigsResponse, error) -} - -// New creates a new ConfigClient. -func New(cfg Config) (Client, error) { - // All of this falderal is to allow for a smooth transition away from - // using the configs server and toward directly connecting to the database. - // See https://github.com/cortexproject/cortex/issues/619 - if cfg.ConfigsAPIURL.URL != nil { - return instrumented{ - next: configsClient{ - URL: cfg.ConfigsAPIURL.URL, - Timeout: cfg.ClientTimeout, - }, - }, nil - } - - db, err := db.New(cfg.DBConfig) - if err != nil { - return nil, err - } - return instrumented{ - next: dbStore{ - db: db, - }, - }, nil -} - -// configsClient allows retrieving recording and alerting rules from the configs server. -type configsClient struct { - URL *url.URL - Timeout time.Duration -} - -// GetRules implements ConfigClient. 
-func (c configsClient) GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) { - suffix := "" - if since != 0 { - suffix = fmt.Sprintf("?since=%d", since) - } - endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix) - response, err := doRequest(endpoint, c.Timeout, since) - if err != nil { - return nil, err - } - configs := map[string]configs.VersionedRulesConfig{} - for id, view := range response.Configs { - cfg := view.GetVersionedRulesConfig() - if cfg != nil { - configs[id] = *cfg - } - } - return configs, nil -} - -// GetAlerts implements ConfigClient. -func (c configsClient) GetAlerts(ctx context.Context, since configs.ID) (*ConfigsResponse, error) { - suffix := "" - if since != 0 { - suffix = fmt.Sprintf("?since=%d", since) - } - endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix) - return doRequest(endpoint, c.Timeout, since) -} - -func doRequest(endpoint string, timeout time.Duration, since configs.ID) (*ConfigsResponse, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - client := &http.Client{Timeout: timeout} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Invalid response from configs server: %v", resp.StatusCode) - } - - var config ConfigsResponse - if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { - level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) - return nil, err - } - - config.since = since - return &config, nil -} - -type dbStore struct { - db db.DB -} - -// GetRules implements ConfigClient. -func (d dbStore) GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) { - if since == 0 { - return d.db.GetAllRulesConfigs(ctx) - } - return d.db.GetRulesConfigs(ctx, since) -} - -// GetAlerts implements ConfigClient. -func (d dbStore) GetAlerts(ctx context.Context, since configs.ID) (*ConfigsResponse, error) { - var resp map[string]configs.View - var err error - if since == 0 { - resp, err = d.db.GetAllConfigs(ctx) - - } - resp, err = d.db.GetConfigs(ctx, since) - if err != nil { - return nil, err - } - - return &ConfigsResponse{ - since: since, - Configs: resp, - }, nil -} - -// ConfigsResponse is a response from server for GetConfigs. -type ConfigsResponse struct { - // The version since which these configs were changed - since configs.ID - - // Configs maps user ID to their latest configs.View. - Configs map[string]configs.View `json:"configs"` -} - -// GetLatestConfigID returns the last config ID from a set of configs. 
-func (c ConfigsResponse) GetLatestConfigID() configs.ID { - latest := c.since - for _, config := range c.Configs { - if config.ID > latest { - latest = config.ID - } - } - return latest -} diff --git a/pkg/configs/client/config.go b/pkg/configs/client/config.go deleted file mode 100644 index 3850b967c68..00000000000 --- a/pkg/configs/client/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -import ( - "context" - "flag" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/instrument" - - "github.com/cortexproject/cortex/pkg/configs" - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/util/flagext" -) - -// Config says where we can find the ruler configs. -type Config struct { - DBConfig db.Config - - // DEPRECATED - ConfigsAPIURL flagext.URLValue - - // DEPRECATED. HTTP timeout duration for requests made to the Weave Cloud - // configs service. - ClientTimeout time.Duration -} - -// RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.DBConfig.RegisterFlags(f) - f.Var(&cfg.ConfigsAPIURL, "ruler.configs.url", "DEPRECATED. URL of configs API server.") - f.DurationVar(&cfg.ClientTimeout, "ruler.client-timeout", 5*time.Second, "DEPRECATED. Timeout for requests to Weave Cloud configs service.") - flag.Var(&cfg.ConfigsAPIURL, "alertmanager.configs.url", "URL of configs API server.") - flag.DurationVar(&cfg.ClientTimeout, "alertmanager.configs.client-timeout", 5*time.Second, "Timeout for requests to Weave Cloud configs service.") -} - -var configsRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "configs_request_duration_seconds", - Help: "Time spent requesting configs.", - Buckets: prometheus.DefBuckets, -}, []string{"operation", "status_code"})) - -func init() { - configsRequestDuration.Register() -} - -type instrumented struct { - next Client -} - -func (i instrumented) GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) { - var cfgs map[string]configs.VersionedRulesConfig - err := instrument.CollectedRequest(context.Background(), "Configs.GetConfigs", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error { - var err error - cfgs, err = i.next.GetRules(ctx, since) // Warning: this will produce an incorrect result if the configID ever overflows - return err - }) - return cfgs, err -} - -func (i instrumented) GetAlerts(ctx context.Context, since configs.ID) (*ConfigsResponse, error) { - var cfgs *ConfigsResponse - err := instrument.CollectedRequest(context.Background(), "Configs.GetConfigs", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error { - var err error - cfgs, err = i.next.GetAlerts(ctx, since) // Warning: this will produce an incorrect result if the configID ever overflows - return err - }) - return cfgs, err -} diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 19604dd3ba0..ac0fae0c59e 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -18,7 +18,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/storage" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/configs/api" - config_client 
"github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ingester" @@ -69,8 +68,8 @@ type Config struct { TableManager chunk.TableManagerConfig `yaml:"table_manager,omitempty"` Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. + ConfigDB db.Config `yaml:"configdb,omitempty"` Ruler ruler.Config `yaml:"ruler,omitempty"` - ConfigStore config_client.Config `yaml:"config_store,omitempty"` Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager,omitempty"` } @@ -99,7 +98,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Encoding.RegisterFlags(f) c.Ruler.RegisterFlags(f) - c.ConfigStore.RegisterFlags(f) + c.ConfigDB.RegisterFlags(f) c.Alertmanager.RegisterFlags(f) // These don't seem to have a home. diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 51d224a7f74..9ef69fb37ce 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -21,7 +21,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/storage" "github.com/cortexproject/cortex/pkg/configs/api" - config_client "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ingester" @@ -320,26 +319,12 @@ func (t *Cortex) initRuler(cfg *Config) (err error) { cfg.Ruler.LifecyclerConfig.ListenPort = &cfg.Server.GRPCListenPort queryable, engine := querier.New(cfg.Querier, t.distributor, t.store) - rulesAPI, err := config_client.New(cfg.ConfigStore) - if err != nil { - return err - } - - t.ruler, err = ruler.NewRuler(cfg.Ruler, engine, queryable, t.distributor, rulesAPI) + t.ruler, err = ruler.NewRuler(cfg.Ruler, engine, queryable, t.distributor) if err != nil { return } - // Only serve the API for setting & getting rules configs if we're not - // serving configs from the configs API. Allows for smoother - // migration. See https://github.com/cortexproject/cortex/issues/619 - if cfg.ConfigStore.ConfigsAPIURL.URL == nil { - a, err := ruler.NewAPIFromConfig(cfg.ConfigStore.DBConfig) - if err != nil { - return err - } - a.RegisterRoutes(t.server.HTTP) - } + t.ruler.RegisterRoutes(t.server.HTTP) t.server.HTTP.Handle("/ruler_ring", t.ruler) return @@ -351,7 +336,7 @@ func (t *Cortex) stopRuler() error { } func (t *Cortex) initConfigs(cfg *Config) (err error) { - t.configDB, err = db.New(cfg.ConfigStore.DBConfig) + t.configDB, err = db.New(cfg.ConfigDB) if err != nil { return } @@ -367,14 +352,16 @@ func (t *Cortex) stopConfigs() error { } func (t *Cortex) initAlertmanager(cfg *Config) (err error) { - t.alertmanager, err = alertmanager.NewMultitenantAlertmanager(&cfg.Alertmanager, cfg.ConfigStore) + t.alertmanager, err = alertmanager.NewMultitenantAlertmanager(&cfg.Alertmanager) if err != nil { return } + + t.alertmanager.RegisterRoutes(t.server.HTTP) + go t.alertmanager.Run() t.server.HTTP.PathPrefix("/status").Handler(t.alertmanager.GetStatusHandler()) - // TODO this clashed with the queirer and the distributor, so we cannot // run them in the same process. 
 	t.server.HTTP.PathPrefix("/api/prom").Handler(middleware.AuthenticateUser.Wrap(t.alertmanager))
diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go
index aeee8fe5ba2..b9f98b7a8bc 100644
--- a/pkg/ruler/api.go
+++ b/pkg/ruler/api.go
@@ -1,118 +1,251 @@
 package ruler
 
 import (
-	"database/sql"
-	"encoding/json"
-	"fmt"
+	"errors"
+	"io/ioutil"
 	"net/http"
 
-	"github.com/go-kit/kit/log/level"
-	"github.com/gorilla/mux"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
 
-	"github.com/cortexproject/cortex/pkg/configs"
-	"github.com/cortexproject/cortex/pkg/configs/db"
+	store "github.com/cortexproject/cortex/pkg/storage/rules"
 	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log/level"
+	"github.com/gorilla/mux"
 	"github.com/weaveworks/common/user"
+	"gopkg.in/yaml.v2"
 )
 
-// API implements the configs api.
-type API struct {
-	db db.DB
-	http.Handler
-}
-
-// NewAPIFromConfig makes a new API from our database config.
-func NewAPIFromConfig(cfg db.Config) (*API, error) {
-	db, err := db.New(cfg)
-	if err != nil {
-		return nil, err
-	}
-	return NewAPI(db), nil
-}
-
-// NewAPI creates a new API.
-func NewAPI(db db.DB) *API {
-	a := &API{db: db}
-	r := mux.NewRouter()
-	a.RegisterRoutes(r)
-	a.Handler = r
-	return a
-}
+var (
+	// ErrNoNamespace signals that no namespace was provided in the url
+	ErrNoNamespace = errors.New("a namespace must be provided in the url")
+	// ErrNoGroupName signals that no group name was provided in the url
+	ErrNoGroupName = errors.New("a matching group name must be provided in the url")
+	// ErrNoRuleGroups signals that no rule groups were found
+	ErrNoRuleGroups = errors.New("no rule groups found")
+	// ErrNoUserID is returned when no user ID is provided
+	ErrNoUserID = errors.New("no id provided")
+)
 
-// RegisterRoutes registers the configs API HTTP routes with the provided Router.
-func (a *API) RegisterRoutes(r *mux.Router) {
+// RegisterRoutes registers the ruler rules API HTTP routes with the provided Router.
+func (r *Ruler) RegisterRoutes(router *mux.Router) {
+	// If no store is set, do not register routes in the api. This will only be
+	// the case if the configdb is used to store rules.
+	if r.store == nil {
+		return
+	}
 	for _, route := range []struct {
 		name, method, path string
 		handler            http.HandlerFunc
 	}{
-		{"get_rules", "GET", "/api/prom/rules", a.getConfig},
-		{"cas_rules", "POST", "/api/prom/rules", a.casConfig},
+		{"list_rules", "GET", "/api/prom/rules", r.listRules},
+		{"list_rules_namespace", "GET", "/api/prom/rules/{namespace}", r.listRules},
+		{"get_rulegroup", "GET", "/api/prom/rules/{namespace}/{groupName}", r.getRuleGroup},
+		{"set_rulegroup", "POST", "/api/prom/rules/{namespace}", r.createRuleGroup},
+		{"delete_rulegroup", "DELETE", "/api/prom/rules/{namespace}/{groupName}", r.deleteRuleGroup},
 	} {
-		r.Handle(route.path, route.handler).Methods(route.method).Name(route.name)
+		router.Handle(route.path, route.handler).Methods(route.method).Name(route.name)
 	}
 }
 
-// getConfig returns the request configuration.
-func (a *API) getConfig(w http.ResponseWriter, r *http.Request) {
-	userID, _, err := user.ExtractOrgIDFromHTTPRequest(r)
+func (r *Ruler) listRules(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+	userID, _, err := user.ExtractOrgIDFromHTTPRequest(req)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusUnauthorized)
 		return
 	}
-	logger := util.WithContext(r.Context(), util.Logger)
 
-	cfg, err := a.db.GetRulesConfig(r.Context(), userID)
-	if err == sql.ErrNoRows {
-		http.Error(w, "No configuration", http.StatusNotFound)
+	if userID == "" {
+		http.Error(w, ErrNoUserID.Error(), http.StatusUnauthorized)
+		return
+	}
+
+	options := store.RuleStoreConditions{
+		UserID: userID,
+	}
+
+	vars := mux.Vars(req)
+
+	namespace := vars["namespace"]
+	if namespace != "" {
+		level.Debug(logger).Log("msg", "retrieving rule groups with namespace", "userID", userID, "namespace", namespace)
+		options.Namespace = namespace
+	}
+
+	level.Debug(logger).Log("msg", "retrieving rule groups from rule store", "userID", userID)
+	rgs, err := r.store.ListRuleGroups(req.Context(), options)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	level.Debug(logger).Log("msg", "retrieved rule groups from rule store", "userID", userID, "num_namespaces", len(rgs))
+
+	if len(rgs) == 0 {
+		level.Info(logger).Log("msg", "no rule groups found", "userID", userID)
+		http.Error(w, ErrNoRuleGroups.Error(), http.StatusNotFound)
 		return
-	} else if err != nil {
-		level.Error(logger).Log("msg", "error getting config", "err", err)
+	}
+
+	formatted := rgs.Formatted(userID)
+
+	d, err := yaml.Marshal(&formatted)
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshalling yaml rule groups", "err", err)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
-	w.Header().Set("Content-Type", "application/json")
-	if err := json.NewEncoder(w).Encode(cfg); err != nil {
-		level.Error(logger).Log("msg", "error encoding config", "err", err)
+	w.Header().Set("Content-Type", "application/yaml")
+	if _, err := w.Write(d); err != nil {
+		level.Error(logger).Log("msg", "error writing yaml response", "err", err)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 }
 
-type configUpdateRequest struct {
-	OldConfig configs.RulesConfig `json:"old_config"`
-	NewConfig configs.RulesConfig `json:"new_config"`
+func (r *Ruler) getRuleGroup(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+
+	userID, _, err := user.ExtractOrgIDFromHTTPRequest(req)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusUnauthorized)
+		return
+	}
+
+	if userID == "" {
+		http.Error(w, ErrNoUserID.Error(), http.StatusUnauthorized)
+		return
+	}
+
+	vars := mux.Vars(req)
+	namespace, exists := vars["namespace"]
+	if !exists {
+		http.Error(w, ErrNoNamespace.Error(), http.StatusBadRequest)
+		return
+	}
+
+	groupName, exists := vars["groupName"]
+	if !exists {
+		http.Error(w, ErrNoGroupName.Error(), http.StatusBadRequest)
+		return
+	}
+
+	rg, err := r.store.GetRuleGroup(req.Context(), userID, namespace, groupName)
+	if err != nil {
+		if err == store.ErrGroupNotFound {
+			http.Error(w, err.Error(), http.StatusNotFound)
+			return
+		}
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	d, err := yaml.Marshal(&rg)
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshalling yaml rule group", "err", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
w.Header().Set("Content-Type", "application/yaml") + if _, err := w.Write(d); err != nil { + level.Error(logger).Log("msg", "error writing yaml response", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } } -func (a *API) casConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) +func (r *Ruler) createRuleGroup(w http.ResponseWriter, req *http.Request) { + logger := util.WithContext(req.Context(), util.Logger) + userID, _, err := user.ExtractOrgIDFromHTTPRequest(req) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := util.WithContext(r.Context(), util.Logger) - var updateReq configUpdateRequest - if err := json.NewDecoder(r.Body).Decode(&updateReq); err != nil { - level.Error(logger).Log("msg", "error decoding json body", "err", err) + if userID == "" { + http.Error(w, ErrNoUserID.Error(), http.StatusUnauthorized) + return + } + + vars := mux.Vars(req) + + namespace := vars["namespace"] + if namespace == "" { + level.Error(logger).Log("err", "no namespace provided with rule group") + http.Error(w, ErrNoNamespace.Error(), http.StatusBadRequest) + return + } + + payload, err := ioutil.ReadAll(req.Body) + if err != nil { + level.Error(logger).Log("err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } - if _, err = updateReq.NewConfig.Parse(); err != nil { - level.Error(logger).Log("msg", "invalid rules", "err", err) - http.Error(w, fmt.Sprintf("Invalid rules: %v", err), http.StatusBadRequest) + level.Debug(logger).Log("msg", "attempting to unmarshal rulegroup", "userID", userID, "group", string(payload)) + + rg := rulefmt.RuleGroup{} + err = yaml.Unmarshal(payload, &rg) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) return } - updated, err := a.db.SetRulesConfig(r.Context(), userID, updateReq.OldConfig, updateReq.NewConfig) + errs := store.ValidateRuleGroup(rg) + if len(errs) > 0 { + level.Error(logger).Log("err", err.Error()) + http.Error(w, errs[0].Error(), http.StatusBadRequest) + return + } + + err = r.store.SetRuleGroup(req.Context(), userID, namespace, rg) if err != nil { - level.Error(logger).Log("msg", "error storing config", "err", err) + level.Error(logger).Log("err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } - if !updated { - http.Error(w, "Supplied configuration doesn't match current configuration", http.StatusConflict) + + // Return a status accepted because the rule has been stored and queued for polling, but is not currently active + w.WriteHeader(http.StatusAccepted) +} + +func (r *Ruler) deleteRuleGroup(w http.ResponseWriter, req *http.Request) { + logger := util.WithContext(req.Context(), util.Logger) + userID, _, err := user.ExtractOrgIDFromHTTPRequest(req) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + if userID == "" { + http.Error(w, ErrNoUserID.Error(), http.StatusUnauthorized) + return } - w.WriteHeader(http.StatusNoContent) + + vars := mux.Vars(req) + namespace, exists := vars["namespace"] + if !exists { + http.Error(w, ErrNoNamespace.Error(), http.StatusUnauthorized) + return + } + + groupName, exists := vars["groupName"] + if !exists { + http.Error(w, ErrNoGroupName.Error(), http.StatusUnauthorized) + return + } + + err = r.store.DeleteRuleGroup(req.Context(), userID, namespace, groupName) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, 
err.Error(), http.StatusInternalServerError) + return + } + + // Return a status accepted because the rule has been stored and queued for polling, but is not currently active + w.WriteHeader(http.StatusAccepted) } diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go deleted file mode 100644 index 3d8e4f51c26..00000000000 --- a/pkg/ruler/api_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package ruler - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/cortexproject/cortex/pkg/configs" - "github.com/cortexproject/cortex/pkg/configs/api" - "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/configs/db" - "github.com/cortexproject/cortex/pkg/configs/db/dbtest" - "github.com/weaveworks/common/user" -) - -const ( - endpoint = "/api/prom/rules" -) - -var ( - app *API - database db.DB - counter int - privateAPI client.Client -) - -// setup sets up the environment for the tests. -func setup(t *testing.T) { - database = dbtest.Setup(t) - app = NewAPI(database) - counter = 0 - var err error - privateAPI, err = client.New(client.Config{ - DBConfig: db.Config{ - URI: "mock", // trigger client.NewConfigClient to use the mock DB. - Mock: database, - }, - }) - require.NoError(t, err) -} - -// cleanup cleans up the environment after a test. -func cleanup(t *testing.T) { - dbtest.Cleanup(t, database) -} - -// request makes a request to the configs API. -func request(t *testing.T, handler http.Handler, method, urlStr string, body io.Reader) *httptest.ResponseRecorder { - w := httptest.NewRecorder() - r, err := http.NewRequest(method, urlStr, body) - require.NoError(t, err) - handler.ServeHTTP(w, r) - return w -} - -// requestAsUser makes a request to the configs API as the given user. -func requestAsUser(t *testing.T, handler http.Handler, userID string, method, urlStr string, body io.Reader) *httptest.ResponseRecorder { - w := httptest.NewRecorder() - r, err := http.NewRequest(method, urlStr, body) - require.NoError(t, err) - r = r.WithContext(user.InjectOrgID(r.Context(), userID)) - user.InjectOrgIDIntoHTTPRequest(r.Context(), r) - handler.ServeHTTP(w, r) - return w -} - -// makeString makes a string, guaranteed to be unique within a test. -func makeString(pattern string) string { - counter++ - return fmt.Sprintf(pattern, counter) -} - -// makeUserID makes an arbitrary user ID. Guaranteed to be unique within a test. -func makeUserID() string { - return makeString("user%d") -} - -// makeRulerConfig makes an arbitrary ruler config -func makeRulerConfig(rfv configs.RuleFormatVersion) configs.RulesConfig { - switch rfv { - case configs.RuleFormatV1: - return configs.RulesConfig{ - Files: map[string]string{ - "filename.rules": makeString(` -# Config no. %d. -ALERT ScrapeFailed - IF up != 1 - FOR 10m - LABELS { severity="warning" } - ANNOTATIONS { - summary = "Scrape of {{$labels.job}} (pod: {{$labels.instance}}) failed.", - description = "Prometheus cannot reach the /metrics page on the {{$labels.instance}} pod.", - impact = "We have no monitoring data for {{$labels.job}} - {{$labels.instance}}. At worst, it's completely down. 
At best, we cannot reliably respond to operational issues.", - dashboardURL = "$${base_url}/admin/prometheus/targets", - } - `), - }, - FormatVersion: configs.RuleFormatV1, - } - case configs.RuleFormatV2: - return configs.RulesConfig{ - Files: map[string]string{ - "filename.rules": makeString(` -# Config no. %d. -groups: -- name: example - rules: - - alert: ScrapeFailed - expr: 'up != 1' - for: 10m - labels: - severity: warning - annotations: - summary: "Scrape of {{$labels.job}} (pod: {{$labels.instance}}) failed." - description: "Prometheus cannot reach the /metrics page on the {{$labels.instance}} pod." - impact: "We have no monitoring data for {{$labels.job}} - {{$labels.instance}}. At worst, it's completely down. At best, we cannot reliably respond to operational issues." - dashboardURL: "$${base_url}/admin/prometheus/targets" - `), - }, - FormatVersion: configs.RuleFormatV2, - } - default: - panic("unknown rule format") - } -} - -// parseVersionedRulesConfig parses a configs.VersionedRulesConfig from JSON. -func parseVersionedRulesConfig(t *testing.T, b []byte) configs.VersionedRulesConfig { - var x configs.VersionedRulesConfig - err := json.Unmarshal(b, &x) - require.NoError(t, err, "Could not unmarshal JSON: %v", string(b)) - return x -} - -// post a config -func post(t *testing.T, userID string, oldConfig configs.RulesConfig, newConfig configs.RulesConfig) configs.VersionedRulesConfig { - updateRequest := configUpdateRequest{ - OldConfig: oldConfig, - NewConfig: newConfig, - } - b, err := json.Marshal(updateRequest) - require.NoError(t, err) - reader := bytes.NewReader(b) - w := requestAsUser(t, app, userID, "POST", endpoint, reader) - require.Equal(t, http.StatusNoContent, w.Code) - return get(t, userID) -} - -// get a config -func get(t *testing.T, userID string) configs.VersionedRulesConfig { - w := requestAsUser(t, app, userID, "GET", endpoint, nil) - return parseVersionedRulesConfig(t, w.Body.Bytes()) -} - -// configs returns 404 if no config has been created yet. -func Test_GetConfig_NotFound(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - w := requestAsUser(t, app, userID, "GET", endpoint, nil) - assert.Equal(t, http.StatusNotFound, w.Code) -} - -// configs returns 401 to requests without authentication. -func Test_PostConfig_Anonymous(t *testing.T) { - setup(t) - defer cleanup(t) - - w := request(t, app, "POST", endpoint, nil) - assert.Equal(t, http.StatusUnauthorized, w.Code) -} - -// Posting to a configuration sets it so that you can get it again. -func Test_PostConfig_CreatesConfig(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - config := makeRulerConfig(configs.RuleFormatV2) - result := post(t, userID, configs.RulesConfig{}, config) - assert.Equal(t, config, result.Config) -} - -// Posting an invalid config when there's none set returns an error and leaves the config unset. 
-func Test_PostConfig_InvalidNewConfig(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - invalidConfig := configs.RulesConfig{ - Files: map[string]string{ - "some.rules": "invalid config", - }, - FormatVersion: configs.RuleFormatV2, - } - updateRequest := configUpdateRequest{ - OldConfig: configs.RulesConfig{}, - NewConfig: invalidConfig, - } - b, err := json.Marshal(updateRequest) - require.NoError(t, err) - reader := bytes.NewReader(b) - { - w := requestAsUser(t, app, userID, "POST", endpoint, reader) - require.Equal(t, http.StatusBadRequest, w.Code) - } - { - w := requestAsUser(t, app, userID, "GET", endpoint, nil) - require.Equal(t, http.StatusNotFound, w.Code) - } -} - -// Posting a v1 rule format configuration sets it so that you can get it again. -func Test_PostConfig_UpdatesConfig_V1RuleFormat(t *testing.T) { - setup(t) - app = NewAPI(database) - defer cleanup(t) - - userID := makeUserID() - config1 := makeRulerConfig(configs.RuleFormatV1) - view1 := post(t, userID, configs.RulesConfig{}, config1) - config2 := makeRulerConfig(configs.RuleFormatV1) - view2 := post(t, userID, config1, config2) - assert.True(t, view2.ID > view1.ID, "%v > %v", view2.ID, view1.ID) - assert.Equal(t, config2, view2.Config) -} - -// Posting an invalid v1 rule format config when there's one already set returns an error and leaves the config as is. -func Test_PostConfig_InvalidChangedConfig_V1RuleFormat(t *testing.T) { - setup(t) - app = NewAPI(database) - defer cleanup(t) - - userID := makeUserID() - config := makeRulerConfig(configs.RuleFormatV1) - post(t, userID, configs.RulesConfig{}, config) - invalidConfig := configs.RulesConfig{ - Files: map[string]string{ - "some.rules": "invalid config", - }, - FormatVersion: configs.RuleFormatV1, - } - updateRequest := configUpdateRequest{ - OldConfig: configs.RulesConfig{}, - NewConfig: invalidConfig, - } - b, err := json.Marshal(updateRequest) - require.NoError(t, err) - reader := bytes.NewReader(b) - { - w := requestAsUser(t, app, userID, "POST", endpoint, reader) - require.Equal(t, http.StatusBadRequest, w.Code) - } - result := get(t, userID) - assert.Equal(t, config, result.Config) -} - -// Posting a v2 rule format configuration sets it so that you can get it again. -func Test_PostConfig_UpdatesConfig_V2RuleFormat(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - config1 := makeRulerConfig(configs.RuleFormatV2) - view1 := post(t, userID, configs.RulesConfig{}, config1) - config2 := makeRulerConfig(configs.RuleFormatV2) - view2 := post(t, userID, config1, config2) - assert.True(t, view2.ID > view1.ID, "%v > %v", view2.ID, view1.ID) - assert.Equal(t, config2, view2.Config) -} - -// Posting an invalid v2 rule format config when there's one already set returns an error and leaves the config as is. 
-func Test_PostConfig_InvalidChangedConfig_V2RuleFormat(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - config := makeRulerConfig(configs.RuleFormatV2) - post(t, userID, configs.RulesConfig{}, config) - invalidConfig := configs.RulesConfig{ - Files: map[string]string{ - "some.rules": "invalid config", - }, - } - updateRequest := configUpdateRequest{ - OldConfig: configs.RulesConfig{}, - NewConfig: invalidConfig, - } - b, err := json.Marshal(updateRequest) - require.NoError(t, err) - reader := bytes.NewReader(b) - { - w := requestAsUser(t, app, userID, "POST", endpoint, reader) - require.Equal(t, http.StatusBadRequest, w.Code) - } - result := get(t, userID) - assert.Equal(t, config, result.Config) -} - -// Posting a config with an invalid rule format version returns an error and leaves the config as is. -func Test_PostConfig_InvalidChangedConfig_InvalidRuleFormat(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - config := makeRulerConfig(configs.RuleFormatV2) - post(t, userID, configs.RulesConfig{}, config) - - // We have to provide the marshaled JSON manually here because json.Marshal() would error - // on a bad rule format version. - reader := strings.NewReader(`{"old_config":{"format_version":"1","files":null},"new_config":{"format_version":"","files":{"filename.rules":"# Empty."}}}`) - { - w := requestAsUser(t, app, userID, "POST", endpoint, reader) - require.Equal(t, http.StatusBadRequest, w.Code) - } - result := get(t, userID) - assert.Equal(t, config, result.Config) -} - -// Different users can have different configurations. -func Test_PostConfig_MultipleUsers(t *testing.T) { - setup(t) - defer cleanup(t) - - userID1 := makeUserID() - userID2 := makeUserID() - config1 := post(t, userID1, configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - config2 := post(t, userID2, configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - foundConfig1 := get(t, userID1) - assert.Equal(t, config1, foundConfig1) - foundConfig2 := get(t, userID2) - assert.Equal(t, config2, foundConfig2) - assert.True(t, config2.ID > config1.ID, "%v > %v", config2.ID, config1.ID) -} - -// GetAllConfigs returns an empty list of configs if there aren't any. -func Test_GetAllConfigs_Empty(t *testing.T) { - setup(t) - defer cleanup(t) - - configs, err := privateAPI.GetRules(context.Background(), 0) - assert.NoError(t, err, "error getting configs") - assert.Equal(t, 0, len(configs)) -} - -// GetAllConfigs returns all created configs. -func Test_GetAllConfigs(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - config := makeRulerConfig(configs.RuleFormatV2) - view := post(t, userID, configs.RulesConfig{}, config) - - found, err := privateAPI.GetRules(context.Background(), 0) - assert.NoError(t, err, "error getting configs") - assert.Equal(t, map[string]configs.VersionedRulesConfig{ - userID: view, - }, found) -} - -// GetAllConfigs returns the *newest* versions of all created configs. 
-func Test_GetAllConfigs_Newest(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - - config1 := post(t, userID, configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - config2 := post(t, userID, config1.Config, makeRulerConfig(configs.RuleFormatV2)) - lastCreated := post(t, userID, config2.Config, makeRulerConfig(configs.RuleFormatV2)) - - found, err := privateAPI.GetRules(context.Background(), 0) - assert.NoError(t, err, "error getting configs") - assert.Equal(t, map[string]configs.VersionedRulesConfig{ - userID: lastCreated, - }, found) -} - -func Test_GetConfigs_IncludesNewerConfigsAndExcludesOlder(t *testing.T) { - setup(t) - defer cleanup(t) - - post(t, makeUserID(), configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - config2 := post(t, makeUserID(), configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - userID3 := makeUserID() - config3 := post(t, userID3, configs.RulesConfig{}, makeRulerConfig(configs.RuleFormatV2)) - - found, err := privateAPI.GetRules(context.Background(), config2.ID) - assert.NoError(t, err, "error getting configs") - assert.Equal(t, map[string]configs.VersionedRulesConfig{ - userID3: config3, - }, found) -} - -// postAlertmanagerConfig posts an alertmanager config to the alertmanager configs API. -func postAlertmanagerConfig(t *testing.T, userID, configFile string) { - config := configs.Config{ - AlertmanagerConfig: configFile, - RulesConfig: configs.RulesConfig{}, - } - b, err := json.Marshal(config) - require.NoError(t, err) - reader := bytes.NewReader(b) - configsAPI := api.New(database) - w := requestAsUser(t, configsAPI, userID, "POST", "/api/prom/configs/alertmanager", reader) - require.Equal(t, http.StatusNoContent, w.Code) -} - -// getAlertmanagerConfig posts an alertmanager config to the alertmanager configs API. -func getAlertmanagerConfig(t *testing.T, userID string) string { - w := requestAsUser(t, api.New(database), userID, "GET", "/api/prom/configs/alertmanager", nil) - var x configs.View - b := w.Body.Bytes() - err := json.Unmarshal(b, &x) - require.NoError(t, err, "Could not unmarshal JSON: %v", string(b)) - return x.Config.AlertmanagerConfig -} - -// If a user has only got alertmanager config set, then we learn nothing about them via GetConfigs. -func Test_AlertmanagerConfig_NotInAllConfigs(t *testing.T) { - setup(t) - defer cleanup(t) - - config := makeString(` - # Config no. %d. - route: - receiver: noop - - receivers: - - name: noop`) - postAlertmanagerConfig(t, makeUserID(), config) - - found, err := privateAPI.GetRules(context.Background(), 0) - assert.NoError(t, err, "error getting configs") - assert.Equal(t, map[string]configs.VersionedRulesConfig{}, found) -} - -// Setting a ruler config doesn't change alertmanager config. -func Test_AlertmanagerConfig_RulerConfigDoesntChangeIt(t *testing.T) { - setup(t) - defer cleanup(t) - - userID := makeUserID() - alertmanagerConfig := makeString(` - # Config no. %d. 
- route: - receiver: noop - - receivers: - - name: noop`) - postAlertmanagerConfig(t, userID, alertmanagerConfig) - - rulerConfig := makeRulerConfig(configs.RuleFormatV2) - post(t, userID, configs.RulesConfig{}, rulerConfig) - - newAlertmanagerConfig := getAlertmanagerConfig(t, userID) - assert.Equal(t, alertmanagerConfig, newAlertmanagerConfig) -} diff --git a/pkg/ruler/group.go b/pkg/ruler/group.go index a9a9d34678e..be56338f35d 100644 --- a/pkg/ruler/group.go +++ b/pkg/ruler/group.go @@ -7,24 +7,24 @@ import ( "github.com/prometheus/prometheus/rules" ) -// group is a wrapper around a prometheus rules.Group, with a mutable appendable +// wrappedGroup is a wrapper around a prometheus rules.Group, with a mutable appendable // appendable stored here will be the same appendable as in promGroup.opts.Appendable -type group struct { +type wrappedGroup struct { promGroup *rules.Group appendable *appendableAppender } -func newGroup(name string, rls []rules.Rule, appendable *appendableAppender, opts *rules.ManagerOptions) *group { +func newGroup(name string, rls []rules.Rule, appendable *appendableAppender, opts *rules.ManagerOptions) *wrappedGroup { delay := 0 * time.Second // Unused, so 0 value is fine. promGroup := rules.NewGroup(name, "none", delay, rls, false, opts) - return &group{promGroup, appendable} + return &wrappedGroup{promGroup, appendable} } -func (g *group) Eval(ctx context.Context, ts time.Time) { +func (g *wrappedGroup) Eval(ctx context.Context, ts time.Time) { g.appendable.ctx = ctx g.promGroup.Eval(ctx, ts) } -func (g *group) Rules() []rules.Rule { +func (g *wrappedGroup) Rules() []rules.Rule { return g.promGroup.Rules() } diff --git a/pkg/ruler/kv_poller.go b/pkg/ruler/kv_poller.go new file mode 100644 index 00000000000..d4f1958841f --- /dev/null +++ b/pkg/ruler/kv_poller.go @@ -0,0 +1,113 @@ +package ruler + +import ( + "context" + + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/usertracker" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/prometheus/pkg/rulefmt" +) + +type trackedPoller struct { + tracker *usertracker.Tracker + store rules.RuleStore + + initialized bool +} + +func newTrackedPoller(tracker *usertracker.Tracker, store rules.RuleStore) (*trackedPoller, error) { + return &trackedPoller{ + tracker: tracker, + store: store, + + initialized: false, + }, nil +} + +func (p *trackedPoller) trackedRuleStore() *trackedRuleStore { + return &trackedRuleStore{ + tracker: p.tracker, + store: p.store, + } +} + +func (p *trackedPoller) PollRules(ctx context.Context) (map[string][]rules.RuleGroup, error) { + updatedRules := map[string][]rules.RuleGroup{} + + level.Debug(util.Logger).Log("msg", "polling for new rules") + + // First poll will return all rule groups + if !p.initialized { + level.Debug(util.Logger).Log("msg", "first poll, loading all rules") + rgs, err := p.store.ListRuleGroups(ctx, rules.RuleStoreConditions{}) + if err != nil { + return nil, err + } + for _, rg := range rgs { + if _, exists := updatedRules[rg.User()]; !exists { + updatedRules[rg.User()] = []rules.RuleGroup{rg} + } else { + updatedRules[rg.User()] = append(updatedRules[rg.User()], rg) + } + } + p.initialized = true + } else { + users := p.tracker.GetUpdatedUsers(ctx) + for _, u := range users { + level.Debug(util.Logger).Log("msg", "poll found updated user", "user", u) + rgs, err 
:= p.store.ListRuleGroups(ctx, rules.RuleStoreConditions{ + UserID: u, + }) + if err != nil { + return nil, err + } + + updatedRules[u] = rgs + } + } + + return updatedRules, nil +} + +func (p *trackedPoller) Stop() { + p.tracker.Stop() +} + +type trackedRuleStore struct { + tracker *usertracker.Tracker + store rules.RuleStore +} + +// ListRuleGroups returns the set of all rule groups matching the provided conditions +func (w *trackedRuleStore) ListRuleGroups(ctx context.Context, options rules.RuleStoreConditions) (rules.RuleGroupList, error) { + return w.store.ListRuleGroups(ctx, options) +} + +// GetRuleGroup retrieves the specified rule group from the backend store +func (w *trackedRuleStore) GetRuleGroup(ctx context.Context, userID, namespace, group string) (rules.RuleGroup, error) { + return w.store.GetRuleGroup(ctx, userID, namespace, group) +} + +// SetRuleGroup updates a rule group in the backend persistent store, then it pushes a change update to the +// userID key entry in the KV store +func (w *trackedRuleStore) SetRuleGroup(ctx context.Context, userID, namespace string, group rulefmt.RuleGroup) error { + err := w.store.SetRuleGroup(ctx, userID, namespace, group) + if err != nil { + return err + } + + return w.tracker.UpdateUser(ctx, userID) +} + +// DeleteRuleGroup deletes a rule group in the backend persistent store, then it pushes a change update to the +// userID key entry in the KV store +func (w *trackedRuleStore) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { + err := w.store.DeleteRuleGroup(ctx, userID, namespace, group) + if err != nil { + return err + } + + return w.tracker.UpdateUser(ctx, userID) +} diff --git a/pkg/ruler/lifecycle.go b/pkg/ruler/lifecycle.go index 0fc7020a566..49dfb0034d8 100644 --- a/pkg/ruler/lifecycle.go +++ b/pkg/ruler/lifecycle.go @@ -8,10 +8,6 @@ import ( "github.com/go-kit/kit/log/level" ) -const ( - pendingSearchIterations = 10 -) - // TransferOut is a noop for the ruler func (r *Ruler) TransferOut(ctx context.Context) error { return nil diff --git a/pkg/ruler/lifecycle_test.go b/pkg/ruler/lifecycle_test.go index d8240552211..272cf43e81a 100644 --- a/pkg/ruler/lifecycle_test.go +++ b/pkg/ruler/lifecycle_test.go @@ -8,8 +8,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/test" ) -const userID = "1" - // TestRulerShutdown tests shutting down ruler unregisters correctly func TestRulerShutdown(t *testing.T) { config := defaultRulerConfig() diff --git a/pkg/ruler/mock_store.go b/pkg/ruler/mock_store.go new file mode 100644 index 00000000000..73c24c82bac --- /dev/null +++ b/pkg/ruler/mock_store.go @@ -0,0 +1,116 @@ +package ruler + +import ( + "context" + "strings" + "sync" + + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/prometheus/prometheus/pkg/rulefmt" +) + +type mockRuleStore struct { + sync.Mutex + rules map[string]rules.RuleGroup + + pollPayload map[string][]rules.RuleGroup +} + +func (m *mockRuleStore) PollRules(ctx context.Context) (map[string][]rules.RuleGroup, error) { + m.Lock() + defer m.Unlock() + pollPayload := m.pollPayload + m.pollPayload = map[string][]rules.RuleGroup{} + return pollPayload, nil +} + +func (m *mockRuleStore) Stop() {} + +// RuleStore returns a RuleStore from the client +func (m *mockRuleStore) RuleStore() rules.RuleStore { + return m +} + +func (m *mockRuleStore) ListRuleGroups(ctx context.Context, options rules.RuleStoreConditions) (rules.RuleGroupList, error) { + m.Lock() +
defer m.Unlock() + + groupPrefix := options.UserID + ":" + + namespaces := []string{} + nss := rules.RuleGroupList{} + for n := range m.rules { + if strings.HasPrefix(n, groupPrefix) { + components := strings.Split(n, ":") + if len(components) != 3 { + continue + } + namespaces = append(namespaces, components[1]) + } + } + + if len(namespaces) == 0 { + return nss, rules.ErrUserNotFound + } + + for _, n := range namespaces { + ns, err := m.getRuleNamespace(ctx, options.UserID, n) + if err != nil { + continue + } + + nss = append(nss, ns...) + } + + return nss, nil +} + +func (m *mockRuleStore) getRuleNamespace(ctx context.Context, userID string, namespace string) (rules.RuleGroupList, error) { + groupPrefix := userID + ":" + namespace + ":" + + ns := rules.RuleGroupList{} + for n, g := range m.rules { + if strings.HasPrefix(n, groupPrefix) { + ns = append(ns, g) + } + } + + if len(ns) == 0 { + return ns, rules.ErrGroupNamespaceNotFound + } + + return ns, nil +} + +func (m *mockRuleStore) GetRuleGroup(ctx context.Context, userID string, namespace string, group string) (rules.RuleGroup, error) { + m.Lock() + defer m.Unlock() + + groupID := userID + ":" + namespace + ":" + group + g, ok := m.rules[groupID] + + if !ok { + return nil, rules.ErrGroupNotFound + } + + return g, nil + +} + +func (m *mockRuleStore) SetRuleGroup(ctx context.Context, userID string, namespace string, group rulefmt.RuleGroup) error { + m.Lock() + defer m.Unlock() + + groupID := userID + ":" + namespace + ":" + group.Name + m.rules[groupID] = rules.NewRuleGroup(group.Name, namespace, userID, group.Rules) + return nil +} + +func (m *mockRuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, group string) error { + m.Lock() + defer m.Unlock() + + groupID := userID + ":" + namespace + ":" + group + delete(m.rules, groupID) + return nil +} diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 790d0a12f00..87335f5a2a1 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -10,21 +10,21 @@ import ( "time" "github.com/go-kit/kit/log/level" - opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/rules" - "github.com/prometheus/prometheus/storage" + promRules "github.com/prometheus/prometheus/rules" + promStorage "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/strutil" "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" - "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/storage/rules" + store "github.com/cortexproject/cortex/pkg/storage/rules" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/weaveworks/common/instrument" @@ -36,24 +36,17 @@ var ( Namespace: "cortex", Name: "group_evaluation_duration_seconds", Help: "The 
duration for a rule group to execute.", - Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 25}, - }) - rulesProcessed = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "rules_processed_total", - Help: "How many rules have been processed.", + Buckets: []float64{.5, 1, 2.5, 5, 10, 25, 60, 120}, }) ringCheckErrors = promauto.NewCounter(prometheus.CounterOpts{ Namespace: "cortex", Name: "ruler_ring_check_errors_total", Help: "Number of errors that have occurred when checking the ring for ownership", }) - ruleMetrics *rules.Metrics ) func init() { evalDuration.Register() - ruleMetrics = rules.NewGroupMetrics(prometheus.DefaultRegisterer) } // Config is the configuration for the recording rules server. @@ -85,11 +78,14 @@ type Config struct { SearchPendingFor time.Duration LifecyclerConfig ring.LifecyclerConfig FlushCheckPeriod time.Duration + + StoreConfig RuleStoreConfig } // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.LifecyclerConfig.RegisterFlagsWithPrefix("ruler.", f) + cfg.StoreConfig.RegisterFlags(f) cfg.ExternalURL.URL, _ = url.Parse("") // Must be non-nil f.Var(&cfg.ExternalURL, "ruler.external.url", "URL of alerts return path.") @@ -113,7 +109,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { type Ruler struct { cfg Config engine *promql.Engine - queryable storage.Queryable + queryable promStorage.Queryable pusher Pusher alertURL *url.URL notifierCfg *config.Config @@ -124,13 +120,19 @@ type Ruler struct { lifecycler *ring.Lifecycler ring *ring.Ring + store rules.RuleStore + // Per-user notifiers with separate queues. notifiersMtx sync.Mutex notifiers map[string]*rulerNotifier + + // Per-user rules metrics + userMetricsMtx sync.Mutex + userMetrics map[string]*promRules.Metrics } // NewRuler creates a new ruler from a distributor and chunk store. 
-func NewRuler(cfg Config, engine *promql.Engine, queryable storage.Queryable, d *distributor.Distributor, rulesAPI client.Client) (*Ruler, error) { +func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, d *distributor.Distributor) (*Ruler, error) { if cfg.NumWorkers <= 0 { return nil, fmt.Errorf("must have at least 1 worker, got %d", cfg.NumWorkers) } @@ -140,6 +142,11 @@ func NewRuler(cfg Config, engine *promql.Engine, queryable storage.Queryable, d return nil, err } + rulePoller, ruleStore, err := NewRuleStorage(cfg.StoreConfig) + if err != nil { + return nil, err + } + ruler := &Ruler{ cfg: cfg, engine: engine, @@ -149,9 +156,11 @@ func NewRuler(cfg Config, engine *promql.Engine, queryable storage.Queryable, d notifierCfg: ncfg, notifiers: map[string]*rulerNotifier{}, workerWG: &sync.WaitGroup{}, + userMetrics: map[string]*promRules.Metrics{}, + store: ruleStore, } - ruler.scheduler = newScheduler(rulesAPI, cfg.EvaluationInterval, cfg.EvaluationInterval, ruler.newGroup) + ruler.scheduler = newScheduler(rulePoller, cfg.EvaluationInterval, cfg.EvaluationInterval, ruler.newGroup) // If sharding is enabled, create/join a ring to distribute tokens to // the ruler @@ -208,35 +217,53 @@ func (r *Ruler) Stop() { } } -func (r *Ruler) newGroup(userID string, groupName string, rls []rules.Rule) (*group, error) { +func (r *Ruler) newGroup(ctx context.Context, g store.RuleGroup) (*wrappedGroup, error) { + user := g.User() appendable := &appendableAppender{pusher: r.pusher} - notifier, err := r.getOrCreateNotifier(userID) + notifier, err := r.getOrCreateNotifier(user) + if err != nil { + return nil, err + } + + rls, err := g.Rules(ctx) if err != nil { return nil, err } - opts := &rules.ManagerOptions{ + + // Get the rule group metrics for this user, or create them if they do not exist + r.userMetricsMtx.Lock() + metrics, exists := r.userMetrics[user] + if !exists { + // Wrap the default registerer with the user's ID so the rule group metrics are labelled per user + reg := prometheus.WrapRegistererWith(prometheus.Labels{"user": user}, prometheus.DefaultRegisterer) + metrics = promRules.NewGroupMetrics(reg) + r.userMetrics[user] = metrics + } + r.userMetricsMtx.Unlock() + + opts := &promRules.ManagerOptions{ Appendable: appendable, - QueryFunc: rules.EngineQueryFunc(r.engine, r.queryable), + QueryFunc: promRules.EngineQueryFunc(r.engine, r.queryable), Context: context.Background(), ExternalURL: r.alertURL, NotifyFunc: sendAlerts(notifier, r.alertURL.String()), Logger: util.Logger, - Metrics: ruleMetrics, + Metrics: metrics, } - return newGroup(groupName, rls, appendable, opts), nil + return newGroup(g.ID(), rls, appendable, opts), nil } -// sendAlerts implements a rules.NotifyFunc for a Notifier. +// sendAlerts implements a promRules.NotifyFunc for a Notifier. // It filters any non-firing alerts from the input. // // Copied from Prometheus's main.go. -func sendAlerts(n *notifier.Manager, externalURL string) rules.NotifyFunc { - return func(ctx native_ctx.Context, expr string, alerts ...*rules.Alert) { +func sendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc { + return func(ctx native_ctx.Context, expr string, alerts ...*promRules.Alert) { var res []*notifier.Alert for _, alert := range alerts { // Only send actually firing alerts.
- if alert.State == rules.StatePending { + if alert.State == promRules.StatePending { continue } a := ¬ifier.Alert{ @@ -292,33 +319,6 @@ func (r *Ruler) getOrCreateNotifier(userID string) (*notifier.Manager, error) { return n.notifier, nil } -// Evaluate a list of rules in the given context. -func (r *Ruler) Evaluate(userID string, item *workItem) { - ctx := user.InjectOrgID(context.Background(), userID) - logger := util.WithContext(ctx, util.Logger) - if r.cfg.EnableSharding && !r.ownsRule(item.hash) { - level.Debug(util.Logger).Log("msg", "ruler: skipping evaluation, not owned", "user_id", item.userID, "group", item.groupName) - return - } - level.Debug(logger).Log("msg", "evaluating rules...", "num_rules", len(item.group.Rules())) - ctx, cancelTimeout := context.WithTimeout(ctx, r.cfg.GroupTimeout) - instrument.CollectedRequest(ctx, "Evaluate", evalDuration, nil, func(ctx native_ctx.Context) error { - if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetTag("instance", userID) - span.SetTag("groupName", item.groupName) - } - item.group.Eval(ctx, time.Now()) - return nil - }) - if err := ctx.Err(); err == nil { - cancelTimeout() // release resources - } else { - level.Warn(logger).Log("msg", "context error", "error", err) - } - - rulesProcessed.Add(float64(len(item.group.Rules()))) -} - func (r *Ruler) ownsRule(hash uint32) bool { rlrs, err := r.ring.Get(hash, ring.Read) // If an error occurs evaluate a rule as if it is owned @@ -330,9 +330,10 @@ func (r *Ruler) ownsRule(hash uint32) bool { return true } if rlrs.Ingesters[0].Addr == r.lifecycler.Addr { + level.Debug(util.Logger).Log("msg", "rule group owned", "owner_addr", rlrs.Ingesters[0].Addr, "addr", r.lifecycler.Addr) return true } - level.Debug(util.Logger).Log("msg", "rule group not owned, address does not match", "owner", rlrs.Ingesters[0].Addr, "current", r.cfg.LifecyclerConfig.Addr) + level.Debug(util.Logger).Log("msg", "rule group not owned, address does not match", "owner_addr", rlrs.Ingesters[0].Addr, "addr", r.lifecycler.Addr) return false } @@ -353,6 +354,9 @@ func (r *Ruler) ServeHTTP(w http.ResponseWriter, req *http.Request) { ` w.WriteHeader(http.StatusOK) - w.Write([]byte(unshardedPage)) + _, err := w.Write([]byte(unshardedPage)) + if err != nil { + level.Error(util.Logger).Log("msg", "unable to serve status page", "err", err) + } } } diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 51725da5c7b..c125a85beb1 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -1,42 +1,34 @@ package ruler import ( - "context" "net/http" "net/http/httptest" "sync" "testing" "time" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" + "github.com/stretchr/testify/require" - "github.com/cortexproject/cortex/pkg/configs" - client_config "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/consul" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/prometheus/prometheus/notifier" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" 
"github.com/stretchr/testify/assert" "github.com/weaveworks/common/user" ) -type mockRuleStore struct{} - -func (m *mockRuleStore) GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) { - return map[string]configs.VersionedRulesConfig{}, nil -} - -func (m *mockRuleStore) GetAlerts(ctx context.Context, since configs.ID) (*client_config.ConfigsResponse, error) { - return nil, nil -} - func defaultRulerConfig() Config { codec := codec.Proto{Factory: ring.ProtoDescFactory} consul := consul.NewInMemoryClient(codec) - cfg := Config{} + cfg := Config{ + StoreConfig: RuleStoreConfig{ + mock: &mockRuleStore{}, + }, + } flagext.DefaultValues(&cfg) flagext.DefaultValues(&cfg.LifecyclerConfig) cfg.LifecyclerConfig.RingConfig.ReplicationFactor = 1 @@ -59,7 +51,8 @@ func newTestRuler(t *testing.T, cfg Config) *Ruler { Timeout: 2 * time.Minute, }) queryable := querier.NewQueryable(nil, nil, nil, 0) - ruler, err := NewRuler(cfg, engine, queryable, nil, &mockRuleStore{}) + ruler, err := NewRuler(cfg, engine, queryable, nil) + ruler.store = &mockRuleStore{} if err != nil { t.Fatal(err) } @@ -78,7 +71,8 @@ func TestNotifierSendsUserIDHeader(t *testing.T) { defer ts.Close() cfg := defaultRulerConfig() - cfg.AlertmanagerURL.Set(ts.URL) + err := cfg.AlertmanagerURL.Set(ts.URL) + require.NoError(t, err) cfg.AlertmanagerDiscovery = false r := newTestRuler(t, cfg) diff --git a/pkg/ruler/scheduler.go b/pkg/ruler/scheduler.go index 3a9ff8ec704..8466a8d8e1f 100644 --- a/pkg/ruler/scheduler.go +++ b/pkg/ruler/scheduler.go @@ -9,24 +9,14 @@ import ( "sync" "time" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log/level" "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/rules" - - "github.com/cortexproject/cortex/pkg/configs" - config_client "github.com/cortexproject/cortex/pkg/configs/client" - "github.com/cortexproject/cortex/pkg/util" ) -var backoffConfig = util.BackoffConfig{ - // Backoff for loading initial configuration set. 
- MinBackoff: 100 * time.Millisecond, - MaxBackoff: 2 * time.Second, -} - const ( timeLogFormat = "2006-01-02T15:04:05" ) @@ -35,32 +25,39 @@ var ( totalConfigs = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "cortex", Name: "scheduler_configs_total", - Help: "How many configs the scheduler knows about.", + Help: "How many user configs the scheduler knows about.", }) totalRuleGroups = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "cortex", Name: "scheduler_groups_total", Help: "How many rule groups the scheduler is currently evaluating", }) - configUpdates = promauto.NewCounter(prometheus.CounterOpts{ + evalLatency = promauto.NewHistogram(prometheus.HistogramOpts{ Namespace: "cortex", - Name: "scheduler_config_updates_total", - Help: "How many config updates the scheduler has made.", + Name: "group_evaluation_latency_seconds", + Help: "How far behind the target time each rule group executed.", + Buckets: []float64{.025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}, }) + iterationsMissed = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "rule_group_iterations_missed_total", + Help: "The total number of rule group evaluations missed due to slow rule group evaluation.", + }, []string{"user"}) ) type workItem struct { - userID string - groupName string - hash uint32 - group *group - scheduled time.Time - generation configs.ID // a monotonically increasing number used to spot out of date work items + userID string + groupID string + hash uint32 + group *wrappedGroup + scheduled time.Time + + done chan struct{} } // Key implements ScheduledItem func (w workItem) Key() string { - return w.userID + ":" + w.groupName + return w.userID + ":" + w.groupID } // Scheduled implements ScheduledItem @@ -68,40 +65,35 @@ func (w workItem) Scheduled() time.Time { return w.scheduled } -// Defer returns a work item with updated rules, rescheduled to a later time. -func (w workItem) Defer(interval time.Duration) workItem { - return workItem{w.userID, w.groupName, w.hash, w.group, w.scheduled.Add(interval), w.generation} -} - func (w workItem) String() string { - return fmt.Sprintf("%s:%s:%d@%s", w.userID, w.groupName, len(w.group.Rules()), w.scheduled.Format(timeLogFormat)) + return fmt.Sprintf("%s:%s:%d@%s", w.userID, w.groupID, len(w.group.Rules()), w.scheduled.Format(timeLogFormat)) } type userConfig struct { - rules map[string][]rules.Rule - generation configs.ID // a monotonically increasing number used to spot out of date work items + done chan struct{} + id string + rules []rules.RuleGroup } -type groupFactory func(userID string, groupName string, rls []rules.Rule) (*group, error) +type groupFactory func(context.Context, rules.RuleGroup) (*wrappedGroup, error) type scheduler struct { - ruleStore config_client.Client + poller rules.RulePoller evaluationInterval time.Duration // how often we re-evaluate each rule set q *SchedulingQueue pollInterval time.Duration // how often we check for new config - cfgs map[string]userConfig // all rules for all users - latestConfig configs.ID // # of last update received from config - groupFn groupFactory // function to create a new group + cfgs map[string]userConfig // all rules for all users + groupFn groupFactory // function to create a new group sync.RWMutex done chan struct{} } // newScheduler makes a new scheduler. 
-func newScheduler(ruleStore config_client.Client, evaluationInterval, pollInterval time.Duration, groupFn groupFactory) *scheduler { +func newScheduler(poller rules.RulePoller, evaluationInterval, pollInterval time.Duration, groupFn groupFactory) *scheduler { return &scheduler{ - ruleStore: ruleStore, + poller: poller, evaluationInterval: evaluationInterval, pollInterval: pollInterval, q: NewSchedulingQueue(clockwork.NewRealClock()), @@ -116,13 +108,16 @@ func newScheduler(ruleStore config_client.Client, evaluationInterval, pollInterv func (s *scheduler) Run() { level.Debug(util.Logger).Log("msg", "scheduler started") - // Load initial set of all configurations before polling for new ones. - s.addNewConfigs(time.Now(), s.loadAllConfigs()) + err := s.updateConfigs(context.TODO()) + if err != nil { + level.Error(util.Logger).Log("msg", "scheduler: error updating rule groups", "err", err) + } + ticker := time.NewTicker(s.pollInterval) for { select { - case now := <-ticker.C: - err := s.updateConfigs(now) + case <-ticker.C: + err := s.updateConfigs(context.TODO()) if err != nil { level.Warn(util.Logger).Log("msg", "scheduler: error updating configs", "err", err) } @@ -135,72 +130,97 @@ func (s *scheduler) Run() { } func (s *scheduler) Stop() { + s.poller.Stop() close(s.done) s.q.Close() level.Debug(util.Logger).Log("msg", "scheduler stopped") } -// Load the full set of configurations from the server, retrying with backoff -// until we can get them. -func (s *scheduler) loadAllConfigs() map[string]configs.VersionedRulesConfig { - backoff := util.NewBackoff(context.Background(), backoffConfig) - for { - cfgs, err := s.poll() - if err == nil { - level.Debug(util.Logger).Log("msg", "scheduler: initial configuration load", "num_configs", len(cfgs)) - return cfgs - } - level.Warn(util.Logger).Log("msg", "scheduler: error fetching all configurations, backing off", "err", err) - backoff.Wait() - } -} - -func (s *scheduler) updateConfigs(now time.Time) error { - cfgs, err := s.poll() +func (s *scheduler) updateConfigs(ctx context.Context) error { + cfgs, err := s.poller.PollRules(ctx) if err != nil { return err } - s.addNewConfigs(now, cfgs) + + for user, cfg := range cfgs { + s.addUserConfig(ctx, user, cfg) + } + + totalConfigs.Set(float64(len(s.cfgs))) return nil } -// poll the configuration server. Not re-entrant. 
-func (s *scheduler) poll() (map[string]configs.VersionedRulesConfig, error) { - s.Lock() - configID := s.latestConfig - s.Unlock() +func (s *scheduler) addUserConfig(ctx context.Context, userID string, rgs []rules.RuleGroup) { + level.Info(util.Logger).Log("msg", "scheduler: updating rules for user", "user_id", userID, "num_groups", len(rgs)) - cfgs, err := s.ruleStore.GetRules(context.Background(), configID) // Warning: this will produce an incorrect result if the configID ever overflows - if err != nil { - level.Warn(util.Logger).Log("msg", "scheduler: configs server poll failed", "err", err) - return nil, err + // Create a done channel shared by this user's rule group work items + userChan := make(chan struct{}) + + ringHasher := fnv.New32a() + workItems := []workItem{} + evalTime := s.determineEvalTime(userID) + + for _, rg := range rgs { + level.Debug(util.Logger).Log("msg", "scheduler: updating rules for user and group", "user_id", userID, "group", rg.ID()) + grp, err := s.groupFn(ctx, rg) + if err != nil { + level.Error(util.Logger).Log("msg", "scheduler: failed to create group for user", "user_id", userID, "group", rg.ID(), "err", err) + return + } + + ringHasher.Reset() + _, err = ringHasher.Write([]byte(rg.ID())) + if err != nil { + level.Error(util.Logger).Log("msg", "scheduler: failed to hash group ID for user", "user_id", userID, "group", rg.ID(), "err", err) + return + } + + hash := ringHasher.Sum32() + workItems = append(workItems, workItem{userID, rg.ID(), hash, grp, evalTime, userChan}) + } + + s.updateUserConfig(ctx, userConfig{ + id: userID, + rules: rgs, + done: userChan, + }) + + for _, i := range workItems { + s.addWorkItem(i) } + + totalRuleGroups.Add(float64(len(workItems))) +} + +func (s *scheduler) updateUserConfig(ctx context.Context, cfg userConfig) { + // Retrieve any previous configuration and update to the new configuration s.Lock() - s.latestConfig = getLatestConfigID(cfgs, configID) + curr, exists := s.cfgs[cfg.id] + s.cfgs[cfg.id] = cfg s.Unlock() - return cfgs, nil -} -// getLatestConfigID gets the latest configs ID. -// max [latest, max (map getID cfgs)] -func getLatestConfigID(cfgs map[string]configs.VersionedRulesConfig, latest configs.ID) configs.ID { - ret := latest - for _, config := range cfgs { - if config.ID > ret { - ret = config.ID - } + if exists { + close(curr.done) // If a previous configuration exists, ensure it is closed } - return ret +} + +func (s *scheduler) determineEvalTime(userID string) time.Time { + now := time.Now() + hasher := fnv.New64a() + return computeNextEvalTime(hasher, now, float64(s.evaluationInterval.Nanoseconds()), userID) } // computeNextEvalTime Computes when a user's rules should be next evaluated, based on how far we are through an evaluation cycle -func (s *scheduler) computeNextEvalTime(hasher hash.Hash64, now time.Time, userID string) time.Time { - intervalNanos := float64(s.evaluationInterval.Nanoseconds()) +func computeNextEvalTime(hasher hash.Hash64, now time.Time, intervalNanos float64, userID string) time.Time { // Compute how far we are into the current evaluation cycle currentEvalCyclePoint := math.Mod(float64(now.UnixNano()), intervalNanos) hasher.Reset() - hasher.Write([]byte(userID)) + _, err := hasher.Write([]byte(userID)) + if err != nil { + // if an error occurs just return the current time plus a minute + return now.Add(time.Minute) + } offset := math.Mod( // We subtract our current point in the cycle to cause the entries // before 'now' to wrap around to the end.
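The stagger logic in computeNextEvalTime spreads users across the evaluation interval by hashing the user ID to a fixed point in the cycle, wrapping points that fall before "now" into the next cycle. A minimal standalone sketch of that computation follows; the function and user names here are illustrative only and not part of the patch.

package main

import (
	"fmt"
	"hash/fnv"
	"math"
	"time"
)

// nextEvalTime mirrors the staggering used by computeNextEvalTime: hash the
// user ID to a fixed point within the evaluation interval, wrapping points
// that fall before 'now' into the next cycle.
func nextEvalTime(userID string, now time.Time, interval time.Duration) time.Time {
	intervalNanos := float64(interval.Nanoseconds())
	// How far we already are into the current evaluation cycle.
	cyclePoint := math.Mod(float64(now.UnixNano()), intervalNanos)

	hasher := fnv.New64a()
	hasher.Write([]byte(userID))
	offset := math.Mod(
		// Subtract the current cycle point so earlier slots wrap to the end
		// of the cycle; add the interval so the argument stays non-negative.
		float64(hasher.Sum64())/float64(math.MaxUint64)*intervalNanos-cyclePoint+intervalNanos,
		intervalNanos)
	return now.Add(time.Duration(int64(offset)))
}

func main() {
	now := time.Now()
	for _, u := range []string{"user-a", "user-b", "user-c"} {
		fmt.Println(u, "->", nextEvalTime(u, now, 15*time.Second).Format("15:04:05.000"))
	}
}

Because the hash is stable, each user keeps the same slot in every cycle, so load stays evenly spread rather than bunching at interval boundaries.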
@@ -210,78 +230,13 @@ func (s *scheduler) computeNextEvalTime(hasher hash.Hash64, now time.Time, userI return now.Add(time.Duration(int64(offset))) } -func (s *scheduler) addNewConfigs(now time.Time, cfgs map[string]configs.VersionedRulesConfig) { - // TODO: instrument how many configs we have, both valid & invalid. - level.Debug(util.Logger).Log("msg", "adding configurations", "num_configs", len(cfgs)) - hasher := fnv.New64a() - s.Lock() - generation := s.latestConfig - s.Unlock() - - for userID, config := range cfgs { - s.addUserConfig(now, hasher, generation, userID, config) - } - - configUpdates.Add(float64(len(cfgs))) - s.Lock() - lenCfgs := len(s.cfgs) - s.Unlock() - totalConfigs.Set(float64(lenCfgs)) -} - -func (s *scheduler) addUserConfig(now time.Time, hasher hash.Hash64, generation configs.ID, userID string, config configs.VersionedRulesConfig) { - rulesByGroup, err := config.Config.Parse() - if err != nil { - // XXX: This means that if a user has a working configuration and - // they submit a broken one, we'll keep processing the last known - // working configuration, and they'll never know. - // TODO: Provide a way of deleting / cancelling recording rules. - level.Warn(util.Logger).Log("msg", "scheduler: invalid Cortex configuration", "user_id", userID, "err", err) - return - } - - level.Info(util.Logger).Log("msg", "scheduler: updating rules for user", "user_id", userID, "num_groups", len(rulesByGroup), "is_deleted", config.IsDeleted()) - s.Lock() - // if deleted remove from map, otherwise - update map - if config.IsDeleted() { - delete(s.cfgs, userID) - s.Unlock() - return - } - s.cfgs[userID] = userConfig{rules: rulesByGroup, generation: generation} - s.Unlock() - - ringHasher := fnv.New32a() - evalTime := s.computeNextEvalTime(hasher, now, userID) - workItems := []workItem{} - for group, rules := range rulesByGroup { - level.Debug(util.Logger).Log("msg", "scheduler: updating rules for user and group", "user_id", userID, "group", group, "num_rules", len(rules)) - g, err := s.groupFn(userID, group, rules) - if err != nil { - // XXX: similarly to above if a user has a working configuration and - // for some reason we cannot create a group for the new one we'll use - // the last known working configuration - level.Warn(util.Logger).Log("msg", "scheduler: failed to create group for user", "user_id", userID, "group", group, "err", err) - return - } - ringHasher.Reset() - ringHasher.Write([]byte(userID + ":" + group)) - hash := ringHasher.Sum32() - workItems = append(workItems, workItem{userID, group, hash, g, evalTime, generation}) - } - for _, i := range workItems { - totalRuleGroups.Inc() - s.addWorkItem(i) - } -} - func (s *scheduler) addWorkItem(i workItem) { select { case <-s.done: level.Debug(util.Logger).Log("msg", "scheduler: work item not added, scheduler stoped", "item", i) return default: - // The queue is keyed by userID+groupName, so items for existing userID+groupName will be replaced. + // The queue is keyed by userID+groupID, so items for existing userID+groupID will be replaced. 
s.q.Enqueue(i) level.Debug(util.Logger).Log("msg", "scheduler: work item added", "item", i) } @@ -302,26 +257,34 @@ func (s *scheduler) nextWorkItem() *workItem { } item := op.(workItem) level.Debug(util.Logger).Log("msg", "scheduler: work item granted", "item", item) + + // Record the latency of the item's evaluation here + latency := time.Since(item.scheduled) + evalLatency.Observe(latency.Seconds()) + level.Debug(util.Logger).Log("msg", "scheduler: returning item", "item", item, "latency", latency.String()) + return &item } // workItemDone marks the given item as being ready to be rescheduled. func (s *scheduler) workItemDone(i workItem) { - s.Lock() - config, found := s.cfgs[i.userID] - var currentRules []rules.Rule - if found { - currentRules = config.rules[i.groupName] - } - s.Unlock() - if !found || len(currentRules) == 0 || i.generation < config.generation { - // Warning: this test will produce an incorrect result if the generation ever overflows - level.Debug(util.Logger).Log("msg", "scheduler: stopping item", "user_id", i.userID, "group", i.groupName, "found", found, "len", len(currentRules)) - totalRuleGroups.Dec() + select { + case <-i.done: + // Unschedule the work item + level.Debug(util.Logger).Log("msg", "scheduler: work item dropped", "item", i) return - } + default: + // If the evaluation of the item took longer than its evaluation interval, skip to the next valid interval + // and record any evaluation misses. This must be differentiated from lateness due to scheduling which is + // caused by the overall workload, not the result of latency within a single rule group. + missed := (time.Since(i.scheduled) / s.evaluationInterval) - 1 + if missed > 0 { + level.Warn(util.Logger).Log("msg", "scheduler: work item missed evaluation", "item", i, "late_by", missed.String()) + iterationsMissed.WithLabelValues(i.userID).Add(float64(missed)) + } - next := i.Defer(s.evaluationInterval) - level.Debug(util.Logger).Log("msg", "scheduler: work item rescheduled", "item", i, "time", next.scheduled.Format(timeLogFormat)) - s.addWorkItem(next) + i.scheduled = i.scheduled.Add((missed + 1) * s.evaluationInterval) + level.Debug(util.Logger).Log("msg", "scheduler: work item rescheduled", "item", i, "time", i.scheduled.Format(timeLogFormat)) + s.addWorkItem(i) + } } diff --git a/pkg/ruler/scheduler_test.go b/pkg/ruler/scheduler_test.go index 1a2a48fa0d4..b3dae8de0fe 100644 --- a/pkg/ruler/scheduler_test.go +++ b/pkg/ruler/scheduler_test.go @@ -1,18 +1,18 @@ package ruler import ( + "context" "strconv" "testing" "time" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/stretchr/testify/assert" - - "github.com/prometheus/prometheus/rules" ) type fakeHasher struct { - something uint32 - data *[]byte + data *[]byte } func (h *fakeHasher) Write(data []byte) (int, error) { @@ -44,7 +44,7 @@ func TestSchedulerComputeNextEvalTime(t *testing.T) { // We use the fake hasher to give us control over the hash output // so that we can test the wrap-around behaviour of the modulo fakeUserID := strconv.FormatInt(hashResult, 10) - return s.computeNextEvalTime(&h, time.Unix(0, now), fakeUserID).UnixNano() + return computeNextEvalTime(&h, time.Unix(0, now), 15, fakeUserID).UnixNano() } { cycleStartTime := int64(30) @@ -71,27 +71,43 @@ func TestSchedulerRulesOverlap(t *testing.T) { s := newScheduler(nil, 15, 15, nil) userID =
"bob" - groupName := "test" + groupOne := "test1" + groupTwo := "test2" next := time.Now() - ruleSet := []rules.Rule{ - nil, + ruleSetsOne := []rules.RuleGroup{ + rules.NewRuleGroup(groupOne, "default", userID, []rulefmt.Rule{}), } - ruleSets := map[string][]rules.Rule{} - ruleSets[groupName] = ruleSet - cfg := userConfig{generation: 1, rules: ruleSets} - s.cfgs[userID] = cfg - w1 := workItem{userID: userID, groupName: groupName, scheduled: next, generation: cfg.generation} - s.workItemDone(w1) + ruleSetsTwo := []rules.RuleGroup{ + rules.NewRuleGroup(groupTwo, "default", userID, []rulefmt.Rule{}), + } + userChanOne := make(chan struct{}) + userChanTwo := make(chan struct{}) + + cfgOne := userConfig{rules: ruleSetsOne, done: userChanOne} + cfgTwo := userConfig{rules: ruleSetsTwo, done: userChanTwo} + + s.updateUserConfig(context.Background(), cfgOne) + w0 := workItem{userID: userID, groupID: groupOne, scheduled: next, done: userChanOne} + s.workItemDone(w0) item := s.q.Dequeue().(workItem) - assert.Equal(t, w1.generation, item.generation) + assert.Equal(t, item.groupID, groupOne) - w0 := workItem{userID: userID, groupName: groupName, scheduled: next, generation: cfg.generation - 1} - s.workItemDone(w1) + // create a new workitem for the updated ruleset + w1 := workItem{userID: userID, groupID: groupTwo, scheduled: next, done: userChanTwo} + + // Apply the new config, scheduling the previous config to be dropped + s.updateUserConfig(context.Background(), cfgTwo) + + // Reschedule the old config first, then the new config s.workItemDone(w0) + s.workItemDone(w1) + + // Ensure the old config was dropped due to the done channel being closed + // when the new user config was updated item = s.q.Dequeue().(workItem) - assert.Equal(t, w1.generation, item.generation) + assert.Equal(t, item.groupID, groupTwo) s.q.Close() assert.Equal(t, nil, s.q.Dequeue()) diff --git a/pkg/ruler/scheduling_queue.go b/pkg/ruler/scheduling_queue.go index d4c0233916d..23808d8685c 100644 --- a/pkg/ruler/scheduling_queue.go +++ b/pkg/ruler/scheduling_queue.go @@ -73,7 +73,7 @@ func (q *queueState) Enqueue(op ScheduledItem) { func (q *queueState) Dequeue() ScheduledItem { item := heap.Pop(q).(ScheduledItem) - itemEvaluationLatency.Observe(time.Now().Sub(item.Scheduled()).Seconds()) + itemEvaluationLatency.Observe(time.Since(item.Scheduled()).Seconds()) return item } diff --git a/pkg/ruler/storage.go b/pkg/ruler/storage.go new file mode 100644 index 00000000000..7d2e1e7693a --- /dev/null +++ b/pkg/ruler/storage.go @@ -0,0 +1,63 @@ +package ruler + +import ( + "context" + "flag" + "fmt" + + "github.com/cortexproject/cortex/pkg/storage/clients/configdb" + "github.com/cortexproject/cortex/pkg/storage/clients/gcp" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/util/usertracker" +) + +// RuleStoreConfig conigures a rule store +type RuleStoreConfig struct { + Type string `yaml:"type"` + ConfigDB configdb.Config + GCS gcp.GCSConfig + Tracker usertracker.Config + + mock *mockRuleStore +} + +// RegisterFlags registers flags. 
+func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { + cfg.ConfigDB.RegisterFlagsWithPrefix("ruler", f) + cfg.GCS.RegisterFlagsWithPrefix("ruler.store.", f) + cfg.Tracker.RegisterFlagsWithPrefix("ruler.", f) + f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, gcs)") +} + +// NewRuleStorage returns a new rule storage backend poller and store +func NewRuleStorage(cfg RuleStoreConfig) (rules.RulePoller, rules.RuleStore, error) { + if cfg.mock != nil { + return cfg.mock, cfg.mock, nil + } + + var ( + ruleStore rules.RuleStore + err error + ) + switch cfg.Type { + case "configdb": + poller, err := configdb.New(cfg.ConfigDB) + return poller, nil, err + case "gcs": + ruleStore, err = gcp.NewGCSClient(context.Background(), cfg.GCS) + if err != nil { + return nil, nil, err + } + default: + return nil, nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs", cfg.Type) + } + + tracker, err := usertracker.NewTracker(cfg.Tracker) + if err != nil { + return nil, nil, err + } + + p, err := newTrackedPoller(tracker, ruleStore) + + return p, p.trackedRuleStore(), err +} diff --git a/pkg/ruler/worker.go b/pkg/ruler/worker.go index a56eae0f707..29be0f707d1 100644 --- a/pkg/ruler/worker.go +++ b/pkg/ruler/worker.go @@ -1,30 +1,30 @@ package ruler import ( + "context" + native_ctx "context" "time" "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log/level" + opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/instrument" + "github.com/weaveworks/common/user" ) var ( - blockedWorkers = prometheus.NewGauge(prometheus.GaugeOpts{ + blockedWorkers = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "cortex", Name: "blocked_workers", Help: "How many workers are waiting on an item to be ready.", }) - workerIdleTime = prometheus.NewCounter(prometheus.CounterOpts{ + workerIdleTime = promauto.NewCounter(prometheus.CounterOpts{ Namespace: "cortex", Name: "worker_idle_seconds_total", Help: "How long workers have spent waiting for work.", }) - evalLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "group_evaluation_latency_seconds", - Help: "How far behind the target time each rule group executed.", - Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 25}, - }) ) // Worker does a thing until it's told to stop. 
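The reworked worker loop in the next hunk follows a conventional shape: a fixed pool of goroutines blocking on a shared queue, each evaluating an item and handing it back for rescheduling. A simplified, standalone sketch of that shape is below; a plain channel stands in for the SchedulingQueue and none of these names are part of the patch.

package main

import (
	"fmt"
	"sync"
)

// workItem is a stand-in for the scheduler's work item type.
type workItem struct{ key string }

func main() {
	queue := make(chan workItem)
	var wg sync.WaitGroup

	// Start a fixed pool of workers, as the ruler does with cfg.NumWorkers.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Each worker blocks until an item is ready, evaluates it, and
			// loops; a closed and drained queue terminates the worker.
			for item := range queue {
				fmt.Printf("worker %d evaluating %s\n", id, item.key)
			}
		}(i)
	}

	for _, k := range []string{"user1:groupA", "user2:groupB", "user3:groupC"} {
		queue <- workItem{key: k}
	}
	close(queue)
	wg.Wait()
}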
@@ -47,21 +47,48 @@ func newWorker(ruler *Ruler) worker { func (w *worker) Run() { for { + // Grab next scheduled item from the queue + level.Debug(util.Logger).Log("msg", "waiting for next work item") waitStart := time.Now() + blockedWorkers.Inc() - level.Debug(util.Logger).Log("msg", "waiting for next work item") item := w.scheduler.nextWorkItem() blockedWorkers.Dec() - waitElapsed := time.Now().Sub(waitStart) + + waitElapsed := time.Since(waitStart) + workerIdleTime.Add(waitElapsed.Seconds()) + + // If no item is returned, worker is safe to terminate if item == nil { level.Debug(util.Logger).Log("msg", "queue closed and empty; terminating worker") return } - evalLatency.Observe(time.Since(item.scheduled).Seconds()) - workerIdleTime.Add(waitElapsed.Seconds()) - level.Debug(util.Logger).Log("msg", "processing item", "item", item) - w.ruler.Evaluate(item.userID, item) + + w.Evaluate(item.userID, item) w.scheduler.workItemDone(*item) level.Debug(util.Logger).Log("msg", "item handed back to queue", "item", item) } } + +// Evaluate a list of rules in the given context. +func (w *worker) Evaluate(userID string, item *workItem) { + ctx := user.InjectOrgID(context.Background(), userID) + logger := util.WithContext(ctx, util.Logger) + if w.ruler.cfg.EnableSharding && !w.ruler.ownsRule(item.hash) { + level.Debug(util.Logger).Log("msg", "ruler: skipping evaluation, not owned", "user_id", item.userID, "group", item.groupID) + return + } + level.Debug(logger).Log("msg", "evaluating rules...", "num_rules", len(item.group.Rules())) + + err := instrument.CollectedRequest(ctx, "Evaluate", evalDuration, nil, func(ctx native_ctx.Context) error { + if span := opentracing.SpanFromContext(ctx); span != nil { + span.SetTag("instance", userID) + span.SetTag("groupID", item.groupID) + } + item.group.Eval(ctx, time.Now()) + return nil + }) + if err != nil { + level.Debug(logger).Log("msg", "failed instrumented worker evaluation", "err", err) + } +} diff --git a/pkg/storage/alerts/alert_store.go b/pkg/storage/alerts/alert_store.go new file mode 100644 index 00000000000..b65043fb6a8 --- /dev/null +++ b/pkg/storage/alerts/alert_store.go @@ -0,0 +1,25 @@ +package alerts + +import ( + "context" +) + +// AlertConfig is used to configure user alert managers +type AlertConfig struct { + TemplateFiles map[string]string `json:"template_files"` + AlertmanagerConfig string `json:"alertmanager_config"` +} + +// AlertPoller polls for updated alerts +type AlertPoller interface { + PollAlerts(ctx context.Context) (map[string]AlertConfig, error) + Stop() +} + +// AlertStore stores config information and template files to configure alertmanager tenants +type AlertStore interface { + ListAlertConfigs(ctx context.Context) (map[string]AlertConfig, error) + GetAlertConfig(ctx context.Context, id string) (AlertConfig, error) + SetAlertConfig(ctx context.Context, id string, cfg AlertConfig) error + DeleteAlertConfig(ctx context.Context, id string) error +} diff --git a/pkg/storage/clients/client_test.go b/pkg/storage/clients/client_test.go new file mode 100644 index 00000000000..304267196c5 --- /dev/null +++ b/pkg/storage/clients/client_test.go @@ -0,0 +1,60 @@ +package clients + +import ( + "context" + "testing" + "time" + + "github.com/cortexproject/cortex/pkg/storage/alerts" + "github.com/cortexproject/cortex/pkg/storage/clients/gcp" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/storage/testutils" + 
"github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + userID = "userID" + namespace = "default" +) + +var ( + exampleRuleGrp = rulefmt.RuleGroup{ + Name: "example_rulegroup_one", + } +) + +func TestRuleStoreBasic(t *testing.T) { + forAllFixtures(t, func(t *testing.T, _ alerts.AlertStore, client rules.RuleStore) { + const batchSize = 5 + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + err := client.SetRuleGroup(ctx, userID, namespace, exampleRuleGrp) + require.NoError(t, err) + + rg, err := client.GetRuleGroup(ctx, userID, namespace, exampleRuleGrp.Name) + require.NoError(t, err) + assert.Equal(t, exampleRuleGrp.Name, rg.Name()) + + err = client.DeleteRuleGroup(ctx, userID, namespace, exampleRuleGrp.Name) + require.NoError(t, err) + + rg, err = client.GetRuleGroup(ctx, userID, namespace, exampleRuleGrp.Name) + require.Error(t, err) + assert.Nil(t, rg) + }) +} + +func forAllFixtures(t *testing.T, clientTest func(*testing.T, alerts.AlertStore, rules.RuleStore)) { + var fixtures []testutils.Fixture + fixtures = append(fixtures, gcp.Fixtures...) + + for _, fixture := range fixtures { + a, r, err := fixture.Clients() + require.NoError(t, err) + + clientTest(t, a, r) + } +} diff --git a/pkg/storage/clients/configdb/client.go b/pkg/storage/clients/configdb/client.go new file mode 100644 index 00000000000..3bf450a7938 --- /dev/null +++ b/pkg/storage/clients/configdb/client.go @@ -0,0 +1,195 @@ +package configdb + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/cortexproject/cortex/pkg/configs" + "github.com/cortexproject/cortex/pkg/storage/alerts" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/go-kit/kit/log/level" +) + +// Config says where we can find the ruler configs. +type Config struct { + ConfigsAPIURL flagext.URLValue + ClientTimeout time.Duration +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.Var(&cfg.ConfigsAPIURL, prefix+".configs.url", "DEPRECATED. URL of configs API server.") + f.DurationVar(&cfg.ClientTimeout, prefix+".client-timeout", 5*time.Second, "DEPRECATED. Timeout for requests to Weave Cloud configs service.") +} + +// ConfigsClient allows retrieving recording and alerting rules from the configs server. +type ConfigsClient struct { + URL *url.URL + Timeout time.Duration + + lastPoll configs.ID +} + +// New creates a new ConfigClient. +func New(cfg Config) (*ConfigsClient, error) { + return &ConfigsClient{ + URL: cfg.ConfigsAPIURL.URL, + Timeout: cfg.ClientTimeout, + + lastPoll: 0, + }, nil +} + +// GetRules implements ConfigClient. 
+func (c *ConfigsClient) GetRules(ctx context.Context, since configs.ID) (map[string]configs.VersionedRulesConfig, error) { + suffix := "" + if since != 0 { + suffix = fmt.Sprintf("?since=%d", since) + } + endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix) + response, err := doRequest(endpoint, c.Timeout, since) + if err != nil { + return nil, err + } + configs := map[string]configs.VersionedRulesConfig{} + for id, view := range response.Configs { + cfg := view.GetVersionedRulesConfig() + if cfg != nil { + configs[id] = *cfg + } + } + return configs, nil +} + +// Stop stops the config client +func (c *ConfigsClient) Stop() {} + +// GetAlerts implements ConfigClient. +func (c *ConfigsClient) GetAlerts(ctx context.Context, since configs.ID) (*ConfigsResponse, error) { + suffix := "" + if since != 0 { + suffix = fmt.Sprintf("?since=%d", since) + } + endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix) + return doRequest(endpoint, c.Timeout, since) +} + +func doRequest(endpoint string, timeout time.Duration, since configs.ID) (*ConfigsResponse, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + client := &http.Client{Timeout: timeout} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Invalid response from configs server: %v", resp.StatusCode) + } + + var config ConfigsResponse + if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { + level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) + return nil, err + } + + config.since = since + return &config, nil +} + +// ConfigsResponse is a response from server for GetConfigs. +type ConfigsResponse struct { + // The version since which these configs were changed + since configs.ID + + // Configs maps user ID to their latest configs.View. + Configs map[string]configs.View `json:"configs"` +} + +// GetLatestConfigID returns the last config ID from a set of configs.
+func (c ConfigsResponse) GetLatestConfigID() configs.ID { + latest := c.since + for _, config := range c.Configs { + if config.ID > latest { + latest = config.ID + } + } + return latest +} + +// PollAlerts polls the configdb for updated alerts +func (c *ConfigsClient) PollAlerts(ctx context.Context) (map[string]alerts.AlertConfig, error) { + resp, err := c.GetAlerts(ctx, c.lastPoll) + if err != nil { + return nil, err + } + + newConfigs := map[string]alerts.AlertConfig{} + for user, c := range resp.Configs { + newConfigs[user] = alerts.AlertConfig{ + TemplateFiles: c.Config.TemplateFiles, + AlertmanagerConfig: c.Config.AlertmanagerConfig, + } + } + + c.lastPoll = resp.GetLatestConfigID() + + return newConfigs, nil +} + +// PollRules polls the configdb server and returns the updated rule groups +func (c *ConfigsClient) PollRules(ctx context.Context) (map[string][]rules.RuleGroup, error) { + resp, err := c.GetRules(ctx, c.lastPoll) + if err != nil { + return nil, err + } + + newRules := map[string][]rules.RuleGroup{} + + var highestID configs.ID + for user, cfg := range resp { + if cfg.ID > highestID { + highestID = cfg.ID + } + userRules := []rules.RuleGroup{} + if cfg.IsDeleted() { + newRules[user] = []rules.RuleGroup{} + continue + } + rMap, err := cfg.Config.Parse() + if err != nil { + return nil, err + } + for groupSlug, r := range rMap { + name, file := decomposeGroupSlug(groupSlug) + userRules = append(userRules, rules.FormattedToRuleGroup(user, file, name, r)) + } + newRules[user] = userRules + } + + c.lastPoll = highestID + + return newRules, nil +} + +// decomposeGroupSlug breaks the group slug from Parse +// into its group name and file name +func decomposeGroupSlug(slug string) (string, string) { + components := strings.Split(slug, ";") + return components[0], components[1] +} diff --git a/pkg/configs/client/configs_test.go b/pkg/storage/clients/configdb/client_test.go similarity index 98% rename from pkg/configs/client/configs_test.go rename to pkg/storage/clients/configdb/client_test.go index af7acf25b45..bf0d38cdbb5 100644 --- a/pkg/configs/client/configs_test.go +++ b/pkg/storage/clients/configdb/client_test.go @@ -1,4 +1,4 @@ -package client +package configdb import ( "net/http" diff --git a/pkg/storage/clients/gcp/config_client.go b/pkg/storage/clients/gcp/config_client.go new file mode 100644 index 00000000000..41524d71050 --- /dev/null +++ b/pkg/storage/clients/gcp/config_client.go @@ -0,0 +1,326 @@ +package gcp + +import ( + "context" + "encoding/json" + "errors" + "flag" + "io/ioutil" + "strings" + + "github.com/cortexproject/cortex/pkg/storage/alerts" + "github.com/cortexproject/cortex/pkg/storage/rules" + "github.com/cortexproject/cortex/pkg/util" + + gstorage "cloud.google.com/go/storage" + "github.com/go-kit/kit/log/level" + "github.com/golang/protobuf/proto" + "github.com/prometheus/prometheus/pkg/rulefmt" + "google.golang.org/api/iterator" +) + +const ( + alertPrefix = "alerts/" + rulePrefix = "rules/" +) + +var ( + errBadRuleGroup = errors.New("unable to decompose handle for rule object") +) + +// GCSConfig configures the GCS client used for rule and alert config storage. +type GCSConfig struct { + BucketName string `yaml:"bucket_name"` +} + +// RegisterFlagsWithPrefix registers flags.
+func (cfg *GCSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.BucketName, prefix+"gcs.bucketname", "", "Name of GCS bucket to store rule and alert configs in.") +} + +// GCSClient acts as a config backend. It is not safe to use concurrently when polling for rules. +// This is not an issue with the current scheduler architecture, but must be noted. +type GCSClient struct { + client *gstorage.Client + bucket *gstorage.BucketHandle +} + +// NewGCSClient makes a new GCSClient that stores rule and alert configs in GCS. +func NewGCSClient(ctx context.Context, cfg GCSConfig) (*GCSClient, error) { + client, err := gstorage.NewClient(ctx) + if err != nil { + return nil, err + } + + return newGCSClient(cfg, client), nil +} + +// newGCSClient makes a new GCSClient backed by the provided storage client. +func newGCSClient(cfg GCSConfig, client *gstorage.Client) *GCSClient { + bucket := client.Bucket(cfg.BucketName) + return &GCSClient{ + client: client, + bucket: bucket, + } +} + +// ListAlertConfigs returns all of the active alert configs in this store +func (g *GCSClient) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfig, error) { + it := g.bucket.Objects(ctx, &gstorage.Query{ + Prefix: alertPrefix, + }) + + configs := map[string]alerts.AlertConfig{} + + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + + if err != nil { + return nil, err + } + + alertConfig, err := g.getAlertConfig(ctx, obj.Name) + if err != nil { + return nil, err + } + + user := strings.TrimPrefix(obj.Name, alertPrefix) + + configs[user] = alertConfig + } + + return configs, nil +} + +func (g *GCSClient) getAlertConfig(ctx context.Context, obj string) (alerts.AlertConfig, error) { + reader, err := g.bucket.Object(obj).NewReader(ctx) + if err == gstorage.ErrObjectNotExist { + level.Debug(util.Logger).Log("msg", "object does not exist", "name", obj) + return alerts.AlertConfig{}, nil + } + if err != nil { + return alerts.AlertConfig{}, err + } + defer reader.Close() + + buf, err := ioutil.ReadAll(reader) + if err != nil { + return alerts.AlertConfig{}, err + } + + config := alerts.AlertConfig{} + err = json.Unmarshal(buf, &config) + if err != nil { + return alerts.AlertConfig{}, err + } + + return config, nil +} + +// GetAlertConfig returns the specified user's alertmanager configuration +func (g *GCSClient) GetAlertConfig(ctx context.Context, userID string) (alerts.AlertConfig, error) { + return g.getAlertConfig(ctx, alertPrefix+userID) +} + +// SetAlertConfig sets the specified user's alertmanager configuration +func (g *GCSClient) SetAlertConfig(ctx context.Context, userID string, cfg alerts.AlertConfig) error { + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + + objHandle := g.bucket.Object(alertPrefix + userID) + + writer := objHandle.NewWriter(ctx) + if _, err := writer.Write(cfgBytes); err != nil { + return err + } + + if err := writer.Close(); err != nil { + return err + } + + return nil +} + +// DeleteAlertConfig deletes the specified user's alertmanager configuration +func (g *GCSClient) DeleteAlertConfig(ctx context.Context, userID string) error { + err := g.bucket.Object(alertPrefix + userID).Delete(ctx) + if err != nil { + return err + } + return nil +} + +func (g *GCSClient) getAllRuleGroups(ctx context.Context, userID string) ([]rules.RuleGroup, error) { + it := g.bucket.Objects(ctx, &gstorage.Query{ + Prefix: generateRuleHandle(userID, "", ""), + }) + + rgs := []rules.RuleGroup{} + + for { + obj, err := it.Next() + if err == iterator.Done { + break +
} + + if err != nil { + return []rules.RuleGroup{}, err + } + + rgProto, err := g.getRuleGroup(ctx, obj.Name) + if err != nil { + return []rules.RuleGroup{}, err + } + + rgs = append(rgs, rules.ToRuleGroup(rgProto)) + } + + return rgs, nil +} + +// ListRuleGroups returns all the active rule groups for a user +func (g *GCSClient) ListRuleGroups(ctx context.Context, options rules.RuleStoreConditions) (rules.RuleGroupList, error) { + it := g.bucket.Objects(ctx, &gstorage.Query{ + Prefix: generateRuleHandle(options.UserID, options.Namespace, ""), + }) + + groups := []rules.RuleGroup{} + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + + if err != nil { + return nil, err + } + + level.Debug(util.Logger).Log("msg", "listing rule group", "handle", obj.Name) + + rg, err := g.getRuleGroup(ctx, obj.Name) + if err != nil { + return nil, err + } + groups = append(groups, rules.ToRuleGroup(rg)) + } + return groups, nil +} + +func (g *GCSClient) getRuleNamespace(ctx context.Context, userID string, namespace string) ([]*rules.RuleGroupDesc, error) { + it := g.bucket.Objects(ctx, &gstorage.Query{ + Prefix: generateRuleHandle(userID, namespace, ""), + }) + + groups := []*rules.RuleGroupDesc{} + + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + + if err != nil { + return nil, err + } + + rg, err := g.getRuleGroup(ctx, obj.Name) + if err != nil { + return nil, err + } + + groups = append(groups, rg) + } + + return groups, nil +} + +// GetRuleGroup returns the requested rule group +func (g *GCSClient) GetRuleGroup(ctx context.Context, userID string, namespace string, grp string) (rules.RuleGroup, error) { + handle := generateRuleHandle(userID, namespace, grp) + rg, err := g.getRuleGroup(ctx, handle) + if err != nil { + return nil, err + } + + if rg == nil { + return nil, rules.ErrGroupNotFound + } + + return rules.ToRuleGroup(rg), nil +} + +func (g *GCSClient) getRuleGroup(ctx context.Context, handle string) (*rules.RuleGroupDesc, error) { + reader, err := g.bucket.Object(handle).NewReader(ctx) + if err == gstorage.ErrObjectNotExist { + level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", handle) + return nil, nil + } + if err != nil { + return nil, err + } + defer reader.Close() + + buf, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + rg := &rules.RuleGroupDesc{} + + err = proto.Unmarshal(buf, rg) + if err != nil { + return nil, err + } + + return rg, nil +} + +// SetRuleGroup sets provided rule group +func (g *GCSClient) SetRuleGroup(ctx context.Context, userID string, namespace string, grp rulefmt.RuleGroup) error { + rg := rules.ToProto(userID, namespace, grp) + rgBytes, err := proto.Marshal(&rg) + if err != nil { + return err + } + + handle := generateRuleHandle(userID, namespace, grp.Name) + objHandle := g.bucket.Object(handle) + + writer := objHandle.NewWriter(ctx) + if _, err := writer.Write(rgBytes); err != nil { + return err + } + + if err := writer.Close(); err != nil { + return err + } + + return nil +} + +// DeleteRuleGroup deletes the specified rule group +func (g *GCSClient) DeleteRuleGroup(ctx context.Context, userID string, namespace string, group string) error { + handle := generateRuleHandle(userID, namespace, group) + err := g.bucket.Object(handle).Delete(ctx) + if err != nil { + return err + } + + return nil +} + +func generateRuleHandle(id, namespace, name string) string { + if id == "" { + return rulePrefix + } + prefix := rulePrefix + id + "/" + if namespace == "" { + return prefix + } + return 
+func generateRuleHandle(id, namespace, name string) string {
+	if id == "" {
+		return rulePrefix
+	}
+	prefix := rulePrefix + id + "/"
+	if namespace == "" {
+		return prefix
+	}
+	return prefix + namespace + "/" + name
+}
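The handle scheme above carries the whole storage layout: a fully populated handle names one object, while a partially populated one is a valid object-name prefix for listing. A minimal sketch of the keys produced, assuming the rulePrefix constant declared earlier in this file is "rules/":

	generateRuleHandle("", "", "")           // "rules/" (prefix covering every user)
	generateRuleHandle("user1", "", "")      // "rules/user1/" (prefix covering one user's groups)
	generateRuleHandle("user1", "ns", "")    // "rules/user1/ns/" (prefix covering one namespace)
	generateRuleHandle("user1", "ns", "grp") // "rules/user1/ns/grp" (exact object, used by GetRuleGroup)

This is why ListRuleGroups can pass a handle straight into gstorage.Query.Prefix, while getRuleGroup uses one as a complete object name.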
diff --git a/pkg/storage/clients/gcp/fixtures.go b/pkg/storage/clients/gcp/fixtures.go
new file mode 100644
index 00000000000..b2dc7d92262
--- /dev/null
+++ b/pkg/storage/clients/gcp/fixtures.go
@@ -0,0 +1,45 @@
+package gcp
+
+import (
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
+	"github.com/cortexproject/cortex/pkg/storage/rules"
+	"github.com/cortexproject/cortex/pkg/storage/testutils"
+	"github.com/fsouza/fake-gcs-server/fakestorage"
+)
+
+const (
+	proj, instance = "proj", "instance"
+)
+
+type fixture struct {
+	gcssrv *fakestorage.Server
+
+	name string
+}
+
+func (f *fixture) Name() string {
+	return f.name
+}
+
+func (f *fixture) Clients() (alerts.AlertStore, rules.RuleStore, error) {
+	f.gcssrv = fakestorage.NewServer(nil)
+	f.gcssrv.CreateBucket("configdb")
+	cli := newGCSClient(GCSConfig{
+		BucketName: "configdb",
+	}, f.gcssrv.Client())
+
+	return cli, cli, nil
+}
+
+func (f *fixture) Teardown() error {
+	f.gcssrv.Stop()
+	return nil
+}
+
+// Fixtures for unit testing GCP storage.
+var Fixtures = func() []testutils.Fixture {
+	fixtures := []testutils.Fixture{
+		&fixture{name: "gcs"},
+	}
+	return fixtures
+}()
diff --git a/pkg/storage/rules/compat.go b/pkg/storage/rules/compat.go
new file mode 100644
index 00000000000..2aa8037a6dc
--- /dev/null
+++ b/pkg/storage/rules/compat.go
@@ -0,0 +1,98 @@
+package rules
+
+import (
+	time "time"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/rules"
+)
+
+// ProtoRuleUpdateDescFactory makes new RuleUpdateDesc
+func ProtoRuleUpdateDescFactory() proto.Message {
+	return NewRuleUpdateDesc()
+}
+
+// NewRuleUpdateDesc returns an empty *rules.RuleUpdateDesc.
+func NewRuleUpdateDesc() *RuleUpdateDesc {
+	return &RuleUpdateDesc{}
+}
+
+// ToProto transforms a formatted prometheus rulegroup to a rule group protobuf
+func ToProto(user string, namespace string, rl rulefmt.RuleGroup) RuleGroupDesc {
+	dur := time.Duration(rl.Interval)
+	rg := RuleGroupDesc{
+		Name:      rl.Name,
+		Namespace: namespace,
+		Interval:  &dur,
+		Rules:     formattedRuleToProto(rl.Rules),
+		User:      user,
+	}
+	return rg
+}
+
+func formattedRuleToProto(rls []rulefmt.Rule) []*RuleDesc {
+	rules := make([]*RuleDesc, len(rls))
+	for i := range rls {
+		f := time.Duration(rls[i].For)
+
+		rules[i] = &RuleDesc{
+			Expr:   rls[i].Expr,
+			Record: rls[i].Record,
+			Alert:  rls[i].Alert,
+
+			For:         &f,
+			Labels:      client.FromLabelsToLabelAdapaters(labels.FromMap(rls[i].Labels)),
+			Annotations: client.FromLabelsToLabelAdapaters(labels.FromMap(rls[i].Annotations)),
+		}
+	}
+
+	return rules
+}
+
+// FromProto generates a rulefmt RuleGroup
+func FromProto(rg *RuleGroupDesc) *rulefmt.RuleGroup {
+	formattedRuleGroup := rulefmt.RuleGroup{
+		Name:     rg.GetName(),
+		Interval: model.Duration(*rg.Interval),
+		Rules:    make([]rulefmt.Rule, len(rg.GetRules())),
+	}
+
+	for i, rl := range rg.GetRules() {
+		formattedRuleGroup.Rules[i] = rulefmt.Rule{
+			Record:      rl.GetRecord(),
+			Alert:       rl.GetAlert(),
+			Expr:        rl.GetExpr(),
+			For:         model.Duration(*rl.GetFor()),
+			Labels:      client.FromLabelAdaptersToLabels(rl.Labels).Map(),
+			Annotations: client.FromLabelAdaptersToLabels(rl.Annotations).Map(),
+		}
+	}
+
+	return &formattedRuleGroup
+}
+
+// ToRuleGroup returns a functional rulegroup from a proto
+func ToRuleGroup(rg *RuleGroupDesc) *Group {
+	return &Group{
+		name:      rg.GetName(),
+		namespace: rg.GetNamespace(),
+		user:      rg.GetUser(),
+		interval:  *rg.Interval,
+		rules:     rg.Rules,
+	}
+}
+
+// FormattedToRuleGroup wraps a set of already-active prometheus rules in a Group
+func FormattedToRuleGroup(user string, namespace string, name string, rls []rules.Rule) *Group {
+	return &Group{
+		name:        name,
+		namespace:   namespace,
+		user:        user,
+		activeRules: rls,
+	}
+}
diff --git a/pkg/storage/rules/group.go b/pkg/storage/rules/group.go
new file mode 100644
index 00000000000..ab1e3d42aa3
--- /dev/null
+++ b/pkg/storage/rules/group.go
@@ -0,0 +1,118 @@
+package rules
+
+import (
+	"context"
+	time "time"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/rules"
+)
+
+// TODO: Add a lazy rule group that only loads rules when they are needed
+// TODO: The cortex project should implement a separate Group struct from
+// the prometheus project. This will allow for more precise instrumentation.
+
+// Group is used as a compatibility format between storage and evaluation
+type Group struct {
+	name      string
+	namespace string
+	user      string
+	interval  time.Duration
+	rules     []*RuleDesc
+
+	// activeRules allows for the support of the configdb client
+	// TODO: figure out a better way to accomplish this
+	activeRules []rules.Rule
+}
+
+// NewRuleGroup returns a Group
+func NewRuleGroup(name, namespace, user string, rls []rulefmt.Rule) *Group {
+	return &Group{
+		name:      name,
+		namespace: namespace,
+		user:      user,
+		rules:     formattedRuleToProto(rls),
+	}
+}
+
+// Rules returns evaluation-ready prometheus rules
+func (g *Group) Rules(ctx context.Context) ([]rules.Rule, error) {
+	// Fall back to preloaded rules for compatibility with the configdb client
+	if g.rules == nil && g.activeRules != nil {
+		return g.activeRules, nil
+	}
+
+	rls := make([]rules.Rule, 0, len(g.rules))
+	for _, rl := range g.rules {
+		expr, err := promql.ParseExpr(rl.GetExpr())
+		if err != nil {
+			return nil, err
+		}
+
+		if rl.Alert != "" {
+			rls = append(rls, rules.NewAlertingRule(
+				rl.Alert,
+				expr,
+				*rl.GetFor(),
+				client.FromLabelAdaptersToLabels(rl.Labels),
+				client.FromLabelAdaptersToLabels(rl.Annotations),
+				true,
+				log.With(util.Logger, "alert", rl.Alert),
+			))
+			continue
+		}
+		rls = append(rls, rules.NewRecordingRule(
+			rl.Record,
+			expr,
+			client.FromLabelAdaptersToLabels(rl.Labels),
+		))
+	}
+	return rls, nil
+}
+
+// ID returns a unique group identifier built from the namespace and name
+func (g *Group) ID() string {
+	return g.namespace + "/" + g.name
+}
+
+// Name returns the name of the rule group
+func (g *Group) Name() string {
+	return g.name
+}
+
+// Namespace returns the Namespace of the rule group
+func (g *Group) Namespace() string {
+	return g.namespace
+}
+
+// User returns the User of the rule group
+func (g *Group) User() string {
+	return g.user
+}
+
+// Formatted returns a prometheus rulefmt formatted rule group
+func (g *Group) Formatted() rulefmt.RuleGroup {
+	formattedRuleGroup := rulefmt.RuleGroup{
+		Name:     g.name,
+		Interval: model.Duration(g.interval),
+		Rules:    make([]rulefmt.Rule, len(g.rules)),
+	}
+
+	for i, rl := range g.rules {
+		formattedRuleGroup.Rules[i] = rulefmt.Rule{
+			Record:      rl.GetRecord(),
+			Alert:       rl.GetAlert(),
+			Expr:        rl.GetExpr(),
+			For:         model.Duration(*rl.GetFor()),
+			Labels:      client.FromLabelAdaptersToLabels(rl.Labels).Map(),
+			Annotations: client.FromLabelAdaptersToLabels(rl.Annotations).Map(),
+		}
+	}
+
+	return formattedRuleGroup
+}
diff --git a/pkg/storage/rules/rules.pb.go b/pkg/storage/rules/rules.pb.go
new file mode 100644
index 00000000000..d4c15283775
--- /dev/null
+++ b/pkg/storage/rules/rules.pb.go
@@ -0,0 +1,1491 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rules.proto
+
+package rules
+
+import (
+	fmt "fmt"
+	_ "github.com/cortexproject/cortex/pkg/ingester/client"
+	github_com_cortexproject_cortex_pkg_ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
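Before the generated protobuf code below, a short usage sketch for the Group type above; this is illustrative only, with made-up names, and assumes the rule expression parses under the vendored promql version:

	g := NewRuleGroup("example", "ns", "user1", []rulefmt.Rule{
		{Record: "job:up:sum", Expr: "sum by (job)(up)"},
	})
	rls, err := g.Rules(context.Background())
	if err != nil {
		// a malformed PromQL expression surfaces here, at load time,
		// rather than when the group is evaluated
	}
	_ = rls // []rules.Rule, ready to hand to the Prometheus evaluation engine

One design note: a Group built by NewRuleGroup can round-trip back out through Formatted, whereas one built by FormattedToRuleGroup carries only already-active rules, so Formatted has nothing to render for it.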
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type RuleUpdateDesc struct { + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + UpdatedAt int64 `protobuf:"varint,2,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` +} + +func (m *RuleUpdateDesc) Reset() { *m = RuleUpdateDesc{} } +func (*RuleUpdateDesc) ProtoMessage() {} +func (*RuleUpdateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_8e722d3e922f0937, []int{0} +} +func (m *RuleUpdateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleUpdateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleUpdateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleUpdateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleUpdateDesc.Merge(m, src) +} +func (m *RuleUpdateDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleUpdateDesc) XXX_DiscardUnknown() { + xxx_messageInfo_RuleUpdateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleUpdateDesc proto.InternalMessageInfo + +func (m *RuleUpdateDesc) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *RuleUpdateDesc) GetUpdatedAt() int64 { + if m != nil { + return m.UpdatedAt + } + return 0 +} + +type RuleGroupDesc struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Interval *time.Duration `protobuf:"bytes,3,opt,name=interval,proto3,stdduration" json:"interval,omitempty"` + Rules []*RuleDesc `protobuf:"bytes,4,rep,name=rules,proto3" json:"rules,omitempty"` + Deleted bool `protobuf:"varint,5,opt,name=deleted,proto3" json:"deleted,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user,omitempty"` +} + +func (m *RuleGroupDesc) Reset() { *m = RuleGroupDesc{} } +func (*RuleGroupDesc) ProtoMessage() {} +func (*RuleGroupDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_8e722d3e922f0937, []int{1} +} +func (m *RuleGroupDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleGroupDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleGroupDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleGroupDesc.Merge(m, src) +} +func (m *RuleGroupDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleGroupDesc) XXX_DiscardUnknown() { + xxx_messageInfo_RuleGroupDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleGroupDesc proto.InternalMessageInfo + +func (m *RuleGroupDesc) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RuleGroupDesc) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *RuleGroupDesc) GetInterval() *time.Duration { + if m != nil { + return m.Interval + } + return 
nil +} + +func (m *RuleGroupDesc) GetRules() []*RuleDesc { + if m != nil { + return m.Rules + } + return nil +} + +func (m *RuleGroupDesc) GetDeleted() bool { + if m != nil { + return m.Deleted + } + return false +} + +func (m *RuleGroupDesc) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +type RuleDesc struct { + Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"` + Record string `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + Alert string `protobuf:"bytes,3,opt,name=alert,proto3" json:"alert,omitempty"` + For *time.Duration `protobuf:"bytes,4,opt,name=for,proto3,stdduration" json:"for,omitempty"` + Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,5,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"labels"` + Annotations []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,6,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"annotations"` +} + +func (m *RuleDesc) Reset() { *m = RuleDesc{} } +func (*RuleDesc) ProtoMessage() {} +func (*RuleDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_8e722d3e922f0937, []int{2} +} +func (m *RuleDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleDesc.Merge(m, src) +} +func (m *RuleDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleDesc) XXX_DiscardUnknown() { + xxx_messageInfo_RuleDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleDesc proto.InternalMessageInfo + +func (m *RuleDesc) GetExpr() string { + if m != nil { + return m.Expr + } + return "" +} + +func (m *RuleDesc) GetRecord() string { + if m != nil { + return m.Record + } + return "" +} + +func (m *RuleDesc) GetAlert() string { + if m != nil { + return m.Alert + } + return "" +} + +func (m *RuleDesc) GetFor() *time.Duration { + if m != nil { + return m.For + } + return nil +} + +func init() { + proto.RegisterType((*RuleUpdateDesc)(nil), "rules.RuleUpdateDesc") + proto.RegisterType((*RuleGroupDesc)(nil), "rules.RuleGroupDesc") + proto.RegisterType((*RuleDesc)(nil), "rules.RuleDesc") +} + +func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } + +var fileDescriptor_8e722d3e922f0937 = []byte{ + // 478 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xbf, 0x8e, 0xd4, 0x30, + 0x10, 0xc6, 0xe3, 0xdb, 0x3f, 0xec, 0x7a, 0x05, 0x08, 0x0b, 0x21, 0x73, 0x42, 0xde, 0x68, 0x25, + 0xa4, 0x34, 0x24, 0xe2, 0x28, 0xaf, 0xe1, 0x56, 0x27, 0x41, 0x41, 0x81, 0x2c, 0xd1, 0xd0, 0x79, + 0x93, 0xb9, 0x10, 0xf0, 0xc5, 0x91, 0xe3, 0x20, 0x1a, 0x24, 0x1e, 0x81, 0x92, 0x47, 0xe0, 0x51, + 0xae, 0x5c, 0x89, 0xe6, 0x44, 0x71, 0xb0, 0xd9, 0x86, 0xf2, 0x24, 0x5e, 0x00, 0xd9, 0x4e, 0x6e, + 0xaf, 0x44, 0x48, 0x54, 0x99, 0xcf, 0x33, 0xfe, 0x66, 0x7e, 0xe3, 0xe0, 0x99, 0x6e, 0x24, 0xd4, + 0x71, 0xa5, 0x95, 0x51, 0x64, 0xe4, 0xc4, 0xfe, 0xa3, 0xbc, 0x30, 0x6f, 0x9a, 0x55, 0x9c, 0xaa, + 0xd3, 0x24, 0x57, 0xb9, 0x4a, 0x5c, 0x76, 0xd5, 0x9c, 0x38, 0xe5, 0x84, 0x8b, 
0xfc, 0xad, 0x7d, + 0x96, 0x2b, 0x95, 0x4b, 0xd8, 0x55, 0x65, 0x8d, 0x16, 0xa6, 0x50, 0x65, 0x97, 0x7f, 0x7a, 0xcd, + 0x2e, 0x55, 0xda, 0xc0, 0x87, 0x4a, 0xab, 0xb7, 0x90, 0x9a, 0x4e, 0x25, 0xd5, 0xbb, 0x3c, 0x29, + 0xca, 0x1c, 0x6a, 0x03, 0x3a, 0x49, 0x65, 0x01, 0x65, 0x9f, 0xf2, 0x0e, 0x8b, 0x25, 0xbe, 0xc5, + 0x1b, 0x09, 0xaf, 0xaa, 0x4c, 0x18, 0x38, 0x86, 0x3a, 0x25, 0x04, 0x0f, 0x9b, 0x1a, 0x34, 0x45, + 0x21, 0x8a, 0xa6, 0xdc, 0xc5, 0xe4, 0x01, 0x9e, 0x36, 0xae, 0x22, 0x3b, 0x32, 0x74, 0x2f, 0x44, + 0xd1, 0x80, 0xef, 0x0e, 0x16, 0xdf, 0x10, 0xbe, 0x69, 0x4d, 0x9e, 0x69, 0xd5, 0x54, 0xbd, 0x47, + 0x29, 0x4e, 0xa1, 0xf7, 0xb0, 0xb1, 0xf5, 0xb0, 0xdf, 0xba, 0x12, 0x29, 0x38, 0x8f, 0x29, 0xdf, + 0x1d, 0x90, 0x43, 0x3c, 0x29, 0x4a, 0x03, 0xfa, 0xbd, 0x90, 0x74, 0x10, 0xa2, 0x68, 0x76, 0x70, + 0x3f, 0xf6, 0xf0, 0x71, 0x0f, 0x1f, 0x1f, 0x77, 0xf0, 0xcb, 0xe1, 0x97, 0x1f, 0x73, 0xc4, 0xaf, + 0x2e, 0x90, 0x87, 0xd8, 0xaf, 0x97, 0x0e, 0xc3, 0x41, 0x34, 0x3b, 0xb8, 0x1d, 0xfb, 0xcd, 0xdb, + 0x99, 0xec, 0x38, 0xdc, 0x67, 0x09, 0xc5, 0x37, 0x32, 0x90, 0x60, 0x20, 0xa3, 0xa3, 0x10, 0x45, + 0x13, 0xde, 0xcb, 0x2b, 0xe6, 0xf1, 0x8e, 0x79, 0xf1, 0x7b, 0x0f, 0x4f, 0x7a, 0x07, 0x5b, 0x60, + 0x77, 0xdb, 0x03, 0xd9, 0x98, 0xdc, 0xc3, 0x63, 0x0d, 0xa9, 0xd2, 0x59, 0x47, 0xd3, 0x29, 0x72, + 0x17, 0x8f, 0x84, 0x04, 0x6d, 0x1c, 0xc7, 0x94, 0x7b, 0x41, 0x1e, 0xe3, 0xc1, 0x89, 0xd2, 0x74, + 0xf8, 0x77, 0x6c, 0xb6, 0x96, 0xd4, 0x78, 0x2c, 0xc5, 0x0a, 0x64, 0x4d, 0x47, 0x8e, 0xeb, 0x4e, + 0xdc, 0x3d, 0xdd, 0x0b, 0x7b, 0xfa, 0x52, 0x14, 0x7a, 0xf9, 0xfc, 0xec, 0x62, 0x1e, 0x7c, 0xbf, + 0x98, 0xff, 0xcb, 0x8f, 0xe0, 0x6d, 0x8e, 0x32, 0x51, 0x19, 0xd0, 0xbc, 0x6b, 0x45, 0x3e, 0xe2, + 0x99, 0x28, 0x4b, 0x65, 0xdc, 0x34, 0x35, 0x1d, 0xff, 0xff, 0xce, 0xd7, 0xfb, 0x2d, 0x0f, 0xd7, + 0x1b, 0x16, 0x9c, 0x6f, 0x58, 0x70, 0xb9, 0x61, 0xe8, 0x53, 0xcb, 0xd0, 0xd7, 0x96, 0xa1, 0xb3, + 0x96, 0xa1, 0x75, 0xcb, 0xd0, 0xcf, 0x96, 0xa1, 0x5f, 0x2d, 0x0b, 0x2e, 0x5b, 0x86, 0x3e, 0x6f, + 0x59, 0xb0, 0xde, 0xb2, 0xe0, 0x7c, 0xcb, 0x82, 0xd7, 0xfe, 0x81, 0x57, 0x63, 0xb7, 0xce, 0x27, + 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x70, 0xb7, 0x00, 0xbd, 0x7a, 0x03, 0x00, 0x00, +} + +func (this *RuleUpdateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleUpdateDesc) + if !ok { + that2, ok := that.(RuleUpdateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.User != that1.User { + return false + } + if this.UpdatedAt != that1.UpdatedAt { + return false + } + return true +} +func (this *RuleGroupDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleGroupDesc) + if !ok { + that2, ok := that.(RuleGroupDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Namespace != that1.Namespace { + return false + } + if this.Interval != nil && that1.Interval != nil { + if *this.Interval != *that1.Interval { + return false + } + } else if this.Interval != nil { + return false + } else if that1.Interval != nil { + return false + } + if len(this.Rules) != len(that1.Rules) { + return false + } + for i := range this.Rules { + if !this.Rules[i].Equal(that1.Rules[i]) { + return false + } + } + if this.Deleted != that1.Deleted { + return false + } + if this.User != that1.User { + return 
false + } + return true +} +func (this *RuleDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleDesc) + if !ok { + that2, ok := that.(RuleDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Expr != that1.Expr { + return false + } + if this.Record != that1.Record { + return false + } + if this.Alert != that1.Alert { + return false + } + if this.For != nil && that1.For != nil { + if *this.For != *that1.For { + return false + } + } else if this.For != nil { + return false + } else if that1.For != nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if !this.Annotations[i].Equal(that1.Annotations[i]) { + return false + } + } + return true +} +func (this *RuleUpdateDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rules.RuleUpdateDesc{") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "UpdatedAt: "+fmt.Sprintf("%#v", this.UpdatedAt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RuleGroupDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&rules.RuleGroupDesc{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") + s = append(s, "Interval: "+fmt.Sprintf("%#v", this.Interval)+",\n") + if this.Rules != nil { + s = append(s, "Rules: "+fmt.Sprintf("%#v", this.Rules)+",\n") + } + s = append(s, "Deleted: "+fmt.Sprintf("%#v", this.Deleted)+",\n") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RuleDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&rules.RuleDesc{") + s = append(s, "Expr: "+fmt.Sprintf("%#v", this.Expr)+",\n") + s = append(s, "Record: "+fmt.Sprintf("%#v", this.Record)+",\n") + s = append(s, "Alert: "+fmt.Sprintf("%#v", this.Alert)+",\n") + s = append(s, "For: "+fmt.Sprintf("%#v", this.For)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringRules(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *RuleUpdateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleUpdateDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if m.UpdatedAt != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRules(dAtA, i, uint64(m.UpdatedAt)) + } + return i, nil +} + +func (m *RuleGroupDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleGroupDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Namespace) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + } + if m.Interval != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Interval))) + n1, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Interval, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x22 + i++ + i = encodeVarintRules(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Deleted { + dAtA[i] = 0x28 + i++ + if m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.User) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + return i, nil +} + +func (m *RuleDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Expr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.Expr))) + i += copy(dAtA[i:], m.Expr) + } + if len(m.Record) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.Record))) + i += copy(dAtA[i:], m.Record) + } + if len(m.Alert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRules(dAtA, i, uint64(len(m.Alert))) + i += copy(dAtA[i:], m.Alert) + } + if m.For != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRules(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(*m.For))) + n2, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.For, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0x2a + i++ + i = encodeVarintRules(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Annotations) > 0 { + for _, msg := range m.Annotations { + dAtA[i] = 0x32 + i++ + i = encodeVarintRules(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintRules(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RuleUpdateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + if m.UpdatedAt != 0 { + n += 1 + sovRules(uint64(m.UpdatedAt)) + } + return n +} + +func (m *RuleGroupDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + if m.Interval != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Interval) + n += 1 + l + sovRules(uint64(l)) + } + 
if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + if m.Deleted { + n += 2 + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + return n +} + +func (m *RuleDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expr) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Record) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Alert) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + if m.For != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.For) + n += 1 + l + sovRules(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for _, e := range m.Annotations { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + return n +} + +func sovRules(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRules(x uint64) (n int) { + return sovRules(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuleUpdateDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleUpdateDesc{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `UpdatedAt:` + fmt.Sprintf("%v", this.UpdatedAt) + `,`, + `}`, + }, "") + return s +} +func (this *RuleGroupDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleGroupDesc{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1) + `,`, + `Rules:` + strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleDesc", "RuleDesc", 1) + `,`, + `Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `}`, + }, "") + return s +} +func (this *RuleDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleDesc{`, + `Expr:` + fmt.Sprintf("%v", this.Expr) + `,`, + `Record:` + fmt.Sprintf("%v", this.Record) + `,`, + `Alert:` + fmt.Sprintf("%v", this.Alert) + `,`, + `For:` + strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `}`, + }, "") + return s +} +func valueToStringRules(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuleUpdateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleUpdateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleUpdateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + m.UpdatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRules(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleGroupDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleGroupDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Interval, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &RuleDesc{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Deleted = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRules(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alert", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alert = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field For", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.For == nil { + m.For = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.For, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Annotations[len(m.Annotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRules(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRules(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRules + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRules + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRules(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthRules + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRules = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRules = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/storage/rules/rules.proto b/pkg/storage/rules/rules.proto new file mode 100644 index 00000000000..7f13695d659 --- /dev/null +++ b/pkg/storage/rules/rules.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package rules; + +option go_package = "rules"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message 
RuleUpdateDesc {
+  string user = 1;
+  int64 updatedAt = 2;
+}
+
+message RuleGroupDesc {
+  string name = 1;
+  string namespace = 2;
+  google.protobuf.Duration interval = 3 [(gogoproto.stdduration) = true];
+
+  repeated RuleDesc rules = 4;
+
+  bool deleted = 5;
+
+  string user = 6;
+}
+
+message RuleDesc {
+  string expr = 1;
+  string record = 2;
+  string alert = 3;
+  google.protobuf.Duration for = 4 [(gogoproto.stdduration) = true];
+  repeated cortex.LabelPair labels = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"];
+  repeated cortex.LabelPair annotations = 6 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"];
+}
\ No newline at end of file
diff --git a/pkg/storage/rules/store.go b/pkg/storage/rules/store.go
new file mode 100644
index 00000000000..90723d773ec
--- /dev/null
+++ b/pkg/storage/rules/store.go
@@ -0,0 +1,133 @@
+package rules
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/rules"
+)
+
+var (
+	// ErrGroupNotFound is returned if a rule group does not exist
+	ErrGroupNotFound = errors.New("group does not exist")
+	// ErrGroupNamespaceNotFound is returned if a namespace does not exist
+	ErrGroupNamespaceNotFound = errors.New("group namespace does not exist")
+	// ErrUserNotFound is returned if the user does not currently exist
+	ErrUserNotFound = errors.New("no rule groups found for user")
+)
+
+// RulePoller is used to poll for recently updated rules
+type RulePoller interface {
+	PollRules(ctx context.Context) (map[string][]RuleGroup, error)
+	Stop()
+}
+
+// RuleStoreConditions are used to filter retrieved results from a rule store
+type RuleStoreConditions struct {
+	// UserID restricts results to rule groups owned by this user
+	UserID string
+
+	// Namespace restricts results to rule groups in the specified namespace
+	Namespace string
+}
+
+// RuleStore is used to store and retrieve rules
+type RuleStore interface {
+	ListRuleGroups(ctx context.Context, options RuleStoreConditions) (RuleGroupList, error)
+	GetRuleGroup(ctx context.Context, userID, namespace, group string) (RuleGroup, error)
+	SetRuleGroup(ctx context.Context, userID, namespace string, group rulefmt.RuleGroup) error
+	DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error
+}
+
+// RuleGroup is used to retrieve rules from the database for evaluation;
+// an interface is used to allow for lazy-evaluation implementations
+type RuleGroup interface {
+	Rules(ctx context.Context) ([]rules.Rule, error)
+	ID() string
+	Name() string
+	Namespace() string
+	User() string
+	Formatted() rulefmt.RuleGroup
+}
+
+// RuleGroupList contains a set of rule groups
+type RuleGroupList []RuleGroup
+
+// Formatted returns the rule group list as a set of formatted rule groups mapped
+// by namespace
+func (l RuleGroupList) Formatted(user string) map[string][]rulefmt.RuleGroup {
+	ruleMap := map[string][]rulefmt.RuleGroup{}
+	for _, g := range l {
+		if g.User() != user {
+			continue
+		}
+
+		// Appending to the nil slice of a missing map key is safe, so no
+		// existence check is needed and each group is added exactly once.
+		ruleMap[g.Namespace()] = append(ruleMap[g.Namespace()], g.Formatted())
+	}
+	return ruleMap
+}
+
+// RuleNamespace is used to parse a slightly modified prometheus
+// rule file format; if no namespace is set, the default namespace is used
+type RuleNamespace struct {
+	// Namespace field only exists to allow setting the namespace in the file
+	// body instead of deriving it from the file name
+	Namespace string `yaml:"namespace,omitempty"`
+
+	Groups []rulefmt.RuleGroup `yaml:"groups"`
+}
+
+// Validate checks that each rule group in the namespace is uniquely named and valid
+func (r RuleNamespace) Validate() []error {
+	set := map[string]struct{}{}
+	var errs []error
+
+	for _, g := range r.Groups {
+		if g.Name == "" {
+			errs = append(errs, fmt.Errorf("group name must not be empty"))
+		}
+
+		if _, ok := set[g.Name]; ok {
+			errs = append(
+				errs,
+				fmt.Errorf("group name %q is repeated in the same namespace", g.Name),
+			)
+		}
+
+		set[g.Name] = struct{}{}
+
+		errs = append(errs, ValidateRuleGroup(g)...)
+	}
+
+	return errs
+}
+
+// ValidateRuleGroup validates a rulegroup
+func ValidateRuleGroup(g rulefmt.RuleGroup) []error {
+	var errs []error
+	for i, r := range g.Rules {
+		for _, err := range r.Validate() {
+			var ruleName string
+			if r.Alert != "" {
+				ruleName = r.Alert
+			} else {
+				ruleName = r.Record
+			}
+			errs = append(errs, &rulefmt.Error{
+				Group:    g.Name,
+				Rule:     i,
+				RuleName: ruleName,
+				Err:      err,
+			})
+		}
+	}
+
+	return errs
+}
diff --git a/pkg/storage/testutils/testutils.go b/pkg/storage/testutils/testutils.go
new file mode 100644
index 00000000000..f74b59705e0
--- /dev/null
+++ b/pkg/storage/testutils/testutils.go
@@ -0,0 +1,13 @@
+package testutils
+
+import (
+	"github.com/cortexproject/cortex/pkg/storage/alerts"
+	"github.com/cortexproject/cortex/pkg/storage/rules"
+)
+
+// Fixture type for per-backend testing.
+type Fixture interface {
+	Name() string
+	Clients() (alerts.AlertStore, rules.RuleStore, error)
+	Teardown() error
+}
diff --git a/pkg/util/usertracker/tracker.go b/pkg/util/usertracker/tracker.go
new file mode 100644
index 00000000000..8e7dcef315e
--- /dev/null
+++ b/pkg/util/usertracker/tracker.go
@@ -0,0 +1,119 @@
+package usertracker
+
+import (
+	"context"
+	"flag"
+	"sync"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/ring/kv"
+	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log"
+	"github.com/golang/protobuf/proto"
+)
+
+// ProtoUserUpdateDescFactory makes new UserUpdateDesc
+func ProtoUserUpdateDescFactory() proto.Message {
+	return NewUserUpdateDesc()
+}
+
+// NewUserUpdateDesc returns an empty *usertracker.UserUpdateDesc.
+func NewUserUpdateDesc() *UserUpdateDesc {
+	return &UserUpdateDesc{}
+}
+
+// Tracker tracks when users update their configs to allow for efficient polling
+type Tracker struct {
+	logger log.Logger
+	cfg    Config
+	client kv.Client
+
+	// Users whose configs have been updated since the last poll.
+	updatedUsersMtx sync.RWMutex
+	updatedUsers    map[string]UserUpdateDesc
+	done            chan struct{}
+	cancel          context.CancelFunc
+}
+
+// Config contains the configuration required to create a user Tracker.
+type Config struct {
+	KVStore kv.Config
+}
+
+// RegisterFlagsWithPrefix adds the flags required to configure this to the given FlagSet
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	// We want the ability to use different Consul instances for the ring and for user update tracking.
+	cfg.KVStore.RegisterFlagsWithPrefix(prefix+"user-tracker.", f)
+}
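For orientation, a sketch of how a caller such as the ruler might wire the tracker together; the "ruler." prefix and the flag set are illustrative assumptions, not part of this change:

	cfg := Config{}
	cfg.RegisterFlagsWithPrefix("ruler.", flag.NewFlagSet("example", flag.ExitOnError))

	t, err := NewTracker(cfg)
	if err != nil {
		return err
	}
	defer t.Stop()

	_ = t.UpdateUser(ctx, "user1")  // CAS a fresh UserUpdateDesc into the KV store
	users := t.GetUpdatedUsers(ctx) // drain the users changed since the last call

Because loop accumulates updates from WatchPrefix and GetUpdatedUsers drains the set, a poller sees each changed user once per poll instead of re-listing every user.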
+// NewTracker returns a new user update tracker using either Consul
+// or an in-memory KV store.
+func NewTracker(cfg Config) (*Tracker, error) {
+	codec := codec.Proto{Factory: ProtoUserUpdateDescFactory}
+
+	client, err := kv.NewClient(cfg.KVStore, codec)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t := Tracker{
+		logger:       util.Logger,
+		cfg:          cfg,
+		done:         make(chan struct{}),
+		updatedUsers: map[string]UserUpdateDesc{},
+		client:       client,
+		cancel:       cancel,
+	}
+	go t.loop(ctx)
+	return &t, nil
+}
+
+// Follows pattern used by ring for WatchKey.
+func (c *Tracker) loop(ctx context.Context) {
+	defer close(c.done)
+	// The KVStore config we gave when creating c should have contained a prefix,
+	// which would have given us a prefixed KVStore client. So, we can pass empty string here.
+	c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool {
+		update := value.(*UserUpdateDesc)
+		c.updatedUsersMtx.Lock()
+		defer c.updatedUsersMtx.Unlock()
+		c.updatedUsers[key] = *update
+		return true
+	})
+}
+
+// Stop calls the tracker's cancel function, which ends the WatchPrefix loop.
+func (c *Tracker) Stop() {
+	c.cancel()
+	<-c.done
+}
+
+// GetUpdatedUsers returns all of the users updated since the last poll
+// and resets the tracked set
+func (c *Tracker) GetUpdatedUsers(ctx context.Context) []string {
+	c.updatedUsersMtx.Lock()
+	defer c.updatedUsersMtx.Unlock()
+
+	// zero length, capacity hint only: the loop below appends into the slice
+	users := make([]string, 0, len(c.updatedUsers))
+	for u := range c.updatedUsers {
+		users = append(users, u)
+	}
+
+	c.updatedUsers = map[string]UserUpdateDesc{}
+
+	return users
+}
+
+// UpdateUser pushes a change to the KV store to signal that the user's
+// configs have been updated
+func (c *Tracker) UpdateUser(ctx context.Context, userID string) error {
+	return c.client.CAS(ctx, userID, func(in interface{}) (out interface{}, retry bool, err error) {
+		// Add an entry to mark an update to a user's rule configs
+		return &UserUpdateDesc{
+			User:      userID,
+			UpdatedAt: time.Now().UnixNano(),
+		}, true, nil
+	})
+}
diff --git a/pkg/util/usertracker/tracker.pb.go b/pkg/util/usertracker/tracker.pb.go
new file mode 100644
index 00000000000..960d852cdf4
--- /dev/null
+++ b/pkg/util/usertracker/tracker.pb.go
@@ -0,0 +1,446 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: tracker.proto
+
+package usertracker
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
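The generated message below is what the codec.Proto codec handed to kv.NewClient serializes on every CAS and watch notification. A round-trip sketch (illustrative only):

	in := &UserUpdateDesc{User: "user1", UpdatedAt: time.Now().UnixNano()}
	b, err := proto.Marshal(in) // picks up the fast-path Marshal generated below
	if err != nil {
		return err
	}
	out := &UserUpdateDesc{}
	if err := proto.Unmarshal(b, out); err != nil {
		return err
	}
	// in.Equal(out) == true, per the generated Equal method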
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type UserUpdateDesc struct {
+	User      string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+	UpdatedAt int64  `protobuf:"varint,2,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"`
+}
+
+func (m *UserUpdateDesc) Reset()      { *m = UserUpdateDesc{} }
+func (*UserUpdateDesc) ProtoMessage() {}
+func (*UserUpdateDesc) Descriptor() ([]byte, []int) {
+	return fileDescriptor_a0ba8625d8751af3, []int{0}
+}
+func (m *UserUpdateDesc) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UserUpdateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UserUpdateDesc.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UserUpdateDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserUpdateDesc.Merge(m, src)
+}
+func (m *UserUpdateDesc) XXX_Size() int {
+	return m.Size()
+}
+func (m *UserUpdateDesc) XXX_DiscardUnknown() {
+	xxx_messageInfo_UserUpdateDesc.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserUpdateDesc proto.InternalMessageInfo
+
+func (m *UserUpdateDesc) GetUser() string {
+	if m != nil {
+		return m.User
+	}
+	return ""
+}
+
+func (m *UserUpdateDesc) GetUpdatedAt() int64 {
+	if m != nil {
+		return m.UpdatedAt
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*UserUpdateDesc)(nil), "usertracker.UserUpdateDesc")
+}
+
+func init() { proto.RegisterFile("tracker.proto", fileDescriptor_a0ba8625d8751af3) }
+
+var fileDescriptor_a0ba8625d8751af3 = []byte{
+	// 195 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x29, 0x4a, 0x4c,
+	0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2e, 0x2d, 0x4e, 0x2d, 0x82,
+	0x0a, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7,
+	0xa7, 0xe7, 0xeb, 0x83, 0xd5, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xab,
+	0xe4, 0xc4, 0xc5, 0x17, 0x5a, 0x9c, 0x5a, 0x14, 0x5a, 0x90, 0x92, 0x58, 0x92, 0xea, 0x92, 0x5a,
+	0x9c, 0x2c, 0x24, 0xc4, 0xc5, 0x02, 0x32, 0x4f, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc,
+	0x16, 0x92, 0xe1, 0xe2, 0x2c, 0x05, 0xab, 0x48, 0x71, 0x2c, 0x91, 0x60, 0x52, 0x60, 0xd4, 0x60,
+	0x0e, 0x42, 0x08, 0x38, 0x39, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87,
+	0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2,
+	0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71,
+	0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x42, 0x76, 0x75,
+	0x12, 0x1b, 0xd8, 0x35, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xdb, 0xd4, 0x4d, 0xda,
+	0x00, 0x00, 0x00,
+}
+
+func (this *UserUpdateDesc) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*UserUpdateDesc)
+	if !ok {
+		that2, ok := that.(UserUpdateDesc)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.User != that1.User {
+		return false
+	}
+	if this.UpdatedAt != that1.UpdatedAt {
+		return false
+	}
+	return true
+}
+func (this *UserUpdateDesc) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&usertracker.UserUpdateDesc{")
+	s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n")
+	s = append(s, "UpdatedAt: "+fmt.Sprintf("%#v", this.UpdatedAt)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringTracker(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func (m *UserUpdateDesc) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UserUpdateDesc) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.User) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTracker(dAtA, i, uint64(len(m.User)))
+		i += copy(dAtA[i:], m.User)
+	}
+	if m.UpdatedAt != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintTracker(dAtA, i, uint64(m.UpdatedAt))
+	}
+	return i, nil
+}
+
+func encodeVarintTracker(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *UserUpdateDesc) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovTracker(uint64(l))
+	}
+	if m.UpdatedAt != 0 {
+		n += 1 + sovTracker(uint64(m.UpdatedAt))
+	}
+	return n
+}
+
+func sovTracker(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTracker(x uint64) (n int) {
+	return sovTracker(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *UserUpdateDesc) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UserUpdateDesc{`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`UpdatedAt:` + fmt.Sprintf("%v", this.UpdatedAt) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringTracker(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *UserUpdateDesc) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTracker
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UserUpdateDesc: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UserUpdateDesc: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTracker
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTracker
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTracker
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			m.UpdatedAt = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTracker
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.UpdatedAt |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTracker(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTracker
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTracker
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipTracker(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTracker
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTracker
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTracker
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTracker
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthTracker
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowTracker
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipTracker(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthTracker
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthTracker = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTracker   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/util/usertracker/tracker.proto b/pkg/util/usertracker/tracker.proto
new file mode 100644
index 00000000000..d35b52ce5d4
--- /dev/null
+++ b/pkg/util/usertracker/tracker.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+package usertracker;
+
+option go_package = "usertracker";
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message UserUpdateDesc {
+  string user = 1;
+  int64 updatedAt = 2;
+}
\ No newline at end of file
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d2dd5064b14..76998bd12bd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -588,8 +588,8 @@ golang.org/x/tools/cover
 # google.golang.org/api v0.4.0
 google.golang.org/api/option
 google.golang.org/api/transport/http
-google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/iterator
+google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/transport/grpc
 google.golang.org/api/googleapi
 google.golang.org/api/storage/v1
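
Reviewer note: a minimal usage sketch of the new Tracker API added in this diff (NewTracker, UpdateUser, GetUpdatedUsers, Stop). This is illustrative only, not part of the change: the Config literal, the KVStore field layout (assumed here to be a kv.Config with Store and Prefix fields), and the poll interval are assumptions about how a caller such as the ruler would wire this up.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/util/usertracker"
)

func main() {
	// Assumed wiring: cfg.KVStore is a kv.Config, and the prefix ensures the
	// tracker gets a prefixed KV client (Tracker.loop relies on this when it
	// passes "" to WatchPrefix).
	var cfg usertracker.Config
	cfg.KVStore.Store = "consul"      // hypothetical flag value
	cfg.KVStore.Prefix = "usertracker/" // hypothetical prefix

	t, err := usertracker.NewTracker(cfg)
	if err != nil {
		panic(err)
	}
	defer t.Stop()

	ctx := context.Background()

	// Writer side: signal that a user's rule configs changed. Internally this
	// is a CAS that stores a UserUpdateDesc keyed by user ID.
	if err := t.UpdateUser(ctx, "user-1"); err != nil {
		panic(err)
	}

	// Reader side: poll periodically; each call drains and returns the set of
	// users updated since the previous call.
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()
	for i := 0; i < 4; i++ {
		<-ticker.C
		for _, u := range t.GetUpdatedUsers(ctx) {
			fmt.Println("reload rule configs for", u)
		}
	}
}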
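
On the wire format: the generated MarshalTo writes field 1 (user) with tag byte 0xa (field number 1, wire type 2, length-delimited) and field 2 (updatedAt) with tag byte 0x10 (field number 2, wire type 0, varint). A hypothetical sanity test, not included in this diff, exercising the generated Marshal/Unmarshal/Equal pair:

package usertracker

import "testing"

// TestUserUpdateDescRoundTrip checks that the generated marshaling code
// round-trips and that the first byte is the expected field-1 tag.
func TestUserUpdateDescRoundTrip(t *testing.T) {
	in := UserUpdateDesc{User: "user-1", UpdatedAt: 42}

	b, err := in.Marshal()
	if err != nil {
		t.Fatal(err)
	}

	// Leading tag byte for field 1, wire type 2: (1 << 3) | 2 == 0xa.
	if b[0] != 0xa {
		t.Fatalf("unexpected leading tag byte: %#x", b[0])
	}

	var out UserUpdateDesc
	if err := out.Unmarshal(b); err != nil {
		t.Fatal(err)
	}
	if !out.Equal(&in) {
		t.Fatalf("round trip mismatch: %v != %v", out, in)
	}
}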