Commit 31a7147

Fix up integration tests
Signed-off-by: Andrew Bloomgarden <[email protected]>
1 parent e1e0a93 commit 31a7147

5 files changed: +169 -226 lines changed


integration/alertmanager_test.go

Lines changed: 94 additions & 114 deletions
@@ -89,145 +89,125 @@ func TestAlertmanager(t *testing.T) {
 }
 
 func TestAlertmanagerStoreAPI(t *testing.T) {
-	tests := map[string]struct {
-		legacyAlertStore bool
-	}{
-		"bucket alertstore": {legacyAlertStore: false},
-	}
-
-	for testName, testCfg := range tests {
-		t.Run(testName, func(t *testing.T) {
-			s, err := e2e.NewScenario(networkName)
-			require.NoError(t, err)
-			defer s.Close()
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
 
-			flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags(testCfg.legacyAlertStore))
+	flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags())
 
-			minio := e2edb.NewMinio(9000, alertsBucketName)
-			require.NoError(t, s.StartAndWaitReady(minio))
+	minio := e2edb.NewMinio(9000, alertsBucketName)
+	require.NoError(t, s.StartAndWaitReady(minio))
 
-			am := e2ecortex.NewAlertmanager(
-				"alertmanager",
-				flags,
-				"",
-			)
+	am := e2ecortex.NewAlertmanager(
+		"alertmanager",
+		flags,
+		"",
+	)
 
-			require.NoError(t, s.StartAndWaitReady(am))
-			require.NoError(t, am.WaitSumMetrics(e2e.Equals(1), "alertmanager_cluster_members"))
+	require.NoError(t, s.StartAndWaitReady(am))
+	require.NoError(t, am.WaitSumMetrics(e2e.Equals(1), "alertmanager_cluster_members"))
 
-			c, err := e2ecortex.NewClient("", "", am.HTTPEndpoint(), "", "user-1")
-			require.NoError(t, err)
+	c, err := e2ecortex.NewClient("", "", am.HTTPEndpoint(), "", "user-1")
+	require.NoError(t, err)
 
-			_, err = c.GetAlertmanagerConfig(context.Background())
-			require.Error(t, err)
-			require.EqualError(t, err, e2ecortex.ErrNotFound.Error())
+	_, err = c.GetAlertmanagerConfig(context.Background())
+	require.Error(t, err)
+	require.EqualError(t, err, e2ecortex.ErrNotFound.Error())
 
-			err = c.SetAlertmanagerConfig(context.Background(), cortexAlertmanagerUserConfigYaml, map[string]string{})
-			require.NoError(t, err)
+	err = c.SetAlertmanagerConfig(context.Background(), cortexAlertmanagerUserConfigYaml, map[string]string{})
+	require.NoError(t, err)
 
-			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_config_last_reload_successful"},
-				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
-				e2e.WaitMissingMetrics))
-			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Greater(0), []string{"cortex_alertmanager_config_last_reload_successful_seconds"},
-				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
-				e2e.WaitMissingMetrics))
+	require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_config_last_reload_successful"},
+		e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
+		e2e.WaitMissingMetrics))
+	require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Greater(0), []string{"cortex_alertmanager_config_last_reload_successful_seconds"},
+		e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
+		e2e.WaitMissingMetrics))
 
-			cfg, err := c.GetAlertmanagerConfig(context.Background())
-			require.NoError(t, err)
+	cfg, err := c.GetAlertmanagerConfig(context.Background())
+	require.NoError(t, err)
 
-			// Ensure the returned status config matches the loaded config
-			require.NotNil(t, cfg)
-			require.Equal(t, "example_receiver", cfg.Route.Receiver)
-			require.Len(t, cfg.Route.GroupByStr, 1)
-			require.Equal(t, "example_groupby", cfg.Route.GroupByStr[0])
-			require.Len(t, cfg.Receivers, 1)
-			require.Equal(t, "example_receiver", cfg.Receivers[0].Name)
+	// Ensure the returned status config matches the loaded config
+	require.NotNil(t, cfg)
+	require.Equal(t, "example_receiver", cfg.Route.Receiver)
+	require.Len(t, cfg.Route.GroupByStr, 1)
+	require.Equal(t, "example_groupby", cfg.Route.GroupByStr[0])
+	require.Len(t, cfg.Receivers, 1)
+	require.Equal(t, "example_receiver", cfg.Receivers[0].Name)
 
-			err = c.SendAlertToAlermanager(context.Background(), &model.Alert{Labels: model.LabelSet{"foo": "bar"}})
-			require.NoError(t, err)
+	err = c.SendAlertToAlermanager(context.Background(), &model.Alert{Labels: model.LabelSet{"foo": "bar"}})
+	require.NoError(t, err)
 
-			require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_alerts_received_total"},
-				e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
-				e2e.WaitMissingMetrics))
+	require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"cortex_alertmanager_alerts_received_total"},
+		e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")),
+		e2e.WaitMissingMetrics))
 
-			err = c.DeleteAlertmanagerConfig(context.Background())
-			require.NoError(t, err)
+	err = c.DeleteAlertmanagerConfig(context.Background())
+	require.NoError(t, err)
 
-			// The deleted config is applied asynchronously, so we should wait until the metric
-			// disappear for the specific user.
-			require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful", e2e.WithLabelMatchers(
-				labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
-			require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful_seconds", e2e.WithLabelMatchers(
-				labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
-
-			cfg, err = c.GetAlertmanagerConfig(context.Background())
-			require.Error(t, err)
-			require.Nil(t, cfg)
-			require.EqualError(t, err, "not found")
-		})
-	}
+	// The deleted config is applied asynchronously, so we should wait until the metric
+	// disappear for the specific user.
+	require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful", e2e.WithLabelMatchers(
+		labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
+	require.NoError(t, am.WaitRemovedMetric("cortex_alertmanager_config_last_reload_successful_seconds", e2e.WithLabelMatchers(
+		labels.MustNewMatcher(labels.MatchEqual, "user", "user-1"))))
+
+	cfg, err = c.GetAlertmanagerConfig(context.Background())
+	require.Error(t, err)
+	require.Nil(t, cfg)
+	require.EqualError(t, err, "not found")
 }
 
 func TestAlertmanagerClustering(t *testing.T) {
-	tests := map[string]struct {
-		legacyAlertStore bool
-	}{
-		"bucket alertstore": {legacyAlertStore: false},
-	}
-
-	for testName, testCfg := range tests {
-		t.Run(testName, func(t *testing.T) {
-			s, err := e2e.NewScenario(networkName)
-			require.NoError(t, err)
-			defer s.Close()
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
 
-			flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags(testCfg.legacyAlertStore))
+	flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags())
 
-			// Start dependencies.
-			minio := e2edb.NewMinio(9000, alertsBucketName)
-			require.NoError(t, s.StartAndWaitReady(minio))
+	// Start dependencies.
+	minio := e2edb.NewMinio(9000, alertsBucketName)
+	require.NoError(t, s.StartAndWaitReady(minio))
 
-			client, err := s3.NewBucketWithConfig(nil, s3.Config{
-				Endpoint:  minio.HTTPEndpoint(),
-				Insecure:  true,
-				Bucket:    alertsBucketName,
-				AccessKey: e2edb.MinioAccessKey,
-				SecretKey: e2edb.MinioSecretKey,
-			}, "alertmanager-test")
-			require.NoError(t, err)
+	client, err := s3.NewBucketWithConfig(nil, s3.Config{
+		Endpoint:  minio.HTTPEndpoint(),
+		Insecure:  true,
+		Bucket:    alertsBucketName,
+		AccessKey: e2edb.MinioAccessKey,
+		SecretKey: e2edb.MinioSecretKey,
+	}, "alertmanager-test")
+	require.NoError(t, err)
 
-			// Create and upload an Alertmanager configuration.
-			user := "user-1"
-			desc := alertspb.AlertConfigDesc{RawConfig: simpleAlertmanagerConfig, User: user, Templates: []*alertspb.TemplateDesc{}}
+	// Create and upload an Alertmanager configuration.
+	user := "user-1"
+	desc := alertspb.AlertConfigDesc{RawConfig: simpleAlertmanagerConfig, User: user, Templates: []*alertspb.TemplateDesc{}}
 
-			d, err := desc.Marshal()
-			require.NoError(t, err)
-			err = client.Upload(context.Background(), fmt.Sprintf("/alerts/%s", user), bytes.NewReader(d))
-			require.NoError(t, err)
+	d, err := desc.Marshal()
+	require.NoError(t, err)
+	err = client.Upload(context.Background(), fmt.Sprintf("/alerts/%s", user), bytes.NewReader(d))
+	require.NoError(t, err)
 
-			peers := strings.Join([]string{
-				e2e.NetworkContainerHostPort(networkName, "alertmanager-1", e2ecortex.GossipPort),
-				e2e.NetworkContainerHostPort(networkName, "alertmanager-2", e2ecortex.GossipPort),
-			}, ",")
-			flags = mergeFlags(flags, AlertmanagerClusterFlags(peers))
+	peers := strings.Join([]string{
+		e2e.NetworkContainerHostPort(networkName, "alertmanager-1", e2ecortex.GossipPort),
+		e2e.NetworkContainerHostPort(networkName, "alertmanager-2", e2ecortex.GossipPort),
+	}, ",")
+	flags = mergeFlags(flags, AlertmanagerClusterFlags(peers))
 
-			// Wait for the Alertmanagers to start.
-			alertmanager1 := e2ecortex.NewAlertmanager("alertmanager-1", flags, "")
-			alertmanager2 := e2ecortex.NewAlertmanager("alertmanager-2", flags, "")
+	// Wait for the Alertmanagers to start.
+	alertmanager1 := e2ecortex.NewAlertmanager("alertmanager-1", flags, "")
+	alertmanager2 := e2ecortex.NewAlertmanager("alertmanager-2", flags, "")
 
-			alertmanagers := e2ecortex.NewCompositeCortexService(alertmanager1, alertmanager2)
+	alertmanagers := e2ecortex.NewCompositeCortexService(alertmanager1, alertmanager2)
 
-			// Start Alertmanager instances.
-			for _, am := range alertmanagers.Instances() {
-				require.NoError(t, s.StartAndWaitReady(am))
-			}
+	// Start Alertmanager instances.
+	for _, am := range alertmanagers.Instances() {
+		require.NoError(t, s.StartAndWaitReady(am))
+	}
 
-			for _, am := range alertmanagers.Instances() {
-				require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(0)), "alertmanager_cluster_health_score")) // Lower means healthier, 0 being totally healthy.
-				require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(0)), "alertmanager_cluster_failed_peers"))
-				require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(2)), "alertmanager_cluster_members"))
-			}
-		})
+	for _, am := range alertmanagers.Instances() {
+		require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(0)), "alertmanager_cluster_health_score")) // Lower means healthier, 0 being totally healthy.
+		require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(0)), "alertmanager_cluster_failed_peers"))
+		require.NoError(t, am.WaitSumMetrics(e2e.Equals(float64(2)), "alertmanager_cluster_members"))
 	}
 }
 
@@ -245,7 +225,7 @@ func TestAlertmanagerSharding(t *testing.T) {
 			require.NoError(t, err)
 			defer s.Close()
 
-			flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags(false))
+			flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags())
 
 			// Start dependencies.
 			consul := e2edb.NewConsul()
@@ -645,7 +625,7 @@ func TestAlertmanagerShardingScaling(t *testing.T) {
 			}
 
 			flags := mergeFlags(AlertmanagerFlags(),
-				AlertmanagerS3Flags(false),
+				AlertmanagerS3Flags(),
 				AlertmanagerShardingFlags(consul.NetworkHTTPEndpoint(), testCfg.replicationFactor),
 				AlertmanagerPersisterFlags(persistInterval))
 
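Note (illustration only): TestAlertmanagerStoreAPI above asserts that the round-tripped config uses an example_receiver route grouped by example_groupby. A hypothetical minimal user config with that shape, written as a Go constant in the way the tests keep theirs; the actual cortexAlertmanagerUserConfigYaml constant lives in integration/configs.go and is not part of this diff:

package integration

// Hypothetical sketch, not the repository's constant: the minimal Alertmanager
// user config implied by the assertions in TestAlertmanagerStoreAPI.
const exampleAlertmanagerUserConfigYaml = `route:
  receiver: example_receiver
  group_by: [example_groupby]

receivers:
  - name: example_receiver
`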

integration/configs.go

Lines changed: 4 additions & 25 deletions
@@ -106,21 +106,12 @@ var (
 
 	AlertmanagerLocalFlags = func() map[string]string {
 		return map[string]string{
-			"-alertmanager.storage.type":       "local",
-			"-alertmanager.storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+			"-alertmanager-storage.backend":    "local",
+			"-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
 		}
 	}
 
-	AlertmanagerS3Flags = func(legacy bool) map[string]string {
-		if legacy {
-			return map[string]string{
-				"-alertmanager.storage.type":                "s3",
-				"-alertmanager.storage.s3.buckets":          alertsBucketName,
-				"-alertmanager.storage.s3.force-path-style": "true",
-				"-alertmanager.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
-			}
-		}
-
+	AlertmanagerS3Flags = func() map[string]string {
 		return map[string]string{
 			"-alertmanager-storage.backend":          "s3",
 			"-alertmanager-storage.s3.access-key-id": e2edb.MinioAccessKey,
@@ -131,19 +122,7 @@ var (
 		}
 	}
 
-	RulerFlags = func(legacy bool) map[string]string {
-		if legacy {
-			return map[string]string{
-				"-api.response-compression-enabled":  "true",
-				"-ruler.enable-sharding":             "false",
-				"-ruler.poll-interval":               "2s",
-				"-experimental.ruler.enable-api":     "true",
-				"-ruler.storage.type":                "s3",
-				"-ruler.storage.s3.buckets":          rulestoreBucketName,
-				"-ruler.storage.s3.force-path-style": "true",
-				"-ruler.storage.s3.url":              fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
-			}
-		}
+	RulerFlags = func() map[string]string {
 		return map[string]string{
 			"-api.response-compression-enabled": "true",
 			"-ruler.enable-sharding":            "false",

integration/e2ecortex/services.go

Lines changed: 0 additions & 2 deletions
@@ -193,7 +193,6 @@ func NewIngesterWithConfigFile(name string, store RingStore, address, configFile
 			"-ingester.join-after":         "0s",
 			"-ingester.min-ready-duration": "0s",
 			"-ingester.concurrent-flushes": "10",
-			"-ingester.max-transfer-retries": "10",
 			"-ingester.num-tokens":         "512",
 		}, flags))...),
 		e2e.NewHTTPReadinessProbe(httpPort, "/ready", 200, 299),
@@ -339,7 +338,6 @@ func NewSingleBinary(name string, flags map[string]string, image string, otherPo
 			"-ingester.join-after":         "0s",
 			"-ingester.min-ready-duration": "0s",
 			"-ingester.concurrent-flushes": "10",
-			"-ingester.max-transfer-retries": "10",
 			"-ingester.num-tokens":         "512",
 			// Startup quickly.
 			"-store-gateway.sharding-ring.wait-stability-min-duration": "0",

integration/integration_memberlist_single_binary_test.go

Lines changed: 0 additions & 1 deletion
@@ -120,7 +120,6 @@ func newSingleBinary(name string, servername string, join string, testFlags map[
 		"-ingester.join-after":         "0s", // join quickly
 		"-ingester.min-ready-duration": "0s",
 		"-ingester.concurrent-flushes": "10",
-		"-ingester.max-transfer-retries": "0", // disable
 		"-ingester.num-tokens":         "512",
 		"-ingester.observe-period":     "5s", // to avoid conflicts in tokens
 		"-ring.store":                  "memberlist",
