@@ -1251,35 +1251,35 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte
 		}
 	}

-	handleAppendFailure = func(err error, timestampMs int64, lbls []cortexpb.LabelAdapter, copiedLabels labels.Labels, matchedLabelSetLimits []validation.LimitsPerLabelSet, seriesHash uint64) (rollback bool) {
+	handleAppendFailure = func(err error, timestampMs int64, lbls []cortexpb.LabelAdapter, copiedLabels labels.Labels, matchedLabelSetLimits []validation.LimitsPerLabelSet) (rollback bool) {
 		// Check if the error is a soft error we can proceed on. If so, we keep track
 		// of it, so that we can return it back to the distributor, which will return a
 		// 400 error to the client. The client (Prometheus) will not retry on 400, and
 		// we actually ingested all samples which haven't failed.
 		switch cause := errors.Cause(err); {
 		case errors.Is(cause, storage.ErrOutOfBounds):
 			sampleOutOfBoundsCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("sample_out_of_bounds", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(sampleOutOfBounds, userID, &copiedLabels)
 			updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })

 		case errors.Is(cause, storage.ErrOutOfOrderSample):
 			sampleOutOfOrderCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("sample_out_of_order", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(sampleOutOfOrder, userID, &copiedLabels)
 			updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })

 		case errors.Is(cause, storage.ErrDuplicateSampleForTimestamp):
 			newValueForTimestampCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("new_value_for_timestamp", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(newValueForTimestamp, userID, &copiedLabels)
 			updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })

 		case errors.Is(cause, storage.ErrTooOldSample):
 			sampleTooOldCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("sample_too_old", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(sampleTooOld, userID, &copiedLabels)
 			updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) })

 		case errors.Is(cause, errMaxSeriesPerUserLimitExceeded):
 			perUserSeriesLimitCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("per_user_series_limit", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(perUserSeriesLimit, userID, &copiedLabels)
 			updateFirstPartial(func() error {
 				return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause, copiedLabels))
 			})
@@ -1292,14 +1292,14 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte

 		case errors.Is(cause, errMaxSeriesPerMetricLimitExceeded):
 			perMetricSeriesLimitCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("per_metric_series_limit", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(perMetricSeriesLimit, userID, &copiedLabels)
 			updateFirstPartial(func() error {
 				return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause, copiedLabels))
 			})

 		case errors.As(cause, &errMaxSeriesPerLabelSetLimitExceeded{}):
 			perLabelSetSeriesLimitCount++
-			i.validateMetrics.DiscardedSeriesTracker.Track("per_labelset_series_limit", userID, seriesHash)
+			i.validateMetrics.DiscardedSeriesTracker.Track(perLabelsetSeriesLimit, userID, &copiedLabels)
 			// We only track per labelset discarded samples for throttling by labelset limit.
 			reasonCounter.increment(matchedLabelSetLimits, perLabelsetSeriesLimit)
 			updateFirstPartial(func() error {
@@ -1381,7 +1381,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte

 			failedSamplesCount++

-			if rollback := handleAppendFailure(err, s.TimestampMs, ts.Labels, copiedLabels, matchedLabelSetLimits, tsLabelsHash); !rollback {
+			if rollback := handleAppendFailure(err, s.TimestampMs, ts.Labels, copiedLabels, matchedLabelSetLimits); !rollback {
 				continue
 			}
 			// The error looks an issue on our side, so we should rollback
@@ -1426,7 +1426,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte

 			failedHistogramsCount++

-			if rollback := handleAppendFailure(err, hp.TimestampMs, ts.Labels, copiedLabels, matchedLabelSetLimits, tsLabelsHash); !rollback {
+			if rollback := handleAppendFailure(err, hp.TimestampMs, ts.Labels, copiedLabels, matchedLabelSetLimits); !rollback {
 				continue
 			}
 			// The error looks an issue on our side, so we should rollback
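The substance of this diff is that `DiscardedSeriesTracker.Track` now receives the discard reason as the existing reason constant (for example `sampleOutOfBounds` instead of the `"sample_out_of_bounds"` string) and the copied series labels instead of a hash precomputed by the caller, so `handleAppendFailure` no longer needs its `seriesHash` parameter and the `tsLabelsHash` argument is dropped at both call sites. The sketch below illustrates one way a labels-based `Track` could work under that assumption; the `discardedSeriesTracker` type, its fields, and the dedup-by-hash strategy are hypothetical and not the actual Cortex implementation.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/prometheus/model/labels"
)

// discardedSeriesTracker is a hypothetical sketch of a tracker whose Track
// method is handed the series labels and derives the series identity itself,
// instead of being given a precomputed hash by the caller.
type discardedSeriesTracker struct {
	mu   sync.Mutex
	seen map[string]map[uint64]struct{} // "reason/userID" -> set of series hashes
}

func newDiscardedSeriesTracker() *discardedSeriesTracker {
	return &discardedSeriesTracker{seen: make(map[string]map[uint64]struct{})}
}

// Track records that a series (identified by its labels) was discarded for
// the given reason and tenant. Hashing happens here, so callers only pass
// the labels they already copied for the append.
func (t *discardedSeriesTracker) Track(reason, userID string, lbls *labels.Labels) {
	key := reason + "/" + userID
	hash := lbls.Hash()

	t.mu.Lock()
	defer t.mu.Unlock()
	if t.seen[key] == nil {
		t.seen[key] = make(map[uint64]struct{})
	}
	t.seen[key][hash] = struct{}{}
}

func main() {
	tracker := newDiscardedSeriesTracker()
	series := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	tracker.Track("sample_out_of_order", "user-1", &series)
	fmt.Println(len(tracker.seen["sample_out_of_order/user-1"])) // 1
}
```

Deriving the series identity inside the tracker keeps every call site to a one-argument change and leaves the tracker free to decide how it deduplicates or counts discarded series.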