GetWorkflowExecutionHistory long poll soft timeout #8238
New file (package contextutil): a helper for deriving a child context that leaves headroom before the parent's deadline.

package contextutil

import (
	"context"
	"time"
)

var noop = func() {}

// WithDeadlineBuffer creates a child context with the desired timeout.
// If the parent has a deadline, the child's timeout is capped at
// min(time until parent deadline - buffer, timeout). Use this to create
// a child context when the child cannot consume all of the parent's
// deadline and some time must be left for the parent to do post-work.
func WithDeadlineBuffer(
	parent context.Context,
	timeout time.Duration,
	buffer time.Duration,
) (context.Context, context.CancelFunc) {
	if parent.Err() != nil {
		return parent, noop
	}

	deadline, hasDeadline := parent.Deadline()
	if !hasDeadline {
		return context.WithTimeout(parent, timeout)
	}

	remaining := time.Until(deadline) - buffer
	if remaining < timeout {
		// Cap the timeout to the remaining time minus buffer.
		timeout = max(0, remaining)
	}
	return context.WithTimeout(parent, timeout)
}
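The PR title points at using this helper to give the GetWorkflowExecutionHistory long poll a soft timeout. As a rough illustration of that call pattern (the handler shape, the one-minute/five-second values, and the return-empty-result-on-soft-timeout behavior are assumptions for the sketch, not code from this PR; the helper body is inlined so the example runs on its own):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// withDeadlineBuffer mirrors the contextutil helper above, inlined so this
// sketch is self-contained.
func withDeadlineBuffer(parent context.Context, timeout, buffer time.Duration) (context.Context, context.CancelFunc) {
	if parent.Err() != nil {
		return parent, func() {}
	}
	deadline, ok := parent.Deadline()
	if !ok {
		return context.WithTimeout(parent, timeout)
	}
	if remaining := time.Until(deadline) - buffer; remaining < timeout {
		timeout = max(0, remaining)
	}
	return context.WithTimeout(parent, timeout)
}

// handleLongPoll is a hypothetical long-poll handler. It waits on a child
// context that expires at min(parent deadline - buffer, now + soft timeout),
// so there is always time left to assemble a response before the caller's
// own deadline fires.
func handleLongPoll(ctx context.Context, wait func(context.Context) error) error {
	pollCtx, cancel := withDeadlineBuffer(ctx, time.Minute, 5*time.Second)
	defer cancel()

	err := wait(pollCtx)
	if errors.Is(err, context.DeadlineExceeded) && ctx.Err() == nil {
		// Only the soft timeout fired; the caller is still waiting, so an
		// empty result (rather than an error) can be returned.
		return nil
	}
	return err
}

func main() {
	// The caller allows 3 seconds overall; 3s minus the 5s buffer is negative,
	// so the child context is already expired, but the parent is still healthy
	// and handleLongPoll returns nil immediately.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	err := handleLongPoll(ctx, func(c context.Context) error {
		<-c.Done() // simulate waiting for history events that never arrive
		return c.Err()
	})
	fmt.Println("long poll returned:", err)
}
```

The buffer is what lets the handler distinguish "nothing happened yet, return an empty page" from a genuine caller deadline: when the soft timeout fires, the parent context is still live.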
New file (package contextutil): tests for WithDeadlineBuffer.

package contextutil

import (
	"context"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

const testTolerance = 5 * time.Second

func TestWithDeadlineBuffer(t *testing.T) {
	const timeout = 10 * time.Minute
	const buffer = 1 * time.Minute
	start := time.Now()

	t.Run("parent is cancelled", func(t *testing.T) {
		parent, cancel := context.WithCancel(context.Background())
		cancel()

		child, _ := WithDeadlineBuffer(parent, timeout, buffer)
		require.Equal(t, parent, child)
	})

	t.Run("parent has no deadline", func(t *testing.T) {
		parent := context.Background()

		t.Run("timeout specified", func(t *testing.T) {
			child, _ := WithDeadlineBuffer(parent, timeout, 0)
			dl, _ := child.Deadline()
			require.WithinDuration(t, start.Add(timeout), dl, testTolerance)
		})
	})

	t.Run("parent has deadline", func(t *testing.T) {
		parent, parentCancel := context.WithTimeout(context.Background(), timeout)
		defer parentCancel()
		parentDeadline, _ := parent.Deadline()

		t.Run("enough buffer left", func(t *testing.T) {
			child, _ := WithDeadlineBuffer(parent, math.MaxInt, buffer)
			dl, _ := child.Deadline()
			require.WithinDuration(t, parentDeadline.Add(-buffer), dl, testTolerance)
		})

		t.Run("no buffer left", func(t *testing.T) {
			child, _ := WithDeadlineBuffer(parent, math.MaxInt, math.MaxInt)
			require.Equal(t, child.Err(), context.DeadlineExceeded)
		})

		t.Run("enough buffer left but less than max timeout", func(t *testing.T) {
			child, _ := WithDeadlineBuffer(parent, timeout/2, buffer)
			dl, _ := child.Deadline()
			require.WithinDuration(t, parentDeadline.Add(-timeout/2), dl, testTolerance)
		})
	})
}
Modified file (gRPC metrics interceptor tests): the local server-transport-stream mock is replaced with the shared one in common/testing/rpctest.

@@ -8,6 +8,7 @@ import (
 	"github.com/stretchr/testify/suite"
 	metricsspb "go.temporal.io/server/api/metrics/v1"
 	"go.temporal.io/server/common/log"
+	"go.temporal.io/server/common/testing/rpctest"
 	"go.uber.org/mock/gomock"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"

@@ -36,7 +37,7 @@ func (s *grpcSuite) TearDownTest() {}
 func (s *grpcSuite) TestMetadataMetricInjection() {
 	logger := log.NewMockLogger(s.controller)
 	ctx := context.Background()
-	ssts := newMockServerTransportStream()
+	ssts := rpctest.NewMockServerTransportStream("/temporal.test/MetadataMetricInjection")
 	ctx = grpc.NewContextWithServerTransportStream(ctx, ssts)
 	anyMetricName := "any_metric_name"

Review comment (author): Moved this to a separate package for re-use.

@@ -73,8 +74,9 @@ func (s *grpcSuite) TestMetadataMetricInjection() {
 	)

 	s.Nil(err)
-	s.Equal(len(ssts.trailers), 1)
-	propagationContextBlobs := ssts.trailers[0].Get(metricsTrailerKey)
+	trailers := ssts.CapturedTrailers()
+	s.Equal(1, len(trailers))
+	propagationContextBlobs := trailers[0].Get(metricsTrailerKey)
 	s.NotNil(propagationContextBlobs)
 	s.Equal(1, len(propagationContextBlobs))
 	baggage := &metricsspb.Baggage{}

@@ -93,7 +95,7 @@
 func (s *grpcSuite) TestMetadataMetricInjection_NoMetricPresent() {
 	logger := log.NewMockLogger(s.controller)
 	ctx := context.Background()
-	ssts := newMockServerTransportStream()
+	ssts := rpctest.NewMockServerTransportStream("/temporal.test/MetadataMetricInjectionNoMetric")
 	ctx = grpc.NewContextWithServerTransportStream(ctx, ssts)

 	smcii := NewServerMetricsContextInjectorInterceptor()

@@ -128,8 +130,9 @@ func (s *grpcSuite) TestMetadataMetricInjection_NoMetricPresent() {
 	)

 	s.Nil(err)
-	s.Equal(len(ssts.trailers), 1)
-	propagationContextBlobs := ssts.trailers[0].Get(metricsTrailerKey)
+	trailers := ssts.CapturedTrailers()
+	s.Equal(1, len(trailers))
+	propagationContextBlobs := trailers[0].Get(metricsTrailerKey)
 	s.NotNil(propagationContextBlobs)
 	s.Equal(1, len(propagationContextBlobs))
 	baggage := &metricsspb.Baggage{}

@@ -162,26 +165,3 @@ func (s *grpcSuite) TestContextCounterAddNoMetricsContext() {
 	testCounterName := "test_counter"
 	ContextCounterAdd(context.Background(), testCounterName, 3)
 }
-
-func newMockServerTransportStream() *mockServerTransportStream {
-	return &mockServerTransportStream{trailers: []*metadata.MD{}}
-}
-
-type mockServerTransportStream struct {
-	trailers []*metadata.MD
-}
-
-func (s *mockServerTransportStream) Method() string {
-	return "mockssts"
-}
-func (s *mockServerTransportStream) SetHeader(md metadata.MD) error {
-	return nil
-}
-func (s *mockServerTransportStream) SendHeader(md metadata.MD) error {
-	return nil
-}
-func (s *mockServerTransportStream) SetTrailer(md metadata.MD) error {
-	mdCopy := md.Copy()
-	s.trailers = append(s.trailers, &mdCopy)
-	return nil
-}
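The call sites above use rpctest.NewMockServerTransportStream(method) and ssts.CapturedTrailers(), but the new rpctest file itself is not part of this hunk. A plausible sketch of the relocated helper, assuming it simply generalizes the deleted mock (the field names and the mutex are guesses):

```go
// Package rpctest: reconstruction of the shared mock; the real file is not
// shown in this diff.
package rpctest

import (
	"sync"

	"google.golang.org/grpc/metadata"
)

// NewMockServerTransportStream returns a grpc.ServerTransportStream stub that
// reports the given full method name and records trailers set on the stream.
func NewMockServerTransportStream(method string) *MockServerTransportStream {
	return &MockServerTransportStream{method: method}
}

type MockServerTransportStream struct {
	mu       sync.Mutex
	method   string
	trailers []metadata.MD
}

func (s *MockServerTransportStream) Method() string               { return s.method }
func (s *MockServerTransportStream) SetHeader(metadata.MD) error  { return nil }
func (s *MockServerTransportStream) SendHeader(metadata.MD) error { return nil }

func (s *MockServerTransportStream) SetTrailer(md metadata.MD) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.trailers = append(s.trailers, md.Copy())
	return nil
}

// CapturedTrailers returns a copy of every trailer block recorded so far.
func (s *MockServerTransportStream) CapturedTrailers() []metadata.MD {
	s.mu.Lock()
	defer s.mu.Unlock()
	return append([]metadata.MD(nil), s.trailers...)
}
```

Taking the full method name as a constructor argument lets each test report a distinct Method() instead of the hard-coded "mockssts" in the removed mock.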
New file (package interceptor): frontend service error interceptor.

package interceptor

import (
	"context"

	"go.temporal.io/api/serviceerror"
	"go.temporal.io/server/common/api"
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/log/tag"
	serviceerrors "go.temporal.io/server/common/serviceerror"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

const (
	// ResourceExhaustedCauseHeader is added to the RPC response if the request returns a ResourceExhausted error.
	ResourceExhaustedCauseHeader = "X-Resource-Exhausted-Cause"

	// ResourceExhaustedScopeHeader is added to the RPC response if the request returns a ResourceExhausted error.
	ResourceExhaustedScopeHeader = "X-Resource-Exhausted-Scope"
)

// NewFrontendServiceErrorInterceptor returns a gRPC interceptor that has two responsibilities:
// 1. Mask certain internal service error details.
// 2. Propagate resource exhaustion details via gRPC headers.
func NewFrontendServiceErrorInterceptor(
	logger log.Logger,
) grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context,
		req interface{},
		info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler,
	) (interface{}, error) {
		resp, err := handler(ctx, req)
		if err == nil {
			return resp, nil
		}

		switch serviceErr := err.(type) {
		case *serviceerrors.ShardOwnershipLost:
			err = serviceerror.NewUnavailable("shard unavailable, please backoff and retry")
		case *serviceerror.DataLoss:
			err = serviceerror.NewUnavailable("internal history service error")
		case *serviceerror.ResourceExhausted:
			if headerErr := grpc.SetHeader(ctx, metadata.Pairs(
				ResourceExhaustedCauseHeader, serviceErr.Cause.String(),
				ResourceExhaustedScopeHeader, serviceErr.Scope.String(),
			)); headerErr != nil {
				// While this is *not* a user-facing error or problem in itself,
				// it indicates that there might be larger connection issues at play.
				logger.Error("Failed to add Resource-Exhausted headers to response",
					tag.Operation(api.MethodName(info.FullMethod)),
					tag.Error(headerErr))
			}
		}

		return resp, err
	}
}

Review comments:
Author, on NewFrontendServiceErrorInterceptor: Moved this here from …
Member, on the ResourceExhausted case: Do the history/matching services not need this header?
Author: I don't think they do, since this is the frontend adding extra headers for the user's benefit. The behavior hasn't changed from current main there.
Author, on the file: Moved this here out of Matching, with some minor modifications.
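Neither the diff nor the review thread shows how callers consume these headers. As a hedged illustration (the helper name, the simulated response, and the enum strings are assumptions; only the two header names come from the interceptor above), a client could capture the response headers with the grpc.Header call option and fold them into the error it reports:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// Header names mirrored from the interceptor above.
const (
	resourceExhaustedCauseHeader = "X-Resource-Exhausted-Cause"
	resourceExhaustedScopeHeader = "X-Resource-Exhausted-Scope"
)

// explainResourceExhausted turns an RPC error plus the response headers
// (captured with the grpc.Header call option) into a human-readable message.
func explainResourceExhausted(header metadata.MD, rpcErr error) string {
	if status.Code(rpcErr) != codes.ResourceExhausted {
		return rpcErr.Error()
	}
	return fmt.Sprintf("resource exhausted (cause=%v, scope=%v): %v",
		header.Get(resourceExhaustedCauseHeader),
		header.Get(resourceExhaustedScopeHeader),
		rpcErr)
}

func main() {
	// Simulated server response; in a real client you would do something like:
	//   var header metadata.MD
	//   _, err := client.SomeCall(ctx, req, grpc.Header(&header))
	// The cause/scope values below are illustrative.
	header := metadata.Pairs(
		resourceExhaustedCauseHeader, "RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT",
		resourceExhaustedScopeHeader, "RESOURCE_EXHAUSTED_SCOPE_NAMESPACE",
	)
	err := status.Error(codes.ResourceExhausted, "namespace rate limit exceeded")
	fmt.Println(explainResourceExhausted(header, err))
}
```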