diff --git a/.github/_typos.toml b/.github/_typos.toml new file mode 100644 index 00000000000..b37076cc277 --- /dev/null +++ b/.github/_typos.toml @@ -0,0 +1,67 @@ +# Configuration for typos spell checker +# See: https://github.com/crate-ci/typos +# +# Inline typos ignore comments: +# To ignore a specific line, add a comment on the same line: +# var myVar = "mispeling" // typos:disable-line +# +# To ignore the next line, add a comment on the line before: +# // typos:ignore-next-line +# var myVar = "mispeling" +# +# To ignore a block of code: +# // typos:off +# var myVar = "mispeling" +# var other = "anothermispeling" +# // typos:on +# +# To check only changed files (useful for local development): +# git diff --name-only --diff-filter=ACMR main...HEAD | typos --file-list - +# Or check only staged files: +# git diff --cached --name-only | typos --file-list - + +[default] +# Custom ignore patterns for inline typos comments +extend-ignore-re = [ + # Line ignore with trailing comment: // typos:disable-line or # typos:disable-line + "(?Rm)^.*(#|//)\\s*typos:disable-line$", + # Block ignore with typos:off/on: // typos:off ... // typos:on + "(?s)(#|//)\\s*typos:off.*?\\n\\s*(#|//)\\s*typos:on", + # Next-line ignore: // typos:ignore-next-line (ignores the following line) + "(#|//)\\s*typos:ignore-next-line\\n.*", +] + +[default.extend-words] +# Add custom dictionary entries here for intentional "misspellings" used in the codebase +# Preemptable is used consistently instead of "Preemptible" for caller types +Preemptable = "Preemptable" +preemptable = "preemptable" +# "ba" is a legitimate variable name in merge tests (shorthand for "b merged with a") +ba = "ba" +# Hash strings may contain letter combinations that look like typos +Ue = "Ue" +# Test data and base64 strings that contain letter combinations that look like typos +nd = "nd" +abd = "abd" +# Environment variable name that must remain for backwards compatibility +AVAILABILTY = "AVAILABILTY" +# Proto-generated field names that have typos in the proto definition +# These should be fixed in the proto file first, then regenerated +Heartbeart = "Heartbeart" +heartbeart = "heartbeart" + +Invoke = "Invoke" +invoke = "invoke" +Invokable = "Invokable" + +[files] +# Exclude generated protobuf files +extend-exclude = [ + "*.pb.go", + "*.gen.go", + "**/testdata/**", + "*.svg", +] + +[type.go] +extend-glob = ["*.go"] diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index 4f0fa02a796..70374f49405 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -133,6 +133,28 @@ jobs: exit 1 fi + typos: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get changed files + id: changed-files + env: + BASE_REF: ${{ github.base_ref }} + run: | + git diff --name-only --diff-filter=ACMR "origin/${BASE_REF}"...HEAD | tr '\n' ' ' > changed_files.txt + echo "files=$(cat changed_files.txt)" >> "$GITHUB_OUTPUT" + + - name: check spelling with typos on changed files + if: steps.changed-files.outputs.files != '' + uses: crate-ci/typos@v1.28.4 + with: + config: .github/_typos.toml + files: ${{ steps.changed-files.outputs.files }} + linters-succeed: name: All Linters Succeed needs: @@ -141,6 +163,7 @@ jobs: - lint-actions - fmt-imports - golangci + - typos runs-on: ubuntu-latest if: always() env: diff --git a/Makefile b/Makefile index 5bb698e847b..8aa7c3ea67b 100644 --- a/Makefile +++ b/Makefile @@ -206,6 +206,9 @@ ACTIONLINT := 
$(LOCALBIN)/actionlint-$(ACTIONLINT_VER) $(ACTIONLINT): | $(LOCALBIN) $(call go-install-tool,$(ACTIONLINT),github.com/rhysd/actionlint/cmd/actionlint,$(ACTIONLINT_VER)) +TYPOS_VER := v1.28.4 +TYPOS := typos + WORKFLOWCHECK_VER := master # TODO: pin this specific version once 0.3.0 follow-up is released WORKFLOWCHECK := $(LOCALBIN)/workflowcheck-$(WORKFLOWCHECK_VER) $(WORKFLOWCHECK): | $(LOCALBIN) @@ -369,7 +372,7 @@ temporal-server-debug: $(ALL_SRC) ##### Checks ##### goimports: fmt-imports $(GOIMPORTS) @printf $(COLOR) "Run goimports for all files..." - @UNGENERATED_FILES=$$(find . -type f -name '*.go' -print0 | xargs -0 grep -L -e "Code generated by .* DO NOT EDIT." || true) && \ + @UNGENERATED_FILES=$$(find . -type f -name '*.go' -not -path './.git/*' -print0 | xargs -0 grep -L -e "Code generated by .* DO NOT EDIT." || true) && \ $(GOIMPORTS) -w $$UNGENERATED_FILES lint-actions: $(ACTIONLINT) @@ -385,7 +388,28 @@ fmt-imports: $(GCI) # Don't get confused, there is a single linter called gci, w @printf $(COLOR) "Formatting imports..." @$(GCI) write --skip-generated -s standard -s default ./* -lint: lint-code lint-actions lint-api lint-protos +lint-typos: + @printf $(COLOR) "Checking spelling with typos..." + @if command -v $(TYPOS) >/dev/null 2>&1; then \ + $(TYPOS) --config .github/_typos.toml; \ + else \ + printf $(RED) "WARNING: typos is not installed. Install it from https://github.com/crate-ci/typos or run: cargo install typos-cli"; \ + echo ""; \ + echo "Skipping spell check..."; \ + fi + +# Check spelling only on files changed from main branch +lint-typos-changed: + @printf $(COLOR) "Checking spelling on changed files with typos..." + @if command -v $(TYPOS) >/dev/null 2>&1; then \ + git diff --name-only --diff-filter=ACMR $(MAIN_BRANCH)...HEAD | $(TYPOS) --config .github/_typos.toml --file-list -; \ + else \ + printf $(RED) "WARNING: typos is not installed. Install it from https://github.com/crate-ci/typos or run: cargo install typos-cli"; \ + echo ""; \ + echo "Skipping spell check..."; \ + fi + +lint: lint-code lint-actions lint-api lint-protos lint-typos-changed @printf $(COLOR) "Run linters..." lint-api: $(API_LINTER) $(API_BINPB) diff --git a/chasm/lib/scheduler/generator.go b/chasm/lib/scheduler/generator.go index ded2407e4eb..4accb5b185a 100644 --- a/chasm/lib/scheduler/generator.go +++ b/chasm/lib/scheduler/generator.go @@ -16,7 +16,7 @@ type Generator struct { Scheduler chasm.Field[*Scheduler] } -// NewGenerator returns an intialized Generator component, which should +// NewGenerator returns an initialized Generator component, which should // be parented under a Scheduler root node. func NewGenerator(ctx chasm.MutableContext, scheduler *Scheduler, invoker *Invoker) *Generator { generator := &Generator{ diff --git a/chasm/lib/scheduler/invoker.go b/chasm/lib/scheduler/invoker.go index 5dcf5500cca..78cc27d98dc 100644 --- a/chasm/lib/scheduler/invoker.go +++ b/chasm/lib/scheduler/invoker.go @@ -25,7 +25,7 @@ func (i *Invoker) LifecycleState(ctx chasm.Context) chasm.LifecycleState { return chasm.LifecycleStateRunning } -// NewInvoker returns an intialized Invoker component, which should +// NewInvoker returns an initialized Invoker component, which should // be parented under a Scheduler root component. 
func NewInvoker(ctx chasm.MutableContext, scheduler *Scheduler) *Invoker { return &Invoker{ diff --git a/chasm/lib/scheduler/scheduler.go b/chasm/lib/scheduler/scheduler.go index a542664b3f6..a6eaeee26dc 100644 --- a/chasm/lib/scheduler/scheduler.go +++ b/chasm/lib/scheduler/scheduler.go @@ -158,7 +158,7 @@ func (s *Scheduler) LifecycleState(ctx chasm.Context) chasm.LifecycleState { return chasm.LifecycleStateRunning } -// NewRangeBackfiller returns an intialized Backfiller component, which should +// NewRangeBackfiller returns an initialized Backfiller component, which should // be parented under a Scheduler root node. func (s *Scheduler) NewRangeBackfiller( ctx chasm.MutableContext, @@ -172,7 +172,7 @@ func (s *Scheduler) NewRangeBackfiller( return backfiller } -// NewImmediateBackfiller returns an intialized Backfiller component, which should +// NewImmediateBackfiller returns an initialized Backfiller component, which should // be parented under a Scheduler root node. func (s *Scheduler) NewImmediateBackfiller( ctx chasm.MutableContext, @@ -401,7 +401,7 @@ func executionStatusFromFailure(failure *failurepb.Failure) enumspb.WorkflowExec } // HandleNexusCompletion allows Scheduler to record workflow completions from -// worfklows started by the same scheduler tree's Invoker. +// workflows started by the same scheduler tree's Invoker. func (s *Scheduler) HandleNexusCompletion( ctx chasm.MutableContext, info *persistencespb.ChasmNexusCompletion, diff --git a/chasm/ref.go b/chasm/ref.go index 44d8b5d8223..4cd55b8c493 100644 --- a/chasm/ref.go +++ b/chasm/ref.go @@ -26,8 +26,8 @@ type ComponentRef struct { // It is used to find and validate the loaded execution has the right archetype, especially when runID // is not specified in the ExecutionKey. archetypeID ArchetypeID - // executionGoType is used for determining the ComponetRef's archetype. - // When CHASM deverloper needs to create a ComponentRef, they will only provide the component type, + // executionGoType is used for determining the ComponentRef's archetype. + // When CHASM developer needs to create a ComponentRef, they will only provide the component type, // and leave the work of determining archetypeID to the CHASM framework. executionGoType reflect.Type diff --git a/chasm/tree.go b/chasm/tree.go index 084541767d0..99ddee571dc 100644 --- a/chasm/tree.go +++ b/chasm/tree.go @@ -53,7 +53,7 @@ var ( // - NeedSyncStructure: Value is deserialized, neither data nor tree structure is synced. // // For simplicity, for a dirty component node, the logic always sync structure (potentially multiple times within a transaction) first, -// and the serialize the data at the very end of a transaction. So there will never base a case where value is synced with seralizedNode, +// and the serialize the data at the very end of a transaction. So there will never base a case where value is synced with serializedNode, // but not with children. // // To update this field, ALWAYS use setValueState() method. @@ -308,7 +308,7 @@ func newTreeInitSearchAttributesAndMemo( return err } - // Theoritically we should check if the root node has a Visibility component or not. + // Theoretically we should check if the root node has a Visibility component or not. // But that doesn't really matter. Even if it doesn't have one, currentSearchAttributes // and currentMemo will just never be used. 
@@ -1549,7 +1549,7 @@ func (n *Node) closeTransactionForceUpdateVisibility( visibility.generateTask(mutableContext) visibilityNode.setValueState(valueStateNeedSerialize) - // We don't need to sync tree structure here for the visiblity node because we only generated a task without + // We don't need to sync tree structure here for the visibility node because we only generated a task without // changing any component fields. return nil } @@ -2684,7 +2684,7 @@ func deserializeTask( return taskValue, nil } - // TODO: consider pre-calculating the proto field num when registring the task type. + // TODO: consider pre-calculating the proto field num when registering the task type. protoMessageFound := false for i := 0; i < taskGoType.NumField(); i++ { @@ -2735,7 +2735,7 @@ func serializeTask( }, nil } - // TODO: consider pre-calculating the proto field num when registring the task type. + // TODO: consider pre-calculating the proto field num when registering the task type. var blob *commonpb.DataBlob protoMessageFound := false diff --git a/chasm/tree_test.go b/chasm/tree_test.go index 310be49a019..be8c14c6c6e 100644 --- a/chasm/tree_test.go +++ b/chasm/tree_test.go @@ -1694,18 +1694,18 @@ func (s *nodeSuite) TestRef() { subComponent11 := subComponent1.SubComponent11.Get(chasmContext) testCases := []struct { - name string - component Component - expectErr bool - expectedPath []string - expectedInitalVT *persistencespb.VersionedTransition + name string + component Component + expectErr bool + expectedPath []string + expectedInitialVT *persistencespb.VersionedTransition }{ { name: "root", component: testComponent, expectErr: false, expectedPath: nil, // same as []string{} - expectedInitalVT: &persistencespb.VersionedTransition{ + expectedInitialVT: &persistencespb.VersionedTransition{ NamespaceFailoverVersion: 1, TransitionCount: 1, }, @@ -1715,7 +1715,7 @@ func (s *nodeSuite) TestRef() { component: subComponent1, expectErr: false, expectedPath: []string{"SubComponent1"}, - expectedInitalVT: &persistencespb.VersionedTransition{ + expectedInitialVT: &persistencespb.VersionedTransition{ NamespaceFailoverVersion: 1, TransitionCount: 1, }, @@ -1725,7 +1725,7 @@ func (s *nodeSuite) TestRef() { component: subComponent11, expectErr: false, expectedPath: []string{"SubComponent1", "SubComponent11"}, - expectedInitalVT: &persistencespb.VersionedTransition{ + expectedInitialVT: &persistencespb.VersionedTransition{ NamespaceFailoverVersion: 1, TransitionCount: 1, }, @@ -1754,13 +1754,13 @@ func (s *nodeSuite) TestRef() { // Proto fields are validated separately with ProtoEqual. 
// executionLastUpdateVT: currentVT, - // componentInitialVT: tc.expectedInitalVT, + // componentInitialVT: tc.expectedInitialVT, } actualRef, err := DeserializeComponentRef(encodedRef) s.NoError(err) s.ProtoEqual(currentVT, actualRef.executionLastUpdateVT) - s.ProtoEqual(tc.expectedInitalVT, actualRef.componentInitialVT) + s.ProtoEqual(tc.expectedInitialVT, actualRef.componentInitialVT) actualRef.executionLastUpdateVT = nil actualRef.componentInitialVT = nil @@ -1926,7 +1926,7 @@ func (s *nodeSuite) TestCloseTransaction_ForceUpdateVisibility_RootLifecycleChan return true, nil } - // Init visiblity component + // Init visibility component testComponent.(*TestComponent).Visibility = NewComponentField(chasmCtx, NewVisibility(chasmCtx)) mutation, err := node.CloseTransaction() s.NoError(err) @@ -1979,7 +1979,7 @@ func (s *nodeSuite) TestCloseTransaction_ForceUpdateVisibility_RootSAMemoChanged return nextTransitionCount } - // Init visiblity component + // Init visibility component testComponent.(*TestComponent).Visibility = NewComponentField(chasmCtx, NewVisibility(chasmCtx)) s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { return true, nil @@ -2604,7 +2604,7 @@ func (s *nodeSuite) TestExecuteImmediatePureTask() { s.Len(mutations.UpdatedNodes, 2, "root and subcomponent1 should be updated") s.Empty(mutations.DeletedNodes) - // immedidate pure tasks will be executed inline and no physical chasm pure task will be generated. + // immediate pure tasks will be executed inline and no physical chasm pure task will be generated. s.Equal(tasks.MaximumKey.FireTime, s.nodeBackend.LastDeletePureTaskCall()) } @@ -2889,14 +2889,14 @@ func (s *nodeSuite) TestExecuteSideEffectTask() { ctx := NewEngineContext(context.Background(), mockEngine) chasmContext := NewMutableContext(ctx, root) - var backendValidtionFnCalled bool + var backendValidationFnCalled bool // This won't be called until access time. dummyValidationFn := func(_ NodeBackend, _ Context, _ Component) error { - backendValidtionFnCalled = true + backendValidationFnCalled = true return nil } expectValidate := func(valid bool, validationErr error) { - backendValidtionFnCalled = false + backendValidationFnCalled = false s.testLibrary.mockSideEffectTaskValidator.EXPECT().Validate( gomock.Any(), gomock.Any(), @@ -2932,7 +2932,7 @@ func (s *nodeSuite) TestExecuteSideEffectTask() { expectExecute(nil) err = root.ExecuteSideEffectTask(ctx, s.registry, executionKey, chasmTask, dummyValidationFn) s.NoError(err) - s.True(backendValidtionFnCalled) + s.True(backendValidationFnCalled) s.True(chasmTask.DeserializedTask.IsValid()) // Invalid task. @@ -2957,7 +2957,7 @@ func (s *nodeSuite) TestExecuteSideEffectTask() { expectExecute(executionErr) err = root.ExecuteSideEffectTask(ctx, s.registry, executionKey, chasmTask, dummyValidationFn) s.ErrorIs(executionErr, err) - s.True(backendValidtionFnCalled) + s.True(backendValidationFnCalled) s.False(chasmTask.DeserializedTask.IsValid()) } diff --git a/client/client_bean.go b/client/client_bean.go index 855243debe4..b885547ed0c 100644 --- a/client/client_bean.go +++ b/client/client_bean.go @@ -138,7 +138,7 @@ func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) (adminservice.Admi clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[cluster] if !clusterFound { // We intentionally return internal error here. - // This error could only happen with internal mis-configuration. 
+ // This error could only happen with internal misconfiguration. // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. return nil, &serviceerror.Internal{ @@ -177,7 +177,7 @@ func (h *clientBeanImpl) GetRemoteFrontendClient(clusterName string) (grpc.Clien clusterInfo, clusterFound := h.clusterMetadata.GetAllClusterInfo()[clusterName] if !clusterFound { // We intentionally return internal error here. - // This error could only happen with internal mis-configuration. + // This error could only happen with internal misconfiguration. // This can happen when a namespace is config for multiple clusters. But those clusters are not connected. // We also have logic in task processing to drop tasks when namespace cluster exclude a local cluster. return nil, nil, &serviceerror.Internal{ diff --git a/common/api/metadata.go b/common/api/metadata.go index ca62cfbbb88..4a1f17078b8 100644 --- a/common/api/metadata.go +++ b/common/api/metadata.go @@ -3,7 +3,7 @@ package api import "strings" type ( - // Describes the scope of a method (whole cluster or inividual namespace). + // Scope describes the scope of a method (whole cluster or individual namespace). Scope int32 // Describes what level of access is needed for a method. Note that this field is @@ -16,7 +16,7 @@ type ( Polling int32 MethodMetadata struct { - // Describes the scope of a method (whole cluster or inividual namespace). + // Describes the scope of a method (whole cluster or individual namespace). Scope Scope // Describes what level of access is needed for a method (advisory). Access Access diff --git a/common/archiver/archival_metadata.go b/common/archiver/archival_metadata.go index 0761b61ea91..fb84f82c380 100644 --- a/common/archiver/archival_metadata.go +++ b/common/archiver/archival_metadata.go @@ -125,8 +125,8 @@ func NewArchivalConfig( } } -// NewDisabledArchvialConfig returns an ArchivalConfig where archival is disabled for both the cluster and the namespace -func NewDisabledArchvialConfig() ArchivalConfig { +// NewDisabledArchivalConfig returns an ArchivalConfig where archival is disabled for both the cluster and the namespace +func NewDisabledArchivalConfig() ArchivalConfig { return &archivalConfig{ staticClusterState: ArchivalDisabled, dynamicClusterState: nil, diff --git a/common/archiver/constants.go b/common/archiver/constants.go index a09a282fd04..47d0935f1a7 100644 --- a/common/archiver/constants.go +++ b/common/archiver/constants.go @@ -9,7 +9,7 @@ const ( ArchiveNonRetryableErrorMsg = "Archive method encountered an non-retryable error." // ArchiveTransientErrorMsg is the log message when the Archive() method encounters a transient error ArchiveTransientErrorMsg = "Archive method encountered a transient error." 
- // ArchiveSkippedInfoMsg is the log messsage when the Archive() method encounter an not found error + // ArchiveSkippedInfoMsg is the log message when the Archive() method encounter an not found error ArchiveSkippedInfoMsg = "Archive method encountered not found error and skipped the archival" // ErrReasonInvalidURI is the error reason for invalid URI @@ -32,7 +32,7 @@ var ( // ErrInvalidGetHistoryRequest is the error for invalid GetHistory request ErrInvalidGetHistoryRequest = errors.New("get archived history request is invalid") // ErrInvalidQueryVisibilityRequest is the error for invalid Query Visibility request - ErrInvalidQueryVisibilityRequest = errors.New("query visiblity request is invalid") + ErrInvalidQueryVisibilityRequest = errors.New("query visibility request is invalid") // ErrNextPageTokenCorrupted is the error for corrupted GetHistory token ErrNextPageTokenCorrupted = errors.New("next page token is corrupted") // ErrHistoryNotExist is the error for non-exist history diff --git a/common/archiver/gcloud/README.md b/common/archiver/gcloud/README.md index dadc13d2588..732999f54d6 100644 --- a/common/archiver/gcloud/README.md +++ b/common/archiver/gcloud/README.md @@ -2,7 +2,7 @@ ## Configuration See https://cloud.google.com/docs/authentication#service-accounts to understand how is made the authentication against google cloud storage -Nowdays we support three different ways in order to let Temporal know where your google keyfile credentials are located +Nowadays we support three different ways in order to let Temporal know where your google keyfile credentials are located * Temporal archival deployment.yaml configuration file * `GOOGLE_APPLICATION_CREDENTIALS` environment variable @@ -12,7 +12,7 @@ If more than one credentials location is given, then Temporal will resolve the c `GOOGLE_APPLICATION_CREDENTIALS > Temporal archival deployment.yaml > Google default credentials` -Be sure that you have created your bucket first, and have enought rights in order to read/write over your bucket. +Be sure that you have created your bucket first, and have enough rights in order to read/write over your bucket. ### Gcloud Archival example @@ -65,7 +65,7 @@ it will search all records starting from `2020-01-21T00:00:00Z` to `2020-01-21T5 ### Limitations - The only operator supported is `=` -- Currently It's not possible to guarantee the resulSet order, specially if the pageSize it's fullfilled. +- Currently It's not possible to guarantee the resultSet order, specially if the pageSize it's fulfilled. ### Example diff --git a/common/archiver/gcloud/connector/client.go b/common/archiver/gcloud/connector/client.go index 2f2aaaa74b0..743e17130d4 100644 --- a/common/archiver/gcloud/connector/client.go +++ b/common/archiver/gcloud/connector/client.go @@ -41,9 +41,9 @@ type ( } ) -// NewClient return a Temporal gcloudstorage.Client based on default google service account creadentials (ScopeFullControl required). +// NewClient return a Temporal gcloudstorage.Client based on default google service account credentials (ScopeFullControl required). // Bucket must be created by Iaas scripts, in other words, this library doesn't create the required Bucket. -// Optionaly you can set your credential path throught "GOOGLE_APPLICATION_CREDENTIALS" environment variable or through temporal config file. +// Optionally you can set your credential path through "GOOGLE_APPLICATION_CREDENTIALS" environment variable or through temporal config file. 
// You can find more info about "Google Setting Up Authentication for Server to Server Production Applications" under the following link // https://cloud.google.com/docs/authentication/production func NewClient(ctx context.Context, config *config.GstorageArchiver) (Client, error) { @@ -113,7 +113,7 @@ func (s *storageWrapper) Get(ctx context.Context, URI archiver.URI, fileName str return io.ReadAll(reader) } -// Query, retieves file names by provided storage query +// Query, retrieves file names by provided storage query func (s *storageWrapper) Query(ctx context.Context, URI archiver.URI, fileNamePrefix string) (fileNames []string, err error) { fileNames = make([]string, 0) bucket := s.client.Bucket(URI.Hostname()) @@ -132,7 +132,7 @@ func (s *storageWrapper) Query(ctx context.Context, URI archiver.URI, fileNamePr } -// QueryWithFilters, retieves filenames that match filter parameters. PageSize is optional, 0 means all records. +// QueryWithFilters, retrieves filenames that match filter parameters. PageSize is optional, 0 means all records. func (s *storageWrapper) QueryWithFilters(ctx context.Context, URI archiver.URI, fileNamePrefix string, pageSize, offset int, filters []Precondition) ([]string, bool, int, error) { var err error currentPos := offset diff --git a/common/archiver/gcloud/util_test.go b/common/archiver/gcloud/util_test.go index 26f335d182e..8c4df0fd23e 100644 --- a/common/archiver/gcloud/util_test.go +++ b/common/archiver/gcloud/util_test.go @@ -165,13 +165,13 @@ func (s *utilSuite) TestRunIdPrecondition() { { workflowID: "4418294404690464320", runID: "15619178330501475177", - fileName: "closeTimeout_2020-02-27T09:42:28Z_12851121011173788097_4418294404690464320_unkonwnRunID.visibility", + fileName: "closeTimeout_2020-02-27T09:42:28Z_12851121011173788097_4418294404690464320_unknownRunID.visibility", expectedResult: false, }, { workflowID: "4418294404690464320", runID: "", - fileName: "closeTimeout_2020-02-27T09:42:28Z_12851121011173788097_4418294404690464320_unkonwnRunID.visibility", + fileName: "closeTimeout_2020-02-27T09:42:28Z_12851121011173788097_4418294404690464320_unknownRunID.visibility", expectedResult: true, }, } @@ -208,7 +208,7 @@ func (s *utilSuite) TestWorkflowTypeNamePrecondition() { workflowID: "4418294404690464320", runID: "15619178330501475177", workflowTypeName: "", - fileName: "closeTimeout_2020-02-27T09:42:28Z_unkownWorkflowTypeName_4418294404690464320_15619178330501475177.visibility", + fileName: "closeTimeout_2020-02-27T09:42:28Z_unknownWorkflowTypeName_4418294404690464320_15619178330501475177.visibility", expectedResult: true, }, } diff --git a/common/archiver/metadata_mock.go b/common/archiver/metadata_mock.go index 74c55ab3db0..7e5743b0aed 100644 --- a/common/archiver/metadata_mock.go +++ b/common/archiver/metadata_mock.go @@ -22,8 +22,8 @@ type MetadataMock interface { func NewMetadataMock(controller *gomock.Controller) MetadataMock { m := &metadataMock{ MockArchivalMetadata: NewMockArchivalMetadata(controller), - defaultHistoryConfig: NewDisabledArchvialConfig(), - defaultVisibilityConfig: NewDisabledArchvialConfig(), + defaultHistoryConfig: NewDisabledArchivalConfig(), + defaultVisibilityConfig: NewDisabledArchivalConfig(), } return m } diff --git a/common/archiver/metadata_mock_test.go b/common/archiver/metadata_mock_test.go index 1043c1350d0..bc1a45bee20 100644 --- a/common/archiver/metadata_mock_test.go +++ b/common/archiver/metadata_mock_test.go @@ -27,7 +27,7 @@ func TestMetadataMock(t *testing.T) { assert.True(t, 
config.ClusterConfiguredForArchival()) - metadata.EXPECT().GetHistoryConfig().Return(NewDisabledArchvialConfig()) + metadata.EXPECT().GetHistoryConfig().Return(NewDisabledArchivalConfig()) config = metadata.GetHistoryConfig() assert.False(t, config.ClusterConfiguredForArchival()) @@ -39,7 +39,7 @@ func TestMetadataMock(t *testing.T) { assert.True(t, config.ClusterConfiguredForArchival()) - metadata.EXPECT().GetVisibilityConfig().Return(NewDisabledArchvialConfig()) + metadata.EXPECT().GetVisibilityConfig().Return(NewDisabledArchivalConfig()) config = metadata.GetVisibilityConfig() assert.False(t, config.ClusterConfiguredForArchival()) @@ -51,7 +51,7 @@ func TestMetadataMock(t *testing.T) { assert.True(t, config.ClusterConfiguredForArchival()) - metadata.EXPECT().GetHistoryConfig().Return(NewDisabledArchvialConfig()) + metadata.EXPECT().GetHistoryConfig().Return(NewDisabledArchivalConfig()) config = metadata.GetHistoryConfig() assert.False(t, config.ClusterConfiguredForArchival()) @@ -64,7 +64,7 @@ func TestMetadataMock(t *testing.T) { assert.True(t, config.ClusterConfiguredForArchival()) - metadata.EXPECT().GetVisibilityConfig().Return(NewDisabledArchvialConfig()) + metadata.EXPECT().GetVisibilityConfig().Return(NewDisabledArchivalConfig()) config = metadata.GetVisibilityConfig() assert.False(t, config.ClusterConfiguredForArchival()) diff --git a/common/archiver/s3store/README.md b/common/archiver/s3store/README.md index d6dd42d0f43..464f78d9e63 100644 --- a/common/archiver/s3store/README.md +++ b/common/archiver/s3store/README.md @@ -72,7 +72,7 @@ s3://// closeTimeout/2020-01-21T16:16:11Z/ ``` -Enable AWS SDK Logging with config parameter `logLevel`. For example enable debug logging with `logLevel: 4096`. Possbile Values: +Enable AWS SDK Logging with config parameter `logLevel`. For example enable debug logging with `logLevel: 4096`. 
Possible Values: * LogOff = 0 = 0x0 * LogDebug = 4096 = 0x1000 * LogDebugWithSigning = 4097 = 0x1001 diff --git a/common/archiver/util.go b/common/archiver/util.go index cd95eb70fcd..b21c8ae010b 100644 --- a/common/archiver/util.go +++ b/common/archiver/util.go @@ -43,7 +43,7 @@ func TagLoggerWithArchiveVisibilityRequestAndURI(logger log.Logger, request *arc tag.ArchivalRequestNamespace(request.GetNamespace()), tag.ArchivalRequestWorkflowID(request.GetWorkflowId()), tag.ArchivalRequestRunID(request.GetRunId()), - tag.ArchvialRequestWorkflowType(request.GetWorkflowTypeName()), + tag.ArchivalRequestWorkflowType(request.GetWorkflowTypeName()), tag.ArchivalRequestCloseTimestamp(request.GetCloseTime()), tag.ArchivalRequestStatus(request.GetStatus().String()), tag.ArchivalURI(URI), diff --git a/common/backoff/jitter_test.go b/common/backoff/jitter_test.go index ade3dd672f2..89cd9f642fd 100644 --- a/common/backoff/jitter_test.go +++ b/common/backoff/jitter_test.go @@ -79,7 +79,7 @@ func (s *jitterSuite) TestJitter_InputZeroValue() { s.Zero(Jitter(float64(0), rand.Float64())) } -func (s *jitterSuite) TestJitter_CoeffientZeroValue() { +func (s *jitterSuite) TestJitter_CoefficientZeroValue() { s.Equal(time.Duration(1), Jitter(time.Duration(1), 0)) s.Equal(int64(1), Jitter(int64(1), 0)) s.Equal(float64(1), Jitter(float64(1), 0)) diff --git a/common/backoff/retrypolicy.go b/common/backoff/retrypolicy.go index 8893b5407e1..dada78ee22d 100644 --- a/common/backoff/retrypolicy.go +++ b/common/backoff/retrypolicy.go @@ -11,7 +11,7 @@ import ( ) const ( - // NoInterval represents Maximim interval + // NoInterval represents Maximum interval NoInterval = 0 done time.Duration = -1 noMaximumAttempts = 0 diff --git a/common/cache/lru_test.go b/common/cache/lru_test.go index b960312e47b..da1b9f2ff95 100644 --- a/common/cache/lru_test.go +++ b/common/cache/lru_test.go @@ -47,14 +47,14 @@ func TestLRU(t *testing.T) { capture = metricsHandler.StartCapture() cache.Put("B", "Bar") cache.Put("C", "Cid") - cache.Put("D", "Delt") + cache.Put("D", "Dealt") assert.Equal(t, 4, cache.Size()) snapshot = capture.Snapshot() assert.Equal(t, float64(4), snapshot[metrics.CacheUsage.Name()][2].Value) assert.Equal(t, "Bar", cache.Get("B")) assert.Equal(t, "Cid", cache.Get("C")) - assert.Equal(t, "Delt", cache.Get("D")) + assert.Equal(t, "Dealt", cache.Get("D")) cache.Put("A", "Foo2") assert.Equal(t, "Foo2", cache.Get("A")) diff --git a/common/cache/simple_test.go b/common/cache/simple_test.go index 1becc341ed3..17b47889e4c 100644 --- a/common/cache/simple_test.go +++ b/common/cache/simple_test.go @@ -18,12 +18,12 @@ func TestSimple(t *testing.T) { cache.Put("B", "Bar") cache.Put("C", "Cid") - cache.Put("D", "Delt") + cache.Put("D", "Dealt") assert.Equal(t, 4, cache.Size()) assert.Equal(t, "Bar", cache.Get("B")) assert.Equal(t, "Cid", cache.Get("C")) - assert.Equal(t, "Delt", cache.Get("D")) + assert.Equal(t, "Dealt", cache.Get("D")) cache.Put("A", "Foo2") assert.Equal(t, "Foo2", cache.Get("A")) diff --git a/common/checksum/crc_test.go b/common/checksum/crc_test.go index b43212cd1d0..79f207db4d5 100644 --- a/common/checksum/crc_test.go +++ b/common/checksum/crc_test.go @@ -28,15 +28,15 @@ func TestCRC32OverProto(t *testing.T) { HistoryLength: 550, } - parallism := 10 + parallelism := 10 loopCount := 100 successCount := int64(0) startC := make(chan struct{}) doneWG := sync.WaitGroup{} - doneWG.Add(parallism) + doneWG.Add(parallelism) - for i := 0; i < parallism; i++ { + for i := 0; i < parallelism; i++ { go func() { defer 
doneWG.Done() <-startC @@ -56,5 +56,5 @@ func TestCRC32OverProto(t *testing.T) { close(startC) success := common.AwaitWaitGroup(&doneWG, time.Second) assert.True(t, success, "timed out waiting for goroutines to finish") - assert.Equal(t, int64(parallism*loopCount), successCount) + assert.Equal(t, int64(parallelism*loopCount), successCount) } diff --git a/common/cluster/metadata.go b/common/cluster/metadata.go index 1ffacb0399a..efcf22587bb 100644 --- a/common/cluster/metadata.go +++ b/common/cluster/metadata.go @@ -336,7 +336,7 @@ func (m *metadataImpl) ClusterNameForFailoverVersion(isGlobalNamespace bool, fai if !isGlobalNamespace { panic(fmt.Sprintf( - "ClusterMetadata encountered local namesapce with failover version %v", + "ClusterMetadata encountered local namespace with failover version %v", failoverVersion, )) } diff --git a/common/collection/concurrent_tx_map.go b/common/collection/concurrent_tx_map.go index 9ba1fe28370..b643c0b87dc 100644 --- a/common/collection/concurrent_tx_map.go +++ b/common/collection/concurrent_tx_map.go @@ -43,7 +43,7 @@ type ( // NewShardedConcurrentTxMap returns an instance of ShardedConcurrentMap // // ShardedConcurrentMap is a thread safe map that maintains upto nShards -// number of maps internally to allow nShards writers to be acive at the +// number of maps internally to allow nShards writers to be active at the // same time. This map *does not* use re-entrant locks, so access to the // map during iterator can cause a dead lock. // diff --git a/common/collection/concurrent_tx_map_test.go b/common/collection/concurrent_tx_map_test.go index f826e042578..f9fc574aede 100644 --- a/common/collection/concurrent_tx_map_test.go +++ b/common/collection/concurrent_tx_map_test.go @@ -66,7 +66,7 @@ func (s *ConcurrentTxMapSuite) TestGetAndDo() { s.Nil(interf, "GetAndDo should return nil when key not found") s.Nil(err, "GetAndDo should return nil when function not applied") s.False(ok, "GetAndDo should return false when key not found") - s.False(fnApplied, "GetAndDo should not apply function when key not exixts") + s.False(fnApplied, "GetAndDo should not apply function when key not exists") value = intType(1) testMap.Put(key, &value) @@ -81,7 +81,7 @@ func (s *ConcurrentTxMapSuite) TestGetAndDo() { s.Equal(*(value1), intType(2)) s.NotNil(err, "GetAndDo should return non nil when function applied") s.True(ok, "GetAndDo should return true when key found") - s.True(fnApplied, "GetAndDo should apply function when key exixts") + s.True(fnApplied, "GetAndDo should apply function when key exists") } func (s *ConcurrentTxMapSuite) TestPutOrDo() { @@ -99,7 +99,7 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { s.Equal(value, *valueRetuern) s.Nil(err, "PutOrDo should return nil when function not applied") s.False(ok, "PutOrDo should return false when function not applied") - s.False(fnApplied, "PutOrDo should not apply function when key not exixts") + s.False(fnApplied, "PutOrDo should not apply function when key not exists") anotherValue := intType(111) interf, ok, err = testMap.PutOrDo(key, &anotherValue, func(key interface{}, value interface{}) error { @@ -112,7 +112,7 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { s.Equal(value, *valueRetuern) s.NotNil(err, "PutOrDo should return non nil when function applied") s.True(ok, "PutOrDo should return true when function applied") - s.True(fnApplied, "PutOrDo should apply function when key exixts") + s.True(fnApplied, "PutOrDo should apply function when key exists") } func (s *ConcurrentTxMapSuite) TestRemoveIf() { diff 
--git a/common/collection/paging_iterator_test.go b/common/collection/paging_iterator_test.go index 7dea8144971..2d87f7ad1b9 100644 --- a/common/collection/paging_iterator_test.go +++ b/common/collection/paging_iterator_test.go @@ -80,7 +80,7 @@ func (s *pagingIteratorSuite) TestIteration_NoErr() { s.Equal([]int{1, 2, 3, 4, 5, 6}, result) } -func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { +func (s *pagingIteratorSuite) TestIteration_Err_Beginning() { phase := 0 ite := NewPagingIterator(func(token []byte) ([]interface{}, []byte, error) { switch phase { @@ -99,7 +99,7 @@ func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { s.False(ite.HasNext()) } -func (s *pagingIteratorSuite) TestIteration_Err_NotBegining() { +func (s *pagingIteratorSuite) TestIteration_Err_NotBeginning() { phase := 0 outputs := [][]interface{}{ diff --git a/common/collection/priority_queue.go b/common/collection/priority_queue.go index c636093a12e..6fd814a988b 100644 --- a/common/collection/priority_queue.go +++ b/common/collection/priority_queue.go @@ -20,7 +20,7 @@ func NewPriorityQueue[T any]( } } -// NewPriorityQueueWithItems creats a new priority queue +// NewPriorityQueueWithItems creates a new priority queue // with the provided list of items. // PriorityQueue will take ownership of the passed in items, // so caller should stop modifying it. diff --git a/common/config/archival.go b/common/config/archival.go index 251fa080f81..232ad1f1d01 100644 --- a/common/config/archival.go +++ b/common/config/archival.go @@ -30,11 +30,11 @@ func isArchivalConfigValid( clusterStatus string, enableRead bool, namespaceDefaultStatus string, - domianDefaultURI string, + domainDefaultURI string, specifiedProvider bool, ) bool { archivalEnabled := clusterStatus == ArchivalEnabled - URISet := len(domianDefaultURI) != 0 + URISet := len(domainDefaultURI) != 0 validEnable := archivalEnabled && URISet && specifiedProvider validDisabled := !archivalEnabled && !enableRead && namespaceDefaultStatus != ArchivalEnabled && !URISet && !specifiedProvider diff --git a/common/config/config.go b/common/config/config.go index 0ca4baaa6b6..b4c158ae54e 100644 --- a/common/config/config.go +++ b/common/config/config.go @@ -65,7 +65,7 @@ type ( PProf struct { // Port is the port on which the PProf will bind to Port int `yaml:"port"` - // Host defaults to `localhost` but can be overriden + // Host defaults to `localhost` but can be overridden // for instance in the case of dual stack IPv4/IPv6 Host string `yaml:"host"` } diff --git a/common/config/localip.go b/common/config/localip.go index a66dd4e35dc..f831e62c6f2 100644 --- a/common/config/localip.go +++ b/common/config/localip.go @@ -12,7 +12,7 @@ import ( // Scores are calculated as: // -1 for any unknown IP addreseses. // +300 for IPv4 addresses -// +100 for non-local addresses, extra +100 for "up" interaces. +// +100 for non-local addresses, extra +100 for "up" interfaces. 
func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) { var ip net.IP if netAddr, ok := addr.(*net.IPNet); ok { diff --git a/common/dynamicconfig/constants.go b/common/dynamicconfig/constants.go index 98c5518c5a3..11e6e7b32f2 100644 --- a/common/dynamicconfig/constants.go +++ b/common/dynamicconfig/constants.go @@ -1307,7 +1307,7 @@ these log lines can be noisy, we want to be able to turn on and sample selective MatchingMaxTaskQueuesInDeployment = NewNamespaceIntSetting( "matching.maxTaskQueuesInDeployment", 1000, - `MatchingMaxTaskQueuesInDeployment represents the maximum number of task-queues that can be registed in a single deployment`, + `MatchingMaxTaskQueuesInDeployment represents the maximum number of task-queues that can be registered in a single deployment`, ) MatchingMaxDeployments = NewNamespaceIntSetting( "matching.maxDeployments", diff --git a/common/goro/package.go b/common/goro/package.go index 0a6aa57c80e..8abe6bc6564 100644 --- a/common/goro/package.go +++ b/common/goro/package.go @@ -1,5 +1,5 @@ // Package goro provides utilities for spawning and subsequently managing the -// liftime(s) of one or more goroutines. This package relies heavily on the +// lifetime(s) of one or more goroutines. This package relies heavily on the // context package to provide consistent cancellation semantics for long-lived // goroutines. The goal of this package is to provide a unified way to cancel // and wait on running goroutines as is often seen in "service" or "daemon" diff --git a/common/log/tag/tags.go b/common/log/tag/tags.go index ed6f29e33d5..726204b7d69 100644 --- a/common/log/tag/tags.go +++ b/common/log/tag/tags.go @@ -236,7 +236,7 @@ func WorkflowNextEventID(nextEventID int64) ZapTag { // WorkflowBeginningFirstEventID returns tag for WorkflowBeginningFirstEventID func WorkflowBeginningFirstEventID(beginningFirstEventID int64) ZapTag { - return NewInt64("wf-begining-first-event-id", beginningFirstEventID) + return NewInt64("wf-beginning-first-event-id", beginningFirstEventID) } // WorkflowEndingNextEventID returns tag for WorkflowEndingNextEventID @@ -859,8 +859,8 @@ func ArchivalRequestWorkflowID(requestWorkflowID string) ZapTag { return NewStringTag("archival-request-workflow-id", requestWorkflowID) } -// ArchvialRequestWorkflowType returns tag for RequestWorkflowType -func ArchvialRequestWorkflowType(requestWorkflowType string) ZapTag { +// ArchivalRequestWorkflowType returns tag for RequestWorkflowType +func ArchivalRequestWorkflowType(requestWorkflowType string) ZapTag { return NewStringTag("archival-request-workflow-type", requestWorkflowType) } diff --git a/common/metrics/config.go b/common/metrics/config.go index 1d98cc598fc..d42dbddf742 100644 --- a/common/metrics/config.go +++ b/common/metrics/config.go @@ -373,7 +373,7 @@ func buildTallyTimerHistogramBuckets( func setDefaultPerUnitHistogramBoundaries(clientConfig *ClientConfig) { buckets := maps.Clone(defaultPerUnitHistogramBoundaries) - // In config, when overwrite default buckets, we use [dimensionless / miliseconds / bytes] as keys. + // In config, when overwrite default buckets, we use [dimensionless / milliseconds / bytes] as keys. // But in code, we use [1 / ms / By] as key (to align with otel unit definition). So we do conversion here. 
if bucket, ok := clientConfig.PerUnitHistogramBoundaries[UnitNameDimensionless]; ok { buckets[Dimensionless] = bucket diff --git a/common/metrics/metric_defs.go b/common/metrics/metric_defs.go index 3a1169dfd33..2c3d2c84f2d 100644 --- a/common/metrics/metric_defs.go +++ b/common/metrics/metric_defs.go @@ -403,9 +403,9 @@ const ( ShardInfoScope = "ShardInfo" // WorkflowContextScope is the scope used by WorkflowContext component WorkflowContextScope = "WorkflowContext" - // ExecutionStatsScope is the scope used for emiting workflow execution related stats + // ExecutionStatsScope is the scope used for emitting workflow execution related stats ExecutionStatsScope = "ExecutionStats" - // SessionStatsScope is the scope used for emiting session update related stats + // SessionStatsScope is the scope used for emitting session update related stats SessionStatsScope = "SessionStats" // WorkflowCompletionStatsScope tracks workflow completion updates WorkflowCompletionStatsScope = "CompletionStats" @@ -813,7 +813,7 @@ var ( ) TaskDiscarded = NewCounterDef("task_errors_discarded") TaskSkipped = NewCounterDef("task_skipped") - TaskVersionMisMatch = NewCounterDef("task_errors_version_mismatch") + TaskVersionMismatch = NewCounterDef("task_errors_version_mismatch") TasksDependencyTaskNotCompleted = NewCounterDef("task_dependency_task_not_completed") TaskStandbyRetryCounter = NewCounterDef("task_errors_standby_retry_counter") TaskWorkflowBusyCounter = NewCounterDef( @@ -1212,32 +1212,32 @@ var ( "batcher_processor_requests", WithDescription("The number of individual workflow execution tasks successfully processed by the batch request processor"), ) - BatcherProcessorFailures = NewCounterDef("batcher_processor_errors") - BatcherOperationFailures = NewCounterDef("batcher_operation_errors") - ElasticsearchBulkProcessorRequests = NewCounterDef("elasticsearch_bulk_processor_requests") - ElasticsearchBulkProcessorQueuedRequests = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_queued_requests") - ElasticsearchBulkProcessorFailures = NewCounterDef("elasticsearch_bulk_processor_errors") - ElasticsearchBulkProcessorCorruptedData = NewCounterDef("elasticsearch_bulk_processor_corrupted_data") - ElasticsearchBulkProcessorDuplicateRequest = NewCounterDef("elasticsearch_bulk_processor_duplicate_request") - ElasticsearchBulkProcessorRequestLatency = NewTimerDef("elasticsearch_bulk_processor_request_latency") - ElasticsearchBulkProcessorCommitLatency = NewTimerDef("elasticsearch_bulk_processor_commit_latency") - ElasticsearchBulkProcessorWaitAddLatency = NewTimerDef("elasticsearch_bulk_processor_wait_add_latency") - ElasticsearchBulkProcessorWaitStartLatency = NewTimerDef("elasticsearch_bulk_processor_wait_start_latency") - ElasticsearchBulkProcessorBulkSize = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_bulk_size") - ElasticsearchBulkProcessorBulkResquestTookLatency = NewTimerDef("elasticsearch_bulk_processor_bulk_request_took_latency") - ElasticsearchDocumentParseFailuresCount = NewCounterDef("elasticsearch_document_parse_failures_counter") - ElasticsearchDocumentGenerateFailuresCount = NewCounterDef("elasticsearch_document_generate_failures_counter") - ElasticsearchCustomOrderByClauseCount = NewCounterDef("elasticsearch_custom_order_by_clause_counter") - CatchUpReadyShardCountGauge = NewGaugeDef("catchup_ready_shard_count") - HandoverReadyShardCountGauge = NewGaugeDef("handover_ready_shard_count") - ReplicatorMessages = NewCounterDef("replicator_messages") - ReplicatorFailures = 
NewCounterDef("replicator_errors") - ReplicatorLatency = NewTimerDef("replicator_latency") - ReplicatorDLQFailures = NewCounterDef("replicator_dlq_enqueue_fails") - NamespaceReplicationEnqueueDLQCount = NewCounterDef("namespace_replication_dlq_enqueue_requests") - ParentClosePolicyProcessorSuccess = NewCounterDef("parent_close_policy_processor_requests") - ParentClosePolicyProcessorFailures = NewCounterDef("parent_close_policy_processor_errors") - ScheduleMissedCatchupWindow = NewCounterDef( + BatcherProcessorFailures = NewCounterDef("batcher_processor_errors") + BatcherOperationFailures = NewCounterDef("batcher_operation_errors") + ElasticsearchBulkProcessorRequests = NewCounterDef("elasticsearch_bulk_processor_requests") + ElasticsearchBulkProcessorQueuedRequests = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_queued_requests") + ElasticsearchBulkProcessorFailures = NewCounterDef("elasticsearch_bulk_processor_errors") + ElasticsearchBulkProcessorCorruptedData = NewCounterDef("elasticsearch_bulk_processor_corrupted_data") + ElasticsearchBulkProcessorDuplicateRequest = NewCounterDef("elasticsearch_bulk_processor_duplicate_request") + ElasticsearchBulkProcessorRequestLatency = NewTimerDef("elasticsearch_bulk_processor_request_latency") + ElasticsearchBulkProcessorCommitLatency = NewTimerDef("elasticsearch_bulk_processor_commit_latency") + ElasticsearchBulkProcessorWaitAddLatency = NewTimerDef("elasticsearch_bulk_processor_wait_add_latency") + ElasticsearchBulkProcessorWaitStartLatency = NewTimerDef("elasticsearch_bulk_processor_wait_start_latency") + ElasticsearchBulkProcessorBulkSize = NewDimensionlessHistogramDef("elasticsearch_bulk_processor_bulk_size") + ElasticsearchBulkProcessorBulkRequestTookLatency = NewTimerDef("elasticsearch_bulk_processor_bulk_request_took_latency") + ElasticsearchDocumentParseFailuresCount = NewCounterDef("elasticsearch_document_parse_failures_counter") + ElasticsearchDocumentGenerateFailuresCount = NewCounterDef("elasticsearch_document_generate_failures_counter") + ElasticsearchCustomOrderByClauseCount = NewCounterDef("elasticsearch_custom_order_by_clause_counter") + CatchUpReadyShardCountGauge = NewGaugeDef("catchup_ready_shard_count") + HandoverReadyShardCountGauge = NewGaugeDef("handover_ready_shard_count") + ReplicatorMessages = NewCounterDef("replicator_messages") + ReplicatorFailures = NewCounterDef("replicator_errors") + ReplicatorLatency = NewTimerDef("replicator_latency") + ReplicatorDLQFailures = NewCounterDef("replicator_dlq_enqueue_fails") + NamespaceReplicationEnqueueDLQCount = NewCounterDef("namespace_replication_dlq_enqueue_requests") + ParentClosePolicyProcessorSuccess = NewCounterDef("parent_close_policy_processor_requests") + ParentClosePolicyProcessorFailures = NewCounterDef("parent_close_policy_processor_errors") + ScheduleMissedCatchupWindow = NewCounterDef( "schedule_missed_catchup_window", WithDescription("The number of times a schedule missed an action due to the configured catchup window"), ) diff --git a/common/metrics/noop_impl.go b/common/metrics/noop_impl.go index 62f325c6ef5..1ea1fa29d94 100644 --- a/common/metrics/noop_impl.go +++ b/common/metrics/noop_impl.go @@ -16,7 +16,7 @@ type ( func newNoopMetricsHandler() *noopMetricsHandler { return &noopMetricsHandler{} } -// WithTags creates a new MetricProvder with provided []Tag +// WithTags creates a new MetricProvider with provided []Tag // Tags are merged with registered Tags from the source MetricsHandler func (n *noopMetricsHandler) WithTags(...Tag) Handler { return 
n diff --git a/common/metrics/tally_metrics_handler.go b/common/metrics/tally_metrics_handler.go index 71d15e80ce0..7731fa04c96 100644 --- a/common/metrics/tally_metrics_handler.go +++ b/common/metrics/tally_metrics_handler.go @@ -40,7 +40,7 @@ func NewTallyMetricsHandler(cfg ClientConfig, scope tally.Scope) *tallyMetricsHa } } -// WithTags creates a new MetricProvder with provided []Tag +// WithTags creates a new MetricProvider with provided []Tag // Tags are merged with registered Tags from the source MetricsHandler func (tmh *tallyMetricsHandler) WithTags(tags ...Tag) Handler { return &tallyMetricsHandler{ diff --git a/common/namespace/const.go b/common/namespace/const.go index 9d3f48b22c9..6c476aaeea7 100644 --- a/common/namespace/const.go +++ b/common/namespace/const.go @@ -3,11 +3,11 @@ package namespace import "time" const ( - // MinRetentionGlobal is a hard limit for the minimun retention duration for global + // MinRetentionGlobal is a hard limit for the minimum retention duration for global // namespaces (to allow time for replication). MinRetentionGlobal = 1 * 24 * time.Hour - // MinRetentionLocal is a hard limit for the minimun retention duration for local + // MinRetentionLocal is a hard limit for the minimum retention duration for local // namespaces. Allow short values but disallow zero to avoid confusion with // interpreting zero as infinite. MinRetentionLocal = 1 * time.Hour diff --git a/common/namespace/namespace.go b/common/namespace/namespace.go index a0055802b0f..8c0f611d6cf 100644 --- a/common/namespace/namespace.go +++ b/common/namespace/namespace.go @@ -180,7 +180,7 @@ func (ns *Namespace) ReplicationState() enumspb.ReplicationState { } // ActiveClusterName observes the name of the cluster that is currently active -// for this namspace. +// for this namespace. func (ns *Namespace) ActiveClusterName() string { if ns.replicationConfig == nil { return "" diff --git a/common/namespace/nsregistry/registry_test.go b/common/namespace/nsregistry/registry_test.go index 6a519d7420e..743989966f2 100644 --- a/common/namespace/nsregistry/registry_test.go +++ b/common/namespace/nsregistry/registry_test.go @@ -435,7 +435,7 @@ func (s *registrySuite) TestUpdateCache_TriggerCallBack() { newEntries := entries[2:] - // entry1 only has descrption update, so won't trigger the state change callback + // entry1 only has description update, so won't trigger the state change callback s.Len(newEntries, 1) s.Equal([]*namespace.Namespace{entry2New}, newEntries) } diff --git a/common/nexus/nexusrpc/client.go b/common/nexus/nexusrpc/client.go index 126c1f48150..e89678f25d2 100644 --- a/common/nexus/nexusrpc/client.go +++ b/common/nexus/nexusrpc/client.go @@ -46,7 +46,7 @@ var errEmptyOperationToken = errors.New("empty operation token") type UnexpectedResponseError struct { // Error message. Message string - // Optional failure that may have been emedded in the response. + // Optional failure that may have been embedded in the response. Failure *nexus.Failure // Additional transport specific details. // For HTTP, this would include the HTTP response. The response body will have already been read into memory and diff --git a/common/nexus/nexusrpc/completion.go b/common/nexus/nexusrpc/completion.go index 9c5a49df64e..27c45960a34 100644 --- a/common/nexus/nexusrpc/completion.go +++ b/common/nexus/nexusrpc/completion.go @@ -168,7 +168,7 @@ type OperationCompletionUnsuccessfulOptions struct { FailureConverter nexus.FailureConverter // OperationID is the unique ID for this operation. 
Used when a completion callback is received before a started response. // - // Deprecated: Use OperatonToken instead. + // Deprecated: Use OperationToken instead. OperationID string // OperationToken is the unique token for this operation. Used when a completion callback is received before a // started response. @@ -264,7 +264,7 @@ type CompletionHandler interface { type CompletionHandlerOptions struct { // Handler for completion requests. Handler CompletionHandler - // A stuctured logging handler. + // A structured logging handler. // Defaults to slog.Default(). Logger *slog.Logger // A [Serializer] to customize handler serialization behavior. diff --git a/common/nexus/nexusrpc/server.go b/common/nexus/nexusrpc/server.go index a4b16ae8930..e53c56333fc 100644 --- a/common/nexus/nexusrpc/server.go +++ b/common/nexus/nexusrpc/server.go @@ -274,7 +274,7 @@ func (h *httpHandler) contextWithTimeoutFromHTTPRequest(writer http.ResponseWrit type HandlerOptions struct { // Handler for handling service requests. Handler nexus.Handler - // A stuctured logger. + // A structured logger. // Defaults to slog.Default(). Logger *slog.Logger // Max duration to allow waiting for a single get result request. diff --git a/common/payloads/payloads.go b/common/payloads/payloads.go index ee8b0fd9df3..5744eb75ff2 100644 --- a/common/payloads/payloads.go +++ b/common/payloads/payloads.go @@ -13,19 +13,19 @@ var ( ) func EncodeString(str string) *commonpb.Payloads { - // Error can be safely ignored here becase string always can be converted. + // Error can be safely ignored here because string always can be converted. ps, _ := defaultDataConverter.ToPayloads(str) return ps } func EncodeInt(i int) *commonpb.Payloads { - // Error can be safely ignored here becase int always can be converted. + // Error can be safely ignored here because int always can be converted. ps, _ := defaultDataConverter.ToPayloads(i) return ps } func EncodeBytes(bytes []byte) *commonpb.Payloads { - // Error can be safely ignored here becase []byte always can be raw encoded. + // Error can be safely ignored here because []byte always can be raw encoded. ps, _ := defaultDataConverter.ToPayloads(bytes) return ps } diff --git a/common/persistence/cassandra/mutable_state_task_store.go b/common/persistence/cassandra/mutable_state_task_store.go index 76055665008..38fd682f519 100644 --- a/common/persistence/cassandra/mutable_state_task_store.go +++ b/common/persistence/cassandra/mutable_state_task_store.go @@ -739,7 +739,7 @@ func (d *MutableStateTaskStore) getHistoryTasks( ) (*p.InternalGetHistoryTasksResponse, error) { switch request.TaskCategory.Type() { case tasks.CategoryTypeImmediate: - return d.getHistoryImmedidateTasks(ctx, request) + return d.getHistoryImmediateTasks(ctx, request) case tasks.CategoryTypeScheduled: return d.getHistoryScheduledTasks(ctx, request) default: @@ -747,7 +747,7 @@ func (d *MutableStateTaskStore) getHistoryTasks( } } -func (d *MutableStateTaskStore) getHistoryImmedidateTasks( +func (d *MutableStateTaskStore) getHistoryImmediateTasks( ctx context.Context, request *p.GetHistoryTasksRequest, ) (*p.InternalGetHistoryTasksResponse, error) { diff --git a/common/persistence/data_blob.go b/common/persistence/data_blob.go index 10ad7f6a3ab..d4c10c7b2c2 100644 --- a/common/persistence/data_blob.go +++ b/common/persistence/data_blob.go @@ -6,7 +6,7 @@ import ( ) // NewDataBlob returns a new DataBlob. 
-// TODO: return an UnknowEncodingType error with the actual type string when encodingTypeStr is invalid +// TODO: return an UnknownEncodingType error with the actual type string when encodingTypeStr is invalid func NewDataBlob(data []byte, encodingTypeStr string) *commonpb.DataBlob { encodingType, err := enumspb.EncodingTypeFromString(encodingTypeStr) if err != nil { diff --git a/common/persistence/data_interfaces.go b/common/persistence/data_interfaces.go index c9137af2ee5..03c525f38a3 100644 --- a/common/persistence/data_interfaces.go +++ b/common/persistence/data_interfaces.go @@ -239,7 +239,7 @@ type ( Mode ConflictResolveWorkflowMode - // workflow to be resetted + // workflow to be reset ResetWorkflowSnapshot WorkflowSnapshot ResetWorkflowEvents []*WorkflowEvents diff --git a/common/persistence/execution_manager.go b/common/persistence/execution_manager.go index dc5c1792320..f38b270d4b1 100644 --- a/common/persistence/execution_manager.go +++ b/common/persistence/execution_manager.go @@ -1160,7 +1160,7 @@ func getCurrentBranchLastWriteVersion( // We are still checking version history first here since it's the old logic and we want to minimize the risk for now // and to account for the fact that transition history is not fully enabled. // - // Theoritically, using version history here is wrong because there can be transitions (even on Workflows) that have no + // Theoretically, using version history here is wrong because there can be transitions (even on Workflows) that have no // events (e.g. Activity Heartbeat). // // Although using version history has the benefit of ensuring the returned version don't change after the run is closed, using diff --git a/common/persistence/history_manager.go b/common/persistence/history_manager.go index 1aa76ac86c4..c14abb060fe 100644 --- a/common/persistence/history_manager.go +++ b/common/persistence/history_manager.go @@ -353,7 +353,7 @@ func (m *executionManagerImpl) serializeAppendHistoryNodesRequest( } if e.EventId != lastID+1 { return nil, &InvalidPersistenceRequestError{ - Msg: "event ID must be continous", + Msg: "event ID must be continuous", } } lastID++ diff --git a/common/persistence/persistence-tests/history_v2_persistence.go b/common/persistence/persistence-tests/history_v2_persistence.go index 52168801851..9f3de684ae7 100644 --- a/common/persistence/persistence-tests/history_v2_persistence.go +++ b/common/persistence/persistence-tests/history_v2_persistence.go @@ -429,7 +429,7 @@ func (s *HistoryV2PersistenceSuite) TestConcurrentlyCreateAndAppendBranches() { _, ok = err.(*serviceerror.DataLoss) s.Equal(true, ok) - // override more with larger txn_id, this would fix the corrupted hole so that we cna get 20 events again + // override more with larger txn_id, this would fix the corrupted hole so that we can get 20 events again events = s.genRandomEvents([]int64{7, 8}, 1) err = s.appendNewNode(branch, events, 3002) s.Nil(err) diff --git a/common/persistence/persistence-tests/shared_test.go b/common/persistence/persistence-tests/shared_test.go index 04167bc7018..ff4f2109320 100644 --- a/common/persistence/persistence-tests/shared_test.go +++ b/common/persistence/persistence-tests/shared_test.go @@ -18,7 +18,7 @@ func TestGarbageCleanupInfo(t *testing.T) { } } -func TestGarbageCleanupInfo_WithColonInWorklfowID(t *testing.T) { +func TestGarbageCleanupInfo_WithColonInWorkflowID(t *testing.T) { namespaceID := "10000000-5000-f000-f000-000000000000" workflowID := "workflow-id:2" runID := "10000000-5000-f000-f000-000000000002" diff --git 
a/common/persistence/persistence_interface.go b/common/persistence/persistence_interface.go index af109b7b41a..77765d2444c 100644 --- a/common/persistence/persistence_interface.go +++ b/common/persistence/persistence_interface.go @@ -369,7 +369,7 @@ type ( Mode ConflictResolveWorkflowMode - // workflow to be resetted + // workflow to be reset ResetWorkflowSnapshot InternalWorkflowSnapshot ResetWorkflowEventsNewEvents []*InternalAppendHistoryNodesRequest `json:",omitempty"` // maybe new workflow diff --git a/common/persistence/sql/execution_tasks.go b/common/persistence/sql/execution_tasks.go index d203b58d8b8..60342980a3a 100644 --- a/common/persistence/sql/execution_tasks.go +++ b/common/persistence/sql/execution_tasks.go @@ -82,7 +82,7 @@ func (m *sqlExecutionStore) getHistoryImmediateTasks( ctx context.Context, request *p.GetHistoryTasksRequest, ) (*p.InternalGetHistoryTasksResponse, error) { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_immediate_tasks table is created, // so they have their own tables. categoryID := request.TaskCategory.ID() @@ -141,7 +141,7 @@ func (m *sqlExecutionStore) completeHistoryImmediateTask( ctx context.Context, request *p.CompleteHistoryTaskRequest, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_immediate_tasks table is created, // so they have their own tables. categoryID := request.TaskCategory.ID() @@ -170,7 +170,7 @@ func (m *sqlExecutionStore) rangeCompleteHistoryImmediateTasks( ctx context.Context, request *p.RangeCompleteHistoryTasksRequest, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_immediate_tasks table is created, // so they have their own tables. categoryID := request.TaskCategory.ID() @@ -200,7 +200,7 @@ func (m *sqlExecutionStore) getHistoryScheduledTasks( ctx context.Context, request *p.GetHistoryTasksRequest, ) (*p.InternalGetHistoryTasksResponse, error) { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_scheduled_tasks table is created, // so they have their own tables. categoryID := request.TaskCategory.ID() @@ -259,7 +259,7 @@ func (m *sqlExecutionStore) completeHistoryScheduledTask( ctx context.Context, request *p.CompleteHistoryTaskRequest, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_scheduled_tasks table is created, // so they have their own tables. categoryID := request.TaskCategory.ID() @@ -282,7 +282,7 @@ func (m *sqlExecutionStore) rangeCompleteHistoryScheduledTasks( ctx context.Context, request *p.RangeCompleteHistoryTasksRequest, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_scheduled_tasks table is created, // so they have their own tables. 
categoryID := request.TaskCategory.ID() diff --git a/common/persistence/sql/execution_util.go b/common/persistence/sql/execution_util.go index 1b09c14f582..e5aaa80f33b 100644 --- a/common/persistence/sql/execution_util.go +++ b/common/persistence/sql/execution_util.go @@ -69,7 +69,7 @@ func applyWorkflowMutationTx( workflowMutation.DBRecordVersion, shardID, ); err != nil { - return serviceerror.NewUnavailablef("applyWorkflowMutationTx failed. Failed to update executions row. Erorr: %v", err) + return serviceerror.NewUnavailablef("applyWorkflowMutationTx failed. Failed to update executions row. Error: %v", err) } if err := applyTasks(ctx, @@ -238,7 +238,7 @@ func applyWorkflowSnapshotTxAsReset( workflowSnapshot.DBRecordVersion, shardID, ); err != nil { - return serviceerror.NewUnavailablef("applyWorkflowSnapshotTxAsReset failed. Failed to update executions row. Erorr: %v", err) + return serviceerror.NewUnavailablef("applyWorkflowSnapshotTxAsReset failed. Failed to update executions row. Error: %v", err) } if err := applyTasks(ctx, @@ -695,26 +695,26 @@ func createImmediateTasks( tx sqlplugin.Tx, shardID int32, categoryID int, - immedidateTasks []p.InternalHistoryTask, + immediateTasks []p.InternalHistoryTask, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exist before the general history_immediate_tasks table is created, // so they have their own tables. switch categoryID { case tasks.CategoryIDTransfer: - return createTransferTasks(ctx, tx, shardID, immedidateTasks) + return createTransferTasks(ctx, tx, shardID, immediateTasks) case tasks.CategoryIDVisibility: - return createVisibilityTasks(ctx, tx, shardID, immedidateTasks) + return createVisibilityTasks(ctx, tx, shardID, immediateTasks) case tasks.CategoryIDReplication: - return createReplicationTasks(ctx, tx, shardID, immedidateTasks) + return createReplicationTasks(ctx, tx, shardID, immediateTasks) } - if len(immedidateTasks) == 0 { + if len(immediateTasks) == 0 { return nil } - immediateTasksRows := make([]sqlplugin.HistoryImmediateTasksRow, 0, len(immedidateTasks)) - for _, task := range immedidateTasks { + immediateTasksRows := make([]sqlplugin.HistoryImmediateTasksRow, 0, len(immediateTasks)) + for _, task := range immediateTasks { immediateTasksRows = append(immediateTasksRows, sqlplugin.HistoryImmediateTasksRow{ ShardID: shardID, CategoryID: int32(categoryID), @@ -747,7 +747,7 @@ func createScheduledTasks( categoryID int, scheduledTasks []p.InternalHistoryTask, ) error { - // This is for backward compatiblity. + // This is for backward compatibility. // These task categories exists before the general history_scheduled_tasks table is created, // so they have their own tables. if categoryID == tasks.CategoryIDTimer { @@ -1134,11 +1134,11 @@ func (m *sqlExecutionStore) createExecution( DBRecordVersion: 0, } } - return serviceerror.NewUnavailablef("createExecution failed. Erorr: %v", err) + return serviceerror.NewUnavailablef("createExecution failed. Error: %v", err) } rowsAffected, err := result.RowsAffected() if err != nil { - return serviceerror.NewUnavailablef("createExecution failed. Failed to verify number of rows affected. Erorr: %v", err) + return serviceerror.NewUnavailablef("createExecution failed. Failed to verify number of rows affected. Error: %v", err) } if rowsAffected != 1 { return serviceerror.NewNotFoundf("createExecution failed. 
Affected %v rows updated instead of 1.", rowsAffected) @@ -1174,11 +1174,11 @@ func updateExecution( } result, err := tx.UpdateExecutions(ctx, row) if err != nil { - return serviceerror.NewUnavailablef("updateExecution failed. Erorr: %v", err) + return serviceerror.NewUnavailablef("updateExecution failed. Error: %v", err) } rowsAffected, err := result.RowsAffected() if err != nil { - return serviceerror.NewUnavailablef("updateExecution failed. Failed to verify number of rows affected. Erorr: %v", err) + return serviceerror.NewUnavailablef("updateExecution failed. Failed to verify number of rows affected. Error: %v", err) } if rowsAffected != 1 { return serviceerror.NewNotFoundf("updateExecution failed. Affected %v rows updated instead of 1.", rowsAffected) diff --git a/common/persistence/sql/sqlplugin/mysql/session/session.go b/common/persistence/sql/sqlplugin/mysql/session/session.go index 8427a748f4b..31c9050fcf7 100644 --- a/common/persistence/sql/sqlplugin/mysql/session/session.go +++ b/common/persistence/sql/sqlplugin/mysql/session/session.go @@ -145,7 +145,7 @@ func buildDSNAttrs(dbKind sqlplugin.DbKind, cfg *config.SQL) (map[string]string, attrs[isolationLevelAttrName] = defaultIsolationLevel } - // these attrs are always overriden + // these attrs are always overridden for k, v := range dsnAttrOverrides { attrs[k] = v } diff --git a/common/persistence/sql/sqlplugin/tests/history_execution_timer.go b/common/persistence/sql/sqlplugin/tests/history_execution_timer.go index 5f9ac9997ab..958226dcb94 100644 --- a/common/persistence/sql/sqlplugin/tests/history_execution_timer.go +++ b/common/persistence/sql/sqlplugin/tests/history_execution_timer.go @@ -161,14 +161,14 @@ func (s *historyExecutionTimerSuite) TestDeleteSelect_Single() { runID := primitives.NewUUID() timerID := shuffle.String(testHistoryExecutionTimerID) - deletFilter := sqlplugin.TimerInfoMapsFilter{ + deleteFilter := sqlplugin.TimerInfoMapsFilter{ ShardID: shardID, NamespaceID: namespaceID, WorkflowID: workflowID, RunID: runID, TimerIDs: []string{timerID}, } - result, err := s.store.DeleteFromTimerInfoMaps(newExecutionContext(), deletFilter) + result, err := s.store.DeleteFromTimerInfoMaps(newExecutionContext(), deleteFilter) s.NoError(err) rowsAffected, err := result.RowsAffected() s.NoError(err) diff --git a/common/persistence/visibility/store/elasticsearch/converter_test.go b/common/persistence/visibility/store/elasticsearch/converter_test.go index 917821948dd..6f2e8ff063a 100644 --- a/common/persistence/visibility/store/elasticsearch/converter_test.go +++ b/common/persistence/visibility/store/elasticsearch/converter_test.go @@ -66,7 +66,7 @@ var supportedWhereCases = map[string]string{ "value = 1528358645.123456790": `{"bool":{"filter":{"match":{"value":{"query":1528358645.1234567}}}}}`, "id in (\"text1\",'text2') and content = 'aaaa'": `{"bool":{"filter":[{"terms":{"id":["text1","text2"]}},{"match":{"content":{"query":"aaaa"}}}]}}`, "create_time BETWEEN '2015-01-01 00:00:00' and '2016-02-02 00:00:00'": `{"bool":{"filter":{"range":{"create_time":{"from":"2015-01-01 00:00:00","include_lower":true,"include_upper":true,"to":"2016-02-02 00:00:00"}}}}}`, - "create_time nOt between '2015-01-01 00:00:00' and '2016-02-02 00:00:00'": `{"bool":{"must_not":{"range":{"create_time":{"from":"2015-01-01 00:00:00","include_lower":true,"include_upper":true,"to":"2016-02-02 00:00:00"}}}}}`, + "create_time not between '2015-01-01 00:00:00' and '2016-02-02 00:00:00'": `{"bool":{"must_not":{"range":{"create_time":{"from":"2015-01-01 
00:00:00","include_lower":true,"include_upper":true,"to":"2016-02-02 00:00:00"}}}}}`, "create_time between '2015-01-01T00:00:00+0800' and '2017-01-01T00:00:00+0800' and process_id = 0 and status >= 1 and content = '三个男人' and phone = '15810324322'": `{"bool":{"filter":[{"range":{"create_time":{"from":"2015-01-01T00:00:00+0800","include_lower":true,"include_upper":true,"to":"2017-01-01T00:00:00+0800"}}},{"term":{"process_id":0}},{"range":{"status":{"from":1,"include_lower":true,"include_upper":true,"to":null}}},{"match":{"content":{"query":"三个男人"}}},{"match":{"phone":{"query":"15810324322"}}}]}}`, "value starts_with 'prefix'": `{"bool":{"filter":{"prefix":{"value":"prefix"}}}}`, "value not starts_with 'prefix'": `{"bool":{"must_not":{"prefix":{"value":"prefix"}}}}`, diff --git a/common/persistence/visibility/store/elasticsearch/processor.go b/common/persistence/visibility/store/elasticsearch/processor.go index 3b7ba46d6d8..a9f13409f1e 100644 --- a/common/persistence/visibility/store/elasticsearch/processor.go +++ b/common/persistence/visibility/store/elasticsearch/processor.go @@ -232,7 +232,7 @@ func (p *processorImpl) bulkAfterAction(_ int64, requests []elastic.BulkableRequ } // Record how long the Elasticsearch took to process the bulk request. - metrics.ElasticsearchBulkProcessorBulkResquestTookLatency.With(p.metricsHandler). + metrics.ElasticsearchBulkProcessorBulkRequestTookLatency.With(p.metricsHandler). Record(time.Duration(response.Took) * time.Millisecond) responseIndex := p.buildResponseIndex(response) diff --git a/common/persistence/visibility/store/elasticsearch/processor_test.go b/common/persistence/visibility/store/elasticsearch/processor_test.go index 92c0bd52bd2..a7834a8ec84 100644 --- a/common/persistence/visibility/store/elasticsearch/processor_test.go +++ b/common/persistence/visibility/store/elasticsearch/processor_test.go @@ -263,7 +263,7 @@ func (s *processorSuite) TestBulkAfterAction_Ack() { metrics.ElasticsearchBulkProcessorQueuedRequests.Unit(), ).Return(queuedRequestHistogram) queuedRequestHistogram.EXPECT().Record(int64(0)) - s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkResquestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) + s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkRequestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorRequestLatency.Name()).Return(metrics.NoopTimerMetricFunc) mapVal := newAckFuture() s.esProcessor.mapToAckFuture.Put(testKey, mapVal) @@ -313,7 +313,7 @@ func (s *processorSuite) TestBulkAfterAction_Nack() { metrics.ElasticsearchBulkProcessorQueuedRequests.Unit(), ).Return(queuedRequestHistogram) queuedRequestHistogram.EXPECT().Record(int64(0)) - s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkResquestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) + s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkRequestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorRequestLatency.Name()).Return(metrics.NoopTimerMetricFunc) mapVal := newAckFuture() s.esProcessor.mapToAckFuture.Put(testKey, mapVal) @@ -572,7 +572,7 @@ func (s *processorSuite) Test_End2End() { s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorWaitStartLatency.Name()).Return(metrics.NoopTimerMetricFunc).Times(docsCount) s.esProcessor.bulkBeforeAction(0, bulkIndexRequests) - 
s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkResquestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) + s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorBulkRequestTookLatency.Name()).Return(metrics.NoopTimerMetricFunc) s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorRequestLatency.Name()).Return(metrics.NoopTimerMetricFunc).Times(docsCount) s.mockMetricHandler.EXPECT().Timer(metrics.ElasticsearchBulkProcessorCommitLatency.Name()).Return(metrics.NoopTimerMetricFunc).Times(docsCount) s.esProcessor.bulkAfterAction(0, bulkIndexRequests, bulkIndexResponse, nil) diff --git a/common/persistence/visibility/store/elasticsearch/visibility_store.go b/common/persistence/visibility/store/elasticsearch/visibility_store.go index c8646ddab53..6077011b7f0 100644 --- a/common/persistence/visibility/store/elasticsearch/visibility_store.go +++ b/common/persistence/visibility/store/elasticsearch/visibility_store.go @@ -1290,12 +1290,12 @@ func parsePageTokenValue( jsonNumber, ok := jsonValue.(json.Number) if !ok { return nil, serviceerror.NewInvalidArgumentf( - "invalid page token: expected interger type, got %q", jsonValue) + "invalid page token: expected integer type, got %q", jsonValue) } num, err := jsonNumber.Int64() if err != nil { return nil, serviceerror.NewInvalidArgumentf( - "invalid page token: expected interger type, got %v", jsonValue) + "invalid page token: expected integer type, got %v", jsonValue) } if num == math.MaxInt64 || num == math.MinInt64 { return nil, nil diff --git a/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go b/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go index c6c648ae0c9..f45999db218 100644 --- a/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go +++ b/common/persistence/visibility/store/elasticsearch/visibility_store_read_test.go @@ -1808,7 +1808,7 @@ func (s *ESVisibilitySuite) Test_parsePageTokenValue() { value: "123", tp: enumspb.INDEXED_VALUE_TYPE_INT, res: nil, - err: serviceerror.NewInvalidArgument("invalid page token: expected interger type, got \"123\""), + err: serviceerror.NewInvalidArgument("invalid page token: expected integer type, got \"123\""), }, { name: "DoubleFieldError", diff --git a/common/persistence/visibility/store/query/util_test.go b/common/persistence/visibility/store/query/util_test.go index ea426de6a3a..6f295ac3a7c 100644 --- a/common/persistence/visibility/store/query/util_test.go +++ b/common/persistence/visibility/store/query/util_test.go @@ -17,8 +17,8 @@ func TestUnsafeSQLString(t *testing.T) { require.Equal(t, `'foo'`, sqlparser.String(val)) // chars are not escaped - val = NewUnsafeSQLString("fo'o") - require.Equal(t, `'fo'o'`, sqlparser.String(val)) + val = NewUnsafeSQLString("fo'o") // typos:disable-line + require.Equal(t, `'fo'o'`, sqlparser.String(val)) // typos:disable-line } func TestColName(t *testing.T) { diff --git a/common/persistence/visibility/store/sql/query_converter_legacy.go b/common/persistence/visibility/store/sql/query_converter_legacy.go index 7c6c8eb0851..a59198377ad 100644 --- a/common/persistence/visibility/store/sql/query_converter_legacy.go +++ b/common/persistence/visibility/store/sql/query_converter_legacy.go @@ -70,7 +70,7 @@ var ( sqlparser.NotStartsWithStr, } - supportedKeyworkListOperators = []string{ + supportedKeywordListOperators = []string{ sqlparser.EqualStr, sqlparser.NotEqualStr, sqlparser.InStr, @@ -648,7 +648,7 @@ func 
isSupportedComparisonOperator(operator string) bool { } func isSupportedKeywordListOperator(operator string) bool { - return isSupportedOperator(supportedKeyworkListOperators, operator) + return isSupportedOperator(supportedKeywordListOperators, operator) } func isSupportedTextOperator(operator string) bool { diff --git a/common/persistence/visibility/visibility_manager_impl.go b/common/persistence/visibility/visibility_manager_impl.go index a0402eb3872..a6abd629fea 100644 --- a/common/persistence/visibility/visibility_manager_impl.go +++ b/common/persistence/visibility/visibility_manager_impl.go @@ -188,7 +188,7 @@ func (p *visibilityManagerImpl) newInternalVisibilityRequestBase( var searchAttrs *commonpb.SearchAttributes if len(request.SearchAttributes.GetIndexedFields()) > 0 { // Remove any system search attribute from the map. - // This is necessary because the validation can supress errors when trying + // This is necessary because the validation can suppress errors when trying // to set a value on a system search attribute. searchAttrs = &commonpb.SearchAttributes{ IndexedFields: make(map[string]*commonpb.Payload), diff --git a/common/persistence/visibility/visiblity_manager_metrics.go b/common/persistence/visibility/visibility_manager_metrics.go similarity index 100% rename from common/persistence/visibility/visiblity_manager_metrics.go rename to common/persistence/visibility/visibility_manager_metrics.go diff --git a/common/predicates/empty_test.go b/common/predicates/empty_test.go index fcc7e34ca9c..90615e192da 100644 --- a/common/predicates/empty_test.go +++ b/common/predicates/empty_test.go @@ -12,7 +12,7 @@ type ( suite.Suite *require.Assertions - emtpy Predicate[int] + empty Predicate[int] } ) @@ -24,28 +24,28 @@ func TestNoneSuite(t *testing.T) { func (s *emptySuite) SetupTest() { s.Assertions = require.New(s.T()) - s.emtpy = Empty[int]() + s.empty = Empty[int]() } func (s *emptySuite) TestEmpty_Test() { for i := 0; i != 10; i++ { - s.False(s.emtpy.Test(i)) + s.False(s.empty.Test(i)) } } func (s *emptySuite) TestEmpty_Equals() { - s.True(s.emtpy.Equals(s.emtpy)) - s.True(s.emtpy.Equals(Empty[int]())) + s.True(s.empty.Equals(s.empty)) + s.True(s.empty.Equals(Empty[int]())) - s.False(s.emtpy.Equals(newTestPredicate(1, 2, 3))) - s.False(s.emtpy.Equals(And[int]( + s.False(s.empty.Equals(newTestPredicate(1, 2, 3))) + s.False(s.empty.Equals(And[int]( newTestPredicate(1, 2, 3), newTestPredicate(2, 3, 4), ))) - s.False(s.emtpy.Equals(Or[int]( + s.False(s.empty.Equals(Or[int]( newTestPredicate(1, 2, 3), newTestPredicate(4, 5, 6), ))) - s.False(s.emtpy.Equals(Not[int](newTestPredicate(1, 2, 3)))) - s.False(s.emtpy.Equals(Universal[int]())) + s.False(s.empty.Equals(Not[int](newTestPredicate(1, 2, 3)))) + s.False(s.empty.Equals(Universal[int]())) } diff --git a/common/predicates/predicates.go b/common/predicates/predicates.go index 77dcc8e573c..f95458187de 100644 --- a/common/predicates/predicates.go +++ b/common/predicates/predicates.go @@ -2,13 +2,13 @@ package predicates type ( Predicate[T any] interface { - // Test checks if the given entity statisfy the predicate or not + // Test checks if the given entity satisfy the predicate or not Test(T) bool // Equals recursively checks if the given Predicate has the same // structure and value as the caller Predicate // NOTE: the result will contain false negatives, meaning even if - // two predicates are mathmatically equivalent, Equals may still + // two predicates are mathematically equivalent, Equals may still // return false. 
Equals(Predicate[T]) bool diff --git a/common/priorities/priority_util_test.go b/common/priorities/priority_util_test.go index 730ebcf0ecd..0f27188a8d1 100644 --- a/common/priorities/priority_util_test.go +++ b/common/priorities/priority_util_test.go @@ -37,37 +37,37 @@ func TestMerge(t *testing.T) { expected: defaultPriority, }, { - name: "priority key is overriden", + name: "priority key is overridden", base: defaultPriority, override: &commonpb.Priority{PriorityKey: 5}, expected: &commonpb.Priority{PriorityKey: 5}, }, { - name: "priority key is not overriden by default value", + name: "priority key is not overridden by default value", base: &commonpb.Priority{PriorityKey: 1}, override: defaultPriority, expected: &commonpb.Priority{PriorityKey: 1}, }, { - name: "fairness key is overriden", + name: "fairness key is overridden", base: defaultPriority, override: &commonpb.Priority{FairnessKey: "one"}, expected: &commonpb.Priority{FairnessKey: "one"}, }, { - name: "fairness key is not overriden by default value", + name: "fairness key is not overridden by default value", base: &commonpb.Priority{FairnessKey: "two"}, override: defaultPriority, expected: &commonpb.Priority{FairnessKey: "two"}, }, { - name: "fairness weight is overriden", + name: "fairness weight is overridden", base: defaultPriority, override: &commonpb.Priority{FairnessWeight: 3.0}, expected: &commonpb.Priority{FairnessWeight: 3.0}, }, { - name: "fairness weight is not overriden by default value", + name: "fairness weight is not overridden by default value", base: &commonpb.Priority{FairnessWeight: 3.0}, override: defaultPriority, expected: &commonpb.Priority{FairnessWeight: 3.0}, diff --git a/common/protocol/naming.go b/common/protocol/naming.go index cc7097190d8..cabdd952bd2 100644 --- a/common/protocol/naming.go +++ b/common/protocol/naming.go @@ -33,7 +33,7 @@ func (mt MessageType) String() string { return string(mt) } -// String tranforms a Type into a string +// String transforms a Type into a string func (pt Type) String() string { return string(pt) } diff --git a/common/rpc/encryption/local_store_cert_provider.go b/common/rpc/encryption/local_store_cert_provider.go index eb6b51f4b83..51f31829c1c 100644 --- a/common/rpc/encryption/local_store_cert_provider.go +++ b/common/rpc/encryption/local_store_cert_provider.go @@ -262,7 +262,7 @@ func (s *localStoreCertProvider) fetchCertificate( } if certFile != "" && certData != "" { - return nil, errors.New("only one of certFile or certData properties should be spcified") + return nil, errors.New("only one of certFile or certData properties should be specified") } var certBytes []byte diff --git a/common/rpc/interceptor/dc_redirection_policy_test.go b/common/rpc/interceptor/dc_redirection_policy_test.go index 95301aaa593..f0f2a905b70 100644 --- a/common/rpc/interceptor/dc_redirection_policy_test.go +++ b/common/rpc/interceptor/dc_redirection_policy_test.go @@ -162,7 +162,7 @@ func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect s.Equal(2, callCount) } -func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_NamespaceNotWhiltelisted() { +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_NamespaceNotWhitelisted() { s.setupGlobalNamespaceWithTwoReplicationCluster(false, true) apiName := "any random API name" @@ -182,7 +182,7 @@ func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect s.Equal(2, callCount) } -func (s 
*selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_APINotWhiltelisted() { +func (s *selectedAPIsForwardingRedirectionPolicySuite) TestWithNamespaceRedirect_GlobalNamespace_NoForwarding_APINotWhitelisted() { s.setupGlobalNamespaceWithTwoReplicationCluster(true, true) callCount := 0 diff --git a/common/rpc/interceptor/mask_internal_error.go b/common/rpc/interceptor/mask_internal_error.go index 090a7331416..51a5eae391d 100644 --- a/common/rpc/interceptor/mask_internal_error.go +++ b/common/rpc/interceptor/mask_internal_error.go @@ -94,13 +94,13 @@ func (mi *MaskInternalErrorDetailsInterceptor) logError( statusCode codes.Code, ) { methodName := api.MethodName(fullMethod) - overridedMethodName := telemetryOverrideOperationTag(fullMethod, methodName) + overriddenMethodName := telemetryOverrideOperationTag(fullMethod, methodName) nsName := MustGetNamespaceName(mi.namespaceRegistry, req) var logTags []tag.Tag if nsName == "" { - logTags = []tag.Tag{tag.Operation(overridedMethodName)} + logTags = []tag.Tag{tag.Operation(overriddenMethodName)} } else { - logTags = []tag.Tag{tag.Operation(overridedMethodName), tag.WorkflowNamespace(nsName.String())} + logTags = []tag.Tag{tag.Operation(overriddenMethodName), tag.WorkflowNamespace(nsName.String())} } logTags = append(logTags, tag.NewStringTag("hash", errorHash)) diff --git a/common/rpc/interceptor/mask_internal_error_test.go b/common/rpc/interceptor/mask_internal_error_test.go index 87d15fd3412..6a9bf355b48 100644 --- a/common/rpc/interceptor/mask_internal_error_test.go +++ b/common/rpc/interceptor/mask_internal_error_test.go @@ -28,7 +28,7 @@ func TestMaskUnknownOrInternalErrors(t *testing.T) { testMaskUnknownOrInternalErrors(t, statusInternal, true) } -func testMaskUnknownOrInternalErrors(t *testing.T, st *status.Status, expectRelpace bool) { +func testMaskUnknownOrInternalErrors(t *testing.T, st *status.Status, expectReplace bool) { controller := gomock.NewController(t) mockRegistry := namespace.NewMockRegistry(controller) mockLogger := log.NewMockLogger(controller) @@ -37,11 +37,11 @@ func testMaskUnknownOrInternalErrors(t *testing.T, st *status.Status, expectRelp dynamicconfig.FrontendMaskInternalErrorDetails.Get(dc), mockRegistry, mockLogger) err := serviceerror.FromStatus(st) - if expectRelpace { + if expectReplace { mockLogger.EXPECT().Error(gomock.Any(), gomock.Any()).Times(1) } errorMessage := errorMaskInterceptor.maskUnknownOrInternalErrors(nil, "test", err) - if expectRelpace { + if expectReplace { errorHash := common.ErrorHash(err) expectedMessage := fmt.Sprintf("rpc error: code = %s desc = %s (%s)", st.Message(), errorFrontendMasked, errorHash) diff --git a/common/rpc/interceptor/service_error_interceptor_test.go b/common/rpc/interceptor/service_error_interceptor_test.go index 455b2bc3a10..4bc3a7b6a86 100644 --- a/common/rpc/interceptor/service_error_interceptor_test.go +++ b/common/rpc/interceptor/service_error_interceptor_test.go @@ -48,12 +48,12 @@ func TestServiceErrorInterceptorUnknown(t *testing.T) { assert.Equal(t, codes.Unknown, status.Code(err)) } -func TestServiceErrorInterceptorSer(t *testing.T) { - serErrors := []error{ +func TestServiceErrorInterceptorSet(t *testing.T) { + setErrors := []error{ serialization.NewDeserializationError(enumspb.ENCODING_TYPE_PROTO3, nil), serialization.NewSerializationError(enumspb.ENCODING_TYPE_PROTO3, nil), } - for _, inErr := range serErrors { + for _, inErr := range setErrors { _, err := ServiceErrorInterceptor(context.Background(), nil, nil, 
func(_ context.Context, _ any) (any, error) { return nil, inErr diff --git a/common/rpc/interceptor/telemetry.go b/common/rpc/interceptor/telemetry.go index 1f7a54a7c73..98f3e756d6e 100644 --- a/common/rpc/interceptor/telemetry.go +++ b/common/rpc/interceptor/telemetry.go @@ -364,14 +364,14 @@ func CreateUnaryMetricsHandlerLogTags( methodName string, nsName namespace.Name, ) (metrics.Handler, []tag.Tag) { - overridedMethodName := telemetryUnaryOverrideOperationTag(fullMethod, methodName, req) + overriddenMethodName := telemetryUnaryOverrideOperationTag(fullMethod, methodName, req) if nsName == "" { - return baseMetricsHandler.WithTags(metrics.OperationTag(overridedMethodName), metrics.NamespaceUnknownTag()), - []tag.Tag{tag.Operation(overridedMethodName)} + return baseMetricsHandler.WithTags(metrics.OperationTag(overriddenMethodName), metrics.NamespaceUnknownTag()), + []tag.Tag{tag.Operation(overriddenMethodName)} } - return baseMetricsHandler.WithTags(metrics.OperationTag(overridedMethodName), metrics.NamespaceTag(nsName.String())), - []tag.Tag{tag.Operation(overridedMethodName), tag.WorkflowNamespace(nsName.String())} + return baseMetricsHandler.WithTags(metrics.OperationTag(overriddenMethodName), metrics.NamespaceTag(nsName.String())), + []tag.Tag{tag.Operation(overriddenMethodName), tag.WorkflowNamespace(nsName.String())} } func (ti *TelemetryInterceptor) unaryMetricsHandlerLogTags(req any, @@ -385,11 +385,11 @@ func (ti *TelemetryInterceptor) streamMetricsHandlerLogTags( fullMethod string, methodName string, ) (metrics.Handler, []tag.Tag) { - overridedMethodName := telemetryOverrideOperationTag(fullMethod, methodName) + overriddenMethodName := telemetryOverrideOperationTag(fullMethod, methodName) return ti.metricsHandler.WithTags( - metrics.OperationTag(overridedMethodName), + metrics.OperationTag(overriddenMethodName), metrics.NamespaceUnknownTag(), - ), []tag.Tag{tag.Operation(overridedMethodName)} + ), []tag.Tag{tag.Operation(overriddenMethodName)} } func GetMetricsHandlerFromContext( diff --git a/common/searchattribute/mapper.go b/common/searchattribute/mapper.go index bd2a720756c..6fa0cc1d8d2 100644 --- a/common/searchattribute/mapper.go +++ b/common/searchattribute/mapper.go @@ -25,7 +25,7 @@ type ( // This mapper is to be backwards compatible with versions before v1.20. // Users using standard visibility might have registered custom search attributes. // Those search attributes won't be searchable, as they weren't before version v1.20. - // Thus, this mapper will allow those search attributes to be used without being alised. + // Thus, this mapper will allow those search attributes to be used without being aliased. 
backCompMapper_v1_20 struct { mapper Mapper emptyStringNameTypeMap NameTypeMap diff --git a/common/searchattribute/mapper_test.go b/common/searchattribute/mapper_test.go index 5367113be7f..330df03e0f9 100644 --- a/common/searchattribute/mapper_test.go +++ b/common/searchattribute/mapper_test.go @@ -121,11 +121,11 @@ func Test_UnaliasFields(t *testing.T) { } sb, err := UnaliasFields(mapperProvider, sa, "error-namespace") require.NoError(t, err) - require.Equal(t, sa, sb, "when there is nothin to unalias should return received attributes") + require.Equal(t, sa, sb, "when there is nothing to unalias should return received attributes") sb, err = UnaliasFields(mapperProvider, sa, "unknown-namespace") require.NoError(t, err) - require.Equal(t, sa, sb, "when there is nothin to unalias should return received attributes") + require.Equal(t, sa, sb, "when there is nothing to unalias should return received attributes") // Pass through aliases are not substituted. sa = &commonpb.SearchAttributes{ @@ -135,5 +135,5 @@ func Test_UnaliasFields(t *testing.T) { } sb, err = UnaliasFields(mapperProvider, sa, "test-namespace") require.NoError(t, err) - require.Equal(t, sa, sb, "when there is nothin to unalias should return received attributes") + require.Equal(t, sa, sb, "when there is nothing to unalias should return received attributes") } diff --git a/common/searchattribute/search_attirbute.go b/common/searchattribute/search_attribute.go similarity index 100% rename from common/searchattribute/search_attirbute.go rename to common/searchattribute/search_attribute.go diff --git a/common/searchattribute/search_attribute_mock.go b/common/searchattribute/search_attribute_mock.go index 88d718f7637..a768d9e2df0 100644 --- a/common/searchattribute/search_attribute_mock.go +++ b/common/searchattribute/search_attribute_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: search_attirbute.go +// Source: search_attribute.go // // Generated by this command: // -// mockgen -package searchattribute -source search_attirbute.go -destination search_attribute_mock.go +// mockgen -package searchattribute -source search_attribute.go -destination search_attribute_mock.go // // Package searchattribute is a generated GoMock package. diff --git a/common/tasks/group_by_scheduler.go b/common/tasks/group_by_scheduler.go index b7f8c15a311..047629a996f 100644 --- a/common/tasks/group_by_scheduler.go +++ b/common/tasks/group_by_scheduler.go @@ -95,7 +95,7 @@ func (s *GroupByScheduler[K, T]) getOrCreateScheduler(key K) RunnableScheduler { s.mu.RUnlock() if !ok { s.mu.Lock() - // Check again in case the map was populated between releasing and aquiring the lock. + // Check again in case the map was populated between releasing and acquiring the lock. 
if sched, ok = s.schedulers[key]; !ok { sched = s.options.SchedulerFactory(key) s.schedulers[key] = sched diff --git a/common/testing/event_generator.go b/common/testing/event_generator.go index 6f26012c712..b812dcb23da 100644 --- a/common/testing/event_generator.go +++ b/common/testing/event_generator.go @@ -334,7 +334,7 @@ func (g *EventGenerator) pickRandomVertex( return endVertex.DeepCopy() } -// NewHistoryEventEdge initials a new edge between two HistoryEventVertexes +// NewHistoryEventEdge initials a new edge between two HistoryEventVertices func NewHistoryEventEdge( start Vertex, end Vertex, diff --git a/common/worker_versioning/worker_versioning.go b/common/worker_versioning/worker_versioning.go index e64d8a44f8b..1bb393ec046 100644 --- a/common/worker_versioning/worker_versioning.go +++ b/common/worker_versioning/worker_versioning.go @@ -46,7 +46,7 @@ const ( WorkerDeploymentVersionIdDelimiter = ":" WorkerDeploymentVersionWorkflowIDPrefix = "temporal-sys-worker-deployment-version" WorkerDeploymentWorkflowIDPrefix = "temporal-sys-worker-deployment" - WorkerDeploymentVersionWorkflowIDDelimeter = ":" + WorkerDeploymentVersionWorkflowIDDelimiter = ":" WorkerDeploymentVersionWorkflowIDEscape = "|" ) @@ -228,7 +228,7 @@ func DeploymentIfValid(d *deploymentpb.Deployment) *deploymentpb.Deployment { // MakeDirectiveForWorkflowTask returns a versioning directive based on the following parameters: // - inheritedBuildId: build ID inherited from a past/previous wf execution (for Child WF or CaN) -// - assignedBuildId: the build ID to which the WF is currently assigned (i.e. mutable state's AssginedBuildId) +// - assignedBuildId: the build ID to which the WF is currently assigned (i.e. mutable state's AssignedBuildId) // - stamp: the latest versioning stamp of the execution (only needed for old versioning) // - hasCompletedWorkflowTask: if the wf has completed any WFT // - behavior: workflow's effective behavior @@ -772,7 +772,7 @@ func ValidateTaskVersionDirective( // Effective behavior and deployment of the workflow when History scheduled the WFT. directiveBehavior := directive.GetBehavior() if directiveBehavior != wfBehavior && - // Verisoning 3 pre-release (v1.26, Dec 2024) is not populating request.VersionDirective so + // Versioning 3 pre-release (v1.26, Dec 2024) is not populating request.VersionDirective so // we skip this check until v1.28 if directiveBehavior is unspecified. // TODO (shahab): remove this line after v1.27 is released. 
directiveBehavior != enumspb.VERSIONING_BEHAVIOR_UNSPECIFIED { @@ -973,11 +973,11 @@ func WorkerDeploymentVersionFromStringV32(s string) (*deploymentspb.WorkerDeploy // GenerateDeploymentWorkflowID is a helper that generates a system accepted // workflowID which are used in our Worker Deployment workflows func GenerateDeploymentWorkflowID(deploymentName string) string { - return WorkerDeploymentWorkflowIDPrefix + WorkerDeploymentVersionWorkflowIDDelimeter + deploymentName + return WorkerDeploymentWorkflowIDPrefix + WorkerDeploymentVersionWorkflowIDDelimiter + deploymentName } func GetDeploymentNameFromWorkflowID(workflowID string) string { - _, deploymentName, _ := strings.Cut(workflowID, WorkerDeploymentVersionWorkflowIDDelimeter) + _, deploymentName, _ := strings.Cut(workflowID, WorkerDeploymentVersionWorkflowIDDelimiter) return deploymentName } @@ -988,5 +988,5 @@ func GenerateVersionWorkflowID(deploymentName string, buildID string) string { DeploymentName: deploymentName, BuildId: buildID, }) - return WorkerDeploymentVersionWorkflowIDPrefix + WorkerDeploymentVersionWorkflowIDDelimeter + versionString + return WorkerDeploymentVersionWorkflowIDPrefix + WorkerDeploymentVersionWorkflowIDDelimiter + versionString } diff --git a/components/nexusoperations/config.go b/components/nexusoperations/config.go index 3f2b85711a2..b91cb40d9e0 100644 --- a/components/nexusoperations/config.go +++ b/components/nexusoperations/config.go @@ -123,7 +123,7 @@ var CallbackURLTemplate = dynamicconfig.NewGlobalStringSetting( "component.nexusoperations.callback.endpoint.template", "unset", `Controls the template for generating callback URLs included in Nexus operation requests, which are used to deliver asynchronous completion. -The template can be used to interpolate the {{.NamepaceName}} and {{.NamespaceID}} parameters to construct a publicly accessible URL. +The template can be used to interpolate the {{.NamespaceName}} and {{.NamespaceID}} parameters to construct a publicly accessible URL. Must be set in order to use Nexus Operations.`, ) diff --git a/components/nexusoperations/executors.go b/components/nexusoperations/executors.go index 236c14c3d63..04744d08717 100644 --- a/components/nexusoperations/executors.go +++ b/components/nexusoperations/executors.go @@ -764,7 +764,7 @@ func (e taskExecutor) executeCancelationBackoffTask(env hsm.Environment, node *h }) } -// lookupEndpint gets an endpoint from the registry, preferring to look up by ID and falling back to name lookup. +// lookupEndpoint gets an endpoint from the registry, preferring to look up by ID and falling back to name lookup. // The fallback is a temporary workaround for not implementing endpoint replication, and endpoint ID being a UUID set by // the system. We try to get the endpoint by name to support cases where an operator manually created an endpoint with // the same name in two replicas. 
diff --git a/components/nexusoperations/executors_test.go b/components/nexusoperations/executors_test.go index 12b4310ad9e..002e608d0f0 100644 --- a/components/nexusoperations/executors_test.go +++ b/components/nexusoperations/executors_test.go @@ -37,7 +37,7 @@ import ( ) var endpointEntry = &persistencespb.NexusEndpointEntry{ - Id: "enpdoint-id", + Id: "endpoint-id", Endpoint: &persistencespb.NexusEndpoint{ Spec: &persistencespb.NexusEndpointSpec{ Name: "endpoint", diff --git a/docs/architecture/nexus.md b/docs/architecture/nexus.md index 3d7d35f82ed..e7b528a2752 100644 --- a/docs/architecture/nexus.md +++ b/docs/architecture/nexus.md @@ -74,7 +74,7 @@ To enable Nexus in your deployment: component.nexusoperations.callback.endpoint.template: # The URL must be publicly accessible if the callback is meant to be called by external services. # When using Nexus for cross namespace calls, the URL's host is irrelevant as the address is resolved using - # membership. The URL is a Go template that interpolates the `NamepaceName` and `NamespaceID` variables. + # membership. The URL is a Go template that interpolates the `NamespaceName` and `NamespaceID` variables. - value: https://$PUBLIC_URL:7243/namespaces/{{.NamespaceName}}/nexus/callback component.callbacks.allowedAddresses: # This list is a security mechanism for limiting which callback URLs are accepted by the server. @@ -312,7 +312,7 @@ stateDiagram-v2 Failed --> [*] ``` -Cancelations are continously retried using a [configurable retry policy][nexus-retry-policy] until they succeed, +Cancellations are continuously retried using a [configurable retry policy][nexus-retry-policy] until they succeed, permanently fail, or the operation times out. ### Task Executors @@ -333,7 +333,7 @@ workflow and operation state machine. The [component.nexusoperations.callback.endpoint.template](https://github.com/temporalio/temporal/blob/7c8025aff96af7d72a91af615f1d625817842894/components/nexusoperations/config.go#L69) global dynamic config must be set to construct callback URLs or the executor will fail to process invocation tasks. When routing callbacks to external clusters and non-Temporal destinations, the URL is used and should be a value that is -publically accessible to those external destinations. Callbacks that are routed internally within the cluster resolve +publicly accessible to those external destinations. Callbacks that are routed internally within the cluster resolve the frontend URL via membership or, as a last resort, via static configuration overrides. The timeout for making a single Nexus HTTP call is configurable via: `component.nexusoperations.request.timeout` @@ -350,7 +350,7 @@ retried or continues-as-new. Similarly to Nexus Operations, callbacks are implemented via a hierarchical state machine and a set of executors, which are located in [the components directory](../../components/callbacks). -Callbacks are continously retried using a [configurable retry policy][callback-retry-policy] until they succeed, +Callbacks are continuously retried using a [configurable retry policy][callback-retry-policy] until they succeed, permanently fail, or the workflow's retention period expires. 
The timeout for making a single callback HTTP call is configurable via: `component.callbacks.request.timeout` diff --git a/docs/architecture/speculative-workflow-task.md b/docs/architecture/speculative-workflow-task.md index e109d223313..427da4fb8ef 100644 --- a/docs/architecture/speculative-workflow-task.md +++ b/docs/architecture/speculative-workflow-task.md @@ -23,7 +23,7 @@ the response from the `RecordWorkflowTaskStarted` API. The worker does not know transient, though. If the Workflow Task keeps failing, the attempt counter is increased in the mutable state, and transient Workflow Task events are created again - but no new failure event is written into the history again. When the Workflow Task finally completes, the `WorkflowTaskScheduled` -and `WorkflowTaskStarted` events are written to the history, followed by the `WorklfowTaskCompleted` +and `WorkflowTaskStarted` events are written to the history, followed by the `WorkflowTaskCompleted` event. > #### TODO @@ -61,7 +61,7 @@ Speculative Workflow Task was introduced to make it possible for Workflow Update for when it is rejected. This is why it doesn't persist any events or the mutable state. > #### TODO -> The task processig for Queries could be replaced by using speculative Workflow Tasks under the hood. +> The task processing for Queries could be replaced by using speculative Workflow Tasks under the hood. ## Scheduling of Speculative Workflow Task As of today, speculative Workflow Tasks are only used for Workflow Update, i.e. in the @@ -81,7 +81,7 @@ the speculative Workflow Task is converted to a normal one and creates a transfe eventually reach matching and the worker. The timeout timer task is is created for a `SCHEDULE_TO_START` timeout for every speculative -Workflow Task - even if it is on a *normal* task queue. In comparision, for a normal Workflow Task, the +Workflow Task - even if it is on a *normal* task queue. In comparison, for a normal Workflow Task, the `SCHEDULE_TO_START` timeout timer is only created for *sticky* task queues. ## Start of Speculative Workflow Task @@ -145,7 +145,7 @@ events and create a new Workflow Task as normal. > new events would be added to the history - but heartbeats would not be visible anymore. ## Conversion to Normal Workflow Task -If during the exection of a speculative Workflow Task, a mutable state write is required +If during the execution of a speculative Workflow Task, a mutable state write is required (i.e., a new events comes in), then it is converted to a normal one, and written to the database. This means the `Type` field value is changed to `WORKFLOW_TASK_TYPE_NORMAL`, an in-memory timer is replaced with a persisted timer, and the corresponding speculative Workflow Task `WorkflowTaskScheduled` diff --git a/docs/architecture/workflow-lifecycle.md b/docs/architecture/workflow-lifecycle.md index 98d17e52c43..6362ad7d133 100644 --- a/docs/architecture/workflow-lifecycle.md +++ b/docs/architecture/workflow-lifecycle.md @@ -505,7 +505,7 @@ sequenceDiagram Worker->>Frontend: RespondActivityFailed Frontend->>History: RespondActivityFailed History->>Persistence: UpdateWorkflowExecution -note over Persistence: Append History Events: ActivityTaskFailed, ActivityTaskScheduled
update MutableState & add Timer Task (activity timout) +note over Persistence: Append History Events: ActivityTaskFailed, ActivityTaskScheduled
update MutableState & add Timer Task (activity timeout) Persistence->>History: Update Succeed History->>Frontend: Respond Succeed Frontend->>Worker: Respond Succeed diff --git a/docs/architecture/workflow-update.md b/docs/architecture/workflow-update.md index 67729975a80..e8cada5c385 100644 --- a/docs/architecture/workflow-update.md +++ b/docs/architecture/workflow-update.md @@ -48,7 +48,7 @@ Note that Failure in an Update outcome is different from Update rejection. persisted in mutable state. ## Update Registry -Updates are managed through the `update.Registy` interface. A workflow's Update Registry is stored in +Updates are managed through the `update.Registry` interface. A workflow's Update Registry is stored in its `workflow.ContextImpl` struct. Each Registry has an internal map which stores *admitted and accepted* Updates, i.e., in-flight Updates only. Completed Updates are not stored there - but can still be accessed through the Registry since it has a reference to the mutable state diff --git a/docs/development/tracing.md b/docs/development/tracing.md index cf8a1abc545..8efa197074b 100644 --- a/docs/development/tracing.md +++ b/docs/development/tracing.md @@ -119,7 +119,7 @@ processing code. Spans are created by `go.opentelemetry.io/otel/trace.Tracer` objects which are themselves created by `go.opentelemetry.io/otel/trace.TracerProvider` instances. The `TracerProvider` instances are bound to a single logical service and as such a single Temporal -process will have up to four such instances (for worker, mathcing, history, and +process will have up to four such instances (for worker, matching, history, and frontend services respectively). The `Tracer` object is bound to a single logical _library_ which is different than a _service_. Consider that a history _service_ instance might run code from the temporal common library, gRPC diff --git a/proto/internal/temporal/server/api/historyservice/v1/service.proto b/proto/internal/temporal/server/api/historyservice/v1/service.proto index 855d2f84bbc..158db053ae6 100644 --- a/proto/internal/temporal/server/api/historyservice/v1/service.proto +++ b/proto/internal/temporal/server/api/historyservice/v1/service.proto @@ -111,7 +111,7 @@ service HistoryService { // SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution. // If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history // and a workflow task being created for the execution. - // If workflow is not running or not found, it will first try start workflow with given WorkflowIdResuePolicy, + // If workflow is not running or not found, it will first try start workflow with given WorkflowIdReusePolicy, // and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success. // It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy. rpc SignalWithStartWorkflowExecution (SignalWithStartWorkflowExecutionRequest) returns (SignalWithStartWorkflowExecutionResponse) { @@ -169,7 +169,7 @@ service HistoryService { // scheduled first workflow task in child after recording child started in its mutable state; otherwise, // during namespace failover, it's possible that none of the clusters will schedule the first workflow task. // NOTE: This is an experimental API. If later we found there are more verification API and there's a clear pattern - // of how verification is done, we may unify them into one generic verfication API. 
+ // of how verification is done, we may unify them into one generic verification API. rpc VerifyFirstWorkflowTaskScheduled (VerifyFirstWorkflowTaskScheduledRequest) returns (VerifyFirstWorkflowTaskScheduledResponse) { } @@ -183,7 +183,7 @@ service HistoryService { // recorded before completing the task, otherwise during namespace failover, it's possible that none of the // clusters will record the child result in parent workflow. // NOTE: This is an experimental API. If later we found there are more verification API and there's a clear pattern - // of how verification is done, we may unify them into one generic verfication API. + // of how verification is done, we may unify them into one generic verification API. rpc VerifyChildExecutionCompletionRecorded (VerifyChildExecutionCompletionRecordedRequest) returns (VerifyChildExecutionCompletionRecordedResponse) { } diff --git a/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto b/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto index f615461ce41..abcde0ed901 100644 --- a/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto +++ b/proto/internal/temporal/server/api/matchingservice/v1/request_response.proto @@ -582,7 +582,7 @@ message ListNexusEndpointsRequest { bytes next_page_token = 1; int32 page_size = 2; // The nexus_endpoints table has a monotonically increasing version number that is incremented on every change to - // the table. This field can be used to provide the last known table version in conjuction with the `wait` field to + // the table. This field can be used to provide the last known table version in conjunction with the `wait` field to // long poll on changes to the table. // If next_page_token is not empty and the current table version does not match this field, this request will fail // with a failed precondition error. diff --git a/proto/internal/temporal/server/api/matchingservice/v1/service.proto b/proto/internal/temporal/server/api/matchingservice/v1/service.proto index 993fc290f11..a9c059ae2ae 100644 --- a/proto/internal/temporal/server/api/matchingservice/v1/service.proto +++ b/proto/internal/temporal/server/api/matchingservice/v1/service.proto @@ -118,7 +118,7 @@ service MatchingService { rpc GetBuildIdTaskQueueMapping (GetBuildIdTaskQueueMappingRequest) returns (GetBuildIdTaskQueueMappingResponse) {} // Force loading a task queue partition. Used by matching node owning root partition. // When root partition is loaded this is called for all child partitions. - // This addresses the posibility of unloaded child partitions having backlog, + // This addresses the possibility of unloaded child partitions having backlog, // but not being forwarded/synced to the root partition to find the polling // worker which triggered the root partition being loaded in the first place. rpc ForceLoadTaskQueuePartition (ForceLoadTaskQueuePartitionRequest) returns (ForceLoadTaskQueuePartitionResponse) {} diff --git a/proto/internal/temporal/server/api/persistence/v1/chasm.proto b/proto/internal/temporal/server/api/persistence/v1/chasm.proto index 0c14caab97d..7cf7d96273d 100644 --- a/proto/internal/temporal/server/api/persistence/v1/chasm.proto +++ b/proto/internal/temporal/server/api/persistence/v1/chasm.proto @@ -54,7 +54,7 @@ message ChasmComponentAttributes { // (-- api-linter: core::0141::forbidden-types=disabled --) uint32 type_id = 1; // Tasks are in their insertion order, - // i.e. by versioned transtion and versioned_transition_offset. + // i.e. 
by versioned transition and versioned_transition_offset. repeated Task side_effect_tasks = 2; // Tasks are ordered by their scheduled time, breaking ties by // versioned transition and versioned_transition_offset. diff --git a/proto/internal/temporal/server/api/persistence/v1/tasks.proto b/proto/internal/temporal/server/api/persistence/v1/tasks.proto index 183fc9a2c35..50d84909609 100644 --- a/proto/internal/temporal/server/api/persistence/v1/tasks.proto +++ b/proto/internal/temporal/server/api/persistence/v1/tasks.proto @@ -68,7 +68,7 @@ message TaskQueueInfo { // Whenever locking any metadata as the inactive one (drain-only), this should be set. // If the flag is true, no tasks should be written to the active table until the inactive // table has also been locked (and the flag set there for a potential reverse transition). - // After determinining that the inactive table has no more tasks left, then this + // After determining that the inactive table has no more tasks left, then this // can be cleared on the active table. bool other_has_tasks = 10; } diff --git a/schema/cassandra/temporal/schema.cql b/schema/cassandra/temporal/schema.cql index 9d664fe5981..cbf9bafd5a8 100644 --- a/schema/cassandra/temporal/schema.cql +++ b/schema/cassandra/temporal/schema.cql @@ -126,7 +126,7 @@ CREATE TABLE task_queue_user_data ( data blob, -- temporal.server.api.persistence.v1.TaskQueueUserData data_encoding text, -- Encoding type used for serialization, in practice this should always be proto3 version bigint, -- Version of this row, used for optimistic concurrency - -- task_queue_name is not a part of the parititioning key to allow cheaply iterating all task queues in a single + -- task_queue_name is not a part of the partitioning key to allow cheaply iterating all task queues in a single -- namespace. Access to this table should be infrequent enough that a single partition per namespace can be used. -- Note that this imposes a limit on total task queue user data within one namespace (see the relevant single -- partition Cassandra limits). diff --git a/schema/cassandra/temporal/versioned/v1.8/task_queue_user_data.cql b/schema/cassandra/temporal/versioned/v1.8/task_queue_user_data.cql index e89ccc36335..78ee002bdb8 100644 --- a/schema/cassandra/temporal/versioned/v1.8/task_queue_user_data.cql +++ b/schema/cassandra/temporal/versioned/v1.8/task_queue_user_data.cql @@ -8,7 +8,7 @@ CREATE TABLE task_queue_user_data ( data blob, -- temporal.server.api.persistence.v1.TaskQueueUserData data_encoding text, -- Encoding type used for serialization, in practice this should always be proto3 version bigint, -- Version of this row, used for optimistic concurrency - -- task_queue_name is not a part of the parititioning key to allow cheaply iterating all task queues in a single + -- task_queue_name is not a part of the partitioning key to allow cheaply iterating all task queues in a single -- namespace. Access to this table should be infrequent enough that a single partition per namespace can be used. -- Note that this imposes a limit on total task queue user data within one namespace (see the relevant single -- partition Cassandra limits). 
diff --git a/schema/sqlite/v3/temporal/schema.sql b/schema/sqlite/v3/temporal/schema.sql index 5fb8c4cec07..ad940f10a3c 100644 --- a/schema/sqlite/v3/temporal/schema.sql +++ b/schema/sqlite/v3/temporal/schema.sql @@ -390,7 +390,7 @@ CREATE TABLE nexus_endpoints ( -- Stores the version of Nexus endpoints table as a whole CREATE TABLE nexus_endpoints_partition_status ( - id INT NOT NULL DEFAULT 0 CHECK (id = 0), -- Restrict the primary key to a single value since it will only be used for enpoints + id INT NOT NULL DEFAULT 0 CHECK (id = 0), -- Restrict the primary key to a single value since it will only be used for endpoints version BIGINT NOT NULL, -- Version of the nexus_endpoints table PRIMARY KEY (id) ); diff --git a/schema/sqlite/v3/temporal/versioned/v0.5/nexus_endpoints.sql b/schema/sqlite/v3/temporal/versioned/v0.5/nexus_endpoints.sql index aa29202e467..dac8884f012 100644 --- a/schema/sqlite/v3/temporal/versioned/v0.5/nexus_endpoints.sql +++ b/schema/sqlite/v3/temporal/versioned/v0.5/nexus_endpoints.sql @@ -12,7 +12,7 @@ CREATE TABLE nexus_endpoints ( -- Stores the version of Nexus incoming endpointe as a whole CREATE TABLE nexus_endpoints_partition_status ( - id INT NOT NULL DEFAULT 0 CHECK (id = 0), -- Restrict the primary key to a single value since it will only be used for enpoints + id INT NOT NULL DEFAULT 0 CHECK (id = 0), -- Restrict the primary key to a single value since it will only be used for endpoints version BIGINT NOT NULL, -- Version of the nexus_endpoints table PRIMARY KEY (id) ); diff --git a/service/frontend/nexus_http_handler.go b/service/frontend/nexus_http_handler.go index 7f40edbdacf..bf88ebfd130 100644 --- a/service/frontend/nexus_http_handler.go +++ b/service/frontend/nexus_http_handler.go @@ -36,7 +36,7 @@ import ( type NexusHTTPHandler struct { logger log.Logger nexusHandler http.Handler - enpointRegistry commonnexus.EndpointRegistry + endpointRegistry commonnexus.EndpointRegistry namespaceRegistry namespace.Registry preprocessErrorCounter metrics.CounterFunc auth *authorization.Interceptor @@ -68,7 +68,7 @@ func NewNexusHTTPHandler( ) *NexusHTTPHandler { return &NexusHTTPHandler{ logger: logger, - enpointRegistry: endpointRegistry, + endpointRegistry: endpointRegistry, namespaceRegistry: namespaceRegistry, auth: authInterceptor, namespaceValidationInterceptor: namespaceValidationInterceptor, @@ -191,7 +191,7 @@ func (h *NexusHTTPHandler) dispatchNexusTaskByEndpoint(w http.ResponseWriter, r h.writeNexusFailure(w, http.StatusBadRequest, &nexus.Failure{Message: "invalid URL"}) return } - endpointEntry, err := h.enpointRegistry.GetByID(r.Context(), endpointID) + endpointEntry, err := h.endpointRegistry.GetByID(r.Context(), endpointID) if err != nil { h.logger.Error("invalid Nexus endpoint ID", tag.Error(err)) s, ok := status.FromError(err) diff --git a/service/frontend/workflow_handler.go b/service/frontend/workflow_handler.go index d04a56d9eb4..a3f52e03813 100644 --- a/service/frontend/workflow_handler.go +++ b/service/frontend/workflow_handler.go @@ -916,7 +916,7 @@ func (wh *WorkflowHandler) PollWorkflowTaskQueue(ctx context.Context, request *w } // These errors are expected from some versioning situations. We should not log them, it'd be too noisy. 
- var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superceded + var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superseded var failedPrecond *serviceerror.FailedPrecondition // expected when user data is disabled if errors.As(err, &newerBuild) || errors.As(err, &failedPrecond) { return nil, err @@ -1145,7 +1145,7 @@ func (wh *WorkflowHandler) PollActivityTaskQueue(ctx context.Context, request *w } // These errors are expected from some versioning situations. We should not log them, it'd be too noisy. - var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superceded + var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superseded var failedPrecond *serviceerror.FailedPrecondition // expected when user data is disabled if errors.As(err, &newerBuild) || errors.As(err, &failedPrecond) { return nil, err @@ -4932,7 +4932,7 @@ func (wh *WorkflowHandler) PollNexusTaskQueue(ctx context.Context, request *work } // These errors are expected from some versioning situations. We should not log them, it'd be too noisy. - var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superceded + var newerBuild *serviceerror.NewerBuildExists // expected when versioned poller is superseded var failedPrecond *serviceerror.FailedPrecondition // expected when user data is disabled if errors.As(err, &newerBuild) || errors.As(err, &failedPrecond) { return nil, err @@ -4968,7 +4968,7 @@ func (wh *WorkflowHandler) RespondNexusTaskCompleted(ctx context.Context, reques operationToken = r.OperationId //nolint:staticcheck // SA1019 this field might be set by old clients. } if operationToken == "" { - return nil, serviceerror.NewInvalidArgument("missing opration token in response") + return nil, serviceerror.NewInvalidArgument("missing operation token in response") } tokenLimit := wh.config.MaxNexusOperationTokenLength(request.Namespace) @@ -5278,7 +5278,7 @@ func (wh *WorkflowHandler) validateVersioningInfo(nsName string, id buildIdAndFl func (wh *WorkflowHandler) validateBuildIdCompatibilityUpdate( req *workflowservice.UpdateWorkerBuildIdCompatibilityRequest, ) error { - errDeets := []string{"request to update worker build ID compatability requires: "} + errDeets := []string{"request to update worker build ID compatibility requires: "} checkIdLen := func(id string) { if len(id) > wh.config.WorkerBuildIdSizeLimit() { diff --git a/service/frontend/workflow_handler_test.go b/service/frontend/workflow_handler_test.go index 8d76c394953..54bf181a88e 100644 --- a/service/frontend/workflow_handler_test.go +++ b/service/frontend/workflow_handler_test.go @@ -1251,8 +1251,8 @@ func (s *WorkflowHandlerSuite) TestRegisterNamespace_Success_ClusterNotConfigure s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(false) s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockArchivalMetadata.EXPECT().GetHistoryConfig().Return(archiver.NewDisabledArchvialConfig()) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockArchivalMetadata.EXPECT().GetHistoryConfig().Return(archiver.NewDisabledArchivalConfig()) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchivalConfig()) s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), 
gomock.Any()).Return(nil, serviceerror.NewNamespaceNotFound("missing-namespace")) s.mockMetadataMgr.EXPECT().CreateNamespace(gomock.Any(), gomock.Any()).Return(&persistence.CreateNamespaceResponse{ ID: testNamespaceID, @@ -1625,8 +1625,8 @@ func (s *WorkflowHandlerSuite) TestUpdateNamespace_Success_ClusterNotConfiguredF s.mockMetadataMgr.EXPECT().GetNamespace(gomock.Any(), gomock.Any()).Return(getNamespaceResp, nil) s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes() s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes() - s.mockArchivalMetadata.EXPECT().GetHistoryConfig().Return(archiver.NewDisabledArchvialConfig()) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockArchivalMetadata.EXPECT().GetHistoryConfig().Return(archiver.NewDisabledArchivalConfig()) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchivalConfig()) wh := s.getWorkflowHandler(s.newConfig()) @@ -1885,7 +1885,7 @@ func (s *WorkflowHandlerSuite) TestGetArchivedHistory_Success_GetFirstPage() { } func (s *WorkflowHandlerSuite) TestListArchivedVisibility_Failure_InvalidRequest() { - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchivalConfig()) wh := s.getWorkflowHandler(s.newConfig()) @@ -1895,7 +1895,7 @@ func (s *WorkflowHandlerSuite) TestListArchivedVisibility_Failure_InvalidRequest } func (s *WorkflowHandlerSuite) TestListArchivedVisibility_Failure_ClusterNotConfiguredForArchival() { - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()) + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchivalConfig()) wh := s.getWorkflowHandler(s.newConfig()) diff --git a/service/history/api/multioperation/api.go b/service/history/api/multioperation/api.go index f2d254805d8..a75526d68c3 100644 --- a/service/history/api/multioperation/api.go +++ b/service/history/api/multioperation/api.go @@ -263,7 +263,7 @@ func (uws *updateWithStart) workflowLeaseCallback( res.GetContext().(*workflow.ContextImpl).MutableState = ms // Add the Update. - // NOTE: UpdateWorkflowAction return value is ignored since ther Starter will always create a WFT. + // NOTE: UpdateWorkflowAction return value is ignored since the Starter will always create a WFT. updateReg := res.GetContext().UpdateRegistry(ctx) if _, err := uws.updater.ApplyRequest(ctx, updateReg, ms); err != nil { // Wrapping the error so Update and Start errors can be distinguished later. diff --git a/service/history/api/recordchildworkflowcompleted/api.go b/service/history/api/recordchildworkflowcompleted/api.go index 6b97b0c6953..78b47f4ffda 100644 --- a/service/history/api/recordchildworkflowcompleted/api.go +++ b/service/history/api/recordchildworkflowcompleted/api.go @@ -42,7 +42,7 @@ func Invoke( resetRunID, err := recordChildWorkflowCompleted(ctx, request, shardContext, workflowConsistencyChecker) if errors.Is(err, consts.ErrWorkflowCompleted) { // if the parent was reset, forward the request to the new run pointed by resetRunID - // Note: An alternative solution is to load the current run here ane compare the originalRunIDs of the current run and the closed parent. 
+ // Note: An alternative solution is to load the current run here and compare the originalRunIDs of the current run and the closed parent. // If they match, then deliver it to the current run. We should consider this optimization if we notice that reset chain is longer than 1-2 hops. if resetRunID != "" { if redirectCount >= maxResetRedirectCount { diff --git a/service/history/api/recordchildworkflowcompleted/api_test.go b/service/history/api/recordchildworkflowcompleted/api_test.go index 86f8b7c8616..c21a48aa711 100644 --- a/service/history/api/recordchildworkflowcompleted/api_test.go +++ b/service/history/api/recordchildworkflowcompleted/api_test.go @@ -51,12 +51,12 @@ func Test_Recordchildworkflowcompleted_WithForwards(t *testing.T) { EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, }, } - mockRegistery := namespace.NewMockRegistry(ctrl) - mockRegistery.EXPECT().GetNamespaceByID(testNamespaceID).Return(&namespace.Namespace{}, nil) + mockRegistry := namespace.NewMockRegistry(ctrl) + mockRegistry.EXPECT().GetNamespaceByID(testNamespaceID).Return(&namespace.Namespace{}, nil) mockClusterMetadata := cluster.NewMockMetadata(ctrl) mockClusterMetadata.EXPECT().GetCurrentClusterName().Return("") shardContext := historyi.NewMockShardContext(ctrl) - shardContext.EXPECT().GetNamespaceRegistry().Return(mockRegistery) + shardContext.EXPECT().GetNamespaceRegistry().Return(mockRegistry) shardContext.EXPECT().GetClusterMetadata().Return(mockClusterMetadata) oldParentMutableState := historyi.NewMockMutableState(ctrl) @@ -122,12 +122,12 @@ func Test_Recordchildworkflowcompleted_WithInfiniteForwards(t *testing.T) { EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, }, } - mockRegistery := namespace.NewMockRegistry(ctrl) - mockRegistery.EXPECT().GetNamespaceByID(testNamespaceID).Return(&namespace.Namespace{}, nil) + mockRegistry := namespace.NewMockRegistry(ctrl) + mockRegistry.EXPECT().GetNamespaceByID(testNamespaceID).Return(&namespace.Namespace{}, nil) mockClusterMetadata := cluster.NewMockMetadata(ctrl) mockClusterMetadata.EXPECT().GetCurrentClusterName().Return("") shardContext := historyi.NewMockShardContext(ctrl) - shardContext.EXPECT().GetNamespaceRegistry().Return(mockRegistery) + shardContext.EXPECT().GetNamespaceRegistry().Return(mockRegistry) shardContext.EXPECT().GetClusterMetadata().Return(mockClusterMetadata) oldParentMutableState := historyi.NewMockMutableState(ctrl) diff --git a/service/history/api/respondactivitytaskfailed/api_test.go b/service/history/api/respondactivitytaskfailed/api_test.go index 7e3e8928ccc..b78e296fc74 100644 --- a/service/history/api/respondactivitytaskfailed/api_test.go +++ b/service/history/api/respondactivitytaskfailed/api_test.go @@ -2,6 +2,7 @@ package respondactivitytaskfailed import ( "context" + "errors" "fmt" "testing" @@ -261,7 +262,7 @@ func (s *workflowSuite) Test_LastHeartBeatDetailsExist_UpdatesMutableState() { } func (s *workflowSuite) Test_RetryActivityFailsWithAnError_WillReturnTheError() { - retryError := fmt.Errorf("bizzare error") + retryError := errors.New("bizarre error") uc := newUseCase(UsecaseConfig{ attempt: int32(1), startedEventId: int64(40), diff --git a/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go b/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go index c5974f6ba2a..d4823114def 100644 --- a/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go +++ 
b/service/history/api/respondworkflowtaskcompleted/workflow_task_completed_handler.go @@ -540,7 +540,7 @@ func (handler *workflowTaskCompletedHandler) handlePostCommandEagerExecuteActivi ai, ok := handler.mutableState.GetActivityByActivityID(attr.ActivityId) if !ok { - // activity cancelled in the same worflow task + // activity cancelled in the same workflow task return nil, nil } diff --git a/service/history/chasm_engine_test.go b/service/history/chasm_engine_test.go index 5fc94b69f04..ddf9f9eb0e8 100644 --- a/service/history/chasm_engine_test.go +++ b/service/history/chasm_engine_test.go @@ -572,7 +572,7 @@ func (s *chasmEngineSuite) TestUpdateComponent_Success() { }, ).Times(1) - // TODO: validate returned component once Ref() method of chasm tree is implememented. + // TODO: validate returned component once Ref() method of chasm tree is implemented. _, err := s.engine.UpdateComponent( context.Background(), ref, diff --git a/service/history/configs/config.go b/service/history/configs/config.go index 49768f03578..d21c887c5b9 100644 --- a/service/history/configs/config.go +++ b/service/history/configs/config.go @@ -246,7 +246,7 @@ type Config struct { // DefaultWorkflowTaskTimeout the default workflow task timeout DefaultWorkflowTaskTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter // WorkflowTaskHeartbeatTimeout is to timeout behavior of: RespondWorkflowTaskComplete with ForceCreateNewWorkflowTask == true - // without any commands or messages. After this timeout workflow task will be scheduled to another worker(by clear stickyness). + // without any commands or messages. After this timeout workflow task will be scheduled to another worker(by clear stickiness). WorkflowTaskHeartbeatTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter WorkflowTaskCriticalAttempts dynamicconfig.IntPropertyFn WorkflowTaskRetryMaxInterval dynamicconfig.DurationPropertyFn diff --git a/service/history/consts/const.go b/service/history/consts/const.go index 49ab82241a3..836d19c5dcb 100644 --- a/service/history/consts/const.go +++ b/service/history/consts/const.go @@ -65,8 +65,8 @@ var ( Scope: enumspb.RESOURCE_EXHAUSTED_SCOPE_NAMESPACE, Message: "workflow operation can not be applied because workflow is closing", } - // ErrEventsAterWorkflowFinish is the error indicating server error trying to write events after workflow finish event - ErrEventsAterWorkflowFinish = serviceerror.NewInternal("error validating last event being workflow finish event") + // ErrEventsAfterWorkflowFinish is the error indicating server error trying to write events after workflow finish event + ErrEventsAfterWorkflowFinish = serviceerror.NewInternal("error validating last event being workflow finish event") // ErrQueryEnteredInvalidState is error indicating query entered invalid state ErrQueryEnteredInvalidState = serviceerror.NewInvalidArgument("query entered invalid state, this should be impossible") // ErrConsistentQueryBufferExceeded is error indicating that too many consistent queries have been buffered and until buffered queries are finished new consistent queries cannot be buffered diff --git a/service/history/events/notifier_test.go b/service/history/events/notifier_test.go index a572d4c4466..473a178285f 100644 --- a/service/history/events/notifier_test.go +++ b/service/history/events/notifier_test.go @@ -129,12 +129,12 @@ func (s *notifierSuite) TestMultipleSubscriberWatchingEvents() { subscriberID, channel, err := s.notifier.WatchHistoryEvent(definition.NewWorkflowKey(namespaceID, execution.GetWorkflowId(), 
execution.GetRunId())) s.Nil(err) - timeourChan := time.NewTimer(time.Second * 10).C + timeoutChan := time.NewTimer(time.Second * 10).C select { case msg := <-channel: s.Equal(historyEvent, msg) - case <-timeourChan: + case <-timeoutChan: s.Fail("subscribe to new events timeout") } err = s.notifier.UnwatchHistoryEvent(definition.NewWorkflowKey(namespaceID, execution.GetWorkflowId(), execution.GetRunId()), subscriberID) diff --git a/service/history/history_engine.go b/service/history/history_engine.go index d38f40b4c20..574ddcfd85e 100644 --- a/service/history/history_engine.go +++ b/service/history/history_engine.go @@ -591,7 +591,7 @@ func (e *historyEngineImpl) RespondActivityTaskCanceled( return respondactivitytaskcanceled.Invoke(ctx, req, e.shardContext, e.workflowConsistencyChecker) } -// RecordActivityTaskHeartbeat records an hearbeat for a task. +// RecordActivityTaskHeartbeat records an heartbeat for a task. // This method can be used for two purposes. // - For reporting liveness of the activity. // - For reporting progress of the activity, this can be done even if the liveness is not configured. diff --git a/service/history/history_engine2_test.go b/service/history/history_engine2_test.go index 760f9e0ba59..610114ef0a1 100644 --- a/service/history/history_engine2_test.go +++ b/service/history/history_engine2_test.go @@ -2625,7 +2625,7 @@ func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEvent func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEventFoundOnNonCurrentBranch() { - inititatedVersion := tests.Version - 100 + initiatedVersion := tests.Version - 100 request := &historyservice.VerifyChildExecutionCompletionRecordedRequest{ NamespaceId: tests.NamespaceID.String(), ParentExecution: &commonpb.WorkflowExecution{ @@ -2637,7 +2637,7 @@ func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEvent RunId: "child runId", }, ParentInitiatedId: 123, - ParentInitiatedVersion: inititatedVersion, + ParentInitiatedVersion: initiatedVersion, } ms := workflow.TestGlobalMutableState(s.historyEngine.shardContext, s.mockEventsCache, log.NewTestLogger(), tests.Version, tests.WorkflowID, tests.RunID) @@ -2651,14 +2651,14 @@ func (s *engine2Suite) TestVerifyChildExecutionCompletionRecorded_InitiatedEvent { BranchToken: []byte{1, 2, 3}, Items: []*historyspb.VersionHistoryItem{ - {EventId: 100, Version: inititatedVersion}, + {EventId: 100, Version: initiatedVersion}, {EventId: 456, Version: tests.Version}, }, }, { BranchToken: []byte{4, 5, 6}, Items: []*historyspb.VersionHistoryItem{ - {EventId: 456, Version: inititatedVersion}, + {EventId: 456, Version: initiatedVersion}, }, }, }, diff --git a/service/history/history_engine_test.go b/service/history/history_engine_test.go index d46f8d3b7a0..c3f6dd6ee3b 100644 --- a/service/history/history_engine_test.go +++ b/service/history/history_engine_test.go @@ -2127,7 +2127,7 @@ func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotT s.Nil(err) } -func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout_ZeroOrignalScheduledTime() { +func (s *engineSuite) TestRespondWorkflowTaskCompleted_WorkflowTaskHeartbeatNotTimeout_ZeroOriginalScheduledTime() { namespaceID := tests.NamespaceID we := commonpb.WorkflowExecution{ WorkflowId: tests.WorkflowID, @@ -3572,14 +3572,14 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_NoTimer() { s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, 
nil) s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - detais := payloads.EncodeString("details") + details := payloads.EncodeString("details") _, err := s.historyEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ NamespaceId: tests.NamespaceID.String(), HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ TaskToken: taskToken, Identity: identity, - Details: detais, + Details: details, }, }) s.Nil(err) @@ -3621,14 +3621,14 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatSuccess_TimerRunning() { s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - detais := payloads.EncodeString("details") + details := payloads.EncodeString("details") _, err := s.historyEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ NamespaceId: tests.NamespaceID.String(), HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ TaskToken: taskToken, Identity: identity, - Details: detais, + Details: details, }, }) s.Nil(err) @@ -3675,14 +3675,14 @@ func (s *engineSuite) TestRecordActivityTaskHeartBeatByIDSuccess() { s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(gwmsResponse, nil) s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - detais := payloads.EncodeString("details") + details := payloads.EncodeString("details") _, err := s.historyEngine.RecordActivityTaskHeartbeat(context.Background(), &historyservice.RecordActivityTaskHeartbeatRequest{ NamespaceId: tests.NamespaceID.String(), HeartbeatRequest: &workflowservice.RecordActivityTaskHeartbeatRequest{ TaskToken: taskToken, Identity: identity, - Details: detais, + Details: details, }, }) s.Nil(err) diff --git a/service/history/hsm/registry.go b/service/history/hsm/registry.go index ea06d97eb9c..5e96531c86f 100644 --- a/service/history/hsm/registry.go +++ b/service/history/hsm/registry.go @@ -77,8 +77,8 @@ func (r *Registry) Machine(t string) (def StateMachineDefinition, ok bool) { // RegisterTaskSerializer registers a [TaskSerializer] for a given type. // Returns an [ErrDuplicateRegistration] if a serializer for this task type has already been registered. 
func (r *Registry) RegisterTaskSerializer(t string, def TaskSerializer) error { - if exising, ok := r.tasks[t]; ok { - return fmt.Errorf("%w: task already registered for %v: %v", ErrDuplicateRegistration, t, exising) + if existing, ok := r.tasks[t]; ok { + return fmt.Errorf("%w: task already registered for %v: %v", ErrDuplicateRegistration, t, existing) } r.tasks[t] = def return nil diff --git a/service/history/hsm/tree.go b/service/history/hsm/tree.go index 6a6ca179112..70f29087fd6 100644 --- a/service/history/hsm/tree.go +++ b/service/history/hsm/tree.go @@ -362,7 +362,7 @@ func (n *Node) AddChild(key Key, data any) (*Node, error) { nextVersionedTransition := &persistencespb.VersionedTransition{ NamespaceFailoverVersion: n.backend.GetCurrentVersion(), // The transition count for the backend is only incremented when closing the current transaction, - // but any change to state machine node is a state transtion, + // but any change to state machine node is a state transition, // so we can safely using next transition count here is safe. TransitionCount: n.backend.NextTransitionCount(), } @@ -584,7 +584,7 @@ func MachineTransition[T any](n *Node, transitionFn func(T) (TransitionOutput, e n.persistence.LastUpdateVersionedTransition = &persistencespb.VersionedTransition{ NamespaceFailoverVersion: n.backend.GetCurrentVersion(), // The transition count for the backend is only incremented when closing the current transaction, - // but any change to state machine node is a state transtion, + // but any change to state machine node is a state transition, // so we can safely using next transition count here. TransitionCount: n.backend.NextTransitionCount(), } diff --git a/service/history/ndc/hsm_state_replicator_test.go b/service/history/ndc/hsm_state_replicator_test.go index be05ea4b28e..0f8c364eb0a 100644 --- a/service/history/ndc/hsm_state_replicator_test.go +++ b/service/history/ndc/hsm_state_replicator_test.go @@ -523,7 +523,7 @@ func (s *hsmStateReplicatorSuite) TestSyncHSM_IncomingStateNewer_WorkflowOpen() s.Len(machines.MachinesById, 1) machine := machines.MachinesById["child1"] s.Equal([]byte(hsmtest.State3), machine.Data) - s.Equal(int64(24), machine.TransitionCount) // transition count is cluster local and should only be increamented by 1 + s.Equal(int64(24), machine.TransitionCount) // transition count is cluster local and should only be incremented by 1 s.Len(request.UpdateWorkflowMutation.Tasks[tasks.CategoryTimer], 1) s.Len(request.UpdateWorkflowMutation.Tasks[tasks.CategoryOutbound], 1) s.Empty(request.UpdateWorkflowEvents) @@ -630,7 +630,7 @@ func (s *hsmStateReplicatorSuite) TestSyncHSM_IncomingStateNewer_WorkflowClosed( s.Len(machines.MachinesById, 1) machine := machines.MachinesById["child1"] s.Equal([]byte(hsmtest.State3), machine.Data) - s.Equal(int64(24), machine.TransitionCount) // transition count is cluster local and should only be increamented by 1 + s.Equal(int64(24), machine.TransitionCount) // transition count is cluster local and should only be incremented by 1 s.Len(request.UpdateWorkflowMutation.Tasks[tasks.CategoryTimer], 1) s.Len(request.UpdateWorkflowMutation.Tasks[tasks.CategoryOutbound], 1) return tests.UpdateWorkflowExecutionResponse, nil diff --git a/service/history/ndc/replication_task.go b/service/history/ndc/replication_task.go index 572633fbced..c46d1e514a4 100644 --- a/service/history/ndc/replication_task.go +++ b/service/history/ndc/replication_task.go @@ -74,9 +74,9 @@ var ( ErrInvalidExecution = serviceerror.NewInvalidArgument("invalid execution") // 
ErrInvalidRunID is returned if run ID is invalid ErrInvalidRunID = serviceerror.NewInvalidArgument("invalid run ID") - // ErrEventIDMismatch is returned if event ID mis-matched + // ErrEventIDMismatch is returned if event ID mismatched ErrEventIDMismatch = serviceerror.NewInvalidArgument("event ID mismatch") - // ErrEventVersionMismatch is returned if event version mis-matched + // ErrEventVersionMismatch is returned if event version mismatched ErrEventVersionMismatch = serviceerror.NewInvalidArgument("event version mismatch") // ErrNoNewRunHistory is returned if there is no new run history ErrNoNewRunHistory = serviceerror.NewInvalidArgument("no new run history events") @@ -445,7 +445,7 @@ func validateReplicateEventsRequest( } // validation on NewRunId is done in newReplicationTask, - // so that some backward compatiblity logic can be used by + // so that some backward compatibility logic can be used by // newReplicationTaskFromBatch newRunEvents, err := deserializeBlob(historySerializer, request.NewRunEvents) diff --git a/service/history/ndc/transaction_manager_test.go b/service/history/ndc/transaction_manager_test.go index 8306f4ddc64..a00a5d3f655 100644 --- a/service/history/ndc/transaction_manager_test.go +++ b/service/history/ndc/transaction_manager_test.go @@ -176,7 +176,7 @@ func (s *transactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Closed {EventId: LastCompletedWorkflowTaskStartedEventId, Version: lastWorkflowTaskStartedVersion}, }) histories := versionhistory.NewVersionHistories(versionHistory) - histroySize := rand.Int63() + historySize := rand.Int63() releaseCalled := false @@ -207,7 +207,7 @@ func (s *transactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Closed }).AnyTimes() mutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes() mutableState.EXPECT().GetLastCompletedWorkflowTaskStartedEventId().Return(LastCompletedWorkflowTaskStartedEventId) - mutableState.EXPECT().AddHistorySize(histroySize) + mutableState.EXPECT().AddHistorySize(historySize) s.mockWorkflowResetter.EXPECT().ResetWorkflow( ctx, @@ -236,7 +236,7 @@ func (s *transactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Closed WorkflowID: workflowID, }).Return(&persistence.GetCurrentExecutionResponse{RunID: runID}, nil) - weContext.EXPECT().PersistWorkflowEvents(gomock.Any(), s.mockShard, workflowEvents).Return(histroySize, nil) + weContext.EXPECT().PersistWorkflowEvents(gomock.Any(), s.mockShard, workflowEvents).Return(historySize, nil) weContext.EXPECT().UpdateWorkflowExecutionWithNew( gomock.Any(), s.mockShard, persistence.UpdateWorkflowModeBypassCurrent, nil, nil, historyi.TransactionPolicyPassive, (*historyi.TransactionPolicy)(nil), ).Return(nil) diff --git a/service/history/ndc/workflow_resetter.go b/service/history/ndc/workflow_resetter.go index bb26153b641..379e2b3a036 100644 --- a/service/history/ndc/workflow_resetter.go +++ b/service/history/ndc/workflow_resetter.go @@ -397,7 +397,7 @@ func (r *workflowResetterImpl) persistToDB( return err } resetRunVersion := resetWorkflow.GetMutableState().GetCurrentVersion() - currentRunVerson := currentWorkflow.GetMutableState().GetCurrentVersion() + currentRunVersion := currentWorkflow.GetMutableState().GetCurrentVersion() if _, _, _, err := r.transaction.ConflictResolveWorkflowExecution( ctx, persistence.ConflictResolveWorkflowModeUpdateCurrent, @@ -407,7 +407,7 @@ &resetRunVersion, resetWorkflowSnapshot, resetWorkflowEventsSeq, - &currentRunVerson, + &currentRunVersion,
currentWorkflowMutation, currentWorkflowEventsSeq, currentWorkflow.GetMutableState().IsWorkflow(), @@ -1144,7 +1144,7 @@ func IsTerminatedByResetter(event *historypb.HistoryEvent) bool { return false } -// shouldExcludeAllReapplyEvents returns true if the excludeTypes map contains all the elegible re-apply event types. +// shouldExcludeAllReapplyEvents returns true if the excludeTypes map contains all the eligible re-apply event types. func (r *workflowResetterImpl) shouldExcludeAllReapplyEvents(excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) bool { for key := range enumspb.ResetReapplyExcludeType_name { eventType := enumspb.ResetReapplyExcludeType(key) diff --git a/service/history/ndc/workflow_resetter_test.go b/service/history/ndc/workflow_resetter_test.go index 69cf0ae92f0..2e88608fb61 100644 --- a/service/history/ndc/workflow_resetter_test.go +++ b/service/history/ndc/workflow_resetter_test.go @@ -956,7 +956,7 @@ func (s *workflowResetterSuite) TestReapplyEvents_WithPendingChildren() { } } -// TestReapplyEvents_WithNoPendingChildren asserts that none of the child events are picked when there is no pending child correspondng to the init event ID. +// TestReapplyEvents_WithNoPendingChildren asserts that none of the child events are picked when there is no pending child corresponding to the init event ID. func (s *workflowResetterSuite) TestReapplyEvents_WithNoPendingChildren() { testInitiatedEventID := int64(123) startedEvent := &historypb.HistoryEvent{ diff --git a/service/history/ndc/workflow_state_replicator.go b/service/history/ndc/workflow_state_replicator.go index 8b9d59c9b7b..6118df77e5e 100644 --- a/service/history/ndc/workflow_state_replicator.go +++ b/service/history/ndc/workflow_state_replicator.go @@ -1427,7 +1427,7 @@ BackfillLoop: } if isStateBased { - // If backfill suceeds but later event reapply fails, during task's next retry, + // If backfill succeeds but later event reapply fails, during task's next retry, // we still need to reapply events that have been stored in local DB. 
events, err := r.historySerializer.DeserializeEvents(historyBlob.rawHistory) if err != nil { diff --git a/service/history/queues/action_pending_task_count.go b/service/history/queues/action_pending_task_count.go index c37594f42fa..79e09d24587 100644 --- a/service/history/queues/action_pending_task_count.go +++ b/service/history/queues/action_pending_task_count.go @@ -50,7 +50,7 @@ func (a *actionQueuePendingTask) Name() string { func (a *actionQueuePendingTask) Run(readerGroup *ReaderGroup) bool { // first check if the alert is still valid - if a.monitor.GetTotalPendingTaskCount() <= a.attributes.CiriticalPendingTaskCount { + if a.monitor.GetTotalPendingTaskCount() <= a.attributes.CriticalPendingTaskCount { return false } @@ -64,7 +64,7 @@ func (a *actionQueuePendingTask) Run(readerGroup *ReaderGroup) bool { a.init() a.gatherStatistics(readers) a.findSliceToClear( - int(float64(a.attributes.CiriticalPendingTaskCount) * targetLoadFactor), + int(float64(a.attributes.CriticalPendingTaskCount) * targetLoadFactor), ) a.splitAndClearSlice(readers, readerGroup) return true @@ -76,7 +76,7 @@ func (a *actionQueuePendingTask) shrinkSliceLowTaskCount( for _, reader := range readers { reader.ShrinkSlices() } - return a.monitor.GetTotalPendingTaskCount() <= a.attributes.CiriticalPendingTaskCount + return a.monitor.GetTotalPendingTaskCount() <= a.attributes.CriticalPendingTaskCount } func (a *actionQueuePendingTask) init() { @@ -91,7 +91,7 @@ func (a *actionQueuePendingTask) gatherStatistics( ) { // gather statistic for // 1. total # of pending tasks per key - // 2. for each slice, # of pending taks per key + // 2. for each slice, # of pending tasks per key // 3. for each key, a list of slices that contains pending tasks from that key, // reversely ordered by slice range. Upon unloading, first unload newer slices. for _, reader := range readers { @@ -136,7 +136,8 @@ func (a *actionQueuePendingTask) findSliceToClear( sliceList := a.slicesPerKey[key] if len(sliceList) == 0 { - panic("Found key with non-zero pending task count but has no correspoding Slice") + // This should never happen - indicates a bug in the statistics gathering logic + return } // pop the first slice in the list diff --git a/service/history/queues/action_slice_count.go b/service/history/queues/action_slice_count.go index 4d04eaead0a..8c3a7d7e0ea 100644 --- a/service/history/queues/action_slice_count.go +++ b/service/history/queues/action_slice_count.go @@ -58,7 +58,7 @@ func (a *actionSliceCount) Run(readerGroup *ReaderGroup) (actionTaken bool) { isUniversalPredicate := func(s Slice) bool { return tasks.IsUniverisalPredicate(s.Scope().Predicate) } isNotUniversalPredicate := func(s Slice) bool { return !isUniversalPredicate(s) } - // peform compaction in four stages: + // perform compaction in four stages: // 1. compact slices in non-default reader with non-universal predicate // 2. compact slices in default reader with non-universal predicate // 3. 
compact slices in non-default reader with universal predicate diff --git a/service/history/queues/alerts.go b/service/history/queues/alerts.go index 7ab3d8a45e7..d071439961b 100644 --- a/service/history/queues/alerts.go +++ b/service/history/queues/alerts.go @@ -16,8 +16,8 @@ type ( AlertType int AlertAttributesQueuePendingTaskCount struct { - CurrentPendingTaskCount int - CiriticalPendingTaskCount int + CurrentPendingTaskCount int + CriticalPendingTaskCount int } AlertAttributesReaderStuck struct { diff --git a/service/history/queues/executable.go b/service/history/queues/executable.go index 7c92755bda8..ae582246867 100644 --- a/service/history/queues/executable.go +++ b/service/history/queues/executable.go @@ -83,7 +83,7 @@ var ( // across multiple submissions to scheduler reschedulePolicy = common.CreateTaskReschedulePolicy() taskNotReadyReschedulePolicy = common.CreateTaskNotReadyReschedulePolicy() - taskResourceExhuastedReschedulePolicy = common.CreateTaskResourceExhaustedReschedulePolicy() + taskResourceExhaustedReschedulePolicy = common.CreateTaskResourceExhaustedReschedulePolicy() dependencyTaskNotCompletedReschedulePolicy = common.CreateDependencyTaskNotCompletedReschedulePolicy() ) @@ -419,7 +419,7 @@ func (e *executableImpl) isInvalidTaskError(err error) bool { } if err == consts.ErrTaskVersionMismatch { - metrics.TaskVersionMisMatch.With(e.metricsHandler).Record(1) + metrics.TaskVersionMismatch.With(e.metricsHandler).Record(1) return true } @@ -695,7 +695,7 @@ func (e *executableImpl) Nack(err error) { submitted := false if e.shouldResubmitOnNack(e.Attempt(), err) { // we do not need to know if there any error during submission - // as long as it's not submitted, the execuable should be add + // as long as it's not submitted, the executable should be add // to the rescheduler e.SetScheduledTime(e.timeSource.Now()) submitted = e.scheduler.TrySubmit(e) @@ -824,7 +824,7 @@ func (e *executableImpl) backoffDuration( // upon system resource exhausted error and pick the longer backoff duration backoffDuration = max( backoffDuration, - taskResourceExhuastedReschedulePolicy.ComputeNextDelay(0, e.resourceExhaustedCount, err), + taskResourceExhaustedReschedulePolicy.ComputeNextDelay(0, e.resourceExhaustedCount, err), ) } diff --git a/service/history/queues/executable_test.go b/service/history/queues/executable_test.go index c1f75b8ef11..d308a71ff2e 100644 --- a/service/history/queues/executable_test.go +++ b/service/history/queues/executable_test.go @@ -830,7 +830,7 @@ func (s *executableSuite) TestTaskNack_Resubmit_Success() { go func() { // access internal state in a separate goroutine to check if there's any race condition - // between reschdule and nack. + // between reschedule and nack. s.accessInternalState(executable) }() return true @@ -848,7 +848,7 @@ func (s *executableSuite) TestTaskNack_Resubmit_Fail() { go func() { // access internal state in a separate goroutine to check if there's any race condition - // between reschdule and nack. + // between reschedule and nack. s.accessInternalState(executable) }() }).Times(1) @@ -885,7 +885,7 @@ func (s *executableSuite) TestTaskNack_Reschedule() { go func() { // access internal state in a separate goroutine to check if there's any race condition - // between reschdule and nack. + // between reschedule and nack. 
s.accessInternalState(executable) }() }).Times(1) diff --git a/service/history/queues/mitigator_test.go b/service/history/queues/mitigator_test.go index 741493c78f6..da742d2452a 100644 --- a/service/history/queues/mitigator_test.go +++ b/service/history/queues/mitigator_test.go @@ -61,8 +61,8 @@ func (s *mitigatorSuite) TestMitigate_ActionMatchAlert() { alert: Alert{ AlertType: AlertTypeQueuePendingTaskCount, AlertAttributesQueuePendingTaskCount: &AlertAttributesQueuePendingTaskCount{ - CurrentPendingTaskCount: 1000, - CiriticalPendingTaskCount: 500, + CurrentPendingTaskCount: 1000, + CriticalPendingTaskCount: 500, }, }, expectedAction: &actionQueuePendingTask{}, @@ -115,8 +115,8 @@ func (s *mitigatorSuite) TestMitigate_ResolveAlert() { alert := Alert{ AlertType: AlertTypeQueuePendingTaskCount, AlertAttributesQueuePendingTaskCount: &AlertAttributesQueuePendingTaskCount{ - CurrentPendingTaskCount: 1000, - CiriticalPendingTaskCount: 500, + CurrentPendingTaskCount: 1000, + CriticalPendingTaskCount: 500, }, } s.mitigator.Mitigate(alert) diff --git a/service/history/queues/monitor.go b/service/history/queues/monitor.go index 5e69b0d9230..f986a397ed1 100644 --- a/service/history/queues/monitor.go +++ b/service/history/queues/monitor.go @@ -132,8 +132,8 @@ func (m *monitorImpl) SetSlicePendingTaskCount(slice Slice, count int) { m.sendAlertLocked(&Alert{ AlertType: AlertTypeQueuePendingTaskCount, AlertAttributesQueuePendingTaskCount: &AlertAttributesQueuePendingTaskCount{ - CurrentPendingTaskCount: m.totalPendingTaskCount, - CiriticalPendingTaskCount: criticalTotalTasks, + CurrentPendingTaskCount: m.totalPendingTaskCount, + CriticalPendingTaskCount: criticalTotalTasks, }, }) } diff --git a/service/history/queues/monitor_test.go b/service/history/queues/monitor_test.go index 6f1fd19419a..22b0dc9160f 100644 --- a/service/history/queues/monitor_test.go +++ b/service/history/queues/monitor_test.go @@ -70,8 +70,8 @@ func (s *monitorSuite) TestPendingTasksStats() { s.Equal(Alert{ AlertType: AlertTypeQueuePendingTaskCount, AlertAttributesQueuePendingTaskCount: &AlertAttributesQueuePendingTaskCount{ - CurrentPendingTaskCount: threshold * 2, - CiriticalPendingTaskCount: threshold, + CurrentPendingTaskCount: threshold * 2, + CriticalPendingTaskCount: threshold, }, }, *alert) @@ -90,8 +90,8 @@ func (s *monitorSuite) TestPendingTasksStats() { s.Equal(Alert{ AlertType: AlertTypeQueuePendingTaskCount, AlertAttributesQueuePendingTaskCount: &AlertAttributesQueuePendingTaskCount{ - CurrentPendingTaskCount: threshold*2 + 1, - CiriticalPendingTaskCount: threshold, + CurrentPendingTaskCount: threshold*2 + 1, + CriticalPendingTaskCount: threshold, }, }, *alert) diff --git a/service/history/queues/queue_base.go b/service/history/queues/queue_base.go index 640907ec414..19389606594 100644 --- a/service/history/queues/queue_base.go +++ b/service/history/queues/queue_base.go @@ -37,7 +37,7 @@ const ( // so that the last slice in the default reader won't grow // infinitely. // The benefit of forcing new slice is: - // 1. As long as the last slice won't grow infinitly, task loading + // 1. As long as the last slice won't grow infinitely, task loading // for that slice will complete and it's scope (both range and // predicate) is able to shrink // 2. 
Current task loading implementation can only unload the entire diff --git a/service/history/queues/queue_immediate_test.go b/service/history/queues/queue_immediate_test.go index 996d03921ed..1d5e4a46755 100644 --- a/service/history/queues/queue_immediate_test.go +++ b/service/history/queues/queue_immediate_test.go @@ -63,7 +63,7 @@ func (s *immediateQueueSuite) SetupTest() { GrouperNamespaceID{}, log.NewTestLogger(), metrics.NoopMetricsHandler, - nil, // execuable factory + nil, // executable factory ) } diff --git a/service/history/queues/queue_scheduled.go b/service/history/queues/queue_scheduled.go index 4e873a463a0..8fd52281193 100644 --- a/service/history/queues/queue_scheduled.go +++ b/service/history/queues/queue_scheduled.go @@ -270,9 +270,9 @@ func (p *scheduledQueue) lookAheadTask() { return } - // no look ahead task, next loading will be triggerred at the end of the current + // no look ahead task, next loading will be triggered at the end of the current // look ahead window or when new task notification comes - // NOTE: with this we don't need a separate max poll timer, loading will be triggerred + // NOTE: with this we don't need a separate max poll timer, loading will be triggered // every maxPollInterval + jitter. p.timerGate.Update(lookAheadMaxTime) } diff --git a/service/history/queues/rescheduler.go b/service/history/queues/rescheduler.go index 22a5c3ef6a0..02764674b75 100644 --- a/service/history/queues/rescheduler.go +++ b/service/history/queues/rescheduler.go @@ -45,7 +45,7 @@ type ( Stop() } - rescheduledExecuable struct { + rescheduledExecutable struct { executable Executable rescheduleTime time.Time } @@ -64,7 +64,7 @@ type ( taskChannelKeyFn TaskChannelKeyFn sync.Mutex - pqMap map[TaskChannelKey]collection.Queue[rescheduledExecuable] + pqMap map[TaskChannelKey]collection.Queue[rescheduledExecutable] numExecutables int } ) @@ -87,7 +87,7 @@ func NewRescheduler( timerGate: timer.NewLocalGate(timeSource), taskChannelKeyFn: scheduler.TaskChannelKeyFn(), - pqMap: make(map[TaskChannelKey]collection.Queue[rescheduledExecuable]), + pqMap: make(map[TaskChannelKey]collection.Queue[rescheduledExecutable]), } } @@ -123,7 +123,7 @@ func (r *reschedulerImpl) Add( ) { r.Lock() pq := r.getOrCreatePQLocked(r.taskChannelKeyFn(executable)) - pq.Add(rescheduledExecuable{ + pq.Add(rescheduledExecutable{ executable: executable, rescheduleTime: rescheduleTime, }) @@ -151,7 +151,7 @@ func (r *reschedulerImpl) Reschedule( updatedRescheduleTime = true // set reschedule time for all tasks in this pq to be now - items := make([]rescheduledExecuable, 0, pq.Len()) + items := make([]rescheduledExecutable, 0, pq.Len()) for !pq.IsEmpty() { rescheduled := pq.Remove() // scheduled queue pre-fetches tasks, @@ -271,7 +271,7 @@ func (r *reschedulerImpl) isStopped() bool { func (r *reschedulerImpl) getOrCreatePQLocked( key TaskChannelKey, -) collection.Queue[rescheduledExecuable] { +) collection.Queue[rescheduledExecutable] { if pq, ok := r.pqMap[key]; ok { return pq } @@ -282,18 +282,18 @@ func (r *reschedulerImpl) getOrCreatePQLocked( } func (r *reschedulerImpl) newPriorityQueue( - items []rescheduledExecuable, -) collection.Queue[rescheduledExecuable] { + items []rescheduledExecutable, +) collection.Queue[rescheduledExecutable] { if items == nil { - return collection.NewPriorityQueue(r.rescheduledExecuableCompareLess) + return collection.NewPriorityQueue(r.rescheduledExecutableCompareLess) } - return collection.NewPriorityQueueWithItems(r.rescheduledExecuableCompareLess, items) + return 
collection.NewPriorityQueueWithItems(r.rescheduledExecutableCompareLess, items) } -func (r *reschedulerImpl) rescheduledExecuableCompareLess( - this rescheduledExecuable, - that rescheduledExecuable, +func (r *reschedulerImpl) rescheduledExecutableCompareLess( + this rescheduledExecutable, + that rescheduledExecutable, ) bool { return this.rescheduleTime.Before(that.rescheduleTime) } diff --git a/service/history/queues/slice.go b/service/history/queues/slice.go index 8fc4490d7f3..508af129c8d 100644 --- a/service/history/queues/slice.go +++ b/service/history/queues/slice.go @@ -456,7 +456,7 @@ func (s *SliceImpl) ensurePredicateSizeLimit() { // 0 == unlimited if maxPredicateSize > 0 && s.scope.Predicate.Size() > maxPredicateSize { // Due to the limitations in predicate merging logic, the predicate size can easily grow unbounded. - // The simplest mitigation is to stop merging and replace with the univeral predicate. + // The simplest mitigation is to stop merging and replace with the universal predicate. s.scope.Predicate = predicates.Universal[tasks.Task]() } } diff --git a/service/history/replication/executable_activity_state_task.go b/service/history/replication/executable_activity_state_task.go index 1909dc54450..6b978d7be5b 100644 --- a/service/history/replication/executable_activity_state_task.go +++ b/service/history/replication/executable_activity_state_task.go @@ -127,7 +127,7 @@ func (e *ExecutableActivityStateTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -183,7 +183,7 @@ func (e *ExecutableActivityStateTask) HandleErr(err error) error { case nil, *serviceerror.NotFound: return nil case *serviceerrors.RetryReplication: - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, _, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, diff --git a/service/history/replication/executable_backfill_history_events_task.go b/service/history/replication/executable_backfill_history_events_task.go index 73099a23438..69ba51cd80e 100644 --- a/service/history/replication/executable_backfill_history_events_task.go +++ b/service/history/replication/executable_backfill_history_events_task.go @@ -75,7 +75,7 @@ func (e *ExecutableBackfillHistoryEventsTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -142,7 +142,7 @@ func (e *ExecutableBackfillHistoryEventsTask) HandleErr(err error) error { tag.TaskID(e.ExecutableTask.TaskID()), tag.Error(err), ) - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) switch taskErr := err.(type) { case nil, *serviceerror.NotFound: return nil diff --git a/service/history/replication/executable_history_task.go b/service/history/replication/executable_history_task.go index 46ecc00d2a9..5884086fb18 100644 --- a/service/history/replication/executable_history_task.go +++ b/service/history/replication/executable_history_task.go @@ -98,7 +98,7 @@ func (e *ExecutableHistoryTask) Execute() error { if e.TerminalState() { return nil } - callerInfo := 
getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -148,7 +148,7 @@ func (e *ExecutableHistoryTask) HandleErr(err error) error { case nil, *serviceerror.NotFound: return nil case *serviceerrors.RetryReplication: - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, _, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, diff --git a/service/history/replication/executable_sync_hsm_task.go b/service/history/replication/executable_sync_hsm_task.go index 1ec843629f3..11cdd8d3d87 100644 --- a/service/history/replication/executable_sync_hsm_task.go +++ b/service/history/replication/executable_sync_hsm_task.go @@ -22,7 +22,7 @@ import ( ) // This is mostly copied from ExecutableActivityStateTask -// The 4 replication executable task implemenatations are quite similar +// The 4 replication executable task implementations are quite similar // we may want to do some refactoring later. type ( @@ -77,7 +77,7 @@ func (e *ExecutableSyncHSMTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -125,7 +125,7 @@ func (e *ExecutableSyncHSMTask) HandleErr(err error) error { e.MarkTaskDuplicated() return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) switch retryErr := err.(type) { case nil, *serviceerror.NotFound: return nil diff --git a/service/history/replication/executable_sync_versioned_transition_task.go b/service/history/replication/executable_sync_versioned_transition_task.go index 0b36fd2d205..e38dec1a03e 100644 --- a/service/history/replication/executable_sync_versioned_transition_task.go +++ b/service/history/replication/executable_sync_versioned_transition_task.go @@ -69,7 +69,7 @@ func (e *ExecutableSyncVersionedTransitionTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -119,7 +119,7 @@ func (e *ExecutableSyncVersionedTransitionTask) HandleErr(err error) error { tag.TaskID(e.ExecutableTask.TaskID()), tag.Error(err), ) - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) switch taskErr := err.(type) { case *serviceerrors.SyncState: namespaceName, _, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( diff --git a/service/history/replication/executable_task.go b/service/history/replication/executable_task.go index 3001a27452b..3173ff14d23 100644 --- a/service/history/replication/executable_task.go +++ b/service/history/replication/executable_task.go @@ -528,7 +528,7 @@ func (e *ExecutableTaskImpl) BackFillEvents( } events, err := e.EventSerializer.DeserializeEvents(batch.RawEventBatch) if err != nil { - return serviceerror.NewInternalf("failed to deserailize run history events when backfill: %v", err) + return serviceerror.NewInternalf("failed to deserialize run history events when backfill: %v", err) } newRunEvents = events } @@ -823,7 +823,7 @@ func newTaskContext( return 
context.WithTimeout(ctx, timeout) } -func getReplicaitonCallerInfo(priority enumsspb.TaskPriority) headers.CallerInfo { +func getReplicationCallerInfo(priority enumsspb.TaskPriority) headers.CallerInfo { switch priority { case enumsspb.TASK_PRIORITY_LOW: return headers.SystemPreemptableCallerInfo diff --git a/service/history/replication/executable_verify_versioned_transition_task.go b/service/history/replication/executable_verify_versioned_transition_task.go index a90b48cc2a8..0acd07ffc43 100644 --- a/service/history/replication/executable_verify_versioned_transition_task.go +++ b/service/history/replication/executable_verify_versioned_transition_task.go @@ -77,7 +77,7 @@ func (e *ExecutableVerifyVersionedTransitionTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -258,7 +258,7 @@ func (e *ExecutableVerifyVersionedTransitionTask) HandleErr(err error) error { ) switch taskErr := err.(type) { case *serviceerrors.SyncState: - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, _, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, diff --git a/service/history/replication/executable_workflow_state_task.go b/service/history/replication/executable_workflow_state_task.go index 3218cdc27a1..62b072452ae 100644 --- a/service/history/replication/executable_workflow_state_task.go +++ b/service/history/replication/executable_workflow_state_task.go @@ -80,7 +80,7 @@ func (e *ExecutableWorkflowStateTask) Execute() error { return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) namespaceName, apply, err := e.GetNamespaceInfo(headers.SetCallerInfo( context.Background(), callerInfo, @@ -123,7 +123,7 @@ func (e *ExecutableWorkflowStateTask) HandleErr(err error) error { e.MarkTaskDuplicated() return nil } - callerInfo := getReplicaitonCallerInfo(e.GetPriority()) + callerInfo := getReplicationCallerInfo(e.GetPriority()) switch retryErr := err.(type) { case *serviceerrors.SyncState: namespaceName, _, nsError := e.GetNamespaceInfo(headers.SetCallerInfo( diff --git a/service/history/replication/raw_task_converter.go b/service/history/replication/raw_task_converter.go index f3456c1a342..2dea1cd99da 100644 --- a/service/history/replication/raw_task_converter.go +++ b/service/history/replication/raw_task_converter.go @@ -95,7 +95,7 @@ func (c *SourceTaskConverterImpl) Convert( if namespaceEntry != nil { nsName = namespaceEntry.Name().String() } - callerInfo := getReplicaitonCallerInfo(priority) + callerInfo := getReplicationCallerInfo(priority) ctx, cancel = newTaskContext(nsName, c.config.ReplicationTaskApplyTimeout(), callerInfo) defer cancel() replicationTask, err := c.historyEngine.ConvertReplicationTask(ctx, task, targetClusterID) diff --git a/service/history/replication/stream_sender.go b/service/history/replication/stream_sender.go index b22706c46f7..42f64ea3a14 100644 --- a/service/history/replication/stream_sender.go +++ b/service/history/replication/stream_sender.go @@ -487,7 +487,7 @@ func (s *StreamSenderImpl) sendTasks( }) } - callerInfo := getReplicaitonCallerInfo(priority) + callerInfo := getReplicationCallerInfo(priority) ctx := headers.SetCallerInfo(s.server.Context(), callerInfo) iter, err := 
s.historyEngine.GetReplicationTasksIter( ctx, diff --git a/service/history/shard/context_impl.go b/service/history/shard/context_impl.go index 8906678697f..4b56c3afb47 100644 --- a/service/history/shard/context_impl.go +++ b/service/history/shard/context_impl.go @@ -1987,7 +1987,7 @@ func (s *ContextImpl) acquireShard() { // NOTE: engine is created & started before setting shard state to acquired. // -> namespace handover callback is registered & called before shard is able to serve traffic - // -> information for handover namespace is recorded before shard can servce traffic + // -> information for handover namespace is recorded before shard can service traffic // -> upon shard reload, no history api or task can go through for ns in handover state err = s.transition(contextRequestAcquired{engine: engine}) @@ -2147,7 +2147,7 @@ func newContext( func (s *ContextImpl) initLastUpdatesTime() { // We need to set lastUpdate time to "now" - "wait between shard updates time" + "first update interval". // This is done to make sure that first shard update` will happen around "first update interval" after "now". - // The idea is to allow queue to persist even in the case of (relativly) constantly + // The idea is to allow queue to persist even in the case of (relatively) constantly // moving shards between hosts. // Note: it still may prevent queue from progressing if shard moving rate is too high lastUpdated := s.timeSource.Now() diff --git a/service/history/shard/context_test.go b/service/history/shard/context_test.go index 9370aa88e90..d521b57592f 100644 --- a/service/history/shard/context_test.go +++ b/service/history/shard/context_test.go @@ -318,7 +318,7 @@ func (s *contextSuite) TestDeleteWorkflowExecution_ErrorAndContinue_Success() { s.Equal(tasks.DeleteWorkflowExecutionStageCurrent|tasks.DeleteWorkflowExecutionStageMutableState|tasks.DeleteWorkflowExecutionStageVisibility|tasks.DeleteWorkflowExecutionStageHistory, stage) } -func (s *contextSuite) TestDeleteWorkflowExecution_DeleteVisibilityTaskNotifiction() { +func (s *contextSuite) TestDeleteWorkflowExecution_DeleteVisibilityTaskNotification() { workflowKey := definition.WorkflowKey{ NamespaceID: tests.NamespaceID.String(), WorkflowID: tests.WorkflowID, diff --git a/service/history/shard/task_key_generator.go b/service/history/shard/task_key_generator.go index 5a05e574d26..ebb8fb5ffb2 100644 --- a/service/history/shard/task_key_generator.go +++ b/service/history/shard/task_key_generator.go @@ -71,7 +71,7 @@ func (a *taskKeyGenerator) setTaskKeys( if isScheduledTask { // Persistence might loss precision when saving to DB. // Make the task scheduled time to have the same precision as DB here, - // so that if the comparsion in the next step passes, it's guaranteed + // so that if the comparison in the next step passes, it's guaranteed // the task can be retrieved from DB by queue processor. taskScheduledTime = task.GetVisibilityTime(). Add(common.ScheduledTaskMinPrecision). @@ -88,7 +88,7 @@ func (a *taskKeyGenerator) setTaskKeys( tag.CursorTimestamp(a.taskMinScheduledTime), tag.ValueShardAllocateTimerBeforeRead, ) - // Theoritically we don't need to add the extra 1ms. + // Theoretically we don't need to add the extra 1ms. // Guess it's just to be extra safe here. 
taskScheduledTime = a.taskMinScheduledTime.Add(common.ScheduledTaskMinPrecision) } diff --git a/service/history/shard/task_key_manager_test.go b/service/history/shard/task_key_manager_test.go index 1e79a9c08c5..054d77a9b4d 100644 --- a/service/history/shard/task_key_manager_test.go +++ b/service/history/shard/task_key_manager_test.go @@ -162,7 +162,7 @@ func (s *taskKeyManagerSuite) TestGetExclusiveReaderHighWatermark_WithPendingTas now.Add(-time.Minute), ) - // make two calls here, otherwise the order for assgining task keys is not guaranteed + // make two calls here, otherwise the order for assigning task keys is not guaranteed _, err := s.manager.setAndTrackTaskKeys(map[tasks.Category][]tasks.Task{ tasks.CategoryTransfer: {transferTask}, }) diff --git a/service/history/statemachine_environment.go b/service/history/statemachine_environment.go index 52a1c3990de..512631da9fd 100644 --- a/service/history/statemachine_environment.go +++ b/service/history/statemachine_environment.go @@ -231,7 +231,7 @@ func (e *stateMachineEnvironment) validateStateMachineRef( (ref.StateMachineRef.MachineLastUpdateVersionedTransition != nil && ref.StateMachineRef.MachineLastUpdateVersionedTransition.TransitionCount == 0) || len(ms.GetExecutionInfo().TransitionHistory) == 0 { - // Transtion history was disabled when the ref is generated, + // Transition history was disabled when the ref is generated, // fallback to the old validation logic. return e.validateStateMachineRefWithoutTransitionHistory(ms, ref, potentialStaleState) } diff --git a/service/history/statemachine_environment_test.go b/service/history/statemachine_environment_test.go index b998ef277d5..ca77fe64cec 100644 --- a/service/history/statemachine_environment_test.go +++ b/service/history/statemachine_environment_test.go @@ -145,10 +145,10 @@ func TestValidateStateMachineRef(t *testing.T) { name: "WithTransitionHistory/StalenessCheckFailure", enableTransitionHistory: true, mutateRef: func(ref *hsm.Ref) { - mutableStateVersonedTransition := ref.StateMachineRef.MutableStateVersionedTransition + mutableStateVersionedTransition := ref.StateMachineRef.MutableStateVersionedTransition ref.StateMachineRef.MutableStateVersionedTransition = &persistencespb.VersionedTransition{ - NamespaceFailoverVersion: mutableStateVersonedTransition.NamespaceFailoverVersion + 1, - TransitionCount: mutableStateVersonedTransition.TransitionCount, + NamespaceFailoverVersion: mutableStateVersionedTransition.NamespaceFailoverVersion + 1, + TransitionCount: mutableStateVersionedTransition.TransitionCount, } }, mutateNode: func(node *hsm.Node) {}, @@ -160,10 +160,10 @@ func TestValidateStateMachineRef(t *testing.T) { name: "WithoutTransitionHistory/CanBeStale/MachineStalenessCheckFailure", enableTransitionHistory: false, mutateRef: func(ref *hsm.Ref) { - machineInitialVersonedTransition := ref.StateMachineRef.MachineInitialVersionedTransition + machineInitialVersionedTransition := ref.StateMachineRef.MachineInitialVersionedTransition ref.StateMachineRef.MachineInitialVersionedTransition = &persistencespb.VersionedTransition{ - NamespaceFailoverVersion: machineInitialVersonedTransition.NamespaceFailoverVersion + 1, - TransitionCount: machineInitialVersonedTransition.TransitionCount, + NamespaceFailoverVersion: machineInitialVersionedTransition.NamespaceFailoverVersion + 1, + TransitionCount: machineInitialVersionedTransition.TransitionCount, } }, mutateNode: func(node *hsm.Node) {}, @@ -175,10 +175,10 @@ func TestValidateStateMachineRef(t *testing.T) { name: 
"WithoutTransitionHistory/CannotBeStale/MachineStalenessCheckFailure", enableTransitionHistory: false, mutateRef: func(ref *hsm.Ref) { - machineInitialVersonedTransition := ref.StateMachineRef.MachineInitialVersionedTransition + machineInitialVersionedTransition := ref.StateMachineRef.MachineInitialVersionedTransition ref.StateMachineRef.MachineInitialVersionedTransition = &persistencespb.VersionedTransition{ - NamespaceFailoverVersion: machineInitialVersonedTransition.NamespaceFailoverVersion + 1, - TransitionCount: machineInitialVersonedTransition.TransitionCount, + NamespaceFailoverVersion: machineInitialVersionedTransition.NamespaceFailoverVersion + 1, + TransitionCount: machineInitialVersionedTransition.TransitionCount, } ref.TaskID = tasks.MaximumKey.TaskID }, diff --git a/service/history/tasks/requst_cancel_task.go b/service/history/tasks/request_cancel_task.go similarity index 100% rename from service/history/tasks/requst_cancel_task.go rename to service/history/tasks/request_cancel_task.go diff --git a/service/history/timer_queue_active_task_executor.go b/service/history/timer_queue_active_task_executor.go index c422a0ad763..eb3fafb7336 100644 --- a/service/history/timer_queue_active_task_executor.go +++ b/service/history/timer_queue_active_task_executor.go @@ -239,7 +239,7 @@ Loop: for _, timerSequenceID := range timerSequence.LoadAndSortActivityTimers() { if !queues.IsTimeExpired(task, referenceTime, timerSequenceID.Timestamp) { // timer sequence IDs are sorted, once there is one timer - // sequence ID not expired, all after that wil not expired + // sequence ID not expired, all after that will not expired break Loop } @@ -315,7 +315,7 @@ func (t *timerQueueActiveTaskExecutor) processSingleActivityTimeoutTask( metrics.VersioningBehaviorTag(mutableState.GetEffectiveVersioningBehavior())) if retryState == enumspb.RETRY_STATE_IN_PROGRESS { - // TODO uncommment once RETRY_STATE_PAUSED is supported + // TODO uncomment once RETRY_STATE_PAUSED is supported // || retryState == enumspb.RETRY_STATE_PAUSED { result.shouldUpdateMutableState = true return result, nil diff --git a/service/history/timer_queue_active_task_executor_test.go b/service/history/timer_queue_active_task_executor_test.go index 192da847bea..0ce8595c030 100644 --- a/service/history/timer_queue_active_task_executor_test.go +++ b/service/history/timer_queue_active_task_executor_test.go @@ -1556,7 +1556,7 @@ func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowRunTimeout_Fire() { persistenceMutableState := s.createPersistenceMutableState(mutableState, completionEvent.GetEventId(), completionEvent.GetVersion()) - for _, currrentTime := range []time.Time{ + for _, currentTime := range []time.Time{ s.now.Add(expirationTime - 1*time.Second), s.now.Add(expirationTime + 1*time.Second), } { @@ -1564,7 +1564,7 @@ func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowRunTimeout_Fire() { s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(getWorkflowExecutionResponse, nil) s.mockExecutionMgr.EXPECT().UpdateWorkflowExecution(gomock.Any(), gomock.Any()).Return(tests.UpdateWorkflowExecutionResponse, nil) - s.timeSource.Update(currrentTime) + s.timeSource.Update(currentTime) resp := s.timerQueueActiveTaskExecutor.Execute(context.Background(), s.newTaskExecutable(timerTask)) s.NoError(resp.ExecutionErr) @@ -1866,7 +1866,7 @@ func (s *timerQueueActiveTaskExecutorSuite) TestWorkflowExecutionTimeout_Noop() timerTask := &tasks.WorkflowExecutionTimeoutTask{ NamespaceID: s.namespaceID.String(), WorkflowID: 
execution.GetWorkflowId(), - FirstRunID: uuid.New(), // does not match the firsrt runID of the execution + FirstRunID: uuid.New(), // does not match the first runID of the execution VisibilityTimestamp: s.now, TaskID: s.mustGenerateTaskID(), } diff --git a/service/history/timer_queue_standby_task_executor.go b/service/history/timer_queue_standby_task_executor.go index 9b5535262ae..40caada3836 100644 --- a/service/history/timer_queue_standby_task_executor.go +++ b/service/history/timer_queue_standby_task_executor.go @@ -549,7 +549,7 @@ func (t *timerQueueStandbyTaskExecutor) executeWorkflowExecutionTimeoutTask( // The standby logic should continue to wait for the workflow timeout event to be replicated from the active side. // // Return non-nil post action info to indicate that verification is not done yet. - // The returned post action info can be used to resend history fron active side. + // The returned post action info can be used to resend history from active side. return newExecutionTimerPostActionInfo(mutableState) } diff --git a/service/history/timer_queue_standby_task_executor_test.go b/service/history/timer_queue_standby_task_executor_test.go index 34d95d27b72..b219e6dac16 100644 --- a/service/history/timer_queue_standby_task_executor_test.go +++ b/service/history/timer_queue_standby_task_executor_test.go @@ -1368,7 +1368,7 @@ func (s *timerQueueStandbyTaskExecutorSuite) TestProcessWorkflowExecutionTimeout timerTask := &tasks.WorkflowExecutionTimeoutTask{ NamespaceID: s.namespaceID.String(), WorkflowID: execution.GetWorkflowId(), - FirstRunID: uuid.New(), // does not match the firsrt runID of the execution + FirstRunID: uuid.New(), // does not match the first runID of the execution VisibilityTimestamp: s.now, TaskID: s.mustGenerateTaskID(), } diff --git a/service/history/timer_queue_task_executor_base_test.go b/service/history/timer_queue_task_executor_base_test.go index 07fe2f30000..399d06ffae5 100644 --- a/service/history/timer_queue_task_executor_base_test.go +++ b/service/history/timer_queue_task_executor_base_test.go @@ -82,7 +82,7 @@ func (s *timerQueueTaskExecutorBaseSuite) SetupTest() { s.testShardContext.GetLogger(), metrics.NoopMetricsHandler, config, - true, // isActive (irelevant for test) + true, // isActive (irrelevant for test) ) } diff --git a/service/history/transfer_queue_active_task_executor.go b/service/history/transfer_queue_active_task_executor.go index c72299aa747..ca5f42cfea2 100644 --- a/service/history/transfer_queue_active_task_executor.go +++ b/service/history/transfer_queue_active_task_executor.go @@ -942,7 +942,7 @@ func (t *transferQueueActiveTaskExecutor) processStartChildExecution( } } - // Note: childStarted flag above is computed from the parent's history. When this is TRUE it's guaranteed that the child was succesfully started. + // Note: childStarted flag above is computed from the parent's history. When this is TRUE it's guaranteed that the child was successfully started. // But if it's FALSE then the child *may or maynot* be started (ex: we failed to record ChildExecutionStarted event previously.) // Hence we need to check the child workflow ID and attempt to reconnect before proceeding to start a new instance of the child. // This path is usually taken when the parent is being reset and the reset point (i.e baseWorkflowInfo.LowestCommonAncestorEventId) is after the child was initiated. 
@@ -1107,15 +1107,15 @@ func (t *transferQueueActiveTaskExecutor) verifyChildWorkflow( return "", "", nil } - childsParentRunID := response.WorkflowExecutionInfo.ParentExecution.RunId + childParentRunID := response.WorkflowExecutionInfo.ParentExecution.RunId // Check if the child's parent was the base run for the current run. - if childsParentRunID == mutableState.GetExecutionInfo().OriginalExecutionRunId { + if childParentRunID == mutableState.GetExecutionInfo().OriginalExecutionRunId { return response.WorkflowExecutionInfo.Execution.RunId, response.WorkflowExecutionInfo.FirstRunId, nil } // load the child's parent mutable state. wfKey := mutableState.GetWorkflowKey() - wfKey.RunID = childsParentRunID + wfKey.RunID = childParentRunID wfContext, release, err := getWorkflowExecutionContext( ctx, t.shardContext, @@ -1129,13 +1129,13 @@ func (t *transferQueueActiveTaskExecutor) verifyChildWorkflow( } defer func() { release(retError) }() - childsParentMutableState, err := wfContext.LoadMutableState(ctx, t.shardContext) + childParentMutableState, err := wfContext.LoadMutableState(ctx, t.shardContext) if err != nil { return "", "", err } // now check if the child's parent's original run id and the current run's original run ID are the same. - if childsParentMutableState.GetExecutionInfo().OriginalExecutionRunId == mutableState.GetExecutionInfo().OriginalExecutionRunId { + if childParentMutableState.GetExecutionInfo().OriginalExecutionRunId == mutableState.GetExecutionInfo().OriginalExecutionRunId { return response.WorkflowExecutionInfo.Execution.RunId, response.WorkflowExecutionInfo.FirstRunId, nil } diff --git a/service/history/transfer_queue_standby_task_executor.go b/service/history/transfer_queue_standby_task_executor.go index 28da661178c..96d8d67d6ec 100644 --- a/service/history/transfer_queue_standby_task_executor.go +++ b/service/history/transfer_queue_standby_task_executor.go @@ -29,7 +29,7 @@ import ( ) const ( - recordChildCompletionVerificationFailedMsg = "Failed to verify child execution completion recoreded" + recordChildCompletionVerificationFailedMsg = "Failed to verify child execution completion recorded" ) type ( @@ -437,7 +437,7 @@ func (t *transferQueueStandbyTaskExecutor) processStartChildExecution( if workflowClosed && !(childStarted && childAbandon) { // NOTE: ideally for workflowClosed, child not started, parent close policy is abandon case, // we should continue to start the child workflow in active cluster, so standby logic also need to - // perform the verification. However, we can't do that due to some technial reasons. + // perform the verification. However, we can't do that due to some technical reasons. // Please check the comments in processStartChildExecution in transferQueueActiveTaskExecutor.go // for details. 
return nil, nil diff --git a/service/history/transfer_queue_standby_task_executor_test.go b/service/history/transfer_queue_standby_task_executor_test.go index 9051708990f..0d6cb92d3ae 100644 --- a/service/history/transfer_queue_standby_task_executor_test.go +++ b/service/history/transfer_queue_standby_task_executor_test.go @@ -803,7 +803,7 @@ func (s *transferQueueStandbyTaskExecutorSuite) TestProcessCloseExecution() { persistenceMutableState := s.createPersistenceMutableState(mutableState, event.GetEventId(), event.GetVersion()) s.mockExecutionMgr.EXPECT().GetWorkflowExecution(gomock.Any(), gomock.Any()).Return(&persistence.GetWorkflowExecutionResponse{State: persistenceMutableState}, nil) - s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchvialConfig()).AnyTimes() + s.mockArchivalMetadata.EXPECT().GetVisibilityConfig().Return(archiver.NewDisabledArchivalConfig()).AnyTimes() s.mockShard.SetCurrentTime(s.clusterName, now) s.mockHistoryClient.EXPECT().VerifyChildExecutionCompletionRecorded(gomock.Any(), expectedVerificationRequest).Return(nil, nil) diff --git a/service/history/workflow/cache/cache_test.go b/service/history/workflow/cache/cache_test.go index becf38c3771..cd348864f10 100644 --- a/service/history/workflow/cache/cache_test.go +++ b/service/history/workflow/cache/cache_test.go @@ -853,7 +853,7 @@ func (s *workflowCacheSuite) TestCacheImpl_GetCurrentRunID_NoCurrentRun() { ShardID: s.mockShard.GetShardID(), NamespaceID: namespaceID.String(), WorkflowID: execution.GetWorkflowId(), - }).Return(nil, serviceerror.NewNotFound("current worflow not found")).Times(1) + }).Return(nil, serviceerror.NewNotFound("current workflow not found")).Times(1) ctx, release, err := s.cache.GetOrCreateWorkflowExecution( context.Background(), diff --git a/service/history/workflow/cache/export_testing.go b/service/history/workflow/cache/export_testing.go index 1e810500f91..258345098d5 100644 --- a/service/history/workflow/cache/export_testing.go +++ b/service/history/workflow/cache/export_testing.go @@ -11,7 +11,7 @@ func GetMutableState(cache Cache, key Key) historyi.MutableState { return getWorkflowContext(cache, key).(*workflow.ContextImpl).MutableState } -// PutContextIfNotExist puts the given workflow Context into the cache, if it doens't already exist. +// PutContextIfNotExist puts the given workflow Context into the cache, if it doesn't already exist. // Exported for testing purposes. func PutContextIfNotExist(cache Cache, key Key, value historyi.WorkflowContext) error { _, err := cache.(*cacheImpl).PutIfNotExist(key, &cacheItem{wfContext: value}) diff --git a/service/history/workflow/context.go b/service/history/workflow/context.go index eb44445a8d7..3620b0edb7b 100644 --- a/service/history/workflow/context.go +++ b/service/history/workflow/context.go @@ -166,7 +166,7 @@ func (c *ContextImpl) LoadMutableState(ctx context.Context, shardContext history c.MutableState = mutableState } - // TODO: Use archetype ID instead of name to do the comparsion + // TODO: Use archetype ID instead of name to do the comparison // after adding archetypeID to chasm tasks as well and chasm.ArchetypeAny is removed. 
actualArchetype, err := c.MutableState.ChasmTree().Archetype() if err != nil { @@ -179,7 +179,7 @@ func (c *ContextImpl) LoadMutableState(ctx context.Context, shardContext history tag.NewStringTag("actual-archetype", actualArchetype), ) return nil, serviceerror.NewNotFoundf( - "CHASM Archetype missmatch for %v, expected: %s, actual: %s", + "CHASM Archetype mismatch for %v, expected: %s, actual: %s", c.workflowKey, c.archetype, actualArchetype, diff --git a/service/history/workflow/mutable_state_impl.go b/service/history/workflow/mutable_state_impl.go index bbead180e75..3d62785eac5 100644 --- a/service/history/workflow/mutable_state_impl.go +++ b/service/history/workflow/mutable_state_impl.go @@ -514,7 +514,7 @@ func NewMutableStateFromDB( if len(dbRecord.Checksum.GetValue()) > 0 { switch { - case mutableState.shouldInvalidateCheckum(): + case mutableState.shouldInvalidateChecksum(): mutableState.checksum = nil metrics.MutableStateChecksumInvalidated.With(mutableState.metricsHandler).Record(1) case mutableState.shouldVerifyChecksum(): @@ -1100,7 +1100,7 @@ func (ms *MutableStateImpl) GetCloseVersion() (int64, error) { return common.EmptyVersion, serviceerror.NewInternalf("workflow still running, current state: %v", ms.executionState.State.String()) } - // if workflow is closing in the current transation, + // if workflow is closing in the current transaction, // then the last event is closed event and the event version is the close version if lastEventVersion, ok := ms.hBuilder.LastEventVersion(); ok { return lastEventVersion, nil @@ -3237,7 +3237,7 @@ func (ms *MutableStateImpl) validateBuildIdRedirectInfo( if assignedBuildId == "" && !ms.HasCompletedAnyWorkflowTask() { // If build ID is being set for the first time, and no progress is made by unversioned workers we don't - // increment redirect counter. This is to keep the redirect counter zero for verisoned WFs that + // increment redirect counter. This is to keep the redirect counter zero for versioned WFs that // do not experience any redirects, but only initial build ID assignment. return redirectCounter, nil } @@ -3327,7 +3327,7 @@ func (ms *MutableStateImpl) UpdateBuildIdAssignment(buildId string) error { // effective version of the workflow (aka, the override version if override is set). // // If deprecated Deployment-based APIs are in use and the workflow is pinned, `pinned::` -// will be appended to the BuilIds list if it is not already present. The deployment will be +// will be appended to the BuildIds list if it is not already present. The deployment will be // the effective deployment of the workflow (aka the override deployment_series and build_id if set). 
// // For all other workflows (ms.GetEffectiveVersioningBehavior() != PINNED), this will append a tag to BuildIds @@ -7602,7 +7602,7 @@ func (ms *MutableStateImpl) validateNoEventsAfterWorkflowFinish( tag.WorkflowID(ms.executionInfo.WorkflowId), tag.WorkflowRunID(ms.executionState.RunId), ) - return consts.ErrEventsAterWorkflowFinish + return consts.ErrEventsAfterWorkflowFinish } } @@ -7931,7 +7931,7 @@ func (ms *MutableStateImpl) shouldVerifyChecksum() bool { return rand.Intn(100) < ms.config.MutableStateChecksumVerifyProbability(ms.namespaceEntry.Name().String()) } -func (ms *MutableStateImpl) shouldInvalidateCheckum() bool { +func (ms *MutableStateImpl) shouldInvalidateChecksum() bool { invalidateBeforeEpochSecs := int64(ms.config.MutableStateChecksumInvalidateBefore()) if invalidateBeforeEpochSecs > 0 { invalidateBefore := time.Unix(invalidateBeforeEpochSecs, 0).UTC() diff --git a/service/history/workflow/mutable_state_impl_test.go b/service/history/workflow/mutable_state_impl_test.go index 9a8090e6e59..732082444ea 100644 --- a/service/history/workflow/mutable_state_impl_test.go +++ b/service/history/workflow/mutable_state_impl_test.go @@ -1408,16 +1408,16 @@ func (s *mutableStateSuite) TestChecksumProbabilities() { func (s *mutableStateSuite) TestChecksumShouldInvalidate() { s.mockConfig.MutableStateChecksumInvalidateBefore = func() float64 { return 0 } - s.False(s.mutableState.shouldInvalidateCheckum()) + s.False(s.mutableState.shouldInvalidateChecksum()) s.mutableState.executionInfo.LastUpdateTime = timestamp.TimeNowPtrUtc() s.mockConfig.MutableStateChecksumInvalidateBefore = func() float64 { return float64((s.mutableState.executionInfo.LastUpdateTime.AsTime().UnixNano() / int64(time.Second)) + 1) } - s.True(s.mutableState.shouldInvalidateCheckum()) + s.True(s.mutableState.shouldInvalidateChecksum()) s.mockConfig.MutableStateChecksumInvalidateBefore = func() float64 { return float64((s.mutableState.executionInfo.LastUpdateTime.AsTime().UnixNano() / int64(time.Second)) - 1) } - s.False(s.mutableState.shouldInvalidateCheckum()) + s.False(s.mutableState.shouldInvalidateChecksum()) } func (s *mutableStateSuite) TestUpdateWorkflowStateStatus_Table() { @@ -2703,13 +2703,13 @@ func (s *mutableStateSuite) TestTotalEntitiesCount() { s.NoError(err) updateID := "random-updateId" - accptEvent, err := s.mutableState.AddWorkflowExecutionUpdateAcceptedEvent( + acceptEvent, err := s.mutableState.AddWorkflowExecutionUpdateAcceptedEvent( updateID, "random", 0, nil) s.NoError(err) - s.NotNil(accptEvent) + s.NotNil(acceptEvent) completedEvent, err := s.mutableState.AddWorkflowExecutionUpdateCompletedEvent( - accptEvent.EventId, &updatepb.Response{Meta: &updatepb.Meta{UpdateId: updateID}}) + acceptEvent.EventId, &updatepb.Response{Meta: &updatepb.Meta{UpdateId: updateID}}) s.NoError(err) s.NotNil(completedEvent) @@ -3111,7 +3111,7 @@ func (s *mutableStateSuite) TestRolloverAutoResetPointsWithExpiringTime() { func (s *mutableStateSuite) TestCloseTransactionUpdateTransition() { namespaceEntry := tests.GlobalNamespaceEntry - completWorkflowTaskFn := func(ms historyi.MutableState) { + completeWorkflowTaskFn := func(ms historyi.MutableState) { workflowTaskInfo := ms.GetStartedWorkflowTask() _, err := ms.AddWorkflowTaskCompletedEvent( workflowTaskInfo, @@ -3133,7 +3133,7 @@ func (s *mutableStateSuite) TestCloseTransactionUpdateTransition() { dbState.BufferedEvents = nil }, txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { - completWorkflowTaskFn(ms) + 
completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsMutation(historyi.TransactionPolicyActive) if err != nil { @@ -3274,7 +3274,7 @@ func (s *mutableStateSuite) TestCloseTransactionUpdateTransition() { dbState.BufferedEvents = nil }, txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsSnapshot(historyi.TransactionPolicyActive) if err != nil { @@ -3292,7 +3292,7 @@ func (s *mutableStateSuite) TestCloseTransactionUpdateTransition() { txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { ms.GetExecutionInfo().PreviousTransitionHistory = ms.GetExecutionInfo().TransitionHistory ms.GetExecutionInfo().TransitionHistory = nil - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsSnapshot(historyi.TransactionPolicyActive) if err != nil { @@ -3350,7 +3350,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti err := s.mockShard.StateMachineRegistry().RegisterMachine(stateMachineDef) s.NoError(err) - completWorkflowTaskFn := func(ms historyi.MutableState) *historypb.HistoryEvent { + completeWorkflowTaskFn := func(ms historyi.MutableState) *historypb.HistoryEvent { workflowTaskInfo := ms.GetStartedWorkflowTask() completedEvent, err := ms.AddWorkflowTaskCompletedEvent( workflowTaskInfo, @@ -3378,7 +3378,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "Activity", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) scheduledEvent, _, err := ms.AddActivityTaskScheduledEvent( completedEvent.GetEventId(), &commandpb.ScheduleActivityTaskCommandAttributes{}, @@ -3404,7 +3404,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "UserTimer", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) newTimerID := "new-timer-id" _, _, err := ms.AddTimerStartedEvent( completedEvent.GetEventId(), @@ -3432,7 +3432,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "ChildExecution", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) initiatedEvent, _, err := ms.AddStartChildWorkflowExecutionInitiatedEvent( completedEvent.GetEventId(), &commandpb.StartChildWorkflowExecutionCommandAttributes{}, @@ -3458,7 +3458,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "RequestCancelExternal", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) initiatedEvent, _, err := ms.AddRequestCancelExternalWorkflowExecutionInitiatedEvent( completedEvent.GetEventId(), uuid.New(), @@ -3485,7 +3485,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "SignalExternal", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) initiatedEvent, _, err := ms.AddSignalExternalWorkflowExecutionInitiatedEvent( completedEvent.GetEventId(), uuid.New(), @@ -3549,7 +3549,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "WorkflowTask/Completed", testFn: 
func(ms historyi.MutableState) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) _, _, err := ms.CloseTransactionAsMutation(historyi.TransactionPolicyActive) s.NoError(err) @@ -3561,7 +3561,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "WorkflowTask/Scheduled", testFn: func(ms historyi.MutableState) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) _, err := ms.AddWorkflowTaskScheduledEvent(false, enumsspb.WORKFLOW_TASK_TYPE_NORMAL) s.NoError(err) @@ -3575,7 +3575,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "Visibility", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) _, err := ms.AddUpsertWorkflowSearchAttributesEvent( completedEvent.EventId, &commandpb.UpsertWorkflowSearchAttributesCommandAttributes{}, @@ -3592,7 +3592,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "ExecutionState", testFn: func(ms historyi.MutableState) { - completedEvent := completWorkflowTaskFn(ms) + completedEvent := completeWorkflowTaskFn(ms) _, err := ms.AddCompletedWorkflowEvent( completedEvent.EventId, &commandpb.CompleteWorkflowExecutionCommandAttributes{}, @@ -3610,7 +3610,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "HSM/CloseAsMutation", testFn: func(ms historyi.MutableState) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) buildHSMFn(ms) _, _, err := ms.CloseTransactionAsMutation(historyi.TransactionPolicyActive) @@ -3631,7 +3631,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti { name: "HSM/CloseAsSnapshot", testFn: func(ms historyi.MutableState) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) buildHSMFn(ms) _, _, err := ms.CloseTransactionAsSnapshot(historyi.TransactionPolicyActive) @@ -3677,7 +3677,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackLastUpdateVersionedTransiti func (s *mutableStateSuite) TestCloseTransactionHandleUnknownVersionedTransition() { namespaceEntry := tests.GlobalNamespaceEntry - completWorkflowTaskFn := func(ms historyi.MutableState) { + completeWorkflowTaskFn := func(ms historyi.MutableState) { workflowTaskInfo := ms.GetStartedWorkflowTask() _, err := ms.AddWorkflowTaskCompletedEvent( workflowTaskInfo, @@ -3701,7 +3701,7 @@ func (s *mutableStateSuite) TestCloseTransactionHandleUnknownVersionedTransition dbState.BufferedEvents = nil }, txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsMutation(historyi.TransactionPolicyPassive) if err != nil { @@ -3716,7 +3716,7 @@ func (s *mutableStateSuite) TestCloseTransactionHandleUnknownVersionedTransition dbState.BufferedEvents = nil }, txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsMutation(historyi.TransactionPolicyActive) if err != nil { @@ -3794,7 +3794,7 @@ func (s *mutableStateSuite) TestCloseTransactionHandleUnknownVersionedTransition dbState.BufferedEvents = nil }, txFunc: func(ms historyi.MutableState) (*persistencespb.WorkflowExecutionInfo, error) { - completWorkflowTaskFn(ms) + completeWorkflowTaskFn(ms) mutation, _, err := ms.CloseTransactionAsSnapshot(historyi.TransactionPolicyActive) if err != nil { 
@@ -4689,8 +4689,8 @@ func (s *mutableStateSuite) TestCloseTransactionTrackTombstones() { s.NoError(err) currentVersionedTransition := mutableState.CurrentVersionedTransition() - newVersionedTranstion := common.CloneProto(currentVersionedTransition) - newVersionedTranstion.TransitionCount += 1 + newVersionedTransition := common.CloneProto(currentVersionedTransition) + newVersionedTransition.TransitionCount += 1 _, err = mutableState.StartTransaction(s.namespaceEntry) s.NoError(err) @@ -4704,7 +4704,7 @@ func (s *mutableStateSuite) TestCloseTransactionTrackTombstones() { tombstoneBatches := mutableState.GetExecutionInfo().SubStateMachineTombstoneBatches s.Len(tombstoneBatches, 1) tombstoneBatch := tombstoneBatches[0] - protorequire.ProtoEqual(s.T(), newVersionedTranstion, tombstoneBatch.VersionedTransition) + protorequire.ProtoEqual(s.T(), newVersionedTransition, tombstoneBatch.VersionedTransition) s.True(tombstoneExists(tombstoneBatch.StateMachineTombstones, expectedTombstone)) }) } @@ -4737,8 +4737,8 @@ func (s *mutableStateSuite) TestCloseTransactionTrackTombstones_CapIfLargerThanL } currentVersionedTransition := mutableState.CurrentVersionedTransition() - newVersionedTranstion := common.CloneProto(currentVersionedTransition) - newVersionedTranstion.TransitionCount += 1 + newVersionedTransition := common.CloneProto(currentVersionedTransition) + newVersionedTransition.TransitionCount += 1 signalMap := mutableState.GetPendingSignalExternalInfos() for i := 0; i < s.mockConfig.MutableStateTombstoneCountLimit(); i++ { signalMap[int64(76+i)] = &persistencespb.SignalInfo{ @@ -4788,8 +4788,8 @@ func (s *mutableStateSuite) TestCloseTransactionTrackTombstones_OnlyTrackFirstEm } currentVersionedTransition := mutableState.CurrentVersionedTransition() - newVersionedTranstion := common.CloneProto(currentVersionedTransition) - newVersionedTranstion.TransitionCount += 1 + newVersionedTransition := common.CloneProto(currentVersionedTransition) + newVersionedTransition.TransitionCount += 1 _, err = mutableState.StartTransaction(s.namespaceEntry) s.NoError(err) @@ -5843,7 +5843,7 @@ func (s *mutableStateSuite) TestDeleteCHASMPureTasks() { expectedRemaining: 3, }, { - name: "paritial", + name: "partial", maxScheduledTime: now.Add(2 * time.Minute), expectedRemaining: 2, }, diff --git a/service/history/workflow/mutable_state_rebuilder.go b/service/history/workflow/mutable_state_rebuilder.go index 3ff6ee668d3..4d85a96d935 100644 --- a/service/history/workflow/mutable_state_rebuilder.go +++ b/service/history/workflow/mutable_state_rebuilder.go @@ -216,7 +216,7 @@ func (b *MutableStateRebuilderImpl) applyEvents( // since we do not use stickiness on the standby side // there shall be no workflowTask schedule to start timeout - // NOTE: at the beginning of the loop, stickyness is cleared + // NOTE: at the beginning of the loop, stickiness is cleared if err := taskGenerator.GenerateScheduleWorkflowTaskTasks( workflowTask.ScheduledEventID, ); err != nil { @@ -270,7 +270,7 @@ func (b *MutableStateRebuilderImpl) applyEvents( if workflowTask != nil { // since we do not use stickiness on the standby side // there shall be no workflowTask schedule to start timeout - // NOTE: at the beginning of the loop, stickyness is cleared + // NOTE: at the beginning of the loop, stickiness is cleared if err := taskGenerator.GenerateScheduleWorkflowTaskTasks( workflowTask.ScheduledEventID, ); err != nil { @@ -292,7 +292,7 @@ func (b *MutableStateRebuilderImpl) applyEvents( if workflowTask != nil { // since we do not use stickiness on 
the standby side // there shall be no workflowTask schedule to start timeout - // NOTE: at the beginning of the loop, stickyness is cleared + // NOTE: at the beginning of the loop, stickiness is cleared if err := taskGenerator.GenerateScheduleWorkflowTaskTasks( workflowTask.ScheduledEventID, ); err != nil { @@ -620,7 +620,7 @@ func (b *MutableStateRebuilderImpl) applyEvents( newRunID = continuedAsNewRunID } else if newRunID != continuedAsNewRunID { return nil, serviceerror.NewInternalf( - "ApplyEvents encounted newRunID mismatch for continuedAsNew event, task newRunID: %v, event newRunID: %v", + "ApplyEvents encountered newRunID mismatch for continuedAsNew event, task newRunID: %v, event newRunID: %v", newRunID, continuedAsNewRunID, ) diff --git a/service/history/workflow/state_transition_history_test.go b/service/history/workflow/state_transition_history_test.go index c50799a7bed..45b98413187 100644 --- a/service/history/workflow/state_transition_history_test.go +++ b/service/history/workflow/state_transition_history_test.go @@ -8,7 +8,7 @@ import ( "go.temporal.io/server/service/history/workflow" ) -func TestUpdatedTranstionHistory(t *testing.T) { +func TestUpdatedTransitionHistory(t *testing.T) { var hist []*persistencespb.VersionedTransition hist = workflow.UpdatedTransitionHistory(hist, 1) protorequire.ProtoSliceEqual(t, diff --git a/service/history/workflow/task_generator.go b/service/history/workflow/task_generator.go index 86956096815..6813f033290 100644 --- a/service/history/workflow/task_generator.go +++ b/service/history/workflow/task_generator.go @@ -876,11 +876,11 @@ func generateSubStateMachineTask( transitionCount int64, task hsm.Task, ) error { - ser, ok := stateMachineRegistry.TaskSerializer(task.Type()) + serializer, ok := stateMachineRegistry.TaskSerializer(task.Type()) if !ok { return serviceerror.NewInternalf("no task serializer for %v", task.Type()) } - data, err := ser.Serialize(task) + data, err := serializer.Serialize(task) if err != nil { return err } diff --git a/service/history/workflow/task_refresher.go b/service/history/workflow/task_refresher.go index 938ba10040e..4eaf431663c 100644 --- a/service/history/workflow/task_refresher.go +++ b/service/history/workflow/task_refresher.go @@ -591,7 +591,7 @@ func (r *TaskRefresherImpl) refreshTasksForSubStateMachines( // NOTE: Not all callers of TaskRefresher goes through the closeTransaction process. // If we were to regenerate tasks here by doing a state machine transition and return // a TransitionOutput, then, we need to - // 1. Call taskGenerator.GenerateDirtySubStateMachineTasks explictly to make sure + // 1. Call taskGenerator.GenerateDirtySubStateMachineTasks explicitly to make sure // tasks are added to mutable state. // 2. Make GenerateDirtySubStateMachineTasks idempotent, so that if the logic // does go through closeTransaction, no duplicate tasks are generated. 
diff --git a/service/history/workflow/update/registry_test.go b/service/history/workflow/update/registry_test.go index 8988e5a4124..e064a78c1bf 100644 --- a/service/history/workflow/update/registry_test.go +++ b/service/history/workflow/update/registry_test.go @@ -280,7 +280,7 @@ func TestFindOrCreate(t *testing.T) { limit += 1 _, existed, err = reg.FindOrCreate(context.Background(), tv2.UpdateID()) - require.NoError(t, err, "update #2 should have beeen created after limit increase") + require.NoError(t, err, "update #2 should have been created after limit increase") require.False(t, existed) require.Equal(t, 2, reg.Len()) }) diff --git a/service/history/workflow/update/update_test.go b/service/history/workflow/update/update_test.go index 89c441f1a2b..f0be2894441 100644 --- a/service/history/workflow/update/update_test.go +++ b/service/history/workflow/update/update_test.go @@ -529,14 +529,14 @@ func TestUpdateState(t *testing.T) { title: "transition to stateCompleted via stateAccepted in one WFT", apply: func() { // start waiters - accptCh := make(chan any, 1) + acceptCh := make(chan any, 1) go func() { status, err := upd.WaitLifecycleStage(context.Background(), UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED, 100*time.Millisecond) if err != nil { - accptCh <- err + acceptCh <- err return } - accptCh <- status.Outcome + acceptCh <- status.Outcome }() complCh := make(chan any, 1) go func() { @@ -565,9 +565,9 @@ func TestUpdateState(t *testing.T) { require.True(t, completed, "update state should now be completed") // ensure both waiter received completed response - accptWaiterRes := <-accptCh + acceptWaiterRes := <-acceptCh complWaiterRes := <-complCh - require.EqualExportedValues(t, successOutcome, accptWaiterRes) + require.EqualExportedValues(t, successOutcome, acceptWaiterRes) require.EqualExportedValues(t, successOutcome, complWaiterRes) // new waiter receives same response @@ -580,14 +580,14 @@ func TestUpdateState(t *testing.T) { title: "transition to stateAborted via stateAccepted in one WFT", apply: func() { // start waiters - accptCh := make(chan any, 1) + acceptCh := make(chan any, 1) go func() { status, err := upd.WaitLifecycleStage(context.Background(), UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED, 100*time.Millisecond) if err != nil { - accptCh <- err + acceptCh <- err return } - accptCh <- status.Outcome + acceptCh <- status.Outcome }() complCh := make(chan any, 1) go func() { @@ -615,9 +615,9 @@ func TestUpdateState(t *testing.T) { require.False(t, completed, "completed call back should not be called when aborted") // ensure both waiter received completed response - accptWaiterRes := <-accptCh + acceptWaiterRes := <-acceptCh complWaiterRes := <-complCh - require.EqualExportedValues(t, abortedOutcome, accptWaiterRes) + require.EqualExportedValues(t, abortedOutcome, acceptWaiterRes) require.EqualExportedValues(t, abortedOutcome, complWaiterRes) // new waiter receives same response diff --git a/service/matching/fair_backlog_manager.go b/service/matching/fair_backlog_manager.go index 86e9b9fd9dc..acb21d296ac 100644 --- a/service/matching/fair_backlog_manager.go +++ b/service/matching/fair_backlog_manager.go @@ -63,7 +63,7 @@ func newFairBacklogManager( isDraining bool, ) *fairBacklogManagerImpl { // For the purposes of taskQueueDB, call this just a TaskManager. It'll return errors if we - // use it incorectly. TODO(fairness): consider a cleaner way of doing this. + // use it incorrectly. TODO(fairness): consider a cleaner way of doing this. 
taskManager := persistence.TaskManager(fairTaskManager) bmg := &fairBacklogManagerImpl{ diff --git a/service/matching/matcher.go b/service/matching/matcher.go index 96a30b69cd3..55b449eb6a9 100644 --- a/service/matching/matcher.go +++ b/service/matching/matcher.go @@ -257,7 +257,7 @@ func (tm *TaskMatcher) OfferQuery(ctx context.Context, task *internalTask) (*mat return syncOfferTask(ctx, tm, task, tm.queryTaskC, tm.fwdr.ForwardQueryTask, true) } -// OfferNexusTask either matchs a task to a local poller or forwards it if no local pollers available. +// OfferNexusTask either matches a task to a local poller or forwards it if no local pollers available. // Local match is always attempted before forwarding. If local match occurs response and error are both nil, if // forwarding occurs then response or error is returned. func (tm *TaskMatcher) OfferNexusTask(ctx context.Context, task *internalTask) (*matchingservice.DispatchNexusTaskResponse, error) { diff --git a/service/matching/matcher_data.go b/service/matching/matcher_data.go index a5b7e33ccfc..1a644ee8de1 100644 --- a/service/matching/matcher_data.go +++ b/service/matching/matcher_data.go @@ -114,10 +114,10 @@ func (t *taskPQ) Less(i int, j int) bool { // TODO(pri): ready time is not task-specific yet, we only have whole-queue, so we don't // need to consider this here yet. // // ready time - // aready, bready := max(t.now, t.readyTimeForTask(a)), max(t.now, t.readyTimeForTask(b)) - // if aready < bready { + // aReady, bReady := max(t.now, t.readyTimeForTask(a)), max(t.now, t.readyTimeForTask(b)) + // if aReady < bReady { // return true - // } else if aready > bready { + // } else if aReady > bReady { // return false // } diff --git a/service/matching/matcher_test.go b/service/matching/matcher_test.go index 5a73e0e3281..e275c4d1a2f 100644 --- a/service/matching/matcher_test.go +++ b/service/matching/matcher_test.go @@ -294,7 +294,7 @@ func (t *MatcherTestSuite) TestForwardingWhenBacklogIsYoung() { wg.Add(1) t.client.EXPECT().AddWorkflowTask(gomock.Any(), gomock.Any(), gomock.Any()).Do( func(arg0 context.Context, arg1 *matchingservice.AddWorkflowTaskRequest, arg2 ...interface{}) { - // Offer forwarding has occured + // Offer forwarding has occurred wg.Done() }, ).Return(&matchingservice.AddWorkflowTaskResponse{}, errMatchingHostThrottleTest) diff --git a/service/matching/matching_engine.go b/service/matching/matching_engine.go index c85dabf4eca..79266d5f1b2 100644 --- a/service/matching/matching_engine.go +++ b/service/matching/matching_engine.go @@ -1147,7 +1147,7 @@ func (e *matchingEngineImpl) DescribeTaskQueue( return nil, err } - // TODO bug fix: We cache the last response for each build ID. timeSinceLastFanOut is the last fan out time, that means some enteries in the cache can be more stale if + // TODO bug fix: We cache the last response for each build ID. timeSinceLastFanOut is the last fan out time, that means some entries in the cache can be more stale if // user is calling this API back-to-back but with different version selection. 
cacheKeyFunc := func(buildId string, taskQueueType enumspb.TaskQueueType) string { return fmt.Sprintf("dtq_enhanced:%s.%s", buildId, taskQueueType.String()) diff --git a/service/matching/physical_task_queue_manager.go b/service/matching/physical_task_queue_manager.go index d9240f01576..2174a3e5b21 100644 --- a/service/matching/physical_task_queue_manager.go +++ b/service/matching/physical_task_queue_manager.go @@ -858,7 +858,7 @@ func (c *physicalTaskQueueManagerImpl) getOrCreateTaskTracker( return tracker // tracker was created while we were waiting for the lock } - // Initalize all task trackers together; or the timeframes won't line up. + // Initialize all task trackers together; or the timeframes won't line up. c.tasksAdded[priorityKey] = newTaskTracker(c.partitionMgr.engine.timeSource) c.tasksDispatched[priorityKey] = newTaskTracker(c.partitionMgr.engine.timeSource) diff --git a/service/matching/pri_matcher.go b/service/matching/pri_matcher.go index 80f7cdf7204..f68d6dfb276 100644 --- a/service/matching/pri_matcher.go +++ b/service/matching/pri_matcher.go @@ -346,7 +346,7 @@ func (tm *priTaskMatcher) Offer(ctx context.Context, task *internalTask) (bool, if res.ctxErr != nil { return false, res.ctxErr } - if !softassert.That(tm.logger, res.poller != nil, "expeced poller from match") { + if !softassert.That(tm.logger, res.poller != nil, "expected poller from match") { return false, nil } @@ -412,7 +412,7 @@ func (tm *priTaskMatcher) OfferQuery(ctx context.Context, task *internalTask) (* return nil, err } -// OfferNexusTask either matchs a task to a local poller or forwards it if no local pollers available. +// OfferNexusTask either matches a task to a local poller or forwards it if no local pollers available. // Local match is always attempted before forwarding. If local match occurs response and error are both nil, if // forwarding occurs then response or error is returned. func (tm *priTaskMatcher) OfferNexusTask(ctx context.Context, task *internalTask) (*matchingservice.DispatchNexusTaskResponse, error) { diff --git a/service/matching/pri_task_reader.go b/service/matching/pri_task_reader.go index 8b669e528fc..46a3ddb9955 100644 --- a/service/matching/pri_task_reader.go +++ b/service/matching/pri_task_reader.go @@ -419,7 +419,7 @@ func (tr *priTaskReader) getLoadedTasks() int { func (tr *priTaskReader) ackTaskLocked(taskId int64) int64 { wasAlreadyAcked, found := tr.outstandingTasks.Get(taskId) - if !softassert.That(tr.logger, found, "completed task not found in oustandingTasks") { + if !softassert.That(tr.logger, found, "completed task not found in outstandingTasks") { return 0 } if !softassert.That(tr.logger, !wasAlreadyAcked.(bool), "completed task was already acked") { diff --git a/service/matching/ratelimit_manager.go b/service/matching/ratelimit_manager.go index 39199730f97..9b02f4a61f8 100644 --- a/service/matching/ratelimit_manager.go +++ b/service/matching/ratelimit_manager.go @@ -86,7 +86,7 @@ func newRateLimitManager(userDataManager userDataManager, r.mu.Lock() defer r.mu.Unlock() - // Overall system rate limit will be the min of the two configs that are partition wise times the number of partitons. + // Overall system rate limit will be the min of the two configs that are partition wise times the number of partitions. 
var cancel func() r.adminNsRate, cancel = config.AdminNamespaceToPartitionRateSub(r.setAdminNsRate) r.cancels = append(r.cancels, cancel) @@ -137,7 +137,7 @@ func (r *rateLimitManager) computeEffectiveRPSAndSourceLocked() { effectiveRPS = math.Inf(1) rateLimitSource enumspb.RateLimitSource ) - // Overall system rate limit will be the min of the two configs that are partition wise times the number of partions. + // Overall system rate limit will be the min of the two configs that are partition wise times the number of partitions. systemRPS := min( r.adminNsRate, r.adminTqRate, diff --git a/service/matching/task_queue_partition_manager.go b/service/matching/task_queue_partition_manager.go index 2f5e56fc587..e9b52472c3c 100644 --- a/service/matching/task_queue_partition_manager.go +++ b/service/matching/task_queue_partition_manager.go @@ -1167,7 +1167,7 @@ func (pm *taskQueuePartitionManagerImpl) getPhysicalQueuesForAdd( // unknownBuild flag is ignored because we don't have any special logic for it anymore. unknown build can // happen in two scenarios: // - task queue is switching to the new versioning API and the build ID is not registered in version sets. - // - task queue is still using the old API but a failover happened before verisoning data fully propagate. + // - task queue is still using the old API but a failover happened before versioning data fully propagate. // the second case is unlikely, and we do not support it anymore considering the old API is deprecated. // TODO: [cleanup-old-wv] _, err = checkVersionForStickyAdd(data, directive.GetAssignedBuildId()) @@ -1251,7 +1251,7 @@ func (pm *taskQueuePartitionManagerImpl) getVersionSetForAdd(directive *taskqueu if unknownBuild { // this could happen in two scenarios: // - task queue is switching to the new versioning API and the build ID is not registered in version sets. - // - task queue is still using the old API but a failover happened before verisoning data fully propagate. + // - task queue is still using the old API but a failover happened before versioning data fully propagate. // the second case is unlikely, and we do not support it anymore considering the old API is deprecated. return "", nil } diff --git a/service/matching/version_rule_helpers.go b/service/matching/version_rule_helpers.go index bf745158eed..dcc5fc8318e 100644 --- a/service/matching/version_rule_helpers.go +++ b/service/matching/version_rule_helpers.go @@ -44,7 +44,7 @@ var ( return serviceerror.NewFailedPreconditionf("update exceeds number of assignment rules permitted in namespace (%v/%v)", cnt, max) } // errRequireFullyRampedAssignmentRule is thrown if the task queue previously had a fully-ramped assignment rule and - // the requested operation would result in a list of assignment rules without a fully-ramped assigment rule, which + // the requested operation would result in a list of assignment rules without a fully-ramped assignment rule, which // effectively means that the task queue does not have a default version. 
This error is only thrown when moving from // a task queue with a fully-ramped assignment rule (aka versioned with a default Build ID / default version) to a // task queue without any fully-ramped assignment rules, because that operation would make the unversioned queue the diff --git a/service/worker/deletenamespace/activities_test.go b/service/worker/deletenamespace/activities_test.go index 0d62634f7a4..d0e7fb5ae22 100644 --- a/service/worker/deletenamespace/activities_test.go +++ b/service/worker/deletenamespace/activities_test.go @@ -59,7 +59,7 @@ func Test_ValidateNexusEndpointsActivity(t *testing.T) { nexusEndpointListDefaultPageSize: func() int { return 100 }, } - // The "fake" namespace ID is associated with a Nexus endoint. + // The "fake" namespace ID is associated with a Nexus endpoint. nexusEndpointManager.EXPECT().ListNexusEndpoints(gomock.Any(), gomock.Any()).Return(&persistence.ListNexusEndpointsResponse{ Entries: []*persistencespb.NexusEndpointEntry{ { diff --git a/service/worker/deployment/deployment_util.go b/service/worker/deployment/deployment_util.go index 9b54b93afdb..028712e83d1 100644 --- a/service/worker/deployment/deployment_util.go +++ b/service/worker/deployment/deployment_util.go @@ -37,12 +37,12 @@ const ( DeploymentMemoField = "DeploymentMemo" // for deployment wf DeploymentSeriesMemoField = "DeploymentSeriesMemo" // for deployment series wf - // Prefixes, Delimeters and Keys + // Prefixes, Delimiters and Keys DeploymentWorkflowIDPrefix = "temporal-sys-deployment" DeploymentSeriesWorkflowIDPrefix = "temporal-sys-deployment-series" - DeploymentWorkflowIDDelimeter = ":" + DeploymentWorkflowIDDelimiter = ":" DeploymentWorkflowIDEscape = "|" - DeploymentWorkflowIDInitialSize = (2 * len(DeploymentWorkflowIDDelimeter)) + len(DeploymentWorkflowIDPrefix) + DeploymentWorkflowIDInitialSize = (2 * len(DeploymentWorkflowIDDelimiter)) + len(DeploymentWorkflowIDPrefix) SeriesFieldName = "DeploymentSeries" BuildIDFieldName = "BuildID" @@ -71,7 +71,7 @@ func ValidateDeploymentWfParams(fieldName string, field string, maxIDLengthLimit return serviceerror.NewInvalidArgumentf("%v cannot be empty", fieldName) } - // Length of each field should be: (MaxIDLengthLimit - prefix and delimeter length) / 2 + // Length of each field should be: (MaxIDLengthLimit - prefix and delimiter length) / 2 if len(field) > (maxIDLengthLimit-DeploymentWorkflowIDInitialSize)/2 { return serviceerror.NewInvalidArgumentf("size of %v larger than the maximum allowed", fieldName) } @@ -79,18 +79,18 @@ func ValidateDeploymentWfParams(fieldName string, field string, maxIDLengthLimit return nil } -// EscapeChar is a helper which escapes the DeploymentWorkflowIDDelimeter character +// EscapeChar is a helper which escapes the DeploymentWorkflowIDDelimiter character // in the input string func escapeChar(s string) string { s = strings.Replace(s, DeploymentWorkflowIDEscape, DeploymentWorkflowIDEscape+DeploymentWorkflowIDEscape, -1) - s = strings.Replace(s, DeploymentWorkflowIDDelimeter, DeploymentWorkflowIDEscape+DeploymentWorkflowIDDelimeter, -1) + s = strings.ReplaceAll(s, DeploymentWorkflowIDDelimiter, DeploymentWorkflowIDEscape+DeploymentWorkflowIDDelimiter) return s } func GenerateDeploymentSeriesWorkflowID(deploymentSeriesName string) string { // escaping the reserved workflow delimiter (|) from the inputs, if present escapedSeriesName := escapeChar(deploymentSeriesName) - return DeploymentSeriesWorkflowIDPrefix + DeploymentWorkflowIDDelimeter + escapedSeriesName + return DeploymentSeriesWorkflowIDPrefix + 
DeploymentWorkflowIDDelimiter + escapedSeriesName } // GenerateDeploymentWorkflowID is a helper that generates a system accepted @@ -99,13 +99,13 @@ func GenerateDeploymentWorkflowID(seriesName string, buildID string) string { escapedSeriesName := escapeChar(seriesName) escapedBuildId := escapeChar(buildID) - return DeploymentWorkflowIDPrefix + DeploymentWorkflowIDDelimeter + escapedSeriesName + DeploymentWorkflowIDDelimeter + escapedBuildId + return DeploymentWorkflowIDPrefix + DeploymentWorkflowIDDelimiter + escapedSeriesName + DeploymentWorkflowIDDelimiter + escapedBuildId } func GenerateDeploymentWorkflowIDForPatternMatching(seriesName string) string { escapedSeriesName := escapeChar(seriesName) - return DeploymentWorkflowIDPrefix + DeploymentWorkflowIDDelimeter + escapedSeriesName + DeploymentWorkflowIDDelimeter + return DeploymentWorkflowIDPrefix + DeploymentWorkflowIDDelimiter + escapedSeriesName + DeploymentWorkflowIDDelimiter } // BuildQueryWithSeriesFilter is a helper which builds a query for pattern matching based on the diff --git a/service/worker/migration/activities.go b/service/worker/migration/activities.go index 3f92c9f520b..64f34411a9f 100644 --- a/service/worker/migration/activities.go +++ b/service/worker/migration/activities.go @@ -711,13 +711,13 @@ func (a *activities) checkSkipWorkflowExecution( func (a *activities) verifySingleReplicationTask( ctx context.Context, request *verifyReplicationTasksRequest, - remotAdminClient adminservice.AdminServiceClient, + remoteAdminClient adminservice.AdminServiceClient, ns *namespace.Namespace, we *commonpb.WorkflowExecution, ) (verifyResult, error) { s := time.Now() // Check if execution exists on remote cluster - mu, err := remotAdminClient.DescribeMutableState(ctx, &adminservice.DescribeMutableStateRequest{ + mu, err := remoteAdminClient.DescribeMutableState(ctx, &adminservice.DescribeMutableStateRequest{ Namespace: request.Namespace, Execution: we, SkipForceReload: true, @@ -726,7 +726,7 @@ func (a *activities) verifySingleReplicationTask( switch err.(type) { case nil: - result, err := a.workflowVerifier(ctx, request, remotAdminClient, a.adminClient, ns, we, mu) + result, err := a.workflowVerifier(ctx, request, remoteAdminClient, a.adminClient, ns, we, mu) if err == nil && result.status == verified { a.forceReplicationMetricsHandler.WithTags(metrics.NamespaceTag(request.Namespace)).Counter(metrics.VerifyReplicationTaskSuccess.Name()).Record(1) } @@ -757,7 +757,7 @@ func (a *activities) verifyReplicationTasks( ctx context.Context, request *verifyReplicationTasksRequest, details *replicationTasksHeartbeatDetails, - remotAdminClient adminservice.AdminServiceClient, + remoteAdminClient adminservice.AdminServiceClient, ns *namespace.Namespace, heartbeat func(details replicationTasksHeartbeatDetails), ) (bool, error) { @@ -777,7 +777,7 @@ func (a *activities) verifyReplicationTasks( for ; details.NextIndex < len(request.Executions); details.NextIndex++ { we := request.Executions[details.NextIndex] - r, err := a.verifySingleReplicationTask(ctx, request, remotAdminClient, ns, we) + r, err := a.verifySingleReplicationTask(ctx, request, remoteAdminClient, ns, we) if err != nil { return false, err } @@ -811,7 +811,7 @@ func (a *activities) VerifyReplicationTasks(ctx context.Context, request *verify activity.RecordHeartbeat(ctx, details) } - remotAdminClient, err := a.clientBean.GetRemoteAdminClient(request.TargetClusterName) + remoteAdminClient, err := a.clientBean.GetRemoteAdminClient(request.TargetClusterName) if err != nil { return 
response, err } @@ -837,7 +837,7 @@ func (a *activities) VerifyReplicationTasks(ctx context.Context, request *verify // Since replication has a lag, sleep first. time.Sleep(request.VerifyInterval) - verified, err := a.verifyReplicationTasks(ctx, request, &details, remotAdminClient, nsEntry, + verified, err := a.verifyReplicationTasks(ctx, request, &details, remoteAdminClient, nsEntry, func(d replicationTasksHeartbeatDetails) { activity.RecordHeartbeat(ctx, d) }) diff --git a/service/worker/migration/activities_test.go b/service/worker/migration/activities_test.go index 35f6d742e31..9cb934c7a8f 100644 --- a/service/worker/migration/activities_test.go +++ b/service/worker/migration/activities_test.go @@ -169,7 +169,7 @@ func (s *activitiesSuite) TestVerifyReplicationTasks_Success() { })).Return(&adminservice.DescribeMutableStateResponse{}, nil).Times(1) // Slowly replicated - replicationSlowReponses := []struct { + replicationSlowResponses := []struct { resp *adminservice.DescribeMutableStateResponse err error }{ @@ -178,7 +178,7 @@ func (s *activitiesSuite) TestVerifyReplicationTasks_Success() { {&adminservice.DescribeMutableStateResponse{}, nil}, } - for _, r := range replicationSlowReponses { + for _, r := range replicationSlowResponses { s.mockRemoteAdminClient.EXPECT().DescribeMutableState(gomock.Any(), protomock.Eq(&adminservice.DescribeMutableStateRequest{ Namespace: mockedNamespace, Execution: execution2, @@ -411,7 +411,7 @@ type mockHeartBeatRecorder struct { lastHeartBeat replicationTasksHeartbeatDetails } -func (m *mockHeartBeatRecorder) hearbeat(details replicationTasksHeartbeatDetails) { +func (m *mockHeartBeatRecorder) heartbeat(details replicationTasksHeartbeatDetails) { m.lastHeartBeat = details } @@ -481,7 +481,7 @@ func (s *activitiesSuite) Test_verifyReplicationTasks() { CheckPoint: checkPointTime, } - verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.hearbeat) + verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.heartbeat) if tc.expectedErr == nil { s.NoError(err) } @@ -523,7 +523,7 @@ func (s *activitiesSuite) Test_verifyReplicationTasksNoProgress() { } ctx := context.TODO() - verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.hearbeat) + verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.heartbeat) s.NoError(err) s.False(verified) // Verify has made progress. @@ -540,7 +540,7 @@ func (s *activitiesSuite) Test_verifyReplicationTasksNoProgress() { })).Return(nil, serviceerror.NewNotFound("")).Times(1) // All results should be either NotFound or cached and no progress should be made. 
- verified, err = s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.hearbeat) + verified, err = s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, &testNamespace, recorder.heartbeat) s.NoError(err) s.False(verified) s.Equal(prevDetails, details) @@ -605,7 +605,7 @@ func (s *activitiesSuite) Test_verifyReplicationTasksSkipRetention() { details := replicationTasksHeartbeatDetails{} ctx := context.TODO() - verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, ns, recorder.hearbeat) + verified, err := s.a.verifyReplicationTasks(ctx, &request, &details, s.mockRemoteAdminClient, ns, recorder.heartbeat) s.NoError(err) s.Equal(tc.verified, verified) s.Equal(recorder.lastHeartBeat, details) @@ -665,7 +665,7 @@ func (s *activitiesSuite) TestGenerateReplicationTasks_Failed() { s.Greater(len(iceptor.generateReplicationRecordedHeartbeats), 0) lastIdx := len(iceptor.generateReplicationRecordedHeartbeats) - 1 lastHeartBeat := iceptor.generateReplicationRecordedHeartbeats[lastIdx] - // Only the generation of 1st execution suceeded. + // Only the generation of 1st execution succeeded. s.Equal(0, lastHeartBeat) } diff --git a/service/worker/migration/force_replication_workflow_test.go b/service/worker/migration/force_replication_workflow_test.go index 7fef0a09cbd..f03fe8a30fd 100644 --- a/service/worker/migration/force_replication_workflow_test.go +++ b/service/worker/migration/force_replication_workflow_test.go @@ -479,7 +479,7 @@ func (s *ForceReplicationWorkflowTestSuite) TestVerifyReplicationTaskNonRetryabl }) var errMsg = "mock verify replication tasks error" - // GenerateReplicationTasks and VerifyReplicationTasks runs in paralle. GenerateReplicationTasks may not start before VerifyReplicationTasks failed. + // GenerateReplicationTasks and VerifyReplicationTasks runs in parallel. GenerateReplicationTasks may not start before VerifyReplicationTasks failed. env.OnActivity(a.GenerateReplicationTasks, mock.Anything, mock.Anything).Return(nil).Maybe() env.OnActivity(a.VerifyReplicationTasks, mock.Anything, mock.Anything).Return( verifyReplicationTasksResponse{}, diff --git a/service/worker/replicator/replication_message_processor.go b/service/worker/replicator/replication_message_processor.go index dc621c1611d..875ca12b35e 100644 --- a/service/worker/replicator/replication_message_processor.go +++ b/service/worker/replicator/replication_message_processor.go @@ -30,7 +30,7 @@ const ( pollIntervalSecs = 1 taskProcessorErrorRetryWait = time.Second taskProcessorErrorRetryBackoffCoefficient = 1 - taskProcessorErrorRetryMaxAttampts = 5 + taskProcessorErrorRetryMaxAttempts = 5 ) func newReplicationMessageProcessor( @@ -48,7 +48,7 @@ func newReplicationMessageProcessor( ) *replicationMessageProcessor { retryPolicy := backoff.NewExponentialRetryPolicy(taskProcessorErrorRetryWait). WithBackoffCoefficient(taskProcessorErrorRetryBackoffCoefficient). 
- WithMaximumAttempts(taskProcessorErrorRetryMaxAttampts) + WithMaximumAttempts(taskProcessorErrorRetryMaxAttempts) return &replicationMessageProcessor{ hostInfo: hostInfo, diff --git a/service/worker/scanner/build_ids/scavenger.go b/service/worker/scanner/build_ids/scavenger.go index 225672f93da..0195b115486 100644 --- a/service/worker/scanner/build_ids/scavenger.go +++ b/service/worker/scanner/build_ids/scavenger.go @@ -24,8 +24,8 @@ import ( ) const ( - BuildIdScavangerWorkflowName = "build-id-scavenger" - BuildIdScavangerActivityName = "scavenge-build-ids" + BuildIDScavengerWorkflowName = "build-id-scavenger" + BuildIDScavengerActivityName = "scavenge-build-ids" BuildIdScavengerWFID = "temporal-sys-build-id-scavenger" BuildIdScavengerTaskQueueName = "temporal-sys-build-id-scavenger-taskqueue-0" @@ -41,7 +41,7 @@ var ( ) type ( - BuildIdScavangerInput struct { + BuildIDScavengerInput struct { NamespaceListPageSize int TaskQueueListPageSize int IgnoreRetentionTime bool // If true, consider build ids added since retention time also @@ -96,18 +96,18 @@ func NewActivities( } } -// BuildIdScavangerWorkflow scans all task queue user data entries in all namespaces and cleans up unused build ids. +// BuildIDScavengerWorkflow scans all task queue user data entries in all namespaces and cleans up unused build ids. // This workflow is a wrapper around the long running ScavengeBuildIds activity. -func BuildIdScavangerWorkflow(ctx workflow.Context, input BuildIdScavangerInput) error { +func BuildIDScavengerWorkflow(ctx workflow.Context, input BuildIDScavengerInput) error { activityCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ // Give the activity enough time to scan the entire namespace StartToCloseTimeout: 6 * time.Hour, HeartbeatTimeout: 30 * time.Second, }) - return workflow.ExecuteActivity(activityCtx, BuildIdScavangerActivityName, input).Get(ctx, nil) + return workflow.ExecuteActivity(activityCtx, BuildIDScavengerActivityName, input).Get(ctx, nil) } -func (a *Activities) setDefaults(input *BuildIdScavangerInput) { +func (a *Activities) setDefaults(input *BuildIDScavengerInput) { if input.NamespaceListPageSize == 0 { input.NamespaceListPageSize = 100 } @@ -121,7 +121,7 @@ func (a *Activities) recordHeartbeat(ctx context.Context, heartbeat heartbeatDet } // ScavengeBuildIds scans all task queue user data entries in all namespaces and cleans up unused build ids. 
-func (a *Activities) ScavengeBuildIds(ctx context.Context, input BuildIdScavangerInput) error { +func (a *Activities) ScavengeBuildIds(ctx context.Context, input BuildIDScavengerInput) error { a.setDefaults(&input) var heartbeat heartbeatDetails @@ -161,7 +161,7 @@ func (a *Activities) ScavengeBuildIds(ctx context.Context, input BuildIdScavange func (a *Activities) processNamespaceEntry( ctx context.Context, rateLimiter quotas.RateLimiter, - input BuildIdScavangerInput, + input BuildIDScavengerInput, heartbeat *heartbeatDetails, nsId string, ) error { @@ -215,7 +215,7 @@ func (a *Activities) processNamespaceEntry( func (a *Activities) processUserDataEntry( ctx context.Context, rateLimiter quotas.RateLimiter, - input BuildIdScavangerInput, + input BuildIDScavengerInput, heartbeat heartbeatDetails, ns *namespace.Namespace, entry *persistence.TaskQueueUserDataEntry, @@ -244,7 +244,7 @@ func (a *Activities) processUserDataEntry( func (a *Activities) findBuildIdsToRemove( ctx context.Context, rateLimiter quotas.RateLimiter, - input BuildIdScavangerInput, + input BuildIDScavengerInput, heartbeat heartbeatDetails, ns *namespace.Namespace, entry *persistence.TaskQueueUserDataEntry, diff --git a/service/worker/scanner/build_ids/scavenger_test.go b/service/worker/scanner/build_ids/scavenger_test.go index ac0350851e9..8ff783d7c6f 100644 --- a/service/worker/scanner/build_ids/scavenger_test.go +++ b/service/worker/scanner/build_ids/scavenger_test.go @@ -46,7 +46,7 @@ func Test_findBuildIdsToRemove_AcceptsNilVersioningData(t *testing.T) { buildIdsRemoved, err := a.findBuildIdsToRemove( ctx, nil, - BuildIdScavangerInput{}, + BuildIDScavengerInput{}, heartbeatDetails{}, namespace.NewNamespaceForTest(nil, nil, false, nil, 0), &persistence.TaskQueueUserDataEntry{ @@ -67,17 +67,17 @@ func Test_findBuildIdsToRemove_FindsAllBuildIdsToRemove(t *testing.T) { env := testSuite.NewTestActivityEnvironment() ctrl := gomock.NewController(t) - visiblityManager := manager.NewMockVisibilityManager(ctrl) + visibilityManager := manager.NewMockVisibilityManager(ctrl) rateLimiter := quotas.NewMockRateLimiter(ctrl) a := &Activities{ logger: log.NewCLILogger(), - visibilityManager: visiblityManager, + visibilityManager: visibilityManager, removableBuildIdDurationSinceDefault: dynamicconfig.GetDurationPropertyFn(time.Hour), buildIdScavengerVisibilityRPS: dynamicconfig.GetFloatPropertyFn(1.0), } - visiblityManager.EXPECT().CountWorkflowExecutions(gomock.Any(), gomock.Any()).Times(4).DoAndReturn( + visibilityManager.EXPECT().CountWorkflowExecutions(gomock.Any(), gomock.Any()).Times(4).DoAndReturn( func(ctx context.Context, request *manager.CountWorkflowExecutionsRequest) (*manager.CountWorkflowExecutionsResponse, error) { count := 0 if strings.Contains(request.Query, fmt.Sprintf("'%s'", worker_versioning.VersionedBuildIdSearchAttribute("v3.0"))) { @@ -206,7 +206,7 @@ func Test_findBuildIdsToRemove_FindsAllBuildIdsToRemove(t *testing.T) { return a.findBuildIdsToRemove( ctx, rateLimiter, - BuildIdScavangerInput{}, + BuildIDScavengerInput{}, heartbeatDetails{}, ns, &persistence.TaskQueueUserDataEntry{ @@ -235,7 +235,7 @@ func Test_ScavengeBuildIds_Heartbeats(t *testing.T) { env.SetWorkerOptions(worker.Options{Interceptors: []interceptor.WorkerInterceptor{&iceptor}}) ctrl := gomock.NewController(t) - visiblityManager := manager.NewMockVisibilityManager(ctrl) + visibilityManager := manager.NewMockVisibilityManager(ctrl) rateLimiter := quotas.NewMockRateLimiter(ctrl) metadataManager := persistence.NewMockMetadataManager(ctrl) 
taskManager := persistence.NewMockTaskManager(ctrl) @@ -244,7 +244,7 @@ func Test_ScavengeBuildIds_Heartbeats(t *testing.T) { a := &Activities{ logger: log.NewCLILogger(), - visibilityManager: visiblityManager, + visibilityManager: visibilityManager, metadataManager: metadataManager, taskManager: taskManager, namespaceRegistry: namespaceRegistry, @@ -255,7 +255,7 @@ func Test_ScavengeBuildIds_Heartbeats(t *testing.T) { } rateLimiter.EXPECT().Wait(gomock.Any()).AnyTimes() - visiblityManager.EXPECT().CountWorkflowExecutions(gomock.Any(), gomock.Any()).AnyTimes().Return(&manager.CountWorkflowExecutionsResponse{ + visibilityManager.EXPECT().CountWorkflowExecutions(gomock.Any(), gomock.Any()).AnyTimes().Return(&manager.CountWorkflowExecutionsResponse{ Count: 0, }, nil) @@ -396,7 +396,7 @@ func Test_ScavengeBuildIds_Heartbeats(t *testing.T) { ) env.SetHeartbeatDetails(initialHeartbeat) env.RegisterActivity(a) - _, err := env.ExecuteActivity(a.ScavengeBuildIds, BuildIdScavangerInput{}) + _, err := env.ExecuteActivity(a.ScavengeBuildIds, BuildIDScavengerInput{}) require.NoError(t, err) require.Equal(t, []heartbeatDetails{ { diff --git a/service/worker/scanner/scanner.go b/service/worker/scanner/scanner.go index 3bd067a2251..018d9808592 100644 --- a/service/worker/scanner/scanner.go +++ b/service/worker/scanner/scanner.go @@ -181,7 +181,7 @@ func (s *Scanner) Start() error { if s.context.cfg.BuildIdScavengerEnabled() { s.wg.Add(1) - go s.startWorkflowWithRetry(ctx, build_ids.BuildIdScavengerWFStartOptions, build_ids.BuildIdScavangerWorkflowName) + go s.startWorkflowWithRetry(ctx, build_ids.BuildIdScavengerWFStartOptions, build_ids.BuildIDScavengerWorkflowName) buildIdsActivities := build_ids.NewActivities( s.context.logger, @@ -196,8 +196,8 @@ func (s *Scanner) Start() error { ) work := s.context.sdkClientFactory.NewWorker(s.context.sdkClientFactory.GetSystemClient(), build_ids.BuildIdScavengerTaskQueueName, workerOpts) - work.RegisterWorkflowWithOptions(build_ids.BuildIdScavangerWorkflow, workflow.RegisterOptions{Name: build_ids.BuildIdScavangerWorkflowName}) - work.RegisterActivityWithOptions(buildIdsActivities.ScavengeBuildIds, activity.RegisterOptions{Name: build_ids.BuildIdScavangerActivityName}) + work.RegisterWorkflowWithOptions(build_ids.BuildIDScavengerWorkflow, workflow.RegisterOptions{Name: build_ids.BuildIDScavengerWorkflowName}) + work.RegisterActivityWithOptions(buildIdsActivities.ScavengeBuildIds, activity.RegisterOptions{Name: build_ids.BuildIDScavengerActivityName}) // TODO: Nothing is gracefully stopping these workers or listening for fatal errors. 
if err := work.Start(); err != nil { diff --git a/service/worker/scanner/scanner_test.go b/service/worker/scanner/scanner_test.go index e89d4c03fab..b91a4887983 100644 --- a/service/worker/scanner/scanner_test.go +++ b/service/worker/scanner/scanner_test.go @@ -49,7 +49,7 @@ func (s *scannerTestSuite) TestScannerEnabled() { TaskQueueName: historyScannerTaskQueueName, } buildIdScavenger := expectedScanner{ - WFTypeName: build_ids.BuildIdScavangerWorkflowName, + WFTypeName: build_ids.BuildIDScavengerWorkflowName, TaskQueueName: build_ids.BuildIdScavengerTaskQueueName, } diff --git a/service/worker/scheduler/activities.go b/service/worker/scheduler/activities.go index 627d0c45522..58351b96619 100644 --- a/service/worker/scheduler/activities.go +++ b/service/worker/scheduler/activities.go @@ -53,12 +53,12 @@ const ( ) var ( - errTryAgain = errors.New("try again") - errWrongChain = errors.New("found running workflow with wrong FirstExecutionRunId") - errNoEvents = errors.New("GetEvents didn't return any events") - errNoAttrs = errors.New("last event did not have correct attrs") - errBlocked = errors.New("rate limiter doesn't allow any progress") - errUnkownWorkflowStatus = errors.New("unknown workflow status") + errTryAgain = errors.New("try again") + errWrongChain = errors.New("found running workflow with wrong FirstExecutionRunId") + errNoEvents = errors.New("GetEvents didn't return any events") + errNoAttrs = errors.New("last event did not have correct attrs") + errBlocked = errors.New("rate limiter doesn't allow any progress") + errUnknownWorkflowStatus = errors.New("unknown workflow status") ) func (e errFollow) Error() string { return string(e) } @@ -353,7 +353,7 @@ func (r responseBuilder) Build(event *historypb.HistoryEvent) (*schedulespb.Watc return r.makeResponse(nil, nil, event.EventTime), nil } } - return nil, errUnkownWorkflowStatus + return nil, errUnknownWorkflowStatus } func (r responseBuilder) isTooBig(m proto.Message) bool { diff --git a/service/worker/scheduler/responsebuilder_test.go b/service/worker/scheduler/responsebuilder_test.go index 3d239453f1d..97799aa1756 100644 --- a/service/worker/scheduler/responsebuilder_test.go +++ b/service/worker/scheduler/responsebuilder_test.go @@ -258,7 +258,7 @@ func TestResponseBuilder(t *testing.T) { response, err := rb.Build(&event) - assertError(t, err, errUnkownWorkflowStatus) + assertError(t, err, errUnknownWorkflowStatus) assertResponseIsNil(t, response) }) } diff --git a/service/worker/scheduler/workflow.go b/service/worker/scheduler/workflow.go index d1eeb28f481..202119761b4 100644 --- a/service/worker/scheduler/workflow.go +++ b/service/worker/scheduler/workflow.go @@ -1095,7 +1095,7 @@ func (s *scheduler) updateCustomSearchAttributes(searchAttributes *commonpb.Sear for key, valuePayload := range searchAttributes.GetIndexedFields() { var value any if err := payload.Decode(valuePayload, &value); err != nil { - s.logger.Error("error updating search attributes of the scheule", "error", err) + s.logger.Error("error updating search attributes of the schedule", "error", err) return } upsertMap[key] = value @@ -1125,7 +1125,7 @@ func (s *scheduler) updateCustomSearchAttributes(searchAttributes *commonpb.Sear } //nolint:staticcheck // SA1019 The untyped function here is more convenient. 
if err := workflow.UpsertSearchAttributes(s.ctx, upsertMap); err != nil { - s.logger.Error("error updating search attributes of the scheule", "error", err) + s.logger.Error("error updating search attributes of the schedule", "error", err) } } diff --git a/service/worker/workerdeployment/util.go b/service/worker/workerdeployment/util.go index 7f3528a2942..ea0b7aadfaf 100644 --- a/service/worker/workerdeployment/util.go +++ b/service/worker/workerdeployment/util.go @@ -53,10 +53,10 @@ const ( // Memos WorkerDeploymentMemoField = "WorkerDeploymentMemo" // for Worker Deployment wf - // Prefixes, Delimeters and Keys + // Prefixes, Delimiters and Keys WorkerDeploymentVersionWorkflowIDPrefix = "temporal-sys-worker-deployment-version" - WorkerDeploymentVersionWorkflowIDDelimeter = ":" - WorkerDeploymentVersionWorkflowIDInitialSize = len(WorkerDeploymentVersionWorkflowIDDelimeter) + len(WorkerDeploymentVersionWorkflowIDPrefix) + WorkerDeploymentVersionWorkflowIDDelimiter = ":" + WorkerDeploymentVersionWorkflowIDInitialSize = len(WorkerDeploymentVersionWorkflowIDDelimiter) + len(WorkerDeploymentVersionWorkflowIDPrefix) WorkerDeploymentNameFieldName = "WorkerDeploymentName" WorkerDeploymentBuildIDFieldName = "BuildID" @@ -123,7 +123,7 @@ func validateVersionWfParams(fieldName string, field string, maxIDLengthLimit in return serviceerror.NewInvalidArgumentf("%v cannot be empty", fieldName) } - // Length of each field should be: (MaxIDLengthLimit - (prefix + delimeter length)) / 2 + // Length of each field should be: (MaxIDLengthLimit - (prefix + delimiter length)) / 2 if len(field) > (maxIDLengthLimit-WorkerDeploymentVersionWorkflowIDInitialSize)/2 { return serviceerror.NewInvalidArgumentf("size of %v larger than the maximum allowed", fieldName) } diff --git a/temporal/fx.go b/temporal/fx.go index 383d38f1311..ad74f4295ae 100644 --- a/temporal/fx.go +++ b/temporal/fx.go @@ -953,7 +953,7 @@ var TraceExportModule = fx.Options( ) // ServiceTracingModule holds per-service (i.e. frontend/history/matching/worker) fx -// state. The following types can be overriden with fx.Replace/fx.Decorate: +// state. 
The following types can be overriden with fx.Replace/fx.Decorate:
+// state. The following types can be overridden with fx.Replace/fx.Decorate:
 //
 // - []go.opentelemetry.io/otel/sdk/trace.BatchSpanProcessorOption
 // default: empty slice
diff --git a/tests/advanced_visibility_test.go b/tests/advanced_visibility_test.go
index 8b94f0ffed5..9f7a4570294 100644
--- a/tests/advanced_visibility_test.go
+++ b/tests/advanced_visibility_test.go
@@ -2447,7 +2447,7 @@ func (s *AdvancedVisibilitySuite) TestBuildIdScavenger_DeletesUnusedBuildId() {
 	run, err := s.sysSDKClient.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{
 		ID: s.T().Name() + "-scavenger",
 		TaskQueue: build_ids.BuildIdScavengerTaskQueueName,
-	}, build_ids.BuildIdScavangerWorkflowName, build_ids.BuildIdScavangerInput{
+	}, build_ids.BuildIDScavengerWorkflowName, build_ids.BuildIDScavengerInput{
 		IgnoreRetentionTime: true,
 	})
 	s.Require().NoError(err)
diff --git a/tests/client_misc_test.go b/tests/client_misc_test.go
index ad192a57730..77f7b92b51c 100644
--- a/tests/client_misc_test.go
+++ b/tests/client_misc_test.go
@@ -664,7 +664,7 @@ func (s *ClientMiscTestSuite) Test_FinishWorkflowWithDeferredCommands() {
 	s.Worker().RegisterWorkflow(childWorkflowFn)
 	s.Worker().RegisterActivity(activityFn)
 
-	id := "functional-test-finish-workflow-with-deffered-commands"
+	id := "functional-test-finish-workflow-with-deferred-commands"
 	workflowOptions := sdkclient.StartWorkflowOptions{
 		ID: id,
 		TaskQueue: s.TaskQueue(),
@@ -1296,7 +1296,7 @@ func (s *ClientMiscTestSuite) TestBatchResetByBuildId() {
 	// wait until we see three calls to badact
 	s.Eventually(func() bool { return badcount.Load() >= 3 }, 10*time.Second, 200*time.Millisecond)
 
-	// at this point act2 should have been invokved once also
+	// at this point act2 should have been invoked once also
 	s.Equal(int32(1), act2count.Load())
 
 	w2.Stop()
diff --git a/tests/dlq_test.go b/tests/dlq_test.go
index faed0dce691..e551dcdb3de 100644
--- a/tests/dlq_test.go
+++ b/tests/dlq_test.go
@@ -314,8 +314,8 @@ func (s *DLQSuite) TestMergeRealWorkflow() {
 	}
 
 	// Re-enqueue the workflow tasks from the DLQ, but don't fail its WFTs this time.
-	nonExistantID := "some-workflow-id-that-wont-exist"
-	s.failingWorkflowIDPrefix.Store(&nonExistantID)
+	nonExistentID := "some-workflow-id-that-wont-exist"
+	s.failingWorkflowIDPrefix.Store(&nonExistentID)
 	token := s.mergeMessages(ctx, dlqMessageID)
 
 	// Verify that the workflow task was deleted from the DLQ after merging.
diff --git a/tests/nexus_workflow_test.go b/tests/nexus_workflow_test.go
index 3bbe47aa20c..21603bf7c56 100644
--- a/tests/nexus_workflow_test.go
+++ b/tests/nexus_workflow_test.go
@@ -76,7 +76,7 @@ func (s *NexusWorkflowTestSuite) TestNexusOperationCancelation() {
 			if !firstCancelSeen {
 				// Fail cancel request once to test NexusOperationCancelRequestFailed event is recorded and request is retried.
firstCancelSeen = true - return nexus.HandlerErrorf(nexus.HandlerErrorTypeBadRequest, "intentional non-retyrable cancel error for test") + return nexus.HandlerErrorf(nexus.HandlerErrorTypeBadRequest, "intentional non-retryable cancel error for test") } return nil }, diff --git a/tests/schedule_test.go b/tests/schedule_test.go index 8af601274e8..1b24c0df040 100644 --- a/tests/schedule_test.go +++ b/tests/schedule_test.go @@ -186,7 +186,7 @@ func (s *ScheduleFunctionalSuite) TestBasics() { // wait for visibility to stabilize on completed before calling describe, // otherwise their recent actions may flake and differ - visibilityResponse := s.getScheduleEntryFomVisibility(sid, func(ent *schedulepb.ScheduleListEntry) bool { + visibilityResponse := s.getScheduleEntryFromVisibility(sid, func(ent *schedulepb.ScheduleListEntry) bool { recentActions := ent.GetInfo().GetRecentActions() return len(recentActions) >= 2 && recentActions[1].GetStartWorkflowStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED }) @@ -807,7 +807,7 @@ func (s *ScheduleFunctionalSuite) TestListBeforeRun() { s.NoError(err) s.cleanup(sid) - entry := s.getScheduleEntryFomVisibility(sid, nil) + entry := s.getScheduleEntryFromVisibility(sid, nil) s.NotNil(entry.Info) s.ProtoEqual(schedule.Spec, entry.Info.Spec) s.Equal(wt, entry.Info.WorkflowType.Name) @@ -920,7 +920,7 @@ func (s *ScheduleFunctionalSuite) TestListSchedulesReturnsWorkflowStatus() { s.cleanup(sid) // validate RecentActions made it to visibility - listResp := s.getScheduleEntryFomVisibility(sid, func(listResp *schedulepb.ScheduleListEntry) bool { + listResp := s.getScheduleEntryFromVisibility(sid, func(listResp *schedulepb.ScheduleListEntry) bool { return len(listResp.Info.RecentActions) >= 1 }) s.Equal(1, len(listResp.Info.RecentActions)) @@ -940,8 +940,8 @@ func (s *ScheduleFunctionalSuite) TestListSchedulesReturnsWorkflowStatus() { }) s.NoError(err) - // now wait for second recent action to land in visbility - listResp = s.getScheduleEntryFomVisibility(sid, func(listResp *schedulepb.ScheduleListEntry) bool { + // now wait for second recent action to land in visibility + listResp = s.getScheduleEntryFromVisibility(sid, func(listResp *schedulepb.ScheduleListEntry) bool { return len(listResp.Info.RecentActions) >= 2 }) @@ -1026,7 +1026,7 @@ func (s *ScheduleFunctionalSuite) TestLimitMemoSpecSize() { s.cleanup(sid) // Verify the memo field length limit was enforced. - entry := s.getScheduleEntryFomVisibility(sid, nil) + entry := s.getScheduleEntryFromVisibility(sid, nil) s.Require().NotNil(entry) spec := entry.GetInfo().GetSpec() s.Require().Equal(expectedLimit, len(spec.GetInterval())) @@ -1112,9 +1112,9 @@ func (s *ScheduleFunctionalSuite) TestNextTimeCache() { s.Equal(expectedRefills, nextTimeSideEffects) } -// getScheduleEntryFomVisibility polls visibility using ListSchedules until it finds a schedule +// getScheduleEntryFromVisibility polls visibility using ListSchedules until it finds a schedule // with the given id and for which the optional predicate function returns true. 
-func (s *ScheduleFunctionalSuite) getScheduleEntryFomVisibility(sid string, predicate func(*schedulepb.ScheduleListEntry) bool) *schedulepb.ScheduleListEntry { +func (s *ScheduleFunctionalSuite) getScheduleEntryFromVisibility(sid string, predicate func(*schedulepb.ScheduleListEntry) bool) *schedulepb.ScheduleListEntry { var slEntry *schedulepb.ScheduleListEntry s.Require().Eventually(func() bool { // wait for visibility listResp, err := s.FrontendClient().ListSchedules(testcore.NewContext(), &workflowservice.ListSchedulesRequest{ diff --git a/tests/signal_workflow_test.go b/tests/signal_workflow_test.go index b47f699a44a..3933ba110f4 100644 --- a/tests/signal_workflow_test.go +++ b/tests/signal_workflow_test.go @@ -332,7 +332,7 @@ func (s *SignalWorkflowTestSuite) TestSignalWorkflow_DuplicateRequest() { signalName := "my signal" signalInput := payloads.EncodeString("my signal input") requestID := uuid.New() - signalReqest := &workflowservice.SignalWorkflowExecutionRequest{ + signalRequest := &workflowservice.SignalWorkflowExecutionRequest{ Namespace: s.Namespace().String(), WorkflowExecution: &commonpb.WorkflowExecution{ WorkflowId: id, @@ -343,7 +343,7 @@ func (s *SignalWorkflowTestSuite) TestSignalWorkflow_DuplicateRequest() { Identity: identity, RequestId: requestID, } - _, err = s.FrontendClient().SignalWorkflowExecution(testcore.NewContext(), signalReqest) + _, err = s.FrontendClient().SignalWorkflowExecution(testcore.NewContext(), signalRequest) s.NoError(err) // Process signal in workflow @@ -359,7 +359,7 @@ func (s *SignalWorkflowTestSuite) TestSignalWorkflow_DuplicateRequest() { s.Equal(1, numOfSignaledEvent) // Send another signal with same request id - _, err = s.FrontendClient().SignalWorkflowExecution(testcore.NewContext(), signalReqest) + _, err = s.FrontendClient().SignalWorkflowExecution(testcore.NewContext(), signalRequest) s.NoError(err) // Process signal in workflow diff --git a/tests/task_queue_stats_test.go b/tests/task_queue_stats_test.go index 63d509b320e..cf240987e6a 100644 --- a/tests/task_queue_stats_test.go +++ b/tests/task_queue_stats_test.go @@ -356,7 +356,7 @@ func (s *TaskQueueStatsSuite) enqueueActivitiesForEachWorkflow(sets int, tqName TaskQueue: &taskqueuepb.TaskQueue{Name: tqName, Kind: enumspb.TASK_QUEUE_KIND_NORMAL}, StartToCloseTimeout: durationpb.New(time.Minute), RequestEagerExecution: false, - // Priority is inherted from the workflow + // Priority is inherited from the workflow }, }, }, diff --git a/tests/testcore/functional_test_base.go b/tests/testcore/functional_test_base.go index 4b38791e668..1cb4d19478f 100644 --- a/tests/testcore/functional_test_base.go +++ b/tests/testcore/functional_test_base.go @@ -415,14 +415,14 @@ func (s *FunctionalTestBase) TearDownCluster() { } } -// **IMPORTANT**: When overridding this, make sure to invoke `s.FunctionalTestBase.TearDownTest()`. +// TearDownTest tears down the test environment. **IMPORTANT**: When overriding this, make sure to invoke `s.FunctionalTestBase.TearDownTest()`. func (s *FunctionalTestBase) TearDownTest() { s.exportOTELTraces() s.tearDownSdk() s.testCluster.host.grpcClientInterceptor.Set(nil) } -// **IMPORTANT**: When overridding this, make sure to invoke `s.FunctionalTestBase.TearDownSubTest()`. +// TearDownSubTest tears down the sub-test environment. **IMPORTANT**: When overriding this, make sure to invoke `s.FunctionalTestBase.TearDownSubTest()`. 
func (s *FunctionalTestBase) TearDownSubTest() { s.exportOTELTraces() } diff --git a/tests/testcore/test_data_converter.go b/tests/testcore/test_data_converter.go index 22d60191a6e..5cbedbbbf75 100644 --- a/tests/testcore/test_data_converter.go +++ b/tests/testcore/test_data_converter.go @@ -21,7 +21,7 @@ type TestDataConverter struct { NumOfCallFromPayloads int } -// TODO (alex): use it by default SdkCleint everywhere? +// NewTestDataConverter creates a new test data converter. TODO (alex): use it by default SdkClient everywhere? func NewTestDataConverter() converter.DataConverter { return &TestDataConverter{} } diff --git a/tests/versioning_3_test.go b/tests/versioning_3_test.go index 5ddcfb0cbc2..5c973ab7363 100644 --- a/tests/versioning_3_test.go +++ b/tests/versioning_3_test.go @@ -120,7 +120,7 @@ func (s *Versioning3Suite) SetupSuite() { // Use new matcher for versioning tests. Ideally we would run everything with old and new, // but for now we pick a subset of tests. Versioning tests exercise the most features of - // matching so they're a good condidate. + // matching so they're a good candidate. dynamicconfig.MatchingUseNewMatcher.Key(): true, } s.FunctionalTestBase.SetupSuiteWithCluster(testcore.WithDynamicConfigOverrides(dynamicConfigOverrides)) diff --git a/tests/versioning_test.go b/tests/versioning_test.go index cb7003a2a2e..310f67f9e43 100644 --- a/tests/versioning_test.go +++ b/tests/versioning_test.go @@ -101,7 +101,7 @@ func (s *VersioningIntegSuite) SetupSuite() { // Use new matcher for versioning tests. Ideally we would run everything with old and new, // but for now we pick a subset of tests. Versioning tests exercise the most features of - // matching so they're a good condidate. + // matching so they're a good candidate. dynamicconfig.MatchingUseNewMatcher.Key(): true, } s.FunctionalTestBase.SetupSuiteWithCluster(testcore.WithDynamicConfigOverrides(dynamicConfigOverrides)) diff --git a/tests/worker_deployment_test.go b/tests/worker_deployment_test.go index f0f4b526740..fd8c95a08e7 100644 --- a/tests/worker_deployment_test.go +++ b/tests/worker_deployment_test.go @@ -1516,7 +1516,7 @@ func (s *WorkerDeploymentSuite) TestSetCurrentVersion_Batching() { setCurrentUpdateTime := timestamppb.Now() s.setCurrentVersion(ctx, tv, true, "") - // verify the current version has propogated to all the registered task-queues userData + // verify the current version has propagated to all the registered task-queues userData for i := 0; i < taskQueues; i++ { s.verifyTaskQueueVersioningInfo(ctx, tv.WithTaskQueueNumber(i).TaskQueue(), tv.DeploymentVersionString(), "", 0) } diff --git a/tests/workflow_failures_test.go b/tests/workflow_failures_test.go index ad808440b92..a29ea33129b 100644 --- a/tests/workflow_failures_test.go +++ b/tests/workflow_failures_test.go @@ -103,7 +103,7 @@ ListClosedLoop: s.NoError(err3) closedCount = len(resp.Executions) if closedCount == 0 { - s.Logger.Info("Closed WorkflowExecution is not yet visibile") + s.Logger.Info("Closed WorkflowExecution is not yet visible") time.Sleep(1000 * time.Millisecond) //nolint:forbidigo continue ListClosedLoop } diff --git a/tests/workflow_reset_test.go b/tests/workflow_reset_test.go index 3ec60ee153f..51e42ced22a 100644 --- a/tests/workflow_reset_test.go +++ b/tests/workflow_reset_test.go @@ -143,7 +143,7 @@ func (s *WorkflowResetSuite) TestDifferentBaseCurrentClosed() { s.assertMutableStateStatus(ctx, workflowID, currentRunID, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED) } -// Base is reset multuple times. 
Assert that each time it point to the new run.
+// Base is reset multiple times. Assert that each time it points to the new run.
 func (s *WorkflowResetSuite) TestRepeatedResets() {
 	workflowID := "test-reset" + uuid.NewString()
 	ctx := testcore.NewContext()
diff --git a/tests/xdc/history_replication_signals_and_updates_test.go b/tests/xdc/history_replication_signals_and_updates_test.go
index 0bc18c2aaeb..b3a14a1dcda 100644
--- a/tests/xdc/history_replication_signals_and_updates_test.go
+++ b/tests/xdc/history_replication_signals_and_updates_test.go
@@ -1167,7 +1167,7 @@ func (s *hrsuTestSuite) TestConflictResolutionGetResult() {
 	t.cluster1.executeHistoryReplicationTasksUntil(enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED)
 	s.EqualValues(t.cluster1.getHistory(ctx), t.cluster2.getHistory(ctx))
 
-	// Complete the workflow in cluster2. This will cause the workflow result to be sent to cluste1.
+	// Complete the workflow in cluster2. This will cause the workflow result to be sent to cluster1.
 	task, err := t.cluster2.testCluster.FrontendClient().PollWorkflowTaskQueue(ctx, &workflowservice.PollWorkflowTaskQueueRequest{
 		Namespace: t.tv.NamespaceName().String(),
 		TaskQueue: t.tv.TaskQueue(),
diff --git a/tests/xdc/nexus_state_replication_test.go b/tests/xdc/nexus_state_replication_test.go
index 23171d4e67a..4f7cbd7cdf7 100644
--- a/tests/xdc/nexus_state_replication_test.go
+++ b/tests/xdc/nexus_state_replication_test.go
@@ -85,7 +85,7 @@ func (s *NexusStateReplicationSuite) TearDownSuite() {
 }
 
 // TestNexusOperationEventsReplicated tests that nexus related operation events and state updates are replicated
-// across clusters and that the operation machinary functions as expected when failover happens.
+// across clusters and that the operation machinery functions as expected when failover happens.
 // General outline:
 // 1. Start two clusters, cluster1 set to active, cluster2 set to standby.
 // 2. Start a workflow on cluster1.
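The BuildIdScavanger-to-BuildIDScavenger rename above touches every call site that starts the scavenger workflow, including the hunk that follows. A minimal sketch of what such a caller looks like after the rename (the helper name, import paths, and client wiring are illustrative assumptions; only the build_ids identifiers come from this diff):

package example

import (
	"context"

	sdkclient "go.temporal.io/sdk/client"
	"go.temporal.io/server/service/worker/scanner/build_ids"
)

// startBuildIDScavenger is a hypothetical helper showing the renamed identifiers in use.
func startBuildIDScavenger(ctx context.Context, c sdkclient.Client) error {
	// Start the scavenger workflow by its registered name on its dedicated task queue.
	_, err := c.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{
		ID:        build_ids.BuildIdScavengerWFID,
		TaskQueue: build_ids.BuildIdScavengerTaskQueueName,
	}, build_ids.BuildIDScavengerWorkflowName, build_ids.BuildIDScavengerInput{
		IgnoreRetentionTime: true,
	})
	return err
}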
diff --git a/tests/xdc/user_data_replication_test.go b/tests/xdc/user_data_replication_test.go
index 61f3fc659d0..a630d22e094 100644
--- a/tests/xdc/user_data_replication_test.go
+++ b/tests/xdc/user_data_replication_test.go
@@ -550,7 +550,7 @@ func (s *UserDataReplicationTestSuite) TestUserDataTombstonesAreReplicated() {
 		ID: workflowID,
 		TaskQueue: build_ids.BuildIdScavengerTaskQueueName,
 		WorkflowRunTimeout: time.Second * 30,
-	}, build_ids.BuildIdScavangerWorkflowName, build_ids.BuildIdScavangerInput{
+	}, build_ids.BuildIDScavengerWorkflowName, build_ids.BuildIDScavengerInput{
 		IgnoreRetentionTime: true,
 	})
 	s.NoError(err)
diff --git a/tools/cassandra/cqlclient.go b/tools/cassandra/cqlclient.go
index aeb4040e447..36ca546a4a6 100644
--- a/tools/cassandra/cqlclient.go
+++ b/tools/cassandra/cqlclient.go
@@ -185,7 +185,7 @@ func (client *cqlClient) ReadSchemaVersion() (string, error) {
 	return version, nil
 }
 
-// UpdateShemaVersion updates the schema version for the Keyspace
+// UpdateSchemaVersion updates the schema version for the Keyspace
 func (client *cqlClient) UpdateSchemaVersion(newVersion string, minCompatibleVersion string) error {
 	query := client.session.Query(writeSchemaVersionCQL, client.keyspace, time.Now().UTC(), newVersion, minCompatibleVersion)
 	return query.Exec()
diff --git a/tools/tdbg/app.go b/tools/tdbg/app.go
index 1b9c8efc4c3..966f866f63b 100644
--- a/tools/tdbg/app.go
+++ b/tools/tdbg/app.go
@@ -115,7 +115,7 @@ func NewCliApp(opts ...Option) *cli.App {
 		case "never":
 			color.NoColor = true
 		default:
-			// fatih/color will inspect the enviroment and terminal and set a reasonable default.
+			// fatih/color will inspect the environment and terminal and set a reasonable default.
 		}
 		return nil
 	}
diff --git a/tools/tdbg/commands.go b/tools/tdbg/commands.go
index 3715b2a0ab4..822bfb131a2 100644
--- a/tools/tdbg/commands.go
+++ b/tools/tdbg/commands.go
@@ -46,7 +46,7 @@ func AdminShowWorkflow(c *cli.Context, clientFactory ClientFactory) error {
 	rid := c.String(FlagRunID)
 	startEventId := c.Int64(FlagMinEventID)
 	endEventId := c.Int64(FlagMaxEventID)
-	startEventVerion := int64(c.Int(FlagMinEventVersion))
+	startEventVersion := int64(c.Int(FlagMinEventVersion))
 	endEventVersion := int64(c.Int(FlagMaxEventVersion))
 
 	outputFileName := c.String(FlagOutputFilename)
@@ -73,7 +73,7 @@ func AdminShowWorkflow(c *cli.Context, clientFactory ClientFactory) error {
 			},
 			StartEventId: startEventId,
 			EndEventId: endEventId,
-			StartEventVersion: startEventVerion,
+			StartEventVersion: startEventVersion,
 			EndEventVersion: endEventVersion,
 			MaximumPageSize: 100,
 			NextPageToken: token,
diff --git a/tools/testrunner/testrunner.go b/tools/testrunner/testrunner.go
index dc4421e8a21..621812a947c 100644
--- a/tools/testrunner/testrunner.go
+++ b/tools/testrunner/testrunner.go
@@ -233,7 +233,7 @@ func (r *runner) runTests(ctx context.Context, args []string) {
 		log.Fatal(err)
 	}
 
-	// If the run completely successfull, no need to retry.
+	// If the run was completely successful, no need to retry.
 	if currentAttempt.exitErr == nil {
 		break
 	}
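The testrunner hunk above only rewords a comment, but the control flow it describes is a bounded retry loop: rerun until an attempt exits cleanly, then stop. A standalone sketch of that shape, assuming hypothetical names rather than the real testrunner fields:

package main

import (
	"errors"
	"fmt"
)

// runOnce stands in for a single test-suite invocation; hypothetical.
func runOnce(attempt int) error {
	if attempt < 3 {
		return errors.New("some tests failed")
	}
	return nil
}

func main() {
	const maxAttempts = 5
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		err := runOnce(attempt)
		if err == nil {
			// The run was completely successful, no need to retry.
			fmt.Printf("attempt %d succeeded\n", attempt)
			break
		}
		fmt.Printf("attempt %d failed: %v; retrying\n", attempt, err)
	}
}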