Skip to content

Commit 7681dce

Browse files
authored
Introducing migrationProjectId and decoupling Spanner projectId (#802)
* Introducing migrationProjectId and decoupling Spanner projectId * Test file changes * Addressing comments and adding UI side changes * Fixing cleanup command * reverting whitespace changes * Not using project id from target profile * documentation changes * Addressing comments and correcting LaunchDataflowJob method
1 parent c5e5b2e commit 7681dce

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+337
-252
lines changed

cmd/cleanup.go

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,12 @@ import (
2020
"os"
2121
"path"
2222

23+
"github.com/GoogleCloudPlatform/spanner-migration-tool/common/utils"
2324
"github.com/GoogleCloudPlatform/spanner-migration-tool/logger"
2425
"github.com/GoogleCloudPlatform/spanner-migration-tool/profiles"
2526
"github.com/GoogleCloudPlatform/spanner-migration-tool/streaming"
2627
"github.com/google/subcommands"
28+
"go.uber.org/zap"
2729
)
2830

2931
// CleanupCmd is the command for cleaning up the migration resources during a minimal
@@ -108,7 +110,13 @@ func (cmd *CleanupCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interf
108110
Pubsub: cmd.pubsub,
109111
Monitoring: cmd.monitoring,
110112
}
113+
getInfo := &utils.GetUtilInfoImpl{}
114+
migrationProjectId, err := getInfo.GetProject()
115+
if err != nil {
116+
logger.Log.Error("Could not get project id from gcloud environment. Inferring migration project id from target profile.", zap.Error(err))
117+
migrationProjectId = project
118+
}
111119
logger.Log.Info(fmt.Sprintf("Initiating job cleanup for jobId: %v \n", cmd.jobId))
112-
streaming.InitiateJobCleanup(ctx, cmd.jobId, dataShardIds, jobCleanupOptions, project, instance)
120+
streaming.InitiateJobCleanup(ctx, cmd.jobId, dataShardIds, jobCleanupOptions, migrationProjectId, project, instance)
113121
return subcommands.ExitSuccess
114122
}

cmd/data.go

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ type DataCmd struct {
4848
targetProfile string
4949
sessionJSON string
5050
filePrefix string // TODO: move filePrefix to global flags
51+
project string
5152
WriteLimit int64
5253
dryRun bool
5354
logLevel string
@@ -84,6 +85,7 @@ func (cmd *DataCmd) SetFlags(f *flag.FlagSet) {
8485
f.StringVar(&cmd.target, "target", "Spanner", "Specifies the target DB, defaults to Spanner (accepted values: `Spanner`)")
8586
f.StringVar(&cmd.targetProfile, "target-profile", "", "Flag for specifying connection profile for target database e.g., \"dialect=postgresql\"")
8687
f.StringVar(&cmd.filePrefix, "prefix", "", "File prefix for generated files")
88+
f.StringVar(&cmd.project, "project", "", "Flag specifying default project id for all the generated resources for the migration")
8789
f.Int64Var(&cmd.WriteLimit, "write-limit", DefaultWritersLimit, "Write limit for writes to spanner")
8890
f.BoolVar(&cmd.dryRun, "dry-run", false, "Flag for generating DDL and schema conversion report without creating a spanner database")
8991
f.StringVar(&cmd.logLevel, "log-level", "DEBUG", "Configure the logging level for the command (INFO, DEBUG), defaults to DEBUG")
@@ -114,6 +116,14 @@ func (cmd *DataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface
114116
err = fmt.Errorf("error while preparing prerequisites for migration: %v", err)
115117
return subcommands.ExitUsageError
116118
}
119+
if cmd.project == "" {
120+
getInfo := &utils.GetUtilInfoImpl{}
121+
cmd.project, err = getInfo.GetProject()
122+
if err != nil {
123+
logger.Log.Error("Could not get project id from gcloud environment or --project flag. Either pass the projectId in the --project flag or configure in gcloud CLI using gcloud config set", zap.Error(err))
124+
return subcommands.ExitUsageError
125+
}
126+
}
117127
var (
118128
bw *writer.BatchWriter
119129
banner string
@@ -149,7 +159,7 @@ func (cmd *DataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface
149159
)
150160
if !cmd.dryRun {
151161
now := time.Now()
152-
bw, err = MigrateDatabase(ctx, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
162+
bw, err = MigrateDatabase(ctx, cmd.project, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
153163
if err != nil {
154164
err = fmt.Errorf("can't finish database migration for db %s: %v", dbName, err)
155165
return subcommands.ExitFailure
@@ -159,14 +169,14 @@ func (cmd *DataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface
159169
conv.Audit.DryRun = true
160170
// If migration type is Minimal Downtime, validate if required resources can be generated
161171
if !conv.UI && sourceProfile.Driver == constants.MYSQL && sourceProfile.Ty == profiles.SourceProfileTypeConfig && sourceProfile.Config.ConfigType == constants.DATAFLOW_MIGRATION {
162-
err := ValidateResourceGenerationHelper(ctx, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
172+
err := ValidateResourceGenerationHelper(ctx, cmd.project, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
163173
if err != nil {
164174
return subcommands.ExitFailure
165175
}
166176
}
167177

168178
convImpl := &conversion.ConvImpl{}
169-
bw, err = convImpl.DataConv(ctx, sourceProfile, targetProfile, &ioHelper, nil, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
179+
bw, err = convImpl.DataConv(ctx, cmd.project, sourceProfile, targetProfile, &ioHelper, nil, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
170180

171181
if err != nil {
172182
err = fmt.Errorf("can't finish data conversion for db %s: %v", dbName, err)

cmd/schema.go

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ type SchemaCmd struct {
4141
target string
4242
targetProfile string
4343
filePrefix string // TODO: move filePrefix to global flags
44+
project string
4445
logLevel string
4546
dryRun bool
4647
validate bool
@@ -74,6 +75,7 @@ func (cmd *SchemaCmd) SetFlags(f *flag.FlagSet) {
7475
f.StringVar(&cmd.target, "target", "Spanner", "Specifies the target DB, defaults to Spanner (accepted values: `Spanner`)")
7576
f.StringVar(&cmd.targetProfile, "target-profile", "", "Flag for specifying connection profile for target database e.g., \"dialect=postgresql\"")
7677
f.StringVar(&cmd.filePrefix, "prefix", "", "File prefix for generated files")
78+
f.StringVar(&cmd.project, "project", "", "Flag specifying default project id for all the generated resources for the migration")
7779
f.StringVar(&cmd.logLevel, "log-level", "DEBUG", "Configure the logging level for the command (INFO, DEBUG), defaults to DEBUG")
7880
f.BoolVar(&cmd.dryRun, "dry-run", false, "Flag for generating DDL and schema conversion report without creating a spanner database")
7981
f.BoolVar(&cmd.validate, "validate", false, "Flag for validating if all the required input parameters are present")
@@ -100,6 +102,14 @@ func (cmd *SchemaCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interfa
100102
err = fmt.Errorf("error while preparing prerequisites for migration: %v", err)
101103
return subcommands.ExitUsageError
102104
}
105+
if cmd.project == "" {
106+
getInfo := &utils.GetUtilInfoImpl{}
107+
cmd.project, err = getInfo.GetProject()
108+
if err != nil {
109+
logger.Log.Error("Could not get project id from gcloud environment or --project flag. Either pass the projectId in the --project flag or configure in gcloud CLI using gcloud config set", zap.Error(err))
110+
return subcommands.ExitUsageError
111+
}
112+
}
103113

104114
if cmd.validate {
105115
return subcommands.ExitSuccess
@@ -113,7 +123,7 @@ func (cmd *SchemaCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interfa
113123
schemaConversionStartTime := time.Now()
114124
var conv *internal.Conv
115125
convImpl := &conversion.ConvImpl{}
116-
conv, err = convImpl.SchemaConv(sourceProfile, targetProfile, &ioHelper, &conversion.SchemaFromSourceImpl{})
126+
conv, err = convImpl.SchemaConv(cmd.project, sourceProfile, targetProfile, &ioHelper, &conversion.SchemaFromSourceImpl{})
117127
if err != nil {
118128
return subcommands.ExitFailure
119129
}
@@ -127,7 +137,7 @@ func (cmd *SchemaCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interfa
127137
conv.Audit.MigrationType = migration.MigrationData_SCHEMA_ONLY.Enum()
128138
conv.Audit.SkipMetricsPopulation = os.Getenv("SKIP_METRICS_POPULATION") == "true"
129139
if !cmd.dryRun {
130-
_, err = MigrateDatabase(ctx, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
140+
_, err = MigrateDatabase(ctx, cmd.project, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
131141
if err != nil {
132142
err = fmt.Errorf("can't finish database migration for db %s: %v", dbName, err)
133143
return subcommands.ExitFailure

cmd/schema_and_data.go

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ type SchemaAndDataCmd struct {
4444
targetProfile string
4545
SkipForeignKeys bool
4646
filePrefix string // TODO: move filePrefix to global flags
47+
project string
4748
WriteLimit int64
4849
dryRun bool
4950
logLevel string
@@ -79,6 +80,7 @@ func (cmd *SchemaAndDataCmd) SetFlags(f *flag.FlagSet) {
7980
f.StringVar(&cmd.targetProfile, "target-profile", "", "Flag for specifying connection profile for target database e.g., \"dialect=postgresql\"")
8081
f.BoolVar(&cmd.SkipForeignKeys, "skip-foreign-keys", false, "Skip creating foreign keys after data migration is complete (ddl statements for foreign keys can still be found in the downloaded schema.ddl.txt file and the same can be applied separately)")
8182
f.StringVar(&cmd.filePrefix, "prefix", "", "File prefix for generated files")
83+
f.StringVar(&cmd.project, "project", "", "Flag specifying default project id for all the generated resources for the migration")
8284
f.Int64Var(&cmd.WriteLimit, "write-limit", DefaultWritersLimit, "Write limit for writes to spanner")
8385
f.BoolVar(&cmd.dryRun, "dry-run", false, "Flag for generating DDL and schema conversion report without creating a spanner database")
8486
f.StringVar(&cmd.logLevel, "log-level", "DEBUG", "Configure the logging level for the command (INFO, DEBUG), defaults to DEBUG")
@@ -106,6 +108,14 @@ func (cmd *SchemaAndDataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...
106108
err = fmt.Errorf("error while preparing prerequisites for migration: %v", err)
107109
return subcommands.ExitUsageError
108110
}
111+
if cmd.project == "" {
112+
getInfo := &utils.GetUtilInfoImpl{}
113+
cmd.project, err = getInfo.GetProject()
114+
if err != nil {
115+
logger.Log.Error("Could not get project id from gcloud environment or --project flag. Either pass the projectId in the --project flag or configure in gcloud CLI using gcloud config set", zap.Error(err))
116+
return subcommands.ExitUsageError
117+
}
118+
}
109119
if cmd.validate {
110120
return subcommands.ExitSuccess
111121
}
@@ -123,7 +133,7 @@ func (cmd *SchemaAndDataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...
123133
dbURI string
124134
)
125135
convImpl := &conversion.ConvImpl{}
126-
conv, err = convImpl.SchemaConv(sourceProfile, targetProfile, &ioHelper, &conversion.SchemaFromSourceImpl{})
136+
conv, err = convImpl.SchemaConv(cmd.project, sourceProfile, targetProfile, &ioHelper, &conversion.SchemaFromSourceImpl{})
127137
if err != nil {
128138
panic(err)
129139
}
@@ -141,7 +151,7 @@ func (cmd *SchemaAndDataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...
141151
reportImpl := conversion.ReportImpl{}
142152
if !cmd.dryRun {
143153
reportImpl.GenerateReport(sourceProfile.Driver, nil, ioHelper.BytesRead, "", conv, cmd.filePrefix, dbName, ioHelper.Out)
144-
bw, err = MigrateDatabase(ctx, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
154+
bw, err = MigrateDatabase(ctx, cmd.project, targetProfile, sourceProfile, dbName, &ioHelper, cmd, conv, nil)
145155
if err != nil {
146156
err = fmt.Errorf("can't finish database migration for db %s: %v", dbName, err)
147157
return subcommands.ExitFailure
@@ -156,14 +166,14 @@ func (cmd *SchemaAndDataCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...
156166
conv.Audit.SchemaConversionDuration = schemaCoversionEndTime.Sub(schemaConversionStartTime)
157167
// If migration type is Minimal Downtime, validate if required resources can be generated
158168
if !conv.UI && sourceProfile.Driver == constants.MYSQL && sourceProfile.Ty == profiles.SourceProfileTypeConfig && sourceProfile.Config.ConfigType == constants.DATAFLOW_MIGRATION {
159-
err := ValidateResourceGenerationHelper(ctx, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
169+
err := ValidateResourceGenerationHelper(ctx, cmd.project, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
160170
if err != nil {
161171
logger.Log.Error(err.Error())
162172
return subcommands.ExitFailure
163173
}
164174
}
165175

166-
bw, err = convImpl.DataConv(ctx, sourceProfile, targetProfile, &ioHelper, nil, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
176+
bw, err = convImpl.DataConv(ctx, cmd.project, sourceProfile, targetProfile, &ioHelper, nil, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
167177
if err != nil {
168178
err = fmt.Errorf("can't finish data conversion for db %s: %v", dbName, err)
169179
return subcommands.ExitFailure

cmd/utils.go

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ func PrepareMigrationPrerequisites(sourceProfileString, targetProfileString, sou
127127
}
128128

129129
// MigrateData creates database and populates data in it.
130-
func MigrateDatabase(ctx context.Context, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile, dbName string, ioHelper *utils.IOStreams, cmd interface{}, conv *internal.Conv, migrationError *error) (*writer.BatchWriter, error) {
130+
func MigrateDatabase(ctx context.Context, migrationProjectId string, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile, dbName string, ioHelper *utils.IOStreams, cmd interface{}, conv *internal.Conv, migrationError *error) (*writer.BatchWriter, error) {
131131
var (
132132
bw *writer.BatchWriter
133133
err error
@@ -148,9 +148,9 @@ func MigrateDatabase(ctx context.Context, targetProfile profiles.TargetProfile,
148148
case *SchemaCmd:
149149
err = migrateSchema(ctx, targetProfile, sourceProfile, ioHelper, conv, dbURI, adminClient)
150150
case *DataCmd:
151-
bw, err = migrateData(ctx, targetProfile, sourceProfile, ioHelper, conv, dbURI, adminClient, client, v)
151+
bw, err = migrateData(ctx, migrationProjectId, targetProfile, sourceProfile, ioHelper, conv, dbURI, adminClient, client, v)
152152
case *SchemaAndDataCmd:
153-
bw, err = migrateSchemaAndData(ctx, targetProfile, sourceProfile, ioHelper, conv, dbURI, adminClient, client, v)
153+
bw, err = migrateSchemaAndData(ctx, migrationProjectId, targetProfile, sourceProfile, ioHelper, conv, dbURI, adminClient, client, v)
154154
}
155155
if err != nil {
156156
err = fmt.Errorf("can't migrate database: %v", err)
@@ -176,7 +176,7 @@ func migrateSchema(ctx context.Context, targetProfile profiles.TargetProfile, so
176176
return nil
177177
}
178178

179-
func migrateData(ctx context.Context, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile,
179+
func migrateData(ctx context.Context, migrationProjectId string, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile,
180180
ioHelper *utils.IOStreams, conv *internal.Conv, dbURI string, adminClient *database.DatabaseAdminClient, client *sp.Client, cmd *DataCmd) (*writer.BatchWriter, error) {
181181
var (
182182
bw *writer.BatchWriter
@@ -193,14 +193,14 @@ func migrateData(ctx context.Context, targetProfile profiles.TargetProfile, sour
193193

194194
// If migration type is Minimal Downtime, validate if required resources can be generated
195195
if !conv.UI && sourceProfile.Driver == constants.MYSQL && sourceProfile.Ty == profiles.SourceProfileTypeConfig && sourceProfile.Config.ConfigType == constants.DATAFLOW_MIGRATION {
196-
err := ValidateResourceGenerationHelper(ctx, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
196+
err := ValidateResourceGenerationHelper(ctx, migrationProjectId, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
197197
if err != nil {
198198
return nil, err
199199
}
200200
}
201201

202202
c := &conversion.ConvImpl{}
203-
bw, err = c.DataConv(ctx, sourceProfile, targetProfile, ioHelper, client, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
203+
bw, err = c.DataConv(ctx, migrationProjectId, sourceProfile, targetProfile, ioHelper, client, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
204204

205205
if err != nil {
206206
err = fmt.Errorf("can't finish data conversion for db %s: %v", dbURI, err)
@@ -218,7 +218,7 @@ func migrateData(ctx context.Context, targetProfile profiles.TargetProfile, sour
218218
return bw, nil
219219
}
220220

221-
func migrateSchemaAndData(ctx context.Context, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile,
221+
func migrateSchemaAndData(ctx context.Context, migrationProjectId string, targetProfile profiles.TargetProfile, sourceProfile profiles.SourceProfile,
222222
ioHelper *utils.IOStreams, conv *internal.Conv, dbURI string, adminClient *database.DatabaseAdminClient, client *sp.Client, cmd *SchemaAndDataCmd) (*writer.BatchWriter, error) {
223223
spA := spanneraccessor.SpannerAccessorImpl{}
224224
adminClientImpl, err := spanneradmin.NewAdminClientImpl(ctx)
@@ -235,14 +235,14 @@ func migrateSchemaAndData(ctx context.Context, targetProfile profiles.TargetProf
235235

236236
// If migration type is Minimal Downtime, validate if required resources can be generated
237237
if !conv.UI && sourceProfile.Driver == constants.MYSQL && sourceProfile.Ty == profiles.SourceProfileTypeConfig && sourceProfile.Config.ConfigType == constants.DATAFLOW_MIGRATION {
238-
err := ValidateResourceGenerationHelper(ctx, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
238+
err := ValidateResourceGenerationHelper(ctx, migrationProjectId, targetProfile.Conn.Sp.Project, targetProfile.Conn.Sp.Instance, sourceProfile, conv)
239239
if err != nil {
240240
return nil, err
241241
}
242242
}
243243

244244
convImpl := &conversion.ConvImpl{}
245-
bw, err := convImpl.DataConv(ctx, sourceProfile, targetProfile, ioHelper, client, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
245+
bw, err := convImpl.DataConv(ctx, migrationProjectId, sourceProfile, targetProfile, ioHelper, client, conv, true, cmd.WriteLimit, &conversion.DataFromSourceImpl{})
246246

247247
if err != nil {
248248
err = fmt.Errorf("can't finish data conversion for db %s: %v", dbURI, err)
@@ -256,8 +256,8 @@ func migrateSchemaAndData(ctx context.Context, targetProfile profiles.TargetProf
256256
return bw, nil
257257
}
258258

259-
func ValidateResourceGenerationHelper(ctx context.Context, projectId string, instanceId string, sourceProfile profiles.SourceProfile, conv *internal.Conv) error {
260-
spClient, err:= spinstanceadmin.NewInstanceAdminClientImpl(ctx)
259+
func ValidateResourceGenerationHelper(ctx context.Context, migrationProjectId string, spannerProjectId string, instanceId string, sourceProfile profiles.SourceProfile, conv *internal.Conv) error {
260+
spClient, err := spinstanceadmin.NewInstanceAdminClientImpl(ctx)
261261
if err != nil {
262262
return err
263263
}
@@ -271,9 +271,9 @@ func ValidateResourceGenerationHelper(ctx context.Context, projectId string, ins
271271
}
272272
validateResource := conversion.NewValidateResourcesImpl(&spanneraccessor.SpannerAccessorImpl{}, spClient, &datastream_accessor.DatastreamAccessorImpl{},
273273
dsClient, &storageaccessor.StorageAccessorImpl{}, storageclient)
274-
err = validateResource.ValidateResourceGeneration(ctx, projectId, instanceId, sourceProfile, conv)
274+
err = validateResource.ValidateResourceGeneration(ctx, migrationProjectId, spannerProjectId, instanceId, sourceProfile, conv)
275275
if err != nil {
276276
return err
277277
}
278278
return nil
279-
}
279+
}

common/metrics/dashboard_components.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,11 +39,12 @@ var dashboardClient *dashboard.DashboardsClient
3939

4040
// MonitoringMetricsResources contains information required to create the monitoring dashboard
4141
type MonitoringMetricsResources struct {
42-
ProjectId string
42+
MigrationProjectId string
4343
DataflowJobId string
4444
DatastreamId string
4545
JobMetadataGcsBucket string
4646
PubsubSubscriptionId string
47+
SpannerProjectId string
4748
SpannerInstanceId string
4849
SpannerDatabaseId string
4950
ShardToShardResourcesMap map[string]internal.ShardResources
@@ -258,7 +259,7 @@ func createAggIndependentTopMetrics(resourceIds MonitoringMetricsResources) []*d
258259
func createAggIndependentBottomMetrics(resourceIds MonitoringMetricsResources) []*dashboardpb.MosaicLayout_Tile {
259260
shardToDashboardMappingText := ""
260261
for shardId, shardResource := range resourceIds.ShardToShardResourcesMap {
261-
shardUrl := fmt.Sprintf("https://console.cloud.google.com/monitoring/dashboards/builder/%v?project=%v", shardResource.MonitoringResources.DashboardName, resourceIds.ProjectId)
262+
shardUrl := fmt.Sprintf("https://console.cloud.google.com/monitoring/dashboards/builder/%v?project=%v", shardResource.MonitoringResources.DashboardName, resourceIds.MigrationProjectId)
262263
shardString := fmt.Sprintf("Shard [%s](%s)", shardId, shardUrl)
263264
if shardToDashboardMappingText == "" {
264265
shardToDashboardMappingText = shardString
@@ -399,7 +400,7 @@ func getCreateMonitoringDashboardRequest(
399400
Layout: &layout,
400401
}
401402
req := &dashboardpb.CreateDashboardRequest{
402-
Parent: "projects/" + resourceIds.ProjectId,
403+
Parent: "projects/" + resourceIds.MigrationProjectId,
403404
Dashboard: &db,
404405
}
405406
return req

0 commit comments

Comments
 (0)