
Commit 79fa94e

Apply deprecations from Accelerate (#3714)
Apply deprecations
1 parent: a06317a

19 files changed: 61 additions & 51 deletions
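Every file in this commit applies the same migration: the deprecated logging_dir keyword is removed from the Accelerator(...) call, and the output and logging directories move onto ProjectConfiguration, which is handed to Accelerator via project_config. A minimal sketch of the pattern in isolation, with placeholder values standing in for the scripts' args.output_dir, args.logging_dir, and args.checkpoints_total_limit:

from pathlib import Path

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

output_dir = "my-training-run"          # placeholder for args.output_dir
logging_dir = Path(output_dir, "logs")  # placeholder for args.logging_dir

# Before (deprecated in Accelerate):
#   accelerator = Accelerator(log_with="tensorboard", logging_dir=logging_dir)

# After: directories and the checkpoint cap live on ProjectConfiguration.
accelerator_project_config = ProjectConfiguration(
    total_limit=3,  # placeholder for args.checkpoints_total_limit
    project_dir=output_dir,
    logging_dir=logging_dir,
)
accelerator = Accelerator(
    log_with="tensorboard",  # requires the chosen tracker to be installed
    project_config=accelerator_project_config,
)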

examples/controlnet/train_controlnet.py

Lines changed: 3 additions & 2 deletions
@@ -716,13 +716,14 @@ def collate_fn(examples):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/custom_diffusion/train_custom_diffusion.py

Lines changed: 3 additions & 2 deletions
@@ -637,13 +637,14 @@ def parse_args(input_args=None):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth.py

Lines changed: 3 additions & 2 deletions
@@ -771,13 +771,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth_lora.py

Lines changed: 3 additions & 2 deletions
@@ -653,13 +653,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/instruct_pix2pix/train_instruct_pix2pix.py

Lines changed: 3 additions & 2 deletions
@@ -387,12 +387,13 @@ def main():
             ),
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py

Lines changed: 3 additions & 2 deletions
@@ -405,13 +405,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)

-    project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py

Lines changed: 3 additions & 2 deletions
@@ -404,13 +404,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py

Lines changed: 4 additions & 4 deletions
@@ -13,7 +13,7 @@
 import torch.utils.checkpoint
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import create_repo, upload_folder

 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
@@ -363,12 +363,12 @@ def freeze_params(params):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with="tensorboard",
-        logging_dir=logging_dir,
+        log_with=args.report_to,
+        project_config=accelerator_project_config,
     )

     # If passed along, set the training seed now.
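Besides the ProjectConfiguration change, this script (and the ONNX unconditional-image-generation example further down) also swaps its hard-coded tracker for log_with=args.report_to, matching the other training examples. A hedged sketch of the CLI flag this assumes; the actual definition in the script's parse_args() may differ:

parser.add_argument(
    "--report_to",
    type=str,
    default="tensorboard",
    help=(
        "The integration to report the results and logs to. Supported platforms are"
        ' "tensorboard" (default), "wandb" and "comet_ml". Use "all" to report to all integrations.'
    ),
)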

examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py

Lines changed: 4 additions & 2 deletions
@@ -12,7 +12,7 @@
 import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import HfFolder, Repository, whoami
 from neural_compressor.utils import logger
 from packaging import version
@@ -458,11 +458,13 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)

+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
+
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
+        project_config=accelerator_project_config,
     )

     # If passed along, set the training seed now.

examples/research_projects/lora/train_text_to_image_lora.py

Lines changed: 3 additions & 2 deletions
@@ -394,13 +394,14 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":

examples/research_projects/mulit_token_textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
@@ -549,14 +549,14 @@ def __getitem__(self, i):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py

Lines changed: 3 additions & 4 deletions
@@ -464,14 +464,13 @@ def __getitem__(self, index):

 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
-
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py

Lines changed: 3 additions & 3 deletions
@@ -422,14 +422,14 @@ def main():
             ),
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
@@ -562,14 +562,14 @@ def __getitem__(self, i):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py

Lines changed: 4 additions & 4 deletions
@@ -289,14 +289,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:

 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with=args.logger,
-        logging_dir=logging_dir,
+        log_with=args.report_to,
         project_config=accelerator_project_config,
     )

examples/text_to_image/train_text_to_image.py

Lines changed: 3 additions & 2 deletions
@@ -427,13 +427,14 @@ def main():
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/text_to_image/train_text_to_image_lora.py

Lines changed: 4 additions & 3 deletions
@@ -366,15 +366,16 @@ def parse_args():

 def main():
     args = parse_args()
-    logging_dir = os.path.join(args.output_dir, args.logging_dir)
+    logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":

examples/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 4 deletions
@@ -566,14 +566,13 @@ def __getitem__(self, i):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
-
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/unconditional_image_generation/train_unconditional.py

Lines changed: 3 additions & 3 deletions
@@ -287,14 +287,14 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:

 def main(args):
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.logger,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
