
Commit 79fa94e

Apply deprecations from Accelerate (#3714)
Apply deprecations
1 parent a06317a commit 79fa94e
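
This commit tracks Accelerate's deprecation of the logging_dir argument on Accelerator: directory settings now live on ProjectConfiguration, which is passed in via project_config. A minimal before/after sketch of the pattern applied throughout the diffs below (the directory values are placeholders, not taken from this repo):

from pathlib import Path

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

output_dir = "my-training-run"          # placeholder
logging_dir = Path(output_dir, "logs")  # placeholder

# Deprecated: the log directory used to be passed straight to Accelerator.
# accelerator = Accelerator(log_with="tensorboard", logging_dir=logging_dir)

# Current: directories are grouped on ProjectConfiguration.
project_config = ProjectConfiguration(project_dir=output_dir, logging_dir=logging_dir)
accelerator = Accelerator(log_with="tensorboard", project_config=project_config)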

19 files changed (+61, -51 lines)

examples/controlnet/train_controlnet.py

Lines changed: 3 additions & 2 deletions
@@ -716,13 +716,14 @@ def collate_fn(examples):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
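
Each of these scripts keeps total_limit=args.checkpoints_total_limit while adding the two directory fields. In Accelerate, total_limit caps how many saved states ProjectConfiguration retains when automatic checkpoint naming is used; a hedged sketch of that mechanism under stated assumptions (the project_dir value is a placeholder):

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Keep at most 3 saved states; with automatic_checkpoint_naming enabled,
# Accelerate prunes the oldest checkpoints beyond this limit.
config = ProjectConfiguration(
    project_dir="my-training-run",  # placeholder
    automatic_checkpoint_naming=True,
    total_limit=3,
)
accelerator = Accelerator(project_config=config)
accelerator.save_state()  # writes my-training-run/checkpoints/checkpoint_0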

examples/custom_diffusion/train_custom_diffusion.py

Lines changed: 3 additions & 2 deletions
@@ -637,13 +637,14 @@ def parse_args(input_args=None):
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth.py

Lines changed: 3 additions & 2 deletions
@@ -771,13 +771,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/dreambooth/train_dreambooth_lora.py

Lines changed: 3 additions & 2 deletions
@@ -653,13 +653,14 @@ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_atte
 def main(args):
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/instruct_pix2pix/train_instruct_pix2pix.py

Lines changed: 3 additions & 2 deletions
@@ -387,12 +387,13 @@ def main():
         ),
     )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py

Lines changed: 3 additions & 2 deletions
@@ -405,13 +405,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)

-    project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=project_config,
     )

examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py

Lines changed: 3 additions & 2 deletions
@@ -404,13 +404,14 @@ def main():
     args = parse_args()
     logging_dir = Path(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )

examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py

Lines changed: 4 additions & 4 deletions
@@ -13,7 +13,7 @@
 import torch.utils.checkpoint
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import create_repo, upload_folder

 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
@@ -363,12 +363,12 @@ def freeze_params(params):
 def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
-        log_with="tensorboard",
-        logging_dir=logging_dir,
+        log_with=args.report_to,
+        project_config=accelerator_project_config,
     )

     # If passed along, set the training seed now.
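
Besides the import and ProjectConfiguration changes, this script also swaps the hard-coded log_with="tensorboard" for log_with=args.report_to, matching the other examples. log_with accepts a single tracker name, a list of names, or "all"; a small sketch of the flag wiring (the argparse setup here is illustrative, assuming a --report_to flag like the one these scripts already parse):

import argparse

from accelerate import Accelerator

parser = argparse.ArgumentParser()
# e.g. "tensorboard", "wandb", or "all" to log to every available tracker
parser.add_argument("--report_to", type=str, default="tensorboard")
args = parser.parse_args()

accelerator = Accelerator(log_with=args.report_to)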

examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py

Lines changed: 4 additions & 2 deletions
@@ -12,7 +12,7 @@
 import torch.nn.functional as F
 import torch.utils.checkpoint
 from accelerate import Accelerator
-from accelerate.utils import set_seed
+from accelerate.utils import ProjectConfiguration, set_seed
 from huggingface_hub import HfFolder, Repository, whoami
 from neural_compressor.utils import logger
 from packaging import version
@@ -458,11 +458,13 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)

+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
+
     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with="tensorboard",
-        logging_dir=logging_dir,
+        project_config=accelerator_project_config,
     )

     # If passed along, set the training seed now.

examples/research_projects/lora/train_text_to_image_lora.py

Lines changed: 3 additions & 2 deletions
@@ -394,13 +394,14 @@ def main():
     args = parse_args()
     logging_dir = os.path.join(args.output_dir, args.logging_dir)

-    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
+    accelerator_project_config = ProjectConfiguration(
+        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
+    )

     accelerator = Accelerator(
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
         project_config=accelerator_project_config,
     )
     if args.report_to == "wandb":
