Pull request (Closed): changes from 2 commits
examples/openvino/stable-diffusion/requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -2,4 +2,4 @@ accelerate
 diffusers
 torch~=1.13
 nncf @ git+https://github.com/openvinotoolkit/nncf.git
-tomesd @ git+https://github.com/AlexKoff88/tomesd/tree/openvino
+tomesd @ git+https://github.com/AlexKoff88/tomesd.git@openvino

Review thread on `torch~=1.13`:

Contributor: Is there anything blocking PyTorch 2.0?

Contributor (Author): I did not check it.
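Two notes on the requirement lines above. `torch~=1.13` is a compatible-release pin, equivalent to `torch>=1.13,<2.0`, which is why PyTorch 2.0 is currently excluded regardless of whether anything actually breaks. And pip cannot install from a GitHub `/tree/<branch>` web URL; a branch must be given as a direct reference of the form `git+<repo>.git@<ref>`, which is what this change fixes. A sketch of the resulting requirements file, with illustrative comments added:

accelerate
diffusers
torch~=1.13  # compatible release: >=1.13,<2.0; relaxing to 2.x would need testing
nncf @ git+https://github.com/openvinotoolkit/nncf.git
tomesd @ git+https://github.com/AlexKoff88/tomesd.git@openvino  # pinned to the openvino branch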
examples/openvino/stable-diffusion/train_text_to_image_qat.py (16 changes: 7 additions & 9 deletions)

@@ -223,7 +223,7 @@ def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
             param.data.copy_(s_param.data)
 
     def to(self, device=None, dtype=None) -> None:
-        r"""Move internal buffers of the ExponentialMovingAverage to `device`.
+        """Move internal buffers of the ExponentialMovingAverage to `device`.
 
         Args:
             device: like `device` argument to `torch.Tensor.to`
@@ -313,7 +313,7 @@ def parse_args():
         type=str,
         default=None,
         choices=["DDIM", "DDPM", "LMSDiscrete"],
-        help="The noise scheduler for the Diffusion pipiline used for training.",
+        help="The noise scheduler for the Diffusion pipeline used for training.",
     )
     parser.add_argument(
         "--beta_start",
@@ -337,7 +337,7 @@ def parse_args():
"--noise_schedule_steps",
type=int,
default=1000,
help=("The noise scheduler max train timestemps"),
help="The noise scheduler max train timestamps",
)
parser.add_argument(
"--center_crop",
@@ -540,7 +540,7 @@ def parse_args():
         type=str,
         default="mean_min_max",
         choices=["min_max", "mean_min_max", "threesigma"],
-        help="They way how to estimate activation quantization paramters at the initializatin step before QAT.",
+        help="They way how to estimate activation quantization parameters at the initialization step before QAT.",
     )
     parser.add_argument(
         "--tune_quantizers_only",
@@ -775,7 +775,7 @@ def main():
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
+        project_dir=logging_dir,
     )
 
     logging.basicConfig(
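This tracks a rename in the accelerate library: the `logging_dir` constructor argument of `Accelerator` was deprecated in favor of `project_dir` (around accelerate v0.17, to the best of my knowledge). A minimal sketch of the updated construction, with placeholder values standing in for the script's `args`:

from accelerate import Accelerator

accelerator = Accelerator(
    gradient_accumulation_steps=4,  # placeholder for args.gradient_accumulation_steps
    mixed_precision="fp16",         # placeholder for args.mixed_precision
    log_with="tensorboard",         # placeholder for args.report_to
    project_dir="output/logs",      # previously passed as logging_dir=
)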
@@ -994,9 +994,7 @@ def collate_fn(examples):
         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
         overrode_max_train_steps = True
 
-    unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-        unet, optimizer, train_dataloader, lr_scheduler
-    )
+    optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
 
     weight_dtype = torch.float32
     if args.mixed_precision == "fp16":
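A plausible reading of this change, not confirmed by the diff itself: keeping the NNCF-compressed `unet` out of `accelerator.prepare()` avoids accelerate wrapping the model (for example in DistributedDataParallel), so the compression controller retains direct access to it. If so, the model then has to be moved to the accelerator's device explicitly, along the lines of:

# Sketch: the model is excluded from prepare(), so place it on the device manually.
unet.to(accelerator.device)
optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
    optimizer, train_dataloader, lr_scheduler
)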
@@ -1120,7 +1118,7 @@ def collate_fn(examples):

     accelerator.end_training()
 
-    # Export optimized pipline to OpenVINO
+    # Export optimized pipeline to OpenVINO
     export_unet = compression_controller.strip(do_copy=False)
     export_pipeline = StableDiffusionPipeline(
         text_encoder=text_encoder,