Skip to content

Commit 80e34cd

Browse files
chore: ruff
1 parent 104d565 commit 80e34cd

File tree

7 files changed

+36
-36
lines changed

7 files changed

+36
-36
lines changed

invokeai/app/invocations/baseinvocation.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -587,9 +587,9 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
587587
for field_name, field_info in cls.model_fields.items():
588588
annotation = field_info.annotation
589589
assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
590-
assert isinstance(
591-
field_info.json_schema_extra, dict
592-
), f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
590+
assert isinstance(field_info.json_schema_extra, dict), (
591+
f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
592+
)
593593

594594
original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
595595

@@ -712,9 +712,9 @@ def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]:
712712
for field_name, field_info in cls.model_fields.items():
713713
annotation = field_info.annotation
714714
assert annotation is not None, f"{field_name} on invocation output {output_type} has no type annotation."
715-
assert isinstance(
716-
field_info.json_schema_extra, dict
717-
), f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
715+
assert isinstance(field_info.json_schema_extra, dict), (
716+
f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
717+
)
718718

719719
cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
720720

invokeai/app/invocations/segment_anything.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -184,9 +184,9 @@ def _filter_masks(
184184
# Find the largest mask.
185185
return [max(masks, key=lambda x: float(x.sum()))]
186186
elif self.mask_filter == "highest_box_score":
187-
assert (
188-
bounding_boxes is not None
189-
), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
187+
assert bounding_boxes is not None, (
188+
"Bounding boxes must be provided to use the 'highest_box_score' mask filter."
189+
)
190190
assert len(masks) == len(bounding_boxes)
191191
# Find the index of the bounding box with the highest score.
192192
# Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker. In most

invokeai/app/services/config/config_default.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -482,9 +482,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
482482
try:
483483
# Meta is not included in the model fields, so we need to validate it separately
484484
config = InvokeAIAppConfig.model_validate(loaded_config_dict)
485-
assert (
486-
config.schema_version == CONFIG_SCHEMA_VERSION
487-
), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
485+
assert config.schema_version == CONFIG_SCHEMA_VERSION, (
486+
f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
487+
)
488488
return config
489489
except Exception as e:
490490
raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e

invokeai/app/services/workflow_records/workflow_records_sqlite.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -379,13 +379,13 @@ def _sync_default_workflows(self) -> None:
379379
bytes_ = path.read_bytes()
380380
workflow_from_file = WorkflowValidator.validate_json(bytes_)
381381

382-
assert workflow_from_file.id.startswith(
383-
"default_"
384-
), f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
382+
assert workflow_from_file.id.startswith("default_"), (
383+
f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
384+
)
385385

386-
assert (
387-
workflow_from_file.meta.category is WorkflowCategory.Default
388-
), f"Invalid default workflow category: {workflow_from_file.meta.category}"
386+
assert workflow_from_file.meta.category is WorkflowCategory.Default, (
387+
f"Invalid default workflow category: {workflow_from_file.meta.category}"
388+
)
389389

390390
workflows_from_file.append(workflow_from_file)
391391

invokeai/backend/model_manager/merge.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -115,19 +115,19 @@ def merge_diffusion_models_and_save(
115115
base_models: Set[BaseModelType] = set()
116116
variant = None if self._installer.app_config.precision == "float32" else "fp16"
117117

118-
assert (
119-
len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
120-
), "When merging three models, only the 'add_difference' merge method is supported"
118+
assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
119+
"When merging three models, only the 'add_difference' merge method is supported"
120+
)
121121

122122
for key in model_keys:
123123
info = store.get_model(key)
124124
model_names.append(info.name)
125-
assert isinstance(
126-
info, MainDiffusersConfig
127-
), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
128-
assert info.variant == ModelVariantType(
129-
"normal"
130-
), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
125+
assert isinstance(info, MainDiffusersConfig), (
126+
f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
127+
)
128+
assert info.variant == ModelVariantType("normal"), (
129+
f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
130+
)
131131

132132
# tally base models used
133133
base_models.add(info.base)

tests/app/services/download/test_download_queue.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -211,12 +211,12 @@ def event_handler(job: DownloadJob | MultiFileDownloadJob, excp: Optional[Except
211211
assert job.bytes > 0, "expected download bytes to be positive"
212212
assert job.bytes == job.total_bytes, "expected download bytes to equal total bytes"
213213
assert job.download_path == tmp_path / "sdxl-turbo"
214-
assert Path(
215-
tmp_path, "sdxl-turbo/model_index.json"
216-
).exists(), f"expected {tmp_path}/sdxl-turbo/model_index.json to exist"
217-
assert Path(
218-
tmp_path, "sdxl-turbo/text_encoder/config.json"
219-
).exists(), f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
214+
assert Path(tmp_path, "sdxl-turbo/model_index.json").exists(), (
215+
f"expected {tmp_path}/sdxl-turbo/model_index.json to exist"
216+
)
217+
assert Path(tmp_path, "sdxl-turbo/text_encoder/config.json").exists(), (
218+
f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
219+
)
220220

221221
assert events == {DownloadJobStatus.RUNNING, DownloadJobStatus.COMPLETED}
222222
queue.stop()

tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,9 @@ def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format():
4848
model_keys = set(model.state_dict().keys())
4949

5050
for converted_key_prefix in converted_key_prefixes:
51-
assert any(
52-
model_key.startswith(converted_key_prefix) for model_key in model_keys
53-
), f"'{converted_key_prefix}' did not match any model keys."
51+
assert any(model_key.startswith(converted_key_prefix) for model_key in model_keys), (
52+
f"'{converted_key_prefix}' did not match any model keys."
53+
)
5454

5555

5656
def test_lora_model_from_flux_aitoolkit_state_dict():

0 commit comments

Comments
 (0)