Commit 6fde5a6

[Tests] Fix some slow tests (#3989)
fix some slow tests
1 parent d1d0b8a commit 6fde5a6

File tree

4 files changed (+16 / -5 lines changed)

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py

Lines changed: 1 addition & 1 deletion
@@ -183,7 +183,7 @@ def enable_sequential_cpu_offload(self, gpu_id=0):
         self.to("cpu", silence_dtype_warnings=True)
         torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
 
-        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+        for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
             cpu_offload(cpu_offloaded_model, device)
 
     def enable_model_cpu_offload(self, gpu_id=0):
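SDXL pipelines carry two text encoders, and the offload loop previously skipped text_encoder_2, so that submodule never received the offload hooks the others did. A minimal usage sketch of the fixed path, assuming a CUDA GPU with accelerate installed; the model id and prompt are illustrative, not part of the commit:

# Hedged usage sketch; model id and prompt are illustrative.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
# With this fix, text_encoder_2 gets offload hooks like unet, text_encoder,
# and vae, so each submodule is streamed to the GPU only while it runs.
pipe.enable_sequential_cpu_offload()
image = pipe("an astronaut riding a horse on the moon").images[0]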

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py

Lines changed: 1 addition & 1 deletion
@@ -191,7 +191,7 @@ def enable_sequential_cpu_offload(self, gpu_id=0):
         self.to("cpu", silence_dtype_warnings=True)
         torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
 
-        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+        for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
             cpu_offload(cpu_offloaded_model, device)
 
     def enable_model_cpu_offload(self, gpu_id=0):
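The img2img pipeline receives the identical one-line fix. For context, a rough sketch of what each iteration of that loop does, assuming accelerate's cpu_offload (the helper the pipeline calls): it attaches hooks so the submodule's weights live on the CPU and are moved to the execution device only for the duration of a forward pass.

# Rough sketch, assuming `accelerate` is installed and a CUDA device exists;
# the Linear module is a stand-in for unet / text encoders / vae.
import torch
from accelerate import cpu_offload

device = torch.device("cuda:0")
module = torch.nn.Linear(8, 8)
cpu_offload(module, execution_device=device)    # weights stay on CPU between calls
out = module(torch.randn(1, 8, device=device))  # streamed to the GPU just for this call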

tests/pipelines/test_pipelines_common.py

Lines changed: 6 additions & 2 deletions
@@ -699,12 +699,16 @@ def _test_xformers_attention_forwardGenerator_pass(
 
         inputs = self.get_dummy_inputs(torch_device)
         output_without_offload = pipe(**inputs)[0]
-        output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        output_without_offload = (
+            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        )
 
         pipe.enable_xformers_memory_efficient_attention()
         inputs = self.get_dummy_inputs(torch_device)
         output_with_offload = pipe(**inputs)[0]
-        output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload
+        output_with_offload = (
+            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload
+        )
 
         if test_max_difference:
             max_diff = np.abs(output_with_offload - output_without_offload).max()
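The original test lines evaluated output.cpu() as a bare expression and threw the result away, since Tensor.cpu() returns a new tensor rather than moving the original; the fix assigns the result back so the later NumPy comparison actually sees CPU data. A tiny demonstration of the semantics, assuming a CUDA device is available:

# Minimal demonstration: Tensor.cpu() is not in-place.
import torch

if torch.cuda.is_available():
    t = torch.ones(2, device="cuda")
    t.cpu()            # result discarded; t is still on the GPU
    print(t.device)    # cuda:0
    t = t.cpu()        # reassign to use the CPU copy
    print(t.device)    # cpu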

tests/pipelines/text_to_video/test_text_to_video.py

Lines changed: 8 additions & 1 deletion
@@ -26,7 +26,7 @@
     TextToVideoSDPipeline,
     UNet3DConditionModel,
 )
-from diffusers.utils import load_numpy, skip_mps, slow
+from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
 from diffusers.utils.testing_utils import enable_full_determinism
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -143,6 +143,13 @@ def test_text_to_video_default_case(self):
     def test_attention_slicing_forward_pass(self):
         self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
 
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
+
     # (todo): sayakpaul
     @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
     def test_inference_batch_consistent(self):
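The new test is gated with unittest.skipIf so it only runs where it can pass: it checks both for a CUDA device and for the xformers package, which keeps CPU-only CI fast instead of failing slowly. A hedged sketch of the code path the test exercises; the model id, prompt, and frame count are illustrative rather than taken from the commit:

# Hedged usage sketch; requires a CUDA GPU with xformers installed.
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()  # the attention path under test
video_frames = pipe("a panda playing guitar", num_frames=8).frames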
