From 9a0a226ce11015f23c4effb4e48b55e72c32b2f8 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Mon, 30 Dec 2024 10:41:48 -0500
Subject: [PATCH] Fix bitsandbytes imports in unit tests on MacOS.

---
 .../custom_modules/test_custom_invoke_linear_nf4.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/tests/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/test_custom_invoke_linear_nf4.py b/tests/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/test_custom_invoke_linear_nf4.py
index 3559ddea6c..f97404fb94 100644
--- a/tests/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/test_custom_invoke_linear_nf4.py
+++ b/tests/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/test_custom_invoke_linear_nf4.py
@@ -1,13 +1,17 @@
 import pytest
 import torch
 
-from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_invoke_linear_nf4 import (
-    CustomInvokeLinearNF4,
-)
 from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.torch_module_autocast import (
     wrap_custom_layer,
 )
-from invokeai.backend.quantization.bnb_nf4 import InvokeLinearNF4
+
+if not torch.cuda.is_available():
+    pytest.skip("CUDA is not available", allow_module_level=True)
+else:
+    from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_invoke_linear_nf4 import (
+        CustomInvokeLinearNF4,
+    )
+    from invokeai.backend.quantization.bnb_nf4 import InvokeLinearNF4
 
 
 def build_linear_nf4_layer(orig_layer: torch.nn.Linear | None = None):