From b800fffcbbe22fb2faf5c3817d8d96329aa7a920 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 3 Jan 2025 18:11:34 -0500
Subject: [PATCH] Fix ModelCache execution device selection in unit tests.

---
 tests/backend/model_manager/model_manager_fixtures.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/backend/model_manager/model_manager_fixtures.py b/tests/backend/model_manager/model_manager_fixtures.py
index 8d32cf20f9..0b723f211c 100644
--- a/tests/backend/model_manager/model_manager_fixtures.py
+++ b/tests/backend/model_manager/model_manager_fixtures.py
@@ -26,6 +26,7 @@ from invokeai.backend.model_manager.config import (
     VAEDiffusersConfig,
 )
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
+from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
 from tests.backend.model_manager.model_metadata.metadata_examples import (
     HFTestLoraMetadata,
@@ -95,6 +96,7 @@ def mm2_loader(mm2_app_config: InvokeAIAppConfig) -> ModelLoadServiceBase:
         enable_partial_loading=mm2_app_config.enable_partial_loading,
         max_ram_cache_size_gb=mm2_app_config.ram,
         max_vram_cache_size_gb=mm2_app_config.vram,
+        execution_device=TorchDevice.choose_torch_device(),
         logger=InvokeAILogger.get_logger(),
     )
     return ModelLoadService(
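
Note on the change: the mm2_loader fixture now passes an explicit execution_device to the ModelCache it builds, instead of letting the cache choose a device implicitly, so the unit tests exercise the same device-selection path as the application. The helper below is only a minimal sketch of the kind of fallback logic TorchDevice.choose_torch_device() is assumed to perform (prefer CUDA, then MPS, then CPU); the real implementation lives in invokeai.backend.util.devices and may differ.

    import torch

    def choose_torch_device_sketch() -> torch.device:
        # Hypothetical stand-in for TorchDevice.choose_torch_device().
        # Assumed preference order: CUDA -> MPS -> CPU.
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")

With the device resolved up front in the fixture, tests running on a CUDA or MPS machine use that accelerator while CI machines without one fall back to CPU, rather than all tests silently defaulting to whatever ModelCache would pick on its own.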