invoke-ai/InvokeAI (mirror of https://github.com/invoke-ai/InvokeAI.git)
Update CachedModelWithPartialLoad to use new autocast modules.

Commit: 6bb3c96fa5
Parent: 52c5061b99
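
At a high level, the new torch_module_autocast helpers work by swapping supported layer types (for example, torch.nn.Linear is replaced by CustomLinear) into the model and swapping them back out once the model is fully resident in VRAM, instead of wrapping each forward call in an autocast context as the old torch_function_autocast_context code did. The snippet below is a minimal sketch of that pattern under stated assumptions, not the InvokeAI implementation; every "Sketch"/"sketch_" name is invented here, and the real logic lives in AUTOCAST_MODULE_TYPE_MAPPING, apply_custom_layers_to_model, remove_custom_layers_from_model, and the custom module classes.

# Minimal, hypothetical sketch of the module-swapping pattern; not InvokeAI code.
import torch
import torch.nn.functional as F


class SketchCustomLinear(torch.nn.Linear):
    """A Linear subclass that streams CPU-resident weights to the input's device at call time."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        weight = self.weight.to(x.device, non_blocking=True)
        bias = self.bias.to(x.device, non_blocking=True) if self.bias is not None else None
        return F.linear(x, weight, bias)


# Maps a standard module type to its device-autocasting replacement (and back).
SKETCH_TYPE_MAPPING = {torch.nn.Linear: SketchCustomLinear}
SKETCH_REVERSE_MAPPING = {v: k for k, v in SKETCH_TYPE_MAPPING.items()}


def sketch_apply_custom_layers(model: torch.nn.Module) -> None:
    # Reassigning __class__ swaps the forward implementation without copying any weights.
    for module in model.modules():
        replacement = SKETCH_TYPE_MAPPING.get(type(module))
        if replacement is not None:
            module.__class__ = replacement


def sketch_remove_custom_layers(model: torch.nn.Module) -> None:
    for module in model.modules():
        original = SKETCH_REVERSE_MAPPING.get(type(module))
        if original is not None:
            module.__class__ = original

Because only the class pointer changes, applying and removing the custom layers is cheap, which is why the diff below applies them whenever the model is only partially loaded and strips them again once everything fits in VRAM.
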
@@ -1,10 +1,9 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_function_autocast_context import (
    add_autocast_to_module_forward,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.torch_module_autocast import (
    AUTOCAST_MODULE_TYPE_MAPPING,
    apply_custom_layers_to_model,
    remove_custom_layers_from_model,
)
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
from invokeai.backend.util.logging import InvokeAILogger

@@ -41,18 +40,22 @@ class CachedModelWithPartialLoad:
        self._total_bytes = sum(calc_tensor_size(p) for p in self._cpu_state_dict.values())
        self._cur_vram_bytes: int | None = None

        apply_custom_layers_to_model(self._model)
        # self._update_model_autocast_context()
        self._modules_that_support_autocast = self._find_modules_that_support_autocast()
        self._keys_in_modules_that_do_not_support_autocast = self._find_keys_in_modules_that_do_not_support_autocast()

    # def _find_modules_that_support_autocast(self) -> dict[str, torch.nn.Module]:
    #     """Find all modules that support autocasting."""
    #     # Most modules would work with autocasting, but to be safe we maintain a whitelist of supported modules.
    #     # This short whitelist covers most of the weights in large models.
    #     supported_modules = (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, RMSNorm, torch.nn.Embedding)
    #     modules_that_support_autocast = {
    #         n: m for n, m in self._model.named_modules() if isinstance(m, supported_modules)
    #     }
    #     return modules_that_support_autocast
    def _find_modules_that_support_autocast(self) -> dict[str, torch.nn.Module]:
        """Find all modules that support autocasting."""
        return {n: m for n, m in self._model.named_modules() if type(m) in AUTOCAST_MODULE_TYPE_MAPPING}

    def _find_keys_in_modules_that_do_not_support_autocast(self) -> set[str]:
        keys_in_modules_that_do_not_support_autocast = set()
        for key in self._cpu_state_dict.keys():
            for module_name in self._modules_that_support_autocast.keys():
                if key.startswith(module_name):
                    break
            else:
                keys_in_modules_that_do_not_support_autocast.add(key)
        return keys_in_modules_that_do_not_support_autocast

    @property
    def model(self) -> torch.nn.Module:

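A small detail worth noting in _find_keys_in_modules_that_do_not_support_autocast above: it relies on Python's for/else, where the else branch runs only when the inner loop finishes without hitting break, i.e. when a state-dict key is not prefixed by the name of any autocast-capable module. A self-contained illustration of the idiom (all names and data below are invented for the example):

# Standalone illustration of the for/else idiom; the data here is made up.
state_dict_keys = ["linear1.weight", "linear1.bias", "norm.scale"]
autocast_module_names = ["linear1", "linear2"]

keys_without_autocast = set()
for key in state_dict_keys:
    for module_name in autocast_module_names:
        if key.startswith(module_name):
            break  # The key belongs to an autocast-capable module.
    else:
        # Reached only when the inner loop completed without `break`.
        keys_without_autocast.add(key)

assert keys_without_autocast == {"norm.scale"}
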
@@ -98,6 +101,25 @@ class CachedModelWithPartialLoad:

        cur_state_dict = self._model.state_dict()

        # First, process the keys that *must* be loaded into VRAM.
        for key in self._keys_in_modules_that_do_not_support_autocast:
            param = cur_state_dict[key]
            if param.device.type == self._compute_device.type:
                continue

            param_size = calc_tensor_size(param)
            cur_state_dict[key] = param.to(self._compute_device, copy=True)
            vram_bytes_loaded += param_size

        if vram_bytes_loaded > vram_bytes_to_load:
            logger = InvokeAILogger.get_logger()
            logger.warning(
                f"Loaded {vram_bytes_loaded / 2**20} MB into VRAM, but only {vram_bytes_to_load / 2**20} MB were "
                "requested. This is the minimum set of weights in VRAM required to run the model."
            )

        # Next, process the keys that can optionally be loaded into VRAM.
        fully_loaded = True
        for key, param in cur_state_dict.items():
            if param.device.type == self._compute_device.type:
                continue

@@ -106,6 +128,7 @@ class CachedModelWithPartialLoad:
            if vram_bytes_loaded + param_size > vram_bytes_to_load:
                # TODO(ryand): Should we just break here? If we couldn't fit this parameter into VRAM, is it really
                # worth continuing to search for a smaller parameter that would fit?
                fully_loaded = False
                continue

            cur_state_dict[key] = param.to(self._compute_device, copy=True)

@@ -120,12 +143,13 @@ class CachedModelWithPartialLoad:
        if self._cur_vram_bytes is not None:
            self._cur_vram_bytes += vram_bytes_loaded

        if self._cur_vram_bytes == self.total_bytes():
            # HACK(ryand): The model should already be on the compute device, but we have to call this to ensure that
            # all non-persistent buffers are moved (i.e. buffers that are not registered in the state dict).
            self._model.to(self._compute_device)
        if fully_loaded:
            remove_custom_layers_from_model(self._model)
            # TODO(ryand): Warn if the self.cur_vram_bytes() and self.total_bytes() are out of sync.
        else:
            apply_custom_layers_to_model(self._model)

        # self._update_model_autocast_context()
        # TODO(ryand): Handle non-persistent buffers.
        return vram_bytes_loaded

    @torch.no_grad()

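Taken together, the hunks above amount to a two-phase budget for partial_load_to_vram: keys in modules without an autocast replacement are always copied to the compute device (with a warning if that alone already exceeds the request), the remaining parameters are moved only while they still fit under vram_bytes_to_load, and fully_loaded then decides whether the custom layers can be removed or must stay applied. The function below is a condensed, hypothetical sketch of that control flow for illustration only; it is not the actual method and omits the logging and buffer handling. In the real method the dict comes from self._model.state_dict() and the required keys are self._keys_in_modules_that_do_not_support_autocast.

# Condensed, hypothetical sketch of the two-phase partial-load budget shown above.
import torch


def sketch_partial_load(
    state_dict: dict[str, torch.Tensor],
    required_keys: set[str],
    compute_device: torch.device,
    vram_bytes_to_load: int,
) -> tuple[int, bool]:
    vram_bytes_loaded = 0

    # Phase 1: keys that *must* live on the compute device, regardless of budget.
    for key in required_keys:
        param = state_dict[key]
        if param.device.type != compute_device.type:
            state_dict[key] = param.to(compute_device, copy=True)
            vram_bytes_loaded += param.numel() * param.element_size()

    # Phase 2: everything else, moved only while it still fits under the budget.
    fully_loaded = True
    for key, param in state_dict.items():
        if param.device.type == compute_device.type:
            continue
        param_size = param.numel() * param.element_size()
        if vram_bytes_loaded + param_size > vram_bytes_to_load:
            fully_loaded = False
            continue
        state_dict[key] = param.to(compute_device, copy=True)
        vram_bytes_loaded += param_size

    return vram_bytes_loaded, fully_loaded

# Tiny usage example (assumes a CUDA device is available):
# sd = {"a.weight": torch.zeros(4, 4), "b.weight": torch.zeros(4, 4)}
# sketch_partial_load(sd, {"a.weight"}, torch.device("cuda"), vram_bytes_to_load=64)
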
@@ -155,25 +179,5 @@ class CachedModelWithPartialLoad:
        if self._cur_vram_bytes is not None:
            self._cur_vram_bytes -= vram_bytes_freed

        # self._update_model_autocast_context()
        apply_custom_layers_to_model(self._model)
        return vram_bytes_freed

    def _update_model_autocast_context(self):
        """A helper function that should be called whenever the model's VRAM usage changes to add/remove the autocast
        context.
        """
        # if self.cur_vram_bytes() == self.total_bytes():
        #     # We remove the autocast context when the model is fully loaded into VRAM, because the context causes some
        #     # runtime overhead.
        #     remove_autocast_from_module_forward(self._model)
        # else:
        #     # Monkey-patch the model to add autocasting to the model's forward method.
        #     add_autocast_to_module_forward(self._model, self._compute_device)

        # TODO(ryand): Make sure that enabling autocast context is a no-op on a given module if it's already enabled.

        modules_that_support_autocast = self._find_modules_that_support_autocast()
        logger = InvokeAILogger.get_logger()
        logger.info(f"Enabling autocast context for {len(modules_that_support_autocast)} modules")
        for _, module in modules_that_support_autocast.items():
            add_autocast_to_module_forward(module, self._compute_device)

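The remaining hunks touch the accompanying test module, which imports DummyModule from tests/backend/model_manager/load/model_cache/dummy_module.py. That helper is not part of this diff; purely to make the assertions below easier to follow, a hypothetical stand-in consistent with what the tests touch (linear1, linear2, a registered buffer, and a (1, 10) input whose output can be fed back into the model) could look like this:

# Hypothetical stand-in for the DummyModule test helper; not the real implementation.
import torch


class DummyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(10, 32)
        self.linear2 = torch.nn.Linear(32, 10)
        # At least one buffer, so the tests' sums over model.buffers() are non-trivial.
        self.register_buffer("buffer1", torch.zeros(64))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Output has 10 features, so model(model(x)) works as the tests assume.
        return self.linear2(self.linear1(x))
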
@@ -6,6 +6,7 @@ import torch
from invokeai.backend.model_manager.load.model_cache.cached_model.cached_model_with_partial_load import (
    CachedModelWithPartialLoad,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.autocast_modules import CustomLinear
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
from tests.backend.model_manager.load.model_cache.dummy_module import DummyModule

@@ -55,6 +56,8 @@ def test_cached_model_partial_load(device: str):
    # Partially load the model into VRAM.
    target_vram_bytes = int(model_total_bytes * 0.6)
    loaded_bytes = cached_model.partial_load_to_vram(target_vram_bytes)

    # Check that the model is partially loaded into VRAM.
    assert loaded_bytes > 0
    assert loaded_bytes < model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()

@@ -62,6 +65,10 @@ def test_cached_model_partial_load(device: str):
        calc_tensor_size(p) for p in itertools.chain(model.parameters(), model.buffers()) if p.device.type == device
    )

    # Check that the model's modules have been patched with CustomLinear layers.
    assert type(model.linear1) is CustomLinear
    assert type(model.linear2) is CustomLinear


@parameterize_mps_and_cuda
def test_cached_model_partial_unload(device: str):

@@ -78,6 +85,8 @@ def test_cached_model_partial_unload(device: str):
    # Partially unload the model from VRAM.
    bytes_to_free = int(model_total_bytes * 0.4)
    freed_bytes = cached_model.partial_unload_from_vram(bytes_to_free)

    # Check that the model is partially unloaded from VRAM.
    assert freed_bytes >= bytes_to_free
    assert freed_bytes < model_total_bytes
    assert freed_bytes == model_total_bytes - cached_model.cur_vram_bytes()

@@ -85,6 +94,10 @@ def test_cached_model_partial_unload(device: str):
        calc_tensor_size(p) for p in itertools.chain(model.parameters(), model.buffers()) if p.device.type == "cpu"
    )

    # Check that the model's modules are still patched with CustomLinear layers.
    assert type(model.linear1) is CustomLinear
    assert type(model.linear2) is CustomLinear


@parameterize_mps_and_cuda
def test_cached_model_full_load_and_unload(device: str):

@@ -101,9 +114,13 @@ def test_cached_model_full_load_and_unload(device: str):
    assert loaded_bytes == model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert all(p.device.type == device for p in itertools.chain(model.parameters(), model.buffers()))
    assert type(model.linear1) is torch.nn.Linear
    assert type(model.linear2) is torch.nn.Linear

    # Full unload the model from VRAM.
    unloaded_bytes = cached_model.full_unload_from_vram()

    # Check that the model is fully unloaded from VRAM.
    assert unloaded_bytes > 0
    assert unloaded_bytes == model_total_bytes
    assert cached_model.cur_vram_bytes() == 0

@@ -125,6 +142,8 @@ def test_cached_model_full_load_from_partial(device: str):
    assert loaded_bytes > 0
    assert loaded_bytes < model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert type(model.linear1) is CustomLinear
    assert type(model.linear2) is CustomLinear

    # Full load the rest of the model into VRAM.
    loaded_bytes_2 = cached_model.full_load_to_vram()

@@ -133,6 +152,8 @@ def test_cached_model_full_load_from_partial(device: str):
    assert loaded_bytes + loaded_bytes_2 == cached_model.cur_vram_bytes()
    assert loaded_bytes + loaded_bytes_2 == model_total_bytes
    assert all(p.device.type == device for p in itertools.chain(model.parameters(), model.buffers()))
    assert type(model.linear1) is torch.nn.Linear
    assert type(model.linear2) is torch.nn.Linear


@parameterize_mps_and_cuda

@@ -182,3 +203,80 @@ def test_cached_model_get_cpu_state_dict(device: str):
    assert cpu_state_dict is not None
    assert len(cpu_state_dict) == len(model.state_dict())
    assert all(p.device.type == "cpu" for p in cpu_state_dict.values())


@parameterize_mps_and_cuda
def test_cached_model_full_load_and_inference(device: str):
    model = DummyModule()
    cached_model = CachedModelWithPartialLoad(model=model, compute_device=torch.device(device))
    # Model starts in CPU memory.
    model_total_bytes = cached_model.total_bytes()
    assert cached_model.cur_vram_bytes() == 0

    # Run inference on the CPU.
    x = model(torch.randn(1, 10))
    output1 = model(x)
    assert output1.device.type == "cpu"

    # Full load the model into VRAM.
    loaded_bytes = cached_model.full_load_to_vram()
    assert loaded_bytes > 0
    assert loaded_bytes == model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert all(p.device.type == device for p in itertools.chain(model.parameters(), model.buffers()))

    # Run inference on the GPU.
    output2 = model(x.to(device))
    assert output2.device.type == device

    # Full unload the model from VRAM.
    unloaded_bytes = cached_model.full_unload_from_vram()
    assert unloaded_bytes > 0
    assert unloaded_bytes == model_total_bytes
    assert cached_model.cur_vram_bytes() == 0
    assert all(p.device.type == "cpu" for p in itertools.chain(model.parameters(), model.buffers()))

    # Run inference on the CPU again.
    output3 = model(x)
    assert output3.device.type == "cpu"

    # The outputs should be the same for all three runs.
    assert torch.allclose(output1, output2.to("cpu"))
    assert torch.allclose(output1, output3)


@parameterize_mps_and_cuda
def test_cached_model_partial_load_and_inference(device: str):
    model = DummyModule()
    # Model starts in CPU memory.
    cached_model = CachedModelWithPartialLoad(model=model, compute_device=torch.device(device))
    model_total_bytes = cached_model.total_bytes()
    assert cached_model.cur_vram_bytes() == 0

    # Run inference on the CPU.
    x = model(torch.randn(1, 10))
    output1 = model(x)
    assert output1.device.type == "cpu"

    # Partially load the model into VRAM.
    target_vram_bytes = int(model_total_bytes * 0.6)
    loaded_bytes = cached_model.partial_load_to_vram(target_vram_bytes)

    # Check that the model is partially loaded into VRAM.
    assert loaded_bytes > 0
    assert loaded_bytes < model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert loaded_bytes == sum(
        calc_tensor_size(p) for p in itertools.chain(model.parameters(), model.buffers()) if p.device.type == device
    )

    # Check that the model's modules have been patched with CustomLinear layers.
    assert type(model.linear1) is CustomLinear
    assert type(model.linear2) is CustomLinear

    # Run inference on the GPU.
    output2 = model(x.to(device))
    assert output2.device.type == device

    # The output should be the same as the output from the CPU.
    assert torch.allclose(output1, output2.to("cpu"))
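
For context, the @parameterize_mps_and_cuda decorator used throughout these tests is another helper from the InvokeAI test suite that is not part of this diff. A plausible, hypothetical definition (an assumption, not the project's actual helper) would parameterize each test over the available accelerators:

# Hypothetical stand-in for the parameterize_mps_and_cuda decorator used above;
# the real helper lives in the InvokeAI test utilities.
import pytest
import torch

parameterize_mps_and_cuda = pytest.mark.parametrize(
    "device",
    [
        pytest.param(
            "cuda",
            marks=pytest.mark.skipif(not torch.cuda.is_available(), reason="requires a CUDA device"),
        ),
        pytest.param(
            "mps",
            marks=pytest.mark.skipif(not torch.backends.mps.is_available(), reason="requires an MPS device"),
        ),
    ],
)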