Make CachedModelWithPartialLoad work with models that have non-persistent buffers.

Ryan Dick 2024-12-23 15:46:37 +00:00
parent 6bb3c96fa5
commit 510ed6ed1f
2 changed files with 64 additions and 25 deletions

View File

@@ -57,6 +57,19 @@ class CachedModelWithPartialLoad:
                keys_in_modules_that_do_not_support_autocast.add(key)
        return keys_in_modules_that_do_not_support_autocast

    def _move_non_persistent_buffers_to_device(self, device: torch.device):
        """Move the non-persistent buffers to the target device. These buffers are not included in the state dict,
        so we need to move them manually.
        """
        # HACK(ryand): Typically, non-persistent buffers are moved when calling module.to(device). We don't move entire
        # modules, because we manage the devices of individual tensors using the state dict. Since non-persistent
        # buffers are not included in the state dict, we need to handle them manually. The only way to do this is by
        # using private torch.nn.Module attributes.
        for module in self._model.modules():
            for name, buffer in module.named_buffers():
                if name in module._non_persistent_buffers_set:
                    module._buffers[name] = buffer.to(device, copy=True)

    @property
    def model(self) -> torch.nn.Module:
        return self._model
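For context, a minimal sketch (illustrative, not part of this diff) of the PyTorch behavior the new helper works around: buffers registered with persistent=False are visible through named_buffers() but excluded from the state dict, so device management that walks the state dict misses them.

import torch

class Demo(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("persistent_buf", torch.ones(4))  # included in the state dict
        self.register_buffer("non_persistent_buf", torch.ones(4), persistent=False)  # excluded

m = Demo()
assert "persistent_buf" in m.state_dict()
assert "non_persistent_buf" not in m.state_dict()
assert "non_persistent_buf" in dict(m.named_buffers())
# module.to(device) would move both buffers, but CachedModelWithPartialLoad manages tensor
# devices through the state dict, hence the manual walk over module._buffers above.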
@@ -149,7 +162,10 @@ class CachedModelWithPartialLoad:
        else:
            apply_custom_layers_to_model(self._model)

        # TODO(ryand): Handle non-persistent buffers.
        # Move all non-persistent buffers to the compute device. These are a weird edge case and do not participate in
        # the vram_bytes_loaded tracking.
        self._move_non_persistent_buffers_to_device(self._compute_device)

        return vram_bytes_loaded
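A hypothetical usage sketch of the effect of this change, using the DummyModule test fixture defined further down. The loading method's name and signature are outside this hunk; partial_load_to_vram is assumed here.

# Illustrative only; partial_load_to_vram and its argument are assumptions, not shown in this diff.
cached_model = CachedModelWithPartialLoad(model=DummyModule(), compute_device=torch.device("cuda"))
loaded_bytes = cached_model.partial_load_to_vram(2**30)

# Non-persistent buffers are moved to the compute device as a side effect of loading...
assert cached_model.model.buffer2.device.type == "cuda"
# ...but they do not count towards the reported VRAM usage.
assert loaded_bytes == cached_model.cur_vram_bytes()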
    @torch.no_grad()
@@ -179,5 +195,7 @@ class CachedModelWithPartialLoad:
        if self._cur_vram_bytes is not None:
            self._cur_vram_bytes -= vram_bytes_freed

        # We may have gone from a fully-loaded model to a partially-loaded model, so we need to reapply the custom
        # layers.
        apply_custom_layers_to_model(self._model)

        return vram_bytes_freed
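A hypothetical sequence illustrating the comment above, continuing the sketch from the previous hunk. The method names full_load_to_vram and partial_unload_from_vram (and the latter's argument) are assumptions; only the body of the unload path appears in this hunk.

# Illustrative only; method names and arguments are assumed, not shown in this diff.
cached_model.full_load_to_vram()
cached_model.partial_unload_from_vram(1024)
# After dropping from fully loaded back to partially loaded, the custom autocast layers
# must be in place again (the tests below verify this with CustomLinear):
assert type(cached_model.model.linear1) is CustomLinear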

View File

@@ -8,7 +8,25 @@ from invokeai.backend.model_manager.load.model_cache.cached_model.cached_model_w
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.autocast_modules import CustomLinear
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
from tests.backend.model_manager.load.model_cache.dummy_module import DummyModule


class DummyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(10, 32)
        self.linear2 = torch.nn.Linear(32, 64)
        self.register_buffer("buffer1", torch.ones(64))
        # Non-persistent buffers are not included in the state dict. We need to make sure that this case is handled
        # correctly by the partial loading code.
        self.register_buffer("buffer2", torch.ones(64), persistent=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear1(x)
        x = self.linear2(x)
        x = x + self.buffer1
        x = x + self.buffer2
        return x
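A quick, illustrative sanity check of this fixture (not part of the diff):

m = DummyModule()
y = m(torch.randn(1, 10))
assert y.shape == (1, 64)                    # linear1: 10 -> 32, linear2: 32 -> 64
assert "buffer1" in m.state_dict()           # persistent buffer appears in the state dict
assert "buffer2" not in m.state_dict()       # non-persistent buffer is excluded
assert "buffer2" in dict(m.named_buffers())  # but it is still a buffer on the module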
parameterize_mps_and_cuda = pytest.mark.parametrize(
    ("device"),
@@ -25,9 +43,11 @@ parameterize_mps_and_cuda = pytest.mark.parametrize(
def test_cached_model_total_bytes(device: str):
    model = DummyModule()
    cached_model = CachedModelWithPartialLoad(model=model, compute_device=torch.device(device))
    linear_numel = 10 * 10 + 10
    buffer_numel = 10 * 10
    assert cached_model.total_bytes() == (2 * linear_numel + buffer_numel) * 4
    linear1_numel = 10 * 32 + 32
    linear2_numel = 32 * 64 + 64
    buffer1_numel = 64
    # Note that the non-persistent buffer (buffer2) is not included in .total_bytes() calculation.
    assert cached_model.total_bytes() == (linear1_numel + linear2_numel + buffer1_numel) * 4
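For readers checking the arithmetic, the expected accounting for DummyModule with float32 tensors (4 bytes per element) works out as follows:

linear1_elems = 10 * 32 + 32   # 352 elements (weights + bias)
linear2_elems = 32 * 64 + 64   # 2112 elements (weights + bias)
buffer1_elems = 64             # persistent buffer, counted
# buffer2 (64 elements) is non-persistent and deliberately excluded from total_bytes().
assert (linear1_elems + linear2_elems + buffer1_elems) * 4 == 10112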
@parameterize_mps_and_cuda
@@ -62,7 +82,9 @@ def test_cached_model_partial_load(device: str):
    assert loaded_bytes < model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert loaded_bytes == sum(
        calc_tensor_size(p) for p in itertools.chain(model.parameters(), model.buffers()) if p.device.type == device
        calc_tensor_size(p)
        for n, p in itertools.chain(model.named_parameters(), model.named_buffers())
        if p.device.type == device and n != "buffer2"
    )

    # Check that the model's modules have been patched with CustomLinear layers.
@@ -124,7 +146,12 @@ def test_cached_model_full_load_and_unload(device: str):
    assert unloaded_bytes > 0
    assert unloaded_bytes == model_total_bytes
    assert cached_model.cur_vram_bytes() == 0
    assert all(p.device.type == "cpu" for p in itertools.chain(model.parameters(), model.buffers()))
    # Note that the non-persistent buffer (buffer2) is not required to be unloaded from VRAM.
    assert all(
        p.device.type == "cpu"
        for n, p in itertools.chain(model.named_parameters(), model.named_buffers())
        if n != "buffer2"
    )
@parameterize_mps_and_cuda
@@ -177,7 +204,12 @@ def test_cached_model_full_unload_from_partial(device: str):
    assert unloaded_bytes > 0
    assert unloaded_bytes == loaded_bytes
    assert cached_model.cur_vram_bytes() == 0
    assert all(p.device.type == "cpu" for p in itertools.chain(model.parameters(), model.buffers()))
    # Note that the non-persistent buffer (buffer2) is not required to be unloaded from VRAM.
    assert all(
        p.device.type == "cpu"
        for n, p in itertools.chain(model.named_parameters(), model.named_buffers())
        if n != "buffer2"
    )
@parameterize_mps_and_cuda
@@ -214,7 +246,7 @@ def test_cached_model_full_load_and_inference(device: str):
    assert cached_model.cur_vram_bytes() == 0

    # Run inference on the CPU.
    x = model(torch.randn(1, 10))
    x = torch.randn(1, 10)
    output1 = model(x)
    assert output1.device.type == "cpu"
@@ -229,20 +261,8 @@
    output2 = model(x.to(device))
    assert output2.device.type == device

    # Full unload the model from VRAM.
    unloaded_bytes = cached_model.full_unload_from_vram()
    assert unloaded_bytes > 0
    assert unloaded_bytes == model_total_bytes
    assert cached_model.cur_vram_bytes() == 0
    assert all(p.device.type == "cpu" for p in itertools.chain(model.parameters(), model.buffers()))

    # Run inference on the CPU again.
    output3 = model(x)
    assert output3.device.type == "cpu"

    # The outputs should be the same for all three runs.
    # The outputs should be the same for both runs.
    assert torch.allclose(output1, output2.to("cpu"))
    assert torch.allclose(output1, output3)
@parameterize_mps_and_cuda
@@ -254,7 +274,7 @@ def test_cached_model_partial_load_and_inference(device: str):
    assert cached_model.cur_vram_bytes() == 0

    # Run inference on the CPU.
    x = model(torch.randn(1, 10))
    x = torch.randn(1, 10)
    output1 = model(x)
    assert output1.device.type == "cpu"
@@ -267,9 +287,10 @@ def test_cached_model_partial_load_and_inference(device: str):
    assert loaded_bytes < model_total_bytes
    assert loaded_bytes == cached_model.cur_vram_bytes()
    assert loaded_bytes == sum(
        calc_tensor_size(p) for p in itertools.chain(model.parameters(), model.buffers()) if p.device.type == device
        calc_tensor_size(p)
        for n, p in itertools.chain(model.named_parameters(), model.named_buffers())
        if p.device.type == device and n != "buffer2"
    )

    # Check that the model's modules have been patched with CustomLinear layers.
    assert type(model.linear1) is CustomLinear
    assert type(model.linear2) is CustomLinear