add migration script and update convert and face restoration paths
This commit is contained in:
parent c7ea46a5da
commit 87ba17a1f5

17	README.md
@@ -43,6 +43,23 @@ _Note: InvokeAI is rapidly evolving. Please use the
 [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
 requests. Be sure to use the provided templates. They will help us diagnose issues faster._
 
+## FOR DEVELOPERS - MIGRATING TO THE 3.0.0 MODELS FORMAT
+
+The models directory and models.yaml have changed. To migrate to the
+new layout, please follow this recipe:
+
+1. Run `python scripts/migrate_models_to_3.0.py <path_to_root_directory>`
+
+2. This will create a new models directory named `models-3.0` and a
+   new config file named `models.yaml-3.0`, both in the current
+   working directory. If you prefer to name them something else, pass
+   the `--dest-directory` and/or `--dest-yaml` arguments.
+
+3. Check that the new models directory and yaml file look ok.
+
+4. Replace the existing directory and file, keeping backup copies just in
+   case.
+
 <div align="center">
 
 ![canvas preview](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/canvas_preview.png)
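To make the recipe concrete, here is a minimal sketch of the whole procedure, assuming a 2.3 root at `~/invokeai`; the backup/rename targets are illustrative and not part of the migration script itself:

```python
# Sketch only: step 1 of the recipe above, then the backup-and-swap of step 4.
import subprocess
from pathlib import Path

root = Path.home() / "invokeai"  # assumed InvokeAI 2.3 root directory

# Step 1: writes ./models-3.0 and ./models.yaml-3.0 in the current directory
subprocess.run(["python", "scripts/migrate_models_to_3.0.py", str(root)], check=True)

# Step 4 (after inspecting the output): keep backups, then swap in the new layout
(root / "models").rename(root / "models.orig")
Path("models-3.0").rename(root / "models")
(root / "configs" / "models.yaml").rename(root / "configs" / "models.yaml.orig")
Path("models.yaml-3.0").rename(root / "configs" / "models.yaml")
```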
@@ -76,6 +76,8 @@ from transformers import (
 
 from ..stable_diffusion import StableDiffusionGeneratorPipeline
 
+MODEL_ROOT = None
+
 def shave_segments(path, n_shave_prefix_segments=1):
     """
     Removes segments. Positive values shave the first segments, negative shave the last segments.
@@ -856,10 +858,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):
 
 
 def convert_ldm_clip_checkpoint(checkpoint):
-    text_model = CLIPTextModel.from_pretrained(
-        "openai/clip-vit-large-patch14", cache_dir=InvokeAIAppConfig.get_config().cache_dir
-    )
-
+    text_model = CLIPTextModel.from_pretrained(MODEL_ROOT / 'clip-vit-large-patch14')
     keys = list(checkpoint.keys())
 
     text_model_dict = {}
@@ -911,83 +910,9 @@ protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst}
 textenc_pattern = re.compile("|".join(protected.keys()))
 
 
-def convert_paint_by_example_checkpoint(checkpoint):
-    cache_dir = InvokeAIAppConfig.get_config().cache_dir
-    config = CLIPVisionConfig.from_pretrained(
-        "openai/clip-vit-large-patch14", cache_dir=cache_dir
-    )
-    model = PaintByExampleImageEncoder(config)
-
-    keys = list(checkpoint.keys())
-
-    text_model_dict = {}
-
-    for key in keys:
-        if key.startswith("cond_stage_model.transformer"):
-            text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[
-                key
-            ]
-
-    # load clip vision
-    model.model.load_state_dict(text_model_dict)
-
-    # load mapper
-    keys_mapper = {
-        k[len("cond_stage_model.mapper.res") :]: v
-        for k, v in checkpoint.items()
-        if k.startswith("cond_stage_model.mapper")
-    }
-
-    MAPPING = {
-        "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
-        "attn.c_proj": ["attn1.to_out.0"],
-        "ln_1": ["norm1"],
-        "ln_2": ["norm3"],
-        "mlp.c_fc": ["ff.net.0.proj"],
-        "mlp.c_proj": ["ff.net.2"],
-    }
-
-    mapped_weights = {}
-    for key, value in keys_mapper.items():
-        prefix = key[: len("blocks.i")]
-        suffix = key.split(prefix)[-1].split(".")[-1]
-        name = key.split(prefix)[-1].split(suffix)[0][1:-1]
-        mapped_names = MAPPING[name]
-
-        num_splits = len(mapped_names)
-        for i, mapped_name in enumerate(mapped_names):
-            new_name = ".".join([prefix, mapped_name, suffix])
-            shape = value.shape[0] // num_splits
-            mapped_weights[new_name] = value[i * shape : (i + 1) * shape]
-
-    model.mapper.load_state_dict(mapped_weights)
-
-    # load final layer norm
-    model.final_layer_norm.load_state_dict(
-        {
-            "bias": checkpoint["cond_stage_model.final_ln.bias"],
-            "weight": checkpoint["cond_stage_model.final_ln.weight"],
-        }
-    )
-
-    # load final proj
-    model.proj_out.load_state_dict(
-        {
-            "bias": checkpoint["proj_out.bias"],
-            "weight": checkpoint["proj_out.weight"],
-        }
-    )
-
-    # load uncond vector
-    model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"])
-    return model
-
-
 def convert_open_clip_checkpoint(checkpoint):
-    cache_dir = InvokeAIAppConfig.get_config().cache_dir
-    text_model = CLIPTextModel.from_pretrained(
-        "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
-    )
+    text_model = CLIPTextModel.from_pretrained(MODEL_ROOT / 'stable-diffusion-2-text_encoder')
 
     keys = list(checkpoint.keys())
 
@@ -1283,11 +1208,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
 
     if model_type == "FrozenOpenCLIPEmbedder":
         text_model = convert_open_clip_checkpoint(checkpoint)
-        tokenizer = CLIPTokenizer.from_pretrained(
-            "stabilityai/stable-diffusion-2",
-            subfolder="tokenizer",
-            cache_dir=cache_dir,
-        )
+        tokenizer = CLIPTokenizer.from_pretrained(MODEL_ROOT / 'stable-diffusion-2-tokenizer')
         pipe = pipeline_class(
             vae=vae.to(precision),
             text_encoder=text_model.to(precision),
@@ -1298,34 +1219,11 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
             feature_extractor=None,
             requires_safety_checker=False,
         )
-    elif model_type == "PaintByExample":
-        vision_model = convert_paint_by_example_checkpoint(checkpoint)
-        tokenizer = CLIPTokenizer.from_pretrained(
-            "openai/clip-vit-large-patch14", cache_dir=cache_dir
-        )
-        feature_extractor = AutoFeatureExtractor.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker", cache_dir=cache_dir
-        )
-        pipe = PaintByExamplePipeline(
-            vae=vae,
-            image_encoder=vision_model,
-            unet=unet,
-            scheduler=scheduler,
-            safety_checker=None,
-            feature_extractor=feature_extractor,
-        )
     elif model_type in ["FrozenCLIPEmbedder", "WeightedFrozenCLIPEmbedder"]:
         text_model = convert_ldm_clip_checkpoint(checkpoint)
-        tokenizer = CLIPTokenizer.from_pretrained(
-            "openai/clip-vit-large-patch14", cache_dir=cache_dir
-        )
-        safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker",
-            cache_dir=cache_dir,
-        )
-        feature_extractor = AutoFeatureExtractor.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker", cache_dir=cache_dir
-        )
+        tokenizer = CLIPTokenizer.from_pretrained(MODEL_ROOT / 'clip-vit-large-patch14')
+        safety_checker = StableDiffusionSafetyChecker.from_pretrained(MODEL_ROOT / 'stable-diffusion-safety-checker')
+        feature_extractor = AutoFeatureExtractor.from_pretrained(MODEL_ROOT / 'stable-diffusion-safety-checker-extractor')
         pipe = pipeline_class(
             vae=vae.to(precision),
             text_encoder=text_model.to(precision),
@@ -1338,9 +1236,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     else:
         text_config = create_ldm_bert_config(original_config)
         text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
-        tokenizer = BertTokenizerFast.from_pretrained(
-            "bert-base-uncased", cache_dir=cache_dir
-        )
+        tokenizer = BertTokenizerFast.from_pretrained(MODEL_ROOT / "bert-base-uncased")
         pipe = LDMTextToImagePipeline(
             vqvae=vae,
             bert=text_model,
@@ -1354,15 +1250,19 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
 
 
 def convert_ckpt_to_diffusers(
-    checkpoint_path: Union[str, Path],
-    dump_path: Union[str, Path],
-    **kwargs,
+    checkpoint_path: Union[str, Path],
+    dump_path: Union[str, Path],
+    model_root: Union[str, Path],
+    **kwargs,
 ):
     """
     Takes all the arguments of load_pipeline_from_original_stable_diffusion_ckpt(),
     and in addition a path-like object indicating the location of the desired diffusers
     model to be written.
     """
+    # setting global here to avoid massive changes late at night
+    global MODEL_ROOT
+    MODEL_ROOT = Path(model_root) / 'core/convert'
     pipe = load_pipeline_from_original_stable_diffusion_ckpt(checkpoint_path, **kwargs)
 
     pipe.save_pretrained(
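The pattern across these hunks: every hub download (`from_pretrained(repo_id, cache_dir=...)`) becomes a load from a pre-populated local directory under `MODEL_ROOT`, which `convert_ckpt_to_diffusers()` now derives from its new `model_root` argument. A minimal call sketch, with an assumed import path and hypothetical file names:

```python
from pathlib import Path

# Import path is an assumption; adjust to where convert_ckpt_to_diffusers lives in your tree.
from invokeai.backend.model_management.convert_ckpt_to_diffusers import convert_ckpt_to_diffusers

convert_ckpt_to_diffusers(
    checkpoint_path=Path("v1-5-pruned-emaonly.ckpt"),      # hypothetical legacy checkpoint
    dump_path=Path("models/sd-1/pipeline/v1-5"),           # hypothetical diffusers output folder
    model_root=Path("/opt/invokeai/models"),               # MODEL_ROOT becomes <model_root>/core/convert
)
```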
@@ -203,12 +203,12 @@ MAX_CACHE_SIZE = 6.0  # GB
 # ├── sd-1
 # │   ├── controlnet
 # │   ├── lora
-# │   ├── diffusers
+# │   ├── pipeline
 # │   └── textual_inversion
 # ├── sd-2
 # │   ├── controlnet
 # │   ├── lora
-# │   ├── diffusers
+# │   ├── pipeline
 # │   └── textual_inversion
 # └── core
 #     ├── face_reconstruction
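In code terms, the layout sketched in that comment means model paths are composed as `<models_root>/<base>/<type>/<name>`, with shared support models under `core/`. A tiny sketch with illustrative names:

```python
from pathlib import Path

models_root = Path("models")                                   # assumed root
pipeline = models_root / "sd-1" / "pipeline" / "my-model"      # a diffusers pipeline
codeformer = models_root / "core" / "face_restoration" / "codeformer" / "codeformer.pth"
```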
@@ -21,6 +21,7 @@ class ModelVariantInfo(object):
     base_type: BaseModelType
     variant_type: ModelVariantType
     prediction_type: SchedulerPredictionType
+    format: Literal['folder','checkpoint']
     image_size: int
 
 class ProbeBase(object):
@@ -76,7 +77,7 @@ class ModelProbe(object):
             format = 'folder' if model_path.is_dir() else 'checkpoint'
         else:
             format = 'folder' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
 
         model_info = None
         try:
             model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -94,18 +95,22 @@ class ModelProbe(object):
                 base_type = base_type,
                 variant_type = variant_type,
                 prediction_type = prediction_type,
+                format = format,
                 image_size = 768 if (base_type==BaseModelType.StableDiffusion2 \
                                      and prediction_type==SchedulerPredictionType.VPrediction \
                                      ) else 512
             )
+        except (KeyError, ValueError) as e:
+            logger.error(f'An error occurred while probing {model_path}: {str(e)}')
+            logger.error(traceback.format_exc())
         except Exception as e:
             return None
 
         return model_info
 
     @classmethod
     def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict)->ModelType:
         if model_path.suffix not in ('.bin','.pt','.ckpt','.safetensors'):
             return None
         if model_path.name=='learned_embeds.bin':
             return ModelType.TextualInversion
         checkpoint = checkpoint or cls._scan_and_load_checkpoint(model_path)
         state_dict = checkpoint.get("state_dict") or checkpoint
         if any([x.startswith("model.diffusion_model") for x in state_dict.keys()]):
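These probe changes are what the new migration script (below) leans on: `heuristic_probe()` returns a `ModelVariantInfo` whose `base_type`/`model_type` values become directory names in the 3.0 layout. A usage sketch, with an assumed model path:

```python
from pathlib import Path
from invokeai.backend.model_management.model_probe import ModelProbe

# The path is hypothetical; any checkpoint file or diffusers folder is probed the same way.
info = ModelProbe().heuristic_probe(Path("models/sd-1/pipeline/my-model.safetensors"))
if info:
    print(info.base_type.value, info.model_type.value, info.format, info.image_size)
```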
@@ -127,6 +132,7 @@ class ModelProbe(object):
         '''
         Get the model type of a hugging-face style folder.
         '''
+        class_name = None
         if model:
             class_name = model.__class__.__name__
         else:
@@ -145,7 +151,7 @@ class ModelProbe(object):
                 conf = json.load(file)
                 class_name = conf['_class_name']
 
-        if type := cls.CLASS2TYPE.get(class_name):
+        if class_name and (type := cls.CLASS2TYPE.get(class_name)):
             return type
 
         # give up
@@ -209,8 +215,10 @@ class CheckpointProbeBase(ProbeBase):
             return ModelVariantType.Inpaint
         elif in_channels == 5:
             return ModelVariantType.Depth
+        elif in_channels == 4:
+            return ModelVariantType.Normal
         else:
-            return None
+            raise Exception("Cannot determine variant type")
 
 class PipelineCheckpointProbe(CheckpointProbeBase):
     def get_base_type(self)->BaseModelType:
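For context on the channel counts above: a stock Stable Diffusion UNet takes 4 latent channels; depth-conditioned (depth2img) models add one depth channel for 5; inpainting models add four masked-image latent channels plus a one-channel mask for 9. As a summary (illustrative mapping, not code from the diff):

```python
VARIANT_BY_IN_CHANNELS = {4: "normal", 5: "depth", 9: "inpaint"}
```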
@@ -291,8 +299,11 @@ class ControlNetCheckpointProbe(CheckpointProbeBase):
                 continue
             if checkpoint[key_name].shape[-1] == 768:
                 return BaseModelType.StableDiffusion1
+            elif checkpoint[key_name].shape[-1] == 1024:
+                return BaseModelType.StableDiffusion2
             elif self.checkpoint_path and self.helper:
                 return self.helper(self.checkpoint_path)
         raise Exception(f"Unable to determine base type for {self.checkpoint_path}")
 
 ########################################################
 # classes for probing folders
@@ -373,14 +384,15 @@ class TextualInversionFolderProbe(FolderProbeBase):
         if not path.exists():
             return None
         checkpoint = ModelProbe._scan_and_load_checkpoint(path)
-        return TextualInversionCheckpointProbe(checkpoint).get_base_type
+        return TextualInversionCheckpointProbe(None,checkpoint=checkpoint).get_base_type()
 
 class ControlNetFolderProbe(FolderProbeBase):
     def get_base_type(self)->BaseModelType:
-        config_file = self.folder_path / 'scheduler_config.json'
+        config_file = self.folder_path / 'config.json'
         if not config_file.exists():
-            return None
-        config = json.load(config_file)
+            raise Exception(f"Cannot determine base type for {self.folder_path}")
+        with open(config_file,'r') as file:
+            config = json.load(file)
         # no obvious way to distinguish between sd2-base and sd2-768
         return BaseModelType.StableDiffusion1 \
             if config['cross_attention_dim']==768 \
@@ -255,5 +255,6 @@ def _convert_ckpt_and_cache(
         vae=vae_model,
         vae_path=str(app_config.root_dir / vae_ckpt_path) if vae_ckpt_path else None,
         scan_needed=True,
+        model_root=app_config.models_path,
     )
     return diffusers_path
@@ -113,7 +113,8 @@ def _convert_vae_ckpt_and_cache(self, mconfig: DictConfig) -> str:
     vae_model = convert_ldm_vae_to_diffusers(
         checkpoint = checkpoint,
         vae_config = config,
-        image_size = image_size
+        image_size = image_size,
+        model_root = app_config.models_path,
     )
     vae_model.save_pretrained(
         diffusers_path,
@@ -5,7 +5,7 @@ class Restoration:
         pass
 
     def load_face_restore_models(
-        self, gfpgan_model_path="./models/gfpgan/GFPGANv1.4.pth"
+        self, gfpgan_model_path="./models/core/face_restoration/gfpgan/GFPGANv1.4.pth"
     ):
         # Load GFPGAN
         gfpgan = self.load_gfpgan(gfpgan_model_path)
@@ -15,7 +15,7 @@ pretrained_model_url = (
 
 class CodeFormerRestoration:
     def __init__(
-        self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth"
+        self, codeformer_dir="./models/core/face_restoration/codeformer", codeformer_model_path="codeformer.pth"
     ) -> None:
 
         self.globals = InvokeAIAppConfig.get_config()
@@ -71,7 +71,7 @@ class CodeFormerRestoration:
             upscale_factor=1,
             use_parse=True,
             device=device,
-            model_rootpath = self.globals.root_dir / "gfpgan" / "weights"
+            model_rootpath = self.globals.model_path / 'core/face_restoration/gfpgan/weights'
         )
         face_helper.clean_all()
         face_helper.read_image(bgr_image_array)
@@ -30,8 +30,8 @@ class ESRGAN:
             upscale=4,
             act_type="prelu",
         )
-        model_path = config.root_dir / "models/realesrgan/realesr-general-x4v3.pth"
-        wdn_model_path = config.root_dir / "models/realesrgan/realesr-general-wdn-x4v3.pth"
+        model_path = config.models_path / "core/upscaling/realesrgan/realesr-general-x4v3.pth"
+        wdn_model_path = config.models_path / "core/upscaling/realesrgan/realesr-general-wdn-x4v3.pth"
         scale = 4
 
         bg_upsampler = RealESRGANer(
@@ -30,18 +30,10 @@ class SafetyChecker(object):
         self.device = device
 
         try:
-            safety_model_id = "CompVis/stable-diffusion-safety-checker"
-            safety_model_path = config.cache_dir
-            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-                safety_model_id,
-                local_files_only=True,
-                cache_dir=safety_model_path,
-            )
-            self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
-                safety_model_id,
-                local_files_only=True,
-                cache_dir=safety_model_path,
-            )
+            safety_model_id = config.models_path / 'core/convert/stable-diffusion-safety-checker'
+            feature_extractor_id = config.models_path / 'core/convert/stable-diffusion-safety-checker-extractor'
+            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
+            self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(feature_extractor_id)
         except Exception:
             logger.error(
                 "An error was encountered while installing the safety checker:"
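Note the coupling here: the directory names loaded above (and in the conversion hunks earlier) must match what `migrate_conversion_models()` in the new script below writes under `core/convert`. A quick sanity-check sketch, with an assumed root path:

```python
from pathlib import Path

models_path = Path("/opt/invokeai/models")  # assumed models root
for name in (
    "bert-base-uncased",
    "clip-vit-large-patch14",
    "stable-diffusion-2-tokenizer",
    "stable-diffusion-2-text_encoder",
    "stable-diffusion-safety-checker",
    "stable-diffusion-safety-checker-extractor",
):
    print(name, (models_path / "core/convert" / name).exists())
```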
274	scripts/migrate_models_to_3.0.py	Normal file

@@ -0,0 +1,274 @@
'''
Migrate the models directory and models.yaml file from an existing
InvokeAI 2.3 installation to 3.0.0.
'''

import io
import os
import argparse
import shutil
import yaml

import transformers
import diffusers
import warnings
from pathlib import Path
from omegaconf import OmegaConf
from diffusers import StableDiffusionPipeline, AutoencoderKL
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    AutoFeatureExtractor,
    BertTokenizerFast,
)

import invokeai.backend.util.logging as logger
from invokeai.backend.model_management.model_probe import (
    ModelProbe, ModelType, BaseModelType
)

warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
diffusers.logging.set_verbosity_error()

def create_directory_structure(dest: Path):
    for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
        for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
                           ModelType.ControlNet,ModelType.TextualInversion]:
            path = dest / model_base.value / model_type.value
            path.mkdir(parents=True, exist_ok=True)
    path = dest / 'core'
    path.mkdir(parents=True, exist_ok=True)

def copy_file(src:Path,dest:Path):
    logger.info(f'Copying {str(src)} to {str(dest)}')
    try:
        shutil.copy(src, dest)
    except Exception as e:
        logger.error(f'COPY FAILED: {str(e)}')

def copy_dir(src:Path,dest:Path):
    logger.info(f'Copying {str(src)} to {str(dest)}')
    try:
        shutil.copytree(src, dest)
    except Exception as e:
        logger.error(f'COPY FAILED: {str(e)}')

def migrate_models(src_dir: Path, dest_dir: Path):
    for root, dirs, files in os.walk(src_dir):
        for f in files:
            # hack - don't copy raw learned_embeds.bin, let them
            # be copied as part of a tree copy operation
            if f == 'learned_embeds.bin':
                continue
            try:
                model = Path(root,f)
                info = ModelProbe().heuristic_probe(model)
                if not info:
                    continue
                dest = Path(dest_dir, info.base_type.value, info.model_type.value, f)
                copy_file(model, dest)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                logger.error(str(e))
        for d in dirs:
            try:
                model = Path(root,d)
                info = ModelProbe().heuristic_probe(model)
                if not info:
                    continue
                dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name)
                copy_dir(model, dest)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                logger.error(str(e))

def migrate_support_models(dest_directory: Path):
    if Path('./models/clipseg').exists():
        copy_dir(Path('./models/clipseg'),dest_directory / 'core/misc/clipseg')
    if Path('./models/realesrgan').exists():
        copy_dir(Path('./models/realesrgan'),dest_directory / 'core/upscaling/realesrgan')
    for d in ['codeformer','gfpgan']:
        path = Path('./models',d)
        if path.exists():
            copy_dir(path,dest_directory / f'core/face_restoration/{d}')

def migrate_conversion_models(dest_directory: Path):
    # These are needed for the conversion script
    kwargs = dict(
        cache_dir = Path('./models/hub'),
        local_files_only = True
    )
    try:
        logger.info('Migrating core tokenizers and text encoders')
        target_dir = dest_directory / 'core/convert'

        # bert
        bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs)
        bert.save_pretrained(target_dir/'bert-base-uncased', safe_serialization=True)

        # sd-1
        repo_id = 'openai/clip-vit-large-patch14'
        pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
        pipeline.save_pretrained(target_dir/'clip-vit-large-patch14', safe_serialization=True)

        pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs)
        pipeline.save_pretrained(target_dir/'clip-vit-large-patch14', safe_serialization=True)

        # sd-2
        repo_id = "stabilityai/stable-diffusion-2"
        pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder = "tokenizer", **kwargs)
        pipeline.save_pretrained(target_dir/'stable-diffusion-2-tokenizer', safe_serialization=True)

        pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder = "text_encoder", **kwargs)
        pipeline.save_pretrained(target_dir/'stable-diffusion-2-text_encoder', safe_serialization=True)

        # VAE
        logger.info('Migrating stable diffusion VAE')
        vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)
        vae.save_pretrained(target_dir/'sd-vae-ft-mse', safe_serialization=True)

        # safety checking
        logger.info('Migrating safety checker')
        repo_id = "CompVis/stable-diffusion-safety-checker"
        pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs)
        pipeline.save_pretrained(target_dir/'stable-diffusion-safety-checker-extractor', safe_serialization=True)

        pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs)
        pipeline.save_pretrained(target_dir/'stable-diffusion-safety-checker', safe_serialization=True)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logger.error(str(e))

def migrate_tuning_models(dest: Path):
    for subdir in ['embeddings','loras','controlnets']:
        src = Path('.',subdir)
        if not src.is_dir():
            logger.info(f'{subdir} directory not found; skipping')
            continue
        logger.info(f'Scanning {subdir}')
        migrate_models(src, dest)

def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase):
    cache = Path('./models/hub')
    kwargs = dict(
        cache_dir = cache,
        local_files_only = True,
        safety_checker = None,
    )
    for model in cache.glob('models--*'):
        if len(list(model.glob('snapshots/**/model_index.json')))==0:
            continue
        _,owner,repo_name = model.name.split('--')
        repo_id = f'{owner}/{repo_name}'
        revisions = [x.name for x in model.glob('refs/*')]
        for revision in revisions:
            logger.info(f'Migrating {repo_id}, revision {revision}')
            try:
                pipeline = StableDiffusionPipeline.from_pretrained(
                    repo_id,
                    revision=revision,
                    **kwargs)
                info = ModelProbe().heuristic_probe(pipeline)
                if not info:
                    continue
                dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}-{revision}')
                pipeline.save_pretrained(dest, safe_serialization=True)
                rel_path = Path('models',dest.relative_to(dest_dir))
                stanza = {
                    f'{info.base_type.value}/{info.model_type.value}/{repo_name}-{revision}':
                    {
                        'name': repo_name,
                        'path': str(rel_path),
                        'description': f'diffusers model {repo_id}',
                        'format': 'folder',
                        'image_size': info.image_size,
                        'base': info.base_type.value,
                        'variant': info.variant_type.value,
                        'prediction_type': info.prediction_type.value,
                    }
                }
                print(yaml.dump(stanza),file=dest_yaml,end="")
                dest_yaml.flush()
            except KeyboardInterrupt:
                raise
            except Exception as e:
                logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.')

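Two details worth noting in `migrate_pipelines()`: the `models--{owner}--{repo}` directory names it globs for are the HuggingFace hub cache convention, and each migrated pipeline is appended to the new config as one YAML stanza via `yaml.dump(stanza)`. For concreteness, a sketch of such a stanza with hypothetical values (assuming `runwayml/stable-diffusion-v1-5` at revision `main`):

```python
# Hypothetical example of the stanza dict built above; yaml.dump(stanza)
# appends the corresponding entry to models.yaml-3.0.
stanza = {
    "sd-1/pipeline/stable-diffusion-v1-5-main": {
        "name": "stable-diffusion-v1-5",
        "path": "models/sd-1/pipeline/stable-diffusion-v1-5-main",
        "description": "diffusers model runwayml/stable-diffusion-v1-5",
        "format": "folder",
        "image_size": 512,
        "base": "sd-1",
        "variant": "normal",
        "prediction_type": "epsilon",
    }
}
```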
def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase):
    # find any checkpoints referred to in old models.yaml
    conf = OmegaConf.load('./configs/models.yaml')
    orig_models_dir = Path.cwd() / 'models'
    for model_name, stanza in conf.items():
        if stanza.get('format') and stanza['format'] == 'ckpt':
            try:
                logger.info(f'Migrating checkpoint model {model_name}')
                weights = orig_models_dir.parent / stanza['weights']
                config = stanza['config']
                info = ModelProbe().heuristic_probe(weights)
                if not info:
                    continue

                # uh oh, weights is in the old models directory - move it into the new one
                if Path(weights).is_relative_to(orig_models_dir):
                    dest = Path(dest_dir, info.base_type.value, info.model_type.value, weights.name)
                    copy_file(weights,dest)
                    weights = Path('models', info.base_type.value, info.model_type.value, weights.name)
                stanza = {
                    f'{info.base_type.value}/{info.model_type.value}/{model_name}':
                    {
                        'name': model_name,
                        'path': str(weights),
                        'description': f'checkpoint model {model_name}',
                        'format': 'checkpoint',
                        'image_size': info.image_size,
                        'base': info.base_type.value,
                        'variant': info.variant_type.value,
                        'config': config
                    }
                }
                print(yaml.dump(stanza),file=dest_yaml,end="")
                dest_yaml.flush()
            except KeyboardInterrupt:
                raise
            except Exception as e:
                logger.error(str(e))

def main():
    parser = argparse.ArgumentParser(description="Model directory migrator")
    parser.add_argument('root_directory',
                        help='Root directory (containing "models", "embeddings", "controlnets" and "loras")'
                        )
    parser.add_argument('--dest-directory',
                        default='./models-3.0',
                        help='Destination for new models directory',
                        )
    parser.add_argument('--dest-yaml',
                        default='./models.yaml-3.0',
                        help='Destination for new models.yaml file',
                        )
    args = parser.parse_args()
    root_directory = Path(args.root_directory)
    assert root_directory.is_dir(), f"{root_directory} is not a valid directory"
    assert (root_directory / 'models').is_dir(), f"{root_directory} does not contain a 'models' subdirectory"

    dest_directory = Path(args.dest_directory).resolve()
    dest_yaml = Path(args.dest_yaml).resolve()

    os.chdir(root_directory)
    with open(dest_yaml,'w') as yaml_file:
        print(yaml.dump({'_version':'3.0.0'}),file=yaml_file,end="")
        create_directory_structure(dest_directory)
        migrate_support_models(dest_directory)
        migrate_conversion_models(dest_directory)
        migrate_tuning_models(dest_directory)
        migrate_pipelines(dest_directory,yaml_file)
        migrate_checkpoints(dest_directory,yaml_file)

if __name__ == '__main__':
    main()