Skip to content

Commit 8b19b75

Browse files
Merge pull request #16567 from AUTOMATIC1111/feat/sdxl-vpred
Support and automatically detect SDXL V-prediction models
2 parents 907bfb5 + 1ae073c commit 8b19b75

File tree

3 files changed

+106
-3
lines changed

3 files changed

+106
-3
lines changed

configs/sd_xl_v.yaml

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
# SDXL v-prediction inference config.
# Identical to sgm's sd_xl_base.yaml except the denoiser scaling is VScaling
# (v-prediction objective) with EpsWeighting, used when a checkpoint is
# detected as a v-pred SDXL model.
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          # v-prediction: the only functional difference from sd_xl_base.yaml
          target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2816
        num_classes: sequential
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: [1, 2, 10]  # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
        context_dim: 2048
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              layer: hidden
              layer_idx: 11
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              freeze: True
              layer: penultimate
              always_return_pooled: True
              legacy: False
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: target_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

modules/sd_models.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -783,7 +783,7 @@ def get_obj_from_str(string, reload=False):
783783
return getattr(importlib.import_module(module, package=None), cls)
784784

785785

786-
def load_model(checkpoint_info=None, already_loaded_state_dict=None):
786+
def load_model(checkpoint_info=None, already_loaded_state_dict=None, checkpoint_config=None):
787787
from modules import sd_hijack
788788
checkpoint_info = checkpoint_info or select_checkpoint()
789789

@@ -801,7 +801,8 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
801801
else:
802802
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
803803

804-
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
804+
if not checkpoint_config:
805+
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
805806
clip_is_included_into_sd = any(x for x in [sd1_clip_weight, sd2_clip_weight, sdxl_clip_weight, sdxl_refiner_clip_weight] if x in state_dict)
806807

807808
timer.record("find config")
@@ -974,7 +975,7 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False):
974975
if sd_model is not None:
975976
send_model_to_trash(sd_model)
976977

977-
load_model(checkpoint_info, already_loaded_state_dict=state_dict)
978+
load_model(checkpoint_info, already_loaded_state_dict=state_dict, checkpoint_config=checkpoint_config)
978979
return model_data.sd_model
979980

980981
try:

modules/sd_models_config.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
1515
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
1616
config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
17+
config_sdxlv = os.path.join(sd_configs_path, "sd_xl_v.yaml")
1718
config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
1819
config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
1920
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
@@ -81,6 +82,9 @@ def guess_model_config_from_state_dict(sd, filename):
8182
if diffusion_model_input.shape[1] == 9:
8283
return config_sdxl_inpainting
8384
else:
85+
if ('v_pred' in sd):
86+
del sd['v_pred']
87+
return config_sdxlv
8488
return config_sdxl
8589

8690
if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:

0 commit comments

Comments
 (0)