
Commit 10c919f

Make it possible to load tokenizer data from checkpoints.
1 parent ce80e69 commit 10c919f

File tree

8 files changed, +26 −31 lines changed
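Every change follows one pattern: an optional tokenizer_data mapping is threaded from the checkpoint loader down through each tokenizer constructor, so tokenizer data stored inside a checkpoint can reach the leaf tokenizer that needs it. Below is a minimal sketch of that flow, not verbatim repo code; the class names and the "spiece_model" key are illustrative assumptions.

# A minimal sketch of the threading pattern this commit applies; the class
# names and the "spiece_model" key are illustrative, not from the repo.
class LeafTokenizer:
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        # a leaf tokenizer can now pull e.g. a serialized tokenizer model
        # out of the checkpoint's state dict instead of reading it from disk
        self.model_proto = tokenizer_data.get("spiece_model")

class WrapperTokenizer:
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        # wrapper tokenizers forward tokenizer_data unchanged to their children
        self.inner = LeafTokenizer(embedding_directory=embedding_directory,
                                   tokenizer_data=tokenizer_data)

# the checkpoint loader passes the CLIP state dict down as tokenizer_data,
# mirroring the load_checkpoint_guess_config change in comfy/sd.py below
tok = WrapperTokenizer(tokenizer_data={"spiece_model": b"..."})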


comfy/sd.py

Lines changed: 3 additions & 3 deletions
@@ -60,7 +60,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
 
 
 class CLIP:
-    def __init__(self, target=None, embedding_directory=None, no_init=False):
+    def __init__(self, target=None, embedding_directory=None, no_init=False, tokenizer_data={}):
         if no_init:
             return
         params = target.params.copy()
@@ -79,7 +79,7 @@ def __init__(self, target=None, embedding_directory=None, no_init=False):
             if not model_management.supports_cast(load_device, dt):
                 load_device = offload_device
 
-        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
+        self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
         self.patcher = comfy.model_patcher.ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
         self.layer_idx = None
         logging.debug("CLIP model load device: {}, offload device: {}".format(load_device, offload_device))
@@ -520,7 +520,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         if clip_target is not None:
             clip_sd = model_config.process_clip_state_dict(sd)
             if len(clip_sd) > 0:
-                clip = CLIP(clip_target, embedding_directory=embedding_directory)
+                clip = CLIP(clip_target, embedding_directory=embedding_directory, tokenizer_data=clip_sd)
                 m, u = clip.load_sd(clip_sd, full_model=True)
                 if len(m) > 0:
                     m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))

comfy/sd1_clip.py

Lines changed: 3 additions & 3 deletions
@@ -386,7 +386,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
     return embed_out
 
 class SDTokenizer:
-    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True, min_length=None, pad_token=None):
+    def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True, min_length=None, pad_token=None, tokenizer_data={}):
         if tokenizer_path is None:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
         self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
@@ -521,10 +521,10 @@ def untokenize(self, token_weight_pair):
 
 
 class SD1Tokenizer:
-    def __init__(self, embedding_directory=None, clip_name="l", tokenizer=SDTokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}, clip_name="l", tokenizer=SDTokenizer):
         self.clip_name = clip_name
         self.clip = "clip_{}".format(self.clip_name)
-        setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory))
+        setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data))
 
     def tokenize_with_weights(self, text:str, return_word_ids=False):
         out = {}

comfy/sd2_clip.py

Lines changed: 3 additions & 3 deletions
@@ -11,12 +11,12 @@ def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, la
         super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0})
 
 class SD2ClipHTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, tokenizer_path=None, embedding_directory=None):
+    def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}):
         super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)
 
 class SD2Tokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, embedding_directory=None):
-        super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer)
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="h", tokenizer=SD2ClipHTokenizer)
 
 class SD2ClipModel(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, **kwargs):
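The same two-line signature change repeats in sdxl_clip.py, aura_t5.py, and sa_t5.py below. A hypothetical downstream tokenizer written to this commit's convention would look like the sketch here; MyLeafTokenizer and MyTokenizer are assumptions, not repo code.

import comfy.sd1_clip as sd1_clip

class MyLeafTokenizer(sd1_clip.SDTokenizer):
    def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}):
        # accept tokenizer_data even when unused, so callers can pass it uniformly
        super().__init__(tokenizer_path, embedding_directory=embedding_directory)

class MyTokenizer(sd1_clip.SD1Tokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        # forward tokenizer_data so SD1Tokenizer can hand it to the leaf
        super().__init__(embedding_directory=embedding_directory,
                         tokenizer_data=tokenizer_data,
                         clip_name="my_clip", tokenizer=MyLeafTokenizer)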

comfy/sdxl_clip.py

Lines changed: 5 additions & 5 deletions
@@ -16,12 +16,12 @@ def load_sd(self, sd):
         return super().load_sd(sd)
 
 class SDXLClipGTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, tokenizer_path=None, embedding_directory=None):
+    def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}):
         super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g')
 
 
 class SDXLTokenizer:
-    def __init__(self, embedding_directory=None):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
         self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
         self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory)
 
@@ -68,12 +68,12 @@ def __init__(self, device="cpu", dtype=None):
 
 
 class StableCascadeClipGTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, tokenizer_path=None, embedding_directory=None):
+    def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}):
         super().__init__(tokenizer_path, pad_with_end=True, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g')
 
 class StableCascadeTokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, embedding_directory=None):
-        super().__init__(embedding_directory=embedding_directory, clip_name="g", tokenizer=StableCascadeClipGTokenizer)
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="g", tokenizer=StableCascadeClipGTokenizer)
 
 class StableCascadeClipG(sd1_clip.SDClipModel):
     def __init__(self, device="cpu", max_length=77, freeze=True, layer="hidden", layer_idx=-1, dtype=None):

comfy/text_encoders/aura_t5.py

Lines changed: 3 additions & 3 deletions
@@ -9,13 +9,13 @@ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
         super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 2, "pad": 1}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=True)
 
 class PT5XlTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, embedding_directory=None):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer_path = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_pile_tokenizer"), "tokenizer.model")
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1)
 
 class AuraT5Tokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, embedding_directory=None):
-        super().__init__(embedding_directory=embedding_directory, clip_name="pile_t5xl", tokenizer=PT5XlTokenizer)
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="pile_t5xl", tokenizer=PT5XlTokenizer)
 
 class AuraT5Model(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, **kwargs):

comfy/text_encoders/sa_t5.py

Lines changed: 3 additions & 3 deletions
@@ -9,13 +9,13 @@ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
         super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=True)
 
 class T5BaseTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, embedding_directory=None):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=768, embedding_key='t5base', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128)
 
 class SAT5Tokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, embedding_directory=None):
-        super().__init__(embedding_directory=embedding_directory, clip_name="t5base", tokenizer=T5BaseTokenizer)
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5base", tokenizer=T5BaseTokenizer)
 
 class SAT5Model(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, **kwargs):

comfy/text_encoders/sd3_clip.py

Lines changed: 2 additions & 11 deletions
@@ -13,22 +13,13 @@ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
         super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5)
 
 class T5XXLTokenizer(sd1_clip.SDTokenizer):
-    def __init__(self, embedding_directory=None):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77)
 
-class SDT5XXLTokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, embedding_directory=None):
-        super().__init__(embedding_directory=embedding_directory, clip_name="t5xxl", tokenizer=T5XXLTokenizer)
-
-class SDT5XXLModel(sd1_clip.SD1ClipModel):
-    def __init__(self, device="cpu", dtype=None, **kwargs):
-        super().__init__(device=device, dtype=dtype, clip_name="t5xxl", clip_model=T5XXLModel, **kwargs)
-
-
 
 class SD3Tokenizer:
-    def __init__(self, embedding_directory=None):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
         self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
         self.clip_g = sdxl_clip.SDXLClipGTokenizer(embedding_directory=embedding_directory)
         self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory)

comfy/text_encoders/spiece_tokenizer.py

Lines changed: 4 additions & 0 deletions
@@ -1,4 +1,5 @@
 import os
+import torch
 
 class SPieceTokenizer:
     add_eos = True
@@ -9,6 +10,9 @@ def from_pretrained(path):
 
     def __init__(self, tokenizer_path):
         import sentencepiece
+        if torch.is_tensor(tokenizer_path):
+            tokenizer_path = tokenizer_path.numpy().tobytes()
+
         if isinstance(tokenizer_path, bytes):
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_eos=self.add_eos)
         else:
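The torch import and the new branch exist because checkpoint state dicts carry tensors, not raw bytes: a sentencepiece model proto stored in a checkpoint arrives as a uint8 tensor, and .numpy().tobytes() recovers the original bytes for sentencepiece. A standalone round trip under that assumption (the file path is illustrative):

import torch

# read a sentencepiece model from disk (path is an assumption)
with open("tokenizer.model", "rb") as f:
    proto = f.read()

# pack the bytes into a uint8 tensor, the way a checkpoint would store them
packed = torch.frombuffer(bytearray(proto), dtype=torch.uint8)

# unpack, mirroring what SPieceTokenizer.__init__ now does
assert packed.numpy().tobytes() == proto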
