Skip to content

Commit 3dc4d75

Browse files
committed
* Resize By Person : Dilation bug fix
* New Node : BMABClipTextEncoderSDXL * New Node : BMABCrop
1 parent ebc9238 commit 3dc4d75

File tree

5 files changed

+80
-15
lines changed

5 files changed

+80
-15
lines changed

bmab/__init__.py

Lines changed: 12 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -6,14 +6,17 @@
66
'BMAB Basic': nodes.BMABBasic,
77
'BMAB Edge': nodes.BMABEdge,
88
'BMAB Upscaler': nodes.BMABUpscale,
9-
'BMAB Resize By Person': nodes.BMABResizeByPerson,
109
'BMAB Save Image': nodes.BMABSaveImage,
1110
'BMAB Upscale With Model': nodes.BMABUpscaleWithModel,
1211
'BMAB LoRA Loader': nodes.BMABLoraLoader,
1312
'BMAB Prompt': nodes.BMABPrompt,
14-
'BMAB Resize and Fill': nodes.BMABResizeAndFill,
1513
'BMAB Google Gemini Prompt': nodes.BMABGoogleGemini,
1614

15+
# Resize
16+
'BMAB Resize By Person': nodes.BMABResizeByPerson,
17+
'BMAB Resize and Fill': nodes.BMABResizeAndFill,
18+
'BMAB Crop': nodes.BMABCrop,
19+
1720
# Sampler
1821
'BMAB Integrator': nodes.BMABIntegrator,
1922
'BMAB Extractor': nodes.BMABExtractor,
@@ -24,6 +27,7 @@
2427
'BMAB Context': nodes.BMABContextNode,
2528
'BMAB Import Integrator': nodes.BMABImportIntegrator,
2629
'BMAB KSamplerKohyaDeepShrink': nodes.BMABKSamplerKohyaDeepShrink,
30+
'BMAB Clip Text Encoder SDXL': nodes.BMABClipTextEncoderSDXL,
2731

2832
# Detailer
2933
'BMAB Face Detailer': nodes.BMABFaceDetailer,
@@ -68,15 +72,18 @@
6872
'BMAB Edge': 'BMAB Edge',
6973
'BMAB DinoSam': 'BMAB DinoSam',
7074
'BMAB Upscaler': 'BMAB Upscaler',
71-
'BMAB Resize By Person': 'BMAB Resize By Person',
7275
'BMAB Control Net': 'BMAB ControlNet',
7376
'BMAB Save Image': 'BMAB Save Image',
7477
'BMAB Upscale With Model': 'BMAB Upscale With Model',
7578
'BMAB LoRA Loader': 'BMAB Lora Loader',
7679
'BMAB Prompt': 'BMAB Prompt',
77-
'BMAB Resize and Fill': 'BMAB Resize And Fill',
7880
'BMAB Google Gemini Prompt': 'BMAB Google Gemini API',
7981

82+
# Resize
83+
'BMAB Resize By Person': 'BMAB Resize By Person',
84+
'BMAB Resize and Fill': 'BMAB Resize And Fill',
85+
'BMAB Crop': 'BMAB Crop',
86+
8087
# Sampler
8188
'BMAB Integrator': 'BMAB Integrator',
8289
'BMAB KSampler': 'BMAB KSampler',
@@ -87,6 +94,7 @@
8794
'BMAB Context': 'BMAB Context',
8895
'BMAB Import Integrator': 'BMAB Import Integrator',
8996
'BMAB KSamplerKohyaDeepShrink': 'BMAB KSampler with Kohya Deep Shrink',
97+
'BMAB Clip Text Encoder SDXL': 'BMAB Clip Text Encoder SDXL',
9098

9199
# Detailer
92100
'BMAB Face Detailer': 'BMAB Face Detailer',

bmab/nodes/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -7,9 +7,10 @@
77
from .imaging import BMABDetectAndMask, BMABLamaInpaint, BMABDetector, BMABSegmentAnything, BMABMasksToImages
88
from .imaging import BMABLoadImage
99
from .loaders import BMABLoraLoader
10-
from .resize import BMABResizeByPerson, BMABResizeAndFill
10+
from .resize import BMABResizeByPerson, BMABResizeAndFill, BMABCrop
1111
from .sampler import BMABKSampler, BMABKSamplerHiresFix, BMABPrompt, BMABIntegrator, BMABSeedGenerator, BMABExtractor
1212
from .sampler import BMABContextNode, BMABKSamplerHiresFixWithUpscaler, BMABImportIntegrator, BMABKSamplerKohyaDeepShrink
13+
from .sampler import BMABClipTextEncoderSDXL
1314
from .upscaler import BMABUpscale, BMABUpscaleWithModel
1415
from .toy import BMABGoogleGemini
1516
from .a1111api import BMABApiServer, BMABApiSDWebUIT2I, BMABApiSDWebUIT2IHiresFix, BMABApiSDWebUIControlNet

bmab/nodes/resize.py

Lines changed: 33 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -67,7 +67,7 @@ def process(self, bind: BMABBind, steps, cfg_scale, sampler_name, scheduler, den
6767
if bind.context is not None:
6868
steps, cfg_scale, sampler_name, scheduler = bind.context.update(steps, cfg_scale, sampler_name, scheduler)
6969

70-
boxes, confs = predict(image, 'person_yolov8m-seg.pt', 0.35)
70+
boxes, confs = predict(image, 'person_yolov8n-seg.pt', 0.35)
7171
if len(boxes) == 0:
7272
results.append(image.convert('RGB'))
7373
continue
@@ -93,7 +93,7 @@ def process(self, bind: BMABBind, steps, cfg_scale, sampler_name, scheduler, den
9393
if method == 'stretching':
9494
results.append(stretching_image.convert('RGB'))
9595
elif method == 'inpaint':
96-
mask, box = utils.get_mask_with_alignment(image, alignment, int(image.width * image_ratio), int(image.height * image_ratio), dilation)
96+
mask, box = utils.get_mask_with_alignment(image, alignment, int(image.width * image_ratio), int(image.height * image_ratio))
9797
blur = ImageFilter.GaussianBlur(10)
9898
blur_mask = mask.filter(blur)
9999
blur_mask = ImageOps.invert(blur_mask)
@@ -112,13 +112,13 @@ def process(self, bind: BMABBind, steps, cfg_scale, sampler_name, scheduler, den
112112
'height': stretching_image.height,
113113
}
114114
image = process.process_img2img_with_mask(bind, stretching_image, img2img, mask)
115-
stretching_image.save('stretching_image.png')
116-
mask.save('mask.png')
117115
results.append(image.convert('RGB'))
118116
elif method == 'inpaint+lama':
119-
mask, box = utils.get_mask_with_alignment(image, alignment, int(image.width * image_ratio), int(image.height * image_ratio), dilation)
117+
mask, box = utils.get_mask_with_alignment(image, alignment, int(image.width * image_ratio), int(image.height * image_ratio))
120118
lama = LamaInpainting()
121119
stretching_image = lama(stretching_image, mask)
120+
stretching_image.save('stretching_image.png')
121+
mask.save('mask.png')
122122
img2img = {
123123
'steps': steps,
124124
'cfg_scale': cfg_scale,
@@ -162,3 +162,31 @@ def process(self, image, width, height, fill_black):
162162
results.append(utils.resize_and_fill(img, width, height, fill_black=fill_black))
163163
pixels = utils.get_pixels_from_pils(results)
164164
return (pixels,)
165+
166+
167+
class BMABCrop:
168+
@classmethod
169+
def INPUT_TYPES(s):
170+
return {
171+
'required': {
172+
'image': ('IMAGE',),
173+
'width': ('INT', {'default': 2, 'min': 0, 'max': 10000}),
174+
'height': ('INT', {'default': 3, 'min': 0, 'max': 10000}),
175+
'resize': (('disable', 'enable'), )
176+
},
177+
}
178+
179+
RETURN_TYPES = ('IMAGE', )
180+
RETURN_NAMES = ('image', )
181+
FUNCTION = 'process'
182+
183+
CATEGORY = 'BMAB/resize'
184+
185+
def process(self, image, width, height, resize):
186+
results = []
187+
resize = resize == 'enable'
188+
for img in utils.get_pils_from_pixels(image):
189+
results.append(utils.crop(img, width, height, resized=resize))
190+
pixels = utils.get_pixels_from_pils(results)
191+
return (pixels,)
192+

bmab/nodes/sampler.py

Lines changed: 20 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,7 @@
77

88
from comfy_extras.chainner_models import model_loading
99
from comfy import model_management
10+
from comfy_extras import nodes_clip_sdxl
1011

1112
from bmab import utils
1213
from bmab.nodes.binder import BMABBind
@@ -216,8 +217,8 @@ def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
216217
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
217218
return (model_lora, clip_lora)
218219

219-
def sample(self, bind: BMABBind, steps, cfg_scale, sampler_name, scheduler, denoise=1.0, lora: BMABLoraBind = None):
220-
print('Sampler SEED', bind.seed, bind.model)
220+
def sample(self, bind: BMABBind, steps, cfg_scale, sampler_name, scheduler, denoise, lora: BMABLoraBind = None):
221+
print('Sampler SEED', bind.seed)
221222
if bind.context is not None:
222223
steps, cfg_scale, sampler_name, scheduler = bind.context.update(steps, cfg_scale, sampler_name, scheduler)
223224
if lora is not None:
@@ -517,3 +518,20 @@ def output_block_patch(h, hsp, transformer_options):
517518
return m
518519

519520

521+
class BMABClipTextEncoderSDXL(nodes_clip_sdxl.CLIPTextEncodeSDXL):
522+
523+
@classmethod
524+
def INPUT_TYPES(s):
525+
dic = super().INPUT_TYPES()
526+
dic['optional'] = {
527+
'seed': ('SEED', )
528+
}
529+
return dic
530+
531+
CATEGORY = 'BMAB/sampler'
532+
533+
def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l, seed=None):
534+
if seed is not None:
535+
text_g = utils.parse_prompt(text_g, seed)
536+
text_l = utils.parse_prompt(text_l, seed)
537+
return super().encode(clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l)

bmab/utils/__init__.py

Lines changed: 13 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -272,15 +272,25 @@ def resize_and_fill(im, width, height, fill_black=False):
272272
return res
273273

274274

275-
def crop(image, width, height):
276-
if image.width != width and image.height != height:
277-
raise ValueError('Image not matched')
275+
def crop(image, width, height, resized=True):
276+
# if image.width != width and image.height != height:
277+
# raise ValueError('Image not matched')
278278
iratio = image.width / image.height
279279
cratio = width / height
280280
if iratio < cratio:
281+
if resized:
282+
ratio = width / image.width
283+
image = image.resize((int(image.width * ratio), int(image.height * ratio)))
284+
else:
285+
width, height = image.width, int(image.width * height / width)
281286
y0 = (image.height - height) // 2
282287
image = image.crop((0, y0, width, y0 + height))
283288
else:
289+
if resized:
290+
ratio = height / image.height
291+
image = image.resize((int(image.width * ratio), int(image.height * ratio)))
292+
else:
293+
width, height = int(image.height * width / height), image.height
284294
x0 = (image.width - width) // 2
285295
image = image.crop((x0, 0, x0 + width, height))
286296
return image

0 commit comments

Comments (0)