
Commit 9fadc6a: fix merge
Merge of 2 parents: 16261c3 + 731b9c8

9 files changed (+50, -22 lines)

Diff for: requirements/FUNCTIONAL.txt (+1, -1)

@@ -16,7 +16,7 @@ einops==0.3.0
 torch-fidelity==0.3.0
 transformers==4.38.1
 torchmetrics==0.7.0
-safetensors==0.3.2
+safetensors==0.4.2
 kornia==0.6
 accelerate==0.24.1
 taming-transformers-rom1504==0.0.6
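
All eight requirements files carry the same single change: the safetensors pin moves from 0.3.2 to 0.4.2. A minimal sanity check after reinstalling, assuming the package's standard `__version__` attribute (the check itself is just an illustration):

```python
# Confirm the upgraded safetensors pin actually took effect.
import safetensors

assert safetensors.__version__ == "0.4.2", f"unexpected version: {safetensors.__version__}"
print("safetensors", safetensors.__version__)
```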

Diff for: requirements/Pytorch/CPU.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 torch==2.2.0 --index-url https://download.pytorch.org/whl/cpu --trusted-host download.pytorch.org
 torchvision==0.17.0 --index-url https://download.pytorch.org/whl/cpu --trusted-host download.pytorch.org

Diff for: requirements/Pytorch/CPUBETA.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu --trusted-host download.pytorch.org
 --pre torchvision --index-url https://download.pytorch.org/whl/nightly/cpu --trusted-host download.pytorch.org

Diff for: requirements/Pytorch/CUDA.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 torch==2.2.0 -i https://download.pytorch.org/whl/cu121 --trusted-host download.pytorch.org
 torchvision==0.17.0 -i https://download.pytorch.org/whl/cu121 --trusted-host download.pytorch.org

Diff for: requirements/Pytorch/CUDABETA.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 --pre torch -i https://download.pytorch.org/whl/nightly/cu121 --trusted-host download.pytorch.org
 --pre torchvision -i https://download.pytorch.org/whl/nightly/cu121 --trusted-host download.pytorch.org

Diff for: requirements/Pytorch/PYTORCHBASIC.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 torch==2.2.0
 torchvision==0.17.0

Diff for: requirements/Pytorch/ROCM.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 torch==2.2.0 -i https://download.pytorch.org/whl/rocm5.7 --trusted-host download.pytorch.org
 torchvision==0.17.0 -i https://download.pytorch.org/whl/rocm5.7 --trusted-host download.pytorch.org

Diff for: requirements/Pytorch/ROCMBETA.txt (+1, -1)

@@ -1,3 +1,3 @@
-safetensors==0.3.2
+safetensors==0.4.2
 --pre torch -i https://download.pytorch.org/whl/nightly/rocm5.7 --trusted-host download.pytorch.org
 --pre torchvision -i https://download.pytorch.org/whl/nightly/rocm5.7 --trusted-host download.pytorch.org

Diff for: scripts/image_server.py (+42, -14)

@@ -1414,19 +1414,21 @@ def render(modelTA, modelPV, samples_ddim, device, H, W, pixelSize, pixelvae, ti
             x_sample = torch.clamp((x_sample.cpu().float()), min = 0.0, max = 1.0)
             x_sample = x_sample.cpu().movedim(1, -1)
             x_sample = 255.0 * x_sample[0].cpu().numpy()
-            x_sample = Image.fromarray(np.clip(x_sample, 0, 255).astype(np.uint8))
+            x_sample = np.clip(x_sample, 0, 255).astype(np.uint8)
+
+            # Denoise the generated image
+            x_sample = cv2.fastNlMeansDenoisingColored(x_sample, None, 6, 6, 3, 21)

             # Color adjustments to account for Tiny Autoencoder
-            contrast = ImageEnhance.Contrast(x_sample)
+            contrast = ImageEnhance.Contrast(Image.fromarray(x_sample))
             x_sample_contrast = contrast.enhance(1.3)
             saturation = ImageEnhance.Color(x_sample_contrast)
             x_sample_saturation = saturation.enhance(1.2)

             # Convert back to NumPy array if necessary
             x_sample = np.array(x_sample_saturation)

-            # Denoise the generated image
-            x_sample = cv2.fastNlMeansDenoisingColored(x_sample, None, 6, 6, 3, 21)
+
         except:
             if "torch.cuda.OutOfMemoryError" in traceback.format_exc() or "Invalid buffer size" in traceback.format_exc():
                 rprint(f"\n[#ab333d]Ran out of VRAM during decode, switching to fast pixel decoder")
@@ -1591,6 +1593,7 @@ def manageComposition(lighting, composition, loras):


 def prepare_inference(
+    title,
     prompt,
    negative,
    translate,
@@ -1690,7 +1693,7 @@ def prepare_inference(
     seed_everything(seed)

     rprint(
-        f"\n[#48a971]Text to Image[white] generating [#48a971]{total_images}[white] quality [#48a971]{quality}[white] images over [#48a971]{runs}[white] batches with [#48a971]{wtile}[white]x[#48a971]{htile}[white] attention tiles at [#48a971]{W}[white]x[#48a971]{H}[white] ([#48a971]{W // pixelSize}[white]x[#48a971]{H // pixelSize}[white] pixels)"
+        f"\n[#48a971]{title}[white] generating [#48a971]{total_images}[white] quality [#48a971]{quality}[white] images over [#48a971]{runs}[white] batches with [#48a971]{wtile}[white]x[#48a971]{htile}[white] attention tiles at [#48a971]{W}[white]x[#48a971]{H}[white] ([#48a971]{W // pixelSize}[white]x[#48a971]{H // pixelSize}[white] pixels)"
     )

     global model
@@ -2322,7 +2325,7 @@ def txt2img(
     }


-def neural_img2img(modelFileString, controlnets, prompt, negative, input_image, autocaption, translate, promptTuning, W, H, pixelSize, upscale, quality, scale, strength, lighting, composition, seed, total_images, maxBatchSize, device, precision, loras, tilingX, tilingY, preview, pixelvae, post):
+def neural_img2img(modelFileString, title, controlnets, prompt, negative, input_image, autocaption, translate, promptTuning, W, H, pixelSize, upscale, quality, scale, strength, lighting, composition, seed, total_images, maxBatchSize, device, precision, loras, tilingX, tilingY, preview, pixelvae, post):
     timer = time.time()
     global modelCS
     global modelTA
@@ -2349,8 +2352,10 @@ def neural_img2img(modelFileString, controlnets, prompt, negative, input_image,
         rprint(f"[#48a971]Caption: [#494b9b]{prompt}")

     conditioning, negative_conditioning, image_embed, steps, scale, runs, data, negative_data, seeds, batch, raw_loras = prepare_inference(
-        prompt, negative, translate, promptTuning, W, H, pixelSize, quality, scale, lighting, composition, seed, total_images, maxBatchSize, device, precision, loras, tilingX, tilingY, input_image, True)
+        title, prompt, negative, translate, promptTuning, W, H, pixelSize, quality, scale, lighting, composition, seed, total_images, maxBatchSize, device, precision, loras, tilingX, tilingY, input_image, True)

+    title = title.lower().replace(' ', '_')
+
     model_patcher, cldm_cond, cldm_uncond = load_controlnet(
         controlnets,
         W,
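
The normalization after the prepare_inference call turns the human-readable display title into the snake_case type tag the client already expects:

```python
title = "Neural Pixelate"
display_type = title.lower().replace(' ', '_')
assert display_type == "neural_pixelate"  # matches the previously hardcoded tag
```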
@@ -2398,14 +2403,14 @@ def neural_img2img(modelFileString, controlnets, prompt, negative, input_image,
                 displayOut.append({"name": name, "seed": seed+i, "format": "bytes", "image": encodeImage(x_sample_image, "bytes"), "width": x_sample_image.width, "height": x_sample_image.height})
             yield {
                 "action": "display_title",
-                "type": "neural_pixelate",
+                "type": title,
                 "value": {
                     "text": f"Generating... {step}/{steps} steps in batch {run+1}/{runs}"
                 },
             }
             yield {
                 "action": "display_image",
-                "type": "neural_pixelate",
+                "type": title,
                 "value": {
                     "images": displayOut,
                     "prompts": data,
@@ -2443,7 +2448,27 @@ def neural_img2img(modelFileString, controlnets, prompt, negative, input_image,
     del samples_ddim

     if post:
-        output = palettizeOutput(output)
+        # Reduce input image to key colors
+        palette_img = input_image.resize((W // 8, H // 8), resample=Image.Resampling.BILINEAR)
+        numColors = determine_best_k(palette_img, 96)
+        palette_img = palette_img.quantize(colors=numColors, method=1, kmeans=numColors, dither=0).convert("RGB")
+
+        # Extract palette colors
+        palette = np.concatenate([x[1] for x in palette_img.getcolors(96)]).tolist()
+
+        # Create a new palette image
+        tempPaletteImage = Image.new("P", (256, 1))
+        tempPaletteImage.putpalette(palette)
+
+        # Convert generated image to reduced input image palette
+        temp_output = output
+        output = []
+        for image in temp_output:
+            tempImage = image["image"]
+            # Perform quantization without dithering
+            image_indexed = tempImage.quantize(method=1, kmeans=numColors, palette=tempPaletteImage, dither=0).convert("RGB")
+
+            output.append({"name": image["name"], "seed": image["seed"], "format": image["format"], "image": image_indexed, "width": image["width"], "height": image["height"]})

     final = []
     for image in output:
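
The new post step replaces the generic palettizeOutput call with a palette transfer: the input image is shrunk, quantized down to its key colors, and every generated frame is remapped onto that palette with dithering disabled. A standalone sketch of the same technique, with a fixed color count standing in for the repo's determine_best_k heuristic (which this diff does not include):

```python
from PIL import Image

def match_palette(generated: Image.Image, source: Image.Image, num_colors: int = 32) -> Image.Image:
    # Downsample the source, then quantize it to extract its key colors.
    small = source.resize((source.width // 8, source.height // 8),
                          resample=Image.Resampling.BILINEAR)
    small = small.quantize(colors=num_colors, method=1,
                           kmeans=num_colors, dither=0).convert("RGB")

    # Flatten the surviving (count, (r, g, b)) entries into a flat palette list.
    palette = [c for _, rgb in small.getcolors(num_colors) for c in rgb]
    pal_img = Image.new("P", (256, 1))
    pal_img.putpalette(palette)

    # Remap the generated image onto that palette, no dithering.
    return generated.quantize(method=1, kmeans=num_colors,
                              palette=pal_img, dither=0).convert("RGB")
```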
@@ -2454,7 +2479,7 @@ def neural_img2img(modelFileString, controlnets, prompt, negative, input_image,
         )
         yield {
             "action": "display_image",
-            "type": "neural_pixelate",
+            "type": title,
            "value": {"images": final, "prompts": data, "negatives": negative_data},
        }

@@ -2731,7 +2756,7 @@ def img2img(
                 displayOut = []
                 for i in range(batch):
                     x_sample_image = fastRender(modelPV, samples_ddim[i:i+1], pixelSize, W, H)
-                    name = str(hash(str([data[i], negative_data[i], images[0].resize((16, 16), resample=Image.Resampling.NEAREST), strength, translate, promptTuning, W, H, quality, scale, device, loras, tilingX, tilingY, pixelvae, seed+i])) & 0x7FFFFFFFFFFFFFFF)
+                    name = str(hash(str([data[i], negative_data[i], init_img.resize((16, 16), resample=Image.Resampling.NEAREST), strength, translate, promptTuning, W, H, quality, scale, device, loras, tilingX, tilingY, pixelvae, seed+i])) & 0x7FFFFFFFFFFFFFFF)
                     displayOut.append({"name": name, "seed": seed+i, "format": "bytes", "image": encodeImage(x_sample_image, "bytes"), "width": x_sample_image.width, "height": x_sample_image.height})
                 yield {
                     "action": "display_title",
@@ -2771,7 +2796,7 @@ def img2img(
             play("iteration.wav")

             seeds.append(str(seed))
-            name = str(hash(str([data[i], negative_data[i], images[0].resize((16, 16), resample=Image.Resampling.NEAREST), strength, translate, promptTuning, W, H, quality, scale, device, loras, tilingX, tilingY, pixelvae, seed])) & 0x7FFFFFFFFFFFFFFF)
+            name = str(hash(str([data[i], negative_data[i], init_img.resize((16, 16), resample=Image.Resampling.NEAREST), strength, translate, promptTuning, W, H, quality, scale, device, loras, tilingX, tilingY, pixelvae, seed])) & 0x7FFFFFFFFFFFFFFF)
             output.append({"name": name, "seed": seed, "format": "png", "image": x_sample_image, "width": x_sample_image.width, "height": x_sample_image.height})

             seed += 1
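
Both img2img hunks fix the same bug: the name hash used `images[0]` where the surrounding scope provides the init image as `init_img`. The naming scheme itself is unchanged; reduced to its skeleton (the helper below is hypothetical and the settings list is abbreviated):

```python
from PIL import Image

def result_name(init_img: Image.Image, *settings) -> str:
    # Identifier built from the generation settings plus a 16x16 thumbnail
    # of the init image, masked to a positive 63-bit integer as in the diff.
    thumb = init_img.resize((16, 16), resample=Image.Resampling.NEAREST)
    return str(hash(str([*settings, thumb])) & 0x7FFFFFFFFFFFFFFF)
```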
@@ -3098,6 +3123,8 @@ async def server(websocket):
                    )

                    # Neural pixelate
+                    title = "Neural Pixelate"
+
                    # Decode input image
                    init_img = decodeImage(values["images"][0])

@@ -3114,10 +3141,11 @@ async def server(websocket):
                        autocaption = True

                    # Net models, images, and weights in order
-                    controlnets = [{"model_file": "./models/controllora/Composition.safetensors", "image": image_blur, "weight": 0.4}]
+                    controlnets = [{"model_file": "./models/controllora/Tile.safetensors", "image": image_blur, "weight": 1.0}, {"model_file": "./models/controllora/Composition.safetensors", "image": image, "weight": 0.7}]

                    for result in neural_img2img(
                        modelData["file"],
+                        title,
                        controlnets,
                        values["prompt"],
                        values["negative"],
