postprocess

zaqxs123456 2024-10-24 23:40:15 +08:00
parent 8fb4f8753d
commit fef7bbbc8e
14 changed files with 988 additions and 9414 deletions

File diff suppressed because it is too large.

Flow.pdf (new binary file; contents not shown)

app.py (95 lines changed)

@@ -3,14 +3,94 @@ import hashlib
import json
import random
import uuid
import cv2
from flask import Flask, request, jsonify
import sys
import os
from PIL import Image
import io
import numpy as np
import websocket
import openpose_gen as opg
from comfy_socket import get_images
from postprocessing import expo_shuffle_image_steps, expo_add_to_background_image
sys.path.append('./')
app = Flask(__name__)
info = json.load(open('info.json'))
comfyui_address = info['comfyui_address']
expo_raw_sd_dir = info['expo_raw_sd_dir']
expo_openpose_dir = info['expo_openpose_dir']
expo_postprocessed_dir = info['expo_postprocessed_dir']
expo_postprocess_temp_dir = info['expo_postprocess_temp_dir']
@app.route('/expo_fencing_pose', methods=['POST'])
def expo_fencing_pose():
if request.is_json:
data = request.get_json()
# Use .get() so a missing key reaches the validation below instead of raising KeyError first
coordinates = data.get('coordinates')
canvas_size = data.get('canvas_size')
batch = data.get('batch')
step = data.get('step')
if coordinates is None or canvas_size is None or batch is None or step is None:
return jsonify({"status": "error", "message": "Missing data"}), 422
openpose_image_path = opg.expo_save_bodypose(canvas_size[0], canvas_size[1], coordinates, batch, step)
print(openpose_image_path)
expo_fencer_prompt(openpose_image_path, batch, step)
return jsonify({"status": "success", "message": "Data received"}), 201
else:
return jsonify({"status": "error", "message": "Request must be JSON"}), 415
def expo_fencer_prompt(openpose_image_path, batch, step):
prompt = json.loads(open("./prompts/fencer_03.json", "r", encoding="utf-8").read())
openpose_image_name = opg.upload_image(openpose_image_path)
opg.upload_image("./images/ref_black.png", "ref_black.png")
print(openpose_image_name)
prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
prompt["29"]["inputs"]['image'] = "ref_black.png"
prompt["17"]["inputs"]['image'] = openpose_image_name
client_id = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(comfyui_address, client_id))
images = get_images(ws, prompt, client_id)
for node_id in images:
for idx, image_data in enumerate(images[node_id]):
image = Image.open(io.BytesIO(image_data))
image_path = os.path.join(expo_raw_sd_dir, f"{batch}_{step}.png")
image.save(image_path)
def expo_clear_images():
for file in os.listdir(expo_openpose_dir):
os.remove(os.path.join(expo_openpose_dir, file))
for file in os.listdir(expo_raw_sd_dir):
os.remove(os.path.join(expo_raw_sd_dir, file))
@app.route('/expo_postprocess', methods=['POST'])
def expo_postprocess():
print("Postprocessing")
os.makedirs(expo_postprocess_temp_dir, exist_ok=True)
shuffled_images_paths = expo_shuffle_image_steps()
background_path = os.path.join(expo_postprocess_temp_dir, 'background.png')
if not os.path.exists(background_path):
background = np.zeros((1000, 1500, 3), dtype=np.uint8)
cv2.imwrite(background_path, background)
expo_add_to_background_image(background_path, shuffled_images_paths[0][0], 0, 0)
cv2.imwrite(os.path.join(expo_postprocessed_dir, 'postprocessed.png'), background)
# expo_clear_images()
@app.route('/gen_image', methods=['POST'])
def gen_image():
if request.is_json:
@@ -23,7 +103,7 @@ def gen_image():
return jsonify({"status": "error", "message": "Missing data"}), 422
openpose_image_path = opg.save_bodypose(canvas_size[0], canvas_size[1], coordinates, pid)
- # gen_fencer_prompt(openpose_image_path, pid, opg.server_address)
+ # gen_fencer_prompt(openpose_image_path, pid, comfyui_address)
return jsonify({"status": "success", "message": "Data received"}), 201
else:
@@ -46,14 +126,12 @@ def gen_group_pic():
coordinates_list[i] = coordinates_list[i]['coordinates']
openpose_image_path = opg.save_bodypose_mulit(canvas_size[0], canvas_size[1], coordinates_list, pid)
- gen_group_pic_prompt(openpose_image_path, base_image, pid, opg.server_address)
+ gen_group_pic_prompt(openpose_image_path, base_image, pid, comfyui_address)
return jsonify({"status": "success", "message": "Data received"}), 201
else:
return jsonify({"status": "error", "message": "Request must be JSON"}), 415
def gen_fencer_prompt(openpose_image_path, pid, comfyUI_address):
with open("./prompts/fencerAPI.json", "r") as f:
prompt_json = f.read()
@@ -82,5 +160,8 @@ def gen_group_pic_prompt(openpose_image_path, base_image, pid, comfyUI_address):
opg.queue_prompt(prompt, comfyUI_address)
if __name__ == '__main__':
- app.run(debug=True)
+ expo_postprocess()
# app.run(debug=True)
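For reference, a minimal sketch of how a pose client could call the new /expo_fencing_pose route. Only the payload keys come from the handler above; the host and port (Flask's default 127.0.0.1:5000), the use of the requests library, and the sample numbers are assumptions, not part of this commit.

# Hypothetical client call for /expo_fencing_pose (illustrative only).
import requests  # assumption: any HTTP client works; requests is used here for brevity

payload = {
    "coordinates": [512, 300, 1.0, 520, 340, 1.0, 500, 380, 1.0],  # flat [x, y, confidence, ...] triplets
    "canvas_size": [1024, 1024],  # [width, height] of the OpenPose canvas
    "batch": 0,   # which candidate sequence the pose belongs to
    "step": 3,    # frame index within that sequence
}
resp = requests.post("http://127.0.0.1:5000/expo_fencing_pose", json=payload)
print(resp.status_code, resp.json())  # 201 on success, 422 if a key is missing, 415 if not JSON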

comfy_socket.py (new file, 48 lines)

@@ -0,0 +1,48 @@
import random
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
server_address = "127.0.0.1:8188"
def queue_prompt(prompt, client_id):
p = {"prompt": prompt, "client_id": client_id}
data = json.dumps(p).encode('utf-8')
req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
return json.loads(urllib.request.urlopen(req).read())
def get_image(filename, subfolder, folder_type):
data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
url_values = urllib.parse.urlencode(data)
with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
return response.read()
def get_history(prompt_id):
with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
return json.loads(response.read())
def get_images(ws, prompt, client_id):
prompt_id = queue_prompt(prompt, client_id)['prompt_id']
output_images = {}
current_node = ""
while True:
out = ws.recv()
if isinstance(out, str):
message = json.loads(out)
if message['type'] == 'executing':
data = message['data']
if data['prompt_id'] == prompt_id:
if data['node'] is None:
break #Execution is done
else:
current_node = data['node']
else:
if current_node == 'save_image_websocket_node':
images_output = output_images.get(current_node, [])
images_output.append(out[8:])
output_images[current_node] = images_output
return output_images
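One non-obvious detail in get_images above is the out[8:] slice: ComfyUI's websocket sends saved images as binary frames with a small fixed header before the image bytes. A hedged sketch of splitting such a frame; the two 32-bit big-endian fields and their meaning are an assumption based on ComfyUI's websocket examples, not something this commit defines.

import struct

def split_binary_frame(frame: bytes):
    # Assumed layout: 4-byte event type + 4-byte image format, then the raw image bytes.
    event_type, image_format = struct.unpack(">II", frame[:8])
    return event_type, image_format, frame[8:]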

info.json (new file, 7 lines)

@@ -0,0 +1,7 @@
{
"comfyui_address": "localhost:8188",
"expo_raw_sd_dir": "output/expo_raw_sd",
"expo_openpose_dir": "output/expo_openpose",
"expo_postprocessed_dir": "output/expo_postprocessed",
"expo_postprocess_temp_dir": "output/expo_postprocess_temp"
}

openpose_gen.py

@@ -13,12 +13,29 @@ import sys
import hashlib
sys.path.append('./')
- server_address = "localhost:8188"
+ # read json from info.json
info = json.load(open('info.json'))
server_address = info['comfyui_address']
expo_openpose_dir = info['expo_openpose_dir']
def coordinates_to_keypoints(coordinates: list) -> List[skel.Keypoint]:
keypoints = [skel.Keypoint(coordinates[i], coordinates[i + 1]) for i in range(0, len(coordinates), 3)]
return keypoints
def expo_save_bodypose(width: int, height: int, coordinates: list, batch: int, step: int) -> None:
canvas = np.zeros((height, width, 3), dtype=np.uint8)
keypoints = coordinates_to_keypoints(coordinates)
canvas = skel.draw_bodypose(canvas, keypoints, skel.coco_limbSeq, skel.coco_colors)
# Save as {batch}_{step}.png in expo_openpose_dir
if not os.path.exists(expo_openpose_dir):
os.makedirs(expo_openpose_dir)
image_path = os.path.join(expo_openpose_dir, '%d_%d.png' % (batch, step))
image_path = image_path.replace('\\', '/')
cv2.imwrite(image_path, canvas)
return image_path
def save_bodypose(width: int, height: int, coordinates: list, pid: str) -> None:
if not hasattr(save_bodypose, 'counter'):
save_bodypose.counter = 0 # Initialize the counter attribute
@@ -57,13 +74,17 @@ def save_bodypose_mulit(width: int, height: int, coordinates_list: list, pid: st
return image_path
- def queue_prompt(prompt, server_address):
+ def queue_prompt(prompt):
p = {"prompt": prompt}
data = json.dumps(p).encode('utf-8')
req = request.Request("http://{}/prompt".format(server_address), data=data)
request.urlopen(req)
- def upload_image(input_image, name, server_address, image_type="input", overwrite=True):
+ def upload_image(input_image, image_name="", image_type="input", overwrite=True) -> str:
if image_name == "":
# generate a random name here
image_name = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest() + ".png"
# Check if input_image is a valid file path
if isinstance(input_image, str) and os.path.isfile(input_image):
file = open(input_image, 'rb')
@@ -75,7 +96,7 @@ def upload_image(input_image, name, server_address, image_type="input", overwrit
try:
multipart_data = MultipartEncoder(
fields={
- 'image': (name, file, 'image/png'),
+ 'image': (image_name, file, 'image/png'),
'type': image_type,
'overwrite': str(overwrite).lower()
}
@@ -85,12 +106,12 @@ def upload_image(input_image, name, server_address, image_type="input", overwrit
headers = {'Content-Type': multipart_data.content_type}
request = urllib.request.Request("http://{}/upload/image".format(server_address), data=data, headers=headers)
with urllib.request.urlopen(request) as response:
- return response.read()
+ return json.loads(response.read().decode('utf-8'))["name"]
finally:
if close_file:
file.close()
- def upload_image_circular_queue(image_path, size, unqiue_id, server_address):
+ def upload_image_circular_queue(image_path, size, unqiue_id):
# create a dict in this function to store the counter for each unique_id, key is the unique_id, value is the counter
if not hasattr(upload_image_circular_queue, 'id_counter_dict'):
upload_image_circular_queue.id_counter_dict = {}
@@ -100,7 +121,7 @@ def upload_image_circular_queue(image_path, size, unqiue_id, server_address):
image_name = hashlib.sha256((unqiue_id + str(upload_image_circular_queue.id_counter_dict[unqiue_id])).encode('utf-8')).hexdigest() + ".png"
upload_image_circular_queue.id_counter_dict[unqiue_id] += 1 % size
- upload_image(image_path, image_name, server_address)
+ upload_image(image_path, image_name)
return image_name
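As a quick illustration of the coordinates_to_keypoints contract used by expo_save_bodypose: the input is a flat [x, y, confidence, x, y, confidence, ...] list and every third value (the confidence) is dropped. The numbers below are invented, and plain tuples stand in for skel.Keypoint.

coords = [100, 200, 0.9, 110, 240, 0.8]  # two keypoints with confidences
keypoints = [(coords[i], coords[i + 1]) for i in range(0, len(coords), 3)]
assert keypoints == [(100, 200), (110, 240)]  # the 0.9 / 0.8 confidences are discarded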

postprocessing.py (new file, 228 lines)

@@ -0,0 +1,228 @@
import json
import math
import os
import random
import shutil
import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed
import cv2
import numpy as np
info = json.load(open('info.json'))
expo_raw_sd_dir = info['expo_raw_sd_dir']
expo_postprocessed_dir = info['expo_postprocessed_dir']
expo_postprocess_temp_dir = info['expo_postprocess_temp_dir']
def expo_get_step_by_name(image_name: str) -> int:
return int(image_name.split('_')[1].split('.')[0])
def expo_get_batch_by_name(image_name: str) -> int:
return int(image_name.split('_')[0])
def expo_shuffle_image_steps() -> list[list[str]]:
images = {}
# Read and categorize image paths by step
for image_name in os.listdir(expo_raw_sd_dir):
step = expo_get_step_by_name(image_name)
image_path = os.path.join(expo_raw_sd_dir, image_name)
if step in images:
images[step].append(image_path)
else:
images[step] = [image_path]
# Shuffle the image paths for each step
for step_images in images.values():
random.shuffle(step_images)
# Convert the dictionary to a 2D list and find the minimum length
shuffled_images = list(images.values())
min_length = min(len(step_images) for step_images in shuffled_images)
# Crop each list to the minimum length
shuffled_images = [step_images[:min_length] for step_images in shuffled_images]
# Finally, transpose the list: group the first image of every step together, then the second, and so on (one row per candidate sequence)
shuffled_images = list(map(list, zip(*shuffled_images)))
return shuffled_images
def expo_add_to_background_image(background_path: str, image_path: str, output_path: str, x: int, y: int) -> str:
# Use ImageMagick to blend the image with the background using Linear Light blend mode
command = [
"magick",
background_path,
image_path,
"-geometry", f"+{x}+{y}",
"-compose", "LinearLight",
"-composite",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_add_logo(background_path: str, image_path: str, output_path: str, x: int, y: int) -> str:
# Use ImageMagick to blend the image with the background using normal blend mode
command = [
"magick",
background_path,
image_path,
"-geometry", f"+{x}+{y}",
"-compose", "Over",
"-composite",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_resize_fencer(image_path: str, output_path: str, width: int, height: int) -> str:
# Use ImageMagick to resize the image
command = [
"magick",
image_path,
"-resize", f"{width}x{height}",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_resize_fencers(path_list: list[str], is_left: bool, width: int, height: int) -> list[str]:
output_dir = os.path.join(expo_postprocess_temp_dir, f"{'left' if is_left else 'right'}_fencers")
os.makedirs(output_dir, exist_ok=True)
resized_paths = [os.path.join(output_dir, f"{i}.png") for i in range(len(path_list))]
futures_to_index = {}
with ThreadPoolExecutor() as executor:
for i, image_path in enumerate(path_list):
output_path = resized_paths[i]
future = executor.submit(expo_resize_fencer, image_path, output_path, width, height)
futures_to_index[future] = i
for future in as_completed(futures_to_index):
index = futures_to_index[future]
resized_paths[index] = future.result()
return resized_paths
def expo_motion_blur_fencer(image_path: str, output_path: str, sigma: float, direction: float) -> str:
# Use ImageMagick to apply motion blur to the image with the specified direction
command = [
"magick",
image_path,
"-motion-blur", f"0x{sigma}+{direction}",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_motion_blur_fencers(path_list: list[str], is_left: bool) -> list[str]:
futures = []
with ThreadPoolExecutor() as executor:
for i, image_path in enumerate(path_list):
sigma = 15 - 15 * i / (len(path_list) - 1)
direction = 0 if is_left else 180
future = executor.submit(expo_motion_blur_fencer, image_path, image_path, sigma, direction)
futures.append(future)
for future in as_completed(futures):
future.result()
def expo_overlay_bg_gradient(image_path: str, output_path: str, bg_gradient_path: str) -> str:
# Use ImageMagick to overlay the image with a background gradient
command = [
"magick",
image_path,
bg_gradient_path,
"-compose", "Overlay",
"-composite",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_decrese_opacity(image_path: str, output_path: str, opacity: int) -> str:
# Use ImageMagick to decrease the opacity of the image
command = [
"magick",
image_path,
"-channel", "A",
"-evaluate", "multiply", f"{opacity/100}",
output_path
]
subprocess.run(command, check=True)
return output_path
def expo_decrese_opacities(path_list: list[str]) -> list[str]:
futures = []
with ThreadPoolExecutor() as executor:
for i, image_path in enumerate(path_list):
opacity = 30 + 70 * i / (len(path_list) - 1)
future = executor.submit(expo_decrese_opacity, image_path, image_path, opacity)
futures.append(future)
for future in as_completed(futures):
future.result()
def output_to_display_folder(output_image_paths):
# copy the output images to the display folder (expo_postprocessed_dir)
# The file name format is {session}_{candidate}.png: session is one past the highest session already in expo_postprocessed_dir, candidate is the index in output_image_paths
session = str(current_session()).zfill(5)
for i, image_path in enumerate(output_image_paths):
candidate = str(i).zfill(5)
output_image_path = os.path.join(expo_postprocessed_dir, f"{session}_{candidate}.png")
# copy the image
shutil.copy(image_path, output_image_path)
def current_session():
max_session = 0
for file in os.listdir(expo_postprocessed_dir):
if file.endswith(".png"):
session = int(file.split("_")[0])
if session > max_session:
max_session = session
return max_session + 1
def expo_postprocess():
print("Postprocessing")
os.makedirs(expo_postprocess_temp_dir, exist_ok=True)
shuffled_images_paths = expo_shuffle_image_steps()
background_path = os.path.join(expo_postprocess_temp_dir, 'background.png')
logo_path = os.path.join(expo_postprocess_temp_dir, 'logo.png')
if not os.path.exists(background_path):
background = np.zeros((720, 1080, 3), dtype=np.uint8)
cv2.imwrite(background_path, background)
bg_gradient_folder = os.path.join(expo_postprocess_temp_dir, 'bg_gradient')
bg_gradients = [os.path.join(bg_gradient_folder, f"{i:02d}.png") for i in range(4)]
output_files = []
for i, candidate_list in enumerate(shuffled_images_paths):
left_fencer_paths = expo_resize_fencers(candidate_list, True, 500, 500)
expo_motion_blur_fencers(left_fencer_paths, True)
expo_decrese_opacities(left_fencer_paths)
temp_output_path = os.path.join(expo_postprocess_temp_dir, f"temp_{i}.png")
output_files.append(temp_output_path)
temp_background_path = background_path
for j, left_fencer_path in enumerate(left_fencer_paths):
x_position = 65 * math.pow(j, 1.3) - 132
y_position = 192
expo_add_to_background_image(temp_background_path, left_fencer_path, temp_output_path, x_position, y_position)
temp_background_path = temp_output_path
expo_overlay_bg_gradient(temp_output_path, temp_output_path, bg_gradients[i])
expo_add_logo(temp_output_path, logo_path, temp_output_path, 750, 700)
output_to_display_folder(output_files)
if __name__ == '__main__':
expo_postprocess()
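The heart of expo_shuffle_image_steps is the final zip(*...) transpose: after shuffling and cropping the per-step lists, it regroups them so that each row is one candidate sequence holding exactly one image per step. A toy run with invented file names:

# per-step lists after shuffling, already cropped to the same length
images_by_step = [
    ["7_0.png", "3_0.png"],  # step 0 images from two different batches
    ["2_1.png", "9_1.png"],  # step 1
    ["5_2.png", "1_2.png"],  # step 2
]
candidates = list(map(list, zip(*images_by_step)))
# candidates[0] == ["7_0.png", "2_1.png", "5_2.png"]  -> one complete pose sequence
# candidates[1] == ["3_0.png", "9_1.png", "1_2.png"]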

prompts/fencer_02.json (new file, 218 lines)

@@ -0,0 +1,218 @@
{
"3": {
"inputs": {
"seed": 695262830308132,
"steps": 3,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"32",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty, 1 sword, sword in hand",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"17": {
"inputs": {
"image": "3bdafb967cede879cabdc2f1277ce5ae8fde8f4a1ff1f0c821fb9b7890bfa252.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"22": {
"inputs": {
"strength": 0.98,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"43",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1.3,
"weight_type": "style and composition",
"combine_embeds": "norm average",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "K+V w/ C penalty",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, (bad anatomy:5.0), worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0), 2 sword, multiple sword\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:'unaestheticXL_cbp62 -neg.safetensors', embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
},
"43": {
"inputs": {
"control_net_name": "diffusion_pytorch_model.safetensors",
"model": [
"4",
0
]
},
"class_type": "DiffControlNetLoader",
"_meta": {
"title": "Load ControlNet Model (diff)"
}
},
"save_image_websocket_node": {
"inputs": {
"images": [
"8",
0
]
},
"class_type": "SaveImageWebsocket",
"_meta": {
"title": "SaveImageWebsocket"
}
}
}

prompts/fencer_03.json (new file, 236 lines)

@@ -0,0 +1,236 @@
{
"3": {
"inputs": {
"seed": 695262830308132,
"steps": 3,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"32",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty, 1 sword, sword in hand",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"17": {
"inputs": {
"image": "ef2d127de37b942baad06145e54b0c619a1f22327b2ebbcfbec78f5564afe39d.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"22": {
"inputs": {
"strength": 0.98,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"43",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1.3,
"weight_type": "style and composition",
"combine_embeds": "norm average",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "K+V w/ C penalty",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, (bad anatomy:5.0), worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0), 2 sword, multiple sword\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:'unaestheticXL_cbp62 -neg.safetensors', embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
},
"43": {
"inputs": {
"control_net_name": "diffusion_pytorch_model.safetensors",
"model": [
"4",
0
]
},
"class_type": "DiffControlNetLoader",
"_meta": {
"title": "Load ControlNet Model (diff)"
}
},
"save_image_websocket_node": {
"inputs": {
"images": [
"51",
0
]
},
"class_type": "SaveImageWebsocket",
"_meta": {
"title": "SaveImageWebsocket"
}
},
"51": {
"inputs": {
"model": "silueta",
"alpha_matting": "true",
"alpha_matting_foreground_threshold": 240,
"alpha_matting_background_threshold": 20,
"alpha_matting_erode_size": 10,
"post_process_mask": "false",
"images": [
"8",
0
]
},
"class_type": "ImageSegmentation",
"_meta": {
"title": "ImageSegmentation"
}
}
}

prompts/test.json (new file, 85 lines)

@@ -0,0 +1,85 @@
{
"3": {
"class_type": "KSampler",
"inputs": {
"cfg": 8,
"denoise": 1,
"latent_image": [
"5",
0
],
"model": [
"4",
0
],
"negative": [
"7",
0
],
"positive": [
"6",
0
],
"sampler_name": "euler",
"scheduler": "normal",
"seed": 8566257,
"steps": 20
}
},
"4": {
"class_type": "CheckpointLoaderSimple",
"inputs": {
"ckpt_name": "counterfeitxl_v25.safetensors"
}
},
"5": {
"class_type": "EmptyLatentImage",
"inputs": {
"batch_size": 1,
"height": 1024,
"width": 1024
}
},
"6": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "masterpiece best quality girl"
}
},
"7": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "bad hands"
}
},
"8": {
"class_type": "VAEDecode",
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
}
},
"save_image_websocket_node": {
"class_type": "SaveImageWebsocket",
"inputs": {
"images": [
"8",
0
]
}
}
}

(new file, name not shown)

@@ -0,0 +1,8 @@
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)
for node_id in images:
for idx, image_data in enumerate(images[node_id]):
image = Image.open(io.BytesIO(image_data))
image_path = os.path.join(info['expo_raw_sd_dir'], f"{node_id}_{idx}.png")
image.save(image_path)

(modified file, name not shown)

@@ -1,13 +1,15 @@
#This is an example that uses the websockets api and the SaveImageWebsocket node to get images directly without
#them being saved to disk
import random
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
- server_address = "127.0.0.1:8188"
+ info = json.load(open('info.json'))
server_address = info['comfyui_address']
client_id = str(uuid.uuid4())
def queue_prompt(prompt):
@@ -81,15 +83,15 @@ prompt_text = """
"4": {
"class_type": "CheckpointLoaderSimple",
"inputs": {
- "ckpt_name": "v1-5-pruned-emaonly.safetensors"
+ "ckpt_name": "counterfeitxl_v25.safetensors"
}
},
"5": {
"class_type": "EmptyLatentImage",
"inputs": {
"batch_size": 1,
- "height": 512,
+ "height": 1024,
- "width": 512
+ "width": 1024
}
},
"6": {
@@ -137,23 +139,33 @@ prompt_text = """
}
"""
- prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
- prompt["6"]["inputs"]["text"] = "masterpiece best quality man"
#set the seed for our KSampler node
- prompt["3"]["inputs"]["seed"] = 5
+ import os
+ from PIL import Image
+ import io
+ prompt = json.loads(open("./prompts/fencer_02.json", "r", encoding="utf-8").read())
+ # prompt["6"]["inputs"]["text"] = "masterpiece best quality man"
+ prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)
for node_id in images:
for idx, image_data in enumerate(images[node_id]):
image = Image.open(io.BytesIO(image_data))
image_path = os.path.join(info['expo_raw_sd_dir'], f"{node_id}_{idx}.png")
image.save(image_path)
- #Commented out code to display the output images:
+ # #Commented out code to display the output images:
# for node_id in images:
- # for image_data in images[node_id]:
+ # for idx, image_data in enumerate(images[node_id]):
# from PIL import Image
# import io
# image = Image.open(io.BytesIO(image_data))
- # image.show()
+ # image_path = os.path.join(output_dir, f"{node_id}_{idx}.png")
+ # image.save(image_path)

(modified file, name not shown)

@@ -189,25 +189,25 @@ def draw_bodypose(canvas: ndarray, keypoints: List[Keypoint], limbSeq, colors, x
if keypoints is None or len(keypoints) == 0:
return canvas
- # for (k1_index, k2_index), color in zip(limbSeq, colors):
+ for (k1_index, k2_index), color in zip(limbSeq, colors):
- # keypoint1 = keypoints[k1_index]
+ keypoint1 = keypoints[k1_index]
- # keypoint2 = keypoints[k2_index]
+ keypoint2 = keypoints[k2_index]
- # if keypoint1 is None or keypoint2 is None or keypoint1.confidence == 0 or keypoint2.confidence == 0 or keypoint1.x <= 0 or keypoint1.y <= 0 or keypoint2.x <= 0 or keypoint2.y <= 0:
+ if keypoint1 is None or keypoint2 is None or keypoint1.confidence == 0 or keypoint2.confidence == 0 or keypoint1.x <= 0 or keypoint1.y <= 0 or keypoint2.x <= 0 or keypoint2.y <= 0:
- # # if keypoint1 is None or keypoint1.confidence == 0:
+ # if keypoint1 is None or keypoint1.confidence == 0:
- # # print(f"keypoint failed: {k1_index}")
+ # print(f"keypoint failed: {k1_index}")
- # # if keypoint2 is None or keypoint2.confidence == 0:
+ # if keypoint2 is None or keypoint2.confidence == 0:
- # # print(f"keypoint failed: {k2_index}")
+ # print(f"keypoint failed: {k2_index}")
- # continue
+ continue
- # Y = np.array([keypoint1.x, keypoint2.x]) * float(W)
+ Y = np.array([keypoint1.x, keypoint2.x]) * float(W)
- # X = np.array([keypoint1.y, keypoint2.y]) * float(H)
+ X = np.array([keypoint1.y, keypoint2.y]) * float(H)
- # mX = np.mean(X)
+ mX = np.mean(X)
- # mY = np.mean(Y)
+ mY = np.mean(Y)
- # length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+ length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
- # angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+ angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
- # polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth*stick_scale), int(angle), 0, 360, 1)
+ polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth*stick_scale), int(angle), 0, 360, 1)
- # cv2.fillConvexPoly(canvas, polygon, [int(float(c) * 0.6) for c in color])
+ cv2.fillConvexPoly(canvas, polygon, [int(float(c) * 0.6) for c in color])
for keypoint, color in zip(keypoints, colors):
if keypoint is None or keypoint.confidence == 0 or keypoint.x <= 0 or keypoint.y <= 0:
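The block re-enabled above draws every limb as a filled, rotated ellipse spanning its two keypoints, with the colour dimmed by 0.6. A standalone sketch of that geometry in plain pixel coordinates; the endpoint values and canvas size are invented, and stickwidth is assumed to be 4 as in the usual OpenPose drawing code.

import math
import cv2
import numpy as np

canvas = np.zeros((256, 256, 3), dtype=np.uint8)
(x1, y1), (x2, y2) = (60, 80), (180, 150)  # two limb endpoints in pixels (invented)
stickwidth = 4

center = (int((x1 + x2) / 2), int((y1 + y2) / 2))
length = math.hypot(x2 - x1, y2 - y1)
angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
polygon = cv2.ellipse2Poly(center, (int(length / 2), stickwidth), int(angle), 0, 360, 1)
cv2.fillConvexPoly(canvas, polygon, (0, 0, int(255 * 0.6)))  # dimmed colour, as in draw_bodypose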

tempCodeRunnerFile.py (new file, 1 line)

@@ -0,0 +1 @@
750