commit 4ea0b0280b
parent e7b27a58f1
Author: zaqxs123456
Date:   2024-10-14 14:50:56 +08:00

    update + test

7 changed files with 367 additions and 44 deletions

app.py (72 changed lines)

@@ -1,6 +1,8 @@
+import base64
+import hashlib
 import json
+import random
 import uuid
-import websocket
 from flask import Flask, request, jsonify
 import sys
 import openpose_gen as opg
@@ -9,30 +11,80 @@ sys.path.append('./')

 app = Flask(__name__)

-@app.route('/coordinates', methods=['POST'])
-def receive_coordinates():
+@app.route('/gen_image', methods=['POST'])
+def gen_image():
     if request.is_json:
         data = request.get_json()
         coordinates = data['coordinates']
         canvas_size = data['canvas_size']
+        pid = data['pid']

         if not coordinates or not canvas_size:
             return jsonify({"status": "error", "message": "Missing data"}), 422

-        opg.save_bodypose(canvas_size, canvas_size, coordinates)
+        openpose_image_path = opg.save_bodypose(canvas_size[0], canvas_size[1], coordinates, pid)
+        # gen_fencer_prompt(openpose_image_path, pid, opg.server_address)

         return jsonify({"status": "success", "message": "Data received"}), 201
     else:
         return jsonify({"status": "error", "message": "Request must be JSON"}), 415

-def open_websocket_connection():
-    server_address='127.0.0.1:8188'
-    client_id=str(uuid.uuid4())
-    ws = websocket.WebSocket()
-    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
-    return ws, server_address, client_id
+@app.route('/gen_group_pic', methods=['POST'])
+def init_gen_group_pic():
+    if request.is_json:
+        data = request.get_json()
+        coordinates_list = data['coordinates_list']
+        canvas_size = data['canvas_size']
+        pid = data['pid']
+        base_image = base64.b64decode(data['base_image'])
+
+        # save base image to ./test.png
+        with open("test.png", "wb") as f:
+            f.write(base_image)
+
+        if not coordinates_list or not canvas_size or not base_image or not pid:
+            return jsonify({"status": "error", "message": "Missing data"}), 422
+
+        for i in range(len(coordinates_list)):
+            coordinates_list[i] = coordinates_list[i]['coordinates']
+
+        openpose_image_path = opg.save_bodypose_mulit(canvas_size[0], canvas_size[1], coordinates_list, pid)
+        # gen_group_pic(openpose_image_path, base_image, pid, opg.server_address)
+
+        return jsonify({"status": "success", "message": "Data received"}), 201
+    else:
+        return jsonify({"status": "error", "message": "Request must be JSON"}), 415
+
+def gen_fencer_prompt(openpose_image_path, pid, comfyUI_address):
+    with open("fencerAPI.json", "r") as f:
+        prompt_json = f.read()
+    prompt = json.loads(prompt_json)
+
+    openpose_image_name = opg.upload_image_circular_queue(openpose_image_path, 20, pid)
+    opg.upload_image("ref_black.png", "ref_black.png")
+
+    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
+    prompt["29"]["inputs"]['image'] = "ref_black.png"
+    prompt["17"]["inputs"]['image'] = openpose_image_name
+
+    opg.queue_prompt(prompt, comfyUI_address)
+
+def gen_group_pic(openpose_image_path, base_image, pid, comfyUI_address):
+    with open("groupAPI.json", "r") as f:
+        prompt_json = f.read()
+    prompt = json.loads(prompt_json)
+
+    openpose_image_name = opg.upload_image_circular_queue(openpose_image_path, 30, pid)
+    base_image_name = opg.upload_image_circular_queue(base_image, 30, pid)
+
+    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
+    prompt["17"]["inputs"]['image'] = openpose_image_name
+    prompt["17"]["inputs"]['image'] = base_image_name
+
+    opg.queue_prompt(prompt, comfyUI_address)

 if __name__ == '__main__':
     app.run(debug=True)
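For a quick smoke test of the two new endpoints, a client call could look like the sketch below. This is not part of the commit: the host and port assume Flask's development defaults (127.0.0.1:5000), and the coordinate values, pid strings and base.png are placeholders; the payload field names follow the handlers above.

# Hedged sketch: exercise /gen_image and /gen_group_pic with placeholder data.
import base64
import requests

BASE = "http://127.0.0.1:5000"  # assumption: Flask development server defaults

# /gen_image expects a flat [x, y, confidence, ...] list, a [width, height] canvas size and a pid
coords = [0.5, 0.1, 1.0] * 18  # placeholder: 18 identical COCO keypoints
resp = requests.post(BASE + "/gen_image", json={
    "coordinates": coords,
    "canvas_size": [512, 512],
    "pid": "player-1",
})
print(resp.status_code, resp.json())

# /gen_group_pic additionally takes a list of {"coordinates": [...]} skeletons and a base64 image
with open("base.png", "rb") as f:  # placeholder input image
    b64 = base64.b64encode(f.read()).decode("ascii")
resp = requests.post(BASE + "/gen_group_pic", json={
    "coordinates_list": [{"coordinates": coords}, {"coordinates": coords}],
    "canvas_size": [512, 512],
    "pid": "group-1",
    "base_image": b64,
})
print(resp.status_code, resp.json())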

Binary image file (name not shown in this view), 15 KiB — new file, content not displayed.

fencerAPI copy.json (new file, 215 lines)

@@ -0,0 +1,215 @@
{
"3": {
"inputs": {
"seed": 688190695340079,
"steps": 3,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"32",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "Result",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"17": {
"inputs": {
"image": "dance_01.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"control_net_name": "diffusion_pytorch_model.safetensors"
},
"class_type": "ControlNetLoader",
"_meta": {
"title": "Load ControlNet Model"
}
},
"22": {
"inputs": {
"strength": 0.9500000000000001,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"19",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1,
"weight_type": "style and composition",
"combine_embeds": "add",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "V only",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, bad anatomy, worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0)\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:unaestheticXL_cbp62 -neg, embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
}
}
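As a rough sketch of how a workflow file like this is consumed (mirroring gen_fencer_prompt in app.py): node "3" is the KSampler, "17" the openpose LoadImage and "29" the reference LoadImage in this JSON. The server address and the uploaded image names below are assumptions, not part of the commit.

# Hedged sketch: patch the workflow's inputs and queue it on a ComfyUI instance.
import json
import random
from urllib import request

with open("fencerAPI copy.json", "r") as f:
    prompt = json.load(f)

prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)  # fresh seed per run
prompt["17"]["inputs"]["image"] = "openpose_image.png"          # pose guide fed to ControlNet
prompt["29"]["inputs"]["image"] = "ref_black.png"               # style reference fed to IPAdapter

data = json.dumps({"prompt": prompt}).encode("utf-8")
request.urlopen(request.Request("http://localhost:8188/prompt", data=data))  # assumed ComfyUI address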

Modified workflow JSON (file name not shown in this view)

@@ -51,7 +51,7 @@
   },
   "6": {
     "inputs": {
-      "text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty, 1 sword, sword in hand",
+      "text": "costume party, trick-or-treating, candy apples, pumpkin carving, haunted hayride, spooky stories, dress-up, decorations, Halloween parade, haunted corn maze, bobbing for apples, ghost stories, scary movies, pumpkin patch, fall festival, Halloween games, costume contest",
      "clip": [
        "4",
        1

fov_cal.py (new file, 30 lines)

@@ -0,0 +1,30 @@
import math

def hfov_to_vfov(hFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(math.tan(math.radians(hFOV / 2)) / aspect_ratio))

def hfov_to_dfov(hFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(math.sqrt(1 + (1 / aspect_ratio)**2) * math.tan(math.radians(hFOV / 2))))

def vfov_to_hfov(vFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(aspect_ratio * math.tan(math.radians(vFOV / 2))))

def vfov_to_dfov(vFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(math.sqrt(1 + aspect_ratio**2) * math.tan(math.radians(vFOV / 2))))

def dfov_to_hfov(dFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(math.tan(math.radians(dFOV / 2)) / math.sqrt(1 + (1 / aspect_ratio)**2)))

def dfov_to_vfov(dFOV, aspect_ratio):
    return 2 * math.degrees(math.atan(math.tan(math.radians(dFOV / 2)) / math.sqrt(1 + aspect_ratio**2)))

fov = 60
aspect_ratio = 2/1

print("h2v:", hfov_to_vfov(fov, aspect_ratio))
print("h2d:", hfov_to_dfov(fov, aspect_ratio))
print("v2h:", vfov_to_hfov(fov, aspect_ratio))
print("v2d:", vfov_to_dfov(fov, aspect_ratio))
print("d2h:", dfov_to_hfov(fov, aspect_ratio))
print("d2v:", dfov_to_vfov(fov, aspect_ratio))

openpose_gen.py

@@ -11,6 +11,7 @@ import urllib
 from urllib import request, parse
 from requests_toolbelt.multipart.encoder import MultipartEncoder
 import sys
+import hashlib
 sys.path.append('./')

 def is_normalized(keypoints: List[skel.Keypoint]) -> bool:
@@ -87,7 +88,7 @@ def coordinates_to_keypoints(coordinates: list) -> List[skel.Keypoint]:
     keypoints = [skel.Keypoint(coordinates[i], coordinates[i + 1]) for i in range(0, len(coordinates), 3)]
     return keypoints

-def save_bodypose(width: int, height: int, coordinates: list):
+def save_bodypose(width: int, height: int, coordinates: list, pid: str) -> None:
     if not hasattr(save_bodypose, 'counter'):
         save_bodypose.counter = 0 # Initialize the counter attribute
@@ -96,21 +97,46 @@ def save_bodypose(width: int, height: int, coordinates: list):
     canvas = draw_bodypose(canvas, keypoints, skel.coco_limbSeq, skel.coco_colors)

     # Save as body_pose_output0000.png, body_pose_output0001.png, ...
-    image_name = 'output/body_pose_output%04d.png' % save_bodypose.counter
-    cv2.imwrite(image_name, canvas)
-    gen_image(image_name)
+    image_path = 'output/body_pose_output%04d.png' % save_bodypose.counter
+    cv2.imwrite(image_path, canvas)
     save_bodypose.counter += 1 # Increment the counter

-server_address = "http://127.0.0.1:8188"
-
-def queue_prompt(prompt):
+    return image_path
+
+def save_bodypose_mulit(width: int, height: int, coordinates_list: list, pid: str) -> None:
+    if not hasattr(save_bodypose_mulit, 'counter'):
+        save_bodypose_mulit.counter = 0 # Initialize the counter attribute
+
+    canvas = np.zeros((height, width, 3), dtype=np.uint8)
+    for coordinates in coordinates_list:
+        keypoints = coordinates_to_keypoints(coordinates)
+        canvas = draw_bodypose(canvas, keypoints, skel.coco_limbSeq, skel.coco_colors)
+
+    # Save as body_pose_output0000.png, body_pose_output0001.png, ...
+    image_path = 'output/body_pose_output_multi%04d.png' % save_bodypose_mulit.counter
+    cv2.imwrite(image_path, canvas)
+    save_bodypose_mulit.counter += 1 # Increment the counter
+
+    return image_path
+
+server_address = "localhost:8188"
+
+def queue_prompt(prompt, server_address):
     p = {"prompt": prompt}
     data = json.dumps(p).encode('utf-8')
-    req = request.Request(server_address + "/prompt", data=data)
+    req = request.Request("http://{}/prompt".format(server_address), data=data)
     request.urlopen(req)

-def upload_image(input_path, name, server_address, image_type="input", overwrite=False):
-    with open(input_path, 'rb') as file:
+def upload_image(input_image, name, server_address, image_type="input", overwrite=False):
+    # Check if input_image is a valid file path
+    if isinstance(input_image, str) and os.path.isfile(input_image):
+        file = open(input_image, 'rb')
+        close_file = True
+    else:
+        file = input_image
+        close_file = False
+
+    try:
         multipart_data = MultipartEncoder(
             fields={
                 'image': (name, file, 'image/png'),
@@ -124,23 +150,23 @@ def upload_image(input_path, name, server_address, image_type="input", overwrite=False):
         request = urllib.request.Request("http://{}/upload/image".format(server_address), data=data, headers=headers)
         with urllib.request.urlopen(request) as response:
             return response.read()
+    finally:
+        if close_file:
+            file.close()

-def gen_image(openpose_image_path):
-    # read fencerAPI.json into prompt_text
-    with open("fencerAPI.json", "r") as f:
-        prompt_json = f.read()
-    prompt = json.loads(prompt_json)
-
-    # upload images
-    upload_image("ref_black.png", "ref_black.png", server_address)
-    upload_image(openpose_image_path, "openpose_image.png", server_address)
-
-    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
-    prompt["29"]["inputs"]['image'] = "ref_black.png"
-    prompt["17"]["inputs"]['image'] = "openpose_image.png"
-
-    queue_prompt(prompt)
+def upload_image_circular_queue(image_path, size, unqiue_id, server_address):
+    # create a dict in this function to store the counter for each unique_id, key is the unique_id, value is the counter
+    if not hasattr(upload_image_circular_queue, 'id_counter_dict'):
+        upload_image_circular_queue.id_counter_dict = {}
+
+    if unqiue_id not in upload_image_circular_queue.id_counter_dict:
+        upload_image_circular_queue.id_counter_dict[unqiue_id] = 0
+
+    image_name = hashlib.sha256((unqiue_id + str(upload_image_circular_queue.id_counter_dict[unqiue_id])).encode('utf-8')).hexdigest() + ".png"
+    upload_image_circular_queue.id_counter_dict[unqiue_id] += 1 % size
+    upload_image(image_path, image_name, server_address)
+
+    return image_name

 def main():
     directory = './fixed'
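For illustration, the reworked upload helpers could be driven as in the sketch below. It is not part of the commit: the file paths and pid are placeholders, and it assumes a ComfyUI server reachable at the module-level server_address.

# Hedged sketch: upload_image now accepts either a path or an open file-like object,
# and upload_image_circular_queue derives a hashed name from the pid and its counter.
import openpose_gen as opg

# 1) path-based upload (same behaviour as before)
opg.upload_image("ref_black.png", "ref_black.png", opg.server_address)

# 2) file-like object upload (new in this commit: no temporary file needed)
with open("output/body_pose_output0000.png", "rb") as f:  # placeholder path
    opg.upload_image(f, "pose_from_buffer.png", opg.server_address)

# 3) hashed name derived from the pid and its per-pid upload counter
name = opg.upload_image_circular_queue("output/body_pose_output0000.png",
                                       20, "player-1", opg.server_address)
print(name)  # e.g. "<sha256 of pid+counter>.png"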

test.png (new binary file, 1.5 MiB) — content not displayed.