update api test code

This commit is contained in:
zaqxs123456 2024-10-07 18:45:43 +08:00
parent 0509f3b26d
commit 6e6da6f918
7 changed files with 311 additions and 1 deletions

215
fencerAPI.json Normal file
View File

@ -0,0 +1,215 @@
{
"3": {
"inputs": {
"seed": 688190695340079,
"steps": 3,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"32",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "Result",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"17": {
"inputs": {
"image": "dance_01.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"control_net_name": "diffusion_pytorch_model.safetensors"
},
"class_type": "ControlNetLoader",
"_meta": {
"title": "Load ControlNet Model"
}
},
"22": {
"inputs": {
"strength": 0.9500000000000001,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"19",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1,
"weight_type": "style and composition",
"combine_embeds": "add",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "V only",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, bad anatomy, worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0)\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:unaestheticXL_cbp62 -neg, embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
}
}

49
fencer_test.py Normal file
View File

@ -0,0 +1,49 @@
import json
import urllib
from urllib import request, parse
import random
from requests_toolbelt.multipart.encoder import MultipartEncoder
server_address = "http://127.0.0.1:8188"
def queue_prompt(prompt):
    """POST a workflow graph to the ComfyUI ``/prompt`` endpoint for execution.

    Args:
        prompt: The workflow dict (node-id -> node definition) to queue.

    Returns:
        bytes: The raw response body from the server (previously discarded).
    """
    payload = json.dumps({"prompt": prompt}).encode('utf-8')
    req = request.Request(server_address + "/prompt", data=payload)
    # Close the HTTP response deterministically; the original call leaked
    # the connection by never closing the object urlopen() returned.
    with request.urlopen(req) as resp:
        return resp.read()
def upload_image(input_path, name, server_address, image_type="input", overwrite=False):
    """Upload a local image to the ComfyUI server's ``/upload/image`` endpoint.

    Args:
        input_path: Path of the local image file to send.
        name: Filename to store the image under on the server.
        server_address: Server base, with or without a scheme
            (both "http://host:port" and "host:port" are accepted).
        image_type: ComfyUI upload category; this project only uses "input".
        overwrite: Whether the server should replace an existing file.

    Returns:
        bytes: The raw response body from the server.
    """
    # Bug fix: the module-level server_address already starts with "http://",
    # so the old "http://{}".format(server_address) produced a malformed
    # "http://http://..." URL. Accept both forms here.
    if server_address.startswith("http://") or server_address.startswith("https://"):
        base_url = server_address
    else:
        base_url = "http://" + server_address

    with open(input_path, 'rb') as file:
        multipart_data = MultipartEncoder(
            fields={
                'image': (name, file, 'image/png'),
                'type': image_type,
                'overwrite': str(overwrite).lower(),
            }
        )
        headers = {'Content-Type': multipart_data.content_type}
        # Use a distinct local name; the original bound this to "request",
        # shadowing the imported urllib.request module.
        req = urllib.request.Request(base_url + "/upload/image",
                                     data=multipart_data, headers=headers)
        with urllib.request.urlopen(req) as response:
            return response.read()
def gen_image(openpose_image_path):
    """Queue one image generation using the fencerAPI.json workflow.

    Uploads the static reference image and the supplied openpose image to
    the server, randomizes the sampler seed, points the two LoadImage
    nodes at the uploaded files, and submits the workflow.

    Args:
        openpose_image_path: Local path of the openpose pose image.
    """
    # Load the workflow template shipped next to this script.
    with open("fencerAPI.json", "r") as f:
        prompt = json.load(f)

    # Push both images into the server's input store.
    upload_image("ref_black.png", "ref_black.png", server_address)
    upload_image(openpose_image_path, "openpose_image.png", server_address)

    # Patch the graph: fresh seed for node 3, uploaded names for the
    # reference (node 29) and pose (node 17) LoadImage nodes.
    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
    prompt["29"]["inputs"]['image'] = "ref_black.png"
    prompt["17"]["inputs"]['image'] = "openpose_image.png"

    queue_prompt(prompt)

BIN
jumping_05.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 39 KiB

View File

@ -7,6 +7,9 @@ import math
import cv2
import skeleton_lib as skel
import process_json_file as pjf
import urllib
from urllib import request, parse
from requests_toolbelt.multipart.encoder import MultipartEncoder
import sys
sys.path.append('./')
@ -93,9 +96,52 @@ def save_bodypose(width: int, height: int, coordinates: list):
canvas = draw_bodypose(canvas, keypoints, skel.coco_limbSeq, skel.coco_colors)
# Save as body_pose_output0000.png, body_pose_output0001.png, ...
cv2.imwrite('output/body_pose_output%04d.png' % save_bodypose.counter, canvas)
image_name = 'output/body_pose_output%04d.png' % save_bodypose.counter
cv2.imwrite(image_name, canvas)
gen_image(image_name)
save_bodypose.counter += 1 # Increment the counter
server_address = "http://127.0.0.1:8188"
def queue_prompt(prompt):
    """POST a workflow graph to the ComfyUI ``/prompt`` endpoint for execution.

    Args:
        prompt: The workflow dict (node-id -> node definition) to queue.

    Returns:
        bytes: The raw response body from the server (previously discarded).
    """
    payload = json.dumps({"prompt": prompt}).encode('utf-8')
    req = request.Request(server_address + "/prompt", data=payload)
    # Close the HTTP response deterministically; the original call leaked
    # the connection by never closing the object urlopen() returned.
    with request.urlopen(req) as resp:
        return resp.read()
def upload_image(input_path, name, server_address, image_type="input", overwrite=False):
    """Upload a local image to the ComfyUI server's ``/upload/image`` endpoint.

    Args:
        input_path: Path of the local image file to send.
        name: Filename to store the image under on the server.
        server_address: Server base, with or without a scheme
            (both "http://host:port" and "host:port" are accepted).
        image_type: ComfyUI upload category; this project only uses "input".
        overwrite: Whether the server should replace an existing file.

    Returns:
        bytes: The raw response body from the server.
    """
    # Bug fix: the module-level server_address already starts with "http://",
    # so the old "http://{}".format(server_address) produced a malformed
    # "http://http://..." URL. Accept both forms here.
    if server_address.startswith("http://") or server_address.startswith("https://"):
        base_url = server_address
    else:
        base_url = "http://" + server_address

    with open(input_path, 'rb') as file:
        multipart_data = MultipartEncoder(
            fields={
                'image': (name, file, 'image/png'),
                'type': image_type,
                'overwrite': str(overwrite).lower(),
            }
        )
        headers = {'Content-Type': multipart_data.content_type}
        # Use a distinct local name; the original bound this to "request",
        # shadowing the imported urllib.request module.
        req = urllib.request.Request(base_url + "/upload/image",
                                     data=multipart_data, headers=headers)
        with urllib.request.urlopen(req) as response:
            return response.read()
def gen_image(openpose_image_path):
    """Queue one image generation using the fencerAPI.json workflow.

    Uploads the static reference image and the supplied openpose image to
    the server, randomizes the sampler seed, points the two LoadImage
    nodes at the uploaded files, and submits the workflow.

    Args:
        openpose_image_path: Local path of the openpose pose image.
    """
    # Load the workflow template shipped next to this script.
    with open("fencerAPI.json", "r") as f:
        prompt = json.load(f)

    # Push both images into the server's input store.
    upload_image("ref_black.png", "ref_black.png", server_address)
    upload_image(openpose_image_path, "openpose_image.png", server_address)

    # Patch the graph: fresh seed for node 3, uploaded names for the
    # reference (node 29) and pose (node 17) LoadImage nodes.
    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
    prompt["29"]["inputs"]['image'] = "ref_black.png"
    prompt["17"]["inputs"]['image'] = "openpose_image.png"

    queue_prompt(prompt)
def main():
directory = './fixed'
json_files = [f for f in os.listdir(directory) if f.endswith('.json')]

BIN
ref.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 81 KiB

After

Width:  |  Height:  |  Size: 192 KiB

BIN
ref_black.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 171 KiB

BIN
ref_tran.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 178 KiB