Add model download list and group-picture workflow JSON; remove obsolete workflow files

This commit is contained in:
zaqxs123456 2024-10-14 15:47:09 +08:00
parent 4ea0b0280b
commit 99049e22d1
16 changed files with 213 additions and 590 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
embeddings/badhandv4.pt Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,215 +0,0 @@
{
"3": {
"inputs": {
"seed": 688190695340079,
"steps": 3,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"32",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "A fencer in full gear, fencing sword, 1 human, empty background, dark background, dark, empty",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "Result",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"17": {
"inputs": {
"image": "dance_01.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"control_net_name": "diffusion_pytorch_model.safetensors"
},
"class_type": "ControlNetLoader",
"_meta": {
"title": "Load ControlNet Model"
}
},
"22": {
"inputs": {
"strength": 0.9500000000000001,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"19",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1,
"weight_type": "style and composition",
"combine_embeds": "add",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "V only",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, bad anatomy, worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0)\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:unaestheticXL_cbp62 -neg, embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
}
}

View File

@ -1,219 +0,0 @@
{
"3": {
"inputs": {
"seed": 219117161329479,
"steps": 4,
"cfg": 1.5,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"22",
0
],
"negative": [
"22",
1
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_sfwLightningDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "costume party, trick-or-treating, candy apples, pumpkin carving, haunted hayride, spooky stories, dress-up, decorations, Halloween parade, haunted corn maze, bobbing for apples, ghost stories, scary movies, pumpkin patch, fall festival, Halloween games, costume contest",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "Result",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"17": {
"inputs": {
"image": "e7f6c011776e8db7cd330b54174fd76f7d0216b612387a5ffcfb81e6f0919683.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"22": {
"inputs": {
"strength": 1,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"40",
0
],
"control_net": [
"43",
0
],
"image": [
"17",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"28": {
"inputs": {
"ipadapter_file": "ip-adapter-plus_sdxl_vit-h.safetensors"
},
"class_type": "IPAdapterModelLoader",
"_meta": {
"title": "IPAdapter Model Loader"
}
},
"29": {
"inputs": {
"image": "ref_black.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"31": {
"inputs": {
"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"32": {
"inputs": {
"weight": 1,
"weight_type": "style and composition",
"combine_embeds": "norm average",
"start_at": 0,
"end_at": 1,
"embeds_scaling": "K+V w/ C penalty",
"model": [
"4",
0
],
"ipadapter": [
"28",
0
],
"image": [
"29",
0
],
"clip_vision": [
"31",
0
]
},
"class_type": "IPAdapterAdvanced",
"_meta": {
"title": "IPAdapter Advanced"
}
},
"40": {
"inputs": {
"text": "blurry, drawing, horror, distorted, malformed, naked, cartoon, anime, out of focus, dull, muted colors, boring pose, no action, distracting background, colorful, (face:5.0), bad hand, (bad anatomy:5.0), worst quality, ai generated images, low quality, average quality, smoke, background, three arms, three hands, white light, (light:5.0), (shadow:5.0), (floor:5.0), 2 sword, multiple sword\n\nembedding:ac_neg1, embedding:ac_neg2, embedding:badhandv4, embedding:DeepNegative_xl_v1, embedding:NEGATIVE_HANDS, embedding:negativeXL_D, embedding:unaestheticXL_cbp62 -neg, embedding:verybadimagenegative_v1.3, embedding:ziprealism_neg, ",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative)"
}
},
"43": {
"inputs": {
"control_net_name": "OpenPoseXL2.safetensors",
"model": [
"4",
0
]
},
"class_type": "DiffControlNetLoader",
"_meta": {
"title": "Load ControlNet Model (diff)"
}
}
}

View File

@ -1,49 +0,0 @@
import json
import urllib
from urllib import request, parse
import random
from requests_toolbelt.multipart.encoder import MultipartEncoder
server_address = "http://127.0.0.1:8188"
def queue_prompt(prompt):
    """Submit a ComfyUI workflow to the server's /prompt endpoint.

    `prompt` is the workflow graph (a dict of node-id -> node spec); it is
    wrapped in {"prompt": ...} as the ComfyUI API expects.
    """
    payload = json.dumps({"prompt": prompt}).encode('utf-8')
    api_request = request.Request(server_address + "/prompt", data=payload)
    request.urlopen(api_request)
def upload_image(input_path, name, server_address, image_type="input", overwrite=False):
    """Upload a local image file to the ComfyUI server's /upload/image endpoint.

    Parameters:
        input_path: path of the local image file to upload.
        name: filename the server should store the image under.
        server_address: server host, with or without a leading "http://" scheme.
        image_type: ComfyUI upload type field (default "input").
        overwrite: whether the server may overwrite an existing file.

    Returns:
        The raw response body (bytes) from the server.
    """
    # Bug fix: the module-level server_address already contains "http://"
    # (see its definition above), so blindly prepending the scheme produced
    # the invalid URL "http://http://...". Normalize instead.
    base = server_address if "://" in server_address else "http://" + server_address
    with open(input_path, 'rb') as file:
        multipart_data = MultipartEncoder(
            fields={
                'image': (name, file, 'image/png'),
                'type': image_type,
                'overwrite': str(overwrite).lower(),
            }
        )
        headers = {'Content-Type': multipart_data.content_type}
        # Bug fix: the original assigned to a local named `request`, shadowing
        # the `from urllib import request` module import; use a distinct name.
        # Keep the request inside the `with` so the encoder can read the file.
        upload_request = urllib.request.Request(
            base + "/upload/image", data=multipart_data, headers=headers
        )
        with urllib.request.urlopen(upload_request) as response:
            return response.read()
def gen_image(openpose_image_path, workflow_path="fencerAPI.json"):
    """Queue one image generation on the ComfyUI server.

    Loads the workflow JSON, uploads the style reference and the OpenPose
    pose image, randomizes the sampler seed, points the workflow's LoadImage
    nodes at the uploaded files, and submits the workflow.

    Parameters:
        openpose_image_path: local path of the OpenPose pose image to use.
        workflow_path: workflow JSON file to load (default "fencerAPI.json",
            preserving the original hard-coded behavior).
    """
    # json.load reads and parses in one step (idiomatic, replaces read+loads).
    with open(workflow_path, "r") as f:
        prompt = json.load(f)

    # Upload the reference and pose images the workflow's LoadImage nodes need.
    upload_image("ref_black.png", "ref_black.png", server_address)
    upload_image(openpose_image_path, "openpose_image.png", server_address)

    # Node "3" is the KSampler; a fresh seed gives a new image per call.
    prompt["3"]["inputs"]["seed"] = random.randint(0, 10000000000)
    # Node "29" is the IPAdapter reference image, node "17" the ControlNet pose.
    prompt["29"]["inputs"]['image'] = "ref_black.png"
    prompt["17"]["inputs"]['image'] = "openpose_image.png"

    queue_prompt(prompt)

176
group_pic.json Normal file
View File

@ -0,0 +1,176 @@
{
"3": {
"inputs": {
"seed": 29214331452310,
"steps": 20,
"cfg": 8,
"sampler_name": "dpmpp_2m_sde_gpu",
"scheduler": "karras",
"denoise": 0.55,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"11",
1
],
"latent_image": [
"15",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "counterfeitV30_v30.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": "costume party, trick-or-treating, candy apples, pumpkin carving, haunted hayride, spooky stories, dress-up, decorations, Halloween parade, haunted corn maze, bobbing for apples, ghost stories, scary movies, pumpkin patch, fall festival, Halloween games, costume contest, spooky fun, candy adventures, pumpkin smiles, friendly frights, dress-up days, Halloween parties, ghostly games, silly scares, trick-or-treat trips, costume parades, autumn excitement, fall, fun, Halloween celebration, happy, smile",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"image": "body_pose_output_multi0000.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"11": {
"inputs": {
"strength": 1,
"start_percent": 0,
"end_percent": 1,
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"control_net": [
"12",
0
],
"image": [
"10",
0
],
"vae": [
"4",
2
]
},
"class_type": "ControlNetApplyAdvanced",
"_meta": {
"title": "Apply ControlNet"
}
},
"12": {
"inputs": {
"control_net_name": "control_v11p_sd15_openpose_fp16.safetensors",
"model": [
"4",
0
]
},
"class_type": "DiffControlNetLoader",
"_meta": {
"title": "Load ControlNet Model (diff)"
}
},
"14": {
"inputs": {
"image": "test.png",
"upload": "image"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"15": {
"inputs": {
"pixels": [
"14",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
}
}

37
model_list.md Normal file
View File

@ -0,0 +1,37 @@
# ControlNet
* ControlNet-v1-1_fp16_safetensors:
https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/blob/main/control_v11p_sd15_openpose_fp16.safetensors
* diffusion_pytorch_model.safetensors:
https://huggingface.co/InstantX/InstantID/blob/main/ControlNetModel/diffusion_pytorch_model.safetensors
# Checkpoint
* dreamshaperXL_sfwLightningDPMSDE.safetensors
https://civitai.com/models/112902?modelVersionId=355868
* counterfeitV30_v30.safetensors
https://civitai.com/models/4468/counterfeit-v30?modelVersionId=57618
* absolutereality_v181.safetensors
https://civitai.com/models/81458/absolutereality
# IPAdapter
* ip-adapter-plus_sdxl_vit-h.safetensors
https://huggingface.co/h94/IP-Adapter/blob/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors
# CLIP Vision
* CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K
# Embeddings
* Provided in this repository's "embeddings" folder (no download needed)

View File

@ -1,107 +0,0 @@
{
"3": {
"inputs": {
"seed": 518457609939857,
"steps": 10,
"cfg": 8,
"sampler_name": "dpmpp_sde_gpu",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "beautiful scenery nature country road landscape, , blue galaxy, robot",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}