diff --git a/app.py b/app.py
index 2dfbc91..02e9fbc 100644
--- a/app.py
+++ b/app.py
@@ -14,7 +14,8 @@ import numpy as np
 import websocket
 import openpose_gen as opg
 from comfy_socket import get_images
-from postprocessing import expo_shuffle_image_steps, expo_add_to_background_image
+from postprocessing import expo_shuffle_image_steps, expo_add_to_background_image, expo_postprocess_main
+import skeleton_lib as skel
 sys.path.append('./')
 
 app = Flask(__name__)
@@ -26,8 +27,14 @@ expo_openpose_dir = info['expo_openpose_dir']
 expo_postprocessed_dir = info['expo_postprocessed_dir']
 expo_postprocess_temp_dir = info['expo_postprocess_temp_dir']
 
+on_postprocessing = False
+on_testing = True
+
 @app.route('/expo_fencing_pose', methods=['POST'])
 def expo_fencing_pose():
+    if on_postprocessing:
+        return jsonify({"status": "error", "message": "Postprocessing in progress"}), 503
+
     if request.is_json:
         data = request.get_json()
         coordinates = data['coordinates']
@@ -38,15 +45,48 @@ def expo_fencing_pose():
         if coordinates is None or canvas_size is None or 'batch' not in data or 'step' not in data:
             return jsonify({"status": "error", "message": "Missing data"}), 422
 
-        openpose_image_path = opg.expo_save_bodypose(canvas_size[0], canvas_size[1], coordinates, batch, step)
-        print(openpose_image_path)
-        expo_fencer_prompt(openpose_image_path, batch, step)
+        right_fencer_coordinates = get_predicted_coordinates(coordinates, canvas_size[0], canvas_size[1])
+
+        left_fencer_dir = os.path.join(expo_openpose_dir, 'left_fencer')
+        os.makedirs(left_fencer_dir, exist_ok=True)
+        right_fencer_dir = os.path.join(expo_openpose_dir, 'right_fencer')
+        os.makedirs(right_fencer_dir, exist_ok=True)
+
+        left_openpose_image_path = opg.expo_save_bodypose(canvas_size[0], canvas_size[1], coordinates, batch, step, left_fencer_dir, skel.coco_limbSeq, skel.coco_colors)
+        right_openpose_image_path = opg.expo_save_bodypose(canvas_size[0], canvas_size[1], right_fencer_coordinates, batch, step, right_fencer_dir, skel.coco_limbSeq, skel.coco_colors)
+
+        left_fencer_raw_image_dir = os.path.join(expo_raw_sd_dir, 'left_fencer')
+        os.makedirs(left_fencer_raw_image_dir, exist_ok=True)
+        right_fencer_raw_image_dir = os.path.join(expo_raw_sd_dir, 'right_fencer')
+        os.makedirs(right_fencer_raw_image_dir, exist_ok=True)
+
+        expo_fencer_prompt(left_openpose_image_path, left_fencer_raw_image_dir, batch, step)
+        expo_fencer_prompt(right_openpose_image_path, right_fencer_raw_image_dir, batch, step)
 
         return jsonify({"status": "success", "message": "Data received"}), 201
     else:
         return jsonify({"status": "error", "message": "Request must be JSON"}), 415
 
-def expo_fencer_prompt(openpose_image_path, batch, step):
+
+def get_predicted_coordinates(coordinates: list, width: int, height: int) -> list:
+    # TODO: implement the model that predicts the right fencer's coordinates
+    # coordinates = [x1, y1, c1, x2, y2, c2, ...],
+    # where x, y are the pixel coordinates and c is the confidence score
+    # there should be 18 keypoints, indexed 0 to 17
+    # the values are not normalized; they are in pixels relative to width and height
+
+    # the limbSeq and point colors need to be converted from and to skel.coco_limbSeq and skel.coco_colors
+    # both are defined in skeleton_lib.py
+
+    # when testing, the result can be visualized with expo_save_bodypose in openpose_gen.py
+    return mirror_coordinates(coordinates, width)  # placeholder: mirror the left fencer until the model exists
+
+def mirror_coordinates(coordinates: list, width: int) -> list:
+    mirrored = coordinates.copy()  # work on a copy so the caller's (left fencer) list is not mutated
+    for i in range(0, len(mirrored), 3):
+        mirrored[i] = width - mirrored[i]
+    return mirrored
+
+def expo_fencer_prompt(openpose_image_path, save_dir, batch, step):
     prompt = json.loads(open("./prompts/fencer_03.json", "r", encoding="utf-8").read())
@@ -66,30 +106,31 @@ def expo_fencer_prompt(openpose_image_path, batch, step):
     for node_id in images:
         for idx, image_data in enumerate(images[node_id]):
             image = Image.open(io.BytesIO(image_data))
-            image_path = os.path.join(expo_raw_sd_dir, f"{batch}_{step}.png")
+            image_path = os.path.join(save_dir, f"{batch}_{step}.png")
             image.save(image_path)
 
 def expo_clear_images():
-    for file in os.listdir(expo_openpose_dir):
-        os.remove(os.path.join(expo_openpose_dir, file))
-    for file in os.listdir(expo_raw_sd_dir):
-        os.remove(os.path.join(expo_raw_sd_dir, file))
+    if on_testing:
+        return
+    for root, dirs, files in os.walk(expo_openpose_dir):
+        for file in files:
+            os.remove(os.path.join(root, file))
+    for root, dirs, files in os.walk(expo_raw_sd_dir):
+        for file in files:
+            os.remove(os.path.join(root, file))
+    for root, dirs, files in os.walk(expo_postprocess_temp_dir):
+        for file in files:
+            os.remove(os.path.join(root, file))
 
 @app.route('/expo_postprocess', methods=['POST'])
 def expo_postprocess():
+    global on_postprocessing
+    if on_postprocessing:
+        return jsonify({"status": "error", "message": "Postprocessing in progress"}), 503
     print("Postprocessing")
-    os.makedirs(expo_postprocess_temp_dir, exist_ok=True)
-
-    shuffled_images_paths = expo_shuffle_image_steps()
-    background_path = os.path.join(expo_postprocess_temp_dir, 'background.png')
-    if not os.path.exists(background_path):
-        background = np.zeros((1000, 1500, 3), dtype=np.uint8)
-        cv2.imwrite(background_path, background)
-
-    expo_add_to_background_image(background_path, shuffled_images_paths[0][0], 0, 0)
-    cv2.imwrite(os.path.join(expo_postprocessed_dir, 'postprocessed.png'), background)
-
-    # expo_clear_images()
+    on_postprocessing = True
+    expo_postprocess_main()
+    expo_clear_images()
+    on_postprocessing = False
+    return jsonify({"status": "success", "message": "Postprocessing finished"}), 200
 
 @app.route('/gen_image', methods=['POST'])
 def gen_image():
diff --git a/openpose_gen.py b/openpose_gen.py
index 6f2253b..7f7ff3c 100644
--- a/openpose_gen.py
+++ b/openpose_gen.py
@@ -22,15 +22,15 @@ def coordinates_to_keypoints(coordinates: list) -> List[skel.Keypoint]:
     keypoints = [skel.Keypoint(coordinates[i], coordinates[i + 1]) for i in range(0, len(coordinates), 3)]
     return keypoints
 
-def expo_save_bodypose(width: int, height: int, coordinates: list, batch: int, step: int) -> None:
+def expo_save_bodypose(width: int, height: int, coordinates: list, batch: int, step: int, save_dir: str, limbSeq: list[list[int]], colors: list[list[int]]) -> str:
     canvas = np.zeros((height, width, 3), dtype=np.uint8)
     keypoints = coordinates_to_keypoints(coordinates)
-    canvas = skel.draw_bodypose(canvas, keypoints, skel.coco_limbSeq, skel.coco_colors)
+    canvas = skel.draw_bodypose(canvas, keypoints, limbSeq, colors)
 
     # Save as {batch}_{step}.png, {batch}_{step}.png, ...
-    if not os.path.exists(expo_openpose_dir):
-        os.makedirs(expo_openpose_dir)
-    image_path = os.path.join(expo_openpose_dir, '%d_%d.png' % (batch, step))
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+    image_path = os.path.join(save_dir, '%d_%d.png' % (batch, step))
     image_path = image_path.replace('\\', '/')
     cv2.imwrite(image_path, canvas)
     return image_path
diff --git a/postprocessing.py b/postprocessing.py
index f37b0b0..135fb96 100644
--- a/postprocessing.py
+++ b/postprocessing.py
@@ -20,13 +20,13 @@ def expo_get_step_by_name(image_name: str) -> int:
 def expo_get_batch_by_name(image_name: str) -> int:
     return int(image_name.split('_')[0])
 
-def expo_shuffle_image_steps() -> list[list[str]]:
+def expo_shuffle_image_steps(image_dir) -> list[list[str]]:
     images = {}
 
     # Read and categorize image paths by step
-    for image_name in os.listdir(expo_raw_sd_dir):
+    for image_name in os.listdir(image_dir):
         step = expo_get_step_by_name(image_name)
-        image_path = os.path.join(expo_raw_sd_dir, image_name)
+        image_path = os.path.join(image_dir, image_name)
         if step in images:
             images[step].append(image_path)
         else:
@@ -189,13 +189,23 @@ def current_session():
     return max_session + 1
 
-def expo_postprocess():
+def expo_postprocess_main():
     print("Postprocessing")
     os.makedirs(expo_postprocess_temp_dir, exist_ok=True)
+
+    left_fencer_raw_image_dir = os.path.join(expo_raw_sd_dir, 'left_fencer')
+    right_fencer_raw_image_dir = os.path.join(expo_raw_sd_dir, 'right_fencer')
+
+    if not os.path.exists(left_fencer_raw_image_dir) or not os.path.exists(right_fencer_raw_image_dir):
+        print("Raw images not found")
+        return
+
+    left_shuffled_images_paths = expo_shuffle_image_steps(left_fencer_raw_image_dir)
+    right_shuffled_images_paths = expo_shuffle_image_steps(right_fencer_raw_image_dir)
 
-    shuffled_images_paths = expo_shuffle_image_steps()
     background_path = os.path.join(expo_postprocess_temp_dir, 'background.png')
     logo_path = os.path.join(expo_postprocess_temp_dir, 'logo.png')
+
     if not os.path.exists(background_path):
         background = np.zeros((720, 1080, 3), dtype=np.uint8)
         cv2.imwrite(background_path, background)
@@ -205,7 +215,7 @@
 
     output_files = []
 
-    for i, candidate_list in enumerate(shuffled_images_paths):
+    for i, candidate_list in enumerate(left_shuffled_images_paths):
         left_fencer_paths = expo_resize_fencers(candidate_list, True, 500, 500)
         expo_motion_blur_fencers(left_fencer_paths, True)
         expo_decrese_opacities(left_fencer_paths)
@@ -218,11 +228,28 @@
             y_position = 192
             expo_add_to_background_image(temp_background_path, left_fencer_path, temp_output_path, x_position, y_position)
             temp_background_path = temp_output_path
+
+    for i, candidate_list in enumerate(right_shuffled_images_paths):
+        if i > len(left_shuffled_images_paths) - 1:
+            break
+        right_fencer_paths = expo_resize_fencers(candidate_list, False, 500, 500)
+        expo_motion_blur_fencers(right_fencer_paths, False)
+        expo_decrese_opacities(right_fencer_paths)
+
+        temp_output_path = os.path.join(expo_postprocess_temp_dir, f"temp_{i}.png")
+        if not os.path.exists(temp_output_path):
+            break
+
+        for j, right_fencer_path in enumerate(right_fencer_paths):
+            x_position = 1080 - (65 * math.pow(j, 1.3) - 132)
+            y_position = 192
+            expo_add_to_background_image(temp_output_path, right_fencer_path, temp_output_path, x_position, y_position)
+            temp_background_path = temp_output_path
 
-        expo_overlay_bg_gradient(temp_output_path, temp_output_path, bg_gradients[i])
+        expo_overlay_bg_gradient(temp_output_path, temp_output_path, bg_gradients[i % len(bg_gradients)])
         expo_add_logo(temp_output_path, logo_path, temp_output_path, 750, 700)
 
     output_to_display_folder(output_files)
 
 if __name__ == '__main__':
-    expo_postprocess()
\ No newline at end of file
+    expo_postprocess_main()
\ No newline at end of file
diff --git a/skeleton_lib.py b/skeleton_lib.py
index 7be6067..70056f8 100644
--- a/skeleton_lib.py
+++ b/skeleton_lib.py
@@ -19,19 +19,6 @@ coco_colors = [
     [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]
 ]
 
-yolo_coco_colors = [
-    [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
-    [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
-    [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]
-]
-
-yolo_coco_limbSeq = [
-    [1, 2], [1, 5], [2, 3], [3, 4],
-    [5, 6], [6, 7], [1, 8], [8, 9],
-    [9, 10], [1, 11], [11, 12], [12, 13],
-    [1, 0], [0, 14], [14, 16], [0, 15],
-]
-
 body_25_limbSeq = [
     [1, 8], [1, 2], [1, 5], [2, 3],
     [3, 4], [5, 6], [6, 7], [8, 9],