"""Real-time human pose estimation demo.

Reads frames from a video source (webcam index or file path, argv[1]),
runs MediaPipe-based pose estimation via ``hpe_mp_class``, and displays
the annotated, mirrored stream with an FPS overlay.

Optional argv[2] overrides the pose scale factor (default 0.42).

Keys:
    Esc  quit
    m    scatter-plot the current 3D landmarks with matplotlib
    j    dump the current results as JSON to ``hierarchy_data.json``
"""

import sys
import time
import json

import cv2
import matplotlib.pyplot as plt

from hpe_mp_class import hpe_mp_class

# Arguments: argv[1] is either a camera index ("0", "1", ...) or a path.
# isdigit() generalizes the original 0..99 string-compare loop.
adress_input = sys.argv[1]
if adress_input.isdigit():
    adress_input = int(adress_input)

scale_pose = 0.42
if len(sys.argv) > 2:
    scale_pose = float(sys.argv[2])

# Videocapture
cap = cv2.VideoCapture(adress_input)
if not cap.isOpened():
    sys.exit(f"Cannot open video source: {adress_input!r}")

# Preprocessing parameters: optional uniform downscale of each frame.
crop = 1.0
frame_width = int(crop * cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(crop * cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# FPS variables — seed pTime with "now" so the first reading is sane
# (the original started from 0, yielding a bogus first FPS value).
pTime = time.time()
cTime = 0

# Mediapipe class
mp_cl = hpe_mp_class()

while True:
    # Reading frame; stop cleanly at end-of-stream or camera failure
    # (the original ignored `success` and crashed in resize on None).
    success, img = cap.read()
    if not success:
        break

    # Image preprocessing
    img = cv2.resize(img, (frame_width, frame_height))

    # Mediapipe
    mp_cl.process(img, scale_pose=scale_pose)
    mp_cl.show(img)

    # FPS — guard against a zero time delta on very fast iterations.
    cTime = time.time()
    dt = cTime - pTime
    fps = 1.0 / dt if dt > 0 else 0.0
    pTime = cTime

    # Showing: mirror the frame, then draw the FPS twice (black offset
    # shadow under white text) so it stays readable on any background.
    img = cv2.flip(img, 1)  # mirror
    cv2.putText(img, str(int(fps)), (22, 32), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
    cv2.putText(img, str(int(fps)), (20, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
    cv2.imshow("Main", img)

    # Interface
    key = cv2.waitKey(1)
    if key == 27:  # Esc
        break

    if key == ord('m'):  # 109 — Matplotlib 3D scatter of the landmarks
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')

        res = mp_cl.getResults()
        mp_cl.scaleResult(res)

        points_body_x = []
        points_body_y = []
        points_body_z = []
        points_hand_x = []
        points_hand_y = []
        points_hand_z = []

        # Body landmarks: only plot confidently-detected points.
        for pp in res["poses"].values():
            if pp.visibility > 0.9:
                points_body_x.append(pp.x)
                points_body_y.append(pp.y)
                points_body_z.append(pp.z)

        # Hand landmarks: plot everything (no visibility field used).
        for hand in res["hands"]:
            for hp in hand.values():
                points_hand_x.append(hp.x)
                points_hand_y.append(hp.y)
                points_hand_z.append(hp.z)

        ax.scatter(points_body_x, points_body_y, points_body_z, color='blue')
        ax.scatter(points_hand_x, points_hand_y, points_hand_z, color='green')
        plt.show()

    if key == ord('j'):  # 106 — dump current results as JSON
        res = mp_cl.getJSON()
        with open('hierarchy_data.json', 'w', encoding='utf-8') as f:
            json.dump(res, f, ensure_ascii=False, indent=4)

# Cleanup: release the capture device and close the OpenCV windows
# (both were leaked by the original script).
cap.release()
cv2.destroyAllWindows()