Comments and readme update
This commit is contained in:
@@ -2,7 +2,7 @@
|
||||
|
||||
import numpy as np
|
||||
|
||||
# element of json
|
||||
# create the element of json
|
||||
def makexyzrotvis(x,y,z,rotx,roty,rotz,visible):
|
||||
ret = {}
|
||||
translation = {}
|
||||
|
||||
110
README.md
110
README.md
@@ -1,5 +1,62 @@
|
||||
# Human Pose Estimation with mediapipe
|
||||
|
||||
## Run with osc for UE4
|
||||
|
||||
```console
|
||||
python3 hpe_win.py
|
||||
```
|
||||
|
||||
The script accepts the following arguments in the "config.yml" file:
|
||||
|
||||
* address_input - file path or webcam index
|
||||
* scale_pose - shoulder width in metric system
|
||||
* crop_image - coefficient if you need to resize the image
|
||||
* osc_address - address for osc client
|
||||
* osc_port - port for osc client
|
||||
* osc_message_address - address for message output via osc client
|
||||
* output_method (['file', 'osc']) - output type via file or osc client
|
||||
* mirror_image - horizontal display of the output image
|
||||
* show_image - output image output via opencv
|
||||
* apose - send only APose coordinate
|
||||
* world - world or local values of rotation and translation
|
||||
* old_world - old converting model (does not work anymore)
|
||||
|
||||
To test with matplotlib, you need to run a script (example with a webcam with index 0):
|
||||
```console
|
||||
python3 hpe_videocapture.py 0
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
Python libraries:
|
||||
* mediapipe
|
||||
* numpy
|
||||
* matplotlib (could be optional)
|
||||
* opencv-python
|
||||
* json
|
||||
* python-osc
|
||||
|
||||
The mediapipe library requires the cuda toolkit and cudnn to work with the gpu.
|
||||
|
||||
## Interface for hpe_videocapture
|
||||
|
||||
For one-time rendering of points in 3D using matplotlib, you need to press the 'm' key.
|
||||
|
||||
To write the converted data to the UE4 model in the file 'hierarchy_data.json', you need to press the 'j' key.
|
||||
|
||||
To close the program, press the 'esc' key.
|
||||
|
||||
## Requirements for setting up an experiment
|
||||
|
||||
* There must be one person in the frame.
|
||||
* The camera should be approximately at a 90 degree angle.
|
||||
* The person must enter the frame entirely.
|
||||
* It is desirable that the contours of the clothes on the person are clearly visible.
|
||||
|
||||
## Build on Windows
|
||||
|
||||
In development
|
||||
|
||||
## Запуск
|
||||
|
||||
Для тестового запуска через вебкамеру с индексом 0.
|
||||
@@ -23,56 +80,3 @@ python3 hpe_json.py --address_input 0 --show_image True
|
||||
```console
|
||||
python3 hpe_videocapture.py 0
|
||||
```
|
||||
|
||||
## Зависимости
|
||||
|
||||
Библиотеки python:
|
||||
* mediapipe
|
||||
* numpy
|
||||
* matplotlib (можно сделать опциональной)
|
||||
* opencv-python
|
||||
* json
|
||||
* python-osc
|
||||
|
||||
Библиотека mediapipe требует cudatoolkit и cudnn для работы с gpu.
|
||||
|
||||
## Интерфейс для hpe_videocapture
|
||||
|
||||
Для разовой отрисовки точек в 3D с помощью matplotlib необходимо нажать клавишу 'm'.
|
||||
|
||||
Чтобы записать сконвертированные данные в модель UE4 в файл 'hierarchy_data.json', нужно нажать клавишу 'j'.
|
||||
|
||||
Для закрытия программы нужно нажать клавишу 'esc'.
|
||||
|
||||
## Требования к постановке эксперимента
|
||||
|
||||
* Человек в кадре должен быть один.
|
||||
* Камера примерно должна быть под углом 90 градусов.
|
||||
* Человек должен входить в кадр целиком.
|
||||
* Желательно, чтобы у одежды на человеке были хорошо видны контуры.
|
||||
|
||||
## Сборка на Windows
|
||||
|
||||
В разработке
|
||||
|
||||
## Config file
|
||||
|
||||
В разработке.
|
||||
|
||||
Предположительные переменные.
|
||||
|
||||
Параметры класса hpe_mp_class из hpe_mp_class.py:
|
||||
* hands_static_image_mode = False
|
||||
* hands_max_num_hands = 2
|
||||
* hands_min_detection_confidence = 0.7
|
||||
* hands_min_tracking_confidence = 0.5
|
||||
* pose_static_image_mode = False
|
||||
* pose_upper_body_only = False
|
||||
* pose_smooth_landmarks = True
|
||||
* pose_min_detection_confidence = 0.7
|
||||
* pose_min_tracking_confidence = 0.5
|
||||
* hol_static_image_mode = False
|
||||
* hol_upper_body_only = False
|
||||
* hol_smooth_landmarks = True
|
||||
* hol_min_detection_confidence = 0.7
|
||||
* hol_min_tracking_confidence = 0.5
|
||||
|
||||
17
check_stream.py
Normal file
17
check_stream.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""Quick manual check that a video stream can be opened and read with OpenCV.

Shows frames from `address_input` in a window until the Esc key is pressed.
"""
import cv2

# Stream URL (or webcam index) to test.
address_input = "http://localhost:8080"

cap = cv2.VideoCapture(address_input)

try:
    while True:
        # Reading frame
        success, img = cap.read()
        if success:
            cv2.imshow("Main", img)
        else:
            print("Frame not success read")
        # Interface: waitKey also services the GUI event loop; 27 == Esc
        key = cv2.waitKey(1)
        if key == 27:
            break
finally:
    # Release the capture device and close the window — the original
    # script leaked both on exit.
    cap.release()
    cv2.destroyAllWindows()
|
||||
@@ -1,5 +1,5 @@
|
||||
# Input
|
||||
address_input: "1" # input video path or webcam index
|
||||
address_input: "0" # input video path or webcam index
|
||||
|
||||
# Image processing
|
||||
scale_pose: 35.4 # shoulder width
|
||||
|
||||
@@ -15,8 +15,8 @@ logger = logging.getLogger("hpe_mp_class_logger")
|
||||
from ModelUE4 import *
|
||||
from ModelUE4_apose import bodyaposelocal
|
||||
from ModelUE4_apose import bodyaposeworld
|
||||
from ModelUE4_old import bodyconvert
|
||||
from ModelUE4_old import bodyconvertlocal
|
||||
#from ModelUE4_old import bodyconvert
|
||||
#from ModelUE4_old import bodyconvertlocal
|
||||
|
||||
class hpe_mp_class():
|
||||
|
||||
@@ -169,13 +169,14 @@ class hpe_mp_class():
|
||||
# try:
|
||||
if apose:
|
||||
if world:
|
||||
bodyaposeworld(data)
|
||||
bodyaposeworld(data) # APose world
|
||||
else:
|
||||
bodyaposelocal(data)
|
||||
bodyaposelocal(data) # APose local
|
||||
else:
|
||||
if world:
|
||||
bodyaposeworld(data)
|
||||
if self.holistic_use:
|
||||
# body converting
|
||||
poseslms = {}
|
||||
maxy = 0
|
||||
if self.results_hol.pose_landmarks:
|
||||
@@ -184,6 +185,7 @@ class hpe_mp_class():
|
||||
if lm.y > maxy:
|
||||
maxy = lm.y
|
||||
|
||||
# only rotation method
|
||||
bodyeuler(poseslms, data, self.coef)
|
||||
|
||||
# if old_world:
|
||||
@@ -191,11 +193,13 @@ class hpe_mp_class():
|
||||
# else:
|
||||
# bodyconvertwithrot(poseslms, data, self.coef, maxy)
|
||||
|
||||
# right hand converting
|
||||
rhandlms = {}
|
||||
if self.results_hol.right_hand_landmarks:
|
||||
for id, lm in enumerate(self.results_hol.right_hand_landmarks.landmark):
|
||||
rhandlms[id] = lm
|
||||
|
||||
# only rotation method
|
||||
rhandeuler(rhandlms, data, self.coef)
|
||||
|
||||
# if old_world:
|
||||
@@ -206,11 +210,13 @@ class hpe_mp_class():
|
||||
# # else:
|
||||
# # rhandconverttranslation(data)
|
||||
|
||||
# left hand converting
|
||||
lhandlms = {}
|
||||
if self.results_hol.left_hand_landmarks:
|
||||
for id, lm in enumerate(self.results_hol.left_hand_landmarks.landmark):
|
||||
lhandlms[id] = lm
|
||||
|
||||
# only rotation method
|
||||
lhandeuler(lhandlms,data,self.coef)
|
||||
|
||||
# if old_world:
|
||||
@@ -222,18 +228,18 @@ class hpe_mp_class():
|
||||
# lhandconverttranslation(data)
|
||||
else:
|
||||
bodyaposelocal(data)
|
||||
if self.holistic_use:
|
||||
poseslms = {}
|
||||
maxy = 0
|
||||
if self.results_hol.pose_landmarks:
|
||||
for id, lm in enumerate(self.results_hol.pose_landmarks.landmark):
|
||||
poseslms[id] = lm
|
||||
if lm.y > maxy:
|
||||
maxy = lm.y
|
||||
|
||||
bodyconvert(poseslms, data, self.coef, maxy)
|
||||
|
||||
bodyconvertlocal(poseslms, data, self.coef, maxy)
|
||||
# if self.holistic_use:
|
||||
# poseslms = {}
|
||||
# maxy = 0
|
||||
# if self.results_hol.pose_landmarks:
|
||||
# for id, lm in enumerate(self.results_hol.pose_landmarks.landmark):
|
||||
# poseslms[id] = lm
|
||||
# if lm.y > maxy:
|
||||
# maxy = lm.y
|
||||
#
|
||||
# bodyconvert(poseslms, data, self.coef, maxy)
|
||||
#
|
||||
# bodyconvertlocal(poseslms, data, self.coef, maxy)
|
||||
# except Exception as err:
|
||||
# logger.exception("Error json converting hpe class: " + str(err))
|
||||
|
||||
|
||||
82
hpe_win.py
82
hpe_win.py
@@ -51,50 +51,54 @@ mp_cl = hpe_mp_class()
|
||||
while True:
|
||||
# Reading frame
|
||||
success, img = cap.read()
|
||||
if success:
|
||||
# Image preprocessing
|
||||
if crop != 1.0:
|
||||
img = cv2.resize(img, (frame_width, frame_height))
|
||||
|
||||
# Image preprocessing
|
||||
if crop != 1.0:
|
||||
img = cv2.resize(img, (frame_width, frame_height))
|
||||
# # Mediapipe
|
||||
mp_cl.process(img, scale_pose=scale_pose)
|
||||
mp_cl.show(img)
|
||||
|
||||
# Mediapipe
|
||||
mp_cl.process(img, scale_pose=scale_pose)
|
||||
mp_cl.show(img)
|
||||
# FPS
|
||||
cTime = time.time()
|
||||
fps = 1. / (cTime - pTime)
|
||||
pTime = cTime
|
||||
|
||||
# FPS
|
||||
cTime = time.time()
|
||||
fps = 1. / (cTime - pTime)
|
||||
pTime = cTime
|
||||
# Showing
|
||||
if show_image:
|
||||
if mirror_image:
|
||||
img = cv2.flip(img, 1) # mirror
|
||||
cv2.putText(img, str(int(fps)), (22, 32), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
|
||||
cv2.putText(img, str(int(fps)), (20, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
|
||||
cv2.imshow("Main", img)
|
||||
|
||||
# Showing
|
||||
if show_image:
|
||||
if mirror_image:
|
||||
img = cv2.flip(img, 1) # mirror
|
||||
cv2.putText(img, str(int(fps)), (22, 32), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
|
||||
cv2.putText(img, str(int(fps)), (20, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
|
||||
cv2.imshow("Main", img)
|
||||
|
||||
# Output
|
||||
if output_method == 'file':
|
||||
# JSON
|
||||
res = mp_cl.getJSON(apose=apose, world=world, old_world=old_world)
|
||||
with open('hierarchy_data.json', 'w', encoding='utf-8') as f:
|
||||
json.dump(res, f, ensure_ascii=False, indent=4)
|
||||
# Output
|
||||
if output_method == 'file':
|
||||
# JSON
|
||||
res = mp_cl.getJSON(apose=apose, world=world, old_world=old_world)
|
||||
with open('hierarchy_data.json', 'w', encoding='utf-8') as f:
|
||||
json.dump(res, f, ensure_ascii=False, indent=4)
|
||||
else:
|
||||
# OSC
|
||||
res = mp_cl.getJSON(apose=apose, world=world, old_world=old_world) # convering model
|
||||
res_list = []
|
||||
# for values parsing on UE4
|
||||
for val in res.keys():
|
||||
stroka = str(val)
|
||||
for val2 in res[val]:
|
||||
if val2 == 'visible':
|
||||
stroka += " " + str(val2) + " " + str(res[val][val2])
|
||||
# res_list.append(str(val) + " " + str(val2) + " " + str(res[val][val2]))
|
||||
else:
|
||||
for val3 in res[val][val2]:
|
||||
stroka += " " + str(val2) + "_" + str(val3) + " " + str(res[val][val2][val3])
|
||||
# res_list.append(str(val) + " " + str(val2) + " " + str(val3) + " " + str(res[val][val2][val3]))
|
||||
res_list.append(stroka)
|
||||
# message sending
|
||||
client.send_message(osc_message_address, res_list)
|
||||
else:
|
||||
# OSC
|
||||
res = mp_cl.getJSON(apose=apose, world=world, old_world=old_world)
|
||||
res_list = []
|
||||
for val in res.keys():
|
||||
stroka = str(val)
|
||||
for val2 in res[val]:
|
||||
if val2 == 'visible':
|
||||
stroka += " " + str(val2) + " " + str(res[val][val2])
|
||||
# res_list.append(str(val) + " " + str(val2) + " " + str(res[val][val2]))
|
||||
else:
|
||||
for val3 in res[val][val2]:
|
||||
stroka += " " + str(val2) + "_" + str(val3) + " " + str(res[val][val2][val3])
|
||||
# res_list.append(str(val) + " " + str(val2) + " " + str(val3) + " " + str(res[val][val2][val3]))
|
||||
res_list.append(stroka)
|
||||
client.send_message(osc_message_address, res_list)
|
||||
print("Frame not success read")
|
||||
|
||||
# Interface
|
||||
key = cv2.waitKey(1)
|
||||
|
||||
Reference in New Issue
Block a user