update supervisely utils

This commit is contained in:
2022-05-18 12:01:01 +03:00
parent 49df17fb9c
commit f6af1d69a1
9 changed files with 516 additions and 0 deletions

View File

@@ -0,0 +1,217 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "07bdc0da",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import numpy as np\n",
"from tqdm.notebook import tqdm\n",
"path_dataset = r'D:\\karusel'\n",
"from SuperviselyKeypointsGUI.SuperviselyKeypointsGUI import *\n",
"\n",
"\n",
"keypoints_3d_path = r'SuperviselyKeypointsGUI\\karussel_24kps.csv'\n",
"keypoints_3d = pd.read_csv(keypoints_3d_path, index_col=0).astype(float)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ca890aa0",
"metadata": {},
"outputs": [],
"source": [
"def find_image(id):\n",
" for row in coco['images']:\n",
" if row['id'] == id:\n",
" return row"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8135031",
"metadata": {},
"outputs": [],
"source": [
"def ltrb_from_cloud(cloud_2d, imgSize, expansion=0.1):\n",
" height, width = imgSize\n",
" ltrb = np.round((cloud_2d[:, 0].min(), cloud_2d[:, 1].min(),\n",
" cloud_2d[:, 0].max(), cloud_2d[:, 1].max())).astype(int)\n",
" \n",
" if expansion > 0:\n",
" dx = np.round((ltrb[2]-ltrb[0])*expansion/2)\n",
" dy = np.round((ltrb[3]-ltrb[1])*expansion/2)\n",
" ltrb += np.array([-dx, -2*dy, dx, dy], dtype=int)\n",
" \n",
" ltrb[[0,2]] = np.clip(ltrb[[0,2]], 0, width)\n",
" ltrb[[1,3]] = np.clip(ltrb[[1,3]], 0, height)\n",
" \n",
" return ltrb\n",
"\n",
"def ltrb2ltwh(ltrb):\n",
" return np.array([ltrb[0], ltrb[1], ltrb[2]-ltrb[0], ltrb[3]-ltrb[1]], dtype=int)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79762c08",
"metadata": {},
"outputs": [],
"source": [
"with open(os.path.join(path_dataset, 'karusel_COCO.json'), 'r') as file:\n",
" coco = json.load(file)"
]
},
{
"cell_type": "markdown",
"id": "a1cfe0e2",
"metadata": {},
"source": [
"### Bboxes from pose"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9440b54",
"metadata": {},
"outputs": [],
"source": [
"for obj in tqdm(coco['annotations']):\n",
" keypoints_2d = np.array(obj['keypoints']).reshape((-1, 3))[:, :2]\n",
" id, width, height, file_name = find_image(obj['image_id']).values()\n",
" \n",
" bbox_ltrb = ltrb_from_cloud(keypoints_2d, (height, width), 0.4)\n",
" bbox_ltwh = ltrb2ltwh(bbox_ltrb).tolist()\n",
" obj['bbox'] = bbox_ltwh\n",
" obj['area'] = bbox_ltwh[2]*bbox_ltwh[3]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dfcb675b",
"metadata": {},
"outputs": [],
"source": [
"# with open(os.path.join(path_dataset, 'karusel_COCO.json'), 'w') as file:\n",
"# json.dump(coco, file)"
]
},
{
"cell_type": "markdown",
"id": "e8ff698d",
"metadata": {},
"source": [
"### Split COCO json to train/val"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3aec7a2b",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"import copy\n",
"\n",
"def split_coco_json(coco, test_size=0.2, random_state=0):\n",
" \n",
" train_idx, test_idx = train_test_split([i['id'] for i in coco['images']],\n",
" test_size=test_size, random_state=random_state)\n",
"\n",
"\n",
" train = copy.deepcopy(coco)\n",
" test = copy.deepcopy(coco)\n",
"\n",
" test['images'] = [x for x in coco['images'] if x['id'] in test_idx]\n",
" train['images'] = [x for x in coco['images'] if x['id'] in train_idx]\n",
"\n",
" test['annotations'] = [x for x in coco['annotations'] if x['image_id'] in test_idx]\n",
" train['annotations'] = [x for x in coco['annotations'] if x['image_id'] in train_idx]\n",
" return train, test"
]
},
{
"cell_type": "markdown",
"id": "1cdd1a3c",
"metadata": {},
"source": [
"### Create new split dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aafd54fb",
"metadata": {},
"outputs": [],
"source": [
"train, test = split_coco_json(coco, 0.1, random_state=777)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6af98fc",
"metadata": {},
"outputs": [],
"source": [
"test_path_images = [os.path.join(path_dataset, 'images', 'img', x['file_name']) for x in test['images']]\n",
"train_path_images = [os.path.join(path_dataset, 'images', 'img', x['file_name']) for x in train['images']]\n",
"\n",
"import shutil\n",
"path_new_dataset = r'C:\\Users\\Kir\\Jupiter\\Nurburg\\OpenPifPaf\\Training\\Karusel_dataset'\n",
"\n",
"path_train_img = os.path.join(path_new_dataset, 'images', 'train')\n",
"path_test_img = os.path.join(path_new_dataset, 'images', 'val')\n",
"path_ann = os.path.join(path_new_dataset, 'annotations')\n",
"\n",
"os.makedirs(path_train_img, exist_ok=True)\n",
"os.makedirs(path_test_img, exist_ok=True)\n",
"os.makedirs(path_ann, exist_ok=True)\n",
"\n",
"with open(os.path.join(path_ann, 'train.json'), 'w') as file:\n",
" json.dump(train, file)\n",
" \n",
"with open(os.path.join(path_ann, 'val.json'), 'w') as file:\n",
" json.dump(test, file)\n",
"\n",
"for path in train_path_images:\n",
" shutil.copy(path, path_train_img)\n",
"\n",
"for path in test_path_images:\n",
" shutil.copy(path, path_test_img)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 264 KiB

View File

@@ -0,0 +1,5 @@
pip install wxPython==4.1.1
pip install opencv-python
pip install numpy
pip install pandas
pip install supervisely

View File

@@ -0,0 +1,53 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "a916d355",
"metadata": {},
"outputs": [],
"source": [
"from SuperviselyKeypointsGUI import *\n",
"\n",
"keypoints_3d_path = r'karussel_24kps.csv'\n",
"token = ''\n",
"dataset_id = 627375 #(images)\n",
"project_id = 184347 #(Nurburg-karussel)\n",
"keypoints_3d = pd.read_csv(keypoints_3d_path, index_col=0).astype(float)\n",
"sp = Start_annotation(project_id, dataset_id, token,\n",
" 'local_dataset_path', keypoints_3d_path, 'point_cloud_path')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d4037523",
"metadata": {},
"outputs": [],
"source": [
"sp.start()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,250 @@
import supervisely_lib as sly
import pandas as pd
import cv2 as cv
import os
# from PIL import Image
import numpy as np
# import open3d as o3d
import wx
import wx.xrc
def last_keypoints_on_img(ann_info):
    """Return the object(s) with the most recent 'updatedAt' timestamp.

    ann_info : indexable annotation info; element 2 holds the annotation
        dict with an 'objects' list of dicts carrying at least
        'classTitle' and 'updatedAt'.
    Returns a DataFrame with columns ['classTitle', 'updatedAt'] whose
    index refers back to positions in the 'objects' list; only the rows
    sharing the maximum timestamp are kept.
    """
    rows = [[obj['classTitle'], obj['updatedAt']]
            for obj in ann_info[2]['objects']]
    table = pd.DataFrame(rows, columns=['classTitle', 'updatedAt'])
    table.updatedAt = pd.to_datetime(table.updatedAt)
    return table[table.updatedAt == table.updatedAt.max()]
def label2hash(meta_json, last):
    """Map keypoint labels to node hash ids for the class named in `last`.

    Parameters
    ----------
    meta_json : project meta dict with a 'classes' list; each class has a
        'title' and 'geometry_config' -> 'nodes' mapping of
        hash -> {'label': ..., ...}.
    last : DataFrame with a 'classTitle' column (as returned by
        last_keypoints_on_img); only the first row's title is used.

    Returns
    -------
    dict mapping node label (str) -> node hash id.

    Raises
    ------
    KeyError if no class in `meta_json` has the requested title (the
    original code hit an UnboundLocalError on `meta_nodes` in that case).
    """
    title = last['classTitle'].values[0]
    for clss in meta_json['classes']:
        if clss['title'] == title:
            # Titles are presumably unique per project; stop at the first
            # match instead of scanning the rest of the list.
            meta_nodes = clss['geometry_config']['nodes']
            break
    else:
        raise KeyError('class {!r} not found in project meta'.format(title))
    # Invert the nodes mapping: label -> hash id.
    return {node['label']: name for name, node in meta_nodes.items()}
def fit(imageSize, keypoints_2d, keypoints_3d, focus=1):
    """Estimate camera pose from 2-D/3-D keypoint correspondences via PnP.

    Parameters
    ----------
    imageSize : (height, width) of the image in pixels.
    keypoints_2d : DataFrame with 'x' and 'y' columns, indexed by keypoint id.
    keypoints_3d : DataFrame of 3-D reference points sharing that index.
    focus : focal-length multiplier; fx = fy = focus * image diagonal.

    Returns
    -------
    (rvecs, tvecs, cameraMatrix, distCoeffs) — directly usable as the
    `params` tuple consumed by cv.projectPoints.

    Raises
    ------
    ValueError if fewer than 6 correspondences are given, or if
    cv.solvePnP reports failure.
    """
    n = len(keypoints_2d)
    # Validate before doing any work (original checked only after building
    # the camera matrix).
    if n < 6:
        raise ValueError('Number of keypoints must be > 5')
    objectPoints = keypoints_3d.loc[keypoints_2d.index].values
    imagePoints = keypoints_2d[['x', 'y']].values.astype('float')
    # Pinhole intrinsics: focal length proportional to the image diagonal,
    # principal point at the image center. No lens distortion assumed.
    fx = fy = focus*np.hypot(*imageSize)
    cx = imageSize[1]/2
    cy = imageSize[0]/2
    distCoeffs = np.zeros(4, np.float32)
    cameraMatrix = np.float32([[fx, 0, cx],
                               [0, fy, cy],
                               [0, 0, 1]])
    ok, rvecs, tvecs = cv.solvePnP(objectPoints, imagePoints, cameraMatrix,
                                   distCoeffs, flags=cv.SOLVEPNP_ITERATIVE)
    # Original discarded the success flag; surface failures explicitly.
    if not ok:
        raise ValueError('cv.solvePnP failed to find a pose')
    return rvecs, tvecs, cameraMatrix, distCoeffs
def draw_cloud(img, points_3d, params):
    """Project a 3-D point cloud with the fitted camera `params`
    (rvecs, tvecs, cameraMatrix, distCoeffs) and draw each projected
    point on `img` as a 1-pixel pink dot. Drawing happens in place."""
    projected, _ = cv.projectPoints(points_3d, *params)
    for point in projected[:, 0]:
        img = cv.circle(img, point.astype(int), 0, (255,20,147), -1)
def draw_keypoints(img, keypoints_3d, params):
    """Project the 3-D reference keypoints with the fitted camera `params`
    and draw each one on `img` as a filled black circle of radius 5.
    Drawing happens in place."""
    projected, _ = cv.projectPoints(keypoints_3d, *params)
    for point in projected[:, 0]:
        img = cv.circle(img, point.astype(int), 5, (0,0,0), -1)
def resize(img, width=1000):
    """Resize `img` to the given width in pixels, preserving aspect ratio."""
    orig_height, orig_width = img.shape[:2]
    new_height = int(orig_height / orig_width * width)
    return cv.resize(img, (width, new_height))
class Start_annotation():
    """Entry point of the Supervisely keypoint re-annotation tool.

    Downloads project meta and the image list from the Supervisely API,
    then opens a wx GUI (class `GUI`) from which the user triggers
    `transform_by_visible` to refit a camera pose and re-upload
    reprojected keypoints.
    """
    def __init__( self, project_id, dataset_id, token,
    local_dataset_path, keypoints_3d_path, point_cloud_path):
        # Supervisely server address ('adress' spelling kept as-is).
        adress = 'https://app.supervise.ly/'
        # self.local_dataset_path = local_dataset_path
        # self.points_3d = np.asarray(o3d.io.read_point_cloud(point_cloud_path).points)
        # NOTE(review): the two lines above are commented out, but the plot
        # branch of `transform_by_visible` still reads
        # `self.local_dataset_path` and `self.points_3d` — plotting will
        # raise AttributeError until they are restored. TODO confirm.
        # 3-D reference keypoints, indexed by keypoint number (1..N).
        self.keypoints_3d = pd.read_csv(keypoints_3d_path, index_col=0).astype(float)
        self.api = sly.Api(adress, token)
        # Raw project meta (JSON) and its parsed ProjectMeta counterpart.
        self.meta_json = self.api.project.get_meta(project_id)
        self.meta = sly.ProjectMeta.from_json(self.meta_json)
        # Image table for the dataset, sorted by file name.
        self.images = pd.DataFrame(self.api.image.get_list(dataset_id)).sort_values('name', ignore_index=True)
    def load_ann(self, img_id):
        # Download the full annotation info for one image id.
        ann_info = self.api.annotation.download(img_id)
        return ann_info
    def annotations(self, ann_info):
        """Return the 2-D keypoints of the most recently updated object as a
        DataFrame indexed 1..N, or None when the image has no objects."""
        last = last_keypoints_on_img(ann_info)
        if len(last) == 0:
            return
        # ann_info is indexed positionally here; element 2 holds the
        # annotation dict. NOTE(review): `transform_by_visible` also uses
        # `ann_info.annotation` — presumably the download returns a
        # namedtuple-like object supporting both; verify against the
        # supervisely_lib version in use.
        nodes = ann_info[2]['objects'][last.index[0]]['nodes']
        keypoints_2d = pd.DataFrame(columns=['x', 'y'])
        # Node labels are the strings '1'..'N'; resolve each to its hash id.
        for i in range(1, len(nodes)+1):
            keypoints_2d.loc[i] = nodes[label2hash(self.meta_json, last)[str(i)]]['loc']
        return keypoints_2d
    def new_annotations(self, ann_info, new_keypoints):
        """Write refitted keypoint locations back into the annotation
        structure and return the (mutated) ann_info."""
        last = last_keypoints_on_img(ann_info)
        nodes = ann_info[2]['objects'][last.index[0]]['nodes']
        for i in new_keypoints.index:
            nodes[label2hash(self.meta_json, last)[str(i)]]['loc'] = new_keypoints.loc[i].tolist()
        return ann_info
    def start(self):
        # Launch the wx event loop with the annotation GUI; blocks until
        # the window closes.
        app = wx.App()
        wnd = GUI(self.images, self.transform_by_visible)
        wnd.Show(True)
        app.MainLoop()
    def transform_by_visible(self, idxs, img_id, name, focus=1, send=True, all_points=False, change_all=False, plot=False):
        """Fit a camera pose from the keypoints listed in `idxs` (or from all
        annotated keypoints when `all_points`), reproject the 3-D model, and
        optionally upload (`send`) and/or display (`plot`) the result.

        Returns the number of keypoints used for the fit, or an error
        string ('Error_empty_request' / 'Error_annotations') that the GUI
        turns into a message box.
        """
        if send==plot==False:
            return 'Error_empty_request'
        ann_info = self.load_ann(img_id)
        keypoints_2d = self.annotations(ann_info)
        if keypoints_2d is None:
            return 'Error_annotations'
        if not all_points:
            keypoints_2d = keypoints_2d.loc[idxs]
        # Annotation 'size' holds {'height': ..., 'width': ...}.
        imgSize = list(ann_info.annotation['size'].values())
        params = fit(imgSize, keypoints_2d, self.keypoints_3d, focus)
        # Reproject every 3-D keypoint with the fitted pose.
        new_keypoints = pd.DataFrame(cv.projectPoints(self.keypoints_3d.values, *params)[0][:, 0],
        columns=['x', 'y'], index=range(1, len(self.keypoints_3d)+1))
        if not change_all:
            # Keep the user-selected keypoints untouched; only replace the rest.
            new_keypoints = new_keypoints.drop(idxs)
        if send:
            new_ann = self.new_annotations(ann_info, new_keypoints)
            new_ann = sly.Annotation.from_json(new_ann.annotation, self.meta)
            self.api.annotation.upload_ann(img_id, new_ann)
        if plot:
            # NOTE(review): relies on self.local_dataset_path and
            # self.points_3d, both commented out in __init__ — restore them
            # before using plot=True.
            img = cv.imread(os.path.join(self.local_dataset_path, name))
            # for p in keypoints_2d.values:
            #     img = cv.circle(img, p.astype(int), 8, (255,255,255), -1)
            draw_cloud(img, self.points_3d, params)
            cv.imshow(name, resize(img, 1200))
            a = cv.waitKey(0)
            # Close on 'q' or key code 233 — presumably a localized close
            # key; confirm which key this is. TODO confirm
            if (a==ord('q')) | (a==233):
                cv.destroyAllWindows()
        return len(keypoints_2d)
class GUI( wx.Frame ):
    """wx frame with 25 keypoint checkboxes, send/plot toggles, an image
    chooser and a Go button; delegates the actual work to `func`
    (Start_annotation.transform_by_visible)."""
    def __init__( self, images, func):
        # `images` is the DataFrame of dataset images (columns include
        # 'name' and 'id'); `func` is the callback run by the Go button.
        wx.Frame.__init__ ( self, None, id = wx.ID_ANY, title = 'SuperviselyKeypointsGui', pos = wx.DefaultPosition, size = wx.Size( 250,450 ), style = wx.CAPTION|wx.CLOSE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL )
        self.images = images
        self.func = func
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        gbSizer1 = wx.GridBagSizer( 0, 0 )
        gbSizer1.SetFlexibleDirection( wx.VERTICAL )
        gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
        gbSizer1.SetMinSize( wx.Size( 200,400 ) )
        bSizer3 = wx.BoxSizer( wx.VERTICAL )
        # One checkbox per keypoint, labelled "1".."25".
        self.m_checkBoxes = []
        for i in range(25):
            CheckBox = wx.CheckBox( self, wx.ID_ANY, u"{}".format(str(i+1)), wx.DefaultPosition, wx.DefaultSize, 0 )
            self.m_checkBoxes.append(CheckBox)
            bSizer3.Add( self.m_checkBoxes[i], 0, wx.ALL, 0 )
        gbSizer1.Add( bSizer3, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.EXPAND|wx.LEFT|wx.TOP, 5 )
        wSizer5 = wx.WrapSizer( wx.HORIZONTAL, 0 )
        # Radio boxes choose whether to upload refit keypoints and/or plot
        # the projection locally; both default to "False" (selection 1).
        m_radioBox5Choices = [ u"True", u"False" ]
        self.radioBox_send = wx.RadioBox( self, wx.ID_ANY, u"SendKeypoints", wx.DefaultPosition, wx.DefaultSize, m_radioBox5Choices, 1, wx.RA_SPECIFY_COLS )
        self.radioBox_send.SetSelection( 1 )
        wSizer5.Add( self.radioBox_send, 1, wx.ALL, 5 )
        m_radioBox7Choices = [ u"True", u"False" ]
        self.radioBox_plot = wx.RadioBox( self, wx.ID_ANY, u"Plot", wx.DefaultPosition, wx.DefaultSize, m_radioBox7Choices, 1, wx.RA_SPECIFY_COLS )
        self.radioBox_plot.SetSelection( 1 )
        wSizer5.Add( self.radioBox_plot, 1, wx.ALL, 5 )
        # "All points": fit with every annotated keypoint, not just the
        # checked subset.
        self.m_checkBox26 = wx.CheckBox(self, wx.ID_ANY, u"All points", wx.DefaultPosition, wx.DefaultSize, 0)
        wSizer5.Add( self.m_checkBox26, 0, wx.ALL, 5 )
        # Shows how many keypoints the last run used.
        self.m_staticText7 = wx.StaticText( self, wx.ID_ANY, u"0", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText7.Wrap( -1 )
        wSizer5.Add( self.m_staticText7, 0, wx.ALL, 5 )
        self.m_button3 = wx.Button( self, wx.ID_ANY, u"Go", wx.DefaultPosition, wx.DefaultSize, 0 )
        wSizer5.Add( self.m_button3, 0, wx.ALL, 5 )
        gbSizer1.Add( wSizer5, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND|wx.TOP, 0 )
        # Dropdown listing every image name in the dataset.
        m_choice3Choices = self.images.name.tolist()
        self.m_choice3 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice3Choices, 0 )
        self.m_choice3.SetSelection( 0 )
        gbSizer1.Add( self.m_choice3, wx.GBPosition( 1, 0 ), wx.GBSpan( 2, 8 ), wx.ALL, 5 )
        self.SetSizer( gbSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )
        self.m_button3.Bind( wx.EVT_BUTTON, self.Go )
    def __del__( self ):
        pass
    def Go( self, event ):
        """Go-button handler: collect checked keypoint indices and options,
        validate, call the transform callback, and show any error."""
        # 1-based indices of checked keypoints.
        idxs = []
        for i, checkBox in enumerate(self.m_checkBoxes, start=1):
            if checkBox.IsChecked():
                idxs.append(i)
        all_points = self.m_checkBox26.IsChecked()
        # PnP fitting needs at least 6 correspondences (see `fit`).
        if (len(idxs)<6) & (not all_points):
            wx.MessageBox('Точек должно быть больше 5', 'Ошибка', wx.OK)  # "More than 5 points required" / "Error"
            event.Skip()
            return
        if all_points:
            n = 25
        else:
            n = len(idxs)
        self.m_staticText7.SetLabel(str(n))
        img_name = self.m_choice3.GetString(self.m_choice3.GetSelection())
        # Radio boxes yield the strings "True"/"False"; converted to bool
        # in the call below.
        send = self.radioBox_send.GetString(self.radioBox_send.GetSelection())
        plot = self.radioBox_plot.GetString(self.radioBox_plot.GetSelection())
        img_id = int(self.images[self.images.name==img_name].id.values[0])
        out = self.func(idxs, img_id, img_name, 1, send=(send=='True'), plot=(plot=='True'), all_points=all_points)
        if out == 'Error_empty_request':
            wx.MessageBox('Выберите plot или send', 'Ошибка', wx.OK)  # "Choose plot or send" / "Error"
        elif out == 'Error_annotations':
            wx.MessageBox('На этом изображении нет разметки', 'Ошибка', wx.OK)  # "This image has no annotations" / "Error"
        event.Skip()
        return
if __name__ == '__main__':
    # Standalone launch: fill in the token and local paths before running.
    # Credentials are intentionally blank — never commit a real token.
    point_cloud_path = r''
    keypoints_3d_path = r'karussel_24kps.csv'
    local_dataset_path = r''
    token = ''
    dataset_id = 627375 #(images)
    project_id = 184347 #(Nurburg-karussel)
    sp = Start_annotation(project_id, dataset_id, token,
    local_dataset_path, keypoints_3d_path, point_cloud_path)
    sp.start()

View File

@@ -0,0 +1,25 @@
,x,y,z
1,129,-137,0
2,-41,33,0
3,-65,46,0
4,22,-134,0
5,70,-253,0
6,69,-329,0
7,-37,-224,0
8,-88,-199,0
9,-143,-171,0
10,-173,-119,0
11,-219,-30,0
12,-307,-26,0
13,-207,123,0
14,-138,258,0
15,-43,240,0
16,10,242,0
17,62,229,0
18,146,174,0
19,213,138,0
20,230,59,0
21,302,-11,0
22,288,-93,0
23,172,-65,0
24,103,-14,0
1 x y z
2 1 129 -137 0
3 2 -41 33 0
4 3 -65 46 0
5 4 22 -134 0
6 5 70 -253 0
7 6 69 -329 0
8 7 -37 -224 0
9 8 -88 -199 0
10 9 -143 -171 0
11 10 -173 -119 0
12 11 -219 -30 0
13 12 -307 -26 0
14 13 -207 123 0
15 14 -138 258 0
16 15 -43 240 0
17 16 10 242 0
18 17 62 229 0
19 18 146 174 0
20 19 213 138 0
21 20 230 59 0
22 21 302 -11 0
23 22 288 -93 0
24 23 172 -65 0
25 24 103 -14 0

View File

@@ -0,0 +1,4 @@
@echo on
REM Activate the base Anaconda environment, then run the keypoints GUI tool.
call C:\Anaconda\Scripts\activate.bat
C:\Anaconda\python.exe SuperviselyKeypointsGUI.py
REM Keep the console window open so errors stay visible.
pause

View File

@@ -0,0 +1,197 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "283f6e9c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import glob\n",
"import json\n",
"from tqdm.notebook import tqdm\n",
"\n",
"path_dataset = r'D:\\karusel'\n",
"with open(os.path.join(path_dataset, 'meta.json'), 'r') as j:\n",
" meta = json.load(j)\n",
"\n",
"imgs = glob.glob(path_dataset + '\\\\images\\\\img\\\\*', recursive=True)\n",
"anns = glob.glob(path_dataset + '\\\\images\\\\ann\\\\*', recursive=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7af948d4",
"metadata": {},
"outputs": [],
"source": [
"def label2hash(meta_json, last):\n",
" for clss in meta_json['classes']:\n",
" if clss['title'] == last['classTitle']:\n",
" meta_nodes = clss['geometry_config']['nodes']\n",
" label2hash = {}\n",
" for name in meta_nodes:\n",
" label2hash[meta_nodes[name]['label']] = name\n",
" return label2hash"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ea0771e",
"metadata": {},
"outputs": [],
"source": [
"def annotations(meta_json, obj):\n",
" nodes = obj['nodes']\n",
" keypoints_2d = pd.DataFrame(columns=['x', 'y'])\n",
"\n",
" for i in range(1, len(nodes)+1):\n",
" keypoints_2d.loc[i] = nodes[label2hash(meta_json, obj)[str(i)]]['loc']\n",
"\n",
" keypoints_2d['v'] = 2\n",
" keypoints_2d = keypoints_2d.astype(float).round().astype(int)\n",
" return keypoints_2d[:24]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26b6abf4",
"metadata": {},
"outputs": [],
"source": [
"def ann_json(keypoints, img_id, obj):\n",
" \n",
" annotation = {\n",
" \"id\": obj['id'],\n",
" \"segmentation\": [],\n",
" \"num_keypoints\": len(keypoints),\n",
" \"area\": 0,\n",
" \"iscrowd\": 0,\n",
" \"image_id\": img_id,\n",
" \"bbox\": [],\n",
" \"category_id\": 1,\n",
" \"keypoints\": keypoints.values.flatten().tolist()}\n",
"\n",
" return annotation\n",
"\n",
"def img_json(ann, name, id):\n",
" height, width = ann['size'].values()\n",
" image = {\n",
" \"id\": id,\n",
" \"width\": width,\n",
" \"height\": height,\n",
" \"file_name\": name,\n",
" }\n",
" return image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "155be42d",
"metadata": {},
"outputs": [],
"source": [
"def ann_img_list(anns, imgs, meta):\n",
" annotations_list = []\n",
" image_list = []\n",
" for i in tqdm(range(len(anns))):\n",
"\n",
" with open(anns[i], 'r') as j:\n",
" ann = json.load(j)\n",
" \n",
" image_name = os.path.basename(anns[i])[:-5]\n",
" image = img_json(ann, image_name, i)\n",
" image_list.append(image)\n",
"\n",
" for obj in ann['objects']:\n",
" keypoints = annotations(meta, obj)\n",
" annotations_list.append(ann_json(keypoints, i, obj))\n",
" return image_list, annotations_list"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6593f677",
"metadata": {},
"outputs": [],
"source": [
"def COCO(image_list, annotations_list):\n",
" coco = {\n",
"\n",
" \"info\": {\n",
" \"description\": \"karusel Dataset\", \"version\": \"1.0\"\n",
" },\n",
"\n",
" \"categories\": [\n",
" {\n",
" \"supercategory\": \"NurburgRing\",\n",
" \"id\": 1,\n",
" \"name\": \"karusel\",\n",
" \"keypoints\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n",
" 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],\n",
" \"skeleton\": [\n",
" [1, 2],[2, 3],[3, 4],[4, 5],[5, 6],[6, 7],[7, 8],[8, 9],[9, 10],[10, 11],\n",
" [11, 12],[12, 13],[13, 14],[14, 15],[15, 16],[16, 17],[17, 18],[18, 19],[19, 20],\n",
" [20, 21],[21, 22],[22, 23],[23, 24],[24, 1],[24, 3],[1, 5]\n",
" ]\n",
" }\n",
" ]\n",
" }\n",
"\n",
" coco['images'] = image_list\n",
" coco['annotations'] = annotations_list\n",
" return coco"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38880d13",
"metadata": {},
"outputs": [],
"source": [
"coco_json = COCO(*ann_img_list(anns, imgs, meta))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "314565c2",
"metadata": {},
"outputs": [],
"source": [
"with open(os.path.join(path_dataset, 'karusel_COCO.json'), 'w') as file:\n",
" json.dump(coco_json, file)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,102 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "9b13894c",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import pandas as pd\n",
"import numpy as np\n",
"import cv2 as cv\n",
"import plotly.express as px\n",
"import plotly.graph_objects as go\n",
"\n",
"def roration(x):\n",
" x = np.deg2rad(x)\n",
" return np.array([[np.cos(x), -np.sin(x)],\n",
" [np.sin(x), np.cos(x)]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3fc452a2",
"metadata": {},
"outputs": [],
"source": [
"with open(r'karusel.png.json', 'r') as file:\n",
" img_json = json.load(file)\n",
" \n",
"img = cv.imread(r'karusel.png')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cfc4d7b9",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame([img_json['objects'][i]['points']['exterior'][0]\\\n",
" for i in range(len(img_json['objects']))], columns=['x','y'], index=range(1,25))\n",
"center = (1165, 874)\n",
"df -= center\n",
"df[['x', 'y']] = df.values[:, :2]@roration(90) # поворот x и y в плоскости земли\n",
"df['z'] = 0\n",
"\n",
"df = df.loc[[1, 7, 6, 20, 2, 3, 4, 17, 18, 5, 22, 19, 8,\n",
" 16, 9, 10, 11, 21, 15, 12, 14, 13, 24, 23]] # упорядочивание индексов\n",
"\n",
"df.index = list(range(1, 25))\n",
"df = df.astype(float)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07361259",
"metadata": {},
"outputs": [],
"source": [
"img = cv.circle(img, center, 5, (0,0,0), -1)\n",
"fig = px.imshow(img)\n",
"fig.add_trace(go.Scatter(x=df.x+center[0], y=df.y+center[1], text=df.index,\n",
" marker=dict(color='red', size=5), mode='markers'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f18693d",
"metadata": {},
"outputs": [],
"source": [
"df[['x', 'y']] = df.values[:, :2]@roration(44)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}