karussel_v2+new_model

This commit is contained in:
2022-06-20 20:50:07 +03:00
parent f6af1d69a1
commit ac04526fe1
470 changed files with 1258 additions and 26 deletions

View File

@@ -0,0 +1,217 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"id": "07bdc0da",
"metadata": {},
"outputs": [],
"source": [
import os
import json
import numpy as np
# Explicit pandas import: the original relied on the wildcard import below
# re-exporting `pd`, which breaks silently if that module stops importing pandas.
import pandas as pd
from tqdm.notebook import tqdm

# Root of the downloaded dataset (absolute local path — adjust per machine).
path_dataset = r'D:\downloads\Nurburg-karussel'
# NOTE(review): wildcard import pollutes the namespace; prefer importing the
# specific names this notebook actually uses.
from SuperviselyKeypointsGUI.SuperviselyKeypointsGUI import *

# 3D reference keypoints for the 24-point model, indexed by keypoint id.
keypoints_3d_path = r'SuperviselyKeypointsGUI\karussel_24kps.csv'
keypoints_3d = pd.read_csv(keypoints_3d_path, index_col=0).astype(float)
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "ca890aa0",
"metadata": {},
"outputs": [],
"source": [
def find_image(id, images=None):
    """Return the COCO image record with the given id, or None if absent.

    Args:
        id: image id to look up (shadows the builtin `id`; name kept for
            backward compatibility with existing callers).
        images: optional list of image records to search. Defaults to the
            notebook-global ``coco['images']``, matching the original behavior.
    """
    if images is None:
        images = coco['images']
    # First match wins; None when no record has this id (as in the original).
    return next((row for row in images if row['id'] == id), None)
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b8135031",
"metadata": {},
"outputs": [],
"source": [
def ltrb_from_cloud(cloud_2d, imgSize, expansion=0.1):
    """Axis-aligned bounding box (left, top, right, bottom) around a 2D point cloud.

    Args:
        cloud_2d: (N, 2) array of x/y coordinates.
        imgSize: (height, width) used to clip the box to the image.
        expansion: fractional padding; note the top edge is expanded twice as
            much as the other sides (presumably to capture area above the
            keypoints — confirm with the dataset author).

    Returns:
        Integer numpy array [left, top, right, bottom], clipped to the image.
    """
    height, width = imgSize
    xy_min = cloud_2d.min(axis=0)
    xy_max = cloud_2d.max(axis=0)
    box = np.round(np.concatenate([xy_min, xy_max])).astype(int)

    if expansion > 0:
        half_w = np.round((box[2] - box[0]) * expansion / 2)
        half_h = np.round((box[3] - box[1]) * expansion / 2)
        # Top edge gets double padding; other edges get symmetric padding.
        box += np.array([-half_w, -2 * half_h, half_w, half_h], dtype=int)

    box[[0, 2]] = np.clip(box[[0, 2]], 0, width)
    box[[1, 3]] = np.clip(box[[1, 3]], 0, height)

    return box
"\n",
def ltrb2ltwh(ltrb):
    """Convert a (left, top, right, bottom) box to COCO (left, top, width, height)."""
    left, top, right, bottom = ltrb
    return np.array([left, top, right - left, bottom - top], dtype=int)
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "79762c08",
"metadata": {},
"outputs": [],
"source": [
# Load the previously converted COCO-format annotations for this dataset.
coco_path = os.path.join(path_dataset, 'karusel_COCO.json')
with open(coco_path, 'r') as file:
    coco = json.load(file)
]
},
{
"cell_type": "markdown",
"id": "a1cfe0e2",
"metadata": {},
"source": [
"### Bboxes from pose"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9440b54",
"metadata": {},
"outputs": [],
"source": [
# Derive a bounding box for every annotation from its 2D keypoints and store
# it (plus its area) back on the annotation in place.
for obj in tqdm(coco['annotations']):
    # keypoints are stored flat as [x, y, v, x, y, v, ...]; keep only x/y.
    keypoints_2d = np.array(obj['keypoints']).reshape((-1, 3))[:, :2]
    # Explicit key access instead of `id, width, height, file_name = ....values()`,
    # which silently mis-assigns values if the record's key order ever changes.
    image = find_image(obj['image_id'])
    width, height = image['width'], image['height']

    # 0.4 expansion pads the keypoint hull generously (see ltrb_from_cloud).
    bbox_ltrb = ltrb_from_cloud(keypoints_2d, (height, width), 0.4)
    bbox_ltwh = ltrb2ltwh(bbox_ltrb).tolist()
    obj['bbox'] = bbox_ltwh
    obj['area'] = bbox_ltwh[2] * bbox_ltwh[3]
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dfcb675b",
"metadata": {},
"outputs": [],
"source": [
"# with open(os.path.join(path_dataset, 'karusel_COCO.json'), 'w') as file:\n",
"# json.dump(coco, file)"
]
},
{
"cell_type": "markdown",
"id": "e8ff698d",
"metadata": {},
"source": [
"### Split COCO json to train/val"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3aec7a2b",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"import copy\n",
"\n",
def split_coco_json(coco, test_size=0.2, random_state=0):
    """Split a COCO-format dict into independent train/test deep copies.

    Images are partitioned by id with sklearn's train_test_split; each
    annotation follows its image. The returned dicts share no data with
    the input.

    Args:
        coco: COCO-format dict with 'images' and 'annotations' lists.
        test_size: fraction of images assigned to the test split.
        random_state: seed forwarded to train_test_split for reproducibility.

    Returns:
        (train, test) tuple of COCO dicts.
    """
    train_idx, test_idx = train_test_split([i['id'] for i in coco['images']],
                                           test_size=test_size, random_state=random_state)
    # Sets give O(1) membership; the original tested membership in lists,
    # making the comprehensions below quadratic in dataset size.
    train_idx, test_idx = set(train_idx), set(test_idx)

    train = copy.deepcopy(coco)
    test = copy.deepcopy(coco)

    test['images'] = [x for x in test['images'] if x['id'] in test_idx]
    train['images'] = [x for x in train['images'] if x['id'] in train_idx]

    test['annotations'] = [x for x in test['annotations'] if x['image_id'] in test_idx]
    train['annotations'] = [x for x in train['annotations'] if x['image_id'] in train_idx]
    return train, test
]
},
{
"cell_type": "markdown",
"id": "1cdd1a3c",
"metadata": {},
"source": [
"### Create a new split dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aafd54fb",
"metadata": {},
"outputs": [],
"source": [
"train, test = split_coco_json(coco, 0.1, random_state=777)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6af98fc",
"metadata": {},
"outputs": [],
"source": [
# Materialize the split: write train/val annotation files and copy each
# split's images into its own directory.
test_path_images = [os.path.join(path_dataset, 'images', 'img', x['file_name'])
                    for x in test['images']]
train_path_images = [os.path.join(path_dataset, 'images', 'img', x['file_name'])
                     for x in train['images']]

import shutil
# NOTE: absolute local path — adjust per machine.
path_new_dataset = r'C:\Users\Kir\Jupiter\Nurburg\OpenPifPaf\Training\Karusel_dataset'

path_train_img = os.path.join(path_new_dataset, 'images', 'train')
path_test_img = os.path.join(path_new_dataset, 'images', 'val')
path_ann = os.path.join(path_new_dataset, 'annotations')

for directory in (path_train_img, path_test_img, path_ann):
    os.makedirs(directory, exist_ok=True)

with open(os.path.join(path_ann, 'train.json'), 'w') as file:
    json.dump(train, file)

with open(os.path.join(path_ann, 'val.json'), 'w') as file:
    json.dump(test, file)

for source_paths, destination in ((train_path_images, path_train_img),
                                  (test_path_images, path_test_img)):
    for src in source_paths:
        shutil.copy(src, destination)
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,212 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "283f6e9c",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np\n",
"import pandas as pd\n",
"import glob\n",
"import json\n",
"from tqdm.notebook import tqdm\n",
"\n",
"path_dataset = r'D:\\downloads\\Nurburg-karussel'\n",
"with open(os.path.join(path_dataset, 'meta.json'), 'r') as j:\n",
" meta = json.load(j)\n",
"\n",
"imgs = glob.glob(path_dataset + '\\\\images\\\\img\\\\*', recursive=True)\n",
"anns = glob.glob(path_dataset + '\\\\images\\\\ann\\\\*', recursive=True)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7af948d4",
"metadata": {},
"outputs": [],
"source": [
def label2hash(meta_json, last):
    """Map keypoint labels (e.g. '1'..'24') to Supervisely node hash ids.

    Looks up the class whose title matches ``last['classTitle']`` in the
    project meta and inverts its nodes dict (hash -> label becomes
    label -> hash).

    Args:
        meta_json: Supervisely project meta dict with a 'classes' list.
        last: annotation object carrying 'classTitle'.

    Returns:
        dict mapping label string -> node hash for the first matching class,
        or None when no class title matches (original behavior).
    """
    for clss in meta_json['classes']:
        if clss['title'] == last['classTitle']:
            meta_nodes = clss['geometry_config']['nodes']
            # Renamed from `label2hash`, which shadowed the function itself.
            return {node['label']: hash_id for hash_id, node in meta_nodes.items()}
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "7ea0771e",
"metadata": {},
"outputs": [],
"source": [
def annotations(meta_json, obj):
    """Extract an object's 2D keypoints as an integer DataFrame.

    Rows are indexed 1..len(nodes) by keypoint label; columns are
    ['x', 'y', 'v'] with visibility hard-coded to 2 (labeled & visible).
    Only the first 24 keypoints are kept.

    Args:
        meta_json: Supervisely project meta (forwarded to label2hash).
        obj: annotation object with a 'nodes' dict of hash -> {'loc': [x, y]}.

    Returns:
        (<=24, 3) int DataFrame of rounded keypoint coordinates.
    """
    nodes = obj['nodes']
    # Hoisted out of the loop: the label->hash mapping is loop-invariant, but
    # the original recomputed it (a scan over all classes/nodes) per keypoint.
    mapping = label2hash(meta_json, obj)

    # Build all rows at once instead of growing the DataFrame with .loc per row.
    labels = range(1, len(nodes) + 1)
    rows = [nodes[mapping[str(i)]]['loc'] for i in labels]
    keypoints_2d = pd.DataFrame(rows, columns=['x', 'y'], index=labels)

    keypoints_2d['v'] = 2
    keypoints_2d = keypoints_2d.astype(float).round().astype(int)
    return keypoints_2d[:24]
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "26b6abf4",
"metadata": {},
"outputs": [],
"source": [
def ann_json(keypoints, img_id, obj):
    """Build a COCO annotation record for one object.

    Args:
        keypoints: DataFrame with columns [x, y, v], one row per keypoint.
        img_id: id of the image this annotation belongs to.
        obj: source annotation object; only its 'id' is used.

    Returns:
        COCO annotation dict; bbox/area are left empty (filled in later).
    """
    # Row-major flatten: [x1, y1, v1, x2, y2, v2, ...] as COCO expects.
    flat_keypoints = keypoints.values.flatten().tolist()

    return {
        "id": obj['id'],
        "segmentation": [],
        "num_keypoints": len(keypoints),
        "area": 0,
        "iscrowd": 0,
        "image_id": img_id,
        "bbox": [],
        "category_id": 1,
        "keypoints": flat_keypoints,
    }
"\n",
def img_json(ann, name, id):
    """Build a COCO image record from a Supervisely annotation header.

    Args:
        ann: Supervisely annotation dict with ann['size'] holding the image
            height/width.
        name: image file name.
        id: image id to assign (shadows the builtin `id`; name kept for
            backward compatibility with existing callers).

    Returns:
        COCO image dict with id, width, height and file_name.
    """
    # Explicit key access instead of `height, width = ann['size'].values()`,
    # which silently swaps the two values if the JSON key order is width-first.
    height = ann['size']['height']
    width = ann['size']['width']
    return {
        "id": id,
        "width": width,
        "height": height,
        "file_name": name,
    }
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "155be42d",
"metadata": {},
"outputs": [],
"source": [
def ann_img_list(anns, imgs, meta):
    """Convert all Supervisely annotation files into COCO image/annotation lists.

    Args:
        anns: paths to per-image Supervisely annotation JSON files.
        imgs: unused (kept for backward compatibility with existing callers).
        meta: Supervisely project meta (forwarded to `annotations`).

    Returns:
        (image_list, annotations_list) — COCO records; image ids are the
        positional index of each annotation file.
    """
    annotations_list = []
    image_list = []
    # enumerate instead of range(len(...)); the index doubles as the image id.
    for image_id, ann_path in enumerate(tqdm(anns)):
        with open(ann_path, 'r') as j:
            ann = json.load(j)

        # Annotation files are named '<image name>.json'; strip the suffix.
        image_name = os.path.basename(ann_path)[:-5]
        image_list.append(img_json(ann, image_name, image_id))

        for obj in ann['objects']:
            keypoints = annotations(meta, obj)
            annotations_list.append(ann_json(keypoints, image_id, obj))
    return image_list, annotations_list
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6593f677",
"metadata": {},
"outputs": [],
"source": [
def COCO(image_list, annotations_list):
    """Assemble the final COCO-format dict for the 24-keypoint karusel dataset.

    Args:
        image_list: COCO image records (from ann_img_list).
        annotations_list: COCO annotation records (from ann_img_list).

    Returns:
        COCO dict with info, categories, images and annotations.
    """
    # Ring skeleton 1-2-...-24 plus three cross links, matching the original
    # hand-written edge list.
    skeleton = [[i, i + 1] for i in range(1, 24)] + [[24, 1], [24, 3], [1, 5]]

    coco = {
        "info": {
            "description": "karusel Dataset", "version": "2.0"
        },
        "categories": [
            {
                "supercategory": "NurburgRing",
                "id": 1,
                "name": "karusel",
                # 24 keypoint identifiers. The original listed 25 values
                # (0..24) for a 24-keypoint skeleton — an off-by-one.
                "keypoints": list(range(24)),
                "skeleton": skeleton,
            }
        ]
    }

    coco['images'] = image_list
    coco['annotations'] = annotations_list
    return coco
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "38880d13",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "96021bfc91ce425dbef57ae348321005",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/448 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"coco_json = COCO(*ann_img_list(anns, imgs, meta))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "314565c2",
"metadata": {},
"outputs": [],
"source": [
"with open(os.path.join(path_dataset, 'karusel_COCO.json'), 'w') as file:\n",
" json.dump(coco_json, file)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}