v01
This commit is contained in:
64
scripts/basalt_capture_mocap.py
Executable file
64
scripts/basalt_capture_mocap.py
Executable file
@@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import sys
|
||||
import os
|
||||
import rospy
|
||||
import argparse
|
||||
from geometry_msgs.msg import TransformStamped
|
||||
|
||||
|
||||
def callback(data):
    """Append one mocap TransformStamped sample to the global CSV file.

    Writes the timestamp (shifted into the local ROS clock via the global
    time_offset) followed by translation (x, y, z) and rotation quaternion
    (w, x, y, z).
    """
    global out_file, time_offset

    # Use `is None`: the original truthiness test would also trigger on a
    # perfectly valid zero offset / unopened-file sentinel ambiguity.
    if out_file is None:
        return

    if time_offset is None:
        # Estimate the offset between the mocap clock and the local ROS
        # clock from the first message; reused for all later stamps.
        # (A falsy zero-Duration offset must not cause re-estimation.)
        time_offset = rospy.Time().now() - data.header.stamp

    out_file.write('{},{},{},{},{},{},{},{}\n'.format(
        data.header.stamp + time_offset,
        data.transform.translation.x,
        data.transform.translation.y,
        data.transform.translation.z,
        data.transform.rotation.w,
        data.transform.rotation.x,
        data.transform.rotation.y,
        data.transform.rotation.z
    ))
|
||||
|
||||
|
||||
def listener():
    """Subscribe to the mocap topic and block until ROS shutdown.

    Every incoming TransformStamped on /vrpn_client/raw_transform is
    handed to callback(), which appends it to the output CSV.
    """
    # anonymous=True lets several recorder instances run concurrently.
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber('/vrpn_client/raw_transform', TransformStamped, callback)

    rospy.spin()
|
||||
|
||||
|
||||
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Record Motion Capture messages from ROS (/vrpn_client/raw_transform).')
    parser.add_argument('-d', '--dataset-path', required=True, help="Path to store the result")
    args = parser.parse_args()

    dataset_path = args.dataset_path

    # Globals consumed by callback(); None means "not recording yet".
    out_file = None
    time_offset = None

    if not os.path.exists(dataset_path):
        os.makedirs(dataset_path)

    out_file = open(os.path.join(dataset_path, 'data.csv'), 'w')
    out_file.write('#timestamp [ns], p_RS_R_x [m], p_RS_R_y [m], p_RS_R_z [m], q_RS_w [], q_RS_x [], q_RS_y [], q_RS_z []\n')
    try:
        # Blocks until ROS shutdown (Ctrl-C); callback() appends samples.
        listener()
    finally:
        # Ensure the CSV is flushed and closed even if listener() raises
        # (the original leaked the file handle on any exception).
        out_file.close()
|
||||
|
||||
144
scripts/basalt_convert_kitti_calib.py
Executable file
144
scripts/basalt_convert_kitti_calib.py
Executable file
@@ -0,0 +1,144 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import sys
|
||||
import math
|
||||
import numpy as np
|
||||
import os
|
||||
from string import Template
|
||||
import cv2
|
||||
import argparse
|
||||
|
||||
# Command-line interface: only the KITTI dataset folder is required.
# (Fixed typo in the user-facing description: "int the" -> "in the".)
parser = argparse.ArgumentParser(description='Convert KITTI calibration to basalt and save it in the dataset folder as basalt_calib.json.')
parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in KITTI format")
args = parser.parse_args()

dataset_path = args.dataset_path

print(dataset_path)

# KITTI stores the projection matrices of all 4 cameras in calib.txt.
kitti_calib_file = dataset_path + '/calib.txt'
|
||||
|
||||
|
||||
calib_template = Template('''{
|
||||
"value0": {
|
||||
"T_imu_cam": [
|
||||
{
|
||||
"px": 0.0,
|
||||
"py": 0.0,
|
||||
"pz": 0.0,
|
||||
"qx": 0.0,
|
||||
"qy": 0.0,
|
||||
"qz": 0.0,
|
||||
"qw": 1.0
|
||||
},
|
||||
{
|
||||
"px": $px,
|
||||
"py": 0.0,
|
||||
"pz": 0.0,
|
||||
"qx": 0.0,
|
||||
"qy": 0.0,
|
||||
"qz": 0.0,
|
||||
"qw": 1.0
|
||||
}
|
||||
],
|
||||
"intrinsics": [
|
||||
{
|
||||
"camera_type": "pinhole",
|
||||
"intrinsics": {
|
||||
"fx": $fx0,
|
||||
"fy": $fy0,
|
||||
"cx": $cx0,
|
||||
"cy": $cy0
|
||||
}
|
||||
},
|
||||
{
|
||||
"camera_type": "pinhole",
|
||||
"intrinsics": {
|
||||
"fx": $fx1,
|
||||
"fy": $fy1,
|
||||
"cx": $cx1,
|
||||
"cy": $cy1
|
||||
}
|
||||
}
|
||||
],
|
||||
"resolution": [
|
||||
[
|
||||
$rx,
|
||||
$ry
|
||||
],
|
||||
[
|
||||
$rx,
|
||||
$ry
|
||||
]
|
||||
],
|
||||
"vignette": [],
|
||||
"calib_accel_bias": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"calib_gyro_bias": [
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
0.0
|
||||
],
|
||||
"imu_update_rate": 0.0,
|
||||
"accel_noise_std": [0.0, 0.0, 0.0],
|
||||
"gyro_noise_std": [0.0, 0.0, 0.0],
|
||||
"accel_bias_std": [0.0, 0.0, 0.0],
|
||||
"gyro_bias_std": [0.0, 0.0, 0.0],
|
||||
"cam_time_offset_ns": 0
|
||||
}
|
||||
}
|
||||
''')
|
||||
|
||||
|
||||
with open(kitti_calib_file, 'r') as stream:
    # Flatten the whole file into one whitespace-separated token list.
    lines = (' '.join([x.strip('\n ') for x in stream.readlines() if x.strip('\n ') ])).split(' ')

# Expect 4 projection matrices (P0..P3): 4 * ("Px:" + 12 numbers) = 52 tokens.
if len(lines) != 52:
    print('Issues loading calibration')
    print(lines)
    # Abort here: proceeding would fail later with a confusing reshape error.
    sys.exit(1)

P0 = np.array([float(x) for x in lines[1:13]]).reshape(3, 4)
P1 = np.array([float(x) for x in lines[14:26]]).reshape(3, 4)
print('P0\n', P0)
print('P1\n', P1)

# Stereo baseline from P1 = K [I | t] with t_x = -fx * baseline.
tx = -P1[0, 3] / P1[0, 0]

# Use the first image only to determine the sensor resolution.
img = cv2.imread(dataset_path + '/image_0/000000.png')
if img is None:
    # cv2.imread returns None on failure; without this check the script
    # dies with an opaque AttributeError on img.shape.
    print('Could not read ' + dataset_path + '/image_0/000000.png to determine resolution')
    sys.exit(1)
rx = img.shape[1]
ry = img.shape[0]

values = {'fx0': P0[0, 0], 'fy0': P0[1, 1], 'cx0': P0[0, 2], 'cy0': P0[1, 2], 'fx1': P1[0, 0], 'fy1': P1[1, 1], 'cx1': P1[0, 2], 'cy1': P1[1, 2], 'px': tx, 'rx': rx, 'ry': ry}

calib = calib_template.substitute(values)
print(calib)

with open(dataset_path + '/basalt_calib.json', 'w') as stream2:
    stream2.write(calib)
|
||||
113
scripts/basalt_response_calib.py
Executable file
113
scripts/basalt_response_calib.py
Executable file
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import sys
|
||||
import math
|
||||
import os
|
||||
import cv2
|
||||
import argparse
|
||||
|
||||
import numpy as np
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
parser = argparse.ArgumentParser(description='Response calibration.')
parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in Euroc format")
args = parser.parse_args()

dataset_path = args.dataset_path

print(dataset_path)

# Image timestamps [ns] and exposure times (stored in us, converted to s).
timestamps = np.loadtxt(dataset_path + '/mav0/cam0/data.csv', usecols=[0], delimiter=',', dtype=np.int64)
exposures = np.loadtxt(dataset_path + '/mav0/cam0/exposure.csv', usecols=[1], delimiter=',', dtype=np.int64).astype(np.float64) * 1e-6
pixel_avgs = list()

if timestamps.shape[0] != exposures.shape[0]: print("timestamps and exposures do not match")

imgs = []

# check image data.
for timestamp in timestamps:
    # The original computed `path` but never used it, repeating the
    # expression in the imread call; reuse it here.
    path = dataset_path + '/mav0/cam0/data/' + str(timestamp) + '.webp'
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # cv2.imread returns None on a missing/corrupt file; fail loudly
        # instead of crashing on img.shape below.
        print('Could not read ' + path)
        sys.exit(1)
    if len(img.shape) == 3: img = img[:,:,0]
    imgs.append(img)
    pixel_avgs.append(np.mean(img))

imgs = np.array(imgs)
print(imgs.shape)
print(imgs.dtype)

num_pixels_by_intensity = np.bincount(imgs.flat)
print('num_pixels_by_intensity', num_pixels_by_intensity)

# Initial inverse response: identity mapping from intensity to irradiance.
inv_resp = np.arange(num_pixels_by_intensity.shape[0], dtype=np.float64)
inv_resp[-1] = -1.0 # Use negative numbers to detect saturation
|
||||
|
||||
|
||||
def opt_irradiance():
    """Solve for the per-pixel irradiance given the current inverse response.

    Weighted least squares over all frames (weights are squared exposures);
    saturated samples (negative inverse-response values) are excluded, and
    pixels with no valid samples are marked with -1.
    """
    weighted = inv_resp[imgs] * exposures[:, np.newaxis, np.newaxis]
    weights = np.ones_like(weighted) * (exposures**2)[:, np.newaxis, np.newaxis]

    # Mask out saturated samples (flagged by negative inverse response).
    saturated = weighted < 0
    weights[saturated] = 0
    weighted[saturated] = 0

    weight_sum = np.sum(weights, axis=0)
    valid = (weight_sum != 0)
    irr = np.sum(weighted, axis=0)
    irr[valid] /= weight_sum[valid]
    irr[weight_sum == 0] = -1.0
    return irr
|
||||
|
||||
def opt_inv_resp():
    """Solve for the inverse response given the current irradiance image.

    For every intensity level, the new value is the mean predicted
    irradiance*exposure over all valid (non-saturated) samples; levels with
    no valid samples keep their previous value.
    """
    generated_imgs = irradiance[np.newaxis, :, :] * exposures[:, np.newaxis, np.newaxis]

    # Per-intensity count of valid samples (saturated samples weigh 0).
    # Renamed from num_pixels_by_intensity, which shadowed the global.
    valid_count_by_intensity = np.bincount(imgs.flat, generated_imgs.flat >= 0)

    generated_imgs[generated_imgs < 0] = 0
    sum_by_intensity = np.bincount(imgs.flat, generated_imgs.flat)

    # Copy: the original aliased the global inv_resp and mutated it in
    # place before the caller even rebound it.
    new_inv_resp = inv_resp.copy()

    idx = np.nonzero(valid_count_by_intensity > 0)
    new_inv_resp[idx] = sum_by_intensity[idx] / valid_count_by_intensity[idx]
    new_inv_resp[-1] = -1.0 # Use negative numbers to detect saturation
    return new_inv_resp
|
||||
|
||||
def print_error():
    """Print the current squared reconstruction error over all frames."""
    residual = irradiance[np.newaxis, :, :] * exposures[:, np.newaxis, np.newaxis]
    residual -= inv_resp[imgs]
    # Saturated pixels carry no information; exclude them from the error.
    residual[imgs == 255] = 0
    print('Error', np.sum(residual**2))
|
||||
|
||||
# Alternating optimization: irradiance and inverse response each have a
# closed-form solution while the other is held fixed.
# (Loop variable renamed from `iter`, which shadowed the builtin.)
for iteration in range(5):
    print('Iteration', iteration)
    irradiance = opt_irradiance()
    print_error()
    inv_resp = opt_inv_resp()
    print_error()

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(inv_resp[:-1])  # drop the saturation marker stored at the last bin
ax1.set(xlabel='Image Intensity', ylabel='Irradiance Value')
ax1.set_title('Inverse Response Function')

ax2.imshow(irradiance)
ax2.set_title('Irradiance Image')
plt.show()
|
||||
|
||||
|
||||
96
scripts/basalt_verify_dataset.py
Executable file
96
scripts/basalt_verify_dataset.py
Executable file
@@ -0,0 +1,96 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import sys
|
||||
import math
|
||||
import os
|
||||
import argparse
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Check the dataset. Report if any images are missing.')
parser.add_argument('-d', '--dataset-path', required=True, help="Path to the dataset in Euroc format")
args = parser.parse_args()

dataset_path = args.dataset_path

print(dataset_path)

# sensor name -> int64 array of message timestamps [ns].
# (Removed dead `exposures = {}`: it was never read or written anywhere.)
timestamps = {}

for sensor in ['cam0', 'cam1', 'imu0']:
    data = np.loadtxt(dataset_path + '/mav0/' + sensor + '/data.csv', usecols=[0], delimiter=',', dtype=np.int64)
    timestamps[sensor] = data
|
||||
|
||||
# check if dataset is OK...
# Print timing statistics per sensor and flag implausibly small gaps.
for sensor_name, stamps in timestamps.items():
    t_sec = stamps * 1e-9  # ns -> s
    t_first = t_sec.min()
    t_last = t_sec.max()
    span = t_last - t_first
    gaps = np.diff(t_sec)
    print('==========================================')
    print('sensor', sensor_name)
    print('min timestamp', t_first)
    print('max timestamp', t_last)
    print('interval', span)
    print('hz', t_sec.shape[0] / span)
    print('min time between consecutive msgs', gaps.min())
    print('max time between consecutive msgs', gaps.max())
    for i, gap in enumerate(gaps):
        # Note: 0.001 is just a hacky heuristic, since we have nothing faster than 1000Hz. Should maybe be topic-specific.
        if gap < 0.001:
            print("ERROR: Difference on consecutive measurements too small: {} - {} = {}".format(t_sec[i + 1], t_sec[i],
                                                                                                 gap) + ' in sensor ' + sensor_name)
|
||||
|
||||
# check if we have all images for timestamps
timestamp_to_topic = {}

for cam_name, stamps in timestamps.items():
    if not cam_name.startswith('cam'):
        continue
    for stamp in stamps:
        # setdefault replaces the explicit "missing key -> new list" dance.
        timestamp_to_topic.setdefault(stamp, []).append(cam_name)

# Every camera timestamp should appear in exactly two camera streams.
for stamp, topics in timestamp_to_topic.items():
    if len(topics) != 2:
        print('timestamp', stamp, 'has topics', topics)
|
||||
|
||||
# check image data.
# Every camera timestamp must have an image file with one of the known
# extensions, and (if exposure.csv exists) a matching exposure row.
img_extensions = ['.png', '.jpg', '.webp']
for key, value in timestamps.items():
    if not key.startswith('cam'):
        continue
    for v in value:
        # The original computed `path` and then ignored it; reuse it.
        path = dataset_path + '/mav0/' + key + '/data/' + str(v)
        img_exists = any(os.path.exists(path + e) for e in img_extensions)

        if not img_exists:
            print('No image data for ' + key + ' at timestamp ' + str(v))

    exposure_file = dataset_path + '/mav0/' + key + '/exposure.csv'
    if not os.path.exists(exposure_file):
        print('No exposure data for ' + key)
        continue

    # NOTE(review): assumes exposure.csv has at least 2 rows so loadtxt
    # yields a 2-D array — confirm for single-row files.
    exposure_data = np.loadtxt(exposure_file, delimiter=',', dtype=np.int64)
    for v in value:
        idx = np.searchsorted(exposure_data[:, 0], v)
        # searchsorted returns len(exposure_data) when v is past the last
        # entry; guard before indexing to avoid an IndexError.
        if idx >= exposure_data.shape[0] or exposure_data[idx, 0] != v:
            print('No exposure data for ' + key + ' at timestamp ' + str(v))
|
||||
135
scripts/batch/generate-batch-configs.py
Executable file
135
scripts/batch/generate-batch-configs.py
Executable file
@@ -0,0 +1,135 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
#
|
||||
# Generate basalt configurations from a batch config file.
|
||||
#
|
||||
# Example:
|
||||
# ./generate-batch-config.py /path/to/folder
|
||||
#
|
||||
# It looks for the file named `basalt_batch_config.toml` inside the given folder.
|
||||
|
||||
import os
|
||||
import toml
|
||||
import json
|
||||
import argparse
|
||||
from pprint import pprint
|
||||
from copy import deepcopy
|
||||
from collections import OrderedDict
|
||||
import itertools
|
||||
import shutil
|
||||
import datetime
|
||||
import sys
|
||||
|
||||
|
||||
def isdict(o):
    """Return True if *o* is a dict.

    OrderedDict is a dict subclass, so the original
    `isinstance(o, dict) or isinstance(o, OrderedDict)` was redundant.
    """
    return isinstance(o, dict)


def merge_config(a, b):
    """Recursively merge *b* into *a* (in place).

    Nested dicts are merged key by key; non-dict values from b overwrite
    those in a (deep-copied, so a never aliases b's values).

    Raises:
        RuntimeError: if a key maps to a dict on one side but not the other.
    """
    for k, v in b.items():
        if k in a:
            if isdict(v) and isdict(a[k]):
                merge_config(a[k], b[k])
            elif not isdict(v) and not isdict(a[k]):
                a[k] = deepcopy(v)
            else:
                raise RuntimeError("Incompatible types for key {}".format(k))
        else:
            a[k] = deepcopy(v)
|
||||
|
||||
|
||||
def save_config(template, configs, combination, path_prefix):
    """Write one merged JSON config for the given override combination.

    Starts from a deep copy of *template* and applies each named override
    from *configs* in order, then dumps the result to
    basalt_config_<name1>_<name2>....json under *path_prefix*.
    """
    out_path = os.path.join(path_prefix, "basalt_config_{}.json".format("_".join(combination)))
    merged = deepcopy(template)
    for override in combination:
        merge_config(merged, configs[override])
    with open(out_path, 'w') as f:
        json.dump(merged, f, indent=4)
    print(out_path)
|
||||
|
||||
|
||||
def generate_configs(root_path, cmdline=[], overwrite_existing=False, revision_override=None):
    """Generate one basalt JSON config per combination defined in
    `basalt_batch_config.toml` inside *root_path*.

    The toml file doubles as the config template: its `_batch` table holds
    the batch metadata (revision, named config overrides, alternatives,
    combinations) and is removed from the template before merging.

    NOTE(review): `cmdline=[]` is a mutable default; it is never mutated
    here and callers pass a str, but consider `cmdline=""` instead.
    """
    # load and parse batch config file
    batch_config_path = os.path.join(root_path, "basalt_batch_config.toml")
    template = toml.load(batch_config_path, OrderedDict)
    cfg = template["_batch"]
    del template["_batch"]

    # parse batch configuration
    revision = str(cfg.get("revision", 0)) if revision_override is None else revision_override
    configs = cfg["config"]
    alternatives = cfg.get("alternatives", dict())
    combinations = cfg["combinations"]

    # prepare output directory
    date_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # NOTE(review): `revision` is never None at this point (str(...) above),
    # so the revision subdirectory is always used — confirm intent.
    outdir = root_path if revision is None else os.path.join(root_path, revision)
    if overwrite_existing and os.path.exists(outdir):
        print("WARNING: output directory exists, overwriting existing files: {}".format(outdir))
    else:
        os.makedirs(outdir)
    # Record provenance next to the generated configs.
    shutil.copy(batch_config_path, outdir)
    with open(os.path.join(outdir, "timestamp"), 'w') as f:
        f.write(date_str)
    with open(os.path.join(outdir, "commandline"), 'w') as f:
        f.write(cmdline)

    # expand single entry in combination array
    def expand_one(x):
        if x in alternatives:
            return alternatives[x]
        elif isinstance(x, list):
            # allow "inline" alternative
            return x
        else:
            return [x]

    def flatten(l):
        # Recursively flatten nested lists into a flat iterator.
        for el in l:
            if isinstance(el, list):
                yield from flatten(el)
            else:
                yield el

    # generate all configurations
    for name, description in combinations.items():
        # NOTE(review): `True or ...` makes this branch unconditional; the
        # single-combination special case below is effectively disabled.
        if True or len(combinations) > 1:
            path_prefix = os.path.join(outdir, name)
            if not (overwrite_existing and os.path.exists(path_prefix)):
                os.mkdir(path_prefix)
        else:
            path_prefix = outdir
        # Cartesian product over all expanded alternatives.
        expanded = [expand_one(x) for x in description]
        for comb in itertools.product(*expanded):
            # flatten list to allow each alternative to reference multiple configs
            comb = list(flatten(comb))
            save_config(template, configs, comb, path_prefix)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments and generate the batch configs."""
    cmdline = str(sys.argv)
    parser = argparse.ArgumentParser("Generate basalt configurations from a batch config file.")
    parser.add_argument("path", help="path to look for config and templates")
    parser.add_argument("--revision", help="override revision")
    parser.add_argument("--force", "-f", action="store_true", help="overwrite existing files")
    opts = parser.parse_args()
    generate_configs(opts.path, cmdline, opts.force, opts.revision)


if __name__ == "__main__":
    main()
|
||||
26
scripts/batch/generate-tables.py
Executable file
26
scripts/batch/generate-tables.py
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
# Dependencies:
|
||||
# pip3 install -U --user py_ubjson matplotlib numpy munch scipy pylatex toml
|
||||
|
||||
# also: latexmk and latex
|
||||
#
|
||||
# Ubuntu:
|
||||
# sudo apt install texlive-latex-extra latexmk
|
||||
import os
import sys

# Make the repository's python/ folder importable regardless of the CWD
# this script is launched from (it lives two levels above scripts/batch/).
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "python")))

import basalt.generate_tables

# Thin launcher: all logic lives in the basalt.generate_tables module.
basalt.generate_tables.main()
|
||||
139
scripts/batch/list-jobs.sh
Executable file
139
scripts/batch/list-jobs.sh
Executable file
@@ -0,0 +1,139 @@
|
||||
#!/usr/bin/env bash
|
||||
##
|
||||
## BSD 3-Clause License
|
||||
##
|
||||
## This file is part of the Basalt project.
|
||||
## https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
##
|
||||
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
## All rights reserved.
|
||||
##
|
||||
|
||||
#
|
||||
# Usage:
|
||||
# list-jobs.sh DIRNAME [DIRNAME ...] [-s|--short] [-o|--only STATUS]
|
||||
#
|
||||
# Lists all batch jobs found in DIRNAME. If the optional argument
|
||||
# STATUS is passed, only lists jobs with that status. Multiple
|
||||
# statuses can be passed in a space-separated string.
|
||||
#
|
||||
# Possible status arguments: queued, running, completed, failed, unknown
|
||||
# You can also use 'active' as a synonym for 'queued running unknown'
|
||||
|
||||
# exit on error
|
||||
set -o errexit -o pipefail
|
||||
|
||||
|
||||
# we need GNU getopt...
|
||||
GETOPT=getopt
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
if [ -f /usr/local/opt/gnu-getopt/bin/getopt ]; then
|
||||
GETOPT="/usr/local/opt/gnu-getopt/bin/getopt"
|
||||
fi
|
||||
fi
|
||||
|
||||
# option parsing, see: https://stackoverflow.com/a/29754866/1813258
|
||||
usage() { echo "Usage: `basename $0` DIRNAME [DIRNAME ...] [-s|--short] [-o|--only STATUS]" ; exit 1; }
|
||||
|
||||
# -allow a command to fail with !’s side effect on errexit
|
||||
# -use return value from ${PIPESTATUS[0]}, because ! hosed $?
|
||||
! "$GETOPT" --test > /dev/null
|
||||
if [[ ${PIPESTATUS[0]} -ne 4 ]]; then
|
||||
echo 'I’m sorry, `getopt --test` failed in this environment.'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
OPTIONS=hsjo:
|
||||
LONGOPTS=help,short,jobids,only:
|
||||
|
||||
# -regarding ! and PIPESTATUS see above
|
||||
# -temporarily store output to be able to check for errors
|
||||
# -activate quoting/enhanced mode (e.g. by writing out “--options”)
|
||||
# -pass arguments only via -- "$@" to separate them correctly
|
||||
! PARSED=$("$GETOPT" --options=$OPTIONS --longoptions=$LONGOPTS --name "`basename $0`" -- "$@")
|
||||
if [[ ${PIPESTATUS[0]} -ne 0 ]]; then
|
||||
# e.g. return value is 1
|
||||
# then getopt has complained about wrong arguments to stdout
|
||||
usage
|
||||
fi
|
||||
# read getopt’s output this way to handle the quoting right:
|
||||
eval set -- "$PARSED"
|
||||
|
||||
SHORT=n
|
||||
ONLY=""
|
||||
JOBIDS=n
|
||||
# now enjoy the options in order and nicely split until we see --
|
||||
while true; do
|
||||
case "$1" in
|
||||
-h|--help) usage ;;
|
||||
-s|--short) SHORT=y; shift ;;
|
||||
-j|--jobids) JOBIDS=y; shift ;;
|
||||
-o|--only) ONLY="$2"; shift 2 ;;
|
||||
--) shift; break ;;
|
||||
*) echo "Programming error"; exit 3 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# handle non-option arguments --> directories
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo "Error: Pass at least one folder"
|
||||
usage
|
||||
fi
|
||||
DIRS=("$@")
|
||||
|
||||
# status aliases:
|
||||
ONLY="${ONLY/active/queued running unknown}"
|
||||
ONLY="${ONLY/notcompleted/queued running failed unknown}"
|
||||
|
||||
# contains HAYSTACK NEEDLE
# Succeed (exit 0) iff NEEDLE occurs as a whole space-separated word
# inside HAYSTACK.
contains() {
    if [[ $1 =~ (^| )$2($| ) ]]; then
        return 0
    else
        return 1
    fi
}
|
||||
|
||||
# display DIR STATUS [DETAIL]
# Print one line for a job, honoring the --only status filter ($ONLY) and
# the --short output format ($SHORT).
display() {
    # show the job only when no filter is set or its status matches
    if [ -z "$ONLY" ] || contains "$ONLY" $2; then
        if [ $SHORT = y ]; then
            echo "$1"
        else
            echo -n "$1 : $2"
            if [ -n "$3" ]; then
                echo -n " - $3"
            fi
            echo ""
        fi
    fi
}
|
||||
|
||||
# Classify every job folder (identified by its status.log) in each DIR.
for d in "${DIRS[@]}"; do
    for f in `find "$d" -name status.log | sort`; do
        DIR=`dirname "$f"`

        # ignore backup folder from "rerun" scripts
        if [[ `basename $DIR` = results-backup* ]]; then
            continue
        fi

        if ! grep Started "$f" > /dev/null; then
            display "$DIR" unknown "not started"
            continue
        fi

        # job has started:

        if grep Completed "$f" > /dev/null ; then
            display "$DIR" completed ""
            continue
        fi

        # job has started, but not completed (cleanly)

        # check signs of termination
        if [ -f "$DIR"/output.log ] && grep "Command terminated by signal" "$DIR"/output.log > /dev/null; then
            # NOTE(review): display uses only $1..$3; the 4th argument (the
            # extracted signal text) is ignored here — possibly it was meant
            # to be the DETAIL argument instead of "killed". Confirm.
            display "$DIR" failed killed "`grep -oP 'Command terminated by \Ksignal .+' "$DIR"/output.log`"
            continue
        fi

        # might be running or aborted
        display "$DIR" unknown started

    done
done
|
||||
19
scripts/batch/plot.py
Executable file
19
scripts/batch/plot.py
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env python3
#
# BSD 3-Clause License
#
# This file is part of the Basalt project.
# https://gitlab.com/VladyslavUsenko/basalt.git
#
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
# All rights reserved.
#

import os
import sys

# Make the repository's python/ folder importable regardless of the CWD
# this script is launched from (it lives two levels above scripts/batch/).
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "python")))

import basalt.nullspace

# Thin launcher: all logic lives in the basalt.nullspace module.
basalt.nullspace.main()
|
||||
100
scripts/batch/query-config.py
Executable file
100
scripts/batch/query-config.py
Executable file
@@ -0,0 +1,100 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
#
|
||||
# Example usage:
|
||||
# $ ./query-config.py path/to/basalt_config.json value0.\"config.vio_debug\"
|
||||
# 10G
|
||||
|
||||
import json
|
||||
import toml
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
|
||||
def parse_query(query):
    """Split a dotted query path into its components.

    Components may be quoted with single or double quotes to allow literal
    dots inside a key, e.g. 'value0."config.vio_debug"' yields
    ['value0', 'config.vio_debug'].
    """
    parts = []
    open_quote = None
    token = ""
    for ch in query:
        if open_quote:
            # Inside a quoted section: only the matching quote ends it.
            if ch == open_quote:
                open_quote = None
            else:
                token += ch
        elif ch in ('"', "'"):
            open_quote = ch
        elif ch == '.':
            # Unquoted dot: finish the current component.
            parts.append(token)
            token = ""
        else:
            token += ch
    parts.append(token)
    return parts
|
||||
|
||||
|
||||
def query_config(path, query, default_value=None, format_env=False, format_cli=False):
    """Load the JSON config at *path* and return the value at *query*.

    Falls back to *default_value* (or "") when the query path does not
    resolve. Dict results are rendered as env-var lines, CLI arguments, or
    TOML depending on the format flags; anything else is str()-formatted.
    """
    query_list = parse_query(query)
    with open(path) as f:
        cfg = json.load(f)
    try:
        curr = cfg
        for q in query_list:
            curr = curr[q]
        result = curr
    except (KeyError, IndexError, TypeError):
        # Query did not resolve: missing key, bad index, or indexing into a
        # scalar. Narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        if default_value is None:
            result = ""
        else:
            result = default_value
    if isinstance(result, dict):
        if format_env:
            lines = []
            for k, v in result.items():
                # NOTE: assumes no special escaping is necessary
                lines.append("{}='{}'".format(k, v))
            return "\n".join(lines)
        elif format_cli:
            args = ["--{} {}".format(k, v) for k, v in result.items()]
            return " ".join(args)
        else:
            result = toml.dumps(result)
    else:
        result = "{}".format(result)
    return result
|
||||
|
||||
|
||||
def main():
    """CLI entry point: query a JSON config file and print the result."""
    parser = argparse.ArgumentParser("Parse toml file and print content of query key.")
    parser.add_argument("config_path", help="path to toml file")
    parser.add_argument("query", help="query string")
    parser.add_argument("default_value", help="value printed if query is not successful", nargs='?')
    parser.add_argument(
        "--format-env",
        action="store_true",
        help="Expect dictionary as query result and output like environment variables, i.e. VAR='VALUE' lines.")
    parser.add_argument("--format-cli",
                        action="store_true",
                        help="Expect dictionary as query result and output like cli arguments, i.e. --VAR 'VALUE'.")
    opts = parser.parse_args()

    print(query_config(opts.config_path,
                       opts.query,
                       default_value=opts.default_value,
                       format_env=opts.format_env,
                       format_cli=opts.format_cli))


if __name__ == "__main__":
    main()
|
||||
25
scripts/batch/rerun-failed-in.sh
Executable file
25
scripts/batch/rerun-failed-in.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

#
# Usage:
#    rerun-failed-in.sh FOLDER
#
# Reruns all failed experiments that are found in a given folder.

set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# -s prints just the folder paths; -o failed selects only failed jobs.
# `|| true` keeps the loop going even if an individual rerun fails again.
"$SCRIPT_DIR"/list-jobs.sh "$1" -s -o failed | while read f; do
    echo "$f"
    "$SCRIPT_DIR"/rerun-one-in.sh "$f" || true
done
|
||||
35
scripts/batch/rerun-one-in.sh
Executable file
35
scripts/batch/rerun-one-in.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

#
# Usage:
#    rerun-one-in.sh FOLDER
#
# Moves the previous run's outputs into a timestamped backup folder inside
# FOLDER, resets status.log, and restarts the job via run-one.sh.

set -e
set -x

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

FOLDER="${1}"

cd "$FOLDER"

# backup previous files
DATE=`date +'%Y%m%d-%H%M%S'`
BACKUP_FOLDER=results-backup-$DATE
for f in *.jobid *.log stats*.*json; do
    # Quote "$f": unquoted it breaks on filenames with spaces, and `[ -f `
    # errors out when a glob expands to multiple words.
    if [ -f "$f" ]; then
        mkdir -p "$BACKUP_FOLDER"
        mv "$f" "$BACKUP_FOLDER"/
    fi
done

echo "Created" > status.log
echo "Restarted" >> status.log

echo "Starting run in $PWD"
"$SCRIPT_DIR"/run-one.sh "$PWD"
|
||||
60
scripts/batch/run-all-in.sh
Executable file
60
scripts/batch/run-all-in.sh
Executable file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##


# given folder with basalt_config_*.json, run optimization for each config in
# corresponding subfolder

set -e
set -x

# number of logical cores on linux and macos
NUM_CORES=`(which nproc > /dev/null && nproc) || sysctl -n hw.logicalcpu || echo 1`

echo "Running on '`hostname`', nproc: $NUM_CORES"

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# loop over all arguments, and in each folder find configs and run them
for FOLDER in "$@"
do

    pushd "$FOLDER"

    FILE_PATTERN='basalt_config_*.json'
    FILE_REGEX='basalt_config_(.*)\.json'

    # one timestamped output folder per invocation
    DATE=`date +'%Y%m%d-%H%M%S'`
    mkdir -p "$DATE"

    declare -a RUN_DIRS=()

    # NOTE(review): the backtick `find` loop itself word-splits on spaces in
    # paths; the added quotes below fix the downstream commands, but paths
    # containing whitespace would need a while-read restructure.
    for f in `find . -name "$FILE_PATTERN" -type f | sort`; do
        # Quote "$f" and derived paths (fixes word-splitting of the
        # previously unquoted basename/dirname/cp arguments).
        if [[ `basename "$f"` =~ $FILE_REGEX ]]; then
            RUN_DIR=${DATE}/`dirname "$f"`/${BASH_REMATCH[1]}
            echo "Creating run with config $f in $RUN_DIR"
            mkdir -p "$RUN_DIR"
            cp "$f" "$RUN_DIR"/basalt_config.json
            echo "Created" > "$RUN_DIR"/status.log
            RUN_DIRS+=("$RUN_DIR")
        else
            echo "Skipping $f"
        fi
    done

    # Run sequentially; `|| true` keeps going after a failed run.
    for RUN_DIR in "${RUN_DIRS[@]}"; do
        echo "Starting run in $RUN_DIR"
        "$SCRIPT_DIR"/run-one.sh "$RUN_DIR" || true
    done

    popd

done
|
||||
83
scripts/batch/run-one.sh
Executable file
83
scripts/batch/run-one.sh
Executable file
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

#
# Run the configured basalt executable for a single prepared run folder
# (created by run-all-in.sh): reads batch_run.* settings from the folder's
# basalt_config.json, executes the binary under 'time', and records
# progress in status.log / output.log.

set -e
set -o pipefail
set -x

# Error handler invoked via the ERR trap below: logs the failing line,
# marks the run as "Failed" in status.log, and exits with the given code.
error() {
    local parent_lineno="$1"
    local message="$2"
    local code="${3:-1}"
    if [[ -n "$message" ]] ; then
        echo "Error on or near line ${parent_lineno}: ${message}; exiting with status ${code}"
    else
        echo "Error on or near line ${parent_lineno}; exiting with status ${code}"
    fi
    echo "Failed" >> status.log
    exit "${code}"
}
trap 'error ${LINENO}' ERR

# number of logical cores on linux and macos
NUM_CORES=`(which nproc > /dev/null && nproc) || sysctl -n hw.logicalcpu || echo 1`

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Location of the built basalt binaries; overridable via the environment.
BASALT_BIN_DIR="${BASALT_BIN_DIR:-$SCRIPT_DIR/../../build}"

# The run folder (containing basalt_config.json) is the only argument.
FOLDER="${1}"

cd "$FOLDER"

# GNU/BSD 'time' is required to capture resource usage of the run.
if ! which time 2> /dev/null; then
    echo "Did not find 'time' executable. Not installed?"
    exit 1
fi

# BSD time (macOS) uses -lp; GNU time (Linux) uses -v for verbose stats.
if [[ "$OSTYPE" == "darwin"* ]]; then
    TIMECMD="`which time` -lp"
else
    TIMECMD="`which time` -v"
fi

echo "Started" >> status.log

# set environment variables according to config
while read l; do
    if [ -n "$l" ]; then
        eval "export $l"
    fi
done <<< `"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.env --format-env`

# lookup executable to run
EXECUTABLE=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.executable basalt_vio`

# lookup args
ARGS=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.args --format-cli`

CMD="$BASALT_BIN_DIR/$EXECUTABLE"

echo "Running on '`hostname`', nproc: $NUM_CORES, bin: $CMD"

# run as many times as specified (for timing tests to make sure filecache is hot); default is once
rm -f output.log
NUM_RUNS=`"$SCRIPT_DIR"/query-config.py basalt_config.json batch_run.num_runs 1`
echo "Will run $NUM_RUNS times."
for i in $(seq $NUM_RUNS); do
    echo ">>> Run $i" |& tee -a output.log
    { $TIMECMD "$CMD" $ARGS; } |& tee -a output.log
done

echo "Completed" >> status.log
|
||||
52
scripts/clang-format-all.sh
Executable file
52
scripts/clang-format-all.sh
Executable file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Format all source files in the project.
# Optionally take folder as argument; default is full include and src dirs.

set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Directories (or a user-supplied folder) whose C++ sources get formatted.
FOLDER="${1:-$SCRIPT_DIR/../include $SCRIPT_DIR/../src $SCRIPT_DIR/../test/src}"

# Candidate binaries, newest first; the first one found on PATH is used.
CLANG_FORMAT_COMMANDS="clang-format-11 clang-format-10 clang-format-9 clang-format"

# find the first available command:
for CMD in $CLANG_FORMAT_COMMANDS; do
    if hash $CMD 2>/dev/null; then
        CLANG_FORMAT_CMD=$CMD
        break
    fi
done

if [ -z $CLANG_FORMAT_CMD ]; then
    echo "clang-format not installed..."
    exit 1
fi

# clang format check version
MAJOR_VERSION_NEEDED=8

# Extract the major version number from the '-version' output.
MAJOR_VERSION_DETECTED=`$CLANG_FORMAT_CMD -version | sed -n -E 's/.*version ([0-9]+).*/\1/p'`
if [ -z $MAJOR_VERSION_DETECTED ]; then
    echo "Failed to parse major version (`$CLANG_FORMAT_CMD -version`)"
    exit 1
fi

echo "clang-format version $MAJOR_VERSION_DETECTED (`$CLANG_FORMAT_CMD -version`)"

if [ $MAJOR_VERSION_DETECTED -lt $MAJOR_VERSION_NEEDED ]; then
    echo "Looks like your clang format is too old; need at least version $MAJOR_VERSION_NEEDED"
    exit 1
fi

# Format .cpp/.hpp/.ipp/... and .h files in place.
find $FOLDER -iname "*.?pp" -or -iname "*.h" | xargs $CLANG_FORMAT_CMD -verbose -i
|
||||
106
scripts/compare_calib.py
Executable file
106
scripts/compare_calib.py
Executable file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import numpy as np
|
||||
from scipy.spatial.transform import Rotation
|
||||
|
||||
|
||||
def print_abs_rel(info, v_0, v_1):
    """Print the absolute difference between two values and, when meaningful,
    the difference relative to the magnitude of ``v_0`` in percent.

    Args:
        info: Label printed before the values.
        v_0: Reference value (scalar or array-like accepted by numpy).
        v_1: Value compared against the reference.
    """
    diff = np.abs(np.linalg.norm(v_0 - v_1))
    out = f'{info}:\t{diff:.5f}'

    if diff < 10e-7:
        # Difference is numerically zero; avoid a meaningless relative value.
        out += ' (0.0%)'
    else:
        # Bug fix: the original computed diff / (norm * 100), i.e. it divided
        # by 100 instead of multiplying, printing values 10^4 times too small.
        # The relative error in percent is diff / norm * 100.
        out += f' ({diff / np.abs(np.linalg.norm(v_0)) * 100.0:.7f}%)'

    print(out)
|
||||
|
||||
|
||||
def main(calib_path_1, calib_path_2):
    """Load two Basalt calibration JSON files and print absolute and relative
    differences of their extrinsics, intrinsics, IMU biases, noise standard
    deviations, and camera time offset.

    Args:
        calib_path_1: Path to the first calibration JSON file.
        calib_path_2: Path to the second calibration JSON file.
    """
    with open(calib_path_1, 'r') as c_1, open(calib_path_2, 'r') as c_2:
        calib0 = json.load(c_1)
        calib1 = json.load(c_2)

    for i, (t_imu_cam_0, t_imu_cam_1) in enumerate(
            zip(calib0['value0']['T_imu_cam'], calib1['value0']['T_imu_cam'])):
        print(f'\nCamera {i} transformation differences')
        # Each T_imu_cam entry stores {px, py, pz, qx, qy, qz, qw}: indices
        # [0:3] are the translation, [3:7] the quaternion (x, y, z, w order,
        # matching scipy's from_quat convention).
        # Bug fix: the original sliced [0:2], silently dropping the z
        # component of the translation.
        t_0 = np.array(list(t_imu_cam_0.values())[0:3])
        t_1 = np.array(list(t_imu_cam_1.values())[0:3])
        # Bug fix: Rotation objects must be constructed via from_quat();
        # calling Rotation(...) directly is not part of the public scipy API.
        r_0 = Rotation.from_quat(list(t_imu_cam_0.values())[3:7])
        r_1 = Rotation.from_quat(list(t_imu_cam_1.values())[3:7])

        print_abs_rel('Transformation', t_0, t_1)
        print_abs_rel('Rotation', r_0.as_rotvec(), r_1.as_rotvec())

    for i, (intrinsics0, intrinsics1) in enumerate(
            zip(calib0['value0']['intrinsics'], calib1['value0']['intrinsics'])):
        print(f'\nCamera {i} intrinsics differences')

        for (k_0, v_0), (_, v_1) in zip(
                intrinsics0['intrinsics'].items(), intrinsics1['intrinsics'].items()):
            print_abs_rel(f'Difference for {k_0}', v_0, v_1)

    # calib_accel_bias holds 3 bias entries followed by 6 scale/misalignment
    # entries. Bug fix: the bias slice was [0:2], dropping the z component.
    print_abs_rel('\nAccel Bias Difference',
                  np.array(calib0['value0']['calib_accel_bias'][0:3]),
                  np.array(calib1['value0']['calib_accel_bias'][0:3]))

    print_abs_rel('Accel Scale Difference',
                  np.array(calib0['value0']['calib_accel_bias'][3:9]),
                  np.array(calib1['value0']['calib_accel_bias'][3:9]))

    # calib_gyro_bias holds 3 bias entries followed by 9 scale/misalignment
    # entries. Bug fix: same off-by-one bias slice as the accel bias above.
    print_abs_rel('Gyro Bias Difference',
                  np.array(calib0['value0']['calib_gyro_bias'][0:3]),
                  np.array(calib1['value0']['calib_gyro_bias'][0:3]))

    print_abs_rel('Gyro Scale Difference',
                  np.array(calib0['value0']['calib_gyro_bias'][3:12]),
                  np.array(calib1['value0']['calib_gyro_bias'][3:12]))

    print_abs_rel(
        '\nAccel Noise Std Difference',
        calib0['value0']['accel_noise_std'],
        calib1['value0']['accel_noise_std'])
    print_abs_rel(
        'Gyro Noise Std Difference',
        calib0['value0']['gyro_noise_std'],
        calib1['value0']['gyro_noise_std'])
    print_abs_rel(
        'Accel Bias Std Difference',
        calib0['value0']['accel_bias_std'],
        calib1['value0']['accel_bias_std'])
    print_abs_rel(
        'Gyro Bias Std Difference',
        calib0['value0']['gyro_bias_std'],
        calib1['value0']['gyro_bias_std'])

    # Bug fix: the original compared calib0 against itself here, so the
    # reported time offset difference was always zero.
    print_abs_rel(
        '\nCam Time Offset Difference',
        calib0['value0']['cam_time_offset_ns'],
        calib1['value0']['cam_time_offset_ns'])
|
||||
|
||||
|
||||
def create_parser():
    """Build the CLI parser taking the two positional calibration file paths."""
    cli = argparse.ArgumentParser()
    for positional in ('calib_path_1', 'calib_path_2'):
        cli.add_argument(positional)
    return cli
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Entry point: parse the two calibration paths and run the comparison.
    cli_args = create_parser().parse_args()
    main(cli_args.calib_path_1, cli_args.calib_path_2)
|
||||
107
scripts/eval_full/gen_results.py
Executable file
107
scripts/eval_full/gen_results.py
Executable file
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
# Evaluated EuRoC sequences; the leading 'Seq.' entry is the table row header.
datasets = ['Seq.', 'MH_01_easy', 'MH_02_easy', 'MH_03_medium', 'MH_04_difficult',
            'MH_05_difficult', 'V1_01_easy', 'V1_02_medium',
            'V1_03_difficult', 'V2_01_easy', 'V2_02_medium']


# Other results.


# Result tables: each maps a metric name to a row that starts with its label.
vio = {
    'ate' : ['VIO RMS ATE [m]'],
    'time' : ['VIO Time [s]'],
    'num_frames' : ['VIO Num. Frames']
}

mapping = {
    'ate' : ['MAP RMS ATE [m]'],
    'time' : ['MAP Time [s]'],
    'num_frames' : ['MAP Num. KFs']
}

pose_graph = {
    'ate' : ['PG RMS ATE [m]'],
    'time' : ['PG Time [s]'],
    'num_frames' : ['PG Num. KFs']
}

# Bug fix: these labels were copy-pasted from pose_graph ('PG ...'), which
# made the pure-BA table indistinguishable from the pose-graph table.
pure_ba = {
    'ate' : ['BA RMS ATE [m]'],
    'time' : ['BA Time [s]'],
    'num_frames' : ['BA Num. KFs']
}

# Directory holding the per-sequence result files (first CLI argument).
out_dir = sys.argv[1]

def load_data(x, prefix, key):
    """Append ATE, runtime and frame count from '<out_dir>/<prefix>_<key>'
    to table ``x``; missing files are recorded as Inf so columns stay aligned.
    """
    fname = os.path.join(out_dir, prefix + '_' + key)
    if os.path.isfile(fname):
        with open(fname, 'r') as f:
            j = json.load(f)
        x['ate'].append(float(round(j['rms_ate'], 3)))
        x['time'].append(round(j['exec_time_ns']*1e-9, 3))
        x['num_frames'].append(j['num_frames'])
    else:
        x['ate'].append(float('Inf'))
        x['time'].append(float('Inf'))
        x['num_frames'].append(float('Inf'))


for key in datasets[1:]:
    load_data(vio, 'vio', key)
    load_data(mapping, 'mapper', key)
    load_data(pose_graph, 'mapper_no_weights', key)
    load_data(pure_ba, 'mapper_no_factors', key)


# One wide label column followed by one column per sequence.
row_format = "{:>17}" + "{:>13}" * (len(datasets)-1)

datasets_short = [x[:5] for x in datasets]

print('\nVisual-Inertial Odometry')
print(row_format.format(*datasets_short))

print(row_format.format(*vio['ate']))
#print(row_format.format(*vio['time']))
print(row_format.format(*vio['num_frames']))

print('\nVisual-Inertial Mapping')
print(row_format.format(*datasets_short))

print(row_format.format(*mapping['ate']))
#print(row_format.format(*mapping['time']))
print(row_format.format(*mapping['num_frames']))


print('\nPose-Graph optimization (Identity weights for all factors)')
print(row_format.format(*datasets_short))

print(row_format.format(*pose_graph['ate']))
#print(row_format.format(*pose_graph['time']))
print(row_format.format(*pose_graph['num_frames']))


print('\nPure BA optimization (no factors from the recovery used)')
print(row_format.format(*datasets_short))

print(row_format.format(*pure_ba['ate']))
#print(row_format.format(*pure_ba['time']))
print(row_format.format(*pure_ba['num_frames']))
|
||||
|
||||
|
||||
83
scripts/eval_full/gen_results_kitti.py
Executable file
83
scripts/eval_full/gen_results_kitti.py
Executable file
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
# KITTI odometry sequences; 'Seq.' is the table row header.
datasets = ['Seq.', '00', '02', '03', '04', '05', '06', '07', '08', '09', '10']
# Trajectory segment lengths (meters) at which RPE is evaluated.
lengths = [100, 200, 300, 400, 500, 600, 700, 800]

# Other results.


# Per-length rows of translational / rotational relative pose error.
vo = {'trans_error': {}, 'rot_error': {}}

for seg_len in lengths:
    vo['trans_error'][seg_len] = ['Trans. error [%] ' + str(seg_len) + 'm.']
    vo['rot_error'][seg_len] = ['Rot. error [deg/m] ' + str(seg_len) + 'm.']


# Directory holding the per-sequence result files (first CLI argument).
out_dir = sys.argv[1]

# Accumulators for measurement-weighted mean errors across all sequences.
mean_values = {
    'mean_trans_error' : 0.0,
    'mean_rot_error' : 0.0,
    'total_num_meas' : 0.0
}

def load_data(x, prefix, key, mean_values):
    """Append per-length RPE values from '<out_dir>/<prefix>_<key>.txt' to
    table ``x`` and accumulate weighted error sums into ``mean_values``.
    Missing files are recorded as inf so columns stay aligned.
    """
    fname = out_dir + '/' + prefix + '_' + key + '.txt'
    if not os.path.isfile(fname):
        for seg_len in lengths:
            x['trans_error'][int(seg_len)].append(float('inf'))
            x['rot_error'][int(seg_len)].append(float('inf'))
        return

    with open(fname, 'r') as f:
        res = json.load(f)['results']
    for seg_len in lengths:
        entry = res[str(seg_len)]
        num_meas = entry['num_meas']
        trans_error = entry['trans_error']
        rot_error = entry['rot_error']
        x['trans_error'][int(seg_len)].append(round(trans_error, 5))
        x['rot_error'][int(seg_len)].append(round(rot_error, 5))
        if num_meas > 0:
            # Weight each sequence's error by its number of measurements.
            mean_values['mean_trans_error'] += trans_error*num_meas
            mean_values['mean_rot_error'] += rot_error*num_meas
            mean_values['total_num_meas'] += num_meas

for seq in datasets[1:]:
    load_data(vo, 'rpe', seq, mean_values)


# One wide label column followed by one column per sequence.
row_format = "{:>24}" + "{:>10}" * (len(datasets)-1)

datasets_short = [x[:5] for x in datasets]

print('\nVisual Odometry (Stereo)')
print(row_format.format(*datasets_short))

for seg_len in lengths:
    print(row_format.format(*(vo['trans_error'][seg_len])))

print()

for seg_len in lengths:
    print(row_format.format(*(vo['rot_error'][seg_len])))


print('Mean translation error [%] ', mean_values['mean_trans_error']/mean_values['total_num_meas'])
print('Mean rotation error [deg/m] ', mean_values['mean_rot_error']/mean_values['total_num_meas'])
|
||||
62
scripts/eval_full/gen_results_tumvi.py
Executable file
62
scripts/eval_full/gen_results_tumvi.py
Executable file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
# Evaluated TUM-VI sequences; 'Seq.' is the table row header.
datasets = ['Seq.', 'dataset-corridor1_512_16', 'dataset-magistrale1_512_16', 'dataset-room1_512_16', 'dataset-slides1_512_16']

# Other results.


# VIO result table: each metric maps to a row that starts with its label.
vio = {
    'ate' : ['VIO RMS ATE [m]'],
    'time' : ['VIO Time [s]'],
    'num_frames' : ['VIO Num. Frames']
}

# Directory holding the per-sequence result files (first CLI argument).
out_dir = sys.argv[1]

def load_data(x, prefix, key):
    """Append ATE, runtime and frame count from '<out_dir>/<prefix>_<key>'
    to table ``x``; missing files are recorded as Inf so columns stay aligned.
    """
    fname = out_dir + '/' + prefix + '_' + key
    if not os.path.isfile(fname):
        x['ate'].append(float('Inf'))
        x['time'].append(float('Inf'))
        x['num_frames'].append(float('Inf'))
        return

    with open(fname, 'r') as f:
        j = json.load(f)
    x['ate'].append(float(round(j['rms_ate'], 3)))
    x['time'].append(round(j['exec_time_ns']*1e-9, 3))
    x['num_frames'].append(j['num_frames'])


for seq in datasets[1:]:
    load_data(vio, 'vio', seq)


# One wide label column followed by one column per sequence.
row_format = "{:>17}" + "{:>13}" * (len(datasets)-1)

# Shorten 'dataset-<name>_512_16' to just '<name>' for the header row.
datasets_short = [x[8:].split('_')[0] for x in datasets]

print('\nVisual-Inertial Odometry')
print(row_format.format(*datasets_short))

print(row_format.format(*vio['ate']))
#print(row_format.format(*vio['time']))
print(row_format.format(*vio['num_frames']))
|
||||
|
||||
|
||||
|
||||
|
||||
44
scripts/eval_full/run_evaluations.sh
Executable file
44
scripts/eval_full/run_evaluations.sh
Executable file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Run VIO + mapping evaluations on EuRoC sequences. The sequence to process
# is selected by $CI_NODE_INDEX (1-based), so parallel CI nodes each handle
# a different dataset. Results are written into ./eval_results.

set -e
set -x

DATASET_PATH=/data/euroc

DATASETS=(MH_01_easy MH_02_easy MH_03_medium MH_04_difficult MH_05_difficult V1_01_easy V1_02_medium V1_03_difficult V2_01_easy V2_02_medium)


folder_name=eval_results
mkdir $folder_name



for d in ${DATASETS[$CI_NODE_INDEX-1]}; do
    # VIO pass: saves trajectory and marginalization data for the mapper.
    basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib /usr/etc/basalt/euroc_eucm_calib.json \
        --dataset-type euroc --show-gui 0 --config-path /usr/etc/basalt/euroc_config.json \
        --result-path $folder_name/vio_$d --marg-data eval_tmp_marg_data --save-trajectory tum

    mv trajectory.txt $folder_name/traj_vio_$d.txt

    # Mapping pass with the default config.
    basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config.json --marg-data eval_tmp_marg_data \
        --result-path $folder_name/mapper_$d

    # Mapping pass with identity weights (pose-graph-like optimization).
    basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config_no_weights.json --marg-data eval_tmp_marg_data \
        --result-path $folder_name/mapper_no_weights_$d

    # Mapping pass without recovery factors (pure BA).
    basalt_mapper --show-gui 0 --cam-calib /usr/etc/basalt/euroc_eucm_calib.json --config-path /usr/etc/basalt/euroc_config_no_factors.json --marg-data eval_tmp_marg_data \
        --result-path $folder_name/mapper_no_factors_$d

    # Marginalization data is per-sequence scratch; remove it before the next run.
    rm -rf eval_tmp_marg_data
done

#./gen_results.py $folder_name > euroc_results.txt
|
||||
33
scripts/eval_full/run_evaluations_kitti.sh
Executable file
33
scripts/eval_full/run_evaluations_kitti.sh
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Run stereo visual odometry on KITTI sequences and compute RPE against the
# ground-truth poses. The sequence is selected by $CI_NODE_INDEX (1-based).

set -e
set -x

DATASET_PATH=/data/kitti_odom_grey/sequences

DATASETS=(00 02 03 04 05 06 07 08 09 10)


folder_name=eval_results_kitti
mkdir $folder_name

for d in ${DATASETS[$CI_NODE_INDEX-1]}; do
    echo $d
    # VO pass (no IMU); writes trajectory in KITTI format.
    basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib $DATASET_PATH/$d/basalt_calib.json \
        --dataset-type kitti --show-gui 0 --config-path /usr/etc/basalt/kitti_config.json --result-path $folder_name/vo_$d --save-trajectory kitti --use-imu 0

    mv trajectory_kitti.txt $folder_name/kitti_$d.txt

    # Evaluate relative pose error against the KITTI ground truth.
    basalt_kitti_eval --traj-path $folder_name/kitti_$d.txt --gt-path $DATASET_PATH/$d/poses.txt --result-path $folder_name/rpe_$d.txt

done
|
||||
|
||||
39
scripts/eval_full/run_evaluations_tumvi.sh
Executable file
39
scripts/eval_full/run_evaluations_tumvi.sh
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Run VIO on TUM-VI sequences. The sequence is selected by $CI_NODE_INDEX
# (1-based), so parallel CI nodes each handle a different dataset.

set -e
set -x

DATASET_PATH=/data/tumvi/512_16/

DATASETS=(
dataset-corridor1_512_16
dataset-magistrale1_512_16
dataset-room1_512_16
dataset-slides1_512_16
)


folder_name=eval_results_tumvi
mkdir $folder_name



for d in ${DATASETS[$CI_NODE_INDEX-1]}; do
    # TUM-VI uses the EuRoC dataset layout, hence --dataset-type euroc.
    basalt_vio --dataset-path $DATASET_PATH/$d --cam-calib /usr/etc/basalt/tumvi_512_eucm_calib.json \
        --dataset-type euroc --show-gui 0 --config-path /usr/etc/basalt/tumvi_512_config.json \
        --result-path $folder_name/vio_$d --save-trajectory tum

    mv trajectory.txt $folder_name/${d}_basalt_poses.txt

done

#./gen_results_tumvi.py $folder_name > euroc_tumvi.txt
|
||||
18
scripts/install_deps.sh
Executable file
18
scripts/install_deps.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Dispatch to the platform-specific dependency installer.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

if [[ "$OSTYPE" == "darwin"* ]]; then
    ${DIR}/install_mac_os_deps.sh
else
    # Assumes any non-macOS platform is Ubuntu/apt-based.
    ${DIR}/install_ubuntu_deps.sh
fi
|
||||
26
scripts/install_mac_os_deps.sh
Executable file
26
scripts/install_mac_os_deps.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/sh
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Install build and runtime dependencies on macOS via Homebrew.
# Bug fix: 'lz4' was listed twice in the original package list.
brew install \
    boost \
    opencv \
    cmake \
    pkgconfig \
    lz4 \
    clang-format \
    tbb \
    glew \
    eigen \
    ccache \
    fmt

# llvm is installed separately (keg-only formula).
brew install llvm
|
||||
13
scripts/install_ubuntu_deps.sh
Executable file
13
scripts/install_ubuntu_deps.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Install build and runtime dependencies on Ubuntu via apt.
sudo apt-get update
sudo apt-get install -y gcc g++ cmake git libtbb-dev libeigen3-dev libglew-dev ccache libjpeg-dev libpng-dev liblz4-dev libbz2-dev libboost-regex-dev libboost-filesystem-dev libboost-date-time-dev libboost-program-options-dev libgtest-dev libopencv-dev libfmt-dev
|
||||
7
scripts/templates/license-py-sh.tmpl
Normal file
7
scripts/templates/license-py-sh.tmpl
Normal file
@@ -0,0 +1,7 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
This file is part of the Basalt project.
|
||||
https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
|
||||
Copyright (c) ${years}, ${owner}.
|
||||
All rights reserved.
|
||||
33
scripts/update-license-headers.sh
Executable file
33
scripts/update-license-headers.sh
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Update license headers in source files.

# Dependency: licenseheaders python package (install with pip)

# TODO: Make it also update C++ files automatically. (Consider files with multiple headers, e.g. track.h and union_find.h)

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Directories whose Python/shell files get the header applied.
DIRS=(
    "$SCRIPT_DIR/../python/"
    "$SCRIPT_DIR/../scripts"
)

# Values substituted into the ${years} / ${owner} template placeholders.
YEAR="2019-2021"
OWNER="Vladyslav Usenko and Nikolaus Demmel"
TEMPLATE="$SCRIPT_DIR/templates/license-py-sh.tmpl"

for d in "${DIRS[@]}"
do
    licenseheaders -d "$d" -y $YEAR -o "$OWNER" -t "$TEMPLATE" -vv
done
|
||||
|
||||
16
scripts/update_submodules.sh
Executable file
16
scripts/update_submodules.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##

# Sync and (recursively) initialize/update all git submodules.

set -x

git submodule sync --recursive
git submodule update --init --recursive
|
||||
|
||||
27
scripts/yapf-all.sh
Executable file
27
scripts/yapf-all.sh
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env bash
##
## BSD 3-Clause License
##
## This file is part of the Basalt project.
## https://gitlab.com/VladyslavUsenko/basalt.git
##
## Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
## All rights reserved.
##


# Format all python source files in the project.
# Optionally take folder as argument; defaults are the `python` and
# `scripts/batch` dirs.

set -e

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# default folders if not passed
if [ $# -lt 1 ]; then
    set -- "$SCRIPT_DIR"/../python "$SCRIPT_DIR/batch"
fi

echo "Formatting: $@"

yapf -i -r "$@"
|
||||
Reference in New Issue
Block a user