v01
This commit is contained in:
9
python/basalt/__init__.py
Normal file
9
python/basalt/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
614
python/basalt/experiments.py
Normal file
614
python/basalt/experiments.py
Normal file
@@ -0,0 +1,614 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import hashlib
|
||||
import pickle
|
||||
import toml
|
||||
import argparse
|
||||
|
||||
from string import Template
|
||||
from glob import glob
|
||||
from collections.abc import Mapping
|
||||
from munch import Munch
|
||||
from munch import munchify
|
||||
from copy import deepcopy
|
||||
from collections import abc
|
||||
|
||||
from .run import Run
|
||||
|
||||
from .util import copy_subdict
|
||||
|
||||
_CURRENT_CACHE_VERSION = '1.3'
|
||||
"""cache version that can be incremented to invalidate all cache files in case the format changes"""
|
||||
|
||||
|
||||
def version_less(vstr1, vstr2):
    """Return True if version string `vstr1` orders strictly before `vstr2`.

    Versions are dotted numeric strings in the format a.b.c (e.g. "1.3").
    Components are compared numerically, so "1.9" correctly orders before
    "1.10" (a plain string-list comparison would get this wrong, since
    "10" < "9" lexicographically). A version that is a prefix of a longer
    one orders first ("1.2" < "1.2.1").

    :param vstr1: first version string; components must be integers
    :param vstr2: second version string; components must be integers
    :raises ValueError: if a component is not a valid integer
    """
    return [int(c) for c in vstr1.split(".")] < [int(c) for c in vstr2.split(".")]
|
||||
|
||||
|
||||
def compute_caching_hash(d):
    """Derive a stable hex digest from a dictionary, used as a cache file name.

    The dictionary is serialized to canonical JSON (sorted keys, ASCII-only)
    before hashing, so logically equal dicts always produce the same digest.

    This is intended to be used for experiments cache files.
    """
    canonical = json.dumps(d, sort_keys=True, ensure_ascii=True)
    return hashlib.sha1(canonical.encode('utf8')).hexdigest()
|
||||
|
||||
|
||||
class Experiment:
    """Holds the logs for one experiment: a single odometry config run on a set of sequences

    For one experiment, each sequence may have at most one Run.

    Since for each run we have multiple log files and there may be many runs, we
    cache the loaded configs / output / log files (after preprocessing) into a single
    binary cache file (pickle). This significantly speeds up loading results when
    we have many experiments defined in a single experiments config file.
    """

    def __init__(self,
                 log_dirs,
                 name,
                 display_name=None,
                 description=None,
                 caching_hash=None,
                 spec=None,
                 seq_name_mapping=None,
                 extend=None,
                 extend_override=False):
        """Load an experiment and all its runs from a set of directories

        There may be no duplicate runs of the same sequence.

        :param log_dirs: list of directories to look for runs in
        :param name: experiment name
        :param display_name: optional experiment display name
        :param description: optional experiment description
        :param caching_hash: own caching hash; mostly used to combine this hash with the hash of extending experiments
        :param spec: the config spec for this experiment; mainly informational for informative error messages; the
            functionally relevant information has already been extracted and preprocessed (other arguments)
        :param seq_name_mapping: optional mapping of sequence names; may contain only part of the sequences
        :param extend: optionally provide base experiment whose runs are copied (and possibly extended)
        :param extend_override: if True, sequences in the extended experiment may be replaced, if they are also found in `log_dirs`
        :raises RuntimeError: if the same sequence appears in multiple run directories and may not be overridden
        """

        self.name = name
        self.display_name = display_name
        self.description = description
        self.caching_hash = caching_hash
        self.spec = spec
        # maps sequence name -> Run; at most one run per sequence
        self.runs = dict()

        # when extending, start from deep copies of the base experiment's runs
        # (deepcopy so later mutation of this experiment never affects the base)
        if extend is not None:
            for k, v in extend.runs.items():
                self.runs[k] = deepcopy(v)

        # each inherited sequence may be overridden at most once (see removal below)
        seqs_ok_to_override = set(self.runs.keys()) if extend_override else set()

        for d in log_dirs:
            run = Run(d, seq_name_mapping)
            if run.seq_name in self.runs:
                if run.seq_name in seqs_ok_to_override:
                    seqs_ok_to_override.remove(run.seq_name)  # ok only once
                else:
                    # distinguish the two failure modes for a clearer error message
                    if extend is not None and run.seq_name in extend.runs and not extend_override:
                        raise RuntimeError(
                            str.format(
                                "{} appears both in the extended experiment {} and in the extending "
                                "experiment {} but extend_override is False:\n - {}\n - {}\n", run.seq_name,
                                extend.name, self.name, extend.runs[run.seq_name].dirpath, run.dirpath))
                    else:
                        raise RuntimeError(
                            str.format(
                                "{} appears multiple times in experiment {}:\n - {}\n - {}\n"
                                "Do your experiment pattern(s) '{}' match too many directories? "
                                "Delete the additional runs or narrow the pattern.", run.seq_name, self.name,
                                self.runs[run.seq_name].dirpath, run.dirpath, "', '".join(self.spec["pattern"])))
            self.runs[run.seq_name] = run

    def sequences(self, filter_regex=None):
        """return list of sequence names found for this experiment

        :param filter_regex: if provided, return only the sequences that match the regex
        """
        if filter_regex is None:
            return self.runs.keys()
        else:
            return [k for k in self.runs.keys() if re.search(filter_regex, k)]

    @staticmethod
    def load_spec(spec, base_path, cache_dir, seq_name_mapping=None, extra_filter_regex=None, other_specs=[]):
        """Load a single experiment from logs or cache

        The cache key is determined by the 'pattern', 'filter_regex' and 'extend' keys
        in the spec. That means changing the name or display name for example doesn't
        invalidate the cache. If the experiment is not found in cache, it is loaded from
        the run directories and then saved in cache.

        :param spec: experiment spec from the config file
        :param base_path: base folder to search for run dirs in
        :param cache_dir: cache directory
        :param seq_name_mapping: optional sequence name mapping
        :param extra_filter_regex: additional filter to limit the loaded sequences on top of what is defined in the spec; if set, caching is disabled
        :param other_specs: other experiment specs in case our spec has the 'extend' option defined
            (NOTE: mutable default is safe here -- the list is only iterated, never mutated)
        :return: loaded Experiment object
        :raises RuntimeError: if the spec extends an experiment not present in `other_specs`
        """

        # disable cache if extra filtering
        if extra_filter_regex is not None:
            cache_dir = None

        # extending some other experiment:
        # recursively load the base experiment first (same cache/filter settings)
        extend = None
        if "extend" in spec:
            other_spec = next((s for s in other_specs if s.name == spec.extend), None)
            if other_spec is None:
                raise RuntimeError("Experiment {} extends unknown experiment {}.".format(spec.name, spec.extend))
            extend = Experiment.load_spec(other_spec,
                                          base_path,
                                          cache_dir,
                                          seq_name_mapping=seq_name_mapping,
                                          extra_filter_regex=extra_filter_regex,
                                          other_specs=other_specs)

        caching_hash = None
        if cache_dir:
            # only the keys that affect which runs get loaded take part in the cache key;
            # the base experiment's hash is folded in so extending stays consistent
            caching_spec = copy_subdict(spec, ["pattern", "filter_regex"])
            if extend is not None:
                caching_spec["extend"] = extend.caching_hash
            caching_hash = compute_caching_hash(caching_spec)

            cache_filename = "experiment-cache-{}.pickle".format(caching_hash)
            cache_path = os.path.join(cache_dir, cache_filename)

            if os.path.isfile(cache_path):
                if not spec.overwrite_cache:
                    # NOTE(review): pickle.load of a local cache file -- assumes the
                    # cache directory is trusted (pickle can execute arbitrary code)
                    with open(cache_path, 'rb') as f:
                        cache = pickle.load(f)
                        if cache.version != _CURRENT_CACHE_VERSION:
                            # stale cache format: fall through to re-loading from logs
                            print("> experiment: {} (cache {} has version {}; expected {})".format(
                                spec.name, cache_path, cache.version, _CURRENT_CACHE_VERSION))
                        else:
                            print("> experiment: {} (from cache: {})".format(spec.name, cache_path))
                            exp = cache.experiment
                            # overwrite names according to config
                            # (names are not part of the cache key, so refresh them)
                            exp.name = spec.name
                            exp.display_name = spec.display_name
                            exp.description = spec.description
                            return exp
                else:
                    print("> experiment: {} (overwrite cache: {})".format(spec.name, cache_path))
            else:
                print("> experiment: {} (cache doesn't exist: {})".format(spec.name, cache_path))
        else:
            print("> experiment: {}".format(spec.name))

        # cache miss (or caching disabled): load runs from the log directories
        log_dirs = Experiment.get_log_dirs(base_path, spec, filter_regex=extra_filter_regex)

        kwargs = copy_subdict(spec, ["name", "display_name", "description", "extend_override"])
        exp = Experiment(log_dirs,
                         caching_hash=caching_hash,
                         seq_name_mapping=seq_name_mapping,
                         extend=extend,
                         spec=deepcopy(spec),
                         **kwargs)

        # save freshly loaded experiment to cache for next time
        if cache_dir:
            cache = Munch(version=_CURRENT_CACHE_VERSION, experiment=exp, spec=caching_spec)
            os.makedirs(cache_dir, exist_ok=True)
            with open(cache_path, 'wb') as f:
                pickle.dump(cache, f)
                print("experiment {} -> saved cache {}".format(spec.name, cache_path))

        return exp

    @staticmethod
    def get_log_dirs(base_path, spec, filter_regex=None):
        """Return list of run directories given an experiments spec

        :param base_path: base directory to search in
        :param spec: experiment spec, e.g. from an experiments config file
        :param filter_regex: optional additional regex; limits result to matching paths
        :return: list of (filtered) paths (joined with base path)
        """
        # expand every glob pattern relative to base_path; keep only actual run dirs
        log_dirs = [d for p in spec.pattern for d in glob(os.path.join(base_path, p)) if Run.is_run_dir(d)]
        if spec.filter_regex:
            log_dirs = [d for d in log_dirs if re.search(spec.filter_regex, d)]
        if filter_regex:
            log_dirs = [d for d in log_dirs if re.search(filter_regex, d)]
        return log_dirs

    @staticmethod
    def load_all(specs, config_file, base_path, cache_dir, seq_name_mapping=None):
        """Load a set of experiments from log files or cache

        If there is more than one experiment with the same name, an error is raised.

        :param specs: list of experiments specs, e.g. from a experiments config file
        :param config_file: experiments config file path (used to derive the combined cache file name)
        :param base_path: base directory relative to which all patterns in experiments are search for
        :param cache_dir: folder to look for and/or save cached experiments
        :param seq_name_mapping: optional mapping of sequence names
        :return: a dict {name: experiment}
        :raises RuntimeError: if two specs share the same experiment name
        """

        # Note: Seems saving everything to one cache file isn't much faster than the per-experiments cache...
        # (kept as dead-but-switchable code path; flip to True to experiment with it)
        use_combined_cache = False

        # load all from cache
        if use_combined_cache and cache_dir:

            # any single spec requesting overwrite invalidates the combined cache
            overwrite_cache_any = any(e.overwrite_cache for e in specs)
            # 'overwrite_cache' itself must not affect cache equality
            caching_specs = munchify([{k: v for k, v in s.items() if k not in ["overwrite_cache"]} for s in specs])
            meta_info = Munch(version=_CURRENT_CACHE_VERSION, options=Munch(base_path=base_path), specs=caching_specs)

            config_filename = os.path.splitext(os.path.basename(config_file))[0]
            cache_filename = "experiment-cache-{}.pickle".format(config_filename)
            cache_path = os.path.join(cache_dir, cache_filename)

            if os.path.isfile(cache_path):
                if not overwrite_cache_any:
                    # cache layout: two consecutive pickles (meta_info, then exps)
                    with open(cache_path, 'rb') as f:
                        cached_meta_info = pickle.load(f)
                        if cached_meta_info == meta_info:
                            print("> loading from cache: {}".format(cache_path))
                            exps = pickle.load(f)
                            return exps

        # load individually
        exps = dict()
        for spec in specs:
            if spec.name in exps:
                raise RuntimeError("experiment {} is duplicate".format(spec.name))
            exps[spec.name] = Experiment.load_spec(spec,
                                                   base_path,
                                                   cache_dir,
                                                   seq_name_mapping=seq_name_mapping,
                                                   other_specs=specs)

        # save all to cache
        if use_combined_cache and cache_dir:
            os.makedirs(cache_dir, exist_ok=True)
            with open(cache_path, 'wb') as f:
                pickle.dump(meta_info, f)
                pickle.dump(exps, f)
                print("> saved cache {}".format(cache_path))

        return exps
|
||||
|
||||
|
||||
def load_experiments_config(path, args=None):
    """Load experiments config file, applying substitutions and setting defaults

    An experiments config file defines general options, locations of experimental runs,
    and results sections that define tables and plots to render.

    Substitutions and templates can be used to more concisely describe repetitive
    definitions (e.g. generate the same plot for ALL runs of an experiment).

    :param path: path to the experiments config file (TOML)
    :param args: optional command line arguments to override some values in the config
    :type args: Union[dict, argparse.Namespace, None]
    :return: the fully expanded config (a Munch)
    """

    config = munchify(toml.load(path))

    # default config:
    config.setdefault("options", Munch())
    config.options.setdefault("base_path", "$config_dir")
    config.options.setdefault("cache_dir", "cache")
    config.options.setdefault("output_path", "results")
    config.options.setdefault("filter_regex", None)
    config.options.setdefault("overwrite_cache", False)
    config.options.setdefault("show_values_failed_runs", True)
    config.options.setdefault("screenread", False)
    config.options.setdefault("import_experiments", [])
    config.setdefault("seq_name_mapping", dict())
    config.setdefault("seq_displayname_mapping", dict())
    config.setdefault("substitutions", [])
    config.setdefault("templates", [])
    config.setdefault("experiments", [])
    config.setdefault("results", [])

    # overrides from command line
    if isinstance(args, argparse.Namespace):
        args = vars(args)
    if args is None:
        # no overrides given; use empty dict so the 'k in args' checks below are valid
        # (previously the documented default args=None crashed with a TypeError here)
        args = dict()

    # values
    for k in ["base_path", "cache_dir", "output_path", "filter_regex"]:
        if k in args and args[k] is not None:
            config.options[k] = args[k]

    # positive flags
    for k in ["overwrite_cache"]:
        if k in args and args[k]:
            config.options[k] = True

    # negative flags: a set '--dont-foo' flag disables option 'foo'
    # (previously this wrote to the nonexistent option 'dont_foo' itself,
    # so e.g. --dont-show-values-failed-runs had no effect)
    for k in ["dont_show_values_failed_runs"]:
        if k in args and args[k]:
            config.options[k[len("dont_"):]] = False

    # collapse all substitutions into one dict
    static_subs = dict()
    for d in config.substitutions:
        for k, v in d.items():
            if k in static_subs:
                raise RuntimeError("substitution {} defined multiple times".format(k))
            static_subs[k] = v

    # create dictionary from list of templates (index by name)
    template_definitions = dict()
    for t in config.templates:
        template_definitions[t._name] = t

    # substitution helper
    var_pattern = re.compile(r"\$\{(\w+)\}")  # match '${foo}'

    def substitute(obj, subs):
        """Recursively expand templates and substitutions in `obj` using the mapping `subs`."""
        if isinstance(obj, Mapping):
            # For mappings in general we simply recurse the 'substitute' call for the dict values.
            # In case the '_template' key is present, we do template expansion.
            if "_template" in obj:
                # template expansion

                # single templates can be abbreviated by not putting them in a list
                # --> put them in a list now to make following code the same for either case
                templates = obj._template if isinstance(obj._template, list) else [obj._template]

                # recurse 'substitute' on non-templated part
                prototype = {k: substitute(v, subs) for k, v in obj.items() if not k.startswith("_")}

                # loop over all templates; each template multiplies out the current results
                result = [Munch()]
                for tmpl in templates:

                    # which arguments are defined?
                    args = [k for k in tmpl if not k.startswith("_")]

                    # check template definition
                    tmpl_def = template_definitions[tmpl._name]
                    tmpl_args = tmpl_def._arguments if "_arguments" in tmpl_def else []
                    if set(args) != set(tmpl_args):
                        # use tmpl_args here: accessing tmpl_def._arguments directly would
                        # raise AttributeError when '_arguments' is absent
                        raise RuntimeError("Template {} required arguments {}, but supplied {} during expansion".format(
                            tmpl._name, tmpl_args, args))

                    # apply template definition to all new objects
                    tmp = result
                    result = list()
                    for new_obj in tmp:
                        # create substitutions from template arguments (recursing 'substitute' call)
                        all_argument_combinations = [dict()]  # start with single combination (usual case)
                        for arg in args:
                            if isinstance(tmpl[arg], Mapping) and "_argument" in tmpl[arg]:
                                if tmpl[arg]._argument == "product":
                                    # given list of alternative argument values: create combination for each of them
                                    tmp2 = all_argument_combinations
                                    all_argument_combinations = list()
                                    for d in tmp2:
                                        for val in substitute(tmpl[arg]._value, subs):
                                            d_new = deepcopy(d)
                                            d_new[arg] = val
                                            all_argument_combinations.append(d_new)
                                else:
                                    raise RuntimeError("argument type {} for argument {} not implemented".format(
                                        tmpl[arg]._argument, arg))
                            else:
                                # simple argument: append to all combinations
                                for d in all_argument_combinations:
                                    assert (arg not in d)
                                    d[arg] = substitute(tmpl[arg], subs)

                        # for each argument combination, create substitutions and apply template definition
                        for expanded_args in all_argument_combinations:

                            subs_with_args = dict(subs)
                            subs_with_args.update(expanded_args)

                            # merge template definition into result, while recursing substitute call with augmented substitutions
                            new_obj2 = deepcopy(new_obj)
                            for k, v in tmpl_def.items():
                                if not k.startswith("_"):
                                    # later templates can override keys from earlier ones
                                    new_obj2[k] = substitute(deepcopy(v), subs_with_args)

                            result.append(new_obj2)

                # do prototype keys last, since they may override template keys (we already recursed)
                for new_obj in result:
                    for k, v in prototype.items():
                        new_obj[k] = deepcopy(v)

                if len(result) == 1:
                    # single expansion: return the object itself
                    # (was 'return new_obj', relying on the leaked loop variable)
                    return result[0]
                else:
                    # multiple expansions: signal the caller (list branch) to splice these in
                    return Munch(_return="splice", _value=result)
            else:
                # default case
                for k, v in obj.items():
                    obj[k] = substitute(v, subs)
                return obj
        elif isinstance(obj, list):
            # Go over elements of list and recurse the 'substitute' call.
            # In certain cases the returned value can indicate that we should splice in the resulting list instead of
            # just inserting it.
            tmp = list()
            for v in obj:
                val = substitute(v, subs)
                if isinstance(val, dict) and "_return" in val:
                    if val._return == "splice":
                        tmp.extend(val._value)
                    else:
                        raise RuntimeError("Unknown return type {}".format(val._return))
                else:
                    tmp.append(val)
            return tmp
        elif isinstance(obj, str):
            if len(obj) > 2 and obj[0] == "<" and obj[-1] == ">":
                # if string is '<FOO>', the whole string is replaced by the substitution defined for FOO
                var = obj[1:-1]
                if var in subs:
                    return substitute(subs[var], subs)
                else:
                    raise RuntimeError("Unknown substitution <{}>".format(var))
            else:
                # otherwise, find occurrences of ${FOO} in the string and replace by a string representation
                # of the substitution defined for FOO
                obj, n = var_pattern.subn(lambda m: str(subs[m.group(1)]), obj)
                if n > 0:
                    # something changed --> recurse
                    return substitute(obj, subs)
                else:
                    # no substitution --> just return
                    return obj
        else:
            return obj

    # apply substitutions
    config.experiments = substitute(config.experiments, static_subs)
    config.results = substitute(config.results, static_subs)

    # set default values for experiments specs
    for spec in config.experiments:
        spec.setdefault("display_name", spec.name)
        spec.setdefault("description", None)
        spec.setdefault("filter_regex", config.options.filter_regex)
        spec.setdefault("overwrite_cache", config.options.overwrite_cache)
        spec.setdefault("pattern", [])
        spec.pattern = [spec.pattern] if isinstance(spec.pattern, str) else spec.pattern  # ensure list
        assert isinstance(spec.pattern, abc.Sequence), "pattern {} in experiment {} is neither string nor list".format(
            spec.pattern, spec.name)

    # results: backwards-compatibility -- move old sections into 'results'
    if "results_tables" in config:
        for spec in config.results_tables:
            spec["class"] = "results_table"
            config.results.append(spec)
        del config["results_tables"]

    if "summarize_sequences_tables" in config:
        for spec in config.summarize_sequences_tables:
            spec["class"] = "summarize_sequences_table"
            config.results.append(spec)
        del config["summarize_sequences_tables"]

    if "plots" in config:
        for spec in config.plots:
            spec["class"] = "plot"
            config.results.append(spec)
        del config["plots"]

    if "overview_tables" in config:
        for spec in config.overview_tables:
            spec["class"] = "overview_table"
            config.results.append(spec)
        del config["overview_tables"]

    # results: default values
    for spec in config.results:
        spec.setdefault("class", "results_table")

        # set common default values
        spec.setdefault("show", True)
        spec.setdefault("clearpage", spec["class"] == "section")
        spec.setdefault("filter_regex", None)

        if spec["class"] == "section":
            spec.setdefault("name", "Section")
            spec.setdefault("pagewidth", None)
        elif spec["class"] == "results_table":
            spec.setdefault("metrics_legend", True)
            spec.setdefault("escape_latex_header", True)
            spec.setdefault("rotate_header", True)
            spec.setdefault("vertical_bars", True)
            spec.setdefault("export_latex", None)
            spec.setdefault("color_failed", "red")
            spec.setdefault("multirow", True)
            spec.setdefault("override_as_failed", [])
        elif spec["class"] == "summarize_sequences_table":
            spec.setdefault("header", "")
            spec.setdefault("export_latex", None)
            spec.setdefault("escape_latex_header", True)
            spec.setdefault("rotate_header", True)
        elif spec["class"] == "plot":
            spec.setdefault("plot_ate", False)
            spec.setdefault("figsize", None)
            spec.setdefault("title", None)
            spec.setdefault("reference_experiment", None)
            spec.setdefault("width", None)
            spec.setdefault("ylim", Munch())
            spec.ylim.setdefault("top", None)
            spec.ylim.setdefault("bottom", None)
            spec.setdefault("ylim_cost", Munch())
            spec.ylim_cost.setdefault("top", None)
            spec.ylim_cost.setdefault("bottom", None)
            spec.setdefault("ylim_ate", Munch())
            spec.ylim_ate.setdefault("top", None)
            spec.ylim_ate.setdefault("bottom", None)
            spec.setdefault("ylim_tolerance", Munch())
            spec.ylim_tolerance.setdefault("top", None)
            spec.ylim_tolerance.setdefault("bottom", None)
            spec.setdefault("xlim_time", Munch())
            spec.xlim_time.setdefault("right", None)
            spec.xlim_time.setdefault("left", None)
            spec.setdefault("xlim_time_fastest", Munch())
            spec.xlim_time_fastest.setdefault("right", None)
            spec.xlim_time_fastest.setdefault("left", None)
            spec.setdefault("xlim_it", Munch())
            spec.xlim_it.setdefault("right", None)
            spec.xlim_it.setdefault("left", None)
            spec.setdefault("xlimits", Munch())
            spec.xlimits.setdefault("right", None)
            spec.xlimits.setdefault("left", None)
            spec.setdefault("legend_loc", "best")
            spec.setdefault("align_fraction", None)
            spec.setdefault("layout", "horizontal")
            spec.setdefault("extend_x", False)
            if "problem_size_variants" not in spec and "memory_variants" in spec:
                # legacy support for "memory_variants"
                spec.problem_size_variants = spec.memory_variants
                del spec["memory_variants"]
            spec.setdefault("problem_size_variants", ["cam", "lm", "obs"])
            spec.setdefault("bal_cost_include", ["cost_time", "cost_it", "tr_radius", "inner_it", "memory"])
            spec.setdefault("tolerances", [0.01, 0.001, 0.00001])
            spec.setdefault("plot_tolerances", False)
            spec.setdefault("best_fit_line", True)
            spec.setdefault("reverse_zorder", False)
            spec.setdefault("plot_cost_semilogy", True)
            spec.setdefault("marker_size", 8)
            spec.setdefault("ylabel", True)
            spec.setdefault("suptitle", None)
            spec.setdefault("rotate2d", 0)
            spec.setdefault("trajectory_axes", "xy")
        elif spec["class"] == "overview_table":
            spec.setdefault("export_latex", None)

    # expand templates in path names ($config_dir -> directory of the config file)
    template_args = dict(config_dir=os.path.dirname(os.path.abspath(path)))
    for key in ["base_path", "output_path", "cache_dir"]:
        config.options[key] = Template(config.options[key]).substitute(**template_args)
    if isinstance(config.options.import_experiments, str):
        config.options.import_experiments = [config.options.import_experiments]
    config.options.import_experiments = [
        Template(p).substitute(**template_args) for p in config.options.import_experiments
    ]

    # import experiments from other config files; imported ones come first so local
    # definitions take precedence in later lookups
    imported_experiments = []
    for import_path in config.options.import_experiments:
        cfg = load_experiments_config(import_path, args)
        imported_experiments.extend(cfg.experiments)
    config.experiments = imported_experiments + config.experiments

    return config
|
||||
143
python/basalt/generate_tables.py
Normal file
143
python/basalt/generate_tables.py
Normal file
@@ -0,0 +1,143 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from pylatex import Document, Section, Package, NewPage
|
||||
from pylatex import Command
|
||||
from pylatex.base_classes import Arguments
|
||||
|
||||
from .experiments import load_experiments_config
|
||||
from .experiments import Experiment
|
||||
from .latex.templates import screenread_sty
|
||||
from .util import os_open_file
|
||||
from .latex.results_table import ResultsTable
|
||||
from .latex.summarize_sequences_table import SummarizeSequencesTable
|
||||
from .latex.plot import Plot
|
||||
|
||||
|
||||
def generate_tables(args):
    """Load experiments per config and render all configured tables/plots into a PDF.

    :param args: parsed command line arguments (see `main`); used for the config path,
        option overrides, and output handling flags
    :raises RuntimeError: if a results spec has an unknown 'class'
    """

    if args.change_directory:
        os.chdir(args.change_directory)

    config = load_experiments_config(args.config, args)

    exps = Experiment.load_all(config.experiments,
                               config_file=args.config,
                               base_path=config.options.base_path,
                               cache_dir=config.options.cache_dir,
                               seq_name_mapping=config.seq_name_mapping)

    doc = Document(geometry_options={"tmargin": "1cm", "lmargin": "1cm"})

    export_basepath = "{}-export".format(config.options.output_path)

    # 'curr' is the container new elements are appended to: the document itself
    # until the first section, then the current section
    curr = doc

    # set when a hidden section starts; suppresses all elements until the next shown section
    hide_all = False
    for spec in config.results:
        if spec.show:

            if spec["class"] == "section":
                if spec.clearpage:
                    curr.append(NewPage())
                if spec.pagewidth:
                    curr.append(Command("SetPageScreenWidth", Arguments(spec.pagewidth)))
                else:
                    curr.append(Command("RestorePageScreenWidth"))
                hide_all = False
                curr = Section(spec.name)
                doc.append(curr)
                continue

            if hide_all:
                continue

            if spec.clearpage:
                curr.append(NewPage())

            # NOTE: this must be 'if', not 'elif' -- an 'elif' would chain to the
            # 'if spec.clearpage' above, leaving 'elem' unassigned (or stale) whenever
            # clearpage is set, and crashing at 'curr.append(elem)' below
            if spec["class"] == "results_table":
                elem = ResultsTable(exps,
                                    spec,
                                    show_values_failed_runs=config.options.show_values_failed_runs,
                                    seq_displayname_mapping=config.seq_displayname_mapping,
                                    export_basepath=export_basepath)
            elif spec["class"] == "summarize_sequences_table":
                elem = SummarizeSequencesTable(exps,
                                               spec,
                                               show_values_failed_runs=config.options.show_values_failed_runs,
                                               seq_displayname_mapping=config.seq_displayname_mapping,
                                               export_basepath=export_basepath)
            elif spec["class"] == "plot":
                elem = Plot(exps,
                            spec,
                            seq_displayname_mapping=config.seq_displayname_mapping,
                            export_basepath=export_basepath)
            else:
                raise RuntimeError("Invalid results class {}".format(spec["class"]))

            curr.append(elem)
        else:
            # a hidden section hides everything until the next shown section
            if spec["class"] == "section":
                hide_all = True
            continue

    # generate auxiliary tex files
    if config.options.screenread:
        output_dir = os.path.dirname(config.options.output_path)
        # os.path.join handles an empty dirname (output path without directory part);
        # plain concatenation produced an absolute '/screenread.sty' in that case
        screenread_path = os.path.join(output_dir, "screenread.sty")
        with open(screenread_path, "w") as f:
            f.write(screenread_sty)
        doc.packages.add(Package('screenread'))

    # create nofloatfigure environment
    doc.preamble.append(
        Command("newenvironment", Arguments("nofloatfigure", Command("captionsetup", Arguments(type="figure")), "")))
    doc.packages.add(Package('caption'))
    doc.packages.add(Package('mathtools'))

    # render latex
    doc.generate_pdf(config.options.output_path, clean_tex=not args.dont_clean_tex)

    # cleanup
    if config.options.screenread and not args.dont_clean_tex:
        os.remove(screenread_path)

    # open the generated pdf
    if args.open:
        os_open_file(config.options.output_path + ".pdf")
|
||||
|
||||
|
||||
def main():
    """Command line entry point: parse arguments and generate the report."""
    parser = argparse.ArgumentParser("Load basalt experiment logs and generate result tables and plots.")

    parser.add_argument(
        "-C",
        "--change-directory",
        default=None,
        help="Change directory to this folder before doing anything else")
    parser.add_argument("--config", default="experiments.toml", help="specs for experiments to load")
    parser.add_argument("--base-path", default=None, help="overwrite basepath for loading logs defined in the config")
    parser.add_argument("--output-path", default=None, help="output filepath")
    parser.add_argument("--dont-clean-tex", action="store_true", help="don't remove tex file after generation")
    parser.add_argument("--cache-dir", default=None, help="load/save experiments cache from/to give folder")
    parser.add_argument("--overwrite-cache", action="store_true", help="reload all experiments independent of cache")
    parser.add_argument(
        "--dont-show-values-failed-runs",
        action="store_true",
        help="don't attempt to show values for failed logs based on partial logs")
    parser.add_argument("--open", action="store_true", help="open after generation")

    generate_tables(parser.parse_args())
|
||||
109
python/basalt/latex/containers.py
Normal file
109
python/basalt/latex/containers.py
Normal file
@@ -0,0 +1,109 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import math
|
||||
|
||||
from pylatex import Package
|
||||
from pylatex.base_classes import Container
|
||||
|
||||
from ..metric import metrics_from_config
|
||||
from ..metric import ExperimentSpec
|
||||
from ..util import alphanum
|
||||
|
||||
|
||||
class MyContainer(Container):
    """pylatex container that renders only its content, without any
    surrounding environment markup."""

    def __init__(self):
        super().__init__()
        # Some packages do not propagate properly from the elements added to
        # this container, so declare them explicitly here.
        for package_name in ("xcolor", "graphicx"):
            self.packages.add(Package(package_name))

    def dumps(self):
        """Render just the contained elements (no begin/end wrapper)."""
        return self.dumps_content()
|
||||
class ExperimentsContainer(MyContainer):
    """Container that additionally knows how to map sequence keys to
    human-readable display names."""

    def __init__(self, seq_displayname_mapping):
        super().__init__()
        self.seq_displayname_mapping = seq_displayname_mapping

    def seq_displayname(self, seq):
        """Return the display name configured for *seq*; fall back to the raw
        sequence key when no mapping entry exists."""
        return self.seq_displayname_mapping.get(seq, seq)
|
||||
|
||||
class ExperimentsTable(ExperimentsContainer):
    """Base class for tables over a set of experiments x sequences x metrics.

    Subclasses (results table, summary table) use the precomputed
    ``experiment_specs`` / ``metrics`` / ``seq_names`` to lay out rows.
    """

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        # exps: mapping from experiment name to Experiment-like objects with
        # `.runs` and `.sequences(...)` — presumably Experiment from
        # experiments.py; confirm against caller.
        super().__init__(seq_displayname_mapping)
        self.exps = exps
        self.spec = spec
        self.show_values_failed_runs = show_values_failed_runs
        self.export_basepath = export_basepath

        # Parse "name@itN" experiment references and metric definitions.
        self.experiment_specs = [ExperimentSpec(s) for s in self.spec.experiments]
        self.metrics = metrics_from_config(self.spec.metrics)

        # Union of sequences over all referenced experiments, sorted naturally.
        self.seq_names = self.sequence_names([s.name for s in self.experiment_specs])
        self.num_seqs = len(self.seq_names)
        self.num_metrics = len(self.metrics)
        self.num_exps = len(self.experiment_specs)

    def sequence_names(self, experiment_names):
        """Return the union of all sequence names of the given experiments,
        filtered by the spec's regex and sorted alphanumerically."""
        seq_names = set()
        for s in experiment_names:
            seq_names.update(self.exps[s].sequences(filter_regex=self.spec.filter_regex))

        return sorted(seq_names, key=alphanum)

    def is_failed(self, exp, seq):
        """True if the run for *seq* is missing from *exp* or reported failed."""
        if seq not in exp.runs:
            return True
        return exp.runs[seq].is_failed()

    def render_failure(self, exp, seq):
        """Return the string to display for a failed run, or None if the run
        should be rendered with its metric values instead."""
        # Manual override from the config takes precedence.
        if seq in self.spec.override_as_failed:
            return "x"

        if seq not in exp.runs:
            return '?'
        run = exp.runs[seq]

        # With show_values_failed_runs, only a completely missing log counts
        # as failed (partial logs still get their values shown).
        treat_as_failed = (run.log is None) if self.show_values_failed_runs else run.is_failed()

        if treat_as_failed:
            return run.failure_str()
        else:
            return None

    def get_metrics(self, exp, seq, it):
        """Return the list of metric values for (exp, seq) at iteration *it*;
        NaNs when the run is missing or treated as failed."""
        if seq not in exp.runs:
            return [math.nan for _ in self.metrics]
        run = exp.runs[seq]

        treat_as_failed = (run.log is None) if self.show_values_failed_runs else run.is_failed()

        if treat_as_failed:
            return [math.nan for _ in self.metrics]

        return [m.get_value(self.exps, exp, seq, it) for m in self.metrics]
        # try:
        #     return [m.get_value(self.exps, exp, seq, it) for m in self.metrics]
        # except AttributeError as e:
        #     if e.args[0].startswith("local_error"):
        #         if not has_imported_sophus():
        #             print("To use local-error, you need to install sophuspy and flush the cache.")
        #             sys.exit(1)
        #         if not exp.runs[seq].log.has_cam_pos:
        #             print("You cannot use local-error for experiment {}, which has no camera positions in the log.".
        #                   format(exp.name))
        #             sys.exit(1)
        #     raise
||||
393
python/basalt/latex/plot.py
Normal file
393
python/basalt/latex/plot.py
Normal file
@@ -0,0 +1,393 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import numpy as np
|
||||
import os
|
||||
import math
|
||||
import functools
|
||||
|
||||
import matplotlib
|
||||
|
||||
matplotlib.use('Agg') # Not to use X server. For TravisCI.
|
||||
import matplotlib.pyplot as plt # noqa
|
||||
from matplotlib.ticker import MaxNLocator
|
||||
|
||||
# Matplotlib's default property (color) cycle; basis for the custom cycles below.
prop_cycle = plt.rcParams['axes.prop_cycle']

#default_cycler = (cycler(linestyle=['-', '--', ':', '-.']) *
#                  cycler(color=prop_cycle.by_key()['color']))
|
||||
|
||||
class ModulusList(list):
    """List whose indexing wraps around: the index is taken modulo the
    current length, so any integer index is valid."""

    def __getitem__(self, key):
        return super().__getitem__(key % len(self))
|
||||
|
||||
# Re-ordered copy of matplotlib's default color cycle: the first four entries
# are permuted (0, 2, 3, 1) so adjacent curves get more distinct hues.
default_colors_finite = prop_cycle.by_key()['color']
default_colors_finite[0] = prop_cycle.by_key()['color'][0]
default_colors_finite[1] = prop_cycle.by_key()['color'][2]
default_colors_finite[2] = prop_cycle.by_key()['color'][3]
default_colors_finite[3] = prop_cycle.by_key()['color'][1]

# Wrap-around cycles so any plot index maps to a valid style.
default_colors = ModulusList(default_colors_finite)
#default_lines = ModulusList(["-", "-", ":", "--", "-.", ":", "--", "-."])
#default_markers = ModulusList(["o", "s", "^", "X", "D", "P", "v", "h"])
default_lines = ModulusList([":", "-", "-.", "--", ":", "--", "-.", "-"])
default_markers = ModulusList(["o", "s", "^", "X", "D", "P", "v", "h"])
|
||||
from collections import deque
|
||||
from collections import defaultdict
|
||||
|
||||
from pylatex import Figure
|
||||
from pylatex.utils import NoEscape
|
||||
|
||||
from .containers import ExperimentsContainer
|
||||
from .util import rotation2d
|
||||
|
||||
|
||||
class NoFloatFigure(Figure):
    """Figure container; behavior is identical to pylatex's Figure here — the
    name suggests it is meant to render without LaTeX float placement
    (presumably handled by the surrounding document/template — confirm)."""
    pass
||||
|
||||
class Plot(ExperimentsContainer):
    """Render one plot (nullspace / eigenvalues / trajectory) to a PDF file and
    embed it into the generated document as a figure."""

    def __init__(self, exps, spec, seq_displayname_mapping, export_basepath):
        super().__init__(seq_displayname_mapping)

        # Fraction of \textwidth used when embedding; a plotter may set it.
        self.width = None

        # Dispatch on the plot type requested in the spec.
        plotters = dict(nullspace=self.plot_nullspace,
                        eigenvalues=self.plot_eigenvalues,
                        trajectory=self.plot_trajectory)

        plot_fn = plotters[spec.type]
        plot_fn(exps, spec)

        # Explicit width from the spec wins; otherwise default to full width.
        if spec.width is not None:
            self.width = spec.width
        elif self.width is None:
            self.width = 1

        plt.tight_layout()

        saved_file = self._save_plot(spec, export_basepath)

        # Human-readable caption (underscores read badly in LaTeX output).
        if "sequence" in spec:
            plot_name = '{} {} {}'.format(spec.type, spec.name, spec.sequence).replace("_", " ")
        else:
            plot_name = '{} {}'.format(spec.type, spec.name).replace("_", " ")

        #with self.create(Subsection(spec.name, numbering=False)) as p:
        with self.create(NoFloatFigure()) as f:
            f.add_image(os.path.abspath(saved_file), width=NoEscape(r'{}\textwidth'.format(self.width)))
            f.add_caption(plot_name)

        # cleanup: release all matplotlib figures to bound memory use
        plt.close('all')

    def plot_nullspace(self, exps, spec):
        """Semilog plots of the marginalization nullspace cost change
        (columns 0..6 of log.sums.marg_ns: x, y, z, roll, pitch, yaw, random),
        one subplot per experiment."""

        logs = [exps[e].runs[spec.sequence].log for e in spec.experiments]
        names = [exps[e].display_name for e in spec.experiments]

        num_plots = len(names)

        # Layout: 4 experiments side by side (alternative 2x2 layout kept but
        # disabled via `if True`); otherwise stacked vertically.
        if num_plots == 4:
            if True:
                if spec.figsize is None:
                    spec.figsize = [10, 2.5]
                fig, axs = plt.subplots(1, 4, figsize=spec.figsize, sharey=True)
            else:
                if spec.figsize is None:
                    spec.figsize = [10, 4.7]
                fig, axs = plt.subplots(2, 2, figsize=spec.figsize, sharey=True)
                axs = axs.flatten()
        else:
            if spec.figsize is None:
                spec.figsize = [6, 2 * num_plots]
            fig, axs = plt.subplots(num_plots, 1, figsize=spec.figsize, sharey=True)

        # plt.subplots returns a bare Axes (not an array) for a single plot.
        if num_plots == 1:
            axs = [axs]

        for i, (log, name) in enumerate(zip(logs, names)):

            if log is None:
                continue

            ax = axs[i]

            ns = log.sums.marg_ns[1:]  # skip first prior, which just is all 0
            ns = np.abs(ns)  # cost change may be negative, we are only interested in the norm
            ns = np.maximum(ns, 1e-20)  # clamp at very small value

            markerfacecolor = "white"

            # Marker spacing tuned per sequence length.
            markevery = 1000
            if spec.sequence == "kitti10":
                markevery = 100

            # x, y, z share one color; only the z curve carries the legend
            # label and markers (offset by markevery//2 to avoid overlap).
            ax.semilogy(
                ns[:, 0],
                ":",
                # label="x",
                color="tab:blue")
            ax.semilogy(
                ns[:, 1],
                ":",
                # label="y",
                color="tab:blue")
            ax.semilogy(
                ns[:, 2],
                ":",
                # label="z",
                label="x, y, z",
                color="tab:blue",
                marker="o",
                markerfacecolor=markerfacecolor,
                markevery=(markevery // 2, markevery))

            # roll/pitch share a color; pitch carries label and markers.
            ax.semilogy(
                ns[:, 3],
                ":",
                # label="roll",
                color="tab:orange")
            ax.semilogy(
                ns[:, 4],
                ":",
                # label="pitch",
                label="roll, pitch",
                color="tab:orange",
                marker="s",
                markerfacecolor=markerfacecolor,
                markevery=(markevery // 2, markevery))

            ax.semilogy(ns[:, 5],
                        ":",
                        label="yaw",
                        color="tab:green",
                        marker="^",
                        markerfacecolor=markerfacecolor,
                        markevery=(0, markevery))

            ax.semilogy(ns[:, 6],
                        ":",
                        label="random",
                        color="tab:red",
                        marker="D",
                        markerfacecolor=markerfacecolor,
                        markevery=(0, markevery))

            # marker on top of lines; redrawn with no line color so markers are
            # not covered by later curves
            ax.semilogy(ns[:, 2],
                        color="None",
                        marker="o",
                        markerfacecolor=markerfacecolor,
                        markeredgecolor="tab:blue",
                        markevery=(markevery // 2, markevery))
            ax.semilogy(ns[:, 4],
                        color="None",
                        marker="s",
                        markerfacecolor=markerfacecolor,
                        markeredgecolor="tab:orange",
                        markevery=(markevery // 2, markevery))

            #ax.set_yscale("symlog", linthresh=1e-12)

            ax.set_title(name)

            ax.set_yticks([1e-17, 1e-12, 1e-7, 1e-2, 1e3, 1e8])

            if spec.sequence == "kitti10":
                ax.set_xticks([i * 100 for i in range(4)])
                #ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 100 + 50 for i in range(4)]))

            # y-axis label only on the leftmost plot; legend only on the last.
            if i == 0:
                ax.set_ylabel("$\\Delta E_m$", rotation=0)
                ax.yaxis.set_label_coords(-0.05, 1.05)

            if i == num_plots - 1:
                ax.legend(loc=spec.legend_loc)

            if spec.ylim.top is not None:
                ax.set_ylim(top=spec.ylim.top)
            if spec.ylim.bottom is not None:
                ax.set_ylim(bottom=spec.ylim.bottom)

        if spec.suptitle:
            fig.suptitle(spec.suptitle)

    def plot_eigenvalues(self, exps, spec):
        """Plot the smallest eigenvalue (per time step) of the marginalization
        prior for each experiment into a single symlog axis."""

        logs = [exps[e].runs[spec.sequence].log for e in spec.experiments]
        names = [exps[e].display_name for e in spec.experiments]

        num_plots = 1

        if spec.figsize is None:
            spec.figsize = [5.2, 2 * num_plots]

        fig, axs = plt.subplots(num_plots, 1, figsize=spec.figsize)

        # Single subplot: plt.subplots returns a bare Axes here.
        ax = axs

        for i, (log, name) in enumerate(zip(logs, names)):
            if log is not None:
                # Minimum eigenvalue per step; [1:] skips the initial prior.
                min_ev = [np.min(e) for e in log.sums.marg_ev[1:]]
                #ax.plot(min_ev, ":", label=name, color=default_colors[i])
                ax.plot(min_ev, default_lines[i], label=name, color=default_colors[i])

        # symlog: eigenvalues can be negative; linear near zero.
        ax.set_yscale("symlog", linthresh=1e-8)
        ax.legend(loc=spec.legend_loc)

        #ax.set_title("smallest eigenvalue {} {}".format(name, spec.sequence))

        # Tick placement tuned per known sequence lengths.
        if spec.sequence == "eurocMH01":
            ax.set_xticks([i * 1000 for i in range(4)])
            ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 1000 + 500 for i in range(4)]))

        if spec.sequence == "kitti10":
            ax.set_xticks([i * 100 for i in range(4)])
            ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([i * 100 + 50 for i in range(4)]))

        ax.set_yticks([-1e4, -1e-4, 0.0, 1e-4, 1e4])
        ax.set_ylim(bottom=-1e8, top=1e8)
        # ax.yaxis.tick_right()
        ax.set_ylabel("$\\sigma_{min}$", rotation=0)
        ax.yaxis.set_label_coords(0, 1.05)

        # Spec-provided limits override the defaults above.
        if spec.ylim.top is not None:
            ax.set_ylim(top=spec.ylim.top)
        if spec.ylim.bottom is not None:
            ax.set_ylim(bottom=spec.ylim.bottom)

        if spec.suptitle:
            fig.suptitle(spec.suptitle)

    def plot_trajectory(self, exps, spec):
        """2D top-down plot of estimated trajectories (and ground truth from
        the first experiment), projected onto the axes given in the spec and
        rotated by spec.rotate2d degrees."""

        #self.width = 1.5

        runs = [exps[e].runs[spec.sequence] for e in spec.experiments]
        names = [exps[e].display_name for e in spec.experiments]

        linewidth_factor = 3

        # In-plane rotation applied to all plotted positions.
        R = rotation2d(spec.rotate2d)

        # e.g. "xy" -> [0, 1]: which position components to plot.
        traj_axes_idx = self._axes_spec_to_index(spec.trajectory_axes)

        if spec.figsize is None:
            spec.figsize = [6.4, 4.8]

        fig, ax = plt.subplots(figsize=spec.figsize)

        ax.axis("equal")
        ax.axis('off')
        #ax.set_xlabel("x")
        #ax.set_ylabel("y")

        gt_color = "tab:grey"
        #gt_color = "black"

        # take gt-trajectory from first experiment:
        if runs[0].traj_gt is not None:
            gt = runs[0].traj_gt[:, traj_axes_idx].T
            gt = np.matmul(R, gt)
            ax.plot(gt[0, :],
                    gt[1, :],
                    '-',
                    zorder=1,
                    linewidth=1 * linewidth_factor,
                    color=gt_color,
                    label="ground truth")

        # https://matplotlib.org/stable/gallery/color/named_colors.html
        # NOTE(review): styles below index by experiment position; `colors` has
        # only 2 entries and `linestyles`/`linewidths` 4/2 — presumably the
        # spec never lists more than 2 experiments here; confirm.
        linestyles = [":", ":", "--", "-"]
        colors = [default_colors[1], default_colors[3]]
        #colors = ["tab:blue", "tab:orange"]
        linewidths = [2, 1]

        for i, (r, name) in enumerate(zip(runs, names)):
            # plot in decreasing zorder
            #zorder = len(runs) - i + 1

            # Later experiments are drawn on top (above ground truth at z=1).
            zorder = i + 2

            if r.traj_est is not None:
                pos = r.traj_est[:, traj_axes_idx].T
                pos = np.matmul(R, pos)
                ax.plot(
                    pos[0, :],
                    pos[1, :],
                    linestyles[i],
                    #default_lines[i],
                    zorder=zorder,
                    linewidth=linewidths[i] * linewidth_factor,
                    label=name,
                    color=colors[i])

        #ax.set_xlim(np.min(x_gt), np.max(x_gt))
        #ax.set_ylim(np.min(y_gt), np.max(y_gt))

        #lines = [gt]
        #colors = ['black']
        #line_segments = LineCollection(lines, colors=colors, linestyle='solid')

        #ax.add_collection(line_segments)

        ax.legend(loc=spec.legend_loc)

        if spec.title is not None:
            ax.set_title(spec.title.format(sequence=self.seq_displayname(spec.sequence)))

    @staticmethod
    def _axes_spec_to_index(axes_spec):
        """Map a two-character axes spec like "xy" to component indices [0, 1].

        (The "Inalid" typo in the assert messages is preserved verbatim.)
        """
        index = []
        assert len(axes_spec) == 2, "Inalid axes_spec {}".format(axes_spec)
        for c in axes_spec:
            if c == "x":
                index.append(0)
            elif c == "y":
                index.append(1)
            elif c == "z":
                index.append(2)
            else:
                assert False, "Inalid axes_spec {}".format(axes_spec)
        return index

    # static: shared across all Plot instances to disambiguate repeated
    # filenames within one document generation.
    filename_counters = defaultdict(int)

    def _save_plot(self, spec, basepath, extension=".pdf"):
        """Save the current matplotlib figure under a unique, filesystem-safe
        name derived from the spec; return the file path."""

        os.makedirs(basepath, exist_ok=True)

        if "sequence" in spec:
            filename = '{}_{}_{}'.format(spec.type, spec.name, spec.sequence)
        else:
            filename = '{}_{}'.format(spec.type, spec.name)

        filename = filename.replace(" ", "_").replace("/", "_")

        # Append a counter suffix from the second occurrence on.
        Plot.filename_counters[filename] += 1
        counter = Plot.filename_counters[filename]
        if counter > 1:
            filename = "{}-{}".format(filename, counter)

        filepath = os.path.join(basepath, "{}.{}".format(filename, extension.strip('.')))

        plt.savefig(filepath)

        return filepath
||||
163
python/basalt/latex/results_table.py
Normal file
163
python/basalt/latex/results_table.py
Normal file
@@ -0,0 +1,163 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import numbers
|
||||
import os
|
||||
import math
|
||||
|
||||
import numpy as np
|
||||
|
||||
from pylatex import Subsection, Tabular, TextColor
|
||||
from pylatex import MultiRow, FootnoteText
|
||||
from pylatex.utils import italic, bold, NoEscape, escape_latex, dumps_list
|
||||
|
||||
from .containers import ExperimentsTable
|
||||
from .util import format_ratio_percent
|
||||
from .util import best_two_non_repeating
|
||||
|
||||
|
||||
class ResultsTable(ExperimentsTable):
    """One table: sequences as rows, experiments as columns, one or more
    metric values per cell; best/second-best values highlighted."""

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        super().__init__(exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath)

        # Build the whole table eagerly at construction time.
        self.doit()

    def doit(self):
        """Assemble the pylatex Tabular and append it to this container."""

        # Multirow layout: one sub-row per metric within each sequence row.
        is_multirow = self.num_metrics > 1 and self.spec.multirow

        def render_metric(value, best, second, decimals, format_string, highlight_top, relative_to):
            # Format one numeric cell; bold the best, italicize second-best.
            # Non-numeric values are passed through unchanged.
            if isinstance(value, numbers.Number):
                if relative_to is None or relative_to == 0 or not np.isfinite(relative_to):
                    # absolute number
                    rendered = format_string.format(value, prec=decimals)
                else:
                    # percent
                    rendered = format_ratio_percent(value, relative_to, decimals=decimals)
                if highlight_top:
                    if value == best:
                        rendered = bold(rendered)
                    elif value == second:
                        rendered = italic(rendered)
                return rendered
            else:
                return value

        if self.spec.export_latex:
            row_height = None
        else:
            # Compact rows when many metrics are stacked per sequence.
            row_height = 0.65 if is_multirow and self.num_metrics >= 3 else 1

        column_spec = '|r' if self.spec.vertical_bars else 'r'
        t = Tabular('l' + column_spec * self.num_exps, row_height=row_height, pos=['t'])
        # Optionally pass header text through as raw LaTeX.
        escape_header_fun = lambda text: text if self.spec.escape_latex_header else NoEscape(text)
        if self.spec.rotate_header:
            t.add_row([''] + [
                NoEscape(r"\rotatebox{90}{%s}" % escape_latex(escape_header_fun(s.display_name(self.exps[s.name]))))
                for s in self.experiment_specs
            ])
        else:
            t.add_row([''] + [escape_header_fun(s.display_name(self.exps[s.name])) for s in self.experiment_specs])
        t.add_hline()

        for seq in self.seq_names:
            # Per-experiment failure state and metric values for this sequence.
            fails = [self.is_failed(self.exps[s.name], seq) for s in self.experiment_specs]
            failure_strings = [self.render_failure(self.exps[s.name], seq) for s in self.experiment_specs]
            values = np.array([self.get_metrics(self.exps[s.name], seq, s.it) for s in self.experiment_specs])

            # Best/second-best per metric, computed on the rounded values and
            # ignoring columns excluded from highlighting.
            top_values = list(range(self.num_metrics))
            for c, m in enumerate(self.metrics):
                try:
                    values[:, c] = np.around(values[:, c], m.decimals)
                except IndexError:
                    pass
                non_excluded_values = np.array(values[:, c])
                for i in m.exclude_columns_highlight:
                    non_excluded_values[i] = math.nan
                top_values[c] = best_two_non_repeating(non_excluded_values, reverse=m.larger_is_better)

            if is_multirow:
                rows = [[MultiRow(self.num_metrics, data=self.seq_displayname(seq))]
                        ] + [list(['']) for _ in range(1, self.num_metrics)]
            else:
                rows = [[self.seq_displayname(seq)]]
            for c, (fail, failure_str, value_col) in enumerate(zip(fails, failure_strings, values)):
                if failure_str is not None:
                    # Failed run: render the failure string instead of values.
                    if self.spec.color_failed:
                        failure_str = TextColor(self.spec.color_failed, failure_str)
                    if is_multirow:
                        rows[0].append(MultiRow(self.num_metrics, data=failure_str))
                        for r in range(1, self.num_metrics):
                            rows[r].append('')
                    else:
                        rows[0].append(failure_str)
                else:
                    tmp_data = [None] * self.num_metrics
                    for r, m in enumerate(self.metrics):
                        if m.failed_threshold and value_col[r] > m.failed_threshold:
                            # Value beyond the metric's failure threshold.
                            obj = "x"
                            if self.spec.color_failed:
                                obj = TextColor(self.spec.color_failed, obj)
                        else:
                            relative_to = None
                            if m.relative_to_column is not None and m.relative_to_column != c:
                                relative_to = values[m.relative_to_column, r]
                            obj = render_metric(value_col[r],
                                                top_values[r][0],
                                                top_values[r][1],
                                                m.effective_display_decimals(),
                                                m.format_string,
                                                m.highlight_top,
                                                relative_to=relative_to)
                            # Partially-failed run whose values are shown.
                            if fail and self.spec.color_failed:
                                obj = TextColor(self.spec.color_failed, obj)
                        tmp_data[r] = obj
                    if self.num_metrics == 1 or is_multirow:
                        for r, obj in enumerate(tmp_data):
                            rows[r].append(obj)
                    else:
                        # Single-row layout: join metrics with "~/~" separators.
                        entry = []
                        for v in tmp_data:
                            entry.append(v)
                            entry.append(NoEscape("~/~"))
                        entry.pop()
                        rows[0].append(dumps_list(entry))

            for row in rows:
                t.add_row(row)

            if is_multirow:
                t.add_hline()

        if self.spec.export_latex:
            os.makedirs(self.export_basepath, exist_ok=True)
            t.generate_tex(os.path.join(self.export_basepath, self.spec.export_latex))

        with self.create(Subsection(self.spec.name, numbering=False)) as p:

            if self.spec.metrics_legend:
                # Side legend listing the metric names in row order.
                legend = Tabular('|c|', row_height=row_height, pos=['t'])
                legend.add_hline()
                legend.add_row(["Metrics"])
                legend.add_hline()
                for m in self.metrics:
                    legend.add_row([m.display_name])
                legend.add_hline()

                tab = Tabular("ll")
                tab.add_row([t, legend])
                content = tab
            else:
                content = t

            # Always wrapped in FootnoteText; `if True` looks like a leftover
            # toggle for experimenting with font size.
            if True:
                content = FootnoteText(content)

            p.append(content)
||||
88
python/basalt/latex/summarize_sequences_table.py
Normal file
88
python/basalt/latex/summarize_sequences_table.py
Normal file
@@ -0,0 +1,88 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import numbers
|
||||
import os
|
||||
import scipy.stats
|
||||
import numpy as np
|
||||
|
||||
from pylatex import Subsection, FootnoteText, Tabular, NoEscape, escape_latex
|
||||
from pylatex.utils import italic, bold
|
||||
|
||||
from .containers import ExperimentsTable
|
||||
from .util import best_two_non_repeating
|
||||
|
||||
|
||||
class SummarizeSequencesTable(ExperimentsTable):
    """Summary table: one row per metric, one column per experiment, each cell
    the (arithmetic or geometric) mean of that metric over all sequences."""

    def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
        super().__init__(exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath)

        # Build the whole table eagerly at construction time.
        self.doit()

    def doit(self):
        """Assemble the pylatex Tabular and append it to this container."""

        def render_metric(value, best, second, decimals, format_string):
            # Format one numeric cell; bold the best, italicize second-best.
            if isinstance(value, numbers.Number):
                rendered = format_string.format(value, prec=decimals)
                if value == best:
                    rendered = bold(rendered)
                elif value == second:
                    rendered = italic(rendered)
                return rendered
            else:
                return value

        # values[metric, sequence, experiment]
        values = np.empty((self.num_metrics, self.num_seqs, self.num_exps))

        for i, seq in enumerate(self.seq_names):
            for j, s in enumerate(self.experiment_specs):
                values[:, i, j] = np.array(self.get_metrics(self.exps[s.name], seq, s.it))

        # Reduce over sequences; geometric mean where the metric requests it.
        means = np.empty((self.num_metrics, self.num_exps))
        for i, m in enumerate(self.metrics):
            if m.geometric_mean:
                means[i, :] = scipy.stats.gmean(values[i, :, :], axis=0)
            else:
                means[i, :] = np.mean(values[i, :, :], axis=0)

        t = Tabular('l' + 'c' * self.num_exps)

        t.add_hline()
        # Optionally pass header text through as raw LaTeX.
        escape_header_fun = lambda text: text if self.spec.escape_latex_header else NoEscape(text)
        if self.spec.rotate_header:
            t.add_row([self.spec.header] + [
                NoEscape(r"\rotatebox{90}{%s}" % escape_latex(escape_header_fun(s.display_name(self.exps[s.name]))))
                for s in self.experiment_specs
            ])
        else:
            t.add_row([self.spec.header] +
                      [escape_header_fun(s.display_name(self.exps[s.name])) for s in self.experiment_specs])
        t.add_hline()

        for i, m in enumerate(self.metrics):
            row_values = np.around(means[i, :], m.decimals)
            top_values = best_two_non_repeating(row_values, reverse=m.larger_is_better)
            row = [m.display_name]
            for v in row_values:
                # TODO: use NoEscape only if certain flag is enabled?
                row.append(
                    NoEscape(
                        render_metric(v, top_values[0], top_values[1], m.effective_display_decimals(),
                                      m.format_string)))
            t.add_row(row)

        t.add_hline()

        if self.spec.export_latex:
            os.makedirs(self.export_basepath, exist_ok=True)
            t.generate_tex(os.path.join(self.export_basepath, self.spec.export_latex))

        with self.create(Subsection(self.spec.name, numbering=False)) as p:
            p.append(FootnoteText(t))
||||
93
python/basalt/latex/templates.py
Normal file
93
python/basalt/latex/templates.py
Normal file
@@ -0,0 +1,93 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
# LaTeX source of the third-party `screenread` package (John Collins, LPPL 1.2),
# embedded verbatim so it can be materialized next to the generated .tex file
# without requiring a system-wide installation.
# NOTE(review): presumably written to disk by the document generator in
# experiments.py — confirm against the caller.
screenread_sty = r"""
\ProvidesPackage{screenread}
% Copyright (C) 2012 John Collins, collins@phys.psu.edu
% License: LPPL 1.2

% Note: To avoid compatibility issues between geometry and at least one
% class file, it may be better to set all the dimensions by hand.

% 20 Nov 2014 - use `pageBreakSection` instead of clobbering `section`
%             - increase longest page size to 575cm
%             - make top, right, and left margins something sensible and
%               a bit more aesthetically pleasing
% 24 Jan 2012 Argument to \SetScreen is screen width
% 23 Jan 2012 Remove package showlayout
% 22 Jan 2012 Initial version, based on ideas in
%    B. Veytsman amd M. Ware, Tugboat 32 (2011) 261.

\RequirePackage{everyshi}
\RequirePackage{geometry}

%=======================

\pagestyle{empty}

\EveryShipout{%
  \pdfpageheight=\pagetotal
  \advance\pdfpageheight by 2in
  \advance\pdfpageheight by \topmargin
  \advance\pdfpageheight by \textheight % This and next allow for footnotes
  \advance\pdfpageheight by -\pagegoal
}

\AtEndDocument{\pagebreak}

\def\pageBreakSection{\pagebreak\section}

\newlength\screenwidth
\newlength{\savedscreenwidth}

\newcommand\SetScreen[1]{%
  % Argument #1 is the screen width.
  % Set appropriate layout parameters, with only a little white space
  % around the text.
  \setlength\screenwidth{#1}%
  \setlength\savedscreenwidth{#1}%
  \setlength\textwidth{#1}%
  \addtolength\textwidth{-2cm}%
  \geometry{layoutwidth=\screenwidth,
    paperwidth=\screenwidth,
    textwidth=\textwidth,
    layoutheight=575cm,
    paperheight=575cm,
    textheight=575cm,
    top=1cm,
    left=1cm,
    right=1cm,
    hcentering=true
  }%
}

\newcommand\SetPageScreenWidth[1]{%
  \setlength\savedscreenwidth{\screenwidth}%
  \setlength\screenwidth{#1}%
  \pdfpagewidth\screenwidth%
  \setlength\textwidth{\screenwidth}%
  \addtolength\textwidth{-2cm}%
}

\newcommand\RestorePageScreenWidth{%
  \setlength\screenwidth{\savedscreenwidth}%
  \pdfpagewidth\screenwidth%
  \setlength\textwidth{\screenwidth}%
  \addtolength\textwidth{-2cm}%
}


% Compute a reasonable default screen width, and set it
\setlength\screenwidth{\textwidth}
\addtolength\screenwidth{1cm}
\SetScreen{\screenwidth}

\endinput

"""
||||
61
python/basalt/latex/util.py
Normal file
61
python/basalt/latex/util.py
Normal file
@@ -0,0 +1,61 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import math
|
||||
import numpy as np
|
||||
|
||||
|
||||
def best_two_non_repeating(array, reverse=False):
    """Return the best and second-best *distinct* values in *array*.

    With reverse=False (default) "best" means smallest; with reverse=True it
    means largest. Repeated occurrences of the best value do not claim the
    second slot. Missing slots are filled with +/-inf; NaNs are ignored
    (comparisons with NaN are always false).
    """
    if reverse:
        better = lambda a, b: a > b
        sentinel = -math.inf
    else:
        better = lambda a, b: a < b
        sentinel = math.inf

    best, second = sentinel, sentinel
    for value in array:
        if better(value, best):
            best, second = value, best
        elif better(best, value) and better(value, second):
            second = value

    return best, second
||||
|
||||
|
||||
def format_ratio(val, val_ref=None, decimals=0):
    """Format val / val_ref with the given number of decimals.

    If val_ref is None, val itself is formatted; if val_ref is 0, returns
    "inf" to avoid a ZeroDivisionError.
    """
    if val_ref == 0:
        return "{}".format(math.inf)
    if val_ref is not None:
        val = float(val) / float(val_ref)
    return "{:.{prec}f}".format(val, prec=decimals)
||||
|
||||
|
||||
def format_ratio_percent(val, val_ref=None, decimals=0):
    """Format val / val_ref as a percentage string like "50%".

    If val_ref is None, val itself is taken as the ratio; if val_ref is 0,
    the raw value is returned unformatted (avoids division by zero).
    """
    if val_ref == 0:
        return "{}".format(val)
    if val_ref is not None:
        val = float(val) / float(val_ref)
    return "{:.{prec}f}%".format(100 * val, prec=decimals)
||||
|
||||
|
||||
def rotation2d(theta_deg):
    """Return the 2x2 counter-clockwise rotation matrix for an angle given in
    degrees."""
    theta = np.radians(theta_deg)
    c, s = np.cos(theta), np.sin(theta)
    return np.array(((c, -s), (s, c)))
||||
106
python/basalt/log.py
Normal file
106
python/basalt/log.py
Normal file
@@ -0,0 +1,106 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import json
import os
# `Mapping` must come from collections.abc: the alias in `collections` was
# removed in Python 3.10 and importing it from there raises ImportError.
from collections.abc import Mapping

import numpy as np
import ubjson
from munch import Munch
from munch import munchify
||||
|
||||
|
||||
class ExecutionStats(Munch):
    """Statistics log loaded from a ubjson/json file, with value arrays
    converted to numpy and exposed as attributes (Munch)."""

    def __init__(self, path):
        data = self._load(path)
        if data is None:
            Munch.__init__(self)
        else:
            Munch.__init__(self, data)

    def _load(self, path):
        """Load and convert the file at *path*; format chosen by extension."""

        if path.endswith("ubjson"):
            with open(path, 'rb') as f:
                data = ubjson.load(f)
        else:
            with open(path, 'r') as f:
                data = json.load(f)

        if isinstance(data, Mapping):
            data = self._convert(data)

        return munchify(data)

    def _convert(self, data):
        """Convert raw dict values to numpy arrays.

        Keys come in pairs "<name>__index" / "<name>__values" encoding a
        ragged list of arrays (start indices + flat values); these are merged
        under "<name>". All other values become plain numpy arrays.
        """

        data_new = dict()

        for k, v in data.items():
            if k.endswith("__values"):
                continue  # skip; processed together with __index
            elif k.endswith("__index"):
                idx = v
                values = np.array(data[k.replace("__index", "__values")])
                # convert to list of arrays according to start indices
                res = np.split(values, idx[1:])
                # If all segments have equal length, stack into a 2D array.
                if all(len(res[0]) == len(x) for x in res):
                    res = np.array(res)
                data_new[k.replace("__index", "")] = res
            else:
                data_new[k] = np.array(v)

        return data_new

    def _is_imu(self):
        # 15 marginalization eigenvalues -> presumably a VIO (IMU) state;
        # TODO confirm state dimension convention.
        return len(self.marg_ev[0]) == 15
||||
|
||||
|
||||
def detect_log_path(dir, basename):
    """Return the path of an existing log file ``<dir>/<basename>.ubjson`` or
    ``.json`` (in that order of preference), or None if neither exists."""
    candidates = (os.path.join(dir, "{}.{}".format(basename, ext))
                  for ext in ("ubjson", "json"))
    for path in candidates:
        if os.path.isfile(path):
            return path
    return None
||||
|
||||
|
||||
def load_execution_stats(dir, basename):
    """Load an ExecutionStats from ``<dir>/<basename>.{ubjson,json}``; return
    None when no such log file exists."""
    path = detect_log_path(dir, basename)
    return ExecutionStats(path) if path is not None else None
||||
|
||||
|
||||
class Log(Munch):
    """Bundle of the per-run execution-stats logs (all / sums / vio)."""

    @staticmethod
    def load(dir):
        """Load all known stats files from *dir*; None if none is present."""

        log = Log(all=load_execution_stats(dir, "stats_all"),
                  sums=load_execution_stats(dir, "stats_sums"),
                  vio=load_execution_stats(dir, "stats_vio"))

        if all([v is None for v in log.values()]):
            return None
        else:
            return log

    def __init__(self, *args, **kwargs):
        Munch.__init__(self, *args, **kwargs)

    def duration(self):
        """Run duration in seconds, from the first/last frame timestamps
        (frame_id is in nanoseconds, hence the 1e-9 factor)."""
        return (self.sums.frame_id[-1] - self.sums.frame_id[0]) * 1e-9
||||
171
python/basalt/metric.py
Normal file
171
python/basalt/metric.py
Normal file
@@ -0,0 +1,171 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import copy
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
class ExperimentSpec:
    """Reference to an experiment, optionally pinned to an iteration via 'name@itN'."""

    def __init__(self, string):
        """Parse 'name' or 'name@itN'; without a suffix the iteration defaults to -1."""
        name, sep, it = string.partition("@it")
        if sep:
            self.name = name
            self.it = int(it)
        else:
            self.name = string
            self.it = -1

    def display_name(self, exp):
        """Display name of experiment `exp`, annotated with the iteration if pinned."""
        if self.it == -1:
            return exp.display_name
        return "{} @ it{}".format(exp.display_name, self.it)
|
||||
|
||||
|
||||
class Metric:
    """One table metric: how to extract it from a run log and how to display it."""

    def __init__(self,
                 display_name,
                 accessor,
                 decimals,
                 format_string="{:.{prec}f}",
                 highlight_top=True,
                 geometric_mean=False,
                 larger_is_better=False):
        # display configuration
        self.display_name = display_name
        self.accessor = accessor  # callable (log, it) -> value
        self.decimals = decimals
        self.display_decimals = None
        self.format_string = format_string
        self.highlight_top = highlight_top
        self.larger_is_better = larger_is_better
        self.exclude_columns_highlight = []
        self.geometric_mean = geometric_mean
        self.failed_threshold = None
        # "relative to ..." display mode (disabled by default)
        self.relative_to_column = None
        self.relative_to_experiment = None
        self.relative_to_metric = None
        self.ratio = True

    def set_config(self, spec):
        """Override the defaults from a config spec (Munch-like attribute access)."""
        # any "relative_to_..." key switches on relative display mode with its own
        # defaults; explicit keys below may still override them
        relative_keys = ("relative_to_column", "relative_to_experiment", "relative_to_metric")
        if any(key in spec for key in relative_keys):
            self.decimals = 3
            self.display_decimals = 3
            self.format_string = "{:.3f}"
            self.geometric_mean = True

        # plain attribute overrides copied verbatim from the spec
        simple_keys = ("display_name", "decimals", "display_decimals",
                       "relative_to_column", "relative_to_metric", "ratio",
                       "format_string", "highlight_top", "larger_is_better",
                       "exclude_columns_highlight", "geometric_mean",
                       "failed_threshold")
        for key in simple_keys:
            if key in spec:
                setattr(self, key, getattr(spec, key))

        # the experiment reference is parsed into an ExperimentSpec
        if "relative_to_experiment" in spec:
            self.relative_to_experiment = ExperimentSpec(spec.relative_to_experiment)

    def effective_display_decimals(self):
        """Decimals used for rendering; falls back to `decimals` unless overridden."""
        return self.decimals if self.display_decimals is None else self.display_decimals

    def get_value(self, exps, e, s, it):
        """Extract the metric value for sequence `s` of experiment `e` at iteration `it`.

        In relative mode the value is divided by (or subtracted from) a base
        value taken from another metric and/or another experiment.
        """
        value = self.accessor(e.runs[s].log, it)

        if self.relative_to_metric is None and self.relative_to_experiment is None:
            return value

        # base value: possibly a different metric and/or a different experiment
        base_accessor = (self.accessor if self.relative_to_metric is None
                         else self.relative_to_metric.accessor)
        if self.relative_to_experiment is None:
            base_log, base_it = e.runs[s].log, it
        else:
            base_log = exps[self.relative_to_experiment.name].runs[s].log
            base_it = self.relative_to_experiment.it
        base_value = base_accessor(base_log, base_it)

        return value / base_value if self.ratio else base_value - value
|
||||
|
||||
|
||||
def peak_memory_opt(l, it):
    """Peak resident memory (MB) after optimization iteration `it` (-1 = end of run).

    Maps the outer iteration index to a flat index into the per-inner-iteration
    memory log via the cumulative number of inner iterations.
    """
    index = -1 if it == -1 else int(l.all.num_it[:it + 1].sum()) - 1
    return l.all.resident_memory_peak[index] / 1024**2
|
||||
|
||||
|
||||
# yapf: disable
# Predefined metrics, keyed by the name used in the experiments config.
# Accessor signature is (l, it): `l` is a loaded run log (see basalt.log.Log:
# sub-logs `sums`, `vio`, `all`) and `it` selects an optimization iteration
# (-1 presumably meaning "last" -- see peak_memory_opt); most accessors ignore `it`.
# "rtf" = real-time factor (dataset duration / wall time); memory values are
# converted from bytes to MB.
metric_desc = dict(
    ev_min=Metric("min ev", lambda l, it: min(min(x) for x in l.sums.marg_ev), 1),
    avg_num_it=Metric("avg #it", lambda l, it: np.mean(l.sums.num_it), 1),
    avg_num_it_failed=Metric("avg #it-fail", lambda l, it: np.mean(l.sums.num_it_rejected), 1),
    duration=Metric("duration (s)", lambda l, it: l.duration(), 1),
    time_marg=Metric("t marg", lambda l, it: np.sum(l.sums.marginalize), 2),
    time_opt=Metric("t opt", lambda l, it: np.sum(l.sums.optimize), 2),
    # NOTE(review): display name "t opt" duplicates time_opt's -- possibly meant "t opt+marg"
    time_optmarg=Metric("t opt", lambda l, it: np.sum(l.sums.optimize) + np.sum(l.sums.marginalize), 2),
    time_exec=Metric("t exec", lambda l, it: l.vio.exec_time_s[0], 1),
    time_exec_realtimefactor=Metric("t exec (rtf)", lambda l, it: l.duration() / l.vio.exec_time_s[0], 1, larger_is_better=True),
    time_measure=Metric("t meas", lambda l, it: np.sum(l.sums.measure), 1),
    time_measure_realtimefactor=Metric("t meas (rtf)", lambda l, it: l.duration() / np.sum(l.sums.measure), 1, larger_is_better=True),
    time_exec_minus_measure=Metric("t exec - meas", lambda l, it: l.vio.exec_time_s[0] - np.sum(l.sums.measure), 1),
    time_measure_minus_optmarg=Metric("t exec - (opt + marg)", lambda l, it: np.sum(l.sums.measure) - (np.sum(l.sums.optimize) + np.sum(l.sums.marginalize)), 1),
    ate_num_kfs=Metric("ATE #kf", lambda l, it: l.vio.ate_num_kfs[0], 0),
    ate_rmse=Metric("ATE", lambda l, it: l.vio.ate_rmse[0], 3),
    peak_memory=Metric("mem peak (MB)", lambda l, it: l.vio.resident_memory_peak[0] / 1024**2, 1),
    #peak_memory_opt=Metric("mem peak opt (MB)", lambda l, it: l.all.resident_memory_peak[l.all.num_it[:it].sum()-1] / 1024**2, 1),
    peak_memory_opt=Metric("mem peak opt (MB)", peak_memory_opt, 1),
)
# yapf: enable
|
||||
|
||||
|
||||
def metrics_from_config(spec):
    """Instantiate Metric objects from a list of config entries.

    Each entry is either a plain metric name (string) or a spec object with a
    `name` attribute plus overrides applied via Metric.set_config.
    """

    def build(entry):
        # plain string: a copy of the predefined metric, unmodified
        if isinstance(entry, str):
            return copy.copy(metric_desc[entry])
        # structured spec: copy the named metric and apply the overrides
        metric = copy.copy(metric_desc[entry.name])
        metric.set_config(entry)
        # a referenced base metric is itself a spec and is resolved recursively
        if metric.relative_to_metric is not None:
            metric.relative_to_metric = build(metric.relative_to_metric)
        return metric

    return [build(entry) for entry in spec]
|
||||
209
python/basalt/nullspace.py
Normal file
209
python/basalt/nullspace.py
Normal file
@@ -0,0 +1,209 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
from .log import ExecutionStats
|
||||
|
||||
import argparse
|
||||
|
||||
import numpy as np
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# matplotlib's default property cycle; used to pick consistent line colors below
prop_cycle = plt.rcParams['axes.prop_cycle']

#default_cycler = (cycler(linestyle=['-', '--', ':', '-.']) *
# cycler(color=prop_cycle.by_key()['color']))

# list of the default color sequence, indexed by the plotting code below
default_colors = prop_cycle.by_key()['color']
|
||||
|
||||
|
||||
def plot(args):
    """Plot nullspace / eigenvalue statistics of a single marginalization log.

    Expects args.path (stats log file), args.save (optional output image path)
    and args.no_gui (suppress the interactive window).

    The repeated `if True:` / `if False:` blocks are manual toggles selecting
    which subplots to draw; keep the number of enabled blocks consistent with
    the `nrows` of the subplot grid.
    """
    log = ExecutionStats(args.path)

    fig, axs = plt.subplots(nrows=6, ncols=1, figsize=(10, 12.0))
    i = 0  # index of the next free subplot

    # marg_ns columns (per the labels below): 0-2 translation, 3-5 rotation
    # (roll/pitch/yaw), 6 a random direction; the first row is skipped.
    # NOTE(review): ns/not_ns are only referenced from commented-out plots.
    if log._is_imu():
        # for VIO, only x/y/z/yaw are selected -- presumably the unobservable
        # directions; TODO confirm
        ns = log.marg_ns[1:, [0, 1, 2, 5]]
    else:
        ns = log.marg_ns[1:, 0:6]
    not_ns = log.marg_ns[1:, 6]

    # subplot: per-direction nullspace norms over time (log scale)
    if True:
        ax = axs[i]
        i += 1

        ax.semilogy(log.marg_ns[1:, 0], ":", label="x", color=default_colors[0])
        ax.semilogy(log.marg_ns[1:, 1], ":", label="y", color=default_colors[0])
        ax.semilogy(log.marg_ns[1:, 2], ":", label="z", color=default_colors[0])

        ax.semilogy(log.marg_ns[1:, 3], ":", label="roll", color=default_colors[1])
        ax.semilogy(log.marg_ns[1:, 4], ":", label="pitch", color=default_colors[1])

        ax.semilogy(log.marg_ns[1:, 5], ":", label="yaw", color=default_colors[2])

        ax.semilogy(log.marg_ns[1:, 6], ":", label="rand", color=default_colors[3])

        #ax.semilogy(np.min(ns, axis=1), "-.", color=default_colors[0])
        #ax.semilogy(np.max(ns, axis=1), ":", color=default_colors[0])
        #ax.semilogy(not_ns, "-", label="foo", color=default_colors[0])
        ax.set_title("nullspace")

        ax.legend(loc="center right")

    # smallest eigenvalues of the marginalization prior; the first 3 entries of
    # the log are skipped. `ev` additionally filters out near-zero values.
    ev_all = np.array([x[0:7] for x in log.marg_ev[3:]])
    ev = np.array([x[x > 1e-5][0:7] for x in log.marg_ev[3:]])
    #ev = np.array([x[0:7] for x in log.marg_ev[3:]])

    # column indices differ between VIO (15-dim state) and VO
    ev_ns_min = ev[:, 0]
    if log._is_imu():
        print("is vio")
        ev_ns_max = ev[:, 3]
        ev_not_ns = ev[:, 4]
    else:
        print("is vo")
        ev_ns_max = ev[:, 5]
        ev_not_ns = ev[:, 6]

    # subplot: smallest/largest nullspace eigenvalue and first non-nullspace one
    if True:
        ax = axs[i]
        i += 1

        ax.semilogy(ev_ns_min, "-.", color=default_colors[0])
        ax.semilogy(ev_ns_max, ":", color=default_colors[0])
        ax.semilogy(ev_not_ns, "-", label="foo", color=default_colors[0])

        ax.set_title("eigenvalues (filtered all ev < 1e-5)")
        #ax.set_title("eigenvalues")
        #ax.legend()

    # subplot: number of (near-)zero eigenvalues per marginalization prior
    if True:
        ax = axs[i]
        i += 1

        ax.plot([sum(x < 1e-5) for x in ev_all], label="x < 1e-5", color=default_colors[0])

        ax.set_title("zero ev count")
        ax.legend()

    # disabled variant: exactly-zero eigenvalue count
    if False:
        ax = axs[i]
        i += 1

        ax.plot([sum(x == 0) for x in ev_all], label="== 0", color=default_colors[0])

        ax.set_title("zero ev count")
        ax.legend()

    # subplot: number of negative eigenvalues per marginalization prior
    if True:
        ax = axs[i]
        i += 1

        ax.plot([sum(x < 0) for x in ev_all], label="< 0", color=default_colors[0])

        #ax.set_title("zero ev count")
        ax.legend()

    # disabled variant: tiny positive eigenvalues
    if False:
        ax = axs[i]
        i += 1

        ax.plot([sum((x > 0) & (x <= 1e-8)) for x in ev_all], label="0 < x <= 1e-8", color=default_colors[0])

        #ax.set_title("zero ev count")
        ax.legend()

    # disabled variant: clearly negative eigenvalues
    if False:
        ax = axs[i]
        i += 1

        ax.plot([sum(x < -1e-8) for x in ev_all], label="< -1e-8", color=default_colors[0])

        #ax.set_title("zero ev count")
        ax.legend()

    # subplot: histogram of all eigenvalues on a symlog axis
    if True:
        ax = axs[i]
        i += 1

        #ax.plot([sum((1e-6 <= x) & (x <= 1e2)) for x in ev_all], label="1e-8 <= x <= 1e1", color=default_colors[0])

        #ax.set_title("zero ev count")
        #ax.legend()

        # NOTE(review): rebinds ev_all, shadowing the per-prior array above
        ev_all = np.concatenate(log.marg_ev[3:])
        ev_all = ev_all[ev_all < 1e3]
        num = len(log.marg_ev[3:])

        ax.hist(
            ev_all,
            bins=[
                -1e2,
                -1e1,
                -1e0,
                -1e-1,
                -1e-2,
                -1e-3,
                -1e-4,
                -1e-5,
                -1e-6,
                #-1e-7,
                #-1e-8,
                #-1e-9,
                #-1e-10,
                0,
                #1e-10,
                #1e-9,
                #1e-8,
                #1e-7,
                1e-6,
                1e-5,
                1e-4,
                1e-3,
                1e-2,
                1e-1,
                1e-0,
                1e1,
                1e2,
                1e3,
                #1e4,
                #1e5,
                #1e6,
                #1e7,
                #1e8,
                #1e9,
                #1e10,
                #1e11
            ])
        ax.set_xscale("symlog", linthresh=1e-6)
        # relabel counts as fractions of the number of marginalization priors
        y_vals = ax.get_yticks()
        ax.set_yticklabels(['{:.1f}'.format(x / num) for x in y_vals])
        ax.set_title("hist of all ev < 1e3 (count normalized by num marg-priors)")

    if args.save:
        plt.savefig(args.save)

    if not args.no_gui:
        plt.show()
|
||||
|
||||
|
||||
def main():
    """Script entry point: parse command line arguments and produce the plots."""
    parser = argparse.ArgumentParser("Load multiple PBA logs and plot combined results for comparison.")
    parser.add_argument("path", help="log file path")
    parser.add_argument("--no-gui", action="store_true", help="show plots")
    parser.add_argument("--save", help="save plots to specified file")

    plot(parser.parse_args())
|
||||
|
||||
|
||||
# allow running this module directly as a script
if __name__ == "__main__":
    main()
|
||||
117
python/basalt/run.py
Normal file
117
python/basalt/run.py
Normal file
@@ -0,0 +1,117 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import os
|
||||
from collections import Mapping
|
||||
|
||||
from .log import Log
|
||||
|
||||
from .util import load_json_if_exists
|
||||
from .util import load_text_if_exists
|
||||
from .util import load_trajectory_tum_if_exists
|
||||
|
||||
|
||||
class Run:
    """Loads files from a single run of an experiment from a folder (config, status, output, log, ...)

    A single run is one invocation of odometry with a specific config on a specific sequence.
    This is meant to be used on directories created with the 'generate-batch-configs' and 'run-all-in' scripts.
    It's best-effort, loading as many of the files as are present.
    """

    def __init__(self, dirpath, seq_name_mapping):
        """Load all known per-run files from `dirpath`; missing files yield None attributes.

        `seq_name_mapping` optionally remaps the inferred sequence name.
        """
        self.dirpath = dirpath

        self.config = load_json_if_exists(os.path.join(dirpath, 'basalt_config.json'))
        self.output = load_text_if_exists(os.path.join(dirpath, 'output.log'))
        self.status = load_text_if_exists(os.path.join(dirpath, 'status.log'))
        self.traj_est = load_trajectory_tum_if_exists(os.path.join(dirpath, 'trajectory.txt'))
        self.traj_gt = load_trajectory_tum_if_exists(os.path.join(dirpath, 'groundtruth.txt'))

        self.log = Log.load(dirpath)

        self.seq_name = self._infer_sequence_name(self.config, dirpath, seq_name_mapping)

        print("loaded {} from '{}'".format(self.seq_name, dirpath))

    def is_imu(self):
        """True if the run was configured to use IMU data (per the batch-run args)."""
        return self.config.batch_run.args["use-imu"] in [1, "1", True, "true"]

    def is_failed(self):
        """True if the run did not complete (no log, no status file, or status not 'Completed')."""
        if self.log is None:
            return True
        # status may be None when status.log is missing; treat that as failed
        # (previously this raised a TypeError on `"..." not in None`)
        return self.status is None or "Completed" not in self.status

    def failure_str(self):
        """Short failure label: '' if ok, 'OOM'/'OOT' when detectable from the output, else 'x'."""
        if not self.is_failed():
            return ""
        if self.output:
            # slurm/cgroup messages allow classifying the failure cause
            if "Some of your processes may have been killed by the cgroup out-of-memory handler" in self.output:
                return "OOM"
            if "DUE TO TIME LIMIT" in self.output:
                return "OOT"
        return "x"

    @staticmethod
    def _infer_sequence_name(config, dirpath, name_mapping):
        """Tries to infer the sequence name from the config, or falls back to the parent folder name"""
        # local import: the module-level `from collections import Mapping` breaks on
        # Python >= 3.10 (Mapping moved to collections.abc) and should be updated too
        from collections.abc import Mapping as _Mapping

        seq_name = ""
        try:
            dataset_type = config.batch_run.args["dataset-type"]
            dataset_path = config.batch_run.args["dataset-path"]
            seq_name = os.path.basename(dataset_path)
            if dataset_type == "euroc":
                if seq_name.startswith("dataset-"):
                    # assume tumvi
                    seq_name = seq_name.replace("dataset-", "tumvi-").split("_")[0]
                else:
                    # assume euroc
                    s = seq_name.split("_")
                    seq_name = "euroc{}{}".format(s[0], s[1])
            elif dataset_type == "kitti":
                # assume kitti
                seq_name = "kitti{}".format(seq_name)
        except Exception:
            # best-effort: a missing or malformed config falls through to the
            # directory-name fallback below
            pass

        # Fallback to detecting the sequence name base on the last component of the parent folder. This is intended
        # to work for run folders created with the 'generate-batch-configs' script, assuming the sequence is the
        # last component in '_batch.combinations'.
        if seq_name == "":
            seq_name = os.path.basename(dirpath).split("_")[-1]

        # optionally remap the sequence name to something else as defined in the experiments config
        if isinstance(name_mapping, _Mapping) and seq_name in name_mapping:
            seq_name = name_mapping[seq_name]

        return seq_name

    @staticmethod
    def is_run_dir(dirpath):
        """Returns True if the folder may be a run directory, based on the present files

        This is intended to be used for auto-detecting run directories in a file tree.
        """
        marker_files = [
            'status.log',
            'slurm-output.log',
            'output.log',
            'stats_all.ubjson',
            'stats_all.json',
            'stats_sums.ubjson',
            'stats_sums.json',
            'stats_vio.ubjson',
            'stats_vio.json',
        ]
        return any(os.path.isfile(os.path.join(dirpath, f)) for f in marker_files)
|
||||
72
python/basalt/util.py
Normal file
72
python/basalt/util.py
Normal file
@@ -0,0 +1,72 @@
|
||||
#
|
||||
# BSD 3-Clause License
|
||||
#
|
||||
# This file is part of the Basalt project.
|
||||
# https://gitlab.com/VladyslavUsenko/basalt.git
|
||||
#
|
||||
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
|
||||
# All rights reserved.
|
||||
#
|
||||
import os
|
||||
import json
|
||||
import platform
|
||||
import subprocess
|
||||
import re
|
||||
import numpy as np
|
||||
|
||||
from munch import munchify
|
||||
|
||||
|
||||
def copy_subdict(d, keys):
    """Return a new dict with only the entries of `d` whose key appears in `keys`.

    Keys that are absent from `d` are silently skipped.
    """
    return {k: d[k] for k in keys if k in d}
|
||||
|
||||
|
||||
def load_json_if_exists(filepath):
    """Parse a JSON file into a Munch, or return None when `filepath` does not exist."""
    if not os.path.isfile(filepath):
        return None
    with open(filepath, 'r') as f:
        return munchify(json.load(f))
|
||||
|
||||
|
||||
def load_text_if_exists(filepath):
    """Return the entire text content of a file, or None when `filepath` does not exist."""
    if not os.path.isfile(filepath):
        return None
    with open(filepath, 'r') as f:
        return f.read()
|
||||
|
||||
|
||||
def load_trajectory_tum(filepath):
    """Load a TUM-format trajectory file and return the Nx3 translation part.

    The first row is a header; each data row is: ts x y z qx qy qz qw.
    The orientation columns are dropped for now.
    """
    rows = np.loadtxt(filepath, delimiter=" ", skiprows=1)
    # columns 1..3 hold the translation
    return rows[:, 1:4]
|
||||
|
||||
|
||||
def load_trajectory_tum_if_exists(filepath):
    """Like load_trajectory_tum, but returns None when `filepath` does not exist."""
    return load_trajectory_tum(filepath) if os.path.isfile(filepath) else None
|
||||
|
||||
|
||||
def os_open_file(filepath):
    """Open a file with the platform's default application (macOS / Windows / Linux)."""
    system = platform.system()
    if system == 'Darwin':
        subprocess.call(('open', filepath))
    elif system == 'Windows':
        os.startfile(filepath)
    else:
        # assume a Linux-like desktop with xdg-utils
        subprocess.call(('xdg-open', filepath))
|
||||
|
||||
|
||||
# key for 'human' sorting
def alphanum(key):
    """Sort key for 'human' (natural) ordering: embedded integers compare numerically.

    E.g. sorted(["seq10", "seq2"], key=alphanum) yields ["seq2", "seq10"].

    NOTE(review): the pattern also matches signed/decimal numbers, but convert()
    only numerifies pure-digit fragments, so e.g. "1.5" still compares as text;
    mixed digit/non-digit positions can make list comparison raise TypeError.
    """

    def convert(text):
        # only unsigned integer fragments become numbers; everything else stays text
        return float(text) if text.isdigit() else text

    # raw string: '\.' in a plain literal is an invalid escape (SyntaxWarning on 3.12+);
    # the runtime pattern bytes are unchanged
    return [convert(c) for c in re.split(r'([-+]?[0-9]*\.?[0-9]*)', key)]
|
||||
Reference in New Issue
Block a user