text stringlengths 81 112k |
|---|
Resize a program's thumbnail to the desired dimension
def resize_program_image(img_url, img_size=300):
'''
Resize a program's thumbnail to the desired dimension
'''
match = re.match(r'.+/(\d+)x(\d+)/.+', img_url)
if not match:
_LOGGER.warning('Could not compute current image resolution of %... |
Get the current progress of the program in %
def get_current_program_progress(program):
'''
Get the current progress of the program in %
'''
now = datetime.datetime.now()
program_duration = get_program_duration(program)
if not program_duration:
return
progress = now - program.get('s... |
Get a program's duration in seconds
def get_program_duration(program):
'''
Get a program's duration in seconds
'''
program_start = program.get('start_time')
program_end = program.get('end_time')
if not program_start or not program_end:
_LOGGER.error('Could not determine program start an... |
Get the remaining time in seconds of a program that is currently on.
def get_remaining_time(program):
'''
Get the remaining time in seconds of a program that is currently on.
'''
now = datetime.datetime.now()
program_start = program.get('start_time')
program_end = program.get('end_time')
if... |
Extract the summary data from a program's detail page
def extract_program_summary(data):
'''
Extract the summary data from a program's detail page
'''
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, 'html.parser')
try:
return soup.find(
'div', {'class': 'episode-syn... |
Set a program's summary
async def async_set_summary(program):
'''
Set a program's summary
'''
import aiohttp
async with aiohttp.ClientSession() as session:
resp = await session.get(program.get('url'))
text = await resp.text()
summary = extract_program_summary(text)
p... |
Get the program data for a channel
async def async_get_program_guide(channel, no_cache=False, refresh_interval=4):
'''
Get the program data for a channel
'''
chan = await async_determine_channel(channel)
now = datetime.datetime.now()
max_cache_age = datetime.timedelta(hours=refresh_interval)
... |
Get the current program info
async def async_get_current_program(channel, no_cache=False):
'''
Get the current program info
'''
chan = await async_determine_channel(channel)
guide = await async_get_program_guide(chan, no_cache)
if not guide:
_LOGGER.warning('Could not retrieve TV progra... |
Get or create publish
def publish(self, distribution, storage=""):
"""
Get or create publish
"""
try:
return self._publishes[distribution]
except KeyError:
self._publishes[distribution] = Publish(self.client, distribution, timestamp=self.timestamp, storag... |
Add mirror or repo to publish
def add(self, snapshot, distributions, component='main', storage=""):
    """
    Add mirror or repo to publish.

    Registers *snapshot* under *component* for every distribution in
    *distributions*, creating the publish via self.publish() if needed.
    """
    for distribution in distributions:
        target = self.publish(distribution, storage=storage)
        target.add(snapshot, component)
Check if publish name matches list of names or regex patterns
def _publish_match(self, publish, names=False, name_only=False):
"""
Check if publish name matches list of names or regex patterns
"""
if names:
for name in names:
if not name_only and isinstance(n... |
fill two dictionaries: one containing all the packages for every repository,
and a second one mapping every component of every publish to its repository
def get_repo_information(config, client, fill_repo=False, components=[]):
""" fill two dictionnaries : one containing all the packages for ... |
Compare two publishes
It expects that other publish is same or older than this one
Return tuple (diff, equal) of dict {'component': ['snapshot']}
def compare(self, other, components=[]):
"""
Compare two publishes
It expects that other publish is same or older than this one
... |
Find this publish on remote
def _get_publish(self):
"""
Find this publish on remote
"""
publishes = self._get_publishes(self.client)
for publish in publishes:
if publish['Distribution'] == self.distribution and \
publish['Prefix'].replace("/", "_"... |
Serialize publish in YAML
def save_publish(self, save_path):
"""
Serialize publish in YAML
"""
timestamp = time.strftime("%Y%m%d%H%M%S")
yaml_dict = {}
yaml_dict["publish"] = self.name
yaml_dict["name"] = timestamp
yaml_dict["components"] = []
ya... |
Restore publish from config file
def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
"""
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
... |
Load publish info from remote
def load(self):
"""
Load publish info from remote
"""
publish = self._get_publish()
self.architectures = publish['Architectures']
for source in publish['Sources']:
component = source['Component']
snapshot = source['Na... |
Return package refs for given components
def get_packages(self, component=None, components=[], packages=None):
"""
Return package refs for given components
"""
if component:
components = [component]
package_refs = []
for snapshot in self.publish_snapshots:
... |
Return tuple of architecture, package_name, version, id
def parse_package_ref(self, ref):
    """
    Return tuple of (architecture, package_name, version, id).

    *ref* is expected to contain four space-separated fields.  Returns
    None when *ref* is empty/None, or when it does not contain enough
    fields to split into four groups (previously this raised
    AttributeError on a failed match).
    """
    if not ref:
        return None
    # Raw string: the old non-raw literal '(.*)\ (.*)...' uses the
    # invalid "\ " string escape, which modern Python flags with a
    # SyntaxWarning.  The regex itself is unchanged.
    parsed = re.match(r'(.*)\ (.*)\ (.*)\ (.*)', ref)
    if parsed is None:
        return None
    return parsed.groups()
Add snapshot of component to publish
def add(self, snapshot, component='main'):
    """
    Add snapshot of component to publish.

    Appends *snapshot* to the component's snapshot list, creating the
    list on first use.
    """
    self.components.setdefault(component, []).append(snapshot)
Find snapshot on remote by name or regular expression
def _find_snapshot(self, name):
"""
Find snapshot on remote by name or regular expression
"""
remote_snapshots = self._get_snapshots(self.client)
for remote in reversed(remote_snapshots):
if remote["Name"] == name... |
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
def _get_source_snapshots(self, snapshot, fallback_self=False):
"""
Get list of source snapshot names of given snapshot
TODO: we have to decide by description at the moment
... |
Create component snapshots by merging other snapshots of same component
def merge_snapshots(self):
"""
Create component snapshots by merging other snapshots of same component
"""
self.publish_snapshots = []
for component, snapshots in self.components.items():
if len(... |
Prints the time func takes to execute.
def timing_decorator(func):
"""Prints the time func takes to execute."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Wrapper for printing execution time.
Parameters
----------
print_time: bool, optional
... |
Saves and/or loads func output (must be picklable).
def save_load_result(func):
"""Saves and/or loads func output (must be picklable)."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Default behavior is no saving and loading. Specify save_name to save
and load.
... |
Saves object with pickle.
Parameters
----------
data: anything picklable
Object to save.
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
overwrite existing: bool, optional
When the save path already contains ... |
Load data with pickle.
Parameters
----------
name: str
Path to save to (includes dir, excludes extension).
extension: str, optional
File extension.
Returns
-------
Contents of file path.
def pickle_load(name, extension='.pkl'):
"""Load data with pickle.
Parameters... |
Bootstrap resamples threads of nested sampling run, returning a new
(resampled) nested sampling run.
Get the individual threads for a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
threads: None or list of numpy arrays, optional
ninit_sep: b... |
Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling p... |
Uses bootstrap resampling to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling p... |
Uses bootstrap resampling to calculate credible intervals on the
distribution of sampling errors (the uncertainty on the calculation)
for a single nested sampling run.
For more details about bootstrap resampling for estimating sampling
errors see 'Sampling errors in nested sampling parameter estimation... |
Uses the 'simulated weights' method to calculate an estimate of the
standard deviation of the distribution of sampling errors (the
uncertainty on the calculation) for a single nested sampling run.
Note that the simulated weights method is not accurate for parameter
estimation calculations.
For mor... |
r"""Estimates variation of results due to implementation-specific
effects. See 'nestcheck: diagnostic tests for nested sampling calculations'
(Higson et al. 2019) for more details.
Uncertainties on the output are calculated numerically using the fact
that (from central limit theorem) our uncertainties ... |
Helper function for parallelising thread_values_df.
Parameters
----------
ns_run: dict
Nested sampling run dictionary.
estimator_list: list of functions
Returns
-------
vals_array: numpy array
Array of estimator values for each thread.
Has shape (len(estimator_list)... |
Applies statistical_distances to each unique pair of distribution
samples in dist_list.
Parameters
----------
dist_list: list of 1d arrays
earth_mover_dist: bool, optional
Passed to statistical_distances.
energy_dist: bool, optional
Passed to statistical_distances.
Returns
... |
Compute measures of the statistical distance between samples.
Parameters
----------
samples1: 1d array
samples2: 1d array
earth_mover_dist: bool, optional
Whether or not to compute the Earth mover's distance between the
samples.
energy_dist: bool, optional
Whether or not... |
Generate dummy data for a single nested sampling thread.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1... |
Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Pa... |
Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
... |
Get PyPI long description from the .rst file.
def get_long_description():
    """Get PyPI long description from the .rst file.

    Reads the file relative to the directory returned by
    get_package_dir() and returns its full contents.
    """
    rst_path = os.path.join(get_package_dir(), '.pypi_long_desc.rst')
    with open(rst_path) as readme_file:
        return readme_file.read()
Get single-source __version__.
def get_version():
    """Get single-source __version__.

    Reads nestcheck/_version.py as text (no import) and strips the
    assignment boilerplate and quotes, leaving the bare version string.
    """
    version_path = os.path.join(get_package_dir(), 'nestcheck/_version.py')
    with open(version_path) as ver_file:
        raw = ver_file.read()
    return raw.strip().replace('__version__ = ', '').replace('\'', '')
Plot the allocations of live points as a function of logX for the input
sets of nested sampling runs of the type used in the dynamic nested
sampling paper (Higson et al. 2019).
Plots also include analytically calculated distributions of relative
posterior mass and relative posterior mass remaining.
... |
Plots kde estimates of distributions of samples in each cell of the
input pandas DataFrame.
There is one subplot for each dataframe column, and on each subplot there
is one kde line.
Parameters
----------
df: pandas data frame
Each cell must contain a 1d numpy array of samples.
xli... |
Creates posterior distributions and their bootstrap error functions for
input runs and estimators.
For a more detailed description and some example use cases, see 'nestcheck:
diagnostic tests for nested sampling calculations' (Higson et al. 2019).
Parameters
----------
run_list: dict or list o... |
Creates diagrams of a nested sampling run's evolution as it iterates
towards higher likelihoods, expressed as a function of log X, where X(L) is
the fraction of the prior volume with likelihood greater than some value L.
For a more detailed description and some example use cases, see 'nestcheck:
diagno... |
Helper function for plotting uncertainties on posterior distributions
using bootstrap resamples and the fgivenx module. Used by bs_param_dists
and param_logx_diagram.
Parameters
----------
run: dict
Nested sampling run to plot.
fthetas: list of functions
Quantities to plot. Each... |
Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths.
def alternate_helper(x, alt_samps, func=None):
"""Helper function for making fgivenx plots of functions with 2 array
arguments of variable lengths."""
alt_samps = alt_samps[~np.isnan(alt_samps)]
arg1 =... |
Gaussian kde with weighted samples (1d only). Uses Scott bandwidth
factor.
When all the sample weights are equal, this is equivalent to
kde = scipy.stats.gaussian_kde(theta)
return kde(x)
When the weights are not all equal, we compute the effective number
of samples as the information content... |
Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to... |
Helper function for plot_run_nlive.
Try returning the average of dict_in[key] and, if this does not work or if
key is None, return average of whole dict.
Parameters
----------
dict_in: dict
Values should be arrays.
key: str
Returns
-------
average: float
def average_by_ke... |
Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.par... |
Wrapper which applies process_func and handles some common errors so one
bad run does not spoil the whole batch.
Useful errors to handle include:
OSError: if you are not sure if all the files exist
AssertionError: if some of the many assertions fail for known reasons;
for example is there are occa... |
Loads data from a PolyChord run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires PolyChord version v1.13 or later and the setting
write_de... |
Loads data from a MultiNest run into the nestcheck dictionary format for
analysis.
N.B. producing required output file containing information about the
iso-likelihood contours within which points were sampled (where they were
"born") requires MultiNest version 3.11 or later.
Parameters
--------... |
Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the ... |
Reads a PolyChord <root>.stats output file and returns the information
contained in a dictionary.
Parameters
----------
file_root: str
Root for run output file names (PolyChord file_root setting).
base_dir: str
Directory containing data (PolyChord base_dir setting).
Returns
... |
Convert an array of nested sampling dead and live points of the type
produced by PolyChord and MultiNest into a nestcheck nested sampling run
dictionary.
Parameters
----------
samples: 2d numpy array
Array of dead points and any remaining live points at termination.
Has #parameters ... |
Maps the iso-likelihood contours on which points were born to the
index of the dead point on this contour.
MultiNest and PolyChord use different values to identify the initial live
points which were sampled from the whole prior (PolyChord uses -1e+30
and MultiNest -0.179769313486231571E+309). However in... |
Creates a random sample from choices without replacement, subject to the
condition that each element of the output is greater than the corresponding
element of the condition array.
condition should be in ascending order.
def sample_less_than_condition(choices_in, condition):
"""Creates a random sample... |
Divides a nested sampling run into threads, using info on the indexes
at which points were sampled. See "Sampling errors in nested sampling
parameter estimation" (Higson et al. 2018) for more information.
Parameters
----------
birth_inds: 1d numpy array
Indexes of the iso-likelihood contour... |
Apply function to iterable with parallel map, and hence returns
results in order. functools.partial is used to freeze func_pre_args and
func_kwargs, meaning that the iterable argument must be the last positional
argument.
Roughly equivalent to
>>> [func(*func_pre_args, x, **func_kwargs) for x in a... |
Apply function to iterable with parallelisation and a tqdm progress bar.
Roughly equivalent to
>>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in
arg_iterable]
but will **not** necessarily return results in input order.
Parameters
----------
func: function
Func... |
If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
-------
progress: function
def select_tqdm():
"""If running in a jupyter notebook, then returns tqdm_notebook.
Otherwise returns a regular tqdm progress bar.
Returns
--... |
Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This function converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_array: 2d numpy array
names: list of str
Names for the output df... |
Make a panda data frame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as n... |
Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
... |
Make a panda data frame of the mean and std devs of an array of results,
including the uncertainties on the values.
This is similar to pandas.DataFrame.describe but also includes estimates of
the numerical uncertainties.
The output DataFrame has multiindex levels:
'calculation type': mean and st... |
r"""Calculated data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the... |
Transform efficiency gain data frames output by nestcheck into the
format shown in the dynamic nested sampling paper (Higson et al. 2019).
Parameters
----------
eff_gain_df: pandas DataFrame
DataFrame of the from produced by efficiency_gain_df.
Returns
-------
paper_df: pandas Data... |
r"""Calculates efficiency gain for a new method compared to a base method.
Given the variation in repeated calculations' results using the two
methods, the efficiency gain is:
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
Th... |
r"""Calculate the root mean squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the... |
r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
--... |
Calculates values of list of quantities (such as the Bayesian evidence
or mean of parameters) for a single nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
estimator_list: list of functions for ... |
Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
det... |
Converts an array of information about samples back into a nested sampling
run dictionary (see data_processing module docstring for more details).
N.B. the output dict only contains the following keys: 'logl',
'thread_label', 'nlive_array', 'theta'. Any other keys giving additional
information about th... |
Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array contain... |
Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must contain any repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
... |
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the o... |
r"""Calculates the log posterior weights of the samples (using logarithms
to avoid overflow errors with very large or small values).
Uses the trapezium rule such that the weight of point i is
.. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2
Parameters
----------
ns_run: dict
Nest... |
Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
d... |
r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i-1} / X_{i}
is distributed as the largest of :math:`n_i` uniform random variables
between 1 and 0, where :math:`n_i` is the local number of live points... |
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (b)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
... |
Checks a nestcheck format nested sampling run dictionary has the
expected properties (see the data_processing module docstring for more
details).
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
See check_ns_run_logls docstring.
dup_wa... |
Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
P... |
Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise an AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if the... |
Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
def check_ns_run_threads(run):
"""Check thread labels and thread_min_max ha... |
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
R... |
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
... |
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ... |
Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Pa... |
One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
s... |
Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
s... |
Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
... |
One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samp... |
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
def get_latex_name(func_in, **kwarg... |
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d... |
Writes PolyChord output files corresponding to the input nested sampling
run. The file root is
.. code-block:: python
root = os.path.join(run['output']['base_dir'],
run['output']['file_root'])
Output files which can be made with this function (see the PolyChord
doc... |
Converts input run into an array of the format of a PolyChord
<root>_dead-birth.txt file. Note that this in fact includes live points
remaining at termination as well as dead points.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
... |
Writes a dummy PolyChord format .stats file for tests functions for
processing stats files. This is written to:
base_dir/file_root.stats
Also returns the data in the file as a dict for comparison.
Parameters
----------
run_output_dict: dict
Output information to write to .stats file. ... |
Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so results
from some run number will not necessarily correspond to that number run in
run_list).
Parameters
----------
run_lis... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.