Array of IoU for each (non ignored) class
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
... |
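The body above is cut off; below is a minimal sketch of how the per-class IoU loop typically continues, assuming PyTorch tensors and the `mean` helper defined later in this file. The exact union convention and return scaling are assumptions, not copied from this file.
import torch

def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """Array of IoU for each (non-ignored) class -- sketch, not the file's exact body."""
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        valid = torch.ones_like(label, dtype=torch.bool) if ignore is None else (label != ignore)
        iou = []
        for i in range(C):
            if i == ignore:                                   # skip the void class entirely
                continue
            intersection = ((label == i) & (pred == i) & valid).sum().item()
            union = (((label == i) | (pred == i)) & valid).sum().item()
            iou.append(EMPTY if union == 0 else intersection / union)
        ious.append(iou)
    return [mean(col) for col in zip(*ious)]                  # average across images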
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
def lovasz_hinge(logits, labels, per_image=True, ... |
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each... |
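The flat variant above is truncated; the sketch below follows the published Lovász-hinge formulation (sorted hinge errors weighted by the gradient of the Lovász extension of the Jaccard loss). The helper name lovasz_grad and the exact tensor handling are assumptions based on the reference algorithm, not copied from this file.
import torch
import torch.nn.functional as F

def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors."""
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard

def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened logits/labels (sketch)."""
    if len(labels) == 0:
        return logits.sum() * 0.              # only void pixels: zero loss, valid gradients
    signs = 2. * labels.float() - 1.
    errors = 1. - logits * signs              # hinge errors
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    grad = lovasz_grad(labels[perm])
    return torch.dot(F.relu(errors_sorted), grad)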
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is ... |
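A plausible completion of the truncated branch, assuming the usual convention of masking out the void label:
def flatten_binary_scores(scores, labels, ignore=None):
    """Flatten predictions in the batch (binary case), removing labels equal to 'ignore' (sketch)."""
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = (labels != ignore)                # keep only non-void positions
    return scores[valid], labels[valid]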
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Vari... |
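A short sketch of how binary_xloss is commonly built on top of the flattening helper above; using PyTorch's built-in binary_cross_entropy_with_logits is an assumption (the file may use its own numerically stable BCE helper instead).
import torch.nn.functional as F

def binary_xloss(logits, labels, ignore=None):
    """Binary cross entropy on the non-void pixels (sketch)."""
    logits, labels = flatten_binary_scores(logits, labels, ignore)
    return F.binary_cross_entropy_with_logits(logits, labels.float())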
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
per_image: compute the loss per image instead ... |
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
def lovasz_softmax_flat(probas, labels, only_present=False):
... |
Flattens predictions in the batch
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
ret... |
Cross entropy loss
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255) |
nanmean compatible with generators.
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(np.isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
ra... |
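The tail of mean() is cut off; here is a runnable sketch of a generator-friendly nanmean with the same signature, using Python 3's filterfalse in place of ifilterfalse.
import math
from itertools import filterfalse

def mean(l, ignore_nan=False, empty=0):
    """nanmean compatible with generators (sketch)."""
    l = iter(l)
    if ignore_nan:
        l = filterfalse(math.isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):              # n tracks the running count
        acc += v
    return acc if n == 1 else acc / n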
main loop logic for trial keeper
def main_loop(args):
'''main loop logic for trial keeper'''
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
stdout_file = open(STDOUT_FULL_PATH, 'a+')
stderr_file = open(STDERR_FULL_PATH, 'a+')
trial_keeper_syslogger = RemoteLogger(args.nniman... |
Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
def forward(self, x):
    '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]'''
    N, C, H, W = x.size()
    g = self.groups
    # integer division so view() receives int dimensions
    return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W) |
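A tiny usage check of the reshape/permute trick above, with an assumed 8-channel input and 2 groups (values chosen only for illustration):
import torch

x = torch.arange(8.).view(1, 8, 1, 1)                 # channels 0..7
g = 2
y = x.view(1, g, 8 // g, 1, 1).permute(0, 2, 1, 3, 4).contiguous().view(1, 8, 1, 1)
print(y.view(-1).tolist())                            # [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0]
The channels end up interleaved across the two groups, which is exactly what the shuffle is meant to achieve.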
return embedding for a specific file by given file path.
def load_embedding(path):
'''
return embedding for a specific file by given file path.
'''
EMBEDDING_DIM = 300
embedding_dict = {}
with open(path, 'r', encoding='utf-8') as file:
pairs = [line.strip('\r\n').split() for line in fil... |
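The parsing loop is truncated; a sketch of a typical completion that keeps only well-formed word/vector lines (the length check against EMBEDDING_DIM + 1 is an assumption):
def load_embedding(path):
    '''return embedding for a specific file by given file path (sketch).'''
    EMBEDDING_DIM = 300
    embedding_dict = {}
    with open(path, 'r', encoding='utf-8') as file:
        pairs = [line.strip('\r\n').split() for line in file.readlines()]
        for pair in pairs:
            if len(pair) == EMBEDDING_DIM + 1:        # word followed by 300 floats
                embedding_dict[pair[0]] = [float(x) for x in pair[1:]]
    return embedding_dict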
Generate json by prediction.
def generate_predict_json(position1_result, position2_result, ids, passage_tokens):
'''
Generate json by prediction.
'''
predict_len = len(position1_result)
logger.debug('total prediction num is %s', str(predict_len))
answers = {}
for i in range(predict_len):
... |
Generate data
def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
'''
Generate data
'''
global root_path
qp_pairs = data.load_from_file(path=path, is_training=is_training)
tokenized_sent = 0
# qp_pairs = qp_pairs[:1000]
for qp_pair in qp_pairs:
tokenized... |
Calculate the f1 score.
def f1_score(prediction, ground_truth):
'''
Calculate the f1 score.
'''
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same =... |
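The remainder of f1_score is cut off; the sketch below is the standard SQuAD token-overlap F1, reusing the normalize_answer helper the visible lines already call.
from collections import Counter

def f1_score(prediction, ground_truth):
    '''Calculate the f1 score (sketch).'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = num_same / len(prediction_tokens)
    recall = num_same / len(ground_truth_tokens)
    return 2 * precision * recall / (precision + recall)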
Evaluate function.
def _evaluate(dataset, predictions):
'''
Evaluate function.
'''
f1_result = exact_match = total = 0
count = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa_pair in paragraph['qas']:
total += 1
if qa_... |
Evaluate.
def evaluate(data_file, pred_file):
'''
Evaluate.
'''
expected_version = '1.1'
with open(data_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json['version'] != expected_version:
print('Evaluation expects v-' + expected_version +
... |
Evaluate with predictions.
def evaluate_with_predictions(data_file, predictions):
'''
Evaluate with predictions.
'''
expected_version = '1.1'
with open(data_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json['version'] != expected_version:
pr... |
Send command to Training Service.
command: CommandType object.
data: string payload.
def send(command, data):
"""Send command to Training Service.
command: CommandType object.
data: string payload.
"""
global _lock
try:
_lock.acquire()
data = data.encode('utf8')
... |
Receive a command from Training Service.
Returns a tuple of command (CommandType) and payload (str)
def receive():
"""Receive a command from Training Service.
Returns a tuple of command (CommandType) and payload (str)
"""
header = _in_file.read(8)
logging.getLogger(__name__).debug('Received com... |
Change json to search space in hyperopt.
Parameters
----------
in_x : dict/list/str/int/float
The part of json.
name : str
name could be ROOT, TYPE, VALUE or INDEX.
def json2space(in_x, name=ROOT):
"""
Change json to search space in hyperopt.
Parameters
----------
... |
Change json to parameters.
def json2parameter(in_x, parameter, name=ROOT):
"""
Change json to parameters.
"""
out_y = copy.deepcopy(in_x)
if isinstance(in_x, dict):
if TYPE in in_x.keys():
_type = in_x[TYPE]
name = name + '-' + _type
if _type == 'choice':... |
change parameters in NNI format to parameters in hyperopt format (this function also supports nested dicts).
For example, receive parameters like:
{'dropout_rate': 0.8, 'conv_size': 3, 'hidden_size': 512}
Will change to format in hyperopt, like:
{'dropout_rate': 0.8, 'conv_size': {'_index': 1, '_v... |
Delete index information from params
def _split_index(params):
"""
Delete index information from params
"""
if isinstance(params, list):
return [params[0], _split_index(params[1])]
elif isinstance(params, dict):
if INDEX in params.keys():
return _split_index(params[VALUE... |
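A sketch of how the recursion above usually finishes, assuming INDEX and VALUE are the module-level key constants this file already uses:
def _split_index(params):
    """Delete index information from params (sketch)."""
    if isinstance(params, list):
        return [params[0], _split_index(params[1])]
    elif isinstance(params, dict):
        if INDEX in params.keys():
            return _split_index(params[VALUE])
        return {key: _split_index(value) for key, value in params.items()}
    else:
        return params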
Parameters
----------
algorithm_name : str
algorithm_name includes "tpe", "random_search" and "anneal"
def _choose_tuner(self, algorithm_name):
"""
Parameters
----------
algorithm_name : str
algorithm_name includes "tpe", "random_search" and anneal... |
Update search space definition in tuner by search_space in parameters.
Will be called when first setting up the experiment or updating the search space in WebUI.
Parameters
----------
search_space : dict
def update_search_space(self, search_space):
"""
Update search space definition in ... |
Returns a set of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
params : dict
def generate_parameters(self, parameter_id):
"""
Returns a set of trial (hyper-)parameters, as a serializable obj... |
Record an observation of the objective function
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key.
value is final metrics of the trial.
def receive_trial_result(self, parameter_id... |
Unpack the idxs-vals format into the list of dictionaries that is
`misc`.
Parameters
----------
idxs_map : dict
idxs_map is a dictionary of id->id mappings so that the misc['idxs'] can
contain different numbers than the idxs argument.
def miscs_update_idxs_vals(self... |
get suggestion from hyperopt
Parameters
----------
random_search : bool
flag to indicate random search or not (default: {False})
Returns
----------
total_params : dict
parameter suggestion
def get_suggestion(self, random_search=False):
"... |
Import additional data for tuning
Parameters
----------
data:
a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
def import_data(self, data):
"""Import additional data for tuning
Parameters
----------
data:
... |
"Lowest Mu" acquisition function
def next_hyperparameter_lowest_mu(fun_prediction,
fun_prediction_args,
x_bounds, x_types,
minimize_starting_points,
minimize_constraints_fun=None):
... |
Calculate the lowest mu
def _lowest_mu(x, fun_prediction, fun_prediction_args,
x_bounds, x_types, minimize_constraints_fun):
'''
Calculate the lowest mu
'''
# This is only for step-wise optimization
x = lib_data.match_val_type(x, x_bounds, x_types)
mu = sys.maxsize
if (minim... |
Build char embedding network for the QA model.
def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths):
"""Build char embedding network for the QA model."""
max_char_length = self.cfg.max_char_length
inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
... |
data: a dict received from nni_manager, which contains:
- 'parameter_id': id of the trial
- 'value': metric value reported by nni.report_final_result()
- 'type': report type, support {'FINAL', 'PERIODICAL'}
def handle_report_metric_data(self, data):
"""
data: a... |
data: it has three keys: trial_job_id, event, hyper_params
- trial_job_id: the id generated by training service
- event: the job's state
- hyper_params: the hyperparameters generated and returned by tuner
def handle_trial_end(self, data):
"""
data: it has three ke... |
Call tuner to process final results
def _handle_final_metric_data(self, data):
"""Call tuner to process final results
"""
id_ = data['parameter_id']
value = data['value']
if id_ in _customized_parameter_ids:
self.tuner.receive_customized_trial_result(id_, _trial_para... |
Call assessor to process intermediate results
def _handle_intermediate_metric_data(self, data):
"""Call assessor to process intermediate results
"""
if data['type'] != 'PERIODICAL':
return
if self.assessor is None:
return
trial_job_id = data['trial_job_i... |
Send last intermediate result as final result to tuner in case the
trial is early stopped.
def _earlystop_notify_tuner(self, data):
"""Send last intermediate result as final result to tuner in case the
trial is early stopped.
"""
_logger.debug('Early stop notify tuner data: [%s]... |
parse received msgs into global variables
def parse_rev_args(receive_msg):
""" parse received msgs into global variables
"""
global trainloader
global testloader
global net
# Loading Data
logger.debug("Preparing data..")
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
y_t... |
train and eval the model
def train_eval():
""" train and eval the model
"""
global trainloader
global testloader
global net
(x_train, y_train) = trainloader
(x_test, y_test) = testloader
# train procedure
net.fit(
x=x_train,
y=y_train,
batch_size=args.batc... |
Run on end of each epoch
def on_epoch_end(self, epoch, logs=None):
"""
Run on end of each epoch
"""
if logs is None:
logs = dict()
logger.debug(logs)
nni.report_intermediate_result(logs["val_acc"]) |
Create a full id for a specific bracket's hyperparameter configuration
Parameters
----------
brackets_id: int
brackets id
brackets_curr_decay:
brackets curr decay
increased_id: int
increased id
Returns
-------
int
params id
def create_bracket_parame... |
Randomly generate values for hyperparameters from hyperparameter space i.e., x.
Parameters
----------
ss_spec:
hyperparameter space
random_state:
random operator to generate random values
Returns
-------
Parameter:
Parameters in this experiment
def json2paramat... |
return the values of n and r for the next round
def get_n_r(self):
"""return the values of n and r for the next round"""
return math.floor(self.n / self.eta**self.i + _epsilon), math.floor(self.r * self.eta**self.i + _epsilon) |
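For intuition, here is a tiny worked example of these formulas with assumed bracket values (eta=3, n=81, r=1 are standard Hyperband numbers, not taken from this file):
import math

eta, n, r, _epsilon = 3, 81, 1, 1e-6
for i in range(5):
    print(math.floor(n / eta**i + _epsilon), math.floor(r * eta**i + _epsilon))
# (81, 1) -> (27, 3) -> (9, 9) -> (3, 27) -> (1, 81): fewer configs, more resource each round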
i means the ith round. Increase i by 1
def increase_i(self):
"""i means the ith round. Increase i by 1"""
self.i += 1
if self.i > self.bracket_id:
self.no_more_trial = True |
update trial's latest result with its sequence number, e.g., epoch number or batch number
Parameters
----------
i: int
the ith round
parameter_id: int
the id of the trial/parameter
seq: int
sequence number, e.g., epoch number or batch ... |
If the trial is finished and the corresponding round (i.e., i) has all its trials finished,
it will choose the top k trials for the next round (i.e., i+1)
Parameters
----------
i: int
the ith round
def inform_trial_end(self, i):
"""If the trial is finished and the c... |
Randomly generate num hyperparameter configurations from search space
Parameters
----------
num: int
the number of hyperparameter configurations
Returns
-------
list
a list of hyperparameter configurations. Format: [[key1, value1], [key2,... |
after generating one round of hyperconfigs, this function records the generated hyperconfigs,
creates a dict to record the performance when those hyperconfigs are running, sets the number of finished configs
in this round to 0, and increases the round number.
Parameters
----------
... |
get one trial job, i.e., one hyperparameter configuration.
def _request_one_trial_job(self):
"""get one trial job, i.e., one hyperparameter configuration."""
if not self.generated_hyper_configs:
if self.curr_s < 0:
self.curr_s = self.s_max
_logger.debug('create a... |
data: JSON object, which is search space
Parameters
----------
data: dict
the search space in JSON form
def handle_update_search_space(self, data):
"""data: JSON object, which is search space
Parameters
----------
data: dict
the search space ... |
Parameters
----------
data: dict()
it has three keys: trial_job_id, event, hyper_params
trial_job_id: the id generated by training service
event: the job's state
hyper_params: the hyperparameters (a string) generated and returned by tuner
def handle_trial... |
Parameters
----------
data:
it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.
Raises
------
ValueError
Data type not supported
def handle_report_metric_data(self, data):
"""
Parameters
... |
Returns a set of trial graph config, as a serializable object.
parameter_id : int
def generate_parameters(self, parameter_id):
"""Returns a set of trial graph config, as a serializable object.
parameter_id : int
"""
if len(self.population) <= 0:
logger.debug("the len... |
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
par... |
Generates a CNN.
Args:
model_len: An integer. Number of convolutional layers.
model_width: An integer. Number of filters for the convolutional layers.
Returns:
An instance of the class Graph. Represents the neural architecture graph of the generated model.
def genera... |
Generates a Multi-Layer Perceptron.
Args:
model_len: An integer. Number of hidden layers.
model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the
number of nodes in each hidden layer. If it is an integer, all hidden layers h... |
Generate search space from Python source code.
Return a serializable search space object.
code_dir: directory path of source files (str)
def generate_search_space(code_dir):
"""Generate search space from Python source code.
Return a serializable search space object.
code_dir: directory path of sour... |
Expand annotations in user code.
Return dst_dir if annotation detected; return src_dir if not.
src_dir: directory path of user code (str)
dst_dir: directory to place generated files (str)
def expand_annotations(src_dir, dst_dir):
"""Expand annotations in user code.
Return dst_dir if annotation dete... |
Generate send stdout url
def gen_send_stdout_url(ip, port):
'''Generate send stdout url'''
return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) |
Generate send version url
def gen_send_version_url(ip, port):
'''Generate send version url'''
return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, VERSION_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) |
validate if a digit is valid
def validate_digit(value, start, end):
'''validate if a digit is valid'''
if not str(value).isdigit() or int(value) < start or int(value) > end:
raise ValueError('%s must be a digit from %s to %s' % (value, start, end)) |
validate if the dispatcher of the experiment supports importing data
def validate_dispatcher(args):
'''validate if the dispatcher of the experiment supports importing data'''
nni_config = Config(get_config_filename(args)).get_config('experimentConfig')
if nni_config.get('tuner') and nni_config['tuner'].get... |
load search space content
def load_search_space(path):
'''load search space content'''
content = json.dumps(get_json_content(path))
if not content:
raise ValueError('searchSpace file should not be empty')
return content |
call restful server to update experiment profile
def update_experiment_profile(args, key, value):
'''call restful server to update experiment profile'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_port)
... |
import additional data to the experiment
def import_data(args):
'''import additional data to the experiment'''
validate_file(args.filename)
validate_dispatcher(args)
content = load_search_space(args.filename)
args.port = get_experiment_port(args)
if args.port is not None:
if import_data... |
call restful server to import data to the experiment
def import_data_to_restful_server(args, content):
'''call restful server to import data to the experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_... |
check key type
def setType(key, type):
'''check key type'''
return And(type, error=SCHEMA_TYPE_ERROR % (key, type.__name__)) |
check choice
def setChoice(key, *args):
'''check choice'''
return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args))) |
check number range
def setNumberRange(key, keyType, start, end):
'''check number range'''
return And(
And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
) |
keras dropout layer.
def keras_dropout(layer, rate):
'''keras dropout layer.
'''
from keras import layers
input_dim = len(layer.input.shape)
if input_dim == 2:
return layers.SpatialDropout1D(rate)
elif input_dim == 3:
return layers.SpatialDropout2D(rate)
elif input_dim == ... |
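The final branches are truncated; a plausible completion mirroring the visible pattern (one spatial-dropout variant per input rank, with a plain Dropout fallback -- an assumption, not copied from this file):
def keras_dropout(layer, rate):
    '''keras dropout layer (sketch).'''
    from keras import layers
    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)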
real keras layer.
def to_real_keras_layer(layer):
''' real keras layer.
'''
from keras import layers
if is_layer(layer, "Dense"):
return layers.Dense(layer.units, input_shape=(layer.input_units,))
if is_layer(layer, "Conv"):
return layers.Conv2D(
layer.filters,
... |
judge the layer type.
Returns:
boolean -- True or False
def is_layer(layer, layer_type):
'''judge the layer type.
Returns:
boolean -- True or False
'''
if layer_type == "Input":
return isinstance(layer, StubInput)
elif layer_type == "Conv":
return isinstance(lay... |
get layer description.
def layer_description_extractor(layer, node_to_id):
'''get layer description.
'''
layer_input = layer.input
layer_output = layer.output
if layer_input is not None:
if isinstance(layer_input, Iterable):
layer_input = list(map(lambda x: node_to_id[x], layer... |
build layer from description.
def layer_description_builder(layer_information, id_to_node):
'''build layer from description.
'''
# pylint: disable=W0123
layer_type = layer_information[0]
layer_input_ids = layer_information[1]
if isinstance(layer_input_ids, Iterable):
layer_input = list... |
get layer width.
def layer_width(layer):
'''get layer width.
'''
if is_layer(layer, "Dense"):
return layer.units
if is_layer(layer, "Conv"):
return layer.filters
raise TypeError("The layer should be either Dense or Conv layer.") |
Define parameters.
def define_params(self):
'''
Define parameters.
'''
input_dim = self.input_dim
hidden_dim = self.hidden_dim
prefix = self.name
self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1),
... |
Build the GRU cell.
def build(self, x, h, mask=None):
'''
Build the GRU cell.
'''
xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
hu = tf.split(tf.matmul(h, self.U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = tf.ta... |
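The candidate-state line above is cut off; here is a sketch of the whole GRU update under one common gating convention (the z/(1-z) orientation and the mask handling are assumptions):
import tensorflow as tf

def build(self, x, h, mask=None):
    '''Build the GRU cell (sketch of the truncated body).'''
    xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1)
    hu = tf.split(tf.matmul(h, self.U), 3, 1)
    r = tf.sigmoid(xw[0] + hu[0])                # reset gate
    z = tf.sigmoid(xw[1] + hu[1])                # update gate
    h1 = tf.tanh(xw[2] + r * hu[2])              # candidate state
    next_h = z * h1 + (1.0 - z) * h              # blend candidate with previous state
    if mask is not None:
        next_h = next_h * mask + h * (1 - mask)  # keep old state at padded positions
    return next_h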
Build GRU sequence.
def build_sequence(self, xs, masks, init, is_left_to_right):
'''
Build GRU sequence.
'''
states = []
last = init
if is_left_to_right:
for i, xs_i in enumerate(xs):
h = self.build(xs_i, last, masks[i])
states... |
conv2d returns a 2d convolution layer with full stride.
def conv2d(x_input, w_matrix):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME') |
max_pool downsamples a feature map by 2X.
def max_pool(x_input, pool_size):
"""max_pool downsamples a feature map by 2X."""
return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
strides=[1, pool_size, pool_size, 1], padding='SAME') |
Main function, build mnist network, run and send result to NNI.
def main(params):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist = download_mnist_retry(params['data_dir'])
print('Mnist download data done.')
logger.debug('Mnist download data done.... |
Build the whole neural network for the QA model.
def build_net(self, is_training):
"""Build the whole neural network for the QA model."""
cfg = self.cfg
with tf.device('/cpu:0'):
word_embed = tf.get_variable(
name='word_embed', initializer=self.embed, dtype=tf.float3... |
call check_output command to read content from a file
def check_output_command(file_path, head=None, tail=None):
'''call check_output command to read content from a file'''
if os.path.exists(file_path):
if sys.platform == 'win32':
cmds = ['powershell.exe', 'type', file_path]
if ... |
kill command
def kill_command(pid):
'''kill command'''
if sys.platform == 'win32':
process = psutil.Process(pid=pid)
process.send_signal(signal.CTRL_BREAK_EVENT)
else:
cmds = ['kill', str(pid)]
call(cmds) |
install python package from pip
def install_package_command(package_name):
'''install python package from pip'''
#TODO refactor python logic
if sys.platform == "win32":
cmds = 'python -m pip install --user {0}'.format(package_name)
else:
cmds = 'python3 -m pip install --user {0}'.format... |
install requirements.txt
def install_requirements_command(requirements_path):
'''install requirements.txt'''
cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'
#TODO refactor python logic
if sys.platform == "win32":
cmds = cmds.format('python')
else:
... |
Get parameters from command line
def get_params():
''' Get parameters from command line '''
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory")
parser.add_argument("--dropout_rate", type=float, default=0.5, he... |
Building network for mnist
def build_network(self):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for ... |
get the startTime and endTime of an experiment
def get_experiment_time(port):
'''get the startTime and endTime of an experiment'''
response = rest_get(experiment_url(port), REST_TIME_OUT)
if response and check_response(response):
content = convert_time_stamp_to_date(json.loads(response.text))
... |
get the status of an experiment
def get_experiment_status(port):
'''get the status of an experiment'''
result, response = check_rest_server_quick(port)
if result:
return json.loads(response.text).get('status')
return None |
Update the experiment status in config file
def update_experiment():
'''Update the experiment status in config file'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
return None
for key in experiment_dict.keys():
i... |
check if the id is valid
def check_experiment_id(args):
'''check if the id is valid
'''
update_experiment()
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print_normal('There is no experiment running...')
retur... |
Parse the arguments for nnictl stop
1. If there is an id specified, return the corresponding id
2. If there is no id specified, and there is an experiment running, return the id, or return Error
3. If the id matches an experiment, nnictl will return the id.
4. If the id ends with *, nnictl will match all id... |
get the file name of config file
def get_config_filename(args):
'''get the file name of config file'''
experiment_id = check_experiment_id(args)
if experiment_id is None:
print_error('Please set the experiment id!')
exit(1)
experiment_config = Experiments()
experiment_dict = experim... |
Convert time stamp to date time format
def convert_time_stamp_to_date(content):
'''Convert time stamp to date time format'''
start_time_stamp = content.get('startTime')
end_time_stamp = content.get('endTime')
if start_time_stamp:
start_time = datetime.datetime.utcfromtimestamp(start_time_stamp ... |
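The strftime call is cut off; a sketch of the typical completion, assuming the timestamps are in milliseconds (hence the // 1000) and a YYYY/MM/DD display format:
import datetime

def convert_time_stamp_to_date(content):
    '''Convert time stamp to date time format (sketch).'''
    start_time_stamp = content.get('startTime')
    end_time_stamp = content.get('endTime')
    if start_time_stamp:
        start_time = datetime.datetime.utcfromtimestamp(start_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
        content['startTime'] = str(start_time)
    if end_time_stamp:
        end_time = datetime.datetime.utcfromtimestamp(end_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
        content['endTime'] = str(end_time)
    return content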
check if restful server is running
def check_rest(args):
'''check if restful server is running'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_port)
if not running:
print_normal('Restful server i... |
Stop the experiment which is running
def stop_experiment(args):
'''Stop the experiment which is running'''
experiment_id_list = parse_ids(args)
if experiment_id_list:
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
for experiment_id in exp... |