List trial
def trial_ls(args):
'''List trial'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
... |
Kill trial
def trial_kill(args):
'''Kill trial'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
... |
Get experiment information
def list_experiment(args):
'''Get experiment information'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experimen... |
Show the status of experiment
def experiment_status(args):
'''Show the status of experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
result, response = check_rest_server_quick(rest_port)
if not result:
print_normal('Restful server... |
internal function to call get_log_content
def log_internal(args, filetype):
'''internal function to call get_log_content'''
file_name = get_config_filename(args)
if filetype == 'stdout':
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stdout')
else:
file_full_path = os.path.j... |
get trial log path
def log_trial(args):
'''get trial log path'''
trial_id_path_dict = {}
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Ex... |
show the url of web ui
def webui_url(args):
'''show the url of web ui'''
nni_config = Config(get_config_filename(args))
print_normal('{0} {1}'.format('Web UI url:', ' '.join(nni_config.get_config('webuiUrl')))) |
get the information of all experiments
def experiment_list(args):
'''get the information of all experiments'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
updat... |
get the interval of two times
def get_time_interval(time1, time2):
'''get the interval of two times'''
try:
#convert time to timestamp
time1 = time.mktime(time.strptime(time1, '%Y/%m/%d %H:%M:%S'))
time2 = time.mktime(time.strptime(time2, '%Y/%m/%d %H:%M:%S'))
seconds = (datetim... |
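The tail of this helper is truncated; a minimal self-contained sketch of the same computation, assuming the interval is returned as a 'Xd Yh Zm Ws' string (the exact output format is not visible above):
```python
import time

def get_time_interval(time1, time2):
    '''get the interval of two times'''
    try:
        # convert time to timestamp
        t1 = time.mktime(time.strptime(time1, '%Y/%m/%d %H:%M:%S'))
        t2 = time.mktime(time.strptime(time2, '%Y/%m/%d %H:%M:%S'))
        seconds = int(t2 - t1)
        # split into days/hours/minutes/seconds (assumed display format)
        days, rest = divmod(seconds, 86400)
        hours, rest = divmod(rest, 3600)
        minutes, secs = divmod(rest, 60)
        return '{}d {}h {}m {}s'.format(days, hours, minutes, secs)
    except ValueError:
        return 'N/A'
```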
show experiment information in monitor
def show_experiment_info():
'''show experiment information in monitor'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
upda... |
monitor the experiment
def monitor_experiment(args):
'''monitor the experiment'''
if args.time <= 0:
print_error('please input a positive integer as the time interval (in seconds).')
exit(1)
while True:
try:
os.system('clear')
update_experiment()
... |
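The truncated loop body presumably redraws the screen on each tick; a hedged sketch of the full loop (update_experiment, show_experiment_info, and print_error are the helpers shown elsewhere in this section; the exact exception handling is an assumption):
```python
import os
import time

def monitor_experiment(args):
    '''monitor the experiment'''
    if args.time <= 0:
        print_error('please input a positive integer as the time interval (in seconds).')
        exit(1)
    while True:
        try:
            os.system('clear')          # redraw the monitor screen
            update_experiment()
            show_experiment_info()
            time.sleep(args.time)
        except KeyboardInterrupt:
            exit(0)                     # Ctrl-C stops monitoring cleanly
        except Exception as exception:
            print_error(exception)
            exit(1)
```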
output: List[Dict]
def parse_trial_data(content):
"""output: List[Dict]"""
trial_records = []
for trial_data in content:
for phase_i in range(len(trial_data['hyperParameters'])):
hparam = json.loads(trial_data['hyperParameters'][phase_i])['parameters']
hparam['id'] = trial_d... |
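A hedged completion of the visible loop, assuming each record is the parsed hyper-parameter dict tagged with its trial id:
```python
import json

def parse_trial_data(content):
    """output: List[Dict]"""
    trial_records = []
    for trial_data in content:
        for phase_i in range(len(trial_data['hyperParameters'])):
            hparam = json.loads(trial_data['hyperParameters'][phase_i])['parameters']
            hparam['id'] = trial_data['id']
            # assumed: each (trial, phase) pair becomes one flat record
            trial_records.append(hparam)
    return trial_records
```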
export experiment metadata to csv
def export_trials_data(args):
"""export experiment metadata to csv
"""
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
... |
copy remote directory to local machine
def copy_remote_directory_to_local(sftp, remote_path, local_path):
'''copy remote directory to local machine'''
try:
os.makedirs(local_path, exist_ok=True)
files = sftp.listdir(remote_path)
for file in files:
remote_full_path = os.path.... |
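A hedged sketch of the recursion, assuming directories are detected by attempting listdir and regular files are fetched with sftp.get:
```python
import os

def copy_remote_directory_to_local(sftp, remote_path, local_path):
    '''copy remote directory to local machine'''
    os.makedirs(local_path, exist_ok=True)
    for name in sftp.listdir(remote_path):
        remote_full_path = os.path.join(remote_path, name)
        local_full_path = os.path.join(local_path, name)
        try:
            # listdir succeeds only for directories; recurse into them
            sftp.listdir(remote_full_path)
            copy_remote_directory_to_local(sftp, remote_full_path, local_full_path)
        except IOError:
            # not a directory: fetch the file itself
            sftp.get(remote_full_path, local_full_path)
```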
create ssh client
def create_ssh_sftp_client(host_ip, port, username, password):
'''create ssh client'''
try:
check_environment()
import paramiko
conn = paramiko.Transport(host_ip, port)
conn.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_t... |
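The truncated call is almost certainly paramiko.SFTPClient.from_transport; a hedged completion with minimal error handling (the exact failure behavior is an assumption):
```python
def create_ssh_sftp_client(host_ip, port, username, password):
    '''create ssh client'''
    try:
        check_environment()  # helper from the original module
        import paramiko
        conn = paramiko.Transport(host_ip, port)
        conn.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(conn)
        return sftp
    except Exception as exception:
        print('Error: ', exception)
        return None
```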
Change search space from json format to hyperopt format
def json2space(x, oldy=None, name=NodeType.Root.value):
"""Change search space from json format to hyperopt format
"""
y = list()
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
... |
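For context, a search space in this json format uses `_type`/`_value` keys, which the code above reads via NodeType.Type.value and NodeType.Value.value; an illustrative example:
```python
# Illustrative NNI-style search space consumed by json2space; the
# parameter names and ranges are made up for demonstration.
search_space = {
    "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]},
    "optimizer": {"_type": "choice", "_value": ["Adam", "SGD"]},
}
```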
Json to parameters.
def json2paramater(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.Root.value):
"""Json to parameters.
"""
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
_value = x[NodeType.Value.value]
... |
Delete index information from params
Parameters
----------
params : dict
Returns
-------
result : dict
def _split_index(params):
"""Delete index information from params
Parameters
----------
params : dict
Returns
-------
result : dict
"""
result = {}
... |
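A hedged sketch of the deletion; the `_index`/`_value` wrapper keys are an assumption based on how nested choice values are usually encoded:
```python
def _split_index(params):
    """Delete index information from params"""
    result = {}
    for key in params:
        if isinstance(params[key], dict) and '_index' in params[key]:
            # keep only the value, dropping the index bookkeeping
            result[key] = params[key]['_value']
        else:
            result[key] = params[key]
    return result
```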
Parameters
----------
config : str
info : str
save_dir : str
def mutation(self, config=None, info=None, save_dir=None):
"""
Parameters
----------
config : str
info : str
save_dir : str
"""
self.result = None
self.co... |
Update search space.
Search_space contains the information that user pre-defined.
Parameters
----------
search_space : dict
def update_search_space(self, search_space):
"""Update search space.
Search_space contains the information that user pre-defined.
Param... |
Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
def generate_parameters(self, parameter_id):
"""Returns a dict of trial (hyper-)parameters, as a serializable object... |
Record the result from a trial
Parameters
----------
parameters: dict
value : dict/float
if value is dict, it should have "default" key.
value is final metrics of the trial.
def receive_trial_result(self, parameter_id, parameters, value):
'''Record the r... |
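Together with update_search_space and generate_parameters above, this defines the tuner contract; a minimal hypothetical RandomTuner sketch (assumes the nni.tuner.Tuner base class and handles only 'choice' parameters):
```python
import random
from nni.tuner import Tuner

class RandomTuner(Tuner):
    """Hypothetical tuner: random search over 'choice' parameters only."""

    def update_search_space(self, search_space):
        self.search_space = search_space

    def generate_parameters(self, parameter_id):
        # one independently sampled value per parameter
        return {name: random.choice(spec['_value'])
                for name, spec in self.search_space.items()
                if spec['_type'] == 'choice'}

    def receive_trial_result(self, parameter_id, parameters, value):
        # when value is a dict it carries the metric under the "default" key
        reward = value['default'] if isinstance(value, dict) else value
        print('trial %s finished with reward %s' % (parameter_id, reward))
```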
Load json file content
Parameters
----------
file_path:
path to the file
Raises
------
TypeError
Error with the file path
def get_json_content(file_path):
"""Load json file content
Parameters
----------
file_path:
path to the file
... |
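A hedged completion consistent with the documented TypeError behavior:
```python
import json

def get_json_content(file_path):
    """Load json file content"""
    try:
        with open(file_path, 'r') as file:
            return json.load(file)
    except TypeError as err:
        # a bad path-like object raises TypeError, per the docstring
        print('Error: ', err)
        return None
```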
Generate the Parameter Configuration Space (PCS) which defines the
legal ranges of the parameters to be optimized and their default values.
Generally, the format is:
# parameter_name categorical {value_1, ..., value_N} [default value]
# parameter_name ordinal {value_1, ..., value_N} [default value... |
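A concrete PCS file following that format might contain lines like these (parameter names and values are illustrative only):
```
optimizer categorical {Adam, SGD, RMSprop} [Adam]
batch_size ordinal {16, 32, 64, 128} [32]
```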
Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
can be constructed either by providing an actual scenario-object, or by specifying the options in a scenario file.
Reference: https://automl.github.io/SMAC3/stable/options.html
The format of the ... |
Load or create dataset
def load_data(train_path='./data/regression.train', test_path='./data/regression.test'):
'''
Load or create dataset
'''
print('Load data...')
df_train = pd.read_csv(train_path, header=None, sep='\t')
df_test = pd.read_csv(test_path, header=None, sep='\t')
num = len(df... |
The distance between two layers.
def layer_distance(a, b):
"""The distance between two layers."""
# pylint: disable=unidiomatic-typecheck
if type(a) != type(b):
return 1.0
if is_layer(a, "Conv"):
att_diff = [
(a.filters, b.filters),
(a.kernel_size, b.kernel_size)... |
The attribute distance.
def attribute_difference(att_diff):
''' The attribute distance.
'''
ret = 0
for a_value, b_value in att_diff:
if max(a_value, b_value) == 0:
ret += 0
else:
ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value)
return ret * 1.0 /... |
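The truncated return is almost certainly an average over the attribute pairs; a hedged completion:
```python
def attribute_difference(att_diff):
    '''The attribute distance.'''
    ret = 0
    for a_value, b_value in att_diff:
        if max(a_value, b_value) == 0:
            ret += 0
        else:
            ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value)
    # average the per-attribute relative differences (assumed normalization)
    return ret * 1.0 / len(att_diff)
```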
The distance between the layers of two neural networks.
def layers_distance(list_a, list_b):
"""The distance between the layers of two neural networks."""
len_a = len(list_a)
len_b = len(list_b)
f = np.zeros((len_a + 1, len_b + 1))
f[-1][-1] = 0
for i in range(-1, len_a):
f[i][-1] = i +... |
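The visible lines set up a textbook edit-distance dynamic program over the two layer lists; a hedged completion that uses layer_distance (defined above) as the substitution cost:
```python
import numpy as np

def layers_distance(list_a, list_b):
    """The distance between the layers of two neural networks."""
    len_a, len_b = len(list_a), len(list_b)
    f = np.zeros((len_a + 1, len_b + 1))
    # index -1 serves as the border row/column of the DP table
    f[-1][-1] = 0
    for i in range(-1, len_a):
        f[i][-1] = i + 1
    for j in range(-1, len_b):
        f[-1][j] = j + 1
    for i in range(len_a):
        for j in range(len_b):
            f[i][j] = min(
                f[i][j - 1] + 1,  # insertion
                f[i - 1][j] + 1,  # deletion
                f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]),
            )
    return f[len_a - 1][len_b - 1]
```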
The distance between two skip-connections.
def skip_connection_distance(a, b):
"""The distance between two skip-connections."""
if a[2] != b[2]:
return 1.0
len_a = abs(a[1] - a[0])
len_b = abs(b[1] - b[0])
return (abs(a[0] - b[0]) + abs(len_a - len_b)) / (max(a[0], b[0]) + max(len_a, len_b)... |
The distance between the skip-connections of two neural networks.
def skip_connections_distance(list_a, list_b):
"""The distance between the skip-connections of two neural networks."""
distance_matrix = np.zeros((len(list_a), len(list_b)))
for i, a in enumerate(list_a):
for j, b in enumerate(list_b... |
The distance between two neural networks.
Args:
x: An instance of NetworkDescriptor.
y: An instance of NetworkDescriptor
Returns:
The edit-distance between x and y.
def edit_distance(x, y):
"""The distance between two neural networks.
Args:
x: An instance of NetworkDescr... |
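A hedged sketch of the combination, relying on layers_distance and skip_connections_distance from the earlier entries; KERNEL_LAMBDA is an assumed weighting constant:
```python
KERNEL_LAMBDA = 1.0  # assumed relative weight of the skip-connection term

def edit_distance(x, y):
    """The edit-distance between two NetworkDescriptor instances."""
    ret = layers_distance(x.layers, y.layers)
    ret += KERNEL_LAMBDA * skip_connections_distance(
        x.skip_connections, y.skip_connections)
    return ret
```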
Calculate the edit distance.
Args:
train_x: A list of neural architectures.
train_y: A list of neural architectures.
Returns:
An edit-distance matrix.
def edit_distance_matrix(train_x, train_y=None):
"""Calculate the edit distance.
Args:
train_x: A list of neural archite... |
The Euclidean distance between two vectors.
def vector_distance(a, b):
"""The Euclidean distance between two vectors."""
a = np.array(a)
b = np.array(b)
return np.linalg.norm(a - b) |
Use Bourgain algorithm to embed the neural architectures based on their edit-distance.
Args:
distance_matrix: A matrix of edit-distances.
Returns:
A matrix of distances after embedding.
def bourgain_embedding_matrix(distance_matrix):
"""Use Bourgain algorithm to embed the neural architectur... |
Check if the target descriptor is in the descriptors.
def contain(descriptors, target_descriptor):
"""Check if the target descriptor is in the descriptors."""
for descriptor in descriptors:
if edit_distance(descriptor, target_descriptor) < 1e-5:
return True
return False |
Fit the regressor with more data.
Args:
train_x: A list of NetworkDescriptor.
train_y: A list of metric values.
def fit(self, train_x, train_y):
""" Fit the regressor with more data.
Args:
train_x: A list of NetworkDescriptor.
train_y: A list of m... |
Incrementally fit the regressor.
def incremental_fit(self, train_x, train_y):
""" Incrementally fit the regressor. """
if not self._first_fitted:
raise ValueError("The first_fit function needs to be called first.")
train_x, train_y = np.array(train_x), np.array(train_y)
# ... |
Fit the regressor for the first time.
def first_fit(self, train_x, train_y):
""" Fit the regressor for the first time. """
train_x, train_y = np.array(train_x), np.array(train_y)
self._x = np.copy(train_x)
self._y = np.copy(train_y)
self._distance_matrix = edit_distance_matrix... |
Predict the result.
Args:
train_x: A list of NetworkDescriptor.
Returns:
y_mean: The predicted mean.
y_std: The predicted standard deviation.
def predict(self, train_x):
"""Predict the result.
Args:
train_x: A list of NetworkDescriptor.
... |
Generate new architecture.
Args:
descriptors: All the searched neural architectures.
Returns:
graph: An instance of Graph. A morphed neural network with weights.
father_id: The father node ID in the search tree.
def generate(self, descriptors):
"""Generate ne... |
estimate the value of generated graph
def acq(self, graph):
''' estimate the value of generated graph
'''
mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
if self.optimizemode is OptimizeMode.Maximize:
return mean + self.beta * std
return mean - s... |
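The truncated branch mirrors the maximize case with the sign flipped, i.e. a standard upper/lower confidence bound acquisition; a hedged completion:
```python
def acq(self, graph):
    '''estimate the value of generated graph'''
    mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
    if self.optimizemode is OptimizeMode.Maximize:
        return mean + self.beta * std   # upper confidence bound
    return mean - self.beta * std       # lower confidence bound
```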
add child to search tree itself.
Arguments:
u {int} -- father id
v {int} -- child id
def add_child(self, u, v):
''' add child to search tree itself.
Arguments:
u {int} -- father id
v {int} -- child id
'''
if u == -1:
... |
A recursive function to return the content of the tree in a dict.
def get_dict(self, u=None):
""" A recursive function to return the content of the tree in a dict."""
if u is None:
return self.get_dict(self.root)
children = []
for v in self.adj_list[u]:
children.... |
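A hedged completion of the recursion; the 'name'/'children' output keys are an assumption:
```python
def get_dict(self, u=None):
    """A recursive function to return the content of the tree in a dict."""
    if u is None:
        return self.get_dict(self.root)
    children = []
    for v in self.adj_list[u]:
        children.append(self.get_dict(v))
    # assumed output shape: one dict per node with its subtree inlined
    return {'name': u, 'children': children}
```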
Train a network from a specific graph.
def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
'''
Train a network from a specific graph.
'''
global sess
with tf.Graph().as_default():
train_model = GAG(cfg, embed, p_graph)
train_model.build_net(is_training=True)
tf.get_variab... |
Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Call 'generate_parameters()' by 'count' times by default.
User code must override either this function or 'generate_parameters()'.
If there's no more trial, user should raise nni.NoMoreTrialError exception in... |
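A hedged sketch of the default behavior this docstring describes for the Tuner base class:
```python
import nni

def generate_multiple_parameters(self, parameter_id_list):
    """Returns multiple sets of trial (hyper-)parameters."""
    result = []
    for parameter_id in parameter_id_list:
        try:
            result.append(self.generate_parameters(parameter_id))
        except nni.NoMoreTrialError:
            # no more trials available; return what we have so far
            break
    return result
```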
Load graph
def graph_loads(graph_json):
'''
Load graph
'''
layers = []
for layer in graph_json['layers']:
layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id'])
layer_info.is_delete = layer['is_delete']
_logger.debug('append... |
Calculate the `hash_id` of a Layer, which is determined by its own properties and the `hash_id`s of its input layers
def update_hash(self, layers: Iterable):
"""
Calculate the `hash_id` of a Layer, which is determined by its own properties and the `hash_id`s of its input layers
"""
... |
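A hedged sketch of such a hash; the field names graph_type, size, input, and hash_id come from the Layer constructor shown in graph_loads above, while the choice of md5 is an assumption:
```python
import hashlib

def update_hash(self, layers):
    """Recompute self.hash_id from this layer's own fields and the
    hash_ids of its input layers (which must already be up to date)."""
    hasher = hashlib.md5()
    hasher.update(str(self.graph_type).encode())
    hasher.update(str(self.size).encode())
    for input_id in self.input:
        hasher.update(layers[input_id].hash_id.encode())
    self.hash_id = hasher.hexdigest()
```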
update the hash id of each layer recursively, in topological order
hash ids will be used in weight sharing
def update_hash(self):
"""
update the hash id of each layer recursively, in topological order
hash ids will be used in weight sharing
"""
_logger.debug('update hash')
... |
Initialize root logger.
This will redirect anything from logging.getLogger() as well as stdout to specified file.
logger_file_path: path of logger file (path-like object).
def init_logger(logger_file_path, log_level_name='info'):
"""Initialize root logger.
This will redirect anything from logging.getLo... |
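A hedged sketch: attach a FileHandler to the root logger and point sys.stdout at the same file (the original likely wraps stdout more carefully):
```python
import logging
import sys

def init_logger(logger_file_path, log_level_name='info'):
    """Initialize root logger and redirect stdout to the same file."""
    log_level = getattr(logging, log_level_name.upper(), logging.INFO)
    handler = logging.FileHandler(logger_file_path)
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] %(levelname)s (%(name)s) %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(log_level)
    # naive stdout redirection; a buffered wrapper would be more robust
    sys.stdout = open(logger_file_path, 'a')
```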
Create simple convolutional model
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
'''
Create simple convolutional model
'''
layers = [
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(64, (3, 3), activation='relu'),... |
Load MNIST dataset
def load_mnist_data(args):
'''
Load MNIST dataset
'''
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
y... |
Train model
def train(args, params):
'''
Train model
'''
x_train, y_train, x_test, y_test = load_mnist_data(args)
model = create_mnist_model(params)
# nni
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
validation_data=(x_test, y_test), callb... |
Run at the end of each epoch
def on_epoch_end(self, epoch, logs={}):
'''
Run at the end of each epoch
'''
LOG.debug(logs)
nni.report_intermediate_result(logs["val_acc"]) |
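For context, this hook lives in a Keras callback; a minimal hedged sketch of the surrounding class (the class name SendMetrics is an assumption, and the mutable default logs={} is replaced with the safer logs=None idiom):
```python
import logging
import keras
import nni

LOG = logging.getLogger('mnist_keras')

class SendMetrics(keras.callbacks.Callback):
    """Hypothetical callback class reporting intermediate results to NNI."""

    def on_epoch_end(self, epoch, logs=None):
        '''Run at the end of each epoch'''
        logs = logs or {}
        LOG.debug(logs)
        nni.report_intermediate_result(logs["val_acc"])
```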
get all of config values
def get_all_config(self):
'''get all of config values'''
return json.dumps(self.config, indent=4, sort_keys=True, separators=(',', ':')) |
set {key:value} pairs to self.config
def set_config(self, key, value):
'''set {key:value} pairs to self.config'''
self.config = self.read_file()
self.config[key] = value
self.write_file() |
save config to local file
def write_file(self):
'''save config to local file'''
if self.config:
try:
with open(self.config_file, 'w') as file:
json.dump(self.config, file)
except IOError as error:
print('Error:', error)
... |
set {key:value} pairs to self.experiment
def add_experiment(self, id, port, time, file_name, platform):
'''set {key:value} pairs to self.experiment'''
self.experiments[id] = {}
self.experiments[id]['port'] = port
self.experiments[id]['startTime'] = time
self.experiments[id]['end... |
Update experiment
def update_experiment(self, id, key, value):
'''Update experiment'''
if id not in self.experiments:
return False
self.experiments[id][key] = value
self.write_file()
return True |
remove an experiment by id
def remove_experiment(self, id):
'''remove an experiment by id'''
if id in self.experiments:
self.experiments.pop(id)
self.write_file() |
save config to local file
def write_file(self):
'''save config to local file'''
try:
with open(self.experiment_file, 'w') as file:
json.dump(self.experiments, file)
except IOError as error:
print('Error:', error)
return |
load config from local file
def read_file(self):
'''load config from local file'''
if os.path.exists(self.experiment_file):
try:
with open(self.experiment_file, 'r') as file:
return json.load(file)
except ValueError:
return {}
... |
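A hedged completion of the truncated tail: fall back to an empty dict when the file is absent or unparsable:
```python
import json
import os

def read_file(self):
    '''load config from local file'''
    if os.path.exists(self.experiment_file):
        try:
            with open(self.experiment_file, 'r') as file:
                return json.load(file)
        except ValueError:
            return {}
    # no saved experiments yet (assumed fallback)
    return {}
```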
load data from file
def load_from_file(path, fmt=None, is_training=True):
'''
load data from file
'''
if fmt is None:
fmt = 'squad'
assert fmt in ['squad', 'csv'], 'input format must be squad or csv'
qp_pairs = []
if fmt == 'squad':
with open(path) as data_file:
... |
tokenize function.
def tokenize(qp_pair, tokenizer=None, is_training=False):
'''
tokenize function.
'''
question_tokens = tokenizer.tokenize(qp_pair['question'])
passage_tokens = tokenizer.tokenize(qp_pair['passage'])
if is_training:
question_tokens = question_tokens[:300]
passa... |
Build the vocab from corpus.
def collect_vocab(qp_pairs):
'''
Build the vocab from corpus.
'''
vocab = set()
for qp_pair in qp_pairs:
for word in qp_pair['question_tokens']:
vocab.add(word['word'])
for word in qp_pair['passage_tokens']:
vocab.add(word['word']... |
Shuffle entries within each step-sized chunk
def shuffle_step(entries, step):
'''
Shuffle entries within each step-sized chunk
'''
answer = []
for i in range(0, len(entries), step):
sub = entries[i:i+step]
shuffle(sub)
answer += sub
return answer |
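shuffle here is random.shuffle; for completeness, a self-contained version with the import plus a usage line:
```python
from random import shuffle

def shuffle_step(entries, step):
    '''Shuffle entries within each step-sized chunk.'''
    answer = []
    for i in range(0, len(entries), step):
        sub = entries[i:i+step]
        shuffle(sub)       # in-place shuffle of this chunk only
        answer += sub
    return answer

# e.g. shuffle_step(list(range(10)), 5) permutes 0-4 and 5-9 independently
```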
Get batches data and shuffle.
def get_batches(qp_pairs, batch_size, need_sort=True):
'''
Get batches data and shuffle.
'''
if need_sort:
qp_pairs = sorted(qp_pairs, key=lambda qp: (
len(qp['passage_tokens']), qp['id']), reverse=True)
batches = [{'qp_pairs': qp_pairs[i:(i + batch... |
Get char input.
def get_char_input(data, char_dict, max_char_length):
'''
Get char input.
'''
batch_size = len(data)
sequence_length = max(len(d) for d in data)
char_id = np.zeros((max_char_length, sequence_length,
batch_size), dtype=np.int32)
char_lengths = np.zeros... |
Get word input.
def get_word_input(data, word_dict, embed, embed_dim):
'''
Get word input.
'''
batch_size = len(data)
max_sequence_length = max(len(d) for d in data)
sequence_length = max_sequence_length
word_input = np.zeros((max_sequence_length, batch_size,
embe... |
Given a character index, return the index of the word containing it.
def get_word_index(tokens, char_index):
'''
Given a character index, return the index of the word containing it.
'''
for (i, token) in enumerate(tokens):
if token['char_end'] == 0:
continue
if token['char_begin'] <= char_index and char_index <= token['char_end']:
return i... |
Get the begin and end indices of the answer.
def get_answer_begin_end(data):
'''
Get the begin and end indices of the answer.
'''
begin = []
end = []
for qa_pair in data:
tokens = qa_pair['passage_tokens']
char_begin = qa_pair['answer_begin']
char_end = qa_pair['answer_end']
wor... |
Get bucket by length.
def get_buckets(min_length, max_length, bucket_count):
'''
Get bucket by length.
'''
if bucket_count <= 0:
return [max_length]
unit_length = int((max_length - min_length) // (bucket_count))
buckets = [min_length + unit_length *
(i + 1) for i in range... |
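A hedged completion; forcing the last bucket to max_length is an assumption that keeps every length covered:
```python
def get_buckets(min_length, max_length, bucket_count):
    '''Get bucket by length.'''
    if bucket_count <= 0:
        return [max_length]
    unit_length = int((max_length - min_length) // bucket_count)
    buckets = [min_length + unit_length * (i + 1)
               for i in range(bucket_count)]
    buckets[-1] = max_length  # assumed: last bucket absorbs the remainder
    return buckets
```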
tokenize function in Tokenizer.
def tokenize(self, text):
'''
tokenize function in Tokenizer.
'''
start = -1
tokens = []
for i, character in enumerate(text):
if character == ' ' or character == '\t':
if start >= 0:
word = t... |
generate new id and event hook for new Individual
def generate_new_id(self):
"""
generate new id and event hook for new Individual
"""
self.events.append(Event())
indiv_id = self.indiv_counter
self.indiv_counter += 1
return indiv_id |
initialize populations for evolution tuner
def init_population(self, population_size, graph_max_layer, graph_min_layer):
"""
initialize populations for evolution tuner
"""
population = []
graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer,
... |
Returns a set of trial graph config, as a serializable object.
An example configuration:
```json
{
"shared_id": [
"4a11b2ef9cb7211590dfe81039b27670",
"370af04de24985e5ea5b3d72b12644c9",
"11f646e9f650f5f3fedc12b6349ec60f",
... |
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
par... |
update data
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
def _update_data(self, trial_job_id, trial_history):
"""update data
Parameters
----------
trial_job... |
trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
True if the experiment finished successfully, False otherwise
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
... |
assess_trial
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
Returns
-------
bool
AssessResult.Good or AssessResult.Bad
Raises
----... |
Copy directory from HDFS to local
def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
'''Copy directory from HDFS to local'''
if not os.path.exists(localDirectory):
os.makedirs(localDirectory)
try:
listing = hdfsClient.list_status(hdfsDirectory)
except Exception as ... |
Copy file from HDFS to local
def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True):
'''Copy file from HDFS to local'''
if not hdfsClient.exists(hdfsFilePath):
raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath))
try:
file_status = hdfsClient.get_fi... |
Copy directory from local to HDFS
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
'''Copy directory from local to HDFS'''
if not os.path.exists(localDirectory):
raise Exception('Local Directory does not exist!')
hdfsClient.mkdirs(hdfsDirectory)
result = True
for file in ... |
Copy a local file to HDFS directory
def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):
'''Copy a local file to HDFS directory'''
if not os.path.exists(localFilePath):
raise Exception('Local file Path does not exist!')
if os.path.isdir(localFilePath):
raise Exception... |
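A hedged completion, assuming a pyhdfs-style client (exists/delete/copy_from_local match the methods the other helpers above already use):
```python
import os

def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):
    '''Copy a local file to HDFS directory'''
    if not os.path.exists(localFilePath):
        raise Exception('Local file Path does not exist!')
    if os.path.isdir(localFilePath):
        raise Exception('localFilePath should not be a directory!')
    if hdfsClient.exists(hdfsFilePath):
        if not override:
            return False
        hdfsClient.delete(hdfsFilePath)  # replace the existing file
    try:
        hdfsClient.copy_from_local(localFilePath, hdfsFilePath)
        return True
    except Exception:
        return False
```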
Load dataset, use boston dataset
def load_data():
'''Load dataset, use boston dataset'''
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25)
#normalize data
ss_X = StandardScaler()
ss_y = StandardScaler()
X... |
Get model according to parameters
def get_model(PARAMS):
'''Get model according to parameters'''
model_dict = {
'LinearRegression': LinearRegression(),
'SVR': SVR(),
'KNeighborsRegressor': KNeighborsRegressor(),
'DecisionTreeRegressor': DecisionTreeRegressor()
}
if not m... |
Train model and predict result
def run(X_train, X_test, y_train, y_test, PARAMS):
'''Train model and predict result'''
model.fit(X_train, y_train)
predict_y = model.predict(X_test)
score = r2_score(y_test, predict_y)
LOG.debug('r2 score: %s' % score)
nni.report_final_result(score) |
Add a skip-connection to the descriptor.
Args:
u: Number of convolutional layers before the starting point.
v: Number of convolutional layers before the ending point.
connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.
def add_skip_connection(self, u, v, connectio... |
NetworkDescriptor to json representation
def to_json(self):
''' NetworkDescriptor to json representation
'''
skip_list = []
for u, v, connection_type in self.skip_connections:
skip_list.append({"from": u, "to": v, "type": connection_type})
return {"node_list": self.... |
Add a layer to the Graph.
Args:
layer: An instance of the subclasses of StubLayer in layers.py.
input_node_id: An integer. The ID of the input node of the layer.
Returns:
output_node_id: An integer. The ID of the output node of the layer.
def add_layer(self, layer, i... |
Add a new node to node_list and give the node an ID.
Args:
node: An instance of Node.
Returns:
node_id: An integer.
def _add_node(self, node):
"""Add a new node to node_list and give the node an ID.
Args:
node: An instance of Node.
Returns:
... |
Add a new layer to the graph. The nodes should be created in advance.
def _add_edge(self, layer, input_id, output_id):
"""Add a new layer to the graph. The nodes should be created in advance."""
if layer in self.layer_to_id:
layer_id = self.layer_to_id[layer]
if input_id not in... |
Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same.
def _redirect_edge(self, u_id, v_id, new_v_id):
"""Redirect the layer to a new node.
Change the edge originall... |
Replace the layer with a new layer.
def _replace_layer(self, layer_id, new_layer):
"""Replace the layer with a new layer."""
old_layer = self.layer_list[layer_id]
new_layer.input = old_layer.input
new_layer.output = old_layer.output
new_layer.output.shape = new_layer.output_shap... |
Return the topological order of the node IDs from the input node to the output node.
def topological_order(self):
"""Return the topological order of the node IDs from the input node to the output node."""
q = Queue()
in_degree = {}
for i in range(self.n_nodes):
in_degree[i] ... |
Given two node IDs, return all the pooling layers between them.
def _get_pooling_layers(self, start_node_id, end_node_id):
"""Given two node IDs, return all the pooling layers between them."""
layer_list = []
node_list = [start_node_id]
assert self._depth_first_search(end_node_id, layer... |
Search for all the layers and nodes down the path.
A recursive function to search all the layers and nodes between the node in the node_list
and the node with target_id.
def _depth_first_search(self, target_id, layer_id_list, node_list):
"""Search for all the layers and nodes down the path.... |
Search the graph for all the layers to be widened caused by an operation.
It is a recursive function with a duplication check to avoid deadlock.
It searches from a starting node u until the corresponding layers have been widened.
Args:
u: The starting node ID.
start_dim: Th... |
Insert a relu-conv-bn block after the target block.
Args:
target_id: A convolutional layer ID. The new block should be inserted after the block.
new_layer: An instance of StubLayer subclasses.
def to_deeper_model(self, target_id, new_layer):
"""Insert a relu-conv-bn block after ... |
Widen the last dimension of the output of the pre_layer.
Args:
pre_layer_id: The ID of a convolutional layer or dense layer.
n_add: The number of dimensions to add.
def to_wider_model(self, pre_layer_id, n_add):
"""Widen the last dimension of the output of the pre_layer.
... |