text stringlengths 81 112k |
|---|
Lasso Regression
def cric__lasso():
    """ Lasso Regression
    """
    # L1-penalized logistic regression stands in for the lasso here;
    # C=0.002 applies strong regularization.
    model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002)

    # expose the positive-class probability as the model output so
    # explainers attribute probabilities rather than hard class labels
    def predict_pos_proba(X):
        return model.predict_proba(X)[:, 1]

    model.predict = predict_pos_proba
    return model
Ridge Regression
def cric__ridge():
    """ Ridge Regression
    """
    # L2-penalized logistic regression plays the role of ridge regression
    model = sklearn.linear_model.LogisticRegression(penalty="l2")

    # expose the positive-class probability as the model output so
    # explainers attribute probabilities rather than hard class labels
    def predict_pos_proba(X):
        return model.predict_proba(X)[:, 1]

    model.predict = predict_pos_proba
    return model
Decision Tree
def cric__decision_tree():
    """ Decision Tree
    """
    # shallow tree (depth 4) with a fixed seed for reproducibility
    model = sklearn.tree.DecisionTreeClassifier(random_state=0, max_depth=4)

    # expose the positive-class probability as the model output so
    # explainers attribute probabilities rather than hard class labels
    def predict_pos_proba(X):
        return model.predict_proba(X)[:, 1]

    model.predict = predict_pos_proba
    return model
Random Forest
def cric__random_forest():
    """ Random Forest
    """
    # 100 trees, fixed seed for reproducibility
    model = sklearn.ensemble.RandomForestClassifier(n_estimators=100, random_state=0)

    # expose the positive-class probability of the forest as the model
    # output so explainers attribute probabilities, not class labels
    def predict_pos_proba(X):
        return model.predict_proba(X)[:, 1]

    model.predict = predict_pos_proba
    return model
Gradient Boosted Trees
def cric__gbm():
""" Gradient Boosted Trees
"""
import xgboost
# max_depth and subsample match the params used for the full cric data in the paper
# learning_rate was set a bit higher to allow for faster runtimes
# n_estimators was chosen based on a train/test split of t... |
Decision Tree
def human__decision_tree():
""" Decision Tree
"""
# build data
N = 1000000
M = 3
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[0, 0] = 1
y[0] = 8
X[1, 1] = 1
y[1] = 8
X[2, 0:2] = 1
y[2] = 4
# fit model
xor_model = sklearn.tree.DecisionTree... |
Create a SHAP summary plot, colored by feature values when they are provided.
Parameters
----------
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature... |
Kernel SHAP 1000 mean ref.
color = red_blue_circle(0.5)
linestyle = solid
def kernel_shap_1000_meanref(model, data):
""" Kernel SHAP 1000 mean ref.
color = red_blue_circle(0.5)
linestyle = solid
"""
return lambda X: KernelExplainer(model.predict, kmeans(data, 1)).shap_values(X, nsamples=100... |
IME 1000
color = red_blue_circle(0.5)
linestyle = dashed
def sampling_shap_1000(model, data):
    """ IME 1000
    color = red_blue_circle(0.5)
    linestyle = dashed
    """
    # NOTE(review): the docstring doubles as plot metadata (name, color,
    # linestyle) for the benchmark harness, so it is kept verbatim.
    def attributions(X):
        explainer = SamplingExplainer(model.predict, data)
        return explainer.shap_values(X, nsamples=1000)

    return attributions
TreeExplainer (independent)
color = red_blue_circle(0)
linestyle = dashed
def tree_shap_independent_200(model, data):
""" TreeExplainer (independent)
color = red_blue_circle(0)
linestyle = dashed
"""
data_subsample = sklearn.utils.resample(data, replace=False, n_samples=min(200, data.shape[... |
mean(|TreeExplainer|)
color = red_blue_circle(0.25)
linestyle = solid
def mean_abs_tree_shap(model, data):
""" mean(|TreeExplainer|)
color = red_blue_circle(0.25)
linestyle = solid
"""
def f(X):
v = TreeExplainer(model).shap_values(X)
if isinstance(v, list):
retu... |
Saabas
color = red_blue_circle(0)
linestyle = dotted
def saabas(model, data):
    """ Saabas
    color = red_blue_circle(0)
    linestyle = dotted
    """
    # approximate=True selects the Saabas attribution path of TreeExplainer.
    # NOTE(review): the docstring doubles as plot metadata and is kept verbatim.
    def attributions(X):
        return TreeExplainer(model).shap_values(X, approximate=True)

    return attributions
LIME Tabular 1000
def lime_tabular_regression_1000(model, data):
    """ LIME Tabular 1000
    """
    # wrap LIME so it matches the common attribution-function interface
    def attributions(X):
        explainer = other.LimeTabularExplainer(model.predict, data, mode="regression")
        return explainer.attributions(X, nsamples=1000)

    return attributions
Deep SHAP (DeepLIFT)
def deep_shap(model, data):
""" Deep SHAP (DeepLIFT)
"""
if isinstance(model, KerasWrap):
model = model.model
explainer = DeepExplainer(model, kmeans(data, 1).data)
def f(X):
phi = explainer.shap_values(X)
if type(phi) is list and len(phi) == 1:
... |
Expected Gradients
def expected_gradients(model, data):
""" Expected Gradients
"""
if isinstance(model, KerasWrap):
model = model.model
explainer = GradientExplainer(model, data)
def f(X):
phi = explainer.shap_values(X)
if type(phi) is list and len(phi) == 1:
ret... |
Return approximate SHAP values for the model applied to the data given by X.
Parameters
----------
X : list,
if framework == 'tensorflow': numpy.array, or pandas.DataFrame
if framework == 'pytorch': torch.tensor
A tensor (or list of tensors) of samples (where... |
Returns dummy agent class for if PyTorch etc. is not installed.
def _agent_import_failed(trace):
"""Returns dummy agent class for if PyTorch etc. is not installed."""
class _AgentImportFailed(Trainer):
_name = "AgentImportFailed"
_default_config = with_common_config({})
def _setup(sel... |
Executes training.
Args:
run_or_experiment (function|class|str|Experiment): If
function|class|str, this is the algorithm or model to train.
This may refer to the name of a built-on algorithm
(e.g. RLLib's DQN or PPO), a user-defined trainable
function or clas... |
Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
>>> run_exper... |
Flushes remaining output records in the output queues to plasma.
None is used as special type of record that is propagated from sources
to sink to notify that the end of data in a stream.
Attributes:
close (bool): A flag denoting whether the channel should be
also mar... |
Returns an appropriate preprocessor class for the given space.
def get_preprocessor(space):
"""Returns an appropriate preprocessor class for the given space."""
legacy_patch_shapes(space)
obs_shape = space.shape
if isinstance(space, gym.spaces.Discrete):
preprocessor = OneHotPreprocessor
... |
Assigns shapes to spaces that don't have shapes.
This is only needed for older gym versions that don't set shapes properly
for Tuple and Discrete spaces.
def legacy_patch_shapes(space):
"""Assigns shapes to spaces that don't have shapes.
This is only needed for older gym versions that don't set shape... |
Downsamples images from (210, 160, 3) by the configured factor.
def transform(self, observation):
"""Downsamples images from (210, 160, 3) by the configured factor."""
self.check_shape(observation)
scaled = observation[25:-25, :, :]
if self._dim < 84:
scaled = cv2.resize(sca... |
Get a new batch from the internal ring buffer.
Returns:
buf: Data item saved from inqueue.
released: True if the item is now removed from the ring buffer.
def get(self):
"""Get a new batch from the internal ring buffer.
Returns:
buf: Data item saved from inque... |
Runs one logical iteration of training.
Subclasses should override ``_train()`` instead to return results.
This class automatically fills the following fields in the result:
`done` (bool): training is terminated. Filled only if not provided.
`time_this_iter_s` (float): Time in... |
Removes subdirectory within checkpoint_folder
Parameters
----------
checkpoint_dir : path to checkpoint
def delete_checkpoint(self, checkpoint_dir):
"""Removes subdirectory within checkpoint_folder
Parameters
----------
checkpoint_dir : path to checkpoint... |
Saves the current model state to a checkpoint.
Subclasses should override ``_save()`` instead to save state.
This method dumps additional metadata alongside the saved path.
Args:
checkpoint_dir (str): Optional dir to place the checkpoint.
Returns:
Checkpoint pa... |
Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
def save_to_object(self):
"""Saves the current model state to a Python object. It also
saves to disk but does not return... |
Restores training state from a given model checkpoint.
These checkpoints are returned from calls to save().
Subclasses should override ``_restore()`` instead to restore state.
This method restores additional metadata saved with the checkpoint.
def restore(self, checkpoint_path):
"""Re... |
Restores training state from a checkpoint object.
These checkpoints are returned from calls to save_to_object().
def restore_from_object(self, obj):
"""Restores training state from a checkpoint object.
These checkpoints are returned from calls to save_to_object().
"""
info = ... |
Exports model based on export_formats.
Subclasses should override _export_model() to actually
export model to local directory.
Args:
export_formats (list): List of formats that should be exported.
export_dir (str): Optional dir to place the exported model.
... |
See Schedule.value
def value(self, t):
    """See Schedule.value"""
    # Linear interpolation from initial_p to final_p across
    # schedule_timesteps steps, then held constant at final_p.
    elapsed = float(t) / max(1, self.schedule_timesteps)
    progress = elapsed if elapsed < 1.0 else 1.0
    return self.initial_p + progress * (self.final_p - self.initial_p)
Dump a whole json record into the given file.
Overwrite the file if the overwrite flag set.
Args:
json_info (dict): Information dict to be dumped.
json_file (str): File path to be dumped to.
overwrite(boolean)
def dump_json(json_info, json_file, overwrite=True):
"""Dump a whole js... |
Parse a whole json record from the given file.
Return None if the json file does not exists or exception occurs.
Args:
json_file (str): File path to be parsed.
Returns:
A dict of json info.
def parse_json(json_file):
"""Parse a whole json record from the given file.
Return None ... |
Parse multiple json records from the given file.
Seek to the offset as the start point before parsing
if offset set. return empty list if the json file does
not exists or exception occurs.
Args:
json_file (str): File path to be parsed.
offset (int): Initial seek position of the file.
... |
Convert the unicode element of the content to str recursively.
def unicode2str(content):
"""Convert the unicode element of the content to str recursively."""
if isinstance(content, dict):
result = {}
for key in content.keys():
result[unicode2str(key)] = unicode2str(content[key])
... |
Computes the loss of the network.
def loss(self, xs, ys):
    """Computes the loss of the network."""
    # run the cross-entropy op with the batch fed into the input
    # placeholders, and return it as a plain Python float
    feed = {self.x: xs, self.y_: ys}
    return float(self.sess.run(self.cross_entropy, feed_dict=feed))
Computes the gradients of the network.
def grad(self, xs, ys):
    """Computes the gradients of the network."""
    # evaluate the precomputed gradient ops for this batch
    feed = {self.x: xs, self.y_: ys}
    return self.sess.run(self.cross_entropy_grads, feed_dict=feed)
Creates the queue and preprocessing operations for the dataset.
Args:
data_path: Filename for cifar10 data.
size: The number of images in the dataset.
dataset: The dataset we are using.
Returns:
queue: A Tensorflow queue for extracting the images and labels.
def build_data(dat... |
Build CIFAR image and labels.
Args:
data_path: Filename for cifar10 data.
batch_size: Input batch size.
train: True if we are training and false if we are testing.
Returns:
images: Batches of images of size
[batch_size, image_size, image_size, 3].
labels: Ba... |
Create or update a Ray cluster.
def create_or_update(cluster_config_file, min_workers, max_workers, no_restart,
restart_only, yes, cluster_name):
"""Create or update a Ray cluster."""
if restart_only or no_restart:
assert restart_only != no_restart, "Cannot set both 'restart_only' ... |
Tear down the Ray cluster.
def teardown(cluster_config_file, yes, workers_only, cluster_name):
    """Tear down the Ray cluster."""
    # thin CLI wrapper: all the work is delegated to teardown_cluster
    teardown_cluster(cluster_config_file, yes, workers_only, cluster_name)
Kills a random Ray node. For testing purposes only.
def kill_random_node(cluster_config_file, yes, cluster_name):
    """Kills a random Ray node. For testing purposes only."""
    # kill_node picks the victim and returns its IP for reporting
    killed_ip = kill_node(cluster_config_file, yes, cluster_name)
    click.echo("Killed node with IP " + killed_ip)
Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
def submit(cluster_config_file, docker, screen, tmux, stop, start,
cluster_name, port_forward, script, script_args):
"""Uploads and ... |
Build a whole graph for the model.
def build_graph(self):
"""Build a whole graph for the model."""
self.global_step = tf.Variable(0, trainable=False)
self._build_model()
if self.mode == "train":
self._build_train_op()
else:
# Additional initialization for... |
Build the core model within the graph.
def _build_model(self):
"""Build the core model within the graph."""
with tf.variable_scope("init"):
x = self._conv("init_conv", self._images, 3, 3, 16,
self._stride_arr(1))
strides = [1, 2, 2]
activate_befo... |
Build training specific ops for the graph.
def _build_train_op(self):
"""Build training specific ops for the graph."""
num_gpus = self.hps.num_gpus if self.hps.num_gpus != 0 else 1
# The learning rate schedule is dependent on the number of gpus.
boundaries = [int(20000 * i / np.sqrt(num... |
Batch normalization.
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.variable_scope(name):
params_shape = [x.get_shape()[-1]]
beta = tf.get_variable(
"beta",
params_shape,
tf.float32,
initializer... |
L2 weight decay loss.
def _decay(self):
    """L2 weight decay loss."""
    # Decay only variables whose op name contains "DW" (the weight
    # kernels); biases and batch-norm params are excluded.
    # NOTE(review): `> 0` (not `>= 0`) skips names that *start* with
    # "DW" — presumably fine since scoped names carry a prefix; confirm.
    costs = [
        tf.nn.l2_loss(var)
        for var in tf.trainable_variables()
        if var.op.name.find(r"DW") > 0
    ]
    return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
Convolution.
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
"DW", [filter_size, filter_size, in_filters, out_filters],... |
FullyConnected layer for final output.
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
x = tf.reshape(x, [self.hps.batch_size, -1])
w = tf.get_variable(
"DW", [x.get_shape()[1], out_dim],
initializer=tf.uniform_unit_scaling_initialize... |
Forward pass of the multi-agent controller.
Arguments:
model: TorchModel class
obs: Tensor of shape [B, n_agents, obs_size]
h: List of tensors of shape [B, n_agents, h_size]
Returns:
q_vals: Tensor of shape [B, n_agents, n_actions]
h: Tensor of shape [B, n_agents, h_siz... |
Forward pass of the loss.
Arguments:
rewards: Tensor of shape [B, T-1, n_agents]
actions: Tensor of shape [B, T-1, n_agents]
terminated: Tensor of shape [B, T-1, n_agents]
mask: Tensor of shape [B, T-1, n_agents]
obs: Tensor of shape [B, T, n_agents, ... |
Unpacks the action mask / tuple obs from agent grouping.
Returns:
obs (Tensor): flattened obs tensor of shape [B, n_agents, obs_size]
mask (Tensor): action mask, if any
def _unpack_observation(self, obs_batch):
"""Unpacks the action mask / tuple obs from agent grouping.
... |
Get a named actor which was previously created.
If the actor doesn't exist, an exception will be raised.
Args:
name: The name of the named actor.
Returns:
The ActorHandle object corresponding to the name.
def get_actor(name):
"""Get a named actor which was previously created.
If... |
Register a named actor under a string key.
Args:
name: The name of the named actor.
actor_handle: The actor object to be associated with this name
def register_actor(name, actor_handle):
"""Register a named actor under a string key.
Args:
name: The name of the named actor.
actor... |
Make sure all items of config are in schema
def check_extraneous(config, schema):
"""Make sure all items of config are in schema"""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
for k in config:
if k not in schema:
raise ValueE... |
Required Dicts indicate that no extra fields can be introduced.
def validate_config(config, schema=CLUSTER_CONFIG_SCHEMA):
"""Required Dicts indicate that no extra fields can be introduced."""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
check_r... |
Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fields.
def update(self, **kwargs):
"""Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fiel... |
Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding fields.
def update_if_absent(self, **kwargs):
"""Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding ... |
Deterministically compute an actor handle ID.
A new actor handle ID is generated when it is forked from another actor
handle. The new handle ID is computed as hash(old_handle_id || num_forks).
Args:
actor_handle_id (common.ObjectID): The original actor handle ID.
num_forks: The number of t... |
Deterministically compute an actor handle ID in the non-forked case.
This code path is used whenever an actor handle is pickled and unpickled
(for example, if a remote function closes over an actor handle). Then,
whenever the actor handle is used, a new actor handle ID will be generated
on the fly as a... |
Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs th... |
Intentionally exit the current actor.
This function is used to disconnect an actor and exit the worker.
Raises:
Exception: An exception is raised if this is a driver or this
worker is not an actor.
def exit_actor():
"""Intentionally exit the current actor.
This function is used t... |
Get the available checkpoints for the given actor ID, return a list
sorted by checkpoint timestamp in descending order.
def get_checkpoints_for_actor(actor_id):
"""Get the available checkpoints for the given actor ID, return a list
sorted by checkpoint timestamp in descending order.
"""
checkpoint_... |
Create an actor.
Args:
args: These arguments are forwarded directly to the actor
constructor.
kwargs: These arguments are forwarded directly to the actor
constructor.
Returns:
A handle to the newly created actor.
def remote(self, *ar... |
Create an actor.
This method allows more flexibility than the remote method because
resource requirements can be specified and override the defaults in the
decorator.
Args:
args: The arguments to forward to the actor constructor.
kwargs: The keyword arguments to... |
Method execution stub for an actor handle.
This is the function that executes when
`actor.method_name.remote(*args, **kwargs)` is called. Instead of
executing locally, the method is packaged as a task and scheduled
to the remote actor instance.
Args:
method_name: Th... |
This is defined in order to make pickling work.
Args:
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
Returns:
A dictionary of the information needed to reconstruct the object.
def _... |
This is defined in order to make pickling work.
Args:
state: The serialized state of the actor handle.
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
def _deserialization_helper(self, state,... |
Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data wil... |
Run a single step of SGD.
Runs a SGD step over a slice of the preloaded batch with size given by
self._loaded_per_device_batch_size and offset given by the batch_index
argument.
Updates shared model weights based on the averaged per-device
gradients.
Args:
... |
Generate genes (encodings) for the next generation.
Use the top K (_keep_top_ratio) trials of the last generation
as candidates to generate the next generation. The action could
be selection, crossover and mutation according corresponding
ratio (_selection_bound, _crossover_bound).
... |
Perform selection action to candidates.
For example, new gene = sample_1 + the 5th bit of sample2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
... |
Perform crossover action to candidates.
For example, new gene = 60% sample_1 + 40% sample_2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
... |
Perform mutation action to candidates.
For example, randomly change 10% of original sample
Args:
candidate: List of candidate genes (encodings).
rate: Percentage of mutation bits
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.a... |
Lists trials in the directory subtree starting at the given path.
def list_trials(experiment_path, sort, output, filter_op, columns,
result_columns):
"""Lists trials in the directory subtree starting at the given path."""
if columns:
columns = columns.split(",")
if result_columns:
... |
Lists experiments in the directory subtree.
def list_experiments(project_path, sort, output, filter_op, columns):
    """Lists experiments in the directory subtree."""
    # CLI passes columns as a comma-separated string; an empty/None
    # value is forwarded unchanged
    parsed_columns = columns.split(",") if columns else columns
    commands.list_experiments(project_path, sort, output, filter_op,
                              parsed_columns)
Start one iteration of training and save remote id.
def _train(self, trial):
"""Start one iteration of training and save remote id."""
assert trial.status == Trial.RUNNING, trial.status
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
... |
Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoint fails.
def _start_trial(self, trial, checkpoint=None):
"""Starts trial and restores last result if trial was paused.
Raises:
ValueError if restoring from checkpoi... |
Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg ... |
Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial.
def start_trial(self, trial, checkpoint=None):
"""Star... |
Only returns resources if resources allocated.
def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
"""Only returns resources if resources allocated."""
prior_status = trial.status
self._stop_trial(
trial, error=error, error_msg=error_msg, stop_logger=stop_log... |
Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which is restored when Trial is resumed.
def pause_trial(self, trial):
"""Pauses the trial.
If trial is in-flight, preserves return value in separate queue
before pausing, which i... |
Tries to invoke `Trainable.reset_config()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial
trainable.
new_experiment_tag (str): New experiment name
for trial.
Returns:
... |
Fetches one result of the running trials.
Returns:
Result of the most recent trial training run.
def fetch_result(self, trial):
"""Fetches one result of the running trials.
Returns:
Result of the most recent trial training run."""
trial_future = self._find_item... |
Returns whether this runner has at least the specified resources.
This refreshes the Ray cluster resources if the time since last update
has exceeded self._refresh_period. This also assumes that the
cluster is not resizing very frequently.
def has_resources(self, resources):
"""Returns... |
Returns a human readable message for printing to the console.
def debug_string(self):
"""Returns a human readable message for printing to the console."""
if self._resources_initialized:
status = "Resources requested: {}/{} CPUs, {}/{} GPUs".format(
self._committed_resources... |
Returns a string describing the total resources available.
def resource_string(self):
"""Returns a string describing the total resources available."""
if self._resources_initialized:
res_str = "{} CPUs, {} GPUs".format(self._avail_resources.cpu,
... |
Saves the trial's state to a checkpoint.
def save(self, trial, storage=Checkpoint.DISK):
"""Saves the trial's state to a checkpoint."""
trial._checkpoint.storage = storage
trial._checkpoint.last_result = trial.last_result
if storage == Checkpoint.MEMORY:
trial._checkpoint.va... |
Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
trial : trial to save
def _checkpoint_and_erase(self, trial):
"""Checkpoints the model and erases old checkpoints
if needed.
Parameters
----------
tr... |
Restores training state from a given model checkpoint.
This will also sync the trial results to a new location
if restoring on a different node.
def restore(self, trial, checkpoint=None):
"""Restores training state from a given model checkpoint.
This will also sync the trial results t... |
Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportForm... |
Generates an actor that will execute a particular instance of
the logical operator
Attributes:
instance_id (UUID): The id of the instance the actor will execute.
operator (Operator): The metadata of the logical operator.
input (DataInput): The input gate that manages... |
Generates one actor for each instance of the given logical
operator.
Attributes:
operator (Operator): The logical operator metadata.
upstream_channels (list): A list of all upstream channels for
all instances of the operator.
downstream_channels (list): A... |
Generates all output data channels
(see: DataChannel in communication.py) for all instances of
the given logical operator.
The function constructs one data channel for each pair of
communicating operator instances (instance_1,instance_2),
where instance_1 is an instance of the g... |
Deploys and executes the physical dataflow.
def execute(self):
"""Deploys and executes the physical dataflow."""
self._collect_garbage() # Make sure everything is clean
# TODO (john): Check if dataflow has any 'logical inconsistencies'
# For example, if there is a forward partitioning ... |
Registers the given logical operator to the environment and
connects it to its upstream operator (if any).
A call to this function adds a new edge to the logical topology.
Attributes:
operator (Operator): The metadata of the logical operator.
def __register(self, operator):
... |
Sets the number of instances for the source operator of the stream.
Attributes:
num_instances (int): The level of parallelism for the source
operator of the stream.
def set_parallelism(self, num_instances):
"""Sets the number of instances for the source operator of the stream... |
Applies a map operator to the stream.
Attributes:
map_fn (function): The user-defined logic of the map.
def map(self, map_fn, name="Map"):
"""Applies a map operator to the stream.
Attributes:
map_fn (function): The user-defined logic of the map.
"""
o... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.