Convert a ctypes int pointer array to a numpy array. def cint8_array_to_numpy(cptr, length): """Convert a ctypes int pointer array to a numpy array.""" if isinstance(cptr, ctypes.POINTER(ctypes.c_int8)): return np.fromiter(cptr, dtype=np.int8, count=length) else: raise RuntimeError('Expecte...
Convert Python dictionary to string, which is passed to C API. def param_dict_to_str(data): """Convert Python dictionary to string, which is passed to C API.""" if data is None or not data: return "" pairs = [] for key, val in data.items(): if isinstance(val, (list, tuple, set)) or is_n...
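As an illustration of the conversion above, here is a minimal, hypothetical sketch (simplified to lists and scalars; the real helper also handles numpy and pandas values):

    def param_dict_to_str_sketch(data):
        # Flatten {key: value} pairs into the "key=value" form the C API
        # expects, joining collection values with commas.
        if not data:
            return ""
        pairs = []
        for key, val in data.items():
            if isinstance(val, (list, tuple, set)):
                pairs.append("{}={}".format(key, ",".join(map(str, val))))
            else:
                pairs.append("{}={}".format(key, val))
        return " ".join(pairs)

    print(param_dict_to_str_sketch({"objective": "binary", "metric": ["auc", "binary_logloss"]}))
    # -> objective=binary metric=auc,binary_logloss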
Fix the memory of multi-dimensional sliced object. def convert_from_sliced_object(data): """Fix the memory of multi-dimensional sliced object.""" if data.base is not None and isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray): if not data.flags.c_contiguous: warnings.warn("U...
Get pointer of float numpy array / list. def c_float_array(data): """Get pointer of float numpy array / list.""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): data = convert_from_sliced_object(data) assert data.flags.c_contiguous if data....
Get pointer of int numpy array / list. def c_int_array(data): """Get pointer of int numpy array / list.""" if is_1d_list(data): data = np.array(data, copy=False) if is_numpy_1d_array(data): data = convert_from_sliced_object(data) assert data.flags.c_contiguous if data.dtype ...
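Both pointer helpers rest on the same numpy-to-ctypes bridge. A minimal sketch, assuming a C-contiguous array (variable names are illustrative):

    import ctypes
    import numpy as np

    data = np.ascontiguousarray([1, 2, 3], dtype=np.int32)
    ptr = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))  # no copy is made
    print(ptr[0], ptr[2])  # 1 3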
Predict logic. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for prediction. When data type is string, it represents the path to a txt file. num_iteration : int, optional (default=-1) ...
Get size of prediction result. def __get_num_preds(self, num_iteration, nrow, predict_type): """Get size of prediction result.""" if nrow > MAX_INT32: raise LightGBMError('LightGBM cannot perform prediction for data ' 'with number of rows greater than MAX_INT3...
Predict for a 2-D numpy matrix. def __pred_for_np2d(self, mat, num_iteration, predict_type): """Predict for a 2-D numpy matrix.""" if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray or list must be 2 dimensional') def inner_predict(mat, num_iteration, predict_type, preds...
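Assumed end-to-end usage of the 2-D prediction path (synthetic data; parameter values are illustrative):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 10)
    y = np.random.randint(0, 2, 100)
    bst = lgb.train({"objective": "binary", "verbose": -1},
                    lgb.Dataset(X, label=y), num_boost_round=5)
    preds = bst.predict(X)  # 2-D numpy input is routed through __pred_for_np2d
    print(preds.shape)      # (100,)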
Predict for CSR data. def __pred_for_csr(self, csr, num_iteration, predict_type): """Predict for CSR data.""" def inner_predict(csr, num_iteration, predict_type, preds=None): nrow = len(csr.indptr) - 1 n_preds = self.__get_num_preds(num_iteration, nrow, predict_type) ...
Predict for CSC data. def __pred_for_csc(self, csc, num_iteration, predict_type): """Predict for CSC data.""" nrow = csc.shape[0] if nrow > MAX_INT32: return self.__pred_for_csr(csc.tocsr(), num_iteration, predict_type) n_preds = self.__get_num_preds(num_iteration, nrow,...
Initialize data from a 2-D numpy matrix. def __init_from_np2d(self, mat, params_str, ref_dataset): """Initialize data from a 2-D numpy matrix.""" if len(mat.shape) != 2: raise ValueError('Input numpy.ndarray must be 2 dimensional') self.handle = ctypes.c_void_p() if mat.dty...
Initialize data from a list of 2-D numpy matrices. def __init_from_list_np2d(self, mats, params_str, ref_dataset): """Initialize data from a list of 2-D numpy matrices.""" ncol = mats[0].shape[1] nrow = np.zeros((len(mats),), np.int32) if mats[0].dtype == np.float64: ptr_dat...
Initialize data from a CSR matrix. def __init_from_csr(self, csr, params_str, ref_dataset): """Initialize data from a CSR matrix.""" if len(csr.indices) != len(csr.data): raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data))) self.handle = ctypes.c_voi...
Initialize data from a CSC matrix. def __init_from_csc(self, csc, params_str, ref_dataset): """Initialize data from a CSC matrix.""" if len(csc.indices) != len(csc.data): raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data))) self.handle = ctypes.c_voi...
Lazy init. Returns ------- self : Dataset Constructed Dataset object. def construct(self): """Lazy init. Returns ------- self : Dataset Constructed Dataset object. """ if self.handle is None: if self.reference...
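A short sketch of the lazy-construction behaviour described above (synthetic data):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 5)
    y = np.random.randint(0, 2, 100)
    ds = lgb.Dataset(X, label=y)  # raw data is recorded, no native handle yet
    ds.construct()                # the native Dataset is materialized here
    print(ds.num_data(), ds.num_feature())  # 100 5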
Create validation data aligned with the current Dataset. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays Data source of Dataset. If string, it represents the path to a txt file. label : list,...
Get subset of current Dataset. Parameters ---------- used_indices : list of int Indices used to create the subset. params : dict or None, optional (default=None) These parameters will be passed to Dataset constructor. Returns ------- subs...
Save Dataset to a binary file. Parameters ---------- filename : string Name of the output file. Returns ------- self : Dataset Returns self. def save_binary(self, filename): """Save Dataset to a binary file. Parameters -...
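Assumed round trip for the binary format (the file name is hypothetical):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 5)
    y = np.random.randint(0, 2, 100)
    lgb.Dataset(X, label=y).save_binary("train.bin")  # constructs, then saves
    reloaded = lgb.Dataset("train.bin")  # a string path loads the binary file back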
Set property into the Dataset. Parameters ---------- field_name : string The field name of the information. data : list, numpy 1-D array, pandas Series or None The array of data to be set. Returns ------- self : Dataset Datase...
Get property from the Dataset. Parameters ---------- field_name : string The field name of the information. Returns ------- info : numpy array A numpy array with information from the Dataset. def get_field(self, field_name): """Get prope...
Set categorical features. Parameters ---------- categorical_feature : list of int or strings Names or indices of categorical features. Returns ------- self : Dataset Dataset with set categorical features. def set_categorical_feature(self, catego...
Set predictor for continued training. It is not recommended for users to call this function. Please use init_model argument in engine.train() or engine.cv() instead. def _set_predictor(self, predictor): """Set predictor for continued training. It is not recommended for users to call thi...
Set reference Dataset. Parameters ---------- reference : Dataset Reference that is used as a template to construct the current Dataset. Returns ------- self : Dataset Dataset with set reference. def set_reference(self, reference): """Set...
Set feature name. Parameters ---------- feature_name : list of strings Feature names. Returns ------- self : Dataset Dataset with set feature name. def set_feature_name(self, feature_name): """Set feature name. Parameters ...
Set label of Dataset. Parameters ---------- label : list, numpy 1-D array, pandas Series / one-column DataFrame or None The label information to be set into Dataset. Returns ------- self : Dataset Dataset with set label. def set_label(self, labe...
Set weight of each instance. Parameters ---------- weight : list, numpy 1-D array, pandas Series or None Weight to be set for each data point. Returns ------- self : Dataset Dataset with set weight. def set_weight(self, weight): """Set w...
Set init score of Booster to start from. Parameters ---------- init_score : list, numpy 1-D array, pandas Series or None Init score for Booster. Returns ------- self : Dataset Dataset with set init score. def set_init_score(self, init_score): ...
Set group size of Dataset (used for ranking). Parameters ---------- group : list, numpy 1-D array, pandas Series or None Group size of each group. Returns ------- self : Dataset Dataset with set group. def set_group(self, group): """Set ...
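A hedged sketch of how the group sizes are interpreted for ranking: group=[10, 20, 70] means rows 0-9 form the first query, rows 10-29 the second, and rows 30-99 the third; the sizes must sum to the number of rows.

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 5)
    y = np.random.randint(0, 4, 100)  # relevance grades
    ds = lgb.Dataset(X, label=y)
    ds.set_group([10, 20, 70])        # 10 + 20 + 70 == 100 rows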
Get the label of the Dataset. Returns ------- label : numpy array or None The label information from the Dataset. def get_label(self): """Get the label of the Dataset. Returns ------- label : numpy array or None The label information fro...
Get the weight of the Dataset. Returns ------- weight : numpy array or None Weight for each data point from the Dataset. def get_weight(self): """Get the weight of the Dataset. Returns ------- weight : numpy array or None Weight for each...
Get the feature penalty of the Dataset. Returns ------- feature_penalty : numpy array or None Feature penalty for each feature in the Dataset. def get_feature_penalty(self): """Get the feature penalty of the Dataset. Returns ------- feature_penalty ...
Get the monotone constraints of the Dataset. Returns ------- monotone_constraints : numpy array or None Monotone constraints: -1, 0 or 1, for each feature in the Dataset. def get_monotone_constraints(self): """Get the monotone constraints of the Dataset. Returns ...
Get the initial score of the Dataset. Returns ------- init_score : numpy array or None Init score of Booster. def get_init_score(self): """Get the initial score of the Dataset. Returns ------- init_score : numpy array or None Init score ...
Get the raw data of the Dataset. Returns ------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None Raw data used in the Dataset construction. def get_data(self): """Get the raw data of the Dataset. Retur...
Get the group of the Dataset. Returns ------- group : numpy array or None Group size of each group. def get_group(self): """Get the group of the Dataset. Returns ------- group : numpy array or None Group size of each group. """ ...
Get the number of rows in the Dataset. Returns ------- number_of_rows : int The number of rows in the Dataset. def num_data(self): """Get the number of rows in the Dataset. Returns ------- number_of_rows : int The number of rows in the D...
Get the number of columns (features) in the Dataset. Returns ------- number_of_columns : int The number of columns (features) in the Dataset. def num_feature(self): """Get the number of columns (features) in the Dataset. Returns ------- number_of_co...
Get a chain of Dataset objects. Starts with r, then goes to r.reference (if it exists), then to r.reference.reference, etc. until we hit ``ref_limit`` or a reference loop. Parameters ---------- ref_limit : int, optional (default=100) The limit number of referen...
Add features from other Dataset to the current Dataset. Both Datasets must be constructed before calling this method. Parameters ---------- other : Dataset The Dataset to take features from. Returns ------- self : Dataset Dataset with th...
Save Dataset to a text file. This format cannot be loaded back in by LightGBM, but is useful for debugging purposes. Parameters ---------- filename : string Name of the output file. Returns ------- self : Dataset Returns self. def dump_...
Free Booster's Datasets. Returns ------- self : Booster Booster without Datasets. def free_dataset(self): """Free Booster's Datasets. Returns ------- self : Booster Booster without Datasets. """ self.__dict__.pop('train_s...
Set the network configuration. Parameters ---------- machines : list, set or string Names of machines. local_listen_port : int, optional (default=12400) TCP listen port for local machines. listen_time_out : int, optional (default=120) Socket t...
Add validation data. Parameters ---------- data : Dataset Validation data. name : string Name of validation data. Returns ------- self : Booster Booster with set validation data. def add_valid(self, data, name): """Ad...
Reset parameters of Booster. Parameters ---------- params : dict New parameters for Booster. Returns ------- self : Booster Booster with new parameters. def reset_parameter(self, params): """Reset parameters of Booster. Paramete...
Update Booster for one iteration. Parameters ---------- train_set : Dataset or None, optional (default=None) Training data. If None, last training data is used. fobj : callable or None, optional (default=None) Customized objective function. ...
Boost Booster for one iteration with customized gradient statistics. Note ---- For multi-class task, the score is grouped by class_id first, then by row_id. If you want to get the i-th row score in the j-th class, the access way is score[j * num_data + i] and you should group grad a...
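A worked sketch of the multi-class score layout described above:

    import numpy as np

    num_data, num_class = 4, 3
    score = np.arange(num_data * num_class, dtype=float)  # grouped by class first
    i, j = 2, 1                                           # row 2, class 1
    print(score[j * num_data + i])                        # 6.0
    print(score.reshape(num_class, num_data)[j, i])       # same element via reshape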
Roll back one iteration. Returns ------- self : Booster Booster with one iteration rolled back. def rollback_one_iter(self): """Roll back one iteration. Returns ------- self : Booster Booster with one iteration rolled back. """ ...
Get the index of the current iteration. Returns ------- cur_iter : int The index of the current iteration. def current_iteration(self): """Get the index of the current iteration. Returns ------- cur_iter : int The index of the current it...
Get number of models per iteration. Returns ------- model_per_iter : int The number of models per iteration. def num_model_per_iteration(self): """Get number of models per iteration. Returns ------- model_per_iter : int The number of mod...
Get number of weak sub-models. Returns ------- num_trees : int The number of weak sub-models. def num_trees(self): """Get number of weak sub-models. Returns ------- num_trees : int The number of weak sub-models. """ num_t...
Evaluate for data. Parameters ---------- data : Dataset Data for evaluation. name : string Name of the data. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds,...
Evaluate for validation data. Parameters ---------- feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples...
Save Booster to file. Parameters ---------- filename : string Filename to save Booster. num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, al...
Shuffle models. Parameters ---------- start_iteration : int, optional (default=0) The first iteration that will be shuffled. end_iteration : int, optional (default=-1) The last iteration that will be shuffled. If <= 0, the last available iterati...
Load Booster from a string. Parameters ---------- model_str : string Model will be loaded from this string. verbose : bool, optional (default=True) Whether to print messages while loading model. Returns ------- self : Booster ...
Save Booster to string. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be saved. If None, if the best iteration exists, it is saved; otherwise, all iterations are saved. If <= 0, all iterations ar...
Dump Booster to JSON format. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be dumped. If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped. If <= 0, all itera...
Make a prediction. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for prediction. If string, it represents the path to a txt file. num_iteration : int or None, optional (default=None) ...
Refit the existing Booster with new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to a txt file. label : list, numpy 1-D array or pandas Series ...
Get the output of a leaf. Parameters ---------- tree_id : int The index of the tree. leaf_id : int The index of the leaf in the tree. Returns ------- result : float The output of the leaf. def get_leaf_output(self, tree_id, l...
Convert to predictor. def _to_predictor(self, pred_parameter=None): """Convert to predictor.""" predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter) predictor.pandas_categorical = self.pandas_categorical return predictor
Get number of features. Returns ------- num_feature : int The number of features. def num_feature(self): """Get number of features. Returns ------- num_feature : int The number of features. """ out_num_feature = ctypes.c_...
Get names of features. Returns ------- result : list List with names of features. def feature_name(self): """Get names of features. Returns ------- result : list List with names of features. """ num_feature = self.num_fea...
Get feature importances. Parameters ---------- importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits...
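Assumed usage of the two importance types (synthetic data; parameter values are illustrative):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 5)
    y = np.random.randint(0, 2, 100)
    bst = lgb.train({"objective": "binary", "verbose": -1},
                    lgb.Dataset(X, label=y), num_boost_round=5)
    print(bst.feature_importance(importance_type="split"))  # usage counts per feature
    print(bst.feature_importance(importance_type="gain"))   # summed split gains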
Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- ...
Evaluate training or validation data. def __inner_eval(self, data_name, data_idx, feval=None): """Evaluate training or validation data.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of datasets") self.__get_eval_info() ret = []...
Predict for training and validation dataset. def __inner_predict(self, data_idx): """Predict for training and validation dataset.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of datasets") if self.__inner_predict_buffer[data_idx] is N...
Get inner evaluation count and names. def __get_eval_info(self): """Get inner evaluation count and names.""" if self.__need_reload_eval_info: self.__need_reload_eval_info = False out_num_eval = ctypes.c_int(0) # Get num of inner evals _safe_call(_LIB.LGBM...
Set attributes to the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. Returns ------- self : Booster Booster with set attributes. def set_attr(self, **kwargs): """S...
Find the path to LightGBM library files. Returns ------- lib_path: list of strings List of all found library paths to LightGBM. def find_lib_path(): """Find the path to LightGBM library files. Returns ------- lib_path: list of strings List of all found library paths to Light...
Convert numpy classes to JSON serializable objects. def json_default_with_numpy(obj): """Convert numpy classes to JSON serializable objects.""" if isinstance(obj, (np.integer, np.floating, np.bool_)): return obj.item() elif isinstance(obj, np.ndarray): return obj.tolist() else: ...
Format metric string. def _format_eval_result(value, show_stdv=True): """Format metric string.""" if len(value) == 4: return '%s\'s %s: %g' % (value[0], value[1], value[2]) elif len(value) == 5: if show_stdv: return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4]) ...
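A minimal sketch of the two tuple shapes this formatter handles (a 4-tuple is a plain metric; a 5-tuple carries the stdv produced by cross-validation):

    def format_eval_result_sketch(value, show_stdv=True):
        if len(value) == 4:
            return "%s's %s: %g" % (value[0], value[1], value[2])
        if len(value) == 5 and show_stdv:
            return "%s's %s: %g + %g" % (value[0], value[1], value[2], value[4])
        return "%s's %s: %g" % (value[0], value[1], value[2])

    print(format_eval_result_sketch(("valid_0", "auc", 0.93, True)))
    # -> valid_0's auc: 0.93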
Create a callback that prints the evaluation results. Parameters ---------- period : int, optional (default=1) The period to print the evaluation results. show_stdv : bool, optional (default=True) Whether to show stdv (if provided). Returns ------- callback : function ...
Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary. de...
Create a callback that resets the parameter after the first iteration. Note ---- The initial parameter will still take effect on the first iteration. Parameters ---------- **kwargs : value should be list or function List of parameters for each boosting round or a customized func...
Create a callback that activates early stopping. Note ---- Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation da...
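Assumed usage via the callbacks argument (signature details vary across LightGBM versions):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(200, 5)
    y = np.random.randint(0, 2, 200)
    train = lgb.Dataset(X[:150], label=y[:150])
    valid = lgb.Dataset(X[150:], label=y[150:], reference=train)
    bst = lgb.train({"objective": "binary", "metric": "auc", "verbose": -1},
                    train, num_boost_round=100, valid_sets=[valid],
                    callbacks=[lgb.early_stopping(stopping_rounds=10)])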
Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Datasets or None, optiona...
Make an n-fold list of Boosters from random indices. def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True, eval_train_metric=False): """Make an n-fold list of Boosters from random indices.""" full_data = full_data.construct() num_data = full_da...
Aggregate cross-validation results. def _agg_cv_result(raw_results, eval_train_metric=False): """Aggregate cross-validation results.""" cvmap = collections.defaultdict(list) metric_type = {} for one_result in raw_results: for one_line in one_result: if eval_train_metric: ...
Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. folds : generator or iterator of (train...
Logarithmic loss with not-necessarily-binary labels. def log_loss(preds, labels): """Logarithmic loss with not-necessarily-binary labels.""" log_likelihood = np.sum(labels * np.log(preds)) / len(preds) return -log_likelihood
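A worked numeric check of the helper above, assuming soft labels in [0, 1] (note it scores only the positive-class term, matching the definition given):

    import numpy as np

    def log_loss(preds, labels):
        log_likelihood = np.sum(labels * np.log(preds)) / len(preds)
        return -log_likelihood

    preds = np.array([0.9, 0.2])
    labels = np.array([1.0, 0.3])   # labels may be fractional
    print(log_loss(preds, labels))  # -(1*ln(0.9) + 0.3*ln(0.2))/2 ~= 0.294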
Measure performance of an objective. Parameters ---------- objective : string 'binary' or 'xentropy' Objective function. label_type : string 'binary' or 'probability' Type of the label. data : dict Data for training. Returns ------- result : dict Experim...
Check that the object is a tuple of 2 elements, raising TypeError otherwise. def _check_not_tuple_of_2_elements(obj, obj_name='obj'): """Check that the object is a tuple of 2 elements, raising TypeError otherwise.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be ...
Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : string or None, optional (default=None) The metric name to plot. Only one metric supported because different metrics h...
Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api.html#digraph def _to_graphviz(tree_info, show_info, feature_names, precision=None, **kwargs): """Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api....
Create a digraph representation of specified tree. Note ---- For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optio...
Plot specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality, and its returned objects can also be rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel ins...
Return the -std=c++[0x/11/14] compiler flag. c++14 is preferred over c++0x/11 when available. def cpp_flag(compiler): """Return the -std=c++[0x/11/14] compiler flag. c++14 is preferred over c++0x/11 when available. """ standards = ['-std=c++14', '-std=c++11', '-std=c++0x'] ...
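A minimal sketch of the probing logic, assuming a hypothetical has_flag(compiler, flag) helper that test-compiles a tiny source file with the given flag:

    def cpp_flag_sketch(compiler, has_flag):
        # Try the newest standard first, falling back to older ones.
        for standard in ['-std=c++14', '-std=c++11', '-std=c++0x']:
            if has_flag(compiler, standard):
                return standard
        raise RuntimeError('Unsupported compiler -- at least C++0x support is needed!')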
query is a 1d numpy array corresponding to the vector to which you want to find the closest vector. vectors is a 2d numpy array corresponding to the vectors you want to consider. ban_set is a set of indices within vectors you want to ignore for nearest match. cossims is a 1d numpy array of size len(vector...
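A self-contained sketch of the search those arguments describe (the precomputed cossims buffer is omitted for brevity; names are illustrative):

    import numpy as np

    def find_nearest_sketch(query, vectors, ban_set):
        # Cosine similarity of the query against every row of vectors.
        cossims = vectors @ query / (np.linalg.norm(vectors, axis=1)
                                     * np.linalg.norm(query))
        for banned in ban_set:
            cossims[banned] = -np.inf   # never select a banned index
        best = int(np.argmax(cossims))
        return best, cossims[best]

    vectors = np.random.rand(10, 5)
    print(find_nearest_sketch(vectors[3], vectors, ban_set={3}))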
Train a supervised model and return a model object. input must be a filepath. The input text does not need to be tokenized as per the tokenize function, but it must be preprocessed and encoded as UTF-8. You might want to consult standard preprocessing scripts such as tokenizer.perl mentioned here: http...
Get the vector representation of word. def get_word_vector(self, word): """Get the vector representation of word.""" dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getWordVector(b, word) return np.array(b)
Given a string, get a single vector representation. This function assumes it is given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. def get_sentence_vector(self, text): ...
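Assumed usage (the model path is hypothetical):

    import fasttext

    model = fasttext.load_model("model.bin")  # hypothetical path
    vec = model.get_sentence_vector("the quick brown fox")
    print(vec.shape)  # (model dimension,)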
Given a word, get the subwords and their indices. def get_subwords(self, word, on_unicode_error='strict'): """ Given a word, get the subwords and their indices. """ pair = self.f.getSubwords(word, on_unicode_error) return pair[0], np.array(pair[1])
Given an index, get the corresponding vector of the Input Matrix. def get_input_vector(self, ind): """ Given an index, get the corresponding vector of the Input Matrix. """ dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getInputVector(b, ind) return n...
Given a string, get a list of labels and a list of corresponding probabilities. k controls the number of returned labels. A choice of 5 will return the 5 most probable labels. By default this returns only the most likely label and probability. threshold filters the returned labe...
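Assumed usage of the k and threshold arguments (the model path is hypothetical):

    import fasttext

    model = fasttext.load_model("supervised.bin")  # hypothetical path
    labels, probs = model.predict("which baking dish is best?",
                                  k=2, threshold=0.1)
    # Returns at most 2 labels, each with probability >= 0.1.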
Get a copy of the full input matrix of a Model. This only works if the model is not quantized. def get_input_matrix(self): """ Get a copy of the full input matrix of a Model. This only works if the model is not quantized. """ if self.f.isQuant(): raise ValueE...
Get a copy of the full output matrix of a Model. This only works if the model is not quantized. def get_output_matrix(self): """ Get a copy of the full output matrix of a Model. This only works if the model is not quantized. """ if self.f.isQuant(): raise Val...
Get the entire list of words of the dictionary, optionally including the frequency of the individual words. This does not include any subwords. For that, please consult the function get_subwords. def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entir...
Get the entire list of labels of the dictionary, optionally including the frequency of the individual labels. Unsupervised models use words as labels, which is why get_labels will call and return get_words for this type of model. def get_labels(self, include_freq=False, on_unicode_error=...