Execute a command in the OS terminal. Parameters ---------- *args : list of str Command and parameters to be executed Examples -------- >>> DocBuilder()._run_os('python', '--version') def _run_os(*args): """ Execute a command in the OS terminal....
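A minimal sketch of such a helper using only the standard library (illustrative, not the actual doc/make.py implementation):

import subprocess

def _run_os(*args):
    # Run the command given by *args in a subprocess; check_call echoes
    # the child's output and raises CalledProcessError on a non-zero exit.
    subprocess.check_call(args)

# e.g. _run_os('python', '--version')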
Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') def _sphinx_build(self, kind): """ Call sphinx to...
Open a browser tab showing the page given by `single_doc_html` def _open_browser(self, single_doc_html): """ Open a browser tab showing the page given by `single_doc_html` """ url = os.path.join('file://', DOC_PATH, 'build', 'html', single_doc_html) webbrowser.open(url, new=2)
Open the rst file `page` and extract its title. def _get_page_title(self, page): """ Open the rst file `page` and extract its title. """ fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page)) option_parser = docutils.frontend.OptionParser( components=(docutils.pars...
Create in the build directory an html file with a redirect, for every row in REDIRECTS_FILE. def _add_redirects(self): """ Create in the build directory an html file with a redirect, for every row in REDIRECTS_FILE. """ html = ''' <html> <head> ...
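A hedged sketch of what the redirect generation could look like, assuming REDIRECTS_FILE is a two-column CSV of (old page, new page) pairs; paths and template are illustrative:

import csv
import os

def _add_redirects(build_path, redirects_file):
    # For each (old, new) pair write build/html/<old>.html containing a
    # meta-refresh that forwards the browser to <new>.html.
    template = ('<html><head>'
                '<meta http-equiv="refresh" content="0;URL={url}"/>'
                '</head></html>')
    with open(redirects_file) as f:
        for old, new in csv.reader(f):
            fname = os.path.join(build_path, 'html', '{}.html'.format(old))
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            with open(fname, 'w') as target:
                target.write(template.format(url='{}.html'.format(new)))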
Build HTML documentation. def html(self): """ Build HTML documentation. """ ret_code = self._sphinx_build('html') zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') if os.path.exists(zip_fname): os.remove(zip_fname) if self.single_doc_html is...
Build PDF documentation. def latex(self, force=False): """ Build PDF documentation. """ if sys.platform == 'win32': sys.stderr.write('latex build has not been tested on windows\n') else: ret_code = self._sphinx_build('latex') os.chdir(os.path....
Clean documentation generated files. def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
Compress HTML documentation into a zip file. def zip_html(self): """ Compress HTML documentation into a zip file. """ zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') if os.path.exists(zip_fname): os.remove(zip_fname) dirname = os.path.join(BUILD_PA...
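A self-contained sketch of the compression step using the zipfile module (directory layout assumed, not taken from the original):

import os
import zipfile

def zip_html(build_path):
    # Remove any stale archive, then add every generated file under
    # build/html to pandas.zip with paths relative to that directory.
    dirname = os.path.join(build_path, 'html')
    zip_fname = os.path.join(dirname, 'pandas.zip')
    if os.path.exists(zip_fname):
        os.remove(zip_fname)
    with zipfile.ZipFile(zip_fname, 'w', zipfile.ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(dirname):
            for fname in files:
                if fname == 'pandas.zip':
                    continue
                full = os.path.join(root, fname)
                zf.write(full, os.path.relpath(full, dirname))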
Render a DataFrame to a LaTeX tabular/longtable environment output. def write_result(self, buf): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ # string representation of the columns if len(self.frame.columns) == 0 or len(self.frame.index) == 0: ...
r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format e.g.: a & & & b & c & will become \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c} def _format_multicolumn(self, row, ilevels): r""" Combine c...
r""" Check following rows, whether row should be a multirow e.g.: becomes: a & 0 & \multirow{2}{*}{a} & 0 & & 1 & & 1 & b & 0 & \cline{1-2} b & 0 & def _format_multirow(self, row, ilevels, i, rows): r""" Check following rows, whet...
Print clines after multirow-blocks are finished def _print_cline(self, buf, i, icol): """ Print clines after multirow-blocks are finished """ for cl in self.clinebuf: if cl[0] == i: buf.write('\\cline{{{cl:d}-{icol:d}}}\n' .format(cl...
Checks whether the 'name' parameter for parsing is either an integer OR float that can SAFELY be cast to an integer without losing accuracy. Raises a ValueError if that is not the case. Parameters ---------- name : string Parameter name (used for error reporting) val : int or float ...
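A possible implementation of the described check as a standalone sketch (the min_val parameter is an assumption):

def _validate_integer(name, val, min_val=0):
    # Accept ints, or floats that can be cast to int without loss
    # (e.g. 3.0), and reject everything else with a ValueError.
    msg = "'{}' must be an integer >= {}".format(name, min_val)
    if isinstance(val, float):
        if int(val) != val:
            raise ValueError(msg)
        val = int(val)
    elif not isinstance(val, int):
        raise ValueError(msg)
    if val < min_val:
        raise ValueError(msg)
    return val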
Check if the `names` parameter contains duplicates. If duplicates are found, we issue a warning before returning. Parameters ---------- names : array-like or None An array containing a list of the names used for the output DataFrame. Returns ------- names : array-like or None ...
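A sketch of how the duplicate check could be done with collections.Counter, matching the warn-and-return behaviour described above (illustrative, not the exact pandas code):

import warnings
from collections import Counter

def _validate_names(names):
    # Warn once, listing the duplicated names, then hand the names back.
    if names is not None:
        dupes = [name for name, count in Counter(names).items() if count > 1]
        if dupes:
            warnings.warn('Duplicate names specified: {}'.format(dupes),
                          UserWarning)
    return names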
Generic reader of line files. def _read(filepath_or_buffer: FilePathOrBuffer, kwds): """Generic reader of line files.""" encoding = kwds.get('encoding', None) if encoding is not None: encoding = re.sub('_', '-', encoding).lower() kwds['encoding'] = encoding compression = kwds.get('comp...
r""" Read a table of fixed-width formatted lines into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <http://pandas.pydata.org/pandas-docs/stable/io.html>`_. Parameters ---------- filepat...
Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex Returns ------- boolean : Whether or not columns could become a MultiIndex def _is_potential_m...
Check whether or not the 'usecols' parameter is a callable. If so, enumerates the 'names' parameter and returns a set of indices for each entry in 'names' that evaluates to True. If not a callable, returns 'usecols'. def _evaluate_usecols(usecols, names): """ Check whether or not the 'usecols'...
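The callable case reduces to a set comprehension; a short sketch consistent with the behaviour described above:

def _evaluate_usecols(usecols, names):
    # Apply a callable usecols to every column name and keep the indices
    # where it returns True; otherwise pass usecols through unchanged.
    if callable(usecols):
        return {i for i, name in enumerate(names) if usecols(name)}
    return usecols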
Validates that all usecols are present in a given list of names. If not, raise a ValueError that shows what usecols are missing. Parameters ---------- usecols : iterable of usecols The columns to validate are present in names. names : iterable of names The column names to check ...
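A minimal sketch of the validation, assuming the helper simply reports whichever usecols are absent from names:

def _validate_usecols_names(usecols, names):
    # Collect the requested columns that are not present and raise a
    # ValueError naming them, as described above.
    missing = [col for col in usecols if col not in names]
    if missing:
        raise ValueError(
            'Usecols do not match columns, '
            'columns expected but not found: {}'.format(missing))
    return usecols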
Validate the 'usecols' parameter. Checks whether or not the 'usecols' parameter contains all integers (column selection by index), strings (column by name) or is a callable. Raises a ValueError if that is not the case. Parameters ---------- usecols : list-like, callable, or None List o...
Check whether or not the 'parse_dates' parameter is a non-boolean scalar. Raises a ValueError if that is the case. def _validate_parse_dates_arg(parse_dates): """ Check whether or not the 'parse_dates' parameter is a non-boolean scalar. Raises a ValueError if that is the case. """ msg =...
Return the stringified and numeric versions of these values. def _stringify_na_values(na_values): """ Return the stringified and numeric versions of these values. """ result = [] for x in na_values: result.append(str(x)) result.append(x) try: v = float(x) # we are like 999 here ...
Get the NaN values for a given column. Parameters ---------- col : str The name of the column. na_values : array-like, dict The object listing the NaN values as strings. na_fvalues : array-like, dict The object listing the NaN values as floats. keep_default_na : bool ...
Extract and return the names, index_names, and col_names. `header` is a list-of-lists returned from the parsers. def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False): """ Extract and return the names, index_names, and col_names ...
Infer types of values, possibly casting Parameters ---------- values : ndarray na_values : set try_num_bool : bool, default True try to cast values to numeric (first preference) or boolean Returns ------- converted : ndarray na_count ...
Cast values to specified type Parameters ---------- values : ndarray cast_type : string or np.dtype dtype to cast values to column : string column name - used only for error reporting Returns ------- converted : ndarray def _cast_...
Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions. def _set_noconvert_columns(self): """ Set the columns that should not undergo dtype conversions. Currently, any column that is...
Sets self._col_indices. usecols_key is used if there are string usecols. def _handle_usecols(self, columns, usecols_key): """ Sets self._col_indices. usecols_key is used if there are string usecols. """ if self.usecols is not None: if callable(self.usecols): ...
Checks whether the file begins with the BOM character. If it does, remove it. In addition, if there is quoting in the field subsequent to the BOM, remove it as well because it technically takes place at the beginning of the name, not the middle of it. def _check_for_bom(self, first_row)...
Alert a user about a malformed row. If `self.error_bad_lines` is True, the alert will be `ParserError`. If `self.warn_bad_lines` is True, the alert will be printed out. Parameters ---------- msg : The error message to display. row_num : The row number where the parsing ...
Wrapper around iterating through `self.data` (CSV source). When a CSV error is raised, we check for specific error messages that allow us to customize the error message displayed to the user. Parameters ---------- row_num : The row number of the line being parsed. def ...
Iterate through the lines and remove any that are either empty or contain only one whitespace value Parameters ---------- lines : array-like The array of lines that we are to filter. Returns ------- filtered_lines : array-like The same ar...
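A sketch of the filtering step, assuming each line has already been split into a list of fields:

def _remove_empty_lines(lines):
    # Keep rows with more than one field, or whose single field is
    # neither empty nor pure whitespace.
    return [
        line for line in lines
        if len(line) > 1
        or (len(line) == 1
            and (not isinstance(line[0], str) or line[0].strip()))
    ]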
Try several cases to get lines: 0) There are headers on row 0 and row 1 and their total summed lengths equals the length of the next line. Treat row 0 as columns and row 1 as indices 1) Look for implicit index: there are more columns on row 1 than row 0. If this is true, assume ...
Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped so...
Determine the URL corresponding to a Python object def linkcode_resolve(domain, info): """ Determine the URL corresponding to a Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None...
For those classes for which we use :: :template: autosummary/class_without_autosummary.rst the documented attributes/methods have to be listed in the class docstring. However, if one of those lists is empty, we use 'None', which then generates warnings in sphinx / ugly html output. This "autodoc-p...
Pack object `o` and write it to `stream` See :class:`Packer` for options. def pack(o, stream, **kwargs): """ Pack object `o` and write it to `stream` See :class:`Packer` for options. """ packer = Packer(**kwargs) stream.write(packer.pack(o))
Construct concatenation plan for given block manager and indexers. Parameters ---------- mgr : BlockManager indexers : dict of {axis: indexer} Returns ------- plan : list of (BlockPlacement, JoinUnit) tuples def get_mgr_concatenation_plan(mgr, indexers): """ Construct concatenatio...
Concatenate values from several join units along selected axis. def concatenate_join_units(join_units, concat_axis, copy): """ Concatenate values from several join units along selected axis. """ if concat_axis == 0 and len(join_units) > 1: # Concatenating join units along ax0 is handled in _mer...
Return dtype and N/A values to use when concatenating specified units. Returned N/A value may be None which means there was no casting involved. Returns ------- dtype na def get_empty_dtype_and_na(join_units): """ Return dtype and N/A values to use when concatenating specified units. ...
Check if the join units consist of blocks of uniform type that can be concatenated using Block.concat_same_type instead of the generic concatenate_join_units (which uses `_concat._concat_compat`). def is_uniform_join_units(join_units): """ Check if the join units consist of blocks of uniform type that ...
Reduce join_unit's shape along item axis to length. Extra items that didn't fit are returned as a separate block. def trim_join_unit(join_unit, length): """ Reduce join_unit's shape along item axis to length. Extra items that didn't fit are returned as a separate block. """ if 0 not in join_...
Combine multiple concatenation plans into one. existing_plan is updated in-place. def combine_concat_plans(plans, concat_axis): """ Combine multiple concatenation plans into one. existing_plan is updated in-place. """ if len(plans) == 1: for p in plans[0]: yield p[0], [p[1...
Temporarily set a parameter value using the with statement. Aliasing allowed. def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """ old_value = self[key] try: self[key] = value yie...
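The with-statement behaviour can be expressed with contextlib; a standalone sketch over a dict-like config object (names illustrative):

from contextlib import contextmanager

@contextmanager
def use(config, key, value):
    # Override config[key] for the duration of the with-block and
    # restore the previous value afterwards, even if an error occurs.
    old_value = config[key]
    try:
        config[key] = value
        yield
    finally:
        config[key] = old_value

# e.g. with use(options, 'some.key', value): ...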
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series The Stata Internal Format date to convert to datetime according to fmt fmt : str The format to convert to. Can be tc, td, tw, tm, tq, th, ty Returns ------...
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be tc, td, tw, t...
Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and converts if necessary. Parameters ---------- data : DataFrame The DataFrame to check and convert Notes ----- Numeric columns in Stata must be one of i...
Convert dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length Pandas Stata 251 - for int8 byte 252 - for int16 int 253 - for ...
Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. If not a string, raise ValueError float64 -> "%10.0g" float32 -> "%9.0g" int64 -> "%9.0g" ...
Takes a bytes instance and pads it with null bytes until it is `length` chars long. def _pad_bytes_new(name, length): """ Takes a bytes instance and pads it with null bytes until it is `length` chars long. """ if isinstance(name, str): name = bytes(name, 'utf-8') return name + b'\x00' * (length - len(name)...
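A quick doctest-style illustration of the padding behaviour described above:

>>> _pad_bytes_new('abc', 6)
b'abc\x00\x00\x00'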
Parameters ---------- byteorder : str Byte order of the output encoding : str File encoding Returns ------- value_label : bytes Bytes containing the formatted value label def generate_value_label(self, byteorder, encoding): ""...
Map between numpy and Stata dtypes def _setup_dtype(self): """Map between numpy and Stata dtypes""" if self._dtype is not None: return self._dtype dtype = [] # Convert struct data types to numpy data type for i, typ in enumerate(self.typlist): if typ in self.NU...
Converts categorical columns to Categorical type. def _do_convert_categoricals(self, data, value_label_dict, lbllist, order_categoricals): """ Converts categorical columns to Categorical type. """ value_labels = list(value_label_dict.keys()) cat_...
Helper to call encode before writing to file for Python 3 compat. def _write(self, to_write): """ Helper to call encode before writing to file for Python 3 compat. """ self._file.write(to_write.encode(self._encoding or self._default_encoding))
Check for categorical columns, retain categorical information for Stata file and convert categorical data to int def _prepare_categoricals(self, data): """Check for categorical columns, retain categorical information for Stata file and convert categorical data to int""" is_cat = [is_ca...
Checks floating point data columns for nans, and replaces these with the generic Stata missing value (.) def _replace_nans(self, data): # return data """Checks floating point data columns for nans, and replaces these with the generic Stata missing value (.)""" for c in d...
Checks column names to ensure that they are valid Stata column names. This includes checks for: * Non-string names * Stata keywords * Variables that start with numbers * Variables with names that are too long When an illegal variable name is detected, it ...
Close the file if it was created by the writer. If a buffer or file-like object was passed in, for example a GzipFile, then leave this file open for the caller to close. In either case, attempt to flush the file contents to ensure they are written to disk (if supported) def _close(self...
Generates the GSO lookup table for the DataFrame Returns ------- gso_table : OrderedDict Ordered dictionary using the strings found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to...
Generates the binary blob of GSOs that is written to the dta file. Parameters ---------- gso_table : OrderedDict Ordered dictionary (str, vo) Returns ------- gso : bytes Binary content of dta file to be placed between strl tags Notes ...
Surround val with <tag></tag> def _tag(val, tag): """Surround val with <tag></tag>""" if isinstance(val, str): val = bytes(val, 'utf-8') return (bytes('<' + tag + '>', 'utf-8') + val + bytes('</' + tag + '>', 'utf-8'))
Write the file header def _write_header(self, data_label=None, time_stamp=None): """Write the file header""" byteorder = self._byteorder self._file.write(bytes('<stata_dta>', 'utf-8')) bio = BytesIO() # ds_format - 117 bio.write(self._tag(bytes('117', 'utf-8'), 'release'...
Called twice during file write. The first populates the values in the map with 0s. The second call writes the final map locations when all blocks have been written. def _write_map(self): """Called twice during file write. The first populates the values in the map with 0s. The second c...
Update column names for conversion to strl if they might have been changed to comply with Stata naming rules def _update_strl_names(self): """Update column names for conversion to strl if they might have been changed to comply with Stata naming rules""" # Update convert_strl if names ch...
Convert columns to StrLs if either very large or in the convert_strl variable def _convert_strls(self, data): """Convert columns to StrLs if either very large or in the convert_strl variable""" convert_cols = [ col for i, col in enumerate(data) if self.typlist[i]...
Register Pandas Formatters and Converters with matplotlib. This function modifies the global ``matplotlib.units.registry`` dictionary. Pandas adds custom converters for * pd.Timestamp * pd.Period * np.datetime64 * datetime.datetime * datetime.date * datetime.time See Also -----...
Remove pandas' formatters and converters. Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for ty...
Convert :mod:`datetime` to the Gregorian date as UTC float days, preserving hours, minutes, seconds and microseconds. Return value is a :func:`float`. def _dt_to_float_ordinal(dt): """ Convert :mod:`datetime` to the Gregorian date as UTC float days, preserving hours, minutes, seconds and microseco...
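A minimal sketch that simply delegates to matplotlib's date2num, which already yields float days with time-of-day as a fraction (the real helper may take extra fast paths; this is illustrative):

from matplotlib import dates as mdates

def _dt_to_float_ordinal(dt):
    # date2num converts datetime / datetime64 values to float days
    # relative to matplotlib's date epoch, preserving sub-day fractions.
    return mdates.date2num(dt)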
Returns a default spacing between consecutive ticks for annual data. def _get_default_annual_spacing(nyears): """ Returns a default spacing between consecutive ticks for annual data. """ if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: (min_spacing, maj_spacing)...
Returns the indices where the given period changes. Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : string Name of the period to monitor. def period_break(dates, period): """ Returns the indices where the given period changes. Parameters ...
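A numpy-based sketch of the change detection, comparing each entry's period attribute with that of the preceding period (assumes `dates` supports integer shifting, as a PeriodIndex does):

import numpy as np

def period_break(dates, period):
    # getattr pulls e.g. dates.year or dates.month; positions where the
    # value differs from the preceding period are the break points.
    current = getattr(dates, period)
    previous = getattr(dates - 1, period)
    return np.nonzero(current - previous)[0]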
Returns true if the ``label_flags`` indicate there is at least one label for this level. If the minimum view limit is not an exact integer, then the first tick label won't be shown, so we must adjust for that. def has_level_label(label_flags, vmin): """ Returns true if the ``label_flags`` indicate...
Return the :class:`~matplotlib.units.AxisInfo` for *unit*. *unit* is a tzinfo instance or None. The *axis* argument is required but not used. def axisinfo(unit, axis): """ Return the :class:`~matplotlib.units.AxisInfo` for *unit*. *unit* is a tzinfo instance or None. T...
Pick the best locator based on a distance. def get_locator(self, dmin, dmax): 'Pick the best locator based on a distance.' _check_implicitly_registered() delta = relativedelta(dmax, dmin) num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days num_sec = (delta.hours ...
Set the view limits to include the data range. def autoscale(self): """ Set the view limits to include the data range. """ dmin, dmax = self.datalim_to_dt() if dmin > dmax: dmax, dmin = dmin, dmax # We need to cap at the endpoints of valid datetime ...
Returns the default locations of ticks. def _get_default_locs(self, vmin, vmax): "Returns the default locations of ticks." if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) locator = self.plot_obj.date_axis_info if ...
Sets the view limits to the nearest multiples of base that contain the data. def autoscale(self): """ Sets the view limits to the nearest multiples of base that contain the data. """ # requires matplotlib >= 0.98.0 (vmin, vmax) = self.axis.get_data_interval() ...
Sets the default format of the tick labels. def _set_default_format(self, vmin, vmax): "Sets the default format of the tick labels." if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) info = self.plot_obj.date_axis_info if self.isminor...
Sets the locations of the ticks def set_locs(self, locs): 'Sets the locations of the ticks' # don't actually use the locs. This is just needed to work with # matplotlib. Force to use vmin, vmax _check_implicitly_registered() self.locs = locs (vmin, vmax) = vi = tuple(s...
Sets index names to 'index' for regular, or 'level_x' for Multi def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" if com._all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == 'index': warning...
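A sketch of the renaming logic, covering both the single Index and MultiIndex cases (illustrative; the warning path shown in the excerpt above is omitted):

def set_default_names(data):
    # Leave data untouched when every index level already has a name.
    if all(name is not None for name in data.index.names):
        return data
    data = data.copy()
    if data.index.nlevels > 1:
        data.index.names = [
            name if name is not None else 'level_{}'.format(i)
            for i, name in enumerate(data.index.names)
        ]
    else:
        data.index.name = data.index.name or 'index'
    return data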
Converts a JSON field descriptor into its corresponding NumPy / pandas type Parameters ---------- field A JSON field descriptor Returns ------- dtype Raises ----- ValueError If the type of the provided field is unknown or currently unsupported Examples ---...
Create a Table schema from ``data``. Parameters ---------- data : Series, DataFrame index : bool, default True Whether to include ``data.index`` in the schema. primary_key : bool or None, default True column names to designate as the primary key. The default `None` will set ...
Builds a DataFrame from a given schema Parameters ---------- json : A JSON table schema precise_float : boolean Flag controlling precision when decoding string to double values, as dictated by ``read_json`` Returns ------- df : DataFrame Raises ------ N...
Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string def get_op_result_name(left, right): """ Find ...
Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match or None if they have different names. Parameters ---------- a : object b : object Returns ---...
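A direct translation of that rule into a small helper (sketch):

def _maybe_match_name(a, b):
    # Prefer whichever operand carries a 'name'; when both do, keep it
    # only if the two names agree, otherwise fall back to None.
    a_has = hasattr(a, 'name')
    b_has = hasattr(b, 'name')
    if a_has and b_has:
        return a.name if a.name == b.name else None
    if a_has:
        return a.name
    if b_has:
        return b.name
    return None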
Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of th...
Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function def make_invalid_op(name): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- ...
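A sketch of the factory: it closes over the method name and returns a function, meant to be attached to a class, that always raises (illustrative):

def make_invalid_op(name):
    # The (self, other=None) signature lets the result be used as either
    # a unary or binary special method on the target class.
    def invalid_op(self, other=None):
        raise TypeError('cannot perform {} with this index type: '
                        '{}'.format(name, type(self).__name__))
    invalid_op.__name__ = name
    return invalid_op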
Find the keyword arguments to pass to numexpr for the given operation. Parameters ---------- name : str Returns ------- eval_kwargs : dict Examples -------- >>> _gen_eval_kwargs("__add__") {} >>> _gen_eval_kwargs("rtruediv") {'reversed': True, 'truediv': True} def _g...
Find the appropriate fill value to use when filling in undefined values in the results of the given operation caused by operating on (generally dividing by) zero. Parameters ---------- name : str Returns ------- fill_value : {None, np.nan, np.inf} def _gen_fill_zeros(name): """ ...
Find the operation string, if any, to pass to numexpr for this operation. Parameters ---------- op : binary operator cls : class Returns ------- op_str : string or None def _get_opstr(op, cls): """ Find the operation string, if any, to pass to numexpr for this operation. ...
Find the name to attach to this method according to conventions for special and non-special methods. Parameters ---------- op : binary operator special : bool Returns ------- op_name : str def _get_op_name(op, special): """ Find the name to attach to this method according to c...
Make the appropriate substitutions for the given op_name and typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {series, 'dataframe...
If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like rig...
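A numpy/pandas sketch of the masking rule ("fill only where exactly one side is null"); assumes array-like inputs:

import numpy as np
import pandas as pd

def fill_binop(left, right, fill_value):
    # Positions where both sides are null are deliberately left alone so
    # that NaN op NaN still propagates as missing.
    if fill_value is not None:
        left_mask = pd.isna(left)
        right_mask = pd.isna(right)
        left = np.where(left_mask & ~right_mask, fill_value, left)
        right = np.where(right_mask & ~left_mask, fill_value, right)
    return left, right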
Apply the function `op` to only non-null points in x and y. Parameters ---------- x : array-like y : array-like op : binary operation allowed_types : class or tuple of classes Returns ------- result : ndarray[bool] def mask_cmp_op(x, y, op, allowed_types): """ Apply the fu...
If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s). Parameters ---------- x : np.ndarray y : np.ndarray, Series, Index op : binary operator def masked_arith_op(x, y, op): """ If the given arithmetic operation fails, attempt it ...
If a comparison has mismatched types and is not necessarily meaningful, follow python3 conventions by: - returning all-False for equality - returning all-True for inequality - raising TypeError otherwise Parameters ---------- left : array-like right : scalar, array-like ...
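The three outcomes can be written down directly; a hedged sketch over an ndarray left operand (the helper name is illustrative):

import operator
import numpy as np

def invalid_comparison(left, right, op):
    # Mirror Python 3 semantics for comparisons between unrelated types.
    if op is operator.eq:
        return np.zeros(len(left), dtype=bool)
    if op is operator.ne:
        return np.ones(len(left), dtype=bool)
    raise TypeError('Invalid comparison between {} and {}'
                    .format(type(left).__name__, type(right).__name__))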
Identify cases where a DataFrame operation should dispatch to its Series counterpart. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- override : bool def should_series_dispatch(left, right, op): """ Identify cases where a DataF...
Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. Parameters ---------- left : DataFrame right : scalar or DataFrame func : arithmetic or comparison operator str_rep : str or None, default None axis : {None, 0, 1, "i...
Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series ri...
Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. """ # The op calls will raise TypeError i...