Compute the variance along the given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of element... |
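A worked illustration of the N - ddof divisor described above, using plain NumPy rather than the internal routine (the values are illustrative only):

    import numpy as np

    values = np.array([1.0, 2.0, np.nan, 4.0])
    valid = values[~np.isnan(values)]        # skipna=True drops the NaNs first
    n, ddof = valid.size, 1
    var = ((valid - valid.mean()) ** 2).sum() / (n - ddof)   # divisor is N - ddof
    # var matches np.nanvar(values, ddof=1)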
Compute the standard error of the mean along the given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the... |
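The standard error of the mean is the sample standard deviation (same N - ddof divisor) divided by the square root of the number of valid observations; a minimal NumPy sketch of that relationship:

    import numpy as np

    values = np.array([1.0, 2.0, np.nan, 4.0])
    valid = values[~np.isnan(values)]
    sem = valid.std(ddof=1) / np.sqrt(valid.size)   # std over sqrt(count of non-NaN values)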
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int
The index of the max value in the specified axis, or -1 in the NA case
Examples
--------
>>> import p... |
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int
The index of the min value in the specified axis, or -1 in the NA case
Examples
--------
>>> import p... |
Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
Parameters
----------
values : ndarray
axis : int, optional
skipna : boo... |
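The G1 coefficient can be written directly in terms of the second and third central moments, as the docstring says; a sketch of that standard formula in NumPy (not pandas' internal implementation):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 9.0])
    n = x.size
    m2 = ((x - x.mean()) ** 2).mean()          # second central moment
    m3 = ((x - x.mean()) ** 3).mean()          # third central moment
    g1 = m3 / m2 ** 1.5                        # biased skewness
    G1 = g1 * np.sqrt(n * (n - 1)) / (n - 2)   # adjusted Fisher-Pearson G1
    # G1 should agree with pd.Series(x).skew()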
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask... |
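Likewise, the adjusted excess-kurtosis coefficient G2 follows from the second and fourth central moments; a sketch of the standard formula (assumed from the description, not taken from pandas' source):

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0, 10.0])
    n = x.size
    m2 = ((x - x.mean()) ** 2).mean()          # second central moment
    m4 = ((x - x.mean()) ** 4).mean()          # fourth central moment
    g2 = m4 / m2 ** 2 - 3                      # biased excess kurtosis
    G2 = ((n + 1) * g2 + 6) * (n - 1) / ((n - 2) * (n - 3))
    # G2 should agree with pd.Series(x).kurt()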
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count : int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> import pandas.core.nanops as nanops
... |
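The min_count parameter sets how many valid (non-NaN) values must be present before anything other than NA is returned; its effect is easiest to see through the public Series reductions that forward it:

    import numpy as np
    import pandas as pd

    s = pd.Series([np.nan, np.nan])
    s.sum()              # 0.0 -- an all-NaN sum defaults to 0 when min_count=0
    s.sum(min_count=1)   # nan -- fewer than min_count valid values were found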
a, b: ndarrays
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
... |
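The pairwise-valid masking used by nancorr can be mimicked with NumPy alone; a minimal sketch of the same idea (notna replaced by an explicit isnan check):

    import numpy as np

    a = np.array([1.0, 2.0, np.nan, 4.0])
    b = np.array([2.0, np.nan, 6.0, 8.0])
    valid = ~np.isnan(a) & ~np.isnan(b)           # keep only rows valid in both operands
    corr = np.corrcoef(a[valid], b[valid])[0, 1]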
Wrapper for np.percentile that skips missing values, specialized to the
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
q : scalar or array of quantile indices to find
na_valu... |
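A minimal sketch of the 1-D behaviour described above (the function name is hypothetical and this is not pandas' implementation):

    import numpy as np

    def nanpercentile_1d_sketch(values, mask, q, na_value):
        valid = values[~mask]                 # drop entries flagged as missing
        if valid.size == 0:
            # nothing valid left: return na_value, broadcast over the requested quantiles
            return na_value if np.isscalar(q) else np.full(len(q), na_value)
        return np.percentile(valid, q)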
Wrapper for np.percentile that skips missing values.
Parameters
----------
values : array over which to find quantiles
q : scalar or array of quantile indices to find
axis : {0, 1}
na_value : scalar
value to return for empty or all-null values
mask : ndarray[bool]
locations i... |
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : boolean, default False
Set to True if ... |
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
de... |
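Typical use, assuming a whitespace-separated table has already been copied to the system clipboard:

    import pandas as pd

    df = pd.read_clipboard(sep=r'\s+')   # default delimiter: one or more whitespace characters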
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
... |
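Typical use; with excel=True the frame is written in a tab-separated form that pastes cleanly into a spreadsheet (a system clipboard must be available):

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    df.to_clipboard(excel=True, index=False)   # extra keyword arguments are forwarded to to_csv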
Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : it... |
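A hedged re-implementation of the dispatch described above (the function name is hypothetical and a bounded slice is assumed):

    import numbers
    from collections import abc

    def get_skiprows_sketch(skiprows):
        if isinstance(skiprows, slice):
            # assumes skiprows.stop is given
            return range(skiprows.start or 0, skiprows.stop, skiprows.step or 1)
        elif isinstance(skiprows, numbers.Integral):
            return range(skiprows)        # an integer n skips the first n rows
        elif isinstance(skiprows, abc.Container):
            return skiprows               # already a container of row numbers
        raise TypeError('skiprows must be an integer, slice or Container')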
Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
def _read(obj):
"""Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_tex... |
Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression th... |
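A minimal sketch of turning an attribute dict into such an expression (illustrative only; the real helper may differ in details such as quoting):

    def build_xpath_expr_sketch(attrs):
        # {'id': 'main', 'class': 'wide'} -> "[@id='main' and @class='wide']"
        conditions = ['@{}={!r}'.format(key, val) for key, val in attrs.items()]
        return '[{}]'.format(' and '.join(conditions))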
Choose the parser based on the input flavor.
Parameters
----------
flavor : str
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
... |
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you... |
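A self-contained example; read_html always returns a list with one DataFrame per parsed <table> and needs an HTML parser (lxml, or BeautifulSoup4 with html5lib) installed:

    import pandas as pd

    html = '''
    <table>
      <tr><th>a</th><th>b</th></tr>
      <tr><td>1</td><td>2</td></tr>
    </table>
    '''
    dfs = pd.read_html(html)   # raw HTML strings are accepted as well as URLs and file paths
    df = dfs[0]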
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from t... |
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list i... |
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
Returns
-------
list of list
Each returned row is a list of str text.
Notes
-----
Any cell with ``rowspan`` o... |
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
... |
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See... |
Parameters
----------
l : list of arrays
Returns
-------
a set of kinds that exist in this list of arrays
def get_dtype_kinds(l):
"""
Parameters
----------
l : list of arrays
Returns
-------
a set of kinds that exist in this list of arrays
"""
typs = set()
... |
return appropriate class of Series concat
input is either dict or array-like
def _get_series_result_type(result, objs=None):
"""
return appropriate class of Series concat
input is either dict or array-like
"""
from pandas import SparseSeries, SparseDataFrame, DataFrame
# concat Series with... |
return appropriate class of DataFrame-like concat
if all blocks are sparse, return SparseDataFrame
otherwise, return 1st obj
def _get_frame_result_type(result, objs):
"""
return appropriate class of DataFrame-like concat
if all blocks are sparse, return SparseDataFrame
otherwise, return 1st obj... |
provide concatenation of an array of arrays, each of which is a single
'normalized' dtype (meaning, for example, that if it is object dtype,
it is non-datetimelike), and provide a combined dtype for the resulting
array that preserves the overall dtype if possible
Parameters
----------
to_concat : arra... |
Concatenate an object/categorical array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : int
Axis to provide concatenation in the current implementation this is
always 0, e.g. we only have 1D categoricals
Returns
-------
... |
Combine list-like of Categorical-like, unioning categories. All
categories must have the same dtype.
.. versionadded:: 0.19.0
Parameters
----------
to_union : list-like of Categorical, CategoricalIndex,
or Series with dtype='category'
sort_categories : boolean, default False
... |
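Usage through the public wrapper exposed as pandas.api.types.union_categoricals:

    import pandas as pd
    from pandas.api.types import union_categoricals

    a = pd.Categorical(['b', 'c'])
    b = pd.Categorical(['a', 'b'])
    union_categoricals([a, b])                        # categories follow order of appearance: [b, c, a]
    union_categoricals([a, b], sort_categories=True)  # categories are sorted instead: [a, b, c]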
provide concatenation of a datetimelike array of arrays, each of which is a
single M8[ns], datetime64[ns, tz] or m8[ns] dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, prese... |
concat DatetimeIndex with the same tz
all inputs must be DatetimeIndex
it is used in DatetimeIndex.append also
def _concat_datetimetz(to_concat, name=None):
"""
concat DatetimeIndex with the same tz
all inputs must be DatetimeIndex
it is used in DatetimeIndex.append also
"""
# Right now... |
concat all inputs as object. DatetimeIndex, TimedeltaIndex and
PeriodIndex are converted to object dtype before concatenation
def _concat_index_asobject(to_concat, name=None):
"""
concat all inputs as object. DatetimeIndex, TimedeltaIndex and
PeriodIndex are converted to object dtype before concatenati... |
provide concatenation of a sparse/dense array of arrays, each of which is a
single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
typs : set of to_concat dtypes
Returns
-------
a single array, preserving the combined dtypes
def _concat... |
Concatenates multiple RangeIndex instances. All members of "indexes" must
be of type RangeIndex; result will be RangeIndex if possible, Int64Index
otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
def _co... |
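The behaviour described above can be seen through Index.append (an illustration; the exact repr may vary by pandas version):

    import pandas as pd

    pd.RangeIndex(3).append(pd.RangeIndex(3, 6))   # contiguous -> RangeIndex(start=0, stop=6, step=1)
    pd.RangeIndex(3).append(pd.RangeIndex(4, 6))   # gap -> Int64Index([0, 1, 2, 4, 5])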
Rewrite the message of an exception.
def rewrite_exception(old_name, new_name):
"""Rewrite the message of an exception."""
try:
yield
except Exception as e:
msg = e.args[0]
msg = msg.replace(old_name, new_name)
args = (msg,)
if len(e.args) > 1:
args = arg... |
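Given the yield above, the helper is evidently meant as a context manager (e.g. wrapped with contextlib.contextmanager); a hedged usage sketch of the function shown:

    with rewrite_exception('CategoricalIndex', 'MyIndex'):
        raise TypeError('CategoricalIndex does not support this operation')
    # the re-raised TypeError now mentions 'MyIndex' instead of 'CategoricalIndex'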
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, initial_position): span
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each elem... |
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
"""
... |
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
... |
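Typical use of the formatter argument, either as a per-column mapping or as a single format string restricted via subset:

    import pandas as pd

    df = pd.DataFrame({'price': [1.2345, 2.5], 'qty': [3, 4]})
    df.style.format({'price': '${:.2f}'})        # per-column format strings
    df.style.format('{:.1f}', subset=['price'])  # one formatter, limited to a subset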
Render the built up styles to HTML.
Parameters
----------
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
..... |
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
def _update_ctx(se... |
Execute the style functions built up in `self._todo`.
Relies on the convention that all style functions go through
.apply or .applymap. They append the styles to apply as tuples of
(application method, *args, **kwargs)
def _compute(self):
"""
Execute the style functions built up i... |
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
... |
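A column-wise example; the function receives each column as a Series and must return something of the same shape holding CSS strings:

    import numpy as np
    import pandas as pd

    def highlight_above_mean(col):
        return ['background-color: yellow' if v > col.mean() else '' for v in col]

    df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    df.style.apply(highlight_above_mean, axis=0)   # axis=0: applied to each column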
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
... |
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean... |
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
def hide_columns(self, subset):
... |
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
... |
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : {... |
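Typical use (matplotlib must be installed; low and high compress the colormap range as described):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.random.randn(5, 3), columns=list('abc'))
    df.style.background_gradient(cmap='PuBu', low=0.1, high=0.1, axis=0)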
Color background in a range according to the data.
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""
Color background in a range according to the data.
"""
if (not isinstance(text_color_threshold, (float, int)) or
... |
Convenience method for setting one or more non-data dependent
properties for each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
... |
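Example of attaching the same data-independent CSS to every cell in a subset:

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    df.style.set_properties(subset=['a'],
                            **{'background-color': 'black', 'color': 'lawngreen'})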
Draw bar chart in dataframe cells.
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
... |
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to eac... |
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
axis : {0 or 'index', 1 or 'columns', None}, default 0
app... |
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
axis : {0 or 'index', 1 or 'columns', None}, default 0
app... |
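Usage of both extrema highlighters described above:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.random.randn(4, 3))
    df.style.highlight_max(color='lightgreen', axis=0)   # shade each column's maximum
    df.style.highlight_min(color='salmon', axis=None)    # shade the single table-wide minimum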
Highlight the min or max in a Series or DataFrame.
def _highlight_extrema(data, color='yellow', max_=True):
"""
Highlight the min or max in a Series or DataFrame.
"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
... |
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for re... |
Parameters
----------
dtype : ExtensionDtype
def register(self, dtype):
"""
Parameters
----------
dtype : ExtensionDtype
"""
if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)):
raise ValueError("can only register pandas extension... |
Parameters
----------
dtype : PandasExtensionDtype or string
Returns
-------
return the first matching dtype, otherwise return None
def find(self, dtype):
"""
Parameters
----------
dtype : PandasExtensionDtype or string
Returns
-... |
provide compat for constructing strings into numpy datetime64's;
tz changes in numpy 1.11 make '2015-01-01 09:00:00Z' show a deprecation
warning, so we need to pass '2015-01-01 09:00:00' instead
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for construction of strings to numpy datetime64'... |
provide compat for constructing an array of strings into a
np.array(..., dtype=np.datetime64(..));
tz changes in numpy 1.11 make '2015-01-01 09:00:00Z' show a deprecation
warning, so we need to pass '2015-01-01 09:00:00' instead
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
provide compat for con... |
Ensure incoming data can be represented as ints.
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('... |
we always want to get an index value, never a value
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com.values_from_object(key)
loc = self.get_loc(k)
new_values = com.va... |
Determines if two Index objects contain the same elements.
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare ... |
if we have bytes, decode them to unicode
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s |
ensure that the where is a Term or a list of Terms;
this makes sure that we are capturing the scope of variables
that are passed.
create the terms here with a frame_level=2 (we are 2 levels down)
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes... |
store this object, close it if we opened it
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda s... |
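The public entry point built on this helper is DataFrame.to_hdf; a minimal write (requires the PyTables package, file name is illustrative):

    import pandas as pd

    df = pd.DataFrame({'a': range(5)})
    df.to_hdf('store.h5', key='df', mode='w', format='table')   # mode/format are forwarded to HDFStore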
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
Supports any object imple... |
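Reading back, optionally narrowing the rows with a where criterion (which only works for 'table'-format stores):

    import pandas as pd

    df = pd.read_hdf('store.h5', key='df')
    tail = pd.read_hdf('store.h5', key='df', where='index >= 3')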
Check if a given group is a metadata group for a given parent_group.
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
... |
get/create the info for this name
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except KeyError:
idx = info[name] = dict()
return idx |
for a tz-aware type, return an encoded zone
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
if zone is None:
zone = tz.utcoffset().total_seconds()
return zone |
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
... |
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
errors : handler for encoding errors
itemsize : integer, optional, defaults to the max length of the strings
R... |
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
errors : handler for encoding errors, default 'strict'
Returns
-------
an object array of the dec... |
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
... |
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
... |
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : same type as object stored in file
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
... |
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to Non... |
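Equivalent selection through an explicit HDFStore handle, using where/start/stop to restrict what is read from a table-format node:

    import pandas as pd

    with pd.HDFStore('store.h5') as store:
        subset = store.select('df', where='index >= 2', start=0, stop=100)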
return the selection as an Index
Parameters
----------
key : object
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
def select_as_c... |
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
... |
Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row num... |
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t)... |
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
st... |
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame}
format : 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may p... |
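Appending only works against table-format nodes; a short illustration:

    import pandas as pd

    df1 = pd.DataFrame({'a': [1, 2]})
    df2 = pd.DataFrame({'a': [3, 4]})
    with pd.HDFStore('store.h5') as store:
        store.put('df', df1, format='table')   # the node must be table format to append to it
        store.append('df', df2)
        combined = store.select('df')          # rows from both writes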
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of i... |
Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
-... |
return a list of all the top-level nodes (that are not themselves a
pandas storage object)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g f... |
Walk the pytables group hierarchy for pandas objects
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
... |
return the node with the key or None if it does not exist
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.r... |
return the storer object for a key, raise if not in the file
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError('No object named {key} in the file'.format(key=key))
s = ... |
copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) exist... |
Print detailed information on the store.
.. versionadded:: 0.21.0
def info(self):
"""
Print detailed information on the store.
.. versionadded:: 0.21.0
"""
output = '{type}\nFile path: {path}\n'.format(
type=type(self), path=pprint_thing(self._path))
... |
validate / deprecate formats; return the new kwargs
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except KeyError:
... |
return a suitable class to operate
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [{t}] [group->"
... |
set the name of this indexer
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "{name}_kind".format(name=name)
if self.cname is None:
self.cname = name
return self |
set the position of this column in the Table
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self |
return whether I am an indexed column
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except AttributeError:
            return False |
infer this column from the table: create and return a new object
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_meta... |
set the values from this selection: take = take ownership
def convert(self, values, nan_rep, encoding, errors):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values =... |
maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this column's name
with an integer size
def maybe_set_size(self, min_itemsize=None):
""" maybe set a string col itemsize:
        min_itemsize can be an integer or a dict with this column's name
... |
validate this column: return the itemsize it was compared against
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == 'string':
c ... |
set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
... |