text stringlengths 81 112k |
|---|
Performs the cross-validation step.
def cross_validate(self, ax):
'''
Performs the cross-validation step.
'''
# The CDPP to beat
cdpp_opt = self.get_cdpp_arr()
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
log.info("Cross-val... |
Computes the scatter in the validation set.
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux,
time, med):
'''
Computes the scatter in the validation set.
'''
# Update the lambda matrix
self.lam[b] = 10 ** log_lam
# Validation ... |
Utility function for populating lists with random data.
Useful for populating database with data for fuzzy testing.
Supported data-types
* *string*
For example::
populate('string',100, min_len=3, max_len=10)
create a 100 elements list with random strings
with random length between 3 and... |
NOTE: `pos_tol` is the positive (i.e., above the median)
outlier tolerance in standard deviations.
NOTE: `neg_tol` is the negative (i.e., below the median)
outlier tolerance in standard deviations.
def Search(star, pos_tol=2.5, neg_tol=50., **ps_kwargs):
'''
NOTE: `pos_tol` is the positive (i.e., a... |
Ordered iterator over dirty elements.
def iterdirty(self):
    '''Iterate over dirty elements: new instances first, then modified ones.'''
    new_items = itervalues(self._new)
    modified_items = itervalues(self._modified)
    return iter(chain(new_items, modified_items))
Add a new instance to this :class:`SessionModel`.
:param modified: Optional flag indicating if the ``instance`` has been
modified. By default its value is ``True``.
:param force_update: if ``instance`` is persistent, it forces an update of the
data rather than a full replacement. This is used by the
... |
delete an *instance*
def delete(self, instance, session):
'''delete an *instance*'''
if instance._meta.type == 'structure':
return self._delete_structure(instance)
inst = self.pop(instance)
instance = inst if inst is not None else instance
if instance is not No... |
Remove ``instance`` from the :class:`SessionModel`. Instance
could be a :class:`Model` or an id.
:parameter instance: a :class:`Model` or an ``id``.
:rtype: the :class:`Model` removed from session or ``None`` if
it was not in the session.
def pop(self, instance):
'''Remove ``instance`` from the :cla... |
Remove *instance* from the :class:`Session`. Instance could be a
:class:`Model` or an id.
:parameter instance: a :class:`Model` or an *id*
:rtype: the :class:`Model` removed from session or ``None`` if
it was not in the session.
def expunge(self, instance):
'''Remove *instance* from the :class:`Sess... |
\
Process results after a commit.
:parameter results: iterator over :class:`stdnet.instance_session_result`
items.
:rtype: a two elements tuple containing a list of instances saved and
a list of ids of instances deleted.
def post_commit(self, results):
'''\
Process results after a commit.
:... |
Close the transaction and commit session to the backend.
def commit(self, callback=None):
'''Close the transaction and commit session to the backend.'''
if self.executed:
raise InvalidTransaction('Invalid operation. '
'Transaction already executed.')... |
The set of instances in this :class:`Session` which have
been modified.
def dirty(self):
    '''The set of instances in this :class:`Session` which have
    been modified.

    :rtype: a :class:`frozenset` collecting the dirty instances of
        every session model.
    '''
    # chain.from_iterable avoids materialising an intermediate tuple of
    # per-model dirty sets before chaining them together.
    return frozenset(chain.from_iterable(
        sm.dirty for sm in itervalues(self._models)))
Begin a new :class:`Transaction`. If this :class:`Session`
is already in a :ref:`transactional state <transactional-state>`,
an error will occur. It returns the :attr:`transaction` attribute.
This method is mostly used within a ``with`` statement block::
with session.begin() as t:
t.add(...)
... |
Create a new :class:`Query` for *model*.
def query(self, model, **kwargs):
    '''Create a new :class:`Query` for *model*.

    The query class is taken from the model manager when it declares
    one, otherwise the default :class:`Query` is used.
    '''
    session_model = self.model(model)
    cls = session_model.manager.query_class or Query
    return cls(session_model._meta, self, **kwargs)
Update or create a new instance of ``model``.
This method can raise an exception if the ``kwargs`` dictionary
contains field data that does not validate.
:param model: a :class:`StdModel`
:param kwargs: dictionary of parameters.
:returns: A two elements tuple containing ... |
Add an ``instance`` to the session.
If the session is not in a
:ref:`transactional state <transactional-state>`, this operation
commits changes to the back-end server immediately.
:parameter instance: a :class:`Model` instance. It must be registered
with the :attr:`r... |
Delete an ``instance`` or a ``query``.
Adds ``instance_or_query`` to this :class:`Session` list
of data to be deleted. If the session is not in a
:ref:`transactional state <transactional-state>`, this operation
commits changes to the backend server immediately.
:paramete... |
Returns the :class:`SessionModel` for ``model`` which
can be :class:`Model`, or a :class:`MetaClass`, or an instance
of :class:`Model`.
def model(self, model, create=True):
'''Returns the :class:`SessionModel` for ``model`` which
can be :class:`Model`, or a :class:`MetaClass`, or an instance
of :class:`Mo... |
Remove ``instance`` from this :class:`Session`. If ``instance``
is not given, it removes all instances from this :class:`Session`.
def expunge(self, instance=None):
'''Remove ``instance`` from this :class:`Session`. If ``instance``
is not given, it removes all instances from this :class:`Session`.'''
... |
Retrieve the :class:`Manager` for ``model`` which can be any of the
values valid for the :meth:`model` method.
def manager(self, model):
'''Retrieve the :class:`Manager` for ``model`` which can be any of the
values valid for the :meth:`model` method.'''
try:
return self.router[model]
... |
Create a new instance of :attr:`model` and commit it to the backend
server. This a shortcut method for the more verbose::
instance = manager.session().add(MyModel(**kwargs))
def new(self, *args, **kwargs):
'''Create a new instance of :attr:`model` and commit it to the backend
server. This a shortcut ... |
Returns a new :class:`Query` for :attr:`Manager.model`.
def query(self, session=None):
    '''Return a new :class:`Query` for :attr:`Manager.model`.

    A fresh session is created unless ``session`` is given and shares
    this manager's router.
    '''
    usable = session is not None and session.router is self.router
    if not usable:
        session = self.session()
    return session.query(self.model)
Returns a new :class:`Query` for :attr:`Manager.model` with
a full text search value.
def search(self, text, lookup=None):
    '''Return a new :class:`Query` for :attr:`Manager.model` filtered
    by a full text search on ``text``.'''
    base_query = self.query()
    return base_query.search(text, lookup=lookup)
Create a dict given a list of key/value pairs
def pairs_to_dict(response, encoding):
    """Create a dict from a flat list of alternating key/value pairs.

    Keys are :class:`bytes` and are decoded with ``encoding``; values
    are kept untouched. A trailing odd element is silently dropped by
    ``zip``.
    """
    it = iter(response)
    # zip(it, it) pairs consecutive elements of the flat response list.
    return {k.decode(encoding): v for k, v in zip(it, it)}
Parse data for related objects.
def load_related(self, meta, fname, data, fields, encoding):
'''Parse data for related objects.'''
field = meta.dfields[fname]
if field in meta.multifields:
fmeta = field.structure_class()._meta
if fmeta.name in ('hashtable', 'zset'):... |
Execute the query without fetching data. Returns the number of
elements in the query.
def _execute_query(self):
'''Execute the query without fetching data. Returns the number of
elements in the query.'''
pipe = self.pipe
if not self.card:
if self.meta.ordering:
... |
Perform ordering with respect model fields.
def order(self, last):
'''Perform ordering with respect model fields.'''
desc = last.desc
field = last.name
nested = last.nested
nested_args = []
while nested:
meta = nested.model._meta
nested_ar... |
Generator of load_related arguments
def related_lua_args(self):
'''Generator of load_related arguments'''
related = self.queryelem.select_related
if related:
meta = self.meta
for rel in related:
field = meta.dfields[rel]
relmodel = ... |
Remove and return a range from the ordered set by rank (index).
def ipop_range(self, start, stop=None, withscores=True, **options):
'''Remove and return a range from the ordered set by rank (index).'''
return self.backend.execute(
self.client.zpopbyrank(self.id, start, stop,
... |
Remove and return a range from the ordered set by score.
def pop_range(self, start, stop=None, withscores=True, **options):
'''Remove and return a range from the ordered set by score.'''
return self.backend.execute(
self.client.zpopbyscore(self.id, start, stop,
... |
Extract model metadata for lua script stdnet/lib/lua/odm.lua
def meta(self, meta):
    '''Extract model metadata for lua script stdnet/lib/lua/odm.lua

    The metadata dictionary from ``meta`` is augmented with the
    backend ``namespace`` key for this model.
    '''
    info = meta.as_dict()
    info['namespace'] = self.basekey(meta)
    return info
Execute a session in redis.
def execute_session(self, session_data):
'''Execute a session in redis.'''
pipe = self.client.pipeline()
for sm in session_data: # loop through model sessions
meta = sm.meta
if sm.structures:
self.flush_structure(sm, pip... |
Flush all model keys from the database
def flush(self, meta=None):
    '''Flush all model keys from the database.

    When ``meta`` is given only keys for that model are removed,
    otherwise every key under this backend namespace is deleted.
    '''
    if meta:
        prefix = self.basekey(meta)
    else:
        prefix = self.namespace
    return self.client.delpattern('%s*' % prefix)
Returns the covariance matrix for a given light curve
segment.
:param array_like kernel_params: A list of kernel parameters \
(white noise amplitude, red noise amplitude, and red noise timescale)
:param array_like time: The time array (*N*)
:param array_like errors: The data error array (*N*)... |
Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like... |
Returns the negative log-likelihood function and its gradient.
def NegLnLike(x, time, flux, errors, kernel):
'''
Returns the negative log-likelihood function and its gradient.
'''
gp = GP(kernel, x, white=True)
gp.compute(time, errors)
if OLDGEORGE:
nll = -gp.lnlikelihood(flux)
... |
Given a ``startdate`` and an ``enddate`` dates, evaluate the
date intervals from which data is not available. It return a list of
two-dimensional tuples containing start and end date for the interval.
The list could countain 0,1 or 2 tuples.
def missing_intervals(startdate, enddate, start, end,
... |
Generates dates between *atrt* and *end*.
def dategenerator(start, end, step=1, desc=False):
'''Generates dates between *atrt* and *end*.'''
delta = timedelta(abs(step))
end = max(start, end)
if desc:
dt = end
while dt >= start:
yield dt
dt -= delta
... |
A little routine to initialize the logging functionality.
:param str file_name: The name of the file to log to. \
Default :py:obj:`None` (set internally by :py:mod:`everest`)
:param int log_level: The file logging level (0-50). Default 10 (debug)
:param int screen_level: The screen logging level... |
A custom exception handler that logs errors to file.
def ExceptionHook(exctype, value, tb):
'''
A custom exception handler that logs errors to file.
'''
for line in traceback.format_exception_only(exctype, value):
log.error(line.replace('\n', ''))
for line in traceback.format_tb(tb):
... |
A custom exception handler, with :py:obj:`pdb` post-mortem for debugging.
def ExceptionHookPDB(exctype, value, tb):
'''
A custom exception handler, with :py:obj:`pdb` post-mortem for debugging.
'''
for line in traceback.format_exception_only(exctype, value):
log.error(line.replace('\n', ''))
... |
Sorts the list :py:obj:`l` by comparing :py:obj:`col2` to :py:obj:`col1`.
Specifically, finds the indices :py:obj:`i` such that ``col2[i] = col1``
and returns ``l[i]``. This is useful when comparing the CDPP values of
catalogs generated by different pipelines. The
target IDs are all the same, but won't ... |
Progress bar range with `tqdm`
def prange(*x):
'''
Progress bar range with `tqdm`
'''
try:
root = logging.getLogger()
if len(root.handlers):
for h in root.handlers:
if (type(h) is logging.StreamHandler) and \
(h.level != logging.CRIT... |
Return the front pair of the structure
def front(self, *fields):
    '''Return the front pair of the structure, or ``None`` when empty.'''
    head = self.irange(0, 0, fields=fields)
    if not head:
        return None
    return head.start(), head[0]
Return the back pair of the structure
def back(self, *fields):
    '''Return the back pair of the structure, or ``None`` when empty.'''
    tail = self.irange(-1, -1, fields=fields)
    if not tail:
        return None
    return tail.end(), tail[0]
Converts the "backend" into the database connection parameters.
It returns a (scheme, host, params) tuple.
def parse_backend(backend):
"""Converts the "backend" into the database connection parameters.
It returns a (scheme, host, params) tuple."""
r = urlparse.urlsplit(backend)
scheme, host = r.scheme... |
get a :class:`BackendDataServer`.
def getdb(backend=None, **kwargs):
'''get a :class:`BackendDataServer`.'''
if isinstance(backend, BackendDataServer):
return backend
backend = backend or settings.DEFAULT_BACKEND
if not backend:
return None
scheme, address, params = parse_bac... |
Calculate the key to access model data.
:parameter meta: a :class:`stdnet.odm.Metaclass`.
:parameter args: optional list of strings to prepend to the basekey.
:rtype: a native string
def basekey(self, meta, *args):
"""Calculate the key to access model data.
:parameter meta: a :class:`stdnet.odm.Metacl... |
Generator of :class:`stdnet.odm.StdModel` instances with data
from database.
:parameter meta: instance of model :class:`stdnet.odm.Metaclass`.
:parameter data: iterator over instances data.
def make_objects(self, meta, data, related_fields=None):
'''Generator of :class:`stdnet.odm.StdModel` instances wit... |
Create a backend :class:`stdnet.odm.Structure` handler.
:param instance: a :class:`stdnet.odm.Structure`
:param client: Optional client handler.
def structure(self, instance, client=None):
'''Create a backend :class:`stdnet.odm.Structure` handler.
:param instance: a :class:`stdn... |
Why is my target not in the EVEREST database?
def Search(ID, mission='k2'):
"""Why is my target not in the EVEREST database?"""
# Only K2 supported for now
assert mission == 'k2', "Only the K2 mission is supported for now."
print("Searching for target %d..." % ID)
# First check if it is in the dat... |
Download a given :py:mod:`everest` file from MAST.
:param str mission: The mission name. Default `k2`
:param str cadence: The light curve cadence. Default `lc`
:param str filename: The name of the file to download. Default \
:py:obj:`None`, in which case the default \
FITS file is ret... |
Show the data validation summary (DVS) for a given target.
:param str mission: The mission name. Default `k2`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: If :py:obj:`True`, download and overwrite \
existing files. Default :py:obj:`False`
def DVS(ID, season=Non... |
Re-compute the :py:mod:`everest` model for the given
value of :py:obj:`lambda`.
For long cadence `k2` light curves, this should take several
seconds. For short cadence `k2` light curves, it may take a
few minutes. Note that this is a simple wrapper around
:py:func:`everest.Baseca... |
Computes the PLD flux normalization array.
..note :: `iPLD` model **only**.
def _get_norm(self):
'''
Computes the PLD flux normalization array.
..note :: `iPLD` model **only**.
'''
log.info('Computing the PLD normalization...')
# Loop over all chunks
... |
Load the FITS file from disk and populate the
class instance with its data.
def load_fits(self):
'''
Load the FITS file from disk and populate the
class instance with its data.
'''
log.info("Loading FITS file for %d." % (self.ID))
with pyfits.open(self.fitsfile... |
Plot sample postage stamps for the target with the aperture
outline marked, as well as a high-res target image (if available).
:param bool show: Show the plot or return the `(fig, ax)` instance? \
Default :py:obj:`True`
def plot_aperture(self, show=True):
'''
Plot sample... |
Plots the final de-trended light curve.
:param bool show: Show the plot or return the `(fig, ax)` instance? \
Default :py:obj:`True`
:param bool plot_raw: Show the raw light curve? Default :py:obj:`True`
:param bool plot_gp: Show the GP model prediction? \
Default ... |
Shows the data validation summary (DVS) for the target.
def dvs(self):
    '''
    Shows the data validation summary (DVS) for the target.
    '''
    options = dict(season=self.season, mission=self.mission,
                   model=self.model_name, clobber=self.clobber)
    DVS(self.ID, **options)
Plots the light curve for the target de-trended with a given pipeline.
:param str pipeline: The name of the pipeline (lowercase). Options \
are 'everest2', 'everest1', and other mission-specific \
pipelines. For `K2`, the available pipelines are 'k2sff' \
and 'k2sc'... |
Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission.
def get_pipeline(self, *args, **kwargs):
'''
Returns the `time` and `flux` ... |
Mask all of the transits/eclipses of a given planet/EB. After calling
this method, you must re-compute the model by calling
:py:meth:`compute` in order for the mask to take effect.
:param float t0: The time of first transit (same units as light curve)
:param float period: The period of ... |
.. warning:: Untested!
def _plot_weights(self, show=True):
'''
.. warning:: Untested!
'''
# Set up the axes
fig = pl.figure(figsize=(12, 12))
fig.subplots_adjust(top=0.95, bottom=0.025, left=0.1, right=0.92)
fig.canvas.set_window_title(
'%s %d' % (s... |
Saves all of the de-trending information to disk in an `npz` file
def _save_npz(self):
'''
Saves all of the de-trending information to disk in an `npz` file
'''
# Save the data
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B'... |
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
'''
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD prio... |
Plot the light curve folded on a given `period` and centered at `t0`.
When plotting folded transits, please mask them using
:py:meth:`mask_planet` and re-compute the model using
:py:meth:`compute`.
:param float t0: The time at which to center the plot \
(same units as lig... |
Plot the light curve de-trended with a join instrumental + transit
model with the best fit transit model overlaid. The transit model
should be specified using the :py:obj:`transit_model` attribute
and should be an instance or list of instances of
:py:class:`everest.transit.TransitModel`.... |
Masks certain elements in the array `y` and linearly
interpolates over them, returning an array `y'` of the
same length.
:param array_like time: The time array
:param array_like mask: The indices to be interpolated over
:param array_like y: The dependent array
def Interpolate(time, mask, y):
'... |
Returns a generator of consecutive `n`-sized chunks of list `l`.
If `all` is `True`, returns **all** `n`-sized chunks in `l`
by iterating over the starting point.
def Chunks(l, n, all=False):
'''
Returns a generator of consecutive `n`-sized chunks of list `l`.
If `all` is `True`, returns **all** `n... |
Smooth data by convolving on a given timescale.
:param ndarray x: The data array
:param int window_len: The size of the smoothing window. Default `100`
:param str window: The window type. Default `hanning`
def Smooth(x, window_len=100, window='hanning'):
'''
Smooth data by convolving on a given ti... |
Return the scatter in ppm based on the median running standard deviation
for a window size of :py:obj:`win` = 13 cadences (for K2, this
is ~6.5 hours, as in VJ14).
:param ndarray y: The array whose CDPP is to be computed
:param int win: The window size in cadences. Default `13`
:param bool remove_o... |
Subtracts a second order Savitsky-Golay filter with window size `win`
and returns the result. This acts as a high pass filter.
def SavGol(y, win=49):
'''
Subtracts a second order Savitsky-Golay filter with window size `win`
and returns the result. This acts as a high pass filter.
'''
if len(y... |
Return the number of regressors for `npix` pixels
and PLD order `pld_order`.
:param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`
def NumRegressors(npix, pld_order, cross_terms=True):
'''
Return the number of regressors for `npix` pixels
and PLD order `pld_order`.
:param... |
Downbins an array to a smaller size.
:param array_like x: The array to down-bin
:param int newsize: The new size of the axis along which to down-bin
:param int axis: The axis to operate on. Default 0
:param str operation: The operation to perform when down-binning. \
Default `mean`
def Down... |
Called during the creation of a the :class:`StdModel`
class when :class:`Metaclass` is initialised. It fills
:attr:`Field.name` and :attr:`Field.model`. This is an internal
function users should never call.
def register_with_model(self, name, model):
'''Called during the creation of a the :class:`StdModel`
cla... |
Add this :class:`Field` to the fields of :attr:`model`.
def add_to_fields(self):
    '''Register this :class:`Field` with the fields of :attr:`model`.'''
    model_meta = self.model._meta
    model_meta.scalarfields.append(self)
    # Indexed fields are additionally tracked in the meta indices list.
    if self.index:
        model_meta.indices.append(self)
called by the :class:`Query` method when it needs to build
lookup on fields with additional nested fields. This is the case of
:class:`ForeignKey` and :class:`JSONField`.
:param remaining: the :ref:`double underscored` fields if this :class:`Field`
:param errorClass: Optional exception class to use if the *remaining* ... |
Retrieve the value :class:`Field` from a :class:`StdModel`
``instance``.
:param instance: The :class:`StdModel` ``instance`` invoking this function.
:param bits: Additional information for nested fields which derives from
the :ref:`double underscore <tutorial-underscore>` notation.
:return: the value of this :clas... |
Set the ``value`` for this :class:`Field` in a ``instance``
of a :class:`StdModel`.
def set_value(self, instance, value):
    '''Set ``value`` on ``instance`` for this :class:`Field`,
    converting it with ``to_python`` first.'''
    converted = self.to_python(value)
    setattr(instance, self.attname, converted)
lookup the value of the var_name on the stack of contexts
:var_name: TODO
:contexts: TODO
:returns: None if not found
def lookup(var_name, contexts=(), start=0):
"""lookup the value of the var_name on the stack of contexts
:var_name: TODO
:contexts: TODO
:returns: None if not found
"... |
convert delimiters to corresponding regular expressions
def delimiters_to_re(delimiters):
"""convert delimiters to corresponding regular expressions"""
# caching
delimiters = tuple(delimiters)
if delimiters in re_delimiters:
re_tag = re_delimiters[delimiters]
else:
open_tag, close_... |
check if the string text[start:end] is standalone by checking forwards
and backwards for blankspaces
:text: TODO
:(start, end): TODO
:returns: the start of next index after text[start:end]
def is_standalone(text, start, end):
"""check if the string text[start:end] is standalone by checking forwards... |
Compile a template into token tree
:template: TODO
:delimiters: TODO
:returns: the root token
def compiled(template, delimiters=DEFAULT_DELIMITERS):
"""Compile a template into token tree
:template: TODO
:delimiters: TODO
:returns: the root token
"""
re_tag = delimiters_to_re(deli... |
Escape text according to self.escape
def _escape(self, text):
"""Escape text according to self.escape"""
ret = EMPTYSTRING if text is None else str(text)
if self.escape:
return html_escape(ret)
else:
return ret |
lookup value for names like 'a.b.c' and handle filters as well
def _lookup(self, dot_name, contexts):
"""lookup value for names like 'a.b.c' and handle filters as well"""
# process filters
filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
dot_name = filters[0]
... |
Render the children tokens
def _render_children(self, contexts, partials):
    """Render every child token and concatenate their output.

    :param contexts: stack of context objects for variable lookup
    :param partials: mapping of partial templates
    :returns: the concatenated rendered text of all children
    """
    # Feed str.join a generator directly: no intermediate list needed.
    return EMPTYSTRING.join(
        child._render(contexts, partials) for child in self.children)
render variable
def _render(self, contexts, partials):
"""render variable"""
value = self._lookup(self.value, contexts)
# lambda
if callable(value):
value = inner_render(str(value()), contexts, partials)
return self._escape(value) |
render section
def _render(self, contexts, partials):
"""render section"""
val = self._lookup(self.value, contexts)
if not val:
# false value
return EMPTYSTRING
# normally json has types: number/string/list/map
# but python has more, so we decide that ma... |
render inverted section
def _render(self, contexts, partials):
"""render inverted section"""
val = self._lookup(self.value, contexts)
if val:
return EMPTYSTRING
return self._render_children(contexts, partials) |
render partials
def _render(self, contexts, partials):
"""render partials"""
try:
partial = partials[self.value]
except KeyError as e:
return self._escape(EMPTYSTRING)
partial = re_insert_indent.sub(r'\1' + ' '*self.indent, partial)
return inner_render(... |
Called when the code is installed. Sets up directories and downloads
the K2 catalog.
def Setup():
'''
Called when the code is installed. Sets up directories and downloads
the K2 catalog.
'''
if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'cbv')):
os.makedirs(os.path.join(EVERES... |
Compute the proxy 6-hr CDPP metric.
:param array_like flux: The flux array to compute the CDPP for
:param array_like mask: The indices to be masked
:param str cadence: The light curve cadence. Default `lc`
def CDPP(flux, mask=[], cadence='lc'):
'''
Compute the proxy 6-hr CDPP metric.
:param a... |
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.
:param int EPIC: The EPIC ID number
:param int season: The observing season (campaign). Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Defaul... |
Return `neighbors` random bright stars on the same module as `EPIC`.
:param int EPIC: The EPIC ID number
:param str model: The :py:obj:`everest` model name. Only used when \
imposing CDPP bounds. Default :py:obj:`None`
:param int neighbors: Number of neighbors to return. Default 10
:param st... |
Computes and plots the CDPP statistics comparison between `model` and
`compare_to` for all known K2 planets.
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or \
other K2 pipeline name
def PlanetStatistics(model='nPLD', compare_to='k2sf... |
Computes and plots the CDPP statistics comparison between short cadence
and long cadence de-trended light curves
:param campaign: The campaign number or list of campaign numbers. \
Default is to plot all campaigns
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param ... |
Computes and plots the CDPP statistics comparison between `model`
and `compare_to` for all long cadence light curves in a given campaign
:param season: The campaign number or list of campaign numbers. \
Default is to plot all campaigns
:param bool clobber: Overwrite existing files? Default :py:o... |
Returns `True` if short cadence data is available for this target.
:param int EPIC: The EPIC ID number
:param int season: The campaign number. Default :py:obj:`None`
def HasShortCadence(EPIC, season=None):
'''
Returns `True` if short cadence data is available for this target.
:param int EPIC: The... |
Computes and plots the statistics for injection/recovery tests.
:param int campaign: The campaign number. Default 0
:param str model: The :py:obj:`everest` model name
:param bool plot: Default :py:obj:`True`
:param bool show: Show the plot? Default :py:obj:`True`. \
If :py:obj:`False`, retur... |
Generates HDU cards for inclusion in the de-trended light curve FITS file.
Used internally.
def HDUCards(headers, hdu=0):
'''
Generates HDU cards for inclusion in the de-trended light curve FITS file.
Used internally.
'''
if headers is None:
return []
if hdu == 0:
# Get i... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.