text |
|---|
Fetch highwater offsets for each topic/partition from Kafka cluster.
Do this for all partitions in the cluster because even if a partition
has no consumers, we may want to measure whether producers are successfully
producing. No need to limit this for performance because fetching broker
offsets ... |
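For context, fetching highwater (end) offsets can be sketched with the kafka-python client; this is an assumption about the client in use, and the broker address is hypothetical:

```python
from kafka import KafkaConsumer, TopicPartition

# Hypothetical broker address; a minimal sketch using kafka-python,
# which may differ from the client the check actually uses.
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')

# Gather every partition of every topic so we also cover topics
# that have producers but no consumers.
partitions = [
    TopicPartition(topic, p)
    for topic in consumer.topics()
    for p in (consumer.partitions_for_topic(topic) or [])
]

# end_offsets() returns {TopicPartition: highwater_offset}.
highwater = consumer.end_offsets(partitions)
```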
Fetch child nodes for a given Zookeeper path.
def _get_zk_path_children(self, zk_conn, zk_path, name_for_error):
"""Fetch child nodes for a given Zookeeper path."""
children = []
try:
children = zk_conn.get_children(zk_path)
except NoNodeError:
self.log.info('No ... |
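A standalone equivalent with the kazoo client (which provides the `NoNodeError` caught above) might look like the following; the ensemble address and path are illustrative:

```python
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError

zk = KazooClient(hosts='127.0.0.1:2181')  # hypothetical ensemble address
zk.start()
try:
    children = zk.get_children('/consumers')  # hypothetical path
except NoNodeError:
    children = []  # path does not exist; mirror the check's fallback
finally:
    zk.stop()
```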
Fetch Consumer Group offsets from Zookeeper.
Also fetch consumer_groups, topics, and partitions if not
already specified in consumer_groups.
:param dict consumer_groups: The consumer groups, topics, and partitions
that you want to fetch offsets for. If consumer_groups is None, will... |
Retrieve consumer offsets via the new consumer API. Offsets in this version are stored directly
in Kafka (the __consumer_offsets topic) rather than in ZooKeeper.
def _get_kafka_consumer_offsets(self, instance, consumer_groups):
"""
retrieve consumer offsets via the new consumer api. Offsets in this ... |
Validate any explicitly specified consumer groups.
While the check does not require specifying consumer groups,
if they are specified this method should be used to validate them.
val = {'consumer_group': {'topic': [0, 1]}}
def _validate_explicit_consumer_groups(cls, val):
"""Validate ... |
Emit an event to the Datadog Event Stream.
def _send_event(self, title, text, tags, event_type, aggregation_key, severity='info'):
"""Emit an event to the Datadog Event Stream."""
event_dict = {
'timestamp': int(time()),
'source_type_name': self.SOURCE_TYPE_NAME,
'ms... |
Print packages that have been added, removed, or changed
def display_package_changes(pre_packages, post_packages, indent=''):
"""
Print packages that have been added, removed, or changed
"""
# use package name to determine what's changed
pre_package_names = {p.name: p for p in pre_packages}
post_pack... |
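The name-keyed dicts built above make the diff a set operation. A hedged completion of the idea; the version comparison and `.version` attribute are assumptions about the package objects:

```python
def display_package_changes(pre_packages, post_packages, indent=''):
    """Print packages that have been added, removed, or changed."""
    pre = {p.name: p for p in pre_packages}
    post = {p.name: p for p in post_packages}

    added = sorted(set(post) - set(pre))
    removed = sorted(set(pre) - set(post))
    changed = sorted(
        name for name in set(pre) & set(post)
        if pre[name].version != post[name].version  # assumes a .version attribute
    )

    for name in added:
        print('{}Added: {}'.format(indent, name))
    for name in removed:
        print('{}Removed: {}'.format(indent, name))
    for name in changed:
        print('{}Changed: {} {} -> {}'.format(
            indent, name, pre[name].version, post[name].version))
```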
Resolve transitive dependencies for any number of checks.
If you want to do this en masse, put `all`.
def resolve(checks, lazy, quiet):
"""Resolve transient dependencies for any number of checks.
If you want to do this en masse, put `all`.
"""
root = get_root()
if 'all' in checks:
checks... |
Pin a dependency for all checks that require it. This can
also resolve transitive dependencies.
Setting the version to `none` will remove the package. You can
specify an unlimited number of additional checks to apply the
pin for via arguments.
def pin(package, version, checks, marker, resolving, lazy, ... |
Combine all dependencies for the Agent's static environment.
def freeze():
"""Combine all dependencies for the Agent's static environment."""
echo_waiting('Verifying collected packages...')
catalog, errors = make_catalog()
if errors:
for error in errors:
echo_failure(error)
... |
Restart an Agent to detect environment changes.
def reload_env(check, env):
"""Restart an Agent to detect environment changes."""
envs = get_configured_envs(check)
if not envs:
echo_failure('No active environments found for `{}`.'.format(check))
echo_info('See what is available to start via... |
Set up the gitlab instance so it can be used in OpenMetricsBaseCheck
def _create_gitlab_prometheus_instance(self, instance, init_config):
"""
Set up the gitlab instance so it can be used in OpenMetricsBaseCheck
"""
# Mapping from Prometheus metrics names to Datadog ones
# For no... |
Submit a metric as a gauge; additional tags provided will be added to
the ones from the labels provided via the metric object.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
def _submit_gauge(self, metric_name, val, metric, custom_t... |
Validate tag sets for a MongoReplicaSetClient.
def _validate_tag_sets(tag_sets):
"""Validate tag sets for a MongoReplicaSetClient.
"""
if tag_sets is None:
return tag_sets
if not isinstance(tag_sets, list):
raise TypeError((
"Tag sets %r invalid, must be a list") % (tag_set... |
Validate max_staleness.
def _validate_max_staleness(max_staleness):
"""Validate max_staleness."""
if max_staleness == -1:
return -1
if not isinstance(max_staleness, integer_types):
raise TypeError(_invalid_max_staleness_msg(max_staleness))
if max_staleness <= 0:
raise ValueErr... |
Read preference as a document.
def document(self):
"""Read preference as a document.
"""
doc = {'mode': self.__mongos_mode}
if self.__tag_sets not in (None, [{}]):
doc['tags'] = self.__tag_sets
if self.__max_staleness != -1:
doc['maxStalenessSeconds'] = s... |
Validate default configuration files.
def config(check):
"""Validate default configuration files."""
if check:
checks = [check]
else:
checks = sorted(get_valid_checks())
files_failed = {}
files_warned = {}
num_files = 0
echo_waiting('Validating default configuration files.... |
Get stats from the queue manager
def queue_manager_stats(self, queue_manager, tags):
"""
Get stats from the queue manager
"""
for mname, pymqi_value in iteritems(metrics.queue_manager_metrics()):
try:
m = queue_manager.inquire(pymqi_value)
mn... |
Grab stats from queues
def queue_stats(self, queue, tags):
"""
Grab stats from queues
"""
for mname, pymqi_value in iteritems(metrics.queue_metrics()):
try:
mname = '{}.queue.{}'.format(self.METRIC_PREFIX, mname)
m = queue.inquire(pymqi_value)... |
Get the version string for the given check.
def get_version_string(check_name):
"""
Get the version string for the given check.
"""
version = VERSION.search(read_version_file(check_name))
if version:
return version.group(1) |
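The `VERSION` pattern is not shown in the row; a plausible sketch, assuming the conventional layout where the file contains a quoted `__version__` assignment:

```python
import re

# Assumed pattern; the real VERSION regex may differ.
VERSION = re.compile(r"__version__ *= *['\"](.+?)['\"]")

def get_version_string(contents):
    """Get the version string from a check's version file contents."""
    version = VERSION.search(contents)
    if version:
        return version.group(1)

print(get_version_string("__version__ = '1.2.3'"))  # -> 1.2.3
```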
Load the manifest file into a dictionary
def load_manifest(check_name):
"""
Load the manifest file into a dictionary
"""
manifest_path = os.path.join(get_root(), check_name, 'manifest.json')
if file_exists(manifest_path):
return json.loads(read_file(manifest_path))
return {} |
Returns a dictionary mapping {check-package-name --> pinned_version} from the
given file contents. We can assume lines are in the form:
datadog-active-directory==1.1.1; sys_platform == 'win32'
def parse_agent_req_file(contents):
"""
Returns a dictionary mapping {check-package-name --> pinned_versi... |
Query status and get a dictionary back.
Extract each field out of the dictionary
and stuff it in the corresponding metric.
query: show status...
field_metric_map: {"Seconds_behind_master": "mysqlSecondsBehindMaster"}
def _collect_dict(self, metric_type, field_metric_map, query, db, tag... |
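A hedged sketch of the pattern described: run the query once, then map result fields to metric names. The dict-style cursor and the submission callable are assumptions:

```python
from contextlib import closing

def collect_dict(submit, field_metric_map, query, db, tags):
    # Assumes a DB-API connection whose cursor yields dict-like rows
    # (e.g. a DictCursor), and `submit` as a callable like check.gauge.
    with closing(db.cursor()) as cursor:
        cursor.execute(query)
        row = cursor.fetchone()
    if not row:
        return
    for field, metric in field_metric_map.items():
        if field in row and row[field] is not None:
            submit(metric, float(row[field]), tags=tags)
```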
Get the `pid_file` variable
def _get_pid_file_variable(self, db):
"""
Get the `pid_file` variable
"""
pid_file = None
try:
with closing(db.cursor()) as cursor:
cursor.execute("SHOW VARIABLES LIKE 'pid_file'")
pid_file = cursor.fetchone... |
Retrieve the slaves' statuses using:
1. The `performance_schema.threads` table. Non-blocking, requires version > 5.6.0
2. The `information_schema.processlist` table. Blocking
def _get_slave_status(self, db, above_560, nonblocking):
"""
Retrieve the slaves' statuses using:
1. The... |
Return a dictionary for each job counter
{
counter_group_name: [
counter_name
]
}
def _parse_general_counters(self, init_config):
"""
Return a dictionary for each job counter
{
counter_group_name: [
counter_... |
Return a dictionary for each job counter
{
job_name: {
counter_group_name: [
counter_name
]
}
}
def _parse_job_specific_counters(self, init_config):
"""
Return a dictionary for each job counter
{
... |
Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications
def _get_running_app_ids(self, rm_address, auth, ssl_verify):
"""
Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications
"""
metrics_json = self._re... |
Get metrics for each MapReduce job.
Return a dictionary for each MapReduce job
{
job_id: {
'job_name': job_name,
'app_name': app_name,
'user_name': user_name,
'tracking_url': tracking_url
    }
}
def _mapreduce_job_metrics(self, running_apps, ... |
Get custom metrics specified for each counter
def _mapreduce_job_counters_metrics(self, running_jobs, auth, ssl_verify, addl_tags):
"""
Get custom metrics specified for each counter
"""
for job_metrics in itervalues(running_jobs):
job_name = job_metrics['job_name']
... |
Get metrics for each MapReduce task
Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task
def _mapreduce_task_metrics(self, running_jobs, auth, ssl_verify, addl_tags):
"""
Get metrics for each MapReduce task
Return a dictionary of {task_id: 'tracking_url'} for each Ma... |
Parse the JSON response and set the metrics
def _set_metrics_from_json(self, metrics_json, metrics, tags):
"""
Parse the JSON response and set the metrics
"""
for status, (metric_name, metric_type) in iteritems(metrics):
metric_status = metrics_json.get(status)
... |
Set a metric
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
"""
Set a metric
"""
if metric_type == self.HISTOGRAM:
self.histogram(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == self.INCREMENT:
... |
Query the given URL and return the JSON response
def _rest_request_to_json(self, address, auth, ssl_verify, object_path, service_name, tags=None, *args, **kwargs):
"""
Query the given URL and return the JSON response
"""
response_json = None
tags = [] if tags is None else tags
... |
Reads `init_config` for `psutil` methods to call on the current process
Calls those methods and stores the raw output
:returns a dictionary of statistic_name: value
def _psutil_config_to_stats(self, instance):
"""
Reads `init_config` for `psutil` methods to call on the current process
... |
Saves sample metrics from psutil
:param stats: a dictionary that looks like:
{
'memory_info': OrderedDict([('rss', 24395776), ('vms', 144666624)]),
'io_counters': OrderedDict([('read_count', 4536),
('write_count', 100),
... |
Get a Docker container's IP address from its id or name.
def get_container_ip(container_id_or_name):
"""Get a Docker container's IP address from its id or name."""
command = [
'docker',
'inspect',
'-f',
'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}',
containe... |
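A runnable completion of the command shown, assuming `subprocess` for execution:

```python
import subprocess

def get_container_ip(container_id_or_name):
    """Get a Docker container's IP address from its id or name."""
    command = [
        'docker', 'inspect', '-f',
        '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}',
        container_id_or_name,
    ]
    # check_output raises CalledProcessError on a non-zero exit status.
    return subprocess.check_output(command).decode('utf-8').strip()
```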
This utility provides a convenient way to safely set up and tear down Docker environments.
:param compose_file: A path to a Docker compose file. A custom tear
down is not required when using this.
:type compose_file: ``str``
:param build: Whether or not to build images for when ``c... |
Get the SON document representation of this DBRef.
Generally not needed by application developers
def as_doc(self):
"""Get the SON document representation of this DBRef.
Generally not needed by application developers
"""
doc = SON([("$ref", self.collection),
... |
Reset the config object to its initial state
def clear(self):
"""
Reset the config object to its initial state
"""
with self._lock:
self._config = {
CacheConfig.Morlist: {'last': defaultdict(float), 'intl': {}},
CacheConfig.Metadata: {'last': ... |
Notice: this will return the defaultdict default value also for keys
that are not in the configuration; this is a tradeoff to keep the code simple.
def get_last(self, type_, key):
"""
Notice: this will return the defaultdict default value also for keys
that are not in the configuration,... |
Get a list of possible paths containing certificates
Check is installed via pip to:
* Windows: embedded/lib/site-packages/datadog_checks/http_check
* Linux: embedded/lib/python2.7/site-packages/datadog_checks/http_check
Certificate is installed to:
* embedded/ssl/certs/cacert.pem
walk up t... |
Validate logo files. Specifying no check will validate all logos
def logos(check):
"""Validate logo files. Specifying no check will validate all logos"""
valid_checks = get_valid_integrations()
if check:
if check in valid_checks:
checks = [check]
else:
echo_info('{... |
The text option (-1) is not reliable enough when counters get large.
VBE.media_video_prd_services_01(10.93.67.16,,8080).happy18446744073709551615
Two types of data: "a" for counter ("c" in newer versions of varnish), "i" for gauge ("g")
https://github.com/varnish/Varnish-Cache/blob/master/include... |
Parse out service checks from varnishadm.
Example output:
Backend b0 is Sick
Current states good: 2 threshold: 3 window: 5
Average responsetime of good probes: 0.000000
Oldest Newest
============... |
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
The protobuf format directly parses the response.content property searching for Prometheus messages of type
MetricFamily [0] delimited by a varint... |
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
:param input_get: line generator
:output: generator of filtered lines
... |
:param messages: dictionary as metric_name: {labels: {}, value: 10}
:param _metric: dictionary as {labels: {le: '0.001', 'custom': 'value'}}
:param _m: str as metric name
:param metric_suffix: str must be in (count or sum)
:return: value of the metric_name matched by the labels
def get_... |
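A hedged sketch of the lookup those parameters describe: find the `<metric>_<suffix>` sample whose labels (ignoring the histogram's `le` bucket label) match the bucket's labels. The `messages` shape comes from the row above; treating each entry as a list of samples is an assumption:

```python
def get_metric_value(messages, _metric, _m, metric_suffix):
    """Return the value of <_m>_<metric_suffix> matching _metric's labels."""
    target = '{}_{}'.format(_m, metric_suffix)  # e.g. my_histogram_count
    wanted = {k: v for k, v in _metric['labels'].items() if k != 'le'}
    for sample in messages.get(target, []):
        if {k: v for k, v in sample['labels'].items() if k != 'le'} == wanted:
            return sample['value']

messages = {'lat_count': [{'labels': {'custom': 'value'}, 'value': 10}]}
bucket = {'labels': {'le': '0.001', 'custom': 'value'}}
print(get_metric_value(messages, bucket, 'lat', 'count'))  # 10
```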
Extracts MetricFamily objects from the maps generated by parsing the
strings in _extract_metrics_from_string
def _extract_metric_from_map(self, _m, messages, obj_map, obj_help):
"""
Extracts MetricFamily objects from the maps generated by parsing the
strings in _extract_metrics_from_str... |
Poll the data from prometheus and return the metrics as a generator.
def scrape_metrics(self, endpoint):
"""
Poll the data from prometheus and return the metrics as a generator.
"""
response = self.poll(endpoint)
try:
# no dry run if no label joins
if not... |
Polls the data from prometheus and pushes them as gauges
`endpoint` is the metrics endpoint to use to poll metrics from Prometheus
Note that if the instance has a 'tags' attribute, it will be pushed
automatically as additional custom tags and added to the metrics
def process(self, endpoint, **... |
Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is ... |
Polls the metrics from the prometheus metrics endpoint provided.
Defaults to the protobuf format, but can use the formats specified by
the PrometheusFormat class.
Custom headers can be added to the default headers.
Returns a valid requests.Response, raise requests.HTTPError if the statu... |
For each metric in the message, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metri... |
If hostname is None, look at label_to_hostname setting
def _get_hostname(self, hostname, metric):
"""
If hostname is None, look at label_to_hostname setting
"""
if hostname is None and self.label_to_hostname is not None:
for label in metric.label:
if label.na... |
Extracts metrics from a prometheus summary metric and sends them as gauges
def _submit_gauges_from_summary(self, name, metric, custom_tags=None, hostname=None):
"""
Extracts metrics from a prometheus summary metric and sends them as gauges
"""
if custom_tags is None:
custom_... |
Extracts metrics from a prometheus histogram and sends them as gauges
def _submit_gauges_from_histogram(
self, name, metric, send_histograms_buckets=True, custom_tags=None, hostname=None
):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
if cust... |
See if a personal access token was passed
def get_auth_info(config=None):
"""
See if a personal access token was passed
"""
gh_config = (config or {}).get('github', {})
user = gh_config.get('user') or os.getenv('DD_GITHUB_USER')
token = gh_config.get('token') or os.getenv('DD_GITHUB_TOKEN')
... |
Fetch the labels from the PR and process the ones related to the changelog.
def get_changelog_types(pr_payload):
"""
Fetch the labels from the PR and process the ones related to the changelog.
"""
changelog_labels = []
for name in get_pr_labels(pr_payload):
if name.startswith(CHANGELOG_LABE... |
Get the payload for the given PR number. Let exceptions bubble up.
def get_pr(pr_num, config=None, repo=DEFAULT_REPO, raw=False):
"""
Get the payload for the given PR number. Let exceptions bubble up.
"""
response = requests.get(PR_ENDPOINT.format(repo, pr_num), auth=get_auth_info(config))
if raw:... |
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
where `1234` is the PR number.
def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
where `123... |
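Given that format, a minimal extraction sketch:

```python
import re

PR_PATTERN = re.compile(r'\(#(\d+)\)')  # assumed pattern for "(#1234)"

def parse_pr_numbers(git_log_lines):
    """Return the PR numbers found in commit message lines."""
    prs = []
    for line in git_log_lines:
        match = PR_PATTERN.search(line)
        if match:
            prs.append(match.group(1))
    return prs

print(parse_pr_numbers(['here is the message (#1234)']))  # ['1234']
```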
Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
def open(self):
"""Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
"""
with self._lock:
if self._thread_will_exit:
# If the b... |
For more info, see: https://tox.readthedocs.io/en/latest/plugins.html
For an example, see: https://github.com/tox-dev/tox-travis
def tox_configure(config):
"""
For more info, see: https://tox.readthedocs.io/en/latest/plugins.html
For an example, see: https://github.com/tox-dev/tox-travis
"""
se... |
Determine if collect_connection_state is set and can effectively run.
If self._collect_cx_state is True and a custom proc_location is provided, the system cannot
run `ss` or `netstat` over a custom proc_location
:param proc_location: str
:return: bool
def _is_collect_cx_state_runnable(... |
_check_linux can be run inside a container and still collect the network metrics from the host.
For that, procfs_path can be set to something like "/host/proc".
When a custom procfs_path is set, the collect_connection_state option is ignored.
def _check_linux(self, instance):
"""
_check_li... |
Parse the output of conntrack -S
Add the parsed metrics
def _add_conntrack_stats_metrics(self, conntrack_path, tags):
"""
Parse the output of conntrack -S
Add the parsed metrics
"""
try:
output, _, _ = get_subprocess_output(["sudo", conntrack_path, "-S"], sel... |
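A hedged parse of `conntrack -S` output, assuming the modern `key=value` line format (`cpu=0 found=27644 invalid=19060 ...`); older conntrack versions print a different layout:

```python
def parse_conntrack_stats(output):
    """Return {cpu: {counter: value}} from `conntrack -S` key=value lines."""
    stats = {}
    for line in output.splitlines():
        fields = dict(
            token.split('=', 1) for token in line.split() if '=' in token
        )
        if 'cpu' not in fields:
            continue
        cpu = int(fields.pop('cpu'))
        stats[cpu] = {name: int(value) for name, value in fields.items()}
    return stats

sample = "cpu=0 found=27644 invalid=19060 insert=0 drop=1 early_drop=0"
print(parse_conntrack_stats(sample))
```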
Parse the output of the command that retrieves the connection state (either `ss` or `netstat`)
Returns a dict metric_name -> value
def _parse_linux_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
"""
Parse the output of the command that retrieves the connection sta... |
Return a mapping of network metrics by interface. For example:
{ interface:
{'bytes_sent': 0,
'bytes_rcvd': 0,
...
}
}
def _parse_solaris_netstat(self, netstat_output):
"""
Ret... |
Gather metrics about connection states and interface counters
using psutil facilities
def _check_psutil(self, instance):
"""
Gather metrics about connection states and interface counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if se... |
Collect metrics about connection states using psutil
def _cx_state_psutil(self, tags=None):
"""
Collect metrics about connection states using psutil
"""
metrics = defaultdict(int)
tags = [] if tags is None else tags
for conn in psutil.net_connections():
proto... |
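A condensed sketch of that counting loop; psutil's `net_connections()` (inet kind by default) yields objects with `family`, `type`, and `status` fields:

```python
import socket
from collections import defaultdict

import psutil

def count_connection_states():
    """Count connections keyed by (protocol string, state)."""
    metrics = defaultdict(int)
    for conn in psutil.net_connections():
        proto = 'tcp' if conn.type == socket.SOCK_STREAM else 'udp'
        ip_version = '4' if conn.family == socket.AF_INET else '6'
        # e.g. ('tcp4', 'ESTABLISHED') -> count
        metrics[(proto + ip_version, conn.status)] += 1
    return metrics
```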
Collect metrics about interface counters using psutil
def _cx_counters_psutil(self, tags=None):
"""
Collect metrics about interface counters using psutil
"""
tags = [] if tags is None else tags
for iface, counters in iteritems(psutil.net_io_counters(pernic=True)):
... |
Returns a string describing the protocol for the given connection
in the form `tcp4`, `udp4` as in `self.cx_state_gauge`
def _parse_protocol_psutil(self, conn):
"""
Returns a string describing the protocol for the given connection
in the form `tcp4`, `udp4` as in `self.cx_state_gauge`
... |
Process the work unit, or wait for sentinel to exit
def run(self):
"""Process the work unit, or wait for sentinel to exit"""
while True:
self.running = True
workunit = self._workq.get()
if is_sentinel(workunit):
# Got sentinel
break
... |
Equivalent of the apply() builtin function. It blocks till
the result is ready.
def apply(self, func, args=(), kwds=dict()):
"""Equivalent of the apply() builtin function. It blocks till
the result is ready."""
return self.apply_async(func, args, kwds).get() |
A parallel equivalent of the map() builtin function. It
blocks till the result is ready.
This method chops the iterable into a number of chunks which
it submits to the process pool as separate tasks. The
(approximate) size of these chunks can be specified by setting
chunksize to... |
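The chunking it describes can be sketched with `itertools.islice`; this is an illustrative helper, not the pool's actual implementation:

```python
import itertools

def chunk(iterable, chunksize):
    """Yield successive lists of at most `chunksize` items."""
    it = iter(iterable)
    while True:
        piece = list(itertools.islice(it, chunksize))
        if not piece:
            return
        yield piece

print(list(chunk(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```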
An equivalent of itertools.imap().
The chunksize argument is the same as the one used by the
map() method. For very long iterables using a large value for
chunksize can make the job complete much faster than
using the default value of 1.
Also if chunksize is 1 then the nex... |
The same as imap() except that the ordering of the results
from the returned iterator should be considered
arbitrary. (Only when there is only one worker process is the
order guaranteed to be "correct".)
def imap_unordered(self, func, iterable, chunksize=1):
"""The same as imap() except... |
A variant of the apply() method which returns an
ApplyResult object.
If callback is specified then it should be a callable which
accepts a single argument. When the result becomes ready,
callback is applied to it (unless the call failed). callback
should complete immediately sin... |
A variant of the map() method which returns an ApplyResult
object.
If callback is specified then it should be a callable which
accepts a single argument. When the result becomes ready
callback is applied to it (unless the call failed). callback
should complete immediately since o... |
A variant of the imap() method which returns an ApplyResult
object that provides an iterator (a next(timeout) method is
available).
If callback is specified then it should be a callable which
accepts a single argument. When the resulting iterator becomes
ready, callback is applied to... |
A variant of the imap_unordered() method which returns an
ApplyResult object that provides an iterator (a next(timeout)
method is available).
If callback is specified then it should be a callable which
accepts a single argument. When the resulting iterator becomes
ready, callback is ... |
Stops the worker processes immediately without completing
outstanding work. When the pool object is garbage collected
terminate() will be called immediately.
def terminate(self):
"""Stops the worker processes immediately without completing
outstanding work. When the pool object is garba... |
Create the WorkUnit objects to process and push them onto the
work queue. Each work unit is meant to process a slice of
iterable of size chunksize. If collector is specified, then
the ApplyResult objects associated with the jobs will notify
collector when their result becomes ready.
... |
Call the function with the args/kwds and tell the ApplyResult
that its result is ready. Correctly handle exceptions raised
during the execution of the function
def process(self):
"""
Call the function with the args/kwds and tell the ApplyResult
that its result is ready. ... |
Returns the result when it arrives. If timeout is not None and
the result does not arrive within timeout seconds then
TimeoutError is raised. If the remote call raised an exception
then that exception will be reraised by get().
def get(self, timeout=None):
"""
Returns the result... |
Waits until the result is available or until timeout
seconds pass.
def wait(self, timeout=None):
"""Waits until the result is available or until timeout
seconds pass."""
self._event.wait(timeout)
return self._event.isSet() |
Called by a Job object to tell that the result is ready, and
to provide the value of this result. The object will become
ready and successful. The collector's notify_ready() method
will be called, and the callback method too
def _set_value(self, value):
"""Called by a Job object to tell the re... |
Called by a Job object to tell that an exception occurred
during the processing of the function. The object will become
ready but not successful. The collector's notify_ready()
method will be called, but NOT the callback method
def _set_exception(self):
"""Called by a Job object to tell ... |
Return the next result value in the sequence. Raise
StopIteration at the end. Can raise the exception raised by
the Job
def next(self, timeout=None):
"""Return the next result value in the sequence. Raise
StopIteration at the end. Can raise the exception raised by
the Job"""
... |
Called by the CollectorIterator object to retrieve the
result's values one after another, in the order the results have
become available.
\param idx The index of the result we want, wrt collector's order
\param timeout integer telling how long to wait (in seconds)
for the result ... |
Called by the ApplyResult object (already registered via
register_result()) to signal that it is now ready (i.e. the Job's
result is available or an exception has been raised).
\param apply_result ApplyResult object telling us that the job
has been processed
def notify_ready(self, apply_result):
... |
Called by the CollectorIterator object to retrieve the
result's values one after another (order defined by the
implementation)
\param idx The index of the result we want, wrt collector's order
\param timeout integer telling how long to wait (in seconds)
for the result at index id... |
Called by the ApplyResult object (already registered via
register_result()) to signal that it is now ready (i.e. the Job's
result is available or an exception has been raised).
\param apply_result ApplyResult object telling us that the job
has been processed
def notify_ready(self, apply_result):
... |
Execute a command over the socket, or raise socket.error.
:Parameters:
- `sock`: a raw socket instance
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
... |
Receive a raw BSON message or raise socket.error.
def receive_message(
sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE):
"""Receive a raw BSON message or raise socket.error."""
header = _receive_data_on_socket(sock, 16)
length = _UNPACK_INT(header[:4])[0]
actual_op = _UNPACK_INT... |
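The 16-byte header being unpacked is the standard MongoDB wire header: four little-endian int32 values. A standalone sketch of the same decode:

```python
import struct

def parse_wire_header(header):
    """Decode the 16-byte MongoDB wire protocol header."""
    length, request_id, response_to, op_code = struct.unpack('<iiii', header)
    return length, request_id, response_to, op_code

# 100-byte message, requestID 7, responseTo 5, OP_REPLY (1)
header = struct.pack('<iiii', 100, 7, 5, 1)
print(parse_wire_header(header))  # (100, 7, 5, 1)
```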
See: https://github.com/python/cpython/blob/40ee9a3640d702bce127e9877c82a99ce817f0d1/Lib/socket.py#L691
def create_connection(self):
"""See: https://github.com/python/cpython/blob/40ee9a3640d702bce127e9877c82a99ce817f0d1/Lib/socket.py#L691"""
err = None
try:
for res in socket.getadd... |
Helper function that wraps :func:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
:Parameters:
- `json_options`: A :class:`JSONOptions` instance used to modify the
encoding of MongoDB Extended JSON types. Defaul... |
Helper function that wraps :func:`json.loads`.
Automatically passes the object_hook for BSON type conversion.
Raises ``TypeError``, ``ValueError``, ``KeyError``, or
:exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON.
:Parameters:
- `json_options`: A :class:`JSONOptions` instance use... |
Recursive helper method that converts BSON types so they can be
converted into json.
def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS):
"""Recursive helper method that converts BSON types so they can be
converted into json.
"""
if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 s... |
Decode a JSON datetime to python datetime.datetime.
def _parse_canonical_datetime(doc, json_options):
"""Decode a JSON datetime to python datetime.datetime."""
dtm = doc["$date"]
if len(doc) != 1:
raise TypeError('Bad $date, extra field(s): %s' % (doc,))
# mongoexport 2.6 and newer
if isins... |
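For the integer branch shown, `$date` carries milliseconds since the Unix epoch; a minimal sketch of that conversion (the real helper also handles ISO-8601 strings and `$numberLong`, omitted here):

```python
import datetime

def parse_millis_date(doc):
    """Decode {"$date": <millis>} to a naive UTC datetime."""
    if len(doc) != 1:
        raise TypeError('Bad $date, extra field(s): %s' % (doc,))
    millis = doc['$date']
    # Milliseconds since the Unix epoch (mongoexport 2.6+ integer form).
    return datetime.datetime.utcfromtimestamp(millis / 1000.0)

print(parse_millis_date({'$date': 1514764800000}))  # 2018-01-01 00:00:00
```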