text stringlengths 81 112k |
|---|
Build the metric list to collect based on the instance preferences.
def _build_metric_list_to_collect(self, additional_metrics):
"""
Build the metric list to collect based on the instance preferences.
"""
metrics_to_collect = {}
# Defaut metrics
for default_metrics in i... |
Return and cache the list of metrics to collect.
def _get_metrics_to_collect(self, instance_key, additional_metrics):
"""
Return and cache the list of metrics to collect.
"""
if instance_key not in self.metrics_to_collect_by_instance:
self.metrics_to_collect_by_instance[inst... |
Return the submit method and the metric name to use.
The metric name is defined as follow:
* If available, the normalized metric name alias
* (Or) the normalized original metric name
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
"""
Return the ... |
Replace case-sensitive metric name characters, normalize the metric name,
prefix and suffix according to its type.
def _normalize(self, metric_name, submit_method, prefix):
"""
Replace case-sensitive metric name characters, normalize the metric name,
prefix and suffix according to its t... |
Authenticate to the database.
Available mechanisms:
* Username & password
* X.509
More information:
https://api.mongodb.com/python/current/examples/authentication.html
def _authenticate(self, database, username, password, use_x509, server_name, service_check_tags):
"""... |
Parses a MongoDB-formatted URI (e.g. mongodb://user:pass@server/db) and returns parsed elements
and a sanitized URI.
def _parse_uri(cls, server, sanitize_username=False):
"""
Parses a MongoDB-formatted URI (e.g. mongodb://user:pass@server/db) and returns parsed elements
and a sanitized ... |
Collect indexes statistics for all collections in the configuration.
This use the "$indexStats" command.
def _collect_indexes_stats(self, instance, db, tags):
"""
Collect indexes statistics for all collections in the configuration.
This use the "$indexStats" command.
"""
... |
Returns a dictionary that looks a lot like what's sent back by
db.serverStatus()
def check(self, instance):
"""
Returns a dictionary that looks a lot like what's sent back by
db.serverStatus()
"""
def total_seconds(td):
"""
Returns total seconds ... |
Get the connection either with a username and password or without
def get_normal_connection(config):
"""
Get the connection either with a username and password or without
"""
if config.username and config.password:
log.debug("connecting with username and password")
queue_manager = pymqi... |
Get the connection with SSL
def get_ssl_connection(config):
"""
Get the connection with SSL
"""
cd = pymqi.CD()
cd.ChannelName = config.channel
cd.ConnectionName = config.host_and_port
cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN
cd.TransportType = pymqi.CMQC.MQXPT_TCP
cd.SSLCipherSpe... |
Run an Agent check.
def check_run(check, env, rate, times, pause, delay, log_level, as_json, break_point):
"""Run an Agent check."""
envs = get_configured_envs(check)
if not envs:
echo_failure('No active environments found for `{}`.'.format(check))
echo_info('See what is available to start ... |
Parse user identity out of init_config
To guarantee a uniquely identifiable user, expects
{"user": {"name": "my_username", "password": "my_password",
"domain": {"id": "my_domain_id"}
}
}
def get_user_identity(cls, instance_config):
"""
Parse ... |
Parse authorization scope out of init_config
To guarantee a uniquely identifiable scope, expects either:
{'project': {'name': 'my_project', 'domain': {'id': 'my_domain_id'}}}
OR
{'project': {'id': 'my_project_id'}}
def get_auth_scope(cls, instance_config):
"""
Parse aut... |
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service
Sends a CRITICAL service check when none are found registered in the Catalog
def get_neutron_endpoint(cls, json_resp):
"""
Parse the service catalog returned by the Identity API for an endpoint m... |
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
def get_nova_endpoint(cls, json_resp, nova_api_version=None):
"""
Parse the servi... |
Generic request handler for OpenStack API requests
Raises specialized Exceptions for commonly encountered error codes
def _make_request_with_auth_fallback(self, url, headers=None, params=None):
"""
Generic request handler for OpenStack API requests
Raises specialized Exceptions for comm... |
Collect stats for all reachable networks
def get_network_stats(self, tags):
"""
Collect stats for all reachable networks
"""
# FIXME: (aaditya) Check all networks defaults to true
# until we can reliably assign agents to networks to monitor
if is_affirmative(self.init_c... |
Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n'
def _parse_uptime_string(self, uptime):
""" Parse u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n' """
uptime = uptime.strip()
load_averages = uptime[uptime.find('load average:') :].split(':... |
Guarantees a valid auth scope for this instance, and returns it
Communicates with the identity server and initializes a new scope when one is absent, or has been forcibly
removed due to token expiry
def ensure_auth_scope(self, instance):
"""
Guarantees a valid auth scope for this insta... |
Returns the hypervisor running on this host, and assumes a 1-1 between host and hypervisor
def get_local_hypervisor(self):
"""
Returns the hypervisor running on this host, and assumes a 1-1 between host and hypervisor
"""
# Look up hypervisors available filtered by my hostname
h... |
Returns all projects in the domain
def get_all_projects(self, scope):
"""
Returns all projects in the domain
"""
url = "{0}/{1}/{2}".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, "projects")
headers = {'X-Auth-Token': scope.auth_token}
try:
r... |
Returns the project that this instance of the check is scoped to
def get_scoped_project(self, project_auth_scope):
"""
Returns the project that this instance of the check is scoped to
"""
filter_params = {}
url = "{0}/{1}/{2}".format(self.keystone_server_url, DEFAULT_KEYSTONE_A... |
Returns a best guess for the hostname registered with OpenStack for this host
def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get("os_host") or self.hostna... |
Returns a list of tags for every guest server that is detected by the OpenStack
integration.
List of pairs (hostname, list_of_tags)
def get_external_host_tags(self):
""" Returns a list of tags for every guest server that is detected by the OpenStack
integration.
List of pairs (h... |
Run the Docker check for one instance.
def check(self, instance):
"""Run the Docker check for one instance."""
if not self.init_success:
# Initialization can fail if cgroups are not ready or docker daemon is down. So we retry if needed
# https://github.com/DataDog/dd-agent/issue... |
List all the containers from the API, filter and count them.
def _get_and_count_containers(self, custom_cgroups=False, healthchecks=False):
"""List all the containers from the API, filter and count them."""
# Querying the size of containers is slow, we don't do it at each run
must_query_size =... |
Generate the tags for a given entity (container or image) according to a list of tag names.
def _get_tags(self, entity=None, tag_type=None):
"""Generate the tags for a given entity (container or image) according to a list of tag names."""
# Start with custom tags
tags = list(self.custom_tags)
... |
Extra tag information from the API result (containers or images).
Cache extracted tags inside the entity object.
def _extract_tag_value(self, entity, tag_name):
"""Extra tag information from the API result (containers or images).
Cache extracted tags inside the entity object.
"""
... |
Check if a container is excluded according to the filter rules.
Requires _filter_containers to run first.
def _is_container_excluded(self, container):
"""Check if a container is excluded according to the filter rules.
Requires _filter_containers to run first.
"""
container_nam... |
Send health service checks for containers.
def _send_container_healthcheck_sc(self, containers_by_id):
"""Send health service checks for containers."""
for container in containers_by_id.itervalues():
healthcheck_tags = self._get_tags(container, HEALTHCHECK)
match = False
... |
Report container count per state
def _report_container_count(self, containers_by_id):
"""Report container count per state"""
m_func = FUNC_MAP[GAUGE][self.use_histogram]
per_state_count = defaultdict(int)
filterlambda = lambda ctr: not self._is_container_excluded(ctr)
containe... |
Report volume count per state (dangling or not)
def _report_volume_count(self):
"""Report volume count per state (dangling or not)"""
m_func = FUNC_MAP[GAUGE][self.use_histogram]
attached_volumes = self.docker_util.client.volumes(filters={'dangling': False})
dangling_volumes = self.doc... |
Find container network metrics by looking at /proc/$PID/net/dev of the container process.
def _report_net_metrics(self, container, tags):
"""Find container network metrics by looking at /proc/$PID/net/dev of the container process."""
if self._disable_net_metrics:
self.log.debug("Network met... |
Get the list of events.
def _get_events(self):
"""Get the list of events."""
events, changed_container_ids = self.docker_util.get_events()
if not self._disable_net_metrics:
self._invalidate_network_mapping_cache(events)
if changed_container_ids and self._service_discovery:
... |
Create the actual event to submit from a list of similar docker events
def _create_dd_event(self, events, image, c_tags, priority='Normal'):
"""Create the actual event to submit from a list of similar docker events"""
if not events:
return
max_timestamp = 0
status = default... |
Report metrics about the volume space usage
def _report_disk_stats(self):
"""Report metrics about the volume space usage"""
stats = {
'docker.data.used': None,
'docker.data.total': None,
'docker.data.free': None,
'docker.metadata.used': None,
... |
Cast the disk stats to float and convert them to bytes
def _format_disk_metrics(self, metrics):
"""Cast the disk stats to float and convert them to bytes"""
for name, raw_val in metrics.iteritems():
if raw_val:
match = DISK_STATS_RE.search(raw_val)
if match i... |
Calculate a percentage of used disk space for data and metadata
def _calc_percent_disk_stats(self, stats):
"""Calculate a percentage of used disk space for data and metadata"""
mtypes = ['data', 'metadata']
percs = {}
for mtype in mtypes:
used = stats.get('docker.{0}.used'.f... |
Find a specific cgroup file, containing metrics to extract.
def _get_cgroup_from_proc(self, cgroup, pid, filename):
"""Find a specific cgroup file, containing metrics to extract."""
params = {
"file": filename,
}
return DockerUtil.find_cgroup_from_proc(self._mountpoints, pid... |
Parse a cgroup pseudo file for key/values.
def _parse_cgroup_file(self, stat_file):
"""Parse a cgroup pseudo file for key/values."""
self.log.debug("Opening cgroup file: %s" % stat_file)
try:
with open(stat_file, 'r') as fp:
if 'blkio' in stat_file:
... |
Parse the blkio metrics.
def _parse_blkio_metrics(self, stats):
"""Parse the blkio metrics."""
metrics = {
'io_read': 0,
'io_write': 0,
}
for line in stats:
if 'Read' in line:
metrics['io_read'] += int(line.split()[2])
if '... |
Crawl `/proc` to find container PIDs and add them to `containers_by_id`.
def _crawl_container_pids(self, container_dict, custom_cgroups=False):
"""Crawl `/proc` to find container PIDs and add them to `containers_by_id`."""
proc_path = os.path.join(self.docker_util._docker_root, 'proc')
pid_dirs... |
Start an environment.
def start(ctx, check, env, agent, dev, base, env_vars):
"""Start an environment."""
if not file_exists(get_tox_file(check)):
abort('`{}` is not a testable check.'.format(check))
base_package = None
if base:
core_dir = os.path.expanduser(ctx.obj.get('core', ''))
... |
Generic request handler for OpenStack API requests
Raises specialized Exceptions for commonly encountered error codes
def _make_request(self, url, headers, params=None):
"""
Generic request handler for OpenStack API requests
Raises specialized Exceptions for commonly encountered error c... |
Returns all projects in the domain
def get_projects(self):
"""
Returns all projects in the domain
"""
url = urljoin(self.keystone_endpoint, "{}/{}".format(DEFAULT_KEYSTONE_API_VERSION, "projects"))
try:
r = self._make_request(url, self.headers)
return r.g... |
Parse user identity out of init_config
To guarantee a uniquely identifiable user, expects
{"user": {"name": "my_username", "password": "my_password",
"domain": {"id": "my_domain_id"}
}
}
def _get_user_identity(user):
"""
Parse user identity o... |
Parse the service catalog returned by the Identity API for an endpoint matching the Neutron service
Sends a CRITICAL service check when none are found registered in the Catalog
def _get_neutron_endpoint(cls, json_resp):
"""
Parse the service catalog returned by the Identity API for an endpoint ... |
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
def _get_nova_endpoint(cls, json_resp):
"""
Parse the service catalog returned by... |
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
def _get_valid_endpoint(resp, name, entry_type):
"""
Parse the service catalog re... |
This command will:
* Verify the uniqueness of dependency versions across all checks.
* Verify all the dependencies are pinned.
* Verify the embedded Python environment defined in the base check and requirements
listed in every integration are compatible.
def dep():
"""
This command will:
... |
Show metric info from a Prometheus endpoint.
\b
Example:
$ ddev meta prom info :8080/_status/vars
def info(endpoint):
"""Show metric info from a Prometheus endpoint.
\b
Example:
$ ddev meta prom info :8080/_status/vars
"""
endpoint = sanitize_endpoint(endpoint)
metrics = pars... |
Interactively parse metric info from a Prometheus endpoint.
def parse(ctx, endpoint, check, here):
"""Interactively parse metric info from a Prometheus endpoint."""
if here:
output_dir = os.getcwd()
else:
output_dir = path_join(get_root(), check)
if not dir_exists(output_dir):
... |
Add this operation to the _Bulk instance `bulkobj`.
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_delete(self._filter, 0, collation=self._collation) |
Add this operation to the _Bulk instance `bulkobj`.
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_replace(self._filter, self._doc, self._upsert,
collation=self._collation) |
Add this operation to the _Bulk instance `bulkobj`.
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_update(self._filter, self._doc, False, self._upsert,
collation=self._collation) |
Load a private key and the corresponding certificate. The certfile
string must be the path to a single file in PEM format containing the
certificate as well as any number of CA certificates needed to
establish the certificate's authenticity. The keyfile string, if
present, must point to ... |
Wrap an existing Python socket sock and return an ssl.SSLSocket
object.
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, dummy=None):
"""Wrap an existing Python socket sock and return an ssl.SSLSocket
... |
Parse the binary buffer in input, searching for Prometheus messages
of type MetricFamily [0] delimited by a varint32 [1].
[0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501
[1] https://developers.google.com/protocol-buffers... |
Convert duplicate keys values to lists.
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
d = defaultdict(list)
for k, v in ordered_pairs:
d[k].append(v)
# unpack lists that have only 1 item
dict_copy = deepcopy(d)
for k, v in i... |
Generator yielding one Package instance for every corresponding line in a
requirements file
def read_packages(reqs_file):
"""
Generator yielding one Package instance for every corresponing line in a
requirements file
"""
for line in stream_file_lines(reqs_file):
line = line.strip()
... |
Dump the packages in the catalog in a requirements file
def write_packages(self, reqs_file):
    """Dump every package in the catalog to *reqs_file*, one per line."""
    # Lazily format each package; write_file_lines consumes the generator.
    lines = ('{}\n'.format(pkg) for pkg in self.packages)
    write_file_lines(reqs_file, lines)
Add a Package to the catalog for the given check
def add_package(self, check_name, package):
"""
Add a Package to the catalog for the given check
"""
self._package_set.add(package)
package_data = self._packages[package.name]
self._checks_deps[check_name].append(package)
... |
Publish metrics for a subcontainer and handle filtering on tags
def _update_container_metrics(self, instance, subcontainer, kube_labels):
"""Publish metrics for a subcontainer and handle filtering on tags"""
tags = list(instance.get('tags', [])) # add support for custom tags
if len(subcontain... |
Reports the number of running pods on this node, tagged by service and creator
We go though all the pods, extract tags then count them by tag list, sorted and
serialized in a pipe-separated string (it is an illegal character for tags)
def _update_pods_metrics(self, instance, pods):
"""
... |
Process kube events and send ddog events
The namespace filtering is done here instead of KubeEventRetriever
to avoid interfering with service discovery
def _update_kube_events(self, instance, pods_list, event_items):
"""
Process kube events and send ddog events
The namespace fil... |
calls kubeutil.refresh_leader and compares the resulting
leader status with the previous one.
If it changed, update the event collection logic
def refresh_leader_status(self, instance):
"""
calls kubeutil.refresh_leader and compares the resulting
leader status with the previous ... |
Main metric fetching method
def _read_rrd(self, rrd_path, hostname, device_name, tags):
''' Main metric fetching method '''
metric_count = 0
try:
info = self._get_rrd_info(rrd_path)
except Exception:
# Unable to read RRD file, ignore it
self.log.exce... |
Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname, device_name, rrd_path)
def _fetch_rrd_meta(self, connection, rrd_path_root, whitelist, field_names, tags):
''' Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname... |
Format a cacti metric name into a Datadog-friendly name
def _format_metric_name(self, m_name, cfunc):
''' Format a cacti metric name into a Datadog-friendly name '''
try:
aggr = CFUNC_TO_AGGR[cfunc]
except KeyError:
aggr = cfunc.lower()
try:
m_name =... |
PERF_100NSEC_TIMER
https://technet.microsoft.com/en-us/library/cc728274(v=ws.10).aspx
def calculate_perf_100nsec_timer(previous, current, property_name):
"""
PERF_100NSEC_TIMER
https://technet.microsoft.com/en-us/library/cc728274(v=ws.10).aspx
"""
n0 = previous[property_name]
n1 = current... |
PERF_COUNTER_BULK_COUNT
https://technet.microsoft.com/en-us/library/cc757486(v=ws.10).aspx
def calculate_perf_counter_bulk_count(previous, current, property_name):
"""
PERF_COUNTER_BULK_COUNT
https://technet.microsoft.com/en-us/library/cc757486(v=ws.10).aspx
"""
n0 = previous[property_name]
... |
PERF_COUNTER_100NS_QUEUELEN_TYPE
Average length of a queue to a resource over time in 100 nanosecond units.
https://msdn.microsoft.com/en-us/library/aa392905(v=vs.85).aspx
Formula (n1 - n0) / (d1 - d0)
def calculate_perf_counter_100ns_queuelen_type(previous, current, property_name):
"""
PERF_COU... |
Return whether the hostname and port combination was already seen
def _server_known(cls, host, port):
    """Return whether this (host, port) combination was already seen.

    Reads the class-level registry under its lock, so concurrent check
    instances observe a consistent view.
    """
    server = (host, port)
    with PostgreSql._known_servers_lock:
        return server in PostgreSql._known_servers
Store the host/port combination for this server
def _set_server_known(cls, host, port):
    """Record this (host, port) combination as seen.

    Mutates the class-level registry under its lock, so concurrent check
    instances never race on the set.
    """
    server = (host, port)
    with PostgreSql._known_servers_lock:
        PostgreSql._known_servers.add(server)
Add NEWER_92_METRICS to the default set of COMMON_METRICS when server
version is 9.2 or later.
Store the list of metrics in the check instance to avoid rebuilding it at
every collection cycle.
In case we have multiple instances pointing to the same postgres server
monitoring di... |
Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS
depending on the postgres version.
Uses a dictionary to save the result for each instance
def _get_bgw_metrics(self, key, db):
"""Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS
depend... |
Use COMMON_ARCHIVER_METRICS to read from pg_stat_archiver as
defined in 9.4 (first version to have this table).
Uses a dictionary to save the result for each instance
def _get_archiver_metrics(self, key, db):
"""Use COMMON_ARCHIVER_METRICS to read from pg_stat_archiver as
defined in 9.4... |
Use either REPLICATION_METRICS_10, REPLICATION_METRICS_9_1, or
REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2, depending on the
postgres version.
Uses a dictionary to save the result for each instance
def _get_replication_metrics(self, key, db):
""" Use either REPLICATION_METRICS_10... |
Use ACTIVITY_METRICS_LT_8_3 or ACTIVITY_METRICS_8_3 or ACTIVITY_METRICS_9_2
depending on the postgres version in conjunction with ACTIVITY_QUERY_10 or ACTIVITY_QUERY_LT_10.
Uses a dictionary to save the result for each instance
def _get_activity_metrics(self, key, db):
""" Use ACTIVITY_METRICS... |
Builds a dictionary from relations configuration while maintaining compatibility
def _build_relations_config(self, yamlconfig):
"""Builds a dictionary from relations configuration while maintaining compatibility
"""
config = {}
for element in yamlconfig:
if isinstance(elemen... |
Query pg_stat_* for various metrics
If relations is not an empty list, gather per-relation metrics
on top of that.
If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml
def _collect_stats(
self,
key,
db,
instance_tags,
rel... |
Get and memoize connections to instances
def get_connection(self, key, host, port, user, password, dbname, ssl, connect_fct, tags, use_cached=True):
"""Get and memoize connections to instances"""
if key in self.dbs and use_cached:
return self.dbs[key]
elif host != "" and user != ""... |
Given a list of custom_queries, execute each query and parse the result for metrics
def _get_custom_queries(self, db, tags, custom_queries, programming_error):
"""
Given a list of custom_queries, execute each query and parse the result for metrics
"""
for custom_query in custom_queries:... |
The XML will have Stat Nodes and Nodes that contain the metrics themselves
This code recursively goes through each Stat Node to properly setup tags
where each Stat will have a different tag key depending on the context.
def process_stats(self, stats, prefix, metric_categories, nested_tags, tags, recurs... |
Convert duration 'dur' to microseconds.
def _to_micros(dur):
"""Convert duration 'dur' to microseconds."""
if hasattr(dur, 'total_seconds'):
return int(dur.total_seconds() * 10e5)
# Python 2.6
return dur.microseconds + (dur.seconds + dur.days * 24 * 3600) * 1000000 |
Validate event listeners
def _validate_event_listeners(option, listeners):
"""Validate event listeners"""
if not isinstance(listeners, Sequence):
raise TypeError("%s must be a list or tuple" % (option,))
for listener in listeners:
if not isinstance(listener, _EventListener):
rai... |
Register a global event listener.
:Parameters:
- `listener`: A subclasses of :class:`CommandListener`,
:class:`ServerHeartbeatListener`, :class:`ServerListener`, or
:class:`TopologyListener`.
def register(listener):
"""Register a global event listener.
:Parameters:
- `listener... |
List of registered event listeners.
def event_listeners(self):
"""List of registered event listeners."""
return (self.__command_listeners[:],
self.__server_heartbeat_listeners[:],
self.__server_listeners[:],
self.__topology_listeners[:]) |
Publish a CommandStartedEvent to all command listeners.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run
against.
- `request_id`: The request id for this operation.
- `connection_id`: The address... |
Publish a CommandSucceededEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `reply`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `co... |
Publish a CommandFailedEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `failure`: The server reply document or failure description
document.
- `command_name`: The command name.
- `request_id`: The req... |
Publish a ServerHeartbeatStartedEvent to all server heartbeat
listeners.
:Parameters:
- `connection_id`: The address (host/port pair) of the connection.
def publish_server_heartbeat_started(self, connection_id):
"""Publish a ServerHeartbeatStartedEvent to all server heartbeat
... |
Publish a ServerHeartbeatSucceededEvent to all server heartbeat
listeners.
:Parameters:
- `connection_id`: The address (host/port pair) of the connection.
- `duration`: The execution time of the event in the highest possible
resolution for the platform.
- `reply`:... |
Publish a ServerHeartbeatFailedEvent to all server heartbeat
listeners.
:Parameters:
- `connection_id`: The address (host/port pair) of the connection.
- `duration`: The execution time of the event in the highest possible
resolution for the platform.
- `reply`: Th... |
Publish a ServerOpeningEvent to all server listeners.
:Parameters:
- `server_address`: The address (host/port pair) of the server.
- `topology_id`: A unique identifier for the topology this server
is a part of.
def publish_server_opened(self, server_address, topology_id):
... |
Publish a ServerClosedEvent to all server listeners.
:Parameters:
- `server_address`: The address (host/port pair) of the server.
- `topology_id`: A unique identifier for the topology this server
is a part of.
def publish_server_closed(self, server_address, topology_id):
"... |
Publish a ServerDescriptionChangedEvent to all server listeners.
:Parameters:
- `previous_description`: The previous server description.
- `server_address`: The address (host/port pair) of the server.
- `new_description`: The new server description.
- `topology_id`: A unique... |
Publish a TopologyOpenedEvent to all topology listeners.
:Parameters:
- `topology_id`: A unique identifier for the topology this server
is a part of.
def publish_topology_opened(self, topology_id):
"""Publish a TopologyOpenedEvent to all topology listeners.
:Parameters:
... |
Publish a TopologyClosedEvent to all topology listeners.
:Parameters:
- `topology_id`: A unique identifier for the topology this server
is a part of.
def publish_topology_closed(self, topology_id):
"""Publish a TopologyClosedEvent to all topology listeners.
:Parameters:
... |
Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
is a part of.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.