text |
|---|
Reads and parses postmaster.pid from the data directory
:returns dictionary of values if successful, empty dictionary otherwise
def _read_postmaster_pidfile(data_dir):
"""Reads and parses postmaster.pid from the data directory
:returns dictionary of values if successful, empty dictionary othe... |
Signal postmaster process to stop
:returns None if signaled, True if process is already gone, False if error
def signal_stop(self, mode):
"""Signal postmaster process to stop
:returns None if signaled, True if process is already gone, False if error
"""
if self.is_single_user:... |
>>> repr_size(1000)
'1000 Bytes'
>>> repr_size(8257332324597)
'7.5 TiB'
def repr_size(n_bytes):
"""
>>> repr_size(1000)
'1000 Bytes'
>>> repr_size(8257332324597)
'7.5 TiB'
"""
if n_bytes < 1024:
return '{0} Bytes'.format(n_bytes)
i = -1
while n_bytes > 1023:
... |
>>> size_as_bytes(7.5, 'T')
8246337208320
def size_as_bytes(size_, prefix):
"""
>>> size_as_bytes(7.5, 'T')
8246337208320
"""
prefix = prefix.upper()
assert prefix in si_prefixes
exponent = si_prefixes.index(prefix) + 1
return int(size_ * (1024.0 ** exponent)) |
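A minimal sketch of how the truncated `repr_size` loop above could finish, consistent with its doctests and with `size_as_bytes` (whose exponent arithmetic implies 'T' is the fourth power of 1024); the exact contents of the module-level `si_prefixes` table are an assumption:

```python
si_prefixes = ('K', 'M', 'G', 'T', 'P', 'E')  # assumed module-level prefix table

def repr_size(n_bytes):
    """
    >>> repr_size(1000)
    '1000 Bytes'
    >>> repr_size(8257332324597)
    '7.5 TiB'
    """
    if n_bytes < 1024:
        return '{0} Bytes'.format(n_bytes)
    i = -1
    # divide down until the value fits below 1024, remembering how many steps we took
    while n_bytes > 1023:
        n_bytes /= 1024.0
        i += 1
    return '{0} {1}iB'.format(round(n_bytes, 1), si_prefixes[i])
``` |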
Creates a new replica using WAL-E
Returns
-------
ExitCode
0 = Success
1 = Error, try again
2 = Error, don't try again
def run(self):
"""
Creates a new replica using WAL-E
Returns
-------
ExitCode
0 = Succ... |
determine whether it makes sense to use S3 and not pg_basebackup
def should_use_s3_to_create_replica(self):
""" determine whether it makes sense to use S3 and not pg_basebackup """
threshold_megabytes = self.wal_e.threshold_mb
threshold_percent = self.wal_e.threshold_pct
try:
... |
>>> len(list(watching(True, 1, 0)))
1
>>> len(list(watching(True, 1, 1)))
2
>>> len(list(watching(True, None, 0)))
1
def watching(w, watch, max_count=None, clear=True):
"""
>>> len(list(watching(True, 1, 0)))
1
>>> len(list(watching(True, 1, 1)))
2
>>> len(list(watching(True... |
We want to trigger a failover or switchover for the specified cluster name.
We verify that the cluster name, master name and candidate name are correct.
If so, we trigger an action and keep the client up to date.
def _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force, sched... |
Rip-off of the ha.touch_member without inter-class dependencies
def touch_member(config, dcs):
''' Rip-off of the ha.touch_member without inter-class dependencies '''
p = Postgresql(config['postgresql'])
p.set_state('running')
p.set_role('master')
def restapi_connection_string(config):
pro... |
fill-in some basic configuration parameters if config file is not set
def set_defaults(config, cluster_name):
"""fill-in some basic configuration parameters if config file is not set """
config['postgresql'].setdefault('name', cluster_name)
config['postgresql'].setdefault('scope', cluster_name)
config[... |
Creates a temporary file with specified contents that persists for the context.
:param contents: binary string that will be written to the file.
:param prefix: will be prefixed to the filename.
:param suffix: will be appended to the filename.
:returns path of the created file.
def temporary_file(conte... |
Shows a diff between two strings.
If the output is to a tty the diff will be colored. Inputs are expected to be unicode strings.
def show_diff(before_editing, after_editing):
"""Shows a diff between two strings.
If the output is to a tty the diff will be colored. Inputs are expected to be unicode strings... |
Formats configuration as YAML for human consumption.
:param data: configuration as nested dictionaries
:returns unicode YAML of the configuration
def format_config_for_editing(data):
"""Formats configuration as YAML for human consumption.
:param data: configuration as nested dictionaries
:returns... |
Applies config changes specified as a list of key-value pairs.
Keys are interpreted as dotted paths into the configuration data structure, except for paths beginning with
`postgresql.parameters`, where the rest of the path is used directly to allow for PostgreSQL GUCs containing dots.
Values are interpreted as ... |
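An illustrative sketch of applying a single dotted key/value pair to a nested configuration dict while honouring the `postgresql.parameters` exception described above; the function and variable names are hypothetical, not Patroni's internals:

```python
def apply_config_change(config, key, value):
    """Set `value` at the dotted path `key` inside the nested dict `config`."""
    prefix = 'postgresql.parameters.'
    if key.startswith(prefix):
        # everything after the prefix is a single GUC name, which may itself contain dots
        path = ['postgresql', 'parameters', key[len(prefix):]]
    else:
        path = key.split('.')
    node = config
    for part in path[:-1]:
        node = node.setdefault(part, {})
    node[path[-1]] = value
    return config

# e.g. apply_config_change({}, 'postgresql.parameters.shared_buffers', '1GB')
# -> {'postgresql': {'parameters': {'shared_buffers': '1GB'}}}
``` |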
Applies changes from a YAML file to configuration
:param data: configuration datastructure
:param filename: name of the YAML file, - is taken to mean standard input
:returns tuple of human readable and parsed datastructure after changes
def apply_yaml_file(data, filename):
"""Applies changes from a YA... |
Starts editor command to edit configuration in human readable format
:param before_editing: human representation before editing
:returns tuple of human readable and parsed datastructure after changes
def invoke_editor(before_editing, cluster_name):
"""Starts editor command to edit configuration in human r... |
set tags, carrying the cluster name, instance role and instance id for the EBS storage
def _tag_ebs(self, conn, role):
""" set tags, carrying the cluster name, instance role and instance id for the EBS storage """
tags = {'Name': 'spilo_' + self.cluster_name, 'Role': role, 'Instance': self.instance_id}... |
tag the current EC2 instance with a cluster role
def _tag_ec2(self, conn, role):
""" tag the current EC2 instance with a cluster role """
tags = {'Role': role}
conn.create_tags([self.instance_id], tags) |
Returns reason why this node can't promote or None if everything is ok.
def failover_limitation(self):
"""Returns reason why this node can't promote or None if everything is ok."""
if not self.reachable:
return 'not reachable'
if self.tags.get('nofailover', False):
retur... |
Return configuration tags merged with dynamically applied tags.
def get_effective_tags(self):
"""Return configuration tags merged with dynamically applied tags."""
tags = self.patroni.tags.copy()
# _disable_sync could be modified concurrently, but we don't care as attribute get and set are atom... |
If we find the 'standby' key in the configuration, we need to bootstrap
not a real master, but a 'standby leader' that will take a base backup
from a remote master and start following it.
def bootstrap_standby_leader(self):
""" If we found 'standby' key in the configuration, we need to bootstr... |
Process synchronous standby behavior.
Synchronous standbys are registered in two places: postgresql.conf and DCS. The order of updating them must
be right. The invariant that should be kept is that if a node is master and sync_standby is set in DCS,
then that node must have synchronous_standby s... |
Runs the specified action while trying to make sure that the node is not assigned synchronous standby status.
Tags us as not allowed to be a sync standby, as we are going to go away. If we currently are one, wait for the
leader to notice and pick an alternative; if the leader changes or goes away we are also f... |
This function performs an HTTP GET request on member.api_url and fetches its status
:returns: `_MemberStatus` object
def fetch_node_status(member):
"""This function perform http get request on member.api_url and fetches its status
:returns: `_MemberStatus` object
"""
try:
... |
Returns whether an instance with the given wal position should consider itself unhealthy to be promoted due to replication lag.
:param wal_position: Current wal position.
:returns True when node is lagging
def is_lagging(self, wal_position):
"""Returns if instance with an wal should consider itself unhealthy to be pr... |
This method tries to determine whether I am healthy enough to become a new leader candidate or not.
def _is_healthiest_node(self, members, check_replication_lag=True):
"""This method tries to determine whether I am healthy enough to became a new leader candidate or not."""
_, my_wal_position = self.st... |
Demote PostgreSQL running as master.
:param mode: One of offline, graceful or immediate.
offline is used when connection to DCS is not available.
graceful is used when failing over to another node due to user request. May only be called running async.
immediate is used when ... |
Checks if manual failover is requested and takes action if appropriate.
Cleans up failover key if failover conditions are not matched.
:returns: action message if demote was initiated, None if no action was taken
def process_manual_failover_from_leader(self):
"""Checks if manual failover is r... |
Cluster has no leader key
def process_unhealthy_cluster(self):
"""Cluster has no leader key"""
if self.is_healthiest_node():
if self.acquire_lock():
failover = self.cluster.failover
if failover:
if self.is_paused() and failover.leader and... |
conditional and unconditional restart
def restart(self, restart_data, run_async=False):
""" conditional and unconditional restart """
assert isinstance(restart_data, dict)
if (not self.restart_matches(restart_data.get('role'),
restart_data.get('postgres_ver... |
Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to another node.
def handle_starting_instance(self):
"""Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to."""
# Check if we are in startup, when paused de... |
In case of a standby cluster this will tell us from which remote
master to stream. Config can be either the patroni config or
cluster.config.data
def get_remote_member(self, member=None):
""" In case of standby cluster this will tel us from which remote
master to stream. Config can b... |
The original `machines` method (property) of the `etcd.Client` class raises an exception
when it fails to get the list of etcd cluster members. This method is called
only when a request failed on one of the etcd members during an `api_execute` call.
For us it's more important to execute the original request rather... |
Fetch the list of etcd-cluster members by resolving the _etcd-server._tcp. SRV record.
This record should contain a list of hosts and peer ports which could be used to run a
'GET http://{host}:{port}/members' request (peer protocol)
def _get_machines_cache_from_srv(self, srv):
"""Fetch list of etcd-cluster m... |
One host might be resolved into multiple ip addresses. We will make a list out of it
def _get_machines_cache_from_dns(self, host, port):
"""One host might be resolved into multiple ip addresses. We will make list out of it"""
if self.protocol == 'http':
ret = []
for af, _, _, _, s... |
This method should fill up `_machines_cache` from scratch.
It could happen only in two cases:
1. During class initialization
2. When all etcd members failed
def _load_machines_cache(self):
"""This method should fill up `_machines_cache` from scratch.
It could happen only in two ... |
Builds and executes pg_ctl command
:returns: `!True` when return_code == 0, otherwise `!False`
def pg_ctl(self, cmd, *args, **kwargs):
"""Builds and executes pg_ctl command
:returns: `!True` when return_code == 0, otherwise `!False`"""
pg_ctl = [self._pgcommand('pg_ctl'), cmd]
... |
Runs pg_isready to see if PostgreSQL is accepting connections.
:returns: 'ok' if PostgreSQL is up, 'reject' if starting up, 'no_response' if not up.
def pg_isready(self):
"""Runs pg_isready to see if PostgreSQL is accepting connections.
:returns: 'ok' if PostgreSQL is up, 'reject' if starting... |
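For context, `pg_isready` reports these states through its exit code (0 = accepting connections, 1 = rejecting while starting up, 2 = no response). A hedged sketch of that mapping, with placeholder connection options rather than Patroni's actual invocation:

```python
import subprocess

STATUS_MAP = {0: 'ok', 1: 'reject', 2: 'no_response'}  # pg_isready exit code -> state name

def pg_isready_status(host='localhost', port=5432):
    # -q suppresses output; -t limits how long we wait for a response
    ret = subprocess.call(['pg_isready', '-q', '-h', host, '-p', str(port), '-t', '3'])
    return STATUS_MAP.get(ret, 'unknown')
``` |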
check if pg_rewind executable is there and that pg_controldata indicates
we have either wal_log_hints or checksums turned on
def can_rewind(self):
""" check if pg_rewind executable is there and that pg_controldata indicates
we have either wal_log_hints or checksums turned on
"""... |
We are always using the same cursor, therefore this method is not thread-safe!!!
You can call it from different threads only if you are holding explicit `AsyncExecutor` lock,
because the main thread is always holding this lock when running HA cycle.
def _query(self, sql, *params):
"""We are alw... |
runs a script after initdb or custom bootstrap script is called and waits until completion.
def run_bootstrap_post_init(self, config):
"""
runs a script after initdb or custom bootstrap script is called and waits until completion.
"""
cmd = config.get('post_bootstrap') or config.get('po... |
go through the replication methods to see if there are ones
that do not require a working replication connection.
def can_create_replica_without_replication_connection(self):
""" go through the replication methods to see if there are ones
            that do not require a working replication co... |
create the replica according to the replica_method
defined by the user. this is a list, so we need to
loop through all methods the user supplies
def create_replica(self, clone_member):
"""
create the replica according to the replica_method
defined by the user. ... |
Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process
is running updates the cached process based on pid file.
def is_running(self):
"""Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process
is ... |
pick a callback command and call it without waiting for it to finish
def call_nowait(self, cb_name):
""" pick a callback command and call it without waiting for it to finish """
if self.bootstrapping:
return
if cb_name in (ACTION_ON_START, ACTION_ON_STOP, ACTION_ON_RESTART, ACTION_O... |
Waits until PostgreSQL opens ports.
def wait_for_port_open(self, postmaster, timeout):
"""Waits until PostgreSQL opens ports."""
for _ in polling_loop(timeout):
with self._cancellable_lock:
if self._is_cancelled:
return False
if not postmaste... |
It might happen that the current value of one (or more) of the parameters below, stored in
the controldata, is higher than the value stored in the global cluster configuration.
Example: max_connections in global configuration is 100, but in controldata
`Current max_connections setting: 200`. If we try t... |
Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed
def start(self, timeout=None, task=None, block_callbacks=False, role=No... |
Stop PostgreSQL
Supports a callback when a safepoint is reached. A safepoint is when no user backend can return a successful
commit to users. Currently this means we wait for user backends to close. But in the future alternate mechanisms
could be added.
:param on_safepoint: This callba... |
Checks if PostgreSQL has completed starting up or failed or still starting.
Should only be called when state == 'starting'
:returns: True if state was changed from 'starting'
def check_startup_state_changed(self):
"""Checks if PostgreSQL has completed starting up or failed or still starting.
... |
Waits for PostgreSQL startup to complete or fail.
:returns: True if start was successful, False otherwise
def wait_for_startup(self, timeout=None):
"""Waits for PostgreSQL startup to complete or fail.
:returns: True if start was successful, False otherwise"""
if not self.is_starting()... |
Restarts PostgreSQL.
When timeout parameter is set the call will block either until PostgreSQL has started, failed to start or
timeout arrives.
:returns: True when restart was successful and timeout did not expire when waiting.
def restart(self, timeout=None, task=None, block_callbacks=False,... |
Replace pg_hba.conf content in the PGDATA if hba_file is not defined in the
`postgresql.parameters` and pg_hba is defined in `postgresql` configuration section.
:returns: True if pg_hba.conf was rewritten.
def _replace_pg_hba(self):
"""
Replace pg_hba.conf content in the PGDATA if hba_... |
Replace pg_ident.conf content in the PGDATA if ident_file is not defined in the
`postgresql.parameters` and pg_ident is defined in the `postgresql` section.
:returns: True if pg_ident.conf was rewritten.
def _replace_pg_ident(self):
"""
Replace pg_ident.conf content in the PGDATA if id... |
return the contents of pg_controldata, or non-True value if pg_controldata call failed
def controldata(self):
""" return the contents of pg_controldata, or non-True value if pg_controldata call failed """
result = {}
# Don't try to call pg_controldata during backup restore
if self._vers... |
copy postgresql.conf to postgresql.conf.backup to be able to retrieve configuration files
- originally stored as symlinks, those are normally skipped by pg_basebackup
- in case of WAL-E basebackup (see http://comments.gmane.org/gmane.comp.db.postgresql.wal-e/239)
def save_configuration_files(sel... |
restore a previously saved postgresql.conf
def restore_configuration_files(self):
""" restore a previously saved postgresql.conf """
try:
for f in self._configuration_to_save:
config_file = os.path.join(self._config_dir, f)
backup_file = os.path.join(self._da... |
- initialize the replica from an existing member (master or replica)
- initialize the replica using the replica creation method that
works without the replication connection (i.e. restore from on-disk
base backup)
def clone(self, clone_member):
"""
- init... |
Initialize a new node from scratch and start it.
def bootstrap(self, config):
""" Initialize a new node from scratch and start it. """
pg_hba = config.get('pg_hba', [])
method = config.get('method') or 'initdb'
self._running_custom_bootstrap = method != 'initdb' and method in config and... |
Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
:returns tuple of candidate name or None, and bool showing if the member is the active synchronous standb... |
Sets a node to be synchronous standby and if changed does a reload for PostgreSQL.
def set_synchronous_standby(self, name):
"""Sets a node to be synchronous standby and if changed does a reload for PostgreSQL."""
if name and name != '*':
name = quote_ident(name)
if name != self._syn... |
Convert the server_version to integer
>>> Postgresql.postgres_version_to_int('9.5.3')
90503
>>> Postgresql.postgres_version_to_int('9.3.13')
90313
>>> Postgresql.postgres_version_to_int('10.1')
100001
>>> Postgresql.postgres_version_to_int('10') # doctest: +IGNO... |
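A minimal sketch consistent with the doctests above (pre-10 releases carry three version components, 10 and later carry two); the real method's error handling and exact exceptions are not reproduced here:

```python
def postgres_version_to_int(pg_version):
    parts = [int(p) for p in pg_version.split('.')]
    if len(parts) == 3:                       # e.g. '9.5.3' -> 90503
        return parts[0] * 10000 + parts[1] * 100 + parts[2]
    if len(parts) == 2 and parts[0] >= 10:    # e.g. '10.1' -> 100001
        return parts[0] * 10000 + parts[1]
    raise ValueError('invalid PostgreSQL version: {0}'.format(pg_version))
``` |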
returns the option names/values from postmaster.opts; an empty dict if the read failed or there is no file
def read_postmaster_opts(self):
"""returns the list of option names/values from postgres.opts, Empty dict if read failed or no file"""
result = {}
try:
with open(os.path.join(self._data_... |
run a given command in single-user mode. If the command is empty, just start and stop
def single_user_mode(self, command=None, options=None):
"""run a given command in a single-user mode. If the command is empty - then just start and stop"""
cmd = [self._pgcommand('postgres'), '--single', '-D',... |
>>> deep_compare({'1': None}, {})
False
>>> deep_compare({'1': {}}, {'1': None})
False
>>> deep_compare({'1': [1]}, {'1': [2]})
False
>>> deep_compare({'1': 2}, {'1': '2'})
True
>>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})
True
def deep_compare(obj1, obj2):
"""
... |
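A sketch matching the doctests above: key sets must coincide, nested dicts recurse, and leaf values are compared by their string representation (which is why `{'1': 2}` equals `{'1': '2'}`); details of the real helper may differ:

```python
def deep_compare(obj1, obj2):
    if set(obj1.keys()) != set(obj2.keys()):
        return False
    for key, value in obj1.items():
        if isinstance(value, dict) and isinstance(obj2[key], dict):
            if not deep_compare(value, obj2[key]):
                return False
        elif str(value) != str(obj2[key]):
            return False
    return True
``` |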
recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed
def patch_config(config, data):
"""recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed"""
is_changed = False
for name, value in data.items():
if value is None:
... |
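A hedged sketch of the recursive patch described above: a `None` value removes the key, nested dicts recurse, and any other value overwrites when the stringified values differ; the real implementation may handle more edge cases:

```python
def patch_config(config, data):
    """Recursively 'patch' `config` with `data`; return True if `config` changed."""
    is_changed = False
    for name, value in data.items():
        if value is None:
            if config.pop(name, None) is not None:
                is_changed = True
        elif isinstance(value, dict) and isinstance(config.get(name), dict):
            if patch_config(config[name], value):
                is_changed = True
        elif str(config.get(name)) != str(value):
            config[name] = value
            is_changed = True
    return is_changed
``` |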
>>> parse_bool(1)
True
>>> parse_bool('off')
False
>>> parse_bool('foo')
def parse_bool(value):
"""
>>> parse_bool(1)
True
>>> parse_bool('off')
False
>>> parse_bool('foo')
"""
value = str(value).lower()
if value in ('on', 'true', 'yes', '1'):
return True
... |
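A possible completion of the truncated body, assuming the falsy spellings mirror PostgreSQL's boolean settings (off/false/no/0) and anything unrecognised implicitly yields `None`, as the third doctest implies:

```python
def parse_bool(value):
    value = str(value).lower()
    if value in ('on', 'true', 'yes', '1'):
        return True
    if value in ('off', 'false', 'no', '0'):
        return False
    # otherwise fall through and implicitly return None
``` |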
As most as possible close equivalent of strtol(3) function (with base=0),
used by postgres to parse parameter values.
>>> strtol(0) == (0, '')
True
>>> strtol(1) == (1, '')
True
>>> strtol(9) == (9, '')
True
>>> strtol(' +0x400MB') == (1024, 'MB')
True
>>> strtol(' -070d') == ... |
>>> parse_int('1') == 1
True
>>> parse_int(' 0x400 MB ', '16384kB') == 64
True
>>> parse_int('1MB', 'kB') == 1024
True
>>> parse_int('1000 ms', 's') == 1
True
>>> parse_int('1GB', 'MB') is None
True
>>> parse_int(0) == 0
True
def parse_int(value, base_unit=None):
"""
... |
>>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE')
True
>>> compare_values('real', None, '1.23', 1.23)
True
def compare_values(vartype, unit, old_value, new_value):
"""
>>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE')
True
>>> compare_values('real', None, '1.23... |
Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.
def polling_loop(timeout, interval=1):
"""Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration."""
start_time = time.time()
iteration = 0
... |
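A minimal sketch of such a generator, assuming it yields an incrementing counter and sleeps `interval` seconds between yields until `timeout` seconds have elapsed since iteration started; how the real loop handles the final sleep may differ:

```python
import time

def polling_loop(timeout, interval=1):
    """Yield an iteration counter until `timeout` seconds have passed."""
    start_time = time.time()
    iteration = 0
    end_time = start_time + timeout
    while time.time() < end_time:
        yield iteration
        iteration += 1
        time.sleep(interval)
``` |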
Reset the attempt counter
def reset(self):
"""Reset the attempt counter"""
self._attempts = 0
self._cur_delay = self.delay
self._cur_stoptime = None |
Return a clone of this retry manager
def copy(self):
"""Return a clone of this retry manager"""
return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,
... |
Decorator function to check authorization header.
Usage example:
@check_auth
def do_PUT_foo():
pass
def check_auth(func):
"""Decorator function to check authorization header.
Usage example:
@check_auth
def do_PUT_foo():
pass
"""
def wrapper(handler, *args, **kwargs... |
Default method for processing all GET requests which can not be routed to other methods
def do_GET(self, write_status_code_only=False):
"""Default method for processing all GET requests which can not be routed to other methods"""
path = '/master' if self.path == '/' else self.path
response = s... |
parses the given schedule and validates it
def parse_schedule(schedule, action):
""" parses the given schedule and validates at """
error = None
scheduled_at = None
try:
scheduled_at = dateutil.parser.parse(schedule)
if scheduled_at.tzinfo is None:
... |
Override parse_request method to enrich basic functionality of `BaseHTTPRequestHandler` class
Original class can only invoke do_GET, do_POST, do_PUT, etc method implementations if they are defined.
But we would like to have at least some simple routing mechanism, i.e.:
GET /uri1/part2 request s... |
Loads config.yaml from filesystem and applies some values which were set via ENV
def _load_config_file(self):
"""Loads config.yaml from filesystem and applies some values which were set via ENV"""
with open(self._config_file) as f:
config = yaml.safe_load(f)
patch_config(config,... |
Translate member name to valid PostgreSQL slot name.
PostgreSQL replication slot names must be valid PostgreSQL names. This function maps the wider space of
member names to valid PostgreSQL names. Names are lowercased, dashes and periods common in hostnames
are replaced with underscores, other characters a... |
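An illustrative sketch of the mapping described above; the exact regular expression and any length handling in the real helper are assumptions here:

```python
import re

def slot_name_from_member_name(member_name):
    """Lowercase, turn '-' and '.' into '_', drop any other character not valid in a slot name."""
    def replace_char(match):
        c = match.group(0)
        return '_' if c in '-.' else ''
    return re.sub(r'[^a-z0-9_]', replace_char, member_name.lower())

# e.g. slot_name_from_member_name('pg-node1.example.com') -> 'pg_node1_example_com'
``` |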
The original Governor stores connection strings for each cluster member in the following format:
postgres://{username}:{password}@{connect_address}/postgres
Since each of our patroni instances provides its own REST API endpoint, it's good to store this information
in DCS along with the postgresql connection string. I... |
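Illustrative only: roughly what a member's value stored in the DCS might look like once the REST API endpoint is kept alongside the PostgreSQL connection string (all addresses and keys below are made-up examples, not output of the real code):

```python
member_data = {
    'conn_url': 'postgres://10.0.0.11:5432/postgres',
    'api_url': 'http://10.0.0.11:8008/patroni',
    'state': 'running',
    'role': 'replica',
}
``` |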
Get names of DCS modules, depending on execution environment. If being packaged with PyInstaller,
modules aren't discoverable dynamically by scanning source directory because `FrozenImporter` doesn't
implement `iter_modules` method. But it is still possible to find all potential DCS modules by
iterating thr... |
>>> Member.from_node(-1, '', '', '{"conn_url": "postgres://foo@bar/postgres"}') is not None
True
>>> Member.from_node(-1, '', '', '{')
Member(index=-1, name='', session='', data={})
def from_node(index, name, session, data):
"""
>>> Member.from_node(-1, '', '', '{"conn_url": "po... |
>>> ClusterConfig.from_node(1, '{') is None
False
def from_node(index, data, modify_index=None):
"""
>>> ClusterConfig.from_node(1, '{') is None
False
"""
try:
data = json.loads(data)
except (TypeError, ValueError):
data = None
... |
>>> SyncState.from_node(1, None).leader is None
True
>>> SyncState.from_node(1, '{}').leader is None
True
>>> SyncState.from_node(1, '{').leader is None
True
>>> SyncState.from_node(1, '[]').leader is None
True
>>> SyncState.from_node(1, '{"leader": "leade... |
>>> h = TimelineHistory.from_node(1, 2)
>>> h.lines
[]
def from_node(index, value):
"""
>>> h = TimelineHistory.from_node(1, 2)
>>> h.lines
[]
"""
try:
lines = json.loads(value)
except (TypeError, ValueError):
lines = None
... |
>>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline
0
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline
1
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline
0
def timeline(self):
"""
>>> Cluster(0, 0, 0, 0, 0, ... |
>>> Kubernetes.subsets_changed([], [])
False
>>> Kubernetes.subsets_changed([], [k8s_client.V1EndpointSubset()])
True
>>> s1 = [k8s_client.V1EndpointSubset(addresses=[k8s_client.V1EndpointAddress(ip='1.2.3.4')])]
>>> s2 = [k8s_client.V1EndpointSubset(addresses=[k8s_client.V1Endpo... |
stop all Patronis, remove their data directory and cleanup the keys in etcd
def after_feature(context, feature):
""" stop all Patronis, remove their data directory and cleanup the keys in etcd """
context.pctl.stop_all()
shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
context.dcs_ctl.cle... |
terminate process and wipe out the temp work directory, but only if we actually started it
def stop(self, kill=False, timeout=15):
""" terminate process and wipe out the temp work directory, but only if we actually started it"""
super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
... |
Runs the specified ioctl on the underlying fd.
Raises WatchdogError if the device is closed.
Raises OSError or IOError (Python 2) when the ioctl fails.
def _ioctl(self, func, arg):
"""Runs the specified ioctl on the underlying fd.
Raises WatchdogError if the device is closed.
... |
Returns JSON-safe version of `string`.
If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
as it can safely be encoded to JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to "data" URL scheme (RFC2397). Since JSON is not... |
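A rough sketch of the behaviour described for this helper: return the string unchanged when it is already text or decodes as UTF-8, otherwise wrap it as an RFC 2397 data URL; the real helper's signature and edge cases may differ:

```python
import base64

def json_safe(string, content_type='application/octet-stream'):
    if isinstance(string, str):
        return string                        # already text; safe to embed in JSON
    try:
        return string.decode('utf-8')         # bytes holding valid UTF-8
    except UnicodeDecodeError:
        # raw/binary payload: base64-encode and wrap as an RFC 2397 data URL
        encoded = base64.b64encode(string).decode('ascii')
        return 'data:{0};base64,{1}'.format(content_type, encoded)
``` |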
Returns files dict from request context.
def get_files():
"""Returns files dict from request context."""
files = dict()
for k, v in request.files.items():
content_type = request.files[k].content_type or 'application/octet-stream'
val = json_safe(v.read(), content_type)
if files.ge... |
Returns headers dict from request context.
def get_headers(hide_env=True):
"""Returns headers dict from request context."""
headers = dict(request.headers.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_HEADERS:
try:
del headers[key]
... |
Convert a MultiDict into a regular dict. If there is more than one value
for a key, the result will have a list of values for the key. Otherwise it
will have the plain value.
def semiflatten(multi):
"""Convert a MutiDict into a regular dict. If there are more than one value
for a key, the result will h... |
Since we might be hosted behind a proxy, we need to check the
X-Forwarded-Proto, X-Forwarded-Protocol, or X-Forwarded-SSL headers
to find out what protocol was used to access us.
def get_url(request):
"""
Since we might be hosted behind a proxy, we need to check the
X-Forwarded-Proto, X-Forwarded-P... |
Returns request dict of given keys.
def get_dict(*keys, **extras):
"""Returns request dict of given keys."""
_keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json', 'method')
assert all(map(_keys.__contains__, keys))
data = request.data
form = semiflatten(request.form)
... |
Returns response object of given status code.
def status_code(code):
"""Returns response object of given status code."""
redirect = dict(headers=dict(location=REDIRECT_LOCATION))
code_map = {
301: redirect,
302: redirect,
303: redirect,
304: dict(data=''),
305: red... |
Checks user authentication using HTTP Basic Auth.
def check_basic_auth(user, passwd):
"""Checks user authentication using HTTP Basic Auth."""
auth = request.authorization
return auth and auth.username == user and auth.password == passwd |
Create HA1 hash by realm, username, password
HA1 = md5(A1) = MD5(username:realm:password)
def HA1(realm, username, password, algorithm):
"""Create HA1 hash by realm, username, password
HA1 = md5(A1) = MD5(username:realm:password)
"""
if not realm:
realm = u''
return H(b":".join([usern... |
Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int", then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
def HA2(credentials, request, algorithm):
"""Create H... |