Dataset Viewer
Auto-converted to Parquet Duplicate
code
stringlengths
15
1.19k
docstring
stringlengths
13
400
def name(data):
    """Return the resource name stored under data["metadata"]["name"]."""
    metadata = data["metadata"]
    return metadata["name"]
Returns the name of the resource in the supplied data
def namespace(data):
    """Return the resource namespace stored under data["metadata"]["namespace"]."""
    metadata = data["metadata"]
    return metadata["namespace"]
Returns the namespace of the resource in the supplied data
def read_accession_file(f):
    """Yield accession numbers from an open file, skipping blanks and '#' comments."""
    for raw in f:
        entry = raw.strip()
        if entry and not entry.startswith("#"):
            yield entry
Read an open accession file, returning the list of accession numbers it contains. This automatically skips blank lines and comments.
def ensure_trailing_slash(expression):
    """Return *expression* with exactly one trailing slash; falsy input unchanged."""
    if not expression:
        return expression
    return expression.rstrip("/") + "/"
Add a trailing slash to rsync source/destination locations.
def escape_bytes(bytes_): res = "" for byte in bytes_: char = chr(byte) if char == "\\": res += "\\\\" elif char == "'": res += "\\'" elif char in (string.digits + string.ascii_letters + string.punctuation + " "): res += char else: ...
Convert a bytes object to an escaped string. Convert bytes to an ASCII string. Non-printable characters and a single quote (') are escaped.
def _check_entry_name(args): if args.entry == "": print("Specified entry name is empty", file=sys.stderr) return 1 return 0
Validate an entry name specified on the command line.
def _process_init_command(args, _model): assert args.command == "init" return 0
Handle the init command: create an empty password database.
def dedent2(text):
    """Dedent *text* and strip the leading '|' marker from every line."""
    pieces = []
    for line in textwrap.dedent(text).splitlines(True):
        assert line[:1] == "|"
        pieces.append(line[1:])
    return "".join(pieces)
Remove any common leading whitespace plus the character '|' from every line in the given text.
def path_element_to_string(path_element):
    """Escape '\\' and '/' in a single path element with a leading backslash."""
    escapes = {"\\": "\\\\", "/": "\\/"}
    return "".join(escapes.get(char, char) for char in path_element)
Convert a single path element to its escaped string representation.
def save_model(model, network_type, algorithm, appliance, save_model_dir):
    """Save a Keras *model* to *save_model_dir*.

    NOTE(review): network_type, algorithm and appliance are currently unused;
    presumably intended to build the file name — confirm with callers.
    """
    model_path = save_model_dir
    if not os.path.exists(model_path):
        # Touch the path so it exists before the model is written.
        open(model_path, "a").close()
    model.save(model_path)
Saves a model to a specified location. Models are named using a combination of their target appliance, architecture, and pruning algorithm. model (tensorflow.keras.Model): The Keras model to save.
def sim_parameters():
    """Set the module-global grid and cycle parameters used in simulations."""
    global rows, cols, h, per_cycle, num_cycles
    rows, cols = 100, 100
    h = 15
    per_cycle = 10**7
    num_cycles = 10**2
This function defines the initial parameters used in simulations
def reaction_rates():
    """Set the module-global reaction rate for each simulated process."""
    global bx, bm, dx, dm, sm, sx, lx
    bx, bm = 1, 0
    dx, dm = 0, 0
    sm, sx = 1, 1
    lx = 2.5
This function defines the reaction rates for each process
def _insert_service_modes(target, connection, **kw): statement = target.insert().values( [ {"id": 1, "name": "bus"}, {"id": 2, "name": "coach"}, {"id": 3, "name": "tram"}, {"id": 4, "name": "metro"}, {"id": 5, "name": "underground"}, ] ...
Inserts service mode IDs and names after creating lookup table.
def _display_operators(operators): def sort_name(o): return o.name def filter_op(o): return any([o.email, o.address, o.website, o.twitter]) return sorted(filter(filter_op, operators), key=sort_name)
Returns sorted list of operators with any information.
def _merge_forward(graph, sequence, path, index): i = index for v in path: if v in sequence: continue after = [j for j, w in enumerate(sequence[i:], i) if v in graph.following(w)] if after: i = after[-1] + 1 sequence.insert(i, v) i += 1
Merges path into sequence, ensuring all new vertices follow the existing ones in the adjacency list.
def _merge_backward(graph, sequence, path, index): i = index for v in path[::-1]: if v in sequence: continue after = [ i - j for j, w in enumerate(sequence[i::-1]) if v in graph.preceding(w) ] if after: i = after[-1] sequence.insert(i, ...
Merges path into sequence, ensuring all new vertices precede the existing ones in the adjacency list.
def _count_cycles(graph, sequence): cycles = set() indices = {v: i for i, v in enumerate(sequence)} for v in sequence: cycles |= {(u, v) for u in graph.preceding(v) if indices[u] > indices[v]} return cycles
Counts number of cycles in a sequence by checking the preceding nodes for every vertex in order.
def _median(collection): ordered = sorted(collection) len_ = len(collection) middle = len_ // 2 if not ordered: return -1 elif len_ % 2 == 1: return ordered[middle] else: return (ordered[middle - 1] + ordered[middle]) / 2
Calculates the median of a collection, e.g. a list.
def _memoize_graph(graph, method): adj = None result = None @functools.wraps(method) def _method(*args, **kwargs): nonlocal adj, result new_adj = graph.adj if adj != new_adj: result = method(*args, **kwargs) adj = new_adj return result return...
Wraps graph method in a function that remembers adjacency list and last result.
def from_adj(cls, adj_list):
    """Build a graph from a dict mapping vertices to iterables of successors."""
    adj = {start: set(following) for start, following in adj_list.items()}
    # Vertices that appear only as successors still get an (empty) entry.
    for vertex in set().union(*adj_list.values()):
        adj.setdefault(vertex, set())
    graph = cls()
    graph._v = adj
    return graph
Creates graph from adjacency list as a dict of vertices and iterables of following vertices.
def define_collation(_, connection, **kw):
    """Create the utf8_numeric ICU collation required for some text columns."""
    sql = (
        "CREATE COLLATION IF NOT EXISTS utf8_numeric "
        "(provider = icu, locale = 'en@colNumeric=yes')"
    )
    connection.execute(sql)
Define the numeric collation required for some text columns.
def _list_geojson(list_stops): geojson = { "type": "FeatureCollection", "features": [s.to_geojson() for s in list_stops], } return geojson
Creates a list of stop data in GeoJSON format.
def from_row(cls, row):
    """Build a TimetableStop-like instance from a query result row."""
    fields = (
        row.stop_point_ref,
        row.arrive,
        row.depart,
        row.timing_point,
        row.utc_arrive,
        row.utc_depart,
    )
    return cls(*fields)
Creates TimetableStop instance from row returned from query.
def xml_as_dict(element):
    """Map a flat XML element's children: tag -> text (or 'default' attribute).

    Raises ValueError if two children share a tag.
    """
    data = {}
    for child in element:
        if child.tag in data:
            raise ValueError(f"Multiple elements have the same tag {child.tag!r}.")
        if child.text is None:
            data[child.tag] = child.get("default", None)
        else:
            data[child.tag] = child.text
    return data
Creates a dictionary from a flat XML element.
def _convert_to_text(result): if isinstance(result, list) and not result: node = None elif isinstance(result, list) and len(result) == 1: node = result[0] elif isinstance(result, list): raise ValueError("XPath query returned multiple elements.") else: node = result tr...
Takes first element from list and returns text or None.
def capitalize(_, text):
    """Capitalise the first letter of every word, even inside brackets."""
    capitalised = []
    for word in text.lower().split():
        for pos, char in enumerate(word):
            if char.isalpha():
                word = word[:pos] + char.upper() + word[pos + 1:]
                break
        capitalised.append(word)
    return " ".join(capitalised)
Capitalises every word in a string, including those enclosed within brackets and excluding apostrophes.
def _iter_every(iterable, length): iterator = iter(iterable) section = list(itertools.islice(iterator, length)) while section: yield section section = list(itertools.islice(iterator, length))
Generator for iterable split into lists with maximum length.
def _date_long_form(date): second_last = (date.day // 10) % 10 last = date.day % 10 if second_last != 1 and last == 1: ordinal = "st" elif second_last != 1 and last == 2: ordinal = "nd" elif second_last != 1 and last == 3: ordinal = "rd" else: ordinal = "th" r...
Displays a date in long form, eg 'Monday 29th April 2019'.
def _file_name(response): content = response.headers.get("content-disposition") if content and "filename" in content: file_name = re.search(r"filename=(.+)", content).group(1) else: path = urllib.parse.urlparse(response.url)[2] file_name = path.split("/")[-1] return file_name
Gets the file name from the response header or the URL name.
def iter_archive(archive):
    """Yield an open file-like object for each member of a zip archive.

    Each member is closed before the next is opened, and the archive itself
    is closed when iteration ends — including early termination, which the
    original version leaked (the ZipFile stayed open if the caller broke out
    of the loop; ``with`` runs on GeneratorExit as well).
    """
    with zipfile.ZipFile(archive) as zip_:
        for member in zip_.namelist():
            with zip_.open(member) as current:
                yield current
Generator function iterating over all files in a zipped archive file. The generator will open each file, yielding its file-like object. This file will be closed before opening the next file. When the iteration is finished the archive is closed.
def tokenizeSentence(raw):
    """Split *raw* into sentences on newline characters; non-str input yields []."""
    return raw.split("\n") if type(raw) is str else []
Function tokenizes a string into sentences based on the newline character.
def tokenizeWord(raw):
    """Split *raw* into word tokens (runs of word characters); non-str input yields []."""
    return re.findall(r"[\w]+", raw) if type(raw) is str else []
Function tokenizes a string into words based on non-word characters.
def filterWord(listOfWords, blackSet):
    """Drop stop words (members of blackSet) and purely numeric tokens."""
    kept = []
    for word in listOfWords:
        if word in blackSet or word.isdigit():
            continue
        kept.append(word)
    return kept
Function filters out all stop words and numbers
def filterSentence(listOfSentences, numberOfWordsPerSentence):
    """Keep only sentences with more than numberOfWordsPerSentence items."""
    return list(
        filter(lambda s: len(s) > numberOfWordsPerSentence, listOfSentences)
    )
Function filters out all sentences which have less than a number of words
def find_pivot(matrix, col):
    """Return the row index (>= col) whose entry in column *col* has maximum |value|."""
    magnitudes = [abs(matrix[r][col]) for r in range(col, len(matrix))]
    return magnitudes.index(max(magnitudes)) + col
Given the matrix and the column index, finds the line that should be swapped with the "current" pivot line. The number returned is the index of the line.
def temper(cls, y):
    """Return the tempered output for raw state word *y* (called during genrand)."""
    y ^= y >> cls._SHIFT_U                  # right-shift diffusion
    y ^= (y << cls._SHIFT_S) & cls._MASK_B  # masked left-shift
    y ^= (y << cls._SHIFT_T) & cls._MASK_C  # masked left-shift
    y ^= y >> cls._SHIFT_L                  # final right-shift
    return y & 0xFFFFFFFF                   # clamp to 32 bits
Returns the tempered state value y, called during genrand.
def untemper(cls, y):
    """Invert temper(), recovering the raw state word from output *y*."""
    y ^= y >> cls._SHIFT_L
    y ^= (y << cls._SHIFT_T) & cls._MASK_C
    # The shift-and-mask steps must be applied repeatedly to peel back all bits.
    for _ in range(7):
        y ^= (y << cls._SHIFT_S) & cls._MASK_B
    for _ in range(3):
        y ^= y >> cls._SHIFT_U
    return y & 0xFFFFFFFF
Returns the un-tempered original state value of y. (for reversing)
def sizeof_fmt(num, suffix="B"):
    """Format a byte count as a human-readable decimal (1000-based) string."""
    units = ["", "K", "M", "G", "T", "P", "E", "Z"]
    for unit in units:
        if abs(num) < 1000.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1000.0
    # Fell through all units: yottabytes.
    return "%.1f%s%s" % (num, "Y", suffix)
Human-readable string for a number of bytes.
def unpack_phases(phases):
    """Normalise a phases container (str/list/tuple/set/dict) to a sorted list.

    Unrecognised types yield None.
    """
    if isinstance(phases, (list, tuple, set)):
        return sorted(phases)
    if isinstance(phases, dict):
        return sorted(phases.keys())
    if type(phases) is str:
        return [phases]
    return None
Convert a phases list/dict into a sorted list.
def parseLengthWithUnits(str):
    """Parse an SVG length into (value, unit).

    Greatly simplified: only unitless values, 'px' and '%' are supported;
    anything unparsable returns (None, None). The parameter name shadows the
    builtin ``str``; kept unchanged for backward compatibility with callers.
    """
    u = "px"
    s = str.strip()
    if s[-2:] == "px":
        s = s[:-2]
    elif s[-1:] == "%":
        u = "%"
        s = s[:-1]
    try:
        v = float(s)
    except ValueError:  # was a bare except; only float() parse errors are expected
        return None, None
    return v, u
Parse an SVG value which may or may not have units attached This version is greatly simplified in that it only allows: no units, units of px, and units of %. Everything else, it returns None for. There is a more general routine to consider in scour.py if more generality is ever needed.
def replaceHyphensWithNDashes(list):
    """Return a copy of *list* with '-' replaced by '–' (en dash) in each string."""
    return [text.replace("-", "–") for text in list]
replace hyphens with n-dashes
def normalize_query(
    query_string,
    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
    normspace=re.compile(r"\s{2,}").sub,
):
    """Split a query into terms (quoted phrases kept whole), squeezing extra spaces."""
    terms = []
    for quoted, bare in findterms(query_string):
        term = (quoted or bare).strip()
        terms.append(normspace(" ", term))
    return terms
Find the term in query string and reduce redundant spaces.
def warning(*objs):
    """Print the given text/objects to stderr (standard print semantics)."""
    print(*objs, file=sys.stderr)
Prints warning text/object to stderr
def zipp(params, theano_params):
    """Copy each value in *params* into the matching Theano shared variable."""
    for key in params:
        theano_params[key].set_value(params[key])
Push parameters to Theano shared variables
def load_pickle_dictionary(dictionary_path):
    """Load and return a pickled dictionary from *dictionary_path*."""
    with open(dictionary_path, mode="rb") as handle:
        return pickle.load(handle)
Load a dictionary and optionally also return the inverted dictionary
def load_json(filename):
    """Load a JSON file (e.g. a Nematus vocabulary) and return its contents."""
    with open(filename, mode="rb") as handle:
        return json.load(handle)
json loader to load Nematus vocabularies
def idx_to_word(seq, ivocab, remove_eos_token=True):
    """Join the words for a sequence of word IDs; unknown IDs map to ivocab[1].

    A trailing 0 (EOS) is dropped when remove_eos_token is true.
    """
    if seq[-1] == 0 and remove_eos_token:
        seq = seq[:-1]
    unk = ivocab[1]
    return " ".join(ivocab.get(idx, unk) for idx in seq)
Get the words for a sequence of word IDs
def append_flags(env_var, additional_flags, env=None):
    """Append *additional_flags* to env[env_var], space-separated.

    Defaults to os.environ when *env* is None.

    Bug fix: when env_var was unset or empty, ``"".split(" ")`` yielded
    ``[""]``, producing a spurious leading space in the stored value; empty
    tokens are now dropped before joining.
    """
    if env is None:
        env = os.environ
    flags = [flag for flag in env.get(env_var, "").split(" ") if flag]
    flags.extend(additional_flags)
    env[env_var] = " ".join(flags)
Append |additional_flags| to those already set in the value of |env_var| and assign env_var to the result.
def _is_systemd():
    """Determine whether systemd is managing system services.

    Same strategy as sd_booted() in libsystemd (or /usr/sbin/service):
    /run/systemd/system/ exists only when systemd is the init system.
    """
    return os.path.exists("/run/systemd/system/")
Determine if systemd is managing system services; the implementation follows the same strategy as, for example, sd_booted() in libsystemd, or /usr/sbin/service
def NormalizeUserIdToUri(userid):
    """Best-effort normalisation of a user-supplied id to a URI.

    Ids already carrying a scheme (http:, https:, acct:) pass through; ids
    containing '@' (not at position 0) get an acct: prefix; everything else
    is treated as an http host.
    """
    userid = userid.strip()
    if userid.startswith(("http:", "https:", "acct:")):
        return userid
    if userid.find("@") > 0:
        return "acct:" + userid
    return "http://" + userid
Normalizes a user-provided user id to a reasonable guess at a URI.
def _ToPretty(text, indent, linelength): tl = linelength - indent output = "" for i in range(0, len(text), tl): if output: output += "\n" output += " " * indent + text[i : i + tl] return output
Makes huge text lines pretty, or at least printable.
def valid_cycle(enigma, rotor_positions, E, perm_cycle):
    """Check whether encrypting E through *perm_cycle* maps it back to itself."""
    c = E
    for P in perm_cycle:
        enigma.set_rotor_positions(rotor_positions)
        enigma.step_to(abs(P))
        c = enigma.encrypt(c)
    # Leave the machine back in its starting configuration.
    enigma.set_rotor_positions(rotor_positions)
    return c == E
Check if the permutation cycle is valid for the given configuration
def tag(context):
    """Return the context node's tag with its namespace prefix stripped.

    Shorthand replacement for the XPath local-name() function.
    """
    node = context.context_node
    namespace = node.nsmap.get(node.prefix)
    return node.tag.replace("{{{}}}".format(namespace), "")
:return str Returns tag without namespace. Just short replacement for xpath local-name() function without arguments
def match(context, tag, *search):
    """Return True when *tag* exactly equals any of the *search* variants."""
    return tag in search
:return bool search exact match for tag from several variants
def full_to_half(s): n = [] for char in s: try: num = ord(char) if num == 0x3000: num = 32 elif 0xFF01 <= num <= 0xFF5E: num -= 0xFEE0 char = chr(num) n.append(char) except: pass return ""...
Convert full-width character to half-width one
def initialize_table(table): batch_id = str(int(datetime.datetime.now().timestamp())) table.put_item( Item={ "BatchId": "LATEST", "Name": "LATEST", "FileSizeKb": 0, "NumFiles": 0, "BatchWindowStartTime": batch_id, } ) return bat...
Initialize 'Latest' Item in DynamoDB if no LATEST item is found
def isIPv4(ip):
    """Return True for a dotted quad (four 1-3 digit groups).

    Note: octet ranges are not validated, so e.g. '999.1.1.1' passes.
    """
    return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))
Returns *True* if the given string is a dotted quad (four integers separated by a period).
def conv_bool(input_value):
    """Coerce *input_value* to bool.

    Only explicit falsehoods — None, False, 0, '', 'false', 'no' (any case) —
    map to False; everything else is True.
    """
    if input_value is None or input_value is False:
        return False
    if str(input_value).lower() in ("false", "", "no"):
        return False
    if input_value == 0:
        return False
    return True
Convert anything that is not explicit false (like empty, 0 or false)
def load_log(log_file):
    """Return the list of processed ids from *log_file*, creating it if absent."""
    try:
        with open(log_file, "r", encoding="utf-8") as handle:
            return handle.read().splitlines()
    except FileNotFoundError:
        # Create the (empty) log so later appends succeed.
        with open(log_file, "a", encoding="utf-8"):
            return []
Reads the processed comments/posts log file and creates it if it doesn't exist. Returns list A list of Reddit comments/posts ids.
def update_log(log_file, item_id):
    """Append *item_id* on its own line to the processed-items log file."""
    with open(log_file, "a", encoding="utf-8") as handle:
        handle.write("{}\n".format(item_id))
Updates the processed posts log with the given post id. Parameters comment_id : str A Reddit post id.
def pick_wanted_entities(entities, drop_patterns=drop_patterns):
    """Return the indices of *entities* not listed in *drop_patterns*.

    The default drop_patterns is a module-level list of entities to eliminate.
    """
    return [
        index
        for index, entity in enumerate(entities)
        if entity not in drop_patterns
    ]
Given a list of entities corresponding to a dataframe of observations (where these may be in the index) and a list of entities you'd like to eliminate, returns the indices to keep from the originating dataframe so the unwanted entities are removed.
def create_source_object(sources):
    """Split a '/'-separated source string into the API's source dicts, or None."""
    if not sources:
        return None
    return [
        {"source-name": part, "id": index, "source-description": ""}
        for index, part in enumerate(sources.split("/"))
    ]
Format the source information as appropriate for the api
def sanitize_filename(filename):
    """Reduce *filename* to word characters, hyphens and dots.

    (?u) enables Unicode-aware \\w matching; every character outside
    [-\\w.] is removed.
    """
    return re.sub(r"(?u)[^-\w.]", "", filename)
Make the given string into a filename by removing non-descriptive characters.
def clean_whitespace(text):
    """Collapse runs of whitespace to single spaces and strip the ends.

    None (or any falsy input) becomes "".
    """
    normalised = str(text or "").strip()
    return re.sub(r"\s+", " ", normalised)
Replace all contiguous whitespace with single space character, strip leading and trailing whitespace.
def _get_key(cls, owner): if hasattr(owner, "key"): return owner.key return owner
Ensures owner is a key and not entity
def is_project(project):
    """Return True when the project has at least one 'make' or 'package' step."""
    return any(step.type in ("make", "package") for step in project.steps)
Checks if a project meets the minimum step standards
def subtract_bg(signal, bg):
    """Return the background-subtracted intensity, signal - bg."""
    corrected = signal - bg
    return corrected
returns normalised intensity as (signal - bg)
def _random_data_and_serialization_params( n_samples=100, n_channels=1, value_range=(-2000, 2000), dtype="float64" ): raise NotImplementedError("Not implemented yet")
Get random data and serialization params
def write_story(wb, col, key, filename): try: jira_sheet = wb.get_sheet_by_name("JIRA Stories") jira_sheet[col + "2"] = key wb.save(filename) except Exception: print( """Unable to save workbook. Please close excel spreadsheet then try again.""" ...
Writes Stories to Excel Workbook.
def extract_comments(comments):
    """Concatenate the 'body' of each JIRA comment dict, newline-terminated."""
    return "".join(comment["body"] + "\n" for comment in comments)
Utility method for parsing JIRA comments represented as JSON
def flat_correct(ccd, flat):
    """Flat-field correct *ccd* using *flat*, normalising the flat to unit mean.

    Bug fix: the flat's mean is captured once, *before* normalisation. The old
    code divided the uncertainty by the mean of the already-normalised data
    (which is ~1), leaving the uncertainty effectively unscaled.

    Returns the (modified) ccd object.
    """
    mean = flat.data.mean()
    flat.data = flat.data / mean
    if flat.uncertainty is not None:
        flat.uncertainty.array = flat.uncertainty.array / mean
    # NOTE(review): if CCDData.divide returns a new object rather than
    # operating in place, the result should be reassigned — confirm the API.
    ccd.divide(flat)
    return ccd
Correct the image for flatfielding Parameters ccd : CCDData object Data to be flatfield corrected flat : CCDData object Flatfield to apply to the data {log} Returns ccd : CCDData object CCDData object with flat corrected
def _subsets(n): if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [ [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1], ] else: ...
Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. Examples
def _init_python_printing(stringify_func): def _displayhook(arg): if arg is not None: builtins._ = None if isinstance(arg, str): print(repr(arg)) else: print(stringify_func(arg)) builtins._ = arg sys.displayhook = _displayh...
Setup printing in Python interactive session.
def as_int(n):
    """Convert *n* to a builtin int, requiring exact equality with the input.

    Raises ValueError when *n* cannot be converted or has a non-integral value.
    """
    try:
        result = int(n)
    except TypeError:
        raise ValueError(f"{n} is not an integer")
    if result != n:
        raise ValueError(f"{n} is not an integer")
    return result
Convert the argument to a builtin integer. The return value is guaranteed to be equal to the input. ValueError is raised if the input has a non-integral value. Examples
def _get_reals(cls, factors): reals = [] for factor, k in factors: real_part = cls._get_reals_sqf(factor) reals.extend([(root, factor, k) for root in real_part]) return reals
Compute real root isolating intervals for a list of factors.
def _get_complexes(cls, factors): complexes = [] for factor, k in factors: complex_part = cls._get_complexes_sqf(factor) complexes.extend([(root, factor, k) for root in complex_part]) return complexes
Compute complex root isolating intervals for a list of factors.
def _reals_index(cls, reals, index): i = 0 for j, (_, factor, k) in enumerate(reals): if index < i + k: poly, index = factor, 0 for _, factor, _ in reals[:j]: if factor == poly: index += 1 return poly, index else: ...
Map initial real root index to an index in a factor where the root belongs.
def _real_roots(cls, poly):
    """Get the indexed real roots of a composite polynomial via its factorisation."""
    _, factors = poly.factor_list()
    reals = cls._reals_sorted(cls._get_reals(factors))
    count = cls._count_roots(reals)
    return [cls._reals_index(reals, index) for index in range(count)]
Get real roots of a composite polynomial.
def prefixes(seq):
    """Yield every prefix of *seq*, shortest first."""
    for end in range(1, len(seq) + 1):
        yield seq[:end]
Generate all prefixes of a sequence. Examples
def postfixes(seq):
    """Yield every postfix (suffix) of *seq*, shortest first."""
    for start in range(len(seq) - 1, -1, -1):
        yield seq[start:]
Generate all postfixes of a sequence. Examples
def telescopic_direct(L, R, n, limits):
    """Direct summation of the terms of a telescopic sum.

    L is the lower-index term, R the higher-index term, n the difference
    between the indexes, and limits = (i, a, b) the summation variable
    and its bounds.
    """
    i, a, b = limits
    total = 0
    for offset in range(n):
        total += L.subs({i: a + offset}) + R.subs({i: b - offset})
    return total
Returns the direct summation of the terms of a telescopic sum L is the term with lower index R is the term with higher index n difference between the indexes of L and R For example.
def preprocess_for_cse(expr, optimizations):
    """Run each non-None preprocessor over *expr* in order; return the result.

    optimizations is a list of (preprocessor, postprocessor) pairs; only the
    preprocessors are applied here.
    """
    for preprocessor, _postprocessor in optimizations:
        if preprocessor is not None:
            expr = preprocessor(expr)
    return expr
Preprocess an expression to optimize for common subexpression elimination. Parameters expr : diofant expression The target expression to optimize. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs. Returns expr : diofant expression The transformed expression.
def list_all_boards(client):
    """Print index and name of every board so IDs can be picked for later calls."""
    for counter, board in enumerate(client.list_boards()):
        print(counter, board.name)
get list of all boards to determine the ID for further functions
def print_cards_from_board(board_id, client): all_boards = client.list_boards() my_board = all_boards[board_id] all_lists_on_board = my_board.list_lists() for list in all_lists_on_board: for card in list.list_cards(): print(str(card.board.name + ":" + card.description) + ":" + str(ca...
Access board with ID board_id in the client instance and print all non-archived lists with their non-archived cards
def _load_pyfunc_conf_with_model(model_path):
    """Load the `python_function` flavor configuration for *model_path*.

    BUG(review): this function calls itself unconditionally, so any call
    recurses forever. The inner call was presumably meant to target a sibling
    helper that returns a (conf, model) pair — confirm the intended callee
    and fix the name.
    """
    (name, _) = _load_pyfunc_conf_with_model(model_path)
    return name
Loads the `python_function` flavor configuration for the specified model or throws an exception if the model does not contain the `python_function` flavor.
def feature_flag_name(f):
    """Return the feature flag name unchanged (plugin flags are not prefixed)."""
    return f
For the time being, we want the features for plugins to be treated separately than integrations (integration features prefix with integrations-). This is because in Saas Sentry, users can install the Trello and Asana plugins but not Jira even though both utilize issue-commits. By not prefixing, we can avoid making new ...
def local_cache():
    """Enable the process-global local cache for the duration of the context.

    Not reentrant: nested use raises RuntimeError. On exit the cache is
    disabled and the generation counter is bumped so stale entries from this
    scope are ignored.
    """
    global _local_cache_enabled, _local_cache_generation
    if _local_cache_enabled:
        raise RuntimeError("nested use of process global local cache")
    _local_cache_enabled = True
    try:
        yield
    finally:
        _local_cache_enabled = False
        _local_cache_generation += 1
Enables local caching for the entire process.
def partition(iterable, n):
    """Split *iterable* into consecutive n-tuples; its length must be a multiple of n."""
    assert len(iterable) % n == 0
    it = iter(iterable)
    return zip(*[it] * n)
Partitions an iterable into tuples of size n. Expects the iterable length to be a multiple of n.
def hide_environment_none(apps, schema_editor):
    """Hide EnvironmentProjects whose environment is named 'none' (blacklisted).

    Rows are saved one at a time — slower, but avoids locking the table.
    """
    EnvironmentProject = apps.get_model("sentry", "EnvironmentProject")
    hidden_rows = EnvironmentProject.objects.filter(environment__name="none")
    for project in hidden_rows:
        project.is_hidden = True
        project.save()
Hide environments that are named none, since they're blacklisted and no longer can be created. We should iterate over each environment row individually in python instead so that we don't lock the DB up. This is far slower but much safer
def translate(pat): i, n = 0, len(pat) res = "" while i < n: c = pat[i] i = i + 1 if c == "\\" and i < n: res += re.escape(pat[i]) i += 1 elif c == "*": res = res + ".*" else: res = res + re.escape(c) return "^" + re...
Translate a shell PATTERN to a regular expression.
def forwards(apps, schema_editor): DiscoverSavedQuery = apps.get_model("sentry", "DiscoverSavedQuery") for query in DiscoverSavedQuery.objects.filter(version__isnull=True).all(): if "version" in query.query: query.version = query.query.get("version", 1) del query.query["version"]...
Backfill the saved queries with their version.
def playlist_json(tracks):
    """Convert (title, artist, explicit) tuples to dicts, in place; return the list."""
    for index, track in enumerate(tracks):
        tracks[index] = {
            "title": track[0],
            "artist": track[1],
            "explicit": track[2],
        }
    return tracks
Generate a JSON List of Dictionaries for Each Track.
def _parse_hal(raw):
    """Parse the JSON body of a HAL response into Python objects."""
    return json.loads(raw)
Parses the JSON body of a response.
def series_sum(n):
    """Return the nth partial sum of 1 + 1/4 + 1/7 + ... formatted to 2 decimals."""
    if n == 0:
        return str("%.2f" % 0)
    if n == 1:
        return str("%.2f" % 1)
    total = 1.0
    denominator = 4
    for _ in range(n - 1):
        total += 1.0 / denominator
        denominator += 3
    return str("%.2f" % total)
This function returns the nth sum to the 2nd decimal place.
def words_to_marks(s):
    """Sum letter positions (a=1 ... z=26) over the lowercase string *s*."""
    values = {letter: pos for pos, letter in enumerate(string.ascii_lowercase, 1)}
    return sum(values[ch] for ch in s)
This function takes a string and gives you an int value for the string, where a=1, b=2, and so on.
def summy(string_of_ints):
    """Sum the space-separated integers in *string_of_ints*."""
    return sum(int(token) for token in string_of_ints.split())
This function takes a string of numbers with spaces and returns the sum of the numbers.
def sum_two_smallest_numbers(numbers):
    """Return the sum of the two smallest values.

    Note: mutates *numbers* — the two picked values are removed, matching the
    original behaviour.
    """
    picked = []
    for _ in range(2):
        smallest = min(int(value) for value in numbers)
        picked.append(smallest)
        numbers.remove(smallest)
    return sum(picked)
This function takes a list of numbers, adds the two lowest integers, and returns the value.
def longest(s1, s2):
    """Return the sorted distinct letters occurring in either input string."""
    distinct = set(s1 + s2)
    return "".join(sorted(distinct))
This function takes two strings and gives you a string of the unique letters from each string.
def solution(number):
    """Sum all multiples of 3 or 5 strictly below *number*."""
    return sum(i for i in range(number) if i % 3 == 0 or i % 5 == 0)
This function returns all multiples of 3 and 5 below the input number
def find_it(seq):
    """Return the first value occurring an odd number of times in *seq* (None if none)."""
    for value in seq:
        if seq.count(value) % 2 == 1:
            return value
This function returns the integer that is in the list an odd number of times
End of preview. Expand in Data Studio

Code and docstrings of maximum length 400. Code strings have stripped type hints and are formatted with black.

Downloads last month
3