code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def get_mod_subcmds(mod): ## Look in modules attributes subcmds = get_obj_subcmds(mod) path = os.path.dirname(os.path.realpath(mod.__file__)) if mod.__package__ is None: sys.path.insert(0, os.path.dirname(path)) mod.__package__ = kf.basename(path) for module_name in get_modu...
Fetch actions in the same directory as this python module; modules loaded are of the form: '%s_*.py' % prefix
def initialize(self): if not os.path.exists(self.root_dir): os.makedirs(self.root_dir) assert os.path.isdir(self.root_dir), "%s is not a directory! Please move or remove it." % self.root_dir for d in ["bin", "lib", "include"]: target_path = os.path.join(self.root...
Generate the root directory if it doesn't already exist
def finalize(self):
    """Close any rc/env file handles that are still open."""
    for handle in (self.rc_file, self.env_file):
        if handle:
            handle.close()
finalize any open file handles
def remove(self):
    """Remove the sprinter root directory, if it exists.

    Closes any open rc/env handles first so the files are not
    deleted out from under live file objects.
    """
    if self.rc_file:
        self.rc_file.close()
    if self.env_file:
        self.env_file.close()
    # The doc promises "if it exists": guard rmtree so a missing
    # root_dir does not raise FileNotFoundError.
    if os.path.exists(self.root_dir):
        shutil.rmtree(self.root_dir)
Removes the sprinter directory, if it exists
def symlink_to_bin(self, name, path):
    """Symlink *path* as *name* in the bin folder and mark it user-readable/executable."""
    self.__symlink_dir("bin", name, path)
    bin_entry = os.path.join(self.root_dir, "bin", name)
    mode = os.stat(path).st_mode | stat.S_IXUSR | stat.S_IRUSR
    os.chmod(bin_entry, mode)
Symlink an object at path to name in the bin folder.
def remove_from_bin(self, name):
    """Remove the named object from the bin folder."""
    target = os.path.join(self.root_dir, "bin", name)
    self.__remove_path(target)
Remove an object from the bin folder.
def remove_from_lib(self, name):
    """Remove the named object from the lib folder."""
    target = os.path.join(self.root_dir, "lib", name)
    self.__remove_path(target)
Remove an object from the lib folder.
def remove_feature(self, feature_name):
    """Remove a feature from the environment root folder."""
    self.clear_feature_symlinks(feature_name)
    install_dir = self.install_directory(feature_name)
    if os.path.exists(install_dir):
        self.__remove_path(install_dir)
Remove a feature from the environment root folder.
def clear_feature_symlinks(self, feature_name): logger.debug("Clearing feature symlinks for %s" % feature_name) feature_path = self.install_directory(feature_name) for d in ('bin', 'lib'): if os.path.exists(os.path.join(self.root_dir, d)): for link in os.list...
Clear the symlinks for a feature in the symlinked path
def add_to_env(self, content): if not self.rewrite_config: raise DirectoryException("Error! Directory was not intialized w/ rewrite_config.") if not self.env_file: self.env_path, self.env_file = self.__get_env_handle(self.root_dir) self.env_file.write(content + '...
add content to the env script.
def add_to_rc(self, content):
    """Append *content* (plus a newline) to the rc script.

    Lazily opens the rc handle on first use.
    :raises DirectoryException: if the directory was not initialized
        with rewrite_config.
    """
    if not self.rewrite_config:
        # fixed typo in user-facing message: "intialized" -> "initialized"
        raise DirectoryException("Error! Directory was not initialized w/ rewrite_config.")
    if not self.rc_file:
        self.rc_path, self.rc_file = self.__get_rc_handle(self.root_dir)
    self.rc_file.write(content + '\n')
add content to the rc script.
def add_to_gui(self, content): if not self.rewrite_config: raise DirectoryException("Error! Directory was not intialized w/ rewrite_config.") if not self.gui_file: self.gui_path, self.gui_file = self.__get_gui_handle(self.root_dir) self.gui_file.write(content + '...
add content to the gui script.
def __remove_path(self, path): curpath = os.path.abspath(os.curdir) if not os.path.exists(path): logger.warn("Attempted to remove a non-existent path %s" % path) return try: if os.path.islink(path): os.unlink(path) elif os....
Remove an object
def __get_env_handle(self, root_dir): env_path = os.path.join(root_dir, '.env') gui_path = os.path.join(root_dir, '.gui') fh = open(env_path, "w+") # .env will source utils.sh if it hasn't already fh.write(source_template % (gui_path, gui_path)) fh.write(source_t...
get the filepath and filehandle to the .env file for the environment
def __get_rc_handle(self, root_dir):
    """Open and return (path, handle) for the environment's .rc file.

    The .rc file is written so that it always sources .env first.
    """
    rc_path = os.path.join(root_dir, '.rc')
    env_path = os.path.join(root_dir, '.env')
    handle = open(rc_path, "w+")
    handle.write(source_template % (env_path, env_path))  # .rc always sources .env
    return rc_path, handle
get the filepath and filehandle to the rc file for the environment
def __get_gui_handle(self, root_dir):
    """Return (path, handle) for the environment's .gui file, opened for writing."""
    gui_path = os.path.join(root_dir, '.gui')
    return gui_path, open(gui_path, "w+")
get the filepath and filehandle to the .gui file for the environment
def __symlink_dir(self, dir_name, name, path): target_dir = os.path.join(self.root_dir, dir_name) if not os.path.exists(target_dir): os.makedirs(target_dir) target_path = os.path.join(self.root_dir, dir_name, name) logger.debug("Attempting to symlink %s to %s..." % (...
Symlink an object at path to name in the dir_name folder. Remove it if it already exists.
def create(self, options=None): if options is None: raise ValueError("Please pass in an options dict") if not _has_content(options): raise NoContentError("must supply 'document_content' or 'document_url'") default_options = { "name": "default", ...
Create a new document job (sync or async).
def list_docs(self, options=None): if options is None: raise ValueError("Please pass in an options dict") default_options = { "page": 1, "per_page": 100, "raise_exception_on_failure": False, "user_credentials": self.api_key, }...
Return list of previously created documents.
def status(self, status_id, raise_exception_on_failure=False): query = {"output": "json", "user_credentials": self.api_key} resp = requests.get( "%sstatus/%s" % (self._url, status_id), params=query, timeout=self._timeout ) if raise_exception_on_failure and resp.sta...
Return the status of the generation job.
def download(self, download_key, raise_exception_on_failure=False): query = {"output": "json", "user_credentials": self.api_key} resp = requests.get( "%sdownload/%s" % (self._url, download_key), params=query, timeout=self._timeout, ) if raise...
Download the file represented by the download_key.
def merge_INIConf(a, b):
    """Overlay configuration *b* onto *a* (b wins on duplicate options) and return *a*."""
    for section in b.sections():
        if not a.has_section(section):
            a[section] = b[section]
        else:
            for option, value in b[section].items():
                a[section][option] = value
    return a
用 b 的内容覆盖 a 的内容(若重名),并返回 a
def copy_from_dict(self, adict, parent=None): if not parent: parent = self for k,v in adict.items(): if isinstance(v, dict): vDict = PYConf(v) self.copy_from_dict(v, vDict) parent[k] = vDict else: ...
从一个已经存在的 dict 中复制所有的值。 :param adict: 被复制的 dict。 :type adict: dict :param parent: 复制到哪个父对象。 若为 None 则复制到 self 。 :type parent: rookout.PYConf
def dump(self, human=False):
    """Render self as a string.

    :param bool human: when True, insert newlines so the output is easier to read.
    """
    text = str(self)
    if not human:
        return text
    # applied in this exact order to match the documented human layout
    for old, new in ((", '", ",\n'"), ("{", "{\n"), ("}", "\n}"), ("[", "[\n"), ("]", "\n]")):
        text = text.replace(old, new)
    return text
将自身内容打印成字符串 :param bool human: 若值为 True ,则打印成易读格式。
def save_to_file(self, path, human=True):
    """Write this object's dump() output to *path*.

    :param str path: destination file path.
    :param bool human: see dump().
    """
    content = self.dump(human)
    write_file(path, content)
    slog.info("Save %a done.", path)
将自身内容保存到文件。 :param str path: 保存的文件路径。 :param bool human: 参见 :func:`dump()`
def read_from_file(self, path):
    """Load state from a text file containing a Python dict literal (dump() format).

    :param str path: file to read.
    :returns: True on success, False when the file does not exist.
    """
    if not os.path.exists(path):
        slog.warning("The file %s is not exist.", path)
        return False
    txt = read_file(path)
    # SECURITY NOTE(review): eval() executes arbitrary code from the file;
    # for untrusted input, ast.literal_eval would be the safe choice.
    dic = eval(txt)
    self.copy_from_dict(dic)
    return True
从一个文本文件中读入信息。 假设该文本文件的格式与 :func:`dump()` 相同。 :param str path: 待读入的文件路径。
def _set_parameters(self, parameters): nr_f = self.f.size # sort out parameters rho0, m, tau, c = self._sort_parameters(parameters) newsize = (nr_f, len(m)) # rho0_resized = np.resize(rho0, newsize) m_resized = np.resize(m, newsize) tau_resized = np.res...
Sort out the various possible parameter inputs and return a config object (dict) We have multiple input formats: 1) a list, tuple, or numpy.ndarray, containing the linear parameters in the following order: * for single term: rho0, m1, tau1, c1 * for multiple termss: rho...
def response(self, parameters): r # get a config object self._set_parameters(parameters) terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c))) # sum up terms specs = np.sum(terms, axis=1) rcomplex = self.rho0 * (1 - specs) response = sip_re...
r"""Complex response of the Cole-Cole model:: :math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j \omega \tau_i)^c_i})\right)` Parameters ---------- parameters: list or tuple or numpy.ndarray Cole-Cole model parameters: rho0, m, tau, c (all linear) ...
def dre_drho0(self, pars):
    r"""Partial derivative of the real parts with respect to :math:`\rho_0`.

    Restores a proper raw docstring here: the original line carried a stray
    ``r`` token left over from a stripped ``r\"\"\"`` docstring prefix.
    """
    self._set_parameters(pars)
    numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
    term = numerator / self.denom
    specs = np.sum(term, axis=1)
    return 1 - specs
r""" Compute partial derivative of real parts with respect to :math:`\rho_0` :math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 - \frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` Note...
def dre_dlog10rho0(self, pars):
    """Partial derivative of the real parts w.r.t. log10(rho0).

    Chain rule applied to the linear rho0 derivative.
    """
    linear_response = self.dre_drho0(pars)
    return np.log(10) * self.rho0 * linear_response
Compute partial derivative of real parts to log10(rho0)
def dre_dm(self, pars):
    r"""Partial derivative of the real parts with respect to the chargeability m.

    Restores a proper raw docstring: the original line carried a stray ``r``
    token left over from a stripped docstring prefix.
    """
    self._set_parameters(pars)
    numerator = -self.otc * (np.cos(self.ang) + self.otc)
    result = numerator / self.denom
    return self.rho0 * result
r""" :math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m (\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
def dim_dm(self, pars):
    r"""Partial derivative of the imaginary parts with respect to m.

    Restores a proper raw docstring: the original line carried a stray ``r``
    token left over from a stripped docstring prefix.
    """
    self._set_parameters(pars)
    numerator = -self.otc * np.sin(self.ang)
    result = numerator / self.denom
    return self.rho0 * result
r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m (\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
def dim_dtau(self, pars): r self._set_parameters(pars) # term1 nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\ self.c * self.tau ** (self.c - 1) term1 = nom1 / self.denom # term2 nom2 = (self.m * self.otc * np.sin(self.ang)) *\ (2...
r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0 \frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2} \right] \cdot \left[ 2 \omeg...
def dim_dc(self, pars): r self._set_parameters(pars) # term1 nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\ np.sin(self.ang) nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang) term1 = (nom1a + nom1b) / self.denom # term2 ...
r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omeg...
def Jacobian_re_im(self, pars): r partials = [] # partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :]) partials.append(self.dre_drho0(pars)[:, np.newaxis]) partials.append(self.dre_dm(pars)) # partials.append(self.dre_dlog10tau(pars)) partials.append(self...
r""" :math:`J` >>> import sip_models.res.cc as cc >>> import numpy as np >>> f = np.logspace(-3, 3, 20) >>> pars = [100, 0.1, 0.04, 0.8] >>> obj = cc.cc(f) >>> J = obj.Jacobian_re_im(pars)
def read_dict_or_list_from_json(desired_type: Type[dict], file_object: TextIOBase, logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]: # lazy import in order not to force use of jprops import json res = json.load(file_object) # convert ...
Helper method to read a dictionary from a .json file using json library :param file_object: :return:
def get_default_collection_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]: return [SingleFileParserFunction(parser_function=read_dict_or_list_from_json, streaming_mode=True, custom_name='read_dict_or_list_from_json', ...
Utility method to return the default parsers able to parse a dictionary from a file. :return:
def get_default_collection_converters(conversion_finder: ConversionFinder) -> List[Union[Converter[Any, dict], Converter[dict, Any]]]: return [ConverterFunction(from_type=List, to_type=Set, conversion_method=list_to_set, custom_name='list_to_set', function_args={'conversion_finder...
Utility method to return the default converters associated to dict (from dict to other type, and from other type to dict) :return:
def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]: # nb of file children n_children = len(obj_on_fs.get_multifile_children()) # first extract base co...
Simply inspects the required type to find the base type expected for items of the collection, and relies on the ParserFinder to find the parsing plan :param obj_on_fs: :param desired_type: :param logger: :return:
def dispatch(self, producer=None): log.info('@Event.dispatch `{}` with subject `{}`' .format(self.name, self.subject)) producer = (producer or Registry.get_producer()) if not producer: raise MissingProducerError('You have not registered a Producer') ...
Dispatch the event, sending a message to the queue using a producer. :param producer: optional `Producer` to replace the default one.
def get_stackdelta(op):
    """Return the number of elements instruction *op* adds to the stack.

    Looks op.opname up in the opstackd table; table entries may be callables
    that are evaluated with the instruction itself.

    # Raises
    KeyError: if the instruction *op* is not supported.
    """
    delta = opstackd[op.opname]
    return delta(op) if callable(delta) else delta
Returns the number of elements that the instruction *op* adds to the stack. # Arguments op (dis.Instruction): The instruction to retrieve the stackdelta value for. # Raises KeyError: If the instruction *op* is not supported.
def load_actions(spec, group=None, expr_parser=None): if expr_parser is None: expr_parser = ExpressionParser() actions = ActionList() for name in spec: options = {} as_ = None decorators = [] if isinstance(name, dict): actionspec = dict(name) ...
Each item can be an action name as a string or a dict. When using a dict, one key/item pair must be the action name and its options and the rest action decorator names and their options. Example: load_actions(["login_required", {"flash": {"message": "hello world", "label": "warning"}}])
def load_grouped_actions(spec, default_group=None, key_prefix="actions", pop_keys=False, expr_parser=None): actions = ActionList() if expr_parser is None: expr_parser = ExpressionParser() for key in spec.keys(): if key != key_prefix and not key.startswith(key_prefix + "."): ...
Instantiates actions from a dict. Will look for a key named key_prefix and for keys starting with key_prefix followed by a dot and a group name. A group name can be any string and can be used later to filter actions. Values associated to these keys should be lists that will be loaded using load_actions()
def create_action_from_dict(name, spec, base_class=ActionsAction, metaclass=type, pop_keys=False): actions = load_grouped_actions(spec, pop_keys=pop_keys) attrs = {"actions": actions, "name": name} if "as" in spec: attrs["as_"] = spec["as"] if pop_keys: del spec["as"] fo...
Creates an action class based on a dict loaded using load_grouped_actions()
def calculate_inverse_document_frequencies(self):
    """Compute IDF for every term: log(N / document-frequency).

    Document frequency counts the number of *documents* containing a term;
    the original counted every occurrence, so a word repeated in one
    document inflated its document frequency. Iterating set(doc) fixes that.
    (Also: items() instead of py2-only iteritems().)
    """
    for doc in self.processed_corpus:
        # a repeated word in one document must count only once
        for word in set(doc):
            self.inverse_document_frequencies[word] += 1
    for key, value in self.inverse_document_frequencies.items():
        idf = log((1.0 * len(self.corpus)) / value)
        self.inverse_document_frequencies[key] = idf
Q.calculate_inverse_document_frequencies() -- measures how much information the term provides, i.e. whether the term is common or rare across all documents. This is obtained by dividing the total number of documents by the number of documents containing the term, and then taking the logarithm of that quoti...
def calculate_term_frequencies(self): for doc in self.processed_corpus: term_frequency_doc = defaultdict(int) for word in doc: term_frequency_doc[word] += 1 for key,value in term_frequency_doc.iteritems(): term_frequency_doc[key] = (1.0 * value) / len(doc) self.term_frequencies.append(term_...
Q.calculate_term_frequencies() -- calculate the number of times each term t occurs in document d.
def match_query_to_corpus(self): ranking = [] for i,doc in enumerate(self.processed_corpus): rank = 0.0 for word in self.processed_query: if word in doc: rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word] ranking.append((rank,i)) matching_corpus_index = 0 max_r...
Q.match_query_to_corpus() -> index -- return the matched corpus index of the user query
def process_corpus(self):
    """Tokenize, stem, and stop-word/punctuation-filter every document in corpus_list."""
    for raw_doc in self.corpus_list:
        tokens = wt(raw_doc)
        cleaned = [
            self.stemmer.stem(tok)
            for tok in tokens
            if tok not in self.stop_words and tok not in self.punctuation
        ]
        self.processed_corpus.append(cleaned)
Q.process_corpus() -- processes the queries defined by us, by tokenizing, stemming, and removing stop words.
def process_query(self):
    """Tokenize and stem the user query, dropping stop words and punctuation."""
    self.query = wt(self.query)
    self.processed_query = [
        self.stemmer.stem(tok)
        for tok in self.query
        if tok not in self.stop_words and tok not in self.punctuation
    ]
Q.process_query() -- processes the user query, by tokenizing and stemming words.
def query(self, query):
    """Return the matched category string for a user query string."""
    self.query = query
    self.process_query()
    index = self.match_query_to_corpus()
    return self.category_list[index].strip()
Q.query(query string) -> category string -- return the matched category for any user query
def load_manifest(raw_manifest, namespace=None, **kwargs): if isinstance(raw_manifest, configparser.RawConfigParser): return Manifest(raw_manifest) manifest = create_configparser() if not manifest.has_section('config'): manifest.add_section('config') _load_manifest_interpret_sour...
wrapper method which generates the manifest from various sources
def _load_manifest_interpret_source(manifest, source, username=None, password=None, verify_certificate=True, do_inherit=True): try: if isinstance(source, string_types): if source.startswith("http"): # if manifest is a url _load_manifest_from_url(manifest, sou...
Interpret the <source>, and load the results into <manifest>
def _load_manifest_from_url(manifest, url, verify_certificate=True, username=None, password=None): try: if username and password: manifest_file_handler = StringIO(lib.authenticated_get(username, password, url, verify=verify_...
load a url body into a manifest
def _load_manifest_from_file(manifest, path): path = os.path.abspath(os.path.expanduser(path)) if not os.path.exists(path): raise ManifestException("Manifest does not exist at {0}!".format(path)) manifest.read(path) if not manifest.has_option('config', 'source'): manifest.set('confi...
load manifest from file
def formula_sections(self):
    """Return all formula-related sections, ordered by the dependency tree when one exists."""
    if self.dtree is None:
        return [name for name in self.manifest.sections() if name != "config"]
    return self.dtree.order
Return all sections related to a formula, re-ordered according to the "depends" section.
def is_affirmative(self, section, option):
    """True when the section/option pair exists and holds a truthy value."""
    if not self.has_option(section, option):
        return False
    return lib.is_affirmative(self.get(section, option))
Return true if the section option combo exists and it is set to a truthy value.
def write(self, file_handle):
    """Persist the current state: flush input values into config, then write the manifest."""
    for key, value in self.inputs.write_values().items():
        self.set('config', key, value)
    self.set('config', 'namespace', self.namespace)
    self.manifest.write(file_handle)
write the current state to a file manifest
def get_context_dict(self): context_dict = {} for s in self.sections(): for k, v in self.manifest.items(s): context_dict["%s:%s" % (s, k)] = v for k, v in self.inputs.values().items(): context_dict["config:{0}".format(k)] = v context_dict....
return a context dict of the desired state
def get(self, section, key, default=MANIFEST_NULL_KEY):
    """Return the option value, or *default* when the option is missing and a default was given."""
    if default is not MANIFEST_NULL_KEY and not self.manifest.has_option(section, key):
        return default
    return self.manifest.get(section, key)
Returns the value if it exist, or default if default is set
def __parse_namespace(self): if self.manifest.has_option('config', 'namespace'): return self.manifest.get('config', 'namespace') elif self.manifest.has_option('config', 'source'): return NAMESPACE_REGEX.search(self.manifest.get('config', 'source')).groups()[0] el...
Parse the namespace from various sources
def __generate_dependency_tree(self): dependency_dict = {} for s in self.manifest.sections(): if s != "config": if self.manifest.has_option(s, 'depends'): dependency_list = [d.strip() for d in re.split('\n|,', self.manifest.get(s, 'depends'))] ...
Generate the dependency tree object
def __substitute_objects(self, value, context_dict): if type(value) == dict: return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()]) elif type(value) == str: try: return value % context_dict except KeyError: ...
recursively substitute value with the context_dict
def __setup_inputs(self): input_object = Inputs() # populate input schemas for s in self.manifest.sections(): if self.has_option(s, 'inputs'): input_object.add_inputs_from_inputstring(self.get(s, 'inputs')) # add in values for k, v in self.ite...
Setup the inputs object
def validate(self): if self.target: for k in self.target.keys(): if k in self.deprecated_options: self.logger.warn( self.deprecated_options[k].format(option=k, feature=self.feature_name)) elif (k not in self.valid_o...
validates the feature configuration, and returns a list of errors (empty list if no error) validate should: * required variables * warn on unused variables errors should either be reported via self._log_error(), or raise an exception
def should_run(self): should_run = True config = self.target or self.source if config.has('systems'): should_run = False valid_systems = [s.lower() for s in config.get('systems').split(",")] for system_type, param in [('is_osx', 'osx'), ...
Returns true if the feature should run
def resolve(self):
    """Carry source values over to the target config, skipping blacklisted or already-set keys."""
    if not (self.source and self.target):
        return
    for key in self.source.keys():
        carry = key not in self.dont_carry_over_options and not self.target.has(key)
        if carry:
            self.target.set(key, self.source.get(key))
Resolve differences between the target and the source configuration
def _log_error(self, message):
    """Record a feature error, keyed by (feature_name, formula), in the environment."""
    error_key = (self.feature_name, self.target.get('formula'))
    self.environment.log_feature_error(error_key, "ERROR: " + message)
Log an error for the feature
def _prompt_value(self, key, prompt_string, default=None, only_if_empty=True): main_manifest = self.target or self.source if only_if_empty and main_manifest.has(key): return main_manifest.get(key) prompt_default = default if self.source and self.source.has(key): ...
prompts the user for a value, and saves it to either the target or source manifest (whichever is appropriate for the phase) this method takes will default to the original value passed by the user in the case one exists. e.g. if a user already answered 'yes' to a question, it will use 'y...
def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False, allow_args=True, callblock_args=None): if endtag is None: endtag = "end" + tag def decorator(f): def parse(self, parser): lineno = parser.stream.next().lineno args = [] kwargs = []...
Decorator to easily create a jinja extension which acts as a fragment.
def jinja_block_as_fragment_extension(name, tagname=None, classname=None): if tagname is None: tagname = name if classname is None: classname = "%sBlockFragmentExtension" % name.capitalize() return type(classname, (BaseJinjaBlockAsFragmentExtension,), { "tags": set([tagname]), "...
Creates a fragment extension which will just act as a replacement of the block statement.
def dir_visitor(dirname, visitor):
    """Recursively invoke *visitor* on dirname and every subdirectory beneath it.

    :param dirname: directory to start from; all subdirs are visited.
    :param visitor: callable invoked with each visited directory path.
    """
    visitor(dirname)
    for entry in os.listdir(dirname):
        entry_path = os.path.join(dirname, entry)
        if os.path.isdir(entry_path):
            dir_visitor(entry_path, visitor)
_dir_visitor_ walk through all files in dirname, find directories and call the callable on them. :param dirname: Name of directory to start visiting, all subdirs will be visited :param visitor: Callable invoked on each dir visited
def replicate_directory_tree(input_dir, output_dir): def transplant_dir(target, dirname): x = dirname.replace(input_dir, target) if not os.path.exists(x): LOGGER.info('Creating: {}'.format(x)) os.makedirs(x) dir_visitor( input_dir, functools.partial(...
_replicate_directory_tree_ clone dir structure under input_dir into output dir All subdirs beneath input_dir will be created under output_dir :param input_dir: path to dir tree to be cloned :param output_dir: path to new dir where dir structure will be created
def find_templates(input_dir): templates = [] def template_finder(result, dirname): for obj in os.listdir(dirname): if obj.endswith('.mustache'): result.append(os.path.join(dirname, obj)) dir_visitor( input_dir, functools.partial(template_finder, te...
_find_templates_ traverse the input_dir structure and return a list of template files ending with .mustache :param input_dir: Path to start recursive search for mustache templates :returns: List of file paths corresponding to templates
def find_copies(input_dir, exclude_list): copies = [] def copy_finder(copies, dirname): for obj in os.listdir(dirname): pathname = os.path.join(dirname, obj) if os.path.isdir(pathname): continue if obj in exclude_list: continue ...
find files that are not templates and not in the exclude_list for copying from template to image
def render_template(template_in, file_out, context): renderer = pystache.Renderer() result = renderer.render_path(template_in, context) with open(file_out, 'w') as handle: LOGGER.info('Rendering: {} to {}'.format(template_in, file_out)) handle.write(result) shutil.copymode(template_...
_render_template_ Render a single template file, using the context provided and write the file out to the location specified #TODO: verify the template is completely rendered, no missing values
def copy_file(src, target):
    """Copy *src* to *target*, preserving the permission bits."""
    LOGGER.info("Copying {} to {}".format(src, target))
    for transfer in (shutil.copyfile, shutil.copymode):
        transfer(src, target)
copy_file copy source to target
def process_templates(input_dir, target_dir, context): if not target_dir.endswith('/'): target_dir = "{}/".format(target_dir) if not os.path.exists(target_dir): LOGGER.info('Creating: {}'.format(target_dir)) os.makedirs(target_dir) replicate_directory_tree(input_dir, target_dir)...
_process_templates_ Given the input dir containing a set of template, clone the structure under that directory into the target dir using the context to process any mustache templates that are encountered
def process_copies(input_dir, target_dir, excludes):
    """Copy non-template files into target_dir.

    Assumes the directory structure has already been replicated there.
    """
    for src in find_copies(input_dir, excludes):
        destination = src.replace(input_dir, target_dir)
        copy_file(src, destination)
_process_copies_ Handles files to be copied across, assumes that dir structure has already been replicated
def newDevice(deviceJson, lupusec): type_tag = deviceJson.get('type') if not type_tag: _LOGGER.info('Device has no type') if type_tag in CONST.TYPE_OPENING: return LupusecBinarySensor(deviceJson, lupusec) elif type_tag in CONST.TYPE_SENSOR: return LupusecBinarySensor(devic...
Create new device object for the given type.
def get_devices(self, refresh=False, generic_type=None): _LOGGER.info("Updating all devices...") if refresh or self._devices is None: if self._devices is None: self._devices = {} responseObject = self.get_sensors() if (responseObject and ...
Get all devices from Lupusec.
def parse_from_dict(json_dict): history_columns = json_dict['columns'] history_list = MarketHistoryList( upload_keys=json_dict['uploadKeys'], history_generator=json_dict['generator'], ) for rowset in json_dict['rowsets']: generated_at = parse_datetime(rowset['generatedAt']...
Given a Unified Uploader message, parse the contents and return a MarketHistoryList instance. :param dict json_dict: A Unified Uploader message as a dict. :rtype: MarketOrderList :returns: An instance of MarketOrderList, containing the orders within.
def encode_to_json(history_list): rowsets = [] for items_in_region_list in history_list._history.values(): region_id = items_in_region_list.region_id type_id = items_in_region_list.type_id generated_at = gen_iso_datetime_str(items_in_region_list.generated_at) rows = [] ...
Encodes this MarketHistoryList instance to a JSON string. :param MarketHistoryList history_list: The history instance to serialize. :rtype: str
def load(self, configuration): try: self.config = yaml.load(open(configuration, "rb")) except IOError: try: self.config = yaml.load(configuration) except ParserError, e: raise ParserError('Error parsing config: %s' % e) ...
Load a YAML configuration file. :param configuration: Configuration filename or YAML string
def instances(self, test_type=".*"): import re data = {} for k, v in self.instances_dict.iteritems(): if re.match(test_type, v.get('test_type'), re.IGNORECASE): if 'filter_type' in v: hostfilter = { 'filtertype': v[...
Returns a dict of all instances defined using a regex :param test_type: Regular expression to match for self.instance['test_type'] value names
def none_to_blank(s, exchange=''):
    """Replace None with *exchange* (default ''), recursing into lists.

    :param s: value (or list of values) to convert.
    :param exchange: replacement returned for None.
    :returns: *exchange* for None, a converted list for lists,
        otherwise unicode(s).
    """
    if isinstance(s, list):
        # bug fix: propagate *exchange* into the recursion — the original
        # dropped it, so nested Nones always became '' regardless of the
        # caller's replacement character.
        return [none_to_blank(item, exchange) for item in s]
    return exchange if s is None else unicode(s)
Replaces NoneType with '' >>> none_to_blank(None, '') '' >>> none_to_blank(None) '' >>> none_to_blank('something', '') u'something' >>> none_to_blank(['1', None]) [u'1', ''] :param s: String to replace :param exchange: Character to return for None, default is blank ('') :ret...
def make_good_url(url=None, addition="/"):
    """Join *addition* onto *url* with exactly one slash between them.

    Returns None when url is None or when either argument is not a str.
    """
    if url is None:
        return None
    if not isinstance(url, str) or not isinstance(addition, str):
        return None
    return "%s/%s" % (url.rstrip('/'), addition.lstrip('/'))
Appends addition to url, ensuring the right number of slashes exist and the path doesn't get clobbered. >>> make_good_url('http://www.server.com/anywhere', 'else') 'http://www.server.com/anywhere/else' >>> make_good_url('http://test.com/', '/somewhere/over/the/rainbow/') 'http://test.com/somewhere/...
def build_kvasir_url(
        proto="https", server="localhost", port="8443",
        base="Kvasir", user="test", password="test",
        path=KVASIR_JSONRPC_PATH):
    """Assemble a full Kvasir URL from its components.

    NOTE(review): the credential segment renders as user@password and the
    host follows a slash — unusual, but it matches the documented examples;
    confirm against the upstream API before changing.
    """
    pieces = (proto, '://', user, '@', password, '/', server, ':', port, '/', base)
    return make_good_url(''.join(pieces), path)
Creates a full URL to reach Kvasir given specific data >>> build_kvasir_url('https', 'localhost', '8443', 'Kvasir', 'test', 'test') 'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc' >>> build_kvasir_url() 'https://test@test/localhost:8443/Kvasir/api/call/jsonrpc' >>> build_kvasir_url(serve...
def get_default(parser, section, option, default):
    """Fetch a config value, falling back to *default* when the section/option is absent."""
    try:
        return parser.get(section, option)
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        return default
helper to get config settings with a default if not present
def set_db_application_prefix(prefix, sep=None):
    """Set the module-global application prefix, and the separator when one is given."""
    global _APPLICATION_PREFIX, _APPLICATION_SEP
    _APPLICATION_PREFIX = prefix
    if sep is not None:
        _APPLICATION_SEP = sep
Set the global app prefix and separator.
def find_by_index(self, cls, index_name, value):
    """Find records matching an index query — defers entirely to the backend.

    :param cls: record class to search.
    :param index_name: name of the index to query.
    :param value: value to match in that index.
    :returns: whatever the backend's find_by_index returns.
    """
    return self.backend.find_by_index(cls, index_name, value)
Find records matching index query - defer to backend.
def humanTime(seconds): ''' Convert seconds to something more human-friendly ''' intervals = ['days', 'hours', 'minutes', 'seconds'] x = deltaTime(seconds=seconds) return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k)f humanTime(seconds): ''' Convert seconds to something m...
Convert seconds to something more human-friendly
def humanTimeConverter(): ''' Cope whether we're passed a time in seconds on the command line or via stdin ''' if len(sys.argv) == 2: print humanFriendlyTime(seconds=int(sys.argv[1])) else: for line in sys.stdin: print humanFriendlyTime(int(line)) sys.exit(0f humanTimeConverter(): ''' ...
Cope whether we're passed a time in seconds on the command line or via stdin
def train(self, data, **kwargs): self.data = data for i in xrange(0,data.shape[1]): column_mean = np.mean(data.icol(i)) column_stdev = np.std(data.icol(i)) #Have to do += or "list" type will fail (ie with append) self.column_means += [column_mean...
Calculate the standard deviations and means in the training data
def predict(self, test_data, **kwargs): if test_data.shape[1]!=self.data.shape[1]: raise Exception("Test data has different number of columns than training data.") for i in xrange(0,test_data.shape[1]): test_data.loc[:,i] = test_data.icol(i) - self.column_means[i] ...
Adjust new input by the values in the training data
def action_decorator(name):
    """Class decorator factory: registers the decorated class in action_decorators under *name*."""
    def register(cls):
        action_decorators.append((name, cls))
        return cls
    return register
Decorator to register an action decorator
def load_global_config(config_path): config = configparser.RawConfigParser() if os.path.exists(config_path): logger.debug("Checking and setting global parameters...") config.read(config_path) else: _initial_run() logger.info("Unable to find a global sprinter configuratio...
Load a global configuration object, and query for any required variables along the way
def print_global_config(global_config): if global_config.has_section('shell'): print("\nShell configurations:") for shell_type, set_value in global_config.items('shell'): print("{0}: {1}".format(shell_type, set_value)) if global_config.has_option('global', 'env_source_rc'): ...
print the global configuration
def create_default_config():
    """Build the default global configuration object with all parameters filled.

    :returns: a configparser.RawConfigParser with 'global' and 'shell' sections.
    """
    config = configparser.RawConfigParser()
    config.add_section('global')
    # store as the string 'false': py3 configparser.set() rejects non-string
    # values, and this matches the string 'true' used for the shell options
    config.set('global', 'env_source_rc', 'false')
    config.add_section('shell')
    for shell in ('bash', 'zsh', 'gui'):
        config.set('shell', shell, 'true')
    return config
Create a default configuration object, with all parameters filled
def _initial_run(): if not system.is_officially_supported(): logger.warn(warning_template + "===========================================================\n" + "Sprinter is not officially supported on {0}! Please use at your own risk.\n\n".format(system.operating_s...
Check things during the initial setting of sprinter's global config
def _configure_shell(config): config.has_section('shell') or config.add_section('shell') logger.info( "What shells or environments would you like sprinter to work with?\n" "(Sprinter will not try to inject into environments not specified here.)\n" "If you specify 'gui', sprinter wil...
Checks and queries values for the shell