Takes an input path to the images folder of an experiment and automatically
generates the category-filenumber list needed to construct an
appropriate _categories object.
Parameters :
loader : Loader object which contains
impath : string
path to the input, i.e. ima... |
Filter the fixmat such that it only contains fixations on images
in categories that are also in the categories object
def fixations(self):
''' Filter the fixmat such that it only contains fixations on images
in categories that are also in the categories object'''
if not self._fixations:... |
Saves a new image to disk
def data(self, value):
"""
Saves a new image to disk
"""
self.loader.save_image(self.category, self.image, value) |
Returns all fixations that are on this image.
A precondition for this to work is that a fixmat
is associated with this Image object.
def fixations(self):
"""
Returns all fixations that are on this image.
A precondition for this to work is that a fixmat
is associated wi... |
Generator for creating the cross-validation slices.
Returns
A tuple that contains two fixmats (training and test)
and two Category objects (test and train).
def generate(self):
"""
Generator for creating the cross-validation slices.
Returns
A tu... |
Computes angle and length differences up to given order and deletes
suspiciously long fixations.
Input
fm: Fixmat
Fixmat for which to compute angle and length differences
max_back: Int
Computes delta angle and amplitude up to order max_back.
dur_cap: Int
... |
Computes the mean fixation duration at forward angles.
def saccadic_momentum_effect(durations, forward_angle,
summary_stat=nanmean):
"""
Computes the mean fixation duration at forward angles.
"""
durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
for i, (bo, b1) i... |
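The loop body is cut off above; a minimal sketch of how the binning plausibly continues, assuming e_angle is a module-level array of forward-angle bin edges (the values below are hypothetical):
import numpy as np

e_angle = np.linspace(0, 180, 19)  # hypothetical bin edges, not from the source

def saccadic_momentum_effect(durations, forward_angle, summary_stat=np.nanmean):
    """Computes the mean fixation duration at forward angles."""
    durations = np.asarray(durations)
    forward_angle = np.asarray(forward_angle)
    durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
    for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
        in_bin = (bo <= forward_angle) & (forward_angle < b1)
        if in_bin.any():
            durations_per_da[i] = summary_stat(durations[in_bin])
    return durations_per_da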
Computes a measure of fixation durations at delta angle and delta
length combinations.
def ior_effect(durations, angle_diffs, length_diffs,
summary_stat=np.mean, parallel=True, min_samples=20):
"""
Computes a measure of fixation durations at delta angle and delta
length combinations.
... |
Fits a non-linear piecewise regression to fixation durations for a fixmat.
Returns corrected fixation durations.
def predict_fixation_duration(
durations, angles, length_diffs, dataset=None, params=None):
"""
Fits a non-linear piecewise regression to fixation durations for a fixmat.
Returns c... |
Calculates the saccadic momentum effect for individual subjects.
Removes any effect of amplitude differences.
The parameters are fitted on unbinned data. The effects are
computed on binned data. See e_dist and e_angle for the binning
parameter.
def subject_predictions(fm, field='SUBJECTINDEX',
... |
Calculates how well the fixations from a set of subjects on a set of
images can be predicted with the fixations from another set of subjects
on another set of images.
The prediction is carried out by computing a fixation density map from
fixations of predicting_subjects subjects on predicting_images im... |
Calculates how well the fixations of n random subjects on one image can
be predicted with the fixations of m other random subjects.
Notes
Function that uses intersubject_auc for computing auc.
Parameters
fm : fixmat instance
category : int
Category from which the fixati... |
compute the inter-subject consistency upper bound for a fixmat.
Input:
fm : a fixmat instance
nr_subs : the number of subjects used for the prediction. Defaults
to the total number of subjects in the fixmat minus 1
scale_factor : the scale factor of the FDMs. Default is 1.... |
Compute the spatial bias lower bound for a fixmat.
Input:
fm : a fixmat instance
nr_subs : the number of subjects used for the prediction. Defaults
to the total number of subjects in the fixmat minus 1
nr_imgs : the number of images used for prediction. If given, the
... |
Calculates subscripts for indices into regularly spaced matrices.
def ind2sub(ind, dimensions):
"""
Calculates subscripts for indices into regularly spaced matrices.
"""
# check that the index is within range
if ind >= np.prod(dimensions):
raise RuntimeError("ind2sub: index exceeds array si... |
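Since the original body is truncated after the range check, here is a hedged sketch of an equivalent implementation; behavioral equivalence with the original is an assumption, but np.unravel_index performs exactly this subscript calculation:
import numpy as np

def ind2sub(ind, dimensions):
    """Calculates subscripts for indices into regularly spaced matrices."""
    if ind >= np.prod(dimensions):
        raise RuntimeError("ind2sub: index exceeds array size")
    # row-major (C-order) subscript calculation
    return np.unravel_index(ind, dimensions)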
An exemplary sub2ind implementation to create randomization
scripts.
This function calculates indices from subscripts into regularly spaced
matrices.
def sub2ind(indices, dimensions):
"""
An exemplary sub2ind implementation to create randomization
scripts.
This function calculates ind... |
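The inverse mapping can likewise be sketched with NumPy; assuming `indices` holds one subscript per dimension, np.ravel_multi_index computes the same flat index:
import numpy as np

def sub2ind(indices, dimensions):
    """Calculates a flat index from subscripts into a regularly spaced matrix."""
    # assumes one integer subscript per dimension, C (row-major) order
    return int(np.ravel_multi_index(tuple(indices), dimensions))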
Restores a task store from file.
def RestoreTaskStoreFactory(store_class, chunk_size, restore_file, save_file):
"""
Restores a task store from file.
"""
intm_results = np.load(restore_file)
intm = intm_results[intm_results.files[0]]
idx = np.isnan(intm).flatten().nonzero()[0]
partitions = m... |
Reschedule all running tasks.
def xmlrpc_reschedule(self):
"""
Reschedule all running tasks.
"""
if not len(self.scheduled_tasks) == 0:
self.reschedule = list(self.scheduled_tasks.items())
self.scheduled_tasks = {}
return True |
Return a new task description: ID and necessary parameters,
all are given in a dictionary
def xmlrpc_get_task(self):
"""
Return a new task description: ID and necessary parameters,
all are given in a dictionary
"""
try:
if len(self.reschedule) == 0:
... |
Take the result of a computation and put it into the results list.
def xmlrpc_task_done(self, result):
"""
Take the result of a computation and put it into the results list.
"""
(task_id, task_results) = result
del self.scheduled_tasks[task_id]
self.task_store.update_r... |
Return a status message
def xmlrpc_status(self):
"""
Return a status message
"""
return ("""
%i Jobs are still waiting for execution
%i Jobs are being processed
%i Jobs are done
""" %(self.task_store.partitions -
self.results -
... |
Save results and own state into file.
def xmlrpc_save2file(self, filename):
"""
Save results and own state into file.
"""
savefile = open(filename,'wb')
try:
pickle.dump({'scheduled':self.scheduled_tasks,
'reschedule':self.reschedule},savefi... |
This function needs to be called to start the computation.
def run(self):
"""This function needs to be called to start the computation."""
(task_id, tasks) = self.server.get_task()
self.task_store.from_dict(tasks)
for (index, task) in self.task_store:
result = self.compute(i... |
Configures the task store to be the task_store described
in description
def from_dict(self, description):
"""Configures the task store to be the task_store described
in description"""
assert(self.ident == description['ident'])
self.partitions = description['partitions'... |
Partitions all tasks into groups of tasks. A group is
represented by a task_store object that indexes a sub-
set of tasks.
def partition(self):
"""Partitions all tasks into groups of tasks. A group is
represented by a task_store object that indexes a sub-
set of task... |
Fits a 3D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e_x: Array
Edges that define the events in the probability
distribution along the x direction. For example,
e_x[0] < samples[0] <= e_x[1] pic... |
Fits a 2D distribution with splines.
Input:
samples: Matrix or list of arrays
If matrix, it must be of size Nx2, where N is the number of
observations. If list, it must contain two arrays of length
N.
e_x: Array
Edges that define the events in the pr... |
Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associate... |
Determines knot placement based on a marginal distribution.
It places knots such that each knot covers the same amount
of probability mass. Two of the knots are reserved for the
borders which are treated separately. For example, a uniform
distribution with 5 knots will cause the knots to be equally
... |
Computes a 1D spline basis
Input:
length: int
length of each basis
nr_knots: int
Number of knots, i.e. number of basis functions.
spline_order: int
Order of the splines.
marginal: array, optional
Estimate of the marginal distribut... |
Computes a set of 2D spline basis functions.
The basis functions cover the entire space in height*width and can
for example be used to create fixation density maps.
Input:
width: int
width of each basis
height: int
height of each basis
nr_knots_x: i... |
Computes a set of 3D spline basis functions.
For a description of the parameters see spline_base2d.
def spline_base3d( width, height, depth, nr_knots_x = 10.0, nr_knots_y = 10.0,
nr_knots_z=10, spline_order = 3, marginal_x = None, marginal_y = None,
marginal_z = None):
"""Computes a set ... |
Evaluates the ith spline basis given by knots on points in x
def spline(x,knots,p,i=0.0):
"""Evaluates the ith spline basis given by knots on points in x"""
assert(p+1<len(knots))
return np.array([N(float(u),float(i),float(p),knots) for u in x]) |
Computes the spline collocation matrix for knots in x.
The spline collocation matrix contains all m-p-1 bases
defined by knots. Specifically it contains the ith basis
in the ith column.
Input:
x: vector to evaluate the bases on
knots: vector of knots
spline_order: orde... |
Augment knot sequence such that some boundary conditions
are met.
def augknt(knots,order):
"""Augment knot sequence such that some boundary conditions
are met."""
a = []
[a.append(knots[0]) for t in range(0,order)]
[a.append(k) for k in knots]
[a.append(knots[-1]) for t in range(0,order)]... |
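The return statement falls outside the visible snippet; an idiomatic sketch of the same augmentation, with the trailing conversion to an array being an assumption:
import numpy as np

def augknt(knots, order):
    """Augment knot sequence such that some boundary conditions are met."""
    # repeat the first and last knot `order` extra times each
    a = [knots[0]] * order + list(knots) + [knots[-1]] * order
    return np.array(a)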
Compute Spline Basis
Evaluates the spline basis of order p defined by knots
at knot i and point u.
def N(u,i,p,knots):
"""Compute Spline Basis
Evaluates the spline basis of order p defined by knots
at knot i and point u.
"""
if p == 0:
if knots[i] < u and u <=knots[i+1]:... |
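The visible base case is the start of the standard Cox-de Boor recursion; a sketch of the full recursion under that assumption, keeping the source's half-open interval convention:
def N(u, i, p, knots):
    """Evaluates the spline basis of order p defined by knots at knot i and point u."""
    i = int(i)
    if p == 0:
        return 1.0 if knots[i] < u <= knots[i + 1] else 0.0
    left = right = 0.0
    d1 = knots[i + p] - knots[i]
    if d1 > 0:
        left = (u - knots[i]) / d1 * N(u, i, p - 1, knots)
    d2 = knots[i + p + 1] - knots[i + 1]
    if d2 > 0:
        right = (knots[i + p + 1] - u) / d2 * N(u, i + 1, p - 1, knots)
    return left + right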
Evaluates a prediction against fixations in a fixmat with different measures.
The default measures which are used are AUC, NSS and KL-divergence. This
can be changed by setting the list of measures with set_scores.
As different measures need potentially different parameters, the kw
dictionary can be us... |
wraps kldiv functionality for model evaluation
input:
prediction: 2D matrix
the model salience map
fm : fixmat
Should be filtered for the image corresponding to the prediction
def kldiv_model(prediction, fm):
"""
wraps kldiv functionality for model evaluation
i... |
Computes the Kullback-Leibler divergence between two distributions.
Parameters
p : Matrix
The first probability distribution
q : Matrix
The second probability distribution
distp : fixmat
If p is None, distp is used to compute a FDM which
is th... |
Computes Chao-Shen corrected KL-divergence between prediction
and fdm made from fixations in fm.
Parameters :
prediction : np.ndarray
a fixation density map
fm : FixMat object
def kldiv_cs_model(prediction, fm):
"""
Computes Chao-Shen corrected KL-divergence between predict... |
Computes some terms needed for the Chao-Shen KL correction.
def chao_shen(q):
"""
Computes some terms needed for the Chao-Shen KL correction.
"""
yx = q[q > 0] # remove bins with zero counts
n = np.sum(yx)
p = yx.astype(float)/n
f1 = np.sum(yx == 1) # number of singletons in the sample
... |
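The visible lines (counts, sample size, singletons) match the standard Chao-Shen coverage estimator, so the continuation below is a sketch under that assumption:
import numpy as np

def chao_shen(q):
    """Computes some terms needed for the Chao-Shen KL correction."""
    yx = q[q > 0]                  # remove bins with zero counts
    n = np.sum(yx)
    p = yx.astype(float) / n
    f1 = np.sum(yx == 1)           # number of singletons in the sample
    if f1 == n:                    # avoid a zero coverage estimate
        f1 = n - 1
    C = 1.0 - f1 / float(n)        # estimated sample coverage
    pa = C * p                     # coverage-adjusted probabilities
    la = 1.0 - (1.0 - pa) ** n     # probability that each bin was observed
    return pa, la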
wraps numpy.corrcoef functionality for model evaluation
input:
prediction: 2D Matrix
the model salience map
fm: fixmat
Used to compute a FDM to which the prediction is compared.
def correlation_model(prediction, fm):
"""
wraps numpy.corrcoef functionality for model ... |
wraps nss functionality for model evaluation
input:
prediction: 2D matrix
the model salience map
fm : fixmat
Fixations that define the actuals
def nss_model(prediction, fm):
"""
wraps nss functionality for model evaluation
input:
prediction: 2D matrix
... |
Compute the normalized scanpath salience
input:
fix : list, l[0] contains y, l[1] contains x
def nss(prediction, fix):
"""
Compute the normalized scanpath salience
input:
fix : list, l[0] contains y, l[1] contains x
"""
prediction = prediction - np.mean(prediction)
predic... |
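After the z-scoring shown above, NSS is conventionally the mean of the normalized map sampled at the fixated pixels; the sampling step below is an assumption about the truncated body:
import numpy as np

def nss(prediction, fix):
    """Compute the normalized scanpath salience."""
    prediction = prediction - np.mean(prediction)
    prediction = prediction / np.std(prediction)
    # sample the z-scored map at the fixation coordinates and average
    ys = np.asarray(fix[0]).astype(int)
    xs = np.asarray(fix[1]).astype(int)
    return np.mean(prediction[ys, xs])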
wraps roc functionality for model evaluation
Parameters:
prediction: 2D array
the model salience map
fm : fixmat
Fixations that define locations of the actuals
ctr_loc : tuple of (y, x) coordinates, optional
Allows to specify control points for spatial
... |
approximates the area under the roc curve for sets of actuals and controls.
Uses all values appearing in actuals as thresholds and lower sum
interpolation. Also returns arrays of the true positive rate and the false
positive rate that can be used for plotting the roc curve.
Parameters:
actuals ... |
Histogram-based implementation of AUC under the ROC curve.
Parameters:
actuals : list
A list of numeric values for positive observations.
controls : list
A list of numeric values for negative observations.
def faster_roc(actuals, controls):
"""
Histogram-based implementat... |
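A minimal sketch of what a histogram-based AUC can look like, assuming the intent is to trade exact per-value thresholds for binned cumulative rates; the bin count and edge choice are illustrative, not the original's:
import numpy as np

def faster_roc(actuals, controls, nbins=100):
    """Histogram-based implementation of AUC under the ROC curve."""
    pooled = np.concatenate([actuals, controls])
    edges = np.linspace(pooled.min(), pooled.max(), nbins + 1)
    a_counts, _ = np.histogram(actuals, bins=edges)
    c_counts, _ = np.histogram(controls, bins=edges)
    # sweep thresholds from high to low and accumulate hit/false-alarm rates
    tpr = np.cumsum(a_counts[::-1]) / float(len(actuals))
    fpr = np.cumsum(c_counts[::-1]) / float(len(controls))
    return np.trapz(tpr, fpr), tpr, fpr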
wraps emd functionality for model evaluation
requires:
OpenCV python bindings
input:
prediction: the model salience map
fm : fixmat filtered for the image corresponding to the prediction
def emd_model(prediction, fm):
"""
wraps emd functionality for model evaluation
requi... |
Compute the Earth Mover's Distance between prediction and model.
This implementation uses opencv for doing the actual work.
Unfortunately, at the time of implementation only the SWIG
bindings were available and the numpy arrays have to be
converted by hand. This changes with opencv 2.1.
def emd(prediction... |
Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed... |
_to_rfc822(datetime.datetime) -> str
The datetime `strftime` method is subject to locale-specific
day and month names, so this function hardcodes the conversion.
def _to_rfc822(date):
"""_to_rfc822(datetime.datetime) -> str
The datetime `strftime` method is subject to locale-specific
day and month ... |
Formats the SQL query to use ordinal parameters instead of named
parameters.
*sql* (|string|) is the SQL query.
*params* (|dict|) maps each named parameter (|string|) to value
(|object|). If |self.named| is "numeric", then *params* can be
simply a |sequence| of values mapped by index.
Returns a 2-|tuple|... |
Formats the SQL query to use ordinal parameters instead of named
parameters.
*sql* (|string|) is the SQL query.
*many_params* (|iterable|) contains each *params* to format.
- *params* (|dict|) maps each named parameter (|string|) to value
(|object|). If |self.named| is "numeric", then *params* can be
... |
Gets the parser for the command f; if it does not exist, creates a new one
def _get_parser(f):
"""
Gets the parser for the command f; if it does not exist, creates a new one
"""
_COMMAND_GROUPS[f.__module__].load()
if f.__name__ not in _COMMAND_GROUPS[f.__module__].parsers:
parser = _COMMAND_G... |
Find all <a /> elements in the given html for a post. Only scan html elements matching all criteria in look_in.
Optionally, the content to be scanned can be given as an argument.
If any have an href attribute that is not from the
one of the items in exclude_domains, append it to our lists.
:param sourc... |
Search the given html content for all <link /> elements
and return any discovered WebMention URL.
:param html: html content
:rtype: WebMention URL
def findEndpoint(html):
"""Search the given html content for all <link /> elements
and return any discovered WebMention URL.
:param html: html con... |
Discover any WebMention endpoint for a given URL.
:param link: URL to discover WebMention endpoint
:param test_urls: optional flag to test URLs for validation
:param headers: optional headers to send with any web requests
:type headers dict
:param timeout: optional timeout for web requests
:typ... |
Send to the :targetURL: a WebMention for the :sourceURL:
The WebMention will be discovered if not given in the :webmention:
parameter.
:param sourceURL: URL that is referencing :targetURL:
:param targetURL: URL of mentioned post
:param webmention: optional WebMention endpoint
:param test_urls:... |
takes the link header as a string and returns a dictionary with rel values as keys and urls as values
:param link: link header as a string
:rtype: dictionary {rel_name: rel_value}
def parse_link_header(link):
"""takes the link header as a string and returns a dictionary with rel values as keys and urls as ... |
Find all <a /> elements in the given html for a post.
If any have an href attribute that is rel="me" then include
it in the result.
:param sourceURL: the URL for the post we are scanning
:rtype: dictionary of RelMe references
def findRelMe(sourceURL):
"""Find all <a /> elements in the given html ... |
Determine if a given :resourceURL: is authoritative for the :profileURL:
TODO add https/http filtering for those who wish to limit/restrict urls to match fully
TODO add code to ensure that each item in the redirect chain is authoritative
:param profileURL: URL of the user
:param resourceURL: URL of th... |
Indent every line of text in a newline-delimited string
def indent_text(string, indent_level=2):
"""Indent every line of text in a newline-delimited string"""
indented_lines = []
indent_spaces = ' ' * indent_level
for line in string.split('\n'):
indented_lines.append(indent_spaces + line)
... |
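The function is cut off just before its return; joining the accumulated lines is the natural completion (an assumption), which also allows a one-line form:
def indent_text(string, indent_level=2):
    """Indent every line of text in a newline-delimited string"""
    indent_spaces = ' ' * indent_level
    return '\n'.join(indent_spaces + line for line in string.split('\n'))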
Download a file using requests.
This is like urllib.request.urlretrieve, but:
- requests validates SSL certificates by default
- you can pass tracker objects to e.g. display a progress bar or calculate
a file hash.
def download(url, target, headers=None, trackers=()):
"""Download a file using r... |
Writes an object created by `parse` to either a file or a bytearray.
If the object doesn't end on a byte boundary, zeroes are appended to it
until it does.
def write(parsed_obj, spec=None, filename=None):
"""Writes an object created by `parse` to either a file or a bytearray.
If the object doesn't en... |
Uploads a file to an S3 bucket, as a public file.
def deploy_file(file_path, bucket):
""" Uploads a file to an S3 bucket, as a public file. """
# Paths look like:
# index.html
# css/bootstrap.min.css
logger.info("Deploying {0}".format(file_path))
# Upload the actual file to file_path
k... |
Deploy to the configured S3 bucket.
def deploy(www_dir, bucket_name):
""" Deploy to the configured S3 bucket. """
# Set up the connection to an S3 bucket.
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
# Deploy each changed file in www_dir
os.chdir(www_dir)
for root, dirs,... |
Checks if a file has changed since the last time it was deployed.
:param file_path: Path to file which should be checked. Should be relative
from root of bucket.
:param bucket_name: Name of S3 bucket to check against.
:returns: True if the file has changed, else False.
def has_change... |
Entry point for the package, as defined in setup.py.
def main():
""" Entry point for the package, as defined in setup.py. """
# Log info and above to console
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
# Get command line input/output arguments
msg = 'Inst... |
This keyword is used to start the sikuli java process.
If the library is initialized with mode "OLD", the sikuli java process is started automatically.
If the library is initialized with mode "NEW", this keyword should be used.
:param port: port of sikuli java process, if value is None or 0, a random free port will be u... |
Respond to POSTed username/password with token.
def post(self, request):
"""Respond to POSTed username/password with token."""
serializer = AuthTokenSerializer(data=request.data)
if serializer.is_valid():
token, _ = ExpiringToken.objects.get_or_create(
user=serializ... |
Return the allowed lifespan of a token as a TimeDelta object.
Defaults to 30 days.
def EXPIRING_TOKEN_LIFESPAN(self):
"""
Return the allowed lifespan of a token as a TimeDelta object.
Defaults to 30 days.
"""
try:
val = settings.EXPIRING_TOKEN_LIFESPAN
... |
Return boolean indicating token expiration.
def expired(self):
"""Return boolean indicating token expiration."""
now = timezone.now()
if self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN:
return True
return False |
Test if a token is made entirely of Unicode characters of the following
classes:
- P: punctuation
- S: symbols
- Z: separators
- M: combining marks
- C: control characters
>>> unicode_is_punctuation('word')
False
>>> unicode_is_punctuation('。')
True
>>> unicode_is_punctuati... |
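The doctests pin down the behavior, which a check on Unicode major category letters reproduces; a sketch under that assumption:
import unicodedata

def unicode_is_punctuation(text):
    """True when every character falls in Unicode classes P, S, Z, M, or C."""
    return all(unicodedata.category(ch)[0] in 'PSZMC' for ch in text)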
Store the actual process in _process. If it doesn't exist yet, create
it.
def process(self):
"""
Store the actual process in _process. If it doesn't exist yet, create
it.
"""
if hasattr(self, '_process'):
return self._process
else:
self._p... |
Create the process by running the specified command.
def _get_process(self):
"""
Create the process by running the specified command.
"""
command = self._get_command()
return subprocess.Popen(command, bufsize=-1, close_fds=True,
stdout=subprocess.... |
Split a text into separate words.
def tokenize_list(self, text):
"""
Split a text into separate words.
"""
return [self.get_record_token(record) for record in self.analyze(text)] |
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
def is_stopword(self, text)... |
Get a canonical list representation of text, with words
separated and reduced to their base forms.
TODO: use the cache.
def normalize_list(self, text, cache=None):
"""
Get a canonical list representation of text, with words
separated and reduced to their base forms.
TO... |
Given some text, return a sequence of (stem, pos, text) triples as
appropriate for the reader. `pos` can be as general or specific as
necessary (for example, it might label all parts of speech, or it might
only distinguish function words from others).
Twitter-style hashtags and at-menti... |
Given some text, extract phrases of up to 2 content words,
and map their normalized form to the complete phrase.
def extract_phrases(self, text):
"""
Given some text, extract phrases of up to 2 content words,
and map their normalized form to the complete phrase.
"""
anal... |
Use MeCab to turn any text into its phonetic spelling, as katakana
separated by spaces.
def to_kana(text):
"""
Use MeCab to turn any text into its phonetic spelling, as katakana
separated by spaces.
"""
records = MECAB.analyze(text)
kana = []
for record in records:
if record.pro... |
Return two things about each character:
- Its transliterated value (in Roman characters, if it's a kana)
- A class of characters indicating how it affects the romanization
def get_kana_info(char):
"""
Return two things about each character:
- Its transliterated value (in Roman characters, if it's... |
Runs a line of text through MeCab, and returns the results as a
list of lists ("records") that contain the MeCab analysis of each
word.
def analyze(self, text):
"""
Runs a line of text through MeCab, and returns the results as a
list of lists ("records") that contain the MeCab a... |
Determine whether a single MeCab record represents a stopword.
This mostly determines words to strip based on their parts of speech.
If common_words is set to True (default), it will also strip common
verbs and nouns such as くる and よう. If more_stopwords is True, it
will look at the sub-... |
Given a record, get the word's part of speech.
Here we're going to return MeCab's part of speech (written in
Japanese), though if it's a stopword we prefix the part of speech
with '~'.
def get_record_pos(self, record):
"""
Given a record, get the word's part of speech.
... |
Run text through the external process, and get a list of lists
("records") that contain the analysis of each word.
def analyze(self, text):
"""
Run text through the external process, and get a list of lists
("records") that contain the analysis of each word.
"""
try:
... |
Untokenizing a text undoes the tokenizing operation, restoring
punctuation and spaces to the places that people expect them to be.
Ideally, `untokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
def untokenize(words):
"""
Untokenizing a text undoes the tokenizing opera... |
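A rough sketch of the idea with a simple regex pass; the original's exact reattachment rules are not visible, so the patterns below are illustrative assumptions:
import re

def untokenize(words):
    """Join tokens and restore punctuation to its expected places."""
    text = ' '.join(words)
    # pull closing punctuation back onto the preceding word
    text = re.sub(r" (?=[.,:;!?%)\]}])", '', text)
    # drop the space after opening brackets
    text = re.sub(r"(?<=[(\[{]) ", '', text)
    return text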
r"""
Splits apart words that are written in CamelCase.
Bugs:
- Non-ASCII characters are treated as lowercase letters, even if they are
actually capital letters.
Examples:
>>> un_camel_case('1984ZXSpectrumGames')
'1984 ZX Spectrum Games'
>>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA'... |
Takes a (unicode) string and yields pieces of it that are at most `maxlen`
characters, trying to break it at punctuation/whitespace. This is an
important step before using a tokenizer with a maximum buffer size.
def string_pieces(s, maxlen=1024):
"""
Takes a (unicode) string and yields pieces of it tha... |
Assign a heuristic to possible outputs from Morphy. Minimizing this
heuristic avoids incorrect stems.
def _word_badness(word):
"""
Assign a heuristic to possible outputs from Morphy. Minimizing this
heuristic avoids incorrect stems.
"""
if word.endswith('e'):
return len(word) - 2
el... |
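Only the '-e' branch survives the truncation; the sketch keeps that branch verbatim and fills the rest with illustrative, assumed penalties to show the heuristic's shape:
def _word_badness(word):
    """Lower is better; Morphy candidates are ranked by this score."""
    if word.endswith('e'):
        return len(word) - 2    # visible in the source
    elif word.endswith('ss'):
        return len(word) - 4    # assumed penalty, not the original constant
    else:
        return len(word)        # assumed default: prefer shorter stems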
Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem().
def _morphy_best(word, pos=None):
"""
Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem().
"""
results = []
if pos is None:
... |
Get the most likely stem for a word. If a part of speech is supplied,
the stem will be more accurate.
Valid parts of speech are:
- 'n' or 'NN' for nouns
- 'v' or 'VB' for verbs
- 'a' or 'JJ' for adjectives
- 'r' or 'RB' for adverbs
Any other part of speech will be treated as unknown.
def... |
Returns a list of (stem, tag, token) triples:
- stem: the word's uninflected form
- tag: the word's part of speech
- token: the original word, so we can reconstruct it later
def tag_and_stem(text):
"""
Returns a list of (stem, tag, token) triples:
- stem: the word's uninflected form
- tag... |
Get a list of word stems that appear in the text. Stopwords and an initial
'to' will be stripped, unless this leaves nothing in the stem.
>>> normalize_list('the dog')
['dog']
>>> normalize_list('big dogs')
['big', 'dog']
>>> normalize_list('the')
['the']
def normalize_list(text):
"""
... |
Get a canonical representation of a Wikipedia topic, which may include
a disambiguation string in parentheses.
Returns (name, disambig), where "name" is the normalized topic name,
and "disambig" is a string corresponding to the disambiguation text or
None.
def normalize_topic(topic):
"""
Get a... |
split key to elements
def key2elements(key):
"""split key to elements"""
# words = key.split('.')
# if len(words) == 4:
# return words
# # there is a dot in object name
# fieldword = words.pop(-1)
# nameword = '.'.join(words[-2:])
# if nameword[-1] in ('"', "'"):
# # The obj... |
update idf using dct
def updateidf(idf, dct):
"""update idf using dct"""
for key in list(dct.keys()):
if key.startswith('idf.'):
idftag, objkey, objname, field = key2elements(key)
if objname == '':
try:
idfobj = idf.idfobjects[objkey.upper()][... |
return the fan power in bhp given fan efficiency, pressure rise (Pa) and flow (m3/s)
def fan_bhp(fan_tot_eff, pascal, m3s):
"""return the fan power in bhp given fan efficiency, pressure rise (Pa) and flow (m3/s)"""
# from discussion in
# http://energy-models.com/forum/baseline-fan-power-calculation
inh... |
return inputs for E+ in pascal and m3/s
def bhp2pascal(bhp, cfm, fan_tot_eff):
"""return inputs for E+ in pascal and m3/s"""
inh2o = bhp * 6356.0 * fan_tot_eff / cfm
pascal = inh2o2pascal(inh2o)
m3s = cfm2m3s(cfm)
return pascal, m3s |
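Inverting the visible relation inh2o = bhp * 6356.0 * fan_tot_eff / cfm suggests the likely body of the truncated fan_bhp above; the inline unit conversions are standard constants, but the helper-free form is an assumption:
def fan_bhp(fan_tot_eff, pascal, m3s):
    """return the fan power in bhp given fan efficiency, pressure rise (Pa) and flow (m3/s)"""
    inh2o = pascal / 249.089    # pascals per inch of water column
    cfm = m3s * 2118.88         # cfm per m3/s
    return inh2o * cfm / (6356.0 * fan_tot_eff)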
return the fan power in watts given fan efficiency, pressure rise (Pa) and flow (m3/s)
def fan_watts(fan_tot_eff, pascal, m3s):
"""return the fan power in watts given fan efficiency, pressure rise (Pa) and flow (m3/s)"""
# got this from a google search
bhp = fan_bhp(fan_tot_eff, pascal, m3s)
return bhp... |