text
stringlengths
81
112k
px_per_mm = cam_resolution / image_size def MTF(self, px_per_mm): ''' px_per_mm = cam_resolution / image_size ''' res = 100 #numeric resolution r = 4 #range +-r*std #size of 1 px: px_size = 1 / px_per_mm #standard deviation of ...
return the intensity based uncertainty due to the unsharpness of the image as standard deviation method = ['convolve' , 'unsupervised_wiener'] latter one also returns the reconstructed image (deconvolution) def uncertaintyMap(self, psf, method='convolve', fitParams=None...
get the standard deviation from the PSF is evaluated as 2d Gaussian def stdDev(self): ''' get the standard deviation from the PSF is evaluated as 2d Gaussian ''' if self._corrPsf is None: self.psf() p = self._corrPsf.copy() mn = p.m...
replace all values in [grid] indicated by [mask] with the inverse distance weighted interpolation of all values within px+-kernel [power] -> distance weighting factor: 1/distance**[power] def interpolate2dStructuredIDW(grid, mask, kernel=15, power=2, fx=1, fy=1): ''' replace all values in [gr...
(Electroluminescence) signal is not stable over time especially next to cracks. This function takes a set of images and returns parameters, needed to transform uncertainty to other exposure times using [adjustUncertToExposureTime] return [signal uncertainty] obtained from l...
[images] --> list of images containing small bright spots generated by the same device images at different positions within image plane depending on the calibrated waveband the device can be a LCD display or PV 1-cell mini module This method is refe...
Simulates a lens calibration using synthetic images * images are rendered under the given HEIGHT resolution * noise and smoothing is applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate ...
creates plots showing both found GAUSSIAN peaks, the histogram, a smoothed histogram from all images within [imgDir] posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5) outDir - dirname to save the output images show_legend - True/False show_plots - di...
Average multiple images of a homogeneous device imaged directly in front the camera lens. if [bg_imgs] are not given, background level is extracted from 1% of the cumulative intensity distribution of the averaged [imgs] This measurement method is referred as 'Method A' in --- ...
Same as [flatFieldFromCloseDistance]. Differences are: ... single-time-effect removal included ... returns the standard deviation of the image average [calcStd=True] Optional: ----------- calcStd -> set to True to also return the standard deviation nlf -> noise level function (callable) ...
signal-to-noise ratio (SNR) as mean(images) / std(images) as defined in Hinken et.al. 2011 (DOI: 10.1063/1.3541766) works on unloaded images no memory overload if too many images are given def SNR_hinken(imgs, bg=0, roi=None): ''' signal-to-noise ratio (SNR) as mean(images) / std(images)...
Transform at maximum 8 bool layers --> 2d arrays, dtype=(bool,int) to one 8bit image def boolMasksToImage(masks): ''' Transform at maximum 8 bool layers --> 2d arrays, dtype=(bool,int) to one 8bit image ''' assert len(masks) <= 8, 'can only transform up to 8 masks into image' masks =...
def imageToBoolMasks(arr):
    """Inverse of [boolMasksToImage]: split an 8-bit image into its 8 bit-plane masks.

    arr -> 2d uint8 image whose bits encode up to 8 boolean layers
    returns an array of shape (8, ...) with one bit plane per leading index
    (NOTE(review): axes 0 and 2 are swapped, so the two spatial axes come out
    transposed relative to the input — presumably matching boolMasksToImage;
    behavior kept identical here)
    """
    assert arr.dtype == np.uint8, 'image needs to be dtype=uint8'
    # unpack every byte into 8 bits (MSB first), one trailing axis per bit
    bit_planes = np.unpackbits(arr).reshape(*arr.shape, 8)
    # bring the bit axis to the front
    return np.swapaxes(bit_planes, 2, 0)
simple and better alg. than below in_plane -> whether object has no tilt, but only rotation and translation def calcAspectRatioFromCorners(corners, in_plane=False): ''' simple and better alg. than below in_plane -> whether object has no tilt, but only rotation and translation ''' q = co...
Extends cv2.putText with [alpha] argument def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color, thickness): # , lineType=None ''' Extends cv2.putText with [alpha] argument ''' x, y = cv2.getTextSize(text, fontFace, fontScale, thickness)...
for bigger ksizes it if often faster to resize an image rather than blur it... def fastMean(img, f=10, inplace=False): ''' for bigger ksizes it if often faster to resize an image rather than blur it... ''' s0,s1 = img.shape[:2] ss0 = int(round(s0/f)) ss1 = int(round(s1/f)) ...
Build a file path from *paths* and return the contents. def read(*paths): """Build a file path from *paths* and return the contents.""" try: f_name = os.path.join(*paths) with open(f_name, 'r') as f: return f.read() except IOError: print('%s not existing ... skipp...
average background images with same exposure time def averageSameExpTimes(imgs_path): ''' average background images with same exposure time ''' firsts = imgs_path[:2] imgs = imgs_path[2:] for n, i in enumerate(firsts): firsts[n] = np.asfarray(imread(i)) d = DarkCurrentMap(fi...
returns offset, ascent of image(expTime) = offset + ascent*expTime def getLinearityFunction(expTimes, imgs, mxIntensity=65535, min_ascent=0.001, ): ''' returns offset, ascent of image(expTime) = offset + ascent*expTime ''' # TODO: calculate [min_ascent] from no...
return image paths sorted for same exposure time def sortForSameExpTime(expTimes, img_paths): # , excludeSingleImg=True): ''' return image paths sorted for same exposure time ''' d = {} for e, i in zip(expTimes, img_paths): if e not in d: d[e] = [] d[e].append(i...
return exposure times, image averages for each exposure time def getDarkCurrentAverages(exposuretimes, imgs): ''' return exposure times, image averages for each exposure time ''' x, imgs_p = sortForSameExpTime(exposuretimes, imgs) s0, s1 = imgs[0].shape imgs = np.empty(shape=(len(x), s0...
get dark current function from given images and exposure times def getDarkCurrentFunction(exposuretimes, imgs, **kwargs): ''' get dark current function from given images and exposure times ''' exposuretimes, imgs = getDarkCurrentAverages(exposuretimes, imgs) offs, ascent, rmse = getLinearityFu...
return a sub image aligned along given line @param img - numpy.2darray input image to get subimage from @param line - list of 2 points [x0,y0,x1,y1]) @param height - height of output array in y @param length - width of output array @param zoom - zoom factor @param fast - speed up calcul...
calculates the estimated standard deviation map from the changes of neighbouring pixels from a center pixel within a point spread function defined by a std.dev. in x and y taken from the (sx, sy) maps sx,sy -> either 2d array of same shape as [image] of single values def positionToInten...
seems to be a more precise (but slower) way to down-scale an image def _coarsenImage(image, f): ''' seems to be a more precise (but slower) way to down-scale an image ''' from skimage.morphology import square from skimage.filters import rank from skimage.transform._warps import ...
like positionToIntensityUncertainty but calculated average uncertainty for an area [y0:y1,x0:x1] def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1): ''' like positionToIntensityUncertainty but calculated average uncertainty for an area [y0:y1,x0:x1] ''' fy, fx = y1 -...
def nan_maximum_filter(arr, ksize):
    """Same as scipy.ndimage's maximum_filter, but excluding NaNs.

    arr   -> 2d input array (may contain NaNs)
    ksize -> kernel size of the moving maximum window
    The actual windowed maximum is computed by the compiled helper _calc,
    which receives the half-width (ksize // 2) of the window.
    """
    result = np.empty_like(arr)
    _calc(arr, result, ksize // 2)
    return result
set every the pixel value of the given [img] to the median filtered one of a given kernel [size] in case the relative [threshold] is exeeded condition = '>' OR '<' def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True): ''' set every the pixel value of the given [img] to the...
fn['nanmean', 'mean', 'nanmedian', 'median'] a fast 2d filter for large kernel sizes that also works with nans the computation speed is increased because only 'every'nsth position within the median kernel is evaluated def fastFilter(arr, ksize=30, every=None, resize=True, fn='median', ...
Read EL images (*.elbin) created by the RELTRON EL Software http://www.reltron.com/Products/Solar.html def elbin(filename): ''' Read EL images (*.elbin) created by the RELTRON EL Software http://www.reltron.com/Products/Solar.html ''' # arrs = [] labels = [] # These are all ex...
see http://en.wikipedia.org/wiki/Multivariate_normal_distribution # probability density function of a vector [x,y] sx,sy -> sigma (standard deviation) mx,my: mue (mean position) rho: correlation between x and y def gaussian2d(xy, sx, sy, mx=0, my=0, rho=0, amp=1, offs=0): ''' see http://e...
fit perspective and size of the input image to the base image def fitImg(self, img_rgb): ''' fit perspective and size of the input image to the base image ''' H = self.pattern.findHomography(img_rgb)[0] H_inv = self.pattern.invertHomography(H) s = self.img_orig.sha...
scaling img cutting x percent of top and bottom part of histogram def scaleSignalCut(img, ratio, nbins=100): ''' scaling img cutting x percent of top and bottom part of histogram ''' start, stop = scaleSignalCutParams(img, ratio, nbins) img = img - start img /= (stop - start) return ...
scale the image between... backgroundToZero=True -> 0 (average background) and 1 (maximum signal) backgroundToZero=False -> signal+-3std reference -> reference image -- scale image to fit this one returns: scaled image def scaleSignal(img, fitParams=None, backgroundToZer...
return minimum, average, maximum of the background peak def getBackgroundRange(fitParams): ''' return minimum, average, maximum of the background peak ''' smn, _, _ = getSignalParameters(fitParams) bg = fitParams[0] _, avg, std = bg bgmn = max(0, avg - 3 * std) if avg + 4 * ...
compare the height of putative bg and signal peak if ratio if too height assume there is no background def hasBackground(fitParams): ''' compare the height of putative bg and signal peak if ratio if too height assume there is no background ''' signal = getSignalPeak(fitParams) bg = g...
minimum position between signal and background peak def signalMinimum2(img, bins=None): ''' minimum position between signal and background peak ''' f = FitHistogramPeaks(img, bins=bins) i = signalPeakIndex(f.fitParams) spos = f.fitParams[i][1] # spos = getSignalPeak(f.fitParams)[1] ...
intersection between signal and background peak def signalMinimum(img, fitParams=None, n_std=3): ''' intersection between signal and background peak ''' if fitParams is None: fitParams = FitHistogramPeaks(img).fitParams assert len(fitParams) > 1, 'need 2 peaks so get minimum signal' ...
return minimum, average, maximum of the signal peak def getSignalParameters(fitParams, n_std=3): ''' return minimum, average, maximum of the signal peak ''' signal = getSignalPeak(fitParams) mx = signal[1] + n_std * signal[2] mn = signal[1] - n_std * signal[2] if mn < fitParams[0][1]...
Equalize the histogram (contrast) of an image works with RGB/multi-channel images and flat-arrays @param img - image_path or np.array @param save_path if given output images will be saved there @param name_additive if given this additive will be appended to output images @return outpu...
histogram equalisation not bounded to int() or an image depth of 8 bit works also with negative numbers def _equalizeHistogram(img): ''' histogram equalisation not bounded to int() or an image depth of 8 bit works also with negative numbers ''' # to float if int: intType = None ...
Returns the local maximum of a given 2d array thresh -> if given, ignore all values below that value max_length -> limit length between value has to vary > min_increase >>> a = np.array([[0,1,2,3,2,1,0], \ [0,1,2,2,3,1,0], \ [0,1,1,2,2,3,0], \ ...
ref ... either quad, grid, homography or reference image quad --> list of four image points(x,y) marking the edges of the quad to correct homography --> h. matrix to correct perspective distortion referenceImage --> image of same object without perspective distortion def s...
Apply perspective distortion ion self.img angles are in DEG and need to be positive to fit into image def distort(self, img, rotX=0, rotY=0, quad=None): ''' Apply perspective distortion ion self.img angles are in DEG and need to be positive to fit into image ''' ...
grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,) def correctGrid(self, img, grid): ''' grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,) ''' self.img = imread(img) h = self.homography # TODO: cleanup only needed to get newBorder attr. if self.opts['do_cor...
...from perspective distortion: --> perspective transformation --> apply tilt factor (view factor) correction def correct(self, img): ''' ...from perspective distortion: --> perspective transformation --> apply tilt factor (view factor) correction '''...
returns camera position in world coordinates using self.rvec and self.tvec from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector def camera_position(self, pose=None): ''' returns camera position in world coordinates using self.rvec and self.t...
calculate view factor between one small and one finite surface vf =1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA according to VDI heatatlas 2010 p961 def viewAngle(self, **kwargs): ''' calculate view factor between one small and one finite surface vf =1/pi * integral(cos...
return foreground (quad) mask def foreground(self, quad=None): '''return foreground (quad) mask''' fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8) if quad is None: quad = self.quad else: quad = quad.astype(np.int32) cv2.fillConvexPoly(f...
get tilt factor from inverse distance law https://en.wikipedia.org/wiki/Inverse-square_law def tiltFactor(self, midpointdepth=None, printAvAngle=False): ''' get tilt factor from inverse distance law https://en.wikipedia.org/wiki/Inverse-square_law ''' ...
focusAtXY - image position with is in focus if not set it is assumed that the image middle is in focus sigma_best_focus - standard deviation of the PSF within the best focus (default blur) uncertainties - contibutors for standard uncertainty ...
estimate the pose of the object plane using quad setting: self.rvec -> rotation vector self.tvec -> translation vector def _poseFromQuad(self, quad=None): ''' estimate the pose of the object plane using quad setting: self.rvec -> rotation vector ...
Draw the quad into given img def drawQuad(self, img=None, quad=None, thickness=30): ''' Draw the quad into given img ''' if img is None: img = self.img if quad is None: quad = self.quad q = np.int32(quad) c = int(img.max()) ...
draw the 3d coordinate axes into given image if image == False: create an empty image def draw3dCoordAxis(self, img=None, thickness=8): ''' draw the 3d coordinate axes into given image if image == False: create an empty image ''' if img is...
return the size of a rectangle in perspective distortion in [px] DEBUG: PUT THAT BACK IN??:: if aspectRatio is not given is will be determined def _calcQuadSize(corners, aspectRatio): ''' return the size of a rectangle in perspective distortion in [px] DEBUG: PUT THAT B...
map a 2d (x,y) Cartesian array to a polar (r, phi) array using opencv.remap def linearToPolar(img, center=None, final_radius=None, initial_radius=None, phase_width=None, interpolation=cv2.INTER_AREA, maps=None, borderVa...
map a 2d polar (r, phi) polar array to a Cartesian (x,y) array using opencv.remap def polarToLinear(img, shape=None, center=None, maps=None, interpolation=cv2.INTER_AREA, borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts): ''' map a 2d polar (r, phi) polar array...
LAPM' algorithm (Nayar89) def modifiedLaplacian(img): ''''LAPM' algorithm (Nayar89)''' M = np.array([-1, 2, -1]) G = cv2.getGaussianKernel(ksize=3, sigma=-1) Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G) Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kerne...
def varianceOfLaplacian(img):
    """'LAPV' focus measure (Pech2000): variance of the image Laplacian.

    img -> 2d gray-level image
    returns a scalar sharpness score (higher = sharper)

    Uses a 64-bit float output depth for the Laplacian: with the previous
    ddepth=-1 the output kept the source depth, so for uint8 input every
    negative Laplacian response was saturated to 0, which biases the
    variance.  The commented-out ``cv2.cv.CV_64F`` in the original shows
    this was the intended depth.
    """
    lap = cv2.Laplacian(img, ddepth=cv2.CV_64F)
    # meanStdDev returns (mean, stddev); variance = stddev**2
    stdev = cv2.meanStdDev(lap)[1]
    s = stdev[0] ** 2
    return s[0]
TENG' algorithm (Krotkov86) def tenengrad(img, ksize=3): ''''TENG' algorithm (Krotkov86)''' Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize) Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize) FM = Gx*Gx + Gy*Gy mn = cv2.mean(FM)[0] if np.isnan(mn): ret...
def normalizedGraylevelVariance(img):
    """'GLVN' focus measure (Santos97): gray-level variance normalized by the mean.

    img -> 2d gray-level image
    returns a scalar sharpness score (higher = sharper)
    """
    mean, stdev = cv2.meanStdDev(img)
    score = stdev[0] ** 2 / mean[0]
    return score[0]
returns [img] intensity values along line defined by [x0, y0, x1, y1] resolution ... number or data points to evaluate order ... interpolation precision def linePlot(img, x0, y0, x1, y1, resolution=None, order=3): ''' returns [img] intensity values along line defined by [x0, y0, x1,...
calculate flatField from fitting vignetting function to averaged fit-image returns flatField, average background level, fitted image, valid indices mask def flatFieldFromFunction(self): ''' calculate flatField from fitting vignetting function to averaged fit-image returns flatField,...
calculate flatField from 2d-polynomal fit filling all high gradient areas within averaged fit-image returns flatField, average background level, fitted image, valid indices mask def flatFieldFromFit(self): ''' calculate flatField from 2d-polynomal fit filling all high gra...
Parse a named VHDL file Args: fname(str): Name of file to parse Returns: Parsed objects. def parse_vhdl_file(fname): '''Parse a named VHDL file Args: fname(str): Name of file to parse Returns: Parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() return parse_vhdl...
Parse a text buffer of VHDL code Args: text(str): Source code to parse Returns: Parsed objects. def parse_vhdl(text): '''Parse a text buffer of VHDL code Args: text(str): Source code to parse Returns: Parsed objects. ''' lex = VhdlLexer name = None kind = None saved_type = None...
Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string. def subprogram_prototype(vo): '''Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string. ''' ...
Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string. def subprogram_signature(vo, fullname=None): '''Generate a signature string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Signature string. ''' if f...
Extract object declarations from a text buffer Args: text (str): Source code to parse type_filter (class, optional): Object class to filter results Returns: List of parsed objects. def extract_objects_from_source(self, text, type_filter=None): '''Extract object declarations from a text b...
Check if a type is a known array type Args: data_type (str): Name of type to check Returns: True if ``data_type`` is a known array type. def is_array(self, data_type): '''Check if a type is a known array type Args: data_type (str): Name of type to check Returns: Tr...
Load file of previously extracted data types Args: fname (str): Name of file to load array database from def load_array_types(self, fname): '''Load file of previously extracted data types Args: fname (str): Name of file to load array database from ''' type_defs = '' with o...
Save array type registry to a file Args: fname (str): Name of file to save array database to def save_array_types(self, fname): '''Save array type registry to a file Args: fname (str): Name of file to save array database to ''' type_defs = {'arrays': sorted(list(self.array_typ...
Add array type definitions to internal registry Args: objects (list of VhdlType or VhdlSubtype): Array types to track def _register_array_types(self, objects): '''Add array type definitions to internal registry Args: objects (list of VhdlType or VhdlSubtype): Array types to track ...
Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions def register_array_types_from_sources(self, source_files): '''Add array type definitions from a file list to internal registry Args: source_files (list of st...
Run lexer rules against a source text Args: text (str): Text to apply lexer to Yields: A sequence of lexer matches. def run(self, text): '''Run lexer rules against a source text Args: text (str): Text to apply lexer to Yields: A sequence of lexer matches. ''' st...
Scan the script for the version string def get_package_version(verfile): '''Scan the script for the version string''' version = None with open(verfile) as fh: try: version = [line.split('=')[1].strip().strip("'") for line in fh if \ line.startswith('__version__')][0] except In...
Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects. def parse_verilog_file(fname): '''Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects. ''' with open(fname, 'rt') as fh: text = fh.read() retu...
Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects. def parse_verilog(text): '''Parse a text buffer of Verilog code Args: text (str): Source code to parse Returns: List of parsed objects. ''' lex = VerilogLexer name = None kin...
Extract objects from a source file Args: fname(str): Name of file to read from type_filter (class, optional): Object class to filter results Returns: List of objects extracted from the file. def extract_objects(self, fname, type_filter=None): '''Extract objects from a source file Ar...
Extract object declarations from a text buffer Args: text (str): Source code to parse type_filter (class, optional): Object class to filter results Returns: List of parsed objects. def extract_objects_from_source(self, text, type_filter=None): '''Extract object declarations from a text b...
Load schema from a JSON file def load_json_from_file(file_path): """Load schema from a JSON file""" try: with open(file_path) as f: json_data = json.load(f) except ValueError as e: raise ValueError('Given file {} is not a valid JSON file: {}'.format(file_path, e)) else: ...
def load_json_from_string(string):
    """Load schema from JSON string.

    Args:
        string (str): JSON document to decode.

    Returns:
        The decoded Python object.

    Raises:
        ValueError: if ``string`` is not valid JSON; the original decode
            error is chained as the cause for easier debugging.
    """
    try:
        json_data = json.loads(string)
    except ValueError as e:
        # chain the underlying JSONDecodeError so the traceback keeps
        # the position information of the parse failure
        raise ValueError('Given string is not valid JSON: {}'.format(e)) from e
    else:
        return json_data
Recursively traverse schema dictionary and for each "leaf node", evaluate the fake value Implementation: For each key-value pair: 1) If value is not an iterable (i.e. dict or list), evaluate the fake data (base case) 2) If value is a dictionary, recurse 3) If value is a ...
Fetch the given uri and return the contents of the response. def fetch(method, uri, params_prefix=None, **params): """Fetch the given uri and return the contents of the response.""" params = urlencode(_prepare_params(params, params_prefix)) binary_params = params.encode('ASCII') # build the HTTP reque...
def fetch_and_parse(method, uri, params_prefix=None, **params):
    """Fetch the given uri and return the root Element of the response."""
    # fetch() returns a file-like HTTP response; parse it as XML and
    # convert the tree into plain python data via _parse()
    response = fetch(method, uri, params_prefix, **params)
    tree = ElementTree.parse(response)
    return _parse(tree.getroot())
Recursively convert an Element into python data types def _parse(root): """Recursively convert an Element into python data types""" if root.tag == "nil-classes": return [] elif root.get("type") == "array": return [_parse(child) for child in root] d = {} for child in root: t...
Prepares parameters to be sent to challonge.com. The `prefix` can be used to convert parameters with keys that look like ("name", "url", "tournament_type") into something like ("tournament[name]", "tournament[url]", "tournament[tournament_type]"), which is how challonge.com expects parameters describin...
Expand descendants from list of branches :param list branches: list of immediate children as TreeOfContents objs :return: list of all descendants def expandDescendants(self, branches): """ Expand descendants from list of branches :param list branches: list of immediate childre...
Parse top level of markdown :param list elements: list of source objects :return: list of filtered TreeOfContents objects def parseBranches(self, descendants): """ Parse top level of markdown :param list elements: list of source objects :return: list of filtered TreeOf...
Creates abstraction using path to file :param str path: path to markdown file :return: TreeOfContents object def fromMarkdown(md, *args, **kwargs): """ Creates abstraction using path to file :param str path: path to markdown file :return: TreeOfContents object ...
Creates abstraction using HTML :param str html: HTML :return: TreeOfContents object def fromHTML(html, *args, **kwargs): """ Creates abstraction using HTML :param str html: HTML :return: TreeOfContents object """ source = BeautifulSoup(html, 'html.parse...
Create an attachment from data. :param str type: attachment type :param kwargs data: additional attachment data :return: an attachment subclass object :rtype: `~groupy.api.attachments.Attachment` def from_data(cls, type, **data): """Create an attachment from data. :par...
Create a new image attachment from an image file. :param file fp: a file object containing binary image data :return: an image attachment :rtype: :class:`~groupy.api.attachments.Image` def from_file(self, fp): """Create a new image attachment from an image file. :param file fp...
Upload image data to the image service. Call this, rather than :func:`from_file`, you don't want to create an attachment of the image. :param file fp: a file object containing binary image data :return: the URLs for the image uploaded :rtype: dict def upload(self, fp): ...
Download the binary data of an image attachment. :param image: an image attachment :type image: :class:`~groupy.api.attachments.Image` :param str url_field: the field of the image with the right URL :param str suffix: an optional URL suffix :return: binary image data :rt...
Downlaod the binary data of an image attachment at preview size. :param str url_field: the field of the image with the right URL :return: binary image data :rtype: bytes def download_preview(self, image, url_field='url'): """Downlaod the binary data of an image attachment at preview si...
Downlaod the binary data of an image attachment at large size. :param str url_field: the field of the image with the right URL :return: binary image data :rtype: bytes def download_large(self, image, url_field='url'): """Downlaod the binary data of an image attachment at large size. ...
Downlaod the binary data of an image attachment at avatar size. :param str url_field: the field of the image with the right URL :return: binary image data :rtype: bytes def download_avatar(self, image, url_field='url'): """Downlaod the binary data of an image attachment at avatar size....
Iterate through results from all pages. :return: all results :rtype: generator def autopage(self): """Iterate through results from all pages. :return: all results :rtype: generator """ while self.items: yield from self.items self.items =...
Detect which listing mode of the given params. :params kwargs params: the params :return: one of the available modes :rtype: str :raises ValueError: if multiple modes are detected def detect_mode(cls, **params): """Detect which listing mode of the given params. :params...