text stringlengths 81 112k |
|---|
Parse the attributes and values
def _parseAttrs(self, attrsStr):
"""
Parse the attributes and values
"""
attributes = dict()
for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
name, vals = self._parseAttrVal(attrStr)
if name in attributes:
... |
Parse one record.
def _parseRecord(self, gff3Set, line):
"""
Parse one record.
"""
row = line.split("\t")
if len(row) != self.GFF3_NUM_COLS:
raise GFF3Exception(
"Wrong number of columns, expected {}, got {}".format(
self.GFF3_NUM_... |
Run the parse and return the resulting Gff3Set object.
def parse(self):
"""
Run the parse and return the resulting Gff3Set object.
"""
fh = self._open()
try:
gff3Set = Gff3Set(self.fileName)
for line in fh:
self.lineNumber += 1
... |
Adds the specified dataset to this data repository.
def addDataset(self, dataset):
"""
Adds the specified dataset to this data repository.
"""
id_ = dataset.getId()
self._datasetIdMap[id_] = dataset
self._datasetNameMap[dataset.getLocalId()] = dataset
self._datas... |
Adds the specified reference set to this data repository.
def addReferenceSet(self, referenceSet):
"""
Adds the specified reference set to this data repository.
"""
id_ = referenceSet.getId()
self._referenceSetIdMap[id_] = referenceSet
self._referenceSetNameMap[reference... |
Add an ontology map to this data repository.
def addOntology(self, ontology):
"""
Add an ontology map to this data repository.
"""
self._ontologyNameMap[ontology.getName()] = ontology
self._ontologyIdMap[ontology.getId()] = ontology
self._ontologyIds.append(ontology.getI... |
Select the first peer in the datarepo with the given url simulating
the behavior of selecting by URL. This is only used during testing.
def getPeer(self, url):
"""
Select the first peer in the datarepo with the given url simulating
the behavior of selecting by URL. This is only used dur... |
Returns a dataset with the specified ID, or raises a
DatasetNotFoundException if it does not exist.
def getDataset(self, id_):
"""
Returns a dataset with the specified ID, or raises a
DatasetNotFoundException if it does not exist.
"""
if id_ not in self._datasetIdMap:
... |
Returns the dataset with the specified name.
def getDatasetByName(self, name):
    """Return the dataset registered under the given local name.

    Raises a DatasetNameNotFoundException when no dataset with
    that name exists in this repository.
    """
    try:
        return self._datasetNameMap[name]
    except KeyError:
        raise exceptions.DatasetNameNotFoundException(name)
Returns the ontology with the specified ID.
def getOntology(self, id_):
    """Return the ontology registered under the given ID.

    Raises an OntologyNotFoundException when the ID is unknown.
    """
    try:
        return self._ontologyIdMap[id_]
    except KeyError:
        raise exceptions.OntologyNotFoundException(id_)
Returns an ontology by name
def getOntologyByName(self, name):
    """Return the ontology with the given name.

    Raises an OntologyNameNotFoundException when no ontology
    with that name has been registered.
    """
    missing = object()  # sentinel so a stored None is still returned
    ontology = self._ontologyNameMap.get(name, missing)
    if ontology is missing:
        raise exceptions.OntologyNameNotFoundException(name)
    return ontology
Returns the ReferenceSet with the specified ID, or raises a
ReferenceSetNotFoundException if it does not exist.
def getReferenceSet(self, id_):
"""
Retuns the ReferenceSet with the specified ID, or raises a
ReferenceSetNotFoundException if it does not exist.
"""
if id_ no... |
Returns the reference set with the specified name.
def getReferenceSetByName(self, name):
"""
Returns the reference set with the specified name.
"""
if name not in self._referenceSetNameMap:
raise exceptions.ReferenceSetNameNotFoundException(name)
return self._refere... |
Returns the readgroup set with the specified ID.
def getReadGroupSet(self, id_):
"""
Returns the readgroup set with the specified ID.
"""
compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
dataset = self.getDataset(compoundId.dataset_id)
return dataset.getReadGroup... |
Returns the readgroup set with the specified ID.
def getVariantSet(self, id_):
"""
Returns the readgroup set with the specified ID.
"""
compoundId = datamodel.VariantSetCompoundId.parse(id_)
dataset = self.getDataset(compoundId.dataset_id)
return dataset.getVariantSet(id... |
Prints a summary of this data repository to stdout.
def printSummary(self):
"""
Prints a summary of this data repository to stdout.
"""
print("Ontologies:")
for ontology in self.getOntologys():
print(
"",
ontology.getOntologyPrefix(),
... |
Return an iterator over all read groups in the data repo
def allReadGroups(self):
"""
Return an iterator over all read groups in the data repo
"""
for dataset in self.getDatasets():
for readGroupSet in dataset.getReadGroupSets():
for readGroup in readGroupSet... |
Return an iterator over all features in the data repo
def allFeatures(self):
"""
Return an iterator over all features in the data repo
"""
for dataset in self.getDatasets():
for featureSet in dataset.getFeatureSets():
for feature in featureSet.getFeatures():
... |
Return an iterator over all call sets in the data repo
def allCallSets(self):
"""
Return an iterator over all call sets in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets():
for callSet in variantSet.getCallSets()... |
Return an iterator over all variant annotation sets
in the data repo
def allVariantAnnotationSets(self):
"""
Return an iterator over all variant annotation sets
in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets()... |
Return an iterator over all rna quantifications
def allRnaQuantifications(self):
"""
Return an iterator over all rna quantifications
"""
for dataset in self.getDatasets():
for rnaQuantificationSet in dataset.getRnaQuantificationSets():
for rnaQuantification i... |
Return an iterator over all expression levels
def allExpressionLevels(self):
"""
Return an iterator over all expression levels
"""
for dataset in self.getDatasets():
for rnaQuantificationSet in dataset.getRnaQuantificationSets():
for rnaQuantification in \
... |
Finds a peer by URL and return the first peer record with that URL.
def getPeer(self, url):
"""
Finds a peer by URL and return the first peer record with that URL.
"""
peers = list(models.Peer.select().where(models.Peer.url == url))
if len(peers) == 0:
raise exceptio... |
Get the list of peers using an SQL offset and limit. Returns a list
of peer datamodel objects in a list.
def getPeers(self, offset=0, limit=1000):
"""
Get the list of peers using an SQL offset and limit. Returns a list
of peer datamodel objects in a list.
"""
select = mo... |
Takes a model class and attempts to create a table in TSV format
that can be imported into a spreadsheet program.
def tableToTsv(self, model):
"""
Takes a model class and attempts to create a table in TSV format
that can be imported into a spreadsheet program.
"""
first ... |
Flushes the announcement table.
def clearAnnouncements(self):
"""
Flushes the announcement table.
"""
try:
q = models.Announcement.delete().where(
models.Announcement.id > 0)
q.execute()
except Exception as e:
raise exceptions.... |
Adds an announcement to the registry for later analysis.
def insertAnnouncement(self, announcement):
"""
Adds an announcement to the registry for later analysis.
"""
url = announcement.get('url', None)
try:
peers.Peer(url)
except:
raise exceptions... |
Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour.
def open(self, mode=MODE_READ):
"""
Opens this repo in the specified mode.
TODO: figure out t... |
Verifies that the data in the repository is consistent.
def verify(self):
"""
Verifies that the data in the repository is consistent.
"""
# TODO this should emit to a log that we can configure so we can
# have verbosity levels. We should provide a way to configure
# wher... |
Inserts the specified ontology into this repository.
def insertOntology(self, ontology):
"""
Inserts the specified ontology into this repository.
"""
try:
models.Ontology.create(
id=ontology.getName(),
name=ontology.getName(),
... |
Removes the specified ontology term map from this repository.
def removeOntology(self, ontology):
    """
    Removes the specified ontology term map from this repository.

    :param ontology: the ontology datamodel object whose DB row
        should be deleted.
    """
    # Bug fix: the previous code filtered with ``id == ontology.getId()``,
    # which compares the *builtin* ``id`` function against the ontology id
    # and always evaluates to False, so the intended row was never matched.
    # Filter on the model column instead, matching the pattern used by the
    # other remove* methods (e.g. removeFeatureSet, removeBiosample).
    q = models.Ontology.delete().where(
        models.Ontology.id == ontology.getId())
    q.execute()
Inserts the specified reference into this repository.
def insertReference(self, reference):
"""
Inserts the specified reference into this repository.
"""
models.Reference.create(
id=reference.getId(),
referencesetid=reference.getParentContainer().getId(),
... |
Inserts the specified referenceSet into this repository.
def insertReferenceSet(self, referenceSet):
"""
Inserts the specified referenceSet into this repository.
"""
try:
models.Referenceset.create(
id=referenceSet.getId(),
name=referenceSet.g... |
Inserts the specified dataset into this repository.
def insertDataset(self, dataset):
"""
Inserts the specified dataset into this repository.
"""
try:
models.Dataset.create(
id=dataset.getId(),
name=dataset.getLocalId(),
descri... |
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
def removeDataset(self, dataset):
"""
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
"... |
Remove a phenotype association set from the repo
def removePhenotypeAssociationSet(self, phenotypeAssociationSet):
"""
Remove a phenotype association set from the repo
"""
q = models.Phenotypeassociationset.delete().where(
models.Phenotypeassociationset.id ==
phe... |
Removes the specified featureSet from this repository.
def removeFeatureSet(self, featureSet):
    """Delete the row for the given featureSet from the Featureset table."""
    featureSetId = featureSet.getId()
    deleteQuery = models.Featureset.delete().where(
        models.Featureset.id == featureSetId)
    deleteQuery.execute()
Removes the specified continuousSet from this repository.
def removeContinuousSet(self, continuousSet):
    """Delete the row for the given continuousSet from the ContinuousSet table."""
    continuousSetId = continuousSet.getId()
    deleteQuery = models.ContinuousSet.delete().where(
        models.ContinuousSet.id == continuousSetId)
    deleteQuery.execute()
Inserts the specified readGroup into the DB.
def insertReadGroup(self, readGroup):
"""
Inserts the specified readGroup into the DB.
"""
statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
experimentJson = json.dumps(
protocol.toJsonDict(readGroup.getExp... |
Removes the specified readGroupSet from this repository. This performs
a cascading removal of all items within this readGroupSet.
def removeReadGroupSet(self, readGroupSet):
"""
Removes the specified readGroupSet from this repository. This performs
a cascading removal of all items withi... |
Removes the specified variantSet from this repository. This performs
a cascading removal of all items within this variantSet.
def removeVariantSet(self, variantSet):
"""
Removes the specified variantSet from this repository. This performs
a cascading removal of all items within this var... |
Removes the specified biosample from this repository.
def removeBiosample(self, biosample):
    """Delete the row for the given biosample from the Biosample table."""
    biosampleId = biosample.getId()
    deleteQuery = models.Biosample.delete().where(
        models.Biosample.id == biosampleId)
    deleteQuery.execute()
Removes the specified individual from this repository.
def removeIndividual(self, individual):
    """Delete the row for the given individual from the Individual table."""
    individualId = individual.getId()
    deleteQuery = models.Individual.delete().where(
        models.Individual.id == individualId)
    deleteQuery.execute()
Inserts the specified readGroupSet into this repository.
def insertReadGroupSet(self, readGroupSet):
"""
Inserts a the specified readGroupSet into this repository.
"""
programsJson = json.dumps(
[protocol.toJsonDict(program) for program in
readGroupSet.getProg... |
Removes the specified referenceSet from this repository. This performs
a cascading removal of all references within this referenceSet.
However, it does not remove any of the ReadGroupSets or items that
refer to this ReferenceSet. These must be deleted before the
referenceSet can be remov... |
Inserts a the specified variantAnnotationSet into this repository.
def insertVariantAnnotationSet(self, variantAnnotationSet):
"""
Inserts a the specified variantAnnotationSet into this repository.
"""
analysisJson = json.dumps(
protocol.toJsonDict(variantAnnotationSet.getAn... |
Inserts a the specified callSet into this repository.
def insertCallSet(self, callSet):
"""
Inserts a the specified callSet into this repository.
"""
try:
models.Callset.create(
id=callSet.getId(),
name=callSet.getLocalId(),
va... |
Inserts a the specified variantSet into this repository.
def insertVariantSet(self, variantSet):
"""
Inserts a the specified variantSet into this repository.
"""
# We cheat a little here with the VariantSetMetadata, and encode these
# within the table as a JSON dump. These shoul... |
Inserts a the specified featureSet into this repository.
def insertFeatureSet(self, featureSet):
"""
Inserts a the specified featureSet into this repository.
"""
# TODO add support for info and sourceUri fields.
try:
models.Featureset.create(
id=featu... |
Inserts a the specified continuousSet into this repository.
def insertContinuousSet(self, continuousSet):
"""
Inserts a the specified continuousSet into this repository.
"""
# TODO add support for info and sourceUri fields.
try:
models.ContinuousSet.create(
... |
Inserts the specified Biosample into this repository.
def insertBiosample(self, biosample):
"""
Inserts the specified Biosample into this repository.
"""
try:
models.Biosample.create(
id=biosample.getId(),
datasetid=biosample.getParentContaine... |
Inserts the specified individual into this repository.
def insertIndividual(self, individual):
"""
Inserts the specified individual into this repository.
"""
try:
models.Individual.create(
id=individual.getId(),
datasetId=individual.getParentC... |
Inserts the specified phenotype annotation set into this repository.
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
"""
Inserts the specified phenotype annotation set into this repository.
"""
datasetId = phenotypeAssociationSet.getParentContainer().getId()
at... |
Inserts a the specified rnaQuantificationSet into this repository.
def insertRnaQuantificationSet(self, rnaQuantificationSet):
"""
Inserts a the specified rnaQuantificationSet into this repository.
"""
try:
models.Rnaquantificationset.create(
id=rnaQuantifica... |
Removes the specified rnaQuantificationSet from this repository. This
performs a cascading removal of all items within this
rnaQuantificationSet.
def removeRnaQuantificationSet(self, rnaQuantificationSet):
"""
Removes the specified rnaQuantificationSet from this repository. This
... |
Accepts a peer datamodel object and adds it to the registry.
def insertPeer(self, peer):
"""
Accepts a peer datamodel object and adds it to the registry.
"""
try:
models.Peer.create(
url=peer.getUrl(),
attributes=json.dumps(peer.getAttributes(... |
Remove peers by URL.
def removePeer(self, url):
    """Delete every peer row whose URL matches ``url``."""
    deleteQuery = models.Peer.delete().where(models.Peer.url == url)
    deleteQuery.execute()
Initialise this data repository, creating any necessary directories
and file paths.
def initialise(self):
"""
Initialise this data repository, creating any necessary directories
and file paths.
"""
self._checkWriteMode()
self._createSystemTable()
self._cr... |
Loads this data repository into memory.
def load(self):
"""
Loads this data repository into memory.
"""
self._readSystemTable()
self._readOntologyTable()
self._readReferenceSetTable()
self._readReferenceTable()
self._readDatasetTable()
self._readR... |
Populates the instance variables of this FeatureSet from the specified
DB row.
def populateFromRow(self, featureSetRecord):
"""
Populates the instance variables of this FeatureSet from the specified
DB row.
"""
self._dbFilePath = featureSetRecord.dataurl
self.set... |
Populates the instance variables of this FeatureSet from the specified
data URL.
Initialize dataset, using the passed dict of sources
[{source,format}] see rdflib.parse() for more
If path is set, this backend will load itself
def populateFromFile(self, dataUrl):
"""
Popu... |
find a feature and return ga4gh representation, use compoundId as
featureId
def getFeature(self, compoundId):
"""
find a feature and return ga4gh representation, use compoundId as
featureId
"""
feature = self._getFeatureById(compoundId.featureId)
feature.id = str... |
find a feature and return ga4gh representation, use 'native' id as
featureId
def _getFeatureById(self, featureId):
"""
find a feature and return ga4gh representation, use 'native' id as
featureId
"""
featureRef = rdflib.URIRef(featureId)
featureDetails = self._de... |
formulate a sparql query string based on parameters
def _filterSearchFeaturesRequest(self, reference_name, gene_symbol, name,
start, end):
"""
formulate a sparql query string based on parameters
"""
filters = []
query = self._baseQuery()
... |
return a location key from the locationMap
def _findLocation(self, reference_name, start, end):
"""
return a location key form the locationMap
"""
try:
# TODO - sequence_annotations does not have build?
return self._locationMap['hg19'][reference_name][start][end]... |
CGD uses Faldo ontology for locations, it's a bit complicated.
This function sets up an in memory cache of all locations, which
can be queried via:
locationMap[build][chromosome][begin][end] = location["_id"]
def _initializeLocationCache(self):
"""
CGD uses Faldo ontology for lo... |
Appends the specified protocolElement to the value list for this
response.
def addValue(self, protocolElement):
"""
Appends the specified protocolElement to the value list for this
response.
"""
self._numElements += 1
self._bufferSize += protocolElement.ByteSize(... |
Returns True if the response buffer is full, and False otherwise.
The buffer is full if either (1) the number of items in the value
list is >= pageSize or (2) the total length of the serialised
elements in the page is >= maxBufferSize.
If page_size or max_response_length were not set in... |
Returns a string version of the SearchResponse that has
been built by this SearchResponseBuilder.
def getSerializedResponse(self):
"""
Returns a string version of the SearchResponse that has
been built by this SearchResponseBuilder.
"""
self._protoObject.next_page_token ... |
Populates this Ontology using values in the specified DB row.
def populateFromRow(self, ontologyRecord):
    """Initialise this Ontology from a database record.

    Copies the id and data URL out of ``ontologyRecord`` and then
    loads the ontology contents from that URL.
    """
    self._dataUrl = ontologyRecord.dataurl
    self._id = ontologyRecord.id
    self._readFile()
Returns a GA4GH OntologyTerm object by name.
:param name: name of the ontology term, ex. "gene".
:return: GA4GH OntologyTerm object.
def getGaTermByName(self, name):
"""
Returns a GA4GH OntologyTerm object by name.
:param name: name of the ontology term, ex. "gene".
:r... |
Very heavy query: calls for the specified list of callSetIds
on chromosome 2 (11 pages, 90 seconds to fetch the entire thing
on a high-end desktop machine)
def _heavyQuery(variantSetId, callSetIds):
"""
Very heavy query: calls for the specified list of callSetIds
on chromosome 2 (11 pages, 90 secon... |
Returns (search result as JSON string, time elapsed during search)
def timeOneSearch(queryString):
"""
Returns (search result as JSON string, time elapsed during search)
"""
startTime = time.clock()
resultString = backend.runSearchVariants(queryString)
endTime = time.clock()
elapsedTime = e... |
Repeat the query several times; perhaps don't go through *all* the
pages. Returns minimum time to run backend.searchVariants() to execute
the query (as far as pageLimit allows), *not* including JSON
processing to prepare queries or parse responses.
def benchmarkOneQuery(request, repeatLimit=3, pageLimit=3... |
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
def getExceptionClass(errorCode):
"""
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
"""
classMap = {}... |
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
def toProtocolElement(self):
"""
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
"""
error = protocol.GAExcepti... |
Initialize new reference and perform checks.
def _init_goterm_ref(self, rec_curr, name, lnum):
"""Initialize new reference and perform checks."""
if rec_curr is None:
return GOTerm()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
self._die(msg, lnum) |
Initialize new typedef and perform checks.
def _init_typedef(self, typedef_curr, name, lnum):
"""Initialize new typedef and perform checks."""
if typedef_curr is None:
return TypeDef()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
self._die(msg, lnum... |
Add new fields to the current reference.
def _add_to_ref(self, rec_curr, line, lnum):
"""Add new fields to the current reference."""
# Written by DV Klopfenstein
# Examples of record lines containing ':' include:
# id: GO:0000002
# name: mitochondrial genome maintenance
... |
Update current GOTerm with optional record.
def update_rec(self, rec, name, value):
"""Update current GOTerm with optional record."""
# 'def' is a reserved word in python, do not use it as a Class attr.
if name == "def":
name = "defn"
# If we have a relationship, then we wi... |
Add new fields to the current typedef.
def _add_to_typedef(self, typedef_curr, line, lnum):
"""Add new fields to the current typedef."""
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2).split('!')[0].rstrip()
... |
Adds a term's nested attributes.
def _add_nested(self, rec, name, value):
"""Adds a term's nested attributes."""
# Remove comments and split term into typedef / target term.
(typedef, target_term) = value.split('!')[0].rstrip().split(' ')
# Save the nested term.
getattr(rec, na... |
Prepare to store data from user-desired optional fields.
Not loading these optional fields by default saves in space and speed.
But allow the possibility for saving these fields, if the user desires,
Including:
comment consider def is_class_level is_metadata_tag is_transit... |
Raise an Exception if file read is unexpected.
def _die(self, msg, lnum):
"""Raise an Exception if file read is unexpected."""
raise Exception("**FATAL {FILE}({LNUM}): {MSG}\n".format(
FILE=self.obo_file, LNUM=lnum, MSG=msg)) |
Write hierarchy for a GO Term record.
def write_hier_rec(self, gos_printed, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None,
depth=1, dp="-"):
"""Write hierarchy for a GO Term record.... |
Write hierarchy for all GO Terms in obo file.
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
... |
Write hierarchy for a GO Term.
def write_hier(self, GO_id, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None):
"""Write hierarchy for a GO Term."""
gos_printed = set()
self[GO_id].write_hie... |
Returns all possible paths to the root node
Each path includes the term given. The order of the path is
top -> bottom, i.e. it starts with the root and ends with the
given term (inclusively).
Parameters:
-----------
- term:
the id... |
draw AMIGO style network, lineage containing one query record.
def make_graph_pydot(self, recs, nodecolor,
edgecolor, dpi,
draw_parents=True, draw_children=True):
"""draw AMIGO style network, lineage containing one query record."""
import pydot
G = pydo... |
Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
def sqliteRowsToDicts(sqliteRows):
"""
Unpacks sqlite rows as returned by fetchall
into an array of si... |
Construct a SQL LIMIT clause
def limitsSql(startIndex=0, maxResults=0):
"""
Construct a SQL LIMIT clause
"""
if startIndex and maxResults:
return " LIMIT {}, {}".format(startIndex, maxResults)
elif startIndex:
raise Exception("startIndex was provided, but maxResults was not")
el... |
Returns rows of a sql fetch query on demand
def iterativeFetch(query, batchSize=default_batch_size):
"""
Returns rows of a sql fetch query on demand
"""
while True:
rows = query.fetchmany(batchSize)
if not rows:
break
rowDicts = sqliteRowsToDicts(rows)
for ro... |
Parses the specified pageToken and returns a list of the specified
number of values. Page tokens are assumed to consist of a fixed
number of integers seperated by colons. If the page token does
not conform to this specification, raise a InvalidPageToken
exception.
def _parsePageToken(pageToken, numValu... |
Attempts to parse the specified key in the specified argument
dictionary into an integer. If the argument cannot be parsed,
raises a BadRequestIntegerException. If the key is not present,
return the specified default value.
def _parseIntegerArgument(args, key, defaultValue):
"""
Attempts to parse t... |
Starts a new iteration.
def _initialiseIteration(self):
"""
Starts a new iteration.
"""
self._searchIterator = self._search(
self._request.start,
self._request.end if self._request.end != 0 else None)
self._currentObject = next(self._searchIterator, None)... |
Picks up iteration from a previously provided page token. There are two
different phases here:
1) We are iterating over the initial set of intervals in which start
is < the search start coorindate.
2) We are iterating over the remaining intervals in which start >= to
the search s... |
Returns the next (object, nextPageToken) pair.
def next(self):
"""
Returns the next (object, nextPageToken) pair.
"""
if self._currentObject is None:
raise StopIteration()
nextPageToken = None
if self._nextObject is not None:
start = self._getStar... |
Returns true when an annotation should be included.
def filterVariantAnnotation(self, vann):
"""
Returns true when an annotation should be included.
"""
# TODO reintroduce feature ID search
ret = False
if len(self._effects) != 0 and not vann.transcript_effects:
... |
Returns true when any of the transcript effects
are present in the request.
def filterEffect(self, teff):
"""
Returns true when any of the transcript effects
are present in the request.
"""
ret = False
for effect in teff.effects:
ret = self._matchAnyE... |
Tests whether a requested effect and an effect
present in an annotation are equal.
def _checkIdEquality(self, requestedEffect, effect):
"""
Tests whether a requested effect and an effect
present in an annotation are equal.
"""
return self._idPresent(requestedEffect) and ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.