content stringlengths 255 17.2k |
|---|
return self.n_splits
class RepeatedKFold:
"""
Repeated :class:`KFold` cross validator.
Repeats :class:`KFold` n times with different randomization in each
repetition.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number ... |
ermines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surp... |
_results[f"rank_test_{m}"][indices] = np.arange(len(indices), 0, -1)
best_index[m] = mean_test_measures.argmax()
best_params[m] = self.param_combinations[best_index[m]]
best_score[m] = mean_test_measures[best_index[m]]
best_estimator[m] = self.algo_class(**best_params... |
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used
to define the parameter search space. Deterministic behavior is however
... |
"""Code clone detection parent class, based on user input data,the class will detect similar code snippets in the python file """
class CodeCloneDetection:
#Constructor for base inputs
def __init__(self,rootdir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedd... |
calls above create_chroma_db() to create db.
"""
try:
documents = df.apply(lambda x: Document(page_content= x["code"], metadata= {"function_name": x["function_name"], "filepath": x["filepath"]}), axis=1)
#setup the chromadb
db,chroma_client = self.create_chroma_db(doc... |
vinci-03 chat model based code clone detection. --->")
code_clone_result = []
for task in code_clone_check_tasks:
response=self.code_clone_check_with_retry(task[0]["code"], task[1]["code"])
with concurrent.futures.ThreadPoolExecutor(max_workers= max_wo... |
.to_dict(orient="records"))
code_clone_check_tasks += res
code_clone_result = []
max_workers = min(len(code_clone_check_tasks), 100)
with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor:
llm_requests = {
... |
=============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confide |
deploy_dir, chunk_size):
self.files_dir = files_dir
self.deploy_dir = deploy_dir
self.chunk_size = chunk_size
try:
self.ccdreportpath = os.path.join(self.deploy_dir, "codeCloneReport")
os.makedirs(self.ccdreportpath, exist_ok = True)
except OSError as erro... |
aion_textsummary
outputStr = aion_textsummary(config_json_filename)
#scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','bin','aion_text_summarizer.py'))
#outputStr = subprocess.check_output([sys.executable, scriptPath, config_json_filename])
#out... |
']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']... |
'classification' and configSettings['basic']['algorithms']['classification']['Naive Bayes'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] = eval(request.POST.get('classification_GaussianNB'))
if proble... |
classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (LSTM)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] = eval(
request.POST.... |
Json['advance']['profiler']['normalization'][k] == 'True':
updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = k
break
#---------------- default Hypermarameter changes--- ----------Usnish--------------
... |
'LinearRegression'] = \\
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][
'Linear Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'][
'DecisionTree'] ... |
if problem_type not in ['classification','regression']:
break
if problem_type == 'objectDetection':
from AION import pretrainedModels
ptmObj = pretrainedModels()
obModels = ptmObj.get_info(selectAlgo)
else:
obModels = {}
... |
LLaMA-2-Chat']
configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \\
configSettingsJson['basic']['modelSize']... |
ciption
if mltrain == 'training':
dataFile = request.POST.get('trainfilePath')
if(os.path.isfile(dataFile) == False) or dataFile=="":
context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'error3':'error3','error1': 'Please e... |
ings.filterwarnings('ignore')
## Main class to find out seassonality and stationary in timeseries data.
class StationarySeasonalityTest:
def __init__(self,df,featurename,datetimefeature):
self.df=df
self.targetFeature=featurename
self.datetimefeature=datetimefeature
## to get th... |
FeaturesList.append('Max Tokens')
if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997
if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na':
inputFeaturesList.insert(0,configSettingsJso... |
casting': #task 11997
Results['prediction'] = json.dumps(data)
singlePredictionResults.append(Results)
elif problem_type == 'stateTransition':
if str(data['Anomaly']) == 'False':
Results['prediction']... |
if not reference_generation:
reference_generation = ''
prompt = pd.DataFrame([{'prompts':prompts, 'reference_generation':reference_generation}])
prompt.to_csv(dataFile, index=False)
hypervisor, instanceid, region, image = get_instance(usecasename)
... |
):\\n'
lfs_main_func += '\\timport numpy as np\\n'
lfs_main_func += '\\tABSTAIN = -1\\n'
lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n'
lfs = '\\treturn np.where('
for condition in rule["conditions"]:
if "string" in condition["sel_datatype"]:
... |
size_take = num_records
display_df = df.sample(n=size_take)
weightage = np.around(label_model.get_weights(), 2)
rule_name_list = get_rule_name_list(rule_list)
analysis_df = LFAnalysis(l_data, lfs).lf_summary()
analysis_df["Rule"] = analysis_df.index
analysis_df["Rule"] = analysis_df["Rule... |
SUCCESS',Version=version)
modelid = models[0].id
p = Existusecases.objects.get(id=modelid)
deployPath = str(p.DeployPath)
if os.path.isdir(os.path.join(deployPath,'publish','package')):
for f in os.listdir(os.path.join(deployPath,'publish','package')):
if f.e... |
processed = pd.read_csv(dataFilePath)
if 'targetFeature' != '':
target_classes = df_proprocessed[targetFeature].unique()
numberofclasses = len(target_classes)
else:
target_classes = []
numberofclasses = 'Not Available'
dataPoints = df_proprocess... |
2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
t... |
= compute.readComputeConfig()
modelID = request.POST.get('modelID')
p = Existusecases.objects.get(id=modelID)
usecasename = p.ModelName.UsecaseName
usecaseid = p.ModelName.usecaseid
runningStatus,pid,ip,port = installPackage.checkModel |
<s> import pandas as pd
import numpy as np
def get_leaderboard(file_content):
matched_lines = [line.replace('Model:-', '') for line in file_content.split('\\n') if "Model:-" in line]
df = pd.DataFrame(columns = ['Model', 'Iterations', 'Score (%)', 'Score Type', 'Best Score (%)'])
import re
try:
... |
= values[1]
projectID = values[2]
machineType = values[3]
selectedID = values[4]
regionName = values[5]
noOfInstance = values[6]
workLoad = values[7]
|
False'
elif model in ["Neural Architecture Search"]:
model.xplain = 'False'
model.flserversupport = 'False'
model.onlinelerningsupport = 'False'
supportedmodels = ["Logistic Regression","N... |
_bucket(),'gcsbuckets':get_gcs_bucket(),'usecasetab':usecasetab,'azurestorage':get_azureStorage(),
'ModelStatus': model_status, 'ModelVersion': ModelVersion, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure}
return status,context,'upload.html'
els... |
return "Seventh Grade"
elif 8 < readability_value <= 9:
## Grade level Fifth grade to Ninth grade
return "Eighth Grade"
elif 9 < readability_value <=10:
## Grade level Fifth grade to Ninth grade
return "Ninth Grade"
elif 10 < readability_value <=11:
## Grade level Fif... |
from langchain.text_splitter import RecursiveCharacterTextSplitter
loader = PyPDFLoader(filename)
pages = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50)
texts = text_splitter.split_documents(pages)
return(texts)
... |
userid = get_graviton_data()
gravitonURL = graviton_url
gravitonUserId = graviton_userid
# url = 'https://xenius.azurewebsites.net/api/getdata?userid=1&dataserviceid='+str(dataserviceId) +'&metadataid=' +str(metadataId)
... |
time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
df.to_csv(dataFile, index=False,sep=delimiter,quotechar=textqualifier)
request.session['datalocation'] = str(dataFile)
else:
... |
otherApps['modeltracking'] = 'Not Running'
#nooftasks = getTasks('AION_Consumer')
if len(consumerlist):
otherApps['consumer'] = 'Running'
else:
otherApps['consumer'] = 'Not Running'
#nooftasks = getTasks('AION_Service')
if len(servicelist):
otherApps... |
.get('usecasetab')+'",No_of_Permissible_Features_EDA="'+request.POST.get('edefeatures')+'",telemetryOptOut="'+request.POST.get('telemetryOptOut')+'"'
print(updated_data)
sqlite_obj.update_data(updated_data,'settingsid=1','settings')
return request.POST.get('usecasetab')
except Exception as e... |
def get_edaGraph(request):
if request.session['datatype'] == 'Normal':
from appbe.eda import ux_eda
df_temp = dict(request.GET).get('features[]')
graphType = request.GET.get('graphType')
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')... |
size=(10,5))
for i in enumerate(feature):
dataType = dataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
dataframe[i[1]] = dataframe[i[1]].cat.codes
datafram... |
[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount <= aftercheckcount):
return True
#####DD-MM-YYYY HH:MM####
check2 = data[data.str.match(
r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9... |
def compilepl(self, targetPath=str()):
    """Compile the configured MLOps pipeline into a deployable package file.

    Args:
        targetPath: optional output directory; when empty the compiled
            file is written to the current working directory.
    """
    # Output file name is parameterized by the lower-cased container label.
    package_name = self.fileName.format(self.containerLabel.lower()) + self.fileExt
    if targetPath != str():
        # Redirect output into the caller-supplied directory.
        package_name = Path(targetPath, package_name)
    kfp.compiler.Compiler().compile(self.aion_mlops, str(package_name))
def executep... |
"r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
temp = {}
# Retraing settings changes
# -------- S T A R T --------
prbType = request.POST.get('ProblemType')
... |
configSettingsJson['basic']['output']['profilerStage'] = 'True'
configSettingsJson['basic']['output']['selectorStage'] = 'True'
for key in configSettingsJson['advance']['profiler']['textConversionMethod']:
configSettingsJson['advance']['profil... |
featureOperation['type'] = 'date'
featureOperation['fillMethod'] = 'na'
featureOperation['categoryEncoding'] = 'na'
elif x in textFeature:
featureOperation['type'] = 'text'
featureOperation['fillMethod'] = 'na... |
context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'Config Error: '+str(e)}
return context<s> '''
*
* =============================================================================
* COPYRIGH... |
driver=Ingres;servertype=ingres;server=@"+str(server_url)+",tcp_ip,VW;uid="+str(username_actian)+";pwd="+str(password_actian)+";database="+str(database_actian))
print("connected")
return conn
def getDataFromActianAvalanche(request):
server_url = request.POST.get('server_url')
username_actian =... |
'logfilepath':logfilepath}
config = json.dumps(config)
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py'))
if platform.system() == 'Windows':
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','runp... |
= 'Not Defined'
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelStatus' in request.session:
ModelStatus = request.session['ModelStatus']
else:
|
itask-qa-qg", model="valhalla/t5-base-qa-qg-hl")
for _text in docs:
res = nlp(_text)
print(res)
extracted_QnAList.extend(res)
for _record in extracted_QnAList:
extracted_QnA.append({'question': _record['question'], 'answer': _record['answ... |
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def delete_record(self,table_name,col_name, col_value):
try:
query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'"
self.conn.execute(query)
self.conn.commit()
return 'success'
except Exception as e :
print(st... |
Type == 'object':
'''
numOfRows = self.dataFrame.shape[0]
distinctCount = len(self.dataFrame[feature].unique())
tempDff = self.dataFrame[feature]
self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x))
... |
_text
df_text_list = df_text.values.tolist()
comment_words = ""
for val in df_text_list:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens) + " "
... |
return True
def delete_record(self,table_name,col_name, col_value):
    """Delete every row of `table_name` whose `col_name` equals `col_value`.

    Args:
        table_name: name of the table to delete from (must be a trusted
            identifier -- identifiers cannot be bound as SQL parameters).
        col_name: column compared against `col_value` (trusted identifier).
        col_value: value to match; bound as a query parameter.

    Returns:
        str: 'success' when the delete committed, 'error' on any failure.
    """
    try:
        # Bind the value instead of interpolating it into the SQL string:
        # prevents SQL injection and fixes quoting bugs for values that
        # contain single quotes. Table/column names still have to be
        # formatted in, since placeholders only work for values.
        query = f"DELETE FROM {table_name} WHERE {col_name}=?"
        self.conn.execute(query, (col_value,))
        self.conn.commit()
        return 'success'
    except Exception as e :
        print(str(e))
        print("Deletion Failed")
        return 'error'
def get_d... |
print("Something went wrong "+str(e))
return table_field_obj
def get_data(connection_string,table):
engine = db.create_engine(connection_string)
connection = engine.connect()
metadata = db.MetaData()
metadata.reflect(engine)
table = db.Table(table,metadata, autoload=True, autoload_with=engine)
query = db.se... |
x
performance = {}
for y in matrix[x]:
performance[y] = matrix[x][y]
fmatrix['performance'] = performance
training_matrix.append(fmatrix)
testmatrix = resultJsonObj['data']['testmatrix']
testing_matrix = []
for x in testmatrix:
... |
SyncingTime = int(datetime.timestamp(now))
updated_data = '"state"="'+state+'","syncingTime"="'+str(SyncingTime)+'"'
sqlite_obj.update_data(updated_data,'ID="1"','syncState')
except Exception as e:
print(e)
pass
def checkTelemtry():
import subprocess
import sys
scriptPath... |
===========================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this fil |
dictionary.get("number_samples")
number_numerical_features = dictionary.get("number_numerical_features")
number_categorical_features = dictionary.get("number_categorical_features")
missing_proportion = dictionary.get("missing_proportion")
number_informative = dictionary.get("number_infor... |
isRan = True
elif 'list' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isList = True
elif '[' and ']' in paramSpace[j]:
paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "")
isList = True
x = paramSpace[j].split(',')
except:
... |
ion_matrix
from profiler.imageAug import ImageAugmentation
from pathlib import Path
class ImageLearning:
def __init__(self,dataFrame,input_directory,outputdir,modelname,hyperParam, AugEnabled,keepAugImages,operations,augConf):
self.image_list = dataFrame
self.input_directory = input_directory
self.outputdir = ... |
]) + np.sum(-1 * structdis[structdis < 0])
# calculate left sigma variance and right sigma variance
lsigma_best = np.sqrt((negsqsum/negcount))
rsigma_best = np.sqrt((possqsum/poscount))
gammahat = lsigma_best/rsigma_best
# total number of pixels - totalcount
totalcount = structdis.shape[1... |
s | ' % 'Smallest Image', min_key)
print('%-30s | ' % 'Largest Image', max_key)
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'Mean Width', int(np.mean(width_list)))
print('%-30s | ' % 'Mean Height', int(np.mean(height_list)))
... |
testY):
#objClf = aion_matrix()
try:
score = 0
self.paramDictConvertion()
if self.modelName=="LinearRegression":
from sklearn import linear_model
estimator = linear_model.LinearRegression()
if self.modelName=="Lasso":
... |
= (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")
client.get(str(tarFile), str(Path(localPath)/tarFile.name))
except:
raise
return str(Path(localPath)/tarFile.n |
dims, n_timesteps, n_bottleneck,units,activation,df):
# inputs = Input(shape = (n_timesteps, n_dims))
inputs = Input(shape = (df.shape[1], df.shape[2]))
e = keras.layers.LSTM(units, activation = activation, return_sequences = True)(inputs)
## code layer or compressed form... |
s))
# y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_dims))
model_hist = autoencoder.fit(
X_train, X_train,
epochs=epochs,
batch_size=batch_size,
validation_split=0.1,
... |
name=df.columns
feature_name = ' '.join(map(str, feature_name))
try:
#Passing whole data,so test size set as zero.
test_size=0.0
# train_size=1-test_size
train_size=1-test_size
... |
)/(max_val - min_val)
test_data = (test_data - min_val)/(max_val - min_val)
#converte the data into float
train_data = tf.cast(train_data, dtype=tf.float32)
test_data = tf.cast(test_data, dtype=tf.float32)
return train_data,test_data
## Scaling data ,Not use... |
log.info("train_data info: \\n"+str(train_data.info()))
if (num_of_classes >= 2):
# scaler = StandardScaler()
# train_data = scaler.fit_transform(train_data)
# test_data = scaler.fit_transform(test_data)... |
def merge_pre_post_dfs(self,out_df=None):
cwd=self.deployLocation
anomaly_algorithm=str(self.anomalyMethod)
try:
in_path=os.path.normpath(os.path.join(cwd,'data'))
if not os.path.isdir(in_path):
self.log.info("<---- Anomaly detection target... |
->usecase->data directory
# df=self.read_inputdata()
## Below line overwrite incoming df with postprocesseddata
self.log.info("<----------- In autoencoder based anomaly detection algorithm main process module, the incoming datafra |
join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
pipeline_config = str(modelPath/self.modelDirName/"pipeline.config")
checkPoint = "ckpt-0"
with open(str(modelPath/self.modelDirName/"checkpoint/checkpoint")) as f:
line = f.readline()
checkPoint = line.split(':')[1].strip()[1:-1] #(model_check... |
:
bestScore =score
bestModel =model
bestParams=modelParams
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
self.log.i... |
object=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%'))
self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) ... |
unqClassLst = list(setOfyTrue)
if(str(labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
targetnames=[]
for... |
xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
else:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
else:
if(bestthreshold == -1):
if tscore > btscore or btscore == -0xFFFF:
cm... |
")
self.log.info(1-metrics.precision_score(targs, preds))
self.log.info("-------> recall for outliers ---> ")
self.log.info(1-metrics.recall_score(targs, preds))
self.log.info("-------> f1 for outliers--->")
... |
(featureData,axis=0),d).reshape(1, -1), 2, return_distance=True)
ujd.append(u_dist[0][1])
if isinstance(featureData.iloc[rand_X[j]].values, pd.core.arrays.sparse.array.SparseArray):
featureData_reshaped = np.asarray(featureData.iloc[rand_X[j]].values).reshape(1, -1)
else:
featureData_reshaped = feature... |
balfeatureData, baltargetData= tLinks.fit_resample(featureData, targetData)
#Added for checking balancing act by the algorithm.
counter = Counter(baltargetData)
self.log.info("Class counter:\\t"+str(baltargetData.value_counts()))
max_class = max(counter,key=counter.get)
max_value = max(counter.values()... |
elling'):
scoreParam = self.setScoreParams(scoreParam,modelType,categoryCountList)
if len(topFeatures) > 0:
self.log.info('\\n-------------- Training ML: Top/StatisticalBased Features Start --------------')
modelbasedon = 'StatisticalBased'
if featureEngineeringSelector.lower() == 'true':
self.log.... |
_roccurve(ytest,predictedData_fit,labelMaps,imageFolderLocation)
else:
df_test['actual'] = ytrain
df_test['predict'] = trainPredictedData
elif(model_type == 'Regression'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.get_regres... |
Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
import os
import sys
import numpy as np
from numpy import arange
from numpy import argmax
import json
from sklear... |
bestrecallscore = brscore
bestprecisionscore = bpscore
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
self.log.info('Status:- |... Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n')
continue
paramSpace=self.params[modelName].copy()
algoName... |
self.log.info('----------> Testing Score: '+str(score))
try:
if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ):
self.log.info('-----> Model Uncertainty Not Supported')
else:
uqObj=aionUQ(None... |
Space[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isRan = True
elif 'list' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isList = True
elif '[' and ']' in paramSpace[j]:
paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "")
is... |
word'])
def dataClassifyWithKw(sentences, keywords):
    """Label each sentence by whether it matches any of the keywords.

    Args:
        sentences: iterable of strings, one per output row.
        keywords: iterable of keyword patterns, treated as regex
            alternatives ('|'-joined).

    Returns:
        pandas.DataFrame with a 'File' column (the sentences) and a
        boolean 'Label' column (True when any keyword matches).
    """
    # Join the keywords into a single alternation pattern up front.
    keyword_regex = '|'.join(keywords)
    frame = pd.DataFrame(sentences, columns=['File'])
    frame['Label'] = frame['File'].str.contains(keyword_regex)
    return frame
def to_dataframe(data_loc, keywords, pretrained_type, embedding_size=300, deploy_loc=None, train=Tr... |
, fs.FeatureNameStatistics, fs.DatasetFeatureStatisticsList,
fs.Histogram)
<s> # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ... |
options=None,
serialized_start=636,
serialized_end=685,
)
_sym_db.RegisterEnumDescriptor(_FEATURENAMESTATISTICS_TYPE)
_HISTOGRAM_HISTOGRAMTYPE = _descriptor.EnumDescriptor(
name='HistogramType',
full_name='featureStatistics.Histogram.HistogramType',
filename=None,
file=DESCRIPTOR,
values=[
_descript... |
,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='featureStatistics.Numeri... |
, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_num_values', full_name='featureStatistics.CommonStatistics.max_num_values', inde... |
ISTIC.fields_by_name['histogram'].message_type = _HISTOGRAM
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append(
_CUSTOMSTATISTIC.fields_by_name['num'])
_CUSTOMSTATISTIC.fields_by_name['num'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val']
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append(
_CUSTOMSTAT... |
generic data."""
def __init__(self, fs_proto, datasets_proto, histogram_proto):
    """Store the protobuf handles used to build feature statistics.

    Args:
        fs_proto: feature-statistics proto handle (presumably the
            FeatureNameStatistics family seen nearby in this file --
            TODO confirm against the caller).
        datasets_proto: dataset-list proto handle (presumably
            DatasetFeatureStatisticsList -- verify).
        histogram_proto: histogram proto handle (presumably Histogram
            -- verify).
    """
    self.fs_proto = fs_proto
    self.datasets_proto = datasets_proto
    self.histogram_proto = histogram_proto
def ProtoFromDataFrames(self, dataframes,
histogram_categorical_levels_count=None):
"""Crea... |
.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(2, len(test_data.features))
if test_data.features[0].name == 'testFeatureInt':
numfeat = test_data.features[0]
stringfeat = test_data.features[1]
else:
numfeat = test_data.features[1]
stringfeat = test_data.feat... |
.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 2, 25, 1)
def testParseExampleSequenceFeatureListMultipleEntriesOuter(self):
# Tests parsing examples of integers in context field
examples = []
for i in range(... |
.append(example)
for i in range(3):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hi')
examples.append(example)
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hey')
examples.append(example)
entries = ... |
"""
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(record)
self._ParseExample(sequence_example.context.feature,
... |
='' and datetimeFeature!='NA'):
self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' |
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\\n'
if learner_type != 'DL':
self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1).rou... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.