| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
import os
import glob2
import numpy as np
import pandas as pd
import tensorflow as tf
from skimage.io import imread
# /datasets/faces_emore_112x112_folders/*/*.jpg'
default_image_names_reg = "*/*.jpg"
default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))
def pre_process_folder(data_p... | pd.value_counts(image_classes) | pandas.value_counts |
# author: <NAME>
# date: 2021-12-04
'''This script generates the correlation heat map of the
transformed data
Usage: eda.py --file_path=<file_path> --out_dir=<out_dir>
Options:
--file_path=<file_path> Path to the data file
--out_dir=<out_dir> Path (directory) to save the images
'''
import os
import panda... | pd.concat((y_train, X_train), axis=1) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
impo... | tm.box_expected([True, False, False], xbox) | pandas._testing.box_expected |
import numpy as np
import pandas as pd
from glob import glob
import os
import sys
from morphomnist import io
def find_data(dirs):
''' glob the different data from the main path '''
data = [[path for path in glob(os.path.join(path_i,'*.*'))] for path_i in dirs]
return data
def merge_datasets(list_paths, d... | pd.read_csv(path) | pandas.read_csv |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""----Definición de las librerías requeridas para la ejecución de la aplicación---"""
from flask import Flask, request, render_template #Interfaz gráfica WEB
##from flask_socketio import SocketIO
from werkzeug.utils import secure_filename # Encrypt ... | pd.DataFrame(datos_temp) | pandas.DataFrame |
from scrapers import scraper_modules as sm
from bs4 import BeautifulSoup
import pandas as pd
wta_link = 'https://www.wta.org/go-outside/hikes?b_start:int='
def get_list_of_peak_info(html: str):
html_soup = BeautifulSoup(html, 'html.parser')
a_tags = html_soup.find_all('a', attrs={'class': 'listitem-title'})
... | pd.DataFrame(peaks_dict) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
from sklearn import mixture
from rnaseq_lib.math.dists import name_from_dist, DISTRIBUTIONS
# Outlier
def iqr_bounds(ys):
"""
Return upper and lower bound for an array of values
Lo... | pd.DataFrame(rows, columns=['Name', 'KS-stat', 'Pvalue']) | pandas.DataFrame |
import os
import random
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import torch
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from scripts.ssc.evaluation.ml... | pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels}) | pandas.DataFrame |
# Lint as: python3
"""Tests for main_heatmap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import main_heatmap
import numpy as np
import pandas as pd
SAMPLE_LOGS_LINK = 'https:/... | pd.Series(['2020-04-21', '2020-04-20', '2020-04-19']) | pandas.Series |
import numpy as np
import pandas as pd
import datetime as dt
import pickle
import bz2
from .analyzer import summarize_returns
DATA_PATH = '../backtest/'
class Portfolio():
"""
Portfolio is the core class for event-driven backtesting. It conducts the
backtesting in the following order:
1. Initializati... | pd.Series() | pandas.Series |
import datetime
import pandas as pd
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def plot_team(team):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
dates = pd.to_datetime(g[(g["team_id"] == team)]["date"])
elo = g... | pd.Series(start_elo) | pandas.Series |
"""
Utility functions for gene annotation
"""
import logging
import re
import urllib
from io import StringIO
import pandas as pd
def cog2str(cog):
"""
Get the full description for a COG category letter
Parameters
----------
cog : str
COG category letter
Returns
-------
str
... | pd.read_csv(gff, sep="\t", skiprows=skiprow, names=names, header=None) | pandas.read_csv |
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from datetime import date
import dash_loading_spinners as dls
from dash.dependencies import Input, Output, ClientsideF... | pd.to_datetime(data['Time']) | pandas.to_datetime |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_... | tm.assert_index_equal(res, exp) | pandas.util.testing.assert_index_equal |
# tests.test_regressor.test_residuals
# Ensure that the regressor residuals visualizations work.
#
# Author: <NAME> <<EMAIL>>
# Created: Sat Oct 8 16:30:39 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_residuals.py [7d3f5e6] <EMAIL> $
"""
Ensure that th... | pd.DataFrame(data[features]) | pandas.DataFrame |
from simio_lisa.simio_tables import *
import logging
import pandas as pd
import os
import plotly.express as px
from plotly.offline import plot
import time
from abc import ABC, abstractmethod
class SimioPlotter(ABC):
def __init__(self,
output_tables,
logger_level: int = logging.IN... | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import glob
import click
from pathlib import Path
from eye_tracking.preprocessing.functions.et_preprocess import preprocess_et
from eye_tracking.preprocessing.functions.detect_events import make_fixations, make_blinks, make_saccades
import warnings
warnings.filterwarn... | pd.concat([df_events_all, df_events]) | pandas.concat |
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomTreesEmbedding as rte
from sklearn.cluster.hierarchical import AgglomerativeClustering as hac
import math
import warnings
import random
import networkx as nx
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
import pandas ... | pd.read_csv("./matrix_uet.csv", delimiter="\t", header=None) | pandas.read_csv |
import numpy as np
import pandas as pd
# from scipy.stats import gamma
np.random.seed(181336)
number_regions = 5
number_strata = 10
number_units = 5000
units = np.linspace(0, number_units - 1, number_units, dtype="int16") + 10 * number_units
units = units.astype("str")
sample = pd.DataFrame(units)
sample.rename(c... | pd.merge(sample, area_type, on="cluster_id") | pandas.merge |
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for data extraction of text.
The most important function in the script is about word frequency statistics.
Using re, I generalized the process in words counting, regardless of any preset
word segmentation. Besides, ... | pd.Series(did) | pandas.Series |
import os
import logging
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.convolution import convolve_fft, Gaussian2DKernel, convolve
from astropy.coordinates import SkyCoord, Angle
from astropy.io import fits
from astropy.table import Table
from regions import CircleSkyRegion
import d... | pd.read_table(path_models, delim_whitespace=True, header=0) | pandas.read_table |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage import io, filters, feature
import sys
from bisect import bisect_left
import time as time
from tqdm.auto import tqdm
# -------------------------------
# Functions
def apply_gaussian_filter(fluxes, sigma):
return filters.gaussian(ima... | pd.DataFrame(df) | pandas.DataFrame |
"""
Copyright (c) 2021, Stanford Neuromuscular Biomechanics Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this li... | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Custom data classes that subclass `vectorbt.data.base.Data`."""
import time
import warnings
from functools import wraps
import numpy as np
import pandas as pd
from tq... | pd.to_datetime(df['Close time'], unit='ms', utc=True) | pandas.to_datetime |
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
from batchglm.api.models.tf1.glm_nb import Simulator
import diffxpy.api as de
class TestConstrained(unittest.TestCase):
def test_forfatal_from_string(self):
"""
Test if _from_string interface is wo... | pd.DataFrame(data=dmat, columns=coefficient_names) | pandas.DataFrame |
"""
Once the CSV files of source_ids, ages, and references are assembled,
concatenate and merge them.
Date: May 2021.
Background: Created for v0.5 target catalog merge, to simplify life.
Contents:
AGE_LOOKUP: manual lookupdictionary of common cluster ages.
get_target_catalog
assemble_initial_source_list
... | pd.read_csv(metapath) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from sys import argv
dates=("2020-04-01", "2020-04-08", "2020-04-15", "2020-04-22",
"2020-04-29" ,"2020-05-06", "2020-05-13","2020-05-20", "2020-05-27", "2020-06-03",
"2020-06-10", "2020-06-17", "2020-06-24", "2020-07-01", "2020-07-08",
... | pd.to_datetime(start_date,format='%Y-%m-%d') | pandas.to_datetime |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
... | Timestamp('2000-01-15 00:15:00', tz='US/Central') | pandas.Timestamp |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_cash_flow.py
@time: 2019-05-30
"""
import gc, six
import json
import numpy as np
import pandas as pd
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from u... | pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code") | pandas.merge |
#!pip install fitbit
#!pip install -r requirements/base.txt
#!pip install -r requirements/dev.txt
#!pip install -r requirements/test.txt
from time import sleep
import fitbit
import cherrypy
import requests
import json
import datetime
import scipy.stats
import pandas as pd
import numpy as np
# plotting
import matplotli... | pd.date_range('2017-12-23', '2018-01-25') | pandas.date_range |
import pandas as pd
import sys
def main(argv):
if len(argv) < 2:
print('Not enough arguments provided.')
return
in_dfs = []
for input_file in sys.argv[1:-1]:
in_dfs.append(pd.read_csv(input_file))
out_df = | pd.concat(in_dfs) | pandas.concat |
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import pandas as pd
import multiprocessing
import textwrap
import matplotlib.pyplot as plt
import lhsmdu
import glob
import json
import os
import ast
import shutil
import subprocess
from contextlib import contextmanager
import param_util as pu
import outp... | pd.read_csv(sample_matrix_path) | pandas.read_csv |
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random
from math import sqrt
from datetime import datetime,timedelta
from pytz import timezone
from time import time
from collections import deque
from IPython.display import clear_output
from statsmodels.tools.eval_mea... | pd.read_csv("DepTotalFlights.csv",index_col=0) | pandas.read_csv |
from mock import patch
from ebmdatalab import bq
from pandas import DataFrame
import tempfile
import pytest
import os
def test_fingerprint_sql():
input_sql = 'select *, "Frob" from x -- comment\n' "where (a >= 4);"
same_sql_different_caps = 'SELECT *, "Frob" from x -- comment\n' "where (a >= 4);"
same_sql_... | DataFrame([{"a": 2}]) | pandas.DataFrame |
import pandas as pd
import bitfinex
from bitfinex.backtest import data
# old data...up to 2016 or so
btc_charts_url = 'http://api.bitcoincharts.com/v1/csv/bitfinexUSD.csv.gz'
df = pd.read_csv(btc_charts_url, names=['time', 'price', 'volume'])
df['time'] = | pd.to_datetime(df['time'], unit='s') | pandas.to_datetime |
"""Run the model calibration"""
# Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import os
import argparse
import multiprocessing
import resource
import time
import inspect
# External libraries
from datetime import datetime
import pandas as pd
import numpy a... | pd.DataFrame(index=[0]) | pandas.DataFrame |
import pandas as pd
from scoreware.race.utils import get_last_name
def parse_general(df, headers, id):
newdf= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/5/10 15:19
describe: please describe the purpose of this file
"""
import glob
import traceback
import pandas as pd
from tqdm import tqdm
from czsc.traders.advanced import CzscAdvancedTrader
from czsc.utils import dill_load
from czsc.objects import cal_break_even_point
class... | pd.DataFrame(_results) | pandas.DataFrame |
from os import link
import flask
from flask.globals import request
from flask import Flask, render_template
# library used for prediction
import numpy as np
import pandas as pd
import pickle
# library used for insights
import json
import plotly
import plotly.express as px
app = Flask(__name__, template_folder = 'templ... | pd.read_csv('online_shoppers_intention.csv') | pandas.read_csv |
from __future__ import print_function
# from builtins import str
# from builtins import object
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import os
from .data_utils import make_dir
class XlsxRecorder(object):
"""
xlsx recorder for results
including two recorder: one for curre... | pd.Index([self.expr_name+self.folder_name]) | pandas.Index |
import pyodbc
import pandas as pd
from patientKG import *
import holoviews as hv
from holoviews import opts
from bokeh.plotting import show
import panel as pn
import networkx as nx
from ..config.bedrock_connection import *
from ..priorKnowledge import labturnaround
from patientKG import utils_pickle
from PU.pu_events i... | pd.read_sql_query('SELECT * FROM [AdvancedAnalytics].[dbo].[Patient_Episode_Ward_Stay] where ACTIVITY_IDENTIFIER = '+ item +' order by ACTIVITY_IDENTIFIER, CE_EPISODE_NUMBER, WARD_STAY_ORDER',Red004_Conn) | pandas.read_sql_query |
import torch
import numpy as np
from torch.utils import data
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
from time import time
class Dataset:
def tag2tok(self, tags):
if pd.isnull(tags):
return np.nan
tok_tags = [self.tag_vocab["<s>"]]
for... | pd.Timestamp(2016, 11, 21) | pandas.Timestamp |
#%%
path = '../../dataAndModel/data/o2o/'
import os, sys, pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
from sklearn.linear_model import SGDClassifier, LogisticRegression
dfoff = pd.read_csv(path+'ccf_offline_stage1_train.csv')
dftest = pd.read_csv(path+'ccf_... | pd.Timedelta(15, 'D') | pandas.Timedelta |
# Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# lightgbm for classification
from numpy import mean
from numpy import std
#from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_va... | pd.get_dummies(data, columns=columns_names_encod) | pandas.get_dummies |
"""Module is for data (time series and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import pandas as pd
def validate_series(
ts: Union[pd.Series, pd.DataFrame],
check_freq: bool = True,
check_categorical: bool = False,
) -> Union[pd.Ser... | pd.get_dummies(ts) | pandas.get_dummies |
import os
import pickle
import numpy as np
import pandas as pd
def aggregate_meta_info(exp_dir):
files = [os.path.join(exp_dir, f) for f in os.listdir(exp_dir) if 'meta_info' in f]
df = | pd.DataFrame(columns=['pid', 'class_target', 'spacing']) | pandas.DataFrame |
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["... | pd.read_csv(out_file,delim_whitespace=True) | pandas.read_csv |
"""
Functions for converting object to other types
"""
import numpy as np
import pandas as pd
from pandas.core.common import (_possibly_cast_to_datetime, is_object_dtype,
isnull)
import pandas.lib as lib
# TODO: Remove in 0.18 or 2017, which ever is sooner
def _possibly_convert_objec... | to_timedelta(values, coerce=True) | pandas.tseries.timedeltas.to_timedelta |
import pandas as pd # DataFrame Library
import tensorflow as tf # Tensorflow, library to develop and train ML models
import matplotlib.pyplot as plt # Plotting Library
from Models.myanfis import ANFIS # ANFIS model from: https://... | pd.read_csv("winequality-red.csv") | pandas.read_csv |
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team and 2021 Zilliz.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unles... | pd.read_csv(self.label_file) | pandas.read_csv |
"""Unittests for the map module."""
import unittest
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pygeos
import pyproj
import geopandas as gpd
import shapely.wkt
import numpy.testing as npt
import gnssmapper.common as cm
import gnssmapper.geo as geo
class TestObservationMethods(unittest.... | pdt.assert_frame_equal(self.map_box,reverted,check_dtype=False,atol=0.1,rtol=0.1) | pandas.testing.assert_frame_equal |
# networkx experimentation and link graph plotting tests
# not in active use for the search engine but left here for reference
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import sqlite3
from nltk import FreqDist
from networkx.drawing.nx_agraph import graphviz_layout
import spacy
nlp = s... | pd.read_csv("data/all_links.csv") | pandas.read_csv |
import logging
from typing import List
import numpy as np
import pandas as pd
from cuchemcommon.data import GenerativeWfDao
from cuchemcommon.data.generative_wf import ChemblGenerativeWfDao
from cuchemcommon.fingerprint import Embeddings
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.workflow imp... | pd.DataFrame({'transformed_smiles': [smiles[idx], smiles[idx + 1]]}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# #### Importing dataset
# 1.Since data is in form of excel file we have to use pandas read_excel to load the data
# 2.After loading it is important to check null valu... | pd.get_dummies(categorical['Destination'], drop_first=True) | pandas.get_dummies |
import zipfile
import os
import numpy as np
import pandas as pd
from pathlib import Path
__version__ = '0.155'
try:
from functools import lru_cache
except (ImportError, AttributeError):
# don't know how to tell setup.py that we only need functools32 when under 2.7.
# so we'll just include a copy (*bergh*)... | pd.to_numeric(x, errors="raise") | pandas.to_numeric |
import shlex
import os
import sys
import subprocess
import json
import pprint
import numpy as np
import pandas as pd
APPEND = '0ms'
if len(sys.argv) == 3:
APPEND = sys.argv[2]
LOG_BASE_DIR = '../logs/'
LOG_DIR = f'{LOG_BASE_DIR}/kem_{APPEND}'
PKL_DIR = './pkl/kem'
def parse_algo(l):
split = l.split('_')
ts =... | pd.read_pickle(f"{PKL_DIR}/df_eap_{APPEND}.pkl") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Tests that comments are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO
class CommentTests(object):
def test_comment(self):
d... | StringIO(data) | pandas.compat.StringIO |
import xml.etree.ElementTree as etree
import pandas as pd
import numpy as np
import os
# TODO: add to_pi_json() method. (Both PiTimeSeries and PiTimeSeriesCollection should be able to call this method)
# TODO: adapt to_pi_xml() and to_pi_json() from PiTimeSeries by Mattijn. Probably more robust write methods.
class ... | pd.datetime.strftime(s, "%Y-%m-%d") | pandas.datetime.strftime |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation... | pd.to_datetime(res.date) | pandas.to_datetime |
###############################################################################
# Summarize OSM roads lengths
# <NAME>, July 2018
# Purpose: summarize road lengths within features in defined shapefile
###############################################################################
import os, sys, time, subprocess, argp... | pd.DataFrame(allFeats) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confu... | pd.read_csv(PATH_TO_FEATURES_DIR/"permutation_feature_imp_fold220200728_004355.csv", index_col=0) | pandas.read_csv |
# generic libraries
import os
import glob
from xml.etree import ElementTree
import numpy as np
import pandas as pd
from .read_sentinel2 import get_root_of_table
from ..generic.mapping_io import read_geo_image
# dove-C
def list_central_wavelength_dc():
center_wavelength = {"B1": 485., "B2" : 545., "B3" : 630., "... | pd.DataFrame(d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.data_checks import (
ClassImbalanceDataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning,
)
class_imbalance_data_check_name = ClassImbalanceDataCheck.name
def test_class_imbalance_errors():
X = pd.... | pd.Series([np.nan] * 10) | pandas.Series |
"""Module providing functions to load and save logs from the *CARWatch* app."""
import json
import re
import warnings
import zipfile
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import pandas as pd
from tqdm.auto import tqdm
from biopsykit.carwatch_logs import LogData
from biopsykit.uti... | pd.to_datetime(df["time"]) | pandas.to_datetime |
__author__ = "<NAME>"
__copyright__ = "BMW Group"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from tsa import Logger
import sys
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import argparse
import matplotlib... | pd.concat([self.residuals, self.residuals_forecast], axis=0) | pandas.concat |
import os
import geopandas as gpd
import numpy as np
import pandas as pd
from subprocess import call
from shapely.geometry import Point
from sklearn.feature_selection import VarianceThreshold
class CurrentLabels:
"""
Add sector code info to each property
"""
def __init__(self, path_to_file):
... | pd.get_dummies(self.census, columns=cat_columns) | pandas.get_dummies |
import numpy as np
import pandas as pd
import os, time, sys, multiprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
sys.path.append("..")
from CTGCN.utils import check_and_make_path
class DataGenerator(object):
base_path: str
input_base_path: str
outpu... | pd.read_csv(res_path, sep=',', header=0, names=['date', 'avg0', 'had0', 'l1_0', 'l2_0']) | pandas.read_csv |
import types
import warnings
import pickle
import re
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .. import config_context
from ._testing import _get_ar... | pd.DataFrame(X_orig, columns=names) | pandas.DataFrame |
"""
this script is meant to assess a dataset along a variety of measures
author: <NAME>
license: MIT
"""
# standard libary
import argparse
from collections import Counter, defaultdict, OrderedDict
import csv
from functools import partial
import json
import os
import re
from typing import List
# third party libraries
im... | pd.read_csv(eng_sent_path, sep='\t', header=None, names=['id', 'lang', 'text']) | pandas.read_csv |
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly as pl
import re
import requests
from .DataFrameUtil import DataFrameUtil as dfUtil
class CreateDataFrame():
"""Classe de serviços para a criação de dataframes utilizados para a construção dos gr... | pd.DataFrame(data=d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import collections
from scipy.sparse import issparse
def balanced_newick_tree(num_taxa):
if num_taxa%2 != 0:
raise ValueError("There is no balanced tree on {num_taxa} taxa. Please specify an even number.")
from math import floor
def _balanced_newick_subtree(nt... | pd.DataFrame(probs, index=probs[:, 0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceil, floor # ceil: rounds up the fractional part, floor: discards the fractional part
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from Py... | pd.merge(self.df_daily, self.df_weekly, on='종목코드', how='outer') | pandas.merge |
from cova import FEATURETABLE, GENOME, RFS, CDS, PSEQS
from cova import utils
from Bio.Data.CodonTable import unambiguous_dna_by_id as codon_table
import os, sys, pandas, math, multiprocessing, numpy
from time import time
#### Point mutations #######
def ann_pm(vpos,vseq,ft=FEATURETABLE,cdss=CDS,ct=codon_table[1],rfs=... | pandas.concat(vlist,ignore_index=True) | pandas.concat |
#! /usr/bin/python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklear... | pd.read_csv('train.csv') | pandas.read_csv |
"""
Search and recognize the name, category and
brand of a product from its description.
"""
from typing import Optional, List, Union, Dict
from itertools import combinations
import pandas as pd # type: ignore
from pymystem3 import Mystem # type: ignore
try:
from cat_model import PredictCategory # type: ignore
... | pd.read_csv(all_clean) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests_cache
import datetime
import pandas as pd
from datetime import timedelta
import pandas as pd
from pandas.io.common import ZipFile
from pandas.compat import BytesIO, StringIO, PY2
def main():
expire_after = timedelta(days=1)
if PY2:
filenam... | ZipFile(zip_data, 'r') | pandas.io.common.ZipFile |
"""By: Xiaochi (<NAME>: github.com/XC-Li"""
import pandas as pd
import os
from util_code.xml_parser import bs_parser, xml_parser, get_person_speech_pair
# from xiaodan.data_loader import get_data
from tqdm.autonotebook import tqdm # auto backend selection
# get xml data info: This function is written by <NAME>
def g... | pd.DataFrame(untagged_data_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Autor: <NAME>
# Datum: Tue Sep 14 18:00:32 2021
# Python 3.8.8
# Ubuntu 20.04.1
from typing import List, Tuple
import pandas as pd
from nltk.probability import FreqDist
from nltk.tokenize.casual import TweetTokenizer
from nltk.util import ngrams
class FeatureExtractor:
"""
Collect... | pd.Series(instance_features_vector) | pandas.Series |
"""
This module implements the intermediates computation
for plot(df) function.
"""
from sys import stderr
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde... | pd.DataFrame({srs.name: ["Others"], "cnt": [other_cnt]}) | pandas.DataFrame |
"""to create TFRecords for ML classification model training from image chips, label and class id
Author: @developmentseed
Run:
python3 tf_records_creation_classification.py \
--tile_path=data/P400_v2/ \
--csv_files=data/csv/*_class_id.csv \
--output_dir=data/classification_training_tfrecor... | pd.concat(frames) | pandas.concat |
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import tkinter as tk
from tkinter import ttk, scrolledtext, Menu, \
messagebox as msg, Spinbox, \
filedialog
global sol,f1Var,filePathBank,\
filePathLedger,filePathBank, \
int... | pd.to_datetime(ledgerDF['Date']) | pandas.to_datetime |
#!/usr/bin/env python
"""
MeteWIBELE: quantify_prioritization module
1) Define quantitative criteria to calculate numerical ranks and prioritize the importance of protein families
2) Prioritize the importance of protein families using unsupervised or supervised approaches
Copyright (c) 2019 Harvard School of Public H... | pd.to_numeric(summary_table[mytype + "__value"], errors='coerce') | pandas.to_numeric |
#! /usr/bin/env python3
'''
HERO - Highways Enumerated by Recombination Observations
Author - <NAME>
'''
from argparse import ArgumentParser
from Bio.SeqIO import parse as BioParse
from itertools import product
import math
import multiprocessing
import os
import pandas as pd
from plotnine import *
from random import... | pd.read_csv(file_loc, header=0, sep='\t') | pandas.read_csv |
from helper import *
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from s... | pd.read_csv(filedir2) | pandas.read_csv |
import pandas as pd
import threading
import queue
import time
from itertools import combinations
from .logger import LoggerFactory
from lib.agentinfo import AgentInfoFactory
logger = LoggerFactory.getLogger(__name__)
class PolicyOptimizer():
def __init__(self, agentInfo, minAgents, depth, threads, timeout):
... | pd.DataFrame() | pandas.DataFrame |
## Visualize results
import matplotlib.pyplot as plt
import scipy.stats as stat
import numpy as np
import pandas as pd
from collections import defaultdict
import time, os
from operator import add
## Initialize
ML = 'LogisticRegression'
nGene = 200
adj_pval_cutoff = 0.01
test_datasets = ['Auslander', 'Prat_MELANOMA', ... | pd.read_csv('../../result/2_cross_study_prediction/across_study_performance.txt', sep='\t') | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
impo... | assert_invalid_comparison(obj, other, box_with_array) | pandas.tests.arithmetic.common.assert_invalid_comparison |
#Online References used :
#https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py
#https://www.crummy.com/software/BeautifulSoup/bs4/doc/
#https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/
#https://www.youtube.com/watch?v=XQgXKtPSzUI
# https://www.youtube.com/watch?v=aIPqt-OdmS0
... | pd.read_csv(file) | pandas.read_csv |
import pandas as pd
from IPython.display import display
from scrapy.crawler import CrawlerProcess
from ecommercecrawler.spiders.kakaoshopping import KakaoshoppingSpider
from ecommercecrawler.spiders.navershopping import NavershoppingSpider
if __name__ == "__main__":
data = {"messagetype": ["pp", "a", "pm"], "tags... | pd.DataFrame(data=data) | pandas.DataFrame |
#!/usr/bin/env python3
import sys
import os
import argparse
import pandas as pd
import glob
import datetime as dt
import math
def main():
parser = argparse.ArgumentParser(description="Preprocess reference collection: randomly select samples and write into individual files in lineage-specific directories.")
p... | pd.to_datetime(args.enddate) | pandas.to_datetime |
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy... | pd.isnull(x['HOSPITAL_TIME']) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras import optimizers
from keras.layers import Dense
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
fro... | pd.DataFrame(test_preds) | pandas.DataFrame |
import argparse
import os
import warnings
import subprocess
subprocess.call(['pip', 'install', 'sagemaker-experiments'])
import pandas as pd
import numpy as np
import tarfile
from smexperiments.tracker import Tracker
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from skle... | pd.DataFrame(X_train) | pandas.DataFrame |
import pandas as pd
import numpy as np
import zipfile
import os
import scipy as sp
import matplotlib.pyplot as plt
import plotly.express as px
import zipfile
import pathlib
def top_ions(col_id_unique):
""" function to compute the top species, top filename and top species/plant part for each ion
Args:
... | pd.merge(left=df1[['cluster index']],right=df2[['shared name','ZodiacScore']], how='left', left_on= 'cluster index', right_on='shared name') | pandas.merge |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create... | pd.DataFrame(h) | pandas.DataFrame |
import requests
from collections import defaultdict
import folium
import json
import pandas as pd
import os
#import plotly.express as px
coord_dict ={
'Sikkim':[27.5330,88.5122],'Andhra Pradesh':[15.9129,79.7400], 'Bihar':[25.0961,85.313], 'Chhattisgarh':[21.2787,81.8661],'Arunachal Pradesh':[28.2180,94.7278],\
... | pd.DataFrame(latest_regional_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
| pd.set_option('display.expand_frame_repr', False) | pandas.set_option |
import os
import numpy as np
import pandas as pd
from pipedown.cross_validation.splitters import RandomSplitter
from pipedown.dag import DAG
from pipedown.nodes.base import Input, Model, Node, Primary
from pipedown.nodes.filters import Collate, ItemFilter
from pipedown.nodes.metrics import MeanSquaredError
def test... | pd.DataFrame() | pandas.DataFrame |
# Standard Library
import pandas as pd
import statistics as st
import numpy as np
import imdb
from datetime import datetime
from datetime import timedelta
import multiprocessing
import json
import time
import re
import random
import matplotlib.pyplot as plt
# Email Library
from email.mime.text import MIMEText as text
i... | pd.DataFrame(movie_dates_list, columns=["dates"]) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, ti... | Series([2, 3, 4]) | pandas.Series |