| prompt | completion | api |
|---|---|---|
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/... | pd.Timedelta(microseconds=1) | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""HAR_Opportunity.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qfhns0ykD6eLkoWICPu6WbdD4r7V-6Uf
# Introduction
This notebook presents the several machine learning models using CNN and LSTM for HAR. To obtain a detail... | pd.DataFrame(y_train) | pandas.DataFrame |
import pandas as pd
import pytest
# Wawa on toy YSDA
@pytest.fixture
def toy_labels_result_zbs():
return pd.Series(
['no', 'yes', 'no', 'yes', 'no'],
index= | pd.Index(['t1', 't2', 't3', 't4', 't5'], name='task') | pandas.Index |
# -*- coding: utf-8 -*-
import sys
import json
import logging
from typing import Tuple, List
from docopt import docopt
from munch import Munch
import pandas as pd
from wetterdienst import (
__version__,
metadata_for_climate_observations,
get_nearby_stations,
)
from wetterdienst.additionals.geo_location im... | pd.concat(data) | pandas.concat |
import pandas as pd
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
root = '/Users/Gabe/Downloads/thesis spreadies'
# sg_1k_1k = pd.read_csv(os.path.join(root,'we_depletions_s... | pd.to_datetime(vcm_150_150['date']) | pandas.to_datetime |
# flask imports
from datetime import datetime
from eve.auth import requires_auth
from eve.render import send_response
from flask import request, abort, Blueprint, g, Response
from flask import current_app as app
# utils imports
import numpy as np
import pandas as pd
from auth.authentication import EVETokenAuth
edin... | pd.notnull(res_parcial[typ]) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: <NAME>
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
"""
Loops over a list and returns fuzzy matches found in a second list.... | pd.Series(BestMatch_Unique_DtoR) | pandas.Series |
# coding=utf-8
# Author: <NAME> & <NAME>
# Date: Jan 06, 2021
#
# Description: Parse Epilepsy Foundation Forums and extract dictionary matches
#
import os
import sys
#
#include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'include'))
include_path = '/nfs/nfs7/home/rionbr/myaura/i... | pd.set_option('display.width', 1000) | pandas.set_option |
"""A module to store some results that are parsed from .txt files."""
import os
from configparser import ConfigParser
from types import SimpleNamespace
import pandas as pd
import numpy as np
from skm_pyutils.py_table import list_to_df
from dictances.bhattacharyya import bhattacharyya
from .main import main as ctrl_... | pd.DataFrame(vals, columns=cols) | pandas.DataFrame |
from aggregate.decennial_census.decennial_census_001020 import decennial_census_001020
from aggregate.aggregation_helpers import order_aggregated_columns
import pandas as pd
from internal_review.set_internal_review_file import set_internal_review_files
from utils.PUMA_helpers import clean_PUMAs, puma_to_borough
dcp_p... | pd.concat([census20, clean_data], axis=1) | pandas.concat |
"""
Contains the machine learning code for Taxonomist
Authors:
<NAME> (1), <NAME> (1), <NAME> (1), <NAME> (2),
<NAME> (2), <NAME> (1), <NAME> (1)
Affiliations:
(1) Department of Electrical and Computer Engineering, Boston University
(2) Sandia National Laboratories
This work has been partially funded ... | pd.DataFrame(data=T, columns=self.classes_, index=X.index) | pandas.DataFrame |
# python3
# coding: utf-8
# import threading
import openpyxl as px
import pandas as pd
from helper_Word import helper_Word
from Docx_to_pdf import Docx_to_PDF
from PDF_Combiner import Combine_PDF
from shutil import copyfile
import os
# from collections import OrderedDict
import time
import json_helper
# import Excel_... | pd.DataFrame({}) | pandas.DataFrame |
import os
import time
import pickle
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import display
# Base classes
from sklearn.base import ClassifierMixin, TransformerMixin
# Random search & splitting
from sklearn.model_selection import RandomizedSearchCV, train_test_split
# Class... | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # EDA and Modeling Employee Attrition
# In[ ]:
# make sure we have the latest seaborn package
print()
# In[ ]:
# should be version 11
import seaborn as sns
sns.__version__
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O... | pd.read_csv("../../../input/patelprashant_employee-attrition/WA_Fn-UseC_-HR-Employee-Attrition.csv") | pandas.read_csv |
"""
PTC
---
Data handling for turn-by-turn measurement files from the ``PTC`` code, which can be obtained by performing
particle tracking of your machine through the ``MAD-X PTC`` interface. The files are very close in
structure to **TFS** files, with the difference that the data part is split into "segments" relating... | pd.DataFrame(matrices[bunch]["X"]) | pandas.DataFrame |
import logging
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
from beam_search import beam_decode
logger = logging.getLogger(__name__)
def set_seed(seed):
random.seed(seed)
np.ra... | pd.DataFrame(predicted_dict) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, ... | concat(records) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class that is in charge
of reading the sets files, reshaping them to be used in
the build class, creating and reading the parameter files and
checking the errors in the definition of the sets and parameters
"""
import itertools as it
from openpyxl import lo... | pd.Index(self.main_years, name="Years") | pandas.Index |
# Copyright (c) 2016-2018 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Class implementing the maelstrom method (Bruse & van Heeringen, 2018)
Examples
--------
run_maelstrom... | pd.read_table(input_table, index_col=0, comment="#") | pandas.read_table |
from datetime import datetime
from unittest import TestCase
from unittest.mock import Mock
from zoneinfo import ZoneInfo
from etl.src.extractor import TimeSeriesExtractor, Clock
import pandas as pd
from pandas.testing import assert_series_equal
def to_milliseconds(ts: datetime) -> int:
return int(ts.timestamp() ... | pd.to_datetime([self.ts_2, self.ts_3], unit="ms") | pandas.to_datetime |
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
re... | pd.merge(left = All_df,right = All_Conceptdata.loc[:,['Concept','Raw_Frequency']],how="left",left_on = 'Concept1',right_on='Concept') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = | pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 10:48:15 2020
@author: <NAME>
OK TODO: make it work from the command line
OK TODO get json file
OK TODO get all json variables
OK TODO checks all JSON variables
OK TODO try except error management with setting file
OK TODO add test to see if fil... | pd.read_csv(defFilePath) | pandas.read_csv |
# %% imports
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import hours_in_year, medea_path
# --------------------------------------------------------------------------- #
# %% settings and initializing
# -------------------------------------------------------------------... | pd.DataFrame(data=0, index=cfg.zones, columns=cfg.zones) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 ... | pd.set_option('display.max_columns', None) | pandas.set_option |
import pandas as pd
import pathlib
import yaml
from cooler.util import binnify, read_chromsizes
import numpy as np
import time
from statsmodels.stats.multitest import multipletests
from scipy.stats import norm
import subprocess
import pybedtools
from concurrent.futures import as_completed, ProcessPoolExecutor
import c... | pd.read_hdf(f'{this_study_dir}/{chrom}.DMR.hdf') | pandas.read_hdf |
# Data Worker
# %%
import os
import pandas as pd
import plotly.express as px
from pypinyin import lazy_pinyin
locations_url = 'https://blog.csdn.net/envbox/article/details/80290103'
filename = 'locations.json'
sync_folder = os.environ.get('Sync', '.')
mapbox = dict(
mapbox_accesstoken=open(os.path.joi... | pd.read_html(locations_url) | pandas.read_html |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 19:08:39 2019
@author: <NAME> et al. - "Evaluation of machine learning models for
automatic detection of DNA double strand breaks after irradiation using a gH2AX
foci assay", PLOS One, 2020
"""
# main file for training machine learning models using previously... | pd.DataFrame(x_data[removed_im[0]]) | pandas.DataFrame |
import sklearn.ensemble as ek
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import pickle
import pandas as pd
import numpy as np
import pyprind
impo... | pd.read_csv("data/dataset.csv") | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.... | pd.DatetimeIndex([date_string_current]) | pandas.DatetimeIndex |
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Main Economic Indicators: https://alfred.stlouisfed.org/re... | pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d") | pandas.to_datetime |
#!/usr/bin/python
import sys
from collections import defaultdict
from os import listdir, path
import pandas
import json
PIPELINE_NAME = "neoANT-HILL"
LOG_FILE = "/params.log"
PICARD = "/home/biodocker/picard.jar"
GATK = "/home/biodocker/gatk-4.1.0.0/gatk"
SNPEFF = "/home/biodocker/snpEff/snpEff.jar"
SNPSIFT = "/h... | pandas.read_csv(result_path, sep='\t') | pandas.read_csv |
import argparse
from email.mime import image
import os
from tqdm import tqdm
import pandas as pd
import logging
from src.utils.common import read_yaml, create_directories
from src.stage_01_get_data import main as loader_main
from sklearn.metrics import confusion_matrix, f1_score
import numpy as np
import warnings
impor... | pd.DataFrame({"Actual":target, "Prediction":pred}) | pandas.DataFrame |
from django.core.management.base import BaseCommand, CommandError
from etldjango.settings import GCP_PROJECT_ID, BUCKET_NAME, BUCKET_ROOT
from .utils.storage import Bucket_handler, GetBucketData
from .utils.extractor import Data_Extractor
from datetime import datetime, timedelta
from .utils.unicodenorm import normalize... | pd.DataFrame.from_records(histo) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
from texttable import Texttable
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import NumericPerturbation
from cape_privacy.pandas.transformations import DatePerturbation
from cape_privacy.pandas.transformations import NumericRounding
from cape_p... | pd.DataFrame(temp) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import talib
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,... | pd.datetime.strptime(x, '%Y-%m-%d') | pandas.datetime.strptime |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/11/8 17:48
Desc: Tonghuashun - sectors - industry sectors
http://q.10jqka.com.cn/thshy/
"""
import os
from datetime import datetime
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.utils import dem... | pd.DataFrame.from_dict(code_name_ths_map, orient="index") | pandas.DataFrame.from_dict |
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import mpl_finance as mpf
from matplotlib.pylab import date2num
class Figure_Canvas(FigureCan... | pd.concat([date1, start, end, high, low], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.n... | DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0]) | pandas.DataFrame |
import datetime as dt
import gc
import json
import logging
import os
import pickle
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import h5py
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
impor... | pd.DatetimeIndex(dates) | pandas.DatetimeIndex |
"""Perform classifications using landsat, sentinel-1 or both."""
import os
import rasterio
import rasterio.features
import numpy as np
import pandas as pd
import geopandas as gpd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import ... | pd.Series() | pandas.Series |
# plots.py
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def randomWalk():
"""Creates plot of symmetric one-D random lattice walk"""
N = 1000 #length of random walk
s = np.zero... | pd.DataFrame({'xvals':xvals,'yvals':yvals}) | pandas.DataFrame |
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, thi... | pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read']) | pandas.DataFrame |
import logging
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _mini_batch_convergence
from sklearn.utils.validation import check_random_state
from progressivis import ProgressiveError, SlotDescriptor
from progre... | pd.DataFrame({'labels': labels}, index=locs) | pandas.DataFrame |
import pathlib
import pandas as pd
import pytest
from pytest import approx
import process_improve.batch.features as features
# General
@pytest.fixture(scope="module")
def batch_data():
"""Returns a small example of a batch data set."""
folder = (
pathlib.Path(__file__).parents[2] / "process_improve... | pd.to_datetime(df["DateTime"]) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas
import decimal
class Response(object):
def __init__(self, data=None):
self.__data__ = data
def data(self):
return | pandas.DataFrame(self.__data__) | pandas.DataFrame |
from pymongo import MongoClient
import pandas as pd
pd.set_option("display.max_rows",None,"display.max_columns",None)
pd.options.mode.chained_assignment = None
import datetime
from datetime import datetime
server = MongoClient('mongodb://localhost:27017')
db=server['salesfokuz_lead']
leadsactivity = db['lead_log']
dadb... | pd.to_datetime(leaddf['punch_out']) | pandas.to_datetime |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed ... | pd.concat(all_df, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
from typing import Mapping, List, Tuple
from collections import defaultdict, OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble impo... | pd.DataFrame(diabetes.data, columns=diabetes.feature_names) | pandas.DataFrame |
import sys, os, time, datetime, warnings, configparser
import pandas as pd
import numpy as np
import talib
import concurrent.futures
import matplotlib.pyplot as plt
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_p... | pd.to_datetime(df.index) | pandas.to_datetime |
from pathlib import Path
import logging
import numpy as np
import pandas as pd
from pytest import approx, mark
from lenskit.algorithms.user_knn import UserUser
from lenskit.algorithms.item_knn import ItemItem
from lenskit.algorithms.basic import PopScore
from lenskit.algorithms.ranking import PlackettLuce
from lenski... | pd.DataFrame({'user': 1, 'item': [2]}) | pandas.DataFrame |
import sys
import glob
import pandas as pd
from flask import Flask
from flask import jsonify
# list of dataframes
dfs = []
# Read the CSV files
for f in glob.glob("Firewall*.csv"):
print("Reading file: [%s]" % f)
local_df = pd.read_csv(f, low_memory=False)
dfs.append(local_df)
full_df = | pd.concat(dfs) | pandas.concat |
import gc as _gc
import pandas as _pd
import numpy as _np
from . import databases as _databases
from . import profiles as _profiles
class Columns(_databases.Columns):
"""
Container for the columns names defined in this module.
"""
SPLIT_SUF = '_SPLIT'
REF = 'REF'
QRY = 'QRY'
REF_SPLIT = '{}... | _pd.read_csv(filename, **kwargs) | pandas.read_csv |
import _io
import random
import numpy as np
import pandas as pd
import networkx as nx
from pandas.core.indexing import IndexingError
from recommenders.lod_reordering import LODPersonalizedReordering
import evaluation_utils as eval
class PathReordering(LODPersonalizedReordering):
def __init__(self, train_file: st... | pd.Series([], dtype=int) | pandas.Series |
import copy
import time
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, IterableDataset
class Column(object):
"""A column. Data is write-once, immutable-after.
Typical usage:
col = Column('myCol').Fill(data).SetDistribution(domain_vals)
"data" and "doma... | pd.Categorical(data, categories=dvs) | pandas.Categorical |
from flask import Flask, redirect, request, url_for,render_template
from application import app, db
from application.models import Products,Orders,Customers #,SummaryOrder,OrdersSummary,ItemTable,OrdersTable,,CustomersTable
import sqlalchemy as sql
import pandas as pd
from datetime import datetime
@app.route('/')
def ... | pd.read_sql_table('products', sql_engine) | pandas.read_sql_table |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pa... | Series([False, True, False], index=[1, 2, 3]) | pandas.Series |
import numpy as np
import pandas as pd
def fetch_students():
''' Fetches the two dataset csv files and merges it '''
student_mat = pd.read_csv("dataset/student-mat.csv")
student_por = pd.read_csv("dataset/student-por.csv")
students = pd.concat([student_mat, student_por])
return students
def crea... | pd.Series(data=0, index=students_dataframe.index) | pandas.Series |
"""
This module will include the guessing advantage implementation.
"""
from math import log, exp, sqrt, inf
from statistics import median
import time
from enum import Enum
from statsmodels.distributions.empirical_distribution import ECDF
import multiprocessing as mp
# import swifter
import numpy as np
import pandas as... | pd.concat(cdf) | pandas.concat |
from wf_core_data_dashboard import core
import wf_core_data
import mefs_utils
import pandas as pd
import inflection
import urllib.parse
import os
def generate_mefs_table_data(
test_events_path,
student_info_path,
student_assignments_path
):
test_events = pd.read_pickle(test_events_path)
student_in... | pd.isna(x) | pandas.isna |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
impo... | tm.assert_index_equal(result, expected) | pandas._testing.assert_index_equal |
# -*- coding: utf-8 -*-
"""Generator capacity factor plots .
This module contain methods that are related to the capacity factor
of generators and average output plots
"""
import logging
import numpy as np
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_data_h... | pd.DataFrame() | pandas.DataFrame |
from define_collection_wave import folder
from helpers import create_folder, headers
import requests
from datetime import date
import json
import pandas as pd
path_greggs = create_folder('4_Greggs',folder)
request_url = 'https://production-digital.greggs.co.uk/api/v1.0/articles/masters?ExcludeUnpublished=true&Exclude... | pd.DataFrame(greggs) | pandas.DataFrame |
"""
Main interface module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
Further contains code to produce autogenerated reports, analyses, and the like.
Copyright <NAME>, <NAME>, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
from __future__ import print_... | pd.DataFrame(self.junctions) | pandas.DataFrame |
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
... | Period(freq='D', year=2007, month=1, day=4) | pandas.Period |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import ... | pandas.Series(other) | pandas.Series |
'''
Expression.py - wrap various differential expression tools
===========================================================
:Tags: Python
Purpose
-------
This module provides tools for differential expression analysis
for a variety of methods.
Methods implemented are:
DESeq
EdgeR
ttest
The aim of this mod... | pandas.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
# Filter csv data based on application args
def filter(filter_args):
# Read in csv data
df = pd.read_csv('soilgenerate/data/12072016_plants_sheff.csv', encoding="utf-8")
## BEGIN Default Filters
# Filter nan
is_not_nan = pd.notnull(df['Growth Rate'])
df = df[is_not_nan]
... | pd.notnull(df['Planting Density per Acre, Maximum']) | pandas.notnull |
import os, gzip, logging, re
import pandas as pd
from typing import List, Tuple
from common.base_parser import BaseParser
from common.constants import *
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s',
handlers=[logging.StreamHandler()])
# protein name types:
REC_FULLNAM... | pd.concat([df, df_m]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# Author:
# <NAME>
# Emotional Sentiment on Twitter
# A coronavirus vaccine online firestorm
# In this python script you will find examples of some of the most common
# NLP (Natural Language Processing) techniques used to uncover patterns of
# sentiment and emotion on social m... | pd.concat([df_tweets, df_emotions], axis=1) | pandas.concat |
import pandas as pd
from . import processing
def plottable_sums(reference_df, behaviour, identifier_column="Animal_id", periods={}, period_label="period", metadata_columns={"TreatmentProtocol_code":"Treatment"}):
identifiers = list(set(reference_df[identifier_column]))
evaluation_df = pd.DataFrame({})
for identifie... | pd.concat([preferences_df, preferences_df_slice]) | pandas.concat |
import os
import pandas as pd
import numpy as np
import math
from datetime import datetime
import csv
from helpers import make_directory
# If date is specified, calculate ranking up until that date
def get_rankings(from_file, to_file, date=None, include_prediction=False, predicted_date_so_far=None, ranking_summary_fil... | pd.read_csv(ranking_summary_file) | pandas.read_csv |
def scatter_plot(Matrix,identifier_dataframe,cmap_categ,cmap_multiplier,title,size,screen_labels):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
"""
This function goal is to allow data visualization of 2D or 3D matrices of
data... | pd.DataFrame(identifier_dataframe) | pandas.DataFrame |
from tweepy import OAuthHandler
from tweepy import API
from tweepy import Stream
from tweepy.streaming import StreamListener
import json
import time
import sys
import pandas as pd
import numpy as np
import twitter_dataprep as dataprep
import twitter_cache as tc
class SListener(StreamListener):
def __init__(sel... | pd.concat([trump_wc,trump_wcloud]) | pandas.concat |
# The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: <NAME>
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this softwa... | pd.to_datetime(data.index) | pandas.to_datetime |
# coding: utf-8
import os
import pandas as pd
from tqdm import tqdm
from czsc.objects import RawBar, Freq
from czsc.utils.bar_generator import BarGenerator, freq_end_time
from test.test_analyze import read_1min
cur_path = os.path.split(os.path.realpath(__file__))[0]
kline = read_1min()
def test_freq_end_time():
... | pd.to_datetime("2021-11-11 09:43") | pandas.to_datetime |
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
import random
import pandas as pd
from sklearn.metrics import confusion_matrix, f1_score, roc_curve, auc, accuracy_score
import joblib
from collections import Counter
from sklearn.model_selection i... | pd.read_csv(pdblist, delim_whitespace=True) | pandas.read_csv |
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import h... | pd.Series([]) | pandas.Series |
import datetime
import os
import urllib
from http.client import IncompleteRead
from urllib.request import Request
import bs4 as bs
import pandas as pd
from django.conf import settings
from core.models import CourseModel, UpdateModel
# FILE PATHS
my_path = os.path.abspath(os.path.dirname(__file__))
stopwords_path = ... | pd.to_datetime(dataset_diplomas["start_date"]) | pandas.to_datetime |
"""
Functions for categoricals
"""
from itertools import chain, product
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from pandas.core.algorithms import value_counts
from .utils import last2
__all__ = [
'cat_anon',
'cat_collapse',
'cat_concat',
'cat_drop',
'cat_expand'... | pd.unique(all_cats + categories) | pandas.unique |
# %%
import rasterio
import pandas as pds
import numpy as np
import numpy.ma as ma
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn
# %%
HI_RES = '30s'
LOW_RES... | pds.DataFrame(scaled, columns=vlab) | pandas.DataFrame |
# install imblearn package to a specific anaconda enviroment boston_house_price
# $ conda install -n boston_house_price -c conda-forge imbalanced-learn
# update imblearn package to a specific anaconda enviroment boston_house_price
# $ conda update -n boston_house_price -c glemaitre imbalanced-learn
# =================... | pd.Series(y_res) | pandas.Series |
# -*- coding: utf-8 -*-
"""
*Post-processing script for plotting times recorded by the main_constellation.py application*
Placeholder
"""
import numpy as np
import pandas as pd
from datetime import timedelta, datetime, timezone
import matplotlib.pyplot as plt
import matplotlib.patches as mpatch... | pd.DataFrame(constellation.detect) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (... | tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Functionality to read and store in HDF5 files
import h5py
import pandas as pd
import random
import string
import os
import datetime
import json
from data_science.data_transfer.data_api import Dataset
class HDF5Dataset:
def __init__(self, file_name, file_path, dataset_id,
random_string_in_name... | pd.read_hdf(self.file_w_path, 'data/' + self._dataset_id, 'r') | pandas.read_hdf |
# -*- coding: utf-8 -*-
"""
Created on Mon May 07 17:34:56 2018
@author: gerar
"""
import os
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
#%%
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
#%%
def mae(predictions,targets):
return np.abs... | pd.read_table(_file,usecols=[0,2,4]) | pandas.read_table |
import logging
import traceback
import pandas as pd
import numpy as np
import seaborn as sns
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatche... | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""Re-generate exps.csv from individual experiments
"""
import argparse
import logging
from os.path import join as pjoin
from logging import debug, info
import pandas as pd
import os
def create_exps_from_folders(expsdir, dffolderspath):
files = sorted(os.listdir(expsdir))
df = pd.DataF... | pd.concat([df, dfaux], axis=0, sort=False) | pandas.concat |
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
    Extract features from audio files under the `path` directory, using the librosa library
    :param path: file path
    :param label: emotion type
    :param startid: starting sequence number
... | pd.Series() | pandas.Series |
"""Daylight hours from http://www.sunrisesunset.com """
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|D... | pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"]) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------... | Timestamp('2013-12-01 00:00:00') | pandas.Timestamp |
'''
Created on Apr 23, 2018
@author: nishant.sethi
'''
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
'''load... | pd.read_csv('deliveries.csv') | pandas.read_csv |
'''
@Author: <NAME>
@Date: 2019-07-03 16:18:27
@LastEditors: Yudi
@LastEditTime: 2019-07-19 15:40:23
@Company: Cardinal Operation
@Email: <EMAIL>
@Description:
'''
import pickle
import gzip
import os
import gc
import time
import random
from itertools import chain
import numpy as np
import pandas as pd
import scipy.spa... | pd.read_csv(p, names=['user', 'item', 'rating', 'timestamp']) | pandas.read_csv |
import matplotlib.pylab as pylab
import math as m
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pdb
from pylab import rcParams
import matplotlib as mpl
mpl.use('AGG')
font = {'size': 40}
rcParams['figure.figsize'] = 10, 8
mpl.style.use('seaborn-paper')
rcParams['figure.figsize'] = 10, ... | pd.read_csv(fname, index_col=0) | pandas.read_csv |
"""Network representation and utilities
<NAME>, <NAME> & <NAME>
"""
import os,sys
import re
import numpy as np
import pandas as pd
import geopandas as gpd
import pygeos
import pygeos.geometry as pygeom
import contextily as ctx
from rasterstats import zonal_stats
import pyproj
import pylab as pl
from IPython import dis... | pd.DataFrame.from_dict(collect_start_paths) | pandas.DataFrame.from_dict |
import pydoc
import pandas as pd
import os
import random
def read_excel():
df = pd.read_excel('/Users/ls/Downloads/babycare11-1.xlsx')
data = df.head(2)
print(str(data))
# print(df.head(2))
def merge_excel():
dfs = []
dir = '/Users/ls/babycare/'
des = '/Users/ls/babycare/babycare-stats-... | pd.read_excel(period_seven_from) | pandas.read_excel |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_sq... | pd.read_csv(path + 'bases_william/anos_finais/dados2013_fim.csv') | pandas.read_csv |
import pandas as pd
df1 = | pd.read_csv('data//alexander_algoaddition_adddate.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parsing of a csv game tracking sheet of type 'X', saving data in consice and relevant manner."""
# Here comes your imports
import sys
import logging as log
import pandas as pd
# Here comes your (few) global variables
# Here comes your class definitions
# Here comes... | pd.DataFrame('', index=players_goalies.index, columns=col) | pandas.DataFrame |
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command... | pd.concat([self.list_corr_features, df[features_list[23]]],axis=1) | pandas.concat |