code stringlengths 2.5k 6.36M | kind stringclasses 2
values | parsed_code stringlengths 0 404k | quality_prob float64 0 0.98 | learning_prob float64 0.03 1 |
|---|---|---|---|---|
출처: https://blog.breezymind.com/2018/03/02/sklearn-feature_extraction-text-2/
```
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
np.random.seed(0)
from konlpy.tag import Mecab
mecab = Mecab()
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.... | github_jupyter | import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
np.random.seed(0)
from konlpy.tag import Mecab
mecab = Mecab()
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
# tokenizer : 문장에서 색인어 추출... | 0.29523 | 0.809088 |
```
import numpy as np
import pandas as pd
import wisps
import wisps.simulations as wispsim
import matplotlib.pyplot as plt
from astropy.io import fits, ascii
from astropy.table import Table
%matplotlib inline
bigf= wisps.get_big_file()
bigf=bigf[bigf.snr1>=3]
#3dhst data
from astropy.io import ascii
hst3d= ascii.read... | github_jupyter | import numpy as np
import pandas as pd
import wisps
import wisps.simulations as wispsim
import matplotlib.pyplot as plt
from astropy.io import fits, ascii
from astropy.table import Table
%matplotlib inline
bigf= wisps.get_big_file()
bigf=bigf[bigf.snr1>=3]
#3dhst data
from astropy.io import ascii
hst3d= ascii.read('/u... | 0.36727 | 0.549641 |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import io
```
# Import data from json file to dataframe
##### 1. load json files and convert to three dataframe
```
business_json_file = 'business.json'
user_json_file = 'user.json'
review_json_file = 'review.json'
business = []
u... | github_jupyter | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import io
business_json_file = 'business.json'
user_json_file = 'user.json'
review_json_file = 'review.json'
business = []
user = []
review = []
for line in open(business_json_file, 'r'):
business.append(json.loads(line))
for line i... | 0.212314 | 0.755186 |
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as we... | github_jupyter | NAME = ""
COLLABORATORS = ""
from IPython.display import Image
Image('./Media/res-param-1.png',width='700')
from IPython.display import Image
Image('./Media/res-param-2.png',width='700')
from IPython.display import Image
Image('./Media/centroid-res-param.png',width='700') | 0.151467 | 0.903465 |
# Development of Deep Learning Guided Genetic Algorithm for Material Design Optimization
Kuanlin Chen, PhD student of the schulman lab<br>
Advisor: Rebecca Schulman, PhD<br>
Johns Hopkins University
**Keywords: Machine Learning, Deep Learning, Computer Vision, Numeric Simulation, Multi-Objective Optimization**
***
#... | github_jupyter | # Package Importing
import csv, math, os, time, copy, matplotlib, datetime, keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutiona... | 0.512693 | 0.987993 |
# Visualizing Logistic Regression
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.te... | github_jupyter | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.label... | 0.676086 | 0.913252 |
### Closed-loop control of a deformable mirror (DM)
#### using SVD pseudo-inversion of DM influence matrix
#### and low-pass filtering of the eigenvalues for improved convergence stability
Hardware used:
* Thorlabs WFS-150 Shack-Hartmann sensor
* Mirao52e deformable mirror
This code uses Thorlabs 64-bit WFS driver i... | github_jupyter | import ctypes as ct
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
import sys
sys.path.append('./lib')
from Mirao52_utils import *
#define home dir of the code:
homeDir = 'C:/Users/Nikita/Documents/GitHub/AO-toolkit/'
#load the WFS DLL:
WFS = ct.windll.WFS_64
#Load the Mirao52e DLL:
DM = ct.win... | 0.286269 | 0.795539 |
# Statistical Relational Learning with `pslpython`
As we've seen there are several ways to work with graph-based data, including: SPARQL queries, graph algorithms traversals, ML embedding, etc.
Each of these methods makes trade-offs in terms of:
* computational costs as the graph size scales
* robustness when th... | github_jupyter | import kglab
namespaces = {
"acq": "http://example.org/stuff/",
"foaf": "http://xmlns.com/foaf/0.1/",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
}
kg = kglab.KnowledgeGraph(
name = "LINQS simple acquaintance example for PSL",
base_uri = "http://example.org/stuff/",
language = "en",
... | 0.304765 | 0.988885 |
```
import os
import sys
import time
import numpy as np
import pandas as pd
from scipy import misc
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.sparse import csgraph
from scipy import linalg
from pysheds.grid import Grid
from scipy import ndimage
from matplotlib import colors
import seaborn as sn... | github_jupyter | import os
import sys
import time
import numpy as np
import pandas as pd
from scipy import misc
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.sparse import csgraph
from scipy import linalg
from pysheds.grid import Grid
from scipy import ndimage
from matplotlib import colors
import seaborn as sns
i... | 0.268174 | 0.628892 |
# Final Project Submission
* Student name: `Reno Vieira Neto`
* Student pace: `self paced`
* Scheduled project review date/time: `Fri Oct 15, 2021 3pm – 3:45pm (PDT)`
* Instructor name: `James Irving`
* Blog post URL: https://renoneto.github.io/using_streamlit
#### This project originated the [following app](https://... | github_jupyter | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import re
import time
from surprise import Reader, Dataset, dump
from surprise.model_selection import cross_validate, GridSearchCV
from surprise.prediction_algorithms import KNNBasic, KNNBaseline, SVD, SVDpp
from surprise.accur... | 0.662906 | 0.885829 |
# eICU Collaborative Research Database
# Notebook 5: Prediction
This notebook explores how a decision trees can be trained to predict in-hospital mortality of patients.
## Load libraries and connect to the database
```
# Import libraries
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as p... | github_jupyter | # Import libraries
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# model building
from sklearn import ensemble, impute, metrics, preprocessing, tree
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
# Make pandas dataframes p... | 0.553747 | 0.9659 |
```
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import project_functions as pf
df = pf.load_and_process('../../data/raw/games-features.csv')
```
# Dataset Explaination
Our dataset features data from the Steam games store. It showcases the games that are purchasable from... | github_jupyter | import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import project_functions as pf
df = pf.load_and_process('../../data/raw/games-features.csv')
df.head()
pf.plotOwners(df)
pf.Genrecount(df)
pf.plotRevenue(df)
indie = pf.genreratingplot(df,"GenreIsIndie")
action = pf.genrer... | 0.319758 | 0.960324 |
# Course 2 week 1 lecture notebook Ex 02
# Risk Scores, Pandas and Numpy
Here, you'll get a chance to see the risk scores implemented as Python functions.
- Atrial fibrillation: Chads-vasc score
- Liver disease: MELD score
- Heart disease: ASCVD score
Compute the chads-vasc risk score for atrial fibrillation.
- Loo... | github_jupyter | # Complete the function that calculates the chads-vasc score.
# Look for the # TODO comments to see which sections you should fill in.
def chads_vasc_score(input_c, input_h, input_a2, input_d, input_s2, input_v, input_a, input_sc):
# congestive heart failure
coef_c = 1
# Coefficient for hypertension... | 0.344113 | 0.935759 |
```
#all_slow
#export
from fastai.basics import *
#hide
from nbdev.showdoc import *
#default_exp callback.tensorboard
```
# Tensorboard
> Integration with [tensorboard](https://www.tensorflow.org/tensorboard)
First thing first, you need to install tensorboard with
```
pip install tensorboard
```
Then launch tensorbo... | github_jupyter | #all_slow
#export
from fastai.basics import *
#hide
from nbdev.showdoc import *
#default_exp callback.tensorboard
pip install tensorboard
in your terminal. You can change the logdir as long as it matches the `log_dir` you pass to `TensorBoardCallback` (default is `runs` in the working directory).
## Tensorboard Embe... | 0.718496 | 0.86511 |
I'll be answering the following questions along the way:
1. Is there any correlation between the variables?
2. What is the genre distribution?
3. What is the user rating distribution?
4. What is the user rating distribution by genre?
5. What is the price distribution by genre over the years?
6. What is the rate d... | github_jupyter | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import plotly.express as px
# text data
import string
import re
df = pd.read_csv('AmazonBooks.csv')
df = pd.read_csv('AmazonBooks.csv')
df.info()
df.head()
# Check for correlations
pd.get_dummies(df[['Year'... | 0.393152 | 0.889864 |
# Home Credit Default Risk Competition
Consider this collection of notebooks as a case-study intended for those who are beginners in Machine Learning. We have tried to expand upon the code with our comments available in some of the notebooks on Kaggle.
# Data
The data as provided by [Home Credit](http://www.homecr... | github_jupyter | ############# | 0.099733 | 0.990348 |
# LSTM Example with Scalecast
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scalecast.Forecaster import Forecaster
sns.set(rc={'figure.figsize':(15,8)})
```
## Data preprocessing
```
data = pd.read_csv('AirPassengers.csv',parse_dates=['Month'])
data.head()
dat... | github_jupyter | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scalecast.Forecaster import Forecaster
sns.set(rc={'figure.figsize':(15,8)})
data = pd.read_csv('AirPassengers.csv',parse_dates=['Month'])
data.head()
data.shape
data['Month'].min()
data['Month'].max()
f = Forecaster(y=... | 0.400632 | 0.835249 |
# Time Series with Pandas Project Exercise
For this exercise, answer the questions below given the dataset: https://fred.stlouisfed.org/series/UMTMVS
This dataset is the Value of Manufacturers' Shipments for All Manufacturing Industries.
**Import any necessary libraries.**
```
# CODE HERE
import numpy as np
import ... | github_jupyter | # CODE HERE
import numpy as np
import pandas as pd
%matplotlib inline
# CODE HERE
df = pd.read_csv('../Data/UMTMVS.csv')
# CODE HERE
df.head()
# CODE HERE
df = df.set_index('DATE')
df.head()
# CODE HERE
df.index
# CODE HERE
df.index = pd.to_datetime(df.index)
df.index
# CODE HERE
df.plot(figsize=(14,8))
#CODE HE... | 0.172416 | 0.979056 |
<a href="https://colab.research.google.com/github/Victoooooor/SimpleJobs/blob/main/movenet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title
!pip install -q imageio
!pip install -q opencv-python
!pip install -q git+https://github.com/tenso... | github_jupyter | #@title
!pip install -q imageio
!pip install -q opencv-python
!pip install -q git+https://github.com/tensorflow/docs
#@title
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_docs.vis import embed
import numpy as np
import cv2
import os
# Import matplotlib libraries
from matplotlib import pyplot as p... | 0.760651 | 0.820001 |
```
import numpy as np
import myUtil as mu
import matplotlib.pyplot as plt
#Generate 25 laws filters
K=np.array([[1,4,6,4,1],[-1,-2,0,2,1],[-1,0,2,0,-1],[-1,2,0,-2,1],[1,-4,6,-4,1]])
N=len(K)
laws_filters=np.zeros((N*N,N,N))
for i in range(N):
for j in range(N):
laws_filters[i*N+j]=np.matmul(K[i][:,np.newax... | github_jupyter | import numpy as np
import myUtil as mu
import matplotlib.pyplot as plt
#Generate 25 laws filters
K=np.array([[1,4,6,4,1],[-1,-2,0,2,1],[-1,0,2,0,-1],[-1,2,0,-2,1],[1,-4,6,-4,1]])
N=len(K)
laws_filters=np.zeros((N*N,N,N))
for i in range(N):
for j in range(N):
laws_filters[i*N+j]=np.matmul(K[i][:,np.newaxis],... | 0.211498 | 0.505615 |
```
# Initialize Otter
import otter
grader = otter.Notebook("lab07.ipynb")
```
# Lab 7: Crime and Penalty
Welcome to Lab 7!
```
# Run this cell to set up the notebook, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do s... | github_jupyter | # Initialize Otter
import otter
grader = otter.Notebook("lab07.ipynb")
# Run this cell to set up the notebook, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic.
import matplotlib
%matplotlib inl... | 0.534612 | 0.973695 |
# Getting started with Captum Insights: a simple model on CIFAR10 dataset
Demonstrates how to use Captum Insights embedded in a notebook to debug a CIFAR model and test samples. This is a slight modification of the CIFAR_TorchVision_Interpret notebook.
More details about the model can be found here: https://pytorch.o... | github_jupyter | import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from captum.insights import AttributionVisualizer, Batch
from captum.insights.features import ImageFeature
def get_classes():
classes = [
"Plane",
"Car",
"Bird",
"Cat",
... | 0.905044 | 0.978935 |
```
import pickle
import numpy as np
import random
from tqdm import tqdm
import os
import os.path
from clear_texts import *
import tensorflow as tf
def loggin(log_str):
print(log_str)
print(tf.VERSION)
#functions for generating traning sequenses, decodinn, encoding, text generation
text = ''.join(get_textes... | github_jupyter | import pickle
import numpy as np
import random
from tqdm import tqdm
import os
import os.path
from clear_texts import *
import tensorflow as tf
def loggin(log_str):
print(log_str)
print(tf.VERSION)
#functions for generating traning sequenses, decodinn, encoding, text generation
text = ''.join(get_textes())
... | 0.583559 | 0.195517 |
# Measurement of an Acoustic Impulse Response
*This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the masters module Selected Topics in Audio Signal Processing, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:... | github_jupyter | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
import sounddevice as sd
fs = 44100 # sampling rate
T = 5 # length of the measurement signal in sec
Tr = 2 # length of the expected system response in sec
t = np.linspace(0, T, T*fs)
x = sig.chirp(t, 20, T, 20000, 'li... | 0.568176 | 0.988646 |
# Loading Image Data
So far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural network... | github_jupyter | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import helper
dataset = datasets.ImageFolder('path/to/data', transform=transform)
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat... | 0.829561 | 0.991161 |
```
import course;course.header()
```
# The csv module
There are several ways to interact with files that contain data in a "comma separated value" format.
We cover the [basic csv module](https://docs.python.org/3/library/csv.html), as it is sometimes really helpful to retain only a fraction of the information of a... | github_jupyter | import course;course.header()
import csv
with open("../data/amino_acid_properties.csv") as aap:
aap_reader = csv.DictReader(aap, delimiter=",")
for line_dict in aap_reader:
print(line_dict)
break
import pprint
pprint.pprint(line_dict)
with open("../data/test.csv", "w") as output:
aap_wr... | 0.250271 | 0.852752 |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# The Spinning Effective One-Body Hamiltonian
## Author: T... | github_jupyter | %%writefile SEOBNR/Hamiltonian-Hreal_on_top.txt
Hreal = sp.sqrt(1 + 2*eta*(Heff - 1))
%%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt
Heff = Hs + Hns - Hd + dSS*eta*u*u*u*u*(S1x*S1x + S1y*S1y + S1z*S1z + S2x*S2x + S2y*S2y + S2z*S2z)
%%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt
Hs = Hso + Hss
%%writefile -... | 0.112028 | 0.923454 |
# Background Scan Operation
How to run update operations on a namespace in background.
This notebook requires Aerospike datbase running locally and that Java kernel has been installed. Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker co... | github_jupyter | import io.github.spencerpark.ijava.IJava;
import io.github.spencerpark.jupyter.kernel.magic.common.Shell;
IJava.getKernelInstance().getMagics().registerMagics(Shell.class);
%sh asd
%%loadFromPOM
<dependencies>
<dependency>
<groupId>com.aerospike</groupId>
<artifactId>aerospike-client</artifactId>
<versio... | 0.205456 | 0.791499 |
```
import numpy as np
from matplotlib import pyplot as plt
import copy
#This corresponds to pic in book
arr = [[[-1,1],[-1,1],[1,-1],[-1,1]],
[[-1,-1],[-1,-1],[-1,1],[1,-1]],
[[-1,1],[-1,1],[1,1],[-1,1]],
[[-1,1],[-1,1],[1,1],[-1,1]]]
arr = np.array(arr)
def initialise_state(N): #N is the grid di... | github_jupyter | import numpy as np
from matplotlib import pyplot as plt
import copy
#This corresponds to pic in book
arr = [[[-1,1],[-1,1],[1,-1],[-1,1]],
[[-1,-1],[-1,-1],[-1,1],[1,-1]],
[[-1,1],[-1,1],[1,1],[-1,1]],
[[-1,1],[-1,1],[1,1],[-1,1]]]
arr = np.array(arr)
def initialise_state(N): #N is the grid dimens... | 0.184988 | 0.452294 |
# Temporal-Difference Methods
In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
---
### Part 0: Explore CliffWalkingEnv
We begin by importing the necess... | github_jupyter | import sys
import gym
import numpy as np
import random
import math
from collections import defaultdict, deque
import matplotlib.pyplot as plt
%matplotlib inline
import check_test
from plot_utils import plot_values
env = gym.make('CliffWalking-v0')
[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, ... | 0.522446 | 0.960878 |
# Spatial discretisation
So far, we've seen time derivatives and ordinary differential equations of the form
$$
\dot{u} = f(t, u).
$$
Most problems one encounters in the real world have spatial as well as time derivatives. Our first example is the [*Poisson equation*](https://en.wikipedia.org/wiki/Poisson%27s_equati... | github_jupyter | %matplotlib notebook
import numpy
from matplotlib import pyplot
import matplotlib.lines as mlines
pyplot.style.use('ggplot')
n = 200
h = 2/(n-1)
x = numpy.linspace(1,2.5,n)
pyplot.plot(x, numpy.sin(x));
def newline(p1, p2, **kwargs):
ax = pyplot.gca()
xmin, xmax = ax.get_xbound()
if(p2[0] == p1[0]):
... | 0.560974 | 0.986891 |
```
import requests
from bs4 import BeautifulSoup
import pandas as pd
```
### Get the URL of the website with election results
- <i>Here we are looking at <b>UNOFFICIAL</b> results collected by this site </i>
```
url = "https://www.tibetsun.com/elections/sikyong-2016-final-round-results#election-results"
req = reque... | github_jupyter | import requests
from bs4 import BeautifulSoup
import pandas as pd
url = "https://www.tibetsun.com/elections/sikyong-2016-final-round-results#election-results"
req = requests.get(url)
data = req.text
soup = BeautifulSoup(data)
#Overall Results
location_total_vote_list = []
ls_vote_count_list = []
pt_vote_count_lis... | 0.130923 | 0.734 |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/Pyth... | github_jupyter | %qtconsole --style solarized-dark
import numpy as np
import pandas as pd
data = pd.Series([0.25, 0.5, 0.75, 1.0])
data
data.values
data.index
data[1]
data[1:3]
data = pd.Series([0.25, 0.5, 0.75, 1.0],
index=['a', 'b', 'c', 'd'])
data
data['b']
data = pd.Series([0.25, 0.5, 0.75, 1.0],
... | 0.239172 | 0.993056 |
# Le Bloc Note pour gérer vos dépots GitHub
> Cet exercice a pour objectif de vous accompagner dans la création d'un compte [GitHub](https://github.com/) et pour sa gestion en ligne de commande depuis votre navigateur via un interpréteur interactif **jupyter** en mode **Notebook** fonctionnant, par exemple, sur le serv... | github_jupyter | git config --global user.name "votrePseudoGitHub"
git config --global user.name
git config --global user.email "prenom.nom@eleves.ecmorlaix.fr"
git config --global user.email
git config --list
mkdir ~/pNomRepo
cd ~/pNomRepo
git init
ls -a
git remote add origin https://github.com/votrePseudoGitHub/pNomRepo.git
... | 0.243732 | 0.826011 |
```
import json
from collections import Counter
import operator
import numpy as np
pl_title = json.load(open('../MODEL_1_PL_NAME_NEW/PID_PROCESSED_TITLE_LIST_PROCESSED.json'))
Train = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TRAIN.json'))
len(Train)
Train['967445']
word_tracks_list = {}
for pl in Train:
for w... | github_jupyter | import json
from collections import Counter
import operator
import numpy as np
pl_title = json.load(open('../MODEL_1_PL_NAME_NEW/PID_PROCESSED_TITLE_LIST_PROCESSED.json'))
Train = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TRAIN.json'))
len(Train)
Train['967445']
word_tracks_list = {}
for pl in Train:
for word ... | 0.165121 | 0.58945 |
# Matrizes e vetores
## License
All content can be freely used and adapted under the terms of the
[Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/).

## Representação de uma matriz
Ante... | github_jupyter | v = [1, 2, 3]
print(v)
A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(A)
A = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
print(A)
print(A[0])
print(A[0][0])
print(A[1][2])
for i in range(3): # Anda sobre as linhas
for j in range(3): # Anda sobre as colunas
print(A[i][j], '', end='') # end='' faz com que p... | 0.035528 | 0.981997 |
```
# Tuples
if __name__ == '__main__':
n = int(input())
integer_list = map(int,input().split())
t = tuple(integer_list)
print(hash(t))
# Lists
N = int(input())
lis=list()
for _ in range(N):
s=input().strip().split(" ")
if s[0]=="insert":
lis.insert(int(s[1]),int(s[2]))
if s[0... | github_jupyter | # Tuples
if __name__ == '__main__':
n = int(input())
integer_list = map(int,input().split())
t = tuple(integer_list)
print(hash(t))
# Lists
N = int(input())
lis=list()
for _ in range(N):
s=input().strip().split(" ")
if s[0]=="insert":
lis.insert(int(s[1]),int(s[2]))
if s[0]=="... | 0.110495 | 0.26765 |
```
%matplotlib inline
from pylab import *
```
---
# Get the data
* Load the Olivetti Face dataset
* Import the smile/no smile reference data
```
from sklearn import datasets
faces = datasets.fetch_olivetti_faces()
faces.keys()
# Display some images
for i in range(10):
face = faces.images[i]
subplot(1, 10,... | github_jupyter | %matplotlib inline
from pylab import *
from sklearn import datasets
faces = datasets.fetch_olivetti_faces()
faces.keys()
# Display some images
for i in range(10):
face = faces.images[i]
subplot(1, 10, i + 1)
imshow(face.reshape((64, 64)), cmap='gray')
axis('off')
# Download results-smile-GT-BLS.xml fro... | 0.480235 | 0.854703 |
<a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/single%20task/function%20documentation%20generation/php/base_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
**<h3>Predict the documentation for php c... | github_jupyter | !pip install -q transformers sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline
pipeline = SummarizationPipeline(
model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_base_code_documentation_generation_php"),
tokenizer=AutoTokenizer.from_pretrained("SEBIS... | 0.548915 | 0.842539 |
```
import primes
```
# The Sieve of Eratosthenes
Sharon, Kynan, Jet
September 26th, 2017
## Description
The goal of this algorithm was to find all prime numbers up to "n". The Sieve of Eratosthenes algorithm works just like the title. First, all positive integers are generated in a list up to "n" starting with 2.... | github_jupyter | import primes
import primes
primes.eratosthenes(100)
import primes
%timeit primes.eratosthenes(80)
import primes
def try_gen_prime(n):
g = primes.gen_eratosthenes()
p = next(g)
primeList =[p]
while p < n:
p = next(g)
primeList.append(p)
return primeList
%timeit try_gen_prime(40)... | 0.307566 | 0.94256 |
<a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/08_ee_js_to_ipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
Uncomment the following line to install [geemap](https://geemap.org) if needed.
```
# !pip install gee... | github_jupyter | # !pip install geemap
import geemap
geemap.show_youtube('RpIaalFk4H8')
import os
from geemap.conversion import *
# Create a temporary working directory
work_dir = os.path.join(os.path.expanduser('~'), 'geemap')
# Get Earth Engine JavaScript examples. There are five examples in the geemap package folder.
# Change js... | 0.395484 | 0.980034 |
```
""" Concat all data files into one [Training] """
"""
This code takes all the text files in a directory and merge them into one
"""
import pandas as pd
import os
import glob
import numpy as np
import multiprocessing
import string
import random
import time
import shutil
dataDirectory = r"/home/muddi004/muddi/GI... | github_jupyter | """ Concat all data files into one [Training] """
"""
This code takes all the text files in a directory and merge them into one
"""
import pandas as pd
import os
import glob
import numpy as np
import multiprocessing
import string
import random
import time
import shutil
dataDirectory = r"/home/muddi004/muddi/GIANT/... | 0.293607 | 0.260237 |
```
# Visualization of the KO+ChIP Gold Standard from:
# Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells"
# TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load
# Change "canvas" to "SVG" (drop-down menu in ce... | github_jupyter | # Visualization of the KO+ChIP Gold Standard from:
# Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells"
# TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load
# Change "canvas" to "SVG" (drop-down menu in cell b... | 0.609757 | 0.747455 |
# Bagging
This notebook introduces a very natural strategy to build ensembles of
machine learning models named "bagging".
"Bagging" stands for Bootstrap AGGregatING. It uses bootstrap resampling
(random sampling with replacement) to learn several models on random
variations of the training set. At predict time, the p... | github_jupyter | import pandas as pd
import numpy as np
# create a random number generator that will be used to set the randomness
rng = np.random.RandomState(1)
def generate_data(n_samples=30):
"""Generate synthetic dataset. Returns `data_train`, `data_test`,
`target_train`."""
x_min, x_max = -3, 3
x = rng.uniform(x... | 0.856317 | 0.963057 |
### Dependencies for the interactive plots apart from rdkit, oechem and other qc* packages
!conda install -c conda-forge plotly -y
!conda install -c plotly jupyter-dash -y
!conda install -c plotly plotly-orca -y
```
#imports
import numpy as np
from scipy import stats
import fragmenter
from openeye import oechem... | github_jupyter | #imports
import numpy as np
from scipy import stats
import fragmenter
from openeye import oechem
TD_datasets = [
'Fragment Stability Benchmark',
# 'Fragmenter paper',
# 'OpenFF DANCE 1 eMolecules t142 v1.0',
'OpenFF Fragmenter Validation 1.0',
'OpenFF Full TorsionDrive Benchmark 1',
'OpenFF Gen 2 Torsion Set ... | 0.668015 | 0.692207 |
```
import requests
import sys
import pandas as pd
import dateutil.parser as dp
import json
from pandas.io.json import json_normalize
# MET Frost client ID
client_id = ''
```
#### Get sources (stations) in Bergen
```
# issue an HTTP GET request
r = requests.get(
'https://frost.met.no/sources/v0.jsonld',
{'ids... | github_jupyter | import requests
import sys
import pandas as pd
import dateutil.parser as dp
import json
from pandas.io.json import json_normalize
# MET Frost client ID
client_id = ''
# issue an HTTP GET request
r = requests.get(
'https://frost.met.no/sources/v0.jsonld',
{'ids': None},
auth=(client_id, '')
)
def codec_utf... | 0.272411 | 0.258338 |
# Noisy Convolutional Neural Network Example
Build a noisy convolutional neural network with TensorFlow v2.
- Author: Gagandeep Singh
- Project: https://github.com/czgdp1807/noisy_weights
Experimental Details
- Datasets: The MNIST database of handwritten digits has been used for training and testing.
Observations
... | github_jupyter | from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras import Model, layers
import numpy as np
# MNIST dataset parameters.
num_classes = 10 # total classes (0-9 digits).
# Training parameters.
learning_rate = 0.001
training_steps = 200
batch_size = 128
display_s... | 0.939519 | 0.974893 |
# GitHub Workshop
<center><img src="./Images/Git-Largest.jpg"></center>
### About Version Control System(VCS)
What is “version control”, and why should you use?
Version control is a system that records changes to a file or set of files over time so that you can recall specific versions later.
#### Centralised Ver... | github_jupyter | On branch master
Your branch is up-to-date with 'origin/master'.
nothing to commit, working directory clean
| 0.331877 | 0.870597 |
Probability theory is a cornerstone for machine learning. We can think of quantum states as probability distributions with certain properties that make them different from our classical notion of probabilities. Contrasting these properties is an easy and straightforward introduction to the most basic concepts we need i... | github_jupyter | import numpy as np
n_samples = 100
p_1 = 0.2
x_data = np.random.binomial(1, p_1, (n_samples,))
print(x_data)
frequency_of_zeros, frequency_of_ones = 0, 0
for x in x_data:
if x:
frequency_of_ones += 1/n_samples
else:
frequency_of_zeros += 1/n_samples
print(frequency_of_ones+frequency_of_zeros)
... | 0.420838 | 0.994754 |
<div style="color:white;
display:fill;
border-radius:10px;
font-size:110%;
font-family:cursive;
letter-spacing:0.5px;
background-color:#4ff0d2;
color:Black;
font-family:cursive;
padding:5px 5px 5px 5px;
">
<h1... | github_jupyter | import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.style.use('dark_background')
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
import warnings
warnings.simplefilter(action='ignore', category=Warning)
dataset = pd.r... | 0.499268 | 0.770594 |
<img src="images/dask_horizontal.svg" align="right" width="30%">
# Data Storage
<img src="images/hdd.jpg" width="20%" align="right">
Efficient storage can dramatically improve performance, particularly when operating repeatedly from disk.
Decompressing text and parsing CSV files is expensive. One of the most effect... | github_jupyter | %run prep.py -d accounts
import os
filename = os.path.join('data', 'accounts.*.csv')
filename
import dask.dataframe as dd
df_csv = dd.read_csv(filename)
df_csv.head()
target = os.path.join('data', 'accounts.h5')
target
# convert to binary format, takes some time up-front
%time df_csv.to_hdf(target, '/data')
# same da... | 0.240775 | 0.978426 |
# A simple example, solved three ways
1. CVXPY + MOSEK
2. SD ADMM
3. Coordinate descent
```
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
from time import time
import seaborn as sns
import cvxpy as cvx
sns.set_style('darkgrid')
impor... | github_jupyter | %load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
from time import time
import seaborn as sns
import cvxpy as cvx
sns.set_style('darkgrid')
import sys
sys.path.append('..')
from osd import Problem
from osd.components import MeanSquareSmall... | 0.419053 | 0.862757 |
This is a "Neural Network" toy example which implements the basic logical gates.
Here we don't use any training method for the NN model; we simply guess the correct weights.
It is meant to show, in principle, how a NN works.
```
import math
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x), mapping any real x into (0, 1).

    The naive form overflows in math.exp for large-magnitude negative x
    (math.exp raises OverflowError near x < -709), so the algebraically
    equivalent form e^x / (1 + e^x) is used on that branch instead.
    """
    if x >= 0:
        return 1. / (1 + math.exp(-x))
    # For x < 0, exp(-x) can overflow; exp(x) is safe (it underflows to 0).
    z = math.exp(x)
    return z / (1 + z)
def neuron(inputs, weights):
return sigmo... | github_jupyter | import math
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x), mapping any real x into (0, 1).

    The naive form overflows in math.exp for large-magnitude negative x
    (math.exp raises OverflowError near x < -709), so the algebraically
    equivalent form e^x / (1 + e^x) is used on that branch instead.
    """
    if x >= 0:
        return 1. / (1 + math.exp(-x))
    # For x < 0, exp(-x) can overflow; exp(x) is safe (it underflows to 0).
    z = math.exp(x)
    return z / (1 + z)
def neuron(inputs, weights):
    """Single artificial neuron: sigmoid of the dot product of inputs and weights."""
    activation = 0
    for value, weight in zip(inputs, weights):
        activation += value * weight
    return sigmoid(activation)
def almost_equal(x, y, epsilon=0.001):
    """Return True when x and y differ by strictly less than epsilon."""
    difference = x - y
    return -epsilon < difference < epsilon
def NN_OR(x1,x2):
weights =[-10, 20, 20]
inputs = [1, x1, x2]
return neuron(weights,input... | 0.609292 | 0.978073 |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
df = pd.read_csv('/content/drive/My Drive/dataset/creditcard.csv', encoding='utf8')
df.head()
df.isnull().sum()
```
no missing values in dataframe
```
df.describe()
import seaborn a... | github_jupyter | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
df = pd.read_csv('/content/drive/My Drive/dataset/creditcard.csv', encoding='utf8')
df.head()
df.isnull().sum()
df.describe()
import seaborn as sns
plt.figure(figsize=(15,10))
sns.heat... | 0.344003 | 0.739305 |
```
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
dataset = pd.read_csv('../data/dataset_total_smells.csv', delimiter=',', index_col=0)
print(dataset.shape)
dataset.head()
Numbers = {'TooManyMethods': [dataset['TooManyMethods'].sum()],
'ExcessiveMethodLength': [dat... | github_jupyter | import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
dataset = pd.read_csv('../data/dataset_total_smells.csv', delimiter=',', index_col=0)
print(dataset.shape)
dataset.head()
Numbers = {'TooManyMethods': [dataset['TooManyMethods'].sum()],
'ExcessiveMethodLength': [dataset... | 0.36659 | 0.550064 |
<a href="https://colab.research.google.com/github/Omaromar2255/4433/blob/main/Colab%20RDP/Colab%2033.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Colab RDP** : Remote Desktop to Colab Instance
> **Warning : Not for Cryptocurrency Mining<br><... | github_jupyter |
#@title **Create User**
#@markdown Enter Username and Password
import os
username = "user" #@param {type:"string"}
password = "root" #@param {type:"string"}
print("Creating User and Setting it up")
# Creation of user
os.system(f"useradd -m {username}")
# Add user to sudo group
os.system(f"adduser {username} sudo"... | 0.463201 | 0.710384 |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"></ul></div>
```
from planet4 import io, markings, plotting
%matplotlib inline
image_id = 'APF0000gpu'
datapath = 'gold_comparison'
# datapath = "gold_per_obsid"
datapath = 'catalog_1.0b2'
plotting.plo... | github_jupyter | from planet4 import io, markings, plotting
%matplotlib inline
image_id = 'APF0000gpu'
datapath = 'gold_comparison'
# datapath = "gold_per_obsid"
datapath = 'catalog_1.0b2'
plotting.plot_finals(image_id, datapath=datapath)
datapath = "gold_per_imageid"
datapath = "gold_per_obsid"
from planet4.catalog_production import d... | 0.459076 | 0.715035 |

# Practical PyTorch: Translation with a Sequence to Sequence Network and Attention
In this project we will be teaching a neural network to translate from French to English.
```
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a pictur... | github_jupyter | [KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but ... | 0.478285 | 0.981113 |
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to change the path if needed.)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read the School Data and Student Data and store into a Pandas DataFrame
school_data_df = pd.read_cs... | github_jupyter | # Dependencies and Setup
import pandas as pd
# File to Load (Remember to change the path if needed.)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read the School Data and Student Data and store into a Pandas DataFrame
school_data_df = pd.read_csv(sc... | 0.641198 | 0.861538 |
# 探索过拟合和欠拟合
在前面的两个例子中(电影影评分类和预测燃油效率),我们看到,在训练许多周期之后,我们的模型对验证数据的准确性会到达峰值,然后开始下降。
换句话说,我们的模型会过度拟合训练数据,学习如何处理过拟合很重要,尽管通常可以在训练集上实现高精度,但我们真正想要的是开发能够很好泛化测试数据(或之前未见过的数据)的模型。
过拟合的反面是欠拟合,当测试数据仍有改进空间会发生欠拟合,出现这种情况的原因有很多:模型不够强大,过度正则化,或者根本没有经过足够长的时间训练,这意味着网络尚未学习训练数据中的相关模式。
如果训练时间过长,模型将开始过度拟合,并从训练数据中学习模式,而这些模式可能并不适用于测试数据,我们需要取得平... | github_jupyter | from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
NUM_W... | 0.894657 | 0.938745 |
# Homework 2 (SIO 211A)
```
# imports
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import unyt
from geopy import distance
```
# Init
```
sns.set_theme()
sns.set_style('whitegrid')
sns.set_context('notebook')
```
# Problem 1
## Sorry to make you click on another link, but this mad... | github_jupyter | # imports
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import unyt
from geopy import distance
sns.set_theme()
sns.set_style('whitegrid')
sns.set_context('notebook')
g = 9.8 * unyt.m / unyt.s**2
H = 1000 * unyt.m
f = 1e-4 / unyt.s
R = np.sqrt(g*H)/f
R.to('km')
C_g = np.sqrt(g*H)
C_g... | 0.470007 | 0.910863 |
# Ex2 - Getting and Knowing your Data
Check out [Chipotle Exercises Video Tutorial](https://www.youtube.com/watch?v=lpuYZ5EUyS8&list=PLgJhDSE2ZLxaY_DigHeiIDC1cD09rXgJv&index=2) to watch a data scientist go through the exercises
This time we are going to pull data directly from the internet.
Special thanks to: https:/... | github_jupyter | import pandas as pd
import numpy as np
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
chipo = pd.read_csv(url, sep = '\t')
chipo.head(10)
# Solution 1
chipo.shape[0] # entries <= 4622 observations
# Solution 2
chipo.info() # entries <= 4622 observations
chipo.shape[1]
c... | 0.628179 | 0.988199 |
# Simulation of Ball drop and Spring mass damper system
"Simulation of dynamic systems for dummies".
<img src="for_dummies.jpg" width="200" align="right">
This is a very simple description of how to do time simulations of a dynamic system using the SciPy ODE (Ordinary Differential Equation) solver.
```
from scipy.integra... | github_jupyter | from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
V_start = 150*10**3/3600 # [m/s] Train velocity at start
def train(states,t):
# states:
# [x]
x = states[0] # Position of train
dxdt = V_start # The position state will change by the speed of the train
... | 0.782372 | 0.981382 |
## 控制迷宫寻宝机器人
在这个项目中,你将使用刚刚学到的知识,尝试根据要求,编写代码,来控制一个机器人,在模拟环境中行走,并找到目标宝藏。
机器人所在的模拟环境中,会包含这样几个因素:机器人的起点、障碍物、宝藏箱。你的任务包括:
1. 分析模拟环境的数据
2. 控制机器人随机行动
3. (可选)控制机器人走到终点
* 一个良好的含有注释的代码,可以让你的程序可读性更高,尝试为你自己的代码添加相应的注释。
---
---
## 第一节 分析模拟环境的数据
首先,只有足够了解机器人所在的环境,我们的机器人才能成功找到目标宝藏,因此首先我们来对机器人所在环境的数据进行分析。在这个部分,会考察你对数据结构、控制流的了解。... | github_jupyter | environment = [[0,0,0,2],
[1,2,0,0],
[0,2,3,2]]
import helper
env_data = helper.fetch_maze()
#TODO 1模拟环境的行数
rows = None
rows = len(env_data)
#TODO 2模拟环境的列数
columns = None
columns = len(env_data[0])
#TODO 3取出模拟环境第三行第六列的元素
row_3_col_6 = None
row_3_col_6 = env_data[2][5]
print("迷宫共有", r... | 0.087199 | 0.853608 |
# Heart Rate Varability (HRV)
NeuroKit2 is the most comprehensive software for computing HRV indices, and the list of features is available below:
| Domains | Indices | NeuroKit | heartpy | HRV | pyHRV | |
|-------------------|:-------:|:---------------:|:-------:|:---:|:-----:|---|
| Time Domain ... | github_jupyter | # Load the NeuroKit package and other useful packages
import neurokit2 as nk
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = [15, 9] # Bigger images
data = nk.data("bio_resting_5min_100hz")
data.head() # Print first 5 rows
# Find peaks
peaks, info = nk.ecg_peaks(data["ECG"], samp... | 0.673514 | 0.92054 |
```
import pandas as pd
import numpy as np
import math
trips = pd.read_csv('../data/raw/201501-hubway-tripdata.csv')
trips = trips.append(pd.read_csv('../data/raw/201502-hubway-tripdata.csv'))
trips = trips.append(pd.read_csv('../data/raw/201503-hubway-tripdata.csv'))
trips = trips.append(pd.read_csv('../data/raw/20150... | github_jupyter | import pandas as pd
import numpy as np
import math
trips = pd.read_csv('../data/raw/201501-hubway-tripdata.csv')
trips = trips.append(pd.read_csv('../data/raw/201502-hubway-tripdata.csv'))
trips = trips.append(pd.read_csv('../data/raw/201503-hubway-tripdata.csv'))
trips = trips.append(pd.read_csv('../data/raw/201504-hu... | 0.174903 | 0.131368 |
<center><h1><b><span style="color:blue">Histogramming & visualisation</span></b></h1></center>
<br>
This is not an under-statement - **histograms are ubiquitous in Particle Physics!** This being said, and as far as the (general) scientific Python ecosystem is concerned, histograms aren't quite seen as first-clas... | github_jupyter | import numpy as np
from histoprint import text_hist, print_hist
text_hist(
np.random.randn(1000), bins=[-5, -3, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 5], title="Variable bin widths"
)
h_numpy = np.histogram(np.random.normal(0, 1, 100000), bins=20, range=(-5, 5))
h_numpy
A = np.random.randn(1000) - 2
B = np.random.randn(... | 0.455199 | 0.984694 |
```
import numpy as np
import os
import torch
import torchvision
import torchvision.transforms as transforms
### Load dataset - Preprocessing
DATA_PATH = '/tmp/data'
BATCH_SIZE = 64
def load_mnist(path, batch_size):
if not os.path.exists(path): os.mkdir(path)
trans = transforms.Compose([transforms.ToTensor()... | github_jupyter | import numpy as np
import os
import torch
import torchvision
import torchvision.transforms as transforms
### Load dataset - Preprocessing
DATA_PATH = '/tmp/data'
BATCH_SIZE = 64
def load_mnist(path, batch_size):
if not os.path.exists(path): os.mkdir(path)
trans = transforms.Compose([transforms.ToTensor(),
... | 0.807537 | 0.641029 |
```
import os
import json
import random
import csv
import math
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
file_loc = 'mprobc_100kb.txt'
with open(file_loc) as input_:
stripped = [line.strip() for line in input_]
lines = [s.split('\t')[1:] for s in stripped if s]
header = []
final_list=[... | github_jupyter | import os
import json
import random
import csv
import math
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
file_loc = 'mprobc_100kb.txt'
with open(file_loc) as input_:
stripped = [line.strip() for line in input_]
lines = [s.split('\t')[1:] for s in stripped if s]
header = []
final_list=[]
f... | 0.078385 | 0.295883 |
# Strong Edges Network VS ACDC Fully Connected Network
```
import DSGRN
from DSGRN import *
import cProfile
import sys
sys.setrecursionlimit(10**8)
sys.path.insert(0,'../src')
import PhenotypeGraphFun
import CondensationGraph_iter
import ReducePhenotypeGraph
import PhenotypeGraphviz
import pickle
import Hb_Kni_high2... | github_jupyter | import DSGRN
from DSGRN import *
import cProfile
import sys
sys.setrecursionlimit(10**8)
sys.path.insert(0,'../src')
import PhenotypeGraphFun
import CondensationGraph_iter
import ReducePhenotypeGraph
import PhenotypeGraphviz
import pickle
import Hb_Kni_high2low
database_s = Database("/home/elizabeth/Desktop/ACDC/ACDC... | 0.105498 | 0.488954 |
**Database Systems 2017 (ITWS-6960)**
**Final Project**
**Stephan Zednik**
```
import re
import pandas
import matplotlib
%matplotlib inline
```
# Recipe Data
```
from recipes import RecipeData
```
Open a connection to the recipes database
```
recipes = RecipeData()
recipes.is_connected()
```
Use the ``RecipeDat... | github_jupyter | import re
import pandas
import matplotlib
%matplotlib inline
from recipes import RecipeData
recipes = RecipeData()
recipes.is_connected()
recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).run().head(5)
recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).sh... | 0.526586 | 0.915205 |
## SEAI 2021 - Python - Lab 1
# Intro to Python
Vincenzo Nardelli - Niccolò Salvini
# DBSCAN
DBSCAN stands for *Density-based spatial clustering of applications with noise* is a data clustering algorithm developed in 1996 by Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. It is a non-parametric densi... | github_jupyter | import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_moons
plt.rcParams["figure.figsize"] = (14, 10)
sns.set()
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
plt.scatter(X[:,0],X[:,1])
plt.show()
from sklearn.cluster import KMeans
km = KMeans(n_clusters=2)
km.fit(X)
... | 0.595257 | 0.96793 |
# Numpy
" NumPy is the fundamental package for scientific computing with Python. It contains among other things:
* a powerful N-dimensional array object
* sophisticated (broadcasting) functions
* useful linear algebra, Fourier transform, and random number capabilities "
-- From the [NumPy](http://www.numpy.org/) l... | github_jupyter | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
np.random.random((3, 2)) # Array of shape (3, 2), entries uniform in [0, 1).
np.random.seed(0)
print(np.random.random(2))
# Reset the global random state to the same state.
np.random.seed(... | 0.719581 | 0.988165 |
<a href="https://colab.research.google.com/github/Bhavani-Rajan/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Ope... | github_jupyter | # Your Work Here
## basics of matplotlib from youtube video.
import matplotlib.pyplot as plt
x1 = [1,2,3,4,5]
y1 = [1,4,9,16,25]
x2 = [6,7,8,9,10]
y2 = [1,8,27,64,125]
#plt.plot(x1,y1,label='square')
#plt.plot(x2,y2,label='cube')
plt.bar(x1,y1,label='square',color='r')
plt.bar(x2,y2,label='cube',color='c')
plt.... | 0.509764 | 0.920576 |
```
import pylab as pl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import astropy.constants as const
import astropy.units as u
from astropy.table import Table
from tools.legacy impo... | github_jupyter | import pylab as pl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import astropy.constants as const
import astropy.units as u
from astropy.table import Table
from tools.legacy import c... | 0.465873 | 0.883538 |
# Load X values
```
import os
dir = [x[0] for x in os.walk('/Users/apple/Desktop/eeglab14_1_2b/participant')]
dir.sort()
dir.reverse()
dir.pop()
dir.reverse()
print(len(dir))
import glob
filenames = []
for x in dir:
temp = glob.glob(x + "/*.png")
temp.sort()
filenames.append(temp)
print(len(filenames))
fro... | github_jupyter | import os
dir = [x[0] for x in os.walk('/Users/apple/Desktop/eeglab14_1_2b/participant')]
dir.sort()
dir.reverse()
dir.pop()
dir.reverse()
print(len(dir))
import glob
filenames = []
for x in dir:
temp = glob.glob(x + "/*.png")
temp.sort()
filenames.append(temp)
print(len(filenames))
from matplotlib import i... | 0.262653 | 0.627523 |
# Allegheny County Jail Statistics
## Author: Solomon Heisey
### Goal
The goal of this project is to expose statistical trends present at the Allegheny County Jail from 2015 to 2021. DISCLAIMER:
The goal of this project is not to hypothesize why certain trends appear, rather, it is designed to educate others about
th... | github_jupyter | import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
url = 'http://tools.wprdc.org/downstream/66cdcd57-6c92-4aaa-8800-0ed9d8f03e22'
file_name = './data.csv'
try:
df_all = pd.read_csv('data.csv', sep=',', engine='python', parse_dates=['census_date'])
except FileNotFoundError:
df_all = pd.rea... | 0.295128 | 0.824214 |
```
class Stock():
def __init__(self, stock_name, stock_price, stock_type, stock_amount):
'''Initilize stock info
Attributes:
stock_name
stock_price,
stock_type,
stock_amount'''
self.name = stock_name
self.amount = ... | github_jupyter | class Stock():
def __init__(self, stock_name, stock_price, stock_type, stock_amount):
'''Initilize stock info
Attributes:
stock_name
stock_price,
stock_type,
stock_amount'''
self.name = stock_name
self.amount = stoc... | 0.436262 | 0.243912 |
```
import numpy as np
import pandas as pd
import mxnet as mx
import matplotlib.pyplot as plt
import plotly.plotly as py
import logging
logging.basicConfig(level=logging.DEBUG)
train1=pd.read_csv('../data/train.csv')
train1.shape
train1.iloc[0:4, 0:15]
train=np.asarray(train1.iloc[0:33600,:])
cv=np.asarray(train1.ilo... | github_jupyter | import numpy as np
import pandas as pd
import mxnet as mx
import matplotlib.pyplot as plt
import plotly.plotly as py
import logging
logging.basicConfig(level=logging.DEBUG)
train1=pd.read_csv('../data/train.csv')
train1.shape
train1.iloc[0:4, 0:15]
train=np.asarray(train1.iloc[0:33600,:])
cv=np.asarray(train1.iloc[33... | 0.610686 | 0.707922 |
# **Álgebra Linear**
```
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
v1 = [2,5]
v2 = [1,6,8]
v1 ,v2
type(v1)
v3 = np.array([8,3,9])
type(v3)
v3.shape[0]
v3.shape
v4=np.array([1.+2.j, 3.+4.j, 5, 6.j], dtype=complex)
v4
type(v4)
```
# **Lendo elementos de um array**
```
a... | github_jupyter | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
v1 = [2,5]
v2 = [1,6,8]
v1 ,v2
type(v1)
v3 = np.array([8,3,9])
type(v3)
v3.shape[0]
v3.shape
v4=np.array([1.+2.j, 3.+4.j, 5, 6.j], dtype=complex)
v4
type(v4)
a = np.array([7,5,3,9,0,2])
a
a[0]
a[1:]
a[1:4]
a[-1]
a[-3]
a[-6]
a[-3... | 0.255808 | 0.865793 |
```
# Hidden code cell for setup
# Imports and setup
import astropixie
import astropixie_widgets
import enum
import ipywidgets
import numpy
astropixie_widgets.config.setup_notebook()
from astropixie.data import pprint as show_data_in_table
from numpy import intersect1d as stars_in_both
class SortOrder(enum.Enum):
... | github_jupyter | # Hidden code cell for setup
# Imports and setup
import astropixie
import astropixie_widgets
import enum
import ipywidgets
import numpy
astropixie_widgets.config.setup_notebook()
from astropixie.data import pprint as show_data_in_table
from numpy import intersect1d as stars_in_both
class SortOrder(enum.Enum):
B... | 0.615781 | 0.910704 |
```
# run this cell to check your Python version is OK for this notebook!
import sys
def check_python_version_above_3_6():
major = sys.version_info.major
minor = sys.version_info.minor
if major < 3 or minor < 6:
print('ERROR you need to run this notebook with Python 3.6 or above (as f-strings used)'... | github_jupyter | # run this cell to check your Python version is OK for this notebook!
import sys
def check_python_version_above_3_6():
major = sys.version_info.major
minor = sys.version_info.minor
if major < 3 or minor < 6:
print('ERROR you need to run this notebook with Python 3.6 or above (as f-strings used)')
... | 0.427516 | 0.893588 |
# Compare Robustness
## Set up the Environment
```
# Import everything that's needed to run the notebook
import os
import pickle
import dill
import pathlib
import datetime
import random
import time
from IPython.display import display, Markdown, Latex
import pandas as pd
import numpy as np
from sklearn.pipeline impor... | github_jupyter | # Import everything that's needed to run the notebook
import os
import pickle
import dill
import pathlib
import datetime
import random
import time
from IPython.display import display, Markdown, Latex
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, Tr... | 0.652906 | 0.773216 |
# Gaussian Density Filtering for predicting mid-price raise in LOBs
In this notebook we present results of prediction if the mid price will raise in next tick for LOBs. For data preparation we use GDF filters with various parameters $r$ and $\sigma$.
For prediction we use SVM - we will use 4 features for predictions,... | github_jupyter | %matplotlib inline
from ast import literal_eval
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from typing import Tuple
import numpy as np
import itertools
import os
from collections import OrderedDict
from sklearn import preproc... | 0.587825 | 0.980034 |
## Example. Probability of a girl birth given placenta previa
**Analysis using a uniform prior distribution**
```
%matplotlib inline
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
from scipy.special import expit
az.style.use('arviz-darkgrid')
%config Inline.figure_formats = ... | github_jupyter | %matplotlib inline
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
from scipy.special import expit
az.style.use('arviz-darkgrid')
%config Inline.figure_formats = ['retina']
%load_ext watermark
births = 987
fem_births = 437
with pm.Model() as model_1:
theta = pm.Uniform('the... | 0.709623 | 0.987436 |
# Experimento 1: _All VS Some_
Comparando o resultado de 20 rodadas de classificação com KNN entre o modelo treinado com todos os 18 canais e o modelo treinado com apenas os 6 no qual obteve melhor desempenho
## importando bibliotecas
```
import numpy as np
import pandas as pd
from sklearn.model_selection import tr... | github_jupyter | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# lista de str ['01', '02', ..., '24']
file_range = ['{:0>2}'.format(chb) for chb in range(1, 25)]
# Carregando sementes
seed_array = np.load('./20_seeds.npy')
print(f'seeds:... | 0.439146 | 0.770335 |
# Klasyfikatory
### Pakiety
```
import pandas as pd
import numpy as np
import category_encoders as ce
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import recall_score
from sklearn.pipeline i... | github_jupyter | import pandas as pd
import numpy as np
import category_encoders as ce
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import recall_score
from sklearn.pipeline import Pipeline
from sklearn.metric... | 0.605799 | 0.797596 |
# Shortcuts para Jupyter lab
Con la celda en azul podemos:
añadir una celda hacia arriba pulsando a
añadir una celda hacia abajo pulsando b
copiar una celda completa pulsando c
borrar una celda pulsando d(x2)
ejecutar y avanzar una celda pulsando SHIFT + INTRO
ejecutar la celda seleccionada pu... | github_jupyter | from IPython.display import IFrame
IFrame(src = "https://kapeli.com/cheat_sheets/Conda.docset/Contents/Resources/Documents/index", width=800, height=400)
# Esta celda solo funcionará para los usuarios de mac o linux, en windows el comando es dir
!pwd
!conda info
x = 9
x
x = int(10)
x
4
x = 5
2 + 4, 2 / 4, 6 - 10, 4 ... | 0.34632 | 0.928376 |
>>> Work in Progress (Following are the lecture notes of Prof Percy Liang/Prof Dorsa Sadigh - CS221 - Stanford. This is my interpretation of his excellent teaching and I take full responsibility of any misinterpretation/misinformation provided herein.)
## Lecture 5: Search 1 - Dynamic Programming, Uniform Cost Search ... | github_jupyter | import sys
sys.setrecursionlimit 100000 | 0.078735 | 0.945349 |
# Statistics
:label:`sec_statistics`
Undoubtedly, to be a top deep learning practitioner, the ability to train the state-of-the-art and high accurate models is crucial. However, it is often unclear when improvements are significant, or only the result of random fluctuations in the training process. To be able to dis... | github_jupyter | import random
from mxnet import np, npx
from d2l import mxnet as d2l
npx.set_np()
# Sample datapoints and create y coordinate
epsilon = 0.1
random.seed(8675309)
xs = np.random.normal(loc=0, scale=1, size=(300,))
ys = [
np.sum(
np.exp(-(xs[:i] - xs[i])**2 /
(2 * epsilon**2)) / np.sqrt(2 * n... | 0.76533 | 0.994396 |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Azure Machine Lear... | github_jupyter | import os
import azureml.core
from azureml.core.compute import ComputeTarget, DataFactoryCompute
from azureml.exceptions import ComputeTargetException
from azureml.core import Workspace, Experiment
from azureml.pipeline.core import Pipeline
from azureml.core.datastore import Datastore
from azureml.data.data_reference i... | 0.217836 | 0.897201 |
# The Rational Speech Act framework
(This tutorial written by Noah Goodman and Eli Bingham)
Human language depends on the assumption of *cooperativity*, that speakers attempt to provide relevant information to the listener; listeners can use this assumption to reason *pragmatically* about the likely state of the world... | github_jupyter | #first some imports
import torch
torch.set_default_dtype(torch.float64) # double precision for numerical stability
import collections
import argparse
import matplotlib.pyplot as plt
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from search_inference import factor, HashingMarginal, mem... | 0.46223 | 0.992184 |
# Get Started
Here are some sample queries. See what BQX can do.
## Initialization
```
from bqx.query import Query as Q
from bqx.parts import Table as T, Column as C
from bqx.func import SUM
```
# 1. Simple examples
## 1.1 Make simple query.
```
q = Q().SELECT('name').FROM('sample_table')
print(q.getq())
```
## ... | github_jupyter | from bqx.query import Query as Q
from bqx.parts import Table as T, Column as C
from bqx.func import SUM
q = Q().SELECT('name').FROM('sample_table')
print(q.getq())
sample_table = T('sample_table')
name = C('name')
q = Q().SELECT(name).FROM(sample_table)
print(q.getq())
sample_table = T('sample_table')
name = C('nam... | 0.375477 | 0.887644 |
# 07.03 - NEURAL NETWORKS
```
!wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()
import numpy as np
import matplotlib.pyplot as plt
from local.lib import mlutils
from IPython.display import Image
%matp... | github_jupyter | !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()
import numpy as np
import matplotlib.pyplot as plt
from local.lib import mlutils
from IPython.display import Image
%matplotlib inline
## KEEPOUTPUT
Im... | 0.367384 | 0.92976 |
# Style Transfer on ONNX Models with OpenVINO

This notebook demonstrates [Fast Neural Style Transfer](https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neu... | github_jupyter | import sys
from enum import Enum
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import HTML, FileLink, clear_output, display
from openvino.runtime import Core, PartialShape
from yaspin import yaspin
sys.path.append("../utils")
from notebook_utils import dow... | 0.610453 | 0.951051 |
```
!wget https://download.pytorch.org/tutorial/hymenoptera_data.zip -P data/
!unzip -d data data/hymenoptera_data.zip
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models
from torchvision import transforms as T
... | github_jupyter | !wget https://download.pytorch.org/tutorial/hymenoptera_data.zip -P data/
!unzip -d data data/hymenoptera_data.zip
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models
from torchvision import transforms as T
impo... | 0.836688 | 0.817319 |
# Sentence Transformers 학습과 활용
본 노트북에서는 `klue/roberta-base` 모델을 **KLUE** 내 **STS** 데이터셋을 활용하여 모델을 훈련하는 예제를 다루게 됩니다.
학습을 통해 얻어질 `sentence-klue-roberta-base` 모델은 입력된 문장의 임베딩을 계산해 유사도를 예측하는데 사용할 수 있게 됩니다.
학습 과정 이후에는 간단한 예제 코드를 통해 모델이 어떻게 활용되는지도 함께 알아보도록 할 것입니다.
모든 소스 코드는 [`sentence-transformers`](https://github.com/UK... | github_jupyter | !pip install sentence-transformers datasets
import math
import logging
from datetime import datetime
import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation... | 0.546012 | 0.967472 |
# VacationPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
%matplotlib widget
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
impo... | github_jupyter | %matplotlib widget
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
gmaps.configure(api_key = g_key)
weather_data = pd.read_csv("output_data/cities.csv")
weather_data
fig = gmaps.figure()... | 0.354321 | 0.833426 |
**Chapter 1 – The Machine Learning landscape**
_This is the code used to generate some of the figures in chapter 1._
<table align="left">
<td>
<a href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/01_the_machine_learning_landscape.ipynb" target="_parent"><img src="https://colab.resear... | github_jupyter | # Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
def prepare_country_stats(oecd_bli, gdp_per_capita):
oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
oecd_bli = oecd_bli.pivot(index="Country", co... | 0.482185 | 0.941439 |
```
!pip install coremltools
# Initialise packages
from u2net import U2NETP
import coremltools as ct
from coremltools.proto import FeatureTypes_pb2 as ft
import torch
import torch.nn as nn
from torch.autograd import Variable
import os
import numpy as np
from PIL import Image
from torchvision import transforms
from ... | github_jupyter | !pip install coremltools
# Initialise packages
from u2net import U2NETP
import coremltools as ct
from coremltools.proto import FeatureTypes_pb2 as ft
import torch
import torch.nn as nn
from torch.autograd import Variable
import os
import numpy as np
from PIL import Image
from torchvision import transforms
from skim... | 0.821617 | 0.38523 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.