| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
nussl | nussl-master/nussl/core/utils.py | """
Provides utilities for running nussl algorithms that do not belong to
any specific algorithm or that are shared between algorithms.
"""
import warnings
import numpy as np
import torch
import random
from .. import musdb
import librosa
from . import constants
import os
from contextlib import contextmanager
def se... | 23,707 | 36.27673 | 99 | py |
nussl | nussl-master/nussl/core/efz_utils.py | """
The *nussl* External File Zoo (EFZ) is a server that houses all files that are too large to
bundle with *nussl* when distributing it through ``pip`` or Github. These types of files include
audio examples, benchmark files for tests, and trained neural network models.
*nussl* has built-in utilities for accessing the... | 25,938 | 40.30414 | 159 | py |
nussl | nussl-master/nussl/separation/base/deep_mixin.py | import torch
import yaml
import json
from ...ml import SeparationModel
from ...datasets import transforms as tfm
OMITTED_TRANSFORMS = (
tfm.GetExcerpt,
tfm.MagnitudeWeights,
tfm.SumSources,
tfm.Cache,
tfm.IndexSources,
)
class DeepMixin:
def load_model(self, model_path, device='cpu'):
... | 5,490 | 33.753165 | 91 | py |
nussl | nussl-master/nussl/separation/spatial/projet.py | import copy
import numpy as np
import torch
from .. import SeparationBase, SeparationException
from ... import AudioSignal
class Projet(SeparationBase):
"""
Implements the PROJET algorithm for spatial audio separation using projections.
This implementation uses PyTorch to speed up computation considerab... | 11,377 | 37.181208 | 99 | py |
nussl | nussl-master/nussl/separation/deep/deep_mask_estimation.py | import torch
from ..base import MaskSeparationBase, DeepMixin, SeparationException
from ... import ml
class DeepMaskEstimation(DeepMixin, MaskSeparationBase):
"""
Separates an audio signal using the masks produced by a deep model for every
time-frequency point. It expects that the model outputs a dictio... | 4,119 | 42.829787 | 91 | py |
nussl | nussl-master/nussl/separation/deep/deep_audio_estimation.py | import torch
from ..base import SeparationBase, DeepMixin, SeparationException
class DeepAudioEstimation(DeepMixin, SeparationBase):
"""
Separates an audio signal using a model that produces separated sources directly
in the waveform domain. It expects that the model outputs a dictionary where one
of ... | 2,455 | 39.933333 | 86 | py |
nussl | nussl-master/nussl/separation/deep/deep_clustering.py | import torch
from ..base import ClusteringSeparationBase, DeepMixin, SeparationException
class DeepClustering(DeepMixin, ClusteringSeparationBase):
"""
Clusters the embedding produced by a deep model for every time-frequency point.
This is the deep clustering source separation approach. It is flexible wi... | 2,524 | 44.089286 | 86 | py |
nussl | nussl-master/nussl/datasets/base_dataset.py | import warnings
from typing import Iterable
import copy
from torch.utils.data import Dataset
from .. import AudioSignal
from . import transforms as tfm
import tqdm
class BaseDataset(Dataset, Iterable):
"""
The BaseDataset class is the starting point for all dataset hooks
in nussl. To subclass BaseDatase... | 14,293 | 38.927374 | 90 | py |
nussl | nussl-master/nussl/datasets/transforms.py | import os
import shutil
import logging
import random
from collections import OrderedDict
import torch
import zarr
import numcodecs
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from .. import utils
# This is for when you're running multiple
# training threads
if hasattr(numcodecs, 'blosc'):
... | 30,671 | 36.496333 | 94 | py |
nussl | nussl-master/nussl/ml/confidence.py | """
There are ways to measure the quality of a separated source without
requiring ground truth. These functions operate on the output of
clustering-based separation algorithms and work by analyzing
the clusterability of the feature space used to generate the
separated sources.
"""
from sklearn.metrics import silhouett... | 14,964 | 42.502907 | 89 | py |
nussl | nussl-master/nussl/ml/networks/separation_model.py | import os
import json
import inspect
import torch
from torch import nn
import numpy as np
from . import modules
from ... import __version__
import copy
def _remove_cache_from_tfms(transforms):
"""Helper function to remove cache from transforms.
"""
from ... import datasets
transforms = copy.deepcopy(... | 12,448 | 36.954268 | 92 | py |
nussl | nussl-master/nussl/ml/networks/modules/filter_bank.py | import nussl
from torch import nn
import torch
from .... import AudioSignal
class FilterBank(nn.Module):
"""
Base class for implementing short-time filter-bank style transformations
of an audio signal.
This class accepts two different tensors, as there are two modes it can
be called in:
... | 12,694 | 36.670623 | 81 | py |
nussl | nussl-master/nussl/ml/networks/modules/blocks.py | import warnings
import torch
import torch.nn as nn
import librosa
import numpy as np
from torch.utils.checkpoint import checkpoint
class AmplitudeToDB(nn.Module):
"""
Takes a magnitude spectrogram and converts it to a log
amplitude spectrogram in decibels.
Args:
data (torch.Tensor): Magni... | 34,373 | 38.239726 | 116 | py |
nussl | nussl-master/nussl/ml/unfold/gaussian_mixture.py | import torch
import torch.nn as nn
import numpy as np
import gpytorch
class GaussianMixtureTorch(nn.Module):
def __init__(self, n_components, n_iter=5, covariance_type='diag',
covariance_init=1.0, reg_covar=1e-4):
"""
Initializes a Gaussian mixture model with n_clusters.
... | 7,016 | 36.524064 | 92 | py |
nussl | nussl-master/nussl/ml/unfold/__init__.py | """
Deep unfolding is a type of architecture where an optimization
process like clustering, non-negative matrix factorization and
other EM style algorithms (anything with update functions) are
unfolded as layers in a neural network. In practice this results
in having the operations available to do on torch Tensors. Thi... | 492 | 40.083333 | 69 | py |
nussl | nussl-master/nussl/ml/train/loss.py | from itertools import permutations, combinations
import torch
import torch.nn as nn
class L1Loss(nn.L1Loss):
DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}
class MSELoss(nn.MSELoss):
DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}
class KLDivLoss(nn.KLDivLoss):
... | 10,749 | 35.815068 | 84 | py |
nussl | nussl-master/nussl/ml/train/trainer.py | import os
import logging
import copy
import time
from datetime import timedelta
from ignite.engine import Events, Engine, EventEnum
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import RunningAverage
from torch.utils.tensorboard import SummaryWriter
import torch
... | 13,287 | 36.325843 | 91 | py |
nussl | nussl-master/nussl/ml/train/closures.py | import copy
import torch
from . import loss
from .trainer import BackwardsEvents
class Closure(object):
"""
Closures are used with ignite Engines to train a model given an optimizer
and a set of loss functions. Closures perform forward passes of models given
the input data. The loss is computed vi... | 10,220 | 36.577206 | 104 | py |
nussl | nussl-master/recipes/wham/chimera.py | """
This recipe trains and evaluates a mask inference model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
"""
import nussl
from nussl import ml, datasets, utils, separation, evaluation
import os
import torch... | 7,122 | 35.716495 | 90 | py |
nussl | nussl-master/recipes/wham/evaluate_dpcl.py | """
This recipe trains and evaluates a deep clustering model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌───────────────────┬────────────────────┬────────────────────┐
│ │ OVERALL (N = ... | 3,803 | 32.368421 | 90 | py |
nussl | nussl-master/recipes/wham/deep_clustering.py | """
This recipe trains and evaluates a deep clustering model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌───────────────────┬────────────────────┬────────────────────┐
│ │ OVERALL (N = ... | 7,718 | 35.239437 | 93 | py |
nussl | nussl-master/recipes/wham/mask_inference.py | """
This recipe trains and evaluates a mask inference model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌────────────────────┬────────────────────┬───────────────────┐
│ │ OVERALL (N... | 7,759 | 35.093023 | 89 | py |
nussl | nussl-master/tests/conftest.py | import pytest
from nussl import efz_utils
import tempfile
import os
import musdb
import zipfile
import scaper
import random
import glob
import nussl
from nussl.datasets import transforms
from nussl import datasets
import numpy as np
import torch
import json
def _unzip(path_to_zip, target_path):
with zipfile.ZipFi... | 8,129 | 30.511628 | 74 | py |
nussl | nussl-master/tests/evaluation/test_evaluation.py | import nussl
import pytest
from nussl.core.masks import SoftMask, BinaryMask
import numpy as np
from nussl.evaluation.evaluation_base import AudioSignalListMismatchError
import torch
import json
import tempfile
import os
import glob
@pytest.fixture(scope='module')
def estimated_and_true_sources(musdb_tracks):
i =... | 16,758 | 33.412731 | 84 | py |
nussl | nussl-master/tests/core/test_utils.py | import nussl
import numpy as np
from nussl.separation.base import MaskSeparationBase, SeparationBase
from nussl.core.masks import BinaryMask, SoftMask, MaskBase
import pytest
import torch
import random
import matplotlib.pyplot as plt
import os
import tempfile
def test_utils_seed():
seeds = [0, 123, 666, 15, 2]
... | 10,429 | 29.408163 | 88 | py |
nussl | nussl-master/tests/separation/test_deep.py | from nussl.separation.base import DeepMixin, SeparationException
from nussl.separation.base.deep_mixin import OMITTED_TRANSFORMS
from nussl import datasets, ml, separation, evaluation
import nussl
import torch
from torch import optim
import tempfile
import pytest
import os
import numpy as np
fix_dir = 'tests/local/tra... | 9,458 | 32.661922 | 96 | py |
nussl | nussl-master/tests/datasets/test_base_dataset.py | import pytest
from nussl.datasets import BaseDataset, transforms
from nussl.datasets.base_dataset import DataSetException
import nussl
from nussl import STFTParams
import numpy as np
import soundfile as sf
import itertools
import tempfile
import os
import torch
class BadTransform(object):
def __init__(self, fake=... | 10,049 | 30.40625 | 88 | py |
nussl | nussl-master/tests/datasets/test_transforms.py | import pytest
from nussl.datasets import transforms
from nussl.datasets.transforms import TransformException
import nussl
from nussl import STFTParams, evaluation
import numpy as np
from nussl.core.masks import BinaryMask, SoftMask
import itertools
import copy
import torch
import tempfile
import os
stft_tol = 1e-6
d... | 13,537 | 28.239741 | 79 | py |
nussl | nussl-master/tests/ml/test_gaussian_mixture.py | from nussl.ml.unfold import GaussianMixtureTorch
import torch
import numpy as np
from torch import nn
from sklearn.metrics import adjusted_mutual_info_score
from sklearn import mixture, cluster
def test_ml_gaussian_mixture():
loc = torch.randn(1, 1, 3, 2)
cov = torch.eye(2).view(1, 1, 1, 2, 2)
cov = cov.r... | 3,347 | 37.045455 | 83 | py |
nussl | nussl-master/tests/ml/test_trainer.py | from nussl import ml, datasets
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
# uncomment if you want to see the trainer/engine logs
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:... | 9,232 | 37.152893 | 89 | py |
nussl | nussl-master/tests/ml/test_separation_model.py | import nussl
import torch
from torch import nn
from nussl.ml.networks import SeparationModel, modules, builders
from nussl import datasets
import pytest
import json
import tempfile
import copy
n_features = 257
mi_config = builders.build_recurrent_mask_inference(
n_features, 50, 2, True, 0.3, 2, 'softmax',
)
dpc... | 14,816 | 29.054767 | 78 | py |
nussl | nussl-master/tests/ml/test_loss.py | import torch
import nussl
from nussl import ml
from torch import nn
import numpy as np
from itertools import permutations
import random
import copy
def test_register_loss():
class ExampleLoss(nn.Module):
DEFAULT_KEYS = {'key1': 'arg1', 'key2': 'arg2'}
def forward(self, arg1, arg2):
ret... | 7,004 | 31.133028 | 83 | py |
nussl | nussl-master/tests/ml/test_modules.py | import torch
import nussl
from nussl.datasets import transforms
from nussl import ml
import pytest
import numpy as np
import librosa
import itertools
def test_register_module():
class ExampleModule(torch.nn.Module):
def forward(self, data):
data = data * 2
return data
assert E... | 12,748 | 29.573141 | 87 | py |
nussl | nussl-master/tests/ml/test_filterbank.py | from nussl import ml, datasets
from nussl.core.constants import ALL_WINDOWS
import nussl
import pytest
import torch
import itertools
from scipy.signal import check_COLA
import numpy as np
def test_filter_bank(one_item, monkeypatch):
pytest.raises(
NotImplementedError, ml.networks.modules.FilterBank, 2048)
... | 4,363 | 29.305556 | 92 | py |
nussl | nussl-master/tests/ml/test_confidence.py | from nussl import ml
import nussl
import torch
import numpy as np
from sklearn import datasets
import pytest
import copy
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
@pytest.fixture(scope="module")
def simple_sine_data():
nussl.utils.seed(0)
folder = 'ignored'
stft_params = nussl.STFTParams(w... | 9,796 | 31.440397 | 101 | py |
nussl | nussl-master/tests/ml/test_overfit.py | from nussl import ml, datasets, evaluation
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
from matplotlib import pyplot as plt
logging.basicConfig(
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S'... | 3,197 | 31.969072 | 89 | py |
nussl | nussl-master/tests/ml/test_closures.py | from nussl import datasets, ml
from torch import optim, nn
import torch
import numpy as np
from nussl.ml.train.closures import ClosureException
import pytest
import nussl
def test_base_closure():
n_batch = 40
n_time = 400
n_freq = 129
n_sources = 4
n_embedding = 20
embedding = torch.rand(n_ba... | 8,352 | 27.030201 | 88 | py |
nussl | nussl-master/tests/ml/test_gradients.py | from nussl import ml, datasets, utils
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def test_gradients(mix_source_folder):
os.makedirs('tests/local/', exist_ok=True)
utils.seed(0)
tfms = datasets.transforms.Compose([
... | 7,122 | 35.906736 | 89 | py |
nussl | nussl-master/docs/tutorials/training.py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Training deep models in *nuss... | 38,430 | 32.476481 | 304 | py |
nussl | nussl-master/docs/tutorials/datasets.py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Handling data in *nussl... | 17,169 | 30.275046 | 100 | py |
nussl | nussl-master/docs/examples/spatial/projet.py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#... | 3,113 | 25.389831 | 108 | py |
RE-Net | RE-Net-master/pretrain.py | import argparse
import numpy as np
import time
import torch
import utils
import os
from global_model import RENet_global
from sklearn.utils import shuffle
import pickle
def train(args):
# load data
num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
train_data, train_times_o... | 5,489 | 37.93617 | 115 | py |
RE-Net | RE-Net-master/test.py | import argparse
import numpy as np
import torch
import utils
import os
from model import RENet
from global_model import RENet_global
import pickle
def test(args):
# load data
num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
if args.dataset == 'icews_know':
train_d... | 6,522 | 36.705202 | 119 | py |
RE-Net | RE-Net-master/RGCN.py | import torch
import torch.nn as nn
import dgl.function as fn
class RGCNLayer(nn.Module):
def __init__(self, in_feat, out_feat, bias=None, activation=None,
self_loop=False, dropout=0.0):
super(RGCNLayer, self).__init__()
self.bias = bias
self.activation = activation
... | 3,494 | 35.789474 | 102 | py |
RE-Net | RE-Net-master/utils.py | import numpy as np
import os
import dgl
import torch
from collections import defaultdict
def get_total_number(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
for line in fr:
line_split = line.split()
return int(line_split[0]), int(line_split[1])
def load_... | 11,375 | 34.003077 | 185 | py |
RE-Net | RE-Net-master/model.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from Aggregator import MeanAggregator, AttnAggregator, RGCNAggregator
from utils import *
import time
class RENet(nn.Module):
def __init__(self, in_dim, h_dim, num_rels, dropout=0, model=0, seq_len=10, num_k=10):
super(R... | 19,170 | 41.792411 | 175 | py |
RE-Net | RE-Net-master/Aggregator.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from utils import *
from RGCN import RGCNBlockLayer as RGCNLayer
class RGCNAggregator_global(nn.Module):
def __init__(self, h_dim, dropout, num_nodes, num_rels, num_bases, model, seq_len=10, maxpool=1):
super(RGCNAggrega... | 15,591 | 41.835165 | 157 | py |
RE-Net | RE-Net-master/global_model.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from Aggregator import RGCNAggregator_global
from utils import *
import time
class RENet_global(nn.Module):
def __init__(self, in_dim, h_dim, num_rels, dropout=0, model=0, seq_len=10, num_k=10, maxpool=1):
super(RENet_gl... | 3,002 | 29.333333 | 111 | py |
RE-Net | RE-Net-master/train.py | import argparse
import numpy as np
import time
import torch
import utils
import os
from model import RENet
from global_model import RENet_global
from sklearn.utils import shuffle
import pickle
def train(args):
# load data
num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
i... | 10,762 | 43.292181 | 152 | py |
RE-Net | RE-Net-master/baselines/TATransE.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn... | 16,007 | 43.715084 | 200 | py |
RE-Net | RE-Net-master/baselines/evaluation_TTransE.py | import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor ... | 6,469 | 36.836257 | 193 | py |
RE-Net | RE-Net-master/baselines/TADistmult.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn... | 18,104 | 44.835443 | 200 | py |
RE-Net | RE-Net-master/baselines/loss.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTe... | 1,481 | 30.531915 | 142 | py |
RE-Net | RE-Net-master/baselines/model.py | import os
import math
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from LSTMLinear import LSTMModel
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = ... | 8,996 | 36.962025 | 87 | py |
RE-Net | RE-Net-master/baselines/eval_lib.py | import torch
import torch.autograd as autograd
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
def isHit10(triple, tree, cal_embedding, tripleDict, isTail):
... | 1,836 | 28.15873 | 71 | py |
RE-Net | RE-Net-master/baselines/evaluation_TADistMult.py | import os
import numpy as np
import time
import datetime
import random
import multiprocessing
import math
import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity, linear_kernel
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_availab... | 6,125 | 34.005714 | 161 | py |
RE-Net | RE-Net-master/baselines/evaluation_TATransE.py | import numpy as np
import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
el... | 6,301 | 35.218391 | 161 | py |
RE-Net | RE-Net-master/baselines/TTransE.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn... | 15,907 | 44.451429 | 235 | py |
RE-Net | RE-Net-master/baselines/LSTMLinear.py | import math
import torch as th
import torch
from torch import nn
import numpy as np
class LSTMModel(nn.Module):
def __init__(self, in_dim, n_layer):
super(LSTMModel, self).__init__()
self.n_layer = n_layer
self.hidden_dim = in_dim
# self.lstm = nn.LSTM(in_dim, self.hidden_dim, n_l... | 2,897 | 28.571429 | 81 | py |
RE-Net | RE-Net-master/data/YAGO/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line... | 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/ICEWS14/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line... | 10,737 | 32.661442 | 97 | py |
RE-Net | RE-Net-master/data/ICEWS18/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line... | 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/WIKI/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line... | 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/GDELT/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line... | 10,633 | 32.23125 | 97 | py |
OPT | OPT-main/src/main.py | import numpy as np
import os
import collections
from os.path import dirname, abspath
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from utils.logging import get_logg... | 3,318 | 29.731481 | 121 | py |
OPT | OPT-main/src/run.py | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTR... | 9,273 | 34.945736 | 116 | py |
OPT | OPT-main/src/modules/mixers/token_opt_qmix.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import OPTTransformer
class TokenOPTQMixer(nn.Module):
def __init__(self, args):
super(TokenOPTQMixer, self).__init__()
self.args = args
self.state_shape = None
self.token_embedding = nn.Linear... | 2,185 | 33.15625 | 118 | py |
OPT | OPT-main/src/modules/mixers/entity_opt_qmix.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import OPTTransformer
class EntityOPTQMixer(nn.Module):
def __init__(self, args):
super(EntityOPTQMixer, self).__init__()
self.args = args
input_shape = args.entity_shape
if self.args.entity_la... | 2,747 | 36.643836 | 118 | py |
OPT | OPT-main/src/modules/agents/entity_opt_agent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import EntityOPTTransformer
class EntityOPTAgent(nn.Module):
def __init__(self, input_shape, args):
super(EntityOPTAgent, self).__init__()
self.args = args
self.entity_shape = None
self.entity_e... | 3,074 | 37.4375 | 146 | py |
OPT | OPT-main/src/modules/agents/token_opt_agent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import TokenOPTTransformer
class TokenOPTAgent(nn.Module):
def __init__(self, input_shape, args):
super(TokenOPTAgent, self).__init__()
self.args = args
self.x_shape = None
self.token_embedding ... | 2,410 | 32.957746 | 128 | py |
OPT | OPT-main/src/modules/layers/entity_opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
from torch.distributions import kl_divergence
from torch.distributions import Categorical
class ScaledDotProductEntityOPTAttention(nn.Module):
def __init__(self, temperature, dropout_attn=0.0):
super().__init__... | 6,584 | 32.426396 | 127 | py |
OPT | OPT-main/src/modules/layers/token_opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
from torch.distributions import kl_divergence
from torch.distributions import Categorical
class ScaledDotProductTokenOPTAttention(nn.Module):
def __init__(self, temperature, dropout_attn=0.0):
super().__init__(... | 6,357 | 32.287958 | 126 | py |
OPT | OPT-main/src/modules/layers/opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
class ScaledDotProductOPTAttention(nn.Module):
def __init__(self, temperature, dropout_attn=0.0):
super().__init__()
self.temperature = temperature
self.dropout_attn = nn.Dropout(dropout_attn)
... | 4,856 | 29.54717 | 109 | py |
OPT | OPT-main/src/components/episode_buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class EpisodeBatch:
def __init__(self,
scheme,
groups,
batch_size,
max_seq_length,
data=None,
preprocess=None,
device... | 10,894 | 42.75502 | 134 | py |
OPT | OPT-main/src/components/action_selectors.py | import torch as th
from torch.distributions import Categorical
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.eps... | 2,225 | 32.727273 | 112 | py |
OPT | OPT-main/src/components/transforms.py | import torch as th
class Transform:
def transform(self, tensor):
raise NotImplementedError
def infer_output_info(self, vshape_in, dtype_in):
raise NotImplementedError
class OneHot(Transform):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor):
... | 568 | 24.863636 | 71 | py |
OPT | OPT-main/src/runners/parallel_runner.py | from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/bas... | 10,322 | 37.518657 | 133 | py |
OPT | OPT-main/src/controllers/basic_controller.py | from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
# This multi-agent controller shares parameters between agents
class BasicMAC:
def __init__(self, scheme, groups, args):
self.n_agents = args.n_agents
self.a... | 4,552 | 40.018018 | 125 | py |
OPT | OPT-main/src/controllers/entity_controller.py | from .basic_controller import BasicMAC
import torch as th
# This multi-agent controller shares parameters between agents
# takes entities + observation masks as input
class EntityMAC(BasicMAC):
def __init__(self, scheme, groups, args):
super(EntityMAC, self).__init__(scheme, groups, args)
def forward... | 2,021 | 38.647059 | 115 | py |
OPT | OPT-main/src/controllers/token_controller.py | from .basic_controller import BasicMAC
import torch as th
# This multi-agent controller shares parameters between agents
class TokenMAC(BasicMAC):
def __init__(self, scheme, groups, args):
super(TokenMAC, self).__init__(scheme, groups, args)
def forward(self, ep_batch, t, test_mode=False):
ag... | 1,697 | 39.428571 | 128 | py |
OPT | OPT-main/src/utils/rl_utils.py | import torch as th
def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
# Assumes <target_qs > in B*T*A and <reward >, <terminated >, <mask > in (at least) B*T-1*1
# Initialise last lambda -return for not terminated episodes
ret = target_qs.new_zeros(*targe... | 774 | 47.4375 | 110 | py |
OPT | OPT-main/src/learners/entity_opt_q_learner.py | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.entity_opt_qmix import EntityOPTQMixer
import torch as th
from torch.optim import RMSprop, Adam
class QLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = lo... | 8,046 | 43.214286 | 138 | py |
OPT | OPT-main/src/learners/token_opt_q_learner.py | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.token_opt_qmix import TokenOPTQMixer
import torch as th
from torch.optim import RMSprop, Adam
class QLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logg... | 8,202 | 45.607955 | 138 | py |
MST | MST-main/real/train_code/utils.py | import numpy as np
import scipy.io as sio
import os
import glob
import re
import torch
import torch.nn as nn
import math
import random
def _as_floats(im1, im2):
float_type = np.result_type(im1.dtype, im2.dtype, np.float32)
im1 = np.asarray(im1, dtype=float_type)
im2 = np.asarray(im2, dtype=float_type)
... | 5,293 | 27.771739 | 93 | py |
MST | MST-main/real/train_code/dataset.py | import torch.utils.data as tud
import random
import torch
import numpy as np
import scipy.io as sio
class dataset(tud.Dataset):
def __init__(self, opt, CAVE, KAIST):
super(dataset, self).__init__()
self.isTrain = opt.isTrain
self.size = opt.size
# self.path = opt.data_path
... | 3,450 | 34.57732 | 83 | py |
MST | MST-main/real/train_code/train.py | from architecture import *
from utils import *
from dataset import dataset
import torch.utils.data as tud
import torch
import torch.nn.functional as F
import time
import datetime
from torch.autograd import Variable
import os
from option import opt
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE... | 3,493 | 35.395833 | 115 | py |
MST | MST-main/real/train_code/architecture/MST_Plus_Plus.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (m... | 10,188 | 30.544892 | 116 | py |
MST | MST-main/real/train_code/architecture/DGSMP.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class Resblock(nn.Module):
def __init__(self, HBW):
super(Resblock, self).__init__()
self.block1 = nn.Sequential(nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
... | 15,283 | 46.318885 | 148 | py |
MST | MST-main/real/train_code/architecture/DAUHST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch import einsum
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b... | 13,343 | 35.26087 | 133 | py |
MST | MST-main/real/train_code/architecture/CST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def uniform(a,... | 20,061 | 32.381032 | 129 | py |
MST | MST-main/real/train_code/architecture/MST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (m... | 8,814 | 28.881356 | 116 | py |
MST | MST-main/real/train_code/architecture/BIRNAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class self_attention(nn.Module):
def __init__(self, ch):
super(self_attention, self).__init__()
self.conv1 = nn.Conv2d(ch, ch // 8, 1)
self.conv2 = nn.Conv2d(ch, ch // 8, 1)
self.conv3 = nn.Conv2d(ch, ch, 1)
... | 13,326 | 35.412568 | 119 | py |
MST | MST-main/real/train_code/architecture/GAP_Net.py | import torch.nn.functional as F
import torch
import torch.nn as nn
def A(x,Phi):
temp = x*Phi
y = torch.sum(temp,1)
return y
def At(y,Phi):
temp = torch.unsqueeze(y, 1).repeat(1,Phi.shape[1],1,1)
x = temp*Phi
return x
def shift_3d(inputs,step=2):
[bs, nC, row, col] = inputs.shape
for ... | 5,524 | 28.232804 | 81 | py |
MST | MST-main/real/train_code/architecture/Lambda_Net.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class LambdaNetAttention(nn.Module):
def __init__(
self,
dim,
):
super().__init__()
self.dim = dim
self.to_q = nn.Linear(dim, dim//8, bias=Fa... | 5,680 | 30.38674 | 95 | py |
MST | MST-main/real/train_code/architecture/ADMM_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def A(x,Phi):
temp = x*Phi
y = torch.sum(temp,1)
return y
def At(y,Phi):
temp = torch.unsqueeze(y, 1).repeat(1,Phi.shape[1],1,1)
x = temp*Phi
return x
class double_conv(nn.Module):
def __init__(self, in_channels, out_chann... | 6,191 | 29.653465 | 81 | py |
MST | MST-main/real/train_code/architecture/TSA_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
_NORM_BONE = False
def conv_block(in_planes, out_planes, the_kernel=3, the_stride=1, the_padding=1, flag_norm=False, flag_norm_act=True):
conv = nn.Conv2d(in_planes, out_planes, kernel_size=the_kernel, stride=the_stride, padding... | 14,086 | 41.687879 | 118 | py |
MST | MST-main/real/train_code/architecture/__init__.py | import torch
from .MST import MST
from .GAP_Net import GAP_net
from .ADMM_Net import ADMM_net
from .TSA_Net import TSA_Net
from .HDNet import HDNet, FDL
from .DGSMP import HSI_CS
from .BIRNAT import BIRNAT
from .MST_Plus_Plus import MST_Plus_Plus
from .Lambda_Net import Lambda_Net
from .CST import CST
from .DAUHST impo... | 2,403 | 36.5625 | 91 | py |
MST | MST-main/real/train_code/architecture/HDNet.py | import torch
import torch.nn as nn
import math
def default_conv(in_channels, out_channels, kernel_size, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias)
class MeanShift(nn.Conv2d):
def __init__(
self, rgb_range,
rgb_mean... | 12,665 | 33.048387 | 132 | py |
MST | MST-main/real/test_code/test.py | import torch
import os
import argparse
from utils import dataparallel
import scipy.io as sio
import numpy as np
from torch.autograd import Variable
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description="PyTorch HSIFUSION")
parser.add_argum... | 3,319 | 40.5 | 126 | py |
MST | MST-main/real/test_code/utils.py | import numpy as np
import scipy.io as sio
import os
import glob
import re
import torch
import torch.nn as nn
import math
import random
def _as_floats(im1, im2):
float_type = np.result_type(im1.dtype, im2.dtype, np.float32)
im1 = np.asarray(im1, dtype=float_type)
im2 = np.asarray(im2, dtype=float_type)
... | 5,293 | 27.771739 | 93 | py |