| Column | Type | Min | Max |
|---|---|---|---|
| entry_point | string (length) | 1 | 65 |
| original_triton_code | string (length) | 4.5k | 619k |
| python_code | string (length) | 208 | 60.9k |
| triton_code | string (length) | 1.15k | 275k |
| repo_name | string (length) | 7 | 115 |
| module_name | string (length) | 1 | 65 |
| synthetic | bool (1 class) | | |
| uuid | int64 | 0 | 18.5k |
| licenses | list (length) | 1 | 6 |
| stars | int64 | 0 | 19.8k |
| sha | string (length) | 40 | 40 |
| repo_link | string (length) | 72 | 180 |
| pytorch_code | string (length) | 200 | 4.05k |
entry_point: Focal_loss
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch import torch.nn as nn class Focal_loss(nn.Module): """ Pytorch implementation from https://github.com/richardaecn/class-balanced-loss Compute the focal loss between `logits` and the ground truth `labels`. Focal loss = -alpha_t * (1-pt)^gamma * log(pt) where pt is the probability of be...
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
repo_name: BCV-Uniandes/SAMA
module_name: Focal_loss
synthetic: false
uuid: 110
licenses: ["BSD-3-Clause"]
stars: 0
sha: 4c732c71486af17efed17480e363298cb65c851f
repo_link: https://github.com/BCV-Uniandes/SAMA/tree/4c732c71486af17efed17480e363298cb65c851f
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): """ Pytorch implementation from https://github.com/richardaecn/class-balanced-loss Compute the focal loss between `logits` and the ground truth `labels`. Focal loss = -alpha_t * (1-pt)^gamma * log(pt) where pt is the probability of being c...

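The pytorch_code field above is cut off, but its docstring quotes the formula focal loss = -alpha_t * (1-pt)^gamma * log(pt). Below is a minimal sketch of a binary focal loss along those lines, with assumed alpha/gamma defaults; the actual BCV-Uniandes/SAMA implementation may differ in detail:

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, labels, alpha=0.25, gamma=2.0):
    """Sketch of -alpha_t * (1 - p_t)^gamma * log(p_t) for binary labels.

    alpha and gamma defaults are assumptions, not values from the repo.
    """
    # BCE with logits is exactly -log(p_t), computed stably.
    bce = F.binary_cross_entropy_with_logits(logits, labels, reduction="none")
    p = torch.sigmoid(logits)
    p_t = p * labels + (1 - p) * (1 - labels)            # prob. of the true class
    alpha_t = alpha * labels + (1 - alpha) * (1 - labels)
    return (alpha_t * (1 - p_t) ** gamma * bce).mean()
```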
entry_point: ItemQueryAttention
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch as t import torch.nn as nn class ItemQueryAttention(nn.Module): """ Item-based attention. Uses the query-set sequences to attention-align the support-set sample sequences, yielding an attention context vector for each support-set sample. Because the attention vector does not depend on an RNN context vector, this attention is item-based and can be parallelized. """ def __init__(self, feature_size, hidden_size): super(ItemQueryAttention, self).__init__...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Asichurter/Few-Shot-Project
module_name: ItemQueryAttention
synthetic: false
uuid: 111
licenses: ["MIT"]
stars: 0
sha: 865cd6aa7b996c518dfa48dcc9ffad90445f9efe
repo_link: https://github.com/Asichurter/Few-Shot-Project/tree/865cd6aa7b996c518dfa48dcc9ffad90445f9efe
pytorch_code (truncated): import torch import torch as t import torch.nn as nn class Model(nn.Module): """ Item-based attention. Uses the query-set sequences to attention-align the support-set sample sequences, yielding an attention context vector for each support-set sample. Because the attention vector does not depend on an RNN context vector, this attention is item-based and can be parallelized. """ def __init__(self, feature_size, hidden_size): super().__init__() self.W = nn.Linear(feature...

entry_point: ChannelPool
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code: import torch import torch.nn as nn class ChannelPool(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: ChannelPool
synthetic: false
uuid: 112
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code: import torch import torch.nn as nn class Model(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []

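The ChannelPool source in this record survives in full, only flattened onto one line; laid out conventionally it reads:

```python
import torch
import torch.nn as nn

class ChannelPool(nn.Module):
    def forward(self, x):
        # Stack the channel-wise max and the channel-wise mean
        # as two new channels.
        return torch.cat(
            (torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)),
            dim=1,
        )

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

On the [4, 4, 4, 4] input from get_inputs(), the output shape is [4, 2, 4, 4].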
entry_point: AMSoftmaxLoss
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class AMSoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLoss, self).__init__() self.s = s self.m = m self...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: AMSoftmaxLoss
synthetic: false
uuid: 113
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super().__init__() self.s = s self.m = m self.speaker_num = speaker_num ...

entry_point: MaskedConv1d
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class MaskedConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch a...
repo_name: B0BBB/seq2seq.pytorch
module_name: MaskedConv1d
synthetic: false
uuid: 114
licenses: ["MIT"]
stars: 0
sha: 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
repo_link: https://github.com/B0BBB/seq2seq.pytorch/tree/54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch class Model(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, groups=1, bias=True, causal=True): if causal: padding = (kernel_size - 1) * dilation else:...

entry_point: GRUCell
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class GRUCell(nn.Module): def __init__(self, input_size, hidden_size): super(GRUCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size + hidden_size, 2 *...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
repo_name: Avmb/lm-robustness
module_name: GRUCell
synthetic: false
uuid: 115
licenses: ["BSD-3-Clause"]
stars: 0
sha: b5417d9aac01bff0d2a56b506eabed899fd718d4
repo_link: https://github.com/Avmb/lm-robustness/tree/b5417d9aac01bff0d2a56b506eabed899fd718d4
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self._W = nn.Parameter(torch.FloatTensor(input_size + hidden_size, 2 * hidden_size)) ...

entry_point: Downsample
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class Downsample(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=2, padding=1) self.bn1 = nn.InstanceNorm3d(out_channels, affine=True) ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BCV-Uniandes/SAMA
module_name: Downsample
synthetic: false
uuid: 116
licenses: ["BSD-3-Clause"]
stars: 0
sha: 4c732c71486af17efed17480e363298cb65c851f
repo_link: https://github.com/BCV-Uniandes/SAMA/tree/4c732c71486af17efed17480e363298cb65c851f
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=2, padding=1) self.bn1 = nn.InstanceNorm3d(out_channels, affine=True) s...

entry_point: AttentivePoolingModule
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class AttentivePoolingModule(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, activation='ReLU', **kwargs): super(AttentivePoolingModule, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: AttentivePoolingModule
synthetic: false
uuid: 117
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, activation='ReLU', **kwargs): super().__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_f...

entry_point: SDPAttention
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch import torch.nn.functional as F from torch.autograd import Variable class SDPAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, dropout=0, causal=False): super(SDPAttention, ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B0BBB/seq2seq.pytorch
module_name: SDPAttention
synthetic: false
uuid: 118
licenses: ["MIT"]
stars: 0
sha: 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
repo_link: https://github.com/B0BBB/seq2seq.pytorch/tree/54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch import torch.nn.functional as F from torch.autograd import Variable class Model(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, dropout=0, causal=False): super().__init__() ...

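The record's forward pass is truncated; below is a hedged sketch of scaled dot-product attention, softmax(QK^T / sqrt(d_k))V, using the dropout and causal options visible in the record's __init__. The seq2seq.pytorch original may handle masking differently:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SDPAttention(nn.Module):
    """Scaled dot-product attention: softmax(QK^T / sqrt(d_k)) V."""

    def __init__(self, dropout=0, causal=False):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.causal = causal

    def forward(self, q, k, v):
        d_k = k.size(-1)
        scores = q @ k.transpose(-2, -1) / d_k ** 0.5
        if self.causal:
            # Mask out attention to future positions.
            t_q, t_k = scores.size(-2), scores.size(-1)
            mask = torch.triu(torch.ones(t_q, t_k, dtype=torch.bool,
                                         device=scores.device), diagonal=1)
            scores = scores.masked_fill(mask, float("-inf"))
        attn = self.dropout(F.softmax(scores, dim=-1))
        return attn @ v, attn
```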
entry_point: SoftmaxLoss
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class SoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLoss, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: SoftmaxLoss
synthetic: false
uuid: 119
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super().__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, x_BxH, la...

entry_point: make_residual_dense_ver1
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class make_residual_dense_ver1(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super(make_residual_dense_ver1, self).__init__() self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size= ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: make_residual_dense_ver1
synthetic: false
uuid: 120
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super().__init__() self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size= kernel_size, padding=(kernel_size - 1) // 2, b...

entry_point: SelfAttentionPooling
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttenti...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: SelfAttentionPooling
synthetic: false
uuid: 121
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super().__init__() self....

entry_point: AP
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: AP
synthetic: false
uuid: 122
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super().__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.R...

entry_point: SAP
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttenti...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: SAP
synthetic: false
uuid: 123
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super().__init__(...

entry_point: Upsample
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class Upsample(nn.Module): def __init__(self, in_channels, out_channels, scale_factor=2): super().__init__() self.trilinear = nn.Upsample(scale_factor=scale_factor) self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=1) self.bn1 = nn.In...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BCV-Uniandes/SAMA
module_name: Upsample
synthetic: false
uuid: 124
licenses: ["BSD-3-Clause"]
stars: 0
sha: 4c732c71486af17efed17480e363298cb65c851f
repo_link: https://github.com/BCV-Uniandes/SAMA/tree/4c732c71486af17efed17480e363298cb65c851f
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, scale_factor=2): super().__init__() self.trilinear = nn.Upsample(scale_factor=scale_factor) self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=1) self.bn1 = nn.Insta...

entry_point: BasicConv
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class BasicConv(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=False, bn=False, bias=True): super(BasicConv, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: BasicConv
synthetic: false
uuid: 125
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=False, bn=False, bias=True): super().__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_pla...

entry_point: AttentionModuleV2
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import math import torch import torch.nn.functional as F class AttentionModuleV2(torch.nn.Module): def __init__(self, hidden_size, fc_x_query=None, fc_spt_key=None, fc_spt_value=None, fc_x_update=None, fc_update=None, fc_spt_spt_query=None, fc_spt_spt_key=None, fc_spt_spt_value=None, gamm...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: ArmandNM/meta-learning
module_name: AttentionModuleV2
synthetic: false
uuid: 126
licenses: ["MIT"]
stars: 0
sha: 173fcd4b929168e9bd7948581293020a3a932857
repo_link: https://github.com/ArmandNM/meta-learning/tree/173fcd4b929168e9bd7948581293020a3a932857
pytorch_code (truncated): import math import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, hidden_size, fc_x_query=None, fc_spt_key=None, fc_spt_value=None, fc_x_update=None, fc_update=None, fc_spt_spt_query=None, fc_spt_spt_key=None, fc_spt_spt_value=None, gamma_scale_gate...

entry_point: make_dense
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class make_dense(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super(make_dense, self).__init__() self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size= kernel_size, padding=(ker...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: make_dense
synthetic: false
uuid: 127
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super().__init__() self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size= kernel_size, padding=(kernel_size - 1) // 2, b...

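Only the conv construction survives in this record; the forward pass below fills in the conventional dense-layer pattern (conv, ReLU, concatenate with the input), which is an assumption rather than the repo's verbatim code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class make_dense(nn.Module):
    """One DenseNet-style layer: conv, ReLU, then concatenate with the input."""

    def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3):
        super().__init__()
        # Constructor arguments as shown in the record.
        self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size=kernel_size,
                              padding=(kernel_size - 1) // 2, bias=False)

    def forward(self, x):
        out = F.relu(self.conv(x))
        # Dense connectivity: append the layer's output to its input,
        # growing the channel count by growthRate.
        return torch.cat((x, out), dim=1)
```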
entry_point: Scale_and_shift
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class Scale_and_shift(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.rand(1)) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, x): return self.weight * x + self.bias def get_inputs(): retu...
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
repo_name: BCV-Uniandes/SAMA
module_name: Scale_and_shift
synthetic: false
uuid: 128
licenses: ["BSD-3-Clause"]
stars: 0
sha: 4c732c71486af17efed17480e363298cb65c851f
repo_link: https://github.com/BCV-Uniandes/SAMA/tree/4c732c71486af17efed17480e363298cb65c851f
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.rand(1)) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, x): return self.weight * x + self.bias def get_inputs(): return [torch....

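The module itself is fully visible in the record (only the get_inputs helper is cut off); reformatted for readability:

```python
import torch
import torch.nn as nn

class Scale_and_shift(nn.Module):
    """Learnable elementwise affine map: y = w * x + b with scalar w and b."""

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(1))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        # Scalar parameters broadcast over any input shape.
        return self.weight * x + self.bias
```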
entry_point: make_residual_dense_ver2
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class make_residual_dense_ver2(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super(make_residual_dense_ver2, self).__init__() if nChannels == nChannels_: self.conv = nn.Conv2d(nChann...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: make_residual_dense_ver2
synthetic: false
uuid: 129
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, nChannels, nChannels_, growthRate, kernel_size=3): super().__init__() if nChannels == nChannels_: self.conv = nn.Conv2d(nChannels_, growthRate, kernel_size= ke...

entry_point: DDPG
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch from torch import nn from torch.nn import functional as F class Value_Net(nn.Module): def __init__(self, observation_dim, action_dim): super(Value_Net, self).__init__() self.fc1 = nn.Linear(observation_dim + action_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = n...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BLUECARVIN/RL_baseline
module_name: DDPG
synthetic: false
uuid: 130
licenses: ["MIT"]
stars: 0
sha: 436538f49ee505e14672a67ba3c1f60886cbbea8
repo_link: https://github.com/BLUECARVIN/RL_baseline/tree/436538f49ee505e14672a67ba3c1f60886cbbea8
pytorch_code (truncated): import torch from torch import nn from torch.nn import functional as F class Value_Net(nn.Module): def __init__(self, observation_dim, action_dim): super().__init__() self.fc1 = nn.Linear(observation_dim + action_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(256, 1...

entry_point: Cell
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class Conv(nn.Module): def __init__(self, conv, in_channels, out_channels): super().__init__() self.conv_type = conv self.relu = nn.ReLU(inplace=True) if self.conv_type == 'conv2d': self.conv2d = nn.Conv3d(in_channels, out_channels, s...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BCV-Uniandes/SAMA
module_name: Cell
synthetic: false
uuid: 131
licenses: ["BSD-3-Clause"]
stars: 0
sha: 4c732c71486af17efed17480e363298cb65c851f
repo_link: https://github.com/BCV-Uniandes/SAMA/tree/4c732c71486af17efed17480e363298cb65c851f
pytorch_code (truncated): import torch import torch.nn as nn class Conv(nn.Module): def __init__(self, conv, in_channels, out_channels): super().__init__() self.conv_type = conv self.relu = nn.ReLU(inplace=True) if self.conv_type == 'conv2d': self.conv2d = nn.Conv3d(in_channels, out_channels, s...

entry_point: ResizeConv1d
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn from torch.nn import functional as F class ResizeConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'): super().__init__() self.scale_factor = scale_factor self.mode = mode self.conv =...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
repo_name: BalintHompot/uncertainty
module_name: ResizeConv1d
synthetic: false
uuid: 132
licenses: ["Apache-2.0"]
stars: 0
sha: 544c6c5cf22464d69316a31f97fc87355cd10b7e
repo_link: https://github.com/BalintHompot/uncertainty/tree/544c6c5cf22464d69316a31f97fc87355cd10b7e
pytorch_code (truncated): import torch import torch.nn as nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'): super().__init__() self.scale_factor = scale_factor self.mode = mode self.conv = nn.Con...

entry_point: Mish
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code: import torch from torch import nn class Mish(nn.Module): """Mish activation.""" def forward(self, x): return x * torch.tanh(nn.functional.softplus(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.gua...
repo_name: Benjamin-Etheredge/lightning-bolts
module_name: Mish
synthetic: false
uuid: 133
licenses: ["Apache-2.0"]
stars: 0
sha: 1971d6a924729940b98793aa7751bdf769350aca
repo_link: https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
pytorch_code: import torch from torch import nn class Model(nn.Module): """Mish activation.""" def forward(self, x): return x * torch.tanh(nn.functional.softplus(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []

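The Mish record is one of the few whose code survives untruncated; laid out conventionally:

```python
import torch
from torch import nn

class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    def forward(self, x):
        return x * torch.tanh(nn.functional.softplus(x))
```

Since softplus(x) = log(1 + e^x), this is x * tanh(log(1 + e^x)), a smooth, non-monotonic alternative to ReLU.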
entry_point: ASP
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B06901052/s3prl
module_name: ASP
synthetic: false
uuid: 134
licenses: ["MIT"]
stars: 0
sha: 5f63d2df043d2d7c81580cd042fa2cea34746f48
repo_link: https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
pytorch_code (truncated): import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super().__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.R...

entry_point: Policy_Net
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch from torch import nn from torch.nn import functional as F class Policy_Net(nn.Module): def __init__(self, observation_dim, action_dim): super(Policy_Net, self).__init__() self.fc1 = nn.Linear(observation_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(25...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BLUECARVIN/RL_baseline
module_name: Policy_Net
synthetic: false
uuid: 135
licenses: ["MIT"]
stars: 0
sha: 436538f49ee505e14672a67ba3c1f60886cbbea8
repo_link: https://github.com/BLUECARVIN/RL_baseline/tree/436538f49ee505e14672a67ba3c1f60886cbbea8
pytorch_code (truncated): import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, observation_dim, action_dim): super().__init__() self.fc1 = nn.Linear(observation_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(256, action_dim) d...

entry_point: Value_Net
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch from torch import nn from torch.nn import functional as F class Value_Net(nn.Module): def __init__(self, observation_dim, action_dim): super(Value_Net, self).__init__() self.fc1 = nn.Linear(observation_dim + action_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = n...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
repo_name: BLUECARVIN/RL_baseline
module_name: Value_Net
synthetic: false
uuid: 136
licenses: ["MIT"]
stars: 0
sha: 436538f49ee505e14672a67ba3c1f60886cbbea8
repo_link: https://github.com/BLUECARVIN/RL_baseline/tree/436538f49ee505e14672a67ba3c1f60886cbbea8
pytorch_code (truncated): import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, observation_dim, action_dim): super().__init__() self.fc1 = nn.Linear(observation_dim + action_dim, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(256, 1) ...

entry_point: make_dense_LReLU
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class make_dense_LReLU(nn.Module): def __init__(self, nChannels, growthRate, kernel_size=3): super(make_dense_LReLU, self).__init__() self.conv = nn.Conv2d(nChannels, growthRate, kernel_size= kernel_size, padding=(kern...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: make_dense_LReLU
synthetic: false
uuid: 137
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, nChannels, growthRate, kernel_size=3): super().__init__() self.conv = nn.Conv2d(nChannels, growthRate, kernel_size= kernel_size, padding=(kernel_size - 1) // 2, bias=False) ...

entry_point: AmdimNCELoss
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch from torch import nn def tanh_clip(x, clip_val=10.0): """soft clip values to the range [-clip_val, +clip_val]""" if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELoss(nn.Module): """Compute the N...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Benjamin-Etheredge/lightning-bolts
module_name: AmdimNCELoss
synthetic: false
uuid: 138
licenses: ["Apache-2.0"]
stars: 0
sha: 1971d6a924729940b98793aa7751bdf769350aca
repo_link: https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
pytorch_code (truncated): import torch from torch import nn def tanh_clip(x, clip_val=10.0): """soft clip values to the range [-clip_val, +clip_val]""" if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class Model(nn.Module): """Compute the NCE scor...

entry_point: SpatialGate
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class BasicConv(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=False, bn=False, bias=True): super(BasicConv, self).__init__() self.out_channels = out_plan...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: BJTU-MIMO/Channel_estimation_MRDN
module_name: SpatialGate
synthetic: false
uuid: 139
licenses: ["MIT"]
stars: 0
sha: f41972998a5403c901bc3e5d68d4acd05e9a7f6c
repo_link: https://github.com/BJTU-MIMO/Channel_estimation_MRDN/tree/f41972998a5403c901bc3e5d68d4acd05e9a7f6c
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.functional as F class BasicConv(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=False, bn=False, bias=True): super().__init__() self.out_channels = out_planes self...

entry_point: BayesLinear
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import math import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import init def calculate_kl(mu_p, sig_p, mu_q, sig_q): """ Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q) Args: mu_p: mean of the Gaussian p sig_p: sta...
triton_code (truncated): import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libd...
repo_name: BalintHompot/uncertainty
module_name: BayesLinear
synthetic: false
uuid: 140
licenses: ["Apache-2.0"]
stars: 0
sha: 544c6c5cf22464d69316a31f97fc87355cd10b7e
repo_link: https://github.com/BalintHompot/uncertainty/tree/544c6c5cf22464d69316a31f97fc87355cd10b7e
pytorch_code (truncated): import math import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import init def calculate_kl(mu_p, sig_p, mu_q, sig_q): """ Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q) Args: mu_p: mean of the Gaussian p sig_p: sta...

entry_point: MinPool
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch import torch.nn as nn import torch.nn class MinPool(nn.Module): """Use nn.MaxPool to implement MinPool """ def __init__(self, kernel_size, ndim=3, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False): super(MinPool, self).__init__() self.pool = g...
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert...
repo_name: BeautyOfWeb/OPP_Analysis
module_name: MinPool
synthetic: false
uuid: 141
licenses: ["MIT"]
stars: 0
sha: 59b2dbc91e07fc14b3a130bff6fadaa19cd36b42
repo_link: https://github.com/BeautyOfWeb/OPP_Analysis/tree/59b2dbc91e07fc14b3a130bff6fadaa19cd36b42
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn class Model(nn.Module): """Use nn.MaxPool to implement MinPool """ def __init__(self, kernel_size, ndim=3, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False): super().__init__() self.pool = getattr(nn, f'Ma...

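The record shows the getattr(nn, f'MaxPool{ndim}d') construction but cuts off before the forward pass; the standard identity min(x) = -max(-x) suggests the following sketch, in which the forward body is an assumption:

```python
import torch
import torch.nn as nn

class MinPool(nn.Module):
    """Min pooling built from nn.MaxPool via min(x) == -max(-x)."""

    def __init__(self, kernel_size, ndim=3, stride=None, padding=0,
                 dilation=1, return_indices=False, ceil_mode=False):
        super().__init__()
        # Pick MaxPool1d/2d/3d dynamically from the ndim argument.
        self.pool = getattr(nn, f'MaxPool{ndim}d')(
            kernel_size, stride=stride, padding=padding, dilation=dilation,
            return_indices=return_indices, ceil_mode=ceil_mode)

    def forward(self, x):
        # Negate, max-pool, negate back (assumed completion).
        return -self.pool(-x)
```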
entry_point: QNetwork
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn.functional as F import torch.nn as nn class QNetwork(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): super(QNetwork, self).__init__() self.seed = torch.manual_seed(seed) hidden_units = 512 self.fc1 = nn.Li...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
repo_name: BenKang34/deep-reinforcement-learning-nanodegree
module_name: QNetwork
synthetic: false
uuid: 142
licenses: ["MIT"]
stars: 0
sha: 17c9007f757dfb1217c869fdee51798c4a21ba92
repo_link: https://github.com/BenKang34/deep-reinforcement-learning-nanodegree/tree/17c9007f757dfb1217c869fdee51798c4a21ba92
pytorch_code (truncated): import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed): super().__init__() self.seed = torch.manual_seed(seed) hidden_units = 512 self.fc1 = nn.Linear(state_size, ...

entry_point: SELoss
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch from torch import Tensor from torch import nn class SELoss(nn.MSELoss): def __init__(self): super().__init__(reduction='none') def forward(self, inputs: 'Tensor', target: 'Tensor') ->Tensor: return super().forward(inputs, target).sum(1) def get_inputs(): return [torch.rand...
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...
repo_name: Benjamin-Etheredge/lightning-bolts
module_name: SELoss
synthetic: false
uuid: 143
licenses: ["Apache-2.0"]
stars: 0
sha: 1971d6a924729940b98793aa7751bdf769350aca
repo_link: https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
pytorch_code (truncated): import torch from torch import Tensor from torch import nn class Model(nn.MSELoss): def __init__(self): super().__init__(reduction='none') def forward(self, inputs: 'Tensor', target: 'Tensor') ->Tensor: return super().forward(inputs, target).sum(1) def get_inputs(): return [torch.rand(...

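The SELoss class is fully visible in the record (only the get_inputs helper is truncated); reformatted:

```python
import torch
from torch import Tensor
from torch import nn

class SELoss(nn.MSELoss):
    """Per-sample squared error: elementwise MSE summed over dim 1."""

    def __init__(self):
        super().__init__(reduction='none')

    def forward(self, inputs: Tensor, target: Tensor) -> Tensor:
        return super().forward(inputs, target).sum(1)
```

With reduction='none' the parent class returns elementwise squared errors, so on [N, C] inputs this yields one summed error per row rather than a single scalar.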
entry_point: BayesConv1d
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import math import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import init def calculate_kl(mu_p, sig_p, mu_q, sig_q): """ Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q) Args: mu_p: mean of the Gaussian p sig_p: sta...
triton_code (truncated): import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from...
repo_name: BalintHompot/uncertainty
module_name: BayesConv1d
synthetic: false
uuid: 144
licenses: ["Apache-2.0"]
stars: 0
sha: 544c6c5cf22464d69316a31f97fc87355cd10b7e
repo_link: https://github.com/BalintHompot/uncertainty/tree/544c6c5cf22464d69316a31f97fc87355cd10b7e
pytorch_code (truncated): import math import torch import torch.nn as nn from torch.nn import functional as F from torch.nn import init def calculate_kl(mu_p, sig_p, mu_q, sig_q): """ Calculates the Kullback-Leibler divergence between two univariate Gaussians (p and q) Args: mu_p: mean of the Gaussian p sig_p: sta...

entry_point: Actor
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, f...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: BenKang34/deep-reinforcement-learning-nanodegree
module_name: Actor
synthetic: false
uuid: 145
licenses: ["MIT"]
stars: 0
sha: 17c9007f757dfb1217c869fdee51798c4a21ba92
repo_link: https://github.com/BenKang34/deep-reinforcement-learning-nanodegree/tree/17c9007f757dfb1217c869fdee51798c4a21ba92
pytorch_code (truncated): import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, f...

entry_point: Conv2dTime
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn class Conv2dTime(nn.Conv2d): """ Implements time dependent 2d convolutions, by appending the time variable as an extra channel. """ def __init__(self, in_channels, *args, **kwargs): super(Conv2dTime, self).__init__(in_channels + 1, *args, **kwargs) ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
repo_name: BeeQC/ANODE-reproducibility
module_name: Conv2dTime
synthetic: false
uuid: 146
licenses: ["MIT"]
stars: 0
sha: 9d6b5a297302cdaa0bbc3908de1a94f3c28c0606
repo_link: https://github.com/BeeQC/ANODE-reproducibility/tree/9d6b5a297302cdaa0bbc3908de1a94f3c28c0606
pytorch_code (truncated): import torch import torch.nn as nn class Model(nn.Conv2d): """ Implements time dependent 2d convolutions, by appending the time variable as an extra channel. """ def __init__(self, in_channels, *args, **kwargs): super().__init__(in_channels + 1, *args, **kwargs) def forward(self, t, ...

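The record shows the in_channels + 1 constructor trick but truncates forward(self, t, ...); the sketch below fills in the usual ANODE-style body (a constant time channel concatenated onto the input), which is an assumption rather than the repo's verbatim code:

```python
import torch
import torch.nn as nn

class Conv2dTime(nn.Conv2d):
    """Time-dependent 2d conv: t is appended as an extra constant channel."""

    def __init__(self, in_channels, *args, **kwargs):
        # One extra input channel to hold the time variable.
        super().__init__(in_channels + 1, *args, **kwargs)

    def forward(self, t, x):
        # Broadcast the scalar time over an (N, 1, H, W) channel (assumed body).
        t_channel = torch.ones_like(x[:, :1, :, :]) * t
        return super().forward(torch.cat([t_channel, x], dim=1))
```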
entry_point: AttentionLayer
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch import torch.nn.functional as F class AttentionLayer(nn.Module): """ Attention layer according to https://arxiv.org/abs/1409.0473. Params: num_units: Number of units used in the attention layer """ ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: B0BBB/seq2seq.pytorch
module_name: AttentionLayer
synthetic: false
uuid: 147
licenses: ["MIT"]
stars: 0
sha: 54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
repo_link: https://github.com/B0BBB/seq2seq.pytorch/tree/54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4
pytorch_code (truncated): import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch import torch.nn.functional as F class Model(nn.Module): """ Attention layer according to https://arxiv.org/abs/1409.0473. Params: num_units: Number of units used in the attention layer """ def __ini...

entry_point: ActorCriticMLP
original_triton_code (truncated): # AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
python_code (truncated): import torch from torch import Tensor from torch import nn from typing import Tuple from torch.nn import functional as F class ActorCriticMLP(nn.Module): """MLP network with heads for actor and critic.""" def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int', hidden_size: 'int'=128): ...
triton_code (truncated): import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
repo_name: Benjamin-Etheredge/lightning-bolts
module_name: ActorCriticMLP
synthetic: false
uuid: 148
licenses: ["Apache-2.0"]
stars: 0
sha: 1971d6a924729940b98793aa7751bdf769350aca
repo_link: https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
pytorch_code (truncated): import torch from torch import Tensor from torch import nn from typing import Tuple from torch.nn import functional as F class Model(nn.Module): """MLP network with heads for actor and critic.""" def __init__(self, input_shape: 'Tuple[int]', n_actions: 'int', hidden_size: 'int'=128): """ ...

entry_point: GlobalAveragePool
original_triton_code (truncated): # AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
python_code (truncated): import torch from torch import nn class GlobalAveragePool(nn.Module): """ Average pooling in an equivariant network """ def __init__(self): """ """ super().__init__() def forward(self, x): """ """ avg = torch.mean(x, dim=[-2, -1], keepdim=True) ...
triton_code (truncated): import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...
repo_name: BeomyeolYu/symmetrizer
module_name: GlobalAveragePool
synthetic: false
uuid: 149
licenses: ["MIT"]
stars: 0
sha: 4617c82dc8ab05ac02ac50846799e0b820ff51ce
repo_link: https://github.com/BeomyeolYu/symmetrizer/tree/4617c82dc8ab05ac02ac50846799e0b820ff51ce
pytorch_code (truncated): import torch from torch import nn class Model(nn.Module): """ Average pooling in an equivariant network """ def __init__(self): """ """ super().__init__() def forward(self, x): """ """ avg = torch.mean(x, dim=[-2, -1], keepdim=True) return ...

SpatialAttention2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch._utils class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo....
Bhaskers-Blu-Org2/seismic-deeplearning
SpatialAttention2d
false
150
[ "MIT" ]
0
15d45fb8c9cef463fd01fae2e087ba62c98cb799
https://github.com/Bhaskers-Blu-Org2/seismic-deeplearning/tree/15d45fb8c9cef463fd01fae2e087ba62c98cb799
import torch import torch.nn as nn import torch._utils class Model(nn.Module): def __init__(self, channel): super().__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) z = sel...
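The SpatialAttention2d forward is cut at "z = self.squeeze(x)". The remainder below (sigmoid gate, elementwise product) is the standard spatial-attention pattern and is an assumption; the Sketch class name is ours:

import torch
from torch import nn

class SpatialAttention2dSketch(nn.Module):
    def __init__(self, channel):
        super().__init__()
        self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        z = self.squeeze(x)   # (N, 1, H, W): one attention logit per pixel
        z = self.sigmoid(z)   # gate in (0, 1)
        return x * z          # reweight every channel by the spatial gate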
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import tor...
BenKang34/deep-reinforcement-learning-nanodegree
Critic
false
151
[ "MIT" ]
0
17c9007f757dfb1217c869fdee51798c4a21ba92
https://github.com/BenKang34/deep-reinforcement-learning-nanodegree/tree/17c9007f757dfb1217c869fdee51798c4a21ba92
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, f...
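hidden_init is fully visible above; the Critic body is truncated. A runnable sketch of the DDPG critic layout this hidden_init is usually paired with (state through the first layer, action concatenated before the second) follows; the layer sizes and the point of action injection are assumptions, and the Sketch class name is ours:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim

class CriticSketch(nn.Module):
    def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300):
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)  # small final layer, per the DDPG paper

    def forward(self, state, action):
        xs = F.relu(self.fcs1(state))
        x = torch.cat((xs, action), dim=1)  # inject the action after the first layer
        x = F.relu(self.fc2(x))
        return self.fc3(x)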
GlobalMaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn class GlobalMaxPool(nn.Module): """ Max pooling in an equivariant network """ def __init__(self): """ """ super().__init__() def forward(self, x): """ """ mx = torch.max(torch.max(x, dim=-1, keepdim=True)[0], dim=-...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
BeomyeolYu/symmetrizer
GlobalMaxPool
false
152
[ "MIT" ]
0
4617c82dc8ab05ac02ac50846799e0b820ff51ce
https://github.com/BeomyeolYu/symmetrizer/tree/4617c82dc8ab05ac02ac50846799e0b820ff51ce
import torch from torch import nn class Model(nn.Module): """ Max pooling in an equivariant network """ def __init__(self): """ """ super().__init__() def forward(self, x): """ """ mx = torch.max(torch.max(x, dim=-1, keepdim=True)[0], dim=-2, ...
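The GlobalMaxPool forward is recoverable from the visible field; a self-contained sketch (the Sketch class name is ours):

import torch
from torch import nn

class GlobalMaxPoolSketch(nn.Module):
    # Two chained reductions: max over the last axis, then over the
    # second-to-last, keeping both as size-1 dims. torch.max has no
    # multi-dim form (unlike torch.mean), hence the nesting.
    def forward(self, x):
        mx = torch.max(x, dim=-1, keepdim=True)[0]
        return torch.max(mx, dim=-2, keepdim=True)[0]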
Policy
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False) self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4) self.size = 9 * 9...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
BenKang34/deep-reinforcement-learning-nanodegree
Policy
false
153
[ "MIT" ]
0
17c9007f757dfb1217c869fdee51798c4a21ba92
https://github.com/BenKang34/deep-reinforcement-learning-nanodegree/tree/17c9007f757dfb1217c869fdee51798c4a21ba92
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False) self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4) self.size = 9 * 9 * 16 ...
Delta
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn from torchaudio import transforms class Delta(nn.Module): def __init__(self, order=2, **kwargs): super(Delta, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchaudio import transforms assert_size_stride = tor...
B06901052/s3prl
Delta
false
154
[ "MIT" ]
0
5f63d2df043d2d7c81580cd042fa2cea34746f48
https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
import torch import torch.nn as nn from torchaudio import transforms class Model(nn.Module): def __init__(self, order=2, **kwargs): super().__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in...
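The Delta forward is truncated after "feats = [x]". The sketch below assumes the usual pattern: each order's delta is computed from the previous one and everything is stacked along the feature axis (dim=-2 for torchaudio's (..., freq, time) layout); both the loop body and the concatenation axis are assumptions, and the Sketch class name is ours:

import torch
import torch.nn as nn
from torchaudio import transforms

class DeltaSketch(nn.Module):
    def __init__(self, order=2, **kwargs):
        super().__init__()
        self.order = order
        self.compute_delta = transforms.ComputeDeltas(**kwargs)

    def forward(self, x):
        feats = [x]
        for _ in range(self.order):
            feats.append(self.compute_delta(feats[-1]))  # delta of the previous order
        return torch.cat(feats, dim=-2)  # assumption: stack along the feature axis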
ScaledLeakyReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import math import torch from torch import nn from torch.nn import functional as F class ScaledLeakyReLU(nn.Module): def __init__(self, negative_slope=0.2): super().__init__() self.negative_slope = negative_slope def forward(self, input): out = F.leaky_relu(input, negative_slope=self...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...
BinahHu/stylegan2-pytorch
ScaledLeakyReLU
false
155
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
import math import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, negative_slope=0.2): super().__init__() self.negative_slope = negative_slope def forward(self, input): out = F.leaky_relu(input, negative_slope=self.negative_...
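The ScaledLeakyReLU forward is cut mid-call. In the stylegan2-pytorch family this activation multiplies by sqrt(2) to keep activations at roughly unit variance, which is what this sketch assumes (the Sketch class name is ours):

import math
import torch
from torch import nn
from torch.nn import functional as F

class ScaledLeakyReLUSketch(nn.Module):
    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        out = F.leaky_relu(input, negative_slope=self.negative_slope)
        return out * math.sqrt(2)  # variance-preserving rescale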
HSwish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn class HSwish(nn.Module): """Hard Swish activation function. See: https://arxiv.org/abs/1905.02244 """ def forward(self, x): return x * nn.functional.relu6(x + 3).div_(6) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): re...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empt...
BlueskyFR/determined
HSwish
false
156
[ "Apache-2.0" ]
0
ac734f0df11565333f9f37480cfc01dda011e349
https://github.com/BlueskyFR/determined/tree/ac734f0df11565333f9f37480cfc01dda011e349
import torch from torch import nn class Model(nn.Module): """Hard Swish activation function. See: https://arxiv.org/abs/1905.02244 """ def forward(self, x): return x * nn.functional.relu6(x + 3).div_(6) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): ret...
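The HSwish forward is fully visible above. A sketch restating it (out-of-place division instead of the original .div_(6), same result; the Sketch class name is ours):

import torch
from torch import nn

class HSwishSketch(nn.Module):
    # Hard Swish (https://arxiv.org/abs/1905.02244): a piecewise-linear
    # approximation of x * sigmoid(x) built from ReLU6.
    def forward(self, x):
        return x * nn.functional.relu6(x + 3) / 6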
SchedulerTestNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch.nn import functional as F class SchedulerTestNet(torch.nn.Module): """adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py.""" def __init__(self): super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C...
Benjamin-Etheredge/lightning-bolts
SchedulerTestNet
false
157
[ "Apache-2.0" ]
0
1971d6a924729940b98793aa7751bdf769350aca
https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
import torch from torch.nn import functional as F class Model(torch.nn.Module): """adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py.""" def __init__(self): super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) ...
GAB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch._utils class GAB(nn.Module): def __init__(self, input_dim, reduction=4): super(GAB, self).__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1)...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
Bhaskers-Blu-Org2/seismic-deeplearning
GAB
false
158
[ "MIT" ]
0
15d45fb8c9cef463fd01fae2e087ba62c98cb799
https://github.com/Bhaskers-Blu-Org2/seismic-deeplearning/tree/15d45fb8c9cef463fd01fae2e087ba62c98cb799
import torch import torch.nn as nn import torch._utils class Model(nn.Module): def __init__(self, input_dim, reduction=4): super().__init__() self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) ...
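The GAB field is truncated after the first 1x1 conv. The sketch below assumes the standard squeeze-and-excitation completion (expand conv, sigmoid, channel reweighting); the second conv and the gating are assumptions, and the Sketch class name is ours:

import torch
import torch.nn as nn

class GABSketch(nn.Module):
    def __init__(self, input_dim, reduction=4):
        super().__init__()
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1)
        self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        z = self.global_avgpool(x)       # (N, C, 1, 1) channel statistics
        z = self.relu(self.conv1(z))     # squeeze
        z = self.sigmoid(self.conv2(z))  # excite to per-channel gates in (0, 1)
        return x * z                     # reweight channels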
CaffeNormalize
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn as nn class CaffeNormalize(nn.Module): def __init__(self, features, eps=1e-07): super(CaffeNormalize, self).__init__() self.scale = nn.Parameter(10.0 * torch.ones(features)) self.eps = eps def forward(self, x): x_size = x.s...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dy...
BingjieTang/DepthAwareCNN
CaffeNormalize
false
159
[ "MIT" ]
0
9d72a7dc921d1dd550507018d4b51968ef89bbb7
https://github.com/BingjieTang/DepthAwareCNN/tree/9d72a7dc921d1dd550507018d4b51968ef89bbb7
import torch import torch.utils.data import torch.nn as nn class Model(nn.Module): def __init__(self, features, eps=1e-07): super().__init__() self.scale = nn.Parameter(10.0 * torch.ones(features)) self.eps = eps def forward(self, x): x_size = x.size() norm = x.norm(2...
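The CaffeNormalize forward is cut after "norm = x.norm(2". The sketch below assumes the classic Caffe/ParseNet Normalize layer: L2-normalize across channels at each spatial position, then apply a learned per-channel scale (initialized to 10, as in the visible constructor). The 4D-input assumption and the Sketch class name are ours:

import torch
import torch.nn as nn

class CaffeNormalizeSketch(nn.Module):
    def __init__(self, features, eps=1e-07):
        super().__init__()
        self.scale = nn.Parameter(10.0 * torch.ones(features))
        self.eps = eps

    def forward(self, x):
        norm = x.norm(2, dim=1, keepdim=True)    # L2 norm across channels
        x = x / (norm + self.eps)                # unit-normalize each position
        return x * self.scale.view(1, -1, 1, 1)  # learned per-channel rescale (assumes NCHW)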
SCse
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch._utils class SpatialAttention2d(nn.Module): def __init__(self, channel): super(SpatialAttention2d, self).__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
Bhaskers-Blu-Org2/seismic-deeplearning
SCse
false
160
[ "MIT" ]
0
15d45fb8c9cef463fd01fae2e087ba62c98cb799
https://github.com/Bhaskers-Blu-Org2/seismic-deeplearning/tree/15d45fb8c9cef463fd01fae2e087ba62c98cb799
import torch import torch.nn as nn import torch._utils class SpatialAttention2d(nn.Module): def __init__(self, channel): super().__init__() self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) self.sigmoid = nn.Sigmoid() def forward(self, x): z = self.squeeze(x) ...
ClippedReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class ClippedReLU(nn.Module): def __init__(self): super(ClippedReLU, self).__init__() def forward(self, x): return x.clamp(min=0.0, max=255.0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
Bovbene/WSCCSN
ClippedReLU
false
161
[ "Apache-2.0" ]
0
7f454050218e7f2162b0bdc1cdff938d876efc0b
https://github.com/Bovbene/WSCCSN/tree/7f454050218e7f2162b0bdc1cdff938d876efc0b
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.clamp(min=0.0, max=255.0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
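The ClippedReLU forward is fully visible. A sketch restating it (the Sketch class name is ours):

import torch
import torch.nn as nn

class ClippedReLUSketch(nn.Module):
    # ReLU saturated at 255, i.e. clamp into the valid 8-bit pixel range.
    def forward(self, x):
        return x.clamp(min=0.0, max=255.0)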
Rosenbrock
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import numpy as np from torch import nn class Rosenbrock(nn.Module): def __init__(self, n1, n2, a=1.0 / 20.0, b=5.0): super(Rosenbrock, self).__init__() self.n1 = n1 self.n2 = n2 self.a = a self.b = b def forward(self, x): dim2 = x.ndimension() > ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynam...
BrettLeroux/GRIPS-MCMC
Rosenbrock
false
162
[ "MIT" ]
0
154457acfc47977e25870aed76c7dc49d70608af
https://github.com/BrettLeroux/GRIPS-MCMC/tree/154457acfc47977e25870aed76c7dc49d70608af
import torch import numpy as np from torch import nn class Model(nn.Module): def __init__(self, n1, n2, a=1.0 / 20.0, b=5.0): super().__init__() self.n1 = n1 self.n2 = n2 self.a = a self.b = b def forward(self, x): dim2 = x.ndimension() > 2 dim1 = x.nd...
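The Rosenbrock forward is truncated before the actual computation, and how n1/n2 partition the coordinates in this repo is not visible. As a generic reference only, a sketch of the classic Rosenbrock potential over consecutive coordinate pairs, with the repo's a/b defaults (the parameterization and the Sketch class name are assumptions):

import torch
from torch import nn

class RosenbrockSketch(nn.Module):
    def __init__(self, n1, n2, a=1.0 / 20.0, b=5.0):
        super().__init__()
        # n1/n2 are stored but unused here; their role is not recoverable
        # from the truncated field.
        self.n1, self.n2, self.a, self.b = n1, n2, a, b

    def forward(self, x):
        x1, x2 = x[..., :-1], x[..., 1:]
        # sum_i b*(x_{i+1} - x_i^2)^2 + a*(1 - x_i)^2
        return (self.b * (x2 - x1 ** 2) ** 2 + self.a * (1 - x1) ** 2).sum(-1)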
FakeRKHSConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import numpy as np from torch import nn class MaybeBatchNorm2d(nn.Module): def __init__(self, n_ftr, affine, use_bn): super().__init__() self.bn = nn.BatchNorm2d(n_ftr, affine=affine) self.use_bn = use_bn def forward(self, x): if self.use_bn: ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Benjamin-Etheredge/lightning-bolts
FakeRKHSConvNet
false
163
[ "Apache-2.0" ]
0
1971d6a924729940b98793aa7751bdf769350aca
https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
import math import torch import numpy as np from torch import nn class MaybeBatchNorm2d(nn.Module): def __init__(self, n_ftr, affine, use_bn): super().__init__() self.bn = nn.BatchNorm2d(n_ftr, affine=affine) self.use_bn = use_bn def forward(self, x): if self.use_bn: ...
LayerNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.utils.data import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0, learnable= False): super(LayerNorm, self).__init__() if learnable: self.gamma = nn.Parameter(torch.ones(features)) se...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dy...
BingjieTang/DepthAwareCNN
LayerNorm
false
164
[ "MIT" ]
0
9d72a7dc921d1dd550507018d4b51968ef89bbb7
https://github.com/BingjieTang/DepthAwareCNN/tree/9d72a7dc921d1dd550507018d4b51968ef89bbb7
import torch import torch.utils.data import torch.nn as nn class Model(nn.Module): def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0, learnable= False): super().__init__() if learnable: self.gamma = nn.Parameter(torch.ones(features)) self.beta = nn.Parame...
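The LayerNorm forward is truncated; only the constructor survives. The sketch below assumes the common port of this layer: normalize each sample over all of its non-batch elements, then apply gamma/beta (scalars by default, or per-feature parameters broadcast over the trailing axis when learnable). The forward body and the Sketch class name are assumptions:

import torch
import torch.nn as nn

class LayerNormSketch(nn.Module):
    def __init__(self, features, eps=1e-06, gamma=1.0, beta=0.0, learnable=False):
        super().__init__()
        if learnable:
            # per-feature affine; broadcasting assumes the input's last dim == features
            self.gamma = nn.Parameter(torch.ones(features))
            self.beta = nn.Parameter(torch.zeros(features))
        else:
            self.gamma, self.beta = gamma, beta
        self.eps = eps

    def forward(self, x):
        shape = (-1,) + (1,) * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta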
NoiseInjection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...
BinahHu/stylegan2-pytorch
NoiseInjection
false
165
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
import torch from torch import nn class Model(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(ba...
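The NoiseInjection forward is almost fully visible; only the return is cut. A sketch completing it with the standard stylegan2-pytorch line (an assumption; the Sketch class name is ours):

import torch
from torch import nn

class NoiseInjectionSketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))  # starts as a no-op

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            # one noise plane shared across channels, freshly sampled per call
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise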
EqualConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch from torch import nn from torch.nn import functional as F class EqualConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_channel, in_channel, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn assert_size_stride = torch._C._dynamo.guards.as...
BinahHu/stylegan2-pytorch
EqualConv2d
false
166
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
import math import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_channel, in_channel, ...
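The EqualConv2d body is truncated after the weight parameter. A sketch of the equalized-learning-rate convolution as it appears in the stylegan2-pytorch family (the runtime scale factor is the standard He-style constant; the Sketch class name is ours):

import math
import torch
from torch import nn
from torch.nn import functional as F

class EqualConv2dSketch(nn.Module):
    def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel, kernel_size, kernel_size))
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)  # He-style constant
        self.stride, self.padding = stride, padding
        self.bias = nn.Parameter(torch.zeros(out_channel)) if bias else None

    def forward(self, input):
        # Equalized learning rate: scale weights at call time, not at init,
        # so every parameter sees the same effective gradient scale.
        return F.conv2d(input, self.weight * self.scale, bias=self.bias,
                        stride=self.stride, padding=self.padding)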
InputInjection
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class InputInjection(nn.Module): """Downsampling module for CGNet.""" def __ini...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cud...
BostonCrayfish/mmsegmentation
InputInjection
false
167
[ "Apache-2.0" ]
0
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
https://github.com/BostonCrayfish/mmsegmentation/tree/e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class Model(nn.Module): """Downsampling module for CGNet.""" def __init__(self,...
NormalProposal
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch from torch import nn from torch.distributions import Normal class NormalProposal(nn.Module): def __init__(self, sigma): super(NormalProposal, self).__init__() self.sigma = sigma def forward(self, x): return Normal(x, self.sigma).sample() def get_inputs(): return [t...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_str...
BrettLeroux/GRIPS-MCMC
NormalProposal
false
168
[ "MIT" ]
0
154457acfc47977e25870aed76c7dc49d70608af
https://github.com/BrettLeroux/GRIPS-MCMC/tree/154457acfc47977e25870aed76c7dc49d70608af
import torch from torch import nn from torch.distributions import Normal class Model(nn.Module): def __init__(self, sigma): super().__init__() self.sigma = sigma def forward(self, x): return Normal(x, self.sigma).sample() def get_inputs(): return [torch.rand([4, 4, 4, 4])] de...
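The NormalProposal forward is fully visible. A sketch restating it (the Sketch class name is ours):

import torch
from torch import nn
from torch.distributions import Normal

class NormalProposalSketch(nn.Module):
    # Gaussian random-walk proposal: perturb the current state x with
    # isotropic noise of scale sigma.
    def __init__(self, sigma):
        super().__init__()
        self.sigma = sigma

    def forward(self, x):
        return Normal(x, self.sigma).sample()  # no gradients flow through .sample()

As a Metropolis-Hastings proposal this is symmetric, so the proposal ratio cancels in the acceptance probability.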
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np from torch import nn from torch.nn import functional as F class Discriminator(nn.Module): def __init__(self, img_shape, hidden_dim=1024): super().__init__() in_dim = int(np.prod(img_shape)) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Line...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.gu...
Benjamin-Etheredge/lightning-bolts
Discriminator
false
169
[ "Apache-2.0" ]
0
1971d6a924729940b98793aa7751bdf769350aca
https://github.com/Benjamin-Etheredge/lightning-bolts/tree/1971d6a924729940b98793aa7751bdf769350aca
import torch import numpy as np from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, img_shape, hidden_dim=1024): super().__init__() in_dim = int(np.prod(img_shape)) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Linear(self....
DDPGActorVersion1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class DDPGActorVersion1(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units=128, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Brandon-HY-Lin/deep-reinforcement-learning
DDPGActorVersion1
false
170
[ "MIT" ]
0
d809851b6f98d1089379392d4687e2acaf1c0c79
https://github.com/Brandon-HY-Lin/deep-reinforcement-learning/tree/d809851b6f98d1089379392d4687e2acaf1c0c79
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=...
ExampleBackbone
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class ExampleBackbone(nn.Module): def __init__(self): super(ExampleBackbone...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.u...
BostonCrayfish/mmsegmentation
ExampleBackbone
false
171
[ "Apache-2.0" ]
0
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
https://github.com/BostonCrayfish/mmsegmentation/tree/e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class Model(nn.Module): def __init__(self): super().__init__() self...
ConvRelu
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class ConvRelu(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super(ConvRelu, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=kernel_size // 2, bias=bias) self.relu = ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Bovbene/WSCCSN
ConvRelu
false
172
[ "Apache-2.0" ]
0
7f454050218e7f2162b0bdc1cdff938d876efc0b
https://github.com/Bovbene/WSCCSN/tree/7f454050218e7f2162b0bdc1cdff938d876efc0b
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=kernel_size // 2, bias=bias) self.relu = nn.PReLU() d...
NormalSamples
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.utils.checkpoint def norm(x, axis=None, eps=1e-05): if axis is not None: return (x - x.mean(axis, True)) / (x.std(axis, keepdim=True) + eps) else: return (x - x.mean()) / (x.std() + eps) class NormalSamples(nn.Module): """The [reparameteriz...
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint as...
Antipurity/sensor-network
NormalSamples
false
173
[ "MIT" ]
0
c5cc67dee408da831c3ab60a03374da3c4789bd2
https://github.com/Antipurity/sensor-network/tree/c5cc67dee408da831c3ab60a03374da3c4789bd2
import torch import torch.nn as nn import torch.utils.checkpoint def norm(x, axis=None, eps=1e-05): if axis is not None: return (x - x.mean(axis, True)) / (x.std(axis, keepdim=True) + eps) else: return (x - x.mean()) / (x.std() + eps) class Model(nn.Module): """The [reparameterization tr...
Gblock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn class Gblock(nn.Module): def __init__(self, in_channels, out_channels, groups): super(Gblock, self).__init__() self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, groups=groups) self.relu = nn.PReLU() self.conv...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_s...
Bovbene/WSCCSN
Gblock
false
174
[ "Apache-2.0" ]
0
7f454050218e7f2162b0bdc1cdff938d876efc0b
https://github.com/Bovbene/WSCCSN/tree/7f454050218e7f2162b0bdc1cdff938d876efc0b
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, groups): super().__init__() self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, groups=groups) self.relu = nn.PReLU() self.conv1 = nn.Conv2d...
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn assert_size...
BinahHu/stylegan2-pytorch
EqualLinear
false
175
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): ...
NodeAdaptiveEncoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class NodeAdaptiveEncoder(nn.Module): def __init__(self, num_features, dropout=0.5): super(NodeAdaptiveEncoder, self).__init__() self.fc = nn.Parameter(torch.zeros(size=(num_features, 1))) nn.init.x...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dyn...
BruceW91/cogdl
NodeAdaptiveEncoder
false
176
[ "MIT" ]
0
1ad524375f5ba062103698a0432fc857572a6933
https://github.com/BruceW91/cogdl/tree/1ad524375f5ba062103698a0432fc857572a6933
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, num_features, dropout=0.5): super().__init__() self.fc = nn.Parameter(torch.zeros(size=(num_features, 1))) nn.init.xavier_normal_(self.fc.data, gain=1.414)...
PPMConcat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class PPMConcat(nn.ModuleList): """Pyramid Pooling Module that only concat the featu...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cud...
BostonCrayfish/mmsegmentation
PPMConcat
false
177
[ "Apache-2.0" ]
0
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
https://github.com/BostonCrayfish/mmsegmentation/tree/e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.cuda.amp import autocast as autocast import torch._C import torch.serialization class Model(nn.ModuleList): """Pyramid Pooling Module that only concat the features ...
Deep_Neural_Network
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn as nn class Deep_Neural_Network(nn.Module): def __init__(self, D_in, fc1_size=40, fc2_size=20, fc3_size=40, fc4_size=20, fc5_size=40): """ Neural Network model with 5 hidden layers. D_in: Dimension of input fc1_size, fc2...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dyn...
Bradnowak/Flowlytic
Deep_Neural_Network
false
178
[ "BSD-2-Clause" ]
0
d5b0951901b97d5546f2ecd700eee5b78c689646
https://github.com/Bradnowak/Flowlytic/tree/d5b0951901b97d5546f2ecd700eee5b78c689646
import torch import torch.utils.data import torch.nn as nn class Model(nn.Module): def __init__(self, D_in, fc1_size=40, fc2_size=20, fc3_size=40, fc4_size=20, fc5_size=40): """ Neural Network model with 5 hidden layers. D_in: Dimension of input fc1_size, fc2_size, etc.: D...
MADDPGActorVersion1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class MADDPGActorVersion1(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units, fc2_unit...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
Brandon-HY-Lin/deep-reinforcement-learning
MADDPGActorVersion1
false
179
[ "MIT" ]
0
d809851b6f98d1089379392d4687e2acaf1c0c79
https://github.com/Brandon-HY-Lin/deep-reinforcement-learning/tree/d809851b6f98d1089379392d4687e2acaf1c0c79
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units, fc2_units): ""...
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_t...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd...
BinahHu/stylegan2-pytorch
ModulatedConv2d
false
180
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_t...
Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class Linear(nn.Module): def __init__(self, node_dim, hid_dim, num_class_l1, num_class_l2, num_class_l3): super(Linear, self).__init__() self.linear_l1 = nn.Linear(node_dim, num_class_l1) self.linear_l2 = nn.Linear...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch...
BrandonBian/OSDR-GNN
Linear
false
181
[ "MIT" ]
0
0f631d5ddad77df7260c11de3507af014f9447ed
https://github.com/BrandonBian/OSDR-GNN/tree/0f631d5ddad77df7260c11de3507af014f9447ed
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self, node_dim, hid_dim, num_class_l1, num_class_l2, num_class_l3): super().__init__() self.linear_l1 = nn.Linear(node_dim, num_class_l1) self.linear_l2 = nn.Linear(node_dim + n...
DDPGCriticVersion1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class DDPGCriticVersion1(nn.Module): def __init__(self, state_size, action_size, seed, fcs1_units=128, ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import tor...
Brandon-HY-Lin/deep-reinforcement-learning
DDPGCriticVersion1
false
182
[ "MIT" ]
0
d809851b6f98d1089379392d4687e2acaf1c0c79
https://github.com/Brandon-HY-Lin/deep-reinforcement-learning/tree/d809851b6f98d1089379392d4687e2acaf1c0c79
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Model(nn.Module): def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units...
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_t...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn from torch....
BinahHu/stylegan2-pytorch
ToRGB
false
183
[ "MIT", "BSD-2-Clause", "Apache-2.0" ]
0
9975707ffd93872fce02f7e3654eb588a09e23e4
https://github.com/BinahHu/stylegan2-pytorch/tree/9975707ffd93872fce02f7e3654eb588a09e23e4
from torch.autograd import Function import math import torch from torch import nn from torch.nn import functional as F def upsample(in_tens, out_H=64): in_H = in_tens.shape[2] scale_factor = 1.0 * out_H / in_H return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_t...
Neural_Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn as nn class Neural_Net(nn.Module): def __init__(self, D_in): """ Neural Network model with 1 hidden layer. D_in: Dimension of input """ super(Neural_Net, self).__init__() self.fc1 = nn.Linear(D_in, 100) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data impor...
Bradnowak/Flowlytic
Neural_Net
false
184
[ "BSD-2-Clause" ]
0
d5b0951901b97d5546f2ecd700eee5b78c689646
https://github.com/Bradnowak/Flowlytic/tree/d5b0951901b97d5546f2ecd700eee5b78c689646
import torch import torch.utils.data import torch.nn as nn class Model(nn.Module): def __init__(self, D_in): """ Neural Network model with 1 hidden layer. D_in: Dimension of input """ super().__init__() self.fc1 = nn.Linear(D_in, 100) self.relu1 = nn.Sigmo...
FocalLossBinary
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.jit import torch.nn.functional as F import torch.nn.functional from functools import partial from torch.nn.modules.loss import _Loss def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'): """ Compute...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
CamilaGL/nnUNet
FocalLossBinary
false
185
[ "Apache-2.0" ]
0
471ab73a6e4f67fc72d476183b5344be4cccf7ca
https://github.com/CamilaGL/nnUNet/tree/471ab73a6e4f67fc72d476183b5344be4cccf7ca
import torch import torch.jit import torch.nn.functional as F import torch.nn.functional from functools import partial from torch.nn.modules.loss import _Loss def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'): """ Compute...
Vgg16
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F class Vgg16(nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_s...
Arthur1511/CAD-COVID
Vgg16
false
186
[ "MIT" ]
0
daab5d70b9f811da41f702e92179a15ca4809fa5
https://github.com/Arthur1511/CAD-COVID/tree/daab5d70b9f811da41f702e92179a15ca4809fa5
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = ...
Dueling_QNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn.functional as F import torch.nn as nn class Dueling_QNetwork(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64): super().__init__() self.seed = torch.manual_seed(seed) self.fc1_a = nn.Linear(state_size, fc1_units)...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
Brandon-HY-Lin/deep-reinforcement-learning
Dueling_QNetwork
false
187
[ "MIT" ]
0
d809851b6f98d1089379392d4687e2acaf1c0c79
https://github.com/Brandon-HY-Lin/deep-reinforcement-learning/tree/d809851b6f98d1089379392d4687e2acaf1c0c79
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64): super().__init__() self.seed = torch.manual_seed(seed) self.fc1_a = nn.Linear(state_size, fc1_units) se...
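The Dueling_QNetwork field is truncated after the first advantage-stream layer. The sketch below assumes the standard dueling completion: parallel advantage and value streams combined as Q = V + (A - mean A); the second stream and the aggregation are assumptions, and the Sketch class name is ours:

import torch
import torch.nn as nn
import torch.nn.functional as F

class DuelingQNetworkSketch(nn.Module):
    def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
        super().__init__()
        self.seed = torch.manual_seed(seed)
        # advantage stream
        self.fc1_a = nn.Linear(state_size, fc1_units)
        self.fc2_a = nn.Linear(fc1_units, fc2_units)
        self.fc3_a = nn.Linear(fc2_units, action_size)
        # state-value stream
        self.fc1_v = nn.Linear(state_size, fc1_units)
        self.fc2_v = nn.Linear(fc1_units, fc2_units)
        self.fc3_v = nn.Linear(fc2_units, 1)

    def forward(self, state):
        a = self.fc3_a(F.relu(self.fc2_a(F.relu(self.fc1_a(state)))))
        v = self.fc3_v(F.relu(self.fc2_v(F.relu(self.fc1_v(state)))))
        # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); centering A keeps the
        # decomposition identifiable.
        return v + a - a.mean(dim=1, keepdim=True)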
NAC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.nn import Module import torch from torch.nn.parameter import Parameter from torch.nn import functional from torch.nn import init from torch.nn.modules import Module import torch.utils.data class NAC(Module): def __init__(self, n_in, n_out): super().__init__() self.W_hat = Parameter(tor...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn impor...
Caerisse/deep_focus
NAC
false
188
[ "MIT" ]
0
a6549e0b222a01569b224fb651666ef5dbb5072f
https://github.com/Caerisse/deep_focus/tree/a6549e0b222a01569b224fb651666ef5dbb5072f
from torch.nn import Module import torch from torch.nn.parameter import Parameter from torch.nn import functional from torch.nn import init from torch.nn.modules import Module import torch.utils.data class Model(Module): def __init__(self, n_in, n_out): super().__init__() self.W_hat = Parameter(t...
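The NAC body is cut after "self.W_hat = Parameter(tor...". A sketch of the Neural Accumulator cell from the NALU paper (Trask et al., 2018), which the visible W_hat naming matches; the xavier initialization choice and the Sketch class name are assumptions:

import torch
from torch.nn import Module, Parameter, functional, init

class NACSketch(Module):
    def __init__(self, n_in, n_out):
        super().__init__()
        self.W_hat = Parameter(torch.Tensor(n_out, n_in))
        self.M_hat = Parameter(torch.Tensor(n_out, n_in))
        init.xavier_uniform_(self.W_hat)
        init.xavier_uniform_(self.M_hat)

    def forward(self, x):
        # Effective weights tanh(W_hat) * sigmoid(M_hat) saturate toward
        # {-1, 0, 1}, biasing the layer toward addition/subtraction of inputs.
        W = torch.tanh(self.W_hat) * torch.sigmoid(self.M_hat)
        return functional.linear(x, W)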
AttwNetHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class AttwNetHead(nn.Module): def __init__(self, idim, hdim, odim): super().__init__() self.mlp_attn = nn.Linear(idim, 1, bias=False) self.mlp_out = nn.Linear(idim, odim, bi...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
CFM-MSG/SDN
AttwNetHead
false
189
[ "MIT" ]
0
f309602dc2bb73117355003f3744f8e5450dbccc
https://github.com/CFM-MSG/SDN/tree/f309602dc2bb73117355003f3744f8e5450dbccc
import torch import torch.nn as nn import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class Model(nn.Module): def __init__(self, idim, hdim, odim): super().__init__() self.mlp_attn = nn.Linear(idim, 1, bias=False) self.mlp_out = nn.Linear(idim, odim, bias=Fal...
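Only the two linear layers of AttwNetHead are visible, so the forward pass below is a hypothetical reconstruction of a common attention-pooling pattern: score each timestep with mlp_attn, softmax over the sequence, pool, then project with mlp_out. The hdim argument appears in the real signature but its role is not visible in the preview, so it is omitted here:

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttnPoolHead(nn.Module):
    def __init__(self, idim, odim):
        super().__init__()
        self.mlp_attn = nn.Linear(idim, 1, bias=False)
        self.mlp_out = nn.Linear(idim, odim, bias=False)

    def forward(self, feats, mask=None):
        # feats: (B, L, idim); scores: (B, L)
        scores = self.mlp_attn(feats).squeeze(-1)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attw = F.softmax(scores, dim=-1)
        pooled = torch.bmm(attw.unsqueeze(1), feats).squeeze(1)  # (B, idim)
        return self.mlp_out(pooled), attw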
Embeddings
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import math import torch import torch.nn as nn class Embeddings(nn.Module): def __init__(self, d_model, vocab): super(Embeddings, self).__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): newx = x.long() embeddingMat = self....
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_st...
CallMeSp/My_flowQA
Embeddings
false
190
[ "Apache-2.0" ]
0
87d82551f614b089771b22a1992e2be41a2995b3
https://github.com/CallMeSp/My_flowQA/tree/87d82551f614b089771b22a1992e2be41a2995b3
import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d_model, vocab): super().__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): newx = x.long() embeddingMat = self.lut(newx) * math.sqrt...
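This is the scaled embedding from "Attention Is All You Need"; a compact runnable version of what the truncated forward computes:

import math
import torch
import torch.nn as nn

class ScaledEmbedding(nn.Module):
    def __init__(self, d_model, vocab):
        super().__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        # Scaling by sqrt(d_model) keeps embedding magnitudes comparable
        # to the positional encodings added later (Vaswani et al., 2017).
        return self.lut(x.long()) * math.sqrt(self.d_model)

emb = ScaledEmbedding(d_model=4, vocab=10)(torch.randint(0, 10, (2, 5)))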
ConvBlockINEDense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn from torch.nn import init as init class ConvBlockINEDense(nn.Module): def __init__(self, n_ch, act='relu', ksize=3, norm='in', padding_mode= 'circular'): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.Le...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
BaekduChoi/Halftoning
ConvBlockINEDense
false
191
[ "BSD-3-Clause" ]
0
9459d202c0b3b4e587e6d89af04c4bcfaa604d31
https://github.com/BaekduChoi/Halftoning/tree/9459d202c0b3b4e587e6d89af04c4bcfaa604d31
import torch from torch import nn from torch.nn import init as init class Model(nn.Module): def __init__(self, n_ch, act='relu', ksize=3, norm='in', padding_mode= 'circular'): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2,...
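The preview shows only the activation/padding setup, so the dense wiring below is an assumption based on the class name: a DenseNet-style block where each conv sees the concatenation of all earlier feature maps, with InstanceNorm and circular padding. The number of convs (three here) is hypothetical:

import torch
from torch import nn

class DenseINBlock(nn.Module):
    def __init__(self, n_ch, ksize=3, padding_mode='circular'):
        super().__init__()
        pad = (ksize - 1) // 2
        self.act = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(n_ch, n_ch, ksize, padding=pad, padding_mode=padding_mode)
        self.conv2 = nn.Conv2d(2 * n_ch, n_ch, ksize, padding=pad, padding_mode=padding_mode)
        self.conv3 = nn.Conv2d(3 * n_ch, n_ch, ksize, padding=pad, padding_mode=padding_mode)
        self.norm1 = nn.InstanceNorm2d(n_ch)
        self.norm2 = nn.InstanceNorm2d(n_ch)

    def forward(self, x):
        x1 = self.act(self.norm1(self.conv1(x)))
        x2 = self.act(self.norm2(self.conv2(torch.cat([x1, x], dim=1))))
        return self.conv3(torch.cat([x2, x1, x], dim=1))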
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self, n_obs, n_mid, n_action): super().__init__() self.fc1 = nn.Linear(n_obs, n_mid) self.fc2 = nn.Linear(n_mid, n_mid) self.fc3 = nn.Linear(n_mid, n_action) def forward(self...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_...
CUSP-NYU/autonomous-drone-swarm
Net
false
192
[ "MIT" ]
0
3bad474fd9641521588af61ddc48b84ffc746a74
https://github.com/CUSP-NYU/autonomous-drone-swarm/tree/3bad474fd9641521588af61ddc48b84ffc746a74
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, n_obs, n_mid, n_action): super().__init__() self.fc1 = nn.Linear(n_obs, n_mid) self.fc2 = nn.Linear(n_mid, n_mid) self.fc3 = nn.Linear(n_mid, n_action) def forward(se...
HadamardProduct
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class HadamardProduct(nn.Module): def __init__(self, idim_1, idim_2, hdim): super(HadamardProduct, self).__init__() self.fc_1 = nn.Linear(idim_1, hdim) self.fc_2 = nn.Linear...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import ...
CFM-MSG/SDN
HadamardProduct
false
193
[ "MIT" ]
0
f309602dc2bb73117355003f3744f8e5450dbccc
https://github.com/CFM-MSG/SDN/tree/f309602dc2bb73117355003f3744f8e5450dbccc
import torch import torch.nn as nn import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class Model(nn.Module): def __init__(self, idim_1, idim_2, hdim): super().__init__() self.fc_1 = nn.Linear(idim_1, hdim) self.fc_2 = nn.Linear(idim_2, hdim) self.fc_...
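A minimal sketch of the Hadamard-product fusion this record names: both inputs are projected into a shared hdim space and combined elementwise. The ReLU nonlinearities are an assumption; the preview is truncated before the forward pass:

import torch
import torch.nn as nn

class HadamardFusion(nn.Module):
    def __init__(self, idim_1, idim_2, hdim):
        super().__init__()
        self.fc_1 = nn.Linear(idim_1, hdim)
        self.fc_2 = nn.Linear(idim_2, hdim)

    def forward(self, x1, x2):
        # Project each modality to hdim, then gate one with the other
        # via an elementwise (Hadamard) product.
        return torch.relu(self.fc_1(x1)) * torch.relu(self.fc_2(x2))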
Classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.optim.lr_scheduler class Classifier(nn.Module): def __init__(self, latent_size, output_size): super().__init__() self.fc1 = nn.Linear(latent_size, 100) self.relu1 = nn.LeakyReLU(0.2) self.fc2 = nn.Linear(100, 50) self.relu2 = ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.optim.lr_scheduler assert_size_stride = torch....
CMU-IDS-2020/fp-index-out-of-bounds
Classifier
false
194
[ "BSD-3-Clause" ]
0
1a9fdaac8288a980e759a0c2e46f28294d25c71f
https://github.com/CMU-IDS-2020/fp-index-out-of-bounds/tree/1a9fdaac8288a980e759a0c2e46f28294d25c71f
import torch from torch import nn import torch.optim.lr_scheduler class Model(nn.Module): def __init__(self, latent_size, output_size): super().__init__() self.fc1 = nn.Linear(latent_size, 100) self.relu1 = nn.LeakyReLU(0.2) self.fc2 = nn.Linear(100, 50) self.relu2 = nn.Le...
LCCALayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn def mean_channels(F): assert F.dim() == 4 spatial_sum = F.sum(3, keepdim=True).sum(2, keepdim=True) return spatial_sum / (F.size(2) * F.size(3)) def stdv_channels(F): assert F.dim() == 4 F_mean = mean_channels(F) F_variance = (F - F_mean).pow(2).sum(3, keep...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as ...
Cai631/PMDN
LCCALayer
false
195
[ "Apache-2.0" ]
0
3eca931fbef64f612572d24c856a91342bbdea59
https://github.com/Cai631/PMDN/tree/3eca931fbef64f612572d24c856a91342bbdea59
import torch import torch.nn as nn def mean_channels(F): assert F.dim() == 4 spatial_sum = F.sum(3, keepdim=True).sum(2, keepdim=True) return spatial_sum / (F.size(2) * F.size(3)) def stdv_channels(F): assert F.dim() == 4 F_mean = mean_channels(F) F_variance = (F - F_mean).pow(2).sum(3, keep...
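The mean_channels/stdv_channels helpers mark this as a contrast-aware channel attention layer (as in IMDN-style CCA); a sketch under that assumption, with the bottleneck gate structure hypothetical:

import torch
import torch.nn as nn

def stdv_channels(F):
    # Per-channel spatial standard deviation, matching the helper above.
    assert F.dim() == 4
    mean = F.mean(dim=(2, 3), keepdim=True)
    return ((F - mean).pow(2).mean(dim=(2, 3), keepdim=True)).sqrt()

class CCALayer(nn.Module):
    # Channels are reweighted by a gate computed from their contrast
    # (stdv + mean) instead of the mean alone; channel must be >= reduction.
    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        y = stdv_channels(x) + self.avg_pool(x)
        return x * self.conv_du(y)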
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch from torch import nn import torch.nn.functional as F import torch.optim.lr_scheduler class Generator(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, d_model, vocab): super(Generator, self).__init__() self.proj = nn.Linear(d_model, vocab) ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
CMU-IDS-2020/fp-index-out-of-bounds
Generator
false
196
[ "BSD-3-Clause" ]
0
1a9fdaac8288a980e759a0c2e46f28294d25c71f
https://github.com/CMU-IDS-2020/fp-index-out-of-bounds/tree/1a9fdaac8288a980e759a0c2e46f28294d25c71f
import torch from torch import nn import torch.nn.functional as F import torch.optim.lr_scheduler class Model(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, d_model, vocab): super().__init__() self.proj = nn.Linear(d_model, vocab) def forward(self,...
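The full module is short enough to restate as a runnable sketch; this is the standard generation head from the annotated Transformer:

import torch
import torch.nn as nn
import torch.nn.functional as F

class GeneratorSketch(nn.Module):
    """Standard linear + softmax generation step."""
    def __init__(self, d_model, vocab):
        super().__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        # log_softmax pairs with NLLLoss / KLDivLoss downstream.
        return F.log_softmax(self.proj(x), dim=-1)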
BDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image def flatten(x): x_flat = x.clone() x_flat = x_flat.view(x.shape...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
CarlosPena00/pytorch-unet
BDiceLoss
false
197
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image def flatten(x): x_flat = x.clone() x_flat = x_flat.view(x.shape...
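A minimal binary Dice loss sketch matching the sigmoid + flatten pattern visible above; the smoothing constant and mean reduction are assumptions, and the centercrop alignment step is omitted:

import torch
import torch.nn as nn

class BinaryDiceLoss(nn.Module):
    def __init__(self, eps=1.0):
        super().__init__()
        self.eps = eps  # smoothing term; keeps the ratio defined on empty masks

    def forward(self, y_pred, y_true):
        p = torch.sigmoid(y_pred).reshape(y_pred.shape[0], -1)
        t = y_true.reshape(y_true.shape[0], -1)
        inter = (p * t).sum(dim=1)
        dice = (2 * inter + self.eps) / (p.sum(dim=1) + t.sum(dim=1) + self.eps)
        return 1 - dice.mean()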
BCELoss2c
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class BCELoss2c(nn.Module): def __init__(self): super(BCELoss2c, self).__init__() self.bce0 = nn.BCEWithLogitsLoss() self.bce1 = nn.BCEWithLogitsLoss() None def forward(self, y_pred, y_true, weights=None): loss_0 = self.bce0(y_pred[:...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
CarlosPena00/pytorch-unet
BCELoss2c
false
198
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.bce0 = nn.BCEWithLogitsLoss() self.bce1 = nn.BCEWithLogitsLoss() None def forward(self, y_pred, y_true, weights=None): loss_0 = self.bce0(y_pred[:, 0], y_true[:, 0])...
BCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn class BCELoss(nn.Module): def __init__(self): super(BCELoss, self).__init__() self.bce = nn.BCEWithLogitsLoss() def forward(self, y_pred, y_true, weights=None): loss_0 = self.bce(y_pred[:, 0], y_true[:, 0]) loss_1 = self.bce(y_pred[:, 1], y_...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torc...
CarlosPena00/pytorch-unet
BCELoss
false
199
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.bce = nn.BCEWithLogitsLoss() def forward(self, y_pred, y_true, weights=None): loss_0 = self.bce(y_pred[:, 0], y_true[:, 0]) loss_1 = self.bce(y_pred[:, 1], y_true[:, 1]) ...
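Both BCELoss2c above and this BCELoss apply BCEWithLogitsLoss per output channel; a runnable sketch of the pattern, with the plain sum of the two channel losses assumed (the combination is cut off in the preview):

import torch
import torch.nn as nn

class ChannelwiseBCE(nn.Module):
    # One BCE-with-logits term per output channel (e.g. mask / contour),
    # so each channel can later be weighted independently.
    def __init__(self):
        super().__init__()
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, y_pred, y_true):
        loss_0 = self.bce(y_pred[:, 0], y_true[:, 0])
        loss_1 = self.bce(y_pred[:, 1], y_true[:, 1])
        return loss_0 + loss_1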
NALU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
from torch.nn import Module import torch from torch.nn.parameter import Parameter from torch.nn import functional from torch.nn import init from torch.nn.modules import Module import torch.utils.data class NAC(Module): def __init__(self, n_in, n_out): super().__init__() self.W_hat = Parameter(tor...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math fr...
Caerisse/deep_focus
NALU
false
200
[ "MIT" ]
0
a6549e0b222a01569b224fb651666ef5dbb5072f
https://github.com/Caerisse/deep_focus/tree/a6549e0b222a01569b224fb651666ef5dbb5072f
from torch.nn import Module import torch from torch.nn.parameter import Parameter from torch.nn import functional from torch.nn import init from torch.nn.modules import Module import torch.utils.data class NAC(Module): def __init__(self, n_in, n_out): super().__init__() self.W_hat = Parameter(tor...
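The preview shows only the inner NAC; a self-contained NALU sketch following Trask et al. (2018), where the same NAC weights applied in log space give a multiplicative path and a learned gate mixes the two:

import torch
from torch import nn
from torch.nn import functional, init
from torch.nn.parameter import Parameter

class NALUSketch(nn.Module):
    def __init__(self, n_in, n_out, eps=1e-10):
        super().__init__()
        self.W_hat = Parameter(torch.Tensor(n_out, n_in))
        self.M_hat = Parameter(torch.Tensor(n_out, n_in))
        self.G = Parameter(torch.Tensor(n_out, n_in))
        for p in (self.W_hat, self.M_hat, self.G):
            init.xavier_uniform_(p)
        self.eps = eps

    def forward(self, x):
        W = torch.tanh(self.W_hat) * torch.sigmoid(self.M_hat)
        g = torch.sigmoid(functional.linear(x, self.G))  # learned gate
        add = functional.linear(x, W)                    # NAC: +/- path
        # Sums of logs become products of inputs after exp().
        mul = torch.exp(functional.linear(torch.log(x.abs() + self.eps), W))
        return g * add + (1 - g) * mul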
WeightedBDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image class WeightedBDiceLoss(nn.Module): def __init__(self): su...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride emp...
CarlosPena00/pytorch-unet
WeightedBDiceLoss
false
201
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image class Model(nn.Module): def __init__(self): super().__init...
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F class DiceLoss(nn.Module): def __init__(self, dims=(1, 2, 3)) ->None: super(DiceLoss, self).__init__() self.eps: 'float' = 1e-06 self.dims = dims def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
CarlosPena00/pytorch-unet
DiceLoss
false
202
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, dims=(1, 2, 3)) ->None: super().__init__() self.eps: 'float' = 1e-06 self.dims = dims def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', weights=None) -...
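A runnable sketch of the multiclass soft Dice loss above, using the dims=(1, 2, 3) and eps=1e-06 visible in the preview; the softmax over the channel dimension and the mean reduction are assumptions consistent with the truncated forward:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftDiceLoss(nn.Module):
    def __init__(self, dims=(1, 2, 3), eps=1e-06):
        super().__init__()
        self.dims = dims
        self.eps = eps

    def forward(self, input, target):
        # input: (B, C, H, W) logits; target: (B, C, H, W) one-hot.
        p = F.softmax(input, dim=1)
        inter = torch.sum(p * target, dim=self.dims)
        card = torch.sum(p + target, dim=self.dims)
        dice = 2.0 * inter / (card + self.eps)
        return torch.mean(1.0 - dice)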
StableBCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch class StableBCELoss(torch.nn.modules.Module): def __init__(self): super(StableBCELoss, self).__init__() def forward(self, input, target): neg_abs = -input.abs() loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() return loss.mean() def get_in...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = t...
CarlosPena00/pytorch-unet
StableBCELoss
false
203
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch class Model(torch.nn.modules.Module): def __init__(self): super().__init__() def forward(self, input, target): neg_abs = -input.abs() loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() return loss.mean() def get_inputs(): return [torch.r...
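The forward pass here is fully visible and is the standard numerically stable rewrite of BCE with logits: for logit x and target t, -t·log σ(x) - (1-t)·log(1-σ(x)) = max(x, 0) - x·t + log(1 + e^{-|x|}), so exp() never sees a large positive argument. A quick check against PyTorch's own stable implementation:

import torch
import torch.nn.functional as F

def stable_bce(input, target):
    neg_abs = -input.abs()
    return (input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()).mean()

x, t = torch.randn(4, 4) * 50, torch.rand(4, 4)  # large logits overflow the naive form
assert torch.allclose(stable_bce(x, t),
                      F.binary_cross_entropy_with_logits(x, t), atol=1e-6)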
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class MultiHeadAttention(nn.Module): def __init__(self, idim, odim, nhead=1, use_bias=True): super(MultiHeadAttention, self).__init__() self.idim = i...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
CFM-MSG/SDN
MultiHeadAttention
false
204
[ "MIT" ]
0
f309602dc2bb73117355003f3744f8e5450dbccc
https://github.com/CFM-MSG/SDN/tree/f309602dc2bb73117355003f3744f8e5450dbccc
import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class Model(nn.Module): def __init__(self, idim, odim, nhead=1, use_bias=True): super().__init__() self.idim = idim self.odim = odim ...
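A compact multi-head attention sketch matching the (idim, odim, nhead, use_bias) signature; the head split, scaled dot-product scores, and mask handling follow the standard formulation, while any output projection or dropout in the original is not visible and is omitted:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MHASketch(nn.Module):
    def __init__(self, idim, odim, nhead=1, use_bias=True):
        super().__init__()
        assert odim % nhead == 0
        self.nhead, self.hdim = nhead, odim // nhead
        self.q = nn.Linear(idim, odim, bias=use_bias)
        self.k = nn.Linear(idim, odim, bias=use_bias)
        self.v = nn.Linear(idim, odim, bias=use_bias)

    def forward(self, query, key, value, mask=None):
        B, Lq, Lk = query.size(0), query.size(1), key.size(1)
        # (B, L, odim) -> (B, nhead, L, hdim)
        q = self.q(query).view(B, Lq, self.nhead, self.hdim).transpose(1, 2)
        k = self.k(key).view(B, Lk, self.nhead, self.hdim).transpose(1, 2)
        v = self.v(value).view(B, Lk, self.nhead, self.hdim).transpose(1, 2)
        scores = q @ k.transpose(-2, -1) / self.hdim ** 0.5
        if mask is not None:  # mask must broadcast to (B, nhead, Lq, Lk)
            scores = scores.masked_fill(mask == 0, float('-inf'))
        out = F.softmax(scores, dim=-1) @ v
        return out.transpose(1, 2).reshape(B, Lq, -1)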
MSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F class MSELoss(nn.Module): def __init__(self) ->None: super(MSELoss, self).__init__() self.mse_loss = nn.MSELoss() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', w=None ) ->torch.Tensor: input...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
CarlosPena00/pytorch-unet
MSELoss
false
205
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self) ->None: super().__init__() self.mse_loss = nn.MSELoss() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', w=None ) ->torch.Tensor: input_soft = F.softm...
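The visible fragment (input_soft = F.softmax...) indicates MSE taken between class probabilities and targets; a short sketch under that reading, with the softmax dimension assumed to be the channel axis:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftmaxMSE(nn.Module):
    # MSE between softmax probabilities and (one-hot) targets: a smooth,
    # bounded alternative to cross-entropy for dense prediction.
    def __init__(self):
        super().__init__()
        self.mse_loss = nn.MSELoss()

    def forward(self, input, target, w=None):
        input_soft = F.softmax(input, dim=1)
        return self.mse_loss(input_soft, target)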
MutiLevelEnhance
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class MutiLevelEnhance(nn.Module): def __init__(self, idim, odim, nhead=1, use_bias=True): super(MutiLevelEnhance, self).__init__() self.idim = idim ...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime....
CFM-MSG/SDN
MutiLevelEnhance
false
206
[ "MIT" ]
0
f309602dc2bb73117355003f3744f8e5450dbccc
https://github.com/CFM-MSG/SDN/tree/f309602dc2bb73117355003f3744f8e5450dbccc
import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed import torch.optim.lr_scheduler import torch.utils.data class Model(nn.Module): def __init__(self, idim, odim, nhead=1, use_bias=True): super().__init__() self.idim = idim self.odim = odim ...
GDL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import numpy as np from torch import nn import torch.jit import torch.nn.functional def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=Tr...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn import torch.jit import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size...
CamilaGL/nnUNet
GDL
false
207
[ "Apache-2.0" ]
0
471ab73a6e4f67fc72d476183b5344be4cccf7ca
https://github.com/CamilaGL/nnUNet/tree/471ab73a6e4f67fc72d476183b5344be4cccf7ca
import torch import numpy as np from torch import nn import torch.jit import torch.nn.functional def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=Tr...
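A functional sketch of the Generalized Dice Loss this record names (Sudre et al., 2017, as used in nnU-Net-style code): each class is weighted by the inverse square of its volume so small structures count as much as large ones. Shapes and the mean reduction are assumptions:

import torch

def generalized_dice_loss(probs, onehot, eps=1e-06):
    # probs, onehot: (B, C, *spatial); probs already softmaxed.
    axes = tuple(range(2, probs.dim()))        # spatial axes
    tp = (probs * onehot).sum(axes)            # (B, C)
    fp = (probs * (1 - onehot)).sum(axes)
    fn = ((1 - probs) * onehot).sum(axes)
    w = 1.0 / (onehot.sum(axes) ** 2 + eps)    # inverse-volume-squared weights
    dice = 2 * (w * tp).sum(1) / ((w * (2 * tp + fp + fn)).sum(1) + eps)
    return (1 - dice).mean()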
Dice
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _al...
import torch import torch.nn as nn import torch.nn.functional as F class DiceLoss(nn.Module): def __init__(self, dims=(1, 2, 3)) ->None: super(DiceLoss, self).__init__() self.eps: 'float' = 1e-06 self.dims = dims def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', ...
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn ...
CarlosPena00/pytorch-unet
Dice
false
208
[ "MIT" ]
0
8365bace23e4b04b9c5b75cd6720807ea8cac5ab
https://github.com/CarlosPena00/pytorch-unet/tree/8365bace23e4b04b9c5b75cd6720807ea8cac5ab
import torch import torch.nn as nn import torch.nn.functional as F class DiceLoss(nn.Module): def __init__(self, dims=(1, 2, 3)) ->None: super().__init__() self.eps: 'float' = 1e-06 self.dims = dims def forward(self, input: 'torch.Tensor', target: 'torch.Tensor', weights=None...
SDNE_layer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _alig...
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class SDNE_layer(nn.Module): def __init__(self, num_node, hidden_size1, hidden_size2, droput, alpha, beta, nu1, nu2): super(SDNE_layer, self).__init__() self.num_node = num_node self.hidden_...
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch....
BruceW91/cogdl
SDNE_layer
false
209
[ "MIT" ]
0
1ad524375f5ba062103698a0432fc857572a6933
https://github.com/BruceW91/cogdl/tree/1ad524375f5ba062103698a0432fc857572a6933
import torch import torch.utils.data import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, num_node, hidden_size1, hidden_size2, droput, alpha, beta, nu1, nu2): super().__init__() self.num_node = num_node self.hidden_size1 = hidden_size1 ...
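A minimal SDNE sketch (Wang et al., 2016) matching the constructor arguments above: an autoencoder over adjacency rows whose second-order loss up-weights non-zero entries by beta, plus a first-order Laplacian term pulling linked nodes' embeddings together. The exact layer sizes, alpha weighting, and the nu1/nu2 regularizers from the original are omitted:

import torch
import torch.nn as nn

class SDNESketch(nn.Module):
    def __init__(self, num_node, hidden_size1, hidden_size2, beta=5.0):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(num_node, hidden_size1), nn.ReLU(),
                                 nn.Linear(hidden_size1, hidden_size2), nn.ReLU())
        self.dec = nn.Sequential(nn.Linear(hidden_size2, hidden_size1), nn.ReLU(),
                                 nn.Linear(hidden_size1, num_node), nn.ReLU())
        self.beta = beta

    def forward(self, adj):
        emb = self.enc(adj)            # (N, hidden_size2) node embeddings
        recon = self.dec(emb)          # reconstructed adjacency rows
        # Penalty matrix B: observed edges weighted by beta, zeros by 1.
        B = torch.ones_like(adj) + (adj != 0) * (self.beta - 1)
        loss_2nd = ((recon - adj) * B).pow(2).sum()
        # First-order proximity: trace(Y^T L Y) with L = D - A.
        deg = torch.diag(adj.sum(1))
        loss_1st = 2 * torch.trace(emb.t() @ (deg - adj) @ emb)
        return loss_2nd + loss_1st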