English model architecture v2
Browse files- english/shobdo_english.py +35 -22
english/shobdo_english.py
CHANGED
|
@@ -3,37 +3,50 @@ import torch.nn as nn
|
|
| 3 |
import torch.nn.functional as F
|
| 4 |
|
| 5 |
class ConvBnRelu(nn.Module):
|
| 6 |
-
def __init__(self,i,o,k=3,s=1,p=1):
|
| 7 |
super().__init__()
|
| 8 |
-
self.b=nn.Sequential(
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
class LightCNN(nn.Module):
|
| 12 |
def __init__(self):
|
| 13 |
super().__init__()
|
| 14 |
-
self.b1=nn.Sequential(ConvBnRelu(1,32),ConvBnRelu(32,32),
|
| 15 |
-
|
| 16 |
-
self.
|
| 17 |
-
|
| 18 |
-
self.
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
return self.pool(x).squeeze(2)
|
| 23 |
|
| 24 |
class BiLSTM(nn.Module):
|
| 25 |
-
def __init__(self,i,h,o):
|
| 26 |
super().__init__()
|
| 27 |
-
self.rnn=nn.LSTM(i,h,bidirectional=True,batch_first=True)
|
| 28 |
-
self.fc=nn.Linear(h*2,o)
|
| 29 |
-
def forward(self,
|
|
|
|
|
|
|
| 30 |
|
| 31 |
class Model(nn.Module):
|
| 32 |
-
def __init__(self,input_channel,output_channel,hidden_size,num_class):
|
| 33 |
super().__init__()
|
| 34 |
-
self.cnn=LightCNN()
|
| 35 |
-
self.rnn=nn.Sequential(
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
import torch.nn.functional as F
|
| 4 |
|
| 5 |
class ConvBnRelu(nn.Module):
    """Convolution followed by batch normalization and an in-place ReLU.

    Args:
        i: number of input channels.
        o: number of output channels.
        k: kernel size (default 3).
        s: stride (default 1).
        p: padding (default 1).
    """

    def __init__(self, i, o, k=3, s=1, p=1):
        super().__init__()
        # The conv carries no bias: BatchNorm2d applies an affine shift
        # right after it, which would make a conv bias redundant.
        conv = nn.Conv2d(i, o, k, s, p, bias=False)
        norm = nn.BatchNorm2d(o)
        act = nn.ReLU(inplace=True)
        self.b = nn.Sequential(conv, norm, act)

    def forward(self, x):
        """Apply conv -> batch norm -> ReLU to *x*."""
        return self.b(x)
| 14 |
|
| 15 |
class LightCNN(nn.Module):
    """Five-stage convolutional backbone producing a width-wise feature sequence.

    The first two stages halve both spatial dimensions; the next two halve
    only the height, preserving width (time) resolution.  A final adaptive
    average pool collapses the remaining height, so a (B, 1, H, W) image
    becomes a (B, 256, W') sequence.
    """

    def __init__(self):
        super().__init__()
        self.b1 = nn.Sequential(ConvBnRelu(1, 32), ConvBnRelu(32, 32),
                                nn.MaxPool2d(2, 2))
        self.b2 = nn.Sequential(ConvBnRelu(32, 64), ConvBnRelu(64, 64),
                                nn.MaxPool2d(2, 2))
        # From here on, pool only along height so the width axis survives.
        self.b3 = nn.Sequential(ConvBnRelu(64, 128), ConvBnRelu(128, 128),
                                nn.MaxPool2d((2, 1)))
        self.b4 = nn.Sequential(ConvBnRelu(128, 256), ConvBnRelu(256, 256),
                                nn.MaxPool2d((2, 1)))
        self.b5 = nn.Sequential(ConvBnRelu(256, 256), ConvBnRelu(256, 256))
        # Collapse whatever height remains down to 1, keep the width as-is.
        self.pool = nn.AdaptiveAvgPool2d((1, None))

    def forward(self, x):
        """Map a (B, 1, H, W) batch to (B, 256, W') features."""
        out = x
        for stage in (self.b1, self.b2, self.b3, self.b4, self.b5):
            out = stage(out)
        # (B, 256, 1, W') -> (B, 256, W')
        return self.pool(out).squeeze(2)
|
| 31 |
|
| 32 |
class BiLSTM(nn.Module):
    """One bidirectional LSTM layer followed by a linear projection.

    Args:
        i: input feature size.
        h: hidden size per direction.
        o: projected output feature size.
    """

    def __init__(self, i, h, o):
        super().__init__()
        self.rnn = nn.LSTM(i, h, bidirectional=True, batch_first=True)
        # Forward and backward states are concatenated, hence width 2*h.
        self.fc = nn.Linear(h * 2, o)

    def forward(self, x):
        """Project a (B, T, i) sequence to (B, T, o)."""
        states, _ = self.rnn(x)  # final (h_n, c_n) are not needed
        return self.fc(states)
|
| 40 |
|
| 41 |
class Model(nn.Module):
    """CNN backbone + stacked BiLSTMs emitting per-timestep log-probabilities.

    Output layout is (T, B, num_class) with log_softmax over classes, the
    shape CTC-style losses expect.

    Args:
        input_channel: accepted for interface compatibility; not read here
            (the backbone is hard-wired to 1-channel input).
        output_channel: accepted for interface compatibility; not read here
            (the backbone always yields 256 features).
        hidden_size: LSTM hidden size per direction.
        num_class: size of the output vocabulary.
    """

    def __init__(self, input_channel, output_channel, hidden_size, num_class):
        super().__init__()
        self.cnn = LightCNN()
        # LightCNN emits 256-dim features, hence the fixed first input width.
        self.rnn = nn.Sequential(
            BiLSTM(256, hidden_size, hidden_size),
            BiLSTM(hidden_size, hidden_size, num_class),
        )

    def forward(self, x):
        """Map (B, 1, H, W) images to (T, B, num_class) log-probabilities."""
        features = self.cnn(x)                # (B, 256, T)
        features = features.permute(0, 2, 1)  # (B, T, 256) for batch_first LSTMs
        logits = self.rnn(features)           # (B, T, num_class)
        logits = logits.permute(1, 0, 2)      # (T, B, num_class)
        return F.log_softmax(logits, dim=2)