Is it an error, or was it implemented like this for simplification purposes?
Paper:
![image](https://user-images.githubusercontent.com/30293331/120087979-bebffc80-c0ec-11eb-90aa-20b9991f5949.png)
Code:

```python
class Decoder(nn.Module):

    def __init__(self, input_size, hidden_dim, output_size, NUM_LAYERS):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.hidden_dim = hidden_dim
        self.output_size = output_size

        self.lstm = nn.LSTM(input_size, hidden_dim, num_layers=NUM_LAYERS)
        self.hidden2label = nn.Linear(hidden_dim, output_size)
        self.init_weight()

    def forward(self, inputs):
        self.lstm.flatten_parameters()
        lstm_out, self.hidden = self.lstm(inputs, None)
        y = self.hidden2label(lstm_out)
        return y
```
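For context, here is a minimal sketch of what this forward pass produces on dummy data. It assumes the class above is defined as quoted (with `import torch` / `import torch.nn as nn` from the original file) and that `init_weight`, which is called in `__init__` but not shown in the snippet, exists in the repo; the sizes below are made up purely for illustration:

```python
import torch
import torch.nn as nn  # required by the snippet above

# init_weight() is called in __init__ but not included in the snippet;
# stub it out here so the sketch runs standalone (assumption).
Decoder.init_weight = lambda self: None

# Made-up sizes, for illustration only.
seq_len, batch, input_size, hidden_dim, output_size = 7, 4, 16, 32, 10

decoder = Decoder(input_size, hidden_dim, output_size, NUM_LAYERS=1)

# nn.LSTM defaults to batch_first=False, so inputs are (seq_len, batch, input_size).
inputs = torch.randn(seq_len, batch, input_size)
y = decoder(inputs)

print(y.shape)  # torch.Size([7, 4, 10])
```

As the shapes show, `hidden2label` is applied to every timestep of `lstm_out` rather than only to the final hidden state, and the initial hidden state passed to the LSTM is `None` (i.e. zeros).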