lstm.py

import yaml
import torch
from torch import nn
from torch.nn import init

from model.att import Attention
from model.base import ModelBase

# Load hyperparameters from the project's params file.
with open('params.yaml', 'r') as f:
    PARAMS = yaml.safe_load(f)

if torch.cuda.is_available():
    DEVICE = torch.device('cuda', PARAMS.get('gpu', 0))
else:
    DEVICE = torch.device('cpu')


class Model(ModelBase):
    """Bidirectional LSTM with attention for binary text classification."""

    def __init__(self, vocab_size, embed_dim, hidden_size, n_layers, dropout, num_classes, attention_method,
                 padding_idx, *args, **kwargs):
        super(Model, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.lstm = nn.LSTM(embed_dim, hidden_size, n_layers, dropout=dropout, bidirectional=True)
        # The LSTM is bidirectional, so attention and the classifier see 2 * hidden_size features.
        self.attn = Attention(2 * hidden_size, attention_method)
        self.fc = nn.Linear(2 * hidden_size, 1)  # single sigmoid unit for binary classification
        self.init_weights()

    def init_weights(self):
        nn.init.xavier_normal_(self.embedding.weight)
        # Orthogonal init for the LSTM weight matrices, zeros for the biases.
        for param in self.lstm.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                nn.init.zeros_(param.data)
        nn.init.kaiming_normal_(self.fc.weight, mode='fan_out', nonlinearity='sigmoid')
        nn.init.constant_(self.fc.bias, 0)

    def forward(self, text, text_lengths, hidden=None):
        # text = [L x B]
        # Sort the batch by length (required by pack_padded_sequence) and keep the
        # inverse permutation so predictions can be restored to the input order.
        sorted_lengths, sorted_idx = text_lengths.sort(descending=True)
        _, unsorted_idx = sorted_idx.sort()
        sorted_text = torch.index_select(text, -1, sorted_idx)
        emb = self.embedding(sorted_text)
        # Lengths must live on the CPU for pack_padded_sequence.
        packed = nn.utils.rnn.pack_padded_sequence(emb, sorted_lengths.to(torch.device('cpu'), copy=True))
        outputs, hidden = self.lstm(packed, hidden)
        hidden_state, cell_state = hidden
        # hidden_state = [n_layers * 2 x B x hidden_size]; concatenate the final
        # forward and backward states of the top layer into [B x 2 * hidden_size].
        hidden_state = torch.cat((hidden_state[-2, :, :], hidden_state[-1, :, :]), dim=1)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
        attn_weights = self.attn(hidden_state, outputs)
        # attn_weights = [B x 1 x L]
        context = torch.bmm(attn_weights, outputs.transpose(0, 1)).squeeze(1)
        pred = self.fc(context).sigmoid()
        # Undo the length sort so predictions line up with the input batch.
        pred = torch.index_select(pred, 0, unsorted_idx)
        return pred

    def load_model(self, model_path):
        self.load_state_dict(torch.load(model_path))
        self.eval()

    def save_model(self, model_path):
        torch.save(self.state_dict(), model_path)