monolingual_dataset.py

# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import numpy as np
import torch

from . import data_utils, FairseqDataset


def collate(samples, pad_idx, eos_idx):
    if len(samples) == 0:
        return {}

    def merge(key, is_list=False):
        if is_list:
            res = []
            for i in range(len(samples[0][key])):
                res.append(data_utils.collate_tokens(
                    [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
                ))
            return res
        else:
            return data_utils.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
            )

    is_target_list = isinstance(samples[0]['target'], list)

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': merge('source'),
            'src_lengths': torch.LongTensor([
                s['source'].numel() for s in samples
            ]),
        },
        'target': merge('target', is_target_list),
    }


class MonolingualDataset(FairseqDataset):
    """
    A wrapper around torch.utils.data.Dataset for monolingual data.

    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap
        sizes (List[int]): sentence lengths
        vocab (~fairseq.data.Dictionary): vocabulary
        shuffle (bool, optional): shuffle the elements before batching
            (default: True).
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,
                 targets=None):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle

        assert targets is None or all(t in {'self', 'future', 'past'} for t in targets), \
            "targets must be none or one of 'self', 'future', 'past'"
        if targets is not None and len(targets) == 0:
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        source, future_target, past_target = self.dataset[index]
        source, target = self._make_source_target(source, future_target, past_target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        if self.targets is not None:
            target = []

            if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \
                    and source[-1] != self.vocab.eos():
                # append eos at the end of source
                source = torch.cat([source, source.new([self.vocab.eos()])])

                if 'future' in self.targets:
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if 'past' in self.targets:
                    # first token is before the start of sentence which is only used in "none" break mode when
                    # add_eos_for_other_targets is False
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])

            for t in self.targets:
                if t == 'self':
                    target.append(source)
                elif t == 'future':
                    target.append(future_target)
                elif t == 'past':
                    target.append(past_target)
                else:
                    raise Exception('invalid target ' + t)

            if len(target) == 1:
                target = target[0]
        else:
            target = future_target

        return source, self._filter_vocab(target)

    def _filter_vocab(self, target):
        if len(self.tgt_vocab) != len(self.vocab):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target

            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the right.

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the right.
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def get_dummy_batch(self, num_tokens, max_positions, tgt_len=128):
        """Return a dummy batch with a given number of tokens."""
        if isinstance(max_positions, float) or isinstance(max_positions, int):
            tgt_len = min(tgt_len, max_positions)
        bsz = max(num_tokens // tgt_len, 1)
        target = self.vocab.dummy_sentence(tgt_len + 2)
        source, past_target, future_target = target[1:-1], target[2:], target[:-2]
        source, target = self._make_source_target(source, past_target, future_target)
        return self.collater([
            {'id': i, 'source': source, 'target': target}
            for i in range(bsz)
        ])

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
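
For context, the following is a minimal usage sketch of the constructor and collater shown above. It assumes fairseq is installed and importable; the ToyBlocks wrapper, the toy vocabulary, and the choice of targets=['future'] are illustrative assumptions, not part of this file (in practice the wrapped dataset would typically be something like a TokenBlockDataset yielding (source, future_target, past_target) triples).

# usage_sketch.py -- illustrative only, not part of monolingual_dataset.py
import torch
from fairseq.data import Dictionary, MonolingualDataset


class ToyBlocks:
    """Hypothetical stand-in for the wrapped dataset: each item is a
    (source, future_target, past_target) triple of 1D LongTensors."""

    def __init__(self, blocks):
        self.blocks = blocks

    def __getitem__(self, index):
        tokens = self.blocks[index]
        # source predicts the next token (future target); the past target is a
        # right-shifted copy, padded at the front with the first token
        return tokens[:-1], tokens[1:], torch.cat([tokens[:1], tokens[:-2]])

    def __len__(self):
        return len(self.blocks)


# Build a tiny vocabulary and two token blocks ending in EOS.
vocab = Dictionary()
for word in ['hello', 'world', 'again']:
    vocab.add_symbol(word)

blocks = [
    torch.LongTensor([vocab.index('hello'), vocab.index('world'), vocab.eos()]),
    torch.LongTensor([vocab.index('again'), vocab.index('world'), vocab.eos()]),
]

dataset = MonolingualDataset(
    dataset=ToyBlocks(blocks),
    sizes=[len(b) - 1 for b in blocks],   # source length per block
    src_vocab=vocab,
    tgt_vocab=vocab,
    add_eos_for_other_targets=False,
    shuffle=False,
    targets=['future'],                   # next-token prediction only
)

# Collate all examples into one right-padded mini-batch.
samples = [dataset[i] for i in dataset.ordered_indices()]
batch = dataset.collater(samples)
print(batch['net_input']['src_tokens'].shape, batch['target'].shape)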