interactive.py
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""

from collections import namedtuple
import fileinput
import sys

import numpy as np
import torch

from fairseq import data, options, tasks, tokenizer, utils
from fairseq.sequence_generator import SequenceGenerator
from fairseq.utils import import_user_module


Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')


def buffered_read(input, buffer_size):
    buffer = []
    for src_str in fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")):
        buffer.append(src_str.strip())
        if len(buffer) >= buffer_size:
            yield buffer
            buffer = []

    if len(buffer) > 0:
        yield buffer


def make_batches(lines, args, task, max_positions):
    tokens = [
        tokenizer.Tokenizer.tokenize(src_str, task.source_dictionary, add_if_not_exist=False).long()
        for src_str in lines
    ]
    lengths = np.array([t.numel() for t in tokens])
    itr = task.get_batch_iterator(
        dataset=data.LanguagePairDataset(tokens, lengths, task.source_dictionary),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
    ).next_epoch_itr(shuffle=False)
    for batch in itr:
        yield Batch(
            srcs=[lines[i] for i in batch['id']],
            tokens=batch['net_input']['src_tokens'],
            lengths=batch['net_input']['src_lengths'],
        ), batch['id']


def main(args):
    import_user_module(args)

    if args.buffer_size < 1:
        args.buffer_size = 1
    if args.max_tokens is None and args.max_sentences is None:
        args.max_sentences = 1

    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
        '--max-sentences/--batch-size cannot be larger than --buffer-size'

    print(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Setup task, e.g., translation
    task = tasks.setup_task(args)

    # Load ensemble
    print('| loading model(s) from {}'.format(args.path))
    models, _model_args = utils.load_ensemble_for_inference(
        args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides),
    )

    # Set dictionaries
    tgt_dict = task.target_dictionary

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()

    # Initialize generator
    translator = SequenceGenerator(
        models, tgt_dict, beam_size=args.beam, minlen=args.min_len,
        stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
        len_penalty=args.lenpen, unk_penalty=args.unkpen,
        sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature,
        diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength,
        match_source_len=args.match_source_len, no_repeat_ngram_size=args.no_repeat_ngram_size,
    )

    if use_cuda:
        translator.cuda()

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

    def make_result(src_str, hypos):
        result = Translation(
            src_str='O\t{}'.format(src_str),
            hypos=[],
            pos_scores=[],
            alignments=[],
        )

        # Process top predictions
        for hypo in hypos[:min(len(hypos), args.nbest)]:
            hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                hypo_tokens=hypo['tokens'].int().cpu(),
                src_str=src_str,
                alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
                align_dict=align_dict,
                tgt_dict=tgt_dict,
                remove_bpe=args.remove_bpe,
            )
            result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str))
            result.pos_scores.append('P\t{}'.format(
                ' '.join(map(
                    lambda x: '{:.4f}'.format(x),
                    hypo['positional_scores'].tolist(),
                ))
            ))
            result.alignments.append(
                'A\t{}'.format(' '.join(map(lambda x: str(utils.item(x)), alignment)))
                if args.print_alignment else None
            )
        return result

    def process_batch(batch):
        tokens = batch.tokens
        lengths = batch.lengths

        if use_cuda:
            tokens = tokens.cuda()
            lengths = lengths.cuda()

        encoder_input = {'src_tokens': tokens, 'src_lengths': lengths}
        translations = translator.generate(
            encoder_input,
            maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
        )

        return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]

    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        *[model.max_positions() for model in models]
    )

    if args.buffer_size > 1:
        print('| Sentence buffer size:', args.buffer_size)
    print('| Type the input sentence and press return:')
    for inputs in buffered_read(args.input, args.buffer_size):
        indices = []
        results = []
        for batch, batch_indices in make_batches(inputs, args, task, max_positions):
            indices.extend(batch_indices)
            results.extend(process_batch(batch))

        for i in np.argsort(indices):
            result = results[i]
            print(result.src_str)
            for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments):
                print(hypo)
                print(pos_scores)
                if align is not None:
                    print(align)


def cli_main():
    parser = options.get_generation_parser(interactive=True)
    args = options.parse_args_and_arch(parser)
    main(args)


if __name__ == '__main__':
    cli_main()
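
Note: this is fairseq's interactive generation entry point (circa v0.6). It reads raw source sentences from --input ('-', i.e. stdin, by default), tokenizes and batches them on the fly, runs beam search through SequenceGenerator, and prints O/H/P (and optionally A) lines: the original sentence, each hypothesis with its score, the per-token positional scores, and the alignment. A minimal sketch of driving it programmatically instead of from the shell; the data-bin directory and checkpoint path below are hypothetical placeholders, not files from this repo:

    import sys
    from interactive import cli_main  # assumes this file is importable as 'interactive'

    # Build an argv the way the CLI would; every path here is a placeholder.
    sys.argv = [
        'interactive.py',
        'data-bin/wmt14.en-fr',                      # dir with the preprocessed dictionaries
        '--path', 'checkpoints/checkpoint_best.pt',  # trained model checkpoint
        '--beam', '5',
        '--nbest', '1',
        '--buffer-size', '16',
        '--max-sentences', '8',  # must not exceed --buffer-size (asserted in main)
    ]
    cli_main()  # then type a source sentence and press return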
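
The buffered_read helper is what lets the script translate a piped-in file in batches rather than line by line: it accumulates stripped input lines and yields them in chunks of at most buffer_size. A quick illustration of that grouping (assumed usage, not from the repo; fileinput treats '-' as stdin, where the encoding openhook is skipped):

    from interactive import buffered_read

    # Run as: printf 'a\nb\nc\nd\ne\n' | python demo.py
    for chunk in buffered_read('-', buffer_size=3):
        print(len(chunk), chunk)
    # expected: 3 ['a', 'b', 'c'] from the full buffer,
    # then 2 ['d', 'e'] from the final partial flush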