generate.py 6.6 KB

#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#

import torch

from fairseq import bleu, data, options, tokenizer, utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_generator import SequenceGenerator


def main():
    parser = options.get_parser('Generation')
    parser.add_argument('--path', metavar='FILE', required=True, action='append',
                        help='path(s) to model file(s)')
    dataset_args = options.add_dataset_args(parser)
    dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
                              help='batch size')
    dataset_args.add_argument('--gen-subset', default='test', metavar='SPLIT',
                              help='data subset to generate (train, valid, test)')
    dataset_args.add_argument('--num-shards', default=1, type=int, metavar='N',
                              help='shard generation over N shards')
    dataset_args.add_argument('--shard-id', default=0, type=int, metavar='ID',
                              help='id of the shard to generate (id < num_shards)')
    options.add_generation_args(parser)

    args = parser.parse_args()
    if args.no_progress_bar and args.log_format is None:
        args.log_format = 'none'
    print(args)

    use_cuda = torch.cuda.is_available() and not args.cpu
    if hasattr(torch, 'set_grad_enabled'):
        torch.set_grad_enabled(False)

    # Load dataset
    if args.replace_unk is None:
        dataset = data.load_dataset(args.data, [args.gen_subset], args.source_lang, args.target_lang)
    else:
        dataset = data.load_raw_text_dataset(args.data, [args.gen_subset], args.source_lang, args.target_lang)
    if args.source_lang is None or args.target_lang is None:
        # record inferred languages in args
        args.source_lang, args.target_lang = dataset.src, dataset.dst

    # Load ensemble
    print('| loading model(s) from {}'.format(', '.join(args.path)))
    models, _ = utils.load_ensemble_for_inference(args.path, dataset.src_dict, dataset.dst_dict)

    print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
    print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
    print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset.splits[args.gen_subset])))

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam)

    # Initialize generator
    translator = SequenceGenerator(
        models, beam_size=args.beam, stop_early=(not args.no_early_stop),
        normalize_scores=(not args.unnormalized), len_penalty=args.lenpen,
        unk_penalty=args.unkpen)
    if use_cuda:
        translator.cuda()

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

    # Generate and compute BLEU score
    scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
    max_positions = min(model.max_encoder_positions() for model in models)
    itr = dataset.eval_dataloader(
        args.gen_subset, max_sentences=args.batch_size, max_positions=max_positions,
        skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test)
    if args.num_shards > 1:
        if args.shard_id < 0 or args.shard_id >= args.num_shards:
            raise ValueError('--shard-id must be between 0 and num_shards')
        itr = data.sharded_iterator(itr, args.num_shards, args.shard_id)

    num_sentences = 0
    with utils.build_progress_bar(args, itr) as t:
        wps_meter = TimeMeter()
        gen_timer = StopwatchMeter()
        translations = translator.generate_batched_itr(
            t, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b,
            cuda_device=0 if use_cuda else None, timer=gen_timer)
        for sample_id, src_tokens, target_tokens, hypos in translations:
            # Process input and ground truth
            target_tokens = target_tokens.int().cpu()
            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = dataset.splits[args.gen_subset].src.get_original_text(sample_id)
                target_str = dataset.splits[args.gen_subset].dst.get_original_text(sample_id)
            else:
                src_str = dataset.src_dict.string(src_tokens, args.remove_bpe)
                target_str = dataset.dst_dict.string(target_tokens, args.remove_bpe, escape_unk=True)

            if not args.quiet:
                print('S-{}\t{}'.format(sample_id, src_str))
                print('T-{}\t{}'.format(sample_id, target_str))

            # Process top predictions
            for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'].int().cpu(),
                    align_dict=align_dict,
                    dst_dict=dataset.dst_dict,
                    remove_bpe=args.remove_bpe)

                if not args.quiet:
                    print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
                    print('A-{}\t{}'.format(sample_id, ' '.join(map(str, alignment))))

                # Score only the top hypothesis
                if i == 0:
                    if align_dict is not None or args.remove_bpe is not None:
                        # Convert back to tokens for evaluation with unk replacement and/or without BPE
                        target_tokens = tokenizer.Tokenizer.tokenize(target_str,
                                                                     dataset.dst_dict,
                                                                     add_if_not_exist=True)
                    scorer.add(target_tokens, hypo_tokens)

            wps_meter.update(src_tokens.size(0))
            t.log({'wps': round(wps_meter.avg)})
            num_sentences += 1

    print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} tokens/s)'.format(
        num_sentences, gen_timer.n, gen_timer.sum, 1. / gen_timer.avg))
    print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))


if __name__ == '__main__':
    main()
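A minimal invocation sketch for this script. The data directory and checkpoint path below are placeholders, and the data directory is assumed to be the positional argument registered by options.add_dataset_args; --path, --batch-size and --gen-subset are defined in this script, while --beam and --remove-bpe are generation options referenced above as args.beam and args.remove_bpe:

    python generate.py data-bin/wmt14.en-fr \
        --path checkpoints/checkpoint_best.pt \
        --gen-subset test --batch-size 32 --beam 5 --remove-bpe

To split generation across several workers, each worker can pass the same command with --num-shards N and a distinct --shard-id, as handled by data.sharded_iterator in the script.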