tokenizer.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from collections import Counter
from multiprocessing import Pool
import os
import re

import torch

SPACE_NORMALIZER = re.compile(r"\s+")


def tokenize_line(line):
    # Collapse runs of whitespace to single spaces, then split on whitespace.
    line = SPACE_NORMALIZER.sub(" ", line)
    line = line.strip()
    return line.split()


def safe_readline(f):
    # If the current position falls inside a multi-byte UTF-8 character,
    # step back one byte at a time until readline() succeeds.
    pos = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            pos -= 1
            f.seek(pos)  # search where this character begins


class Tokenizer:

    @staticmethod
    def add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        # Count token frequencies in this worker's byte range of the file.
        counter = Counter()
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_workers
            offset = worker_id * chunk_size
            end = offset + chunk_size
            f.seek(offset)
            if offset > 0:
                safe_readline(f)  # drop first incomplete line
            line = f.readline()
            while line:
                for word in tokenize(line):
                    counter.update([word])
                counter.update([eos_word])
                if f.tell() > end:
                    break
                line = f.readline()
        return counter

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        # Populate dict with token counts from filename, optionally in parallel.
        def merge_result(counter):
            for w, c in counter.items():
                dict.add_symbol(w, c)

        if num_workers > 1:
            pool = Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(
                    Tokenizer.add_file_to_dictionary_single_worker,
                    (filename, tokenize, dict.eos_word, worker_id, num_workers)
                ))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Tokenizer.add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))

    @staticmethod
    def binarize(
        filename, dict, consumer, tokenize=tokenize_line, append_eos=True,
        reverse_order=False, offset=0, end=-1,
    ):
        # Convert each line in the byte range [offset, end) to a tensor of
        # dictionary indices and pass it to consumer; end <= 0 means read to EOF.
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            # Track words that were mapped to the unknown symbol.
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = Tokenizer.tokenize(
                    line=line,
                    dict=dict,
                    tokenize=tokenize,
                    add_if_not_exist=False,
                    consumer=replaced_consumer,
                    append_eos=append_eos,
                    reverse_order=reverse_order,
                )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}

    @staticmethod
    def find_offsets(filename, num_chunks):
        # Return byte offsets that split the file into num_chunks line-aligned
        # pieces; offsets[0] is 0 and the last entry stays 0 (read to EOF).
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets

    @staticmethod
    def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True,
                 consumer=None, append_eos=True, reverse_order=False):
        # Map one line of text to an IntTensor of dictionary indices.
        words = tokenize(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
        for i, word in enumerate(words):
            if add_if_not_exist:
                idx = dict.add_symbol(word)
            else:
                idx = dict.index(word)
            if consumer is not None:
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = dict.eos_index
        return ids
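
For orientation, here is a minimal sketch of how these pieces fit together: one pass over a corpus to build the vocabulary, then a chunked pass that binarizes each line into index tensors. The `dict` arguments above expect a fairseq-style dictionary object exposing `add_symbol`, `index`, `eos_word`, `eos_index`, `unk_word`, and `unk_index`; that class is not part of this file, so the `MinimalDictionary` class and the `train.txt` path below are hypothetical stand-ins for illustration only.

# Sketch only: MinimalDictionary is a hypothetical stand-in for fairseq's
# Dictionary class, implementing just the attributes tokenizer.py touches.
class MinimalDictionary:
    def __init__(self):
        self.unk_word, self.eos_word = '<unk>', '</s>'
        self.symbols, self.counts, self.indices = [], [], {}
        self.unk_index = self.add_symbol(self.unk_word)
        self.eos_index = self.add_symbol(self.eos_word)

    def add_symbol(self, word, n=1):
        # Register word if new and accumulate its count; return its index.
        if word not in self.indices:
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.counts.append(0)
        idx = self.indices[word]
        self.counts[idx] += n
        return idx

    def index(self, word):
        # Unknown words map to the <unk> index.
        return self.indices.get(word, self.unk_index)


if __name__ == '__main__':
    d = MinimalDictionary()
    num_workers = 4

    # Pass 1: count tokens across worker processes and merge into the dictionary.
    Tokenizer.add_file_to_dictionary('train.txt', d, tokenize_line, num_workers)

    # Pass 2: binarize line-aligned chunks. find_offsets leaves the last offset
    # at 0, which binarize treats as "read to end of file". The chunks run
    # sequentially here; the line-aligned offsets are what make it safe to hand
    # them to separate processes instead.
    offsets = Tokenizer.find_offsets('train.txt', num_workers)
    tensors = []
    for i in range(num_workers):
        stats = Tokenizer.binarize(
            'train.txt', d, tensors.append,
            offset=offsets[i], end=offsets[i + 1],
        )
        print(stats['nseq'], 'sentences,', stats['nunk'], 'unknown tokens')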