tokenization.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import unicodedata
import six
import tensorflow as tf


def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary."""
  vocab = collections.OrderedDict()
  index = 0
  with tf.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab


def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  return [vocab[item] for item in items]


def convert_tokens_to_ids(vocab, tokens):
  return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
  return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text."""
  text = text.strip()
  if not text:
    return []
  tokens = text.split()
  return tokens
class FullTokenizer(object):
  """Runs end-to-end tokenization."""

  def __init__(self, vocab_file, do_lower_case=True):
    self.vocab = load_vocab(vocab_file)
    self.inv_vocab = {v: k for k, v in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      for sub_token in self.wordpiece_tokenizer.tokenize(token):
        split_tokens.append(sub_token)
    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
    """
    self.do_lower_case = do_lower_case

  def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = convert_to_unicode(text)
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
    text = self._tokenize_chinese_chars(text)

    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      split_tokens.extend(self._run_split_on_punc(token))

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1

    return ["".join(x) for x in output]

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean
    # characters, despite its name. The modern Korean Hangul alphabet is a
    # different block, as are Japanese Hiragana and Katakana. Those alphabets
    # are used to write space-separated words, so they are not treated
    # specially and are handled like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
        (cp >= 0x3400 and cp <= 0x4DBF) or  #
        (cp >= 0x20000 and cp <= 0x2A6DF) or  #
        (cp >= 0x2A700 and cp <= 0x2B73F) or  #
        (cp >= 0x2B740 and cp <= 0x2B81F) or  #
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or  #
        (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
      return True

    return False

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)
class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """

    text = convert_to_unicode(text)

    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue

      is_bad = False
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end

      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens
def _is_whitespace(char):
  """Checks whether `char` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
  # as whitespace since they are generally considered as such.
  if char == " " or char == "\t" or char == "\n" or char == "\r":
    return True
  cat = unicodedata.category(char)
  if cat == "Zs":
    return True
  return False


def _is_control(char):
  """Checks whether `char` is a control character."""
  # These are technically control characters but we count them as whitespace
  # characters.
  if char == "\t" or char == "\n" or char == "\r":
    return False
  cat = unicodedata.category(char)
  if cat.startswith("C"):
    return True
  return False


def _is_punctuation(char):
  """Checks whether `char` is a punctuation character."""
  cp = ord(char)
  # We treat all non-letter/number ASCII as punctuation.
  # Characters such as "^", "$", and "`" are not in the Unicode
  # Punctuation class but we treat them as punctuation anyways, for
  # consistency.
  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
    return True
  cat = unicodedata.category(char)
  if cat.startswith("P"):
    return True
  return False
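

# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It assumes a WordPiece
# vocabulary file at the hypothetical path "vocab.txt" (e.g. the vocab shipped
# with a BERT checkpoint) and an environment where `tf.gfile` is available
# (TF 1.x style). The exact word pieces produced depend on the vocabulary.
if __name__ == "__main__":
  tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)

  # Basic tokenization (punctuation splitting, lower casing, accent stripping)
  # followed by greedy longest-match-first WordPiece tokenization.
  tokens = tokenizer.tokenize(u"John Johanson's house")
  # With the standard uncased English vocab this typically looks like
  # ["john", "johan", "##son", "'", "s", "house"].
  print(tokens)

  # Map tokens to vocabulary ids and back.
  ids = tokenizer.convert_tokens_to_ids(tokens)
  print(ids)
  print(tokenizer.convert_ids_to_tokens(ids))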