tokenization_test.py

# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile

import tokenization
import six
import tensorflow as tf


class TokenizationTest(tf.test.TestCase):

  def test_full_tokenizer(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    # Write a throwaway vocab file, build a FullTokenizer from it, then remove it.
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
      if six.PY2:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
      else:
        vocab_writer.write("".join(
            [x + "\n" for x in vocab_tokens]).encode("utf-8"))

      vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    # Ids follow the positions of the tokens in vocab_tokens above.
    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

  def test_chinese(self):
    tokenizer = tokenization.BasicTokenizer()

    self.assertAllEqual(
        tokenizer.tokenize(u"ah\u535A\u63A8zz"),
        [u"ah", u"\u535A", u"\u63A8", u"zz"])

  def test_basic_tokenizer_lower(self):
    tokenizer = tokenization.BasicTokenizer(do_lower_case=True)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["hello", "!", "how", "are", "you", "?"])
    self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

  def test_basic_tokenizer_no_lower(self):
    tokenizer = tokenization.BasicTokenizer(do_lower_case=False)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
        ["HeLLo", "!", "how", "Are", "yoU", "?"])

  def test_wordpiece_tokenizer(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i
    tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)

    self.assertAllEqual(tokenizer.tokenize(""), [])

    self.assertAllEqual(
        tokenizer.tokenize("unwanted running"),
        ["un", "##want", "##ed", "runn", "##ing"])

    # A word that cannot be fully covered by vocab pieces maps to [UNK].
    self.assertAllEqual(
        tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

  def test_convert_tokens_to_ids(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i

    self.assertAllEqual(
        tokenization.convert_tokens_to_ids(
            vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])

  def test_is_whitespace(self):
    self.assertTrue(tokenization._is_whitespace(u" "))
    self.assertTrue(tokenization._is_whitespace(u"\t"))
    self.assertTrue(tokenization._is_whitespace(u"\r"))
    self.assertTrue(tokenization._is_whitespace(u"\n"))
    self.assertTrue(tokenization._is_whitespace(u"\u00A0"))

    self.assertFalse(tokenization._is_whitespace(u"A"))
    self.assertFalse(tokenization._is_whitespace(u"-"))

  def test_is_control(self):
    self.assertTrue(tokenization._is_control(u"\u0005"))

    self.assertFalse(tokenization._is_control(u"A"))
    self.assertFalse(tokenization._is_control(u" "))
    self.assertFalse(tokenization._is_control(u"\t"))
    self.assertFalse(tokenization._is_control(u"\r"))
    self.assertFalse(tokenization._is_control(u"\U0001F4A9"))

  def test_is_punctuation(self):
    self.assertTrue(tokenization._is_punctuation(u"-"))
    self.assertTrue(tokenization._is_punctuation(u"$"))
    self.assertTrue(tokenization._is_punctuation(u"`"))
    self.assertTrue(tokenization._is_punctuation(u"."))

    self.assertFalse(tokenization._is_punctuation(u"A"))
    self.assertFalse(tokenization._is_punctuation(u" "))


if __name__ == "__main__":
  tf.test.main()
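
A note on usage: running the file directly (python tokenization_test.py) hands control to tf.test.main(), which discovers and runs the TokenizationTest cases above. Outside of the tests, the FullTokenizer exercised here is what BERT's preprocessing uses end to end: it applies BasicTokenizer (whitespace and punctuation splitting, with lowercasing and accent stripping when do_lower_case is set) and then WordpieceTokenizer to each resulting word. The lines below are a minimal sketch, assuming the BERT repo's tokenization.py is importable and that "vocab.txt" is a placeholder path to a BERT-style vocabulary file (one token per line), not something defined in this test file.

import tokenization

# Placeholder vocabulary path; any BERT-style vocab file works here.
tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)

tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")   # wordpiece strings
token_ids = tokenizer.convert_tokens_to_ids(tokens)     # integer ids from the vocab
print(tokens, token_ids)

The exact wordpieces depend on the vocabulary used; with the tiny vocab built inside test_full_tokenizer, the same input splits into ["un", "##want", "##ed", ",", "runn", "##ing"].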