modeling.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import copy
import json
import math
import re

import numpy as np
import six
import tensorflow as tf


class BertConfig(object):
  """Configuration for `BertModel`."""

  def __init__(self,
               vocab_size,
               hidden_size=768,
               num_hidden_layers=12,
               num_attention_heads=12,
               intermediate_size=3072,
               hidden_act="gelu",
               hidden_dropout_prob=0.1,
               attention_probs_dropout_prob=0.1,
               max_position_embeddings=512,
               type_vocab_size=16,
               initializer_range=0.02):
    """Constructs BertConfig.

    Args:
      vocab_size: Vocabulary size of `input_ids` in `BertModel`.
      hidden_size: Size of the encoder layers and the pooler layer.
      num_hidden_layers: Number of hidden layers in the Transformer encoder.
      num_attention_heads: Number of attention heads for each attention layer
        in the Transformer encoder.
      intermediate_size: The size of the "intermediate" (i.e., feed-forward)
        layer in the Transformer encoder.
      hidden_act: The non-linear activation function (function or string) in
        the encoder and pooler.
      hidden_dropout_prob: The dropout probability for all fully connected
        layers in the embeddings, encoder, and pooler.
      attention_probs_dropout_prob: The dropout ratio for the attention
        probabilities.
      max_position_embeddings: The maximum sequence length that this model
        might ever be used with. Typically set this to something large just in
        case (e.g., 512 or 1024 or 2048).
      type_vocab_size: The vocabulary size of the `token_type_ids` passed into
        `BertModel`.
      initializer_range: The stdev of the truncated_normal_initializer for
        initializing all weight matrices.
    """
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range

  @classmethod
  def from_dict(cls, json_object):
    """Constructs a `BertConfig` from a Python dictionary of parameters."""
    config = BertConfig(vocab_size=None)
    for (key, value) in six.iteritems(json_object):
      config.__dict__[key] = value
    return config

  @classmethod
  def from_json_file(cls, json_file):
    """Constructs a `BertConfig` from a json file of parameters."""
    with tf.gfile.GFile(json_file, "r") as reader:
      text = reader.read()
    return cls.from_dict(json.loads(text))

  def to_dict(self):
    """Serializes this instance to a Python dictionary."""
    output = copy.deepcopy(self.__dict__)
    return output

  def to_json_string(self):
    """Serializes this instance to a JSON string."""
    return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"


class BertModel(object):
  """BERT model ("Bidirectional Encoder Representations from Transformers").

  Example usage:

  ```python
  # Already been converted into WordPiece token ids
  input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
  input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])

  config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
    num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)

  model = modeling.BertModel(config=config, is_training=True,
    input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)

  label_embeddings = tf.get_variable(...)
  pooled_output = model.get_pooled_output()
  logits = tf.matmul(pooled_output, label_embeddings)
  ...
  ```
  """

  def __init__(self,
               config,
               is_training,
               input_ids,
               input_mask=None,
               token_type_ids=None,
               use_one_hot_embeddings=False,
               scope=None):
    """Constructor for BertModel.

    Args:
      config: `BertConfig` instance.
      is_training: bool. true for training model, false for eval model.
        Controls whether dropout will be applied.
      input_ids: int32 Tensor of shape [batch_size, seq_length].
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
        embeddings or tf.embedding_lookup() for the word embeddings.
      scope: (optional) variable scope. Defaults to "bert".

    Raises:
      ValueError: The config is invalid or one of the input tensor shapes
        is invalid.
    """
    config = copy.deepcopy(config)
    if not is_training:
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    if input_mask is None:
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)

    if token_type_ids is None:
      token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.variable_scope(scope, default_name="bert"):
      with tf.variable_scope("embeddings"):
        # Perform embedding lookup on the word ids.
        (self.embedding_output, self.embedding_table) = embedding_lookup(
            input_ids=input_ids,
            vocab_size=config.vocab_size,
            embedding_size=config.hidden_size,
            initializer_range=config.initializer_range,
            word_embedding_name="word_embeddings",
            use_one_hot_embeddings=use_one_hot_embeddings)

        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        self.embedding_output = embedding_postprocessor(
            input_tensor=self.embedding_output,
            use_token_type=True,
            token_type_ids=token_type_ids,
            token_type_vocab_size=config.type_vocab_size,
            token_type_embedding_name="token_type_embeddings",
            use_position_embeddings=True,
            position_embedding_name="position_embeddings",
            initializer_range=config.initializer_range,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)

      with tf.variable_scope("encoder"):
        # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
        # mask of shape [batch_size, seq_length, seq_length] which is used
        # for the attention scores.
        attention_mask = create_attention_mask_from_input_mask(
            input_ids, input_mask)

        # Run the stacked transformer.
        # `sequence_output` shape = [batch_size, seq_length, hidden_size].
        self.all_encoder_layers = transformer_model(
            input_tensor=self.embedding_output,
            attention_mask=attention_mask,
            hidden_size=config.hidden_size,
            num_hidden_layers=config.num_hidden_layers,
            num_attention_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            intermediate_act_fn=get_activation(config.hidden_act),
            hidden_dropout_prob=config.hidden_dropout_prob,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            initializer_range=config.initializer_range,
            do_return_all_layers=True)

      self.sequence_output = self.all_encoder_layers[-1]
      # The "pooler" converts the encoded sequence tensor of shape
      # [batch_size, seq_length, hidden_size] to a tensor of shape
      # [batch_size, hidden_size]. This is necessary for segment-level
      # (or segment-pair-level) classification tasks where we need a fixed
      # dimensional representation of the segment.
      with tf.variable_scope("pooler"):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained.
        first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
        self.pooled_output = tf.layers.dense(
            first_token_tensor,
            config.hidden_size,
            activation=tf.tanh,
            kernel_initializer=create_initializer(config.initializer_range))

  def get_pooled_output(self):
    return self.pooled_output

  def get_sequence_output(self):
    """Gets final hidden layer of encoder.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the final hidden layer of the transformer encoder.
    """
    return self.sequence_output

  def get_all_encoder_layers(self):
    return self.all_encoder_layers

  def get_embedding_output(self):
    """Gets output of the embedding lookup (i.e., input to the transformer).

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the output of the embedding layer, after summing the word
      embeddings with the positional embeddings and the token type embeddings,
      then performing layer normalization. This is the input to the transformer.
    """
    return self.embedding_output

  def get_embedding_table(self):
    return self.embedding_table


def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf


def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return `activation_string`.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """

  # We assume that anything that's not a string is already an activation
  # function, so we just return it.
  if not isinstance(activation_string, six.string_types):
    return activation_string

  if not activation_string:
    return None

  act = activation_string.lower()
  if act == "linear":
    return None
  elif act == "relu":
    return tf.nn.relu
  elif act == "gelu":
    return gelu
  elif act == "tanh":
    return tf.tanh
  else:
    raise ValueError("Unsupported activation: %s" % act)


def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
  """Compute the union of the current variables and checkpoint variables."""
  assignment_map = {}
  initialized_variable_names = {}

  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var

  init_vars = tf.train.list_variables(init_checkpoint)

  assignment_map = collections.OrderedDict()
  for x in init_vars:
    (name, var) = (x[0], x[1])
    if name not in name_to_variable:
      continue
    assignment_map[name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[name + ":0"] = 1

  return (assignment_map, initialized_variable_names)


def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT of
      *keeping* a dimension as in `tf.nn.dropout`).

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor

  output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
  return output


def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor."""
  return tf.contrib.layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)


def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Runs layer normalization followed by dropout."""
  output_tensor = layer_norm(input_tensor, name)
  output_tensor = dropout(output_tensor, dropout_prob)
  return output_tensor


def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range."""
  return tf.truncated_normal_initializer(stddev=initializer_range)


def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up word embeddings for an id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
      ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, use one-hot method for word
      embeddings. If False, use `tf.gather()`.

  Returns:
    float Tensor of shape [batch_size, seq_length, embedding_size].
  """
  # This function assumes that the input is of shape [batch_size, seq_length,
  # num_inputs].
  #
  # If the input is a 2D tensor of shape [batch_size, seq_length], we
  # reshape to [batch_size, seq_length, 1].
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])

  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))

  flat_input_ids = tf.reshape(input_ids, [-1])
  if use_one_hot_embeddings:
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
  else:
    output = tf.gather(embedding_table, flat_input_ids)

  input_shape = get_shape_list(input_ids)

  output = tf.reshape(output,
                      input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return (output, embedding_table)


def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]

  output = input_tensor

  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings

  if use_position_embeddings:
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`),
      # so we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output


def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Create 3D attention mask from a 2D tensor mask.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  batch_size = from_shape[0]
  from_seq_length = from_shape[1]

  to_shape = get_shape_list(to_mask, expected_rank=2)
  to_seq_length = to_shape[1]

  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
  #
  # `broadcast_ones` = [batch_size, from_seq_length, 1]
  broadcast_ones = tf.ones(
      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

  # Here we broadcast along two dimensions to create the mask.
  mask = broadcast_ones * to_mask

  return mask


def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  In practice, the multi-headed attention is done with transposes and
  reshapes rather than actual separate tensors.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions
      in the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    size_per_head: int. Size of each attention head.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
      * from_seq_length, num_attention_heads * size_per_head]. If False, the
      output will be of shape [batch_size, from_seq_length, num_attention_heads
      * size_per_head].
    batch_size: (Optional) int. If the input is 2D, this might be the batch
      size of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq
      length of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length,
      num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
      true, this will be of shape [batch_size * from_seq_length,
      num_attention_heads * size_per_head]).

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """

  def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                           seq_length, width):
    output_tensor = tf.reshape(
        input_tensor, [batch_size, seq_length, num_attention_heads, width])

    output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
    return output_tensor

  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")

  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or
        to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")

  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`

  from_tensor_2d = reshape_to_matrix(from_tensor)
  to_tensor_2d = reshape_to_matrix(to_tensor)

  # `query_layer` = [B*F, N*H]
  query_layer = tf.layers.dense(
      from_tensor_2d,
      num_attention_heads * size_per_head,
      activation=query_act,
      name="query",
      kernel_initializer=create_initializer(initializer_range))

  # `key_layer` = [B*T, N*H]
  key_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=key_act,
      name="key",
      kernel_initializer=create_initializer(initializer_range))

  # `value_layer` = [B*T, N*H]
  value_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=value_act,
      name="value",
      kernel_initializer=create_initializer(initializer_range))

  # `query_layer` = [B, N, F, H]
  query_layer = transpose_for_scores(query_layer, batch_size,
                                     num_attention_heads, from_seq_length,
                                     size_per_head)

  # `key_layer` = [B, N, T, H]
  key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                   to_seq_length, size_per_head)

  # Take the dot product between "query" and "key" to get the raw
  # attention scores.
  # `attention_scores` = [B, N, F, T]
  attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
  attention_scores = tf.multiply(attention_scores,
                                 1.0 / math.sqrt(float(size_per_head)))

  if attention_mask is not None:
    # `attention_mask` = [B, 1, F, T]
    attention_mask = tf.expand_dims(attention_mask, axis=[1])

    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0

    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    attention_scores += adder

  # Normalize the attention scores to probabilities.
  # `attention_probs` = [B, N, F, T]
  attention_probs = tf.nn.softmax(attention_scores)

  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

  # `value_layer` = [B, T, N, H]
  value_layer = tf.reshape(
      value_layer,
      [batch_size, to_seq_length, num_attention_heads, size_per_head])

  # `value_layer` = [B, N, T, H]
  value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

  # `context_layer` = [B, N, F, H]
  context_layer = tf.matmul(attention_probs, value_layer)

  # `context_layer` = [B, F, N, H]
  context_layer = tf.transpose(context_layer, [0, 2, 1, 3])

  if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size * from_seq_length, num_attention_heads * size_per_head])
  else:
    # `context_layer` = [B, F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size, from_seq_length, num_attention_heads * size_per_head])

  return context_layer


def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))

  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  input_width = input_shape[2]

  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))

  # We keep the representation as a 2D tensor to avoid re-shaping it back and
  # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
  # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
  # help the optimizer.
  prev_output = reshape_to_matrix(input_tensor)

  all_layer_outputs = []
  for layer_idx in range(num_hidden_layers):
    with tf.variable_scope("layer_%d" % layer_idx):
      layer_input = prev_output

      with tf.variable_scope("attention"):
        attention_heads = []
        with tf.variable_scope("self"):
          attention_head = attention_layer(
              from_tensor=layer_input,
              to_tensor=layer_input,
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range,
              do_return_2d_tensor=True,
              batch_size=batch_size,
              from_seq_length=seq_length,
              to_seq_length=seq_length)
          attention_heads.append(attention_head)

        attention_output = None
        if len(attention_heads) == 1:
          attention_output = attention_heads[0]
        else:
          # In the case where we have other sequences, we just concatenate
          # them to the self-attention head before the projection.
          attention_output = tf.concat(attention_heads, axis=-1)

        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.variable_scope("output"):
          attention_output = tf.layers.dense(
              attention_output,
              hidden_size,
              kernel_initializer=create_initializer(initializer_range))
          attention_output = dropout(attention_output, hidden_dropout_prob)
          attention_output = layer_norm(attention_output + layer_input)

      # The activation is only applied to the "intermediate" hidden layer.
      with tf.variable_scope("intermediate"):
        intermediate_output = tf.layers.dense(
            attention_output,
            intermediate_size,
            activation=intermediate_act_fn,
            kernel_initializer=create_initializer(initializer_range))

      # Down-project back to `hidden_size` then add the residual.
      with tf.variable_scope("output"):
        layer_output = tf.layers.dense(
            intermediate_output,
            hidden_size,
            kernel_initializer=create_initializer(initializer_range))
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_norm(layer_output + attention_output)
        prev_output = layer_output
        all_layer_outputs.append(layer_output)

  if do_return_all_layers:
    final_outputs = []
    for layer_output in all_layer_outputs:
      final_output = reshape_from_matrix(layer_output, input_shape)
      final_outputs.append(final_output)
    return final_outputs
  else:
    final_output = reshape_from_matrix(prev_output, input_shape)
    return final_output


def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if name is None:
    name = tensor.name

  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  shape = tensor.shape.as_list()

  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)

  if not non_static_indexes:
    return shape

  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape


def reshape_to_matrix(input_tensor):
  """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
  ndims = input_tensor.shape.ndims
  if ndims < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if ndims == 2:
    return input_tensor

  width = input_tensor.shape[-1]
  output_tensor = tf.reshape(input_tensor, [-1, width])
  return output_tensor


def reshape_from_matrix(output_tensor, orig_shape_list):
  """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
  if len(orig_shape_list) == 2:
    return output_tensor

  output_shape = get_shape_list(output_tensor)

  orig_dims = orig_shape_list[0:-1]
  width = output_shape[-1]

  return tf.reshape(output_tensor, orig_dims + [width])


def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  if name is None:
    name = tensor.name

  expected_rank_dict = {}
  if isinstance(expected_rank, six.integer_types):
    expected_rank_dict[expected_rank] = True
  else:
    for x in expected_rank:
      expected_rank_dict[x] = True

  actual_rank = tensor.shape.ndims
  if actual_rank not in expected_rank_dict:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
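

# A minimal usage sketch, assuming TensorFlow 1.x graph mode; the config and
# checkpoint paths below are placeholders for illustration only and are not
# shipped with this module. It shows how `get_assignment_map_from_checkpoint`
# is typically paired with `tf.train.init_from_checkpoint` to warm-start the
# graph variables from a pre-trained BERT checkpoint.
if __name__ == "__main__":
  bert_config = BertConfig.from_json_file("bert_config.json")  # hypothetical path
  input_ids = tf.placeholder(tf.int32, shape=[None, 128], name="input_ids")
  model = BertModel(config=bert_config, is_training=False, input_ids=input_ids)

  init_checkpoint = "bert_model.ckpt"  # hypothetical checkpoint prefix
  tvars = tf.trainable_variables()
  (assignment_map, initialized_variable_names) = (
      get_assignment_map_from_checkpoint(tvars, init_checkpoint))
  # Remap matching checkpoint variables onto the freshly built graph variables.
  tf.train.init_from_checkpoint(init_checkpoint, assignment_map)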