
style_learn.py

"""Train a Keras LSTM sequence tagger on (token, label) pairs stored as JSON
files inside a zip archive, then save the model, vocabularies, and metrics."""
import collections
import json
import os
from zipfile import ZipFile

import numpy as np
from keras.layers import TimeDistributed, Dense, Activation, Input
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.models import Model
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import train_test_split


def read_json_zip_file(in_file, maxsize=256, read_limit=2000, read_offset=0):
    """Read up to read_limit JSON members from a zip archive, skipping the
    first read_offset, and keep only sequences no longer than maxsize."""
    all_x = []
    i = 0
    with ZipFile(in_file) as z:
        for fname in z.filelist:
            if read_offset > 0:
                read_offset -= 1
                continue
            with z.open(fname) as f:
                # json.load(f) accepts binary file objects only on Python 3.6+,
                # so decode explicitly for portability.
                all_x += json.loads(f.read().decode('utf-8'))
            i += 1
            if i >= read_limit:
                break
    lengths = [len(x) for x in all_x]
    print('Input sequence length range:', max(lengths), min(lengths))
    short_x = [x for x in all_x if len(x) <= maxsize]
    print('# of short sequences: {n}/{m}'.format(n=len(short_x), m=len(all_x)))
    # Each sequence is a list of (token, label) pairs; split into parallel lists.
    X = [[c[0] for c in x] for x in short_x]
    y = [[c[1] for c in x] for x in short_x]
    return X, y


def calc_score(yh, pr):
    """Strip the left padding from gold rows and predictions, then flatten
    both into flat label lists for sklearn's metric functions."""
    # The first non-zero entry in each gold row marks where the real labels
    # start (label sequences are left-padded with the 0 "pad" index).
    coords = [np.where(yhh > 0)[0][0] for yhh in yh]
    yh = [yhh[co:] for yhh, co in zip(yh, coords)]
    ypr = [prr[co:] for prr, co in zip(pr, coords)]
    fyh = [c for row in yh for c in row]
    fpr = [c for row in ypr for c in row]
    return fyh, fpr


def build_vocabulary(X, y, min_word_freq):
    """Build token and label vocabularies. Index 0 is reserved for padding,
    index 1 for out-of-vocabulary tokens."""
    corpus = (c for x in X for c in x)
    ind2word = ["{pad}", "{unk}"] + [w for w, c in collections.Counter(corpus).items() if c >= min_word_freq]
    # Tokens missing from the vocabulary fall back to index 1 ({unk}).
    word2ind = collections.defaultdict(lambda: 1, {word: index for index, word in enumerate(ind2word)})
    ind2label = ["{pad}"] + list(set(c for x in y for c in x))
    label2ind = {label: index for index, label in enumerate(ind2label)}
    print('Vocabulary size:', len(word2ind), len(label2ind))
    return ind2word, word2ind, ind2label, label2ind


def encode_by_vocab(X, y, word2ind, label2ind, maxlen=None):
    """Map tokens and labels to indices, one-hot encode the labels, and pad
    everything to maxlen."""
    if not isinstance(maxlen, int):
        maxlen = max(len(x) for x in X)
    print('Maximum sequence length:', maxlen)
    X_enc = [[word2ind[c] for c in x] for x in X if len(x) <= maxlen]
    max_label = len(label2ind)
    # Left-pad label sequences with the 0 "pad" index before one-hot encoding.
    y_enc = [[0] * (maxlen - len(ey)) + [label2ind[c] for c in ey] for ey in y if len(ey) <= maxlen]
    y_enc = [to_categorical(ey, max_label) for ey in y_enc]
    X_enc = pad_sequences(X_enc, maxlen=maxlen)
    y_enc = pad_sequences(y_enc, maxlen=maxlen)
    return X_enc, y_enc, maxlen


def build_model(max_sentence_length, vocab_size, num_tags, embedding_size, lstm_size):
    """Compile a Keras model: embedding -> LSTM -> per-timestep softmax."""
    l_input = Input(shape=(max_sentence_length,))
    l_embed = Embedding(vocab_size, embedding_size, input_length=max_sentence_length, mask_zero=True)(l_input)
    l_lstm = LSTM(lstm_size, return_sequences=True)(l_embed)
    l_dense = TimeDistributed(Dense(num_tags))(l_lstm)
    l_active = Activation('softmax')(l_dense)
    model = Model(inputs=l_input, outputs=l_active)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model


def save_metrics(out_dir='../model', **kwargs):
    """Write each keyword argument to <out_dir>/metrics/<name>.json."""
    os.makedirs(f'{out_dir}/metrics', exist_ok=True)
    for fname, metric in kwargs.items():
        with open(f'{out_dir}/metrics/{fname}.json', 'w') as f:
            try:
                json.dump({fname: metric}, f)
            except Exception as e:
                print(f'Failed to save metric {fname} due to error: {e}')


def fit_file(in_file, tp):
    """Train on the sequences in in_file using the parameter dict tp, then
    save the vocabularies, metrics, architecture, and weights."""
    X, y = read_json_zip_file(in_file, tp["max_sentence_size"], tp["read_limit"])
    ind2word, word2ind, ind2label, label2ind = build_vocabulary(X, y, tp["min_word_freq"])
    X_enc, y_enc, seq_size = encode_by_vocab(X, y, word2ind, label2ind)
    assert set(map(len, y_enc)) == {seq_size}
    assert set(map(len, X_enc)) == {seq_size}
    # Persist the vocabularies so the trained model can be reused for inference.
    with open(tp["out_dir"] + '/model_params.json', 'w') as f:
        json.dump({
            "word2ind": dict(word2ind),
            "label2ind": dict(label2ind),
            "max_length": seq_size
        }, f)
    X_train, X_test, y_train, y_test = train_test_split(X_enc, y_enc, test_size=tp["test_size"])
    print('Training and testing tensor shapes:', X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    model = build_model(seq_size, len(word2ind), len(label2ind), tp["embedding_size"], tp["lstm_size"])
    model.fit(X_train, y_train, batch_size=tp["batch_size"], epochs=tp["epochs"], validation_data=(X_test, y_test))
    test_score = model.evaluate(X_test, y_test, batch_size=tp["batch_size"])
    print('Raw test score:', test_score)
    # Metrics on the training set.
    pr = model.predict(X_train).argmax(2)
    yh = y_train.argmax(2)
    fyh, fpr = calc_score(yh, pr)
    train_acc = accuracy_score(fyh, fpr)
    print('Training accuracy:', train_acc)
    print('Training confusion matrix:')
    train_confusion = confusion_matrix(fyh, fpr)
    print(train_confusion)
    train_results = precision_recall_fscore_support(fyh, fpr)
    print('Training results:')
    print(train_results)
    # Metrics on the held-out test set.
    pr = model.predict(X_test).argmax(2)
    yh = y_test.argmax(2)
    fyh, fpr = calc_score(yh, pr)
    test_acc = accuracy_score(fyh, fpr)
    print('Testing accuracy:', test_acc)
    print('Testing confusion matrix:')
    test_confusion = confusion_matrix(fyh, fpr)
    print(test_confusion)
    test_results = precision_recall_fscore_support(fyh, fpr)
    print('Testing results:')
    print(test_results)
    # precision_recall_fscore_support returns (precision, recall, fbeta, support),
    # in that order.
    save_metrics(
        out_dir=tp['out_dir'],
        test_score=test_score,
        train_acc=train_acc,
        train_confusion=train_confusion.tolist(),
        train_precision=train_results[0].tolist(),
        train_recall=train_results[1].tolist(),
        train_fbeta=train_results[2].tolist(),
        train_support=train_results[3].tolist(),
        test_acc=test_acc,
        test_confusion=test_confusion.tolist(),
        test_precision=test_results[0].tolist(),
        test_recall=test_results[1].tolist(),
        test_fbeta=test_results[2].tolist(),
        test_support=test_results[3].tolist(),
    )
    # Save the model architecture and weights.
    with open(tp["out_dir"] + '/model_arch.json', 'w') as f:
        f.write(model.to_json())
    model.save_weights(tp["out_dir"] + '/model_weights.h5')
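

# Illustrative only: a minimal sketch of how the artifacts written above
# (model_arch.json, model_weights.h5, model_params.json) could be reloaded
# for inference. These helpers are not part of the original training flow;
# their names are assumptions.
def load_tagger(out_dir='../model'):
    """Rebuild the trained model and its vocabularies from disk."""
    from keras.models import model_from_json
    with open(out_dir + '/model_arch.json') as f:
        model = model_from_json(f.read())
    model.load_weights(out_dir + '/model_weights.h5')
    with open(out_dir + '/model_params.json') as f:
        params = json.load(f)
    return model, params


def tag_sequence(model, params, tokens):
    """Tag one token sequence; returns one label per input token."""
    # Unknown tokens fall back to index 1 ({unk}), matching build_vocabulary.
    ids = [params['word2ind'].get(t, 1) for t in tokens]
    # pad_sequences left-pads by default, matching the training encoding.
    x = pad_sequences([ids], maxlen=params['max_length'])
    pred = model.predict(x).argmax(2)[0]
    ind2label = {v: k for k, v in params['label2ind'].items()}
    # Inputs are left-padded, so only the trailing positions are real tokens.
    return [ind2label[i] for i in pred[-len(tokens):]]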
if __name__ == "__main__":
    import yaml

    with open('train_params.yaml', 'r') as f:
        # yaml.load() without an explicit Loader is deprecated and unsafe;
        # safe_load is sufficient for a plain parameter file.
        train_params = yaml.safe_load(f)
    print("Train params:")
    print(train_params)
    fit_file('../data/0.zip', train_params)
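
# For reference, fit_file expects train_params.yaml to define the keys it
# reads above. The values below are illustrative assumptions, not the
# project's actual settings:
#
#     max_sentence_size: 256   # maxsize passed to read_json_zip_file
#     read_limit: 2000         # max number of JSON files read from the zip
#     min_word_freq: 2         # rarer tokens are mapped to {unk}
#     test_size: 0.1           # fraction held out by train_test_split
#     embedding_size: 128      # Embedding output dimension
#     lstm_size: 128           # LSTM hidden units
#     batch_size: 32
#     epochs: 10
#     out_dir: ../model        # metrics/ subdirectory is created on demand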