preprocess.py
import random
from argparse import ArgumentParser
import common
import pickle

'''
This script preprocesses the data from MethodPaths. It truncates methods that have too many contexts,
and pads methods that have fewer paths with spaces.
'''
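
# Illustrative effect of the truncation/padding below (the numbers assume the default
# --max_contexts of 200 defined in the argument parser; they are not from a real run):
#   - a method line with 350 contexts is down-sampled to 200 contexts,
#   - a method line with 120 contexts is written out followed by 80 padding spaces,
# so every output row has a fixed width of max_contexts context fields.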

def save_dictionaries(dataset_name, word_to_count, path_to_count, target_to_count,
                      num_training_examples):
    save_dict_file_path = '{}.dict.c2v'.format(dataset_name)
    with open(save_dict_file_path, 'wb') as file:
        pickle.dump(word_to_count, file)
        pickle.dump(path_to_count, file)
        pickle.dump(target_to_count, file)
        pickle.dump(num_training_examples, file)
    print('Dictionaries saved to: {}'.format(save_dict_file_path))
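
# Illustrative input line for process_file (format assumed from the parsing code below,
# not copied from a real dataset): each raw line is
#   "<target_name> <context> <context> ...",
# where every context is a comma-separated triple "source_token,path,target_token", e.g.
#   get|name name,42,this name,17,return
# The two terminal tokens are looked up in word_to_count and the path in path_to_count.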

def process_file(file_path, data_file_role, dataset_name, word_to_count, path_to_count, max_contexts):
    sum_total = 0
    sum_sampled = 0
    total = 0
    empty = 0
    max_unfiltered = 0
    output_path = '{}.{}.c2v'.format(dataset_name, data_file_role)
    with open(output_path, 'w') as outfile:
        with open(file_path, 'r') as file:
            for line in file:
                parts = line.rstrip('\n').split(' ')
                target_name = parts[0]
                contexts = parts[1:]

                if len(contexts) > max_unfiltered:
                    max_unfiltered = len(contexts)
                sum_total += len(contexts)

                # If the method has more contexts than allowed, prefer contexts whose tokens
                # and path are all in the vocabularies ("fully found"), then fill up with
                # partially found ones, sampling at random within each group.
                if len(contexts) > max_contexts:
                    context_parts = [c.split(',') for c in contexts]
                    full_found_contexts = [c for i, c in enumerate(contexts)
                                           if context_full_found(context_parts[i], word_to_count, path_to_count)]
                    partial_found_contexts = [c for i, c in enumerate(contexts)
                                              if context_partial_found(context_parts[i], word_to_count, path_to_count)
                                              and not context_full_found(context_parts[i], word_to_count,
                                                                         path_to_count)]
                    if len(full_found_contexts) > max_contexts:
                        contexts = random.sample(full_found_contexts, max_contexts)
                    elif len(full_found_contexts) <= max_contexts \
                            and len(full_found_contexts) + len(partial_found_contexts) > max_contexts:
                        contexts = full_found_contexts + \
                                   random.sample(partial_found_contexts, max_contexts - len(full_found_contexts))
                    else:
                        contexts = full_found_contexts + partial_found_contexts

                if len(contexts) == 0:
                    empty += 1
                    continue

                sum_sampled += len(contexts)

                # Pad with spaces so that every line has exactly max_contexts context fields.
                csv_padding = " " * (max_contexts - len(contexts))
                outfile.write(target_name + ' ' + " ".join(contexts) + csv_padding + '\n')
                total += 1

    print('File: ' + file_path)
    print('Average total contexts: ' + str(float(sum_total) / total))
    print('Average final (after sampling) contexts: ' + str(float(sum_sampled) / total))
    print('Total examples: ' + str(total))
    print('Empty examples: ' + str(empty))
    print('Max number of contexts per word: ' + str(max_unfiltered))
    return total

def context_full_found(context_parts, word_to_count, path_to_count):
    # A context is "fully found" when both terminal tokens and the path are in the vocabularies.
    return context_parts[0] in word_to_count \
           and context_parts[1] in path_to_count and context_parts[2] in word_to_count


def context_partial_found(context_parts, word_to_count, path_to_count):
    # A context is "partially found" when at least one of its three components is in a vocabulary.
    return context_parts[0] in word_to_count \
           or context_parts[1] in path_to_count or context_parts[2] in word_to_count

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("-trd", "--train_data", dest="train_data_path",
                        help="path to training data file", required=True)
    parser.add_argument("-ted", "--test_data", dest="test_data_path",
                        help="path to test data file", required=True)
    parser.add_argument("-vd", "--val_data", dest="val_data_path",
                        help="path to validation data file", required=True)
    parser.add_argument("-mc", "--max_contexts", dest="max_contexts", default=200,
                        help="number of max contexts to keep", required=False)
    parser.add_argument("-wvs", "--word_vocab_size", dest="word_vocab_size", default=1301136,
                        help="Max number of origin words to keep in the vocabulary", required=False)
    parser.add_argument("-pvs", "--path_vocab_size", dest="path_vocab_size", default=911417,
                        help="Max number of paths to keep in the vocabulary", required=False)
    parser.add_argument("-tvs", "--target_vocab_size", dest="target_vocab_size", default=261245,
                        help="Max number of target words to keep in the vocabulary", required=False)
    parser.add_argument("-wh", "--word_histogram", dest="word_histogram",
                        help="word histogram file", metavar="FILE", required=True)
    parser.add_argument("-ph", "--path_histogram", dest="path_histogram",
                        help="path histogram file", metavar="FILE", required=True)
    parser.add_argument("-th", "--target_histogram", dest="target_histogram",
                        help="target histogram file", metavar="FILE", required=True)
    parser.add_argument("-o", "--output_name", dest="output_name",
                        help="output name - the base name for the created dataset", metavar="FILE", required=True,
                        default='data')
    args = parser.parse_args()

    train_data_path = args.train_data_path
    test_data_path = args.test_data_path
    val_data_path = args.val_data_path
    word_histogram_path = args.word_histogram
    path_histogram_path = args.path_histogram

    # Load the token/path/target vocabularies (with their counts) from the histogram files.
    word_histogram_data = common.common.load_vocab_from_histogram(word_histogram_path, start_from=1,
                                                                  max_size=int(args.word_vocab_size),
                                                                  return_counts=True)
    _, _, _, word_to_count = word_histogram_data
    _, _, _, path_to_count = common.common.load_vocab_from_histogram(path_histogram_path, start_from=1,
                                                                     max_size=int(args.path_vocab_size),
                                                                     return_counts=True)
    _, _, _, target_to_count = common.common.load_vocab_from_histogram(args.target_histogram, start_from=1,
                                                                       max_size=int(args.target_vocab_size),
                                                                       return_counts=True)

    num_training_examples = 0
    for data_file_path, data_role in zip([test_data_path, val_data_path, train_data_path], ['test', 'val', 'train']):
        num_examples = process_file(file_path=data_file_path, data_file_role=data_role, dataset_name=args.output_name,
                                    word_to_count=word_to_count, path_to_count=path_to_count,
                                    max_contexts=int(args.max_contexts))
        if data_role == 'train':
            num_training_examples = num_examples

    save_dictionaries(dataset_name=args.output_name, word_to_count=word_to_count,
                      path_to_count=path_to_count, target_to_count=target_to_count,
                      num_training_examples=num_training_examples)
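
# Example invocation (a sketch; the file names below are hypothetical, only the flags are
# taken from the argument parser above):
#
#   python preprocess.py \
#       --train_data my_dataset.train.raw.txt \
#       --test_data my_dataset.test.raw.txt \
#       --val_data my_dataset.val.raw.txt \
#       --max_contexts 200 \
#       --word_histogram my_dataset.histo.ori.c2v \
#       --path_histogram my_dataset.histo.path.c2v \
#       --target_histogram my_dataset.histo.tgt.c2v \
#       --output_name my_dataset
#
# Given the output templates above, this writes my_dataset.test.c2v, my_dataset.val.c2v,
# my_dataset.train.c2v, and the pickled dictionaries in my_dataset.dict.c2v.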