pipeline.yaml
meta:
  extract_upstream: False
  extract_product: False
  jupyter_hot_reload: True
  jupyter_functions_as_notebooks: True

tasks:
  # download area grouped tasks
  - source: github_search.paperswithcode_task_areas.prepare_area_grouped_tasks
    product: "data/paperswithcode_tasks.csv"

  # run word2vec on natural language data
  - source: github_search.word2vec.train_abstract_readme_w2v
    upstream:
      - make_readmes
    params:
      embedding_dim: "{{word2vec.dimension}}"
      epochs: "{{word2vec.epochs}}"
    product: "output/abstract_readme_w2v{{word2vec.dimension}}.bin"

  # prepare data for token2vec (modified import2vec)
  - source: github_search.data_engineering.prepare_module_corpus
    params:
      python_file_paths: ["{{python_files_csv_path}}"]
    product: "output/module_corpus.csv"

  # train token2vec model
  - source: github_search.token2vec.train_token2vec
    upstream:
      - prepare_module_corpus
    params:
      n_iterations: 10000
      n_positive_imports: 32
      embedding_dim: "{{word2vec.dimension}}"
    product:
      model_path: "output/import2vec_module_vectors{{word2vec.dimension}}.bin"

  # prepare paper dataset with imports extracted per project
  - source: github_search.data_engineering.prepare_paperswithcode_with_imports_df
    upstream:
      - prepare_module_corpus
    params:
      python_file_paths: ["{{python_files_csv_path}}"]
    product: "output/papers_with_imports.csv"

  # prepare python dependency graph records
  - source: github_search.data_engineering.prepare_dependency_records
    params:
      sample_files_per_repo: 100
      add_filename_repo_label: True
      python_file_paths: ["{{python_files_csv_path}}"]
    product: "output/dependency_records.csv"

  # add additional information to dependency records
  - source: github_search.data_engineering.postprocess_dependency_records
    upstream:
      - prepare_dependency_records
      - prepare_paperswithcode_with_imports_df
    params:
      use_additional_records: False
      description_mode: "{{gnn.description_mode}}"
    product: "output/processed_dependency_records.csv"

  # make igraph from dependency records
  - source: github_search.data_engineering.make_igraph
    upstream:
      - prepare_dependency_records
    product: "output/call_igraph.pkl"

  # extract python function df; the resulting frame has columns
  # ['repo_name', 'path', 'function_name', 'function_code']
  - source: github_search.data_engineering.make_function_code_df
    params:
      python_file_path: "{{python_files_csv_path}}"
      max_depth: 10
    product: "output/python_functions.feather"

  # get github readmes
  - source: github_search.data_engineering.make_readmes
    params:
      paperswithcode_with_tasks_path: "data/paperswithcode_with_tasks.csv"
      max_workers: 24
    upstream:
      - prepare_paperswithcode_with_imports_df
    product: "output/papers_with_readmes.csv"

  # train FastText model on Python files
  - source: github_search.data_engineering.train_python_token_fasttext
    params:
      python_file_path: "{{python_files_csv_path}}"
      dim: "{{fasttext.dimension}}"
      epoch: "{{fasttext.epochs}}"
      n_cores: 16
    product: "output/python_files_fasttext_dim{{fasttext.dimension}}.bin"

  # # train GraphSAGE model
  # - source: github_search.pytorch_geometric_training.run_gnn_experiment
  #   upstream:
  #     - postprocess_dependency_records
  #     - train_python_token_fasttext
  #   params:
  #     fasttext_model_path: "output/python_files_fasttext_dim{{fasttext.dimension}}.bin"
  #     epochs: "{{gnn.epochs}}"
  #     hidden_channels: "{{gnn.hidden_channels}}"
  #     batch_size: "{{gnn.batch_size}}"
  #     num_layers: "{{gnn.layers}}"
  #     test_run: "{{test_run}}"
  #     lr: 0.001
  #     model_name: "{{gnn.model_name}}"
  #     use_self_connection: "{{gnn.use_self_connection}}"
  #     description_mode: "{{gnn.description_mode}}"
  #   product:
  #     plot_file: "output/loss_{{gnn.model_name}}_fasttext_dim{{fasttext.dimension}}_epochs{{gnn.epochs}}_dim{{gnn.hidden_channels}}_layers{{gnn.layers}}.png"
  #     gnn_token_embeddings: "output/{{gnn.model_name}}_embeddings_fasttext_dim{{fasttext.dimension}}_epochs{{gnn.epochs}}_dim{{gnn.hidden_channels}}_layers{{gnn.layers}}.bin"
  #     model_path: "output/{{gnn.model_name}}_model_{{gnn.epochs}}_dim{{gnn.hidden_channels}}_layers{{gnn.layers}}.pth"

  # # train-test split for tasks
  # - source: github_search.matching_zsl.prepare_task_train_test_split
  #   upstream:
  #     - postprocess_dependency_records
  #     - train_python_token_fasttext
  #     - prepare_area_grouped_tasks
  #   product:
  #     train: "output/tasks_train.csv"
  #     test: "output/tasks_test.csv"

  # task embeddings: prepares data for the task_embeddings_app streamlit app;
  # depends on sentence_embeddings_main
  - source: github_search.embedding_utils.prepare_reduced_embeddings
    upstream:
      - train_abstract_readme_w2v
      - train_python_token_fasttext
    params:
      rnn_model_path: "output/sbert/sru2x256_epoch350"
      codebert_model_path: "output/sbert/codebert15"
    product: "output/reduced_features.pkl"

  # for each repo, select the files that are most similar to its readme;
  # similarity is cosine similarity under the provided model
  - source: github_search.python_code_analysis.select_repo_files
    upstream:
      - make_readmes
    params:
      similar_col: "readme"
      bow_vectorizer_class: "CountVectorizer"
      files_per_repo: 10
      python_files_path: "{{python_files_csv_path}}"
    product: "output/selected_python_files.feather"

  - source: github_search.python_code_analysis.extract_python_comments
    upstream:
      - select_repo_files
    params:
      line_neighbor_threshold: 2
    product: "output/selected_python_files_comments.feather"

  - source: github_search.python_code_analysis.extract_python_imports
    upstream:
      - select_repo_files
    product: "output/selected_python_files_imports.feather"

  - source: github_search.seq2seq_utils.prepare_path_docid_seq2seq_df
    upstream:
      - select_repo_files
      - make_readmes
    product: "output/path_docid_seq2seq.csv"

  - source: github_search.seq2seq_utils.prepare_seq2seq_dataset
    name: prepare_path_docid_seq2seq_dataset
    upstream:
      - prepare_path_docid_seq2seq_df
    params:
      base_model: "{{transformer.T5_model}}"
      max_source_length: "{{transformer.max_source_length}}"
      max_target_length: "{{transformer.max_target_length}}"
    product: "output/seq2seq_hf_dataset"
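Because meta sets extract_upstream and extract_product to False, every task declares its dependencies and outputs explicitly, and Ploomber calls each source function with the declared product, upstream products, and params as arguments. A minimal sketch of what train_abstract_readme_w2v could look like under that convention — the function body and the "readme" column name are assumptions, not the project's actual code:

# Hypothetical Ploomber function task matching the train_abstract_readme_w2v
# entry above. Ploomber injects `product`, `upstream`, and the params
# declared in pipeline.yaml; the body here is illustrative only.
import pandas as pd
from gensim.models import Word2Vec

def train_abstract_readme_w2v(product, upstream, embedding_dim, epochs):
    # upstream is keyed by task name; str() yields the product path
    readmes = pd.read_csv(str(upstream["make_readmes"]))
    sentences = [str(text).split() for text in readmes["readme"]]
    model = Word2Vec(sentences, vector_size=int(embedding_dim),
                     epochs=int(epochs))
    # write the word vectors to the path declared as `product`
    model.wv.save(str(product))

A DAG like this is typically executed with `ploomber build`, which skips tasks whose products are already up to date.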

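The select_repo_files comments say each repository keeps only the files most similar to its readme, scored by cosine similarity over a bag-of-words model (hence the CountVectorizer and files_per_repo params). A minimal sketch of that ranking with scikit-learn — the function and variable names are hypothetical:

# Hypothetical per-repo file selection by cosine similarity, as described
# in the select_repo_files comments above. Names are illustrative only.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def select_most_similar_files(readme_text, file_texts, files_per_repo=10):
    vectorizer = CountVectorizer()
    # fit on the readme plus the files so all rows share one vocabulary
    matrix = vectorizer.fit_transform([readme_text] + file_texts)
    scores = cosine_similarity(matrix[0], matrix[1:]).ravel()
    # indices of the files_per_repo highest-scoring files
    top = scores.argsort()[::-1][:files_per_repo]
    return [file_texts[i] for i in top]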

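Judging by its base_model and length params, the final task tokenizes the path/doc-id dataframe into a Hugging Face dataset for a T5-style model. A rough sketch of that step, assuming "source" and "target" columns in the upstream CSV — the column names and the body are guesses for illustration; only the params come from pipeline.yaml:

# Hypothetical sketch of prepare_seq2seq_dataset; column names ("source",
# "target") and the body are assumptions, only the params are declared above.
import pandas as pd
from datasets import Dataset
from transformers import AutoTokenizer

def prepare_seq2seq_dataset(product, upstream, base_model,
                            max_source_length, max_target_length):
    df = pd.read_csv(str(upstream["prepare_path_docid_seq2seq_df"]))
    tokenizer = AutoTokenizer.from_pretrained(base_model)

    def tokenize(batch):
        # truncate sources and targets to the configured lengths
        model_inputs = tokenizer(batch["source"],
                                 max_length=int(max_source_length),
                                 truncation=True)
        labels = tokenizer(batch["target"],
                           max_length=int(max_target_length),
                           truncation=True)
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    dataset = Dataset.from_pandas(df).map(tokenize, batched=True)
    # persist to the directory declared as `product` in pipeline.yaml
    dataset.save_to_disk(str(product))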