pipeline.yaml
executor:
  dotted_path: ploomber.executors.Serial
  build_in_subprocess: False

meta:
  extract_upstream: False
  extract_product: False
  jupyter_hot_reload: True
  jupyter_functions_as_notebooks: True
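
# Note: extract_upstream and extract_product are disabled, so each task below
# declares its upstream dependencies and its products explicitly in this spec
# instead of having Ploomber infer them from the task's source code.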
tasks:
  # download area grouped tasks
  - source: github_search.papers_with_code.paperswithcode_task_areas.prepare_area_grouped_tasks
    product: "data/paperswithcode_tasks.csv"

  - source: github_search.papers_with_code.ploomber.prepare_paperswithcode_df
    name: pwc_data.prepare_raw_paperswithcode_df
    params:
      paperswithcode_filename: "data/links-between-papers-and-code.json.gz"
      papers_filename: "data/papers-with-abstracts.json.gz"
    product:
      paperswithcode_path: "output/raw_paperswithcode_df.csv"

  - source: github_search.papers_with_code.ploomber.prepare_filtered_paperswithcode_df
    name: pwc_data.prepare_final_paperswithcode_df
    upstream:
      - pwc_data.prepare_raw_paperswithcode_df
    params:
      min_task_count: 10
    product:
      paperswithcode_path: "{{paperswithcode_path}}"
      task_counts_path: "output/task_counts.csv"

  # READMEs
  # get github readmes
  - source: github_search.elixir_runner.download_readmes_pb
    name: pwc_data.download_readmes
    upstream:
      - pwc_data.prepare_final_paperswithcode_df
    product: "output/paperswithcode_readmes.json"

  - source: github_search.papers_with_code.ploomber.prepare_paperswithcode_with_readmes_pb
    name: pwc_data.prepare_paperswithcode_with_readmes
    upstream:
      - pwc_data.prepare_final_paperswithcode_df
      - pwc_data.download_readmes
    product: "output/paperswithcode_with_readmes.json.gz"

  # train-test split for tasks
  # tasks are stratified by paperswithcode area
  - source: github_search.train_test_split.prepare_task_train_test_split
    upstream:
      - prepare_area_grouped_tasks
      - pwc_data.prepare_final_paperswithcode_df
    params:
      test_size: 1
    product:
      train: "output/tasks_train.csv"
      test: "output/tasks_test.csv"

  #######################
  # code2doc
  #######################
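  # code2doc samples repositories per task and uses an LLM served through Ollama
  # (see code2doc.generate_readmes below) to generate README-style descriptions
  # of their code.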
  - source: github_search.pipelines.steps.code2doc_prepare_data_pb
    name: code2doc.prepare_data
    params:
      repos_df_path: "output/paperswithcode_with_readmes.json.gz"
      python_code_path: "output/repo_selected_files.parquet"
    product:
      repos_df_path: "output/repos_with_all_data.jsonl"
      # parquet writes fail for this data, so the selected code is saved as feather
      selected_python_code_path: "output/selected_python_code.feather"

  - source: github_search.pipelines.steps.create_repos_sample_pb
    name: code2doc.create_repo_sample
    upstream:
      - code2doc.prepare_data
    params:
      min_task_size: 5
      n_repos_per_task: 10
      max_task_size: 500
      max_random_baseline_score: 0.5
    product:
      sampled_repos: "output/code2doc/sample_per_task_5_repos/sampled_repos5.jsonl"

  # to run this step, serve Ollama on the ports configured below
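  # An illustrative way to serve two Ollama instances on these ports (a sketch,
  # not taken from the repository; adjust hosts and ports to your setup):
  #   OLLAMA_HOST=127.0.0.1:11430 ollama serve &
  #   OLLAMA_HOST=127.0.0.1:11431 ollama serve &
  #   OLLAMA_HOST=127.0.0.1:11430 ollama pull codellama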
  - source: github_search.pipelines.steps.generate_code2doc_readmes_pb
    name: code2doc.generate_readmes
    upstream:
      - code2doc.create_repo_sample
      - code2doc.prepare_data
    params:
      lm_model_name: "codellama"
      lm_base_url: "http://localhost:11430"
      small_lm_base_url: "http://localhost:11431"
      files_per_repo: 10
    product: "output/code2doc/sample_per_task_5_repos/codellama_generated_readmes5.jsonl"

  # the stack
  - source: github_search.the_stack.prepare_the_stack_files
    name: the_stack.prepare_files
    params:
      paperswithcode_path: "data/paperswithcode_with_tasks.csv"
      delete_temporary_files: True
    product: "data/the_stack_paperswithcode_repos"

  - source: github_search.the_stack.prepare_the_stack_df
    name: the_stack.prepare_df
    upstream:
      - the_stack.prepare_files
    product: "output/the_stack_paperswithcode_files.parquet"

  # prepare data for similarity learning from paperswithcode
  - source: github_search.sentence_embeddings.datasets.prepare_paperswithcode_data
    name: sentence_embeddings.prepare_paperswithcode_data
    product:
      datasets: "data/datasets.json.gz"
      methods: "data/methods.json.gz"

  # prepare data for similarity learning from dbpedia
  - source: github_search.sentence_embeddings.datasets.prepare_dbpedia_machine_learning_data
    name: sentence_embeddings.prepare_dbpedia_data
    product: "data/dbpedia_ml_records.csv"

  - source: github_search.sentence_embeddings.datasets.prepare_data
    name: sentence_embeddings.prepare_data
    upstream:
      - sentence_embeddings.prepare_dbpedia_data
      - sentence_embeddings.prepare_paperswithcode_data
    product: "data/sentence_similarity_data.csv"

  #
  - source: github_search.train_test_split.prepare_repo_train_test_split
    upstream:
      - pwc_data.prepare_paperswithcode_with_readmes
      - prepare_task_train_test_split
    product:
      train: "output/repos_train.json"
      test: "output/repos_test.json"

  # extract python tokens for BoW baseline
  - source: github_search.bow_baseline.extract_python_tokens
    product: "output/python_files_with_tokens_df.feather"

  #
  - source: github_search.bow_baseline.prepare_bow_retrieval_evaluation_results
    name: prepare_bow_retrieval_evaluation_results_readme
    upstream:
      - prepare_task_train_test_split
    params:
      index: python_tokenized_files
    product: "output/python_files_retrieval_results.csv"

  #
  - source: github_search.bow_baseline.prepare_bow_retrieval_evaluation_results
    name: prepare_bow_retrieval_evaluation_results_python_files
    upstream:
      - prepare_task_train_test_split
    params:
      index: project_readmes
    product: "output/readme_retrieval_results.csv"

  # run word2vec on natural language data
  - source: github_search.word2vec.train_abstract_readme_w2v
    upstream:
      - pwc_data.prepare_paperswithcode_with_readmes
    params:
      embedding_dim: "{{word2vec.dimension}}"
      epochs: "{{word2vec.epochs}}"
    product:
      binary: "output/abstract_readme_w2v{{word2vec.dimension}}.bin"
      txt: "output/abstract_readme_w2v{{word2vec.dimension}}.txt"

  # run word2vec on code
  - source: github_search.word2vec.train_python_code_w2v
    params:
      python_file_path: "{{python_files_path}}"
      embedding_dim: "{{word2vec.dimension}}"
    product:
      binary: "output/python_code_w2v{{word2vec.dimension}}.bin"
      txt: "output/python_code_w2v{{word2vec.dimension}}.txt"

  #
  # imports
  #
  - source: github_search.imports.prepare_data.prepare_file_imports
    name: imports.prepare_file_imports
    params:
      python_files_path: "{{python_files_path}}"
    product: "output/python_file_imports.feather"

  - source: github_search.imports.training.train_import_word2vec
    name: imports.train_w2v
    upstream:
      - imports.prepare_file_imports
    params:
      embedding_dim: "{{word2vec.dimension}}"
      epochs: "{{word2vec.epochs}}"
    product:
      binary: "output/imports_w2v{{word2vec.dimension}}.bin"
      txt: "output/imports_w2v{{word2vec.dimension}}.txt"

  - source: github_search.imports.training.train_import_rnn_file_similarity_model
    name: imports.train_rnn
    upstream:
      - imports.prepare_file_imports
      - imports.train_w2v
    params:
      epochs: 2
      batch_size: 256
      rnn_config: "{{rnn_config}}"
    product: "output/models/import_lstm"

  #
  # sentence embeddings
  #
  # make word2vec aggregator model
  - source: github_search.sentence_embeddings.models.prepare_word2vec_sentence_embedding_model
    name: sentence_embeddings.prepare_w2v_model
    upstream:
      - train_abstract_readme_w2v
    product: "output/abstract_readme_embedder"

  # prepare data for token2vec (modified import2vec)
  - source: github_search.data_engineering.prepare_module_corpus
    params:
      python_file_paths: ["{{python_files_path}}"]
    product: "output/module_corpus.csv"

  # train token2vec model
  - source: github_search.token2vec.train_token2vec
    upstream:
      - prepare_module_corpus
    params:
      n_iterations: 10000
      n_positive_imports: 32
      embedding_dim: "{{word2vec.dimension}}"
    product:
      model_path: "output/import2vec_module_vectors{{word2vec.dimension}}.bin"

  # prepare paper dataset with imports extracted per-project
  - source: github_search.data_engineering.prepare_paperswithcode_with_imports_df
    upstream:
      - prepare_module_corpus
    params:
      python_file_paths: ["{{python_files_path}}"]
    product: "output/papers_with_imports.csv"

  # prepare python dependency graph records
  - source: github_search.data_engineering.prepare_dependency_records
    name: dependency_graph.prepare_records
    params:
      sample_files_per_repo: 1000
      add_repo_col: True
      use_basename: False
      python_file_path: "{{python_files_path}}"
      excluded_prefix: "venv"
    product: "output/dependency_records.feather"

  # additional information for dependency records
  - source: github_search.data_engineering.postprocess_dependency_records
    name: dependency_graph.postprocess_records
    upstream:
      - dependency_graph.prepare_records
      - prepare_paperswithcode_with_imports_df
    params:
      use_additional_records: False
      description_mode: False
    product: "output/processed_dependency_records.feather"

  #
  #
  # GRAPHS
  #
  #
  # Records from Neo4J
  - source: github_search.neo4j_graph.prepare_neo4j_dependency_records
    upstream:
      - prepare_repo_train_test_split
    params:
      graph_dependencies_path: "output/dependency_records/repo_dependencies_articlerank.json"
      id_col: repo
      rel_col: edge_type
    product:
      train: "output/dependency_records/graph_dependencies_train.json"
      test: "output/dependency_records/graph_dependencies_test.json"

  # extract python function df
  # the resulting dataframe has columns
  # ['repo_name', 'path', 'function_name', 'function_code']
  - source: github_search.data_engineering.prepare_function_code_df
    params:
      python_file_path: "{{python_files_path}}"
      max_depth: 10
      n_cores: 4
    product: "output/python_functions.feather"

  - source: github_search.data_engineering.prepare_function_signatures_df
    params:
      python_file_path: "{{python_files_path}}"
      n_cores: 4
    product: "output/python_signatures.parquet"

  # train FastText model on Python files
  - source: github_search.data_engineering.train_python_token_fasttext
    params:
      python_file_path: "{{python_files_path}}"
      dim: "{{fasttext.dimension}}"
      epoch: "{{fasttext.epochs}}"
      n_cores: 16
    product: "output/python_files_fasttext_dim{{fasttext.dimension}}.bin"

  #
  - source: github_search.summarization.prepare_function_df_with_summarized_code
    params:
      transformer_model_name: "{{summarization.transformer_model_name}}"
    upstream: prepare_function_code_df
    product: "output/python_files_descriptions_{{summarization.transformer_model_name}}.feather"

  - source: github_search.graphs.prepare_graph.prepare_from_dependency_records
    name: graph.prepare_from_dependency_records
    upstream:
      - dependency_graph.prepare_records
    params:
      used_edges:
        - "repo-file"
    product: "output/dependency_records_igraph.pkl"

  - source: github_search.graphs.prepare_graph.prepare_from_function_code
    name: graph.prepare_from_function_code
    upstream:
      - prepare_function_code_df
    product: "output/function_code_igraph.pkl"

  # prepare graph list
  - source: github_search.graphs.data_preparation.prepare_dataset_with_transformer
    name: gnn.prepare_dataset_with_transformer
    params:
      sentence_transformer_model_name: "{{gnn.sentence_transformer_model_name}}"
      batch_size: 128
      paperswithcode_path: "{{paperswithcode_path}}"
    upstream:
      - prepare_area_grouped_tasks
      - graph.prepare_from_function_code
    product: "output/graph_list.pkl"
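
The "{{...}}" values above are Ploomber env placeholders, resolved from an env.yaml file kept next to pipeline.yaml; the pipeline is then built with "ploomber build" (or "ploomber task <name>" for a single task). The sketch below only illustrates the shape such a file could take for the placeholders used here; every value is an assumption, not taken from the repository.

env.yaml (illustrative sketch; all values are assumptions):

  paperswithcode_path: "output/paperswithcode_with_tasks.csv"
  python_files_path: "output/python_files.parquet"
  word2vec:
    dimension: 200
    epochs: 5
  fasttext:
    dimension: 200
    epochs: 5
  # rnn_config is passed through to imports.train_rnn; its exact structure depends
  # on the import RNN training code, so it is left out of this sketch
  summarization:
    transformer_model_name: "Salesforce/codet5-base-multi-sum"
  gnn:
    sentence_transformer_model_name: "all-mpnet-base-v2"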