Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

project.org_archive 16 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
  1. # -*- mode: org -*-
  2. Archived entries from file /home/salotz/tree/lab/projects/wepy.lysozyme_test/project.org
  3. * Running simulations
  4. :PROPERTIES:
  5. :ARCHIVE_TIME: 2019-11-14 Thu 21:19
  6. :ARCHIVE_FILE: ~/tree/lab/projects/wepy.lysozyme_test/project.org
  7. :ARCHIVE_CATEGORY: project
  8. :END:
  9. ** Standalone Runs
  10. To validate that our system is okay we do a couple of standalone runs
  11. on the system.
  12. *** Script
  13. Here is a script for running a basic kind of simulation:
  14. #+begin_src python :tangle scripts/run.py
  15. import simtk.openmm as omm
  16. import simtk.openmm.app as omma
  17. import simtk.unit as unit
  18. import mdtraj as mdj
  19. from openmmtools import testsystems
  20. # from seh_prep.modules import StateDataFrameOMMReporter
  21. STEP_TIME = 0.002 * unit.picoseconds
  22. LANGEVIN_DEFAULTS = (
  23. 300.0*unit.kelvin,
  24. 1/unit.picosecond,
  25. STEP_TIME
  26. )
  27. PLATFORM = 'CPU'
  28. FRAME_INTERVAL_TIME = 10 * unit.picosecond
  29. FRAME_INTERVAL = int(round(FRAME_INTERVAL_TIME / STEP_TIME))
  30. def gen_sim(platform):
  31. integrator = omm.LangevinIntegrator(*LANGEVIN_DEFAULTS)
  32. test_sys = testsystems.LysozymeImplicit()
  33. # write a PDB file for reference
  34. top = mdj.Topology.from_openmm(test_sys.topology)
  35. traj = mdj.Trajectory(test_sys.positions.value_in_unit(unit.nanometer), topology=top)
  36. traj.save_pdb('ref.pdb')
  37. if platform == 'CUDA':
  38. platform_kwargs = {'Precision' : 'single'}
  39. elif platform == 'OpenCL':
  40. platform_kwargs = {"Precision" : 'single'}
  41. elif platform == 'CPU':
  42. platform_kwargs = {}
  43. elif platform == 'Reference':
  44. platform_kwargs = {}
  45. else:
  46. raise ValueError
  47. platform = omm.Platform.getPlatformByName(platform)
  48. simulation = omma.Simulation(test_sys.topology, test_sys.system, integrator,
  49. platform=platform,
  50. platformProperties=platform_kwargs)
  51. simulation.context.setPositions(test_sys.positions)
  52. # add some reporters
  53. simulation.reporters.append(omma.DCDReporter('traj.dcd',
  54. reportInterval=FRAME_INTERVAL,
  55. enforcePeriodicBox=False))
  56. # simulation.reporters.append(StateDataFrameOMMReporter(report_interval=10))
  57. simulation.reporters.append(omma.StateDataReporter('state.csv',
  58. FRAME_INTERVAL,
  59. step=True,
  60. time=True,
  61. potentialEnergy=True,
  62. kineticEnergy=True,
  63. totalEnergy=True,
  64. temperature=True,
  65. volume=True,
  66. ))
  67. return simulation
  68. if __name__ == "__main__":
  69. import sys
  70. import time
  71. # time in sampling to run
  72. sim_time = float(sys.argv[1]) #* unit.second
  73. platform = sys.argv[2]
  74. print("Running simulation for {}".format(sim_time))
  75. tranche_time = 10 * unit.picosecond
  76. tranche_steps = int(round(tranche_time / STEP_TIME))
  77. print(f"running tranches of {tranche_steps} steps")
  78. simulation = gen_sim(platform)
  79. print("Starting the simulation")
  80. curr_time = 0.
  81. step_counter = 0
  82. start_time = time.time()
  83. while curr_time < sim_time:
  84. print("starting from step {}".format(step_counter))
  85. print(type(tranche_steps))
  86. simulation.step(tranche_steps)
  87. curr_time = time.time() - start_time
  88. step_counter += tranche_steps
  89. print("Ran {}".format(step_counter))
  90. print("Time currently is: {}".format(curr_time))
  91. # add up the sampling time
  92. sampling_time = step_counter * STEP_TIME
  93. print(f"Ran a total of {step_counter} steps")
  94. print(f"Ran a total of {sampling_time}")
  95. #+end_src
  96. *** run settings
  97. #+BEGIN_SRC toml :tangle hpcc/standalone/run_settings.toml
  98. # test run value
  99. walltime = "24:00:00"
  100. memory = "20gb"
  101. num_cpus = 2
  102. # test run value
  103. num_gpus = 1
  104. constraint = "[intel18|intel16]"
  105. #+END_SRC
  106. *** context settings
  107. #+BEGIN_SRC toml :tangle hpcc/standalone/context_settings.toml
  108. [context]
  109. # its okay to leave out the exec dir, but you can specify this and the
  110. #run will get executed here and then be moved back
  111. # exec_dir = "$SCRATCH/exec"
  112. # epilog is something that can be run after the run, if it is empty
  113. # here that is okay, this is really only important for moving data
  114. # back after being executed somewhere else
  115. epilog = ""
  116. # this section is for the setup of the run in the script
  117. [context.setup]
  118. # this is the parent dir for the jobs, there should be a directory
  119. # here called 'jobs' and each job will get its own unique
  120. # directory
  121. goal_dir_path = "$SCRATCH/tree/lab/projects/wepy.lysozyme_test/hpcc/standalone"
  122. # will load these modules from lmod i.e. 'module load <modname>'
  123. lmod_modules = ["CUDA/10.0.130"]
  124. # everything in here is translated directly into environment
  125. # variables, e.g. "export ANACONDA_DIR=path/to/anaconda"
  126. [context.setup.env_vars]
  127. ANACONDA_DIR = "/mnt/home/lotzsamu/anaconda3"
  128. #+END_SRC
  129. *** Tasks
  130. **** 1
  131. #+begin_src bash :shebang "#!/bin/bash --login" :tangle hpcc/standalone/tasks/initial.sh
  132. source $HOME/.bashrc
  133. # use the ANACONDA_DIR which is set in the environment variables in
  134. # the setup to setup this shell for anaconda
  135. . ${ANACONDA_DIR}/etc/profile.d/conda.sh
  136. # activate the proper virtualenv
  137. conda activate lysozyme_test
  138. # this will run for under 24 hours
  139. script="$SCRATCH/tree/lab/projects/wepy.lysozyme_test/scripts/run.py"
  140. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python $script 84000 CUDA
  141. #+end_src
  142. *** Retrieving results
  143. #+begin_src bash
  144. rsync -ravvhhiz \
  145. lotzsamu@rsync.hpcc.msu.edu:/mnt/gs18/scratch/users/lotzsamu/tree/lab/projects/wepy.lysozyme_test/hpcc/standalone/jobs/ \
  146. $HOME/tree/lab/projects/wepy.lysozyme_test/hpcc/standalone/jobs
  147. #+end_src
  148. ** Wepy Runs
  149. To validate that our system is okay we do a couple of standalone runs
  150. on the system.
  151. *** By Time
  152. Here is a script for running a basic kind of simulation:
  153. #+begin_src python :tangle scripts/wepy_run_by_time.py
  154. if __name__ == "__main__":
  155. import sys
  156. import logging
  157. from multiprocessing_logging import install_mp_handler
  158. from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker
  159. logging.getLogger().setLevel(logging.DEBUG)
  160. install_mp_handler()
  161. if sys.argv[1] == "-h" or sys.argv[1] == "--help":
  162. print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler")
  163. else:
  164. walltime = int(sys.argv[1])
  165. #n_cycles = int(sys.argv[1])
  166. n_steps = int(sys.argv[2])
  167. n_walkers = int(sys.argv[3])
  168. n_workers = int(sys.argv[4])
  169. platform = sys.argv[5]
  170. resampler = sys.argv[6]
  171. print("Number of steps: {}".format(n_steps))
  172. print("Time to run: {}".format(walltime))
  173. # print("Number of cycles: {}".format(n_cycles))
  174. sim_maker = LysozymeImplicitOpenMMSimMaker()
  175. apparatus = sim_maker.make_apparatus(
  176. integrator='LangevinIntegrator',
  177. resampler=resampler,
  178. bc='UnbindingBC',
  179. platform=platform,
  180. )
  181. config = sim_maker.make_configuration(apparatus,
  182. work_mapper='TaskMapper',
  183. platform=platform)
  184. sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)
  185. sim_manager.run_simulation_by_time(walltime, n_steps, num_workers=n_workers)
  186. #+end_src
  187. *** By Num Cycles
  188. Here is a script for running a basic kind of simulation:
  189. #+begin_src python :tangle scripts/wepy_run_by_cycles.py
  190. if __name__ == "__main__":
  191. import sys
  192. import logging
  193. from multiprocessing_logging import install_mp_handler
  194. from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker
  195. logging.getLogger().setLevel(logging.DEBUG)
  196. install_mp_handler()
  197. if sys.argv[1] == "-h" or sys.argv[1] == "--help":
  198. print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler")
  199. else:
  200. n_cycles = int(sys.argv[1])
  201. n_steps = int(sys.argv[2])
  202. n_walkers = int(sys.argv[3])
  203. n_workers = int(sys.argv[4])
  204. platform = sys.argv[5]
  205. resampler = sys.argv[6]
  206. print("Number of steps: {}".format(n_steps))
  207. print("Number of cycles: {}".format(n_cycles))
  208. sim_maker = LysozymeImplicitOpenMMSimMaker()
  209. apparatus = sim_maker.make_apparatus(
  210. integrator='LangevinIntegrator',
  211. resampler=resampler,
  212. bc='UnbindingBC',
  213. platform=platform,
  214. )
  215. config = sim_maker.make_configuration(apparatus,
  216. work_mapper='TaskMapper',
  217. platform=platform)
  218. sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)
  219. sim_manager.run_simulation_by_time(n_cycles, n_steps, num_workers=n_workers)
  220. #+end_src
  221. *** run settings
  222. #+BEGIN_SRC toml :tangle hpcc/wepy/run_settings.toml
  223. # test run value
  224. walltime = "168:00:00"
  225. memory = "20gb"
  226. num_cpus = 4
  227. num_gpus = 8
  228. constraint = "[intel18|intel16]"
  229. #+END_SRC
  230. *** context settings
  231. #+BEGIN_SRC toml :tangle hpcc/wepy/context_settings.toml
  232. [context]
  233. # its okay to leave out the exec dir, but you can specify this and the
  234. #run will get executed here and then be moved back
  235. # exec_dir = "$SCRATCH/exec"
  236. # epilog is something that can be run after the run, if it is empty
  237. # here that is okay, this is really only important for moving data
  238. # back after being executed somewhere else
  239. epilog = ""
  240. # this section is for the setup of the run in the script
  241. [context.setup]
  242. # this is the parent dir for the jobs, there should be a directory
  243. # here called 'jobs' and each job will get its own unique
  244. # directory
  245. goal_dir_path = "$SCRATCH/tree/lab/projects/wepy.lysozyme_test/hpcc/wepy"
  246. # will load these modules from lmod i.e. 'module load <modname>'
  247. lmod_modules = ["CUDA/10.0.130"]
  248. # everything in here is translated directly into environment
  249. # variables, e.g. "export ANACONDA_DIR=path/to/anaconda"
  250. [context.setup.env_vars]
  251. ANACONDA_DIR = "/mnt/home/lotzsamu/anaconda3"
  252. #+END_SRC
  253. *** Tasks
  254. **** NoResampler test
  255. #+begin_src bash :shebang "#!/bin/bash" :tangle hpcc/wepy/tasks/noresampler_test.sh
  256. # use the ANACONDA_DIR which is set in the environment variables in
  257. # the setup to setup this shell for anaconda
  258. . ${ANACONDA_DIR}/etc/profile.d/conda.sh
  259. # activate the proper virtualenv
  260. conda activate lysozyme_test
  261. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python -m simtk.testInstallation
  262. WALLTIME=590400
  263. N_STEPS=10000 # 20 picoseconds
  264. N_WALKERS=48
  265. N_WORKERS=8
  266. PLATFORM="CUDA"
  267. RESAMPLER="NoResampler"
  268. script="$SCRATCH/tree/lab/projects/wepy.lysozyme_test/scripts/wepy_run_by_time.py"
  269. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python $script \
  270. $WALLTIME \
  271. $N_STEPS \
  272. $N_WALKERS \
  273. $N_WORKERS \
  274. $PLATFORM \
  275. $RESAMPLER
  276. #+end_src
  277. **** WExplore test
  278. #+begin_src bash :shebang "#!/bin/bash" :tangle hpcc/wepy/tasks/wexplore_test.sh
  279. # use the ANACONDA_DIR which is set in the environment variables in
  280. # the setup to setup this shell for anaconda
  281. . ${ANACONDA_DIR}/etc/profile.d/conda.sh
  282. # activate the proper virtualenv
  283. conda activate lysozyme_test
  284. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python -m simtk.testInstallation
  285. WALLTIME=590400
  286. N_STEPS=10000 # 20 picoseconds
  287. N_WALKERS=48
  288. N_WORKERS=8
  289. PLATFORM="CUDA"
  290. RESAMPLER="WExploreResampler"
  291. script="$SCRATCH/tree/lab/projects/wepy.lysozyme_test/scripts/wepy_run_by_time.py"
  292. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python $script \
  293. $WALLTIME \
  294. $N_STEPS \
  295. $N_WALKERS \
  296. $N_WORKERS \
  297. $PLATFORM \
  298. $RESAMPLER
  299. #+end_src
  300. **** REVO test
  301. #+begin_src bash :shebang "#!/bin/bash" :tangle hpcc/wepy/tasks/revo_test.sh
  302. # use the ANACONDA_DIR which is set in the environment variables in
  303. # the setup to setup this shell for anaconda
  304. . ${ANACONDA_DIR}/etc/profile.d/conda.sh
  305. # activate the proper virtualenv
  306. conda activate lysozyme_test
  307. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python -m simtk.testInstallation
  308. WALLTIME=590400
  309. N_STEPS=10000 # 20 picoseconds
  310. N_WALKERS=48
  311. N_WORKERS=8
  312. PLATFORM="CUDA"
  313. RESAMPLER="REVOResampler"
  314. script="$SCRATCH/tree/lab/projects/wepy.lysozyme_test/scripts/wepy_run_by_time.py"
  315. ${ANACONDA_DIR}/envs/lysozyme_test/bin/python $script \
  316. $WALLTIME \
  317. $N_STEPS \
  318. $N_WALKERS \
  319. $N_WORKERS \
  320. $PLATFORM \
  321. $RESAMPLER
  322. #+end_src
  323. *** Retrieving results
  324. #+begin_src bash
  325. rsync -ravvhhiz \
  326. lotzsamu@rsync.hpcc.msu.edu:/mnt/gs18/scratch/users/lotzsamu/tree/lab/projects/wepy.lysozyme_test/hpcc/wepy/jobs/ \
  327. $HOME/tree/lab/projects/wepy.lysozyme_test/hpcc/wepy/jobs
  328. #+end_src
  329. Archived entries from file /home/salotz/tree/lab/projects/wepy.lysozyme_test/project.org
  330. * Analysis
  331. We only consider the wepy runs here. The standalone was just a test.
  332. ** Tasks
  333. *** Initialization
  334. Domain specific stuff related to working with the project:
  335. **** Header
  336. #+BEGIN_SRC python :tangle lib/wepy_lysozyme/_tasks.py
  337. """Generated file from the analysis.org file. Do not edit directly."""
  338. #+END_SRC
  339. **** Imports
  340. #+BEGIN_SRC python :tangle lib/wepy_lysozyme/_tasks.py
  341. # standard library
  342. import os
  343. import os.path as osp
  344. import pickle
  345. # de facto standard library
  346. import numpy as np
  347. import pandas as pd
  348. import matplotlib.pyplot as plt
  349. import prefect
  350. # extra non-domain specific
  351. import joblib
  352. import sqlalchemy as sqla
  353. # 3rd party domain specific
  354. import mdtraj as mdj
  355. import simtk.unit as unit
  356. # auxiliary supporting repos
  357. import geomm
  358. import wepy
  359. # truly ad hoc
  360. #+END_SRC
  361. **** Paths
  362. #+BEGIN_SRC python :tangle lib/wepy_lysozyme/_tasks.py
  363. ## Paths
  364. # for localizing paths to very commonly used resources and resrouces
  365. # which may change schema. The directory structure for the rest is the
  366. # schema, so just use osp.join(project_path(), 'subpath/to/resource')
  367. # for the rest so a lot of work is reduced in specifying all of them
  368. def projects_path():
  369. user_home = osp.expanduser('~')
  370. studio = osp.join(user_home, 'tree/lab/studio')
  371. projects = osp.join(studio, 'projects/index')
  372. return projects
  373. def project_path():
  374. return osp.join(projects_path(), 'seh.pathway_hopping')
  375. def data_path():
  376. return osp.join(project_path(), 'data')
  377. def media_path():
  378. return osp.join(project_path(), 'media')
  379. def sqlite_path():
  380. return osp.join(project_path(), 'db/db.sqlite')
  381. def joblib_cache_path():
  382. return osp.join(project_path(), 'cache/joblib')
  383. #+END_SRC
  384. **** Setup
  385. Set up caching of the tasks.
  386. #+BEGIN_SRC python :tangle lib/wepy_lysozyme/_tasks.py
  387. ## Setup
  388. # create the sqlite database
  389. # set up the joblib cache
  390. jlmem = joblib.Memory(joblib_cache_path())
  391. # set this when you want to do some recursion stuff with contigtrees
  392. def set_recursion_limit():
  393. recursion_limit = 5000
  394. import sys; sys.setrecursionlimit(recursion_limit)
  395. print("Setting recursion limit to {}".format(recursion_limit))
  396. # set the recursion depth since it is always needing to be increased
  397. set_recursion_limit()
  398. #+END_SRC
  399. *** Visualizing Trajectories
  400. For each run we want to visualize trajectories of the simulations no
  401. matter if we get exit points are not.
  402. So we get traces for every end point.
Tip!

Press p or to see the previous file or, n or to see the next file

Comments

Loading...