Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

legacy_distributed_data_parallel.py 6.3 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
  1. # Copyright (c) 2017-present, Facebook, Inc.
  2. # All rights reserved.
  3. #
  4. # This source code is licensed under the license found in the LICENSE file in
  5. # the root directory of this source tree. An additional grant of patent rights
  6. # can be found in the PATENTS file in the same directory.
  7. """
  8. A modified version of the legacy DistributedDataParallel module that uses c10d
  9. communication primitives. This is necessary for models that have conditional
  10. computation (e.g., AdaptiveSoftmax) and which therefore do not work with the
  11. c10d version of DDP.
  12. This version also supports the *accumulate_grads* feature, which allows faster
  13. training with `--update-freq`.
  14. """
  15. import copy
  16. import torch
  17. from torch import nn
  18. from torch.autograd import Variable
  19. from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.

    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.

    Args:
        module (~torch.nn.Module): module to be parallelized
        world_size (int): number of parallel workers
        process_group (optional): the c10d process group to be used for
            distributed data all-reduction. If None, the default process group
            will be used.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """

    def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
        super().__init__()

        self.module = module
        self.world_size = world_size
        self.process_group = process_group

        # Never use a bigger buffer than the number of model params
        self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
        # Flat bucketing buffer; allocated lazily on first reduction so it
        # lands on the same device/dtype as the model parameters.
        self.buffer = None

        # Flag used by the NCCL backend to make sure we only reduce gradients
        # one time in the execution engine
        self.need_reduction = False

        # We can also forcibly accumulate grads locally and only do the
        # all-reduce at some later time
        self.accumulate_grads = False

        # For NCCL backend, since every single NCCL call is asynchronous, we
        # therefore directly enqueue all the NCCL reduction calls to the
        # default CUDA stream without spawning up other reduction threads.
        # This achieves the best performance.
        self._register_grad_hook()

    def __getstate__(self):
        # Pickle the plain attribute dict; the grad hooks themselves are not
        # picklable and are re-registered in __setstate__.
        attrs = copy.copy(self.__dict__)
        return attrs

    def __setstate__(self, state):
        super().__setstate__(state)
        # Re-install the per-parameter hooks, which do not survive pickling.
        self._register_grad_hook()

    def forward(self, *inputs, **kwargs):
        # Mark that gradients produced by this backward pass still need to be
        # all-reduced; the queued callback checks (and clears) this flag.
        self.need_reduction = True
        return self.module(*inputs, **kwargs)

    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """

        def all_reduce(params):
            # All-reduce the grads of *params* in one call by packing them
            # into (a slice of) self.buffer, then scatter the averaged
            # result back into each param's .grad.
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        # Missing grads contribute zeros to the reduction.
                        buffer[offset:offset+sz].zero_()
                    offset += sz
            else:
                # we only have a single grad to all-reduce
                p = params[0]
                if p.grad is not None:
                    # Reduce directly in-place on the grad; no packing needed.
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    # Param is bigger than the bucketing buffer: use a
                    # one-off zero tensor of the right shape.
                    buffer = torch.zeros_like(p)

            if nonzero_buffer:
                # Pre-divide so the all-reduce sum yields the average.
                buffer.div_(self.world_size)

            distributed_utils.all_reduce(buffer, self.process_group)

            # copy all-reduced grads back into their original place
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
                else:
                    p.grad = buffer[offset:offset+sz].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # This function only needs to be called once
            if not self.need_reduction or self.accumulate_grads:
                return
            self.need_reduction = False

            if self.buffer is None:
                # Allocate the flat buffer with the same device/dtype as the
                # model's parameters (uninitialized; contents are overwritten
                # before each reduction).
                self.buffer = next(self.module.parameters()).new(self.buffer_size)

            # All-reduce the gradients in buckets
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if not param.requires_grad:
                    continue
                if param.grad is None:
                    # Ensure every reduced param has a grad so all workers
                    # contribute the same tensor layout.
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works "
                                       "with gradients that don't require "
                                       "grad")
                sz = param.numel()
                if sz > self.buffer.numel():
                    # all-reduce big params directly
                    all_reduce([param])
                else:
                    # Flush the current bucket if this param won't fit.
                    if offset + sz > self.buffer.numel():
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz
            # Flush the final, partially-filled bucket.
            if len(buffered_params) > 0:
                all_reduce(buffered_params)

        # Now register the reduction hook on the parameters
        for p in self.module.parameters():

            def allreduce_hook(*unused):
                # Defer the reduction until the backward pass finishes;
                # queue_callback runs it once at the end of autograd.
                # (reduction_fn is itself idempotent via need_reduction.)
                Variable._execution_engine.queue_callback(reduction_fn)

            if p.requires_grad:
                p.register_hook(allreduce_hook)
Tip!

Press p or to see the previous file or, n or to see the next file

Comments

Loading...