Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

transforms.py 8.3 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
  1. import torch
  2. from torch.nn import functional as F
  3. import numpy as np
# Numerical floors for the rational-quadratic spline transforms below.
# Each bin width/height fraction and each knot derivative is kept at least
# this large, so bins never collapse and the `heights / widths` ratio and
# log-derivative terms stay finite.
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
  7. def piecewise_rational_quadratic_transform(inputs,
  8. unnormalized_widths,
  9. unnormalized_heights,
  10. unnormalized_derivatives,
  11. inverse=False,
  12. tails=None,
  13. tail_bound=1.,
  14. min_bin_width=DEFAULT_MIN_BIN_WIDTH,
  15. min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
  16. min_derivative=DEFAULT_MIN_DERIVATIVE):
  17. if tails is None:
  18. spline_fn = rational_quadratic_spline
  19. spline_kwargs = {}
  20. else:
  21. spline_fn = unconstrained_rational_quadratic_spline
  22. spline_kwargs = {
  23. 'tails': tails,
  24. 'tail_bound': tail_bound
  25. }
  26. outputs, logabsdet = spline_fn(
  27. inputs=inputs,
  28. unnormalized_widths=unnormalized_widths,
  29. unnormalized_heights=unnormalized_heights,
  30. unnormalized_derivatives=unnormalized_derivatives,
  31. inverse=inverse,
  32. min_bin_width=min_bin_width,
  33. min_bin_height=min_bin_height,
  34. min_derivative=min_derivative,
  35. **spline_kwargs
  36. )
  37. return outputs, logabsdet
  38. def searchsorted(bin_locations, inputs, eps=1e-6):
  39. bin_locations[..., -1] += eps
  40. return torch.sum(
  41. inputs[..., None] >= bin_locations,
  42. dim=-1
  43. ) - 1
def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    """Rational-quadratic spline on [-tail_bound, tail_bound] with identity tails.

    Values inside the interval go through ``rational_quadratic_spline``;
    values outside are passed through unchanged with zero log-det
    (only ``tails='linear'`` is implemented).

    Returns:
        (outputs, logabsdet): tensors shaped like ``inputs``.

    Raises:
        RuntimeError: if ``tails`` is anything other than ``'linear'``.
    """
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        # Add one derivative slot at each end for the boundary knots.
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        # Chosen so that softplus(constant) == 1 - min_derivative; after the
        # min_derivative offset in rational_quadratic_spline the boundary
        # derivative is exactly 1, matching the identity tails.
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        # Identity map (and hence zero log-det) outside the interval.
        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    # Spline transform only for the in-interval entries.
    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )
    return outputs, logabsdet
def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    """Monotonic rational-quadratic spline transform on a rectangle.

    Maps [left, right] -> [bottom, top] via per-bin rational-quadratic
    segments parameterized by unnormalized widths, heights and knot
    derivatives (last dim = number of bins / knots).
    NOTE(review): this matches the construction in "Neural Spline Flows"
    (Durkan et al., 2019) — confirm against the project's reference.

    Returns:
        (outputs, logabsdet): transformed values and elementwise
        log|det Jacobian| of the applied direction (forward or inverse).

    Raises:
        ValueError: if inputs fall outside the domain, or the bin floors
            are too large for the number of bins.
    """
    # NOTE(review): the domain check uses [left, right] even when
    # inverse=True (where inputs live in [bottom, top]); callers here pass a
    # symmetric square (left==bottom, right==top), so it coincides — verify
    # before reusing with an asymmetric rectangle.
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    # Softmax gives positive fractions summing to 1; mixing with the floor
    # keeps every bin at least min_bin_width wide while preserving the sum.
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    # Cumulative knot x-positions, prepended with 0 then scaled to [left, right].
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    # Pin the end knots exactly to the domain edges (guards float drift),
    # then recompute widths from the clamped knots so they stay consistent.
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    # Knot derivatives: softplus keeps them positive, floored at min_derivative.
    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    # Same construction for the y-axis (heights / knot y-positions).
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    # Locate each input's bin: search the y-knots when inverting, x-knots otherwise.
    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    # Gather the per-input bin parameters (trailing [..., 0] drops the
    # singleton dim added for gather).
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    # delta: average slope of each bin.
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    # Derivatives at the bin's left and right knots.
    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        # Invert the rational-quadratic segment: solve the quadratic
        # a*theta^2 + b*theta + c = 0 for the bin-relative position theta.
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        # Numerically stable quadratic root (citardauq form).
        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        # Inverse direction: log|dx/dy| = -log|dy/dx|.
        return outputs, -logabsdet
    else:
        # Forward: theta is the input's relative position within its bin.
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        # log|dy/dx| of the rational-quadratic segment.
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
        return outputs, logabsdet
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...