relu_layer.hpp

#ifndef CAFFE_RELU_LAYER_HPP_
#define CAFFE_RELU_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

/**
 * @brief Rectified Linear Unit non-linearity @f$ y = \max(0, x) @f$.
 *        The simple max is fast to compute, and the function does not saturate.
 */
template <typename Dtype>
class ReLULayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides ReLUParameter relu_param,
   *     with ReLULayer options:
   *   - negative_slope (\b optional, default 0).
   *     the value @f$ \nu @f$ by which negative values are multiplied.
   */
  explicit ReLULayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  virtual inline const char* type() const { return "ReLU"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs @f$
   *        y = \max(0, x)
   *      @f$ by default. If a non-zero negative_slope @f$ \nu @f$ is provided,
   *      the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$.
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Computes the error gradient w.r.t. the ReLU inputs.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *      respect to the outputs
   *   -# @f$ (N \times C \times H \times W) @f$
   *      containing error gradients @f$ \frac{\partial E}{\partial y} @f$
   *      with respect to computed outputs @f$ y @f$
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$; Backward fills their diff with
   *      gradients @f$
   *        \frac{\partial E}{\partial x} = \left\{
   *        \begin{array}{lr}
   *            0 & \mathrm{if} \; x \le 0 \\
   *            \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0
   *        \end{array} \right.
   *      @f$ if propagate_down[0], by default.
   *      If a non-zero negative_slope @f$ \nu @f$ is provided,
   *      the computed gradients are @f$
   *        \frac{\partial E}{\partial x} = \left\{
   *        \begin{array}{lr}
   *            \nu \frac{\partial E}{\partial y} & \mathrm{if} \; x \le 0 \\
   *            \frac{\partial E}{\partial y} & \mathrm{if} \; x > 0
   *        \end{array} \right.
   *      @f$.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};

}  // namespace caffe

#endif  // CAFFE_RELU_LAYER_HPP_
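
The header above only declares Forward/Backward and documents the math; the bodies live in the corresponding .cpp/.cu files. As a rough, self-contained illustration of the documented formulas (not the layer's actual source), the sketch below applies y = max(0, x) + nu * min(0, x) elementwise and the matching gradient rule on plain std::vector buffers. The function names relu_forward/relu_backward are hypothetical and stand in for Caffe's Blob-based Forward_cpu/Backward_cpu.

// Standalone sketch of the ReLU math documented in relu_layer.hpp.
// Uses plain std::vector buffers instead of Caffe's Blob/Layer machinery.
#include <algorithm>
#include <cstdio>
#include <vector>

// Forward pass: y = max(0, x) + negative_slope * min(0, x), elementwise.
void relu_forward(const std::vector<float>& bottom, std::vector<float>& top,
                  float negative_slope = 0.f) {
  top.resize(bottom.size());
  for (size_t i = 0; i < bottom.size(); ++i) {
    top[i] = std::max(bottom[i], 0.f)
             + negative_slope * std::min(bottom[i], 0.f);
  }
}

// Backward pass: dE/dx = dE/dy if x > 0, negative_slope * dE/dy otherwise.
void relu_backward(const std::vector<float>& bottom,
                   const std::vector<float>& top_diff,
                   std::vector<float>& bottom_diff,
                   float negative_slope = 0.f) {
  bottom_diff.resize(bottom.size());
  for (size_t i = 0; i < bottom.size(); ++i) {
    bottom_diff[i] = top_diff[i]
        * ((bottom[i] > 0.f) + negative_slope * (bottom[i] <= 0.f));
  }
}

int main() {
  std::vector<float> x = {-2.f, -0.5f, 0.f, 1.f, 3.f};
  std::vector<float> y, dy(x.size(), 1.f), dx;
  relu_forward(x, y, 0.1f);        // leaky variant: negative_slope = 0.1
  relu_backward(x, dy, dx, 0.1f);  // gradient of the same variant
  for (size_t i = 0; i < x.size(); ++i) {
    std::printf("x=%5.2f  y=%5.2f  dE/dx=%5.2f\n", x[i], y[i], dx[i]);
  }
  return 0;
}

With negative_slope left at its default of 0, this reduces to the plain rectifier: outputs and gradients are simply zeroed wherever x <= 0. The GPU variants declared above compute the same elementwise formulas, just as CUDA kernels.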