Caffe2 - C++ API
A deep learning, cross-platform ML framework
lpnorm_op.cc
#include "caffe2/operators/lpnorm_op.h"

namespace caffe2 {

template <>
bool LpNormOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(X_IN);
  auto* norm = Output(OUT);
  norm->Resize(1);
  const float* X_data = X.data<float>();
  const float size = average_ ? (float)X.size() : 1.0f;
  CAFFE_ENFORCE_GT(size, 0);
  if (p_ == 1) {
    *(norm->mutable_data<float>()) =
        (ConstEigenVectorMap<float>(X_data, X.size()).array()).abs().sum() /
        size;
    // L1(x) = sum(|x|), L1_average(x) = sum(|x|) / x.size()
  } else if (p_ == 2) {
    *(norm->mutable_data<float>()) =
        (ConstEigenVectorMap<float>(X_data, X.size()).array()).square().sum() /
        size;
    // L2(x) = sum(|x|^2), L2_average(x) = sum(|x|^2) / x.size()
  }
  return true;
}

template <>
bool LpNormGradientOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(X_IN);
  auto& dnorm = Input(DER_NORM_IN);
  auto* dX = Output(DER_X_OUT);
  CAFFE_ENFORCE_EQ(dnorm.ndim(), 1);
  CAFFE_ENFORCE_EQ(dnorm.dim32(0), 1);
  dX->ResizeLike(X);
  const float kEps = 1e-12f;
  const float size = average_ ? (float)X.size() : 1.0f;
  if (p_ == 1) {
    // TODO: implement in Eigen
    for (int i = 0; i < X.size(); ++i) {
      float temp = (X.data<float>())[i];
      if (temp < -kEps) {
        dX->mutable_data<float>()[i] = -(dnorm.data<float>())[0] / size;
      } else if (temp > kEps) {
        dX->mutable_data<float>()[i] = (dnorm.data<float>())[0] / size;
      } else {
        dX->mutable_data<float>()[i] = 0;
      }
    }
  } else if (p_ == 2) {
    EigenVectorMap<float>(dX->mutable_data<float>(), X.size()).array() =
        ConstEigenVectorMap<float>(X.data<float>(), X.size()).array() * 2.0f *
        ((dnorm.data<float>())[0] / size);
  }

  return true;
}

namespace {
// LpNorm
REGISTER_CPU_OPERATOR(LpNorm, LpNormOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(LpNormGradient, LpNormGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(LpNorm)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Given one input float tensor X, this op produces one output float tensor
containing the Lp norm of X, computed as Lp(x) = sum over |x|^p, where p is
either 1 or 2 (currently only the L1 and L2 norms are supported), as
determined by the argument p.
)DOC")
    .Input(0, "X", "1D input tensor")
    .Output(0, "Z", "1D output tensor")
    .Arg("p", "Order of the norm in p-norm")
    .Arg(
        "average",
        "Whether to compute the norm or the averaged norm. "
        "The Lp_averaged_norm(x) is defined as "
        "Lp_averaged_norm(x) = LpNorm(x) / size(x)");
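// Worked example (values are illustrative, not part of the original source):
// for X = [1, -2, 3],
//   LpNorm(X, p=1) = |1| + |-2| + |3|       = 6
//   LpNorm(X, p=2) = 1^2 + (-2)^2 + 3^2     = 14  (sum of squares, no square root)
// and with average = true each result is further divided by size(X) = 3.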

OPERATOR_SCHEMA(LpNormGradient)
    .NumInputs(2)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Given one input float tensor X and the incoming gradient dout, this op
produces one output float tensor dX, the derivative of the Lp norm of X:
dX = dout * d(LpNorm(X))/dX, where p is either 1 or 2 (currently only the
L1 and L2 norms are supported), as determined by the argument p.
)DOC")
    .Input(0, "X", "1D input tensor")
    .Input(1, "dout", "1D input tensor")
    .Output(0, "dx", "1D output tensor")
    .Arg("p", "Order of the norm in p-norm")
    .Arg(
        "average",
        "Whether to compute the norm or the averaged norm. "
        "The gradient of the averaged norm is defined as "
        "Lp_averaged_normgradient(x) = LpNormGradient(x) / size(x)");
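// Worked example (values are illustrative, not part of the original source):
// for X = [1, -2, 3], dout = [0.5], and average = false (size = 1),
//   p = 1: dX = sign(X) * dout = [ 0.5, -0.5, 0.5]
//   p = 2: dX = 2 * X * dout   = [ 1.0, -2.0, 3.0]
// With average = true, each entry is further divided by size(X) = 3.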

class GetLpNormGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "LpNormGradient",
        "",
        vector<string>{I(0), GO(0)},
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(LpNorm, GetLpNormGradient);
} // namespace

} // namespace caffe2
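Below is a minimal usage sketch, not part of lpnorm_op.cc. It assumes the classic Caffe2 C++ workspace API (Workspace, TensorCPU, OperatorDef, CreateOperator) and shows one way the registered LpNorm operator could be run on a small CPU tensor; the blob names "X"/"Z", the input values, and the main() harness are illustrative only.

#include <cstdio>

#include "caffe2/core/init.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/workspace.h"

int main(int argc, char** argv) {
  caffe2::GlobalInit(&argc, &argv);
  caffe2::Workspace ws;

  // Fill a 1D input tensor X = [1, -2, 3].
  auto* X = ws.CreateBlob("X")->GetMutable<caffe2::TensorCPU>();
  X->Resize(3);
  float* X_data = X->mutable_data<float>();
  X_data[0] = 1.f;
  X_data[1] = -2.f;
  X_data[2] = 3.f;

  // Describe an LpNorm op that reads "X" and writes "Z", with p = 2.
  caffe2::OperatorDef def;
  def.set_type("LpNorm");
  def.add_input("X");
  def.add_output("Z");
  auto* p_arg = def.add_arg();
  p_arg->set_name("p");
  p_arg->set_i(2);

  // Instantiate the registered CPU operator and run it once.
  auto op = caffe2::CreateOperator(def, &ws);
  op->Run();

  // Z holds a single element: 1 + 4 + 9 = 14 (sum of squares, no square root).
  const auto& Z = ws.GetBlob("Z")->Get<caffe2::TensorCPU>();
  std::printf("LpNorm(X, p=2) = %f\n", Z.data<float>()[0]);
  return 0;
}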