1 #include "caffe2/operators/lpnorm_op.h" 6 bool LpNormOp<float, CPUContext>::RunOnDevice() {
8 auto* norm = Output(OUT);
10 const float* X_data = X.data<
float>();
11 const float size = average_ ? (float)X.size() : 1.0f;
12 CAFFE_ENFORCE_GT(size, 0);
14 *(norm->mutable_data<
float>()) =
15 (ConstEigenVectorMap<float>(X_data, X.size()).array()).abs().sum() /
19 *(norm->mutable_data<
float>()) =
20 (ConstEigenVectorMap<float>(X_data, X.size()).array()).square().sum() /
28 bool LpNormGradientOp<float, CPUContext>::RunOnDevice() {
29 auto& X = Input(X_IN);
30 auto& dnorm = Input(DER_NORM_IN);
31 auto* dX = Output(DER_X_OUT);
32 CAFFE_ENFORCE_EQ(dnorm.ndim(), 1);
33 CAFFE_ENFORCE_EQ(dnorm.dim32(0), 1);
35 const float kEps = 1e-12f;
36 const float size = average_ ? (float)X.size() : 1.0f;
39 for (
int i = 0; i < X.size(); ++i) {
40 float temp = (X.data<
float>())[i];
42 dX->mutable_data<
float>()[i] = -(dnorm.data<
float>())[0] / size;
43 }
else if (temp > kEps) {
44 dX->mutable_data<
float>()[i] = (dnorm.data<
float>())[0] / size;
46 dX->mutable_data<
float>()[i] = 0;
50 EigenVectorMap<float>(dX->mutable_data<
float>(), X.size()).array() =
51 ConstEigenVectorMap<float>(X.data<
float>(), X.size()).array() * 2.0f *
52 ((dnorm.data<
float>())[0] / size);
60 REGISTER_CPU_OPERATOR(LpNorm, LpNormOp<float, CPUContext>);
61 REGISTER_CPU_OPERATOR(LpNormGradient, LpNormGradientOp<float, CPUContext>);
63 OPERATOR_SCHEMA(LpNorm)
67 Given one input float tensor X, and produces one output float tensor 68 of the Lp norm of tensor X, computed as Lp(x) = sum over |x^p|, 69 in which p is either 1 or 2(currently only supports l1 and l2 norm), 70 determined by the argument p. 72 .Input(0, "X",
"1D input tensor")
73 .Output(0,
"Z",
"1D output tensor")
74 .Arg(
"p",
"Order of the norm in p-norm")
77 "whehther we calculate norm or averaged_norm." 78 "The Lp_averaged_norm(x) is defined as" 79 "Lp_averaged_norm(x) = LpNorm(x) / size(x)");
81 OPERATOR_SCHEMA(LpNormGradient)
85 Given one input float tensor X, derivative dout, and produces one output 86 float tensor dX. dX is the derivative of the Lp norm of tensor X, computed as 87 dx = d(sum over |x^p|)/dx, in which p is either 1 or 2(currently only 88 supports l1 and l2 norm) determined by the argument p. 90 .Input(0, "X",
"1D input tensor")
91 .Input(1,
"dout",
"1D input tensor")
92 .Output(0,
"dx",
"1D output tensor")
93 .Arg(
"p",
"Order of the norm in p-norm")
96 "whehther we calculate norm or averaged_norm." 97 "The Lp_averaged_norm(x) is defined as" 98 "Lp_averaged_normgradient(x) = LpNormGradient(x) / size(x)");
100 class GetLpNormGradient :
public GradientMakerBase {
101 using GradientMakerBase::GradientMakerBase;
102 vector<OperatorDef> GetGradientDefs()
override {
103 return SingleGradientDef(
106 vector<string>{I(0), GO(0)},
107 vector<string>{GI(0)});
111 REGISTER_GRADIENT(LpNorm, GetLpNormGradient);
// NOTE(review): trailing extraction residue from an unrelated docstring
// ("A global dictionary that holds information about what Caffe2 modules
// have been loaded in the current runtime") — does not belong to this file;
// kept only as a comment so it cannot break compilation.