1 #include "adagrad_op.h" 5 REGISTER_CPU_OPERATOR(Adagrad, AdagradOp<float, CPUContext>);
6 OPERATOR_SCHEMA(Adagrad)
9 .AllowInplace({{0, 0}, {1, 1}})
12 Computes the AdaGrad update for an input gradient and accumulated 13 history. Concretely, given inputs (param, grad, moment, learning_rate), 16 new_moment = moment + square(grad) 17 new_grad = learning_rate * grad / (sqrt(new_moment) + epsilon) 18 new_param = param + new_grad 19 and returns (new_param, new_moment). 22 .Input(0, "param",
"Parameters to be updated")
23 .Input(1,
"moment",
"Moment history")
24 .Input(2,
"grad",
"Gradient computed")
25 .Input(3,
"lr",
"learning rate")
26 .Output(0,
"output_param",
"Updated parameters")
27 .Output(1,
"output_moment",
"Updated moment")
28 .Arg(
"epsilon",
"Default 1e-5")
31 "Default 1. If it is in (0, 1), the gradient square sum " 32 "is decayed by this factor.");
34 REGISTER_CPU_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CPUContext>);
35 OPERATOR_SCHEMA(SparseAdagrad)
38 .EnforceOneToOneInplace()
41 Given inputs (param, moment, indices, grad, lr), runs the dense AdaGrad 42 update on (param, grad, moment[indices], lr), and returns (new_param, 43 new_moment) as in the dense case. 46 .Input(0, "param",
"Parameters to be updated")
47 .Input(1,
"moment",
"Moment history")
48 .Input(2,
"indices",
"Sparse indices")
49 .Input(3,
"grad",
"Gradient computed")
50 .Input(4,
"lr",
"learning rate")
51 .Output(0,
"output_param",
"Updated parameters")
52 .Output(1,
"output_moment_1",
"Updated moment")
53 .Arg(
"epsilon",
"Default 1e-5");
55 REGISTER_CPU_OPERATOR(
57 RowWiseSparseAdagradOp<float, CPUContext>);
58 OPERATOR_SCHEMA(RowWiseSparseAdagrad)
61 .EnforceOneToOneInplace()
64 Given inputs (param, moment, indices, grad, lr), runs a modified sparse Adagrad 65 update on (param, grad, moment[indices], lr), and returns (new_param, 66 new_momwnr), where moment is a 1D tensor with length equal to the number of 67 rows in param: shape(moment) == shape(param)[0]. Each element of moment is 68 applied to an entire row of param, and the new moment is calculated by adding 69 the average squared sum of gradients across each row. Note that indices must 70 also be a 1D tensor indexing into the rows of param. 73 .Input(0, "param",
"Parameters to be updated")
74 .Input(1,
"moment",
"Moment history")
75 .Input(2,
"indices",
"Sparse indices")
76 .Input(3,
"grad",
"Gradient computed")
77 .Input(4,
"lr",
"learning rate")
78 .Output(0,
"output_param",
"Updated parameters")
79 .Output(1,
"output_moment_1",
"Updated moment")
80 .Arg(
"epsilon",
"Default 1e-5");
// These operators ARE optimizer updates, not differentiable layers, so no
// gradient operators are registered for them.
82 SHOULD_NOT_DO_GRADIENT(Adagrad);
83 SHOULD_NOT_DO_GRADIENT(SparseAdagrad);
84 SHOULD_NOT_DO_GRADIENT(RowWiseSparseAdagrad);
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...