REGISTER_CPU_OPERATOR(Adam, AdamOp<float, CPUContext>);
OPERATOR_SCHEMA(Adam)
    .NumInputs(6)
    .NumOutputs(3)
    .AllowInplace({{0, 0}, {1, 1}, {2, 2}})
    .SetDoc(R"DOC(

Computes the Adam update (https://arxiv.org/abs/1412.6980) for an
input gradient and momentum parameters. Concretely, given inputs
(param, m1, m2, grad, lr, iter),

    t = iter + 1
    corrected_local_rate = lr * sqrt(1 - power(beta2, t)) /
        (1 - power(beta1, t))
    m1_o = (beta1 * m1) + (1 - beta1) * grad
    m2_o = (beta2 * m2) + (1 - beta2) * np.square(grad)
    grad_o = corrected_local_rate * m1_o / (sqrt(m2_o) + epsilon)
    param_o = param + grad_o

and returns (param_o, m1_o, m2_o).

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment_1", "First moment history")
    .Input(2, "moment_2", "Second moment history")
    .Input(3, "grad", "Gradient computed")
    .Input(4, "lr", "Learning rate")
    .Input(5, "iter", "Iteration number")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment_1", "Updated first moment")
    .Output(2, "output_moment_2", "Updated second moment")
    .Arg("beta1", "First moment decay rate. Default 0.9")
    .Arg("beta2", "Second moment decay rate. Default 0.999")
    .Arg("epsilon", "Numerical stability constant. Default 1e-5");
REGISTER_CPU_OPERATOR(SparseAdam, SparseAdamOp<float, CPUContext>);
OPERATOR_SCHEMA(SparseAdam)
    .NumInputs(7)
    .NumOutputs(3)
    .EnforceInplace({{0, 0}, {1, 1}, {2, 2}})
    .SetDoc(R"DOC(

Computes the Adam update for the sparse case.
Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the
dense Adam update on (param, moment1[indices], moment2[indices], lr, iter),
and returns (new_param, new_moment1, new_moment2) as in the dense case.

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment_1", "First moment history")
    .Input(2, "moment_2", "Second moment history")
    .Input(3, "indices", "Sparse indices")
    .Input(4, "grad", "Gradient computed")
    .Input(5, "lr", "Learning rate")
    .Input(6, "iter", "Iteration number")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment_1", "Updated first moment")
    .Output(2, "output_moment_2", "Updated second moment")
    .Arg("beta1", "First moment decay rate. Default 0.9")
    .Arg("beta2", "Second moment decay rate. Default 0.999")
    .Arg("epsilon", "Numerical stability constant. Default 1e-5");
REGISTER_CPU_OPERATOR(
    RowWiseSparseAdam,
    RowWiseSparseAdamOp<float, CPUContext>);
OPERATOR_SCHEMA(RowWiseSparseAdam)
    .NumInputs(7)
    .NumOutputs(3)
    .EnforceInplace({{0, 0}, {1, 1}, {2, 2}})
    .SetDoc(R"DOC(

Computes a modified Adam update for the sparse case.
Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the
Adam update on (param, moment1[indices], moment2[indices], lr, iter) and
returns (new_param, new_moment1, new_moment2), where moment2 is a 1D tensor
with length equal to the number of rows in param:
shape(moment2) == shape(param)[0]. Each element of moment2 is applied to an
entire row of param, and the new moment2 values are calculated by averaging
across the row.

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment_1", "First moment history")
    .Input(2, "moment_2", "Second moment history")
    .Input(3, "indices", "Sparse indices")
    .Input(4, "grad", "Gradient computed")
    .Input(5, "lr", "Learning rate")
    .Input(6, "iter", "Iteration number")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment_1", "Updated first moment")
    .Output(2, "output_moment_2", "Updated second moment")
    .Arg("beta1", "First moment decay rate. Default 0.9")
    .Arg("beta2", "Second moment decay rate. Default 0.999")
    .Arg("epsilon", "Numerical stability constant. Default 1e-5");
SHOULD_NOT_DO_GRADIENT(Adam);
SHOULD_NOT_DO_GRADIENT(SparseAdam);
SHOULD_NOT_DO_GRADIENT(RowWiseSparseAdam);