#include "./ulp_neon.h" // declares QConvState, ConvArgs, k2b1bXBits, the GEMM tile constants, divRoundUp, qim2col
#include "caffe2/core/timer.h"
#include "caffe2/utils/math.h"

#include <arm_neon.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <cstring>
#include <memory>
#include <vector>

namespace caffe2 {

constexpr size_t kL1CacheSizeBytes = 16 * 1024;
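
// Quantizes 8 * QC floats starting at Xdata into 2-bit codes, writing QC
// bytes to each of the k2b1bXBits (= 2) bit-plane pointers in XQdata: byte
// q of plane i holds bit i of the codes for channels 8q..8q+7. QC must be
// a multiple of 8; callers pad the tail separately.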
inline void quantize2bNeon(size_t QC,
                           const float* __restrict__ Xdata,
                           float offset,
                           float inter_center_distance,
                           std::array<uint8_t*, k2b1bXBits> XQdata) {
  DCHECK_EQ(QC % 8, 0);
  const auto offset_plus_2_inter_center_distance = vdupq_n_f32(offset + 2 * inter_center_distance);
  const auto offset_plus_inter_center_distance = vdupq_n_f32(offset + inter_center_distance);
  const auto offset_ = vdupq_n_f32(offset);
  const uint8x8_t shifts = {1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7};
  for (size_t qc = 0; qc < QC; qc += 8) {
    std::array<std::array<uint8x8_t, 8>, k2b1bXBits> ps;
    for (auto i = 0; i < k2b1bXBits; ++i) {
      for (auto j = 0; j < 8; ++j) {
        ps[i][j] = vdup_n_u8(0);
      }
    }
    for (auto j = 0; j < 8; ++j) {
      const auto x0 = vld1q_f32(&Xdata[qc * 8 + j * 8 + 0]);
      const auto x1 = vld1q_f32(&Xdata[qc * 8 + j * 8 + 4]);
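
      // 2-bit code for value v (d = inter_center_distance):
      //   v <  offset      -> (p0, p1) = (0, 0)
      //   v <  offset + d  -> (1, 0)
      //   v <  offset + 2d -> (0, 1)
      //   otherwise        -> (1, 1)
      // i.e. p1 = (v >= offset + d), and p0 flags the second and fourth
      // buckets. See the scalar fallback in uniformQuantize2b1bNeon.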
      // Narrow two 32-bit lane masks into a single 8-lane byte mask.
      auto join = [](uint32x4_t a, uint32x4_t b) -> uint8x8_t {
        return vmovn_u16(vcombine_u16(vmovn_u32(a), vmovn_u32(b)));
      };
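
      // The threshold comparisons run on the integer bit patterns. For
      // IEEE-754 floats this matches the float ordering when both operands
      // are non-negative; the thresholds are positive (enforced by the
      // callers), and negative inputs only need to land in the lowest
      // bucket, which they do, since their sign bit makes them compare
      // below every positive threshold as signed integers.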
      const auto x_geq_offset_plus_2_inter_center_distance =
          join(vcgeq_s32(vreinterpretq_s32_f32(x0),
                         vreinterpretq_s32_f32(offset_plus_2_inter_center_distance)),
               vcgeq_s32(vreinterpretq_s32_f32(x1),
                         vreinterpretq_s32_f32(offset_plus_2_inter_center_distance)));
      const auto x_ge_offset =
          join(vcgeq_s32(vreinterpretq_s32_f32(x0), vreinterpretq_s32_f32(offset_)),
               vcgeq_s32(vreinterpretq_s32_f32(x1), vreinterpretq_s32_f32(offset_)));
      const auto x_lt_offset_plus_inter_center_distance =
          join(vcltq_s32(vreinterpretq_s32_f32(x0),
                         vreinterpretq_s32_f32(offset_plus_inter_center_distance)),
               vcltq_s32(vreinterpretq_s32_f32(x1),
                         vreinterpretq_s32_f32(offset_plus_inter_center_distance)));
      const auto p1_mask = vmvn_u8(x_lt_offset_plus_inter_center_distance);
      const auto p0_mask = vorr_u8(vand_u8(x_ge_offset, x_lt_offset_plus_inter_center_distance),
                                   x_geq_offset_plus_2_inter_center_distance);
      ps[0][j] = vand_u8(shifts, p0_mask);
      ps[1][j] = vand_u8(shifts, p1_mask);
    }
    // Each ps[i][j] holds one bit per lane; the pairwise-add tree sums the
    // eight lanes of each j into a single packed byte, yielding 8 output
    // bytes per bit-plane for the 64 floats of this iteration.
    for (auto i = 0; i < 2; ++i) {
      const auto p01 = vpadd_u8(ps[i][0], ps[i][1]);
      const auto p23 = vpadd_u8(ps[i][2], ps[i][3]);
      const auto p45 = vpadd_u8(ps[i][4], ps[i][5]);
      const auto p67 = vpadd_u8(ps[i][6], ps[i][7]);
      const auto p0123 = vpadd_u8(p01, p23);
      const auto p4567 = vpadd_u8(p45, p67);
      vst1_u8(XQdata[i] + qc, vpadd_u8(p0123, p4567));
    }
  }
}
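
// Quantizes a float tensor X with innermost dimension C into k2b1bXBits
// bit-plane tensors of shape (..., divRoundUp(C, 8)), row-major. The bulk
// of each row goes through quantize2bNeon; the ragged tail of a row is
// handled by the scalar fallback below.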
void uniformQuantize2b1bNeon(QConvState* state,
                             const TensorCPU& X,
                             const std::vector<std::unique_ptr<TensorCPU>>& XQ,
                             float offset,
                             float inter_center_distance) {
  CAFFE_ENFORCE_GT(X.ndim(), 1);
  const size_t C = X.dim32(X.ndim() - 1);
  const size_t N = X.size() / C;
  const size_t QC = divRoundUp(C, 8);
  auto XQs = X.dims();
  XQs[X.ndim() - 1] = QC;
  CAFFE_ENFORCE_EQ(XQ.size(), k2b1bXBits);
  for (auto i = 0; i < k2b1bXBits; ++i) {
    XQ[i]->Resize(XQs);
  }
  const float* Xdata = X.data<float>();
  std::array<uint8_t*, k2b1bXBits> XQdata;
  for (size_t i = 0; i < k2b1bXBits; ++i) {
    XQdata[i] = XQ[i]->mutable_data<uint8_t>();
  }
  CAFFE_ENFORCE_GT(offset, 0);
  CAFFE_ENFORCE_GT(inter_center_distance, 0);
  size_t QCUnroll = ((C / 8) / 8) * 8;
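  // Block rows so each worker's footprint stays within L1: a row touches
  // 4 * C bytes of float input plus 2 * C / 8 bytes of quantized output,
  // i.e. 17 * C / 4 bytes, so rowsPerBlock = 4 * L1 / (17 * C).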
  const size_t rowsPerBlock =
      std::max<size_t>(static_cast<size_t>(double(4 * kL1CacheSizeBytes) / double(17 * C)), 1);
  state->parallelFor(divRoundUp(N, rowsPerBlock), [&](size_t nb) {
    for (size_t n = nb * rowsPerBlock; n < std::min<size_t>(nb * rowsPerBlock + rowsPerBlock, N);
         ++n) {
      std::array<uint8_t*, k2b1bXBits> XQoff = {{
          XQdata[0] + 0 + QC * n, XQdata[1] + 0 + QC * n,
      }};
      quantize2bNeon(QCUnroll, &Xdata[0 + C * n], offset, inter_center_distance, XQoff);
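      // Scalar fallback for the tail channels that do not fill a full
      // 64-float NEON unroll.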
      for (size_t qc = QCUnroll; qc < QC; ++qc) {
        std::array<uint8_t, k2b1bXBits> p = {{0, 0}};
        for (size_t b = 0; b < 8; ++b) {
          const size_t c = qc * 8 + b;
          if (c < C) {
            float v = Xdata[c + C * n];
            if (v < offset) {
              // (p0, p1) stays (0, 0).
            } else if (v < offset + inter_center_distance) {
              p[0] |= 1 << b;
            } else if (v < offset + 2 * inter_center_distance) {
              p[1] |= 1 << b;
            } else {
              p[0] |= 1 << b;
              p[1] |= 1 << b;
            }
          }
        }
        for (auto i = 0; i < k2b1bXBits; ++i) {
          XQdata[i][qc + QC * n] = p[i];
        }
      }
    }
  });
}
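
// Variant of uniformQuantize2b1bNeon that writes the quantized bit-planes
// directly in the (numTiles, numTilesDepth, TileSize, TileDepthBytes)
// layout consumed by qgemm_nt_packed, so 1x1 convolutions need no separate
// packing pass.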
template <size_t TileSize, size_t TileDepthBytes>
void uniformQuantize2b1bNeonPacked(QConvState* state,
                                   const TensorCPU& X,
                                   const std::vector<std::unique_ptr<TensorCPU>>& XQ,
                                   float offset,
                                   float inter_center_distance) {
  const size_t M = X.size_to_dim(3);
  const size_t K = X.size() / M;
  const size_t QK = divRoundUp(K, 8);
  const size_t numTiles = divRoundUp(M, TileSize);
  const size_t numTilesDepth = divRoundUp(QK, TileDepthBytes);
  for (size_t i = 0; i < k2b1bXBits; ++i) {
    XQ[i]->Resize(numTiles, numTilesDepth, TileSize, TileDepthBytes);
  }
  const float* Xdata = X.data<float>();
  std::array<uint8_t*, k2b1bXBits> XQdata;
  for (auto i = 0; i < k2b1bXBits; ++i) {
    XQdata[i] = XQ[i]->mutable_data<uint8_t>();
  }
  CAFFE_ENFORCE_GT(offset, 0);
  CAFFE_ENFORCE_GT(inter_center_distance, 0);
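  // Same L1 blocking argument as in uniformQuantize2b1bNeon, with rows of K
  // floats processed TileSize at a time: each block of tiles touches about
  // 17 * K * TileSize / 4 bytes.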
  const size_t tilesPerBlock = std::max<size_t>(
      static_cast<size_t>(double(4 * kL1CacheSizeBytes) / double(17 * K * TileSize)), 1);
  state->parallelFor(divRoundUp(numTiles, tilesPerBlock), [&](size_t nb) {
    for (size_t i = nb * tilesPerBlock;
         i < std::min<size_t>(nb * tilesPerBlock + tilesPerBlock, numTiles);
         ++i) {
      for (size_t j = 0; j < numTilesDepth; ++j) {
        if (i != numTiles - 1 && j != numTilesDepth - 1) {
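          // Interior tile: every stripe is fully in bounds, so quantize
          // straight into the packed layout.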
          for (auto ii = 0; ii < TileSize; ++ii) {
            size_t m = i * TileSize + ii;
            size_t k = j * TileDepthBytes * 8;
            std::array<uint8_t*, k2b1bXBits> XQoff = {
                {XQdata[0] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i,
                 XQdata[1] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i}};
            quantize2bNeon(TileDepthBytes, &Xdata[m * K + k], offset, inter_center_distance, XQoff);
          }
        } else {
          for (size_t ii = 0; ii < TileSize; ++ii) {
            size_t m = i * TileSize + ii;
            size_t k = j * TileDepthBytes * 8;
            std::array<uint8_t*, k2b1bXBits> XQoff = {
                {XQdata[0] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i,
                 XQdata[1] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i}};
            if (m < M && k + TileDepthBytes * 8 <= K) {
              // The full stripe is in bounds; quantize it directly.
              quantize2bNeon(
                  TileDepthBytes, &Xdata[m * K + k], offset, inter_center_distance, XQoff);
            } else {
              // Edge stripe: stage through a zero-padded buffer so that
              // quantize2bNeon can read a full 8 * TileDepthBytes floats.
              std::array<float, 8 * TileDepthBytes> Xpad = {{0}};
              if (m < M) {
                std::copy(&Xdata[m * K + k], &Xdata[m * K + K], Xpad.begin());
              }
              quantize2bNeon(TileDepthBytes, Xpad.data(), offset, inter_center_distance, XQoff);
            }
          }
        }
      }
    }
  });
}
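
// Packs a 2D byte matrix of shape (M, QK) (M = product of the dimensions
// before `axis`) into the (numTiles, numTilesDepth, TileSize,
// TileDepthBytes) tile layout used by the GEMM, zero-filling the ragged
// edge tiles.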
template <size_t TileSize, size_t TileDepthBytes>
void qpack_tiles(QConvState* state, const TensorCPU& X, size_t axis, TensorCPU* XP) {
  const size_t M = X.size_to_dim(axis);
  const size_t QK = X.size() / M;
  const size_t numTiles = divRoundUp(M, TileSize);
  const size_t numTilesDepth = divRoundUp(QK, TileDepthBytes);
  XP->Resize(numTiles, numTilesDepth, TileSize, TileDepthBytes);
  const auto* __restrict__ Xdata = X.data<uint8_t>();
  auto* __restrict__ XPdata = XP->mutable_data<uint8_t>();
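  // A tile row of the input and its packed copy together touch about
  // 2 * TileSize * QK bytes, so block tiles to keep that within L1.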
  const size_t tilesPerBlock = std::max<size_t>(
      static_cast<size_t>(double(kL1CacheSizeBytes) / double(2 * TileSize * QK)), 1);
  state->parallelFor(divRoundUp(numTiles, tilesPerBlock), [&](size_t nb) {
    for (size_t i = nb * tilesPerBlock;
         i < std::min<size_t>(nb * tilesPerBlock + tilesPerBlock, numTiles);
         ++i) {
      for (size_t j = 0; j < numTilesDepth; ++j) {
        if (i != numTiles - 1 && j != numTilesDepth - 1) {
          // Interior tile: copy each row's stripe with a straight memcpy.
          for (auto ii = 0; ii < TileSize; ++ii) {
            auto m = i * TileSize + ii;
            auto qk = j * TileDepthBytes;
            std::memcpy(&XPdata[TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                                TileSize * TileDepthBytes * numTilesDepth * i],
                        &Xdata[m * QK + qk],
                        TileDepthBytes);
          }
        } else {
          // Edge tile: copy byte by byte, zero-filling out-of-bounds slots.
          for (size_t ii = 0; ii < TileSize; ++ii) {
            for (size_t jj = 0; jj < TileDepthBytes; ++jj) {
              size_t m = i * TileSize + ii;
              size_t qk = j * TileDepthBytes + jj;
              uint8_t pval = 0;
              if (m < M && qk < QK) {
                pval = Xdata[m * QK + qk];
              }
              XPdata[jj + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i] = pval;
            }
          }
        }
      }
    }
  });
}
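
// Inner GEMM kernel on packed binary tiles: computes a kUnrollM x kUnrollN
// block of C = A * B^T where elements are single bits encoding {-1, +1}.
// For each output element it accumulates popcount(A ^ B) over QK bytes and
// converts the count to the signed dot product 8 * QK - 2 * popcount. The
// epilogue functor f(dst, value, col) receives four converted outputs at a
// time and decides how to write them into C.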
template <size_t kUnrollM, size_t kUnrollN, size_t TileDepthBytes, typename F>
void qgess_packed(const uint8_t* __restrict__ Ablock,
                  const uint8_t* __restrict__ Bblock,
                  float* __restrict__ Cblock,
                  const size_t Cstride,
                  const size_t QK,
                  const size_t Nstart,
                  F&& f) {
  static_assert(kUnrollN % 8 == 0, "kUnrollN must be a multiple of 8");
  static_assert(TileDepthBytes == 16, "kernel is specialized for 16-byte depth tiles");
  DCHECK_EQ(QK % 16, 0);
  uint16x8_t acc[kUnrollM][kUnrollN / 8];
  for (size_t mm = 0; mm < kUnrollM; ++mm) {
    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      acc[mm][nn] = vdupq_n_u16(0);
    }
  }
  size_t qk = 0;
  const size_t QK16Unroll = (QK / 16) * 16;
  for (; qk < QK16Unroll; qk += 16) {
    uint8x16_t Areg[kUnrollM];
    for (size_t mm = 0; mm < kUnrollM; ++mm) {
      Areg[mm] = vld1q_u8(Ablock);
      Ablock += 16;
    }
    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      uint8x16_t Breg[8];
      for (size_t nnn = 0; nnn < 8; ++nnn) {
        Breg[nnn] = vld1q_u8(Bblock);
        Bblock += 16;
      }
      for (size_t mm = 0; mm < kUnrollM; ++mm) {
        // XOR-popcount, then pairwise-add the 16 per-byte counts of each
        // Breg down to a 16-bit partial sum per output column.
        uint8x16_t cnts[8];
        for (size_t nnn = 0; nnn < 8; ++nnn) {
          cnts[nnn] = vcntq_u8(veorq_u8(Breg[nnn], Areg[mm]));
        }
        uint8x8_t ps[8];
        for (size_t nnn = 0; nnn < 8; ++nnn) {
          ps[nnn] = vadd_u8(vget_low_u8(cnts[nnn]), vget_high_u8(cnts[nnn]));
        }
        uint8x8_t pss[4];
        for (size_t nnn = 0; nnn < 4; ++nnn) {
          pss[nnn] = vpadd_u8(ps[2 * nnn], ps[2 * nnn + 1]);
        }
        uint8x8_t psss[2];
        for (size_t nnn = 0; nnn < 2; ++nnn) {
          psss[nnn] = vpadd_u8(pss[2 * nnn], pss[2 * nnn + 1]);
        }
        uint8x16_t out = vcombine_u8(psss[0], psss[1]);
        acc[mm][nn] = vpadalq_u8(acc[mm][nn], out);
      }
    }
  }
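
  // Epilogue: each uint16 accumulator lane now holds popcount(A ^ B) over
  // all QK bytes, at most 8 * QK = K < 2^16 (hence the uint16 accumulators
  // and the CAFFE_ENFORCE_LT(K, 2^16) in qgemm_nt_packed). Convert to the
  // signed sum 8 * QK - 2 * popcount and hand it to the functor.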
  for (size_t mm = 0; mm < kUnrollM; ++mm) {
    auto* Crow = Cblock + mm * Cstride;
    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      const int32x4_t K_ = vdupq_n_s32(QK * 8);
      const int16x4_t two = vdup_n_s16(2);
      const int16x4_t acc0123_l = vreinterpret_s16_u16(vget_low_u16(acc[mm][nn]));
      const int16x4_t acc0123_h = vreinterpret_s16_u16(vget_high_u16(acc[mm][nn]));
      const int32x4_t K_minus_2_acc0123_l = vmlsl_s16(K_, two, acc0123_l);
      const int32x4_t K_minus_2_acc0123_h = vmlsl_s16(K_, two, acc0123_h);
      f(Crow + nn * 8 + 0, vcvtq_f32_s32(K_minus_2_acc0123_l), Nstart + nn * 8 + 0);
      f(Crow + nn * 8 + 4, vcvtq_f32_s32(K_minus_2_acc0123_h), Nstart + nn * 8 + 4);
    }
  }
}
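
// Tiled binary GEMM: C (M x N, float) = A (M x QK bytes) * B^T, with A and
// B in the packed four-dimensional tile layout produced by qpack_tiles or
// uniformQuantize2b1bNeonPacked. Tiles are grouped into L1-sized square
// blocks, and the blocks are distributed across workers.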
template <size_t TileSize, size_t TileDepthBytes, typename F>
inline void qgemm_nt_packed(
    QConvState* state, const TensorCPU& A, const TensorCPU& B, TensorCPU* C, F&& f = F()) {
  CAFFE_ENFORCE_EQ(A.ndim(), 4);
  CAFFE_ENFORCE_EQ(B.ndim(), 4);
  CAFFE_ENFORCE_EQ(A.dim(2), TileSize);
  CAFFE_ENFORCE_EQ(B.dim(2), TileSize);
  CAFFE_ENFORCE_EQ(A.dim(3), TileDepthBytes);
  CAFFE_ENFORCE_EQ(B.dim(3), TileDepthBytes);
  const size_t MT = A.dim(0);
  const size_t NT = B.dim(0);
  const size_t M = MT * TileSize;
  const size_t N = NT * TileSize;

  const size_t QKT = A.dim(1);
  const size_t K = QKT * 8 * TileDepthBytes;
  const size_t QK = K / 8;
  CAFFE_ENFORCE_EQ(A.dim(1), B.dim(1));
  C->Resize(M, N);
  const auto* Adata = A.data<uint8_t>();
  const auto* Bdata = B.data<uint8_t>();
  auto* Cdata = C->mutable_data<float>();
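
  // Pick the largest square block of tiles whose working set fits in L1:
  // with t = tilesPerBlock * TileSize rows, a block reads t * K / 8 bytes
  // from each of A and B and writes 4 * t * t bytes of C. Solving
  // 4 * t^2 + 2 * t * K / 8 = kL1CacheSizeBytes for t gives
  // t = (sqrt(256 * L1 + K^2) - K) / 32, i.e. the expression below times
  // TileSize.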
  size_t tilesPerBlock = static_cast<size_t>(
      std::floor((std::sqrt(256 * kL1CacheSizeBytes + K * K) - K) / (32 * TileSize)));
  if (tilesPerBlock < 1) {
    tilesPerBlock = 1;
  }
  // The qgess_packed accumulators are uint16 and hold up to K per lane.
  CAFFE_ENFORCE_LT(K, std::pow(2, 16));
  CAFFE_ENFORCE_EQ(M % TileSize, 0);
  CAFFE_ENFORCE_EQ(N % TileSize, 0);
  const size_t MNumTiles = M / TileSize;
  const size_t NNumTiles = N / TileSize;
  const size_t MNumBlocks = divRoundUp(MNumTiles, tilesPerBlock);
  const size_t NNumBlocks = divRoundUp(NNumTiles, tilesPerBlock);
  state->parallelFor(MNumBlocks * NNumBlocks, [&](size_t mn) {
    const size_t mBlockIdx = mn / NNumBlocks;
    const size_t nBlockIdx = mn % NNumBlocks;
    const size_t mTileStart = mBlockIdx * tilesPerBlock;
    const size_t nTileStart = nBlockIdx * tilesPerBlock;
    for (size_t mBlockTileIdx = 0;
         mBlockTileIdx < tilesPerBlock && mBlockTileIdx + mTileStart < MNumTiles;
         ++mBlockTileIdx) {
      const size_t mTileIdx = mBlockTileIdx + mTileStart;
      for (size_t nBlockTileIdx = 0;
           nBlockTileIdx < tilesPerBlock && nBlockTileIdx + nTileStart < NNumTiles;
           ++nBlockTileIdx) {
        const size_t nTileIdx = nBlockTileIdx + nTileStart;
        // Each packed tile is TileSize * QK bytes; C is a plain row-major
        // M x N float matrix.
        const auto* Ablock = &Adata[mTileIdx * QK * TileSize];
        const auto* Bblock = &Bdata[nTileIdx * QK * TileSize];
        auto* Cblock = &Cdata[mTileIdx * TileSize * N + nTileIdx * TileSize];
        const size_t Cstride = N;
        qgess_packed<TileSize, TileSize, TileDepthBytes, F>(
            Ablock, Bblock, Cblock, Cstride, QK, nTileIdx * TileSize, std::forward<F>(f));
      }
    }
  });
}
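
// Runs the 2-bit-activation / 1-bit-weight convolution as im2col + binary
// GEMM: quantize X into two bit-planes, (for non-1x1 filters) im2col and
// pack each plane, then GEMM each plane against the packed weights with an
// epilogue that folds in the filter normalization term WQN.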
void run2b1bConvIm2ColGEMM(QConvState* state,
                           const ConvArgs& args,
                           const TensorCPU& X,
                           TensorCPU* Y) {
  const size_t KH = state->WQ->dim32(1);
  const size_t KW = state->WQ->dim32(2);
  const size_t OH = (X.dim32(1) - KH + args.pad_t + args.pad_b) / args.stride_h + 1;
  const size_t OW = (X.dim32(2) - KW + args.pad_l + args.pad_r) / args.stride_w + 1;
  const size_t OC = state->WQ->dim32(0);
  const size_t QK = KH * KW * divRoundUp(X.dim32(3), 8);
  Y->Resize(X.dim32(0), OH, OW, OC);
  if (!state->WQPacked) {
    state->WQPacked = caffe2::make_unique<TensorCPU>();
    qpack_tiles<kGEMMTileSize, kGEMMTileDepthBytes>(state, *(state->WQ), 1, state->WQPacked.get());
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(0), divRoundUp(OC, kGEMMTileSize));
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(1), divRoundUp(QK, kGEMMTileDepthBytes));
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(2), kGEMMTileSize);
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(3), kGEMMTileDepthBytes);
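
    // Fold the bias into the filter normalization term WQN. The GEMM
    // epilogues below compute 3/2 * WQN + 1/2 * v0 + v1 per output channel,
    // so adding 2/3 * bias to WQN adds exactly bias to the output.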
    if (state->bias) {
      for (size_t i = 0; i < state->bias->size(); ++i) {
        state->WQN->mutable_data<float>()[i] += 2.0f / 3 * state->bias->data<float>()[i];
      }
      state->bias.reset();
    }
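
    // The packed depth dimension is padded to a multiple of
    // kGEMMTileDepthBytes with zero bytes in both operands. Each padded bit
    // adds +1 to both per-plane GEMM outputs, inflating the epilogue result
    // by 3/2 * QKPadding * 8 per channel; subtracting QKPadding * 8 from
    // WQN cancels this exactly.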
    const size_t QKPadding = divRoundUp(QK, kGEMMTileDepthBytes) * kGEMMTileDepthBytes - QK;
    if (QKPadding != 0) {
      for (size_t i = 0; i < state->WQN->size(); ++i) {
        state->WQN->mutable_data<float>()[i] -= QKPadding * 8;
      }
    }
  }
  CAFFE_ENFORCE(!state->bias.get());
  const bool is_1x1 = KH == 1 && KW == 1 && args.pad_l == 0 && args.pad_r == 0 && args.pad_b == 0 &&
      args.pad_t == 0 && args.stride_h == 1 && args.stride_w == 1;
  if (is_1x1) {
    CAFFE_ENFORCE_EQ(OH, X.dim32(1));
    CAFFE_ENFORCE_EQ(OW, X.dim32(2));
    uniformQuantize2b1bNeonPacked<kGEMMTileSize, kGEMMTileDepthBytes>(
        state, X, state->XQs, 0.5, 1.0);
  } else {
    uniformQuantize2b1bNeon(state, X, state->XQs, 0.5, 1.0);
  }
  TensorCPU* YQ0 = state->YQs[0].get();
  if (state->WQ->dim32(0) % kGEMMTileSize == 0) {
    // OC is already tile-aligned, so the GEMM can write directly into Y;
    // otherwise it goes through the padded scratch output YQ0 and the valid
    // region is copied out below.
    YQ0 = Y;
  }
  for (size_t i = 0; i < k2b1bXBits; ++i) {
    const auto& XQ = *(state->XQs[i]);
    if (!is_1x1) {
      qim2col(args, XQ, *(state->WQ), state->scratchColBuffer.get());
      qpack_tiles<kGEMMTileSize, kGEMMTileDepthBytes>(
          state, *(state->scratchColBuffer), 3, state->scratch.get());
    }
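    // Bit-plane 0 is the low-order bit and enters at weight 1/2 together
    // with the 3/2 * WQN normalization; bit-plane 1 enters at weight 1
    // (twice the significance) and simply accumulates on top.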
    if (i == 0) {
      const auto* __restrict__ WQNdata = state->WQN->data<float>();
      qgemm_nt_packed<kGEMMTileSize, kGEMMTileDepthBytes>(
          state,
          is_1x1 ? XQ : *(state->scratch),
          *(state->WQPacked),
          YQ0,
          [WQNdata](float* __restrict__ acc, float32x4_t value, size_t channel) {
            const float32x4_t _32 = vdupq_n_f32(3.0f / 2);
            const float32x4_t _12 = vdupq_n_f32(1.0f / 2);
            const float32x4_t WQNc_32 = vmulq_f32(_32, vld1q_f32(WQNdata + channel));
            const float32x4_t WQNc_32_value_12 = vmlaq_f32(WQNc_32, _12, value);
            vst1q_f32(acc, WQNc_32_value_12);
          });
    } else {
      qgemm_nt_packed<kGEMMTileSize, kGEMMTileDepthBytes>(
          state,
          is_1x1 ? XQ : *(state->scratch),
          *(state->WQPacked),
          YQ0,
          [](float* __restrict__ acc, float32x4_t value, size_t channel) {
            const float32x4_t curr = vld1q_f32(acc);
            vst1q_f32(acc, vaddq_f32(curr, value));
          });
    }
  }
  if (YQ0 != Y) {
    // The GEMM wrote into the padded scratch output; copy the valid N x F
    // region into Y, dropping the padding columns.
    const size_t F = state->WQ->dim(0);
    const size_t N = Y->size() / F;
    const size_t NP = YQ0->dim32(0);
    const size_t FP = YQ0->dim32(1);
    CAFFE_ENFORCE_LE(N, NP);
    math::CopyMatrix<CPUContext>(
        sizeof(float), N, F, YQ0->data<float>(), FP, Y->mutable_data<float>(), F, nullptr);
  } else {
    // The GEMM wrote directly into Y with padded rows; trim them and
    // restore the NHWC shape.
    CAFFE_ENFORCE_EQ(Y->dim32(0), divRoundUp(X.dim32(0) * OH * OW, kGEMMTileSize) * kGEMMTileSize);
    CAFFE_ENFORCE_EQ(Y->dim32(1), OC);
    Y->Shrink(X.dim32(0) * OH * OW);
    Y->Reshape(std::vector<TIndex>{{TIndex(X.dim(0)), TIndex(OH), TIndex(OW), TIndex(OC)}});
  }
}
bool run2b1bConvNeon(QConvState* state, const ConvArgs& args, const TensorCPU& X, TensorCPU* Y) {
  CAFFE_ENFORCE_EQ(X.ndim(), 4);
  run2b1bConvIm2ColGEMM(state, args, X, Y);
  return true;
}

} // namespace caffe2