Caffe2 - C++ API
A deep learning, cross-platform ML framework
ulp_neon.cc
#include "ulp_neon.h"
#include "caffe2/core/timer.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// TODO: tune this with cache size detection code. Changing to 32 helps on some
// devices (Snapdragon 820).
constexpr size_t kL1CacheSizeBytes = 16 * 1024;

#ifdef __ARM_NEON__

// Applies 2-bit uniform quantization to the floating point data at Xdata,
// storing QC bytes into XQdata (i.e. reading 8 * QC floats from Xdata).
// Requires QC to be a multiple of 8.
inline void quantize2bNeon(size_t QC,
                           const float* __restrict__ Xdata,
                           float offset,
                           float inter_center_distance,
                           std::array<uint8_t*, k2b1bXBits> XQdata) {
  DCHECK_EQ(QC % 8, 0);
  const auto offset_plus_2_inter_center_distance = vdupq_n_f32(offset + 2 * inter_center_distance);
  const auto offset_plus_inter_center_distance = vdupq_n_f32(offset + inter_center_distance);
  const auto offset_ = vdupq_n_f32(offset);
  const uint8x8_t shifts = {1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7};

  for (size_t qc = 0; qc < QC; qc += 8) {
    std::array<std::array<uint8x8_t, 8>, k2b1bXBits> ps;
    for (auto i = 0; i < k2b1bXBits; ++i) {
      for (auto j = 0; j < 8; ++j) {
        ps[i][j] = vdup_n_u8(0);
      }
    }

    for (auto j = 0; j < 8; ++j) {
      const auto x0 = vld1q_f32(&Xdata[qc * 8 + j * 8 + 0]);
      const auto x1 = vld1q_f32(&Xdata[qc * 8 + j * 8 + 4]);

      // Logic:
      // if (v >= offset + inter_center_distance) {
      //   p[1] |= 1 << b;
      // } else {
      //   p[1] |= 0 << b;
      // }
      //
      // if ((v >= offset && v < offset + inter_center_distance) ||
      //     (v >= offset + 2 * inter_center_distance)) {
      //   p[0] |= 1 << b;
      // } else {
      //   p[0] |= 0 << b;
      // }
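      // Summary table (added for clarity; derived from the logic above and the
      // scalar fallback in uniformQuantize2b1bNeon below), with
      // d = inter_center_distance, p0 = bit plane XQdata[0], p1 = XQdata[1]:
      //   v <  offset                  -> p0 = 0, p1 = 0
      //   offset      <= v < offset+d  -> p0 = 1, p1 = 0
      //   offset+d    <= v < offset+2d -> p0 = 0, p1 = 1
      //   v >= offset+2d               -> p0 = 1, p1 = 1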

      auto join = [](uint32x4_t a, uint32x4_t b) -> uint8x8_t {
        return vmovn_u16(vcombine_u16(vmovn_u32(a), vmovn_u32(b)));
      };

      // Note: the comparisons below reinterpret the float bit patterns as
      // signed integers; since the thresholds are positive (enforced by the
      // callers), this gives the same results as float comparisons.
      const auto x_geq_offset_plus_2_inter_center_distance =
          join(vcgeq_s32(vreinterpretq_s32_f32(x0),
                         vreinterpretq_s32_f32(offset_plus_2_inter_center_distance)),
               vcgeq_s32(vreinterpretq_s32_f32(x1),
                         vreinterpretq_s32_f32(offset_plus_2_inter_center_distance)));
      const auto x_ge_offset =
          join(vcgeq_s32(vreinterpretq_s32_f32(x0), vreinterpretq_s32_f32(offset_)),
               vcgeq_s32(vreinterpretq_s32_f32(x1), vreinterpretq_s32_f32(offset_)));

      const auto x_lt_offset_plus_inter_center_distance =
          join(vcltq_s32(vreinterpretq_s32_f32(x0),
                         vreinterpretq_s32_f32(offset_plus_inter_center_distance)),
               vcltq_s32(vreinterpretq_s32_f32(x1),
                         vreinterpretq_s32_f32(offset_plus_inter_center_distance)));

      const auto p1_mask = vmvn_u8(x_lt_offset_plus_inter_center_distance);
      const auto p0_mask = vorr_u8(vand_u8(x_ge_offset, x_lt_offset_plus_inter_center_distance),
                                   x_geq_offset_plus_2_inter_center_distance);
      ps[0][j] = vand_u8(shifts, p0_mask);
      ps[1][j] = vand_u8(shifts, p1_mask);
    }

    // Three rounds of pairwise adds sum the eight bytes of each ps[i][j]
    // (one bit per byte) into a single packed byte, one per group of eight
    // inputs.
    for (auto i = 0; i < 2; ++i) {
      const auto p01 = vpadd_u8(ps[i][0], ps[i][1]);
      const auto p23 = vpadd_u8(ps[i][2], ps[i][3]);
      const auto p45 = vpadd_u8(ps[i][4], ps[i][5]);
      const auto p67 = vpadd_u8(ps[i][6], ps[i][7]);
      const auto p0123 = vpadd_u8(p01, p23);
      const auto p4567 = vpadd_u8(p45, p67);
      vst1_u8(XQdata[i] + qc, vpadd_u8(p0123, p4567));
    }
  }
}
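
// Hypothetical usage sketch (not part of the original file): quantizing 64
// floats into two 8-byte bit planes, using the offset/inter_center_distance
// values passed by run2b1bConvIm2ColGEMM below (0.5 and 1.0).
//
//   std::vector<float> x(64);
//   std::vector<uint8_t> plane0(8), plane1(8);
//   quantize2bNeon(8, x.data(), 0.5f, 1.0f, {{plane0.data(), plane1.data()}});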

void uniformQuantize2b1bNeon(QConvState* state,
                             const TensorCPU& X,
                             const std::vector<std::unique_ptr<TensorCPU>>& XQ,
                             float offset,
                             float inter_center_distance) {
  CAFFE_ENFORCE_GT(X.ndim(), 1);
  const size_t C = X.dim32(X.ndim() - 1);
  const size_t N = X.size() / C;
  const size_t QC = divRoundUp(C, 8);
  auto XQs = X.dims();
  XQs[X.ndim() - 1] = QC;
  CAFFE_ENFORCE_EQ(XQ.size(), k2b1bXBits);
  for (auto i = 0; i < k2b1bXBits; ++i) {
    XQ[i]->Resize(XQs);
  }
  const float* Xdata = X.data<float>();
  std::array<uint8_t*, k2b1bXBits> XQdata;
  for (size_t i = 0; i < k2b1bXBits; ++i) {
    XQdata[i] = XQ[i]->mutable_data<uint8_t>();
  }
  CAFFE_ENFORCE_GT(offset, 0);
  CAFFE_ENFORCE_GT(inter_center_distance, 0);
  size_t QCUnroll = ((C / 8) / 8) * 8;
  // Each worker loads an L1-cache-sized block.
  // We read/write B * K * 4 + 2 * B * (K / 8) bytes per block of B rows of
  // width K, so to fit inside a cache of size C, we have B = 4 * C / (17 * K).
  // QCUnroll = 0;
  const size_t rowsPerBlock =
      std::max<size_t>(std::floor<size_t>(double(4 * kL1CacheSizeBytes) / double(17 * C)), 1);
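  // Worked example (added; a row width of 256 channels is an assumed value):
  // with the 16 KB L1 size above,
  //   rowsPerBlock = floor(4 * 16384 / (17 * 256)) = floor(15.06) = 15.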
  state->parallelFor(divRoundUp(N, rowsPerBlock), [&](size_t nb) {
    for (size_t n = nb * rowsPerBlock; n < std::min<size_t>(nb * rowsPerBlock + rowsPerBlock, N);
         ++n) {
      std::array<uint8_t*, k2b1bXBits> XQoff = {{
          XQdata[0] + 0 + QC * n, XQdata[1] + 0 + QC * n,
      }};
      quantize2bNeon(QCUnroll, &Xdata[0 + C * n], offset, inter_center_distance, XQoff);
      for (size_t qc = QCUnroll; qc < QC; ++qc) {
        // Compute the remaining bytes of this row with scalar code.
        std::array<uint8_t, k2b1bXBits> p = {{0, 0}};
        for (size_t b = 0; b < 8; ++b) {
          const size_t c = qc * 8 + b;
          if (c < C) {
            float v = Xdata[c + C * n];
            if (v < offset) {
              // zero'd already.
            } else if (v < offset + inter_center_distance) {
              p[0] |= 1 << b;
            } else if (v < offset + 2 * inter_center_distance) {
              p[1] |= 1 << b;
            } else {
              p[0] |= 1 << b;
              p[1] |= 1 << b;
            }
          }
        }
        for (auto i = 0; i < k2b1bXBits; ++i) {
          XQdata[i][qc + QC * n] = p[i];
        }
      }
    }
  });
}

template <size_t TileSize, size_t TileDepthBytes>
void uniformQuantize2b1bNeonPacked(QConvState* state,
                                   const TensorCPU& X,
                                   const std::vector<std::unique_ptr<TensorCPU>>& XQ,
                                   float offset,
                                   float inter_center_distance) {
  const size_t M = X.size_to_dim(3);
  const size_t K = X.size() / M;
  const size_t QK = divRoundUp(K, 8);
  const size_t numTiles = divRoundUp(M, TileSize);
  const size_t numTilesDepth = divRoundUp(QK, TileDepthBytes);
  for (size_t i = 0; i < k2b1bXBits; ++i) {
    XQ[i]->Resize(numTiles, numTilesDepth, TileSize, TileDepthBytes);
  }
  const float* Xdata = X.data<float>();
  std::array<uint8_t*, k2b1bXBits> XQdata;
  for (auto i = 0; i < k2b1bXBits; ++i) {
    XQdata[i] = XQ[i]->mutable_data<uint8_t>();
  }
  CAFFE_ENFORCE_GT(offset, 0);
  CAFFE_ENFORCE_GT(inter_center_distance, 0);
  // Each worker loads an L1-cache-sized block.
  // We read/write B * K * TileSize * 4 + 2 * B * TileSize * (K / 8) bytes per
  // block of B tiles, so to fit inside a cache of size C we need
  // B = 4 * C / (17 * K * TileSize).
  const size_t tilesPerBlock = std::max<size_t>(
      std::floor<size_t>(double(4 * kL1CacheSizeBytes) / double(17 * K * TileSize)), 1);
  state->parallelFor(divRoundUp(numTiles, tilesPerBlock), [&](size_t nb) {
    for (size_t i = nb * tilesPerBlock;
         i < std::min<size_t>(nb * tilesPerBlock + tilesPerBlock, numTiles);
         ++i) {
      for (size_t j = 0; j < numTilesDepth; ++j) {
        if (i != numTiles - 1 && j != numTilesDepth - 1) {
          // We have a full tile, so we can quantize each stripe directly.
          for (auto ii = 0; ii < TileSize; ++ii) {
            size_t m = i * TileSize + ii;
            size_t k = j * TileDepthBytes * 8;
            std::array<uint8_t*, k2b1bXBits> XQoff = {
                {XQdata[0] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i,
                 XQdata[1] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i}};
            quantize2bNeon(TileDepthBytes, &Xdata[m * K + k], offset, inter_center_distance, XQoff);
          }
        } else {
          for (size_t ii = 0; ii < TileSize; ++ii) {
            size_t m = i * TileSize + ii;
            size_t k = j * TileDepthBytes * 8;
            std::array<uint8_t*, k2b1bXBits> XQoff = {
                {XQdata[0] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i,
                 XQdata[1] + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i}};
            if (m < M && k + TileDepthBytes * 8 <= K) {
              // We can just read the stripe directly.
              quantize2bNeon(
                  TileDepthBytes, &Xdata[m * K + k], offset, inter_center_distance, XQoff);
            } else {
              // We need to pad the stripe to the full amount read by
              // quantize2bNeon.
              std::array<float, 8 * TileDepthBytes> Xpad = {{0}};
              if (m < M) {
                std::copy(&Xdata[m * K + k], &Xdata[m * K + K], Xpad.begin());
              }
              quantize2bNeon(TileDepthBytes, Xpad.data(), offset, inter_center_distance, XQoff);
            }
          }
        }
      }
    }
  });
}

// Packs a matrix of size M x QK (QK bytes per row) into a tiled array of size
// [ceil(M / TileSize)][ceil(QK / TileDepthBytes)][TileSize][TileDepthBytes],
// zero-padding the partial tiles.
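//
// Index mapping (added note, reading it off the code below): source element
// (m, qk) lands at
//   XPdata[(m / TileSize) * numTilesDepth * TileSize * TileDepthBytes +
//          (qk / TileDepthBytes) * TileSize * TileDepthBytes +
//          (m % TileSize) * TileDepthBytes + (qk % TileDepthBytes)]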
template <size_t TileSize, size_t TileDepthBytes>
void qpack_tiles(QConvState* state, const TensorCPU& X, size_t axis, TensorCPU* XP) {
  const size_t M = X.size_to_dim(axis);
  const size_t QK = X.size() / M;
  const size_t numTiles = divRoundUp(M, TileSize);
  const size_t numTilesDepth = divRoundUp(QK, TileDepthBytes);
  XP->Resize(numTiles, numTilesDepth, TileSize, TileDepthBytes);

  const auto* __restrict__ Xdata = X.data<uint8_t>();
  auto* __restrict__ XPdata = XP->mutable_data<uint8_t>();
  // Load L1-sized tiles per thread.
  // We read/write 2 * B * QK * TileSize bytes per block of B tiles, so for a
  // cache of size C we have B = C / (2 * QK * TileSize).
  const size_t tilesPerBlock = std::max<size_t>(
      std::floor<size_t>(double(kL1CacheSizeBytes) / double(2 * TileSize * QK)), 1);
  state->parallelFor(divRoundUp(numTiles, tilesPerBlock), [&](size_t nb) {
    for (size_t i = nb * tilesPerBlock;
         i < std::min<size_t>(nb * tilesPerBlock + tilesPerBlock, numTiles);
         ++i) {
      for (size_t j = 0; j < numTilesDepth; ++j) {
        if (i != numTiles - 1 && j != numTilesDepth - 1) {
          // we have a full tile. Just memcpy.
          for (auto ii = 0; ii < TileSize; ++ii) {
            auto m = i * TileSize + ii;
            auto qk = j * TileDepthBytes;
            std::memcpy(&XPdata[TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                                TileSize * TileDepthBytes * numTilesDepth * i],
                        &Xdata[m * QK + qk],
                        TileDepthBytes);
          }
        } else {
          for (size_t ii = 0; ii < TileSize; ++ii) {
            for (size_t jj = 0; jj < TileDepthBytes; ++jj) {
              size_t m = i * TileSize + ii;
              size_t qk = j * TileDepthBytes + jj;
              uint8_t pval = 0;
              if (m < M && qk < QK) {
                // get value from X
                pval = Xdata[m * QK + qk];
              }
              XPdata[jj + TileDepthBytes * ii + TileDepthBytes * TileSize * j +
                     TileSize * TileDepthBytes * numTilesDepth * i] = pval;
            }
          }
        }
      }
    }
  });
}

// Computes a kUnrollM x kUnrollN tile of a GEMM by multiplying two packed
// slices of size kUnrollM x K and kUnrollN x K. These slices are constructed
// by the qpack_tiles function, which packs an input array of size [M][K] into
// an [M/TileSize][K/TileDepthBytes][TileSize][TileDepthBytes] array, which
// ensures that all the array accesses in this function are contiguous.
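//
// Added note on the arithmetic below: with the -1/+1 entries of A and B each
// encoded as a single bit (1 -> +1, 0 -> -1), the dot product over K positions
// is K - 2 * popcount(A_row XOR B_row). The vcntq_u8/vpadalq_u8 chain
// accumulates the popcounts, and the final vmlsl_s16(K_, two, acc) step forms
// K - 2 * acc.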
template <size_t kUnrollM, size_t kUnrollN, size_t TileDepthBytes, typename F>
void qgess_packed(const uint8_t* __restrict__ Ablock,
                  const uint8_t* __restrict__ Bblock,
                  float* __restrict__ Cblock,
                  const size_t Cstride,
                  const size_t QK,
                  const size_t Nstart,
                  F&& f) {
  static_assert(kUnrollN % 8 == 0, "");
  static_assert(TileDepthBytes == 16, "");
  DCHECK_EQ(QK % 16, 0);
  uint16x8_t acc[kUnrollM][kUnrollN / 8];
  for (size_t mm = 0; mm < kUnrollM; ++mm) {
    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      acc[mm][nn] = vdupq_n_u16(0);
    }
  }
  size_t qk = 0;
  const size_t QK16Unroll = (QK / 16) * 16;
  for (; qk < QK16Unroll; qk += 16) {
    uint8x16_t Areg[kUnrollM];
    for (size_t mm = 0; mm < kUnrollM; ++mm) {
      Areg[mm] = vld1q_u8(Ablock);
      Ablock += 16;
    }

    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      uint8x16_t Breg[8];
      for (size_t nnn = 0; nnn < 8; ++nnn) {
        Breg[nnn] = vld1q_u8(Bblock);
        Bblock += 16;
      }
      for (size_t mm = 0; mm < kUnrollM; ++mm) {
        uint8x16_t cnts[8];
        for (size_t nnn = 0; nnn < 8; ++nnn) {
          cnts[nnn] = vcntq_u8(veorq_u8(Breg[nnn], Areg[mm]));
        }
        uint8x8_t ps[8];
        for (size_t nnn = 0; nnn < 8; ++nnn) {
          ps[nnn] = vadd_u8(vget_low_u8(cnts[nnn]), vget_high_u8(cnts[nnn]));
        }
        uint8x8_t pss[4];
        for (size_t nnn = 0; nnn < 4; ++nnn) {
          pss[nnn] = vpadd_u8(ps[2 * nnn], ps[2 * nnn + 1]);
        }
        uint8x8_t psss[2];
        for (size_t nnn = 0; nnn < 2; ++nnn) {
          psss[nnn] = vpadd_u8(pss[2 * nnn], pss[2 * nnn + 1]);
        }
        uint8x16_t out = vcombine_u8(psss[0], psss[1]);
        acc[mm][nn] = vpadalq_u8(acc[mm][nn], out);
      }
    }
  }

  for (size_t mm = 0; mm < kUnrollM; ++mm) {
    auto* Crow = Cblock + mm * Cstride;
    for (size_t nn = 0; nn < kUnrollN / 8; ++nn) {
      const int32x4_t K_ = vdupq_n_s32(QK * 8);
      const int16x4_t two = vdup_n_s16(2);
      const int16x4_t acc0123_l = vreinterpret_s16_u16(vget_low_u16(acc[mm][nn]));
      const int16x4_t acc0123_h = vreinterpret_s16_u16(vget_high_u16(acc[mm][nn]));
      const int32x4_t K_minus_2_acc0123_l = vmlsl_s16(K_, two, acc0123_l);
      const int32x4_t K_minus_2_acc0123_h = vmlsl_s16(K_, two, acc0123_h);
      f(Crow + nn * 8 + 0, vcvtq_f32_s32(K_minus_2_acc0123_l), Nstart + nn * 8 + 0);
      f(Crow + nn * 8 + 4, vcvtq_f32_s32(K_minus_2_acc0123_h), Nstart + nn * 8 + 4);
    }
  }
}

// Computes the (normal x transpose) matrix-matrix product C = A * B^T of two
// -1/1 binary matrices, laid out in the packed tile format produced by
// qpack_tiles.
template <size_t TileSize, size_t TileDepthBytes, typename F>
inline void qgemm_nt_packed(
    QConvState* state, const TensorCPU& A, const TensorCPU& B, TensorCPU* C, F&& f = F()) {
  CAFFE_ENFORCE_EQ(A.ndim(), 4);
  CAFFE_ENFORCE_EQ(B.ndim(), 4);
  CAFFE_ENFORCE_EQ(A.dim(2), TileSize);
  CAFFE_ENFORCE_EQ(B.dim(2), TileSize);
  CAFFE_ENFORCE_EQ(A.dim(3), TileDepthBytes);
  CAFFE_ENFORCE_EQ(B.dim(3), TileDepthBytes);
  const size_t MT = A.dim(0);
  const size_t NT = B.dim(0);
  const size_t M = MT * TileSize;
  const size_t N = NT * TileSize;

  const size_t QKT = A.dim(1);
  const size_t K = QKT * 8 * TileDepthBytes;
  const size_t QK = K / 8;
  CAFFE_ENFORCE_EQ(A.dim(1), B.dim(1));
  C->Resize(M, N);
  const auto* Adata = A.data<uint8_t>();
  const auto* Bdata = B.data<uint8_t>();
  auto* Cdata = C->mutable_data<float>();

  // Assume a T x T tile. Each input slice is of size T x (K / 8) bytes, and
  // the output is a tile of size T x T x sizeof(float) bytes. We want the sum
  // of these to fit in the L1 cache, so for a block of B tiles per side we
  // load B * T * K / 8 + B * T * K / 8 + B * B * T * T * sizeof(float) bytes.
  //
  // If the cache size is C, this gives
  //   B = 1/(32 * T) * (sqrt(256 * C + K^2) - K);
  // taking the floor (by integer division) gives the result.
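  // Derivation sketch (added, under the assumptions above): setting
  //   B * T * K / 4 + 4 * B^2 * T^2 = C
  // and multiplying by 4 gives 16 * T^2 * B^2 + T * K * B - 4 * C = 0, whose
  // positive root is B = (sqrt(K^2 + 256 * C) - K) / (32 * T).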
  // Assume 16KB L1 cache.
  size_t tilesPerBlock =
      std::floor((std::sqrt(256 * kL1CacheSizeBytes + K * K) - K) / (32 * TileSize));
  if (tilesPerBlock < 1) {
    tilesPerBlock = 1;
  }
  CAFFE_ENFORCE_LT(K, std::pow(2, 16));
  CAFFE_ENFORCE_EQ(M % TileSize, 0);
  CAFFE_ENFORCE_EQ(N % TileSize, 0);
  const size_t MNumTiles = M / TileSize;
  const size_t NNumTiles = N / TileSize;
  const size_t MNumBlocks = divRoundUp(MNumTiles, tilesPerBlock);
  const size_t NNumBlocks = divRoundUp(NNumTiles, tilesPerBlock);

  state->parallelFor(MNumBlocks * NNumBlocks, [&](size_t mn) {
    const size_t mBlockIdx = mn / NNumBlocks;
    const size_t nBlockIdx = mn % NNumBlocks;
    const size_t mTileStart = mBlockIdx * tilesPerBlock;
    const size_t nTileStart = nBlockIdx * tilesPerBlock;
    for (size_t mBlockTileIdx = 0;
         mBlockTileIdx < tilesPerBlock && mBlockTileIdx + mTileStart < MNumTiles;
         ++mBlockTileIdx) {
      const size_t mTileIdx = mBlockTileIdx + mTileStart;
      for (size_t nBlockTileIdx = 0;
           nBlockTileIdx < tilesPerBlock && nBlockTileIdx + nTileStart < NNumTiles;
           ++nBlockTileIdx) {
        const size_t nTileIdx = nBlockTileIdx + nTileStart;
        // A layout: [M/TileSize][QK / TileDepth][TileSize][TileDepth]
        // C layout: [M/TileSize][TileSize][N/TileSize][TileSize]
        const auto* Ablock = &Adata[mTileIdx * QK * TileSize];
        const auto* Bblock = &Bdata[nTileIdx * QK * TileSize];
        auto* Cblock = &Cdata[mTileIdx * TileSize * N + nTileIdx * TileSize];
        const size_t Cstride = N;
        qgess_packed<TileSize, TileSize, TileDepthBytes, F>(
            Ablock, Bblock, Cblock, Cstride, QK, nTileIdx * TileSize, std::forward<F>(f));
      }
    }
  });
}

void run2b1bConvIm2ColGEMM(QConvState* state,
                           const ConvArgs& args,
                           const TensorCPU& X,
                           TensorCPU* Y) {
  // TODO: packing + quantization in same block.
  const size_t KH = state->WQ->dim32(1);
  const size_t KW = state->WQ->dim32(2);
  const size_t OH = (X.dim32(1) - KH + args.pad_t + args.pad_b) / args.stride_h + 1;
  const size_t OW = (X.dim32(2) - KW + args.pad_l + args.pad_r) / args.stride_w + 1;
  const size_t OC = state->WQ->dim32(0);
  const size_t QK = KH * KW * divRoundUp(X.dim32(3), 8);
  Y->Resize(X.dim32(0), OH, OW, OC);
  if (!state->WQPacked) {
    state->WQPacked = caffe2::make_unique<TensorCPU>();
    qpack_tiles<kGEMMTileSize, kGEMMTileDepthBytes>(state, *(state->WQ), 1, state->WQPacked.get());
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(0), divRoundUp(OC, kGEMMTileSize));
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(1), divRoundUp(QK, kGEMMTileDepthBytes));
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(2), kGEMMTileSize);
    CAFFE_ENFORCE_EQ(state->WQPacked->dim32(3), kGEMMTileDepthBytes);

    // We can fuse the bias addition into the filter normalization: since the
    // output is computed as 3/2 * WQN + ..., adding the bias is equivalent to
    // replacing WQN with (WQN + 2/3 * bias) and setting the bias to zero.
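    // In symbols (added note): the GEMM epilogues below compute
    //   Y[c] = 3/2 * WQN[c] + 1/2 * v0[c] + v1[c],
    // so Y[c] + bias[c] = 3/2 * (WQN[c] + 2/3 * bias[c]) + 1/2 * v0[c] + v1[c].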
    if (state->bias) {
      for (size_t i = 0; i < state->bias->size(); ++i) {
        state->WQN->mutable_data<float>()[i] += 2.0f / 3 * state->bias->data<float>()[i];
      }
    }
    state->bias.reset();

    // If we have to pad when we pack our weight tiles, then we need to adjust
    // the normalization factor by the number of zeros that we added.
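    // Accounting (added note, derived from the kernels below): both the weight
    // and activation tiles are padded with zero bits, which the XOR/popcount
    // GEMM counts as matches, adding 8 to each bit-plane result per padded
    // byte. With Y = 3/2 * WQN + 1/2 * v0 + v1 that is +12 * QKPadding per
    // output, which subtracting 8 * QKPadding from WQN cancels (3/2 * 8 = 12).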
    const size_t QKPadding = divRoundUp(QK, kGEMMTileDepthBytes) * kGEMMTileDepthBytes - QK;
    if (QKPadding != 0) {
      for (size_t i = 0; i < state->WQN->size(); ++i) {
        state->WQN->mutable_data<float>()[i] -= QKPadding * 8;
      }
    }
  }
  CAFFE_ENFORCE(!state->bias.get());
  // Since 1x1s are so common, we fuse the quantization + packing steps.
  const bool is_1x1 = KH == 1 && KW == 1 && args.pad_l == 0 && args.pad_r == 0 && args.pad_b == 0 &&
      args.pad_t == 0 && args.stride_h == 1 && args.stride_w == 1;

  if (is_1x1) {
    CAFFE_ENFORCE_EQ(OH, X.dim32(1));
    CAFFE_ENFORCE_EQ(OW, X.dim32(2));
    uniformQuantize2b1bNeonPacked<kGEMMTileSize, kGEMMTileDepthBytes>(
        state, X, state->XQs, 0.5, 1.0);
  } else {
    uniformQuantize2b1bNeon(state, X, state->XQs, 0.5, 1.0);
  }
  TensorCPU* YQ0 = state->YQs[0].get();

  if (state->WQ->dim32(0) % kGEMMTileSize == 0) {
    // We can run in place by operating on our Y vector, and then shrinking Y.
    YQ0 = Y;
  }

  for (size_t i = 0; i < k2b1bXBits; ++i) {
    const auto& XQ = *(state->XQs[i]);
    if (!is_1x1) {
      qim2col(args, XQ, *(state->WQ), state->scratchColBuffer.get());
      qpack_tiles<kGEMMTileSize, kGEMMTileDepthBytes>(
          state, *(state->scratchColBuffer), 3, state->scratch.get());
    }

    {
      const auto* __restrict__ WQNdata = state->WQN->data<float>();
      switch (i) {
        case 0:
          qgemm_nt_packed<kGEMMTileSize, kGEMMTileDepthBytes>(
              state,
              is_1x1 ? XQ : *(state->scratch),
              *(state->WQPacked),
              YQ0,
              [WQNdata](float* __restrict__ acc, float32x4_t value, size_t channel) {
                // acc[c] = 3/2 WQN[c] + 1/2 value[c];
                const float32x4_t _32 = vdupq_n_f32(3.0f / 2);
                const float32x4_t _12 = vdupq_n_f32(1.0f / 2);
                const float32x4_t WQNc_32 = vmulq_f32(_32, vld1q_f32(WQNdata + channel));
                const float32x4_t WQNc_32_value_12 = vmlaq_f32(WQNc_32, _12, value);
                vst1q_f32(acc, WQNc_32_value_12);
              });
          break;
        case 1:
          qgemm_nt_packed<kGEMMTileSize, kGEMMTileDepthBytes>(
              state,
              is_1x1 ? XQ : *(state->scratch),
              *(state->WQPacked),
              YQ0,
              [](float* __restrict__ acc, float32x4_t value, size_t channel) {
                const float32x4_t curr = vld1q_f32(acc);
                vst1q_f32(acc, vaddq_f32(curr, value));
              });
          break;
      }
    }
  }

  if (YQ0 != Y) {
    // In this case, the stride does not match, so we need to copy the output
    // data into the contiguous Y matrix.
    const size_t F = state->WQ->dim(0);
    const size_t N = Y->size() / F;
    const size_t NP = YQ0->dim32(0);
    const size_t FP = YQ0->dim32(1);
    math::CopyMatrix<CPUContext>(
        sizeof(float), N, F, YQ0->data<float>(), FP, Y->mutable_data<float>(), F, nullptr);
  } else {
    CAFFE_ENFORCE_EQ(Y->dim32(0), divRoundUp(X.dim32(0) * OH * OW, kGEMMTileSize) * kGEMMTileSize);
    CAFFE_ENFORCE_EQ(Y->dim32(1), OC);
    Y->Shrink(X.dim32(0) * OH * OW);
    Y->Reshape(std::vector<TIndex>{{TIndex(X.dim(0)), TIndex(OH), TIndex(OW), TIndex(OC)}});
  }
}

bool run2b1bConvNeon(QConvState* state, const ConvArgs& args, const TensorCPU& X, TensorCPU* Y) {
  // TODO: insert specialized cases (e.g. depthwise convolutions, the direct
  // convolution).
  CAFFE_ENFORCE_EQ(X.ndim(), 4);
  run2b1bConvIm2ColGEMM(state, args, X, Y);
  return true;
}

#endif

} // namespace caffe2