Caffe2 - C++ API
A deep learning, cross-platform ML framework
tensor.h
1 #ifndef CAFFE2_CORE_TENSOR_H_
2 #define CAFFE2_CORE_TENSOR_H_
3 
4 #include <cstddef>
5 #include <cstdint>
6 #include <fstream>
7 #include <sstream>
8 #include <type_traits>
9 #include <typeinfo>
10 #include <vector>
11 
12 #include "caffe2/core/common.h"
13 #include "caffe2/core/flags.h"
14 #include "caffe2/core/context.h"
15 #include "caffe2/core/typeid.h"
16 #include "caffe2/core/logging.h"
17 
18 // A global boolean variable to control whether we free memory when a Tensor
19 // is shrunk to a smaller size. As a result, a Tensor always keeps the memory
20 // allocated for the maximum capacity it has been reshaped to so far.
21 CAFFE2_DECLARE_bool(caffe2_keep_on_shrink);
22 
23 // Since we can have high variance in blob memory allocated across different
24 // inputs in the same run, we will shrink the blob only if the memory gain
25 // is larger than this flag in bytes.
26 CAFFE2_DECLARE_int64(caffe2_max_keep_on_shrink_memory);
27 
28 namespace caffe2 {
29 
33 inline vector<TIndex> ToVectorTIndex(const std::vector<int>& src) {
34  return vector<TIndex>(src.begin(), src.end());
35 }
36 
40 inline TIndex size_from_dim_(int k, const vector<TIndex>& dims) {
41  TIndex r = 1;
42  for (int i = k; i < dims.size(); ++i) {
43  r *= dims[i];
44  }
45  return r;
46 }
47 
48 // Product of all dims up to k (not including dims[k])
49 inline TIndex size_to_dim_(int k, const vector<TIndex>& dims) {
50  CAFFE_ENFORCE(k <= dims.size());
51  TIndex r = 1;
52  for (int i = 0; i < k; ++i) {
53  r *= dims[i];
54  }
55  return r;
56 }
57 
58 // Product of all dims between k and l (not including dims[k] and dims[l])
59 inline TIndex size_between_dim_(int k, int l, const vector<TIndex>& dims) {
60  CAFFE_ENFORCE(l < dims.size());
61  TIndex r = 1;
62  if (k < l) {
63  for (int i = k + 1; i < l; ++i) {
64  r *= dims[i];
65  }
66  } else {
67  for (int i = l + 1; i < k; ++i) {
68  r *= dims[i];
69  }
70  }
71  return r;
72 }
73 
74 inline int canonical_axis_index_(int axis_index, int ndims) {
75  CAFFE_ENFORCE_GE(axis_index, -ndims);
76  CAFFE_ENFORCE_LT(axis_index, ndims);
77  if (axis_index < 0) {
78  return axis_index + ndims;
79  }
80  return axis_index;
81 }
82 
92 template <class Context>
93 class Tensor {
94  public:
98  Tensor() {}
99 
106  explicit Tensor(const vector<TIndex>& dims) { Resize(dims); }
107  explicit Tensor(const vector<int>& dims) { Resize(dims); }
108 
119  template <class SrcContext, class ContextForCopy>
120  Tensor(const Tensor<SrcContext>& src, ContextForCopy* context) {
121  CopyFrom(src, context);
122  }
123 
135  template <class SrcContext>
136  explicit Tensor(const Tensor<SrcContext>& src) {
137  CopyFrom(src);
138  }
139 
143  template <typename T>
144  Tensor(const vector<TIndex>& dims, const vector<T>& values, Context* context)
145  : meta_(TypeMeta::Make<T>()) {
146  Resize(dims);
147  CAFFE_ENFORCE_EQ_WITH_CALLER(values.size(), size_);
148  context->template Copy<T, CPUContext, Context>(size_, values.data(), mutable_data<T>());
149  }
150 
154  template <typename T,
155  typename = typename std::enable_if<std::is_scalar<T>::value>::type>
156  Tensor(const T& value, Context* context) {
157  Resize(vector<TIndex>{});
158  context->template Copy<T, CPUContext, Context>(size_, &value, mutable_data<T>());
159  }
160 
165  template <class SrcContext, class ContextForCopy>
166  void CopyFrom(const Tensor<SrcContext>& src, ContextForCopy* context) {
167  if ((void*)&src == (void*)this) {
168  return;
169  }
170  meta_ = src.meta();
171  Resize(src.dims());
172  if (size() > 0) {
173  if (meta_.copy()) {
174  meta_.copy()(src.raw_data(), raw_mutable_data(), size());
175  } else {
176  context->template CopyBytes<SrcContext, Context>(
177  nbytes(), src.raw_data(), raw_mutable_data());
178  }
179  }
180  }
181 
189  template <class SrcContext>
190  inline void CopyFrom(const Tensor<SrcContext>& src) {
191  SrcContext tmp_context;
192  CopyFrom(src, &tmp_context);
193  }
194 
195  virtual ~Tensor() noexcept {}
196 
206  template <class ContextForCopy>
207  void Extend(TIndex num, float growthPct, ContextForCopy* context) {
208  CAFFE_ENFORCE_GE_WITH_CALLER(dims_.size(), 1);
209  auto newDims = dims_;
210  newDims[0] += num;
211  if (!data_) {
212  Resize(newDims);
213  return;
214  }
215  auto newSize = std::accumulate(
216  newDims.begin(),
217  newDims.end(),
218  static_cast<TIndex>(1),
219  std::multiplies<TIndex>());
220  if (newSize * meta_.itemsize() <= capacity_) {
221  dims_ = newDims;
222  size_ = newSize;
223  return;
224  }
225  auto newCapacity = dims_;
226  newCapacity[0] = std::max<size_t>(
227  newDims[0], std::ceil(dims_[0] * (growthPct + 100) / 100));
228  Reserve(newCapacity, context);
229  dims_ = newDims;
230  size_ = newSize;
231  }
232 
233  template <class T, class ContextForCopy>
234  void Reserve(const std::vector<T>& newCapacity, ContextForCopy* context) {
235  auto newSize = std::accumulate(
236  newCapacity.begin(),
237  newCapacity.end(),
238  static_cast<TIndex>(1),
239  std::multiplies<TIndex>());
240  if (newSize * meta_.itemsize() <= capacity_) {
241  return;
242  }
243  auto oldData = std::move(data_);
244  auto oldSize = size_;
245  auto oldDims = dims_;
246  Resize(newCapacity);
247  auto* newData = raw_mutable_data(meta_);
248  context->template CopyItems<ContextForCopy, ContextForCopy>(
249  meta_, oldSize, oldData.get(), newData);
250  dims_ = oldDims;
251  size_ = oldSize;
252  reserved_ = true;
253  }
254 
261  void Shrink(TIndex outer_dim) {
262  CAFFE_ENFORCE_WITH_CALLER(dims_.size() >= 1, "Tensor must be at least 1D");
263  CAFFE_ENFORCE_WITH_CALLER(
264  outer_dim <= dims_[0],
265  "New outer dimension must be smaller than current.");
266  dims_[0] = outer_dim;
267  size_ = std::accumulate(
268  dims_.begin(),
269  dims_.end(),
270  static_cast<TIndex>(1),
271  std::multiplies<TIndex>());
272  }
273 
287  template <typename... Ts>
288  void Resize(Ts... dim_source) {
289  bool size_changed = SetDims(dim_source...);
290  if (size_changed) {
291  // If needed, we will free the data. The next mutable_data() call
292  // will create the data storage.
293  int64_t new_size = size_ * meta_.itemsize();
294  bool reset_tensor = false;
295  if (reserved_) {
296  // If the tensor is reserved, don't reclaim its memory unless capacity_
297  // is smaller than the new size.
298  reset_tensor = capacity_ < new_size;
299  } else {
300  reset_tensor = capacity_ < new_size || !FLAGS_caffe2_keep_on_shrink ||
301  capacity_ - new_size > FLAGS_caffe2_max_keep_on_shrink_memory;
302  }
303 
304  if (reset_tensor) {
305  FreeMemory();
306  }
307  }
308  }
309 
314  template <class OtherContext>
315  inline void ResizeLike(const Tensor<OtherContext>& src_tensor) {
316  // Note: need casting for different context types.
317  if (static_cast<void*>(this) != static_cast<const void*>(&src_tensor)) {
318  Resize(src_tensor.dims());
319  }
320  }
321 
326  inline void Reshape(const vector<TIndex>& dims) {
327  TIndex new_size = 1;
328  for (auto d : dims) {
329  CAFFE_ENFORCE_GE_WITH_CALLER(d, 0);
330  new_size *= d;
331  }
332  CAFFE_ENFORCE_WITH_CALLER(
333  new_size == size_,
334  "New size and old size are not equal. You cannot use Reshape, "
335  "but should use Resize."
336  // TODO(jiayq): remove the following warning after pending diffs
337  // stabilize.
338  " The old caffe2 mixes Reshape and Resize but this behavior has "
339  "been changed. If you find this error, most likely you will need "
340  "to change corresponding code from Reshape to Resize.");
341  dims_ = dims;
342  }
343 
344  inline void Reshape(const vector<int>& dims) {
345  Reshape(ToVectorTIndex(dims));
346  }
347 
353  inline void FreeMemory() {
354  data_.reset();
355  capacity_ = 0;
356  // If reserved_ is true and we changed the tensor's memory, it is fine
357  // to switch it to false. If Resize() is called from Reserve() and triggers
358  // FreeMemory(), then reserved_ will be set back to true at the end of Reserve().
359  reserved_ = false;
360  }
361 
367  string DebugString() const {
368  std::stringstream ss;
369  ss << "A Tensor of item size " << itemsize() << " and type "
370  << meta_.name() << " and dimension (";
371  for (int d : dims_) {
372  ss << d << ",";
373  }
374  ss << ").";
375  return ss.str();
376  }
377 
378  void swap(Tensor<Context>& other) {
379  std::swap(dims_, other.dims_);
380  std::swap(size_, other.size_);
381  std::swap(meta_, other.meta_);
382  std::swap(data_, other.data_);
383  std::swap(shares_data_, other.shares_data_);
384  std::swap(capacity_, other.capacity_);
385  std::swap(reserved_, other.reserved_);
386  }
387 
400  void ShareData(const Tensor& src) {
401  meta_ = src.meta();
402  CAFFE_ENFORCE_EQ_WITH_CALLER(
403  src.size_,
404  size_,
405  "Size mismatch - did you call reshape before sharing the data?");
406  // It is possible that the source tensor hasn't called mutable_data() yet,
407  // in which case ShareData() doesn't make much sense since we don't really
408  // know what to share yet.
409  CAFFE_ENFORCE_WITH_CALLER(
410  src.data_.get() || src.size_ == 0,
411  "Source tensor has no content and has size > 0");
412  // Finally, do sharing.
413  data_ = src.data_;
414  capacity_ = src.capacity_;
415  shares_data_ = true;
416  }
417 
427  template <typename T, typename Deleter = MemoryDeleter>
428  void ShareExternalPointer(T* src, size_t capacity = 0, Deleter d = nullptr) {
429  ShareExternalPointer(src, TypeMeta::Make<T>(), capacity, d);
430  }
431 
432  template <typename Deleter = MemoryDeleter>
433  void ShareExternalPointer(
434  void* src,
435  const TypeMeta& meta,
436  size_t capacity = 0,
437  Deleter d = nullptr) {
438  meta_ = meta;
439  CAFFE_ENFORCE_WITH_CALLER(
440  meta_.id(),
441  "To share with a raw external pointer you need to have meta "
442  "already set.");
443  CAFFE_ENFORCE_WITH_CALLER(
444  size_ >= 0,
445  "To share data with a raw pointer, you need to set shape first.");
446  // Check if the deleter is a MemoryDeleter and is a simple nullptr.
447  if (std::is_same<MemoryDeleter, Deleter>::value &&
448  reinterpret_cast<MemoryDeleter*>(&d)[0] == nullptr) {
449  // Use aliasing constructor trick to avoid calling the destructor.
450  data_ = std::shared_ptr<void>(std::shared_ptr<void>(), src);
451  } else {
452  data_.reset(src, d);
453  }
454  // Sets capacity. If not specified, we will implicitly assume that
455  // the capacity is the current size.
456  if (capacity) {
457  capacity_ = capacity;
458  } else {
459  capacity_ = nbytes();
460  }
461  shares_data_ = true;
462  }
463 
464  bool shares_data() const {
465  return shares_data_;
466  }
467 
472  inline const void* raw_data() const {
473  CAFFE_ENFORCE_WITH_CALLER(data_.get() || size_ == 0);
474  return data_.get();
475  }
476 
483  template <typename T>
484  inline const T* data() const {
485  CAFFE_ENFORCE_WITH_CALLER(
486  data_.get() || size_ == 0,
487  "The tensor is of non-zero shape, but its data is not allocated yet. "
488  "Caffe2 uses a lazy allocation, so you will need to call "
489  "mutable_data() or raw_mutable_data() to actually allocate memory.");
490  CAFFE_ENFORCE_WITH_CALLER(
491  IsType<T>(),
492  "Tensor type mismatch, caller expects elements to be ",
493  TypeMeta::TypeName<T>(),
494  " while tensor contains ",
495  meta_.name());
496  return static_cast<T*>(data_.get());
497  }
498 
510  inline void* raw_mutable_data(const TypeMeta& meta) {
511  // For 0-size tensors it's fine to return any pointer (including nullptr)
512  if (meta_ == meta && (data_.get() || size_ == 0)) {
513  return data_.get();
514  } else {
515  bool had_special_dtor = meta_.dtor() != nullptr;
516  meta_ = meta;
517  CAFFE_ENFORCE_WITH_CALLER(
518  size_ >= 0,
519  "Tensor is not initialized. You probably need to call Resize() "
520  "before calling mutable_data()");
521 
522  // We can reuse the existing buffer if the current data does not have
523  // a special destructor and the new data doesn't have a special
524  // constructor.
525  if (size_ == 0 ||
526  (meta.ctor() == nullptr && !had_special_dtor &&
527  capacity_ >= size_ * meta_.itemsize())) {
528  return data_.get();
529  }
530  if (meta.ctor()) {
531  // For types that need placement new, we will call it, as well as
532  // making sure that when the data is freed, it calls the right
533  // destruction procedure.
534  auto size = size_;
535  auto dtor = meta_.dtor();
536  auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());
537  auto deleter = ptr_and_deleter.second;
538  data_.reset(
539  ptr_and_deleter.first, [size, dtor, deleter](void* ptr) -> void {
540  dtor(ptr, size);
541  deleter(ptr);
542  });
543  meta_.ctor()(data_.get(), size_);
544  } else {
545  // For fundamental type, new and delete is easier.
546  auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());
547  data_.reset(ptr_and_deleter.first, ptr_and_deleter.second);
548  }
549  capacity_ = size_ * meta_.itemsize();
550  return data_.get();
551  }
552  }
553 
563  inline void* raw_mutable_data() {
564  CAFFE_ENFORCE_WITH_CALLER(
565  meta_.id() != 0,
566  "Calling raw_mutable_data() without meta, but the current meta is "
567  "of unknown type.");
568  return raw_mutable_data(meta_);
569  }
570 
577  template <typename T>
578  inline T* mutable_data() {
579  if ((size_ == 0 || data_.get()) && IsType<T>()) {
580  return static_cast<T*>(data_.get());
581  }
582  return static_cast<T*>(raw_mutable_data(TypeMeta::Make<T>()));
583  }
584 
585 
589  inline int ndim() const { return dims_.size(); }
593  inline TIndex size() const { return size_; }
597  inline size_t itemsize() const { return meta_.itemsize(); }
603  inline size_t nbytes() const { return size_ * meta_.itemsize(); }
604 
605  inline size_t capacity_nbytes() const {
606  return capacity_;
607  }
611  inline const vector<TIndex>& dims() const { return dims_; }
612 
613  inline TIndex size_from_dim(int k) const {
614  return size_from_dim_(k, dims_);
615  }
616 
617  inline TIndex size_to_dim(int k) const {
618  return size_to_dim_(k, dims_);
619  }
620 
621  inline TIndex size_between_dim(int k, int l) const {
622  return size_between_dim_(k, l, dims_);
623  }
624 
636  inline int canonical_axis_index(int axis_index) const {
637  return canonical_axis_index_(axis_index, ndim());
638  }
639 
643  template <typename T>
644  inline bool IsType() const { return meta_.Match<T>(); }
648  inline const TypeMeta& meta() const { return meta_; }
649 
657  inline int dim32(const int i) const {
658  #ifndef NDEBUG
659  CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), "Exceeding ndim limit");
660  CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
661  #endif
662  CAFFE_ENFORCE_LT_WITH_CALLER(dims_[i], std::numeric_limits<int>::max());
663  return static_cast<int>(dims_[i]);
664  }
665 
671  inline TIndex dim(const int i) const {
672  #ifndef NDEBUG
673  CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), "Exceeding ndim limit");
674  CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
675  #endif
676  return dims_[i];
677  }
678 
679  protected:
680  vector<TIndex> dims_;
681  TIndex size_ = -1;
682  TypeMeta meta_;
683  std::shared_ptr<void> data_;
684  bool shares_data_ = false;
685  size_t capacity_ = 0;
686  bool reserved_ = false;
687  // In case of chunk load we store how much data was already loaded
688 
689  private:
690  template <
691  typename T,
692  typename = typename std::enable_if<std::is_integral<T>::value>::type>
693  bool SetDims(const vector<T>& src) {
694  auto old_size = size_;
695  dims_.resize(src.size());
696  TIndex new_size = 1;
697  for (unsigned int i = 0; i < src.size(); ++i) {
698  new_size *= src[i];
699  dims_[i] = src[i];
700  }
701  size_ = new_size;
702  return size_ != old_size;
703  }
704 
705  bool SetDims() {
706  auto old_size = size_;
707  dims_.resize(0);
708  size_ = 1;
709  return size_ != old_size;
710  }
711 
712  // TODO(jiayq): maybe rewrite the following functions with initializer list.
713  // NVCC does not play well with initializer lists last time, but worth
714  // another shot.
715  bool SetDims(const TIndex d0) {
716  auto old_size = size_;
717  dims_.resize(1);
718  dims_[0] = d0;
719  size_ = d0;
720  return size_ != old_size;
721  }
722 
723  bool SetDims(const TIndex d0, const TIndex d1) {
724  auto old_size = size_;
725  dims_.resize(2);
726  dims_[0] = d0;
727  dims_[1] = d1;
728  size_ = d0 * d1;
729  return size_ != old_size;
730  }
731 
732  bool SetDims(const TIndex d0, const TIndex d1, const TIndex d2) {
733  auto old_size = size_;
734  dims_.resize(3);
735  dims_[0] = d0;
736  dims_[1] = d1;
737  dims_[2] = d2;
738  size_ = d0 * d1 * d2;
739  return size_ != old_size;
740  }
741 
742  bool
743  SetDims(const TIndex d0, const TIndex d1, const TIndex d2, const TIndex d3) {
744  auto old_size = size_;
745  dims_.resize(4);
746  dims_[0] = d0;
747  dims_[1] = d1;
748  dims_[2] = d2;
749  dims_[3] = d3;
750  size_ = d0 * d1 * d2 * d3;
751  return size_ != old_size;
752  }
753 
754  // Note(jiayq): possibly a rule-of-three violation, but we explicitly
755  // discourage the use of = for Tensors.
756  Tensor& operator=(const Tensor& src) = delete;
757 };
758 
759 // For simplicity, we will typedef Tensor<CPUContext> to TensorCPU.
760 typedef Tensor<CPUContext> TensorCPU;
761 
762 constexpr int k_limit_default_ = 1000;
763 
764 // Type call registry
765 typedef TypeMeta (*TypeCall)(const void*);
766 TypeCall GetTypeCallFunction(CaffeTypeId id);
767 void RegisterTypeCallFunction(CaffeTypeId id, TypeCall c);
768 
769 template <class Context>
770 TypeMeta GetTensorType(const void* c) {
771  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);
772  return tc->meta();
773 }
774 
775 // Shape call registry
776 typedef vector<TIndex> (*TensorInfoCall)(
777  const void*,
778  bool* shares_data,
779  size_t* capacity,
780  DeviceOption* device);
781 TensorInfoCall GetTensorInfoFunction(CaffeTypeId id);
782 void RegisterTensorInfoFunction(CaffeTypeId id, TensorInfoCall c);
783 
784 template <class Context>
785 vector<TIndex> GetTensorInfo(
786  const void* c,
787  bool* shares_data,
788  size_t* capacity,
789  DeviceOption* device) {
790  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);
791  *shares_data = tc->shares_data();
792  *capacity = tc->capacity_nbytes();
793  device->set_device_type(CPU);
794  device->set_cuda_gpu_id(0);
795  return tc->dims();
796 }
797 
798 class TensorPrinter {
799  public:
800  explicit TensorPrinter(
801  const std::string& tensor_name = "",
802  const std::string& file_name = "",
803  int limit = k_limit_default_);
804  ~TensorPrinter();
805 
806  template <class T>
807  void Print(const Tensor<CPUContext>& tensor);
808 
809  template <class Context>
810  void PrintMeta(const Tensor<Context>& tensor);
811 
812  template <class Context>
813  string MetaStr(const Tensor<Context>& tensor);
814 
815  private:
816  bool to_file_;
817  int limit_;
818  std::unique_ptr<std::ofstream> log_file_;
819  std::string tensor_name_;
820 };
821 
822 template <class T>
823 void TensorPrinter::Print(const Tensor<CPUContext>& tensor) {
824  std::stringstream values_stream;
825  // One most likely doesn't want to print an int64 number of items for visual
826  // inspection, so we cast down to int here.
827  int total_count = static_cast<int>(
828  std::min(tensor.size(), TIndex(limit_)));
829  const T* tensor_data = tensor.template data<T>();
830  for (int i = 0; i < total_count - 1; ++i) {
831  values_stream << tensor_data[i] << ",";
832  }
833  // We do not add a comma after the last item.
834  values_stream << tensor_data[total_count - 1];
835  if (to_file_) {
836  (*log_file_) << MetaStr(tensor) << values_stream.str() << std::endl;
837  } else {
838  // Log to console.
839  LOG(INFO) << MetaStr(tensor) << values_stream.str();
840  }
841 }
842 
843 template <class Context>
844 void TensorPrinter::PrintMeta(const Tensor<Context>& tensor) {
845  if (to_file_) {
846  (*log_file_) << MetaStr(tensor) << std::endl;
847  } else {
848  LOG(INFO) << MetaStr(tensor);
849  }
850 }
851 
852 template <class Context>
853 std::string TensorPrinter::MetaStr(const Tensor<Context>& tensor) {
854  std::stringstream meta_stream;
855  meta_stream << "Tensor " << tensor_name_ << " of type "
856  << tensor.meta().name() << ". Dims: (";
857  for (const auto dim : tensor.dims()) {
858  meta_stream << dim << ",";
859  }
860  meta_stream << "): ";
861  return meta_stream.str();
862 }
863 
864 } // namespace caffe2
865 #endif // CAFFE2_CORE_TENSOR_H_
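
The listing above defines a lazily allocated, context-templated tensor: Resize() only records the shape, and the first mutable_data<T>() call allocates storage of the corresponding type. A minimal usage sketch (assuming a CPU build; TensorCPU is the typedef from line 760, and the wrapper function name is illustrative):

  #include "caffe2/core/tensor.h"

  void basic_usage() {
    caffe2::TensorCPU t;
    t.Resize(2, 3);                       // shape becomes {2, 3}; nothing is allocated yet
    float* d = t.mutable_data<float>();   // lazy allocation happens here
    for (caffe2::TIndex i = 0; i < t.size(); ++i) {
      d[i] = static_cast<float>(i);       // fill the 6 elements
    }
    const float* r = t.data<float>();     // typed, read-only view of the same buffer
    (void)r;
  }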
void Extend(TIndex num, float growthPct, ContextForCopy *context)
Extends the outer-most dimension of this tensor by num elements, preserving the existing data...
Definition: tensor.h:207
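A sketch of appending rows with Extend(); the 50.f growth percentage and function name are illustrative, not recommended defaults:

  #include "caffe2/core/tensor.h"

  void extend_example(caffe2::CPUContext* context) {
    caffe2::TensorCPU buf;
    buf.Resize(2, 16);
    buf.mutable_data<float>();        // allocate and fix the element type
    buf.Extend(6, 50.f, context);     // now 8 x 16; the first 2 rows are preserved,
                                      // and extra capacity may be reserved per growthPct
  }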
const T * data() const
Returns a typed pointer of the underlying storage.
Definition: tensor.h:484
size_t itemsize() const
Return the number of bytes each item takes in the tensor.
Definition: tensor.h:597
const TypeMeta & meta() const
Returns the TypeMeta object associated with the current data type.
Definition: tensor.h:648
Tensor(const vector< TIndex > &dims, const vector< T > &values, Context *context)
Creates a tensor, and fills its contents with the given values.
Definition: tensor.h:144
TIndex dim(const int i) const
Returns the i-th dimension of the tensor.
Definition: tensor.h:671
void Shrink(TIndex outer_dim)
Shrinks the outer-most dimension to given size, keeping the data.
Definition: tensor.h:261
PlacementNew ctor() const
Returns the placement new function pointer for individual items.
Definition: typeid.h:149
int canonical_axis_index(int axis_index) const
Returns the 'canonical' version of a (usually) user-specified axis, allowing for negative indexing (e.g., -1 for the last axis).
Definition: tensor.h:636
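For example, on a 4-dimensional tensor an axis of -1 canonicalizes to 3:

  caffe2::TensorCPU t;
  t.Resize(2, 3, 4, 5);
  int last_axis = t.canonical_axis_index(-1);   // == 3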
void ShareExternalPointer(T *src, size_t capacity=0, Deleter d=nullptr)
Shares the data with an externally managed pointer.
Definition: tensor.h:428
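A sketch of wrapping externally managed memory; the buffer and function names are made up for illustration. The shape must be set before sharing, and with the default (null) deleter the tensor never frees the pointer:

  #include "caffe2/core/tensor.h"

  static float external_buffer[6];        // owned by the caller, not by Caffe2

  void share_external_example() {
    caffe2::TensorCPU view;
    view.Resize(2, 3);                    // 6 elements, matching the buffer
    view.ShareExternalPointer(external_buffer);
    // view.data<float>() now aliases external_buffer; no copy, no ownership transfer
  }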
Tensor is the basic class in Caffe2 that stores a contiguous memory with its shape information...
Definition: tensor.h:93
int dim32(const int i) const
Returns the i-th dimension of the tensor in int.
Definition: tensor.h:657
void * raw_mutable_data()
Returns a mutable raw pointer of the underlying storage.
Definition: tensor.h:563
void CopyFrom(const Tensor< SrcContext > &src, ContextForCopy *context)
Copies the data from a source tensor, with a context provided to carry out the underlying memcpy operation.
Definition: tensor.h:166
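A sketch of copying with an explicit context; both tensors are CPU here for simplicity, but the same call carries data across contexts (e.g. CPU to GPU) when instantiated with the corresponding Context types:

  #include "caffe2/core/tensor.h"

  void copy_example(caffe2::CPUContext* context) {
    caffe2::TensorCPU src;
    src.Resize(4);
    src.mutable_data<float>()[0] = 1.f;   // allocate src and write one element
    caffe2::TensorCPU dst;
    dst.CopyFrom(src, context);           // dst takes src's meta, shape, and contents
  }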
Tensor(const Tensor< SrcContext > &src, ContextForCopy *context)
Creates a tensor from a source tensor, copying over the content.
Definition: tensor.h:120
TIndex size() const
Returns the size (i.e., the total number of items) of the tensor.
Definition: tensor.h:593
T * mutable_data()
Returns a typed pointer of the underlying storage.
Definition: tensor.h:578
void FreeMemory()
Release whatever memory the tensor was holding but keep size and type information.
Definition: tensor.h:353
const vector< TIndex > & dims() const
Returns the dimensions of the tensor as a vector.
Definition: tensor.h:611
void CopyFrom(const Tensor< SrcContext > &src)
Copies the data from a source tensor.
Definition: tensor.h:190
void Resize(Ts...dim_source)
Resizes a tensor.
Definition: tensor.h:288
TIndex size_from_dim_(int k, const vector< TIndex > &dims)
Return product of all dimensions starting from K.
Definition: tensor.h:40
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...
Tensor()
Initializes an empty tensor.
Definition: tensor.h:98
const CaffeTypeId & id() const
Returns the type id.
Definition: typeid.h:137
const char * name() const
Returns a printable name for the type.
Definition: typeid.h:167
const void * raw_data() const
Returns a const raw void* pointer of the underlying storage.
Definition: tensor.h:472
TypedCopy copy() const
Returns the typed copy function pointer for individual items.
Definition: typeid.h:155
void Reshape(const vector< TIndex > &dims)
Resizes the tensor without touching underlying storage.
Definition: tensor.h:326
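To illustrate the distinction the error message in the source draws: Reshape() only reinterprets the same number of elements and leaves storage untouched, while Resize() may change the element count. A small sketch (the function name is illustrative):

  #include <vector>
  #include "caffe2/core/tensor.h"

  void reshape_example() {
    caffe2::TensorCPU t;
    t.Resize(2, 6);
    t.mutable_data<float>();                 // allocate 12 floats
    t.Reshape(std::vector<int>{3, 4});       // OK: still 12 elements, storage untouched
    // t.Reshape(std::vector<int>{5, 5});    // would fail the size check; use Resize instead
    t.Resize(5, 5);                          // element count changes; memory is reallocated lazily
  }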
string DebugString() const
A utility function to print the debug string for the tensor.
Definition: tensor.h:367
size_t nbytes() const
Returns the total number of bytes of the storage.
Definition: tensor.h:603
void ShareData(const Tensor &src)
Shares the data with another tensor.
Definition: tensor.h:400
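A sketch of aliasing one tensor's storage from another with ShareData(); the destination must already have the same size, and the source must have been materialized via mutable_data():

  #include "caffe2/core/tensor.h"

  void share_data_example() {
    caffe2::TensorCPU a;
    a.Resize(3);
    a.mutable_data<float>()[0] = 7.f;   // materialize a's storage
    caffe2::TensorCPU b;
    b.Resize(3);                        // size must match before sharing
    b.ShareData(a);                     // b now aliases a's buffer; no copy is made
  }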
Tensor(const T &value, Context *context)
Creates a scalar tensor, and fills its content with the given value.
Definition: tensor.h:156
Tensor(const vector< TIndex > &dims)
Creates a tensor of the given dimension.
Definition: tensor.h:106
void ResizeLike(const Tensor< OtherContext > &src_tensor)
Resize the tensor like the source tensor.
Definition: tensor.h:315
TypedDestructor dtor() const
Returns the destructor function pointer for individual items.
Definition: typeid.h:161
TypeMeta is a thin class that allows us to store the type of a container such as a blob...
Definition: typeid.h:88
vector< TIndex > ToVectorTIndex(const std::vector< int > &src)
A utility function to convert vector<int> to vector<TIndex>.
Definition: tensor.h:33
Commandline flags support for Caffe2.
bool IsType() const
Checks if the tensor content is of the given data type.
Definition: tensor.h:644
int ndim() const
Returns the number of dimensions of the data.
Definition: tensor.h:589
Tensor(const Tensor< SrcContext > &src)
Creates a tensor from a source tensor, copying over the content.
Definition: tensor.h:136
void * raw_mutable_data(const TypeMeta &meta)
Returns a mutable raw pointer of the underlying storage.
Definition: tensor.h:510
const size_t & itemsize() const
Returns the size of the item.
Definition: typeid.h:143