Caffe2 - Python API
A deep learning, cross-platform ML framework
helper.py
1 ## @package onnx
2 # Module caffe2.python.onnx.helper
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7 
8 from caffe2.proto import caffe2_pb2
9 from onnx.backend.base import namedtupledict
10 
11 from caffe2.python.onnx.workspace import Workspace
13 
14 import io
15 import logging
16 import time
17 
18 
19 log = logging.getLogger(__name__)
20 
21 
def dummy_name(used_names=None):
    """Generate a fresh dummy blob name, or reset the generator.

    When called with no argument, asks the C extension for a new unique
    dummy name and returns it. When *used_names* is given, resets the
    C-side dummy-name state so future names avoid that set, and returns
    None.

    NOTE(review): ``C`` is the caffe2 C extension module, imported
    elsewhere in this file (import line not visible in this chunk).
    """
    if used_names is not None:
        C.reset_dummy_name(set(used_names))
        return None
    return C.new_dummy_name()
28 
def c2_native_run_op(op_def, inputs):
    """Run a single Caffe2 operator in a fresh workspace.

    Args:
        op_def: an OperatorDef protobuf describing the op to run.
        inputs: either a dict mapping blob names to values, or a
            sequence of values ordered to match ``op_def.input``.

    Returns:
        (workspace, outputs) where *outputs* is a namedtuple keyed by
        ``op_def.output``.
    """
    workspace = Workspace()

    # Normalize both input forms to (name, value) pairs, then feed once.
    if isinstance(inputs, dict):
        named_inputs = list(inputs.items())
    else:
        assert len(op_def.input) == len(inputs)
        named_inputs = list(zip(op_def.input, inputs))
    for blob_name, blob_value in named_inputs:
        workspace.FeedBlob(blob_name, blob_value, op_def.device_option)

    workspace.RunOperatorOnce(op_def)

    out_names = op_def.output
    out_values = [workspace.FetchBlob(n) for n in out_names]
    return workspace, namedtupledict('Outputs', out_names)(*out_values)
44 
45 
def c2_native_run_net(init_net, predict_net, inputs):
    """Run a Caffe2 (init_net, predict_net) pair in a fresh workspace.

    Args:
        init_net: optional NetDef run once to initialize parameters.
        predict_net: the NetDef to execute.
        inputs: either a dict mapping blob names to values, or a
            sequence of values matched against ``predict_net``'s
            external inputs (see below).

    Returns:
        (workspace, outputs) where *outputs* is a namedtuple keyed by
        ``predict_net.external_output``.
    """
    workspace = Workspace()
    if init_net:
        workspace.RunNetOnce(init_net)

    if isinstance(inputs, dict):
        for blob_name, blob_value in inputs.items():
            workspace.FeedBlob(blob_name, blob_value, predict_net.device_option)
    else:
        # Sequence form: figure out which external inputs the values
        # correspond to.
        uninitialized = [
            blob_name
            for blob_name in predict_net.external_input
            if not workspace.HasBlob(blob_name)
        ]
        if len(uninitialized) == len(inputs):
            # The values line up with exactly the not-yet-fed inputs.
            feed_names = uninitialized
        else:
            # Everything (or a mismatched subset) is already
            # initialized; treat the values as overriding the first
            # len(inputs) external inputs, in declaration order.
            assert len(inputs) <= len(predict_net.external_input)
            feed_names = predict_net.external_input[:len(inputs)]
        for blob_name, blob_value in zip(feed_names, inputs):
            workspace.FeedBlob(blob_name, blob_value, predict_net.device_option)

    workspace.RunNetOnce(predict_net)

    out_names = predict_net.external_output
    out_values = [workspace.FetchBlob(n) for n in out_names]
    return workspace, namedtupledict('Outputs', out_names)(*out_values)
74 
75 
def load_caffe2_net(file):
    """Deserialize a Caffe2 NetDef from a binary protobuf file.

    Args:
        file: path to a file containing a serialized NetDef.

    Returns:
        the parsed ``caffe2_pb2.NetDef``.
    """
    with open(file, "rb") as handle:
        serialized = handle.read()
    net = caffe2_pb2.NetDef()
    net.ParseFromString(serialized)
    return net
81 
82 
def save_caffe2_net(net, file, output_txt=False):
    """Serialize a Caffe2 net to *file* as binary protobuf.

    Args:
        net: a protobuf message (e.g. NetDef) to serialize.
        file: destination path for the binary serialization.
        output_txt: when True, also write the text representation to
            ``file + "txt"`` (so ``model.pb`` gains a sibling
            ``model.pbtxt``).
    """
    with open(file, "wb") as binary_out:
        binary_out.write(net.SerializeToString())
    if output_txt:
        with open(file + "txt", "w") as text_out:
            text_out.write(str(net))
89 
90 
def benchmark_caffe2_model(init_net, predict_net, warmup_iters=3, main_iters=10, layer_details=True):
    '''
    Run Caffe2's built-in benchmark net on the given model.

    Args:
        init_net: optional NetDef run once to initialize parameters.
        predict_net: the NetDef to benchmark (must have a ``name``).
        warmup_iters: untimed warm-up iterations.
        main_iters: timed iterations to average over.
        layer_details: when True, also collect per-operator timings.

    Returns the execution time per iteration (millisecond).
    '''
    workspace = Workspace()
    if init_net:
        workspace.RunNetOnce(init_net)
    workspace.CreateNet(predict_net)
    stats = workspace.BenchmarkNet(
        predict_net.name, warmup_iters, main_iters, layer_details)
    # Drop the workspace eagerly to release the model's blobs.
    del workspace
    # First entry is the overall per-iteration time.
    return stats[0]
103 
104 
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
                            main_iters=10, verbose=False):
    '''
    Run the model several times, and measure the execution time.

    Args:
        model: a callable invoked as ``model(*inputs)``.
        inputs: sequence of positional arguments for *model*.
        training: unused; kept for backward compatibility.
        warmup_iters: untimed warm-up iterations before measuring.
        main_iters: timed iterations to average over.
        verbose: unused; kept for backward compatibility.

    Return the execution time per iteration (millisecond).
    '''
    for _i in range(warmup_iters):
        model(*inputs)
    total_pytorch_time = 0.0
    for _i in range(main_iters):
        # perf_counter is monotonic and high-resolution; time.time can
        # jump on wall-clock adjustments and would skew the benchmark.
        ts = time.perf_counter()
        model(*inputs)
        total_pytorch_time += time.perf_counter() - ts
    per_iter_ms = total_pytorch_time * 1000 / main_iters
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.getLogger(__name__).info(
        "The PyTorch model execution time per iter is %s milliseconds, "
        "%s iters per second.", per_iter_ms,
        main_iters / total_pytorch_time)
    return per_iter_ms