pipeline.py
## @package pipeline
# Module caffe2.python.pipeline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core, queue_util
from caffe2.python.dataio import Reader, Writer
from caffe2.python.net_builder import NetBuilder, ops
from caffe2.python.schema import as_record, Field
from caffe2.python.task import Node, Task, TaskGroup


class Output(object):
    """
    Represents the result of a processor function. A processor can either
    return an Output, or it can return a record, in which case an Output
    will be created for it afterwards.
    """
    def __init__(self, nets=None, record=None, should_stop=None):
        builder_children = NetBuilder.current().get()
        assert nets is None or len(builder_children) == 0, (
            'Cannot both use `ops` syntax and return a list of nets.')
        if nets is None:
            nets = builder_children
        if isinstance(nets, core.Net):
            nets = [nets]
        self.nets = [] if nets is None else list(nets)
        self.record = None if record is None else as_record(record)
        self.should_stop = should_stop
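
# Example (illustrative sketch, not part of the original module): the
# simplest processor just transforms and returns a record, and
# normalize_processor_output (below) wraps the result in an Output for it.
# A processor can also construct an Output explicitly to attach nets or a
# stop signal.
#
#     def my_processor(rec):    # hypothetical pass-through processor
#         return rec            # equivalent to returning Output(record=rec)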


DEFAULT_QUEUE_CAPACITY = 100


def _init_output(output, capacity, global_init_net, global_exit_net):
    if output is None:
        out_queue = queue_util.Queue(
            capacity=(
                capacity if capacity is not None
                else DEFAULT_QUEUE_CAPACITY))
        writer = out_queue.writer()
    elif isinstance(output, Writer):
        assert capacity is None, 'capacity would not be used.'
        out_queue = None
        writer = output
    elif hasattr(output, 'writer'):
        assert capacity is None, 'capacity would not be used.'
        out_queue = output
        writer = output.writer()
    else:
        raise ValueError('output must be a writer, queue or stream.')
    writer.setup_ex(global_init_net, global_exit_net)
    return out_queue, writer


def make_processor(processor):
    if processor is None:
        return lambda rec: rec
    elif isinstance(processor, core.Net):
        return NetProcessor(processor)
    else:
        return processor


def normalize_processor_output(output):
    """
    Allow for processors to return results in several formats.
    TODO(azzolini): simplify once all processors use NetBuilder API.
    """
    if isinstance(output, Output):
        """ Processor returned an Output. """
        return output
    elif isinstance(output, Field):
        """ Processor returned a record. """
        return Output(record=output)
    elif isinstance(output, tuple):
        is_record_and_blob = (
            len(output) == 2 and
            isinstance(output[0], Field) and
            isinstance(output[1], core.BlobReference))
        if is_record_and_blob:
            """ Processor returned (record, stop_blob). """
            return Output(None, *output)
        else:
            """ Processor returned (nets, record, stop_blob). """
            return Output(*output)
    else:
        """ Processor returned nets, no output. """
        return Output(output)
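
# Illustrative summary (not part of the original module) of the return shapes
# accepted above; `nets`, `rec` and `stop_blob` are hypothetical placeholders:
#
#     normalize_processor_output(Output(nets, rec))       # Output, as-is
#     normalize_processor_output(rec)                     # record only
#     normalize_processor_output((rec, stop_blob))        # (record, stop_blob)
#     normalize_processor_output((nets, rec, stop_blob))  # full tuple
#     normalize_processor_output(nets)                    # nets only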


def pipe(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, num_runtime_threads=1):
    """
    Given a Reader, Queue or DataStream in `input`, and optionally, a Writer,
    Queue or DataStream in `output`, creates a Task that, when run, will
    pipe the input into the output, using multiple parallel threads.
    Additionally, if a processor is given, it will be called between reading
    and writing steps, allowing it to transform the record.

    Args:
        input:       either a Reader, Queue or DataStream that will be read
                     until a stop is signaled either by the reader or the
                     writer.
        output:      either a Writer, a Queue or a DataStream that will be
                     written to as long as neither reader nor writer signal
                     a stop condition. If output is not provided or is None,
                     a Queue is created with given `capacity` and written to.
        num_threads: number of concurrent threads used for processing and
                     piping. If set to 0, no Task is created, and a
                     reader is returned instead -- the reader returned will
                     read from the reader passed in and process it.
                     ** DEPRECATED **. Use `num_runtime_threads` instead.
                     This option will be removed once all readers/processors
                     support `num_runtime_threads`.
        processor:   (optional) function that takes an input record and
                     optionally returns a record; this will be called
                     between read and write steps. If the processor does
                     not return a record, a writer will not be instantiated.
                     Processor can also be a core.Net with input and output
                     records properly set. In that case, a NetProcessor is
                     instantiated, cloning the net for each of the threads.
        name:        (optional) name of the task to be created.
        capacity:    when output is not passed, a queue of given `capacity`
                     is created and written to.
        group:       (optional) explicitly add the created Task to this
                     TaskGroup, instead of using the currently active one.
        num_runtime_threads: similar to `num_threads`, but instead of
                     expanding the tasks with a `for` loop in python, does
                     that at runtime. This is preferable to `num_threads`,
                     but some processors/readers still need to be called
                     multiple times in python.

    Returns:
        Output Queue, DataStream, Reader, or None, depending on the parameters
        passed.
    """
    result, _ = _pipe_step(
        input, output, num_threads, processor, name, capacity, group,
        num_runtime_threads)
    return result
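
# Example (illustrative sketch, not part of the original module): piping a
# reader through a processor into a freshly created queue, using runtime
# threads. `my_reader` and `my_processor` are hypothetical placeholders.
#
#     out_queue = pipe(my_reader, processor=my_processor,
#                      num_runtime_threads=4, capacity=64)
#     # `out_queue` can then be passed as `input` to a downstream pipe().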


def pipe_and_output(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, num_runtime_threads=1, final_outputs=None):
    """
    Similar to `pipe`, with the additional ability for the pipe Task to
    return output values to the `Session` once done.

    Returns:
        Tuple (out_queue, *task_outputs)
            out_queue:    same as return value of `pipe`.
            task_outputs: TaskOutput object, fetchable from the client after
                          session.run() returns.
    """
    assert num_threads > 0
    result, task = _pipe_step(
        input, output, num_threads, processor, name, capacity, group,
        num_runtime_threads, final_outputs)
    output = None
    if final_outputs is not None:
        output = task.outputs()
        if type(final_outputs) not in (list, tuple):
            output = output[0]
    return result, output
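
# Example (illustrative sketch, not part of the original module): requesting
# a task output that can be fetched once the session has run. `my_reader`,
# `my_processor` and `total` (a blob produced by the processor) are
# hypothetical placeholders.
#
#     out_queue, total_output = pipe_and_output(
#         my_reader, processor=my_processor, final_outputs=total)
#     # after session.run(...) completes: total_output.fetch()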


def processor_name(processor):
    if hasattr(processor, 'name'):
        return processor.name
    if hasattr(processor, 'func_name'):
        if processor.func_name == '<lambda>':
            return processor.__module__
        if hasattr(processor, 'im_class'):
            return '%s.%s' % (
                processor.im_class.__name__, processor.func_name)
        return processor.func_name
    return processor.__class__.__name__


def _runtime_threads_task(name, group, final_outputs, reader, num_threads,
                          output, capacity):
    node_name = str(Node.current())
    profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
        node_name,
        "pipe",
        name,
        processor_name(reader) if reader else "NoInput",
        processor_name(output) if output else "NoOutput")

    with Task(name=name, group=group, outputs=final_outputs,
              num_instances=num_threads) as task:
        global_exit_net = core.Net('pipe:exit')
        global_init_net = core.Net('pipe:init')
        reader.setup_ex(global_init_net, global_exit_net)

        init_net = core.Net('pipe:instance:init')
        exit_net = core.Net('pipe:instance:exit')
        read_nets, status, rec = reader.read_record_ex(init_net, exit_net)
        init_net.ConstantFill(
            [], [status],
            shape=[],
            value=False,
            dtype=core.DataType.BOOL
        )

        if rec is not None:
            out_queue, writer = _init_output(
                output, capacity, global_init_net, global_exit_net)
            write_nets, _ = writer.write_record_ex(
                rec, init_net, exit_net, status)
        else:
            out_queue = None
            write_nets = []

        with ops.task_init():
            ops.net(global_init_net)
        with ops.task_instance_init():
            ops.net(init_net)

        timer_start_net = core.Net('timer_start')
        timer = timer_start_net.TimerBegin([], counter_name=profiler_name)
        timer_end_net = core.Net('timer_end')
        timer_end_net.TimerEnd(timer, [])

        ops.net(core.execution_step(
            'body',
            [timer_start_net] + list(read_nets) + list(write_nets) +
            [timer_end_net],
            should_stop_blob=status))
        ops.net(timer_end_net)

        with ops.task_instance_exit():
            ops.net(exit_net)
        with ops.task_exit():
            ops.net(global_exit_net)

    return out_queue, task
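
# Structure of the task built above (descriptive note, not original code):
# each task instance runs `init_net` once, then loops over
# [timer_start_net, read_nets..., write_nets..., timer_end_net] until
# `status` becomes true, and finally runs `exit_net`; `global_init_net` and
# `global_exit_net` run once per task, around all instances.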


def _static_threads_task(name, group, final_outputs, reader, num_threads,
                         output, capacity):
    node_name = str(Node.current())
    profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
        node_name,
        "pipe",
        name,
        processor_name(reader) if reader else "NoInput",
        processor_name(output) if output else "NoOutput")

    with Task(name=name, group=group, outputs=final_outputs) as task:
        global_exit_net = core.Net('exit')
        global_init_net = core.Net('init')
        reader.setup_ex(global_init_net, global_exit_net)

        out_queue = None
        writer = None

        steps = []
        for thread_id in range(num_threads):
            with NetBuilder(name='t:%d' % thread_id) as nb:
                init_net = core.Net('init')
                exit_net = core.Net('exit')
                read_nets, status, rec = reader.read_record_ex(
                    init_net, exit_net)
                init_net.ConstantFill(
                    [], [status],
                    shape=[],
                    value=False,
                    dtype=core.DataType.BOOL
                )

                if rec is not None:
                    if writer is None:
                        # hack so that the out queue gets the right name
                        # prefix (otherwise it would be prefixed with the
                        # thread id)
                        with NetBuilder(_fullname=task.name):
                            out_queue, writer = _init_output(
                                output, capacity, global_init_net,
                                global_exit_net)
                    write_nets, _ = writer.write_record_ex(
                        rec, init_net, exit_net, status)
                else:
                    write_nets = []

                timer_start_net = core.Net('timer_start')
                timer = timer_start_net.TimerBegin(
                    [], counter_name=profiler_name)
                timer_end_net = core.Net('timer_end')
                timer_end_net.TimerEnd(timer, [])

                ops.net(init_net)
                ops.net(core.execution_step(
                    'body',
                    [timer_start_net] + list(read_nets) + list(write_nets) +
                    [timer_end_net],
                    should_stop_blob=status))
                ops.net(timer_end_net)
                ops.net(exit_net)
            steps.append(core.to_execution_step(nb))
        ops.net(global_init_net)
        ops.net(core.execution_step('body', steps, concurrent_substeps=True))
        ops.net(global_exit_net)
    return out_queue, task


def _pipe_step(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, num_runtime_threads=None,
        final_outputs=None):
    """
    Implementation shared by `pipe` and `pipe_and_output`.
    """
    assert num_threads <= 1 or num_runtime_threads <= 1, (
        'At most one of num_threads and num_runtime_threads can be set.')

    if isinstance(input, Reader):
        reader = input
    elif hasattr(input, 'reader'):
        reader = input.reader()
    else:
        raise ValueError('input must be a reader, queue or stream.')

    if processor is not None:
        reader = ProcessingReader(reader, processor)

    if num_threads == 0 or num_runtime_threads == 0:
        assert output is None
        return reader, None

    if name is None and processor is not None:
        name = processor_name(processor)
    if name is None and output is not None:
        name = 'pipe_into:%s' % processor_name(output)
    if name is None:
        name = 'pipe_from:%s' % processor_name(input)

    if num_threads > 1:
        return _static_threads_task(
            name, group, final_outputs, reader, num_threads, output, capacity)
    else:
        return _runtime_threads_task(
            name, group, final_outputs, reader, num_runtime_threads, output,
            capacity)


class ProcessingReader(Reader):
    """
    Reader that reads from an upstream reader, calls the processor, and
    returns the processed record.
    """
    def __init__(self, reader, processor):
        Reader.__init__(self)
        self.reader = reader
        self.processor = make_processor(processor)

    def setup_ex(self, init_net, finish_net):
        self.reader.setup_ex(init_net, finish_net)

    def read_ex(self, init_net, exit_net):
        read_nets, status, rec = self.reader.read_record_ex(
            init_net, exit_net)
        # We don't use `status` as the stop_blob of the NetBuilder because it
        # is not guaranteed to end up being the true stop_blob. For example,
        # ReaderWithLimitBase doesn't pass `status` through but rather copies
        # from it.
        with NetBuilder() as nb:
            # The current NetBuilder is optionally used inside the processor,
            # and its children are then retrieved inside
            # normalize_processor_output.
            # Once readers and writers also use NetBuilder,
            # this logic will be more natural.
            result = normalize_processor_output(self.processor(rec))
        read_nets += result.nets
        if result.should_stop or nb._stop_blob:
            stop_net = core.Net('stop_net')
            if result.should_stop:
                stop_net.Or([status, result.should_stop], [status])
            if nb._stop_blob:
                stop_net.Or([status, nb._stop_blob], [status])
            read_nets.append(stop_net)
        if hasattr(self.processor, 'setup'):
            init_net.add_attribute(TaskGroup.LOCAL_SETUP, self.processor)
        self._set_schema(result.record)
        fields = result.record.field_blobs() if result.record else None
        return read_nets, status, fields
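
# Example (illustrative sketch, not part of the original module): `pipe` with
# `num_threads=0` returns a ProcessingReader instead of creating a Task, so
# processors can be chained before a final piped stage. `my_reader`,
# `parse_row` and `batch_rows` are hypothetical placeholders.
#
#     processed = pipe(my_reader, processor=parse_row, num_threads=0)
#     out_queue = pipe(processed, processor=batch_rows)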


class NetProcessor(object):
    """
    Processor that clones a core.Net each time it's called, executing
    the cloned net as the processor. It requires the Net to have input
    and (optionally) output records set, with net.set_input_record() and
    net.set_output_record().
    """
    def __init__(self, net, stop_signal=None, thread_init_nets=None,
                 name=None):
        assert isinstance(net, core.Net)
        assert stop_signal is None or isinstance(
            stop_signal, core.BlobReference)
        self.name = name or str(net)
        self.thread_init_nets = thread_init_nets or []
        self.net = net
        self._stop_signal = stop_signal
        self._blob_maps = []
        self._frozen = False
        self._cloned_init_nets = []

    def setup(self, init_net):
        self._frozen = True
        cloned_init_nets = self._cloned_init_nets
        self._cloned_init_nets = []
        return cloned_init_nets

    def __call__(self, rec):
        assert not self._frozen
        prefix = NetBuilder.current().name + '/'
        blob_remap = {}
        for net in self.thread_init_nets:
            new_net, _ = core.clone_and_bind_net(
                net, str(net) + prefix, prefix, blob_remap)
            self._cloned_init_nets.append(new_net)

        new_net, remappings = core.clone_and_bind_net(
            self.net, str(self.net) + prefix, prefix, blob_remap, rec)

        if self._stop_signal is None:
            stop_signal = None
        elif str(self._stop_signal) in remappings:
            stop_signal = core.BlobReference(
                remappings[str(self._stop_signal)],
                net=new_net)
        else:
            stop_signal = self._stop_signal

        self._blob_maps.append(remappings)
        return Output([new_net], new_net.output_record(), stop_signal)

    def blob_maps(self):
        self._frozen = True
        return self._blob_maps
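
# Example (illustrative sketch, not part of the original module): a core.Net
# with input/output records set can be passed directly as a processor;
# make_processor wraps it in a NetProcessor, which clones it per thread.
# `my_reader`, `in_record` and `out_record` are hypothetical placeholders.
#
#     proc_net = core.Net('proc')
#     proc_net.set_input_record(in_record)
#     proc_net.set_output_record(out_record)
#     out_queue = pipe(my_reader, processor=proc_net, num_threads=4)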