# Caffe2 - Python API
# A deep learning, cross platform ML framework
# sparse_lookup.py
1 ## @package sparse_lookup
2 # Module caffe2.python.layers.sparse_lookup
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7 
8 from caffe2.python.helpers.arg_scope import get_current_scope
9 from caffe2.python import schema
10 from caffe2.python.layers.layers import (
11  get_categorical_limit,
12  get_key,
13  IdList,
14  IdScoreList,
15  LayerPsParam,
16  ModelLayer,
17 )
18 import collections
19 import functools
20 import math
21 import numpy as np
22 import operator
23 
24 
def get_sparse_lookup_predictor_version(version):
    """Validate a sparse-lookup predictor version string and return it.

    Args:
        version: one of 'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise'.

    Returns:
        The same string, unchanged.

    Raises:
        AssertionError: if ``version`` is not in the supported set.
    """
    supported_versions = {'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise'}
    assert version in supported_versions, \
        "Unexpected version of sparse_lookup layer {0}".format(version)
    return version
29 
30 
def _is_id_list(input_record):
    """Return True iff ``input_record``'s schema matches the IdList schema."""
    return schema.equal_schemas(input_record, IdList)
33 
34 
def _is_id_score_list(input_record):
    """Return True iff ``input_record``'s schema matches IdScoreList.

    Field types are ignored so that score (value) fields of any numeric
    type are accepted.
    """
    return schema.equal_schemas(
        input_record, IdScoreList, check_field_types=False)
39 
40 
class SparseLookup(ModelLayer):
    """Embedding-lookup layer.

    Maps sparse categorical features (IdList or IdScoreList records) to
    dense pooled embeddings via a learned weight table ``w`` of shape
    ``[categorical_limit] + inner_shape``, pooled per example according
    to ``reducer``.  Supports fp32/fp16 and 8-bit row-wise quantized
    serving versions (selected via arg scope at ``add_ops`` time).
    """

    # Reducers accepted for plain id-list inputs.
    _id_list_supported_reducers = [
        'LogMeanExp', 'LogSumExp', 'Max', 'Mean', 'Sum',
        'WeightedSum', 'WeightedMean', 'Sqrt', 'None']

    # Reducers accepted for id-score-list inputs.
    _id_score_list_supported_reducers = [
        'PositionWeighted', 'Mean', 'Sum', 'WeightedSum', 'WeightedMean', 'None']

    def __init__(self, model, input_record, inner_shape, reducer,
                 weight_init=None, weight_optim=None,
                 name='sparse_lookup', regularizer=None, **kwargs):
        """
        Args:
            model: the model helper this layer is added to.
            input_record: IdList or IdScoreList schema record.
            inner_shape: int or list/tuple - embedding dimension(s).
            reducer: pooling reducer name (see supported-reducer lists).
            weight_init: optional (op_name, kwargs) initializer for ``w``;
                defaults to UniformFill in [-scale, scale].
            weight_optim: optimizer for ``w``.
            name: layer name.
            regularizer: optional regularizer for ``w``.
        """
        super(SparseLookup, self).__init__(model, name, input_record, **kwargs)

        # TODO Add some asserts about input type
        if isinstance(inner_shape, int):
            inner_shape = [inner_shape]
        assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
            "Unexpected type for inner_shape, expected list or tuple, got {0}".\
            format(type(inner_shape))

        if reducer == "PositionWeighted":
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only support IdScoreList, but got {} " +
                "please use PositionWeighted layer to convert IdList " +
                "to IdScoreList").format(repr(self.input_record))
            # Per-position weights come from the record's values field.
            self.external_weights = input_record.values()
        self.reducer = reducer

        input_dim = get_categorical_limit(input_record)
        assert input_dim > 0, (
            "{} should have categorical limit > 0, but got {}".format(
                get_key(input_record)(), input_dim))

        # Standard 1/sqrt(fan_in) uniform init range.
        scale = math.sqrt(1.0 / input_dim)
        self.shape = [input_dim] + inner_shape
        self.weight_init = weight_init if weight_init else (
            'UniformFill', {'min': -scale, 'max': scale})

        if _is_id_list(self.input_record):
            sparse_key = self.input_record.items()
        elif _is_id_score_list(self.input_record):
            sparse_key = self.input_record.keys()
        else:
            raise NotImplementedError()

        if self.input_record.lengths.metadata:
            avg_length = self.input_record.lengths.metadata.expected_value
        else:
            avg_length = None

        self.w = self.create_param(
            param_name='w',
            shape=self.shape,
            initializer=self.weight_init,
            optimizer=weight_optim,
            ps_param=LayerPsParam(
                sparse_key=sparse_key,
                average_length=avg_length),
            regularizer=regularizer
        )

        # Per-row scale/bias used only by the (non-fused) 8-bit path.
        self.scale_bias_init = ('ConstantFill', {'value': 0.0})

        self.scale_bias = self.create_param(
            param_name='scale_bias',
            shape=[],
            initializer=self.scale_bias_init,
            optimizer=model.NoOptim,
        )

        # FIX(review): this assignment was truncated in the source -
        # restored the statement head for the dangling argument list.
        self.output_schema = schema.Scalar(
            (np.float32, inner_shape),
            self.get_next_blob_reference('output'),
        )

    def get_memory_usage(self):
        """Return the fp32 footprint of the embedding table in bytes."""
        return functools.reduce(operator.mul, self.shape) * 4

    def get_fp16_compatible_parameters(self):
        """Parameters that may be converted to fp16 for serving."""
        return [self.w]

    def support_8bit(self):
        # Rowwise quantization makes sense only if shape it's 2D matrix with
        # second dimension >= 8
        if len(self.shape) != 2 or self.shape[1] < 8:
            return False
        return True

    def get_8bits_compatible_parameters(self, fused=True):
        """Return parameters eligible for row-wise 8-bit quantization.

        With ``fused=True`` scale/bias live inside the fused weight blob;
        otherwise the separate ``scale_bias`` param is reported too.
        """
        if not self.support_8bit():
            return []
        if fused:
            RowwiseQuantized8BitsWeight = collections.namedtuple(
                'RowwiseQuantized8BitsWeight', 'w'
            )
            return [RowwiseQuantized8BitsWeight(self.w)]
        else:
            RowwiseQuantized8BitsWeight = collections.namedtuple(
                'RowwiseQuantized8BitsWeight', 'w, scale_bias'
            )
            return [RowwiseQuantized8BitsWeight(self.w, self.scale_bias)]

    def _gather_wrapper(self, net, version, in_indices, out):
        # Gather can work on all kinds of input data types, and output
        # data with the same type. Convert the output of Gather to float,
        # because the follow-up Ops expect fp32.
        if version == 'fp32':
            return net.Gather([self.w, in_indices], out)
        elif version == 'fp16':
            gathered_w = net.Gather([self.w, in_indices], 'gathered_w')

            return net.HalfToFloat(gathered_w, out)
        elif version == 'uint8rowwise':
            gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
            gathered_scale_bias = net.Gather(
                [self.scale_bias, in_indices],
                'gathered_scale_bias'
            )

            return net.Rowwise8BitQuantizedToFloat(
                [gathered_w, gathered_scale_bias], out)
        elif version == 'fused_uint8rowwise':
            gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
            return net.Fused8BitRowwiseQuantizedToFloat(gathered_w, out)
        else:
            # FIX(review): raising a plain string is a TypeError in Python 3
            # (exceptions must derive from BaseException) - wrap in ValueError.
            raise ValueError(
                "Unsupported version of operators in SparseLookup " +
                "layer: {0}".format(version))

    def _sparse_lengths_weighted_reducer(
            self, in_indices, weights, reducer,
            net, version, grad_on_weights=0):
        """Emit a SparseLengthsWeighted* op for the chosen ``version``."""
        op_input = [
            self.w,
            weights,
            in_indices,
            self.input_record.lengths()
        ]
        layer_name = 'SparseLengths' + reducer

        if version in ['fp32', 'fp16']:
            # SparseLengths* Ops will accept either fp16 or fp32 embedding
            # matrix and output fp32 pooled embedding
            net.__getattr__(layer_name)(
                op_input,
                self.output_schema.field_blobs(),
                grad_on_weights=grad_on_weights,
            )
        elif version == 'uint8rowwise':
            op_input.insert(len(op_input), self.scale_bias)
            net.__getattr__(layer_name + '8BitsRowwise')(
                op_input, self.output_schema.field_blobs())
        elif version == 'fused_uint8rowwise':
            net.__getattr__(layer_name + 'Fused8BitRowwise')(
                op_input, self.output_schema.field_blobs())
        else:
            # FIX(review): string raise -> ValueError (see _gather_wrapper).
            raise ValueError(
                "Unsupported version of operator in SparseLookUp " +
                "layer: {0}".format(version))

    # deal with sparse features of id_list type
    def _add_ops_id_list(self, net, version):
        assert self.reducer in self._id_list_supported_reducers, (
            "Unsupported reducer: {} for ID_LIST".format(self.reducer)
        )
        if self.reducer in ['Sum', 'Mean', 'WeightedSum', 'WeightedMean']:
            op_input = [self.w,
                        self.input_record.items(),
                        self.input_record.lengths()]

            # For id list features, the behaviors of 'Sum' and
            # 'WeightedSum' are identical, since we can regard the weight on each
            # id as 1. Similarly, for 'Mean' and 'WeightedMean'.
            if self.reducer == 'WeightedSum':
                self.reducer = 'Sum'
            elif self.reducer == 'WeightedMean':
                self.reducer = 'Mean'

            layer_name = 'SparseLengths' + self.reducer
            if version in ['fp32', 'fp16']:
                # SparseLengths* Ops will accept either fp16 or fp32 embedding
                # matrix and output fp32 pooled embedding
                net.__getattr__(layer_name)(
                    op_input,
                    self.output_schema.field_blobs(),
                )
            elif version == 'uint8rowwise':
                op_input.insert(len(op_input), self.scale_bias)
                net.__getattr__(layer_name + '8BitsRowwise')(
                    op_input, self.output_schema.field_blobs())
            elif version == 'fused_uint8rowwise':
                net.__getattr__(layer_name + 'Fused8BitRowwise')(
                    op_input, self.output_schema.field_blobs())
            else:
                # FIX(review): string raise -> ValueError.
                raise ValueError(
                    "Unsupported version of operator in SparseLookUp " +
                    "layer: {0}".format(version))

        elif self.reducer == 'Sqrt':
            sqrt_weight = net.LengthsToWeights(
                [self.input_record.lengths()],
                [net.NextScopedBlob('lengths_sqrt')],
                power=0.5,
            )
            # FIX(review): restored truncated call head for the dangling
            # argument list in the source.
            self._sparse_lengths_weighted_reducer(
                self.input_record.items(),
                sqrt_weight,
                'WeightedSum', net, version)

        elif self.reducer == 'None':
            # Gather operator will gather the embedding for each id of
            # each IdList.
            self._gather_wrapper(net, version, self.input_record.items(),
                                 self.output_schema.field_blobs())

        else:
            table_rows = self._gather_wrapper(
                net, version, self.input_record.items(), 'table_rows')

            segment_ids = net.LengthsToSegmentIds(
                self.input_record.lengths(),
                self.input_record.lengths() + '_sid')
            net.__getattr__('SortedSegmentRange' + self.reducer)(
                [table_rows, segment_ids],
                self.output_schema.field_blobs(),
            )

    # deal with sparse features of id_score_list type
    def _add_ops_id_score_list(self, net, version):
        assert self.reducer in self._id_score_list_supported_reducers, (
            "Unsupported reducer: {} for ID_SCORE_LIST".format(self.reducer)
        )
        if self.reducer in ['WeightedSum', 'WeightedMean']:
            # FIX(review): restored truncated call head.
            self._sparse_lengths_weighted_reducer(
                self.input_record.keys(),
                self.input_record.values(),
                self.reducer, net, version)

        elif self.reducer in ['Sum', 'Mean']:
            op_input = [self.w,
                        self.input_record.keys(),
                        self.input_record.lengths()]

            layer_name = 'SparseLengths' + self.reducer

            if version in ['fp32', 'fp16']:
                net.__getattr__(layer_name)(
                    op_input,
                    self.output_schema.field_blobs(),
                )
            elif version == 'uint8rowwise':
                net.__getattr__(layer_name + '8BitsRowwise')(
                    op_input, self.output_schema.field_blobs())
            elif version == 'fused_uint8rowwise':
                net.__getattr__(layer_name + 'Fused8BitRowwise')(
                    op_input, self.output_schema.field_blobs())
            else:
                # FIX(review): string raise -> ValueError.
                raise ValueError(
                    "Unsupported version of operator in SparseLookUp " +
                    "layer: {0}".format(version))

        elif self.reducer == 'PositionWeighted':
            # FIX(review): restored truncated call head.
            self._sparse_lengths_weighted_reducer(
                self.input_record.keys(),
                self.external_weights,
                'WeightedSum', net, version, grad_on_weights=1)

        elif self.reducer == 'None':
            # Gather operator will gather the embedding for each id of
            # each IdList.
            self._gather_wrapper(net, version, self.input_record.keys(),
                                 self.output_schema.field_blobs())
        else:
            # FIX(review): string raise -> ValueError.
            raise ValueError(
                "Only Sum, Mean, None are supported for IdScoreList input." +
                "Trying to create with {}".format(self.reducer))

    def add_ops(self, net):
        """Add the lookup ops to ``net`` for the scoped predictor version."""
        cur_scope = get_current_scope()
        version = get_sparse_lookup_predictor_version(
            **cur_scope.get(get_sparse_lookup_predictor_version.__name__,
                            {'version': 'fp32'}))

        # TODO(amalevich): Layer should not be responsible for decision about
        # quantization.
        if not self.support_8bit() and version in {'uint8rowwise',
                                                   'fused_uint8rowwise'}:
            version = 'fp32'

        if _is_id_list(self.input_record):
            self._add_ops_id_list(net, version=version)
        elif _is_id_score_list(self.input_record):
            self._add_ops_id_score_list(net, version=version)
        else:
            # FIX(review): string raise -> ValueError.
            raise ValueError(
                "Unsupported input type {0}".format(self.input_record))