Python tensorflow.python.framework.tensor_util.MakeNdarray() Examples

The following are 30 code examples of tensorflow.python.framework.tensor_util.MakeNdarray(). Each example is excerpted from an open-source project; the source file and license are noted above each one. You may also want to check out all available functions/classes of the module tensorflow.python.framework.tensor_util.
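As a quick orientation before the examples: MakeNdarray() is the inverse of make_tensor_proto(). It converts a serialized TensorProto back into a NumPy ndarray, recovering both dtype and shape. A minimal round-trip sketch (assuming the TF-internal import path shown, which is also importable under TF 2.x):

import numpy as np
from tensorflow.python.framework import tensor_util

# Serialize an ndarray into a TensorProto, then recover it.
proto = tensor_util.make_tensor_proto(np.arange(6, dtype=np.float32).reshape(2, 3))
array = tensor_util.MakeNdarray(proto)
print(array.dtype, array.shape)  # float32 (2, 3)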
Example #1
Source File: math_grad.py    From lambda-packs with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None] 
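The line to notice above is axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value")): a Const op stores its payload as a TensorProto in its "value" attr, and MakeNdarray recovers it as a NumPy array. A minimal sketch of that pattern in isolation (graph mode assumed, written against the tf.compat.v1 API):

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import tensor_util

tf.disable_eager_execution()
axes = tf.constant([0, 1], dtype=tf.int32)  # a Const op in the graph
proto = axes.op.get_attr("value")           # its TensorProto payload
print(tensor_util.MakeNdarray(proto))       # [0 1]
# The check _SumGrad performs: do the axes cover every dimension?
print(np.array_equal(tensor_util.MakeNdarray(proto), np.arange(2)))  # True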
Example #2
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def _get_bias(self, source_node, IR_node):
        if not source_node.out_edges:
            return

        add_node = self.tf_graph.get_node(source_node.out_edges[0])
        if add_node.type != "Add" and add_node.type != "BiasAdd":
            return

        variable = self.check_const(self.tf_graph.get_node(add_node.in_edges[1])) #add_bias node
        if not variable or variable.type != 'Const':
            return

        bias_value = variable.get_attr('value')
        bias = tensor_util.MakeNdarray(bias_value)

        # assert variable.get_attr('_output_shapes')[0].dim[0].size == IR_node.attr['kernel_shape'].list.i[-1]

        add_node.real_name = IR_node.name
        add_node.covered = True
        IR_node.attr['use_bias'].b = True
        current_layer = self.weights[source_node.name]
        current_layer['bias'] = bias 
Example #3
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testHalf(self):
    t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
    self.assertProtoEquals("""
      dtype: DT_HALF
      tensor_shape {
        dim {
          size: 2
        }
      }
      half_val: 18688
      half_val: 19712
      """, t)

    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.float16, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a) 
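The two half_val entries are not the numbers 10 and 20 but their raw IEEE 754 binary16 bit patterns, widened to int for the proto. A quick NumPy check:

import numpy as np

# float16 payloads are stored as 16-bit patterns: 0x4900 and 0x4D00.
print(np.array([10.0, 20.0], dtype=np.float16).view(np.uint16))  # [18688 19712]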
Example #4
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testLargeNegativeInt(self):
    # We don't use the min np.int64 value here
    # because it breaks np.abs().
    #
    # np.iinfo(np.int64).min = -9223372036854775808
    # np.iinfo(np.int64).max = 9223372036854775807
    # np.abs(-9223372036854775808) = -9223372036854775808
    value = np.iinfo(np.int64).min + 1
    t = tensor_util.make_tensor_proto(value)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: %d
      """ % value, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.int64, a.dtype)
    self.assertAllClose(np.array(value, dtype=np.int64), a) 
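The caveat in the comment is ordinary two's-complement behavior and easy to reproduce: int64 has no positive counterpart for its minimum, so np.abs overflows back to the same negative value (recent NumPy versions also emit a RuntimeWarning here):

import numpy as np

lo = np.int64(np.iinfo(np.int64).min)
print(lo, np.abs(lo))  # -9223372036854775808 -9223372036854775808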
Example #5
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def rename_StridedSlice(self, source_node):
        IR_node = self._convert_identity_operation(source_node, end_idx=1, new_op='Slice')
        kwargs = {
            'begin_mask' : source_node.get_attr('begin_mask'),
            'end_mask'   : source_node.get_attr('end_mask'),
        }

        starts = self.get_parent(source_node.name, [1]).layer.attr['value'].tensor
        starts = tensor_util.MakeNdarray(starts).tolist()
        kwargs['starts'] = starts

        ends = self.get_parent(source_node.name, [2]).layer.attr['value'].tensor
        ends = tensor_util.MakeNdarray(ends).tolist()
        kwargs['ends'] = ends

        if self.get_parent(source_node.name, [3]) is not None:
            strides = self.get_parent(source_node.name, [3]).layer.attr['value'].tensor
            strides = tensor_util.MakeNdarray(strides).tolist()
            kwargs['strides'] = strides

        assign_IRnode_values(IR_node, kwargs) 
Example #6
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def rename_MatMul(self, source_node):
        IR_node = self._convert_identity_operation(source_node, end_idx=1)
        input_weight_node = self.src_graph.get_parent(source_node.name, [1])
        weightnode = self.check_const(input_weight_node)
        weight_value = weightnode.get_attr('value')

        weight = tensor_util.MakeNdarray(weight_value)
        self.set_weight(source_node.name, 'weights', weight)

        units = source_node.layer.attr['_output_shapes'].list.shape[-1].dim[-1].size
        IR_node.attr['units'].i = units

        if source_node.out_edges and self.tf_graph.get_node(source_node.out_edges[0]).type == 'BiasAdd':
            add_node = self.tf_graph.get_node(source_node.out_edges[0])
            add_node.covered = True
            add_node.real_name = source_node.real_name

            TensorflowParser2._copy_and_reop(source_node, IR_node, 'FullyConnected')
            variable = self.tf_graph.get_node(add_node.in_edges[1]) #add_bias node
            biasnode = self.check_const(variable)
            bias_value = biasnode.get_attr('value')
            bias = tensor_util.MakeNdarray(bias_value)
            self.set_weight(source_node.name, 'bias', bias)
            IR_node.attr['use_bias'].b = True 
Example #7
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [create_constant_node(input_node.name, tensor_value_rounded,
                               tf.float32, shape=tensor_shape_list)] 
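quantize_array itself is not shown on this page. As a rough illustration of the bucketing it performs, here is a hypothetical stand-in (an assumption, not the script's actual implementation) that snaps each weight to the nearest of num_buckets evenly spaced levels:

import numpy as np

def quantize_array_sketch(arr, num_buckets):
    # Hypothetical sketch: snap floats to num_buckets levels spanning [min, max].
    lo, hi = arr.min(), arr.max()
    if hi == lo:
        return arr.copy()
    levels = np.linspace(lo, hi, num_buckets)
    idx = np.abs(arr[..., None] - levels).argmin(axis=-1)
    return levels[idx]

print(quantize_array_sketch(np.array([0.0, 0.3, 0.5, 1.0]), 4))  # snapped to {0, 1/3, 2/3, 1}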
Example #8
Source File: optimize_for_inference_lib.py    From deep_image_model with Apache License 2.0
def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)
  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value 
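Calling it is straightforward once you have a GraphDef in hand; a minimal graph-mode sketch (with values_from_const as defined above in scope):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
g = tf.Graph()
with g.as_default():
    tf.constant([1.0, 2.0], name="weights")

node_def = g.as_graph_def().node[0]  # the Const NodeDef for "weights"
print(values_from_const(node_def))   # [1. 2.]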
Example #9
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and op.inputs[1].op.type ==
      "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None] 
Example #10
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testComplex64NpArray(self):
    t = tensor_util.make_tensor_proto(
        np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
    # scomplex_val are real_0, imag_0, real_1, imag_1, ...
    self.assertProtoEquals("""
      dtype: DT_COMPLEX64
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      scomplex_val: 1
      scomplex_val: 2
      scomplex_val: 3
      scomplex_val: 4
      scomplex_val: 5
      scomplex_val: 6
      scomplex_val: 7
      scomplex_val: 8
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.complex64, a.dtype)
    self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a) 
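The interleaving noted in the comment is easy to confirm from NumPy alone: viewing a complex64 array as float32 exposes exactly the real/imag pairs that scomplex_val stores:

import numpy as np

c = np.array([[1+2j, 3+4j], [5+6j, 7+8j]], dtype=np.complex64)
print(c.view(np.float32).ravel())  # [1. 2. 3. 4. 5. 6. 7. 8.]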
Example #11
Source File: tensorflow_parser.py    From MMdnn with MIT License
def _rename_Const(self, source_node):
        IR_node = self._convert_identity_operation(source_node, in_edge_count=0, new_op='Constant') # Constant
        value = source_node.get_attr('value')
        if value.float_val:
            shape = tuple(self.tensor_shape_to_list(value.tensor_shape))
            value = np.full(shape, value.float_val[0])
        elif value.int_val:
            shape = tuple(self.tensor_shape_to_list(value.tensor_shape))
            value = np.full(shape, value.int_val[0])
        else:
            value = np.array(tensor_util.MakeNdarray(value).tolist())
        
        if value.ndim > 1:
            self.set_weight(source_node.name, 'value', value)
        else:
            kwargs = {'value': value}
            assign_IRnode_values(IR_node, kwargs) 
Example #12
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testComplex128N(self):
    t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                      dtype=tf.complex128)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX128
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      dcomplex_val: 1
      dcomplex_val: 2
      dcomplex_val: 3
      dcomplex_val: 4
      dcomplex_val: 5
      dcomplex_val: 6
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.complex128, a.dtype)
    self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a) 
Example #13
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testComplex64N(self):
    t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                      dtype=tf.complex64)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX64
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      scomplex_val: 1
      scomplex_val: 2
      scomplex_val: 3
      scomplex_val: 4
      scomplex_val: 5
      scomplex_val: 6
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.complex64, a.dtype)
    self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a) 
Example #14
Source File: tensorflow_parser.py    From MMdnn with MIT License
def rename_Slice(self, source_node):
        input_node_begin = self.get_parent(source_node.name, [1])
        input_node_size = self.get_parent(source_node.name, [2])

        begin = tensor_util.MakeNdarray(input_node_begin.layer.attr['value'].tensor)
        size = tensor_util.MakeNdarray(input_node_size.layer.attr['value'].tensor)

        IR_node = self._convert_identity_operation(source_node, in_edge_count=1, new_op='Slice')

        # TODO:  axis
        end = size + begin
        kwargs = {
            'starts' : begin,
            'ends' : end
        }

        assign_IRnode_values(IR_node, kwargs) 
Example #15
Source File: optimize_for_inference_lib.py    From lambda-packs with MIT License
def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)
  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value 
Example #16
Source File: quantize_graph.py    From pokemon-mini with Apache License 2.0
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ] 
Example #17
Source File: quantize_graph.py    From sketch-to-react-native with MIT License
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ] 
Example #18
Source File: quantize_graph.py    From MobileNet with Apache License 2.0
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ] 
Example #19
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and
      op.inputs[1].op.type == "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None] 
Example #20
Source File: optimize_for_inference_lib.py    From auto-alt-text-lambda-api with MIT License
def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)
  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value 
Example #21
Source File: quantize_graph.py    From tensorflow-for-poets-2 with Apache License 2.0
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats."""
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in choosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ] 
Example #22
Source File: rewriter.py    From MMdnn with MIT License
def store_const_to_top(self, match_result):
        top_node = list(match_result._pattern_to_op.values())[0]
        kwargs = dict()
        for pattern, op in match_result._pattern_to_op.items():
            if pattern.name and pattern.type == 'Const':
                if tensor_util.MakeNdarray(op.get_attr('value')).shape == (1, ):
                    kwargs[pattern.name] = np.asscalar(tensor_util.MakeNdarray(op.get_attr('value')))
                else:
                    kwargs[pattern.name] = np.squeeze(tensor_util.MakeNdarray(op.get_attr('value')))
        if hasattr(top_node, 'kwargs'):
            top_node.kwargs.update(kwargs)
        else:
            top_node.kwargs = kwargs 
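A portability note if you run this against a current NumPy: np.asscalar was removed in NumPy 1.23, and ndarray.item() is the drop-in replacement:

import numpy as np

arr = np.array([3.5])
# np.asscalar(arr) fails on NumPy >= 1.23; .item() does the same job.
print(arr.item())  # 3.5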
Example #23
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testStringTuple(self):
    t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 4 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.object, a.dtype)
    self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a) 
Example #24
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testComplex128(self):
    t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX128
      tensor_shape {}
      dcomplex_val: 1
      dcomplex_val: 2
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.complex128, a.dtype)
    self.assertAllEqual(np.array(1 + 2j), a) 
Example #25
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def _convert_reduction_operators(self, source_node, new_op=None):
        IR_node = self._convert_identity_operation(source_node, 1, new_op)

        # keep dims
        IR_node.attr['keepdims'].b = source_node.layer.attr['keep_dims'].b

        # axes
        axes = self.get_parent(source_node.name, [1]).layer.attr['value'].tensor
        axes = tensor_util.MakeNdarray(axes)
        IR_node.attr['axes'].list.i.extend(axes) 
Example #26
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def _convert_layers_instancenorm(self, source_node):
        IR_node = self.IR_graph.node.add()
        TensorflowParser2._copy_and_reop(source_node, IR_node, 'InstanceNorm')

        # epsilon
        epsilon = self.get_parent(source_node.name, [1])
        epsilon_value = epsilon.get_attr('value').float_val[0]
        IR_node.attr['epsilon'].f = epsilon_value

        # beta
        output_node = self.get_son(source_node.name, [0, 0, 0, 0], True)
        beta = self.get_parent(output_node.name, [1, 0, 0, 0, 0, 1], True)
        beta_tensor = beta.get_attr('value')
        beta = tensor_util.MakeNdarray(beta_tensor)
        self.set_weight(source_node.name, 'bias', beta)

        # gamma (scale)
        IR_node.attr['scale'].b = True
        son = self.get_son(source_node.name, [0, 0, 0], True)
        gamma = self.get_parent(son.name, [1, 1, 0, 0, 0, 1], True)
        gamma_tensor = gamma.get_attr('value')
        scale = tensor_util.MakeNdarray(gamma_tensor)
        self.set_weight(source_node.name, 'scale', scale)
        # output_node = self.get_son(source_node.name, [0, 0, 0, 0], True)

        assert output_node.type == 'Add'
        input_node = self.get_parent(output_node.name, [0, 0])
        IR_node.input.append(input_node.real_name)

        output_node.real_name = source_node.name

        # assert False 
Example #27
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def _rename_Const(self, source_node):
        IR_node = self._convert_identity_operation(source_node, end_idx=0, new_op='Constant') # Constant
        value = source_node.get_attr('value')
        if value.float_val:
            value = value.float_val[0]
        elif value.int_val:
            value = value.int_val[0]
        else:
            value = tensor_util.MakeNdarray(value).tolist()
        kwargs = {'value': value}
        assign_IRnode_values(IR_node, kwargs) 
Example #28
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testComplexWithImplicitRepeat(self):
    for dtype, np_dtype in [(tf.complex64, np.complex64),
                            (tf.complex128, np.complex128)]:
      t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
                                        dtype=dtype)
      a = tensor_util.MakeNdarray(t)
      self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
                                    [(1+1j), (1+1j), (1+1j), (1+1j)],
                                    [(1+1j), (1+1j), (1+1j), (1+1j)]],
                                   dtype=np_dtype), a) 
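The "implicit repeat" being tested is a TensorProto compression rule: when fewer values are stored than the shape requires, MakeNdarray fills the array by repeating the last stored value. A minimal check:

import numpy as np
from tensorflow.python.framework import tensor_util

# One stored complex value, but a [3, 4] shape: MakeNdarray tiles it out.
t = tensor_util.make_tensor_proto(1 + 1j, shape=[3, 4], dtype=np.complex64)
a = tensor_util.MakeNdarray(t)
print(a.shape, bool(np.all(a == 1 + 1j)))  # (3, 4) True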
Example #29
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testStringNestedTuple(self):
    t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(np.object, a.dtype)
    self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a) 
Example #30
Source File: tensorflow_parser.py    From MMdnn with MIT License
def rename_StridedSlice(self, source_node):
        # TODO: currently this only handles plain slicing

        if self.get_parent(source_node.name, [1]).type != 'Const':
            self._add_constant_node(source_node)
            IR_node = self._convert_identity_operation(source_node, new_op='Slice')
            return

        IR_node = self._convert_identity_operation(source_node, in_edge_count=1, new_op='Slice')
        kwargs = {
            'begin_mask' : source_node.get_attr('begin_mask'),
            'end_mask'   : source_node.get_attr('end_mask'),
            'shrink_axis_mask': source_node.get_attr('shrink_axis_mask'),
            'new_axis_mask' :source_node.get_attr('new_axis_mask')
        }

        starts = self.get_parent(source_node.name, [1]).layer.attr['value'].tensor
        starts = tensor_util.MakeNdarray(starts).tolist()
        kwargs['starts'] = starts

        ends = self.get_parent(source_node.name, [2]).layer.attr['value'].tensor
        ends = tensor_util.MakeNdarray(ends).tolist()
        kwargs['ends'] = ends

        if self.get_parent(source_node.name, [3]) is not None:
            strides = self.get_parent(source_node.name, [3]).layer.attr['value'].tensor
            strides = tensor_util.MakeNdarray(strides).tolist()
            kwargs['strides'] = strides

        assign_IRnode_values(IR_node, kwargs)