Python tensorflow.python.platform.tf_logging.warn() Examples

The following are 30 code examples of tensorflow.python.platform.tf_logging.warn(), collected from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.python.platform.tf_logging.
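As a quick orientation before the examples: tf_logging mirrors Python's standard logging module, so warn() accepts printf-style arguments that are interpolated lazily, only when the record is actually emitted. A minimal sketch of the pattern the snippets below rely on (the cell name here is a hypothetical stand-in):

from tensorflow.python.platform import tf_logging as logging

# Lazy %-formatting: the message string is only built if the record is emitted.
logging.warn("%s: The input_size parameter is deprecated.", "MyRNNCell")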
Example #1
Source File: rnn_cell.py    From ecm with Apache License 2.0
def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj
Example #2
Source File: rnn_cell_impl.py    From lambda-packs with MIT License
def __init__(self, num_units, forget_bias=1.0,
               state_is_tuple=True, activation=None, reuse=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.  Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(BasicLSTMCell, self).__init__(_reuse=reuse)
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation or math_ops.tanh 
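A hedged usage sketch for the cell above: it is the state_is_tuple=False argument that takes the warning branch. This assumes a TensorFlow 1.x environment where the class is exposed as tf.nn.rnn_cell.BasicLSTMCell; the unit count is arbitrary.

import tensorflow as tf

# state_is_tuple=False selects the soon-to-be-deprecated concatenated state
# and triggers the logging.warn call shown above.
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=128, state_is_tuple=False)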
Example #3
Source File: saved_model_export_utils.py    From lambda-packs with MIT License
def garbage_collect_exports(export_dir_base, exports_to_keep):
  """Deletes older exports, retaining only a given number of the most recent.

  Export subdirectories are assumed to be named with monotonically increasing
  integers; the most recent are taken to be those with the largest values.

  Args:
    export_dir_base: the base directory under which each export is in a
      versioned subdirectory.
    exports_to_keep: the number of recent exports to retain.
  """
  if exports_to_keep is None:
    return

  keep_filter = gc.largest_export_versions(exports_to_keep)
  delete_filter = gc.negation(keep_filter)
  for p in delete_filter(gc.get_paths(export_dir_base,
                                      parser=_export_version_parser)):
    try:
      gfile.DeleteRecursively(p.path)
    except errors_impl.NotFoundError as e:
      logging.warn('Can not delete %s recursively: %s', p.path, e) 
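A sketch of a call site, assuming (as the docstring requires) that export subdirectories carry monotonically increasing integer names such as Unix timestamps; the path is hypothetical.

# Hypothetical layout: /tmp/my_model/export/{1497038000, 1497042000, ...}
# Keeps the five largest-numbered subdirectories and deletes the rest; a
# directory that vanishes mid-deletion only produces the warning above.
garbage_collect_exports('/tmp/my_model/export', exports_to_keep=5)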
Example #4
Source File: bn_basic_lstm.py    From tensorflow_end2end_speech_recognition with MIT License
def __init__(self, num_units, is_training, forget_bias=1.0, input_size=None,
             state_is_tuple=True, reuse=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      is_training: bool, set True when training.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._reuse = reuse
    self._is_training = is_training
Example #5
Source File: graph_actions.py    From lambda-packs with MIT License
def _write_summary_results(output_dir, eval_results, current_global_step):
  """Writes eval results into summary file in given dir."""
  logging.info('Saving evaluation summary for step %d: %s', current_global_step,
               _eval_results_to_str(eval_results))
  summary_writer = get_summary_writer(output_dir)
  summary = summary_pb2.Summary()
  for key in eval_results:
    if eval_results[key] is None:
      continue
    value = summary.value.add()
    value.tag = key
    if (isinstance(eval_results[key], np.float32) or
        isinstance(eval_results[key], float)):
      value.simple_value = float(eval_results[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary, current_global_step)
  summary_writer.flush() 
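To make the skip branch concrete, consider a hypothetical eval_results dict: the float entry becomes a scalar summary, while the Python int is neither float nor np.float32 and therefore hits the logging.warn branch.

# 'loss' is written as a simple_value; 'global_step' is skipped with
# "Skipping summary for global_step, must be a float or np.float32."
_write_summary_results('/tmp/eval', {'loss': 0.25, 'global_step': 100}, 1000)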
Example #6
Source File: feature_column.py    From lambda-packs with MIT License
def __new__(cls,
              column_name,
              size,
              dimension,
              hash_key,
              combiner="sqrtn",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))
    if initializer is None:
      logging.warn("The default stddev value of initializer will change from "
                   "\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
      stddev = 0.1
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                         dimension, hash_key,
                                                         combiner,
                                                         initializer) 
Example #7
Source File: variable_scope.py    From auto-alt-text-lambda-api with MIT License
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  with variable_scope(name_or_scope,
                      default_name=default_name,
                      values=values,
                      initializer=initializer,
                      regularizer=regularizer,
                      caching_device=caching_device,
                      partitioner=partitioner,
                      custom_getter=custom_getter,
                      reuse=reuse,
                      dtype=dtype) as scope:
    yield scope 
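A migration sketch following the warning text: tf.variable_scope takes the name first, so the deprecated wrapper's arguments are simply reordered. The input tensor is invented for the example (TensorFlow 1.x API).

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 10])  # hypothetical input tensor

# Replacement suggested by the warning message, with arguments reordered:
with tf.variable_scope("projection", default_name=None, values=[inputs]) as scope:
  weights = tf.get_variable("w", [10, 4])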
Example #8
Source File: event_accumulator.py    From auto-alt-text-lambda-api with MIT License
def _ParseFileVersion(file_version):
  """Convert the string file_version in event.proto into a float.

  Args:
    file_version: String file_version from event.proto

  Returns:
    Version number as a float.
  """
  tokens = file_version.split('brain.Event:')
  try:
    return float(tokens[-1])
  except ValueError:
    ## This should never happen according to the definition of file_version
    ## specified in event.proto.
    logging.warn(('Invalid event.proto file_version. Defaulting to use of '
                  'out-of-order event.step logic for purging expired events.'))
    return -1 
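Worked directly from the code above; the inputs are assumptions about typical file_version strings, not values taken from event.proto.

_ParseFileVersion('brain.Event:2')    # tokens[-1] == '2', returns 2.0
_ParseFileVersion('no-version-here')  # float() raises ValueError, so the
                                      # warning is logged and -1 is returned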
Example #9
Source File: core_rnn_cell_impl.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation 
Example #10
Source File: core_rnn_cell_impl.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj 
Example #11
Source File: graph_actions.py    From auto-alt-text-lambda-api with MIT License
def _write_summary_results(output_dir, eval_results, current_global_step):
  """Writes eval results into summary file in given dir."""
  logging.info('Saving evaluation summary for step %d: %s', current_global_step,
               _eval_results_to_str(eval_results))
  summary_writer = get_summary_writer(output_dir)
  summary = summary_pb2.Summary()
  for key in eval_results:
    if eval_results[key] is None:
      continue
    value = summary.value.add()
    value.tag = key
    if (isinstance(eval_results[key], np.float32) or
        isinstance(eval_results[key], float)):
      value.simple_value = float(eval_results[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary, current_global_step)
  summary_writer.flush() 
Example #12
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _validate_input_pipeline(self):
    """Validates the input pipeline.

    Performs some sanity checks and logs user-friendly information. Ideally we
    would error out to give users a better message, but when
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
    user code, so we only log a warning.

    Raises:
      RuntimeError: If the validation failed.
    """
    if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead (see '
                 'https://www.tensorflow.org/guide/datasets for '
                 'instructions).')
      if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
        raise RuntimeError(err_msg)
      else:
        logging.warn(err_msg) 
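A minimal tf.data sketch of the kind the warning recommends, standing in for a QueueRunner-based pipeline. The in-memory arrays are toy data; a TPUEstimator input_fn receives the batch size through params.

import numpy as np
import tensorflow as tf

def input_fn(params):
  # Toy in-memory data standing in for a real input source.
  features = np.random.rand(1000, 10).astype(np.float32)
  labels = np.random.randint(0, 2, size=(1000,)).astype(np.int32)
  dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  return dataset.shuffle(1000).repeat().batch(params["batch_size"])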
Example #13
Source File: basic_lstm.py    From tensorflow_end2end_speech_recognition with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, reuse=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._reuse = reuse
Example #14
Source File: rnn_cell.py    From ecm with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
Example #15
Source File: graph_actions.py    From deep_image_model with Apache License 2.0
def _write_summary_results(output_dir, eval_results, current_global_step):
  """Writes eval results into summary file in given dir."""
  logging.info('Saving evaluation summary for step %d: %s', current_global_step,
               _eval_results_to_str(eval_results))
  summary_writer = get_summary_writer(output_dir)
  summary = summary_pb2.Summary()
  for key in eval_results:
    if eval_results[key] is None:
      continue
    value = summary.value.add()
    value.tag = key
    if (isinstance(eval_results[key], np.float32) or
        isinstance(eval_results[key], float)):
      value.simple_value = float(eval_results[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary, current_global_step)
  summary_writer.flush() 
Example #16
Source File: event_accumulator.py    From deep_image_model with Apache License 2.0
def _ParseFileVersion(file_version):
  """Convert the string file_version in event.proto into a float.

  Args:
    file_version: String file_version from event.proto

  Returns:
    Version number as a float.
  """
  tokens = file_version.split('brain.Event:')
  try:
    return float(tokens[-1])
  except ValueError:
    ## This should never happen according to the definition of file_version
    ## specified in event.proto.
    logging.warn(('Invalid event.proto file_version. Defaulting to use of '
                  'out-of-order event.step logic for purging expired events.'))
    return -1 
Example #17
Source File: rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation 
Example #18
Source File: variable_scope.py    From deep_image_model with Apache License 2.0
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  with variable_scope(name_or_scope,
                      default_name=default_name,
                      values=values,
                      initializer=initializer,
                      regularizer=regularizer,
                      caching_device=caching_device,
                      partitioner=partitioner,
                      custom_getter=custom_getter,
                      reuse=reuse,
                      dtype=dtype) as scope:
    yield scope 
Example #19
Source File: rnn_cell.py    From ROLO with Apache License 2.0
def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj 
Example #20
Source File: rnn_cell.py    From ROLO with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation 
Example #21
Source File: ConvLSTMCell.py    From Conv3D_BICLSTM with MIT License
def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=tanh):

#    if not state_is_tuple:
#      logging.warn(
#          "%s: Using a concatenated state is slower and will soon be "
#          "deprecated.  Use state_is_tuple=True." % self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)

    #self._use_peepholes = use_peepholes
    #self._cell_clip = cell_clip
    #self._initializer = initializer
    #self._num_proj = num_proj
    #self._num_unit_shards = num_unit_shards
    #self._num_proj_shards = num_proj_shards

    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation 
Example #22
Source File: ops.py    From lambda-packs with MIT License
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope 
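The corresponding migration, per the warning string: tf.name_scope takes the name first. The tensors are invented for the example (TensorFlow 1.x API).

import tensorflow as tf

a = tf.constant(1.0)
b = tf.constant(2.0)

# Replacement suggested by the warning, with the argument order fixed:
with tf.name_scope("add_op", default_name=None, values=[a, b]) as scope:
  total = tf.add(a, b, name="total")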
Example #23
Source File: ops.py    From deep_image_model with Apache License 2.0
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope 
Example #24
Source File: variable_scope.py    From lambda-packs with MIT License
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  with variable_scope(name_or_scope,
                      default_name=default_name,
                      values=values,
                      initializer=initializer,
                      regularizer=regularizer,
                      caching_device=caching_device,
                      partitioner=partitioner,
                      custom_getter=custom_getter,
                      reuse=reuse,
                      dtype=dtype,
                      use_resource=use_resource) as scope:
    yield scope 
Example #25
Source File: rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation 
Example #26
Source File: core_rnn_cell.py    From lambda-packs with MIT License
def __init__(self,
               cell,
               num_proj,
               activation=None,
               input_size=None,
               reuse=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      activation: (optional) an optional activation function.
      input_size: Deprecated and unused.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    super(InputProjectionWrapper, self).__init__(_reuse=reuse)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not _like_rnncell(cell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj
    self._activation = activation 
Example #27
Source File: rnn_cell.py    From Multiview2Novelview with MIT License
def __init__(self, num_units, forget_bias=1.0,
               input_size=None, activation=math_ops.tanh,
               layer_norm=True, norm_gain=1.0, norm_shift=0.0,
               dropout_keep_prob=1.0, dropout_prob_seed=None,
               reuse=None):
    """Initializes the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)

    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)

    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._norm_gain = norm_gain
    self._norm_shift = norm_shift
    self._reuse = reuse 
Example #28
Source File: feature_column.py    From lambda-packs with MIT License
def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="mean",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              shared_embedding_name=None,
              shared_vocab_size=None,
              max_norm=None,
              trainable=True):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))

    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      logging.warn("The default stddev value of initializer will change from "
                   "\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
                   "2017/02/25.")
      stddev = 1 / math.sqrt(sparse_id_column.length)
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt,
                                                shared_embedding_name,
                                                shared_vocab_size,
                                                max_norm,
                                                trainable) 
Example #29
Source File: rnn_cell.py    From ROLO with Apache License 2.0
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation 
Example #30
Source File: rnn_cell.py    From deep_image_model with Apache License 2.0
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation