Python tensorflow.as_dtype() Examples
The following are 30 code examples of tensorflow.as_dtype().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: bayes.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def __init__(self,
             mean_initializer=tf.random_normal_initializer(stddev=0.1),
             stddev_initializer=tf.random_uniform_initializer(
                 minval=1e-5, maxval=0.1),
             mean_regularizer=None,
             stddev_regularizer=None,
             mean_constraint=None,
             stddev_constraint=positive(),
             seed=None,
             dtype=tf.float32):
    """Constructs the initializer.

    Args:
      mean_initializer: initializer for the location (mean) variable.
      stddev_initializer: initializer for the scale (stddev) variable.
      mean_regularizer: optional regularizer for the mean variable.
      stddev_regularizer: optional regularizer for the stddev variable.
      mean_constraint: optional projection applied to the mean variable.
      stddev_constraint: optional projection applied to the stddev variable;
        defaults to a positivity constraint.
      seed: optional random seed.
      dtype: anything accepted by `tf.as_dtype`.
    """
    super(TrainableNormal, self).__init__()
    # Normalize eagerly so downstream code can rely on a tf.DType instance.
    self.dtype = tf.as_dtype(dtype)
    self.seed = seed
    # Location-parameter configuration.
    self.mean_initializer = mean_initializer
    self.mean_regularizer = mean_regularizer
    self.mean_constraint = mean_constraint
    # Scale-parameter configuration.
    self.stddev_initializer = stddev_initializer
    self.stddev_regularizer = stddev_regularizer
    self.stddev_constraint = stddev_constraint
Example #2
Source File: variable_schema.py From multi-agent-emergence-environments with MIT License | 6 votes |
def __init__(self, shape, dtype):
    """Creates a schema for a variable used in policy.

    Shapes may be given symbolically: each dimension is either an integer
    or one of the string placeholders BATCH / TIMESTEPS. Optimizers exploit
    this to build placeholders or prefetch variables asynchronously.

    Parameters
    ----------
    shape: [int, np.int64, np.int32, or str]
        shape of the variable, e.g. [12, 4], [BATCH, 12], [BATCH, 'timestep']
    dtype:
        tensorflow type of the variable, e.g. tf.float32, tf.int32
    """
    def _dim_ok(dim):
        # A dimension is either a concrete integer or a known placeholder.
        return isinstance(dim, (int, np.int64, np.int32)) or dim in [BATCH, TIMESTEPS]

    assert all(_dim_ok(s) for s in shape), 'Bad shape %s' % shape
    self.shape = shape
    self.dtype = tf.as_dtype(dtype)
Example #3
Source File: special.py From zhusuan with MIT License | 6 votes |
def __init__(self, dtype, batch_shape=None, value_shape=None, group_ndims=0,
             is_continuous=None, **kwargs):
    """Constructs an Empirical distribution wrapper.

    `dtype=None` falls back to float32; otherwise the base dtype of the
    requested type is used. Continuity defaults to whether the dtype floats.
    """
    if dtype is None:
        dtype = tf.float32
    else:
        dtype = tf.as_dtype(dtype).base_dtype
    self.explicit_batch_shape = tf.TensorShape(batch_shape)
    self.explicit_value_shape = tf.TensorShape(value_shape)
    # Infer continuity from the dtype unless the caller was explicit.
    if is_continuous is None:
        is_continuous = dtype.is_floating
    super(Empirical, self).__init__(
        dtype=dtype,
        param_dtype=None,
        is_continuous=is_continuous,
        is_reparameterized=False,
        use_path_derivative=False,
        group_ndims=group_ndims,
        **kwargs)
Example #4
Source File: misc.py From OpenNMT-tf with MIT License | 6 votes |
def read_summaries(event_dir, event_file_pattern="events.out.tfevents.*"):
    """Reads summaries from TensorFlow event files.

    Args:
      event_dir: Directory containing event files.
      event_file_pattern: The pattern to look for event files.

    Returns:
      A list of tuple (step, dict of summaries), sorted by step.
    """
    if not tf.io.gfile.exists(event_dir):
        return []
    summaries = collections.defaultdict(dict)
    for event_file in tf.io.gfile.glob(os.path.join(event_dir, event_file_pattern)):
        for event in tf.compat.v1.train.summary_iterator(event_file):
            if not event.HasField("summary"):
                continue
            for value in event.summary.value:
                tensor_proto = value.tensor
                # Summaries are stored as serialized tensor protos; decode
                # using the dtype recorded in the proto itself.
                tensor = tf.io.parse_tensor(
                    tensor_proto.SerializeToString(),
                    tf.as_dtype(tensor_proto.dtype))
                summaries[event.step][value.tag] = tf.get_static_value(tensor)
    # sorted() already returns a list; the extra list() wrapper was redundant.
    return sorted(summaries.items(), key=lambda x: x[0])
Example #5
Source File: network.py From ai-platform with MIT License | 6 votes |
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0,
                                  out_shrink=1, out_dtype=None):
    """Applies the legacy scale/bias/shrink/cast chain to network outputs."""
    outputs = expr  # starts as the input tuple; becomes a list once transformed
    if out_mul != 1.0:
        outputs = [x * out_mul for x in outputs]
    if out_add != 0.0:
        outputs = [x + out_add for x in outputs]
    if out_shrink > 1:
        # Downscale spatially via average pooling (NCHW layout).
        ksize = [1, 1, out_shrink, out_shrink]
        outputs = [
            tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID",
                           data_format="NCHW")
            for x in outputs
        ]
    if out_dtype is not None:
        # Integer targets are rounded before the saturating cast.
        if tf.as_dtype(out_dtype).is_integer:
            outputs = [tf.round(x) for x in outputs]
        outputs = [tf.saturate_cast(x, out_dtype) for x in outputs]
    return outputs
Example #6
Source File: initializer.py From zero with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_initializer(initializer, initializer_gain):
    """Maps an initializer name to a TF initializer.

    The dtype is taken from the project-wide `dtype.floatx()` setting.
    Unknown names fall back to glorot_uniform with a warning.
    """
    tfdtype = tf.as_dtype(dtype.floatx())
    if initializer == "uniform":
        limit = initializer_gain
        return tf.random_uniform_initializer(-limit, limit, dtype=tfdtype)
    if initializer == "normal":
        return tf.random_normal_initializer(0.0, initializer_gain, dtype=tfdtype)
    if initializer in ("normal_unit_scaling", "uniform_unit_scaling"):
        # The two unit-scaling variants differ only in the sampling distribution.
        dist = "normal" if initializer == "normal_unit_scaling" else "uniform"
        return tf.variance_scaling_initializer(
            initializer_gain, mode="fan_avg", distribution=dist, dtype=tfdtype)
    tf.logging.warn("Unrecognized initializer: %s" % initializer)
    tf.logging.warn("Return to default initializer: glorot_uniform_initializer")
    return tf.glorot_uniform_initializer(dtype=tfdtype)
Example #7
Source File: bayes.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def build(self, shape, dtype=None, add_variable_fn=None):
    """Builds the initializer, with the variables captured by the caller.

    Creates the trainable `mean` and `stddev` variables through the
    caller-supplied `add_variable_fn` (e.g. a layer's `add_weight`), so the
    variables live in the caller's scope. Falls back to `self.dtype` when no
    dtype is given.
    """
    if dtype is None:
        dtype = self.dtype
    self.shape = shape
    # Normalize to a tf.DType; the raw `dtype` argument is still forwarded
    # unchanged to add_variable_fn below.
    self.dtype = tf.as_dtype(dtype)
    # Location parameter of the normal.
    self.mean = add_variable_fn(
        'mean',
        shape=shape,
        initializer=self.mean_initializer,
        regularizer=self.mean_regularizer,
        constraint=self.mean_constraint,
        dtype=dtype,
        trainable=True)
    # Scale parameter of the normal (constrained positive via
    # stddev_constraint, set in __init__).
    self.stddev = add_variable_fn(
        'stddev',
        shape=shape,
        initializer=self.stddev_initializer,
        regularizer=self.stddev_regularizer,
        constraint=self.stddev_constraint,
        dtype=dtype,
        trainable=True)
    self.built = True
Example #8
Source File: attacks_tfe.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, model, dtypestr='float32'):
    """
    :param model: An instance of the cleverhans.model.Model class.
    :param back: The backend to use. Inherited from AttackBase class.
    :param dtypestr: datatype of the input data samples and crafted
                     adversarial attacks.
    """
    # Validate the input arguments.
    # Membership test instead of the chained `!= ... and != ...` form.
    if dtypestr not in ('float32', 'float64'):
        raise ValueError("Unexpected input for argument dtypestr.")
    import tensorflow as tf
    # NOTE(review): `tfe` is unused here; kept because merely accessing
    # tf.contrib.eager lazily imports contrib, which other code may rely on.
    tfe = tf.contrib.eager
    self.tf_dtype = tf.as_dtype(dtypestr)
    self.np_dtype = np.dtype(dtypestr)

    if not isinstance(model, Model):
        raise ValueError("The model argument should be an instance of"
                         " the cleverhans.model.Model class.")

    # Prepare attributes
    self.model = model
    self.dtypestr = dtypestr
Example #9
Source File: attention_wrapper.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
    """Replaces scores past each sequence's length with a mask value."""
    # No lengths given -> nothing to mask.
    if memory_sequence_length is None:
        return score
    # Default mask value: -inf expressed in the score's own dtype.
    if score_mask_value is None:
        score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf)
    valid_positions = tf.sequence_mask(
        memory_sequence_length, maxlen=tf.shape(score)[1])
    masked = score_mask_value * tf.ones_like(score)
    return tf.where(valid_positions, score, masked)
Example #10
Source File: mtf_transformer.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def slice_dtype(self):
    """Returns the tf.DType used for model slices, per the hparams setting."""
    requested = self._hparams.slice_dtype
    return tf.as_dtype(requested)
Example #11
Source File: transformer_fuse.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph and returns the training loss."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #12
Source File: transformer_rpr.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Work on a copy so the caller's params stay untouched; scoring must be
    # deterministic, so dropout and label smoothing are disabled.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #13
Source File: transformer_rpr.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph and returns the training loss."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #14
Source File: transformer_fuse.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def infer_fn(params):
    """Returns (encoding_fn, decoding_fn) closures used for beam-search inference.

    Dropout is disabled on a copied params object so inference is deterministic.
    """
    params = copy.copy(params)
    params = util.closing_dropout(params)

    def encoding_fn(source):
        # Runs the encoder once and seeds the decoder's recurrent state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            state = encoder(source, params)
            # presumably state["decoder_initializer"] is the decoder's initial
            # recurrent state produced by encoder() — confirm in encoder().
            state["decoder"] = {
                "state": state["decoder_initializer"]
            }
        return state

    def decoding_fn(target, state, time):
        # One decoding step: returns logits for `target` plus the next state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            if params.search_mode == "cache":
                # Incremental decoding: the decoder reads the current step
                # from state['time']; remove it again before returning.
                state['time'] = time
                step_loss, step_logits, step_state, _ = decoder(
                    target, state, params)
                del state['time']
            else:
                # Non-cached mode: re-encode and decode the full prefix.
                estate = encoder(state, params)
                estate['dev_decode'] = True
                _, step_logits, _, _ = decoder(target, estate, params)
                step_state = state
        return step_logits, step_state

    return encoding_fn, decoding_fn


# register the model, with a unique name
Example #15
Source File: attention_model.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def create_attention_mechanism(attention_option, num_units, memory,
                               source_sequence_length, mode):
    """Create attention mechanism based on the attention_option."""
    del mode  # unused
    # Padded memory positions are masked with -inf in the memory's dtype.
    score_mask_value = tf.convert_to_tensor(
        tf.as_dtype(memory.dtype).as_numpy_dtype(-np.inf))

    # Arguments shared by every mechanism variant.
    common = dict(memory_sequence_length=source_sequence_length,
                  score_mask_value=score_mask_value)
    if attention_option == "luong":
        return tf.contrib.seq2seq.LuongAttention(num_units, memory, **common)
    if attention_option == "scaled_luong":
        return tf.contrib.seq2seq.LuongAttention(
            num_units, memory, scale=True, **common)
    if attention_option == "bahdanau":
        return tf.contrib.seq2seq.BahdanauAttention(num_units, memory, **common)
    if attention_option == "normed_bahdanau":
        return tf.contrib.seq2seq.BahdanauAttention(
            num_units, memory, normalize=True, dtype=memory.dtype, **common)
    raise ValueError("Unknown attention option %s" % attention_option)
Example #16
Source File: transformer_fuse.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Copy params so the caller's object is not mutated; deterministic
    # scoring requires dropout off and no label smoothing.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #17
Source File: attention_wrapper.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
    """Masks attention scores beyond each memory sequence's true length."""
    if memory_sequence_length is None:
        # Without lengths there is nothing to mask.
        return score
    if score_mask_value is None:
        # Fall back to -inf in the score's dtype so masked entries never win.
        score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf)
    keep = tf.sequence_mask(memory_sequence_length, maxlen=tf.shape(score)[1])
    fill = score_mask_value * tf.ones_like(score)
    return tf.where(keep, score, fill)
Example #18
Source File: tensorflow_backend.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def zeros(shape, dtype=None, name=None):
    """Instantiates an all-zeros variable and returns it.

    # Arguments
        shape: Tuple of integers, shape of returned Keras variable
        dtype: String, data type of returned Keras variable
        name: String, name of returned Keras variable

    # Returns
        A variable (including Keras metadata), filled with `0.0`.
        If `shape` is symbolic, a dynamically-shaped tensor is
        returned instead of a variable.
    """
    dtype = floatx() if dtype is None else dtype
    zeros_tensor = tf.zeros(shape=shape, dtype=tf.as_dtype(dtype), name=name)
    # Only a fully-defined static shape can back a Keras variable;
    # otherwise hand back the dynamically-shaped tensor.
    if py_all(zeros_tensor.get_shape().as_list()):
        return variable(zeros_tensor, dtype=dtype, name=name)
    return zeros_tensor
Example #19
Source File: tensorflow_backend.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones variable and returns it.

    # Arguments
        shape: Tuple of integers, shape of returned Keras variable.
        dtype: String, data type of returned Keras variable.
        name: String, name of returned Keras variable.

    # Returns
        A Keras variable, filled with `1.0`.
        If `shape` is symbolic, a dynamically-shaped tensor is
        returned instead of a variable.
    """
    dtype = floatx() if dtype is None else dtype
    ones_tensor = tf.ones(shape=shape, dtype=tf.as_dtype(dtype), name=name)
    # A Keras variable needs a fully-defined static shape; fall back to
    # the dynamically-shaped tensor otherwise.
    if py_all(ones_tensor.get_shape().as_list()):
        return variable(ones_tensor, dtype=dtype, name=name)
    return ones_tensor
Example #20
Source File: transformer_aan.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph; returns a dict holding the loss tensor."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #21
Source File: transformer_l0drop.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Copy params and disable stochastic pieces (dropout, label smoothing)
    # so scoring is deterministic and the caller's params are untouched.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #22
Source File: transformer_l0drop.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph; returns a dict holding the loss tensor."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #23
Source File: transformer_fixup.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def infer_fn(params):
    """Returns (encoding_fn, decoding_fn) closures used for inference.

    Works on a copied params object with dropout closed, so decoding is
    deterministic.
    """
    params = copy.copy(params)
    params = util.closing_dropout(params)

    def encoding_fn(source):
        # Encode the source once; seed the decoder's initial state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            state = encoder(source, params)
            # presumably "decoder_initializer" is produced by encoder() as the
            # decoder's initial recurrent state — confirm in encoder().
            state["decoder"] = {
                "state": state["decoder_initializer"]
            }
        return state

    def decoding_fn(target, state, time):
        # One decoding step: logits for `target` plus the updated state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            if params.search_mode == "cache":
                # Cached/incremental mode: decoder reads state['time'];
                # the key is removed again after the step.
                state['time'] = time
                step_loss, step_logits, step_state, _ = decoder(
                    target, state, params)
                del state['time']
            else:
                # Fallback mode: re-encode and decode the whole prefix.
                estate = encoder(state, params)
                estate['dev_decode'] = True
                _, step_logits, _, _ = decoder(target, estate, params)
                step_state = state
        return step_logits, step_state

    return encoding_fn, decoding_fn


# register the model, with a unique name
Example #24
Source File: transformer_fixup.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Deterministic scoring: copy params, close dropout, no label smoothing.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #25
Source File: transformer_fixup.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph; returns a dict holding the loss tensor."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #26
Source File: rnnsearch.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Copy params so the caller's object is unchanged; disable dropout and
    # label smoothing for deterministic scores.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #27
Source File: rnnsearch.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def train_fn(features, params, initializer=None):
    """Builds the training graph; returns a dict holding the loss tensor."""
    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        loss, logits, enc_state, _ = decoder(features['target'], enc_state, params)

    return {"loss": loss}
Example #28
Source File: transformer_aan.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def infer_fn(params):
    """Returns (encoding_fn, decoding_fn) closures used for inference.

    Uses a copied params object with dropout closed so that decoding is
    deterministic.
    """
    params = copy.copy(params)
    params = util.closing_dropout(params)

    def encoding_fn(source):
        # Encode the source sentence and seed the decoder's initial state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            state = encoder(source, params)
            # presumably state["decoder_initializer"] is the decoder's initial
            # recurrent state created inside encoder() — confirm there.
            state["decoder"] = {
                "state": state["decoder_initializer"]
            }
        return state

    def decoding_fn(target, state, time):
        # One decoding step: logits for `target` plus the updated state.
        with tf.variable_scope(params.scope_name or "model",
                               reuse=tf.AUTO_REUSE,
                               dtype=tf.as_dtype(dtype.floatx()),
                               custom_getter=dtype.float32_variable_storage_getter):
            if params.search_mode == "cache":
                # Incremental decoding: decoder reads the step index from
                # state['time']; remove the key again afterwards.
                state['time'] = time
                step_loss, step_logits, step_state, _ = decoder(
                    target, state, params)
                del state['time']
            else:
                # Non-cached mode: re-encode and decode the whole prefix.
                estate = encoder(state, params)
                estate['dev_decode'] = True
                _, step_logits, _, _ = decoder(target, estate, params)
                step_state = state
        return step_logits, step_state

    return encoding_fn, decoding_fn


# register the model, with a unique name
Example #29
Source File: transformer_aan.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Deterministic scoring on a copied params object: dropout closed,
    # label smoothing off.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}
Example #30
Source File: deepnmt.py From zero with BSD 3-Clause "New" or "Revised" License | 5 votes |
def score_fn(features, params, initializer=None):
    """Builds the scoring graph: scores given target sentences."""
    # Copy params (caller's object untouched) and disable stochastic parts
    # so scores are deterministic.
    params = copy.copy(params)
    params = util.closing_dropout(params)
    params.label_smooth = 0.0

    scope_dtype = tf.as_dtype(dtype.floatx())
    with tf.variable_scope(params.scope_name or "model",
                           initializer=initializer,
                           reuse=tf.AUTO_REUSE,
                           dtype=scope_dtype,
                           custom_getter=dtype.float32_variable_storage_getter):
        enc_state = encoder(features['source'], params)
        _, _, _, scores = decoder(features['target'], enc_state, params)

    return {"score": scores}