Python tensorflow.name_scope() Examples
The following are 30 code examples of tensorflow.name_scope(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
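Before the project excerpts, here is a minimal sketch (not from any of the projects below, assuming the TF 1.x graph-mode API that these examples use) showing the basic effect of tf.name_scope(): ops created inside the context manager get the scope name prepended to their names, which is what groups them into boxes in the TensorBoard graph.

import tensorflow as tf

with tf.name_scope('layer1'):
    a = tf.constant(1.0, name='a')
    with tf.name_scope('sum'):
        b = tf.add(a, a, name='b')

# Scope names are prepended to op/tensor names:
print(a.name)  # layer1/a:0
print(b.name)  # layer1/sum/b:0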
Example #1
Source File: mnist_histogram.py From deep-learning-note with MIT License | 6 votes |
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    # Group all ops of the same layer under one name scope
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            # Weights and their monitoring summaries
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope('biases'):
            # Biases and their monitoring summaries
            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))
            variable_summaries(biases, layer_name + '/biases')
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            # Record the distribution of the layer outputs before the activation function
            tf.summary.histogram(layer_name + '/pre_activations', preactivate)
        activations = act(preactivate, name='activation')
        # Record the distribution of the layer outputs after the activation function
        tf.summary.histogram(layer_name + '/activations', activations)
        return activations
Example #2
Source File: model_deploy.py From DOTA_models with Apache License 2.0 | 6 votes |
def clone_scope(self, clone_index):
    """Name scope to create the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A name_scope suitable for `tf.name_scope()`.

    Raises:
      ValueError: if `clone_index` is greater than or equal to the number of clones.
    """
    if clone_index >= self._num_clones:
        raise ValueError('clone_index must be less than num_clones')
    scope = ''
    if self._num_clones > 1:
        scope = 'clone_%d' % clone_index
    return scope
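The string returned by clone_scope() is intended to be passed back into tf.name_scope() when building each clone. A hedged usage sketch (the config class below is a hypothetical stand-in, not part of model_deploy.py):

import tensorflow as tf

class FakeDeployConfig(object):  # hypothetical stand-in for the real deployment config
    _num_clones = 2
    def clone_scope(self, clone_index):
        if clone_index >= self._num_clones:
            raise ValueError('clone_index must be less than num_clones')
        return 'clone_%d' % clone_index if self._num_clones > 1 else ''

config = FakeDeployConfig()
for i in range(config._num_clones):
    with tf.name_scope(config.clone_scope(i)):
        x = tf.constant(0.0, name='tower_input')  # stands in for building the clone's ops
        print(x.name)  # clone_0/tower_input:0, then clone_1/tower_input:0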
Example #3
Source File: tfutil.py From disentangling_conditional_gans with MIT License | 6 votes |
def finalize_autosummaries():
    global _autosummary_finalized
    if _autosummary_finalized:
        return
    _autosummary_finalized = True
    init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
    with tf.device(None), tf.control_dependencies(None):
        for name, vars in _autosummary_vars.items():
            id = name.replace('/', '_')
            with absolute_name_scope('Autosummary/' + id):
                sum = tf.add_n(vars)
                avg = sum[0] / sum[1]
                with tf.control_dependencies([avg]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        tf.summary.scalar(name, avg)

# Internal helper for creating autosummary accumulators.
Example #4
Source File: tfutil.py From disentangling_conditional_gans with MIT License | 6 votes |
def autosummary(name, value):
    id = name.replace('/', '_')
    if is_tf_expression(value):
        with tf.name_scope('summary_' + id), tf.device(value.device):
            update_op = _create_autosummary_var(name, value)
            with tf.control_dependencies([update_op]):
                return tf.identity(value)
    else:  # python scalar or numpy array
        if name not in _autosummary_immediate:
            with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
                update_value = tf.placeholder(tf.float32)
                update_op = _create_autosummary_var(name, update_value)
                _autosummary_immediate[name] = update_op, update_value
        update_op, update_value = _autosummary_immediate[name]
        run(update_op, {update_value: np.float32(value)})
        return value

# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
Example #5
Source File: 11_w2v_visual.py From deep-learning-note with MIT License | 6 votes |
def _create_loss(self):
    """ Step 4: define the loss function """
    with tf.name_scope('loss'):
        # construct variables for NCE loss
        nce_weight = tf.get_variable('nce_weight',
                                     shape=[self.vocab_size, self.embed_size],
                                     initializer=tf.truncated_normal_initializer(
                                         stddev=1.0 / (self.embed_size ** 0.5)))
        nce_bias = tf.get_variable('nce_bias', initializer=tf.zeros([VOCAB_SIZE]))

        # define loss function to be NCE loss function
        self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
                                                  biases=nce_bias,
                                                  labels=self.target_words,
                                                  inputs=self.embed,
                                                  num_sampled=self.num_sampled,
                                                  num_classes=self.vocab_size),
                                   name='loss')
Example #6
Source File: siamese_network_semantic.py From deep-siamese-text-similarity with MIT License | 6 votes |
def stackedRNN(self, x, dropout, scope, embedding_size, sequence_length, hidden_units):
    n_hidden = hidden_units
    n_layers = 3
    # Prepare data shape to match `static_rnn` function requirements
    x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
    # Define lstm cells with tensorflow
    # Forward direction cell
    with tf.name_scope("fw" + scope), tf.variable_scope("fw" + scope):
        stacked_rnn_fw = []
        for _ in range(n_layers):
            fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout)
            stacked_rnn_fw.append(lstm_fw_cell)
        lstm_fw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)
        outputs, _ = tf.nn.static_rnn(lstm_fw_cell_m, x, dtype=tf.float32)
    return outputs[-1]
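Note that the example pairs tf.name_scope() with tf.variable_scope(): in the TF 1.x API, variables created through tf.get_variable() (which the LSTM cells use internally) ignore name scopes and only pick up variable scopes, while ordinary ops pick up both. A minimal sketch of that difference (not from this project, assuming TF 1.x graph mode):

import tensorflow as tf

with tf.name_scope('ns'), tf.variable_scope('vs'):
    v = tf.get_variable('v', shape=[1])   # variable name ignores the name scope
    r = tf.identity(v, name='read_v')     # op name picks up both scopes

print(v.name)  # vs/v:0
print(r.name)  # ns/vs/read_v:0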
Example #7
Source File: transE.py From TensorFlow-TransX with MIT License | 5 votes |
def __init__(self, config):
    entity_total = config.entity
    relation_total = config.relation
    batch_size = config.batch_size
    size = config.hidden_size
    margin = config.margin

    self.pos_h = tf.placeholder(tf.int32, [None])
    self.pos_t = tf.placeholder(tf.int32, [None])
    self.pos_r = tf.placeholder(tf.int32, [None])
    self.neg_h = tf.placeholder(tf.int32, [None])
    self.neg_t = tf.placeholder(tf.int32, [None])
    self.neg_r = tf.placeholder(tf.int32, [None])

    with tf.name_scope("embedding"):
        self.ent_embeddings = tf.get_variable(name="ent_embedding", shape=[entity_total, size],
                                              initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.rel_embeddings = tf.get_variable(name="rel_embedding", shape=[relation_total, size],
                                              initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
        pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
        pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
        neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
        neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
        neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)

    if config.L1_flag:
        pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims=True)
        neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims=True)
        self.predict = pos
    else:
        pos = tf.reduce_sum((pos_h_e + pos_r_e - pos_t_e) ** 2, 1, keep_dims=True)
        neg = tf.reduce_sum((neg_h_e + neg_r_e - neg_t_e) ** 2, 1, keep_dims=True)
        self.predict = pos

    with tf.name_scope("output"):
        self.loss = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
Example #8
Source File: model_base.py From DOTA_models with Apache License 2.0 | 5 votes |
def _max_pool(self, x, pool_size, stride):
    with tf.name_scope('max_pool') as name_scope:
        x = tf.layers.max_pooling2d(
            x, pool_size, stride, padding='SAME', data_format=self._data_format)
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x
Example #9
Source File: model_base.py From DOTA_models with Apache License 2.0 | 5 votes |
def _global_avg_pool(self, x):
    with tf.name_scope('global_avg_pool') as name_scope:
        assert x.get_shape().ndims == 4
        if self._data_format == 'channels_first':
            x = tf.reduce_mean(x, [2, 3])
        else:
            x = tf.reduce_mean(x, [1, 2])
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x
Example #10
Source File: cmp_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def rotate_preds(loc_on_map, relative_theta, map_size, preds, output_valid_mask):
    with tf.name_scope('rotate'):
        flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size)
        if type(preds) != list:
            rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op,
                                                                      output_valid_mask)
        else:
            rotated_preds = []
            valid_mask_warps = []
            for pred in preds:
                rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op,
                                                                        output_valid_mask)
                rotated_preds.append(rotated_pred)
                valid_mask_warps.append(valid_mask_warp)
    return rotated_preds, valid_mask_warps
Example #11
Source File: cmp_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def get_visual_frustum(map_size, shape_like, expand_dims=[0, 0]):
    with tf.name_scope('visual_frustum'):
        l = np.tril(np.ones(map_size))
        l = l + l[:, ::-1]
        l = (l == 2).astype(np.float32)
        for e in expand_dims:
            l = np.expand_dims(l, axis=e)
        confs_probs = tf.constant(l, dtype=tf.float32)
        confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs
    return confs_probs
Example #12
Source File: cmp_summary.py From DOTA_models with Apache License 2.0 | 5 votes |
def _add_summaries(m, args, summary_mode, arop_full_summary_iters):
    task_params = args.navtask.task_params

    summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op] + \
        m.loss_ops + m.acc_ops
    summarize_names = ['lr', 'global_step', 'sample_gt_prob_op'] + \
        m.loss_ops_names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
    to_aggregate = [0, 0, 0] + [1] * len(m.loss_ops_names) + [1] * len(m.acc_ops)

    scope_name = 'summary'
    with tf.name_scope(scope_name):
        s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
                                         summarize_ops, summarize_names,
                                         to_aggregate, m.action_prob_op,
                                         m.input_tensors, scope_name=scope_name)

    if summary_mode == 'val':
        arop, arop_summary_iters, arop_eval_fns = _summary_vis(
            m, task_params.batch_size, task_params.num_steps,
            arop_full_summary_iters)
        s_ops.additional_return_ops += arop
        s_ops.arop_summary_iters += arop_summary_iters
        s_ops.arop_eval_fns += arop_eval_fns

        if args.arch.readout_maps:
            arop, arop_summary_iters, arop_eval_fns = _summary_readout_maps(
                m, task_params.num_steps, arop_full_summary_iters)
            s_ops.additional_return_ops += arop
            s_ops.arop_summary_iters += arop_summary_iters
            s_ops.arop_eval_fns += arop_eval_fns

    return s_ops
Example #13
Source File: tf_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def step_gt_prob(step, step_number_op):
    # Change sampling probability from 1 to -1 at `step` steps.
    with tf.name_scope('step_gt_prob'):
        out = tf.cond(tf.less(step_number_op, step),
                      lambda: tf.constant(1.), lambda: tf.constant(-1.))
        return out
Example #14
Source File: layers.py From DOTA_models with Apache License 2.0 | 5 votes |
def _summarize_vars_and_grads(grads_and_vars):
    tf.logging.info('Trainable variables:')
    tf.logging.info('-' * 60)
    for grad, var in grads_and_vars:
        tf.logging.info(var)

        def tag(name, v=var):
            return v.op.name + '_' + name

        # Variable summary
        mean = tf.reduce_mean(var)
        tf.summary.scalar(tag('mean'), mean)
        with tf.name_scope(tag('stddev')):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar(tag('stddev'), stddev)
        tf.summary.scalar(tag('max'), tf.reduce_max(var))
        tf.summary.scalar(tag('min'), tf.reduce_min(var))
        tf.summary.histogram(tag('histogram'), var)

        # Gradient summary
        if grad is not None:
            if isinstance(grad, tf.IndexedSlices):
                grad_values = grad.values
            else:
                grad_values = grad
            tf.summary.histogram(tag('gradient'), grad_values)
            tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values]))
        else:
            tf.logging.info('Var %s has no gradient', var.op.name)
Example #15
Source File: layers.py From DOTA_models with Apache License 2.0 | 5 votes |
def accuracy(logits, targets, weights):
    """Computes prediction accuracy.

    Args:
      logits: 2-D classifier logits [timesteps*batch_size, num_classes].
      targets: 1-D [timesteps*batch_size] integer tensor.
      weights: 1-D [timesteps*batch_size] float tensor.

    Returns:
      Accuracy: float scalar.
    """
    with tf.name_scope('accuracy'):
        eq = tf.cast(tf.equal(predictions(logits), targets), tf.float32)
        return tf.identity(
            tf.reduce_sum(weights * eq) / _num_labels(weights), name='accuracy')
Example #16
Source File: tf_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def inverse_sigmoid_decay(k, global_step_op):
    with tf.name_scope('inverse_sigmoid_decay'):
        k = tf.constant(k, dtype=tf.float32)
        tmp = k * tf.exp(-tf.cast(global_step_op, tf.float32) / k)
        tmp = tmp / (1. + tmp)
    return tmp
Example #17
Source File: model_base.py From DOTA_models with Apache License 2.0 | 5 votes |
def _avg_pool(self, x, pool_size, stride):
    with tf.name_scope('avg_pool') as name_scope:
        x = tf.layers.average_pooling2d(
            x, pool_size, stride, 'SAME', data_format=self._data_format)
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x
Example #18
Source File: cifar10_model.py From DOTA_models with Apache License 2.0 | 5 votes |
def forward_pass(self, x, input_data_format='channels_last'):
    """Build the core model within the graph."""
    if self._data_format != input_data_format:
        if input_data_format == 'channels_last':
            # Computation requires channels_first.
            x = tf.transpose(x, [0, 3, 1, 2])
        else:
            # Computation requires channels_last.
            x = tf.transpose(x, [0, 2, 3, 1])

    # Image standardization.
    x = x / 128 - 1

    x = self._conv(x, 3, 16, 1)
    x = self._batch_norm(x)
    x = self._relu(x)

    # Use basic (non-bottleneck) block and ResNet V1 (post-activation).
    res_func = self._residual_v1

    # 3 stages of block stacking.
    for i in range(3):
        with tf.name_scope('stage'):
            for j in range(self.n):
                if j == 0:
                    # First block in a stage, filters and strides may change.
                    x = res_func(x, 3, self.filters[i], self.filters[i + 1],
                                 self.strides[i])
                else:
                    # Following blocks in a stage, constant filters and unit stride.
                    x = res_func(x, 3, self.filters[i + 1], self.filters[i + 1], 1)

    x = self._global_avg_pool(x)
    x = self._fully_connected(x, self.num_classes)
    return x
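Because the loop above re-enters tf.name_scope('stage') three times, TensorFlow uniquifies the scope name on each entry, so the stages show up in TensorBoard as stage, stage_1, and stage_2. A minimal sketch of that uniquification (not from cifar10_model.py, assuming TF 1.x graph mode):

import tensorflow as tf

for i in range(3):
    with tf.name_scope('stage'):
        x = tf.constant(float(i), name='x')
        print(x.name)  # stage/x:0, stage_1/x:0, stage_2/x:0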
Example #19
Source File: seq2seq_lib.py From DOTA_models with Apache License 2.0 | 5 votes |
def sequence_loss_by_example(inputs, targets, weights, loss_function,
                             average_across_timesteps=True, name=None):
    """Sampled softmax loss for a sequence of inputs (per example).

    Args:
      inputs: List of 2D Tensors of shape [batch_size x hid_dim].
      targets: List of 1D batch-sized int32 Tensors of the same length as logits.
      weights: List of 1D batch-sized float Tensors of the same length as logits.
      loss_function: Sampled softmax function (inputs, labels) -> loss.
      average_across_timesteps: If set, divide the returned cost by the total
        label weight.
      name: Optional name for this operation, default:
        'sequence_loss_by_example'.

    Returns:
      1D batch-sized float Tensor: The log-perplexity for each sequence.

    Raises:
      ValueError: If len(inputs) is different from len(targets) or len(weights).
    """
    if len(targets) != len(inputs) or len(weights) != len(inputs):
        raise ValueError('Lengths of logits, weights, and targets must be the same '
                         '%d, %d, %d.' % (len(inputs), len(weights), len(targets)))
    with tf.name_scope(values=inputs + targets + weights, name=name,
                       default_name='sequence_loss_by_example'):
        log_perp_list = []
        for inp, target, weight in zip(inputs, targets, weights):
            crossent = loss_function(inp, target)
            log_perp_list.append(crossent * weight)
        log_perps = tf.add_n(log_perp_list)
        if average_across_timesteps:
            total_size = tf.add_n(weights)
            total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
            log_perps /= total_size
    return log_perps
Example #20
Source File: actor.py From neural-combinatorial-optimization-rl-tensorflow with MIT License | 5 votes |
def build_optim(self):
    # Update moving_mean and moving_variance for batch normalization layers
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):

        with tf.name_scope('baseline'):
            # Update baseline
            reward_mean, reward_var = tf.nn.moments(self.reward, axes=[0])
            self.base_op = tf.assign(self.avg_baseline,
                                     self.alpha * self.avg_baseline + (1.0 - self.alpha) * reward_mean)
            tf.summary.scalar('average baseline', self.avg_baseline)

        with tf.name_scope('reinforce'):
            # Actor learning rate
            self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step,
                                                  self.lr1_decay_step, self.lr1_decay_rate,
                                                  staircase=False, name="learning_rate1")
            # Optimizer
            self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1, beta1=0.9, beta2=0.99, epsilon=0.0000001)
            # Discounted reward
            self.reward_baseline = tf.stop_gradient(self.reward - self.avg_baseline - self.critic.predictions)  # [Batch size, 1]
            variable_summaries('reward_baseline', self.reward_baseline, with_max_min=True)
            # Loss
            self.loss1 = tf.reduce_mean(self.reward_baseline * self.log_softmax, 0)
            tf.summary.scalar('loss1', self.loss1)
            # Minimize step
            gvs = self.opt1.compute_gradients(self.loss1)
            capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None]  # L2 clip
            self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)

        with tf.name_scope('state_value'):
            # Critic learning rate
            self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2,
                                                  self.lr2_decay_step, self.lr2_decay_rate,
                                                  staircase=False, name="learning_rate2")
            # Optimizer
            self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2, beta1=0.9, beta2=0.99, epsilon=0.0000001)
            # Loss
            weights_ = 1.0
            # weights_ = tf.exp(self.log_softmax - tf.reduce_max(self.log_softmax))  # probs / max_prob
            self.loss2 = tf.losses.mean_squared_error(self.reward - self.avg_baseline,
                                                      self.critic.predictions, weights=weights_)
            tf.summary.scalar('loss2', self.loss2)
            # Minimize step
            gvs2 = self.opt2.compute_gradients(self.loss2)
            capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None]  # L2 clip
            self.train_step2 = self.opt2.apply_gradients(capped_gvs2, global_step=self.global_step2)
Example #21
Source File: actor.py From neural-combinatorial-optimization-rl-tensorflow with MIT License | 5 votes |
def variable_summaries(name, var, with_max_min=False):
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)

        if with_max_min:
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
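Because everything is created inside tf.name_scope(name), each tensor passed to the helper gets its statistics grouped under its own TensorBoard node. A hedged usage sketch (the weight variable is a stand-in; assumes the helper above is in scope and TF 1.x graph mode):

import tensorflow as tf

weights = tf.Variable(tf.random_normal([128, 64]), name='weights')  # stand-in tensor
variable_summaries('actor_weights', weights, with_max_min=True)
# Emits summaries tagged roughly 'actor_weights/mean', 'actor_weights/stddev', 'actor_weights/max', ...
summary_op = tf.summary.merge_all()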
Example #22
Source File: encoder.py From neural-combinatorial-optimization-rl-tensorflow with MIT License | 5 votes |
def __init__(self, config):
    self.batch_size = config.batch_size  # batch size
    self.max_length = config.max_length  # input sequence length (number of cities)
    self.input_dimension = config.input_dimension  # dimension of a city (coordinates)
    self.input_embed = config.hidden_dim  # dimension of embedding space (actor)

    self.num_heads = config.num_heads
    self.num_stacks = config.num_stacks

    self.initializer = tf.contrib.layers.xavier_initializer()  # variables initializer
    self.is_training = not config.inference_mode

    # with tf.name_scope('encode_'):
    #     self.encode()
Example #23
Source File: actor.py From neural-combinatorial-optimization-rl-tensorflow with MIT License | 5 votes |
def build_optim(self):
    # Update moving_mean and moving_variance for batch normalization layers
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):

        with tf.name_scope('reinforce'):
            # Actor learning rate
            self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step,
                                                  self.lr1_decay_step, self.lr1_decay_rate,
                                                  staircase=False, name="learning_rate1")
            # Optimizer
            self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1, beta1=0.9, beta2=0.99, epsilon=0.0000001)
            # Discounted reward
            self.reward_baseline = tf.stop_gradient(self.reward - self.critic.predictions)  # [Batch size, 1]
            variable_summaries('reward_baseline', self.reward_baseline, with_max_min=True)
            # Loss
            self.loss1 = tf.reduce_mean(self.reward_baseline * self.log_softmax, 0)
            tf.summary.scalar('loss1', self.loss1)
            # Minimize step
            gvs = self.opt1.compute_gradients(self.loss1)
            capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None]  # L2 clip
            self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)

        with tf.name_scope('state_value'):
            # Critic learning rate
            self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2,
                                                  self.lr2_decay_step, self.lr2_decay_rate,
                                                  staircase=False, name="learning_rate2")
            # Optimizer
            self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2, beta1=0.9, beta2=0.99, epsilon=0.0000001)
            # Loss
            self.loss2 = tf.losses.mean_squared_error(self.reward, self.critic.predictions, weights=1.0)
            tf.summary.scalar('loss2', self.loss2)
            # Minimize step
            gvs2 = self.opt2.compute_gradients(self.loss2)
            capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None]  # L2 clip
            self.train_step2 = self.opt2.apply_gradients(capped_gvs2, global_step=self.global_step2)
Example #24
Source File: actor.py From neural-combinatorial-optimization-rl-tensorflow with MIT License | 5 votes |
def variable_summaries(name, var, with_max_min=False):
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)

        if with_max_min:
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
Example #25
Source File: 17_conv_mnist.py From deep-learning-note with MIT License | 5 votes |
def eval(self):
    ''' Count the number of right predictions in a batch '''
    with tf.name_scope('predict'):
        preds = tf.nn.softmax(self.logits)
        correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(self.label, 1))
        self.accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
Example #26
Source File: 17_conv_mnist.py From deep-learning-note with MIT License | 5 votes |
def summary(self):
    ''' Create summaries to write on TensorBoard '''
    with tf.name_scope('summaries'):
        tf.compat.v1.summary.scalar('loss', self.loss)
        tf.compat.v1.summary.scalar('accuracy', self.accuracy)
        tf.compat.v1.summary.histogram('histogram loss', self.loss)
        self.summary_op = tf.compat.v1.summary.merge_all()
Example #27
Source File: 17_conv_mnist.py From deep-learning-note with MIT License | 5 votes |
def loss(self):
    '''
    define loss function
    use softmax cross entropy with logits as the loss function
    compute mean cross entropy, softmax is applied internally
    '''
    with tf.name_scope('loss'):
        entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.label, logits=self.logits)
        self.loss = tf.reduce_mean(entropy, name='loss')
Example #28
Source File: 11_w2v_visual.py From deep-learning-note with MIT License | 5 votes |
def _create_summaries(self):
    with tf.name_scope('summaries'):
        tf.compat.v1.summary.scalar('loss', self.loss)
        tf.compat.v1.summary.histogram('histogram loss', self.loss)
        # because there are several summaries, merge them all
        # into one op to make them easier to manage
        self.summary_op = tf.summary.merge_all()
Example #29
Source File: 11_w2v_visual.py From deep-learning-note with MIT License | 5 votes |
def _create_embedding(self):
    """ Step 2 + 3: define weights and embedding lookup.
    In word2vec, it's actually the weights that we care about.
    """
    with tf.name_scope('embed'):
        self.embed_matrix = tf.get_variable('embed_matrix',
                                            shape=[self.vocab_size, self.embed_size],
                                            initializer=tf.random_uniform_initializer())
        self.embed = tf.nn.embedding_lookup(self.embed_matrix, self.center_words, name='embedding')
Example #30
Source File: 11_w2v_visual.py From deep-learning-note with MIT License | 5 votes |
def _import_data(self):
    """ Step 1: import data """
    with tf.name_scope('data'):
        self.iterator = self.dataset.make_initializable_iterator()
        self.center_words, self.target_words = self.iterator.get_next()