Python tensorflow.contrib.slim.get_model_variables() Examples
The following are 30 code examples of tensorflow.contrib.slim.get_model_variables(). The source file, originating project, and license are noted above each example.
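For context before the examples: slim.get_model_variables() returns the variables that slim layers register as model state (typically weights and biases), as opposed to transient training state such as the global step. Below is a minimal sketch of typical usage, assuming TensorFlow 1.x with tf.contrib available; the layer and scope names are illustrative only:

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
    net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')
    net = slim.conv2d(net, 128, [3, 3], scope='conv2')

    # Every model variable created by the slim layers above.
    for var in slim.get_model_variables():
        print(var.op.name, var.get_shape())

    # The optional scope argument filters by variable-scope prefix; several
    # examples below use this to restore only part of a model from a checkpoint.
    conv1_vars = slim.get_model_variables('conv1')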
Example #1
Source File: model_wrappers.py From nips-2017-adversarial with MIT License
def load_ckpt(ckpt_name, var_scope_name, scope, constructor, input_tensor,
              label_offset, load_weights, **kwargs):
    """
    Arguments
        ckpt_name       file name of the checkpoint
        var_scope_name  name of the variable scope
        scope           arg_scope
        constructor     constructor of the model
        input_tensor    tensor of input image
        label_offset    whether it is 1000 classes or 1001 classes; if it is
                        1001, remove class 0
        load_weights    whether to load weights
        kwargs          is_training, create_aux_logits
    """
    with slim.arg_scope(scope):
        logits, endpoints = constructor(
            input_tensor, num_classes=1000 + label_offset,
            scope=var_scope_name, **kwargs)
    if load_weights:
        init_fn = slim.assign_from_checkpoint_fn(
            ckpt_name, slim.get_model_variables(var_scope_name))
        init_fn(K.get_session())
    return logits, endpoints
Example #2
Source File: slim_train_test.py From SSD_tensorflow_VOC with Apache License 2.0
def __add_summaries(self, end_points, learning_rate, total_loss):
    for end_point in end_points:
        x = end_points[end_point]
        tf.summary.histogram('activations/' + end_point, x)
        tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x))

    for loss in tf.get_collection(tf.GraphKeys.LOSSES):
        tf.summary.scalar('losses/%s' % loss.op.name, loss)
    # Add total_loss to summary.
    tf.summary.scalar('total_loss', total_loss)

    # Add summaries for variables.
    for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

    tf.summary.scalar('learning_rate', learning_rate)
    return
Example #3
Source File: image_generation.py From TwinGAN with Apache License 2.0
def prepare_inception_score_classifier(classifier_name, num_classes, images,
                                       return_saver=True):
    network_fn = nets_factory.get_network_fn(
        classifier_name,
        num_classes=num_classes,
        weight_decay=0.0,
        is_training=False,
    )
    # Note: you may need to change the prediction_fn here.
    try:
        logits, end_points = network_fn(
            images, prediction_fn=tf.sigmoid, create_aux_logits=False)
    except TypeError:
        tf.logging.warning(
            'Cannot specify prediction_fn=tf.sigmoid, create_aux_logits=False.')
        logits, end_points = network_fn(images)

    variables_to_restore = slim.get_model_variables(
        scope=nets_factory.scopes_map[classifier_name])
    predictions = end_points['Predictions']
    if return_saver:
        saver = tf.train.Saver(variables_to_restore)
        return predictions, end_points, saver
    else:
        return predictions, end_points
Example #4
Source File: im_model.py From tumblr-emotions with Apache License 2.0
def get_init_fn(checkpoints_dir, model_name='inception_v1.ckpt'):
    """Returns a function run by the chief worker to warm-start the training."""
    checkpoint_exclude_scopes = ["InceptionV1/Logits", "InceptionV1/AuxLogits"]
    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, model_name),
        variables_to_restore)
Example #5
Source File: train.py From AAMS with MIT License
def _get_init_fn():
    vgg_checkpoint_path = "vgg_19.ckpt"
    if tf.gfile.IsDirectory(vgg_checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(vgg_checkpoint_path)
    else:
        checkpoint_path = vgg_checkpoint_path

    variables_to_restore = []
    for var in slim.get_model_variables():
        tf.logging.info('model_var: %s' % var)
        excluded = False
        for exclusion in ['vgg_19/fc']:
            if var.op.name.startswith(exclusion):
                excluded = True
                tf.logging.info('exclude:%s' % exclusion)
                break
        if not excluded:
            variables_to_restore.append(var)

    tf.logging.info('Fine-tuning from %s' % checkpoint_path)
    return slim.assign_from_checkpoint_fn(
        checkpoint_path,
        variables_to_restore,
        ignore_missing_vars=True)
Example #6
Source File: export_model.py From Youtube-8M-WILLOW with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.name_scope("model"):
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #7
Source File: train_model.py From SSD_tensorflow_VOC with Apache License 2.0
def __add_summaries(self, end_points, learning_rate, total_loss):
    # Add summaries for end_points (activations).
    for end_point in end_points:
        x = end_points[end_point]
        tf.summary.histogram('activations/' + end_point, x)
        tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x))

    # Add summaries for losses and extra losses.
    tf.summary.scalar('total_loss', total_loss)
    for loss in tf.get_collection('EXTRA_LOSSES'):
        tf.summary.scalar(loss.op.name, loss)

    # Add summaries for variables.
    for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)
    return
Example #8
Source File: export_model.py From AttentionCluster with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #9
Source File: net.py From DenseHumanBodyCorrespondences with The Unlicense
def classify(model_range, seg_range, feature_lr, classifier_lr):
    feat_opt = tf.train.AdamOptimizer(feature_lr)
    clas_opt = tf.train.AdamOptimizer(classifier_lr)
    for model in model_range:
        for seg in seg_range:
            with tf.variable_scope('classifier-{}-{}'.format(model, seg)):
                self.preds[(model, seg)] = slim.conv2d(self.feature, 500, [1, 1])
                self.clas_vars[(model, seg)] = slim.get_model_variables()[-2:]

            with tf.variable_scope('losses-{}-{}'.format(model, seg)):
                self.losses[(model, seg)] = self.loss(
                    self.labels, self.preds[(model, seg)])
                grad = tf.gradients(
                    self.losses[(model, seg)],
                    self.feat_vars + self.clas_vars[(model, seg)])
                train_op_feat = feat_opt.apply_gradients(
                    zip(grad[:-2], self.feat_vars))
                train_op_clas = clas_opt.apply_gradients(
                    zip(grad[-2:], self.clas_vars[(model, seg)]))
                self.train_ops[(model, seg)] = tf.group(train_op_feat, train_op_clas)
    return self.losses, self.train_ops
Example #10
Source File: export_model.py From Y8M with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #11
Source File: export_model.py From Y8M with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        layers_keep_probs = tf.Variable(
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            tf.float32, name="layers_keep_probs")
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False,
            layers_keep_probs=layers_keep_probs)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #12
Source File: pretrained.py From SSD_tensorflow_VOC with Apache License 2.0
def get_init_fn(self, checkpoint_path):
    """Returns a function run by the chief worker to warm-start the training."""
    checkpoint_exclude_scopes = ["InceptionV4/Logits", "InceptionV4/AuxLogits"]
    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        checkpoint_path,
        variables_to_restore)
Example #13
Source File: export_model.py From Y8M with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        layers_keep_probs = tf.Variable(
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            tf.float32, name="layers_keep_probs")
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False,
            layers_keep_probs=layers_keep_probs)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #14
Source File: model_wrappers.py From nips-2017-adversarial with MIT License
def load_ckpt(ckpt_name, var_scope_name, scope, constructor, input_tensor,
              label_offset, load_weights, **kwargs):
    """kwargs are is_training and create_aux_logits."""
    print(var_scope_name)
    with slim.arg_scope(scope):
        logits, endpoints = constructor(
            input_tensor, num_classes=1000 + label_offset,
            scope=var_scope_name, **kwargs)
    if load_weights:
        init_fn = slim.assign_from_checkpoint_fn(
            ckpt_name, slim.get_model_variables(var_scope_name))
        init_fn(K.get_session())
    return logits, endpoints
Example #15
Source File: train_app.py From cosine_metric_learning with GNU General Public License v3.0
def finalize(preprocess_fn, network_factory, checkpoint_path, image_shape,
             output_filename):
    """Finalize model, i.e., strip off training variables and only save model
    variables to checkpoint file.

    Parameters
    ----------
    preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
        A callable that applies preprocessing to a given input image tensor of
        dtype tf.uint8 and returns a floating point representation (tf.float32).
    network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
        A callable that takes as argument a preprocessed input image of dtype
        tf.float32 and returns the feature representation as well as a logits
        tensor. The logits may be set to None if not required by the loss.
    checkpoint_path : str
        The checkpoint file to load.
    image_shape : Tuple[int, int, int]
        Image shape (height, width, channels).
    output_filename : str
        The checkpoint file to write.

    """
    with tf.Session(graph=tf.Graph()) as session:
        input_var = tf.placeholder(tf.uint8, (None, ) + image_shape)
        image_var = tf.map_fn(
            lambda x: preprocess_fn(x, is_training=False),
            input_var, back_prop=False, dtype=tf.float32)
        network_factory(image_var)

        loader = tf.train.Saver(slim.get_variables_to_restore())
        loader.restore(session, checkpoint_path)

        saver = tf.train.Saver(slim.get_model_variables())
        saver.save(session, output_filename, global_step=None)
Example #16
Source File: solver.py From minimal-entropy-correlation-alignment with MIT License
def test(self):
    trg_images, trg_labels = self.load_mnist(self.mnist_dir, split='test')

    # build a graph
    model = self.model
    model.build_model()

    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()

        print('Loading model.')
        variables_to_restore = slim.get_model_variables()
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, self.trained_model)

        trg_acc, trg_entr = sess.run(
            fetches=[model.trg_accuracy, model.trg_entropy],
            feed_dict={model.trg_images: trg_images[:],
                       model.trg_labels: trg_labels[:]})

        print('test acc [%.3f]' % (trg_acc))
        print('entropy [%.3f]' % (trg_entr))

        with open('test_' + str(model.alpha) + '_' + model.method + '.txt', "a") as resfile:
            resfile.write(str(trg_acc) + '\t' + str(trg_entr) + '\n')
        #~ print confusion_matrix(trg_labels, trg_pred)
Example #17
Source File: AM3_TADAM.py From am3 with Apache License 2.0
def __init__(self, model_path, batch_size):
    self.batch_size = batch_size

    latest_checkpoint = tf.train.latest_checkpoint(
        checkpoint_dir=os.path.join(model_path, 'train'))
    step = int(os.path.basename(latest_checkpoint).split('-')[1])

    default_params = get_arguments()
    #flags = Namespace(load_and_save_params(vars(default_params), model_path))
    flags = Namespace(load_and_save_params(default_params=dict(), exp_dir=model_path))
    image_size = get_image_size(flags.data_dir)

    with tf.Graph().as_default():
        pretrain_images_pl, pretrain_labels_pl = placeholder_inputs(
            batch_size=batch_size, image_size=image_size, scope='inputs/pretrain')
        logits = build_feat_extract_pretrain_graph(
            pretrain_images_pl, flags, is_training=False)

        self.pretrain_images_pl = pretrain_images_pl
        self.pretrain_labels_pl = pretrain_labels_pl

        init_fn = slim.assign_from_checkpoint_fn(
            latest_checkpoint, slim.get_model_variables('Model'))

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)

        # Run init before loading the weights
        self.sess.run(tf.global_variables_initializer())
        # Load weights
        init_fn(self.sess)

        self.flags = flags
        self.logits = logits
        self.logits_size = self.logits.get_shape().as_list()[-1]
        self.step = step
Example #18
Source File: export_model.py From youtube-8m with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    input_data_dict = (
        self.reader.prepare_serialized_examples(serialized_examples))
    video_id = input_data_dict["video_ids"]
    model_input_raw = input_data_dict["video_matrix"]
    labels_batch = input_data_dict["labels"]
    num_frames = input_data_dict["num_frames"]

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        result = self.model.create_model(model_input,
                                         num_frames=num_frames,
                                         vocab_size=self.reader.num_classes,
                                         labels=labels_batch,
                                         is_training=False)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #19
Source File: test_imagenet_attacks.py From cleverhans with MIT License
def test_clean_accuracy(self):
    """Check model is accurate on unperturbed images."""
    input_dir = FLAGS.input_image_dir
    metadata_file_path = FLAGS.metadata_file_path
    num_images = 16
    batch_shape = (num_images, 299, 299, 3)
    images, labels = load_images(
        input_dir, metadata_file_path, batch_shape)
    nb_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        y_label = tf.placeholder(tf.int32, shape=(num_images,))
        model = InceptionModel(nb_classes)
        logits = model.get_logits(x_input)
        acc = _top_1_accuracy(logits, y_label)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            acc_val = sess.run(acc, feed_dict={x_input: images, y_label: labels})
            tf.logging.info('Accuracy: %s', acc_val)
            assert acc_val > 0.8
Example #20
Source File: solver.py From dl-uncertainty with MIT License
def test(self, checkpoint):
    print '[*] Test.'

    images, labels = self.load_mnist(self.mnist_dir, split='test')

    with tf.Session(config=self.config) as sess:
        variables_to_restore = slim.get_model_variables(scope='encoder')
        variables_to_restore += slim.get_model_variables(scope='decoder')
        restorer = tf.train.Saver(variables_to_restore)
        print '[*] Loading ' + checkpoint
        restorer.restore(sess, checkpoint)

        summary_writer = tf.summary.FileWriter(
            logdir=self.log_dir + '/test', graph=tf.get_default_graph())

        print ('[*] Start testing.')
        num_batches = int(images.shape[0] / self.batch_size)

        for i in range(num_batches):
            feed_dict = {self.model.images:
                         images[i * self.batch_size:(i + 1) * self.batch_size]}
            summary = sess.run(self.model.summary_op, feed_dict)
            summary_writer.add_summary(summary, i)
            print ('Batch: [%d/%d] ' % (i, num_batches))
Example #21
Source File: model_wrappers.py From nips-2017-adversarial with MIT License
def load_ckpt(ckpt_name, var_scope_name, scope, constructor, input_tensor,
              label_offset, load_weights, **kwargs):
    """kwargs are is_training and create_aux_logits."""
    print(var_scope_name)
    with slim.arg_scope(scope):
        logits, endpoints = constructor(
            input_tensor, num_classes=1000 + label_offset,
            scope=var_scope_name, **kwargs)
    if load_weights:
        init_fn = slim.assign_from_checkpoint_fn(
            ckpt_name, slim.get_model_variables(var_scope_name))
        init_fn(K.get_session())
    return logits, endpoints
Example #22
Source File: model_wrappers.py From nips-2017-adversarial with MIT License
def load_ckpt(ckpt_name, var_scope_name, scope, constructor, input_tensor,
              label_offset, load_weights, **kwargs):
    """kwargs are is_training and create_aux_logits."""
    print(var_scope_name)
    with slim.arg_scope(scope):
        logits, endpoints = constructor(
            input_tensor, num_classes=1000 + label_offset,
            scope=var_scope_name, **kwargs)
    if load_weights:
        init_fn = slim.assign_from_checkpoint_fn(
            ckpt_name, slim.get_model_variables(var_scope_name))
        init_fn(K.get_session())
    return logits, endpoints
Example #23
Source File: multi_gpus_train.py From sense_classification with Apache License 2.0
def get_variables_to_restore(include_vars=[], exclude_global_pool=False):
    variables_to_restore = []
    for var in slim.get_model_variables():
        if exclude_global_pool and 'global_pool' in var.op.name:
            #print(var)
            continue
        variables_to_restore.append(var)
    for var in slim.get_variables_to_restore(include=include_vars):
        if exclude_global_pool and 'global_pool' in var.op.name:
            #print(var)
            continue
        variables_to_restore.append(var)
    return variables_to_restore
Example #24
Source File: test_imagenet_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_clean_accuracy(self):
    """Check model is accurate on unperturbed images."""
    input_dir = FLAGS.input_image_dir
    metadata_file_path = FLAGS.metadata_file_path
    num_images = 16
    batch_shape = (num_images, 299, 299, 3)
    images, labels = load_images(
        input_dir, metadata_file_path, batch_shape)
    num_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        y_label = tf.placeholder(tf.int32, shape=(num_images,))
        model = InceptionModel(num_classes)
        logits = model.get_logits(x_input)
        acc = _top_1_accuracy(logits, y_label)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(
                session_creator=session_creator) as sess:
            acc_val = sess.run(acc, feed_dict={
                x_input: images, y_label: labels})
            tf.logging.info('Accuracy: %s', acc_val)
            assert acc_val > 0.8
Example #25
Source File: export_model.py From youtube8mchallenge with Apache License 2.0
def build_prediction_graph(self, serialized_examples):
    if self.distill:
        video_id, model_input_raw, labels_batch, num_frames, distill_preds = (
            self.reader.prepare_serialized_examples(serialized_examples))
    else:
        video_id, model_input_raw, labels_batch, num_frames = (
            self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
        result = self.model.create_model(
            model_input,
            num_frames=num_frames,
            vocab_size=self.reader.num_classes,
            labels=labels_batch,
            is_training=False)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        predictions = result["predictions"]

        top_predictions, top_indices = tf.nn.top_k(
            predictions, _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Example #26
Source File: train_object_detector.py From MobileNet with Apache License 2.0
def _add_variables_summaries(learning_rate):
    summaries = []
    for variable in slim.get_model_variables():
        summaries.append(tf.summary.histogram(variable.op.name, variable))
    summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
    return summaries
Example #27
Source File: pretrained.py From SSD_tensorflow_VOC with Apache License 2.0
def use_vgg16(self):
    with tf.Graph().as_default():
        image_size = vgg.vgg_16.default_image_size
        img_path = "../../data/misec_images/First_Student_IC_school_bus_202076.jpg"
        checkpoint_path = "../../data/trained_models/vgg16/vgg_16.ckpt"

        image_string = tf.read_file(img_path)
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = vgg_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        # Create the model, use the default arg scope to configure the
        # batch norm parameters.
        with slim.arg_scope(vgg.vgg_arg_scope()):
            # 1000 classes instead of 1001.
            logits, _ = vgg.vgg_16(processed_images, num_classes=1000,
                                   is_training=False)
        probabilities = tf.nn.softmax(logits)

        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path,
            slim.get_model_variables('vgg_16'))

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [i[0] for i in sorted(
                enumerate(-probabilities), key=lambda x: x[1])]

        self.disp_names(sorted_inds, probabilities, include_background=False)

        plt.figure()
        plt.imshow(np_image.astype(np.uint8))
        plt.axis('off')
        plt.title(img_path)
        plt.show()
    return
Example #28
Source File: slim_walk.py From SSD_tensorflow_VOC with Apache License 2.0
def disp_model_info():
    with tf.Graph().as_default():
        # Dummy placeholders for arbitrary number of 1d inputs and outputs
        inputs = tf.placeholder(tf.float32, shape=(None, 1))
        outputs = tf.placeholder(tf.float32, shape=(None, 1))

        # Build model
        predictions, end_points = regression_model(inputs)

        # Print name and shape of each tensor.
        print("Layers")
        for k, v in end_points.items():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))

        # Print name and shape of parameter nodes (values not yet initialized)
        print("\n")
        print("Parameters")
        for v in slim.get_model_variables():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))

        print("\n")
        print("Local Parameters")
        for v in slim.get_local_variables():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))
    return
Example #29
Source File: trainOps.py From generalize-unseen-domains with MIT License
def test(self, target):
    test_images, test_labels = self.load_test_data(target=self.target_dataset)

    # build a graph
    print 'Building model'
    self.model.mode = 'train_encoder'
    self.model.build_model()
    print 'Built'

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        print ('Loading pre-trained model.')
        variables_to_restore = slim.get_model_variables(scope='encoder')
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, os.path.join(self.model_save_path, 'encoder'))

        N = 100  # set accordingly to GPU memory
        target_accuracy = 0
        target_loss = 0

        print 'Calculating accuracy'
        for test_images_batch, test_labels_batch in zip(
                np.array_split(test_images, N),
                np.array_split(test_labels, N)):
            feed_dict = {self.model.z: test_images_batch,
                         self.model.labels: test_labels_batch}
            target_accuracy_tmp, target_loss_tmp = sess.run(
                [self.model.accuracy, self.model.min_loss], feed_dict)
            target_accuracy += target_accuracy_tmp / float(N)
            target_loss += target_loss_tmp / float(N)

    print ('Target accuracy: [%.4f] target loss: [%.4f]'
           % (target_accuracy, target_loss))
Example #30
Source File: train_classifier_mgr.py From SSD_tensorflow_VOC with Apache License 2.0
def _add_variables_summaries(learning_rate):
    summaries = []
    for variable in slim.get_model_variables():
        summaries.append(tf.summary.histogram(variable.op.name, variable))
    summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
    return summaries