Python tensorflow.python.framework.ops Examples

The following are 19 code examples of the module tensorflow.python.framework.ops, collected from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.framework, or try the search function.
Example #1
Source File: meta.py From learning-to-learn with Apache License 2.0

def meta_minimize(self, make_loss, len_unroll, learning_rate=0.01, **kwargs):
  """Returns an operator minimizing the meta-loss.

  Args:
    make_loss: Callable which returns the optimizee loss; note that this
        should create its ops in the default graph.
    len_unroll: Number of steps to unroll.
    learning_rate: Learning rate for the Adam optimizer.
    **kwargs: keyword arguments forwarded to meta_loss.

  Returns:
    namedtuple containing (step, update, reset, fx, x)
  """
  info = self.meta_loss(make_loss, len_unroll, **kwargs)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  step = optimizer.minimize(info.loss)
  return MetaStep(step, *info[1:])
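The method above follows the standard TF1 pattern of building a minimize op and driving it from a session. The sketch below shows that pattern in isolation with a toy quadratic loss; the MetaOptimizer class, the make_loss callable, and the MetaStep fields belong to the learning-to-learn project and are not reproduced here.

import tensorflow as tf

# A minimal sketch of the tf.train.AdamOptimizer + Session loop that the
# returned `step` op is driven by. The toy loss below stands in for the
# loss built by `make_loss`.
x = tf.get_variable('x', shape=[], initializer=tf.constant_initializer(5.0))
loss = tf.square(x - 2.0)

optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
step = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        _, current_loss = sess.run([step, loss])
    print('final loss:', current_loss)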
Example #2
Source File: feature_column_ops.py From lambda-packs with MIT License

def _add_variable_collection(weight_collections):
  if weight_collections:
    weight_collections = list(
        set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
  return weight_collections


# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
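The helper merges the caller's collections with the global-variables collection key and drops duplicates. A small self-contained check of that behaviour, assuming a TF1 install (the public re-implementation below is only for illustration; the original helper is module-private):

from tensorflow.python.framework import ops

def add_variable_collection(weight_collections):
    # Same logic as the private helper above: merge with GLOBAL_VARIABLES
    # and de-duplicate (set() does not preserve order).
    if weight_collections:
        weight_collections = list(
            set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
    return weight_collections

print(add_variable_collection(['my_weights']))
# e.g. ['my_weights', 'variables'] -- order is not guaranteed because of set()
print(add_variable_collection(None))  # falsy input is passed through unchanged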
Example #3
Source File: model.py From tf-hrnet with BSD 3-Clause "New" or "Revised" License

def forward_train(self, train_input):
    batch_norm_params = {'epsilon': 1e-5,
                         'scale': True,
                         'is_training': True,
                         'updates_collections': ops.GraphKeys.UPDATE_OPS}
    with slim.arg_scope([layers.batch_norm], **batch_norm_params):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=he_normal_fanout(),
                            weights_regularizer=slim.l2_regularizer(
                                self.cfg['NET']['weight_l2_scale'])):
            final_logit = self._forward(train_input)
    return final_logit
Example #4
Source File: model.py From tf-hrnet with BSD 3-Clause "New" or "Revised" License

def forward_eval(self, eval_input):
    batch_norm_params = {'epsilon': 1e-5,
                         'scale': True,
                         'is_training': False,
                         'updates_collections': ops.GraphKeys.UPDATE_OPS}
    with slim.arg_scope([layers.batch_norm], **batch_norm_params):
        with slim.arg_scope([slim.conv2d],
                            weights_regularizer=slim.l2_regularizer(
                                self.cfg['NET']['weight_l2_scale'])):
            final_logit = self._forward(eval_input)
    return final_logit
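Because both methods route the batch-norm moving-average updates into ops.GraphKeys.UPDATE_OPS, a training loop has to run that collection alongside the optimizer step, or the moving statistics never change. A minimal sketch of that wiring; the toy layer and loss below are stand-ins for whatever the project actually builds:

import tensorflow as tf
from tensorflow.python.framework import ops

slim = tf.contrib.slim

# Toy graph with a batch-normalized conv layer, mirroring the arg_scope above.
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
labels = tf.placeholder(tf.float32, [None, 32, 32, 8])
net = slim.conv2d(images, 8, [3, 3],
                  normalizer_fn=slim.batch_norm,
                  normalizer_params={'is_training': True,
                                     'updates_collections': ops.GraphKeys.UPDATE_OPS})
loss = tf.losses.mean_squared_error(labels, net)

# The moving mean/variance updates live in UPDATE_OPS; make the train op
# depend on them so they run on every step.
update_ops = tf.get_collection(ops.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)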
Example #5
Source File: model.py From tf-hrnet with BSD 3-Clause "New" or "Revised" License

def model_summary(self):
    cnt = Counter()
    # Op types to count; note that this local `ops` list shadows the
    # tensorflow.python.framework.ops module used elsewhere in the file.
    ops = ['ResizeNearestNeighbor', 'Relu', 'Conv2D']
    for op in tf.get_default_graph().get_operations():
        if op.type in ops:
            cnt[op.type] += 1
    print(cnt)
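A standalone variant of the same idea, counting every op type in the default graph rather than a fixed whitelist; the tiny graph built here is only for demonstration.

import collections
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 8, 3])
y = tf.nn.relu(tf.layers.conv2d(x, 4, 3))

# Tally operations by their registered op type.
cnt = collections.Counter(op.type for op in tf.get_default_graph().get_operations())
for op_type, count in cnt.most_common():
    print(op_type, count)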
Example #6
Source File: inception_v2_tpu_model.py From training_results_v0.5 with Apache License 2.0

def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
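A quick check of the two branches, assuming the function above is in scope: with a fully defined spatial shape the kernel is clamped to the feature-map size, and with unknown height/width the requested kernel is returned unchanged.

import tensorflow as tf

small = tf.placeholder(tf.float32, [None, 5, 5, 192])
unknown = tf.placeholder(tf.float32, [None, None, None, 192])

print(_reduced_kernel_size_for_small_input(small, [7, 7]))    # [5, 5]
print(_reduced_kernel_size_for_small_input(unknown, [7, 7]))  # [7, 7]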
Example #7
Source File: inception_v3.py From tf-slim with Apache License 2.0

def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  Make this function work with unknown shapes. Theoretically, this can be done
  with the code below. Problems are two-fold: (1) If the shape was known, it
  will be lost. (2) inception.tf.contrib.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
Example #8
Source File: inception_v2_tpu_model.py From class-balanced-loss with MIT License

def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
Example #9
Source File: inception_v2_tpu_model.py From tpu_models with Apache License 2.0

def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
Example #10
Source File: inception_v4_model.py From class-balanced-loss with MIT License

def inception_v4_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True,
                           activation_fn=nn_ops.relu):
  """Defines the default InceptionV4 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.
    activation_fn: Activation function for conv2d.

  Returns:
    An `arg_scope` to use for the inception v4 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  normalizer_fn = slim.batch_norm
  normalizer_params = batch_norm_params
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
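The returned scope is meant to be entered with slim.arg_scope before the network is built, so every slim.conv2d call inside picks up the regularizer, initializer, activation, and batch-norm settings. A minimal usage sketch, assuming the function above and its module imports (slim, ops, nn_ops) are available; the single conv layer stands in for the full Inception v4 graph:

import tensorflow as tf

slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 299, 299, 3])

with slim.arg_scope(inception_v4_arg_scope(weight_decay=0.00004)):
    # Inside the scope, conv2d is already wired with the L2 regularizer,
    # variance-scaling initializer, ReLU activation, and batch norm.
    net = slim.conv2d(images, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')

print(net)
# The L2 regularizer registers its losses automatically:
print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))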
Example #11
Source File: feature_column_ops.py From lambda-packs with MIT License

def _input_from_feature_columns(columns_to_tensors,
                                feature_columns,
                                weight_collections,
                                trainable,
                                scope,
                                output_rank,
                                default_name):
  """Implementation of `input_from(_sequence)_feature_columns`."""
  columns_to_tensors = columns_to_tensors.copy()
  check_feature_columns(feature_columns)
  with variable_scope.variable_scope(scope,
                                     default_name=default_name,
                                     values=columns_to_tensors.values()):
    output_tensors = []
    transformer = _Transformer(columns_to_tensors)
    if weight_collections:
      weight_collections = list(set(list(weight_collections) +
                                    [ops.GraphKeys.GLOBAL_VARIABLES]))

    for column in sorted(set(feature_columns), key=lambda x: x.key):
      with variable_scope.variable_scope(None,
                                         default_name=column.name,
                                         values=columns_to_tensors.values()):
        transformed_tensor = transformer.transform(column)
        if output_rank == 3:
          transformed_tensor = nest.map_structure(
              functools.partial(
                  _maybe_reshape_input_tensor,
                  column_name=column.name,
                  output_rank=output_rank), transformed_tensor)
        try:
          # pylint: disable=protected-access
          arguments = column._deep_embedding_lookup_arguments(
              transformed_tensor)
          output_tensors.append(
              fc._embeddings_from_arguments(  # pylint: disable=protected-access
                  column,
                  arguments,
                  weight_collections,
                  trainable,
                  output_rank=output_rank))
        except NotImplementedError as ee:
          try:
            # pylint: disable=protected-access
            output_tensors.append(column._to_dnn_input_layer(
                transformed_tensor,
                weight_collections,
                trainable,
                output_rank=output_rank))
          except ValueError as e:
            raise ValueError('Error creating input layer for column: {}.\n'
                             '{}, {}'.format(column.name, e, ee))
    return array_ops.concat(output_tensors, output_rank - 1)
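This private helper is the implementation behind the public tf.contrib.layers.input_from_feature_columns entry point in TF1. A hedged usage sketch of that public function with a couple of simple feature columns; the column names and feature values are made up for illustration:

import tensorflow as tf

layers = tf.contrib.layers

# Made-up feature tensors keyed by column name.
features = {
    'age': tf.constant([[23.0], [35.0]]),
    'country': tf.SparseTensor(indices=[[0, 0], [1, 0]],
                               values=['us', 'ca'],
                               dense_shape=[2, 1]),
}

age = layers.real_valued_column('age')
country = layers.sparse_column_with_keys('country', keys=['us', 'ca', 'de'])
country_emb = layers.embedding_column(country, dimension=4)

# Builds the dense input layer: each column is transformed and the results
# are concatenated on the last axis, here a [batch_size, 1 + 4] tensor.
net = layers.input_from_feature_columns(
    columns_to_tensors=features,
    feature_columns=[age, country_emb])
print(net)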
Example #12
Source File: inception_v2_tpu_model.py From class-balanced-loss with MIT License

def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Enable fused batchnorm.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
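Because the moving mean and variance are routed into the custom 'moving_vars' collection, they can be fetched separately from the trainable variables, which checkpointing and moving-average pipelines sometimes rely on. A small sketch, assuming the function above and its contrib imports (arg_scope, layers, layers_lib, regularizers, initializers, nn_ops, ops) are available:

import tensorflow as tf

slim = tf.contrib.slim  # slim.conv2d is the same op as contrib layers.conv2d

images = tf.placeholder(tf.float32, [None, 224, 224, 3])

with slim.arg_scope(inception_v2_arg_scope(batch_norm_var_collection='moving_vars')):
    net = slim.conv2d(images, 16, [3, 3], scope='Conv2d_1a_3x3')

# The batch-norm moving statistics were added to the named collection.
print(tf.get_collection('moving_vars'))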
Example #13
Source File: nn.py From deepsleepnet with Apache License 2.0

def batch_norm_new(name, input_var, is_train, decay=0.999, epsilon=1e-5):
    """Batch normalization modified from BatchNormLayer in Tensorlayer.
    Source: <https://github.com/zsdonghao/tensorlayer/blob/master/tensorlayer/layers.py#L2190>
    """
    inputs_shape = input_var.get_shape()
    axis = list(range(len(inputs_shape) - 1))
    params_shape = inputs_shape[-1:]

    with tf.variable_scope(name) as scope:
        # Trainable beta and gamma variables
        beta = tf.get_variable('beta',
                               shape=params_shape,
                               initializer=tf.zeros_initializer)
        gamma = tf.get_variable('gamma',
                                shape=params_shape,
                                initializer=tf.random_normal_initializer(mean=1.0, stddev=0.002))

        # Moving mean and variance updated during training
        moving_mean = tf.get_variable('moving_mean',
                                      params_shape,
                                      initializer=tf.zeros_initializer,
                                      trainable=False)
        moving_variance = tf.get_variable('moving_variance',
                                          params_shape,
                                          initializer=tf.constant_initializer(1.),
                                          trainable=False)

        # Compute mean and variance along axis
        batch_mean, batch_variance = tf.nn.moments(input_var, axis, name='moments')

        # Define ops to update moving_mean and moving_variance
        update_moving_mean = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay, zero_debias=False)
        update_moving_variance = moving_averages.assign_moving_average(
            moving_variance, batch_variance, decay, zero_debias=False)

        # Define a function that:
        # 1. Updates moving_mean & moving_variance with batch_mean & batch_variance
        # 2. Then returns the batch_mean & batch_variance
        def mean_var_with_update():
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                return tf.identity(batch_mean), tf.identity(batch_variance)

        # Perform different ops for training and testing
        if is_train:
            mean, variance = mean_var_with_update()
            normed = tf.nn.batch_normalization(input_var, mean, variance, beta, gamma, epsilon)
        else:
            normed = tf.nn.batch_normalization(input_var, moving_mean, moving_variance, beta, gamma, epsilon)

        # mean, variance = tf.cond(
        #     is_train,
        #     mean_var_with_update,                    # Training
        #     lambda: (moving_mean, moving_variance)   # Testing - uses the moving_mean and
        #                                              # moving_variance (fixed during test)
        #                                              # computed during training
        # )
        # normed = tf.nn.batch_normalization(input_var, mean, variance, beta, gamma, epsilon)
        return normed
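A hedged usage sketch: is_train is a Python bool driving a Python if, not a tensor, so train and eval paths need either separate graphs or variable reuse. This assumes batch_norm_new and its `moving_averages` import (tensorflow.python.training.moving_averages) are in scope.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128, 6])

with tf.variable_scope('model'):
    bn_train = batch_norm_new('bn1', x, is_train=True)

# Reuse the same beta/gamma and moving statistics for the eval path.
with tf.variable_scope('model', reuse=True):
    bn_eval = batch_norm_new('bn1', x, is_train=False)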
Example #14
Source File: tf.py From deep500 with BSD 3-Clause "New" or "Revised" License

def custom_op(op: Union[CustomOp, CompilableOp, TFCompiledOp], stateful=True, name=None,
              use_autodiff=False, compile_only=False, return_handle=False):
    """ Registers a custom Tensorflow operator from `CustomOp`, `CompilableOp`,
        or `TFCompiledOp` objects.
        @param op The custom operator. If numpy is not used, automatic
                  differentiation via Tensorflow applies.
        @param stateful True if the operation is not a pure function (enables
                        sub-expression elimination optimizations if False).
        @param name Specify a custom name for this operation.
        @param use_autodiff If true, uses tensorflow tensors, otherwise assumes
                            numpy arrays.
        @param compile_only If true, returns a TFCompiledOp instead of an instantiated op.
        @param return_handle (for C++ ops) If true, also returns a direct handle to the
                             operator object and library as a 3-tuple:
                             (operator, library, handle).
        @return A tf.Operation object (or a function) that calls the custom operator.
    """
    if isinstance(op, CompilableOp):
        result = _custom_cpp_op(op, stateful, name)
        if compile_only:
            return result
        else:
            op = result
    if isinstance(op, TFCompiledOp):
        result = _create_op_handle(op)
        if return_handle:
            return result
        else:
            return result[0]
    elif isinstance(op, CustomOp):
        if use_autodiff == True:
            return op.forward

        def _fwd(*inputs):
            return op.forward(*inputs)

        def _bwd(tfop, *grads):
            def _actual_bwd(*args):
                return op.backward(args[:len(grads)],
                                   args[len(grads):(len(grads) + len(tfop.inputs))],
                                   args[(len(grads) + len(tfop.inputs)):])
            return tf.py_func(
                _actual_bwd,
                (list(grads) + list(tfop.inputs) + list(tfop.outputs)),
                [inp.dtype for inp in op.input_descriptors],
                stateful=stateful)

        # Gradient replacement adapted from
        # https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342

        # Generate a unique name to avoid duplicates
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
        tf.RegisterGradient(rnd_name)(_bwd)

        def result(*inputs):
            g = tf.get_default_graph()
            with g.gradient_override_map({"PyFunc": rnd_name,
                                          "PyFuncStateless": rnd_name}):
                return tf.py_func(_fwd, inputs,
                                  [out.dtype for out in op.output_descriptors],
                                  stateful=stateful, name=name)
        return result
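The core trick above is the tf.py_func plus gradient_override_map pattern: register a Python gradient function under a fresh name, then remap the PyFunc gradient inside a graph scope. A minimal standalone sketch of that pattern for a NumPy square op, independent of deep500's CustomOp classes:

import numpy as np
import tensorflow as tf

def _square_fwd(x):
    # NumPy forward pass.
    return np.square(x).astype(np.float32)

def _square_bwd(op, grad):
    # d/dx x^2 = 2x, expressed with TF ops on the recorded inputs.
    return grad * 2.0 * op.inputs[0]

# Register the gradient under a unique name and map it onto PyFunc ops.
grad_name = 'PyFuncGrad_square_demo'
tf.RegisterGradient(grad_name)(_square_bwd)

x = tf.constant([1.0, 2.0, 3.0])
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': grad_name, 'PyFuncStateless': grad_name}):
    y = tf.py_func(_square_fwd, [x], tf.float32, stateful=False)
y.set_shape(x.get_shape())  # py_func loses static shape information

dy_dx = tf.gradients(y, x)[0]
with tf.Session() as sess:
    print(sess.run([y, dy_dx]))  # [1, 4, 9] and [2, 4, 6]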
Example #15
Source File: inception_v4_model.py From tpu_models with Apache License 2.0

def inception_v4_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True,
                           activation_fn=nn_ops.relu):
  """Defines the default InceptionV4 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.
    activation_fn: Activation function for conv2d.

  Returns:
    An `arg_scope` to use for the inception v4 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  normalizer_fn = slim.batch_norm
  normalizer_params = batch_norm_params
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
Example #16
Source File: inception_v2_tpu_model.py From tpu_models with Apache License 2.0

def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Enable fused batchnorm.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Example #17
Source File: inception_v3.py From tf-slim with Apache License 2.0

def inception_v3_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Example #18
Source File: inception_v4_model.py From training_results_v0.5 with Apache License 2.0

def inception_v4_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True,
                           activation_fn=nn_ops.relu):
  """Defines the default InceptionV4 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.
    activation_fn: Activation function for conv2d.

  Returns:
    An `arg_scope` to use for the inception v4 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  normalizer_fn = slim.batch_norm
  normalizer_params = batch_norm_params
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
Example #19
Source File: inception_v2_tpu_model.py From training_results_v0.5 with Apache License 2.0

def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Enable fused batchnorm.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc