Python tensorflow.concat() Examples
The following are 13 code examples of tensorflow.concat().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: vgslspecs.py From DOTA_models with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #2
Source File: vgslspecs.py From yolo_v2 with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #3
Source File: vgslspecs.py From Gun-Detector with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #4
Source File: vgslspecs.py From ad-versarial with MIT License | 5 votes |
def AddParallel(self, prev_layer, index, reuse=None):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing
    reuse: Unused here; kept for signature compatibility with callers.

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #5
Source File: vgslspecs.py From Action_Recognition_Zoo with MIT License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  # Pre-TF-1.0 concat signature: (concat_dim, values).
  return tf.concat(rank - 1, branches), pos + 1
Example #6
Source File: vgslspecs.py From ECO-pytorch with BSD 2-Clause "Simplified" License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  # Pre-TF-1.0 concat signature: (concat_dim, values).
  return tf.concat(rank - 1, branches), pos + 1
Example #7
Source File: vgslspecs.py From hands-detection with MIT License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #8
Source File: vgslspecs.py From object_detection_kitti with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #9
Source File: vgslspecs.py From object_detection_with_tensorflow with MIT License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #10
Source File: vgslspecs.py From HumanRecognition with MIT License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #11
Source File: vgslspecs.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #12
Source File: vgslspecs.py From models with Apache License 2.0 | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1
Example #13
Source File: vgslspecs.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def AddParallel(self, prev_layer, index):
  """Builds a parallel group: runs layers on one input, concats on depth.

  Parses a '('...')' group in model_str, building every element from
  prev_layer, then joins the branch outputs along the last dimension.

  Args:
    prev_layer: Input tensor.
    index: Position in model_str to start parsing

  Returns:
    Output tensor of the parallel, end index in model_str.

  Raises:
    ValueError: If () are unbalanced or the elements don't match.
  """
  if self.model_str[index] != '(':
    return None, None
  pos = index + 1
  branches = []
  rank = 0
  # Every branch must see the same input scaling, so snapshot the
  # reduction factors once and restore them before each branch is built.
  saved_factors = self.reduction_factors
  agreed_factors = None
  while pos < len(self.model_str) and self.model_str[pos] != ')':
    self.reduction_factors = saved_factors
    branch, pos = self.BuildFromString(prev_layer, pos)
    branch_rank = len(branch.get_shape())
    if rank == 0:
      rank = branch_rank
    elif branch_rank != rank:
      raise ValueError('All elements of parallel must return same num dims')
    branches.append(branch)
    if not agreed_factors:
      agreed_factors = self.reduction_factors
    elif agreed_factors != self.reduction_factors:
      raise ValueError('All elements of parallel must scale the same')
  if pos == len(self.model_str):
    raise ValueError('Missing ) at end of parallel!' + self.model_str)
  return tf.concat(axis=rank - 1, values=branches), pos + 1