Python caffe2.python.brew.fc() Examples
The following are 14 code examples of caffe2.python.brew.fc().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module caffe2.python.brew, or try the search function.
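
For orientation, here is a minimal, hypothetical sketch (not taken from any of the projects below) of how brew.fc is typically used with a ModelHelper: it adds a fully connected layer mapping dim_in input features to dim_out outputs, and is usually chained with brew.relu and brew.softmax, as in the examples that follow. The blob name 'data' and its shape are assumptions for illustration only.

from caffe2.python import brew, model_helper

# Hypothetical setup: 'data' is assumed to be a blob of shape (batch, 784),
# e.g. flattened MNIST images fed via workspace.FeedBlob.
model = model_helper.ModelHelper(name="fc_example")
fc1 = brew.fc(model, 'data', 'fc1', dim_in=784, dim_out=500)   # 784 -> 500
relu1 = brew.relu(model, fc1, 'relu1')
pred = brew.fc(model, relu1, 'pred', dim_in=500, dim_out=10)   # 500 -> 10 classes
softmax = brew.softmax(model, pred, 'softmax')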

Example #1
Source File: mnist.py From batch-shipyard with MIT License | 6 votes |
def AddLeNetModel(model, data):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.

    For each convolutional layer we specify dim_in - the number of input
    channels - and dim_out - the number of output channels. Each Conv and
    MaxPool layer also changes the image size. For example, a kernel of size 5
    reduces each side of an image by 4, while a MaxPool layer with kernel and
    stride sizes equal to 2 divides each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 50 * 4 * 4 is dim_out from the previous layer multiplied by the image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
    fc3 = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, fc3, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
Example #2
Source File: benchmarks.py From dlcookbook-dlbs with Apache License 2.0 | 6 votes |
def create_model(model_builder, model, enable_tensor_core, float16_compute, loss_scale=1.0):
    """Creates one model replica.

    :param obj model_builder: A model instance that contains `forward_pass_builder` method.
    :param model: Caffe2's model helper class instance.
    :type model: :py:class:`caffe2.python.model_helper.ModelHelper`
    :param bool enable_tensor_core: If true, Volta's tensor core ops are enabled.
    :param float loss_scale: Scale loss for multi-GPU training.
    :return: Head nodes (softmax or loss depending on phase)
    """
    initializer = (pFP16Initializer if model_builder.dtype == 'float16' else Initializer)
    with brew.arg_scope([brew.conv, brew.fc],
                        WeightInitializer=initializer,
                        BiasInitializer=initializer,
                        enable_tensor_core=enable_tensor_core,
                        float16_compute=float16_compute):
        outputs = model_builder.forward_pass_builder(model, loss_scale=loss_scale)
    return outputs
Example #3
Source File: model.py From dlcookbook-dlbs with Apache License 2.0 | 6 votes |
def add_head_nodes(self, model, v, dim_in, fc_name, loss_scale=1.0):
    """Adds dense and softmax head nodes.

    :param model_helper.ModelHelper model: Current model to use.
    :param obj v: Input blobs.
    :param int dim_in: Number of input features.
    :param str fc_name: Name of a fully connected operator.
    :param float loss_scale: For multi-GPU case.
    :return: List with one head node. A softmax node if `phase` is `inference`, else `loss`.
    """
    v = brew.fc(model, v, fc_name, dim_in=dim_in, dim_out=self.num_classes)
    if self.dtype == 'float16':
        print("[INFO] Converting logits from float16 to float32 for softmax layer")
        v = model.net.HalfToFloat(v, v + '_fp32')
    if self.phase == 'inference':
        softmax = brew.softmax(model, v, 'softmax')
        head_nodes = [softmax]
    else:
        softmax, loss = model.SoftmaxWithLoss([v, 'softmax_label'], ['softmax', 'loss'])
        prefix = model.net.Proto().name
        loss = model.Scale(loss, prefix + "_loss", scale=loss_scale)
        head_nodes = [loss]
    return head_nodes
Example #4
Source File: acoustic_model.py From dlcookbook-dlbs with Apache License 2.0 | 6 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    v = 'data'
    dim_in = self.input_shape[0]
    for idx in range(5):
        v = brew.fc(model, v, 'fc%d' % (idx+1), dim_in=dim_in, dim_out=2048)
        v = brew.relu(model, v, 'relu%d' % (idx+1))
        dim_in = 2048
    return self.add_head_nodes(model, v, dim_in, 'fc%d' % (idx+2), loss_scale=loss_scale)
Example #5
Source File: sensor_net.py From dlcookbook-dlbs with Apache License 2.0 | 6 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    v = 'data'
    dim_in = self.input_shape[0]
    for idx in range(3):
        v = brew.fc(model, v, 'fc%d' % (idx+1), dim_in=dim_in, dim_out=1024)
        v = brew.relu(model, v, 'relu%d' % (idx+1))
        dim_in = 1024
    return self.add_head_nodes(model, v, dim_in, 'fc%d' % (idx+2), loss_scale=loss_scale)
Example #6
Source File: classification_no_db_example.py From peters-stuff with GNU General Public License v3.0 | 5 votes |
def create_model(m, device_opts):
    with core.DeviceScope(device_opts):
        conv1 = brew.conv(m, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        pool1 = brew.max_pool(m, conv1, 'pool1', kernel=2, stride=2)
        conv2 = brew.conv(m, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
        pool2 = brew.max_pool(m, conv2, 'pool2', kernel=2, stride=2)
        fc3 = brew.fc(m, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        fc3 = brew.relu(m, fc3, fc3)
        pred = brew.fc(m, fc3, 'pred', 500, 2)
        softmax = brew.softmax(m, pred, 'softmax')
        m.net.AddExternalOutput(softmax)
        return softmax

# add loss and optimizer
Example #7
Source File: demo_caffe2.py From tensorboardX with MIT License | 5 votes |
def AddLeNetModel(model, data):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.

    For each convolutional layer we specify dim_in - the number of input
    channels - and dim_out - the number of output channels. Each Conv and
    MaxPool layer also changes the image size. For example, a kernel of size 5
    reduces each side of an image by 4, while a MaxPool layer with kernel and
    stride sizes equal to 2 divides each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 100 * 4 * 4 is dim_out from the previous layer multiplied by the image size
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
    relu = brew.relu(model, fc3, fc3)
    pred = brew.fc(model, relu, 'pred', 500, 10)
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax
Example #8
Source File: test_caffe2.py From tensorboardX with MIT License | 5 votes |
def test_simple_cnnmodel(self):
    model = cnn.CNNModelHelper("NCHW", name="overfeat")
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
    with core.NameScope("conv1"):
        conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
        relu1 = model.Relu(conv1, conv1)
        pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
    with core.NameScope("classifier"):
        fc = model.FC(pool1, "fc", 4096, 1000)
        pred = model.Softmax(fc, "pred")
        xent = model.LabelCrossEntropy([pred, "label"], "xent")
        loss = model.AveragedLoss(xent, "loss")

    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)

# cnn.CNNModelHelper is deprecated, so we also test with
# model_helper.ModelHelper. The model used in this test is taken from the
# Caffe2 MNIST tutorial. Also use show_simplified=False here.
Example #9
Source File: test_caffe2.py From tensorboardX with MIT License | 5 votes |
def test_simple_model(self):
    model = model_helper.ModelHelper(name="mnist")
    # how come those inputs don't break the forward pass =.=a
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
    with core.NameScope("conv1"):
        conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
        # Image size: 24 x 24 -> 12 x 12
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
        # Image size: 12 x 12 -> 8 x 8
        conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
        # Image size: 8 x 8 -> 4 x 4
        pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    with core.NameScope("classifier"):
        # 100 * 4 * 4 is dim_out from the previous layer multiplied by the image size
        fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
        relu = brew.relu(model, fc3, fc3)
        pred = brew.fc(model, relu, 'pred', 500, 10)
        softmax = brew.softmax(model, pred, 'softmax')
        xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
        # compute the expected loss
        loss = model.AveragedLoss(xent, "loss")
    model.net.RunAllOnMKL()
    model.param_init_net.RunAllOnMKL()
    model.AddGradientOperators([loss], skip=1)
    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)
Example #10
Source File: alexnet_owt.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'

    v = brew.conv(model, v, 'conv1', 3, 64, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.max_pool(model, v, 'pool1', kernel=3, stride=2)

    v = brew.conv(model, v, 'conv2', 64, 192, kernel=5, pad=2, group=1)
    v = brew.relu(model, v, 'relu2')
    v = brew.max_pool(model, v, 'pool2', kernel=3, stride=2)

    v = brew.conv(model, v, 'conv3', 192, 384, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')

    v = brew.conv(model, v, 'conv4', 384, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu4')

    v = brew.conv(model, v, 'conv5', 256, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=3, stride=2)

    v = brew.fc(model, v, 'fc6', dim_in=9216, dim_out=4096)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)

    v = brew.fc(model, v, 'fc7', dim_in=4096, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)

    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example #11
Source File: vgg.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    layers, filters = VGG.specs[self.__model]['specs']
    v = 'data'
    dim_in = self.input_shape[0]
    for i, num in enumerate(layers):
        for j in range(num):
            v = brew.conv(model, v, 'conv%d_%d' % (i+1, j+1), dim_in, filters[i], kernel=3, pad=1)
            v = brew.relu(model, v, 'relu%d_%d' % (i+1, j+1))
            dim_in = filters[i]
        v = brew.max_pool(model, v, 'pool%d' % (i+1), kernel=2, stride=2)

    dim_in = 25088  # 512 * 7 * 7 (output tensor of previous max pool layer)
    for i in range(2):
        v = brew.fc(model, v, 'fc%d' % (6+i), dim_in=dim_in, dim_out=4096)
        v = brew.relu(model, v, 'relu%d' % (6+i))
        v = brew.dropout(model, v, 'drop%d' % (6+i), ratio=0.5, is_test=is_inference)
        dim_in = 4096

    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example #12
Source File: overfeat.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'
    # Layer1
    v = brew.conv(model, v, 'conv1', 3, 96, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.max_pool(model, v, 'pool1', kernel=2, stride=2)
    # Layer2
    v = brew.conv(model, v, 'conv2', 96, 256, kernel=5)
    v = brew.relu(model, v, 'relu2')
    v = brew.max_pool(model, v, 'pool2', kernel=2, stride=2)
    # Layer3
    v = brew.conv(model, v, 'conv3', 256, 512, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')
    # Layer4
    v = brew.conv(model, v, 'conv4', 512, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu4')
    # Layer5
    v = brew.conv(model, v, 'conv5', 1024, 1024, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=2, stride=2)
    # Layer6
    v = brew.fc(model, v, 'fc6', dim_in=6*6*1024, dim_out=3072)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)
    # Layer7
    v = brew.fc(model, v, 'fc7', dim_in=3072, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)

    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example #13
Source File: alexnet.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def forward_pass_builder(self, model, loss_scale=1.0):
    """
    This function adds the operators, layers to the network. It should return
    a list of loss-blobs that are used for computing the loss gradient. This
    function is also passed an internally calculated loss_scale parameter that
    is used to scale your loss to normalize for the number of GPUs.
    Signature: function(model, loss_scale)
    """
    is_inference = self.phase == 'inference'
    v = 'data'

    v = brew.conv(model, v, 'conv1', 3, 96, kernel=11, stride=4)
    v = brew.relu(model, v, 'relu1')
    v = brew.lrn(model, v, 'norm1', size=5, alpha=0.0001, beta=0.75)
    v = brew.max_pool(model, v, 'pool1', kernel=3, stride=2)

    v = brew.conv(model, v, 'conv2', 96, 256, kernel=5, pad=2, group=1)
    v = brew.relu(model, v, 'relu2')
    v = brew.lrn(model, v, 'norm2', size=5, alpha=0.0001, beta=0.75)
    v = brew.max_pool(model, v, 'pool2', kernel=3, stride=2)

    v = brew.conv(model, v, 'conv3', 256, 384, kernel=3, pad=1)
    v = brew.relu(model, v, 'relu3')

    v = brew.conv(model, v, 'conv4', 384, 384, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu4')

    v = brew.conv(model, v, 'conv5', 384, 256, kernel=3, pad=1, group=1)
    v = brew.relu(model, v, 'relu5')
    v = brew.max_pool(model, v, 'pool5', kernel=3, stride=2)

    v = brew.fc(model, v, 'fc6', dim_in=9216, dim_out=4096)
    v = brew.relu(model, v, 'relu6')
    v = brew.dropout(model, v, 'drop6', ratio=0.5, is_test=is_inference)

    v = brew.fc(model, v, 'fc7', dim_in=4096, dim_out=4096)
    v = brew.relu(model, v, 'relu7')
    v = brew.dropout(model, v, 'drop7', ratio=0.5, is_test=is_inference)

    return self.add_head_nodes(model, v, 4096, 'fc8', loss_scale=loss_scale)
Example #14
Source File: CIFAR10_Part2.py From tutorials with Apache License 2.0 | 4 votes |
def Add_Original_CIFAR10_Model(model, data, num_classes, image_height, image_width, image_channels):
    # Convolutional layer 1
    conv1 = brew.conv(model, data, 'conv1', dim_in=image_channels, dim_out=32, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=image_height, width=image_width, kernel=5, stride=1, pad=2)
    # Pooling layer 1
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # ReLU layer 1
    relu1 = brew.relu(model, pool1, 'relu1')
    # Convolutional layer 2
    conv2 = brew.conv(model, relu1, 'conv2', dim_in=32, dim_out=32, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 2
    relu2 = brew.relu(model, conv2, 'relu2')
    # Pooling layer 2
    pool2 = brew.average_pool(model, relu2, 'pool2', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Convolutional layer 3
    conv3 = brew.conv(model, pool2, 'conv3', dim_in=32, dim_out=64, kernel=5, stride=1, pad=2)
    h, w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 3
    relu3 = brew.relu(model, conv3, 'relu3')
    # Pooling layer 3
    pool3 = brew.average_pool(model, relu3, 'pool3', kernel=3, stride=2)
    h, w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Fully connected layers
    fc1 = brew.fc(model, pool3, 'fc1', dim_in=64 * h * w, dim_out=64)
    fc2 = brew.fc(model, fc1, 'fc2', dim_in=64, dim_out=num_classes)
    # Softmax layer
    softmax = brew.softmax(model, fc2, 'softmax')
    return softmax

# ## Test Saved Model From Part 1
#
# ### Construct Model for Testing
#
# The first thing we need is a model helper object that we can attach the lmdb reader to.

# In[4]:

# Create a ModelHelper object with init_params=False