Python detectron.utils.c2.gauss_fill() Examples

The following are 18 code examples of detectron.utils.c2.gauss_fill(), taken from the open-source projects named above each example. Follow the link above an example to view the original project or source file, or browse all available functions and classes of the detectron.utils.c2 module.
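For context, gauss_fill() (and the const_fill() helper that appears alongside it in every example below) is a small convenience wrapper around Caffe2's parameter-initializer convention: model.FC and model.Conv accept weight_init/bias_init as an (operator_name, kwargs) tuple. A minimal sketch of the two helpers, closely following detectron/utils/c2.py:

def gauss_fill(std):
    # Gaussian fill helper to reduce verbosity (sketch of detectron/utils/c2.py).
    return ('GaussianFill', {'std': std})

def const_fill(value):
    # Constant fill helper to reduce verbosity.
    return ('ConstantFill', {'value': value})

So weight_init=gauss_fill(0.01) is shorthand for weight_init=('GaussianFill', {'std': 0.01}), i.e. weights drawn from a zero-mean Gaussian with the given standard deviation, while bias_init=const_fill(0.0) zero-initializes the biases.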
Example #1
Source File: cluster_rcnn_heads.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def add_cluster_rcnn_outputs(model, blob_in, dim):
    """Add Cluster RoI classification and bounding box regression output ops."""
    # cluster Box classification layer
    model.FC(
        blob_in,
        'cluster_cls_score',
        dim,
        2,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cluster_cls_score', 'cluster_cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = 2
    model.FC(
        blob_in,
        'cluster_bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    ) 
Example #2
Source File: fast_rcnn_heads.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        cfg.MODEL.NUM_CLASSES if not cfg.MODEL.Cluster_RCNN_ON else cfg.MODEL.NUM_CLASSES-1,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    # 2 if class-agnostic bbox regression, otherwise NUM_CLASSES
    # (NUM_CLASSES - 1 when Cluster R-CNN is enabled)
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else
        (cfg.MODEL.NUM_CLASSES if not cfg.MODEL.Cluster_RCNN_ON
         else cfg.MODEL.NUM_CLASSES - 1)
    )
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    )

    if cfg.MODEL.CASCADE_ON:
        # add stage parameters to list
        if '1' not in model.stage_params:
            model.stage_params['1'] = []
        for idx in range(-2, 0):
            model.stage_params['1'].append(model.weights[idx])
            model.stage_params['1'].append(model.biases[idx]) 
Example #3
Source File: cascade_rcnn_heads.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def add_cascade_rcnn_outputs(model, blob_in, dim, stage):
    """Add RoI classification and bounding box regression output ops."""
    stage_name = "_{}".format(stage)
    model.FC(
        blob_in,
        "cls_score" + stage_name,
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0),
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax("cls_score" + stage_name, "cls_prob" + stage_name, engine="CUDNN")

    num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    model.FC(
        blob_in,
        "bbox_pred" + stage_name,
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0),
    )
    # add stage parameters to list
    if str(stage) not in model.stage_params:
        model.stage_params[str(stage)] = []
    for idx in range(-2, 0):
        model.stage_params[str(stage)].append(model.weights[idx])
        model.stage_params[str(stage)].append(model.biases[idx])
    return "cls_prob" + stage_name, "bbox_pred" + stage_name 
Example #4
Source File: cascade_rcnn_heads.py    From CBNet with Apache License 2.0
def add_cascade_rcnn_outputs(model, blob_in, dim, stage):
    """Add RoI classification and bounding box regression output ops."""
    stage_name = "_{}".format(stage)
    model.FC(
        blob_in,
        "cls_score" + stage_name,
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0),
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax("cls_score" + stage_name, "cls_prob" + stage_name, engine="CUDNN")

    num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    model.FC(
        blob_in,
        "bbox_pred" + stage_name,
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0),
    )
    # add stage parameters to list
    if str(stage) not in model.stage_params:
        model.stage_params[str(stage)] = []
    for idx in range(-2, 0):
        model.stage_params[str(stage)].append(model.weights[idx])
        model.stage_params[str(stage)].append(model.biases[idx])
    return "cls_prob" + stage_name, "bbox_pred" + stage_name 
Example #5
Source File: fast_rcnn_heads.py    From Detectron-Cascade-RCNN with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    )
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    )

    if cfg.MODEL.CASCADE_ON:
        # add stage parameters to list
        if '1' not in model.stage_params:
            model.stage_params['1'] = []
        for idx in range(-2, 0):
            model.stage_params['1'].append(model.weights[idx])
            model.stage_params['1'].append(model.biases[idx]) 
Example #6
Source File: cascade_rcnn_heads.py    From Detectron-Cascade-RCNN with Apache License 2.0
def add_cascade_rcnn_outputs(model, blob_in, dim, stage):
    """Add RoI classification and bounding box regression output ops."""
    stage_name = "_{}".format(stage)
    model.FC(
        blob_in,
        "cls_score" + stage_name,
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0),
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax("cls_score" + stage_name, "cls_prob" + stage_name, engine="CUDNN")

    num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    model.FC(
        blob_in,
        "bbox_pred" + stage_name,
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0),
    )
    # add stage parameters to list
    if str(stage) not in model.stage_params:
        model.stage_params[str(stage)] = []
    for idx in range(-2, 0):
        model.stage_params[str(stage)].append(model.weights[idx])
        model.stage_params[str(stage)].append(model.biases[idx])
    return "cls_prob" + stage_name, "bbox_pred" + stage_name 
Example #7
Source File: fast_rcnn_heads.py    From CBNet with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    )
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    )

    if cfg.MODEL.CASCADE_ON:
        # add stage parameters to list
        if '1' not in model.stage_params:
            model.stage_params['1'] = []
        for idx in range(-2, 0):
            model.stage_params['1'].append(model.weights[idx])
            model.stage_params['1'].append(model.biases[idx]) 
Example #8
Source File: fast_rcnn_heads.py    From Detectron with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    )
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    ) 
Example #9
Source File: fast_rcnn_heads.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    )
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    ) 
Example #10
Source File: model_builder.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def _add_image_level_classifier(model, blob_in, dim_in, spatial_scale_in):
    from detectron.utils.c2 import const_fill
    from detectron.utils.c2 import gauss_fill
    
    def negateGrad(inputs, outputs):
        outputs[0].feed(inputs[0].data)
    def grad_negateGrad(inputs, outputs):
        scale = cfg.TRAIN.DA_IMG_GRL_WEIGHT
        grad_output = inputs[-1]
        outputs[0].reshape(grad_output.shape)
        outputs[0].data[...] = -1.0*scale*grad_output.data
    
    model.GradientScalerLayer([blob_in], ['da_grl'], -1.0*cfg.TRAIN.DA_IMG_GRL_WEIGHT)
    model.Conv('da_grl', 'da_conv_1', dim_in, 512, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.001), bias_init=const_fill(0.0))    
    model.Relu('da_conv_1', 'da_conv_1')
    model.Conv('da_conv_1', 'da_conv_2',
        512,
        1,
        kernel=1,
        pad=0,
        stride=1,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    )
    if model.train:
        model.net.SpatialNarrowAs(
            ['da_label_wide', 'da_conv_2'], 'da_label'
        )
        loss_da = model.net.SigmoidCrossEntropyLoss(
            ['da_conv_2', 'da_label'],
            'loss_da',
            scale=model.GetLossScale()
        )
        loss_gradient = blob_utils.get_loss_gradients(model, [loss_da])
        model.AddLosses('loss_da')
        return loss_gradient
    else:
        return None 
Example #11
Source File: mask_rcnn_heads.py    From CBNet with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out 
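Note that the convolutional branch above passes weight_init=(fill, {'std': 0.001}) directly; when fill resolves to 'GaussianFill', that tuple is exactly what gauss_fill(0.001) returns, so the two spellings are interchangeable.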
Example #12
Source File: fast_rcnn_heads.py    From KL-Loss with Apache License 2.0
def add_fast_rcnn_outputs(model, blob_in, dim):
    """Add RoI classification and bounding box regression output ops."""
    # Box classification layer
    model.FC(
        blob_in,
        'cls_score',
        dim,
        model.num_classes,
        weight_init=gauss_fill(0.01),
        bias_init=const_fill(0.0)
    )
    if not model.train:  # == if test
        # Only add softmax when testing; during training the softmax is combined
        # with the label cross entropy loss for numerical stability
        model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
    # Box regression layer
    num_bbox_reg_classes = (
        2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
    )
    
    model.FC(
        blob_in,
        'bbox_pred',
        dim,
        num_bbox_reg_classes * 4,
        weight_init=gauss_fill(0.001),
        bias_init=const_fill(0.0)
    )
    if cfg.PRED_STD:
        if cfg.PRED_STD_LOG:
            bias = 0.
            model.FC(
                blob_in, #'blob_in0'
                'bbox_pred_std',
                dim,
                num_bbox_reg_classes * 4,
                weight_init=gauss_fill(0.0001),
                bias_init=const_fill(bias)
            )
            model.net.Copy('bbox_pred_std', 'bbox_pred_std_abs')
            #model.Relu('bbox_pred_std', 'bbox_pred_std_abs')
            #model.net.Sigmoid('bbox_pred_std', 'bbox_pred_std_abs') 
Example #13
Source File: mask_rcnn_heads.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out 
Example #14
Source File: model_builder.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def _add_instance_level_classifier(model, blob_in, dim_in, spatial_scale):
    from detectron.utils.c2 import const_fill
    from detectron.utils.c2 import gauss_fill

    def negateGrad(inputs, outputs):
        outputs[0].feed(inputs[0].data)
    def grad_negateGrad(inputs, outputs):
        scale = cfg.TRAIN.DA_INS_GRL_WEIGHT
        grad_output = inputs[-1]
        outputs[0].reshape(grad_output.shape)
        outputs[0].data[...] = -1.0*scale*grad_output.data
    model.RoIFeatureTransform(
        blob_in,
        'da_pool5',
        blob_rois='da_rois',
        method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
        resolution=7,
        sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale
    )
    model.FCShared('da_pool5', 'da_fc6', dim_in * 7 * 7, 4096, 
        weight='fc6_w', bias='fc6_b')
    model.Relu('da_fc6', 'da_fc6')
    model.FCShared('da_fc6', 'da_fc7', 4096, 4096,
        weight='fc7_w', bias='fc7_b')
    da_blobs = model.Relu('da_fc7', 'da_fc7')
    model.GradientScalerLayer([da_blobs], ['dc_grl'], -1.0*cfg.TRAIN.DA_INS_GRL_WEIGHT)
    model.FC('dc_grl', 'dc_ip1', 4096, 1024,
             weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
    model.Relu('dc_ip1', 'dc_relu_1')
    model.Dropout('dc_relu_1', 'dc_drop_1', ratio=0.5, is_test=False)

    model.FC('dc_drop_1', 'dc_ip2', 1024, 1024,
             weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
    model.Relu('dc_ip2', 'dc_relu_2')
    model.Dropout('dc_relu_2', 'dc_drop_2', ratio=0.5, is_test=False)

    dc_ip3 = model.FC('dc_drop_2', 'dc_ip3', 1024, 1,
                      weight_init=gauss_fill(0.05), bias_init=const_fill(0.0))
    loss_gradient = None
    if model.train:
        dc_loss = model.net.SigmoidCrossEntropyLoss(
            [dc_ip3, 'dc_label'],
            'loss_dc',
            scale=model.GetLossScale()
        )
        loss_gradient = blob_utils.get_loss_gradients(model, [dc_loss])
        model.AddLosses('loss_dc')
    return loss_gradient, da_blobs, 4096 
Example #15
Source File: mask_rcnn_heads.py    From Detectron with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out 
Example #16
Source File: mask_rcnn_heads.py    From Detectron-Cascade-RCNN with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out 
Example #17
Source File: mask_rcnn_heads.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out 
Example #18
Source File: mask_rcnn_heads.py    From KL-Loss with Apache License 2.0
def add_mask_rcnn_outputs(model, blob_in, dim):
    """Add Mask R-CNN specific outputs: either mask logits or probs."""
    num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1

    if cfg.MRCNN.USE_FC_OUTPUT:
        # Predict masks with a fully connected layer (ignore 'fcn' in the blob
        # name)
        dim_fc = int(dim * (cfg.MRCNN.RESOLUTION / cfg.MRCNN.UPSAMPLE_RATIO)**2)
        blob_out = model.FC(
            blob_in,
            'mask_fcn_logits',
            dim_fc,
            num_cls * cfg.MRCNN.RESOLUTION**2,
            weight_init=gauss_fill(0.001),
            bias_init=const_fill(0.0)
        )
    else:
        # Predict mask using Conv

        # Use GaussianFill for class-agnostic mask prediction; fills based on
        # fan-in can be too large in this case and cause divergence
        fill = (
            cfg.MRCNN.CONV_INIT
            if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
        )
        blob_out = model.Conv(
            blob_in,
            'mask_fcn_logits',
            dim,
            num_cls,
            kernel=1,
            pad=0,
            stride=1,
            weight_init=(fill, {'std': 0.001}),
            bias_init=const_fill(0.0)
        )

        if cfg.MRCNN.UPSAMPLE_RATIO > 1:
            blob_out = model.BilinearInterpolation(
                'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
                cfg.MRCNN.UPSAMPLE_RATIO
            )

    if not model.train:  # == if test
        blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')

    return blob_out