Python model.roi_crop.functions.roi_crop.RoICropFunction() Examples
The following are 27 code examples of model.roi_crop.functions.roi_crop.RoICropFunction().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module model.roi_crop.functions.roi_crop, or try the search function.
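Before the examples, here is a minimal usage sketch (not taken from any of the projects below). It assumes the compiled roi_crop CUDA extension from a faster-rcnn.pytorch-style checkout is importable, that a GPU is available, and that the legacy Variable/Function API (PyTorch 0.4 era) is in use, as in all the examples on this page. It builds a random sampling grid, reorders its last dimension from (x, y) to (y, x) the same way the examples do, and runs it through RoICropFunction, with F.grid_sample as a reference.

import torch
import torch.nn.functional as F
from torch.autograd import Variable

from model.roi_crop.functions.roi_crop import RoICropFunction

N, C, H, W = 2, 3, 5, 4
feat = Variable(torch.randn(N, C, H, W).cuda())

# Normalized sampling grid in the (x, y) layout expected by F.grid_sample.
grid_xy = Variable(torch.rand(N, H, W, 2).cuda() * 2 - 1)

# RoICropFunction expects the last dimension in (y, x) order, so swap it.
grid_yx = torch.stack(
    [grid_xy.data[:, :, :, 1], grid_xy.data[:, :, :, 0]], 3).contiguous()

crf = RoICropFunction()
out_stn = crf.forward(feat.data.contiguous(), grid_yx)   # custom CUDA kernel
out_ref = F.grid_sample(feat, grid_xy)                   # built-in reference

# The examples below pair these two samplers the same way to cross-check them.
print((out_stn - out_ref.data).abs().max())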
Example #1
Source File: net_utils.py From faster-rcnn.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
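This utility cross-checks the custom RoICrop CUDA kernel against torch.nn.functional.grid_sample: both sample the same random feature map, but the grid handed to RoICropFunction has its last dimension reordered from (x, y) to (y, x), and delta sums the difference between the two gradients with respect to the input; a pdb.set_trace() breakpoint is left in for interactive inspection. The remaining net_utils.py examples below are the same helper as it appears in other detection codebases.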
Example #2
Source File: net_utils.py From DA_Detection with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #3
Source File: net_utils.py From DetNet_pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #4
Source File: net_utils.py From dafrcnn-pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #5
Source File: net_utils.py From dafrcnn-pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #6
Source File: net_utils.py From DIoU-pytorch-detectron with GNU General Public License v3.0

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #7
Source File: net_utils.py From detectron-self-train with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #8
Source File: net_utils.py From Large-Scale-VRD.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #9
Source File: net_utils.py From DivMatch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #10
Source File: net_utils.py From PMFNet with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #11
Source File: net_utils.py From OICR-pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #12
Source File: net_utils.py From PANet with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #13
Source File: net_utils.py From bottom-up-features with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #14
Source File: net_utils.py From CIOD with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:, :, :, 1], grid_clone.data[:, :, :, 0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #15
Source File: net_utils.py From cascade-rcnn_Pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #16
Source File: net_utils.py From Context-aware-ZSR with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #17
Source File: net_utils.py From Detectron.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #18
Source File: net_utils.py From pytorch-lighthead with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #19
Source File: model_builder.py From pcl.pytorch with MIT License

def roi_feature_transform(self, blobs_in, rois, method='RoIPoolF',
                          resolution=7, spatial_scale=1. / 16., sampling_ratio=0):
    """Add the specified RoI pooling method. The sampling_ratio argument
    is supported for some, but not all, RoI transform methods.

    RoIFeatureTransform abstracts away:
      - Use of FPN or not
      - Specifics of the transform method
    """
    assert method in {'RoIPoolF', 'RoICrop', 'RoIAlign'}, \
        'Unknown pooling method: {}'.format(method)

    # Single feature level
    # rois: holds R regions of interest, each is a 5-tuple
    # (batch_idx, x1, y1, x2, y2) specifying an image batch index and a
    # rectangle (x1, y1, x2, y2)
    if method == 'RoIPoolF':
        xform_out = RoIPoolFunction(
            resolution, resolution, spatial_scale)(blobs_in, rois)
    elif method == 'RoICrop':
        grid_xy = net_utils.affine_grid_gen(
            rois, blobs_in.size()[2:], self.grid_size)
        grid_yx = torch.stack(
            [grid_xy.data[:, :, :, 1], grid_xy.data[:, :, :, 0]], 3).contiguous()
        xform_out = RoICropFunction()(blobs_in, Variable(grid_yx).detach())
        if cfg.CROP_RESIZE_WITH_MAX_POOL:
            xform_out = F.max_pool2d(xform_out, 2, 2)
    elif method == 'RoIAlign':
        xform_out = RoIAlignFunction(
            resolution, resolution, spatial_scale, sampling_ratio)(blobs_in, rois)
    return xform_out
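The RoICrop branch above is where RoICropFunction is wired into the generic RoI feature transform. Below is a hedged call sketch, assuming a pcl.pytorch-style model instance (named `model` here, purely hypothetical) that exposes roi_feature_transform with its grid_size and cfg already set up, a CUDA backbone feature map at 1/16 scale, and rois given as (batch_idx, x1, y1, x2, y2) rows in image coordinates, as documented in the comments above.

import torch
from torch.autograd import Variable

feat = Variable(torch.randn(1, 256, 38, 50).cuda())   # backbone feature map at 1/16 scale
rois = Variable(torch.Tensor([
    [0,  16.0,  16.0, 240.0, 180.0],                  # (batch_idx, x1, y1, x2, y2)
    [0, 100.0,  40.0, 300.0, 260.0],
]).cuda())

# The RoICrop path uses self.grid_size rather than `resolution` to size the grid.
pooled = model.roi_feature_transform(
    feat, rois, method='RoICrop', spatial_scale=1. / 16.)
print(pooled.size())   # per-RoI feature crops, e.g. (2, 256, 7, 7) depending on cfg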
Example #20
Source File: net_utils.py From pcl.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #21
Source File: net_utils.py From FPN_Pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #22
Source File: net_utils.py From fpn.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #23
Source File: net_utils.py From RFCN_CoupleNet.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #24
Source File: net_utils.py From FPN-Pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #25
Source File: net_utils.py From Distilling-Object-Detectors with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #26
Source File: net_utils.py From Detectron.pytorch with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()
Example #27
Source File: net_utils.py From pytorch-detect-to-track with MIT License

def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum()