Python cv2.vconcat() Examples

The following are 2 code examples of cv2.vconcat(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
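Before the project examples, here is a minimal, self-contained sketch (not taken from either project) of what cv2.vconcat() does: it stacks images of the same width and dtype vertically; cv2.hconcat() is the horizontal counterpart used in Example #2. The synthetic arrays below are placeholders chosen only for illustration.

import cv2
import numpy as np

# Two synthetic images with the same width, channel count, and dtype.
top = np.full((80, 200, 3), 255, dtype=np.uint8)   # white strip, 80 px tall
bottom = np.zeros((120, 200, 3), dtype=np.uint8)   # black strip, 120 px tall

stacked = cv2.vconcat([top, bottom])               # resulting shape: (200, 200, 3)
print(stacked.shape)

# Widths and dtypes must match; otherwise cv2.vconcat raises an error.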
Example #1
Source File: define_new_pose_config.py    From simba with GNU Lesser General Public License v3.0
import glob
import os
import random

import cv2
import imutils
import numpy as np


def define_new_pose_configuration(configName, noAnimals, noBps, Imagepath, BpNameList, animalNumber):
    global ix, iy
    global centerCordStatus

    def draw_circle(event, x, y, flags, param):
        # Mouse callback: on a double left-click, mark the current body part on
        # the overlay. The click y-coordinate is offset by the height of the
        # instruction banner that is concatenated above the image.
        global ix, iy
        global centerCordStatus
        if event == cv2.EVENT_LBUTTONDBLCLK:
            if not centerCordStatus:
                cv2.circle(overlay, (x, y - sideImageHeight), 10, colorList[i], -1)
                cv2.putText(overlay, str(bpNumber + 1), (x + 4, y - sideImageHeight), cv2.FONT_HERSHEY_SIMPLEX, 0.7, colorList[i], 2)
                cv2.imshow('Define pose', overlay)
                centerCordStatus = True
    im = cv2.imread(Imagepath)
    imHeight, imWidth = im.shape[0], im.shape[1]
    if imWidth < 300:
        # Upscale very small images so the annotations remain legible.
        im = imutils.resize(im, width=800)
        imHeight, imWidth = im.shape[0], im.shape[1]
        im = np.uint8(im)
    # Note: this expression always evaluates to 1 / 1.2 (about 0.83).
    fontScale = max(imWidth, imHeight) / (max(imWidth, imHeight) * 1.2)
    cv2.namedWindow('Define pose', cv2.WINDOW_NORMAL)
    overlay = im.copy()
    # Pick one random BGR colour per body part.
    colorList = []
    for _ in range(len(BpNameList)):
        r, g, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
        colorList.append((r, g, b))
    for i in range(len(BpNameList)):
        cv2.namedWindow('Define pose', cv2.WINDOW_NORMAL)
        centerCordStatus = False
        bpNumber = i
        # Banner strip holding the instruction text, shown above the image.
        sideImage = np.zeros((100, imWidth, 3), np.uint8)
        sideImageHeight, sideImageWidth = sideImage.shape[0], sideImage.shape[1]
        cv2.putText(sideImage, 'Double left click ' + BpNameList[i] + '. Press ESC to continue.', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, fontScale, colorList[i], 2)
        ix, iy = -1, -1
        cv2.setMouseCallback('Define pose', draw_circle)
        while True:
            # Stack the instruction banner on top of the annotated image.
            imageConcat = cv2.vconcat([sideImage, overlay])
            cv2.imshow('Define pose', imageConcat)
            k = cv2.waitKey(20) & 0xFF
            if k == 27:  # ESC: move on to the next body part
                cv2.destroyWindow('Define pose')
                break

    # Save a small schematic of the finished pose configuration and append the
    # new configuration's metadata to the bookkeeping CSV files.
    overlay = cv2.resize(overlay, (250, 300))
    imagePath = os.path.join(os.getcwd(), 'pose_configurations', 'schematics')
    namePath = os.path.join(os.getcwd(), 'pose_configurations', 'configuration_names', 'pose_config_names.csv')
    bpPath = os.path.join(os.getcwd(), 'pose_configurations', 'bp_names', 'bp_names.csv')
    noAnimalsPath = os.path.join(os.getcwd(), 'pose_configurations', 'no_animals', 'no_animals.csv')
    # Number the new schematic after the ones already on disk.
    imageNos = len(glob.glob(imagePath + '/*.png'))
    newImageName = 'Picture' + str(imageNos + 1) + '.png'
    imageOutPath = os.path.join(imagePath, newImageName)
    BpNameList = ','.join(BpNameList)

    with open(namePath, 'a') as fd:
        fd.write(configName + '\n')
    with open(bpPath, 'a') as fd:
        fd.write(BpNameList + '\n')
    with open(noAnimalsPath, 'a') as fd:
        fd.write(str(animalNumber) + '\n')
    cv2.imwrite(imageOutPath, overlay)
Example #2
Source File: utils.py    From EndoscopyDepthEstimation-Pytorch with GNU General Public License v3.0
import cv2
import matplotlib.pyplot as plt
import numpy as np

# point_cloud_from_depth, write_point_cloud, and display_depth_map are
# project-local helpers defined or imported elsewhere in this utils.py.


def generate_training_output(colors_1, scaled_depth_maps_1, boundaries, intrinsic_matrices, is_hsv, epoch,
                             results_root):
    # Move the batch tensors to CPU NumPy arrays.
    color_inputs_cpu = colors_1.data.cpu().numpy()
    pred_depths_cpu = scaled_depth_maps_1.data.cpu().numpy()
    boundaries_cpu = boundaries.data.cpu().numpy()
    intrinsics_cpu = intrinsic_matrices.data.cpu().numpy()
    color_imgs = []
    pred_depth_imgs = []

    for j in range(colors_1.shape[0]):
        color_img = color_inputs_cpu[j]
        pred_depth_img = pred_depths_cpu[j]

        # CHW -> HWC, then undo the [-1, 1] normalisation and clamp to [0, 255].
        color_img = np.moveaxis(color_img, source=[0, 1, 2], destination=[2, 0, 1])
        color_img = color_img * 0.5 + 0.5
        color_img[color_img < 0.0] = 0.0
        color_img[color_img > 1.0] = 1.0
        color_img = np.uint8(255 * color_img)
        if is_hsv:
            color_img = cv2.cvtColor(color_img, cv2.COLOR_HSV2BGR_FULL)

        pred_depth_img = np.moveaxis(pred_depth_img, source=[0, 1, 2], destination=[2, 0, 1])

        if j == 0:
            # Write a point cloud only for the first sample of the batch
            boundary = boundaries_cpu[j]
            intrinsic = intrinsics_cpu[j]
            boundary = np.moveaxis(boundary, source=[0, 1, 2], destination=[2, 0, 1])
            point_cloud = point_cloud_from_depth(pred_depth_img, color_img, boundary,
                                                 intrinsic,
                                                 point_cloud_downsampling=1)
            write_point_cloud(
                str(results_root / "point_cloud_epoch_{epoch}_index_{index}.ply".format(epoch=epoch,
                                                                                        index=j)),
                point_cloud)

        color_img = cv2.resize(color_img, dsize=(300, 300))
        pred_depth_img = cv2.resize(pred_depth_img, dsize=(300, 300))
        color_imgs.append(color_img)

        if j == 0:
            # Plot a histogram of the predicted depth values for the first sample.
            histr = cv2.calcHist([pred_depth_img], [0], None, histSize=[100], ranges=[0, 1000])
            plt.plot(histr, color='b')
            plt.xlim([0, 40])
            plt.savefig(
                str(results_root / 'generated_depth_hist_{epoch}.jpg'.format(epoch=epoch)))
            plt.clf()
        display_depth_img = display_depth_map(pred_depth_img)
        pred_depth_imgs.append(display_depth_img)

    # Tile the colour images into one row and the depth maps into another,
    # then stack the two rows vertically into a single summary image.
    final_color = color_imgs[0]
    final_pred_depth = pred_depth_imgs[0]
    for j in range(colors_1.shape[0] - 1):
        final_color = cv2.hconcat((final_color, color_imgs[j + 1]))
        final_pred_depth = cv2.hconcat((final_pred_depth, pred_depth_imgs[j + 1]))

    final = cv2.vconcat((final_color, final_pred_depth))
    cv2.imwrite(str(results_root / 'generated_mask_{epoch}.jpg'.format(epoch=epoch)),
                final)