Python skimage.measure.compare_psnr() Examples

The following are 23 code examples of skimage.measure.compare_psnr(), collected from open-source projects. The source file, project, and license for each snippet are noted above it. You may also want to check out all available functions and classes of the module skimage.measure.
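Before the project examples, here is a minimal, self-contained sketch of a typical compare_psnr call. The image arrays and noise level below are illustrative only. Note that compare_psnr is deprecated in newer scikit-image releases and has since been removed in favor of skimage.metrics.peak_signal_noise_ratio; the fallback import accounts for both cases.

import numpy as np

# compare_psnr lives in skimage.measure in older releases; newer releases
# expose the same functionality as skimage.metrics.peak_signal_noise_ratio.
try:
    from skimage.measure import compare_psnr
except ImportError:
    from skimage.metrics import peak_signal_noise_ratio as compare_psnr

# Illustrative data: a clean float image in [0, 1] and a noisy copy of it.
rng = np.random.default_rng(0)
clean = rng.random((64, 64)).astype(np.float32)
noisy = np.clip(clean + rng.normal(0.0, 0.05, clean.shape), 0.0, 1.0).astype(np.float32)

# data_range is the value span of the images (1.0 for float images in [0, 1]).
print(compare_psnr(clean, noisy, data_range=1.0))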
Example #1
Source File: evaluate.py    From fastMRI with MIT License
def psnr(gt, pred):
    """ Compute Peak Signal to Noise Ratio metric (PSNR) """
    return compare_psnr(gt, pred, data_range=gt.max()) 
Example #2
Source File: compare_imgs.py    From imgcomp-cvpr with GNU General Public License v3.0
def compare(inp_img, out_img, calc_ssim=True, calc_msssim=True, calc_psnr=True):
    inp_img = _read_if_not_array(inp_img)
    out_img = _read_if_not_array(out_img)

    assert inp_img.shape == out_img.shape

    def get_ssim():
        return compare_ssim(inp_img, out_img, multichannel=True, gaussian_weights=True, sigma=1.5)

    def get_msssim():
        return MultiScaleSSIM(make_batched(inp_img), make_batched(out_img))

    def get_psnr():
        return compare_psnr(inp_img, out_img)

    def _run_if(cond, fn):
        return fn() if cond else None

    return _run_if(calc_ssim, get_ssim), _run_if(calc_msssim, get_msssim), _run_if(calc_psnr, get_psnr) 
Example #3
Source File: metric_gen.py    From PSSR with BSD 3-Clause "New" or "Revised" License
def slice_process(x1, x2, y):
    if len(x1.shape) == 3: x1 = x1[0,:,:]
    if len(x2.shape) == 3: x2 = x2[0,:,:]
    if len(y.shape) == 3: y = y[0,:,:]

    # a scaled and shifted version of pred and bilinear
    x1 = 2*x1 + 100
    x2 = 2*x2 + 100

    # normalize/scale images
    (y_norm1, x1_norm) = norm_minmse(y, x1)
    (y_norm2, x2_norm) = norm_minmse(y, x2)

    # calculate PSNR and SSIM of the normalized/scaled images
    psnr1 = compare_psnr(y_norm1, x1_norm, data_range=1.)
    psnr2 = compare_psnr(y_norm2, x2_norm, data_range=1.)
    ssim1 = compare_ssim(y_norm1, x1_norm, data_range=1.)
    ssim2 = compare_ssim(y_norm2, x2_norm, data_range=1.)
    return psnr1, ssim1, psnr2, ssim2, y_norm1, x1_norm, y_norm2, x2_norm 
Example #4
Source File: utils.py    From Learning_to_See_in_the_Dark_PyTorch with MIT License
def get_psnr(im1, im2):
    return compare_psnr(im1, im2, data_range=255) 
Example #5
Source File: utilty.py    From dcscn-super-resolution with MIT License
def compute_psnr_and_ssim(image1, image2, border_size=0):
    """
    Computes PSNR and SSIM index from 2 images.
    Values are rounded and clipped to 0 - 255, then 'border_size' pixels are shaved from each border.
    """
    if len(image1.shape) == 2:
        image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)
    if len(image2.shape) == 2:
        image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)

    if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:
        return None

    image1 = trim_image_as_file(image1)
    image2 = trim_image_as_file(image2)

    if border_size > 0:
        image1 = image1[border_size:-border_size, border_size:-border_size, :]
        image2 = image2[border_size:-border_size, border_size:-border_size, :]

    psnr = compare_psnr(image1, image2, data_range=255)
    ssim = compare_ssim(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,
                        sigma=1.5, data_range=255)
    return psnr, ssim 
Example #6
Source File: metrics.py    From proSR with GNU General Public License v3.0
def eval_psnr_and_ssim(im1, im2, scale):
    im1_t = np.atleast_3d(img_as_float(im1))
    im2_t = np.atleast_3d(img_as_float(im2))

    if im1_t.shape[2] == 1 or im2_t.shape[2] == 1:
        im1_t = im1_t[..., 0]
        im2_t = im2_t[..., 0]

    else:
        im1_t = rgb2ycbcr(im1_t)[:, :, 0:1] / 255.0
        im2_t = rgb2ycbcr(im2_t)[:, :, 0:1] / 255.0

    if scale > 1:
        im1_t = mod_crop(im1_t, scale)
        im2_t = mod_crop(im2_t, scale)

        # NOTE conventionally, crop scale+6 pixels (EDSR, VDSR etc)
        im1_t = crop_boundaries(im1_t, int(scale) + 6)
        im2_t = crop_boundaries(im2_t, int(scale) + 6)

    psnr_val = compare_psnr(im1_t, im2_t)
    ssim_val = compare_ssim(
        im1_t,
        im2_t,
        win_size=11,
        gaussian_weights=True,
        multichannel=True,
        data_range=1.0,
        K1=0.01,
        K2=0.03,
        sigma=1.5)

    return psnr_val, ssim_val 
Example #7
Source File: cnn_train.py    From Evolutionary-Autoencoders with MIT License
def calcPSNR(self, image1, image2):
        image1 *= 255
        image2 *= 255
        image1[image1>255] = 255
        image1[image1<0] = 0
        image2[image2>255] = 255
        image2[image2<0] = 0
        return compare_psnr(image1, image2, data_range=255)

    # For validation/test 
Example #8
Source File: cnn_train.py    From Evolutionary-Autoencoders with MIT License
def gaussian_noise(self, inp, mean, std):
        noise = Variable(inp.data.new(inp.size()).normal_(mean, std))
        noise = torch.div(noise, 255.0)
        return inp + noise

    # calc PSNR using skimage.measure.compare_psnr
Example #9
Source File: cnn_train.py    From Evolutionary-Autoencoders with MIT License
def calcPSNR(self, image1, image2):
        image1 *= 255
        image2 *= 255
        image1[image1>255] = 255
        image1[image1<0] = 0
        image2[image2>255] = 255
        image2[image2<0] = 0
        return compare_psnr(image1, image2, data_range=255) 
Example #10
Source File: test_bm3d.py    From pybm3d with GNU General Public License v3.0
def test_bm3d_color(color_noise_data):
    """Tests BM3D color image denoising."""
    img, noisy_img, noise_std_dev = color_noise_data

    out = pybm3d.bm3d.bm3d(noisy_img, noise_std_dev)

    noise_psnr = compare_psnr(img, noisy_img)
    out_psnr = compare_psnr(img, out)

    assert out_psnr > noise_psnr 
Example #11
Source File: test_bm3d.py    From pybm3d with GNU General Public License v3.0
def test_bm3d(noise_data):
    """Tests BM3D grayscale image denoising."""
    img, noisy_img, noise_std_dev = noise_data

    out = pybm3d.bm3d.bm3d(noisy_img, noise_std_dev)

    noise_psnr = compare_psnr(img, noisy_img)
    out_psnr = compare_psnr(img, out)

    assert out_psnr > noise_psnr 
Example #12
Source File: benchmark.py    From tensorflow-SRGAN with MIT License
def PSNR(self, gt, pred):
    #gt = gt.astype(np.float64)
    #pred = pred.astype(np.float64)
    #mse = np.mean((pred - gt)**2)
    #psnr = 10*np.log10(255*255/mse)
    #return psnr
    return compare_psnr(gt, pred, data_range=255) 
Example #13
Source File: model.py    From face_inpainting with MIT License
def get_psnr(self, img_true, img_gen):
        return compare_psnr(img_true.astype(np.float32), img_gen.astype(np.float32)) 
Example #14
Source File: hsi_evaluate.py    From tensorflow-exercise with Apache License 2.0
def mpsnr(x_true, x_pred):
    """

    :param x_true: 高光谱图像:格式:(H, W, C)
    :param x_pred: 高光谱图像:格式:(H, W, C)
    :return: 计算原始高光谱数据与重构高光谱数据的均方误差
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    n_bands = x_true.shape[2]
    p = [compare_psnr(x_true[:, :, k], x_pred[:, :, k], data_range=np.max(x_true[:, :, k])) for k in range(n_bands)]
    return np.mean(p) 
Example #15
Source File: evaluate.py    From sigmanet with MIT License
def psnr(gt, pred):
    """ Compute Peak Signal to Noise Ratio metric (PSNR) """
    return compare_psnr(gt, pred, data_range=gt.max()) 
Example #16
Source File: metrics.py    From inpainting-gmcnn-keras with MIT License
def psnr(y_true, y_pred, max_value=2):
  psnr_metric = measure.compare_psnr(y_true, y_pred, max_value)
  return psnr_metric 
Example #17
Source File: solver.py    From CARN-pytorch with MIT License
def psnr(im1, im2):
    def im2double(im):
        min_val, max_val = 0, 255
        out = (im.astype(np.float64)-min_val) / (max_val-min_val)
        return out
        
    im1 = im2double(im1)
    im2 = im2double(im2)
    psnr = measure.compare_psnr(im1, im2, data_range=1)
    return psnr 
Example #18
Source File: metrics.py    From vaapi-fits with BSD 3-Clause "New" or "Revised" License
def __compare_psnr(planes):
  a, b = planes
  if (a == b).all():
    # Avoid "Warning: divide by zero encountered in double_scalars" generated
    # by skimage.measure.compare_psnr when a and b are exactly the same.
    return 100
  return skimage_psnr(a, b) 
Example #19
Source File: data_utils.py    From FastDVDNet with MIT License
def calculate_psnr(output_img, target_img):
    target_tf = torch2numpy(target_img)
    output_tf = torch2numpy(output_img)
    psnr = 0.0
    n = 0.0
    for im_idx in range(output_tf.shape[0]):
        psnr += compare_psnr(target_tf[im_idx, ...],
                                             output_tf[im_idx, ...],
                                             data_range=255)
        n += 1.0
    return psnr / n 
Example #20
Source File: main.py    From DnCNN-keras with MIT License
def test(model):
    
    print('Start to test on {}'.format(args.test_dir))
    out_dir = save_dir + args.test_dir.split('/')[-1] + '/'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
            
    name = []
    psnr = []
    ssim = []
    file_list = glob.glob('{}/*.png'.format(args.test_dir))
    for file in file_list:
        # read image
        img_clean = np.array(Image.open(file), dtype='float32') / 255.0
        img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape)
        img_test = img_test.astype('float32')
        # predict
        x_test = img_test.reshape(1, img_test.shape[0], img_test.shape[1], 1) 
        y_predict = model.predict(x_test)
        # calculate numeric metrics
        img_out = y_predict.reshape(img_clean.shape)
        img_out = np.clip(img_out, 0, 1)
        psnr_noise, psnr_denoised = compare_psnr(img_clean, img_test), compare_psnr(img_clean, img_out)
        ssim_noise, ssim_denoised = compare_ssim(img_clean, img_test), compare_ssim(img_clean, img_out)
        psnr.append(psnr_denoised)
        ssim.append(ssim_denoised)
        # save images
        filename = file.split('/')[-1].split('.')[0]    # get the name of image file
        name.append(filename)
        img_test = Image.fromarray((img_test*255).astype('uint8'))
        img_test.save(out_dir+filename+'_sigma'+'{}_psnr{:.2f}.png'.format(args.sigma, psnr_noise))
        img_out = Image.fromarray((img_out*255).astype('uint8')) 
        img_out.save(out_dir+filename+'_psnr{:.2f}.png'.format(psnr_denoised))
    
    psnr_avg = sum(psnr)/len(psnr)
    ssim_avg = sum(ssim)/len(ssim)
    name.append('Average')
    psnr.append(psnr_avg)
    ssim.append(ssim_avg)
    print('Average PSNR = {0:.2f}, SSIM = {1:.2f}'.format(psnr_avg, ssim_avg))
    
    pd.DataFrame({'name':np.array(name), 'psnr':np.array(psnr), 'ssim':np.array(ssim)}).to_csv(out_dir+'/metrics.csv', index=True) 
Example #21
Source File: replicate.py    From Dehaze-GAN with MIT License
def test(self, ckpt_dir):

        # Weight values as used in the paper.
        total_ssim = 0
        total_psnr = 0
        psnr_weight = 1/20
        ssim_weight = 1

        self.A_test = np.load('A_test.npy') #Valset 2
        self.B_test = np.load('B_test.npy')
        self.A_test = (self.A_test/255)*2 - 1

        print('Building Model')
        self.build()
        print('Model Built')

        with tf.Session() as sess:

            print('Loading Checkpoint')
            self.ckpt = tf.train.latest_checkpoint(ckpt_dir, latest_filename=None)
            self.saver.restore(sess, self.ckpt)
            print('Checkpoint Loaded')

            for i in range(len(self.A_test)):

                x = np.expand_dims(self.A_test[i], axis = 0)
                feed = {self.RealA :x}
                img = self.FakeB.eval(feed_dict = feed)

                print('Test image', i, end = '\r')

                A_img = (((img[0] + 1)/2) * 255).astype(np.uint8)
                B_img = (self.B_test[i]).astype(np.uint8)

                psnr = compare_psnr(B_img, A_img)
                s = ssim(B_img, A_img, multichannel = True)

                total_psnr = total_psnr + psnr
                total_ssim = total_ssim + s

            average_psnr = total_psnr / len(self.A_test)
            average_ssim = total_ssim / len(self.A_test)

            score = average_psnr * psnr_weight + average_ssim * ssim_weight

            line = 'Score: %.6f, PSNR: %.6f, SSIM: %.6f' %(score, average_psnr, average_ssim)
            print(line) 
Example #22
Source File: model.py    From Dehaze-GAN with MIT License
def test(self, input_dir, GT_dir):

        total_ssim = 0
        total_psnr = 0
        psnr_weight = 1/20
        ssim_weight = 1

        GT_list = os.listdir(GT_dir)
        input_list = os.listdir(input_dir)

        print('Loading Model')
        self.build_model()
        print('Model Loaded')

        with tf.Session() as self.sess:

            init_op = tf.global_variables_initializer()
            self.sess.run(init_op)

            print('Loading Checkpoint')
            ckpt = tf.train.latest_checkpoint(self.ckpt_dir)
            self.saver.restore(self.sess, ckpt)
            self.step = tf.train.get_or_create_global_step()
            print('Checkpoint Loaded')

            for i, (img_file, GT_file) in enumerate(zip(input_list, GT_list), 1):

                img = cv2.imread(os.path.join(input_dir, img_file), 1)
                GT = cv2.imread(os.path.join(GT_dir, GT_file), 1).astype(np.uint8)

                print('Test image', i, end = '\r')

                img = ((np.expand_dims(img, axis = 0) / 255) * 2) - 1
                feed_dict = {self.RealA: img, self.isTrain: False}
                generated_B = self.FakeB.eval(feed_dict = feed_dict)
                generated_B = (((generated_B[0] + 1)/2) * 255).astype(np.uint8)        

                psnr = compare_psnr(GT, generated_B)
                ssim = compare_ssim(GT, generated_B, multichannel = True)

                total_psnr = total_psnr + psnr
                total_ssim = total_ssim + ssim

            average_psnr = total_psnr / len(GT_list)
            average_ssim = total_ssim / len(GT_list)

            score = average_psnr * psnr_weight + average_ssim * ssim_weight

            line = 'Score: %.6f, PSNR: %.6f, SSIM: %.6f' %(score, average_psnr, average_ssim)
            print(line) 
Example #23
Source File: eval_Vimeo90K.py    From FeatureFlow with MIT License
def validate():
    # For details see training.
    psnr = 0
    ie = 0
    tloss = 0

    with torch.no_grad():
        for testIndex, testData in tqdm(enumerate(testloader, 0)):
            frame0, frameT, frame1 = testData

            img0 = frame0.cuda()
            img1 = frame1.cuda()
            IFrame = frameT.cuda()

            img0_e = torch.cat([img0, torch.tanh(bdcn(img0)[0])], dim=1)
            img1_e = torch.cat([img1, torch.tanh(bdcn(img1)[0])], dim=1)
            IFrame_e = torch.cat([IFrame, torch.tanh(bdcn(IFrame)[0])], dim=1)
            _, _, ref_imgt = structure_gen((img0_e, img1_e, IFrame_e))
            loss, MSE_val, IE, imgt = detail_enhance((img0, img1, IFrame, ref_imgt))
            imgt = torch.clamp(imgt, max=1., min=-1.)
            IFrame_np = IFrame.squeeze(0).cpu().numpy()
            imgt_np = imgt.squeeze(0).cpu().numpy()
            imgt_png = np.uint8(((imgt_np + 1.0) / 2.0).transpose(1, 2, 0)[:, :, ::-1] * 255)
            IFrame_png = np.uint8(((IFrame_np + 1.0) /2.0).transpose(1, 2, 0)[:, :, ::-1] * 255)
            imgpath = args.imgpath + '/' + str(testIndex)
            if not os.path.isdir(imgpath):
                os.system('mkdir -p %s' % imgpath)
            cv2.imwrite(imgpath + '/imgt.png', imgt_png)
            cv2.imwrite(imgpath + '/IFrame.png', IFrame_png)

            PSNR = compare_psnr(IFrame_np, imgt_np, data_range=2)
            print('PSNR:', PSNR)

            loss = torch.mean(loss)
            MSE_val = torch.mean(MSE_val)

            if testIndex % 100 == 99:
                vImg = torch.cat([revNormalize(frame0[0]).unsqueeze(0), revNormalize(frame1[0]).unsqueeze(0),
                                  revNormalize(imgt.cpu()[0]).unsqueeze(0), revNormalize(frameT[0]).unsqueeze(0),
                                  revNormalize(ref_imgt.cpu()[0]).unsqueeze(0)],
                                 dim=0)


                vImg = torch.clamp(vImg, max=1., min=0)
                vis.images(vImg, win='vImage', env=args.visdom_env, nrow=2, opts={'title': 'visual_image'})

            # psnr
            tloss += loss.item()

            psnr += PSNR
            ie += IE

    return (psnr / len(testloader)), (tloss / len(testloader)), MSE_val, (ie / len(testloader))


# --Initialization--