Python cv2.IMREAD_ANYCOLOR Examples

The following are 24 code examples of cv2.IMREAD_ANYCOLOR, an image-reading flag for cv2.imread() and cv2.imdecode(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
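Before the examples, a minimal sketch of what the flag does (the file name below is a placeholder, not from any project): cv2.IMREAD_ANYCOLOR tells the decoder to return the image in whatever color format the file naturally provides (grayscale or 3-channel BGR) rather than forcing a conversion, and it is frequently OR-ed with cv2.IMREAD_ANYDEPTH so that 16-bit and floating-point images keep their original depth, as many of the examples below do.

import cv2

# 'sample.png' is a placeholder; any image file works.
img = cv2.imread('sample.png', cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
if img is None:
    raise OSError('File not found or not recognized by OpenCV')
# Channel count and dtype depend on the file itself: e.g. (h, w) uint8 for a
# grayscale PNG, (h, w, 3) uint8 for color, (h, w) uint16 for a 16-bit depth map.
print(img.shape, img.dtype)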
Example #1
Source File: doc3dwc_loader.py    From DewarpNet with MIT License    7 votes
def __getitem__(self, index):
        im_name = self.files[self.split][index]                # 1/824_8-cp_Page_0503-7Nw0001
        im_path = pjoin(self.root, 'img',  im_name + '.png')  
        lbl_path=pjoin(self.root, 'wc', im_name + '.exr')
        im = m.imread(im_path,mode='RGB')
        im = np.array(im, dtype=np.uint8)
        lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        lbl = np.array(lbl, dtype=np.float64)  # np.float was removed from NumPy; use np.float64
        if 'val' in self.split:
            im, lbl = tight_crop(im/255.0, lbl)
        if self.augmentations:          # used during training; defaults to False for validation
            tex_id=random.randint(0,len(self.txpths)-1)
            txpth=self.txpths[tex_id] 
            tex=cv2.imread(os.path.join(self.root[:-7],txpth)).astype(np.uint8)
            bg=cv2.resize(tex,self.img_size,interpolation=cv2.INTER_NEAREST)
            im,lbl=data_aug(im,lbl,bg)
        if self.is_transform:
            im, lbl = self.transform(im, lbl)
        return im, lbl 
Example #2
Source File: bagdump.py    From udacity-driving-reader with Apache License 2.0    7 votes
def write_image(bridge, outdir, msg, fmt='png'):
    results = {}
    image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
    try:
        if hasattr(msg, 'format') and 'compressed' in msg.format:
            buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
            cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
            if cv_image.shape[2] != 3:
                print("Invalid image %s" % image_filename)
                return results
            results['height'] = cv_image.shape[0]
            results['width'] = cv_image.shape[1]
            # Avoid re-encoding if we don't have to
            if check_format(msg.data) == fmt:
                buf.tofile(image_filename)
            else:
                cv2.imwrite(image_filename, cv_image)
        else:
            cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
            cv2.imwrite(image_filename, cv_image)
    except CvBridgeError as e:
        print(e)
    results['filename'] = image_filename
    return results 
Example #3
Source File: utils.py    From pytorch-serverless with MIT License    6 votes
def open_image(path):
	""" Opens an image using OpenCV given the file path.
	:param path: the file path of the image
	:return: the image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
	"""
	flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
	path = str(path)
	if not os.path.exists(path):
		raise OSError(f'No such file or directory: {path}')
	elif os.path.isdir(path):
		raise OSError(f'Is a directory: {path}')
	else:
		try:
			im = cv2.imread(str(path), flags)
			# check for None before converting; .astype() on a failed read would raise first
			if im is None: raise OSError(f'File not recognized by opencv: {path}')
			im = im.astype(np.float32)/255
			return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
		except Exception as e:
			raise OSError(f'Error handling image at: {path}') from e 
Example #4
Source File: gray_image_detector.py    From image_utility with MIT License    6 votes
def main():
    lg = ListGenerator()
    files_to_check = lg.generate_list(args.dir, ['jpg'])
    print("Total files: {}".format(len(files_to_check)))

    gray_img_list = []
    num_checked = 0
    for each_file in tqdm(files_to_check[54145:54146]):
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        print(each_file)

        # Preview gray images.
        if len(img.shape) != 3:
            gray_img_list.append(each_file)
            cv2.imshow("gray", img)
            if cv2.waitKey(100) == 27:
                break

    print("Total gray images: {}".format(len(gray_img_list))) 
Example #5
Source File: dataset_util.py    From Gated2Depth with MIT License    6 votes
def read_gated_image(base_dir, gta_pass, img_id, data_type, num_bits=10, scale_images=False,
                     scaled_img_width=None, scaled_img_height=None,
                     normalize_images=False):
    gated_imgs = []
    normalizer = 2 ** num_bits - 1.

    for gate_id in range(3):
        gate_dir = os.path.join(base_dir, gta_pass, 'gated%d_10bit' % gate_id)
        img = cv2.imread(os.path.join(gate_dir, img_id + '.png'), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if data_type == 'real':
            img = img[crop_size:(img.shape[0] - crop_size), crop_size:(img.shape[1] - crop_size)]
            img = img.copy()
            img[img > 2 ** 10 - 1] = normalizer
        img = np.float32(img / normalizer)
        gated_imgs.append(np.expand_dims(img, axis=2))

    img = np.concatenate(gated_imgs, axis=2)
    if normalize_images:
        mean = np.mean(img, axis=2, keepdims=True)
        std = np.std(img, axis=2, keepdims=True)
        img = (img - mean) / (std + np.finfo(float).eps)
    if scale_images:
        img = cv2.resize(img, dsize=(scaled_img_width, scaled_img_height), interpolation=cv2.INTER_AREA)
    return np.expand_dims(img, axis=0) 
Example #6
Source File: image.py    From ImageAnalysis with MIT License    6 votes
def load_rgb(self, equalize=False):
        # print("Loading:", self.image_file)
        try:
            img_rgb = cv2.imread(self.image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            if equalize:
                # equalize val (essentially gray scale level)
                clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
                hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
                hue, sat, val = cv2.split(hsv)
                aeq = clahe.apply(val)
                # recombine
                hsv = cv2.merge((hue,sat,aeq))
                # convert back to rgb
                img_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            h, w = img_rgb.shape[:2]
            self.node.setInt('height', h)
            self.node.setInt('width', w)
            return img_rgb

        except:
            print(self.image_file + ":\n" + "  rgb load error: " \
                + str(sys.exc_info()[1]))
            return None 
Example #7
Source File: test_flownet_2015.py    From DF-Net with MIT License    5 votes
def get_flow(path):
    bgr = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    invalid = bgr[:, :, 0] == 0
    out_flow = (bgr[:, :, 2:0:-1].astype('f4') - 2**15) / 64.
    out_flow[invalid] = 0
    return out_flow, bgr[:, :, 0] 
Example #8
Source File: create_vkitti_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_image(filename, rgb=False):
  "Read (h, w, 3) image from .png."
  if not rgb:
    with open(filename, 'rb') as f:
      image = f.read()
    return image

  image = cv2.imread(filename, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
  h, w, _c = image.shape
  assert image.dtype == np.uint8 and _c == 3
  if rgb:
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  return image 
Example #9
Source File: create_vkitti_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_flow(flow_fn):
  "Convert from .png to (h, w, 2) (flow_x, flow_y) float32 array"
  # read png to bgr in 16 bit unsigned short
  bgr = cv2.imread(flow_fn, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
  h, w, _c = bgr.shape
  assert bgr.dtype == np.uint16 and _c == 3
  # b == invalid flow flag == 0 for sky or other invalid flow
  invalid = bgr[..., 0] == 0
  # g,r == flow_y,x normalized by height,width and scaled to [0;2**16 - 1]
  out_flow = 2.0 / (2**16 - 1.0) * bgr[..., 2:0:-1].astype('f4') - 1
  out_flow[..., 0] *= w - 1
  out_flow[..., 1] *= h - 1
  out_flow[invalid] = np.nan # 0 or another value (e.g., np.nan)
  return out_flow 
Example #10
Source File: create_kitti_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_disparity_image(filename):
  "Read (h, w, 1) uint16 KITTI disparity image from .png."
  image = cv2.imread(filename, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
  h, w = image.shape[:2]
  assert image.dtype == np.uint16 and len(image.shape) == 2
  return image 
Example #11
Source File: create_kitti_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_image(filename, rgb=False):
  "Read (h, w, 3) image from .png."
  if not rgb:
    with open(filename, 'rb') as f:
      image = f.read()
    return image
  image = cv2.imread(filename, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
  h, w, _c = image.shape
  assert image.dtype == np.uint8 and _c == 3
  if rgb:
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  return image 
Example #12
Source File: create_kitti_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_flow(flow_fn):
  "Convert from .png to (h, w, 2) (flow_x, flow_y) float32 array"
  # read png to bgr in 16 bit unsigned short
  bgr = cv2.imread(flow_fn, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
  rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
  h, w, _c = rgb.shape
  assert rgb.dtype == np.uint16 and _c == 3
  invalid = rgb[:, :, 2] == 0
  # g,r == flow_y,x normalized by height,width and scaled to [0;2**16 - 1]
  out_flow = (rgb[:, :, :2] - 2 ** 15) / 64.0
  print(out_flow.shape, invalid.shape)
  out_flow[invalid] = np.nan # 0 or another value (e.g., np.nan)
  return out_flow 
Example #13
Source File: ac3d.py    From ImageAnalysis with MIT License    5 votes
def make_textures_opencv(src_dir, project_dir, image_list, resolution=256):
    dst_dir = os.path.join(project_dir, 'models')
    if not os.path.exists(dst_dir):
        print("Notice: creating texture directory =", dst_dir)
        os.makedirs(dst_dir)
    for image in image_list:
        src = image.image_file
        dst = os.path.join(dst_dir, image.name + '.JPG')
        if not os.path.exists(dst):
            print(src)
            src = cv2.imread(src, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            height, width = src.shape[:2]
            # downscale image first
            method = cv2.INTER_AREA  # cv2.INTER_AREA
            scale = cv2.resize(src, (0,0),
                               fx=resolution/float(width),
                               fy=resolution/float(height),
                               interpolation=method)
            # convert to hsv color space
            hsv = cv2.cvtColor(scale, cv2.COLOR_BGR2HSV)
            hue,sat,val = cv2.split(hsv)
            # adaptive histogram equalization on 'value' channel
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
            aeq = clahe.apply(val)
            # recombine
            hsv = cv2.merge((hue,sat,aeq))
            # convert back to rgb
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            cv2.imwrite(dst, result)
            print("Texture %dx%d %s" % (resolution, resolution, dst)) 
Example #14
Source File: convert_to_tfrecords.py    From SSMA with GNU General Public License v3.0    5 votes
def convert(f, record_name):
    count = 0.0
    writer = tf.python_io.TFRecordWriter(record_name)

    for name in f:
        modality1 = cv2.imread(name[0])
        modality2 = cv2.imread(name[1])
        label = cv2.imread(name[2], cv2.IMREAD_ANYCOLOR)
        try:
            assert len(label.shape)==2
        except AssertionError:
            raise AssertionError("Label should be one channel!")
        
        height = modality1.shape[0]
        width = modality1.shape[1]
        modality1 = modality1.tostring()
        modality2 = modality2.tostring()
        label = label.tostring()
        features = {'height':_int64_feature(height),
                    'width':_int64_feature(width),
                    'modality1':_bytes_feature(modality1),
                    'label':_bytes_feature(label),
                    'modality2':_bytes_feature(modality2)
                   }
        example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(example.SerializeToString())

        if (count+1)%1 == 0:
            print('Processed data: {}'.format(count))

        count = count+1 
Example #15
Source File: gen_lmdb_cache.py    From sanet_relocal_demo with GNU General Public License v3.0    5 votes
def read_img(self, img_key):
        img_str = np.frombuffer(self.read_by_key(img_key), dtype=np.uint8)  # np.fromstring is deprecated
        img = np.asarray(cv2.imdecode(img_str, cv2.IMREAD_ANYCOLOR)).reshape((240, 320))
        return img 
Example #16
Source File: convert_to_tfrecords.py    From AdapNet-pp with GNU General Public License v3.0    5 votes
def convert(f, record_name, mean_flag):
    count = 0.0
    writer = tf.python_io.TFRecordWriter(record_name)

    if mean_flag:
        mean = np.zeros(cv2.imread(f[0][0]).shape, np.float32)

    for name in f:
        modality1 = cv2.imread(name[0])
        if mean_flag:
            mean += modality1
        
        label = cv2.imread(name[1], cv2.IMREAD_ANYCOLOR)
        try:
            assert len(label.shape)==2
        except AssertionError:
            raise AssertionError("Label should be one channel!")
            
        height = modality1.shape[0]
        width = modality1.shape[1]
        modality1 = modality1.tostring()
        label = label.tostring()
        features = {'height':_int64_feature(height),
                    'width':_int64_feature(width),
                    'modality1':_bytes_feature(modality1),
                    'label':_bytes_feature(label),
                   }
        example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(example.SerializeToString())

        if (count+1)%1 == 0:
            print('Processed data: {}'.format(count))

        count = count+1 
Example #17
Source File: test_flownet_2012.py    From DF-Net with MIT License    5 votes
def get_flow(path):
    bgr = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    invalid = bgr[:, :, 0] == 0
    out_flow = (bgr[:, :, 2:0:-1].astype('f4') - 2**15) / 64.
    out_flow[invalid] = 0
    return out_flow, bgr[:, :, 0] 
Example #18
Source File: image.py    From DeepDepthDenoising with MIT License    5 votes
def load_image(filename, data_type=torch.float32):
    color_img = numpy.array(cv2.imread(filename, cv2.IMREAD_ANYCOLOR))
    h, w, c = color_img.shape
    color_data = color_img.astype(numpy.float32).transpose(2, 0, 1)
    return torch.from_numpy(
        color_data.reshape(1, c, h, w)        
    ).type(data_type) / 255.0 
Example #19
Source File: doc3dbmnoimgc_loader.py    From DewarpNet with MIT License    5 votes
def __getitem__(self, index):
        im_name = self.files[self.split][index]                 #1/2Xec_Page_453X56X0001.png
        im_path = pjoin(self.altroot, 'img',  im_name + '.png')  
        img_foldr,fname=im_name.split('/')
        recon_foldr='chess48'
        wc_path = pjoin(self.altroot, 'wc' , im_name + '.exr')
        bm_path = pjoin(self.altroot, 'bm' , im_name + '.mat')
        alb_path = pjoin(self.root,'recon',img_foldr,recon_foldr, fname[:-4]+recon_foldr+'0001.png')

        wc = cv2.imread(wc_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        bm = h5.loadmat(bm_path)['bm']
        alb = m.imread(alb_path,mode='RGB')
        if self.is_transform:
            im, lbl = self.transform(wc,bm,alb)
        return im, lbl 
Example #20
Source File: create_npy.py    From vkitti3D-dataset with MIT License    5 votes
def process_frame(image_path: str) -> Tuple[np.ndarray, np.ndarray, str, str]:
    """
    fix given frame
    :param image_path: path to frame which should be fixed
    :return: fixed frame
    """
    seq_no = image_path.split('/')[-3]
    img_no = image_path.split('/')[-1].split('.')[0]

    depth_path = f"{depth_root}/{seq_no}/clone/{img_no}.png"
    semantic_path = f"{labels_root}/{seq_no}/clone/{img_no}.png"

    # BGR -> RGB
    rgb_map = cv2.imread(image_path)[:, :, (2, 1, 0)]

    # convert centimeters to meters
    depth_map = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) / 100.

    # semantic image
    semantic_map = cv2.imread(semantic_path)[:, :, (2, 1, 0)]
    label_map = np.apply_along_axis(lambda r: rgb2label[tuple(r)], 2, semantic_map)

    # backprojection to camera space
    x3 = (xv - center_x) / focal_x * depth_map
    y3 = (yv - center_y) / focal_y * depth_map

    erg = np.stack((depth_map, -x3, -y3), axis=-1).reshape((-1, 3))
    erg = np.hstack((erg, rgb_map.reshape(-1, 3), label_map.reshape(-1, 1)))

    # delete sky points
    erg = distance_cutoff(erg, g_cutoff)

    if g_is_v1:
        return None, erg, seq_no, img_no
    else:
        erg = remove_car_shadows(erg, img_no, g_bb_eps)
        worldspace = transform2worldspace(erg, img_no)
        return worldspace, erg, seq_no, img_no 
Example #21
Source File: convert_to_3_channel.py    From image_utility with MIT License    5 votes
def main():
    # Read in image list to be converted.
    with open('gray.json', 'r') as fp:
        img_list = json.load(fp)
    logging.debug("Total files to be converted: {}".format(len(img_list)))

    # Convert them into 3 channel images.
    for each_file in tqdm(img_list):
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        if len(img.shape) == 3:
            print("Not a gray image: {}".format(each_file))
            continue

        cv2.imshow('preview', img)
        if cv2.waitKey(30) == 27:
            break

        # Do conversion
        img_converted = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # Write to file.
        cv2.imwrite(each_file, img_converted)

        # Check if conversion failed.
        img = cv2.imread(each_file, cv2.IMREAD_ANYCOLOR)
        assert len(img.shape) == 3, "Conversion failed: {}".format(each_file) 
Example #22
Source File: utils.py    From pytorch-serverless with MIT License    5 votes
def open_image_url(url):
	"""  Opens an image using OpenCV from a URL.
	:param url: url path of the image
	:return: the image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
	"""
	flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
	url = str(url)
	resp = urllib.request.urlopen(url)
	try:
		im = np.asarray(bytearray(resp.read()), dtype=np.uint8)
		im = cv2.imdecode(im, flags)
		# check for None before converting; .astype() on a failed decode would raise first
		if im is None: raise OSError(f'File from url not recognized by opencv: {url}')
		im = im.astype(np.float32)/255
		return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # BGR -> RGB, as the docstring promises
	except Exception as e:
		raise OSError(f'Error handling image from url at: {url}') from e 
Example #23
Source File: panda3d.py    From ImageAnalysis with MIT License    4 votes
def make_textures_opencv(src_dir, analysis_dir, image_list, resolution=512):
    dst_dir = os.path.join(analysis_dir, 'models')
    if not os.path.exists(dst_dir):
        log("Notice: creating texture directory =", dst_dir)
        os.makedirs(dst_dir)
    for image in image_list:
        src = image.image_file
        dst = os.path.join(dst_dir, image.name + '.JPG')
        log(src, '->', dst)
        if not os.path.exists(dst):
            src = cv2.imread(src, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            height, width = src.shape[:2]
            # downscale image first
            method = cv2.INTER_AREA  # cv2.INTER_AREA
            scale = cv2.resize(src, (0,0),
                               fx=resolution/float(width),
                               fy=resolution/float(height),
                               interpolation=method)
            do_equalize = False
            if do_equalize:
                # convert to hsv color space
                hsv = cv2.cvtColor(scale, cv2.COLOR_BGR2HSV)
                hue,sat,val = cv2.split(hsv)
                # adaptive histogram equalization on 'value' channel
                clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
                aeq = clahe.apply(val)
                # recombine
                hsv = cv2.merge((hue,sat,aeq))
                # convert back to rgb
                result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            else:
                result = scale
            cv2.imwrite(dst, result)
            qlog("Texture %dx%d %s" % (resolution, resolution, dst))
    # make the dummy.jpg image from the first texture
    #src = os.path.join(dst_dir, image_list[0].image_file)
    src = image_list[0].image_file
    dst = os.path.join(dst_dir, "dummy.jpg")
    log("Dummy:", src, dst)
    if not os.path.exists(dst):
        src = cv2.imread(src, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
        height, width = src.shape[:2]
        # downscale image first
        method = cv2.INTER_AREA  # cv2.INTER_AREA
        resolution = 64
        dummy = cv2.resize(src, (0,0),
                           fx=resolution/float(width),
                           fy=resolution/float(height),
                           interpolation=method)
        cv2.imwrite(dst, dummy)
        qlog("Texture %dx%d %s" % (resolution, resolution, dst)) 
Example #24
Source File: augmentationske2e.py    From DewarpNet with MIT License    4 votes
def data_aug(im, fm, bg):
    im=im/255.0
    bg=bg/255.0
    # im, fm = tight_crop(im, fm) 
    # change background img
    # msk = fm[:, :, 0] > 0
    msk=((fm[:,:,0]!=0)&(fm[:,:,1]!=0)&(fm[:,:,2]!=0)).astype(np.uint8)
    msk = np.expand_dims(msk, axis=2)
    # replace bg
    [fh, fw, _] = im.shape
    chance=random.random()
    if chance > 0.3:
        bg = cv2.resize(bg, (200, 200))
        bg = np.tile(bg, (3, 3, 1))
        bg = bg[: fh, : fw, :]
    elif chance < 0.3 and chance > 0.2:
        c = np.array([random.random(), random.random(), random.random()])
        bg = np.ones((fh, fw, 3)) * c
    else:
        bg=np.zeros((fh, fw, 3))
        msk=np.ones((fh, fw, 3))
    im = bg * (1 - msk) + im * msk
    # jitter color
    im = color_jitter(im, 0.2, 0.2, 0.6, 0.6)
    # im = change_hue_sat(im)
    # im = change_intensity(im)

    # plt.imshow(im)
    # plt.show()
    # plt.imshow(fm)
    # plt.show()
    return im, fm




# def main():
#     tex_id=random.randint(1,5640)
#     with open(os.path.join(root[:-7],'augtexnames.txt'),'r') as f:
#         for i in range(tex_id):
#             txpth=f.readline().strip()

#     for im_name in filenames:
        
#         im_path = os.path.join(root,'img',im_name+'.png')
#         img=cv2.imread(im_path).astype(np.uint8)
        
#         lbl_path = os.path.join(root, 'wc',im_name+'.exr')
#         lbl = cv2.imread(lbl_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)

#         tex=cv2.imread(os.path.join(root[:-7],txpth)).astype(np.uint8)
#         bg=cv2.resize(tex,(img.shape[1],img.shape[0]),interpolation=cv2.INTER_LANCZOS4)

#         img,lbl=data_aug(img,lbl,bg)

# if __name__ == '__main__':
#     main()