Python cv2.setNumThreads() Examples
The following are 17 code examples of cv2.setNumThreads().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: base_dataset.py From pytorch_segmentation with MIT License | 6 votes |
def __init__(self, root, split, mean, std, base_size=None, augment=True, val=False,
             crop_size=321, scale=True, flip=True, rotate=False, blur=False, return_id=False):
    """Store segmentation-dataset settings, index the files, and build transforms."""
    self.root = root
    self.split = split
    self.mean = mean
    self.std = std
    self.augment = augment
    self.crop_size = crop_size
    if self.augment:
        # These settings only matter when augmentation is enabled.
        self.base_size = base_size
        self.scale = scale
        self.flip = flip
        self.rotate = rotate
        self.blur = blur
    self.val = val

    # Discover the sample list for this split.
    self.files = []
    self._set_files()

    self.to_tensor = transforms.ToTensor()
    self.normalize = transforms.Normalize(mean, std)
    self.return_id = return_id

    # Keep OpenCV single-threaded so DataLoader worker processes don't deadlock.
    cv2.setNumThreads(0)
Example #2
Source File: fit_harn.py From netharn with Apache License 2.0 | 6 votes |
def _check_thread_safety(harn):
    """
    Warn when OpenCV's internal thread pool could deadlock DataLoader workers.

    References:
        https://github.com/pytorch/pytorch/issues/1355
    """
    import cv2
    active_loaders = [loader for loader in harn.loaders.values() if loader is not None]
    n_workers = max(loader.num_workers for loader in active_loaders)
    if n_workers > 1:
        n_threads = cv2.getNumThreads()
        if n_threads > 1:
            msg = ('OpenCV threadcount of {} is non-zero and a DataLoader '
                   'is using {} workers. This may cause deadlocks '
                   'To be safe use cv2.setNumThreads(0)').format(
                       n_threads, n_workers)
            warnings.warn(msg, RuntimeWarning)
            harn.warn(msg)
Example #3
Source File: api.py From netharn with Apache License 2.0 | 6 votes |
def configure_hacks(config=None, **kw):
    """
    Configures hacks to fix global settings in external modules

    Args:
        config (dict, optional): expected to contain the key "workers" with an
            integer value equal to the number of dataloader processes.
        **kw: can also be used to specify config items

    Modules we currently hack:
        * cv2 - fix thread count
    """
    # A mutable default argument ({}) is shared across calls; use the None
    # sentinel instead (backward-compatible: the effective default is still {}).
    if config is None:
        config = {}
    config = _update_defaults(config, kw)
    if config['workers'] > 0:
        # OpenCV's internal threads can deadlock multiprocessing DataLoader
        # workers; see https://github.com/pytorch/pytorch/issues/1355
        import cv2
        cv2.setNumThreads(0)
Example #4
Source File: run_training.py From pytracking with GNU General Public License v3.0 | 6 votes |
def run_training(train_module, train_name, cudnn_benchmark=True):
    """Run a train scripts in train_settings.

    args:
        train_module: Name of module in the "train_settings/" folder.
        train_name: Name of the train settings file.
        cudnn_benchmark: Use cudnn benchmark or not (default is True).
    """
    # This is needed to avoid strange crashes related to opencv
    cv.setNumThreads(0)

    torch.backends.cudnn.benchmark = cudnn_benchmark

    print('Training: {} {}'.format(train_module, train_name))

    # Assemble the per-run settings object.
    settings = ws_settings.Settings()
    settings.module_name = train_module
    settings.script_name = train_name
    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)

    # Import the requested settings module and hand control to its run().
    settings_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))
    run_fn = getattr(settings_module, 'run')
    run_fn(settings)
Example #5
Source File: base.py From deeplab-pytorch with MIT License | 6 votes |
def __init__(
    self,
    root,
    split,
    ignore_label,
    mean_bgr,
    augment=True,
    base_size=None,
    crop_size=321,
    scales=(1.0,),
    flip=True,
):
    """Base segmentation dataset: store config, index files, disable cv2 threads.

    Args:
        root: dataset root directory.
        split: dataset split name.
        ignore_label: label value excluded from the loss.
        mean_bgr: per-channel BGR mean subtracted from images.
        augment (bool): whether augmentation settings apply.
        base_size: base resize size used by augmentation (may be None).
        crop_size (int): square crop size.
        scales (tuple of float): candidate rescale factors. NOTE: the original
            default was ``(1.0)`` which is a plain float (missing trailing
            comma), not a one-element tuple as the plural name implies; fixed
            to ``(1.0,)`` so iterating over ``self.scales`` works.
        flip (bool): whether horizontal flipping is enabled.
    """
    self.root = root
    self.split = split
    self.ignore_label = ignore_label
    self.mean_bgr = np.array(mean_bgr)
    self.augment = augment
    self.base_size = base_size
    self.crop_size = crop_size
    self.scales = scales
    self.flip = flip

    # Discover the sample list for this split.
    self.files = []
    self._set_files()

    # Keep OpenCV single-threaded to avoid DataLoader worker deadlocks.
    cv2.setNumThreads(0)
Example #6
Source File: data_loader.py From uois with GNU General Public License v3.0 | 5 votes |
def __getitem__(self, idx):
    """Load one (image, binary mask) pair and return its transformed crops."""
    # some hack to make sure pyTorch doesn't deadlock. Found at
    # https://github.com/pytorch/pytorch/issues/1355. Seems to work for me
    cv2.setNumThreads(0)

    # Get label filename
    label_filename = self.starts[idx]
    label = cv2.imread(str(os.path.join(self.base_dir, 'Labels', label_filename)))  # Shape: [H x W x 3]
    label = label[..., 0] == 255  # Turn it into a {0,1} binary mask with shape: [H x W]
    label = label.astype(np.uint8)

    # find corresponding image file
    img_file = label_filename.split('_')[0] + '.jpg'
    img = cv2.imread(str(os.path.join(self.base_dir, 'Images', img_file)))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # These might not be the same size. resize them to the smaller one
    if label.shape[0] < img.shape[0]:
        target_size = label.shape[::-1]  # (W, H)
    else:
        target_size = img.shape[:2][::-1]
    label = cv2.resize(label, target_size)
    img = cv2.resize(img, target_size)

    img_crop, morphed_label_crop, label_crop = self.transform(img, label)

    return {
        'rgb': img_crop,
        'initial_masks': morphed_label_crop,
        'labels': label_crop,
    }
Example #7
Source File: cocostuff.py From SPNet with MIT License | 5 votes |
def __init__(
    self,
    root,
    split="train",
    base_size=513,
    crop_size=321,
    mean=(104.008, 116.669, 122.675),
    scale=(0.5, 1.5),
    warp=True,
    flip=True,
    preload=False,
    visibility_mask=None,
):
    """Store dataset configuration, index the files, and optionally preload them."""
    self.root = root
    self.split = split
    self.base_size = base_size
    self.crop_size = crop_size
    self.mean = np.array(mean)
    self.scale = scale
    self.warp = warp
    self.flip = flip
    self.preload = preload

    self.files = np.array([])
    self.images = []
    self.labels = []
    self.ignore_label = None
    self.visibility_mask = visibility_mask

    self._set_files()
    if self.preload:
        self._preload_data()

    # Keep OpenCV single-threaded to avoid DataLoader worker deadlocks.
    cv2.setNumThreads(0)
Example #8
Source File: ReDWebDataset.py From YouTube3D with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, width, height):
    """
    Args:
        width and height are only used to determine the output aspect
        ratio, not the actual output size
    """
    self.ops = []
    # Single-threaded OpenCV avoids deadlocks in DataLoader worker processes.
    cv2.setNumThreads(0)
    self.width = float(width)
    self.height = float(height)
Example #9
Source File: YoutubeDataset.py From YouTube3D with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, width, height):
    """
    Args:
        width and height are only used to determine the output aspect
        ratio, not the actual output size
    """
    self.ops = []
    # Single-threaded OpenCV avoids deadlocks in DataLoader worker processes.
    cv2.setNumThreads(0)
    self.width = float(width)
    self.height = float(height)
Example #10
Source File: DIWDataset.py From YouTube3D with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, width, height):
    """
    Args:
        width and height are only used to determine the output aspect
        ratio, not the actual output size
    """
    self.ops = []
    # Single-threaded OpenCV avoids deadlocks in DataLoader worker processes.
    cv2.setNumThreads(0)
    self.width = float(width)
    self.height = float(height)
Example #11
Source File: multicore.py From imgaug with MIT License | 5 votes |
def _Pool_initialize_worker(augseq, seed_start):
    # pylint: disable=invalid-name, protected-access
    """Per-process pool initializer: pin cv2 threads, seed RNGs, stash state."""
    # Not using this seems to have caused infinite hanging in the case
    # of gaussian blur on at least MacOSX.
    # It is also in most cases probably not sensible to use multiple
    # threads while already running augmentation in multiple processes.
    cv2.setNumThreads(0)

    if seed_start is None:
        # pylint falsely thinks in older versions that
        # multiprocessing.current_process() was not callable, see
        # https://github.com/PyCQA/pylint/issues/1699
        # pylint: disable=not-callable
        process_name = _get_context().current_process().name
        # pylint: enable=not-callable

        # time_ns() exists only in 3.7+
        if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
            seed_offset = time.time_ns()
        else:
            seed_offset = int(time.time() * 10**6) % 10**6
        seed = hash(process_name) + seed_offset
        _reseed_global_local(seed, augseq)

    Pool._WORKER_SEED_START = seed_start
    Pool._WORKER_AUGSEQ = augseq
    # not sure if really necessary, but shouldn't hurt either
    Pool._WORKER_AUGSEQ.localize_random_state_()

# This could be a classmethod or staticmethod of Pool in 3.x, but in 2.7 that
# leads to pickle errors.
Example #12
Source File: kitti_dataset.py From PointRCNN with MIT License | 5 votes |
def get_image(self, idx):
    """Load the RGB image for sample ``idx`` (currently disabled).

    cv2-based loading is intentionally blocked because OpenCV threading
    mixed with DataLoader multiprocessing can deadlock when switching epochs.

    Args:
        idx (int): sample index, formatted into a zero-padded '%06d.png' name.

    Returns:
        np.ndarray: (H, W, 3) image in BGR mode (unreachable while disabled).

    Raises:
        NotImplementedError: always, until the deadlock issue is resolved.
    """
    # The original used `assert False`, which is silently stripped under
    # `python -O`; raise an explicit exception so the guard always fires.
    raise NotImplementedError('DO NOT USE cv2 NOW, AVOID DEADLOCK')
    import cv2
    # cv2.setNumThreads(0)  # for solving deadlock when switching epoch
    img_file = os.path.join(self.image_dir, '%06d.png' % idx)
    assert os.path.exists(img_file)
    return cv2.imread(img_file)  # (H, W, 3) BGR mode
Example #13
Source File: cocostuff.py From PanopticSegmentation with MIT License | 5 votes |
def __init__(
    self,
    root,
    split="train",
    base_size=513,
    crop_size=321,
    mean=(104.008, 116.669, 122.675),
    scale=(0.5, 0.75, 1.0, 1.25, 1.5),
    warp=True,
    flip=True,
    preload=False,
):
    """Store dataset configuration, index the files, and optionally preload them."""
    self.root = root
    self.split = split
    self.base_size = base_size
    self.crop_size = crop_size
    self.mean = np.array(mean)
    self.scale = scale
    self.warp = warp
    self.flip = flip
    self.preload = preload

    self.files = []
    self.images = []
    self.labels = []
    self.ignore_label = None

    self._set_files()
    if self.preload:
        self._preload_data()

    # Keep OpenCV single-threaded to avoid DataLoader worker deadlocks.
    cv2.setNumThreads(0)
Example #14
Source File: data_loader.py From uois with GNU General Public License v3.0 | 4 votes |
def __getitem__(self, idx):
    """Return one sample: RGB, organized point cloud, and label tensors.

    Args:
        idx (int): flat dataset index; decomposed into a scene index and a
            view number via NUM_VIEWS_PER_SCENE.

    Returns:
        dict: 'rgb' [3 x H x W], 'xyz' [3 x H x W],
            'foreground_labels' [H x W], 'direction_labels' [2 x H x W],
            plus 'scene_dir', 'view_num' and 'label_abs_path' metadata.

    Raises:
        ValueError: if ``self.train_or_test`` is neither 'train' nor 'test'.
    """
    # some hack to make sure pyTorch doesn't deadlock. Found at
    # https://github.com/pytorch/pytorch/issues/1355
    cv2.setNumThreads(0)

    # Get scene directory
    scene_idx = idx // NUM_VIEWS_PER_SCENE
    scene_dir = self.scene_dirs[scene_idx]

    # Get view number
    view_num = idx % NUM_VIEWS_PER_SCENE

    # Label
    foreground_labels_filename = scene_dir + f"segmentation_{view_num:05d}.png"
    label_abs_path = '/'.join(foreground_labels_filename.split('/')[-2:])  # Used for evaluation
    foreground_labels = util_.imread_indexed(foreground_labels_filename)
    foreground_labels, direction_labels = self.process_label(foreground_labels)

    # RGB image
    rgb_img_filename = scene_dir + f"rgb_{view_num:05d}.jpeg"
    rgb_img = cv2.cvtColor(cv2.imread(rgb_img_filename), cv2.COLOR_BGR2RGB)
    rgb_img = self.process_rgb(rgb_img)

    # Depth image
    if self.train_or_test == 'train':
        depth_img_filename = scene_dir + f"depth_{view_num:05d}.png"
    elif self.train_or_test == 'test':
        depth_img_filename = scene_dir + f"depth_noisy_{view_num:05d}.png"
    else:
        # Previously an unexpected value fell through silently and caused an
        # UnboundLocalError below; fail loudly with the actual cause instead.
        raise ValueError(f"Unknown train_or_test value: {self.train_or_test!r}")
    depth_img = cv2.imread(depth_img_filename, cv2.IMREAD_ANYDEPTH)  # This reads a 16-bit single-channel image. Shape: [H x W]
    xyz_img = self.process_depth(depth_img, foreground_labels)

    # Turn these all into torch tensors
    rgb_img = data_augmentation.array_to_tensor(rgb_img)  # Shape: [3 x H x W]
    xyz_img = data_augmentation.array_to_tensor(xyz_img)  # Shape: [3 x H x W]
    foreground_labels = data_augmentation.array_to_tensor(foreground_labels)  # Shape: [H x W]
    direction_labels = data_augmentation.array_to_tensor(direction_labels)  # Shape: [2 x H x W]

    return {'rgb': rgb_img,
            'xyz': xyz_img,
            'foreground_labels': foreground_labels,
            'direction_labels': direction_labels,
            'scene_dir': scene_dir,
            'view_num': view_num,
            'label_abs_path': label_abs_path,
            }
Example #15
Source File: data_loader.py From uois with GNU General Public License v3.0 | 4 votes |
def __getitem__(self, idx):
    """Pick a random object mask from a scene view and return transformed crops."""
    # some hack to make sure pyTorch doesn't deadlock. Found at
    # https://github.com/pytorch/pytorch/issues/1355. Seems to work for me
    cv2.setNumThreads(0)

    # Get scene directory
    scene_idx = idx // 5
    scene_dir = self.scene_dirs[scene_idx]

    # Get view number
    view_num = idx % 5 + 2  # objects start at rgb_00002.jpg

    # Label
    foreground_labels_filename = scene_dir + f"segmentation_{view_num:05d}.png"
    label_abs_path = '/'.join(foreground_labels_filename.split('/')[-2:])  # Used for evaluation
    foreground_labels = util_.imread_indexed(foreground_labels_filename)

    # Grab a random object and use that mask
    obj_ids = np.unique(foreground_labels)
    if obj_ids[0] == 0:
        obj_ids = obj_ids[1:]  # get rid of background
    if obj_ids[0] == 1:
        obj_ids = obj_ids[1:]  # get rid of table

    # Retry until the chosen object covers at least 2 pixels; after 100
    # failures, recurse on a freshly sampled index instead.
    num_pixels = 1
    num_pixel_tries = 0
    while num_pixels < 2:
        if num_pixel_tries > 100:
            print("ERROR. Pixels too small. Choosing a new image.")
            print(scene_dir, view_num, num_pixels, obj_ids, np.unique(foreground_labels))

            # Choose a new image to use instead
            new_idx = np.random.randint(0, self.len)
            return self.__getitem__(new_idx)

        obj_id = np.random.choice(obj_ids)
        label = (foreground_labels == obj_id).astype(np.uint8)
        num_pixels = np.count_nonzero(label)
        num_pixel_tries += 1

    # RGB image
    img_filename = scene_dir + f"rgb_{view_num:05d}.jpeg"
    img = cv2.cvtColor(cv2.imread(img_filename), cv2.COLOR_BGR2RGB)

    # Processing
    img_crop, morphed_label_crop, label_crop = self.transform(img, label)

    return {
        'rgb': img_crop,
        'initial_masks': morphed_label_crop,
        'labels': label_crop,
        'label_abs_path': label_abs_path,
    }
Example #16
Source File: cocostuff.py From IIC with MIT License | 4 votes |
def __init__(self, config=None, split=None, purpose=None, preload=False):
    """Coco dataset base: cache config fields and load the fine-to-coarse map."""
    super(_Coco, self).__init__()

    self.split = split
    self.purpose = purpose

    self.root = config.dataset_root
    self.single_mode = hasattr(config, "single_mode") and config.single_mode

    # always used (labels fields used to make relevancy mask for train)
    self.gt_k = config.gt_k
    self.pre_scale_all = config.pre_scale_all
    self.pre_scale_factor = config.pre_scale_factor
    self.input_sz = config.input_sz

    self.include_rgb = config.include_rgb
    self.no_sobel = config.no_sobel

    assert ((not hasattr(config, "mask_input")) or (not config.mask_input))
    self.mask_input = False

    # only used if purpose is train
    if purpose == "train":
        self.use_random_scale = config.use_random_scale
        if self.use_random_scale:
            self.scale_max = config.scale_max
            self.scale_min = config.scale_min

        self.jitter_tf = tvt.ColorJitter(brightness=config.jitter_brightness,
                                         contrast=config.jitter_contrast,
                                         saturation=config.jitter_saturation,
                                         hue=config.jitter_hue)

        self.flip_p = config.flip_p  # 0.5

        self.use_random_affine = config.use_random_affine
        if self.use_random_affine:
            self.aff_min_rot = config.aff_min_rot
            self.aff_max_rot = config.aff_max_rot
            self.aff_min_shear = config.aff_min_shear
            self.aff_max_shear = config.aff_max_shear
            self.aff_min_scale = config.aff_min_scale
            self.aff_max_scale = config.aff_max_scale

    assert (not preload)

    self.files = []
    self.images = []
    self.labels = []

    # Build the fine->coarse label dictionary on first use, then load it.
    if not osp.exists(config.fine_to_coarse_dict):
        generate_fine_to_coarse(config.fine_to_coarse_dict)

    with open(config.fine_to_coarse_dict, "rb") as dict_f:
        d = pickle.load(dict_f)
        self._fine_to_coarse_dict = d["fine_index_to_coarse_index"]

    # Keep OpenCV single-threaded to avoid DataLoader worker deadlocks.
    cv2.setNumThreads(0)
Example #17
Source File: potsdam.py From IIC with MIT License | 4 votes |
def __init__(self, config=None, split=None, purpose=None, preload=False):
    """Potsdam dataset base: cache config fields, index files, optionally preload."""
    super(_Potsdam, self).__init__()

    self.split = split
    self.purpose = purpose

    self.root = config.dataset_root
    self.single_mode = hasattr(config, "single_mode") and config.single_mode

    # Sanity marker written by the dataset preparation scripts.
    assert (os.path.exists(os.path.join(self.root, "debugged.out")))

    # always used (labels fields used to make relevancy mask for train)
    self.gt_k = config.gt_k
    self.pre_scale_all = config.pre_scale_all
    self.pre_scale_factor = config.pre_scale_factor
    self.input_sz = config.input_sz

    self.include_rgb = config.include_rgb
    self.no_sobel = config.no_sobel

    # only used if purpose is train
    if purpose == "train":
        self.use_random_scale = config.use_random_scale
        if self.use_random_scale:
            self.scale_max = config.scale_max
            self.scale_min = config.scale_min

        self.jitter_tf = tvt.ColorJitter(brightness=config.jitter_brightness,
                                         contrast=config.jitter_contrast,
                                         saturation=config.jitter_saturation,
                                         hue=config.jitter_hue)

        self.flip_p = config.flip_p  # 0.5

        self.use_random_affine = config.use_random_affine
        if self.use_random_affine:
            self.aff_min_rot = config.aff_min_rot
            self.aff_max_rot = config.aff_max_rot
            self.aff_min_shear = config.aff_min_shear
            self.aff_max_shear = config.aff_max_shear
            self.aff_min_scale = config.aff_min_scale
            self.aff_max_scale = config.aff_max_scale

    self.preload = preload

    self.files = []
    self.images = []
    self.labels = []

    self._set_files()
    if self.preload:
        self._preload_data()

    # Keep OpenCV single-threaded to avoid DataLoader worker deadlocks.
    cv2.setNumThreads(0)