Python maskrcnn_benchmark.utils.cv2_util.findContours() Examples
The following are 29 code examples of maskrcnn_benchmark.utils.cv2_util.findContours(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module maskrcnn_benchmark.utils.cv2_util, or try the search function.
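All of the examples below call cv2_util.findContours as a drop-in replacement for cv2.findContours that always returns a (contours, hierarchy) pair, regardless of the installed OpenCV version (OpenCV 3 prepends the input image to the return tuple, OpenCV 4 does not). A minimal sketch of such a compatibility wrapper, reconstructed from how the examples use it rather than copied from the module, could look like this:

import cv2


def findContours(*args, **kwargs):
    # Illustrative sketch only: normalize cv2.findContours across versions
    # so callers always receive (contours, hierarchy).
    if cv2.__version__.startswith("4"):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif cv2.__version__.startswith("3"):
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError("cv2 must be version 3 or 4 to call this method")
    return contours, hierarchy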
Example #1
Source File: predictor.py From remote_sensing_object_detection_2019 with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
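The same contour-overlay pattern can be tried outside the predictor class. The short sketch below is illustrative only (the synthetic image, mask, and color are assumptions, not part of the project code) and calls cv2.findContours directly with the OpenCV 4 return signature:

import cv2
import numpy as np

# Synthetic canvas and binary instance mask, for demonstration purposes only.
image = np.zeros((100, 100, 3), dtype=np.uint8)
mask = np.zeros((100, 100), dtype=np.uint8)
mask[30:70, 30:70] = 1

# Extract the mask boundary and draw it onto the image, as overlay_mask does.
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(image, contours, -1, (0, 255, 0), 3)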
Example #2
Source File: predictor.py From RRPN_pytorch with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #3
Source File: predictor.py From training with Apache License 2.0 | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #4
Source File: predictor.py From RRPN_pytorch with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #5
Source File: predictor.py From maskrcnn-benchmark with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None].astype(np.uint8)
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #6
Source File: segmentation_mask.py From maskrcnn-benchmark with MIT License | 6 votes |
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
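The reshape(-1).tolist() step above flattens each contour from OpenCV's (num_points, 1, 2) array into a flat [x1, y1, x2, y2, ...] list, i.e. the COCO-style polygon encoding. A small illustrative sketch of that conversion on a plain NumPy mask (skipping the torch tensors and cv2.UMat wrapping used above, purely for brevity):

import cv2
import numpy as np

mask = np.zeros((64, 64), dtype=np.uint8)
mask[10:30, 10:30] = 1

# Each contour becomes one flat coordinate list: [x1, y1, x2, y2, ...].
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
polygons = [c.reshape(-1).tolist() for c in contours]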
Example #7
Source File: predictor.py From RRPN_pytorch with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #8
Source File: segmentation_mask.py From sampling-free with MIT License | 6 votes |
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
Example #9
Source File: predictor.py From argoverse_baselinetracker with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #10
Source File: predictor.py From remote_sensing_object_detection_2019 with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #11
Source File: predictor.py From remote_sensing_object_detection_2019 with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #12
Source File: predictor.py From DF-Traffic-Sign-Identification with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #13
Source File: predictor.py From DetNAS with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #14
Source File: segmentation_mask.py From DetNAS with MIT License | 6 votes |
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
Example #15
Source File: segmentation_mask.py From Clothing-Detection with GNU General Public License v3.0 | 6 votes |
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
Example #16
Source File: predictor.py From R2CNN.pytorch with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #17
Source File: predictor.py From Res2Net-maskrcnn with MIT License | 6 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
Example #18
Source File: predictor.py From sampling-free with MIT License | 5 votes |
def overlay_mask(self, image, predictions):
    """
    Adds the instances contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels)

    image = image.astype(np.float)
    masks = masks.squeeze(1)
    for mask, color in zip(masks, colors):
        idx = np.nonzero(mask)
        alpha = 0.4
        image[idx[0], idx[1], :] *= 1.0 - alpha
        image[idx[0], idx[1], :] += alpha * color

    # for mask, color in zip(masks, colors):
    #     thresh = mask[0, :, :, None]
    #     contours, hierarchy = cv2_util.findContours(
    #         thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    #     )
    #     image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image

    return composite
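Unlike the other overlay_mask variants, Example #18 skips findContours and instead blends a translucent color into the masked pixels. A minimal sketch of that blend, with a synthetic image, mask, and color chosen purely for illustration:

import numpy as np

image = np.zeros((100, 100, 3), dtype=np.float64)
mask = np.zeros((100, 100), dtype=np.uint8)
mask[20:60, 20:60] = 1

color = np.array([0.0, 255.0, 0.0])  # illustrative overlay color
alpha = 0.4
idx = np.nonzero(mask)
image[idx[0], idx[1], :] *= 1.0 - alpha   # darken the masked pixels
image[idx[0], idx[1], :] += alpha * color  # mix in the tinted color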
Example #19
Source File: instances2dict_with_polygons.py From TinyBenchmark with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
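The instanceId < 1000 check above follows the Cityscapes convention (as used by cityscapesScripts, from which this helper appears to be derived) that pixels of instance-level classes are encoded as labelId * 1000 + instance index, while plain semantic labels stay below 1000. A tiny sketch of that decoding, stated as an assumption rather than taken from this file:

# Hypothetical pixel value read from a Cityscapes instance image.
instance_id = 26001
label_id = instance_id // 1000       # 26: the semantic label id
instance_index = instance_id % 1000  # 1: which instance of that label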
Example #20
Source File: instances2dict_with_polygons.py From RRPN_pytorch with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #21
Source File: instances2dict_with_polygons.py From DF-Traffic-Sign-Identification with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #22
Source File: instances2dict_with_polygons.py From NAS-FCOS with BSD 2-Clause "Simplified" License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #23
Source File: instances2dict_with_polygons.py From training with Apache License 2.0 | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #24
Source File: instances2dict_with_polygons.py From maskrcnn-benchmark with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #25
Source File: instances2dict_with_polygons.py From sampling-free with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #26
Source File: instances2dict_with_polygons.py From remote_sensing_object_detection_2019 with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #27
Source File: instances2dict_with_polygons.py From DetNAS with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #28
Source File: instances2dict_with_polygons.py From R2CNN.pytorch with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
Example #29
Source File: instances2dict_with_polygons.py From Res2Net-maskrcnn with MIT License | 4 votes |
def instances2dict_with_polygons(imageFileList, verbose=False):
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict