Python mrcnn.utils.Dataset() Examples
The following are 5 code examples of mrcnn.utils.Dataset(), drawn from open-source projects. The originating project and source file are noted above each example.
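Before the individual examples, here is a minimal sketch of how mrcnn.utils.Dataset is typically used: you subclass it, register classes and images, and override load_mask(). Everything below (the DemoDataset name, the "demo" source string, the one-class label, and the image paths) is illustrative and not taken from the examples:

import numpy as np
from mrcnn import utils

class DemoDataset(utils.Dataset):
    def load_demo(self, image_paths):
        # Register one foreground class; class ID 0 is reserved
        # for the background class.
        self.add_class("demo", 1, "object")
        # Register each image under a unique ID; the base class's
        # default load_image() reads the file at "path".
        for i, path in enumerate(image_paths):
            self.add_image("demo", image_id=i, path=path)

    def load_mask(self, image_id):
        # A real dataset returns an [H, W, instance_count] boolean
        # array plus one class ID per instance; this sketch returns
        # zero instances so it stays self-contained.
        masks = np.zeros((1, 1, 0), dtype=bool)
        class_ids = np.zeros((0,), dtype=np.int32)
        return masks, class_ids

# Typical driving code:
# dataset = DemoDataset()
# dataset.load_demo(["images/a.png", "images/b.png"])
# dataset.prepare()  # builds the internal class/image indices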

Example #1
Source File: single_class.py From deepdiy with MIT License
def add_additional_info(self):
    # Copy every entry of the additional_info dict onto the
    # instance as an attribute.
    for i, j in self.additional_info.items():
        setattr(self, i, j)


############################################################
# Dataset
############################################################
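This helper simply mirrors every entry of the additional_info dict into an instance attribute. A minimal usage sketch; the subclass name and dict keys below are invented for illustration:

# Hypothetical usage: assumes a Dataset subclass that defines
# add_additional_info and stores a dict in self.additional_info.
dataset = SingleClassDataset()  # hypothetical class name
dataset.additional_info = {"img_dir": "data/images", "min_dim": 512}  # illustrative keys
dataset.add_additional_info()
print(dataset.img_dir, dataset.min_dim)  # -> data/images 512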
Example #2
Source File: coco.py From dataiku-contrib with Apache License 2.0
# Imports used by this snippet (from the surrounding coco.py;
# build_coco_results is defined elsewhere in the same file):
import time
import numpy as np
from pycocotools.cocoeval import COCOeval


def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        # Cast masks to uint8 because the COCO tools error out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)


############################################################
# Training
############################################################
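Examples #2 through #5 all use the Dataset object to drive COCO-style evaluation. A hedged sketch of how evaluate_coco is typically called; CocoDataset and load_coco follow the matterport Mask R-CNN coco.py that these files adapt, and the dataset path, year, and limit are placeholders:

# Sketch only: assumes `model` is a Mask R-CNN instance created in
# inference mode with trained weights already loaded.
dataset_val = CocoDataset()
coco = dataset_val.load_coco("/path/to/coco", "val", year="2017",
                             return_coco=True)
dataset_val.prepare()
evaluate_coco(model, dataset_val, coco, eval_type="bbox", limit=500)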
Example #3
Source File: coco.py From PanopticSegmentation with MIT License
# (Same imports as Example #2; build_coco_results is again defined
# elsewhere in the same file.)
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        # Cast masks to uint8 because the COCO tools error out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)

    return results


############################################################
# Training
############################################################
Example #4
Source File: coco.py From deep-learning-explorer with Apache License 2.0
The evaluate_coco function in this file is identical, line for line, to Example #2 above; see that listing for the full code.
Example #5
Source File: coco.py From bird_species_classification with MIT License
This example's evaluate_coco is again a line-for-line copy of Example #2; refer to that listing.