Python utils.image.transform_inverse() Examples
The following are 30 code examples of utils.image.transform_inverse(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils.image, or try the search function.
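For context, utils.image.transform_inverse() in these detection codebases undoes the preprocessing applied before the network forward pass. The sketch below is not copied from any of the linked projects; it is a minimal reconstruction inferred from the call sites in the examples (a [1, c, h, w] mean-subtracted tensor in, an [h, w, c] uint8 RGB image out), and the exact channel order of pixel_means is an assumption.

import numpy as np

def transform_inverse(im_tensor, pixel_means):
    """
    Hypothetical inverse of the usual detection preprocessing:
    [1, c, h, w] float tensor (mean-subtracted) -> [h, w, c] uint8 image.
    """
    assert im_tensor.shape[0] == 1, 'expects a single image in the batch'
    im = im_tensor[0].transpose(1, 2, 0).copy()        # c h w -> h w c
    im += np.asarray(pixel_means).reshape(1, 1, -1)    # undo mean subtraction
    return np.clip(np.round(im), 0, 255).astype(np.uint8)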
Example #1
Source File: tester.py From Faster_RCNN_for_DOTA with Apache License 2.0 | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
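A hedged usage sketch for the function above; the variable names, array shapes, and config attributes are inferred from the docstring, not taken from the repository.

# Hypothetical call: im_array is the [1, 3, H, W] network input tensor,
# all_dets[j] is an (N, 5) array of [x1, y1, x2, y2, score] rows for class j.
vis = draw_all_detection(im_array, all_dets, class_names, im_scale, cfg, threshold=0.5)
cv2.imwrite('detections.jpg', vis)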
Example #2
Source File: demo.py From RoITransformer_DOTA with MIT License | 5 votes |
def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    # pdb.set_trace()
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        try:
            dets = detections[j]
        except:
            pdb.set_trace()
        for det in dets:
            bbox = det[:8] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            # draw first point
            cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1)
            for i in range(3):
                cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]),
                         (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]),
                         color=color, thickness=2)
            cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #3
Source File: tester.py From RoITransformer_DOTA with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #4
Source File: tester.py From RoITransformer_DOTA with MIT License | 5 votes |
def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    if DEBUG:
        class_names = ['__background__', 'fg']
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:8] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            # draw first point
            cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1)
            for i in range(3):
                cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]),
                         (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]),
                         color=color, thickness=2)
            cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #5
Source File: tester.py From Relation-Networks-for-Object-Detection with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #6
Source File: tester.py From Accel with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-4):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #7
Source File: tester.py From Accel with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #8
Source File: tester.py From Accel with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-4):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #9
Source File: tester.py From Accel with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #10
Source File: tester.py From Accel with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #11
Source File: tester.py From Faster_RCNN_for_DOTA with Apache License 2.0 | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #12
Source File: tester.py From RoITransformer_DOTA with MIT License | 5 votes |
def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    if DEBUG:
        class_names = ['__background__', 'fg']
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:8] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            # draw first point
            cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1)
            for i in range(3):
                cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]),
                         (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]),
                         color=color, thickness=2)
            cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #13
Source File: tester.py From Faster_RCNN_for_DOTA with Apache License 2.0 | 5 votes |
def draw_all_quadrangle_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:8] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            for i in range(3):
                cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]),
                         (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]),
                         color=color, thickness=2)
            cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #14
Source File: tester.py From Sequence-Level-Semantics-Aggregation with Apache License 2.0 | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=0.1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #15
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #16
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #17
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #18
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #19
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #20
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #21
Source File: tester.py From Decoupled-Classification-Refinement with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #22
Source File: tester.py From Deep-Feature-Flow with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #23
Source File: tester.py From kaggle-rsna18 with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #24
Source File: tester.py From kaggle-rsna18 with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #25
Source File: tester.py From kaggle-rsna18 with MIT License | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #26
Source File: tester.py From kaggle-rsna18 with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #27
Source File: tester.py From kaggle-rsna18 with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-3):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #28
Source File: tester.py From MANet_for_Video_Object_Detection with Apache License 2.0 | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=0.1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
Example #29
Source File: tester.py From MANet_for_Video_Object_Detection with Apache License 2.0 | 5 votes |
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=0.1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = map(int, bbox)
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #30
Source File: tester.py From Deep-Feature-Flow with MIT License | 5 votes |
def vis_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-4):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import matplotlib.pyplot as plt
    import random
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    plt.imshow(im)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.random(), random.random(), random.random())  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2] - bbox[0],
                                 bbox[3] - bbox[1], fill=False,
                                 edgecolor=color, linewidth=3.5)
            plt.gca().add_patch(rect)
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()