Python skimage.color.rgb2gray() Examples
The following are 30 code examples of skimage.color.rgb2gray(), drawn from open-source projects. The original project and source file are noted above each example.
You may also want to check out the other functions and classes available in the skimage.color module.
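For orientation before the project examples, here is a minimal, self-contained usage sketch (not taken from any of the projects below). It assumes only that scikit-image is installed and uses the bundled astronaut() sample image; the weights in the comment are the luminance coefficients that scikit-image documents for this conversion.

# Minimal usage sketch for skimage.color.rgb2gray().
# rgb2gray() converts an (M, N, 3) RGB image to an (M, N) float image
# using the luminance weighting Y = 0.2125 R + 0.7154 G + 0.0721 B.
from skimage import color, data

rgb = data.astronaut()       # sample (512, 512, 3) uint8 RGB image
gray = color.rgb2gray(rgb)   # (512, 512) float64 image with values in [0, 1]
print(rgb.shape, gray.shape, gray.dtype, gray.min(), gray.max())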
Example #1
Source File: test_color.py From snowy with MIT License

def test_luminance():
    source = sn.load('tests/sobel_input.png')[:,:,:3]

    L = rgb2gray(source)
    skresult = np.dstack([L, L, L])
    small_skresult = sn.resize(skresult, width=256)

    L = sn.rgb_to_luminance(source)
    snresult = np.dstack([L, L, L])
    small_snresult = sn.resize(snresult, width=256)

    L = skimage_sobel(source)
    sksobel = np.dstack([L, L, L])
    small_sksobel = sn.resize(sksobel, width=256)

    L = sn.rgb_to_luminance(source)
    L = sn.compute_sobel(L)
    snsobel = np.dstack([L, L, L])
    small_snsobel = sn.resize(snsobel, width=256)

    sn.show(np.hstack([
        small_skresult,
        small_snresult,
        small_sksobel,
        small_snsobel]))
Example #2
Source File: trainer.py From Disentangled-Person-Image-Generation with MIT License

def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
    G_pose_rcv, G_pose = self.sess.run([self.G_pose_rcv, self.G_pose])
    G_pose_inflated = py_poseInflate(G_pose_rcv, is_normalized=True, radius=4, img_H=256, img_W=256)
    # G = self.sess.run(self.G, {self.x: x_fixed, self.G_pose_inflated: G_pose_inflated, self.part_bbox: part_bbox_fixed})
    G_pose_inflated_img = np.tile(np.amax((G_pose_inflated+1)*127.5, axis=-1, keepdims=True), [1,1,1,3])

    # ssim_G_x_list = []
    # for i in xrange(G_pose.shape[0]):
    #     G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
    #     x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
    #     ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
    # ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        # path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        # save_image(G, path)
        # print("[*] Samples saved: {}".format(path))
        path = os.path.join(root_path, '{}_G_pose.png'.format(idx))
        save_image(G_pose, path)
        print("[*] Samples saved: {}".format(path))
        path = os.path.join(root_path, '{}_G_pose_inflated.png'.format(idx))
        save_image(G_pose_inflated_img, path)
        print("[*] Samples saved: {}".format(path))
    return G_pose
Example #3
Source File: generator.py From ad-versarial with MIT License

def get_resized_image(file, ratio):
    img = util.img_as_float(io.imread(file))
    if len(img.shape) >= 3 and img.shape[2] == 4:
        img = color.rgba2rgb(img)
    if len(img.shape) == 2:
        img = color.gray2rgb(img)
    eimg = filters.sobel(color.rgb2gray(img))

    width = img.shape[1]
    height = img.shape[0]

    mode, rm_paths = get_lines_to_remove((width, height), ratio)
    if mode:
        logger.debug("Carving %s %s paths ", rm_paths, mode)
        outh = transform.seam_carve(img, eimg, mode, rm_paths)
        return outh
    else:
        return img
Example #4
Source File: colorize.py From faceai with MIT License

def colorize():
    path = './img/colorize/colorize2.png'
    # cv2.imwrite('./img/colorize3.png', cv2.imread(path, 0))
    x, y, image_shape = get_train_data(path)
    model = build_model()
    model.load_weights('./data/simple_colorize.h5')
    output = model.predict(x)
    output *= 128
    tmp = np.zeros((200, 200, 3))
    tmp[:, :, 0] = x[0][:, :, 0]
    tmp[:, :, 1:] = output[0]
    colorizePath = path.replace(".png", "-res.png")
    imsave(colorizePath, lab2rgb(tmp))
    cv2.imshow("I", cv2.imread(path))
    cv2.imshow("II", cv2.imread(colorizePath))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # imsave("test_image_gray.png", rgb2gray(lab2rgb(tmp)))
Example #5
Source File: image1.py From ASR33 with MIT License

def load_image(filename, width, invert, gamma):
    # Read the image
    img = imageio.imread(filename)
    if img.shape[-1] == 4:
        # Blend the alpha channel
        img = color.rgba2rgb(img)

    # Grayscale
    img = color.rgb2gray(img)

    # Resample and adjust the aspect ratio
    width_px = (3 * width) * 16
    img_width = 1.0 * width_px
    img_height = int(img.shape[0] * 3 * (img_width / (4 * img.shape[1])))
    img = transform.resize(img, (img_height, img_width), anti_aliasing=True, mode='constant')

    # Adjust the exposure
    img = exposure.adjust_gamma(img, gamma)
    if invert:
        img = 1 - img
    return img
Example #6
Source File: helpFunctions.py From TCDTIMITprocessing with GNU General Public License v3.0

def convertToGrayScale(rootDir, dirNames):
    nbConverted = 0
    for root, dirs, files in os.walk(rootDir):
        files.sort(key=tryint)
        for file in files:
            parentDir = os.path.basename(root)
            fname = os.path.splitext(file)[0]  # no path, no extension. only filename
            if parentDir in dirNames:
                # convert all images in here to grayscale, store to dirName_gray
                newDirPath = ''.join([os.path.dirname(root), os.sep, parentDir + "_gray"])
                newFilePath = ''.join([newDirPath, os.sep, fname + "_gray.jpg"])
                if not os.path.exists(newDirPath):
                    os.makedirs(newDirPath)
                if not os.path.exists(newFilePath):  # don't write to disk if already exists
                    # read in grayscale, write to new path
                    # with OpenCV: weird results (gray image larger than color ?!?)
                    # img = cv2.imread(root+os.sep+file, 0)
                    # cv2.imwrite(newFilePath, img)
                    img_gray = rgb2gray(io.imread(root + os.sep + file))
                    io.imsave(newFilePath, img_gray)
                    nbConverted += 1
    # print(nbConverted, " files have been converted to Grayscale")
    return 0
Example #7
Source File: trainer.py From Pose-Guided-Person-Image-Generation with MIT License

def generate(self, x_fixed, x_target_fixed, pose_target_fixed, root_path=None, path=None, idx=None, save=True):
    G = self.sess.run(self.G, {self.x: x_fixed, self.pose_target: pose_target_fixed})
    ssim_G_x_list = []
    # x_0_255 = utils_wgan.unprocess_image(x_target_fixed, 127.5, 127.5)
    for i in xrange(G.shape[0]):
        # G_gray = rgb2gray((G[i,:]/127.5-1).clip(min=-1,max=1))
        # x_target_gray = rgb2gray((x_target_fixed[i,:]).clip(min=-1,max=1))
        G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
        x_target_gray = rgb2gray(((x_target_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max() - x_target_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
    return G
Example #8
Source File: build_classifier.py From Pic-Numero with MIT License

def get_textural_features(img):
    img = img_as_ubyte(rgb2gray(img))
    glcm = greycomatrix(img, [1], [0], 256, symmetric=True, normed=True)
    dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]
    correlation = greycoprops(glcm, 'correlation')[0, 0]
    homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]
    energy = greycoprops(glcm, 'energy')[0, 0]
    feature = np.array([dissimilarity, correlation, homogeneity, energy])
    return feature

## featureRepresentation = {'image', 'pca', 'glcm'}
Example #9
Source File: vision.py From geoseg with MIT License

def canny_edge(img, sigma=1):
    """
    args:
        img : 2D or 3D array
    return:
        edge: outline of image
    """
    if len(img.shape) == 3:
        img = rgb2gray(img)
    edge_bool = feature.canny(img, sigma)
    edge_img = np.zeros((edge_bool.shape), np.uint8)
    edge_img[edge_bool] = 255
    return edge_img
Example #10
Source File: train.py From Hopfield-Network with MIT License

def main():
    # Load data
    camera = skimage.data.camera()
    astronaut = rgb2gray(skimage.data.astronaut())
    horse = skimage.data.horse()
    coffee = rgb2gray(skimage.data.coffee())

    # Merge data
    data = [camera, astronaut, horse, coffee]

    # Preprocessing
    print("Start to data preprocessing...")
    data = [preprocessing(d) for d in data]

    # Create Hopfield Network Model
    model = network.HopfieldNetwork()
    model.train_weights(data)

    # Generate testset
    test = [get_corrupted_input(d, 0.3) for d in data]

    predicted = model.predict(test, threshold=0, asyn=False)
    print("Show prediction results...")
    plot(data, test, predicted)
    print("Show network weights matrix...")
    # model.plot_weights()
Example #11
Source File: gym.py From angela with MIT License

def _prepro(self, frame):
    """Pre-process 210x160x3 uint8 frame into 80x80 float32 frame."""
    frame = rgb2gray(frame)  # convert to grayscale
    #print('_prepro() frame after rgb2gray: {}'.format(frame))  # DEBUG
    frame = cv2.resize(frame, (80, 80), interpolation=cv2.INTER_AREA)  # downsample
    #print('_prepro() frame after resize: {}'.format(frame))  # DEBUG
    return frame
Example #12
Source File: gym.py From angela with MIT License

def _prepro(self, frame):
    """Pre-process 96x96x3 uint8 frame into 96x96 float32 frame."""
    frame = rgb2gray(frame)  # convert to grayscale
    #print('_prepro() frame after rgb2gray: {}'.format(frame))  # DEBUG
    return frame
Example #13
Source File: atari_environment.py From tensorflow-rl with Apache License 2.0

def get_preprocessed_frame(self, observation):
    if isinstance(self.env.observation_space, Discrete):
        expanded_obs = np.zeros(self.env.observation_space.n, dtype=np.float32)
        expanded_obs[observation] = 1
        return expanded_obs
    elif len(observation.shape) > 1:
        if not self.use_rgb:
            observation = rgb2gray(observation)
        return resize(observation, (self.resized_width, self.resized_height))
    else:
        return observation
Example #14
Source File: test_main.py From pyelastix with MIT License

def test_register_affine_gray():
    # Get fixed image
    image_fixed = imageio.imread('imageio:chelsea.png')
    image_fixed = color.rgb2gray(image_fixed)

    # Generate moving image
    image_moving = transform.rotate(image_fixed, angle=15, resize=True)

    # Convert both images to float32
    image_fixed = image_fixed.astype('float32')
    image_moving = image_moving.astype('float32')

    # Initialize and adjust the parameters
    params = pyelastix.get_default_params(type='AFFINE')
    params.FixedInternalImagePixelType = "float"
    params.MovingInternalImagePixelType = "float"
    params.ResultImagePixelType = "float"
    params.NumberOfResolutions = 3
    params.MaximumNumberOfIterations = 1000

    # Register
    image_registered, field = pyelastix.register(
        image_moving, image_fixed, params)

    # Check the results
    assert image_registered == pytest.approx(image_fixed, rel=1)
Example #15
Source File: inferer.py From DeepVOG with GNU General Public License v3.0

def _preprocess_image(img, shape_correct):
    """
    Args:
        img (numpy array): unprocessed image with shape (w, h, 3)
            and values int [0, 255]
    Returns:
        output_img (numpy array): processed grayscale image with shape
            (240, 320, 1) and values float [0, 1]
    """
    output_img = np.zeros((240, 320, 3))
    img = img / 255
    img = rgb2gray(img)
    if not shape_correct:
        img = resize(img, (240, 320))
    output_img[:, :, :] = img.reshape(240, 320, 1)
    return output_img
Example #16
Source File: ClassificationModule.py From HistoQC with BSD 3-Clause Clear License

def compute_laplace(img, params):
    laplace_ksize = int(params.get("laplace_ksize", 3))
    return laplace(rgb2gray(img), ksize=laplace_ksize)[:, :, None]
Example #17
Source File: atari_env_deprecated.py From reinforce_py with MIT License

def preprocess(self, observ):
    return resize(rgb2gray(observ), self.screen_size)
Example #18
Source File: count.py From Pic-Numero with MIT License

def main():
    numberOfImages = 11  # TODO: AUTOMATICALLY GET NUMBER OF IMAGES
    # Get number of images. Remember to divide by 2 as for every relevant image,
    # there's also the comparison image.
    # if ".DS_Store" in os.listdir("Wheat_ROIs"):
    #     numberOfImages = (len(os.listdir("Wheat_ROIs")) - 1)/2
    # else:
    #     numberOfImages = len(os.listdir("Wheat_ROIs"))/2

    # For each ROI image in folder
    for i in tqdm.tqdm(range(1, numberOfImages+1)):
        # Load image
        filename = "../Wheat_ROIs/{:03d}_ROI.png".format(i)
        img = misc.imread(filename)
        img_gray = rgb2gray(img)

        # Detect blobs. See http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_doh
        # for function documentation
        blobs = blob_doh(img_gray, min_sigma=1, max_sigma=100, threshold=.01)

        # Display blobs on image and save image
        fig, ax = plt.subplots()
        plt.title("Number of Blobs Detected: {}".format(blobs.shape[0]))
        plt.grid(False)
        ax.imshow(img, interpolation='nearest')
        for blob in blobs:
            y, x, r = blob
            c = plt.Circle((x, y), r, color='red', linewidth=2, fill=False)
            ax.add_patch(c)
        fig.savefig("../Wheat_ROIs/{:03d}_Blob.png".format(i))
Example #19
Source File: spectral_roi.py From Pic-Numero with MIT License

def extract_roi(img, labels_to_keep=[1, 2]):
    '''
    Given a wheat image, this method returns an image containing only the
    region of interest.

    Args:
        img: input image.

        labels_to_keep: cluster labels to be kept in the image while pixels
            belonging to clusters besides these ones are removed.

    Return:
        roi_img: Input image containing only the region of interest.
    '''
    label_img = segmentation.slic(img, compactness=30, n_segments=6)
    labels = np.unique(label_img)
    print(labels)
    gray = rgb2gray(img)
    for label in labels:
        if label not in labels_to_keep:
            logicalIndex = (label_img == label)
            gray[logicalIndex] = 0

    # Display.show_image(gray)
    return gray
Example #20
Source File: CNN.py From Pic-Numero with MIT License

def get_textural_features(img):
    img = img_as_ubyte(rgb2gray(img))
    glcm = greycomatrix(img, [1], [0], 256, symmetric=True, normed=True)
    dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]
    correlation = greycoprops(glcm, 'correlation')[0, 0]
    homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]
    energy = greycoprops(glcm, 'energy')[0, 0]
    feature = np.array([dissimilarity, correlation, homogeneity, energy])
    return feature
Example #21
Source File: atari_1step_qlearning.py From FRU with MIT License

def get_preprocessed_frame(self, observation):
    """
    0) Atari frames: 210 x 160
    1) Get image grayscale
    2) Rescale image 110 x 84
    3) Crop center 84 x 84 (you can crop top/bottom according to the game)
    """
    return resize(rgb2gray(observation), (110, 84))[13:110 - 13, :]
Example #22
Source File: trainer.py From Disentangled-Person-Image-Generation with MIT License

def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, part_vis_fixed, root_path=None, path=None, idx=None, save=True):
    G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.part_bbox: part_bbox_fixed, self.part_vis: part_vis_fixed})
    ssim_G_x_list = []
    for i in xrange(G.shape[0]):
        G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
        x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
    return G

# def generate(self, x_fixed, x_target_fixed, pose_fixed, pose_target_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
#     G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.pose_target: pose_target_fixed, self.part_bbox: part_bbox_fixed})
#     ssim_G_x_list = []
#     for i in xrange(G.shape[0]):
#         G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
#         x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
#         ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
#     ssim_G_x_mean = np.mean(ssim_G_x_list)
#     if path is None and save:
#         path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx,ssim_G_x_mean))
#         save_image(G, path)
#         print("[*] Samples saved: {}".format(path))
#     return G
Example #23
Source File: trainer.py From Disentangled-Person-Image-Generation with MIT License

def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, root_path=None, path=None, idx=None, save=True):
    G_pose_rcv, G_pose = self.sess.run([self.G_pose_rcv, self.G_pose])
    G_pose_inflated = py_poseInflate(G_pose_rcv, is_normalized=True, radius=4, img_H=128, img_W=64)
    G = self.sess.run(self.G, {self.x: x_fixed, self.G_pose_inflated: G_pose_inflated, self.part_bbox: part_bbox_fixed})
    G_pose_inflated_img = np.tile(np.amax((G_pose_inflated+1)*127.5, axis=-1, keepdims=True), [1,1,1,3])

    ssim_G_x_list = []
    for i in xrange(G.shape[0]):
        G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
        x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
        path = os.path.join(root_path, '{}_G_pose.png'.format(idx))
        save_image(G_pose, path)
        print("[*] Samples saved: {}".format(path))
        path = os.path.join(root_path, '{}_G_pose_inflated.png'.format(idx))
        save_image(G_pose_inflated_img, path)
        print("[*] Samples saved: {}".format(path))
    return G

#################################################################################################
####################################### DF train models #########################################
######################### DeepFashion with AppPose BodyROI #####################################
Example #24
Source File: trainer.py From Disentangled-Person-Image-Generation with MIT License

def generate(self, x_fixed, x_target_fixed, pose_fixed, part_bbox_fixed, part_vis_fixed, root_path=None, path=None, idx=None, save=True):
    G = self.sess.run(self.G, {self.x: x_fixed, self.pose: pose_fixed, self.part_bbox: part_bbox_fixed, self.part_vis: part_vis_fixed})
    ssim_G_x_list = []
    for i in xrange(G.shape[0]):
        G_gray = rgb2gray((G[i,:]).clip(min=0,max=255).astype(np.uint8))
        x_gray = rgb2gray(((x_fixed[i,:]+1)*127.5).clip(min=0,max=255).astype(np.uint8))
        ssim_G_x_list.append(ssim(G_gray, x_gray, data_range=x_gray.max() - x_gray.min(), multichannel=False))
    ssim_G_x_mean = np.mean(ssim_G_x_list)
    if path is None and save:
        path = os.path.join(root_path, '{}_G_ssim{}.png'.format(idx, ssim_G_x_mean))
        save_image(G, path)
        print("[*] Samples saved: {}".format(path))
    return G
Example #25
Source File: utils.py From reinforcement-learning-pytorch with MIT License

def pre_process(image):
    image = np.array(image)
    image = resize(image, (84, 84, 3))
    image = rgb2gray(image)
    return image
Example #26
Source File: image2.py From ASR33 with MIT License

def load_image(filename, width, invert, gamma):
    # Read the image
    img = imageio.imread(filename)
    if img.shape[-1] == 4:
        # Blend the alpha channel
        img = color.rgba2rgb(img, background=(0, 0, 0))

    # Grayscale
    img = color.rgb2gray(img)

    # Adjust the exposure
    img = exposure.adjust_gamma(img, gamma)
    if invert:
        img = util.invert(img)

    # Resample and adjust the aspect ratio
    width_px = (3 * width) * CELLPX
    img_width = 1.0 * width_px
    img_height = int(img.shape[0] * 3 * (img_width / (4 * img.shape[1])))
    img = transform.resize(img, (img_height, img_width), anti_aliasing=True, mode='reflect')
    img = (img - img.min()) / (img.max() - img.min())
    return img
Example #27
Source File: DQN_Atari_Space_Invaders.py From ReinforcementLearning with Apache License 2.0

def preprocess_frame(frame):
    gray = rgb2gray(frame)
    cropped_frame = gray[8:-12, 4:-12]
    normalized_frame = cropped_frame / 255.0
    preprocessed_frame = transform.resize(normalized_frame, frame_size)
    return preprocessed_frame
Example #28
Source File: datasets.py From e2c-pytorch with Apache License 2.0

def all_states(cls):
    _env = gym.make('Pendulum-v0').env
    width = GymPendulumDataset.width
    height = GymPendulumDataset.height
    X = np.zeros((360, width, height))
    for i in range(360):
        th = i / 360. * 2 * np.pi
        state = _env.render_state(th)
        X[i, :, :] = resize(rgb2gray(state), (width, height), mode='reflect')
    _env.close()
    _env.viewer.close()
    return X
Example #29
Source File: datasets.py From e2c-pytorch with Apache License 2.0

def __init__(self, root, split):
    if split not in ['train', 'test', 'all']:
        raise ValueError
    dir = os.path.join(root, split)
    filenames = glob.glob(os.path.join(dir, '*.png'))

    if split == 'all':
        filenames = glob.glob(os.path.join(root, 'train/*.png'))
        filenames.extend(glob.glob(os.path.join(root, 'test/*.png')))

    filenames = sorted(
        filenames, key=lambda x: int(os.path.basename(x).split('.')[0]))

    images = []
    for f in filenames:
        img = plt.imread(f)
        img[img != 1] = 0
        images.append(resize(rgb2gray(img), [48, 48], mode='constant'))

    self.images = np.array(images, dtype=np.float32)
    self.images = self.images.reshape([len(images), 48, 48, 1])

    action_filename = os.path.join(root, 'actions.txt')
    with open(action_filename) as infile:
        actions = np.array([float(l) for l in infile.readlines()])
    self.actions = actions[:len(self.images)].astype(np.float32)
    self.actions = self.actions.reshape(len(actions), 1)
Example #30
Source File: data_augmentation.py From dataset_loaders with GNU General Public License v3.0

def my_label2rgboverlay(labels, cmap, image, bglabel=None,
                        bg_color=(0., 0., 0.), alpha=0.2):
    '''Superimpose a mask over an image

    Convert a label mask to RGB, applying a color map and superimposing it
    over an image as a transparent overlay'''
    image_float = gray2rgb(img_as_float(rgb2gray(image)))
    label_image = my_label2rgb(labels, cmap, bglabel=bglabel,
                               bg_color=bg_color)
    output = image_float * alpha + label_image * (1 - alpha)
    return output