Python tensorflow.keras.preprocessing.image.img_to_array() Examples
The following are 11 code examples of tensorflow.keras.preprocessing.image.img_to_array(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.preprocessing.image, or try the search function.
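Before the examples, here is a minimal, hedged sketch of the function's basic behaviour: img_to_array() converts a PIL image into a float32 NumPy array of shape (height, width, channels). The file name 'cat.jpg' is only a placeholder.

import numpy as np
from tensorflow.keras.preprocessing import image

# Load a picture as a PIL image, resized to 224x224 ('cat.jpg' is a placeholder path)
img = image.load_img('cat.jpg', target_size=(224, 224))

# Convert the PIL image to a float32 array of shape (height, width, channels)
x = image.img_to_array(img)
print(x.shape, x.dtype)        # (224, 224, 3) float32

# Most Keras models expect a leading batch dimension
x = np.expand_dims(x, axis=0)  # (1, 224, 224, 3)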
Example #1
Source File: __init__.py From platypush with MIT License | 6 votes |
def _get_image(cls, image_file: str, model: Model) -> np.ndarray:
    input_shape = model.inputs[0].shape
    size = input_shape[1:3].as_list()
    assert len(size) == 2, 'The model {} does not have enough dimensions to process an image (shape: {})'.format(
        model.name, size)

    colors = input_shape[3:]
    assert colors, ('The model {} requires a tensor with at least 3 inputs in order to process images: ' +
                    '[WIDTH, HEIGHT, COLORS]').format(model.name)

    if colors[0] == 1:
        color_mode = 'grayscale'
    elif colors[0] == 3:
        color_mode = 'rgb'
    elif colors[0] == 4:
        color_mode = 'rgba'
    else:
        raise AssertionError('The input tensor should have either 1 (grayscale), 3 (rgb) or 4 (rgba) units. ' +
                             'Found: {}'.format(colors[0]))

    img = image.load_img(image_file, target_size=size, color_mode=color_mode)
    return image.img_to_array(img)
Example #2
Source File: feature_extractor.py From sis with MIT License | 6 votes |
def extract(self, img):
    """
    Extract a deep feature from an input image
    Args:
        img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)
    Returns:
        feature (np.ndarray): deep feature with the shape=(4096, )
    """
    img = img.resize((224, 224))   # VGG must take a 224x224 img as an input
    img = img.convert('RGB')       # Make sure img is color
    x = image.img_to_array(img)    # To np.array. Height x Width x Channel. dtype=float32
    x = np.expand_dims(x, axis=0)  # (H, W, C)->(1, H, W, C), where the first elem is the number of img
    x = preprocess_input(x)        # Subtracting avg values for each pixel
    feature = self.model.predict(x)[0]       # (1, 4096) -> (4096, )
    return feature / np.linalg.norm(feature)  # Normalize
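The surrounding class is not shown above; assuming it wraps a Keras model whose output is a 4096-dimensional vector (as the docstring implies) and is called FeatureExtractor, usage might look like the sketch below (the class name and file path are assumptions):

from PIL import Image

extractor = FeatureExtractor()    # hypothetical wrapper class holding self.model
img = Image.open('query.jpg')     # placeholder path
feature = extractor.extract(img)  # L2-normalized vector of shape (4096,)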
Example #3
Source File: pipeline_invoke_python.py From models with Apache License 2.0 | 6 votes |
def _transform_request(request):
    request = request.decode('utf-8')
    request = unquote(request)

    # Direct http example
    if request.startswith('http'):
        request = download_image(request)
    else:
        # Slack Label Example
        request_array = request.split('&')
        print(request_array)
        result = [value for value in request_array if value.startswith('text=')]
        if len(result) > 0:
            request = download_image(result[0][5:])
            print(request)

    predict_img = image.load_img(request, target_size=(224, 224))
    predict_img_array = image.img_to_array(predict_img)
    predict_img_array = np.expand_dims(predict_img_array, axis=0)
    predict_preprocess_img = preprocess_input(predict_img_array)
    return predict_preprocess_img
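The helper accepts either a raw image URL or a URL-encoded Slack payload whose text= field carries the URL; download_image() and the preprocess_input() in use are defined elsewhere in the source file. A hedged call sketch with placeholder URLs:

# Direct URL payload (placeholder URL)
batch = _transform_request(b'https://example.com/dog.jpg')

# Slack-style form payload carrying the URL in its text= field
batch = _transform_request(b'token=abc&text=https%3A%2F%2Fexample.com%2Fdog.jpg')
print(batch.shape)  # (1, 224, 224, 3), ready for model.predict()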
Example #4
Source File: inception_resnet_v2.py From armory with MIT License | 5 votes |
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (299, 299)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_inception_resnet_v2(np.array(output))
    return output
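preprocess_input_inception_resnet_v2 is presumably an aliased import of the Keras application's preprocess_input (the import is not shown above). Assuming that, the function can be exercised on a hypothetical uint8 batch like this:

# Hypothetical batch of four RGB images of arbitrary size
batch = np.random.randint(0, 255, size=(4, 400, 600, 3), dtype=np.uint8)
processed = preprocessing_fn(batch)
print(processed.shape)  # (4, 299, 299, 3), resized and preprocessed for InceptionResNetV2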
Example #5
Source File: densenet121_resisc45.py From armory with MIT License | 5 votes |
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (224, 224)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_densenet121_resisc(np.array(output))
    return output
Example #6
Source File: resnet50.py From armory with MIT License | 5 votes |
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (224, 224)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_resnet50(np.array(output))
    return output
Example #7
Source File: Predict.py From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0 | 5 votes |
def predict(TEST_SET,image_size):
    print("Loading network weights...")
    try:
        model = Unet(6,(32,32,4),0.001,0.00001)  # build UNet
        model.load_weights('UnetDen169SGD.h5')
    except:
        print("Failed to load!")
    stride = image_size
    print("Predicting and stitching segmentation tiles...")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        image = Image.open(basePath+path)
        w,h = image.size
        padding_h = (h//stride + 1) * stride
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,4),dtype=np.uint8)
        image = img_to_array(image)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / 255.0
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:4]
                ch,cw,_ = crop.shape
                if ch != 32 or cw != 32:
                    print('Incorrect size, please check!')
                    continue
                crop = np.expand_dims(crop, axis=0)
                pred = model.predict(crop)
                pred = np.argmax(pred,axis=3)
                pred = pred.flatten()
                pred = labelencoder.inverse_transform(pred)
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]
        cv2.imwrite(basePath2+'predict\\%s'%path,mask_whole[0:h,0:w])
Example #8
Source File: Segnet预测.py From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0 | 5 votes |
def predict(args):
    # load the trained convolutional neural network
    print("Loading network weights...")
    model = load_model(args["model"])
    stride = args['stride']
    print("Predicting and stitching segmentation tiles...")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        # load the image
        image = cv2.imread(basePath+'train\\' + path)
        h,w,_ = image.shape
        padding_h = (h//stride + 1) * stride
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / divisor
        padding_img = img_to_array(padding_img)
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:3]
                ch,cw,_ = crop.shape
                if ch != 32 or cw != 32:
                    print('Incorrect size, please check!')
                    continue
                crop = np.expand_dims(crop, axis=0)
                pred = model.predict_classes(crop,verbose=2)
                pred = labelencoder.inverse_transform(pred[0])
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]
        cv2.imwrite(basePath+'predict/'+path,mask_whole[0:h,0:w])
Example #9
Source File: 训练.py From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0 | 5 votes |
def generateData(batch_size,data=[]):
    while True:
        train_data = []
        train_label = []
        batch = 0
        for i in (range(len(data))):
            url = data[i]
            batch += 1
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img)
            train_data.append(img)
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))
            train_label.append(label)
            if batch % batch_size==0:
                train_data = np.array(train_data)
                train_label = np.array(train_label).flatten()  # flatten
                train_label = labelencoder.transform(train_label)
                train_label = to_categorical(train_label, num_classes=n_label)  # one-hot encode the output labels
                train_label = train_label.reshape((batch_size,img_w,img_h,n_label))
                yield (train_data,train_label)
                train_data = []
                train_label = []
                batch = 0

# generate the test data
Example #10
Source File: 训练.py From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0 | 5 votes |
def generateValidData(batch_size,data=[]):
    while True:
        valid_data = []
        valid_label = []
        batch = 0
        for i in (range(len(data))):
            url = data[i]
            batch += 1
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img)
            valid_data.append(img)
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))
            valid_label.append(label)
            if batch % batch_size==0:
                valid_data = np.array(valid_data)
                valid_label = np.array(valid_label).flatten()
                valid_label = labelencoder.transform(valid_label)
                valid_label = to_categorical(valid_label, num_classes=n_label)
                valid_label = valid_label.reshape((batch_size,img_w,img_h,n_label))
                yield (valid_data,valid_label)
                valid_data = []
                valid_label = []
                batch = 0

# define the model - network architecture
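These two generators are typically handed to Keras' fit_generator; a hedged sketch, where train_list/valid_list are lists of image file names and BS/EPOCHS are assumed hyperparameters defined elsewhere in the script:

model.fit_generator(
    generator=generateData(BS, train_list),             # training batches
    steps_per_epoch=len(train_list) // BS,
    epochs=EPOCHS,
    validation_data=generateValidData(BS, valid_list),  # validation batches
    validation_steps=len(valid_list) // BS)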
Example #11
Source File: FCN8S预测.py From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0 | 5 votes |
def predict(args):
    # load the trained convolutional neural network
    print("Loading network weights...")
    model = load_model(args["model"],custom_objects={'dice_coef': dice_coef})
    stride = args['stride']
    print("Predicting and stitching segmentation tiles...")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        # load the image
        image = cv2.imread(basePath+'train\\' + path)
        h,w,_ = image.shape
        padding_h = (h//stride + 1) * stride
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / 255.0
        padding_img = img_to_array(padding_img)
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:3]
                ch,cw,_ = crop.shape
                #print(ch,cw,_)
                if ch != 32 or cw != 32:
                    print('Incorrect size, please check!')
                    continue
                crop = np.expand_dims(crop, axis=0)
                pred = model.predict(crop,verbose=2)
                pred = np.argmax(pred,axis=3)
                pred = pred.flatten()
                pred = labelencoder.inverse_transform(pred)
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]
        cv2.imwrite(basePath+'predict/'+path,mask_whole[0:h,0:w])
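A hedged invocation sketch; the weight file name and stride are assumptions, and TEST_SET, basePath, image_size, labelencoder and dice_coef must already exist at module level as in the original script:

args = {'model': 'fcn8s_weights.h5',  # assumed weight file name
        'stride': 32}                 # matches the 32x32 tiles used above
predict(args)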