org.tensorflow.types.UInt8 Java Examples
The following examples show how to use
org.tensorflow.types.UInt8.
You can vote up the examples you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: ObjectDetector.java From OpenLabeler with Apache License 2.0 | 6 votes |
private static Tensor<?> makeImageTensor(BufferedImage img) throws IOException { if (img.getType() == BufferedImage.TYPE_BYTE_INDEXED || img.getType() == BufferedImage.TYPE_BYTE_BINARY || img.getType() == BufferedImage.TYPE_BYTE_GRAY || img.getType() == BufferedImage.TYPE_USHORT_GRAY) { BufferedImage bgr = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_3BYTE_BGR); bgr.getGraphics().drawImage(img, 0, 0, null); img = bgr; } if (img.getType() != BufferedImage.TYPE_3BYTE_BGR) { throw new IOException( String.format( "Expected 3-byte BGR encoding in BufferedImage, found %d. This code could be made more robust", img.getType())); } byte[] data = ((DataBufferByte) img.getData().getDataBuffer()).getData(); // ImageIO.read seems to produce BGR-encoded images, but the model expects RGB. bgr2rgb(data); final long BATCH_SIZE = 1; final long CHANNELS = 3; long[] shape = new long[]{BATCH_SIZE, img.getHeight(), img.getWidth(), CHANNELS}; return Tensor.create(UInt8.class, shape, ByteBuffer.wrap(data)); }
Example #2
Source File: ObjectDetectionTensorflowInputConverter.java From tensorflow with Apache License 2.0 | 6 votes |
private static Tensor<UInt8> makeImageTensor(byte[] imageBytes) throws IOException { ByteArrayInputStream is = new ByteArrayInputStream(imageBytes); BufferedImage img = ImageIO.read(is); if (img.getType() != BufferedImage.TYPE_3BYTE_BGR) { throw new IllegalArgumentException( String.format("Expected 3-byte BGR encoding in BufferedImage, found %d", img.getType())); } byte[] data = ((DataBufferByte) img.getData().getDataBuffer()).getData(); // ImageIO.read produces BGR-encoded images, while the model expects RGB. bgrToRgb(data); //Expand dimensions since the model expects images to have shape: [1, None, None, 3] long[] shape = new long[] { BATCH_SIZE, img.getHeight(), img.getWidth(), CHANNELS }; return Tensor.create(UInt8.class, shape, ByteBuffer.wrap(data)); }
Example #3
Source File: DeepLab.java From cineast with MIT License | 6 votes |
/**
 * Runs the segmentation network on the given image tensor.
 *
 * <p>Fix: the previous version called {@code result.shape()} after
 * {@code result.close()} — a use-after-close on a native-backed tensor. The
 * shape is now read while the tensor is open, and try-with-resources releases
 * it even if reading fails.
 *
 * @param input uint8 image tensor fed to the "ImageTensor" input
 * @return a [width][height] matrix of per-pixel class label ids
 */
public synchronized int[][] processImage(Tensor<UInt8> input) {
    long[] resultShape;
    long[] resultArray;
    try (Tensor<Long> result = session.runner()
            .feed("ImageTensor", input)
            .fetch("SemanticPredictions")
            .run().get(0).expect(Long.class)) {
        // Read shape and contents while the tensor is still open.
        resultShape = result.shape();
        LongBuffer buf = LongBuffer.allocate(result.numElements());
        result.writeTo(buf);
        resultArray = buf.array();
    }
    int w = (int) resultShape[2];
    int h = (int) resultShape[1];
    int[][] resultMatrix = new int[w][h];
    // Fold the flat row-major output into a [w][h] matrix.
    for (int i = 0; i < resultArray.length; ++i) {
        resultMatrix[i % w][i / w] = (int) resultArray[i];
    }
    return resultMatrix;
}
Example #4
Source File: TensorFlowUtil.java From konduit-serving with Apache License 2.0 | 5 votes |
/**
 * Maps an {@link NDArrayType} to the Java class TensorFlow's Java API uses for
 * that element type.
 *
 * @param t the array element type to translate
 * @return the corresponding TF Java class (e.g. {@code Float.class})
 * @throws UnsupportedOperationException for types TF Java has no class for
 *         (UINT64/UINT32/UINT16/BFLOAT16/FLOAT16, and anything unknown)
 */
public static Class<?> toTFType(NDArrayType t) {
    switch (t) {
        case DOUBLE:
            return Double.class;
        case FLOAT:
            return Float.class;
        case INT64:
            return Long.class;
        case INT32:
            return Integer.class;
        case INT16:
            return Short.class;
        case INT8:
            return Byte.class;
        case UINT8:
            return UInt8.class;
        case BOOL:
            return Boolean.class;
        case UTF8:
            return String.class;
        default:
            // Covers UINT64, UINT32, UINT16, BFLOAT16, FLOAT16 and future values.
            throw new UnsupportedOperationException("Type not supported by TF Java: " + t);
    }
}
Example #5
Source File: GraphBuilder.java From tensorflow-java-examples-spring with Do What The F*ck You Want To Public License | 5 votes |
/**
 * Adds a DecodeJpeg op to the graph that decodes a JPEG-encoded string tensor
 * into a uint8 image tensor.
 *
 * @param contents scalar string tensor holding the JPEG bytes
 * @param channels number of color channels for the decoded image
 * @return the decoded uint8 image output
 */
public Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return graph.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #6
Source File: GraphBuilder.java From tensorflow-example-java with Do What The F*ck You Want To Public License | 5 votes |
/**
 * Appends a DecodeJpeg operation to the graph.
 *
 * @param contents scalar string tensor with the raw JPEG bytes
 * @param channels desired number of color channels in the decoded image
 * @return output of the DecodeJpeg op as a uint8 tensor
 */
public Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return graph.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #7
Source File: ImageClassifier.java From video-stream-classification with Apache License 2.0 | 5 votes |
/**
 * Builds a DecodeJpeg node that turns JPEG bytes into a uint8 image tensor.
 *
 * @param contents scalar string tensor holding the JPEG bytes
 * @param channels number of color channels to decode
 * @return the uint8 image output of the new op
 */
Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return g.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #8
Source File: LabelImage.java From tensorflow-java with MIT License | 5 votes |
/**
 * Adds a JPEG-decoding op to the graph.
 *
 * @param contents scalar string tensor with encoded JPEG data
 * @param channels channel count for the decoded uint8 image
 * @return decoded uint8 image tensor output
 */
Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return g.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #9
Source File: DeepLab.java From cineast with MIT License | 5 votes |
/**
 * Returns the class label index for every pixel of the rescaled image.
 *
 * <p>Fix: the input tensor is now managed by try-with-resources, so its native
 * memory is released even when inference throws (previously it leaked on
 * exception).
 *
 * @param img the image to segment
 * @return a [width][height] matrix of per-pixel class label ids
 */
public synchronized int[][] processImage(BufferedImage img) {
    try (Tensor<UInt8> input = prepareImage(img)) {
        return processImage(input);
    }
}
Example #10
Source File: GraphBuilder.java From cineast with MIT License | 5 votes |
/**
 * Creates a DecodeJpeg operation producing a uint8 image from JPEG bytes.
 *
 * @param contents scalar string tensor containing the JPEG data
 * @param channels number of color channels in the decoded output
 * @return the uint8 image tensor emitted by the op
 */
public Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return g.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #11
Source File: GraphBuilder.java From orbit-image-analysis with GNU General Public License v3.0 | 4 votes |
/**
 * Registers a DecodeJpeg op on the graph and returns its uint8 image output.
 *
 * @param contents scalar string tensor with JPEG-encoded bytes
 * @param channels channel count of the decoded image
 * @return uint8 image tensor output
 */
Output<UInt8> decodeJpeg(Output<String> contents, long channels) {
    return g.opBuilder("DecodeJpeg", "DecodeJpeg")
            .addInput(contents)
            .setAttr("channels", channels)
            .build()
            .<UInt8>output(0);
}
Example #12
Source File: ConceptMasks.java From cineast with MIT License | 4 votes |
/**
 * Segments the most representative frame of the shot with three DeepLab models
 * (ADE20k, Cityscapes, PascalVOC), computes the dominant label per grid cell,
 * and persists the concatenated 2-D label embeddings as a feature vector.
 *
 * <p>Fix: the input tensor is now managed by try-with-resources, so its native
 * memory is released even if one of the three model invocations throws
 * (previously it leaked on exception).
 *
 * @param shot the segment to process; ignored when it has no usable frame
 */
@Override
public synchronized void processSegment(SegmentContainer shot) {
    // Nothing to do without a representative frame image.
    if (shot == null || shot.getMostRepresentativeFrame() == null
            || shot.getMostRepresentativeFrame().getImage() == null
            || shot.getMostRepresentativeFrame().getImage() == VideoFrame.EMPTY_VIDEO_FRAME) {
        return;
    }

    int[][] tmp;
    List<DeepLabLabel> ade20kLabels;
    List<DeepLabLabel> cityscapesLabels;
    List<DeepLabLabel> pascalvocLabels;
    try (Tensor<UInt8> inputTensor = DeepLab
            .prepareImage(shot.getMostRepresentativeFrame().getImage().getBufferedImage())) {
        tmp = this.ade20k.processImage(inputTensor);
        ade20kLabels = linearize(DeepLabLabel.fromAde20kId(tmp));
        cityscapesLabels = linearize(
                DeepLabLabel.fromCityscapesId(this.cityscapes.processImage(inputTensor)));
        pascalvocLabels = linearize(
                DeepLabLabel.fromPascalVocId(this.pascalvoc.processImage(inputTensor)));
    }

    ArrayList<LinkedList<DeepLabLabel>> ade20kPartitions = GridPartitioner
            .partition(ade20kLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
    ArrayList<LinkedList<DeepLabLabel>> cityscapesPartitions = GridPartitioner
            .partition(cityscapesLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
    ArrayList<LinkedList<DeepLabLabel>> pascalvocLabelsPartitions = GridPartitioner
            .partition(pascalvocLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);

    // Two floats (x, y embedding of the dominant label) per grid cell.
    float[] vector = new float[2 * GRID_PARTITIONS * GRID_PARTITIONS];
    for (int i = 0; i < GRID_PARTITIONS * GRID_PARTITIONS; ++i) {
        ArrayList<DeepLabLabel> labels = new ArrayList<>();
        labels.addAll(ade20kPartitions.get(i));
        labels.addAll(cityscapesPartitions.get(i));
        labels.addAll(pascalvocLabelsPartitions.get(i));
        DeepLabLabel dominantLabel = DeepLabLabel.getDominantLabel(labels);
        vector[2 * i] = dominantLabel.getEmbeddX();
        vector[2 * i + 1] = dominantLabel.getEmbeddY();
    }

    persist(shot.getId(), new FloatVectorImpl(vector));
}
Example #13
Source File: ConceptMasksAde20k.java From cineast with MIT License | 4 votes |
/**
 * Segments the most representative frame of the shot with the ADE20k DeepLab
 * model, computes the dominant label per grid cell, and persists the 2-D label
 * embeddings as a feature vector.
 *
 * <p>Fix: the input tensor is now managed by try-with-resources, so its native
 * memory is released even if inference throws (previously it leaked on
 * exception).
 *
 * @param shot the segment to process; ignored when it has no usable frame
 */
@Override
public synchronized void processSegment(SegmentContainer shot) {
    // Nothing to do without a representative frame image.
    if (shot == null || shot.getMostRepresentativeFrame() == null
            || shot.getMostRepresentativeFrame().getImage() == null
            || shot.getMostRepresentativeFrame().getImage() == VideoFrame.EMPTY_VIDEO_FRAME) {
        return;
    }

    // Lazily initialize the model; skip this segment if loading fails.
    if (this.ade20k == null) {
        try {
            this.ade20k = new DeepLabAde20k();
        } catch (RuntimeException e) {
            LOGGER.error(LogHelper.getStackTrace(e));
            return;
        }
    }

    int[][] tmp;
    List<DeepLabLabel> ade20kLabels;
    try (Tensor<UInt8> inputTensor = DeepLab
            .prepareImage(shot.getMostRepresentativeFrame().getImage().getBufferedImage())) {
        tmp = this.ade20k.processImage(inputTensor);
        ade20kLabels = linearize(DeepLabLabel.fromAde20kId(tmp));
    }

    ArrayList<LinkedList<DeepLabLabel>> ade20kPartitions = GridPartitioner
            .partition(ade20kLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);

    // Two floats (x, y embedding of the dominant label) per grid cell.
    float[] vector = new float[2 * GRID_PARTITIONS * GRID_PARTITIONS];
    for (int i = 0; i < GRID_PARTITIONS * GRID_PARTITIONS; ++i) {
        DeepLabLabel dominantLabel = DeepLabLabel.getDominantLabel(ade20kPartitions.get(i));
        vector[2 * i] = dominantLabel.getEmbeddX();
        vector[2 * i + 1] = dominantLabel.getEmbeddY();
    }

    persist(shot.getId(), new FloatVectorImpl(vector));
}