Java Code Examples for org.tensorflow.Tensor#close()
The following examples show how to use
org.tensorflow.Tensor#close().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: DeepLab.java From cineast with MIT License | 6 votes |
/**
 * Runs the DeepLab graph on a prepared image tensor and returns the per-pixel
 * class-label matrix.
 *
 * @param input image tensor produced by {@code prepareImage}; NOT closed here — the caller owns it
 * @return matrix of class-label ids, indexed as [x][y]
 */
public synchronized int[][] processImage(Tensor<UInt8> input) {
  Tensor<Long> result = session.runner()
      .feed("ImageTensor", input)
      .fetch("SemanticPredictions")
      .run().get(0)
      .expect(Long.class);
  long[] resultShape;
  long[] resultArray;
  try {
    int len = result.numElements();
    LongBuffer buf = LongBuffer.allocate(len);
    result.writeTo(buf);
    // BUG FIX: shape() was previously read AFTER result.close(), i.e. on a
    // released native tensor. Read everything needed before closing.
    resultShape = result.shape();
    resultArray = buf.array();
  } finally {
    // Release the native tensor even if writeTo throws.
    result.close();
  }
  // Output layout is [batch, height, width]; width is dim 2, height is dim 1.
  int w = (int) resultShape[2];
  int h = (int) resultShape[1];
  int[][] resultMatrix = new int[w][h];
  for (int i = 0; i < resultArray.length; ++i) {
    resultMatrix[i % w][i / w] = (int) resultArray[i];
  }
  return resultMatrix;
}
Example 2
Source File: FaceRecognizer.java From server_face_recognition with GNU General Public License v3.0 | 5 votes |
/** * Running neural network * * @param image cropped, centralized face * @return describing of a face based on 128 float features */ private FaceFeatures passImageThroughNeuralNetwork(BufferedImage image, int faceType) { FaceFeatures features; try (Session session = new Session(graph)) { Tensor<Float> feedImage = Tensors.create(imageToMultiDimensionalArray(image)); long timeResponse = System.currentTimeMillis(); Tensor<Float> response = session.runner() .feed("input", feedImage) .feed("phase_train", Tensor.create(false)) .fetch("embeddings") .run().get(0) .expect(Float.class); FileUtils.timeSpent(timeResponse, "RESPONSE"); final long[] shape = response.shape(); //first dimension should return 1 as for image with normal size //second dimension should give 128 characteristics of face if (shape[0] != 1 || shape[1] != 128) { throw new IllegalStateException("illegal output values: 1 = " + shape[0] + " 2 = " + shape[1]); } float[][] featuresHolder = new float[1][128]; response.copyTo(featuresHolder); features = new FaceFeatures(featuresHolder[0], faceType); response.close(); } return features; }
Example 3
Source File: TensorFlowService.java From tensorflow-spring-cloud-stream-app-starters with Apache License 2.0 | 5 votes |
public Tensor evaluate(Map<String, Object> feeds, String outputName, int outputIndex) { try (Session session = new Session(graph)) { Runner runner = session.runner(); // Keep tensor references to release them in the finally block Tensor[] feedTensors = new Tensor[feeds.size()]; try { int i = 0; for (Entry<String, Object> e : feeds.entrySet()) { String feedName = e.getKey(); feedTensors[i] = toFeedTensor(e.getValue()); runner = runner.feed(feedName, feedTensors[i]); i++; } return runner.fetch(outputName).run().get(outputIndex); } finally { // Release all feed tensors for (Tensor tensor : feedTensors) { if (tensor != null) { tensor.close(); } } } } }
Example 4
Source File: YOLO.java From cineast with MIT License | 5 votes |
/**
 * Executes graph on the given preprocessed image.
 *
 * @param image preprocessed image
 * @return flattened output tensor values returned by tensorFlow
 */
private float[] executeYOLOGraph(final Tensor<Float> image) {
  Tensor<Float> result = yoloSession.runner()
      .feed("input", image)
      .fetch("output")
      .run().get(0)
      .expect(Float.class);
  try {
    float[] outputTensor = new float[getOutputSizeByShape(result)];
    // Wrapping the array lets writeTo fill outputTensor directly.
    FloatBuffer floatBuffer = FloatBuffer.wrap(outputTensor);
    result.writeTo(floatBuffer);
    return outputTensor;
  } finally {
    // FIX: close in finally so the native tensor is released even if
    // getOutputSizeByShape or writeTo throws.
    result.close();
  }
}
Example 5
Source File: DeepLab.java From cineast with MIT License | 5 votes |
/**
 * Returns the class label index for every pixel of the rescaled image.
 *
 * @param img image to segment
 * @return matrix of class-label ids as produced by {@code processImage(Tensor)}
 */
public synchronized int[][] processImage(BufferedImage img) {
  Tensor<UInt8> input = prepareImage(img);
  try {
    return processImage(input);
  } finally {
    // FIX: release the input tensor even if processImage throws.
    input.close();
  }
}
Example 6
Source File: JTensor.java From zoltar with Apache License 2.0 | 4 votes |
/**
 * Create a new {@link JTensor} instance by extracting data from the underlying {@link Tensor} and
 * closing it afterwards.
 */
public static JTensor create(final Tensor<?> tensor) {
  final JTensor jt;
  try {
    // Dispatch on the tensor's element type; each branch copies the native
    // data into a JVM-owned structure before the tensor is closed below.
    switch (tensor.dataType()) {
      case STRING:
        if (tensor.numDimensions() == 0) {
          // Scalar string tensor: bytesValue() holds the raw UTF-8 payload.
          final String value = new String(tensor.bytesValue(), UTF_8);
          jt = new AutoValue_JTensor(
              tensor.dataType(), tensor.numDimensions(), tensor.shape(), value);
        } else {
          // N-dimensional string tensor: copy into a nested byte[] array of the
          // same shape, then convert to a nested String array.
          final int[] dimensions = toIntExact(tensor.shape());
          final Object byteArray =
              tensor.copyTo(Array.newInstance(byte[].class, toIntExact(tensor.shape())));
          jt = new AutoValue_JTensor(
              tensor.dataType(), tensor.numDimensions(), tensor.shape(),
              toStringArray(byteArray, tensor.numElements(), dimensions));
        }
        break;
      case INT32:
        // Numeric branches: drain the tensor into a heap buffer sized to
        // numElements() and keep the buffer's backing primitive array.
        final IntBuffer intBuf = IntBuffer.allocate(tensor.numElements());
        tensor.writeTo(intBuf);
        jt = new AutoValue_JTensor(
            tensor.dataType(), tensor.numDimensions(), tensor.shape(), intBuf.array());
        break;
      case INT64:
        final LongBuffer longBuf = LongBuffer.allocate(tensor.numElements());
        tensor.writeTo(longBuf);
        jt = new AutoValue_JTensor(
            tensor.dataType(), tensor.numDimensions(), tensor.shape(), longBuf.array());
        break;
      case FLOAT:
        final FloatBuffer floatBuf = FloatBuffer.allocate(tensor.numElements());
        tensor.writeTo(floatBuf);
        jt = new AutoValue_JTensor(
            tensor.dataType(), tensor.numDimensions(), tensor.shape(), floatBuf.array());
        break;
      case DOUBLE:
        final DoubleBuffer doubleBuf = DoubleBuffer.allocate(tensor.numElements());
        tensor.writeTo(doubleBuf);
        jt = new AutoValue_JTensor(
            tensor.dataType(), tensor.numDimensions(), tensor.shape(), doubleBuf.array());
        break;
      case BOOL:
        // No boolean NIO buffer exists, so copy into a boolean[] directly.
        final boolean[] array = new boolean[tensor.numElements()];
        tensor.copyTo(array);
        jt = new AutoValue_JTensor(
            tensor.dataType(), tensor.numDimensions(), tensor.shape(), array);
        break;
      default:
        throw new IllegalStateException("Unsupported data type " + tensor.dataType());
    }
  } finally {
    // Always release the native tensor memory, even if extraction fails.
    tensor.close();
  }
  return jt;
}
Example 7
Source File: TensorFlowService.java From tensorflow with Apache License 2.0 | 4 votes |
/** * Evaluates a pre-trained tensorflow model (encoded as {@link Graph}). Use the feeds parameter to feed in the * model input data and fetch-names to specify the output tensors. * * @param feeds Named map of input tensors. Tensors can be encoded as {@link Tensor} or JSON string objects. * @param fetchedNames Names of the output tensors computed by the model. * @return Returns the computed output tensors. The names of the output tensors is defined by the fetchedNames * argument */ public Map<String, Tensor<?>> evaluate(Map<String, Object> feeds, List<String> fetchedNames) { try (Session session = new Session(graph)) { Runner runner = session.runner(); // Keep tensor references to release them in the finally block Tensor[] feedTensors = new Tensor[feeds.size()]; try { // Feed in the input named tensors int inputIndex = 0; for (Entry<String, Object> e : feeds.entrySet()) { String feedName = e.getKey(); feedTensors[inputIndex] = toFeedTensor(e.getValue()); runner = runner.feed(feedName, feedTensors[inputIndex]); inputIndex++; } // Set the tensor name to be fetched after the evaluation for (String fetchName : fetchedNames) { runner.fetch(fetchName); } // Evaluate the input List<Tensor<?>> outputTensors = runner.run(); // Extract the output tensors Map<String, Tensor<?>> outTensorMap = new HashMap<>(); for (int outputIndex = 0; outputIndex < fetchedNames.size(); outputIndex++) { outTensorMap.put(fetchedNames.get(outputIndex), outputTensors.get(outputIndex)); } return outTensorMap; } finally { // Release all feed tensors for (Tensor tensor : feedTensors) { if (tensor != null) { tensor.close(); } } } } }
Example 8
Source File: ConceptMasks.java From cineast with MIT License | 4 votes |
/**
 * Segments the shot's most representative frame with three DeepLab models
 * (ADE20k, Cityscapes, PascalVOC), picks the dominant label per grid cell,
 * and persists the resulting embedding vector.
 *
 * @param shot segment to process; ignored when null or without a usable frame
 */
@Override
public synchronized void processSegment(SegmentContainer shot) {
  if (shot == null || shot.getMostRepresentativeFrame() == null
      || shot.getMostRepresentativeFrame().getImage() == null
      || shot.getMostRepresentativeFrame().getImage() == VideoFrame.EMPTY_VIDEO_FRAME) {
    return;
  }
  Tensor<UInt8> inputTensor = DeepLab
      .prepareImage(shot.getMostRepresentativeFrame().getImage().getBufferedImage());
  int[][] tmp;
  List<DeepLabLabel> ade20kLabels;
  List<DeepLabLabel> cityscapesLabels;
  List<DeepLabLabel> pascalvocLabels;
  try {
    tmp = this.ade20k.processImage(inputTensor);
    ade20kLabels = linearize(DeepLabLabel.fromAde20kId(tmp));
    cityscapesLabels = linearize(
        DeepLabLabel.fromCityscapesId(this.cityscapes.processImage(inputTensor)));
    pascalvocLabels = linearize(
        DeepLabLabel.fromPascalVocId(this.pascalvoc.processImage(inputTensor)));
  } finally {
    // FIX: release the native input tensor even if a processImage call throws.
    inputTensor.close();
  }
  ArrayList<LinkedList<DeepLabLabel>> ade20kPartitions = GridPartitioner
      .partition(ade20kLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
  ArrayList<LinkedList<DeepLabLabel>> cityscapesPartitions = GridPartitioner
      .partition(cityscapesLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
  ArrayList<LinkedList<DeepLabLabel>> pascalvocLabelsPartitions = GridPartitioner
      .partition(pascalvocLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
  // Two floats per grid cell: the dominant label's 2-D embedding coordinates.
  float[] vector = new float[2 * GRID_PARTITIONS * GRID_PARTITIONS];
  for (int i = 0; i < GRID_PARTITIONS * GRID_PARTITIONS; ++i) {
    ArrayList<DeepLabLabel> labels = new ArrayList<>();
    labels.addAll(ade20kPartitions.get(i));
    labels.addAll(cityscapesPartitions.get(i));
    labels.addAll(pascalvocLabelsPartitions.get(i));
    DeepLabLabel dominantLabel = DeepLabLabel.getDominantLabel(labels);
    vector[2 * i] = dominantLabel.getEmbeddX();
    vector[2 * i + 1] = dominantLabel.getEmbeddY();
  }
  persist(shot.getId(), new FloatVectorImpl(vector));
}
Example 9
Source File: ConceptMasksAde20k.java From cineast with MIT License | 4 votes |
/**
 * Segments the shot's most representative frame with the ADE20k DeepLab model,
 * picks the dominant label per grid cell, and persists the embedding vector.
 * Lazily constructs the model on first use; gives up (with a log entry) if
 * construction fails.
 *
 * @param shot segment to process; ignored when null or without a usable frame
 */
@Override
public synchronized void processSegment(SegmentContainer shot) {
  if (shot == null || shot.getMostRepresentativeFrame() == null
      || shot.getMostRepresentativeFrame().getImage() == null
      || shot.getMostRepresentativeFrame().getImage() == VideoFrame.EMPTY_VIDEO_FRAME) {
    return;
  }
  // Lazy initialization: model construction is expensive and may fail.
  if (this.ade20k == null) {
    try {
      this.ade20k = new DeepLabAde20k();
    } catch (RuntimeException e) {
      LOGGER.error(LogHelper.getStackTrace(e));
      return;
    }
  }
  Tensor<UInt8> inputTensor = DeepLab
      .prepareImage(shot.getMostRepresentativeFrame().getImage().getBufferedImage());
  int[][] tmp;
  List<DeepLabLabel> ade20kLabels;
  try {
    tmp = this.ade20k.processImage(inputTensor);
    ade20kLabels = linearize(DeepLabLabel.fromAde20kId(tmp));
  } finally {
    // FIX: release the native input tensor even if processImage throws.
    inputTensor.close();
  }
  ArrayList<LinkedList<DeepLabLabel>> ade20kPartitions = GridPartitioner
      .partition(ade20kLabels, tmp.length, tmp[0].length, GRID_PARTITIONS, GRID_PARTITIONS);
  // Two floats per grid cell: the dominant label's 2-D embedding coordinates.
  float[] vector = new float[2 * GRID_PARTITIONS * GRID_PARTITIONS];
  for (int i = 0; i < GRID_PARTITIONS * GRID_PARTITIONS; ++i) {
    DeepLabLabel dominantLabel = DeepLabLabel.getDominantLabel(ade20kPartitions.get(i));
    vector[2 * i] = dominantLabel.getEmbeddX();
    vector[2 * i + 1] = dominantLabel.getEmbeddY();
  }
  persist(shot.getId(), new FloatVectorImpl(vector));
}