boofcv.io.image.ConvertBufferedImage Java Examples

The following examples show how to use boofcv.io.image.ConvertBufferedImage. They are drawn from several open-source projects; the source file and project are noted above each example.
Example #1
Source File: ContourHelper.java    From cineast with MIT License
/**
 * Applies a contour-detection algorithm to the provided image and returns a list of detected contours. First, the image
 * is converted to a binary image using a threshold algorithm (Otsu). Afterwards, blobs in the image are detected using
 * an 8-connect rule.
 *
 * This method provides the best results if the image is a black & white, i.e. effectively binary, image!
 * See {@link ContourHelper#segmentImageByColour(BufferedImage,float[])} to convert a coloured image to a binary image.
 *
 * @param image BufferedImage in which contours should be detected.
 * @return List of contours.
 */
public static List<Contour> getContours(BufferedImage image) {
    /* Draw a black frame around the image to make sure that all detected contours are internal contours. */
    BufferedImage resized = new BufferedImage(image.getWidth() + 4, image.getHeight() + 4, image.getType());
    Graphics g = resized.getGraphics();
    g.setColor(Color.BLACK);
    g.fillRect(0,0,resized.getWidth(),resized.getHeight());
    g.drawImage(image, 2,2, image.getWidth(), image.getHeight(), null);

    /* Convert the BufferedImage to a gray-scale image and prepare the binary and label images. */
    GrayF32 input = ConvertBufferedImage.convertFromSingle(resized, null, GrayF32.class);
    GrayU8 binary = new GrayU8(input.width,input.height);
    GrayS32 label = new GrayS32(input.width,input.height);

    /* Select a global threshold using Otsu's method and apply that threshold. */
    double threshold = GThresholdImageOps.computeOtsu(input, 0, 255);
    ThresholdImageOps.threshold(input, binary,(float)threshold,true);

    /* Remove small blobs through erosion and dilation. The null argument indicates that the method should internally
     * declare the work image it needs; this is less efficient, but easier to code. */
    GrayU8 filtered = BinaryImageOps.erode8(binary, 1, null);
    filtered = BinaryImageOps.dilate8(filtered, 1, null);

    /* Detect blobs inside the image using an 8-connect rule. */
    return BinaryImageOps.contour(filtered, ConnectRule.EIGHT, label);
}
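
A minimal usage sketch for this helper; the input file is hypothetical, and Contour is boofcv.alg.filter.binary.Contour:

BufferedImage image = ImageIO.read(new File("shapes.png")); // hypothetical input file
List<Contour> contours = ContourHelper.getContours(image);
for (Contour contour : contours) {
    /* external holds the outer boundary points; internal holds the boundaries of any holes. */
    System.out.println("Contour with " + contour.external.size() + " boundary points");
}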
 
Example #2
Source File: ImageDesc.java    From MtgDesktopCompanion with GNU General Public License v3.0
public ImageDesc(BufferedImage in, BufferedImage flipin)
{
	if(!AverageHash.isInitiated())
	{
		AverageHash.init(2, 2);
	}
	hash = AverageHash.avgHash(in,2,2);
	if(flipin != null)
	{
		flipped = AverageHash.avgHash(flipin,2,2);
	}
	int[] histogram = new int[256];
	int[] transform = new int[256];
	
	/* Equalize the gray-scale histogram to normalize contrast before computing the descriptor. */
	GrayU8 img = ConvertBufferedImage.convertFromSingle(in, null, GrayU8.class);
	GrayU8 norm = img.createSameShape();
	ImageStatistics.histogram(img,0,histogram);
	EnhanceImageOps.equalize(histogram, transform);
	EnhanceImageOps.applyTransform(img, transform, norm);
	GrayF32 normf = new GrayF32(img.width,img.height);
	ConvertImage.convert(norm, normf);
	desc.reset();
	describeImage(normf,desc);
}
 
Example #3
Source File: EdgeImg.java    From cineast with MIT License
public static List<Boolean> getEdgePixels(MultiImage img, List<Boolean> out) {
	LOGGER.traceEntry();
	if (out == null) {
		out = new ArrayList<Boolean>(img.getWidth() * img.getHeight());
	} else {
		out.clear();
	}
	
	BufferedImage withBackground = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_INT_RGB);
	Graphics g = withBackground.getGraphics();
	g.setColor(Color.white);
	g.fillRect(0, 0, img.getWidth(), img.getHeight());
	g.drawImage(img.getBufferedImage(), 0, 0, null);
	
	GrayU8 gray = ConvertBufferedImage.convertFrom(withBackground, (GrayU8) null);
	if(!isSolid(gray)){
		getCanny().process(gray, THRESHOLD_LOW, THRESHOLD_HIGH, gray);
	}

	for (int i = 0; i < gray.data.length; ++i) {
		out.add(gray.data[i] != 0);
	}
	LOGGER.traceExit();
	return out;
}
 
Example #4
Source File: EdgeImg.java    From cineast with MIT License
public static boolean[] getEdgePixels(MultiImage img, boolean[] out) {
	LOGGER.traceEntry();

	if (out == null || out.length != img.getWidth() * img.getHeight()) {
		out = new boolean[img.getWidth() * img.getHeight()];
	}

	GrayU8 gray = ConvertBufferedImage.convertFrom(img.getBufferedImage(), (GrayU8) null);

	if(!isSolid(gray)){
		getCanny().process(gray, THRESHOLD_LOW, THRESHOLD_HIGH, gray);
	}

	for (int i = 0; i < gray.data.length; ++i) {
		out[i] = (gray.data[i] != 0);
	}

	LOGGER.traceExit();
	return out;
}
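
Both overloads above might be exercised as follows; this is a hedged sketch, assuming a cineast MultiImage built via MultiImageFactory (used the same way in Example #9) and a hypothetical input file. Note that the null second argument must be cast to select an overload:

BufferedImage input = ImageIO.read(new File("frame.png")); // hypothetical path
MultiImage img = MultiImageFactory.newMultiImage(input);
List<Boolean> edgeList = EdgeImg.getEdgePixels(img, (List<Boolean>) null); // boxed variant
boolean[] edgeArray = EdgeImg.getEdgePixels(img, (boolean[]) null);        // primitive variant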
 
Example #5
Source File: EHD.java    From cineast with MIT License
protected static float[] process(MultiImage img, float[] hist) {
  GrayU8 gray = ConvertBufferedImage.convertFrom(img.getBufferedImage(), (GrayU8) null);
  int width = img.getWidth(), height = img.getHeight();
  for (int x = 0; x < 4; ++x) {
    for (int y = 0; y < 4; ++y) {
      GrayU8 subImage = gray
          .subimage(width * x / 4, height * y / 4, width * (x + 1) / 4, height * (y + 1) / 4,
              null);
      int count = 0;
      int[] tmp = new int[5];
      for (int xx = 0; xx < subImage.getWidth() - 1; xx += 2) {
        for (int yy = 0; yy < subImage.getHeight() - 1; yy += 2) {
          count++;
          int index = edgeType(
              subImage.unsafe_get(xx, yy),
              subImage.unsafe_get(xx + 1, yy),
              subImage.unsafe_get(xx, yy + 1),
              subImage.unsafe_get(xx + 1, yy + 1)
          );
          if (index > -1) {
            tmp[index]++;
          }
        }
      }
      int offset = (4 * x + y) * 5;
      for (int i = 0; i < 5; ++i) {
        hist[offset + i] += ((float) tmp[i]) / (float) count;
      }
    }
  }
  return hist;
}
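
Since the method splits the image into a 4x4 grid and counts five edge types per cell, the histogram needs 4*4*5 = 80 entries. A hedged sketch (process is protected, so the call is assumed to come from within EHD or a subclass; img is a hypothetical MultiImage):

float[] hist = new float[4 * 4 * 5]; // 16 sub-images x 5 edge types = 80 bins, zero-initialized
hist = process(img, hist);           // counts are accumulated into, and returned via, hist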
 
Example #6
Source File: Vision.java    From BotLibre with Eclipse Public License 1.0
/**
 * HSV stores color information in Hue and Saturation while intensity is in Value.  This computes a 2D histogram
 * from hue and saturation only, which makes it lighting independent.
 */
public double[] coupledHueSat(byte[] image) throws IOException {
	Planar<GrayF32> rgb = new Planar<GrayF32>(GrayF32.class,1,1,3);
	Planar<GrayF32> hsv = new Planar<GrayF32>(GrayF32.class,1,1,3);

	BufferedImage buffered = ImageIO.read(new ByteArrayInputStream(image));
	if (buffered == null) {
		throw new RuntimeException("Can't load image!");
	}

	rgb.reshape(buffered.getWidth(), buffered.getHeight());
	hsv.reshape(buffered.getWidth(), buffered.getHeight());

	ConvertBufferedImage.convertFrom(buffered, rgb, true);
	ColorHsv.rgbToHsv_F32(rgb, hsv);

	Planar<GrayF32> hs = hsv.partialSpectrum(0,1);

	// The number of bins is an important parameter.  Try adjusting it
	Histogram_F64 histogram = new Histogram_F64(12,12);
	histogram.setRange(0, 0, 2.0 * Math.PI); // range of hue is from 0 to 2PI
	histogram.setRange(1, 0, 1.0);		 // range of saturation is from 0 to 1

	// Compute the histogram
	GHistogramFeatureOps.histogram(hs,histogram);

	UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

	return histogram.value;
}
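
A hedged sketch of comparing two images with this method: since the histograms are L2-normalized, a plain Euclidean distance between the returned arrays is a reasonable similarity measure. Here vision is an instance of the surrounding class, and the byte arrays are hypothetical encoded images (e.g. JPEG bytes); coupledHueSat throws IOException if the bytes cannot be decoded:

double[] histA = vision.coupledHueSat(imageBytesA);
double[] histB = vision.coupledHueSat(imageBytesB);
double dist2 = 0;
for (int i = 0; i < histA.length; i++) {
    double d = histA[i] - histB[i];
    dist2 += d * d;
}
// Smaller dist2 means more similar hue/saturation distributions.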
 
Example #7
Source File: ContourBoundingBox.java    From MtgDesktopCompanion with GNU General Public License v3.0
public BufferedImage getTransformedImage(BufferedImage in, boolean flip)
{
       try
       {
           Planar<GrayF32> input = ConvertBufferedImage.convertFromPlanar(in, null, true, GrayF32.class);

           RemovePerspectiveDistortion<Planar<GrayF32>> removePerspective =
                   new RemovePerspectiveDistortion<>(300, 418, ImageType.pl(3, GrayF32.class));

           int start = longEdge();

           if(flip)
           {
               start = (start+2)%4;
           }

           if( !removePerspective.apply(input,
                   new Point2D_F64(corners[start].x,corners[start].y),
                   new Point2D_F64(corners[(start+1)%4].x,corners[(start+1)%4].y),
                   new Point2D_F64(corners[(start+2)%4].x,corners[(start+2)%4].y),
                   new Point2D_F64(corners[(start+3)%4].x,corners[(start+3)%4].y)
                                   ) ){
               return null;
           }
           Planar<GrayF32> output = removePerspective.getOutput();
           return ConvertBufferedImage.convertTo_F32(output,null,true);
       }
       catch(Exception e)
       {
           /* Conversion or perspective removal failed; signal failure by returning null. */
           return null;
       }
}
 
Example #8
Source File: EdgeList.java    From cineast with MIT License
public static List<EdgeContour> getEdgeList(MultiImage img){
	LOGGER.traceEntry();
	BufferedImage withBackground = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_INT_RGB);
	Graphics g = withBackground.getGraphics();
	g.setColor(Color.white);
	g.fillRect(0, 0, img.getWidth(), img.getHeight());
	g.drawImage(img.getBufferedImage(), 0, 0, null);
	GrayU8 gray = ConvertBufferedImage.convertFrom(withBackground, (GrayU8) null);
	CannyEdge<GrayU8, GrayS16> canny = getCanny();
	canny.process(gray, THRESHOLD_LOW, THRESHOLD_HIGH, null);
	List<EdgeContour> _return = canny.getContours();
	LOGGER.traceExit();
	return _return;
}
 
Example #9
Source File: EdgeImg.java    From cineast with MIT License
public static MultiImage getEdgeImg(MultiImage img) {
	LOGGER.traceEntry();

	GrayU8 gray = ConvertBufferedImage.convertFrom(img.getBufferedImage(), (GrayU8) null);
	if(!isSolid(gray)){
		getCanny().process(gray, THRESHOLD_LOW, THRESHOLD_HIGH, gray);
	}

	BufferedImage bout = VisualizeBinaryData.renderBinary(gray, false, null);

	return LOGGER.traceExit(MultiImageFactory.newMultiImage(bout));
}
 
Example #10
Source File: SURFHelper.java    From cineast with MIT License
/**
 * Returns SURF descriptors for an image using the settings above. Uses the BoofCV fast SURF algorithm,
 * which yields less stable features but operates a bit faster.
 *
 * @param image Image for which to obtain the SURF descriptors.
 * @return DetectDescribePoint object containing the detected SURF features and their descriptors.
 */
public static DetectDescribePoint<GrayF32, BrightFeature> getFastSurf(BufferedImage image) {
     /* Obtain raw SURF descriptors using the configuration above (FH-9 according to [1]). */
    GrayF32 gray = ConvertBufferedImage.convertFromSingle(image, null, GrayF32.class);
    ConfigFastHessian config = new ConfigFastHessian(0, 2, FH_MAX_FEATURES_PER_SCALE, FH_INITIAL_SAMPLE_SIZE, FH_INITIAL_SIZE, FH_NUMBER_SCALES_PER_OCTAVE, FH_NUMBER_OF_OCTAVES);
    DetectDescribePoint<GrayF32, BrightFeature> surf = FactoryDetectDescribe.surfFast(config, null, null, GrayF32.class);
    surf.detect(gray);
    return surf;
}
 
Example #11
Source File: SURFHelper.java    From cineast with MIT License
/**
 * Returns SURF descriptors for an image using the settings above. Uses the BoofCV stable SURF algorithm.
 *
 * @param image Image for which to obtain the SURF descriptors.
 * @return DetectDescribePoint object containing the detected SURF features and their descriptors.
 */
public static DetectDescribePoint<GrayF32, BrightFeature> getStableSurf(BufferedImage image) {
     /* Obtain raw SURF descriptors using the configuration above (FH-9 according to [1]). */
    GrayF32 gray = ConvertBufferedImage.convertFromSingle(image, null, GrayF32.class);
    ConfigFastHessian config = new ConfigFastHessian(0, 2, FH_MAX_FEATURES_PER_SCALE, FH_INITIAL_SAMPLE_SIZE, FH_INITIAL_SIZE, FH_NUMBER_SCALES_PER_OCTAVE, FH_NUMBER_OF_OCTAVES);
    DetectDescribePoint<GrayF32, BrightFeature> surf = FactoryDetectDescribe.surfStable(config, null, null, GrayF32.class);
    surf.detect(gray);
    return surf;
}
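
Either helper can be consumed the same way; a hedged sketch of iterating the detected features (image is a hypothetical BufferedImage, Point2D_F64 is georegression.struct.point.Point2D_F64):

DetectDescribePoint<GrayF32, BrightFeature> surf = SURFHelper.getFastSurf(image); // or getStableSurf(image)
for (int i = 0; i < surf.getNumberOfFeatures(); i++) {
    Point2D_F64 location = surf.getLocation(i);
    BrightFeature descriptor = surf.getDescription(i);
    /* descriptor.value holds the SURF vector; descriptor.white stores the sign of the Laplacian. */
}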
 
Example #12
Source File: ContourHelper.java    From cineast with MIT License
/**
 * Segments a colored image by turning all pixels that are close to the provided color to white.
 *
 * @param image The image that should be converted.
 * @param colorRgb The reference colour; pixels close to it are turned white.
 * @return Converted image where pixels close to the provided color are white and the others are black
 */
public static BufferedImage segmentImageByColour(BufferedImage image, float[] colorRgb) {
    /* Phase 1): Convert average RGB color to HSV. */
    final float[] avgHsvColor = new float[]{0.0f,0.0f,0.0f};
    ColorHsv.rgbToHsv(colorRgb[0], colorRgb[1], colorRgb[2], avgHsvColor);

    /* Phase 2a): Convert the input BufferedImage to a HSV image and extract hue and saturation bands, which are independent of intensity. */
    final Planar<GrayF32> input = ConvertBufferedImage.convertFromPlanar(image,null, true, GrayF32.class);
    final Planar<GrayF32> hsv = input.createSameShape();
    ColorHsv.rgbToHsv_F32(input,hsv);

    final GrayF32 H = hsv.getBand(0);
    final GrayF32 S = hsv.getBand(1);

    /* Phase 2b): Determine thresholds. */
    float maxDist2 = 0.4f*0.4f;
    float adjustUnits = (float)(Math.PI/2.0);

    /* Phase 3): For each pixel in the image, determine the distance to the average color. If the color is close, turn the pixel white. */
    final BufferedImage output = new BufferedImage(input.width,input.height,BufferedImage.TYPE_INT_RGB);
    for(int y = 0; y < hsv.height; y++) {
        for(int x = 0; x < hsv.width; x++) {
            // Hue is an angle in radians, so simple subtraction doesn't work
            float dh = UtilAngle.dist(H.unsafe_get(x,y),avgHsvColor[0]);
            float ds = (S.unsafe_get(x,y)-avgHsvColor[1])*adjustUnits;

            // this distance measure is a bit naive, but good enough to demonstrate the concept
            float dist2 = dh*dh + ds*ds;
            if( dist2 <= maxDist2 ) {
                output.setRGB(x,y,Color.WHITE.getRGB());
            }
        }
    }
    return output;
}
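
As the javadoc of getContours in Example #1 suggests, the two helpers can be chained; a hedged sketch with a hypothetical target colour:

float[] red = new float[]{255f, 0f, 0f}; // hypothetical reference colour (pure red, RGB)
BufferedImage segmented = ContourHelper.segmentImageByColour(image, red);
List<Contour> contours = ContourHelper.getContours(segmented);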
 
Example #13
Source File: RealTimeImageRecognition.java    From hazelcast-jet-demos with Apache License 2.0
/**
 * Classifies the given image using the pre-trained model and returns the best-scoring category together with its score.
 */
private static Entry<String, Double> classifyWithModel(ImageClassifierVggCifar10 classifier, BufferedImage image) {
    Planar<GrayF32> planar = new Planar<>(GrayF32.class, image.getWidth(), image.getHeight(), 3);
    ConvertBufferedImage.convertFromPlanar(image, planar, true, GrayF32.class);
    classifier.classify(planar);
    return classifier.getAllResults().stream()
                     .map(score -> entry(classifier.getCategories().get(score.category), score.score))
                     .max(Comparator.comparing(Entry::getValue)).get();
}
 
Example #14
Source File: PathList.java    From cineast with MIT License
public static LinkedList<Pair<Integer, ArrayList<AssociatedPair>>> getDensePaths(List<VideoFrame> videoFrames) {
    if (videoFrames.size() < 2) {
        return null;
    }

    PkltConfig configKlt = new PkltConfig(3, new int[]{1, 2, 4});
    configKlt.config.maxPerPixelError = 45;
    ImageGradient<GrayU8, GrayS16> gradient = FactoryDerivative.sobel(GrayU8.class, GrayS16.class);
    PyramidDiscrete<GrayU8> pyramidForward = FactoryPyramid.discreteGaussian(configKlt.pyramidScaling, -1, 2, true, ImageType.single(GrayU8.class));
    PyramidDiscrete<GrayU8> pyramidBackward = FactoryPyramid.discreteGaussian(configKlt.pyramidScaling, -1, 2, true, ImageType.single(GrayU8.class));
    PyramidKltTracker<GrayU8, GrayS16> trackerForward = FactoryTrackerAlg.kltPyramid(configKlt.config, GrayU8.class, null);
    PyramidKltTracker<GrayU8, GrayS16> trackerBackward = FactoryTrackerAlg.kltPyramid(configKlt.config, GrayU8.class, null);

    GrayS16[] derivX = null;
    GrayS16[] derivY = null;

    LinkedList<PyramidKltFeature> tracks = new LinkedList<PyramidKltFeature>();
    LinkedList<Pair<Integer, ArrayList<AssociatedPair>>> paths = new LinkedList<Pair<Integer, ArrayList<AssociatedPair>>>();

    GrayU8 gray = null;
    int frameIdx = 0;
    int cnt = 0;
    for (VideoFrame videoFrame : videoFrames) {
        ++frameIdx;

        if (cnt >= frameInterval) {
            cnt = 0;
            continue;
        }
        cnt += 1;

        gray = ConvertBufferedImage.convertFrom(videoFrame.getImage().getBufferedImage(), gray);
        ArrayList<AssociatedPair> tracksPairs = new ArrayList<AssociatedPair>();

        /* frameIdx was incremented at the top of the loop, so the first frame has index 1 and only seeds the sampling. */
        if (frameIdx == 1) {
            tracks = denseSampling(gray, derivX, derivY, samplingInterval, configKlt, gradient, pyramidBackward, trackerBackward);
        } else {
            tracking(gray, derivX, derivY, tracks, tracksPairs, gradient, pyramidForward, pyramidBackward, trackerForward, trackerBackward);
            tracks = denseSampling(gray, derivX, derivY, samplingInterval, configKlt, gradient, pyramidBackward, trackerBackward);
        }

        paths.add(new Pair<Integer, ArrayList<AssociatedPair>>(frameIdx, tracksPairs));
    }
    return paths;
}
 
Example #15
Source File: PathList.java    From cineast with MIT License
public static void showBinaryImage(GrayU8 image) {
    PixelMath.multiply(image, 255, image);
    BufferedImage out = ConvertBufferedImage.convertTo(image, null);
    ShowImages.showWindow(out, "Output");
}
 
Example #16
Source File: MaskGenerator.java    From cineast with MIT License
public static void showBinaryImage(GrayU8 image){
	PixelMath.multiply(image,255,image);
	BufferedImage out = ConvertBufferedImage.convertTo(image,null);
	ShowImages.showWindow(out,"Output");
}
 
Example #17
Source File: HOGHelper.java    From cineast with MIT License
/**
 * Returns HOG descriptors for an image using the provided settings.
 *
 * @param image Image for which to obtain the HOG descriptors.
 * @param config ConfigDenseHoG object that specifies the parameters for the HOG algorithm.
 * @return DescribeImageDense object containing the HOG descriptor.
 */
public static DescribeImageDense<GrayU8,TupleDesc_F64> getHOGDescriptors(BufferedImage image, ConfigDenseHoG config) {
    GrayU8 gray = ConvertBufferedImage.convertFromSingle(image, null, GrayU8.class);
    DescribeImageDense<GrayU8,TupleDesc_F64> desc = FactoryDescribeImageDense.hog(config, ImageType.single(GrayU8.class));
    desc.process(gray);
    return desc;
}
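
A hedged usage sketch: construct a default ConfigDenseHoG (adjust its public fields as needed), then read the dense descriptors and their grid locations off the returned object (image is a hypothetical BufferedImage):

ConfigDenseHoG config = new ConfigDenseHoG(); // default HOG parameters
DescribeImageDense<GrayU8, TupleDesc_F64> hog = HOGHelper.getHOGDescriptors(image, config);
List<TupleDesc_F64> descriptors = hog.getDescriptions(); // one HOG vector per sample point
List<Point2D_I32> locations = hog.getLocations();        // where each descriptor was computed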