boofcv.struct.image.GrayF32 Java Examples

The following examples show how to use boofcv.struct.image.GrayF32. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
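
The examples below all rely on a handful of core operations: constructing a GrayF32, reading and writing pixels, and converting from a BufferedImage. As a warm-up, here is a minimal self-contained sketch of those operations; the image dimensions and file path are placeholders.

import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;

import boofcv.io.image.ConvertBufferedImage;
import boofcv.struct.image.GrayF32;

public class GrayF32Basics {
    public static void main(String[] args) throws Exception {
        /* Create an empty 640x480 single-band 32-bit float image. */
        GrayF32 image = new GrayF32(640, 480);

        /* Pixel access is (x, y); set() writes, get() reads. */
        image.set(10, 20, 1.5f);
        float value = image.get(10, 20);

        /* Convert a BufferedImage into a GrayF32; passing null lets BoofCV allocate the output. */
        BufferedImage buffered = ImageIO.read(new File("input.png")); /* placeholder path */
        GrayF32 gray = ConvertBufferedImage.convertFromSingle(buffered, null, GrayF32.class);

        System.out.println(value + " | " + gray.width + "x" + gray.height);
    }
}
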
Example #1
Source File: ContourHelper.java    From cineast with MIT License
/**
 * Applies a contour-detection algorithm on the provided image and returns a list of detected contours. First, the image
 * is converted to a binary image using Otsu's thresholding method. Afterwards, blobs in the image are detected using
 * an 8-connect rule.
 *
 * This method provides the best results if the input is effectively a black-and-white, i.e. binary, image.
 * See {@link ContourHelper#segmentImageByColour(BufferedImage,float[])} to convert a coloured image to a binary image.
 *
 * @param image BufferedImage in which contours should be detected.
 * @return List of contours.
 */
public static List<Contour> getContours(BufferedImage image) {
    /* Draw a black frame around the image to make sure that all detected contours are internal contours. */
    BufferedImage resized = new BufferedImage(image.getWidth() + 4, image.getHeight() + 4, image.getType());
    Graphics g = resized.getGraphics();
    g.setColor(Color.BLACK);
    g.fillRect(0,0,resized.getWidth(),resized.getHeight());
    g.drawImage(image, 2,2, image.getWidth(), image.getHeight(), null);

    /* Convert the BufferedImage to a gray-scale image and prepare the binary and label images. */
    GrayF32 input = ConvertBufferedImage.convertFromSingle(resized, null, GrayF32.class);
    GrayU8 binary = new GrayU8(input.width,input.height);
    GrayS32 label = new GrayS32(input.width,input.height);

    /* Select a global threshold using Otsu's method and apply that threshold. */
    double threshold = GThresholdImageOps.computeOtsu(input, 0, 255);
    ThresholdImageOps.threshold(input, binary,(float)threshold,true);

    /* Remove small blobs through erosion and dilation. The null argument indicates that the method should
     * internally declare the work image it needs; this is less efficient, but easier to code. */
    GrayU8 filtered = BinaryImageOps.erode8(binary, 1, null);
    filtered = BinaryImageOps.dilate8(filtered, 1, null);

    /* Detect blobs inside the image using an 8-connect rule. */
    return BinaryImageOps.contour(filtered, ConnectRule.EIGHT, label);
}
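
A hypothetical call site for the method above; the file path is a placeholder and the loop simply prints the size of each detected contour.

BufferedImage image = ImageIO.read(new File("shapes.png")); /* placeholder path */
List<Contour> contours = ContourHelper.getContours(image);
for (Contour contour : contours) {
    /* Each Contour exposes an external boundary and a list of internal holes. */
    System.out.println("external points: " + contour.external.size() + ", holes: " + contour.internal.size());
}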
 
Example #2
Source File: ImageDesc.java    From MtgDesktopCompanion with GNU General Public License v3.0
public ImageDesc(BufferedImage in, BufferedImage flipin)
{
	if(!AverageHash.isInitiated())
	{
		AverageHash.init(2, 2);
	}
	hash = AverageHash.avgHash(in,2,2);
	if(flipin != null)
	{
		flipped = AverageHash.avgHash(flipin,2,2);
	}
	int[] histogram = new int[256];
	int[] transform = new int[256];
	
	GrayU8 img = ConvertBufferedImage.convertFromSingle(in, null, GrayU8.class);
	GrayU8 norm = img.createSameShape();
	ImageStatistics.histogram(img,0,histogram);
	EnhanceImageOps.equalize(histogram, transform);
	EnhanceImageOps.applyTransform(img, transform, norm);
	GrayF32 normf = new GrayF32(img.width,img.height);
	ConvertImage.convert(norm, normf);
	desc.reset();
	describeImage(normf,desc);
}
 
Example #3
Source File: ImageDesc.java    From MtgDesktopCompanion with GNU General Public License v3.0
public static FastQueue<BrightFeature> readDescIn(ByteBuffer buf,DetectDescribePoint<GrayF32,BrightFeature> ddp)
{
	FastQueue<BrightFeature> d = UtilFeature.createQueue(ddp,0);
	int dts = buf.getInt();
	for(int i=0;i<dts;i++)
	{
		int vs = buf.getInt();
		BrightFeature f = new BrightFeature(vs);
		double[] vls = new double[vs];
		for(int j=0;j<vs;j++)
		{
			vls[j]=buf.getDouble();
		}
		f.set(vls);
		d.add(f);
	}
	return d;
}
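
The reader above implies a simple serialization format: a feature count, followed by a length-prefixed array of doubles per feature. A matching writer, sketched here as a hypothetical counterpart (it is not part of the original project), could look like this:

public static void writeDescTo(ByteBuffer buf, FastQueue<BrightFeature> descs)
{
	buf.putInt(descs.size());
	for(int i=0;i<descs.size();i++)
	{
		BrightFeature f = descs.get(i);
		buf.putInt(f.value.length);
		for(int j=0;j<f.value.length;j++)
		{
			buf.putDouble(f.value[j]);
		}
	}
}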
 
Example #4
Source File: RealTimeImageRecognition.java    From hazelcast-jet-demos with Apache License 2.0
/**
 * Performs the actual classification of the image using the pre-trained model.
 */
private static Entry<String, Double> classifyWithModel(ImageClassifierVggCifar10 classifier, BufferedImage image) {
    Planar<GrayF32> planar = new Planar<>(GrayF32.class, image.getWidth(), image.getHeight(), 3);
    ConvertBufferedImage.convertFromPlanar(image, planar, true, GrayF32.class);
    classifier.classify(planar);
    return classifier.getAllResults().stream()
                     .map(score -> entry(classifier.getCategories().get(score.category), score.score))
                     .max(Comparator.comparing(Entry::getValue)).get();
}
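
A hedged usage sketch for the method above, assuming a classifier instance has already been constructed and its model loaded elsewhere; the file path is a placeholder.

BufferedImage image = ImageIO.read(new File("frame.png")); /* placeholder path */
Entry<String, Double> best = classifyWithModel(classifier, image);
System.out.println("category: " + best.getKey() + ", score: " + best.getValue());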
 
Example #5
Source File: SURFCodebookGenerator.java    From cineast with MIT License
/**
 * Processes the content (i.e. creates descriptors) and adds the generated
 * descriptors to the cluster.
 *
 * @param content The image to process.
 */
@Override
protected void process(BufferedImage content) {
    DetectDescribePoint<GrayF32, BrightFeature> surf = SURFHelper.getFastSurf(content);
    for (int i=0;i<surf.getNumberOfFeatures();i++) {
        this.cluster.addReference(surf.getDescription(i));
    }
}
 
Example #6
Source File: SURF.java    From cineast with MIT License
@Override
public void processSegment(SegmentContainer shot) {
    if (shot.getMostRepresentativeFrame() == VideoFrame.EMPTY_VIDEO_FRAME) {
        return;
    }
    DetectDescribePoint<GrayF32, BrightFeature> descriptors = SURFHelper.getStableSurf(shot.getMostRepresentativeFrame().getImage().getBufferedImage());
    if (descriptors != null && descriptors.getNumberOfFeatures() > 0) {
      float[] histogram_f = this.histogram(true, descriptors);
      this.persist(shot.getId(), new FloatVectorImpl(histogram_f));
    } else {
      LOGGER.warn("No SURF feature could be extracted for segment {}. This is not necessarily an error!", shot.getId());
    }
}
 
Example #7
Source File: SURF.java    From cineast with MIT License
/**
* This method represents the first step that is executed when processing a query. The associated SegmentContainer is
* examined and feature vectors are generated. The generated vectors are returned by this method together with an
* optional weight-vector.
* <p>
* <strong>Important: </strong> The weight-vector must have the same size as the feature-vectors returned by the method.
*
* @param sc SegmentContainer that was submitted to the feature module.
* @param qc A QueryConfig object that contains query-related configuration parameters. Can still be edited.
* @return List of feature vectors for lookup.
*/
@Override
protected List<float[]> preprocessQuery(SegmentContainer sc, ReadableQueryConfig qc) {
    /* Prepare feature pair. */
    List<float[]> features = new ArrayList<>(1);

    /* Extract features. */
    DetectDescribePoint<GrayF32, BrightFeature> descriptors = SURFHelper.getStableSurf(sc.getAvgImg().getBufferedImage());
    if (descriptors != null && descriptors.getNumberOfFeatures() > 0) {
        features.add(this.histogram(true, descriptors));
    }

    return features;
}
 
Example #8
Source File: ContourHelper.java    From cineast with MIT License
/**
 * Segments a colored image by turning all pixels that are close to the provided color to white.
 *
 * @param image The image that should be converted.
 * @param colorRgb The colour that should be turned to white.
 * @return Converted image where pixels close to the provided color are white and the others are black
 */
public static BufferedImage segmentImageByColour(BufferedImage image, float[] colorRgb) {
    /* Phase 1): Convert average RGB color to HSV. */
    final float[] avgHsvColor = new float[]{0.0f,0.0f,0.0f};
    ColorHsv.rgbToHsv(colorRgb[0], colorRgb[1], colorRgb[2], avgHsvColor);

    /* Phase 2a): Convert the input BufferedImage to a HSV image and extract hue and saturation bands, which are independent of intensity. */
    final Planar<GrayF32> input = ConvertBufferedImage.convertFromPlanar(image,null, true, GrayF32.class);
    final Planar<GrayF32> hsv = input.createSameShape();
    ColorHsv.rgbToHsv_F32(input,hsv);

    final GrayF32 H = hsv.getBand(0);
    final GrayF32 S = hsv.getBand(1);

    /* Phase 2b): Determine thresholds. */
    float maxDist2 = 0.4f*0.4f;
    float adjustUnits = (float)(Math.PI/2.0);

    /* Phase 3): For each pixel in the image, determine the distance to the average color. If the color is close, turn the pixel white. */
    final BufferedImage output = new BufferedImage(input.width,input.height,BufferedImage.TYPE_INT_RGB);
    for(int y = 0; y < hsv.height; y++) {
        for(int x = 0; x < hsv.width; x++) {
            // Hue is an angle in radians, so simple subtraction doesn't work
            float dh = UtilAngle.dist(H.unsafe_get(x,y),avgHsvColor[0]);
            float ds = (S.unsafe_get(x,y)-avgHsvColor[1])*adjustUnits;

            // this distance measure is a bit naive, but good enough to demonstrate the concept
            float dist2 = dh*dh + ds*ds;
            if( dist2 <= maxDist2 ) {
                output.setRGB(x,y,Color.WHITE.getRGB());
            }
        }
    }
    return output;
}
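
As the JavaDoc of getContours() notes, this method pairs naturally with Example #1: segment by colour first, then detect contours in the resulting binary image. A hedged sketch of the combined pipeline (file path and target colour are placeholders):

BufferedImage image = ImageIO.read(new File("cards.png"));  /* placeholder path */
float[] targetRgb = new float[]{255.0f, 0.0f, 0.0f};        /* placeholder colour (red) */
BufferedImage binary = ContourHelper.segmentImageByColour(image, targetRgb);
List<Contour> contours = ContourHelper.getContours(binary);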
 
Example #9
Source File: SURFHelper.java    From cineast with MIT License
/**
 * Returns SURF descriptors for an image using the settings above. Uses the BoofCV stable SURF algorithm.
 *
 * @param image Image for which to obtain the SURF descriptors.
 * @return DetectDescribePoint object holding the detected and described SURF features.
 */
public static DetectDescribePoint<GrayF32, BrightFeature> getStableSurf(BufferedImage image) {
     /* Obtain raw SURF descriptors using the configuration above (FH-9 according to [1]). */
    GrayF32 gray = ConvertBufferedImage.convertFromSingle(image, null, GrayF32.class);
    ConfigFastHessian config = new ConfigFastHessian(0, 2, FH_MAX_FEATURES_PER_SCALE, FH_INITIAL_SAMPLE_SIZE, FH_INITIAL_SIZE, FH_NUMBER_SCALES_PER_OCTAVE, FH_NUMBER_OF_OCTAVES);
    DetectDescribePoint<GrayF32, BrightFeature> surf = FactoryDetectDescribe.surfStable(config, null, null, GrayF32.class);
    surf.detect(gray);
    return surf;
}
 
Example #10
Source File: SURFHelper.java    From cineast with MIT License
/**
 * Returns SURF descriptors for an image using the settings above. Uses the BoofCV fast SURF algorithm,
 * which yields fewer features but operates a bit faster.
 *
 * @param image Image for which to obtain the SURF descriptors.
 * @return DetectDescribePoint object holding the detected and described SURF features.
 */
public static DetectDescribePoint<GrayF32, BrightFeature> getFastSurf(BufferedImage image) {
     /* Obtain raw SURF descriptors using the configuration above (FH-9 according to [1]). */
    GrayF32 gray = ConvertBufferedImage.convertFromSingle(image, null, GrayF32.class);
    ConfigFastHessian config = new ConfigFastHessian(0, 2, FH_MAX_FEATURES_PER_SCALE, FH_INITIAL_SAMPLE_SIZE, FH_INITIAL_SIZE, FH_NUMBER_SCALES_PER_OCTAVE, FH_NUMBER_OF_OCTAVES);
    DetectDescribePoint<GrayF32, BrightFeature> surf = FactoryDetectDescribe.surfFast(config, null, null, GrayF32.class);
    surf.detect(gray);
    return surf;
}
 
Example #11
Source File: ContourBoundingBox.java    From MtgDesktopCompanion with GNU General Public License v3.0
public BufferedImage getTransformedImage(BufferedImage in, boolean flip)
{
       try
       {
           Planar<GrayF32> input = ConvertBufferedImage.convertFromPlanar(in, null, true, GrayF32.class);

           RemovePerspectiveDistortion<Planar<GrayF32>> removePerspective =
                   new RemovePerspectiveDistortion<>(300, 418, ImageType.pl(3, GrayF32.class));

           int start = longEdge();

           if(flip)
           {
               start = (start+2)%4;
           }

           if( !removePerspective.apply(input,
                   new Point2D_F64(corners[start].x,corners[start].y),
                   new Point2D_F64(corners[(start+1)%4].x,corners[(start+1)%4].y),
                   new Point2D_F64(corners[(start+2)%4].x,corners[(start+2)%4].y),
                   new Point2D_F64(corners[(start+3)%4].x,corners[(start+3)%4].y)
                                   ) ){
               return null;
           }
           Planar<GrayF32> output = removePerspective.getOutput();
           return ConvertBufferedImage.convertTo_F32(output,null,true);
       }
       catch(Exception e)
       {
           return null;
       }
}
 
Example #12
Source File: Vision.java    From BotLibre with Eclipse Public License 1.0
/**
 * HSV stores color information in Hue and Saturation while intensity is in Value.  This computes a 2D histogram
 * from hue and saturation only, which makes it lighting independent.
 */
public double[] coupledHueSat(byte[] image) throws IOException {
	Planar<GrayF32> rgb = new Planar<GrayF32>(GrayF32.class,1,1,3);
	Planar<GrayF32> hsv = new Planar<GrayF32>(GrayF32.class,1,1,3);

	BufferedImage buffered = ImageIO.read(new ByteArrayInputStream(image));
	if (buffered == null) {
		throw new RuntimeException("Can't load image!");
	}

	rgb.reshape(buffered.getWidth(), buffered.getHeight());
	hsv.reshape(buffered.getWidth(), buffered.getHeight());

	ConvertBufferedImage.convertFrom(buffered, rgb, true);
	ColorHsv.rgbToHsv_F32(rgb, hsv);

	Planar<GrayF32> hs = hsv.partialSpectrum(0,1);

	// The number of bins is an important parameter.  Try adjusting it
	Histogram_F64 histogram = new Histogram_F64(12,12);
	histogram.setRange(0, 0, 2.0 * Math.PI); // range of hue is from 0 to 2PI
	histogram.setRange(1, 0, 1.0);		 // range of saturation is from 0 to 1

	// Compute the histogram
	GHistogramFeatureOps.histogram(hs,histogram);

	UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

	return histogram.value;
}
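
A hypothetical way to consume the returned histograms: compute them for two images and compare with a plain Euclidean distance. The distance computation below is illustrative and not part of the original class.

double[] histogramA = vision.coupledHueSat(imageBytesA); // image bytes obtained elsewhere
double[] histogramB = vision.coupledHueSat(imageBytesB);

double sum = 0;
for (int i = 0; i < histogramA.length; i++) {
	double d = histogramA[i] - histogramB[i];
	sum += d * d;
}
double distance = Math.sqrt(sum); // smaller value means more similar hue/saturation content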
 
Example #13
Source File: ImageDesc.java    From MtgDesktopCompanion with GNU General Public License v3.0
private void describeImage(GrayF32 input, FastQueue<BrightFeature> descs) {
	detDesc.detect(input);
	for (int i = 0; i < detDesc.getNumberOfFeatures(); i++) {
		descs.grow().setTo(detDesc.getDescription(i));
	}
}