Java Code Examples for org.opencv.imgproc.Imgproc#dilate()
The following examples show how to use
org.opencv.imgproc.Imgproc#dilate().
You can vote up the ones you like or vote down the ones you don't like,
and you can navigate to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: MainActivity.java From Android_OCV_Movement_Detection with MIT License | 6 votes |
// Per-frame camera callback: detects moving objects with background
// subtraction and returns the RGB frame with their contours drawn in red.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    // Drop contours from the previous frame.
    contours.clear();
    // gray frame because it requires less resource to process
    mGray = inputFrame.gray();
    // this function converts the gray frame into the correct RGB format
    // for the BackgroundSubtractorMOG apply function
    Imgproc.cvtColor(mGray, mRgb, Imgproc.COLOR_GRAY2RGB);
    // apply detects objects moving and produces a foreground mask;
    // the lRate updates dynamically dependent upon seekbar changes
    sub.apply(mRgb, mFGMask, lRate);
    // erode and dilate (with default 3x3 kernels) remove noise from the
    // foreground mask
    Imgproc.erode(mFGMask, mFGMask, new Mat());
    Imgproc.dilate(mFGMask, mFGMask, new Mat());
    // drawing contours around the objects by first calling findContours and
    // then calling drawContours:
    // RETR_EXTERNAL retrieves only external contours
    // CHAIN_APPROX_NONE detects all pixels for each contour
    Imgproc.findContours(mFGMask, contours, new Mat(), Imgproc.RETR_EXTERNAL , Imgproc.CHAIN_APPROX_NONE);
    // draws all the contours in red with thickness of 2
    Imgproc.drawContours(mRgb, contours, -1, new Scalar(255, 0, 0), 2);
    return mRgb;
}
Example 2
Source File: MainActivity.java From MOAAP with MIT License | 6 votes |
@Override protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) { //Put it there, just in case:) super.onActivityResult(requestCode, resultCode, imageReturnedIntent); switch(requestCode) { case SELECT_PHOTO: if(resultCode == RESULT_OK && read_external_storage_granted){ try { final Uri imageUri = imageReturnedIntent.getData(); final InputStream imageStream = getContentResolver().openInputStream(imageUri); final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream); src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4); Utils.bitmapToMat(selectedImage, src); src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1); switch (ACTION_MODE) { case HomeActivity.GAUSSIAN_BLUR: Imgproc.GaussianBlur(src, src, new Size(9, 9), 0); break; case HomeActivity.MEAN_BLUR: Imgproc.blur(src, src, new Size(9, 9)); break; case HomeActivity.MEDIAN_BLUR: Imgproc.medianBlur(src, src, 9); break; case HomeActivity.SHARPEN: Mat kernel = new Mat(3, 3, CvType.CV_16SC1); //int[] values = {0, -1, 0, -1, 5, -1, 0, -1, 0}; Log.d("imageType", CvType.typeToString(src.type()) + ""); kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0); Imgproc.filter2D(src, src, src_gray.depth(), kernel); break; case HomeActivity.DILATE: Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY); Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY); Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3)); Imgproc.dilate(src_gray, src_gray, kernelDilate); Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4); break; case HomeActivity.ERODE: Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY); Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY); Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5)); Imgproc.erode(src_gray, src_gray, kernelErode); Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4); break; 
case HomeActivity.THRESHOLD: Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY); Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY); Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4); break; case HomeActivity.ADAPTIVE_THRESHOLD: Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY); Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0); Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4); break; } Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888); Log.i("imageType", CvType.typeToString(src.type()) + ""); Utils.matToBitmap(src, processedImage); ivImage.setImageBitmap(selectedImage); ivImageProcessed.setImageBitmap(processedImage); Log.i("process", "process done"); } catch (FileNotFoundException e) { e.printStackTrace(); } } break; } }
Example 3
Source File: OpenCVoperation.java From Human-hair-detection with Apache License 2.0 | 6 votes |
/**
 * Segments skin-colored pixels out of the grab-cut result.
 * The output ({@code matrix3_skindetection}) starts as a solid red canvas;
 * pixels matching an HSV skin range are copied over it, and the result is
 * written to {@code resultDirectory + skinDetectionOutput}.
 */
public void skinSegmentation() {
    matrix3_skindetection = new Mat(matrix2_grabcut.size(), matrix2_grabcut.type());
    matrix3_skindetection.setTo(new Scalar(0, 0, 255));
    Mat mask = new Mat();
    Mat hsv = new Mat();
    // Empirical skin-tone window: hue 0-20, saturation 48-255, value 80-255.
    Scalar skinLow = new Scalar(0, 48, 80);
    Scalar skinHigh = new Scalar(20, 255, 255);
    Imgproc.cvtColor(matrix2_grabcut, hsv, Imgproc.COLOR_BGR2HSV);
    Core.inRange(hsv, skinLow, skinHigh, mask);
    // Morphological opening (erode then dilate) removes speckle noise,
    // followed by a light blur to soften mask edges.
    Mat ellipse = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(11, 11));
    Imgproc.erode(mask, mask, ellipse);
    Imgproc.dilate(mask, mask, ellipse);
    Imgproc.GaussianBlur(mask, mask, new Size(3, 3), 0);
    // Copy only the masked (skin) pixels onto the red canvas, then persist it.
    Core.bitwise_and(matrix2_grabcut, matrix2_grabcut, matrix3_skindetection, mask);
    Imgcodecs.imwrite(resultDirectory + skinDetectionOutput, matrix3_skindetection);
}
Example 4
Source File: Morphology.java From go-bees with GNU General Public License v3.0 | 6 votes |
/**
 * Morphological cleanup of a (bee-detection) frame: erode away thin
 * structures, dilate to merge blobs, then erode back toward original size.
 * Returns a new Mat; the input frame is not modified. Returns null on an
 * empty input frame.
 */
@Override
public Mat process(@NonNull Mat frame) {
    if (frame.empty()) {
        // NOTE(review): single-argument Log.e does not match android.util.Log —
        // presumably a project-local logger; confirm.
        Log.e("Invalid input frame.");
        return null;
    }
    // Work on a copy so the caller's frame stays untouched.
    Mat tmp = frame.clone();
    // Step 1: erode to remove legs
    Imgproc.erode(tmp, tmp, KERNEL3);
    // Step 2: dilate to join bodies and heads
    Imgproc.dilate(tmp, tmp, KERNEL2);
    for (int i = 0; i < REPETITIONS_DILATE; i++) {
        Imgproc.dilate(tmp, tmp, kernelDilate);
    }
    // Step 3: erode to recover original size
    Imgproc.erode(tmp, tmp, KERNEL1);
    for (int i = 0; i < REPETITIONS_ERODE; i++) {
        Imgproc.erode(tmp, tmp, kernelErode);
    }
    return tmp;
}
Example 5
Source File: BackgroundSubtractor.java From opencv-fun with GNU Affero General Public License v3.0 | 5 votes |
public Mat createMask(Mat camera) { // copy as we are going to destruct those images maybe Mat camBlur= camera.clone(); Mat backgroundBlur = calib.getBackgroundImage().clone(); // remove noise Imgproc.blur(backgroundBlur, backgroundBlur, new Size(calib.getBlurSize(), calib.getBlurSize())); Imgproc.blur(camBlur, camBlur, new Size(calib.getBlurSize(), calib.getBlurSize())); // take abs diff and create binary image in all 3 channels Mat diff = new Mat(); Core.absdiff(backgroundBlur, camBlur, diff); Imgproc.threshold(diff, diff, calib.getSubtractionThreshold(), 255, Imgproc.THRESH_BINARY); // extract color channels and merge them to single bitmask Mat r = ColorSpace.getChannel(diff, 2); Mat g = ColorSpace.getChannel(diff, 1); Mat b = ColorSpace.getChannel(diff, 0); Mat mask = r.clone(); Core.add(mask, g, mask); Core.add(mask, b, mask); // dilate to remove some black gaps within balls Imgproc.dilate(mask, mask, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(calib.getMorphSize(), calib.getMorphSize()))); return mask; }
Example 6
Source File: Finder.java From SikuliNG with MIT License | 5 votes |
/**
 * Extracts rectangular elements from a picture: detect edges, draw their
 * contours, thicken the drawing with two dilation passes so fragments merge,
 * then re-extract contours and convert them to rectangles.
 */
public static List<Element> getElements(Picture picture, boolean external) {
    Mat edges = detectEdges(picture);
    Mat drawn = drawContours(getContours(edges, external), edges);
    // Two dilation passes with a default kernel to join nearby strokes.
    for (int pass = 0; pass < 2; pass++) {
        Imgproc.dilate(drawn, drawn, Element.getNewMat());
    }
    return contoursToRectangle(getContours(drawn, external));
}
Example 7
Source File: Finder.java From SikuliNG with MIT License | 5 votes |
/**
 * Compares two images and returns the regions where {@code mChanged} differs
 * from {@code base}, as a list of bounding elements. Returns an empty list
 * when the total difference is below the image-level threshold.
 */
public static List<Element> detectChanges(Mat base, Mat mChanged) {
    // Per-pixel and whole-image sensitivity thresholds.
    int PIXEL_DIFF_THRESHOLD = 3;
    int IMAGE_DIFF_THRESHOLD = 5;
    Mat mBaseGray = Element.getNewMat();
    Mat mChangedGray = Element.getNewMat();
    Mat mDiffAbs = Element.getNewMat();
    Mat mDiffTresh = Element.getNewMat();
    Mat mChanges = Element.getNewMat();
    List<Element> rectangles = new ArrayList<>();
    Imgproc.cvtColor(base, mBaseGray, toGray);
    Imgproc.cvtColor(mChanged, mChangedGray, toGray);
    Core.absdiff(mBaseGray, mChangedGray, mDiffAbs);
    // Zero out sub-threshold pixel differences, then count what survives.
    Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);
    if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
        // Binarize, thicken, and morphologically close so that nearby changed
        // pixels merge into contiguous blobs.
        Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
        Imgproc.dilate(mDiffAbs, mDiffAbs, Element.getNewMat());
        Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
        Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);
        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Mat mHierarchy = Element.getNewMat();
        Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        rectangles = contoursToRectangle(contours);
        // Subtracting mDiffAbs from itself zeroes mChanges (debug canvas), then
        // the contours are drawn onto it in white. Only used by the commented-out
        // logShow below; the drawing has no effect on the returned rectangles.
        Core.subtract(mDiffAbs, mDiffAbs, mChanges);
        Imgproc.drawContours(mChanges, contours, -1, new Scalar(255));
        //logShow(mDiffAbs);
    }
    return rectangles;
}
Example 8
Source File: MovementDetectionProcessor.java From Camdroid with Apache License 2.0 | 5 votes |
/**
 * One movement-detection step: equalize the grayscale frame, feed it to the
 * MOG background subtractor, dilate the resulting foreground mask, and draw a
 * rectangle on {@code out} around every contour whose height is within the
 * configured object-size band.
 *
 * Fix: the original initialized the contour iterator twice in a row
 * ({@code each = contours.iterator();} repeated) — the redundant iterator
 * dance is replaced with an enhanced for loop.
 */
protected void execute() {
    out = gray();
    // Equalize to reduce the effect of lighting changes before subtraction.
    Imgproc.equalizeHist(out, out);
    // mog is shared; the learning rate is derived from the UI seek bar.
    synchronized (mog) {
        mog.apply(out, this.mask, (double) (-10 + learning_rate) / 10);
    }
    // Dilate the foreground mask to merge fragmented blobs.
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE, new Size(3, 3));
    Imgproc.dilate(mask, mask, kernel);
    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(this.mask, contours, new Mat(), Imgproc.RETR_EXTERNAL,
            Imgproc.CHAIN_APPROX_SIMPLE);
    // Accept only objects whose height is within [min%, max%] of frame height.
    double maxheight = object_max_size * this.in.height() / 100;
    double minheight = object_min_size * this.in.height() / 100;
    for (MatOfPoint contour : contours) {
        Rect rect = Imgproc.boundingRect(contour);
        if (rect.height > minheight && rect.height < maxheight) {
            Imgproc.rectangle(out, rect.tl(), rect.br(), new Scalar(255, 0, 0), 1);
        }
    }
}
Example 9
Source File: ColorBlobDetector.java From OpenCV-AndroidSamples with MIT License | 5 votes |
public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask); Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Find max contour area double maxArea = 0; Iterator<MatOfPoint> each = contours.iterator(); while (each.hasNext()) { MatOfPoint wrapper = each.next(); double area = Imgproc.contourArea(wrapper); if (area > maxArea) maxArea = area; } // Filter contours by area and resize to fit the original image size mContours.clear(); each = contours.iterator(); while (each.hasNext()) { MatOfPoint contour = each.next(); if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) { Core.multiply(contour, new Scalar(4,4), contour); mContours.add(contour); } } }
Example 10
Source File: Filter.java From FTCVision with MIT License | 5 votes |
/**
 * Dilates the image in place using a square structuring element.
 *
 * @param img    Image matrix, modified in place
 * @param amount Dilation radius in pixels ({@code >= 0}); the kernel is a
 *               {@code (2*amount+1)} square anchored at its center, so 0
 *               leaves the image effectively unchanged
 */
public static void dilate(Mat img, int amount) {
    int side = 2 * amount + 1;
    Mat element = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_RECT,
            new Size(side, side), new Point(amount, amount));
    Imgproc.dilate(img, img, element);
}
Example 11
Source File: ColorBlobDetector.java From hand_finger_recognition_android with MIT License | 5 votes |
public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask); Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Find max contour area double maxArea = 0; Iterator<MatOfPoint> each = contours.iterator(); while (each.hasNext()) { MatOfPoint wrapper = each.next(); double area = Imgproc.contourArea(wrapper); if (area > maxArea) maxArea = area; } // Filter contours by area and resize to fit the original image size mContours.clear(); each = contours.iterator(); while (each.hasNext()) { MatOfPoint contour = each.next(); if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) { Core.multiply(contour, new Scalar(4,4), contour); mContours.add(contour); } } }
Example 12
Source File: DigitRecognizer.java From MOAAP with MIT License | 5 votes |
/**
 * Classifies a digit image with the trained SVM (a K-NN path is kept below,
 * commented out). The input Mat is preprocessed in place: dilated, resized to
 * the training sample size, converted to grayscale, adaptively thresholded,
 * and finally flattened into a single-row CV_32FC1 feature vector.
 *
 * Returns the predicted digit label.
 */
int FindMatch(Mat test_image) {
    //Dilate the image
    Imgproc.dilate(test_image, test_image, Imgproc.getStructuringElement(Imgproc.CV_SHAPE_CROSS, new Size(3,3)));
    //Resize the image to match it with the sample image size
    Imgproc.resize(test_image, test_image, new Size(width, height));
    //Convert the image to grayscale
    Imgproc.cvtColor(test_image, test_image, Imgproc.COLOR_RGB2GRAY);
    //Adaptive Threshold
    Imgproc.adaptiveThreshold(test_image,test_image,255,Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY_INV,15, 2);
    // Flatten the thresholded image row-by-row into a 1 x (rows*cols) float
    // vector, matching the layout used when training.
    Mat test = new Mat(1, test_image.rows() * test_image.cols(), CvType.CV_32FC1);
    int count = 0;
    for(int i = 0 ; i < test_image.rows(); i++) {
        for(int j = 0 ; j < test_image.cols(); j++) {
            test.put(0, count, test_image.get(i, j)[0]);
            count++;
        }
    }
    // Only needed by the K-NN call below; unused on the SVM path.
    Mat results = new Mat(1, 1, CvType.CV_8U);
    //K-NN Prediction
    //return (int)knn.findNearest(test, 10, results);
    //SVM Prediction
    return (int)svm.predict(test);
}
Example 13
Source File: ALTMRetinex.java From OptimizedImageEnhance with MIT License | 5 votes |
private static Mat localAdaptation(Mat Lg, int rows, int cols, int r, double eps, double krnlRatio) { int krnlSz = Stream.of(3.0, rows * krnlRatio, cols * krnlRatio).max(Double::compare).orElse(3.0).intValue(); // maximum filter: using dilate to extract the local maximum of a image block, which acts as the maximum filter // Meanwhile, minimum filter can be achieved by using erode function Mat Lg_ = new Mat(); Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1)); Imgproc.dilate(Lg, Lg_, kernel); // guided image filter return Filters.GuidedImageFilter(Lg, Lg_, r, eps); }
Example 14
Source File: GeneralUtils.java From super-cloudops with Apache License 2.0 | 5 votes |
/**
 * Dilation (morphological expansion).
 *
 * NOTE(review): the method name is a typo for "dilate" — kept as-is for API
 * compatibility with existing callers.
 *
 * @param src input image
 * @return the dilated image (a new Mat; reassigned to {@code src} locally
 *         but the caller's reference is unaffected)
 */
public static Mat dialate(Mat src) {
    Mat outImage = new Mat();
    // The smaller the kernel size, the finer the morphological unit and the
    // closer the result stays to the original image.
    Mat structImage = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2, 2));
    // Two iterations with the default anchor (-1,-1) = kernel center.
    Imgproc.dilate(src, outImage, structImage, new Point(-1, -1), 2);
    src = outImage;
    return src;
}
Example 15
Source File: SXOpenCV.java From SikuliX1 with MIT License | 5 votes |
/**
 * Compares two images and returns the regions where {@code changed} differs
 * from {@code original} as a list of matches. Returns an empty list when
 * {@code changed} is invalid or the overall difference is below threshold.
 */
public static List<Match> doFindChanges(Image original, Image changed) {
    List<Match> changes = new ArrayList<>();
    if (changed.isValid()) {
        // Per-pixel and whole-image sensitivity thresholds.
        int PIXEL_DIFF_THRESHOLD = 3;
        int IMAGE_DIFF_THRESHOLD = 5;
        Mat previousGray = SXOpenCV.newMat();
        Mat nextGray = SXOpenCV.newMat();
        Mat mDiffAbs = SXOpenCV.newMat();
        Mat mDiffTresh = SXOpenCV.newMat();
        Imgproc.cvtColor(original.getContent(), previousGray, toGray);
        Imgproc.cvtColor(changed.getContent(), nextGray, toGray);
        Core.absdiff(previousGray, nextGray, mDiffAbs);
        // Zero out sub-threshold pixel differences, then count what survives.
        Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);
        if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
            // Binarize, thicken, and close gaps so nearby changed pixels merge
            // into contiguous blobs before contour extraction.
            Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
            Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
            Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
            Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Mat mHierarchy = SXOpenCV.newMat();
            Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
            changes = contoursToRectangle(contours);
        }
    }
    return changes;
}
Example 16
Source File: Finder.java From SikuliX1 with MIT License | 5 votes |
/**
 * Detects changed regions between the base and target images of the find
 * input, returning one Region (axis-aligned bounding box) per change blob.
 * Returns an empty list when the overall difference is below threshold.
 */
public static List<Region> findChanges(FindInput2 findInput) {
    findInput.setAttributes();
    Mat previousGray = SXOpenCV.newMat();
    Mat nextGray = SXOpenCV.newMat();
    Mat mDiffAbs = SXOpenCV.newMat();
    Mat mDiffThresh = SXOpenCV.newMat();
    Imgproc.cvtColor(findInput.getBase(), previousGray, toGray);
    Imgproc.cvtColor(findInput.getTarget(), nextGray, toGray);
    Core.absdiff(previousGray, nextGray, mDiffAbs);
    // Zero out sub-threshold pixel differences, then count what survives.
    Imgproc.threshold(mDiffAbs, mDiffThresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);
    List<Region> rectangles = new ArrayList<>();
    if (Core.countNonZero(mDiffThresh) > IMAGE_DIFF_THRESHOLD) {
        // Binarize, dilate, and morphologically close so that nearby changed
        // pixels merge into contiguous blobs.
        Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
        Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
        Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
        Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);
        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Mat mHierarchy = SXOpenCV.newMat();
        Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        // Manual bounding box per contour: track min/max x and y.
        // NOTE(review): 99999 is a sentinel for "no point seen yet"; it would
        // misbehave on images wider/taller than 99999 px (unlikely here).
        for (MatOfPoint contour : contours) {
            int x1 = 99999;
            int y1 = 99999;
            int x2 = 0;
            int y2 = 0;
            List<org.opencv.core.Point> points = contour.toList();
            for (Point point : points) {
                int x = (int) point.x;
                int y = (int) point.y;
                if (x < x1) x1 = x;
                if (x > x2) x2 = x;
                if (y < y1) y1 = y;
                if (y > y2) y2 = y;
            }
            Region rect = new Region(x1, y1, x2 - x1, y2 - y1);
            rectangles.add(rect);
        }
    }
    return rectangles;
}
Example 17
Source File: CVProcessor.java From CVScanner with GNU General Public License v3.0 | 4 votes |
/**
 * Finds document-candidate contours in an image. The source is cloned,
 * downscaled, denoised, edge-detected, thresholded, and dilated before
 * contour extraction; intermediate Mats are released as soon as each stage
 * is done. Returns external contours sorted by area, largest first.
 */
public static List<MatOfPoint> findContours(Mat src){
    Mat img = src.clone();
    //find contours
    // Work at a reduced size for speed; ratio comes from the project helper.
    double ratio = getScaleRatio(img.size());
    int width = (int) (img.size().width / ratio);
    int height = (int) (img.size().height / ratio);
    Size newSize = new Size(width, height);
    Mat resizedImg = new Mat(newSize, CvType.CV_8UC4);
    Imgproc.resize(img, resizedImg, newSize);
    img.release();
    // Median blur suppresses salt-and-pepper noise before edge detection.
    Imgproc.medianBlur(resizedImg, resizedImg, 7);
    Mat cannedImg = new Mat(newSize, CvType.CV_8UC1);
    Imgproc.Canny(resizedImg, cannedImg, 70, 200, 3, true);
    resizedImg.release();
    // Otsu threshold binarizes the edge map with an automatic level.
    Imgproc.threshold(cannedImg, cannedImg, 70, 255, Imgproc.THRESH_OTSU);
    // Two dilation iterations close small gaps between edge segments.
    Mat dilatedImg = new Mat(newSize, CvType.CV_8UC1);
    Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.dilate(cannedImg, dilatedImg, morph, new Point(-1, -1), 2, 1, new Scalar(1));
    cannedImg.release();
    morph.release();
    ArrayList<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(dilatedImg, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    hierarchy.release();
    dilatedImg.release();
    Log.d(TAG, "contours found: " + contours.size());
    // Sort by contour area, descending (o2 compared to o1).
    Collections.sort(contours, new Comparator<MatOfPoint>() {
        @Override
        public int compare(MatOfPoint o1, MatOfPoint o2) {
            return Double.valueOf(Imgproc.contourArea(o2)).compareTo(Imgproc.contourArea(o1));
        }
    });
    return contours;
}
Example 18
Source File: ColorBlobDetector.java From FTCVision with MIT License | 4 votes |
/**
 * Process an RGBA image. The results can be drawn or retrieved later.
 * This method does not modify the input image.
 *
 * Handles hue ranges that wrap past 255 by OR-ing two inRange masks.
 *
 * @param rgbaImage An RGBA image matrix
 */
public void process(Mat rgbaImage) {
    // Two pyrDown passes: 4x reduction for cheaper processing.
    Imgproc.pyrDown(rgbaImage, mPyrDownMat);
    Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
    Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
    //Test whether we need two inRange operations (only if the hue crosses over 255)
    if (upperBound.getScalar().val[0] <= 255) {
        Core.inRange(mHsvMat, lowerBound.getScalar(), upperBound.getScalar(), mMask);
    } else {
        //We need two operations - we're going to OR the masks together
        Scalar lower = lowerBound.getScalar().clone();
        Scalar upper = upperBound.getScalar().clone();
        // Wrap the upper hue back into [0, 255].
        while (upper.val[0] > 255) upper.val[0] -= 255;
        double tmp = lower.val[0];
        lower.val[0] = 0;
        //Mask 1 - from 0 to n
        Core.inRange(mHsvMat, lower, upper, mMaskOne);
        //Mask 2 - from 255-n to 255
        lower.val[0] = tmp;
        upper.val[0] = 255;
        Core.inRange(mHsvMat, lower, upper, mMask);
        //OR the two masks
        Core.bitwise_or(mMaskOne, mMask, mMask);
    }
    //Dilate (blur) the mask to decrease processing power
    Imgproc.dilate(mMask, mDilatedMask, new Mat());
    List<MatOfPoint> contourListTemp = new ArrayList<>();
    Imgproc.findContours(mDilatedMask, contourListTemp, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    // Resize contours to fit the original image size (x4 to undo pyrDowns).
    // NOTE(review): the original comment claimed area filtering too, but no
    // area filter is applied here — every contour is kept.
    contours.clear();
    for (MatOfPoint c : contourListTemp) {
        Core.multiply(c, new Scalar(4, 4), c);
        contours.add(new Contour(c));
    }
}
Example 19
Source File: Tool.java From SikuliNG with MIT License | 4 votes |
/**
 * Cycles the segmentation view through its states (detail -> external -> all)
 * each time the action is triggered, recomputing {@code segments} for the
 * current screenshot and rendering them as red rectangles. State lives in the
 * fields {@code evaluatingSegments}, {@code segmentState}, {@code segments},
 * and {@code contours}.
 */
private void actionSegments() {
    if (evaluatingSegments) {
        // Already evaluating: advance to the next state; past the last state,
        // drop the collected segments.
        segmentState++;
        if (segmentState > segmentStateAll) {
            segments.clear();
        }
    } else {
        // First trigger: initialize evaluation at the detail state.
        activeSide = ALL;
        evaluatingSegments = true;
        segmentState = segmentStateDetail;
        contours = new ArrayList<>();
    }
    if (segmentState == segmentStateDetail) {
        segments = Finder.getElements(shot);
    } else if (segmentState == segmentStateExternal) {
        segments = Finder.getElements(shot, true);
    } else if (segmentState == segmentStateAll) {
        if (segments.size() > 1) {
            // Merge all segments into one enclosing bounding box.
            // 99999 is a "no segment seen yet" sentinel for the min trackers.
            int x = 99999;
            int y = 99999;
            int w = 0;
            int h = 0;
            for (Element segment : segments) {
                if (segment.x < x) {
                    x = segment.x;
                }
                if (segment.y < y) {
                    y = segment.y;
                }
                if (segment.x + segment.w > x + w) {
                    w = segment.x + segment.w - x;
                }
                if (segment.y + segment.h > y + h) {
                    h = segment.y + segment.h - y;
                }
            }
            segments.clear();
            segments.add(new Element(x, y, w, h));
        } else {
            // Nothing to merge: fill the detected contour black, dilate, and
            // re-extract contours from the painted copy, then show it.
            contours = Finder.getElement(shot);
            mShot = shot.getContent().clone();
            Imgproc.fillPoly(mShot, contours, oColorBlack);
            Imgproc.dilate(mShot, mShot, Element.getNewMat());
            contours = Finder.getElement(new Picture(mShot));
            resizeToFrame(new Picture(mShot));
            return;
        }
    }
    if (segments.size() > 0) {
        // Render each segment as a red rectangle overlaid on the screenshot.
        Story segmented = new Story(shot, true);
        for (Element rect : segments) {
            segmented.add(Symbol.rectangle(rect.w, rect.h).setColor(Color.red), new Element(rect.x, rect.y));
        }
        Picture picture = segmented.get();
        resizeToFrame(picture);
    } else {
        // No segments found: leave evaluation mode and restore the plain view.
        activeSide = ALL;
        evaluatingSegments = false;
        resizeToFrame();
    }
}
Example 20
Source File: NativeClass.java From AndroidDocumentScanner with MIT License | 4 votes |
/**
 * Finds quadrilateral (document-like) candidates in the image. Each of the
 * three color planes is scanned at several threshold levels (plus a Canny
 * pass), contours are approximated to polygons, and those accepted by
 * {@code isRectangle} relative to the image area are returned.
 */
public List<MatOfPoint2f> getPoints(Mat src) {
    // Blur the image to filter out the noise.
    Mat blurred = new Mat();
    Imgproc.medianBlur(src, blurred, 9);
    // Set up images to use.
    Mat gray0 = new Mat(blurred.size(), CvType.CV_8U);
    Mat gray = new Mat();
    // For Core.mixChannels.
    List<MatOfPoint> contours = new ArrayList<>();
    List<MatOfPoint2f> rectangles = new ArrayList<>();
    List<Mat> sources = new ArrayList<>();
    sources.add(blurred);
    List<Mat> destinations = new ArrayList<>();
    destinations.add(gray0);
    // To filter rectangles by their areas.
    int srcArea = src.rows() * src.cols();
    // Find squares in every color plane of the image.
    for (int c = 0; c < 3; c++) {
        // Extract channel c of the blurred image into gray0.
        int[] ch = {c, 0};
        MatOfInt fromTo = new MatOfInt(ch);
        Core.mixChannels(sources, destinations, fromTo);
        // Try several threshold levels.
        for (int l = 0; l < THRESHOLD_LEVEL; l++) {
            if (l == 0) {
                // HACK: Use Canny instead of zero threshold level.
                // Canny helps to catch squares with gradient shading.
                // NOTE: No kernel size parameters on Java API.
                Imgproc.Canny(gray0, gray, 10, 20);
                // Dilate Canny output to remove potential holes between edge segments.
                Imgproc.dilate(gray, gray, Mat.ones(new Size(3, 3), 0));
            } else {
                // Evenly spaced binary threshold levels across [0, 255].
                int threshold = (l + 1) * 255 / THRESHOLD_LEVEL;
                Imgproc.threshold(gray0, gray, threshold, 255, Imgproc.THRESH_BINARY);
            }
            // Find contours and store them all as a list.
            Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
            for (MatOfPoint contour : contours) {
                MatOfPoint2f contourFloat = MathUtils.toMatOfPointFloat(contour);
                // Approximation tolerance: 2% of the contour's arc length.
                double arcLen = Imgproc.arcLength(contourFloat, true) * 0.02;
                // Approximate polygonal curves.
                MatOfPoint2f approx = new MatOfPoint2f();
                Imgproc.approxPolyDP(contourFloat, approx, arcLen, true);
                if (isRectangle(approx, srcArea)) {
                    rectangles.add(approx);
                }
            }
        }
    }
    return rectangles;
}