Java Code Examples for org.opencv.core.Core#multiply()
The following examples show how to use
org.opencv.core.Core#multiply().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: MainActivity.java From MOAAP with MIT License | 7 votes |
public void DifferenceOfGaussian() { Mat grayMat = new Mat(); Mat blur1 = new Mat(); Mat blur2 = new Mat(); //Converting the image to grayscale Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY); Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5); Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5); //Subtracting the two blurred images Mat DoG = new Mat(); Core.absdiff(blur1, blur2, DoG); //Inverse Binary Thresholding Core.multiply(DoG, new Scalar(100), DoG); Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV); //Converting Mat back to Bitmap Utils.matToBitmap(DoG, currentBitmap); imageView.setImageBitmap(currentBitmap); }
Example 2
Source File: TransmissionEstimate.java From OptimizedImageEnhance with MIT License | 6 votes |
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans, int r, double eps, double gamma) { int rows = img.rows(); int cols = img.cols(); List<Mat> bgr = new ArrayList<>(); Core.split(img, bgr); int type = bgr.get(0).type(); // calculate the transmission map Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans); // refine the transmission map img.convertTo(img, CvType.CV_8UC1); Mat gray = new Mat(); Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY); gray.convertTo(gray, CvType.CV_32F); Core.divide(gray, new Scalar(255.0), gray); T = Filters.GuidedImageFilter(gray, T, r, eps); Mat Tsmooth = new Mat(); Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40); Mat Tdetails = new Mat(); Core.subtract(T, Tsmooth, Tdetails); Core.multiply(Tdetails, new Scalar(gamma), Tdetails); Core.add(Tsmooth, Tdetails, T); return T; }
Example 3
Source File: ALTMRetinex.java From OptimizedImageEnhance with MIT License | 6 votes |
private static List<Mat> globalAdaptation(Mat b, Mat g, Mat r, int rows, int cols) { // Calculate Lw & maximum of Lw Mat Lw = new Mat(rows, cols, r.type()); Core.multiply(r, new Scalar(rParam), r); Core.multiply(g, new Scalar(gParam), g); Core.multiply(b, new Scalar(bParam), b); Core.add(r, g, Lw); Core.add(Lw, b, Lw); double LwMax = Core.minMaxLoc(Lw).maxVal; // the maximum luminance value // Calculate log-average luminance and get global adaptation result Mat Lw_ = Lw.clone(); Core.add(Lw_, new Scalar(0.001), Lw_); Core.log(Lw_, Lw_); double LwAver = Math.exp(Core.sumElems(Lw_).val[0] / (rows * cols)); Mat Lg = Lw.clone(); Core.divide(Lg, new Scalar(LwAver), Lg); Core.add(Lg, new Scalar(1.0), Lg); Core.log(Lg, Lg); Core.divide(Lg, new Scalar(Math.log(LwMax / LwAver + 1.0)), Lg); // Lg is the global adaptation List<Mat> list = new ArrayList<>(); list.add(Lw); list.add(Lg); return list; }
Example 4
Source File: DetectionHelper.java From ml-authentication with Apache License 2.0 | 6 votes |
/**
 * Estimates the perceived brightness of an image as a weighted luma sum
 * (Rec.601 coefficients 0.299/0.587/0.114) normalized by the 8-bit maximum
 * value (255) and the pixel count, then doubled. Returns 1 when the image
 * has no channels. Mutates the split channel Mats but not {@code img}.
 */
private static double getImageBrightness(Mat img){
    List<Mat> color = new ArrayList<Mat>(3);
    Core.split(img, color);
    if (color.size() == 0) {
        return 1; // nothing to measure; report neutral brightness
    }
    // NOTE(review): 0.299/0.587/0.114 are the Rec.601 R/G/B luma weights, but
    // Core.split on a BGR image yields the blue channel first — confirm the
    // channel order of `img` matches the intended weighting.
    Core.multiply(color.get(0), new Scalar(0.299), color.get(0));
    Core.multiply(color.get(1), new Scalar(0.587), color.get(1));
    Core.multiply(color.get(2), new Scalar(0.114), color.get(2));
    Mat lum = new Mat();
    Core.add(color.get(0), color.get(1), lum);
    Core.add(lum, color.get(2), lum);
    Scalar sum = Core.sumElems(lum);
    // Fixed: the original wrote (1<<8 - 1), which Java parses as 1 << (8 - 1)
    // == 128 because '-' binds tighter than '<<'. The intended 8-bit maximum
    // is (1 << 8) - 1 == 255.
    return sum.val[0] / (((1 << 8) - 1) * img.rows() * img.cols()) * 2;
}
Example 5
Source File: DetectionHelper.java From ml-authentication with Apache License 2.0 | 6 votes |
/**
 * Estimates the perceived brightness of an image as a weighted luma sum
 * (Rec.601 coefficients 0.299/0.587/0.114) normalized by the 8-bit maximum
 * value (255) and the pixel count, then doubled. Returns 1 when the image
 * has no channels. Mutates the split channel Mats but not {@code img}.
 */
private static double getImageBrightness(Mat img){
    List<Mat> color = new ArrayList<Mat>(3);
    Core.split(img, color);
    if (color.size() == 0) {
        return 1; // nothing to measure; report neutral brightness
    }
    // NOTE(review): 0.299/0.587/0.114 are the Rec.601 R/G/B luma weights, but
    // Core.split on a BGR image yields the blue channel first — confirm the
    // channel order of `img` matches the intended weighting.
    Core.multiply(color.get(0), new Scalar(0.299), color.get(0));
    Core.multiply(color.get(1), new Scalar(0.587), color.get(1));
    Core.multiply(color.get(2), new Scalar(0.114), color.get(2));
    Mat lum = new Mat();
    Core.add(color.get(0), color.get(1), lum);
    Core.add(lum, color.get(2), lum);
    Scalar sum = Core.sumElems(lum);
    // Fixed: the original wrote (1<<8 - 1), which Java parses as 1 << (8 - 1)
    // == 128 because '-' binds tighter than '<<'. The intended 8-bit maximum
    // is (1 << 8) - 1 == 255.
    return sum.val[0] / (((1 << 8) - 1) * img.rows() * img.cols()) * 2;
}
Example 6
Source File: BlkTransEstimate.java From OptimizedImageEnhance with MIT License | 5 votes |
private static Mat preDehaze(Mat img, double a, double nTrans) { // nOut = ( (blkIm - a) * nTrans + 128 * a ) / 128; Core.subtract(img, new Scalar(a), img); Core.multiply(img, new Scalar(nTrans), img); Core.add(img, new Scalar(128.0 * a), img); Core.divide(img, new Scalar(128.0), img); return img; }
Example 7
Source File: OptimizedContrastEnhance.java From OptimizedImageEnhance with MIT License | 5 votes |
@SuppressWarnings("unused") public static Mat enhanceEachChannel(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) { image.convertTo(image, CvType.CV_32F); // split image to three channels List<Mat> bgr = new ArrayList<>(); Core.split(image, bgr); Mat bChannel = bgr.get(0); Mat gChannel = bgr.get(1); Mat rChannel = bgr.get(2); // obtain air-light double[] airlight = AirlightEstimate.estimate(image, blkSize); // obtain coarse transmission map and refine it for each channel double fTrans = 0.3; Mat T = TransmissionEstimate.transEstimateEachChannel(bChannel, patchSize, airlight[0], lambda, fTrans); Core.subtract(T, new Scalar(1.0), T); Core.multiply(T, new Scalar(-1.0), T); Mat Tb = Filters.GuidedImageFilter(bChannel, T, krnlSize, eps); T = TransmissionEstimate.transEstimateEachChannel(gChannel, patchSize, airlight[1], lambda, fTrans); Core.subtract(T, new Scalar(1.0), T); Core.multiply(T, new Scalar(-1.0), T); Mat Tg = Filters.GuidedImageFilter(gChannel, T, krnlSize, eps); T = TransmissionEstimate.transEstimateEachChannel(rChannel, patchSize, airlight[2], lambda, fTrans); Core.subtract(T, new Scalar(1.0), T); Core.multiply(T, new Scalar(-1.0), T); Mat Tr = Filters.GuidedImageFilter(rChannel, T, krnlSize, eps); // dehaze bChannel = dehaze(bChannel, Tb, airlight[0]); gChannel = dehaze(gChannel, Tg, airlight[1]); rChannel = dehaze(rChannel, Tr, airlight[2]); Mat outval = new Mat(); Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval); return outval; }
Example 8
Source File: DarkChannelPriorDehaze.java From OptimizedImageEnhance with MIT License | 5 votes |
/**
 * Inverts the haze imaging model on a single channel, in place:
 * channel := (channel - A * (1 - t)) / t, where A = {@code minAtmosLight}.
 * Mutates and returns {@code channel}.
 */
private static Mat dehaze(Mat channel, Mat t, double minAtmosLight) {
    // airlightTerm = A * (1 - t), computed as -A * (t - 1).
    Mat airlightTerm = new Mat();
    Core.subtract(t, new Scalar(1.0), airlightTerm);
    Core.multiply(airlightTerm, new Scalar(-1.0 * minAtmosLight), airlightTerm);
    // Remove the airlight contribution, then divide by the transmission.
    Core.subtract(channel, airlightTerm, channel);
    Core.divide(channel, t, channel);
    return channel;
}
Example 9
Source File: RemoveBackScatter.java From OptimizedImageEnhance with MIT License | 5 votes |
private static Mat pyramidFuse(Mat w1, Mat w2, Mat img1, Mat img2, int level) { // Normalized weight Mat sumW = new Mat(); Core.add(w1, w2, sumW); Core.divide(w1, sumW, w1); Core.multiply(w1, new Scalar(2.0), w1); Core.divide(w2, sumW, w2); Core.multiply(w2, new Scalar(2.0), w2); // Pyramid decomposition and reconstruct return ImgDecompose.fuseTwoImage(w1, img1, w2, img2, level); }
Example 10
Source File: RemoveBackScatter.java From OptimizedImageEnhance with MIT License | 5 votes |
private static Mat dehazeProcess(Mat img, Mat trans, double[] airlight) { Mat balancedImg = Filters.SimplestColorBalance(img, 5); Mat bCnl = new Mat(); Core.extractChannel(balancedImg, bCnl, 0); Mat gCnl = new Mat(); Core.extractChannel(balancedImg, gCnl, 1); Mat rCnl = new Mat(); Core.extractChannel(balancedImg, rCnl, 2); // get mean value double bMean = Core.mean(bCnl).val[0]; double gMean = Core.mean(gCnl).val[0]; double rMean = Core.mean(rCnl).val[0]; // get transmission map for each channel Mat Tb = trans.clone(); Core.multiply(Tb, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / bMean * 0.8), Tb); Mat Tg = trans.clone(); Core.multiply(Tg, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / gMean * 0.9), Tg); Mat Tr = trans.clone(); Core.multiply(Tr, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / rMean * 0.8), Tr); // dehaze by formula // blue channel Mat bChannel = new Mat(); Core.subtract(bCnl, new Scalar(airlight[0]), bChannel); Core.divide(bChannel, Tb, bChannel); Core.add(bChannel, new Scalar(airlight[0]), bChannel); // green channel Mat gChannel = new Mat(); Core.subtract(gCnl, new Scalar(airlight[1]), gChannel); Core.divide(gChannel, Tg, gChannel); Core.add(gChannel, new Scalar(airlight[1]), gChannel); // red channel Mat rChannel = new Mat(); Core.subtract(rCnl, new Scalar(airlight[2]), rChannel); Core.divide(rChannel, Tr, rChannel); Core.add(rChannel, new Scalar(airlight[2]), rChannel); Mat dehazed = new Mat(); Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazed); return dehazed; }
Example 11
Source File: ColorBlobDetector.java From FaceT with Mozilla Public License 2.0 | 5 votes |
public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask); Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Find max contour area double maxArea = 0; Iterator<MatOfPoint> each = contours.iterator(); while (each.hasNext()) { MatOfPoint wrapper = each.next(); double area = Imgproc.contourArea(wrapper); if (area > maxArea) maxArea = area; } // Filter contours by area and resize to fit the original image size mContours.clear(); each = contours.iterator(); while (each.hasNext()) { MatOfPoint contour = each.next(); if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) { Core.multiply(contour, new Scalar(4,4), contour); mContours.add(contour); } } }
Example 12
Source File: GammaCorrection.java From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0 | 5 votes |
/**
 * Applies gamma correction to every image held by the pre-processor.
 * Each image is normalized to [0, 1] in float, raised to the power
 * {@code gamma}, rescaled, and converted back to 8-bit.
 */
public PreProcessor preprocessImage(PreProcessor preProcessor) {
    List<Mat> corrected = new ArrayList<Mat>();
    for (Mat image : preProcessor.getImages()) {
        // Normalize to [0, 1] in float so pow() behaves as expected.
        image.convertTo(image, CvType.CV_32F);
        Core.divide(image, INT_MAX, image);
        Core.pow(image, gamma, image);
        // Rescale and return to 8-bit depth.
        Core.multiply(image, INT_MAX, image);
        image.convertTo(image, CvType.CV_8U);
        corrected.add(image);
    }
    preProcessor.setImages(corrected);
    return preProcessor;
}
Example 13
Source File: ColorBlobDetector.java From OpenCV-AndroidSamples with MIT License | 5 votes |
public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask); Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Find max contour area double maxArea = 0; Iterator<MatOfPoint> each = contours.iterator(); while (each.hasNext()) { MatOfPoint wrapper = each.next(); double area = Imgproc.contourArea(wrapper); if (area > maxArea) maxArea = area; } // Filter contours by area and resize to fit the original image size mContours.clear(); each = contours.iterator(); while (each.hasNext()) { MatOfPoint contour = each.next(); if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) { Core.multiply(contour, new Scalar(4,4), contour); mContours.add(contour); } } }
Example 14
Source File: ColorBlobDetector.java From hand_finger_recognition_android with MIT License | 5 votes |
public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask); Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Find max contour area double maxArea = 0; Iterator<MatOfPoint> each = contours.iterator(); while (each.hasNext()) { MatOfPoint wrapper = each.next(); double area = Imgproc.contourArea(wrapper); if (area > maxArea) maxArea = area; } // Filter contours by area and resize to fit the original image size mContours.clear(); each = contours.iterator(); while (each.hasNext()) { MatOfPoint contour = each.next(); if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) { Core.multiply(contour, new Scalar(4,4), contour); mContours.add(contour); } } }
Example 15
Source File: DarkChannelPriorDehaze.java From OptimizedImageEnhance with MIT License | 4 votes |
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) { image.convertTo(image, CvType.CV_32F); // extract each color channel List<Mat> rgb = new ArrayList<>(); Core.split(image, rgb); Mat rChannel = rgb.get(0); Mat gChannel = rgb.get(1); Mat bChannel = rgb.get(2); int rows = rChannel.rows(); int cols = rChannel.cols(); // derive the dark channel from original image Mat dc = rChannel.clone(); for (int i = 0; i < image.rows(); i++) { for (int j = 0; j < image.cols(); j++) { double min = Math.min(rChannel.get(i, j)[0], Math.min(gChannel.get(i, j)[0], bChannel.get(i, j)[0])); dc.put(i, j, min); } } // minimum filter int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue(); Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1)); Imgproc.erode(dc, dc, kernel); // get coarse transmission map Mat t = dc.clone(); Core.subtract(t, new Scalar(255.0), t); Core.multiply(t, new Scalar(-1.0), t); Core.divide(t, new Scalar(255.0), t); // obtain gray scale image Mat gray = new Mat(); Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY); Core.divide(gray, new Scalar(255.0), gray); // refine transmission map int r = krnlSz * 4; t = Filters.GuidedImageFilter(gray, t, r, eps); // get minimum atmospheric light minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal); // dehaze each color channel rChannel = dehaze(rChannel, t, minAtmosLight); gChannel = dehaze(gChannel, t, minAtmosLight); bChannel = dehaze(bChannel, t, minAtmosLight); // merge three color channels to a image Mat outval = new Mat(); Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval); outval.convertTo(outval, CvType.CV_8UC1); return outval; }
Example 16
Source File: ColorBlobDetector.java From FTCVision with MIT License | 4 votes |
/** * Process an rgba image. The results can be drawn on retrieved later. * This method does not modify the image. * * @param rgbaImage An RGBA image matrix */ public void process(Mat rgbaImage) { Imgproc.pyrDown(rgbaImage, mPyrDownMat); Imgproc.pyrDown(mPyrDownMat, mPyrDownMat); Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL); //Test whether we need two inRange operations (only if the hue crosses over 255) if (upperBound.getScalar().val[0] <= 255) { Core.inRange(mHsvMat, lowerBound.getScalar(), upperBound.getScalar(), mMask); } else { //We need two operations - we're going to OR the masks together Scalar lower = lowerBound.getScalar().clone(); Scalar upper = upperBound.getScalar().clone(); while (upper.val[0] > 255) upper.val[0] -= 255; double tmp = lower.val[0]; lower.val[0] = 0; //Mask 1 - from 0 to n Core.inRange(mHsvMat, lower, upper, mMaskOne); //Mask 2 - from 255-n to 255 lower.val[0] = tmp; upper.val[0] = 255; Core.inRange(mHsvMat, lower, upper, mMask); //OR the two masks Core.bitwise_or(mMaskOne, mMask, mMask); } //Dilate (blur) the mask to decrease processing power Imgproc.dilate(mMask, mDilatedMask, new Mat()); List<MatOfPoint> contourListTemp = new ArrayList<>(); Imgproc.findContours(mDilatedMask, contourListTemp, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE); // Filter contours by area and resize to fit the original image size contours.clear(); for (MatOfPoint c : contourListTemp) { Core.multiply(c, new Scalar(4, 4), c); contours.add(new Contour(c)); } }