Java Code Examples for org.opencv.core.Core#split()
The following examples show how to use org.opencv.core.Core#split().
They are drawn from open-source projects; the source file, project, and license are noted above each example.
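Before the project examples, here is a minimal, self-contained sketch of the split/process/merge pattern that most of them follow. The class name, file paths, and the histogram-equalization step are illustrative assumptions rather than code taken from the projects below; it assumes an OpenCV 3.x+ Java installation with the native library on java.library.path.

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class SplitDemo {
    public static void main(String[] args) {
        // Load the OpenCV native library (required before any Mat operation).
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Mat image = Imgcodecs.imread("input.jpg"); // hypothetical input path
        if (image.empty()) {
            throw new IllegalArgumentException("Could not read input.jpg");
        }

        // Core.split fills the list with one single-channel Mat per channel
        // (B, G, R for a standard imread result).
        List<Mat> channels = new ArrayList<>();
        Core.split(image, channels);

        // Process each channel independently, e.g. equalize its histogram.
        for (Mat channel : channels) {
            Imgproc.equalizeHist(channel, channel);
        }

        // Core.merge recombines the per-channel Mats into one multi-channel Mat.
        Mat merged = new Mat();
        Core.merge(channels, merged);
        Imgcodecs.imwrite("output.jpg", merged); // hypothetical output path
    }
}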
Example 1
Source File: GuidedFilterFlashExample.java From OptimizedImageEnhance with MIT License
public static void main(String[] args) {
    String imgPath = "src/main/resources/dcp_images/flash/cave-flash.bmp";
    String guidedImgPath = "src/main/resources/dcp_images/flash/cave-noflash.bmp";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("image").showImage(image);
    image.convertTo(image, CvType.CV_32F);
    Mat guide = Imgcodecs.imread(guidedImgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    guide.convertTo(guide, CvType.CV_32F);
    List<Mat> img = new ArrayList<>();
    List<Mat> gid = new ArrayList<>();
    Core.split(image, img);
    Core.split(guide, gid);
    int r = 8;
    double eps = 0.02 * 0.02;
    Mat q_r = Filters.GuidedImageFilter(img.get(0), gid.get(0), r, eps);
    Mat q_g = Filters.GuidedImageFilter(img.get(1), gid.get(1), r, eps);
    Mat q_b = Filters.GuidedImageFilter(img.get(2), gid.get(2), r, eps);
    Mat q = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(q_r, q_g, q_b)), q);
    q.convertTo(q, CvType.CV_8UC1);
    new ImShow("q").showImage(q);
}
Example 2
Source File: DetectionHelper.java From ml-authentication with Apache License 2.0
private static double getImageBrightness(Mat img) {
    Mat temp = new Mat();
    List<Mat> color = new ArrayList<Mat>(3);
    Mat lum = new Mat();
    temp = img;
    Core.split(temp, color);
    if (color.size() > 0) {
        Core.multiply(color.get(0), new Scalar(0.299), color.get(0));
        Core.multiply(color.get(1), new Scalar(0.587), color.get(1));
        Core.multiply(color.get(2), new Scalar(0.114), color.get(2));
        Core.add(color.get(0), color.get(1), lum);
        Core.add(lum, color.get(2), lum);
        Scalar sum = Core.sumElems(lum);
        // Note: (1 << 8 - 1) parses as 1 << 7 == 128, since - binds tighter than <<;
        // (1 << 8) - 1 == 255 may have been intended here.
        return sum.val[0] / ((1 << 8 - 1) * img.rows() * img.cols()) * 2;
    } else {
        return 1;
    }
}
Example 3
Source File: EnhanceFunc.java From ImageEnhanceViaFusion with MIT License
private static Mat[] applyCLAHE(Mat img, Mat L) {
    Mat[] result = new Mat[2];
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(2.0);
    Mat L2 = new Mat();
    clahe.apply(L, L2);
    Mat LabIm2 = new Mat();
    List<Mat> lab = new ArrayList<Mat>();
    Core.split(img, lab);
    Core.merge(new ArrayList<Mat>(Arrays.asList(L2, lab.get(1), lab.get(2))), LabIm2);
    Mat img2 = new Mat();
    Imgproc.cvtColor(LabIm2, img2, Imgproc.COLOR_Lab2BGR);
    result[0] = img2;
    result[1] = L2;
    return result;
}
Example 4
Source File: HistogramEqualization.java From opencv-fun with GNU Affero General Public License v3.0
public static void main(String[] args) {
    CVLoader.load();
    // load the image
    Mat img = Highgui.imread("data/topdown-9.png");
    Mat equ = new Mat();
    img.copyTo(equ);
    Imgproc.blur(equ, equ, new Size(3, 3));
    Imgproc.cvtColor(equ, equ, Imgproc.COLOR_BGR2YCrCb);
    List<Mat> channels = new ArrayList<Mat>();
    Core.split(equ, channels);
    Imgproc.equalizeHist(channels.get(0), channels.get(0));
    Core.merge(channels, equ);
    Imgproc.cvtColor(equ, equ, Imgproc.COLOR_YCrCb2BGR);
    Mat gray = new Mat();
    Imgproc.cvtColor(equ, gray, Imgproc.COLOR_BGR2GRAY);
    Mat grayOrig = new Mat();
    Imgproc.cvtColor(img, grayOrig, Imgproc.COLOR_BGR2GRAY);
    ImgWindow.newWindow(img);
    ImgWindow.newWindow(equ);
    ImgWindow.newWindow(gray);
    ImgWindow.newWindow(grayOrig);
}
Example 5
Source File: GuidedFilterEnhanceExample.java From OptimizedImageEnhance with MIT License
public static void main(String[] args) {
    String imgPath = "src/main/resources/dcp_images/enhancement/tulips.bmp";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("image").showImage(image);
    image.convertTo(image, CvType.CV_32F);
    List<Mat> img = new ArrayList<>();
    Core.split(image, img);
    int r = 16;
    double eps = 0.01;
    Mat q_r = Filters.GuidedImageFilter(img.get(0), img.get(0), r, eps);
    Mat q_g = Filters.GuidedImageFilter(img.get(1), img.get(1), r, eps);
    Mat q_b = Filters.GuidedImageFilter(img.get(2), img.get(2), r, eps);
    Mat q = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(q_r, q_g, q_b)), q);
    q.convertTo(q, CvType.CV_8UC1);
    new ImShow("q").showImage(q);
}
Example 6
Source File: FusionEnhance.java From OptimizedImageEnhance with MIT License
private static Mat[] applyCLAHE(Mat img, Mat L) {
    Mat[] result = new Mat[2];
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(2.0);
    Mat L2 = new Mat();
    clahe.apply(L, L2);
    Mat LabIm2 = new Mat();
    List<Mat> lab = new ArrayList<>();
    Core.split(img, lab);
    Core.merge(new ArrayList<>(Arrays.asList(L2, lab.get(1), lab.get(2))), LabIm2);
    Mat img2 = new Mat();
    Imgproc.cvtColor(LabIm2, img2, Imgproc.COLOR_Lab2BGR);
    result[0] = img2;
    result[1] = L2;
    return result;
}
Example 7
Source File: OptimizedContrastEnhance.java From OptimizedImageEnhance with MIT License
public static Mat enhance(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) {
    image.convertTo(image, CvType.CV_32F);
    // obtain air-light
    double[] airlight = AirlightEstimate.estimate(image, blkSize);
    // obtain coarse transmission map
    double fTrans = 0.5;
    Mat T = TransmissionEstimate.transEstimate(image, patchSize, airlight, lambda, fTrans);
    // refine the transmission map
    Mat gray = new Mat();
    Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, krnlSize, eps);
    // dehaze
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = dehaze(bgr.get(0), T, airlight[0]);
    //Core.normalize(bChannel, bChannel, 0, 255, Core.NORM_MINMAX);
    Mat gChannel = dehaze(bgr.get(1), T, airlight[1]);
    //Core.normalize(gChannel, gChannel, 0, 255, Core.NORM_MINMAX);
    Mat rChannel = dehaze(bgr.get(2), T, airlight[2]);
    //Core.normalize(rChannel, rChannel, 0, 255, Core.NORM_MINMAX);
    Mat dehazedImg = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazedImg);
    return dehazedImg;
}
Example 8
Source File: TransmissionEstimate.java From OptimizedImageEnhance with MIT License
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
Example 9
Source File: OptimizedContrastEnhance.java From OptimizedImageEnhance with MIT License
@SuppressWarnings("unused")
public static Mat enhanceEachChannel(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) {
    image.convertTo(image, CvType.CV_32F);
    // split image to three channels
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = bgr.get(0);
    Mat gChannel = bgr.get(1);
    Mat rChannel = bgr.get(2);
    // obtain air-light
    double[] airlight = AirlightEstimate.estimate(image, blkSize);
    // obtain coarse transmission map and refine it for each channel
    double fTrans = 0.3;
    Mat T = TransmissionEstimate.transEstimateEachChannel(bChannel, patchSize, airlight[0], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tb = Filters.GuidedImageFilter(bChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(gChannel, patchSize, airlight[1], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tg = Filters.GuidedImageFilter(gChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(rChannel, patchSize, airlight[2], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tr = Filters.GuidedImageFilter(rChannel, T, krnlSize, eps);
    // dehaze
    bChannel = dehaze(bChannel, Tb, airlight[0]);
    gChannel = dehaze(gChannel, Tg, airlight[1]);
    rChannel = dehaze(rChannel, Tr, airlight[2]);
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval);
    return outval;
}
Example 10
Source File: TransmissionEstimate.java From OptimizedImageEnhance with MIT License
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    return computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
}
Example 11
Source File: Filters.java From OptimizedImageEnhance with MIT License
/**
 * Simplest Color Balance. Performs color balancing via histogram
 * normalization.
 *
 * @param img     input color or gray scale image
 * @param percent controls the percentage of pixels to clip to white and black. (normally, choose 1~10)
 * @return Balanced image in CvType.CV_32F
 */
public static Mat SimplestColorBalance(Mat img, int percent) {
    if (percent <= 0)
        percent = 5;
    img.convertTo(img, CvType.CV_32F);
    List<Mat> channels = new ArrayList<>();
    int rows = img.rows(); // number of rows of image
    int cols = img.cols(); // number of columns of image
    int chnls = img.channels(); // number of channels of image
    double halfPercent = percent / 200.0;
    if (chnls == 3)
        Core.split(img, channels);
    else
        channels.add(img);
    List<Mat> results = new ArrayList<>();
    for (int i = 0; i < chnls; i++) {
        // find the low and high percentile values (based on the input percentile)
        Mat flat = new Mat();
        channels.get(i).reshape(1, 1).copyTo(flat);
        Core.sort(flat, flat, Core.SORT_ASCENDING);
        double lowVal = flat.get(0, (int) Math.floor(flat.cols() * halfPercent))[0];
        double topVal = flat.get(0, (int) Math.ceil(flat.cols() * (1.0 - halfPercent)))[0];
        // saturate below the low percentile and above the high percentile
        Mat channel = channels.get(i);
        for (int m = 0; m < rows; m++) {
            for (int n = 0; n < cols; n++) {
                if (channel.get(m, n)[0] < lowVal)
                    channel.put(m, n, lowVal);
                if (channel.get(m, n)[0] > topVal)
                    channel.put(m, n, topVal);
            }
        }
        Core.normalize(channel, channel, 0.0, 255.0 / 2, Core.NORM_MINMAX);
        channel.convertTo(channel, CvType.CV_32F);
        results.add(channel);
    }
    Mat outval = new Mat();
    Core.merge(results, outval);
    return outval;
}
Example 12
Source File: ColorSpace.java From opencv-fun with GNU Affero General Public License v3.0
public static Mat getChannel(Mat orig, int colorSpace, int channelIdx) {
    Mat hsv = new Mat();
    Imgproc.cvtColor(orig, hsv, colorSpace);
    // Note: Core.split() clears and repopulates the output list itself,
    // so pre-filling it with empty Mats is not strictly required.
    List<Mat> channels = new ArrayList<Mat>();
    for (int i = 0; i < hsv.channels(); i++) {
        Mat channel = new Mat();
        channels.add(channel);
    }
    Core.split(hsv, channels);
    return channels.get(channelIdx);
}
Example 13
Source File: ColorSpace.java From opencv-fun with GNU Affero General Public License v3.0
public static Mat getChannel(Mat img, int channelIdx) {
    List<Mat> channels = new ArrayList<Mat>();
    for (int i = 0; i < img.channels(); i++) {
        Mat channel = new Mat();
        channels.add(channel);
    }
    Core.split(img, channels);
    return channels.get(channelIdx);
}
Example 14
Source File: ALTMRetinex.java From OptimizedImageEnhance with MIT License
public static Mat enhance(Mat image, int r, double eps, double eta, double lambda, double krnlRatio) {
    image.convertTo(image, CvType.CV_32F);
    // extract each color channel
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = bgr.get(0);
    Mat gChannel = bgr.get(1);
    Mat rChannel = bgr.get(2);
    int m = rChannel.rows();
    int n = rChannel.cols();
    // Global Adaptation
    List<Mat> list = globalAdaptation(bChannel, gChannel, rChannel, m, n);
    Mat Lw = list.get(0);
    Mat Lg = list.get(1);
    // Local Adaptation
    Mat Hg = localAdaptation(Lg, m, n, r, eps, krnlRatio);
    Lg.convertTo(Lg, CvType.CV_32F);
    // process
    Mat alpha = new Mat(m, n, rChannel.type());
    Core.divide(Lg, new Scalar(Core.minMaxLoc(Lg).maxVal / eta), alpha);
    //Core.multiply(alpha, new Scalar(eta), alpha);
    Core.add(alpha, new Scalar(1.0), alpha);
    //alpha = adjustment(alpha, 1.25);
    Mat Lg_ = new Mat(m, n, rChannel.type());
    Core.add(Lg, new Scalar(1.0 / 255.0), Lg_);
    Core.log(Lg_, Lg_);
    double beta = Math.exp(Core.sumElems(Lg_).val[0] / (m * n)) * lambda;
    Mat Lout = new Mat(m, n, rChannel.type());
    Core.divide(Lg, Hg, Lout);
    Core.add(Lout, new Scalar(beta), Lout);
    Core.log(Lout, Lout);
    Core.normalize(alpha.mul(Lout), Lout, 0, 255, Core.NORM_MINMAX);
    Mat gain = obtainGain(Lout, Lw, m, n);
    // output
    Core.divide(rChannel.mul(gain), new Scalar(Core.minMaxLoc(rChannel).maxVal / 255.0), rChannel); // Red Channel
    Core.divide(gChannel.mul(gain), new Scalar(Core.minMaxLoc(gChannel).maxVal / 255.0), gChannel); // Green Channel
    Core.divide(bChannel.mul(gain), new Scalar(Core.minMaxLoc(bChannel).maxVal / 255.0), bChannel); // Blue Channel
    // merge the three color channels into an image
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval);
    outval.convertTo(outval, CvType.CV_8UC1);
    return outval;
}
Example 15
Source File: DarkChannelPriorDehaze.java From OptimizedImageEnhance with MIT License
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) {
    image.convertTo(image, CvType.CV_32F);
    // extract each color channel
    List<Mat> rgb = new ArrayList<>();
    Core.split(image, rgb);
    Mat rChannel = rgb.get(0);
    Mat gChannel = rgb.get(1);
    Mat bChannel = rgb.get(2);
    int rows = rChannel.rows();
    int cols = rChannel.cols();
    // derive the dark channel from original image
    Mat dc = rChannel.clone();
    for (int i = 0; i < image.rows(); i++) {
        for (int j = 0; j < image.cols(); j++) {
            double min = Math.min(rChannel.get(i, j)[0], Math.min(gChannel.get(i, j)[0], bChannel.get(i, j)[0]));
            dc.put(i, j, min);
        }
    }
    // minimum filter
    int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue();
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
    Imgproc.erode(dc, dc, kernel);
    // get coarse transmission map
    Mat t = dc.clone();
    Core.subtract(t, new Scalar(255.0), t);
    Core.multiply(t, new Scalar(-1.0), t);
    Core.divide(t, new Scalar(255.0), t);
    // obtain gray scale image
    Mat gray = new Mat();
    Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
    Core.divide(gray, new Scalar(255.0), gray);
    // refine transmission map
    int r = krnlSz * 4;
    t = Filters.GuidedImageFilter(gray, t, r, eps);
    // get minimum atmospheric light
    minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal);
    // dehaze each color channel
    rChannel = dehaze(rChannel, t, minAtmosLight);
    gChannel = dehaze(gChannel, t, minAtmosLight);
    bChannel = dehaze(bChannel, t, minAtmosLight);
    // merge the three color channels into an image
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval);
    outval.convertTo(outval, CvType.CV_8UC1);
    return outval;
}
Example 16
Source File: EnhanceFunc.java From ImageEnhanceViaFusion with MIT License
public static void main(String[] args) {
    String imgPath = "images/5.jpg";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("original").showImage(image);
    // color balance
    Mat img1 = ColorBalance.SimplestColorBalance(image, 5);
    img1.convertTo(img1, CvType.CV_8UC1);
    // Perform sRGB to CIE Lab color space conversion
    Mat LabIm1 = new Mat();
    Imgproc.cvtColor(img1, LabIm1, Imgproc.COLOR_BGR2Lab);
    Mat L1 = new Mat();
    Core.extractChannel(LabIm1, L1, 0);
    // apply CLAHE
    Mat[] result = applyCLAHE(LabIm1, L1);
    Mat img2 = result[0];
    Mat L2 = result[1];
    // calculate normalized weight
    Mat w1 = calWeight(img1, L1);
    Mat w2 = calWeight(img2, L2);
    Mat sumW = new Mat();
    Core.add(w1, w2, sumW);
    Core.divide(w1, sumW, w1);
    Core.divide(w2, sumW, w2);
    // construct the gaussian pyramid for weight
    int level = 5;
    Mat[] weight1 = Pyramid.GaussianPyramid(w1, level);
    Mat[] weight2 = Pyramid.GaussianPyramid(w2, level);
    // construct the laplacian pyramid for input image channel
    img1.convertTo(img1, CvType.CV_32F);
    img2.convertTo(img2, CvType.CV_32F);
    List<Mat> bgr = new ArrayList<Mat>();
    Core.split(img1, bgr);
    Mat[] bCnl1 = Pyramid.LaplacianPyramid(bgr.get(0), level);
    Mat[] gCnl1 = Pyramid.LaplacianPyramid(bgr.get(1), level);
    Mat[] rCnl1 = Pyramid.LaplacianPyramid(bgr.get(2), level);
    bgr.clear();
    Core.split(img2, bgr);
    Mat[] bCnl2 = Pyramid.LaplacianPyramid(bgr.get(0), level);
    Mat[] gCnl2 = Pyramid.LaplacianPyramid(bgr.get(1), level);
    Mat[] rCnl2 = Pyramid.LaplacianPyramid(bgr.get(2), level);
    // fusion process
    Mat[] bCnl = new Mat[level];
    Mat[] gCnl = new Mat[level];
    Mat[] rCnl = new Mat[level];
    for (int i = 0; i < level; i++) {
        Mat cn = new Mat();
        Core.add(bCnl1[i].mul(weight1[i]), bCnl2[i].mul(weight2[i]), cn);
        bCnl[i] = cn.clone();
        Core.add(gCnl1[i].mul(weight1[i]), gCnl2[i].mul(weight2[i]), cn);
        gCnl[i] = cn.clone();
        Core.add(rCnl1[i].mul(weight1[i]), rCnl2[i].mul(weight2[i]), cn);
        rCnl[i] = cn.clone();
    }
    // reconstruct & output
    Mat bChannel = Pyramid.PyramidReconstruct(bCnl);
    Mat gChannel = Pyramid.PyramidReconstruct(gCnl);
    Mat rChannel = Pyramid.PyramidReconstruct(rCnl);
    Mat fusion = new Mat();
    Core.merge(new ArrayList<Mat>(Arrays.asList(bChannel, gChannel, rChannel)), fusion);
    fusion.convertTo(fusion, CvType.CV_8UC1);
    new ImShow("fusion").showImage(fusion);
}
Example 17
Source File: ColorBalance.java From ImageEnhanceViaFusion with MIT License
/**
 * Simplest Color Balance. Performs color balancing via histogram
 * normalization.
 *
 * @param img
 *            input color or gray scale image
 * @param percent
 *            controls the percentage of pixels to clip to white and black.
 *            (normally, choose 1~10)
 * @return Balanced image in CvType.CV_32F
 */
public static Mat SimplestColorBalance(Mat img, int percent) {
    if (percent <= 0)
        percent = 5;
    img.convertTo(img, CvType.CV_32F);
    List<Mat> channels = new ArrayList<Mat>();
    int rows = img.rows(); // number of rows of image
    int cols = img.cols(); // number of columns of image
    int chnls = img.channels(); // number of channels of image
    double halfPercent = percent / 200.0;
    if (chnls == 3) {
        Core.split(img, channels);
    } else {
        channels.add(img);
    }
    List<Mat> results = new ArrayList<Mat>();
    for (int i = 0; i < chnls; i++) {
        // find the low and high percentile values (based on the input percentile)
        Mat flat = new Mat();
        channels.get(i).reshape(1, 1).copyTo(flat);
        Core.sort(flat, flat, Core.SORT_ASCENDING);
        double lowVal = flat.get(0, (int) Math.floor(flat.cols() * halfPercent))[0];
        double topVal = flat.get(0, (int) Math.ceil(flat.cols() * (1.0 - halfPercent)))[0];
        // saturate below the low percentile and above the high percentile
        Mat channel = channels.get(i);
        for (int m = 0; m < rows; m++) {
            for (int n = 0; n < cols; n++) {
                if (channel.get(m, n)[0] < lowVal)
                    channel.put(m, n, lowVal);
                if (channel.get(m, n)[0] > topVal)
                    channel.put(m, n, topVal);
            }
        }
        Core.normalize(channel, channel, 0, 255, Core.NORM_MINMAX);
        channel.convertTo(channel, CvType.CV_32F);
        results.add(channel);
    }
    Mat outval = new Mat();
    Core.merge(results, outval);
    return outval;
}
Example 18
Source File: LeviColorFilter.java From DogeCV with GNU General Public License v3.0
/**
 * Process an image and return a mask
 * @param input - Input image to process
 * @param mask - Output mask
 */
@Override
public void process(Mat input, Mat mask) {
    channels = new ArrayList<>();
    switch (color) {
        case RED:
            if (threshold == -1) {
                threshold = 164;
            }
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case BLUE:
            if (threshold == -1) {
                threshold = 145;
            }
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case WHITE:
            if (threshold == -1) {
                threshold = 150;
            }
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Core.inRange(channels.get(0), new Scalar(threshold, 150, 40), new Scalar(255, 150, 150), mask);
            break;
        case YELLOW:
            if (threshold == -1) {
                threshold = 70;
            }
            Mat lab = new Mat(input.size(), 0);
            Imgproc.cvtColor(input, lab, Imgproc.COLOR_RGB2Lab);
            Mat temp = new Mat();
            Core.inRange(input, new Scalar(0, 0, 0), new Scalar(255, 255, 164), temp);
            Mat mask2 = new Mat(input.size(), 0);
            temp.copyTo(mask2);
            input.copyTo(input, mask2);
            mask2.release();
            temp.release();
            lab.release();
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            if (channels.size() > 0) {
                Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY_INV);
            }
            break;
    }
    for (int i = 0; i < channels.size(); i++) {
        channels.get(i).release();
    }
    input.release();
}
Example 19
Source File: BlkTransEstimate.java From OptimizedImageEnhance with MIT License
public static double blkEstimate(Mat blkIm, double[] airlight, double lambda, double fTrans) {
    double Trans = 0.0;
    double nTrans = Math.floor(1.0 / fTrans * 128);
    double fMinCost = Double.MAX_VALUE;
    int numberOfPixels = blkIm.rows() * blkIm.cols() * blkIm.channels();
    double nCounter = 0.0;
    List<Mat> bgr = new ArrayList<>();
    Core.split(blkIm, bgr);
    while (nCounter < (1.0 - fTrans) * 10) {
        // initial dehazing process to calculate the loss information
        Mat bChannel = bgr.get(0).clone();
        bChannel = preDehaze(bChannel, airlight[0], nTrans);
        Mat gChannel = bgr.get(1).clone();
        gChannel = preDehaze(gChannel, airlight[1], nTrans);
        Mat rChannel = bgr.get(2).clone();
        rChannel = preDehaze(rChannel, airlight[2], nTrans);
        // find the pixels with over-255 value and below-0 value, and
        // calculate the sum of information loss
        double nSumOfLoss = 0.0;
        for (int i = 0; i < bChannel.rows(); i++) {
            for (int j = 0; j < bChannel.cols(); j++) {
                if (bChannel.get(i, j)[0] > 255.0)
                    nSumOfLoss += (bChannel.get(i, j)[0] - 255.0) * (bChannel.get(i, j)[0] - 255.0);
                else if (bChannel.get(i, j)[0] < 0.0)
                    nSumOfLoss += bChannel.get(i, j)[0] * bChannel.get(i, j)[0];
                if (gChannel.get(i, j)[0] > 255.0)
                    nSumOfLoss += (gChannel.get(i, j)[0] - 255.0) * (gChannel.get(i, j)[0] - 255.0);
                else if (gChannel.get(i, j)[0] < 0.0)
                    nSumOfLoss += gChannel.get(i, j)[0] * gChannel.get(i, j)[0];
                if (rChannel.get(i, j)[0] > 255.0)
                    nSumOfLoss += (rChannel.get(i, j)[0] - 255.0) * (rChannel.get(i, j)[0] - 255.0);
                else if (rChannel.get(i, j)[0] < 0.0)
                    nSumOfLoss += rChannel.get(i, j)[0] * rChannel.get(i, j)[0];
            }
        }
        // calculate the sum of the squared output values
        double nSumOfSquareOuts = Core.sumElems(bChannel.mul(bChannel)).val[0]
                + Core.sumElems(gChannel.mul(gChannel)).val[0] + Core.sumElems(rChannel.mul(rChannel)).val[0];
        // calculate the sum of the output values
        double nSumOfOuts = Core.sumElems(bChannel).val[0] + Core.sumElems(gChannel).val[0]
                + Core.sumElems(rChannel).val[0];
        // calculate the mean value of the block image
        double fMean = nSumOfOuts / numberOfPixels;
        // calculate the cost function
        double fCost = lambda * nSumOfLoss / numberOfPixels - (nSumOfSquareOuts / numberOfPixels - fMean * fMean);
        // find the minimum cost and the related transmission
        if (nCounter == 0 || fMinCost > fCost) {
            fMinCost = fCost;
            Trans = fTrans;
        }
        fTrans = fTrans + 0.1;
        nTrans = 1.0 / fTrans * 128.0;
        nCounter = nCounter + 1;
    }
    return Trans;
}