Java Code Examples for org.opencv.imgproc.Imgproc#equalizeHist()
The following examples show how to use org.opencv.imgproc.Imgproc#equalizeHist(). Each example is taken from an open-source project; the source file and license are noted above each snippet.
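Imgproc.equalizeHist(src, dst) spreads the intensity histogram of an 8-bit, single-channel image across the full 0-255 range, which is why the projects below call it just before thresholding, OCR, or face detection. As a quick orientation, here is a minimal standalone sketch of the basic call pattern; the file names input.png and equalized.png are placeholders for this sketch and do not come from any project listed here.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class EqualizeHistSketch {
    public static void main(String[] args) {
        // load the native OpenCV library before calling any Imgproc function
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // equalizeHist expects an 8-bit, single-channel (grayscale) source
        Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE);

        Mat equalized = new Mat();
        Imgproc.equalizeHist(gray, equalized);

        Imgcodecs.imwrite("equalized.png", equalized);
    }
}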
Example 1
Source File: OCRProcessor.java From Camdroid with Apache License 2.0 | 8 votes |
protected void execute() {
    // grayscale copy of the current frame
    out = gray();
    // boost contrast, normalize intensities, then binarize before OCR
    Imgproc.equalizeHist(out, out);
    Core.normalize(out, out, min, max, Core.NORM_MINMAX);
    Imgproc.adaptiveThreshold(out, out, 255, Imgproc.THRESH_BINARY,
            Imgproc.ADAPTIVE_THRESH_MEAN_C, blocksize, reduction);

    // hand the binarized image to Tesseract
    byte[] data = new byte[(int) out.total()];
    out.get(0, 0, data);
    this.tessBaseAPI.setImage(data, out.width(), out.height(),
            out.channels(), (int) out.step1());

    String utf8Text = this.tessBaseAPI.getUTF8Text();
    int score = this.tessBaseAPI.meanConfidence();
    this.tessBaseAPI.clear();

    // keep the recognized text only if Tesseract is reasonably confident
    if (score >= SIMPLETEXT_MIN_SCORE && utf8Text.length() > 0) {
        simpleText = utf8Text;
    } else {
        simpleText = new String();
    }
}
Example 2
Source File: HistogramEqualization.java From opencv-fun with GNU Affero General Public License v3.0 | 6 votes |
public static void main(String[] args) {
    CVLoader.load();

    // load the image
    Mat img = Highgui.imread("data/topdown-9.png");
    Mat equ = new Mat();
    img.copyTo(equ);
    Imgproc.blur(equ, equ, new Size(3, 3));

    Imgproc.cvtColor(equ, equ, Imgproc.COLOR_BGR2YCrCb);
    List<Mat> channels = new ArrayList<Mat>();
    Core.split(equ, channels);
    Imgproc.equalizeHist(channels.get(0), channels.get(0));
    Core.merge(channels, equ);
    Imgproc.cvtColor(equ, equ, Imgproc.COLOR_YCrCb2BGR);

    Mat gray = new Mat();
    Imgproc.cvtColor(equ, gray, Imgproc.COLOR_BGR2GRAY);
    Mat grayOrig = new Mat();
    Imgproc.cvtColor(img, grayOrig, Imgproc.COLOR_BGR2GRAY);

    ImgWindow.newWindow(img);
    ImgWindow.newWindow(equ);
    ImgWindow.newWindow(gray);
    ImgWindow.newWindow(grayOrig);
}
Example 3
Source File: OpenCVNonMavenExamples.java From Java-for-Data-Science with MIT License | 5 votes |
public void enhanceImageContrast() {
    Mat source = Imgcodecs.imread("GrayScaleParrot.png",
            Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
    Mat destination = new Mat(source.rows(), source.cols(), source.type());
    Imgproc.equalizeHist(source, destination);
    Imgcodecs.imwrite("enhancedParrot.jpg", destination);
}
Example 4
Source File: Lines.java From DogeCV with GNU General Public License v3.0 | 5 votes |
/**
 * Modern OpenCV line segment detection - far better than Canny, but must be carefully adjusted.
 *
 * @param original  The original image to be scanned, as an RGB image
 * @param scale     The factor by which the image is to be downscaled
 * @param minLength The minimum line segment length to be returned
 * @return A List of Lines found
 */
public static List<Line> getOpenCvLines(Mat original, int scale, double minLength) {
    Mat raw = new Mat();
    Imgproc.resize(original.clone(), raw,
            new Size((int) (original.size().width / scale), (int) (original.size().height / scale)));
    if (raw.channels() > 1) {
        Imgproc.cvtColor(raw, raw, Imgproc.COLOR_RGB2GRAY);
    }
    Imgproc.equalizeHist(raw, raw);
    Imgproc.blur(raw, raw, new Size(3, 3));

    // Line Segment Detection 2
    Mat linesM1 = new Mat();
    detector.detect(raw, linesM1);

    ArrayList<Line> lines = new ArrayList<Line>();
    for (int x = 0; x < linesM1.rows(); x++) {
        double[] vec = linesM1.get(x, 0);
        Point start = new Point(vec[0], vec[1]);
        Point end = new Point(vec[2], vec[3]);
        Line line = new Line(start, end);
        line = new Line(new Point((int) line.x1 * scale, (int) line.y1 * scale),
                new Point((int) line.x2 * scale, (int) line.y2 * scale));
        if (line.length() > minLength) lines.add(line);
    }

    raw.release();
    linesM1.release();
    return lines;
}
Example 5
Source File: FaceDetectionController.java From ExoVisix with MIT License | 5 votes |
/**
 * Method for face detection and tracking
 *
 * @param frame it looks for faces in this frame
 */
private void detectAndDisplay(Mat frame) {
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();

    // convert the frame in gray scale
    Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    // equalize the frame histogram to improve the result
    Imgproc.equalizeHist(grayFrame, grayFrame);

    // compute minimum face size (20% of the frame height, in our case)
    if (this.absoluteFaceSize == 0) {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0) {
            this.absoluteFaceSize = Math.round(height * 0.2f);
        }
    }

    // detect faces
    this.faceCascade.detectMultiScale(grayFrame, faces, 1.1, 2, 0 | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(this.absoluteFaceSize, this.absoluteFaceSize), new Size());

    // each rectangle in faces is a face: draw them!
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++) {
        Imgproc.rectangle(frame, facesArray[i].tl(), facesArray[i].br(), new Scalar(7, 255, 90), 4);
        System.out.println(facesArray[i].tl());
        System.out.println(facesArray[i].br());
    }
}
Example 6
Source File: HistogrammEqualization.java From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0 | 5 votes |
public PreProcessor preprocessImage(PreProcessor preProcessor) {
    List<Mat> images = preProcessor.getImages();
    List<Mat> processed = new ArrayList<Mat>();
    for (Mat img : images) {
        // equalizeHist expects an 8-bit, single-channel image
        img.convertTo(img, CvType.CV_8U);
        Imgproc.equalizeHist(img, img);
        processed.add(img);
    }
    preProcessor.setImages(processed);
    return preProcessor;
}
Example 7
Source File: AutoCalibrationManager.java From ShootOFF with GNU General Public License v3.0 | 5 votes |
@Override
public void process(Frame frame) {
    if (frame.getTimestamp() - lastFrameCheck < minimumInterval) return;

    lastFrameCheck = frame.getTimestamp();

    Imgproc.equalizeHist(frame.getOriginalMat(), frame.getOriginalMat());

    final List<MatOfPoint2f> listPatterns = findPatterns(frame.getOriginalMat(), true);

    if (listPatterns.isEmpty()) return;

    final Optional<Dimension2D> paperRes = findPaperPattern(frame.getOriginalMat(), listPatterns);

    if (paperRes.isPresent())
        ((StepFindPaperPattern) stepFindPaperPattern).addPaperDimensions(paperRes.get(), true);

    if (listPatterns.isEmpty()) return;

    // Technically there could still be more than one pattern
    // or even a pattern that is much too small
    // But damn if we're gonna fix every problem the user gives us
    final Optional<Bounds> bounds = calibrateFrame(listPatterns.get(0), frame.getOriginalMat());

    if (bounds.isPresent()) {
        boundsResult = bounds.get();
    } else {
        boundsResult = null;
    }
}
Example 8
Source File: MovementDetectionProcessor.java From Camdroid with Apache License 2.0 | 5 votes |
protected void execute() {
    out = gray();
    Imgproc.equalizeHist(out, out);

    // background subtraction (MOG) with a user-controlled learning rate
    synchronized (mog) {
        mog.apply(out, this.mask, (double) (-10 + learning_rate) / 10);
    }

    // dilate the foreground mask to close small gaps
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE, new Size(3, 3));
    Imgproc.dilate(mask, mask, kernel);

    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(this.mask, contours, new Mat(),
            Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // object size limits as a percentage of the frame height
    double maxheight = object_max_size * this.in.height() / 100;
    double minheight = object_min_size * this.in.height() / 100;

    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint contour = each.next();
        Rect rect = Imgproc.boundingRect(contour);
        if (rect.height > minheight && rect.height < maxheight) {
            Imgproc.rectangle(out, rect.tl(), rect.br(), new Scalar(255, 0, 0), 1);
        }
    }
}
Example 9
Source File: KMeansMatcher.java From mvisc with GNU General Public License v3.0 | 4 votes |
public void computeModel(ArrayList<MetaData> photos) {
    numPhotos = photos.size();
    model.setNumPhotos(numPhotos);

    MatOfKeyPoint[] keypoints = new MatOfKeyPoint[numPhotos];
    Mat[] descriptors = new Mat[numPhotos];
    Mat allDescriptors = new Mat();
    ArrayList<Integer> descriptorLabels = new ArrayList<Integer>();

    // compute keypoints and descriptors
    Mat currentImg = null;
    for (int a = 0; a < numPhotos; a++) {
        currentImg = Highgui.imread(photos.get(a).getZooName().toString(), 0);
        Imgproc.resize(currentImg, currentImg, new Size(150, 250));
        Imgproc.equalizeHist(currentImg, currentImg);
        Imgproc.threshold(currentImg, currentImg, 127, 255, Imgproc.THRESH_BINARY);

        // allocate output containers before detection and extraction
        keypoints[a] = new MatOfKeyPoint();
        descriptors[a] = new Mat();
        featureDetector.detect(currentImg, keypoints[a]);
        descriptorExtractor.compute(currentImg, keypoints[a], descriptors[a]);

        allDescriptors.push_back(descriptors[a]);
        for (int i = 0; i < descriptors[a].rows(); i++)
            descriptorLabels.add(a);
    }
    System.out.println("label size:" + descriptorLabels.size());

    Mat clusterLabels = new Mat();
    Mat centers = new Mat();

    // set up all descriptors, init criteria
    allDescriptors.convertTo(allDescriptors, CvType.CV_32F);
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 0.1);

    long before = System.currentTimeMillis();

    // compute clusters
    System.out.print("creating kmeans clusters...");
    Core.kmeans(allDescriptors, k, clusterLabels, criteria, 10, Core.KMEANS_PP_CENTERS, centers);
    System.out.println("done.");

    // map k-means centroid labels to descriptors of all images
    ArrayList<ArrayList<Integer>> clusterImageMap = new ArrayList<ArrayList<Integer>>();
    for (int nk = 0; nk < k + 1; nk++)
        clusterImageMap.add(new ArrayList<Integer>());
    for (int r = 0; r < clusterLabels.rows(); r++)
        clusterImageMap.get((int) clusterLabels.get(r, 0)[0]).add(descriptorLabels.get(r));

    model.setCentroids(centers);
    model.setLabels(clusterLabels);
    model.setClusterImageMap(clusterImageMap);
    model.setKeypoints(keypoints);
    model.setDescriptors(descriptors);
}
Example 10
Source File: FXController.java From Face-Recognition with Apache License 2.0 | 4 votes |
/**
 * Method for face detection and tracking
 *
 * @param frame it looks for faces in this frame
 */
private void detectAndDisplay(Mat frame) {
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();

    // convert the frame in gray scale
    Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    // equalize the frame histogram to improve the result
    Imgproc.equalizeHist(grayFrame, grayFrame);

    // compute minimum face size (20% of the frame height, in our case)
    if (this.absoluteFaceSize == 0) {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0) {
            this.absoluteFaceSize = Math.round(height * 0.2f);
        }
    }

    // detect faces
    this.faceCascade.detectMultiScale(grayFrame, faces, 1.1, 2, 0 | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(this.absoluteFaceSize, this.absoluteFaceSize), new Size());

    // each rectangle in faces is a face: draw them!
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++) {
        Imgproc.rectangle(frame, facesArray[i].tl(), facesArray[i].br(), new Scalar(0, 255, 0), 3);

        // Crop the detected faces
        Rect rectCrop = new Rect(facesArray[i].tl(), facesArray[i].br());
        Mat croppedImage = new Mat(frame, rectCrop);
        // Change to gray scale
        Imgproc.cvtColor(croppedImage, croppedImage, Imgproc.COLOR_BGR2GRAY);
        // Equalize histogram
        Imgproc.equalizeHist(croppedImage, croppedImage);
        // Resize the image to a default size
        Mat resizeImage = new Mat();
        Size size = new Size(250, 250);
        Imgproc.resize(croppedImage, resizeImage, size);

        // check if 'New user' checkbox is selected
        // if yes start collecting training data (50 images is enough)
        if ((newUser.isSelected() && !newname.isEmpty())) {
            if (index < 20) {
                Imgcodecs.imwrite("resources/trainingset/combined/" + random + "-" + newname + "_" + (index++) + ".png", resizeImage);
            }
        }

        double[] returnedResults = faceRecognition(resizeImage);
        double prediction = returnedResults[0];
        double confidence = returnedResults[1];

        int label = (int) prediction;
        String name = "";
        if (names.containsKey(label)) {
            name = names.get(label);
        } else {
            name = "Unknown";
        }

        // Create the text we will annotate the box with:
        String box_text = "Prediction = " + name + " Confidence = " + confidence;

        // Calculate the position for annotated text (make sure we don't
        // put illegal values in there):
        double pos_x = Math.max(facesArray[i].tl().x - 10, 0);
        double pos_y = Math.max(facesArray[i].tl().y - 10, 0);

        // And now put it into the image:
        Imgproc.putText(frame, box_text, new Point(pos_x, pos_y),
                Core.FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
    }
}
Example 11
Source File: AutoCalibrationManager.java From ShootOFF with GNU General Public License v3.0 | 4 votes |
public Mat prepTestFrame(BufferedImage frame) {
    final Mat mat = preProcessFrame(Camera.bufferedImageToMat(frame));

    Imgproc.equalizeHist(mat, mat);

    return mat;
}
Example 12
Source File: NormalizeGrayProcessor.java From Camdroid with Apache License 2.0 | 4 votes |
protected void execute() {
    out = gray();
    // equalize, then rescale gray values into the [min, max] range
    Imgproc.equalizeHist(out, out);
    Core.normalize(out, out, min, max, Core.NORM_MINMAX);
}
Example 13
Source File: ImgprocessUtils.java From classchecks with Apache License 2.0 | 3 votes |
/**
 * @Title: grayEqualizeHist
 * @Description: histogram equalization of a grayscale image
 * @param grayImg the grayscale input image
 * @return the equalized Mat
 */
public static Mat grayEqualizeHist(Mat grayImg) {
    //Mat gray = new Mat();
    //Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    Mat heqResult = new Mat();
    // equalize the histogram
    Imgproc.equalizeHist(grayImg, heqResult);
    return heqResult;
}