Java Code Examples for android.graphics.RectF#intersect()
The following examples show how to use
android.graphics.RectF#intersect().
You can vote up the examples you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: ProminentObjectProcessor.java From mlkit-material-android with Apache License 2.0 | 5 votes |
/**
 * Checks whether a detected object's bounding box overlaps the confirmation reticle
 * drawn at the center of the overlay.
 *
 * @param graphicOverlay overlay used to translate the object's bounding box into view coordinates
 * @param object the detected object whose (translated) bounding box is tested
 * @return true if the square circumscribing the reticle's outer ring overlaps the object's box
 */
private boolean objectBoxOverlapsConfirmationReticle(
    GraphicOverlay graphicOverlay, FirebaseVisionObject object) {
  RectF boxRect = graphicOverlay.translateRect(object.getBoundingBox());
  float reticleCenterX = graphicOverlay.getWidth() / 2f;
  float reticleCenterY = graphicOverlay.getHeight() / 2f;
  RectF reticleRect =
      new RectF(
          reticleCenterX - reticleOuterRingRadius,
          reticleCenterY - reticleOuterRingRadius,
          reticleCenterX + reticleOuterRingRadius,
          reticleCenterY + reticleOuterRingRadius);
  // Use the static RectF.intersects() overlap test: unlike the instance method
  // intersect(), it does not mutate either rectangle, making the pure-predicate
  // intent explicit. The boolean result is identical for this local temporary.
  return RectF.intersects(reticleRect, boxRect);
}
Example 2
Source File: OC_Wm1.java From GLEXP-Team-onebillion with Apache License 2.0 | 5 votes |
// Handles the touch-up event that ends a word drag: releases the gesture lock and,
// if a drag was in progress, tests whether the dragged word line landed on the
// target. On a hit the label is placed and checked on a background thread;
// otherwise the word animates back to its home position.
public void touchUpAtPoint(PointF pt,View v)
{
    finishLock.lock();
    // NOTE(review): presumably signals any waiter blocked until mouse-up
    // (MSE_UP) — confirm against the project's condition-lock semantics.
    finishLock.unlockWithCondition(MSE_UP);
    if(status() == STATUS_DRAGGING)
    {
        setStatus(STATUS_CHECKING);
        RectF r = new RectF(wordLine.frame);
        RectF tr = new RectF(target.frame);
        // Extend the word-line rect upward by the target's height so a drop
        // slightly above the target still registers as a hit.
        r.top -= tr.height();
        // intersect() also clips r to the overlap region, but r is a temporary
        // so only the boolean result matters here.
        if(r.intersect(tr))
        {
            OBUtils.runOnOtherThread(new OBUtils.RunLambda()
            {
                public void run() throws Exception
                {
                    placeLabel((OBLabel)target);
                    stage2Check();
                }
            });
        }
        else
        {
            goHome();
        }
        return;
    }
}
Example 3
Source File: FabTransformationBehavior.java From material-components-android with Apache License 2.0 | 5 votes |
private void calculateChildVisibleBoundsAtEndOfExpansion( @NonNull View child, @NonNull FabTransformationSpec spec, @NonNull MotionTiming translationXTiming, @NonNull MotionTiming translationYTiming, float fromX, float fromY, float toX, float toY, @NonNull RectF childBounds) { float translationX = calculateValueOfAnimationAtEndOfExpansion(spec, translationXTiming, fromX, toX); float translationY = calculateValueOfAnimationAtEndOfExpansion(spec, translationYTiming, fromY, toY); // Calculate the window bounds. Rect window = tmpRect; child.getWindowVisibleDisplayFrame(window); RectF windowF = tmpRectF1; windowF.set(window); // Calculate the visible bounds of the child given its translation and window bounds. RectF childVisibleBounds = tmpRectF2; calculateWindowBounds(child, childVisibleBounds); childVisibleBounds.offset(translationX, translationY); childVisibleBounds.intersect(windowF); childBounds.set(childVisibleBounds); }
Example 4
Source File: RCTCameraUtils.java From react-native-camera-face-detector with MIT License | 4 votes |
/** * Computes a Camera.Area corresponding to the new focus area to focus the camera on. This is * done by deriving a square around the center of a MotionEvent pointer (with side length equal * to FOCUS_AREA_MOTION_EVENT_EDGE_LENGTH), then transforming this rectangle's/square's * coordinates into the (-1000, 1000) coordinate system used for camera focus areas. * * Also note that we operate on RectF instances for the most part, to avoid any integer * division rounding errors going forward. We only round at the very end for playing into * the final focus areas list. * * @throws RuntimeException if unable to compute valid intersection between MotionEvent region * and SurfaceTexture region. */ protected static Camera.Area computeFocusAreaFromMotionEvent(final MotionEvent event, final int surfaceTextureWidth, final int surfaceTextureHeight) { // Get position of first touch pointer. final int pointerId = event.getPointerId(0); final int pointerIndex = event.findPointerIndex(pointerId); final float centerX = event.getX(pointerIndex); final float centerY = event.getY(pointerIndex); // Build event rect. Note that coordinates increase right and down, such that left <= right // and top <= bottom. final RectF eventRect = new RectF( centerX - FOCUS_AREA_MOTION_EVENT_EDGE_LENGTH, // left centerY - FOCUS_AREA_MOTION_EVENT_EDGE_LENGTH, // top centerX + FOCUS_AREA_MOTION_EVENT_EDGE_LENGTH, // right centerY + FOCUS_AREA_MOTION_EVENT_EDGE_LENGTH // bottom ); // Intersect this rect with the rect corresponding to the full area of the parent surface // texture, making sure we are not placing any amount of the eventRect outside the parent // surface's area. 
final RectF surfaceTextureRect = new RectF( (float) 0, // left (float) 0, // top (float) surfaceTextureWidth, // right (float) surfaceTextureHeight // bottom ); final boolean intersectSuccess = eventRect.intersect(surfaceTextureRect); if (!intersectSuccess) { throw new RuntimeException( "MotionEvent rect does not intersect with SurfaceTexture rect; unable to " + "compute focus area" ); } // Transform into (-1000, 1000) focus area coordinate system. See // https://developer.android.com/reference/android/hardware/Camera.Area.html. // Note that if this is ever changed to a Rect instead of RectF, be cautious of integer // division rounding! final RectF focusAreaRect = new RectF( (eventRect.left / surfaceTextureWidth) * 2000 - 1000, // left (eventRect.top / surfaceTextureHeight) * 2000 - 1000, // top (eventRect.right / surfaceTextureWidth) * 2000 - 1000, // right (eventRect.bottom / surfaceTextureHeight) * 2000 - 1000 // bottom ); Rect focusAreaRectRounded = new Rect(); focusAreaRect.round(focusAreaRectRounded); return new Camera.Area(focusAreaRectRounded, FOCUS_AREA_WEIGHT); }